From c56e07c82bbf34f12841187ee6799b8ca40bb1cd Mon Sep 17 00:00:00 2001
From: Denis
Date: Tue, 20 Aug 2024 06:30:12 +0300
Subject: [PATCH] Add Rust workflow

---
 .github/workflows/rust.yml | 42 ++++++++++++++++++++++++++++++++++++++
 README.md                  |  2 ++
 web_api/src/main.rs        |  8 ++++++--
 3 files changed, 50 insertions(+), 2 deletions(-)
 create mode 100644 .github/workflows/rust.yml

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
new file mode 100644
index 0000000..ca88e5c
--- /dev/null
+++ b/.github/workflows/rust.yml
@@ -0,0 +1,42 @@
+on: [push, pull_request]
+
+name: CI
+
+jobs:
+  check:
+    name: Check
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@1.75
+      - run: cargo check
+        working-directory: web_api
+
+  test:
+    name: Tests
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@1.75
+      - run: cargo test
+        working-directory: web_api
+
+  fmt:
+    name: Rustfmt
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@1.75
+      - run: rustup component add rustfmt
+      - run: cargo fmt --all -- --check
+        working-directory: web_api
+
+  clippy:
+    name: Clippy
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: dtolnay/rust-toolchain@1.75
+      - run: rustup component add clippy
+      - run: cargo clippy -- -D warnings
+        working-directory: web_api
diff --git a/README.md b/README.md
index eb17cd6..7a5b32f 100755
--- a/README.md
+++ b/README.md
@@ -1,3 +1,5 @@
+![Build Status](https://github.com/deniskore/py-ml-to-rs/actions/workflows/rust.yml/badge.svg)
+
 # Intro
 A friend of mine asked me to demonstrate how to load a model trained in Python into a Rust service. In response, this repository showcases the entire process of training a machine learning model to distinguish between various text encodings, achieving around 98.5% validation accuracy, using data sourced from the English Wiktionary. Subsequently, the trained model is seamlessly integrated into a Rust-based microservice, utilizing the ntex-rs. This implementation is streamlined with minimal dependencies, ensuring a lightweight and efficient service.
diff --git a/web_api/src/main.rs b/web_api/src/main.rs
index 2a2f207..dfdc771 100755
--- a/web_api/src/main.rs
+++ b/web_api/src/main.rs
@@ -11,8 +11,12 @@ mod model;
 #[ntex::main]
 async fn main() -> std::io::Result<()> {
     let model = Arc::new(
-        model::load_model("../../model/detector".to_string(), "predict_input", "predict_output")
-            .map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err))?,
+        model::load_model(
+            "../../model/detector".to_string(),
+            "predict_input",
+            "predict_output",
+        )
+        .map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err))?,
     );
 
     web::server(move || {
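
A possible follow-up, not included in this patch: each of the four jobs above builds the web_api crate from a cold start on every run. The sketch below shows how the Check job could reuse the Cargo cache between runs, assuming the third-party Swatinem/rust-cache action; the workspaces value pointing it at the web_api crate is likewise an assumption, not something taken from this repository.

  check:
    name: Check
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: dtolnay/rust-toolchain@1.75
      # Assumed third-party action: restores ~/.cargo and the crate's target/
      # directory between runs, keyed on the lockfile and toolchain.
      - uses: Swatinem/rust-cache@v2
        with:
          workspaces: web_api
      - run: cargo check
        working-directory: web_api

The same caching step would apply equally to the test and clippy jobs; the fmt job gains little from it, since rustfmt does not compile dependencies.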