diff --git a/.github/workflows/rust-ci.yml b/.github/workflows/rust-ci.yml
index 823b2d9..75cc98c 100644
--- a/.github/workflows/rust-ci.yml
+++ b/.github/workflows/rust-ci.yml
@@ -1,17 +1,17 @@
-name: ci
+name: Rust-CI
on:
push:
- branches: [ main, dev ]
+ branches: [ "main", "dev", "x", "xy" , "xyz" ]
pull_request:
- branches: [ main ]
+ branches: [ "main" ]
env:
CARGO_TERM_COLOR: always
-jobs:
- build-linux:
+jobs:
+ build-on-linux:
name: build / linux / ffmpeg ${{ matrix.ffmpeg_version }}
runs-on: ubuntu-latest
container: jrottenberg/ffmpeg:${{ matrix.ffmpeg_version }}-ubuntu
@@ -35,16 +35,10 @@ jobs:
with:
toolchain: stable
- - name: Set up Rust
- uses: actions-rs/toolchain@v1
- with:
- toolchain: stable
- override: true
-
- name: Build
run: cargo build
- build-macos:
+ build-on-macos:
name: build / macos / latest ffmpeg
runs-on: macos-latest
@@ -64,7 +58,8 @@ jobs:
- name: Build
run: cargo build
- build-windows:
+
+ build-on-windows:
name: build / windows / latest ffmpeg
runs-on: windows-latest
@@ -94,9 +89,16 @@ jobs:
- name: Build
run: cargo build
- test:
+
+ test-on-linux:
+ name: test / linux / ffmpeg ${{ matrix.ffmpeg_version }}
runs-on: ubuntu-latest
- container: jrottenberg/ffmpeg:6-ubuntu
+ container: jrottenberg/ffmpeg:${{ matrix.ffmpeg_version }}-ubuntu
+
+ strategy:
+ matrix:
+ ffmpeg_version: ["4.3", "4.4", "5.0", "5.1", "6.0", "6.1", "7.0"]
+ fail-fast: false
steps:
- name: Checkout
@@ -107,14 +109,97 @@ jobs:
apt update
apt install -y --no-install-recommends clang curl pkg-config
+ - name: Setup Rust
+ uses: dtolnay/rust-toolchain@stable
+ with:
+ toolchain: stable
+
+ - name: Run Tests with All Features
+ run: cargo test --all-features
+
+ - name: Run Tests in Release Mode
+ run: cargo test --release
+
+ - name: Run Tests with No Default Features
+ run: cargo test --no-default-features
+
+ - name: Run Tests on All Targets
+ run: cargo test --all-targets
+
+ test-on-macos:
+ name: test / macos / latest ffmpeg
+ runs-on: macos-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Install dependencies
+ run: |
+ brew install ffmpeg pkg-config
+
- name: Setup Rust
- uses: dtolnay/rust-toolchain@v1
+ uses: dtolnay/rust-toolchain@stable
with:
toolchain: stable
- - name: Test
+ - name: Run Tests with All Features
run: cargo test --all-features
+ - name: Run Tests in Release Mode
+ run: cargo test --release
+
+ - name: Run Tests with No Default Features
+ run: cargo test --no-default-features
+
+ - name: Run Tests on All Targets
+ run: cargo test --all-targets
+
+ test-on-windows:
+ name: test / windows / latest ffmpeg
+ runs-on: windows-latest
+
+ env:
+ FFMPEG_DOWNLOAD_URL: https://www.gyan.dev/ffmpeg/builds/ffmpeg-release-full-shared.7z
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+
+ - name: Install dependencies
+ run: |
+ # Point bindgen at the libclang bundled with Visual Studio's LLVM
+ $VCINSTALLDIR = $(& "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -latest -property installationPath)
+ Add-Content $env:GITHUB_ENV "LIBCLANG_PATH=${VCINSTALLDIR}\VC\Tools\LLVM\x64\bin`n"
+ # Download a prebuilt shared FFmpeg and expose it via FFMPEG_DIR and PATH
+ Invoke-WebRequest "${env:FFMPEG_DOWNLOAD_URL}" -OutFile ffmpeg-release-full-shared.7z
+ 7z x ffmpeg-release-full-shared.7z
+ mkdir ffmpeg
+ mv ffmpeg-*/* ffmpeg/
+ Add-Content $env:GITHUB_ENV "FFMPEG_DIR=${pwd}\ffmpeg`n"
+ Add-Content $env:GITHUB_PATH "${pwd}\ffmpeg\bin`n"
+
+ - name: Setup Rust
+ uses: dtolnay/rust-toolchain@stable
+ with:
+ toolchain: stable
+
+ - name: Run Tests with All Features
+ run: cargo test --all-features
+
+ - name: Run Tests in Release Mode
+ run: cargo test --release
+
+ - name: Run Tests with No Default Features
+ run: cargo test --no-default-features
+
+ - name: Run Tests on All Targets
+ run: cargo test --all-targets
+
lints:
runs-on: ubuntu-latest
container: jrottenberg/ffmpeg:6-ubuntu
@@ -138,4 +223,4 @@ jobs:
run: cargo fmt --all -- --check
- name: Clippy
- run: cargo clippy --all --all-features -- -D warnings
+ run: cargo clippy --all --all-targets --all-features -- -D warnings
diff --git a/src/lib.rs b/src/lib.rs
index cc17c6e..fd63333 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,27 +1,28 @@
-//! **`usls`** is a Rust library integrated with **ONNXRuntime** that provides a collection of state-of-the-art models for **Computer Vision** and **Vision-Language** tasks, including:
-
+//! **usls** is a Rust library integrated with **ONNXRuntime** that provides a collection of state-of-the-art models for **Computer Vision** and **Vision-Language** tasks, including:
+//!
//! - **YOLO Models**: [YOLOv5](https://github.com/ultralytics/yolov5), [YOLOv6](https://github.com/meituan/YOLOv6), [YOLOv7](https://github.com/WongKinYiu/yolov7), [YOLOv8](https://github.com/ultralytics/ultralytics), [YOLOv9](https://github.com/WongKinYiu/yolov9), [YOLOv10](https://github.com/THU-MIG/yolov10)
//! - **SAM Models**: [SAM](https://github.com/facebookresearch/segment-anything), [SAM2](https://github.com/facebookresearch/segment-anything-2), [MobileSAM](https://github.com/ChaoningZhang/MobileSAM), [EdgeSAM](https://github.com/chongzhou96/EdgeSAM), [SAM-HQ](https://github.com/SysCV/sam-hq), [FastSAM](https://github.com/CASIA-IVA-Lab/FastSAM)
//! - **Vision Models**: [RTDETR](https://arxiv.org/abs/2304.08069), [RTMO](https://github.com/open-mmlab/mmpose/tree/main/projects/rtmo), [DB](https://arxiv.org/abs/1911.08947), [SVTR](https://arxiv.org/abs/2205.00159), [Depth-Anything-v1-v2](https://github.com/LiheYoung/Depth-Anything), [DINOv2](https://github.com/facebookresearch/dinov2), [MODNet](https://github.com/ZHKKKe/MODNet), [Sapiens](https://arxiv.org/abs/2408.12569)
//! - **Vision-Language Models**: [CLIP](https://github.com/openai/CLIP), [BLIP](https://arxiv.org/abs/2201.12086), [GroundingDINO](https://github.com/IDEA-Research/GroundingDINO), [YOLO-World](https://github.com/AILab-CVC/YOLO-World)
//!
+//! # Examples
//!
+//! Refer to [All Runnable Demos](https://github.com/jamjamjon/usls/tree/main/examples)
//!
//! # Quick Start
//!
-//! The following demo shows how to build model to run and annotate the results.
+//! The following demo shows how to build a `YOLO` model with [`Options`], load images, videos, and streams with [`DataLoader`], and annotate the model's inference results with [`Annotator`].
//!
-//! ```rust, no_run
+//! ```ignore
//! use usls::{models::YOLO, Annotator, DataLoader, Options, Vision, YOLOTask, YOLOVersion};
//!
-//!
//! fn main() -> anyhow::Result<()> {
//! // Build model with Options
//! let options = Options::new()
//! .with_trt(0)
//! .with_model("yolo/v8-m-dyn.onnx")?
-//! .with_yolo_version(YOLOVersion::V8) // YOLOVersion: V5, V6, V7, V8, V9, V10, RTDETR
-//! .with_yolo_task(YOLOTask::Detect) // YOLOTask: Classify, Detect, Pose, Segment, Obb
+//! .with_yolo_version(YOLOVersion::V8) // YOLOVersion: V5, V6, V7, V8, V9, V10, RTDETR
+//! .with_yolo_task(YOLOTask::Detect) // YOLOTask: Classify, Detect, Pose, Segment, Obb
//! .with_i00((1, 1, 4).into())
//! .with_i02((0, 640, 640).into())
//! .with_i03((0, 640, 640).into())
@@ -41,7 +42,7 @@
//! .build()?;
//!
//! // Build annotator
-//! let annotator = Annotator::new().with_saveout("YOLO-DataLoader");
+//! let annotator = Annotator::new().with_saveout("YOLO-Demo");
//!
//! // Run and Annotate images
//! for (xs, _) in dl {
@@ -53,21 +54,18 @@
//! }
//! ```
//!
-//! Refer to [All Demos Here](https://github.com/jamjamjon/usls/tree/main/examples)
-//!
-//!
-//!
-//! # How to use Provided Models for Inference
+//!
+//! # What's More
//!
-//! #### 1. Build Model
+//! This guide covers how to use the provided models for inference: building a model, loading data, annotating the results, and retrieving the outputs. The sections below walk through each step.
//!
//!
-//! Click to expand
+//! Build the Model
//!
-//! Using provided [`models`] with [`Options`]
+//! To build a model, use the provided [`models`] with [`Options`]:
//!
-//! ```rust, no_run
-//! use usls::{ models::YOLO, Annotator, DataLoader, Options, Vision};
+//! ```ignore
+//! use usls::{models::YOLO, Options, Vision, YOLOTask, YOLOVersion};
//!
//! let options = Options::default()
//! .with_yolo_version(YOLOVersion::V8) // YOLOVersion: V5, V6, V7, V8, V9, V10, RTDETR
@@ -76,9 +74,12 @@
//! let mut model = YOLO::new(options)?;
//! ```
//!
-//! - Choose Execute Provider: `CUDA`(by default), `TensorRT`, or `CoreML`
+//! **[`Options`] provides many more settings:**
//!
-//! ```rust, no_run
+//! - **Choose Execution Provider:**
+//! Select `CUDA` (default), `TensorRT`, or `CoreML`:
+//!
+//! ```ignore
//! let options = Options::default()
//! .with_cuda(0)
//! // .with_trt(0)
@@ -86,157 +87,136 @@
//! // .with_cpu();
//! ```
//!
-//! - Dynamic Input Shapes
-//! If your model has dynamic shapes, you need pre-specified it with [`MinOptMax`].
-//!
-//! `with_ixy()` means the y-th axis of the x-th input. e.g., `i00` is the first axis of the 1st input, batch usually
+//! - **Dynamic Input Shapes:**
+//!   If your model has dynamic input shapes, pre-specify them with [`MinOptMax`]. `with_ixy()` sets the y-th axis of the x-th input; e.g., `i00` is the first axis of the first input, usually the batch:
//!
-//! ```rust, no_run
+//! ```ignore
//! let options = Options::default()
//! .with_i00((1, 2, 4).into()) // batch(min=1, opt=2, max=4)
//! .with_i02((416, 640, 800).into()) // height(min=416, opt=640, max=800)
//! .with_i03((416, 640, 800).into()); // width(min=416, opt=640, max=800)
//! ```
//!
-//! - Set Confidence Thresholds for Each Category
+//! - **Set Confidence Thresholds:**
+//! Adjust thresholds for each category:
//!
-//! ```rust, no_run
+//! ```ignore
//! let options = Options::default()
//! .with_confs(&[0.4, 0.15]); // class_0: 0.4, others: 0.15
//! ```
//!
-//! - [Optional] Set Class Names
+//! - **Set Class Names:**
+//! Provide class names if needed:
//!
-//! ```rust, no_run
+//! ```ignore
//! let options = Options::default()
//! .with_names(&COCO_CLASS_NAMES_80);
//! ```
//!
-//! More options can be found in the [`Options`] documentation.
+//! **More options are detailed in the [`Options`] documentation.**
+//!
//!
//!
//!
-//!
-//! #### 2. Use [`DataLoader`] to load `Image(s)`, `Video` and `Stream`
-//!
//!
-//! Click to expand
-//!
-//! - Use [`DataLoader::try_read`] to laod single image
+//! Load Images, Videos, and Streams
//!
-//! You can now load image from local file or remote(Github Release Page)
+//! - **Load a Single Image:**
+//!   Use [`DataLoader::try_read`] to load an image from a local file or remote source:
//!
-//! ```rust, no_run
+//! ```ignore
//! let x = DataLoader::try_read("./assets/bus.jpg")?; // from local
//! let x = DataLoader::try_read("images/bus.jpg")?; // from remote
//! ```
//!
-//! Of course You can directly use [`image::ImageReader`]
+//! Alternatively, use [`image::ImageReader`] directly:
//!
-//! ```rust, no_run
+//! ```ignore
//! let x = image::ImageReader::open("myimage.png")?.decode()?;
//! ```
//!
-//! - Use [`DataLoader] to load image(s), video, stream
-//!
-//! ```rust, no_run
-//! // Build DataLoader
-//! let dl = DataLoader::new(
-//! "./assets/bus.jpg", // local image
-//! // "images/bus.jpg", // remote image
-//! // "../set-negs", // local images (from folder)
-//! // "../hall.mp4", // local video
-//! // "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4", // remote video
-//! // "rtsp://admin:kkasd1234@192.168.2.217:554/h264/ch1/", // stream
-//! )?
-//! .with_batch(3) // iterate with batch_size = 3
-//! .build()?;
-//!
-//! // iterate
-//! for (xs, _) in dl {}
+//! - **Load Multiple Images, Videos, or Streams:**
+//!   Create a [`DataLoader`] instance for batch processing:
+//!
+//! ```ignore
+//! let dl = DataLoader::new(
+//! "./assets/bus.jpg", // local image
+//! // "images/bus.jpg", // remote image
+//! // "../set-negs", // local images (from folder)
+//! // "../hall.mp4", // local video
+//! // "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4", // remote video
+//! // "rtsp://admin:kkasd1234@192.168.2.217:554/h264/ch1/", // stream
+//! )?
+//! .with_batch(3) // iterate with batch_size = 3
+//! .build()?;
+//!
+//! // Iterate through the data
+//! for (xs, _) in dl {}
//! ```
//!
-//! - Use [`DataLoader::is2v`] to convert images into a video
+//! - **Convert Images to Video:**
+//!   Use [`DataLoader::is2v`] to create a video from a sequence of images:
//!
-//! ```rust, no_run
-//! let fps = 24;
-//! let image_folder = "runs/YOLO-DataLoader";
-//! let saveout = ["runs", "is2v"];
-//! let fps = 24;
-//! DataLoader::is2v(image_folder, &saveout, 24)?;
+//! ```ignore
+//! let fps = 24;
+//! let image_folder = "runs/YOLO-DataLoader";
+//! let saveout = ["runs", "is2v"];
+//! DataLoader::is2v(image_folder, &saveout, fps)?;
//! ```
//!
-//!
//!
//!
-//!
-//!
-//! #### 3. Use [`Annotator`] to annotate images
-//!
-//!
//!
-//! Click to expand
-//!
+//! Annotate Inference Results
//!
+//! - **Create an Annotator Instance:**
//!
-//! ```rust, no_run
+//! ```ignore
//! let annotator = Annotator::default();
//! ```
//!
-//! - Set Saveout Name
+//! - **Set Saveout Name:**
//!
-//! ```rust, no_run
+//! ```ignore
//! let annotator = Annotator::default()
//! .with_saveout("YOLOs");
//! ```
-//!
-//! - Set Bboxes Line Width
//!
-//! ```rust, no_run
+//! - **Set Bounding Box Line Width:**
+//!
+//! ```ignore
//! let annotator = Annotator::default()
//! .with_bboxes_thickness(4);
//! ```
-//!
-//! - Disable Mask Plotting
-//!
-//! ```rust, no_run
+//!
+//! - **Disable Mask Plotting:**
+//!
+//! ```ignore
//! let annotator = Annotator::default()
//! .without_masks(true);
//! ```
-//!
-//! More options can be found in the [`Annotator`] documentation.
-//!
-//!
-//!
//!
+//! - **Perform Inference and Annotate the Results:**
//!
-//!
-//! #### 4. Run and Annotate
-//!
-//!
-//! Click to expand
-//!
-//!
-//! ```rust, no_run
+//! ```ignore
//! for (xs, _paths) in dl {
//! let ys = model.run(&xs)?;
//! annotator.annotate(&xs, &ys);
//! }
//! ```
//!
-//!
-//!
+//! More options are detailed in the [`Annotator`] documentation.
//!
-//! #### 5. Get Results
+//!
//!
//!
-//! Click to expand
+//! Retrieve the Model's Inference Results
//!
-//! The inference outputs of provided models will be saved to a [`Vec`].
+//! Retrieve the inference outputs, which are saved in a [`Vec`]:
//!
-//! - For Example, Get Detection Bboxes with `y.bboxes()`
+//! - **Get Detection Bounding Boxes:**
//!
-//! ```rust, no_run
+//! ```ignore
//! let ys = model.run(&xs)?;
//! for y in ys {
//! // bboxes
@@ -254,16 +234,18 @@
//! }
//! }
//! }
+//! ```
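+//!
+//! - **Get Other Task Outputs:**
+//!   Other task results are read the same way. A minimal sketch, assuming `Y` exposes a `probs()` accessor for classification results alongside `bboxes()` (check the `Y` docs for the exact accessor names):
+//!
+//! ```ignore
+//! let ys = model.run(&xs)?;
+//! for y in ys {
+//!     // Classification probabilities, if the model produced any
+//!     if let Some(probs) = y.probs() {
+//!         println!("{:?}", probs);
+//!     }
+//! }
+//! ```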
//!
-//!
+//!
//!
-//! ```
+//!
+//! Custom Model Implementation
//!
-//! # Also, You Can Implement Your Own Model with [`OrtEngine`] and [`Options`]
+//! You can also implement your own model using [`OrtEngine`] and [`Options`]. [`OrtEngine`] handles ONNX model loading, metadata parsing, `dry_run`, inference, and more, with execution providers such as CUDA, TensorRT, and CoreML.
//!
-//! [`OrtEngine`] provides ONNX model loading, metadata parsing, dry_run, inference, and other functions, supporting EPs such as CUDA, TensorRT, CoreML, etc. You can use it as the ONNXRuntime engine for building models.
+//! For more details, refer to the [Demo: Depth-Anything](https://github.com/jamjamjon/usls/blob/main/src/models/depth_anything.rs).
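+//!
+//! As a starting point, here is a minimal skeleton sketch. The exact [`OrtEngine`] method names used below (`new`, `batch`, `height`, `width`, `dry_run`) are assumptions modeled on the Depth-Anything demo linked above, so verify them against that source:
+//!
+//! ```ignore
+//! use usls::{MinOptMax, Options, OrtEngine};
+//!
+//! pub struct MyModel {
+//!     engine: OrtEngine,
+//!     batch: MinOptMax,
+//!     height: MinOptMax,
+//!     width: MinOptMax,
+//! }
+//!
+//! impl MyModel {
+//!     pub fn new(options: Options) -> anyhow::Result<Self> {
+//!         let mut engine = OrtEngine::new(&options)?;
+//!         // Read the (possibly dynamic) input dimensions parsed from the ONNX metadata
+//!         let (batch, height, width) = (
+//!             engine.batch().to_owned(),
+//!             engine.height().to_owned(),
+//!             engine.width().to_owned(),
+//!         );
+//!         // Warm up the session before real inference
+//!         engine.dry_run()?;
+//!         Ok(Self { engine, batch, height, width })
+//!     }
+//! }
+//! ```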
//!
-//! Refer to [Demo: Depth-Anything](https://github.com/jamjamjon/usls/blob/main/src/models/depth_anything.rs) for more details.
+//!
mod core;
pub mod models;
diff --git a/src/ys/y.rs b/src/ys/y.rs
index c116a58..d7b8827 100644
--- a/src/ys/y.rs
+++ b/src/ys/y.rs
@@ -95,13 +95,6 @@ impl Y {
/// # Returns
///
/// * `Self` - The updated struct instance with the new probabilities set.
- ///
- /// # Examples
- ///
- /// ```
- /// let probs = Prob::default();
- /// let y = Y::default().with_probs(&probs);
- /// ```
pub fn with_probs(mut self, probs: &Prob) -> Self {
self.probs = Some(probs.clone());
self