diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml
index 05408fd6..cfe3009b 100644
--- a/.github/workflows/links.yml
+++ b/.github/workflows/links.yml
@@ -33,7 +33,21 @@ jobs:
timeout_minutes: 5
retry_wait_seconds: 60
max_attempts: 3
- command: lychee --accept 403,429,500,502,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html'
+ command: |
+ lychee --accept 403,429,500,502,999 \
+ --exclude-loopback \
+ --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \
+ --exclude-path docs/zh \
+ --exclude-path docs/es \
+ --exclude-path docs/ru \
+ --exclude-path docs/pt \
+ --exclude-path docs/fr \
+ --exclude-path docs/de \
+ --exclude-path docs/ja \
+ --exclude-path docs/ko \
+ --exclude-mail \
+ --github-token ${{ secrets.GITHUB_TOKEN }} \
+ './**/*.md' './**/*.html'
- name: Test Markdown, HTML, YAML, Python and Notebook links with retry
if: github.event_name == 'workflow_dispatch'
@@ -42,4 +56,19 @@ jobs:
timeout_minutes: 5
retry_wait_seconds: 60
max_attempts: 3
- command: lychee --accept 429,999 --exclude-loopback --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' --exclude-path '**/ci.yaml' --exclude-mail --github-token ${{ secrets.GITHUB_TOKEN }} './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb'
+ command: |
+ lychee --accept 429,999 \
+ --exclude-loopback \
+ --exclude 'https?://(www\.)?(linkedin\.com|twitter\.com|instagram\.com|kaggle\.com|fonts\.gstatic\.com|url\.com)' \
+ --exclude-path '**/ci.yaml' \
+ --exclude-path docs/zh \
+ --exclude-path docs/es \
+ --exclude-path docs/ru \
+ --exclude-path docs/pt \
+ --exclude-path docs/fr \
+ --exclude-path docs/de \
+ --exclude-path docs/ja \
+ --exclude-path docs/ko \
+ --exclude-mail \
+ --github-token ${{ secrets.GITHUB_TOKEN }} \
+ './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb'
diff --git a/docs/en/guides/triton-inference-server.md b/docs/en/guides/triton-inference-server.md
index 0be56438..9ccc88d0 100644
--- a/docs/en/guides/triton-inference-server.md
+++ b/docs/en/guides/triton-inference-server.md
@@ -78,7 +78,7 @@ The Triton Model Repository is a storage location where Triton can access and lo
Path(onnx_file).rename(triton_model_path / '1' / 'model.onnx')
# Create config file
- (triton_model_path / 'config.pdtxt').touch()
+ (triton_model_path / 'config.pbtxt').touch()
```
## Running Triton Inference Server
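For reference, Triton only loads a model configuration from the exact filename `config.pbtxt`, which is why the `pdtxt` typo above silently produced an unused file. A minimal sketch of the repository layout this guide builds, assuming the guide's surrounding `triton_repo_path` setup:

```python
from pathlib import Path

from ultralytics import YOLO

# Export YOLOv8n to ONNX; export() returns the path of the created file
onnx_file = YOLO('yolov8n.pt').export(format='onnx', dynamic=True)

# Assumed repository location -- Triton expects <repo>/<model>/<version>/model.onnx
triton_model_path = Path('tmp/triton_repo/yolo')
(triton_model_path / '1').mkdir(parents=True, exist_ok=True)
Path(onnx_file).rename(triton_model_path / '1' / 'model.onnx')

# An empty config.pbtxt lets Triton auto-generate the model configuration
(triton_model_path / 'config.pbtxt').touch()
```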
diff --git a/docs/en/hub/integrations.md b/docs/en/hub/integrations.md
index 162a0f02..271c5e4c 100644
--- a/docs/en/hub/integrations.md
+++ b/docs/en/hub/integrations.md
@@ -34,10 +34,10 @@ Welcome to the Integrations guide for [Ultralytics HUB](https://hub.ultralytics.
| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - |
| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize` |
| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
-| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
+| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8` |
| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
-| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
+| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8` |
| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz` |
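The `int8` entries added to this table (and repeated in the identical tables below) map to the export API's INT8 quantization flag; a short usage sketch:

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')

# INT8 quantization now documented for OpenVINO and TF SavedModel exports
model.export(format='openvino', int8=True)  # creates yolov8n_openvino_model/
model.export(format='saved_model', int8=True)  # creates yolov8n_saved_model/
```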
diff --git a/docs/en/integrations/index.md b/docs/en/integrations/index.md
index 3ee24ef0..e16742b1 100644
--- a/docs/en/integrations/index.md
+++ b/docs/en/integrations/index.md
@@ -47,10 +47,10 @@ We also support a variety of model export formats for deployment in different en
| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - |
| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize` |
| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
-| [OpenVINO](openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
+| [OpenVINO](openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8` |
| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
-| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
+| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8` |
| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz` |
diff --git a/docs/en/modes/benchmark.md b/docs/en/modes/benchmark.md
index cdee300f..83cbc7ee 100644
--- a/docs/en/modes/benchmark.md
+++ b/docs/en/modes/benchmark.md
@@ -80,10 +80,10 @@ Benchmarks will attempt to run automatically on all possible export formats belo
| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - |
| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize` |
| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
-| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
+| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8` |
| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
-| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
+| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8` |
| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz` |
diff --git a/docs/en/modes/export.md b/docs/en/modes/export.md
index d0c2da00..5e19b27b 100644
--- a/docs/en/modes/export.md
+++ b/docs/en/modes/export.md
@@ -96,10 +96,10 @@ Available YOLOv8 export formats are in the table below. You can export to any fo
| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - |
| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize` |
| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
-| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
+| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8` |
| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
-| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
+| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8` |
| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz` |
diff --git a/docs/en/tasks/detect.md b/docs/en/tasks/detect.md
index 40c52ba1..bed738a3 100644
--- a/docs/en/tasks/detect.md
+++ b/docs/en/tasks/detect.md
@@ -171,10 +171,10 @@ Available YOLOv8 export formats are in the table below. You can predict or valid
| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - |
| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize` |
| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
-| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
+| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8` |
| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
-| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
+| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8` |
| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz` |
diff --git a/docs/en/usage/cli.md b/docs/en/usage/cli.md
index 1851d728..a4bdffca 100644
--- a/docs/en/usage/cli.md
+++ b/docs/en/usage/cli.md
@@ -175,10 +175,10 @@ Available YOLOv8 export formats are in the table below. You can export to any fo
| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - |
| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize` |
| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |
-| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |
+| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8` |
| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |
| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |
-| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |
+| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8` |
| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |
| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |
| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz` |
diff --git a/docs/en/yolov5/index.md b/docs/en/yolov5/index.md
index 9fb8e381..329c3641 100644
--- a/docs/en/yolov5/index.md
+++ b/docs/en/yolov5/index.md
@@ -22,7 +22,7 @@ keywords: Ultralytics, YOLOv5, Deep Learning, Object detection, PyTorch, Tutoria
-Welcome to the Ultralytics' [YOLOv5](https://github.com/ultralytics/yolov5) 🚀 Documentation! YOLOv5, the fifth iteration of the revolutionary "You Only Look Once" object detection model, is designed to deliver high-speed, high-accuracy results in real-time.
+Welcome to the Ultralytics YOLOv5 🚀 Documentation! YOLOv5, the fifth iteration of the revolutionary "You Only Look Once" object detection model, is designed to deliver high-speed, high-accuracy results in real-time.
Built on PyTorch, this powerful deep learning framework has garnered immense popularity for its versatility, ease of use, and high performance. Our documentation guides you through the installation process, explains the architectural nuances of the model, showcases various use-cases, and provides a series of detailed tutorials. These resources will help you harness the full potential of YOLOv5 for your computer vision projects. Let's get started!
diff --git a/docs/overrides/javascript/extra.js b/docs/overrides/javascript/extra.js
index 2da39775..b3c6f9b7 100644
--- a/docs/overrides/javascript/extra.js
+++ b/docs/overrides/javascript/extra.js
@@ -16,8 +16,18 @@ const applyAutoTheme = () => {
// Function that checks and applies light/dark theme based on the user's preference (if auto theme is enabled)
function checkAutoTheme() {
- // Retrieve the palette from local storage
- const palette = localStorage.getItem("/.__palette");
+ // Array of supported language codes -> each language has its own palette (stored in local storage)
+ const supportedLangCodes = ["en", "zh", "ko", "ja", "ru", "de", "fr", "es", "pt"];
+ // Get the URL path
+ const path = window.location.pathname;
+ // Extract the language code from the URL (assuming it's in the format /xx/...)
+ const langCode = path.split("/")[1];
+ // Check if the extracted language code is in the supported languages
+ const isValidLangCode = supportedLangCodes.includes(langCode);
+ // Construct the local storage key based on the language code if valid, otherwise default to the root key
+ const localStorageKey = isValidLangCode ? `/${langCode}/.__palette` : "/.__palette";
+ // Retrieve the palette from local storage using the constructed key
+ const palette = localStorage.getItem(localStorageKey);
if (palette) {
// Check if the palette's index is 0 (auto theme)
const paletteObj = JSON.parse(palette);
@@ -26,9 +36,11 @@ function checkAutoTheme() {
}
}
}
-// ! No need to run the function when the script loads as by default the theme is determined by the user's preference (if auto theme is enabled)
-// checkAutoTheme();
-// Run the function when the user's preference changes (when the user changes their system theme)
+
+// Run function when the script loads
+checkAutoTheme();
+
+// Re-run the function when the user's preference changes (when the user changes their system theme)
window.matchMedia("(prefers-color-scheme: light)").addEventListener("change", checkAutoTheme);
window.matchMedia("(prefers-color-scheme: dark)").addEventListener("change", checkAutoTheme);
diff --git a/docs/overrides/stylesheets/style.css b/docs/overrides/stylesheets/style.css
index 51514ad9..908aa7ca 100644
--- a/docs/overrides/stylesheets/style.css
+++ b/docs/overrides/stylesheets/style.css
@@ -1,41 +1,42 @@
/* Table format like GitHub ----------------------------------------------------------------------------------------- */
-th, td {
- border: 1px solid var(--md-typeset-table-color);
- border-spacing: 0;
- border-bottom: none;
- border-left: none;
- border-top: none;
+th,
+td {
+ border: 1px solid var(--md-typeset-table-color);
+ border-spacing: 0;
+ border-bottom: none;
+ border-left: none;
+ border-top: none;
}
.md-typeset__table {
- line-height: 1;
+ line-height: 1;
}
.md-typeset__table table:not([class]) {
- font-size: .74rem;
- border-right: none;
+ font-size: 0.74rem;
+ border-right: none;
}
.md-typeset__table table:not([class]) td,
.md-typeset__table table:not([class]) th {
- padding: 9px;
+ padding: 9px;
}
/* light mode alternating table bg colors */
.md-typeset__table tr:nth-child(2n) {
- background-color: #f8f8f8;
+ background-color: #f6f8fa;
}
/* dark mode alternating table bg colors */
-body.md-theme--slate .md-typeset__table tr:nth-child(2n) {
- background-color: hsla(207, 26%, 17%, 1);
+[data-md-color-scheme="slate"] .md-typeset__table tr:nth-child(2n) {
+ background-color: #161b22;
}
/* Table format like GitHub ----------------------------------------------------------------------------------------- */
/* Code block vertical scroll */
div.highlight {
- max-height: 20rem;
- overflow-y: auto; /* for adding a scrollbar when needed */
+ max-height: 20rem;
+ overflow-y: auto; /* for adding a scrollbar when needed */
}
/* Set content width */
diff --git a/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp b/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp
index 7e9d0aa5..a2de7727 100644
--- a/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp
+++ b/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp
@@ -39,14 +39,32 @@ char *BlobFromImage(cv::Mat &iImg, T &iBlob) {
}
-char *PostProcess(cv::Mat &iImg, std::vector<int> iImgSize, cv::Mat &oImg) {
- cv::Mat img = iImg.clone();
- cv::resize(iImg, oImg, cv::Size(iImgSize.at(0), iImgSize.at(1)));
- if (img.channels() == 1) {
- cv::cvtColor(oImg, oImg, cv::COLOR_GRAY2BGR);
- }
- cv::cvtColor(oImg, oImg, cv::COLOR_BGR2RGB);
- return RET_OK;
+char* DCSP_CORE::PreProcess(cv::Mat& iImg, std::vector<int> iImgSize, cv::Mat& oImg)
+{
+ if (iImg.channels() == 3)
+ {
+ oImg = iImg.clone();
+ cv::cvtColor(oImg, oImg, cv::COLOR_BGR2RGB);
+ }
+ else
+ {
+ cv::cvtColor(iImg, oImg, cv::COLOR_GRAY2RGB);
+ }
+
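+ // Letterbox resize: scale by the longer side so the aspect ratio is preserved;
+ // resizeScales is kept so TensorProcess can map boxes back to the original image.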
+ if (iImg.cols >= iImg.rows)
+ {
+ resizeScales = iImg.cols / (float)iImgSize.at(0);
+ cv::resize(oImg, oImg, cv::Size(iImgSize.at(0), int(iImg.rows / resizeScales)));
+ }
+ else
+ {
+ resizeScales = iImg.rows / (float)iImgSize.at(0);
+ cv::resize(oImg, oImg, cv::Size(int(iImg.cols / resizeScales), iImgSize.at(1)));
+ }
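+ // Pad the short side with black pixels so the network always receives a square input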
+ cv::Mat tempImg = cv::Mat::zeros(iImgSize.at(0), iImgSize.at(1), CV_8UC3);
+ oImg.copyTo(tempImg(cv::Rect(0, 0, oImg.cols, oImg.rows)));
+ oImg = tempImg;
+ return RET_OK;
}
@@ -127,7 +145,7 @@ char *DCSP_CORE::RunSession(cv::Mat &iImg, std::vector<DCSP_RESULT> &oResult) {
char *Ret = RET_OK;
cv::Mat processedImg;
- PostProcess(iImg, imgSize, processedImg);
+ PreProcess(iImg, imgSize, processedImg);
if (modelType < 4) {
float *blob = new float[processedImg.total() * 3];
BlobFromImage(processedImg, blob);
@@ -188,8 +206,6 @@ char *DCSP_CORE::TensorProcess(clock_t &starttime_1, cv::Mat &iImg, N &blob, std::vector<int64_t> &inputNodeDims,
rawData = rawData.t();
float *data = (float *) rawData.data;
- float x_factor = iImg.cols / 640.;
- float y_factor = iImg.rows / 640.;
for (int i = 0; i < strideNum; ++i) {
float *classesScores = data + 4;
cv::Mat scores(1, this->classes.size(), CV_32FC1, classesScores);
@@ -205,11 +221,11 @@ char *DCSP_CORE::TensorProcess(clock_t &starttime_1, cv::Mat &iImg, N &blob, std::vector<int64_t> &inputNodeDims,
float w = data[2];
float h = data[3];
- int left = int((x - 0.5 * w) * x_factor);
- int top = int((y - 0.5 * h) * y_factor);
+ int left = int((x - 0.5 * w) * resizeScales);
+ int top = int((y - 0.5 * h) * resizeScales);
- int width = int(w * x_factor);
- int height = int(h * y_factor);
+ int width = int(w * resizeScales);
+ int height = int(h * resizeScales);
boxes.emplace_back(left, top, width, height);
}
@@ -254,7 +270,7 @@ char *DCSP_CORE::WarmUpSession() {
clock_t starttime_1 = clock();
cv::Mat iImg = cv::Mat(cv::Size(imgSize.at(0), imgSize.at(1)), CV_8UC3);
cv::Mat processedImg;
- PostProcess(iImg, imgSize, processedImg);
+ PreProcess(iImg, imgSize, processedImg);
if (modelType < 4) {
float *blob = new float[iImg.total() * 3];
BlobFromImage(processedImg, blob);
diff --git a/examples/YOLOv8-ONNXRuntime-CPP/inference.h b/examples/YOLOv8-ONNXRuntime-CPP/inference.h
index fe2c5a09..bd85e783 100644
--- a/examples/YOLOv8-ONNXRuntime-CPP/inference.h
+++ b/examples/YOLOv8-ONNXRuntime-CPP/inference.h
@@ -67,6 +67,8 @@ public:
char *TensorProcess(clock_t &starttime_1, cv::Mat &iImg, N &blob, std::vector<int64_t> &inputNodeDims,
std::vector<DCSP_RESULT> &oResult);
+ char* PreProcess(cv::Mat& iImg, std::vector<int> iImgSize, cv::Mat& oImg);
+
std::vector<std::string> classes{};
private:
@@ -81,4 +83,5 @@ private:
std::vector<int> imgSize;
float rectConfidenceThreshold;
float iouThreshold;
+ float resizeScales; // letterbox scale
};
diff --git a/examples/tutorial.ipynb b/examples/tutorial.ipynb
index 19eefd3e..b287e810 100644
--- a/examples/tutorial.ipynb
+++ b/examples/tutorial.ipynb
@@ -349,10 +349,10 @@
"| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - |\n",
"| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize` |\n",
"| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset` |\n",
- "| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half` |\n",
+ "| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8` |\n",
"| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace` |\n",
"| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms` |\n",
- "| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras` |\n",
+ "| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8` |\n",
"| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | `imgsz` |\n",
"| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8` |\n",
"| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz` |\n",
diff --git a/tests/test_integrations.py b/tests/test_integrations.py
index 896a6281..34d90c7b 100644
--- a/tests/test_integrations.py
+++ b/tests/test_integrations.py
@@ -55,7 +55,7 @@ def test_triton():
# Prepare Triton repo
(triton_model_path / '1').mkdir(parents=True, exist_ok=True)
Path(f).rename(triton_model_path / '1' / 'model.onnx')
- (triton_model_path / 'config.pdtxt').touch()
+ (triton_model_path / 'config.pbtxt').touch()
# Define image https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver
tag = 'nvcr.io/nvidia/tritonserver:23.09-py3' # 6.4 GB
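Once the repository is prepared, the rest of the test serves it with that container and exercises the HTTP client path; a hedged sketch of that flow (the container-lifecycle handling in the real test is abbreviated here, and the repo path is illustrative):

```python
import subprocess
from pathlib import Path

from ultralytics import YOLO

tag = 'nvcr.io/nvidia/tritonserver:23.09-py3'  # same image referenced above
triton_repo_path = Path('tmp/triton_repo').resolve()  # assumed repo location

# Serve the prepared model repository in the background
subprocess.call(
    f'docker run -d --rm -v {triton_repo_path}:/models -p 8000:8000 {tag} '
    'tritonserver --model-repository=/models',
    shell=True)

# Query the served 'yolo' model through the YOLO client's HTTP URL scheme
model = YOLO('http://localhost:8000/yolo', task='detect')
model('https://ultralytics.com/images/bus.jpg')
```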
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index 223577dd..0451f3e6 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-__version__ = '8.0.209'
+__version__ = '8.0.210'
from ultralytics.models import RTDETR, SAM, YOLO
from ultralytics.models.fastsam import FastSAM
diff --git a/ultralytics/data/loaders.py b/ultralytics/data/loaders.py
index bcf4fbef..78ef8500 100644
--- a/ultralytics/data/loaders.py
+++ b/ultralytics/data/loaders.py
@@ -493,7 +493,7 @@ def autocast_list(source):
LOADERS = LoadStreams, LoadPilAndNumpy, LoadImages, LoadScreenshots # tuple
-def get_best_youtube_url(url, use_pafy=False):
+def get_best_youtube_url(url, use_pafy=True):
"""
Retrieves the URL of the best quality MP4 video stream from a given YouTube video.
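Flipping the default to `use_pafy=True` means the pafy backend now runs unless callers opt out (the yt-dlp path remains available with `use_pafy=False`); a quick check, assuming `pafy` and its YouTube backend are installed:

```python
from ultralytics.data.loaders import get_best_youtube_url

# With the new default this resolves the stream via pafy rather than yt-dlp
url = get_best_youtube_url('https://youtu.be/LNwODJXcvt4')
print(url)  # direct MP4 stream URL, suitable for cv2.VideoCapture
```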
diff --git a/ultralytics/utils/metrics.py b/ultralytics/utils/metrics.py
index 761a71d3..27e41b77 100644
--- a/ultralytics/utils/metrics.py
+++ b/ultralytics/utils/metrics.py
@@ -223,6 +223,13 @@ class ConfusionMatrix:
labels (Array[M, 5]): Ground truth bounding boxes and their associated class labels.
Each row should contain (class, x1, y1, x2, y2).
"""
+ if labels.size(0) == 0: # Check if labels is empty
+ if detections is not None:
+ detections = detections[detections[:, 4] > self.conf]
+ detection_classes = detections[:, 5].int()
+ for dc in detection_classes:
+ self.matrix[dc, self.nc] += 1 # false positives
+ return
if detections is None:
gt_classes = labels.int()
for gc in gt_classes:
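A minimal sketch of the behavior this guard adds, assuming the 8.0.x `ConfusionMatrix(nc, conf=0.25, ...)` constructor and an image with detections but no ground-truth boxes:

```python
import torch

from ultralytics.utils.metrics import ConfusionMatrix

cm = ConfusionMatrix(nc=80)
detections = torch.tensor([[0.0, 0.0, 10.0, 10.0, 0.9, 3.0]])  # x1, y1, x2, y2, conf, cls
labels = torch.zeros((0, 5))  # no ground truth in this image

# Each detection above the confidence threshold is now recorded directly as a
# false positive in the background column instead of reaching the IoU matching code.
cm.process_batch(detections, labels)
assert cm.matrix[3, 80] == 1
```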