ultralytics 8.0.196 instance-mean Segment loss (#5285)

Co-authored-by: Andy <39454881+yermandy@users.noreply.github.com>
Glenn Jocher 2023-10-09 20:08:39 +02:00 committed by GitHub
parent 7517667a33
commit e7f0658744
72 changed files with 369 additions and 493 deletions


@@ -1,7 +1,7 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
# Pre-commit hooks. For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md
-exclude: 'docs/'
+# exclude: 'docs/'
# Define bot property if installed via https://github.com/marketplace/pre-commit-ci
ci:
  autofix_prs: true
@@ -47,6 +47,7 @@ repos:
        additional_dependencies:
          - mdformat-gfm
          - mdformat-black
+       exclude: 'docs/.*\.md'
        # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md"
  - repo: https://github.com/PyCQA/flake8
@@ -60,7 +61,7 @@ repos:
    hooks:
      - id: codespell
        args:
-         - --ignore-words-list=crate,nd,strack,dota,ane,segway,fo
+         - --ignore-words-list=crate,nd,strack,dota,ane,segway,fo,gool,winn
  - repo: https://github.com/PyCQA/docformatter
    rev: v1.7.5


@@ -9,8 +9,7 @@ Ultralytics Docs are deployed to [https://docs.ultralytics.com](https://docs.ult
### Install Ultralytics package

To install the ultralytics package in developer mode, you will need to have Git and Python 3 installed on your system. Then, follow these steps:

1. Clone the ultralytics repository to your local machine using Git:

@@ -30,16 +29,13 @@ cd ultralytics
pip install -e ".[dev]"
```

This will install the ultralytics package and its dependencies in developer mode, allowing you to make changes to the package code and have them reflected immediately in your Python environment.

Note that you may need to use the pip3 command instead of pip if you have multiple versions of Python installed on your system.

### Building and Serving Locally

The `mkdocs serve` command is used to build and serve a local version of the MkDocs documentation site. It is typically used during the development and testing phase of a documentation project.

```bash
mkdocs serve
@@ -47,33 +43,23 @@ mkdocs serve

Here is a breakdown of what this command does:

- `mkdocs`: This is the command-line interface (CLI) for the MkDocs static site generator. It is used to build and serve MkDocs sites.
- `serve`: This is a subcommand of the `mkdocs` CLI that tells it to build and serve the documentation site locally.
- `-a`: This flag specifies the hostname and port number to bind the server to. The default value is `localhost:8000`.
- `-t`: This flag specifies the theme to use for the documentation site. The default value is `mkdocs`.
- `-s`: This flag tells the `serve` command to serve the site in silent mode, which means it will not display any log messages or progress updates.

When you run the `mkdocs serve` command, it will build the documentation site using the files in the `docs/` directory and serve it at the specified hostname and port number. You can then view the site by going to the URL in your web browser.

While the site is being served, you can make changes to the documentation files and see them reflected in the live site immediately. This is useful for testing and debugging your documentation before deploying it to a live server.

To stop the serve command and terminate the local server, you can use the `CTRL+C` keyboard shortcut.

### Deploying Your Documentation Site

To deploy your MkDocs documentation site, you will need to choose a hosting provider and a deployment method. Some popular options include GitHub Pages, GitLab Pages, and Amazon S3.

Before you can deploy your site, you will need to configure your `mkdocs.yml` file to specify the remote host and any other necessary deployment settings.

Once you have configured your `mkdocs.yml` file, you can use the `mkdocs deploy` command to build and deploy your site. This command will build the documentation site using the files in the `docs/` directory and the specified configuration file and theme, and then deploy the site to the specified remote host.

For example, to deploy your site to GitHub Pages using the gh-deploy plugin, you can use the following command:
@@ -81,10 +67,8 @@ For example, to deploy your site to GitHub Pages using the gh-deploy plugin, you
mkdocs gh-deploy
```

If you are using GitHub Pages, you can set a custom domain for your documentation site by going to the "Settings" page for your repository and updating the "Custom domain" field in the "GitHub Pages" section.

![196814117-fc16e711-d2be-4722-9536-b7c6d78fd167](https://user-images.githubusercontent.com/26833433/210150206-9e86dcd7-10af-43e4-9eb2-9518b3799eac.png)

For more information on deploying your MkDocs documentation site, see the [MkDocs documentation](https://www.mkdocs.org/user-guide/deploying-your-docs/).


@@ -19,7 +19,8 @@ REFERENCE_DIR = ROOT.parent / 'docs/reference'
def extract_classes_and_functions(filepath: Path):
    """
    Extracts class and function names from a given Python file.

    Args:
        filepath (Path): The path to the Python file.
@@ -27,9 +28,7 @@ def extract_classes_and_functions(filepath: Path):
    Returns:
        (tuple): A tuple containing lists of class and function names.
    """
-    with open(filepath, 'r') as file:
-        content = file.read()
+    content = Path(filepath).read_text()

    class_pattern = r'(?:^|\n)class\s(\w+)(?:\(|:)'
    func_pattern = r'(?:^|\n)def\s(\w+)\('
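Pieced together, the updated helper is compact enough to sketch in full. The following reconstruction is assembled from the fragments visible in this hunk; the `re.findall` calls and the final return statement are assumptions, since they fall outside the diff context:

```python
import re
from pathlib import Path


def extract_classes_and_functions(filepath: Path) -> tuple:
    """Extract top-level class and function names from a Python file (reconstruction sketch)."""
    content = Path(filepath).read_text()

    class_pattern = r'(?:^|\n)class\s(\w+)(?:\(|:)'  # matches 'class Foo(' or 'class Foo:'
    func_pattern = r'(?:^|\n)def\s(\w+)\('  # matches unindented, module-level 'def bar('

    classes = re.findall(class_pattern, content)
    functions = re.findall(func_pattern, content)
    return classes, functions
```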
@@ -40,7 +39,8 @@ def extract_classes_and_functions(filepath: Path):
def create_markdown(py_filepath: Path, module_path: str, classes: list, functions: list):
    """
    Creates a Markdown file containing the API reference for the given Python module.

    Args:
        py_filepath (Path): The path to the Python file.
@@ -53,7 +53,7 @@ def create_markdown(py_filepath: Path, module_path: str, classes: list, function
    # Read existing content and keep header content between first two ---
    header_content = ''
    if md_filepath.exists():
-        with open(md_filepath, 'r') as file:
+        with open(md_filepath) as file:
            existing_content = file.read()
        header_parts = existing_content.split('---')
        for part in header_parts:
@@ -61,11 +61,13 @@ def create_markdown(py_filepath: Path, module_path: str, classes: list, function
                header_content += f'---{part}---\n\n'

    module_name = module_path.replace('.__init__', '')
    module_path = module_path.replace('.', '/')
    url = f'https://github.com/ultralytics/ultralytics/blob/main/{module_path}.py'
    title_content = (
        f'# Reference for `{module_path}.py`\n\n'
        f'!!! note\n\n'
        f'    Full source code for this file is available at [{url}]({url}). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!\n\n'
    )
    md_content = [f'---\n## ::: {module_name}.{class_name}\n<br><br>\n' for class_name in classes]
    md_content.extend(f'---\n## ::: {module_name}.{func_name}\n<br><br>\n' for func_name in functions)
    md_content = header_content + title_content + '\n'.join(md_content)
@@ -80,7 +82,8 @@ def create_markdown(py_filepath: Path, module_path: str, classes: list, function
def nested_dict():
    """
    Creates and returns a nested defaultdict.

    Returns:
        (defaultdict): A nested defaultdict object.
@@ -89,7 +92,8 @@ def nested_dict():
def sort_nested_dict(d: dict):
    """
    Sorts a nested dictionary recursively.

    Args:
        d (dict): The dictionary to sort.
@@ -97,14 +101,12 @@ def sort_nested_dict(d: dict):
    Returns:
        (dict): The sorted dictionary.
    """
    return {key: sort_nested_dict(value) if isinstance(value, dict) else value for key, value in sorted(d.items())}
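To see how the two helpers behave together, here is a small self-contained sketch; the body of `nested_dict` is assumed to be the usual self-referential `defaultdict`, since only its docstring appears in this hunk:

```python
from collections import defaultdict


def nested_dict():
    """Create a defaultdict whose missing keys automatically become further nested defaultdicts."""
    return defaultdict(nested_dict)


def sort_nested_dict(d: dict) -> dict:
    """Recursively sort a nested dictionary by key, returning plain dicts."""
    return {key: sort_nested_dict(value) if isinstance(value, dict) else value for key, value in sorted(d.items())}


nav = nested_dict()
nav['reference']['engine']['trainer'] = 'reference/engine/trainer.md'
nav['reference']['cfg']['__init__'] = 'reference/cfg/__init__.md'

print(sort_nested_dict(nav))
# {'reference': {'cfg': {'__init__': 'reference/cfg/__init__.md'}, 'engine': {'trainer': 'reference/engine/trainer.md'}}}
```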
def create_nav_menu_yaml(nav_items: list):
    """
    Creates a YAML file for the navigation menu based on the provided list of items.

    Args:
        nav_items (list): A list of relative file paths to Markdown files for the navigation menu.


@@ -8,11 +8,7 @@ keywords: Ultralytics, COCO8 dataset, object detection, model testing, dataset c
## Introduction

[Ultralytics](https://ultralytics.com) COCO8 is a small, but versatile object detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.

This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com)
and [YOLOv8](https://github.com/ultralytics/ultralytics).
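Because COCO8 ships with the `ultralytics` package as `coco8.yaml`, a quick sanity-check run can be sketched as follows (the yolov8n weights and the 3-epoch setting are illustrative choices, not values taken from this diff):

```python
from ultralytics import YOLO

# Short sanity-check training run on the 8-image COCO8 dataset
model = YOLO('yolov8n.pt')
results = model.train(data='coco8.yaml', epochs=3, imgsz=640)
```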


@@ -8,11 +8,7 @@ keywords: Ultralytics, YOLOv8, pose detection, COCO8-Pose dataset, dataset, mode
## Introduction

[Ultralytics](https://ultralytics.com) COCO8-Pose is a small, but versatile pose detection dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging object detection models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.

This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com)
and [YOLOv8](https://github.com/ultralytics/ultralytics).


@@ -34,7 +34,7 @@ Format with Dim = 3
<class-index> <x> <y> <width> <height> <px1> <py1> <p1-visibility> <px2> <py2> <p2-visibility> <pxn> <pyn> <pn-visibility>
```

In this format, `<class-index>` is the index of the class for the object, `<x> <y> <width> <height>` are the coordinates of the bounding box, and `<px1> <py1> <px2> <py2> ... <pxn> <pyn>` are the pixel coordinates of the keypoints. The coordinates are separated by spaces.
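To make the layout concrete, here is a small illustrative parser for one such label line; the numeric values are invented purely for demonstration:

```python
# Hypothetical keypoint label line with Dim = 3: class, box (x y w h), then (px, py, visibility) per keypoint
line = '0 0.5 0.5 0.2 0.4 0.48 0.35 2 0.52 0.35 2 0.50 0.60 1'

values = [float(v) for v in line.split()]
class_index = int(values[0])
x, y, width, height = values[1:5]
keypoints = [tuple(values[i:i + 3]) for i in range(5, len(values), 3)]

print(class_index, (x, y, width, height))  # 0 (0.5, 0.5, 0.2, 0.4)
print(keypoints)  # [(0.48, 0.35, 2.0), (0.52, 0.35, 2.0), (0.5, 0.6, 1.0)]
```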
### Dataset YAML format


@@ -8,11 +8,7 @@ keywords: COCO8-Seg dataset, Ultralytics, YOLOv8, instance segmentation, dataset
## Introduction

[Ultralytics](https://ultralytics.com) COCO8-Seg is a small, but versatile instance segmentation dataset composed of the first 8 images of the COCO train 2017 set, 4 for training and 4 for validation. This dataset is ideal for testing and debugging segmentation models, or for experimenting with new detection approaches. With 8 images, it is small enough to be easily manageable, yet diverse enough to test training pipelines for errors and act as a sanity check before training larger datasets.

This dataset is intended for use with Ultralytics [HUB](https://hub.ultralytics.com)
and [YOLOv8](https://github.com/ultralytics/ultralytics).


@@ -8,8 +8,7 @@ keywords: Ultralytics, YOLO, multi-object tracking, datasets, detection, segment
## Dataset Format (Coming Soon)

Multi-Object Detector doesn't need standalone training and directly supports pre-trained detection, segmentation or Pose models. Support for training trackers alone is coming soon.

## Usage


@@ -50,8 +50,8 @@ By integrating with Codecov, we aim to maintain and improve the quality of our c
To quickly get a glimpse of the code coverage status of the `ultralytics` python package, we have included a badge and sunburst visual of the `ultralytics` coverage results. These images show the percentage of code covered by our tests, offering an at-a-glance metric of our testing efforts. For full details please see https://codecov.io/github/ultralytics/ultralytics.

| Repository                                                 | Code Coverage                                                                                                                                            |
|------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------|
| [ultralytics](https://github.com/ultralytics/ultralytics) | [![codecov](https://codecov.io/gh/ultralytics/ultralytics/branch/main/graph/badge.svg?token=HHW7IIVFVY)](https://codecov.io/gh/ultralytics/ultralytics) |

In the sunburst graphic below, the inner-most circle is the entire project, moving away from the center are folders then, finally, a single file. The size and color of each slice represent the number of statements and the coverage, respectively.
@@ -59,4 +59,3 @@ In the sunburst graphic below, the inner-most circle is the entire project, movi
<a href="https://codecov.io/github/ultralytics/ultralytics">
    <img src="https://codecov.io/gh/ultralytics/ultralytics/branch/main/graphs/sunburst.svg?token=HHW7IIVFVY" alt="Ultralytics Codecov Image">
</a>


@@ -5,66 +5,27 @@ keywords: Ultralytics, Contributor License Agreement, Open Source Software, Cont
# Ultralytics Individual Contributor License Agreement

Thank you for your interest in contributing to open source software projects (“Projects”) made available by Ultralytics SE or its affiliates (“Ultralytics”). This Individual Contributor License Agreement (“Agreement”) sets out the terms governing any source code, object code, bug fixes, configuration changes, tools, specifications, documentation, data, materials, feedback, information or other works of authorship that you submit or have submitted, in any form and in any manner, to Ultralytics in respect of any of the Projects (collectively “Contributions”). If you have any questions respecting this Agreement, please contact hello@ultralytics.com.

You agree that the following terms apply to all of your past, present and future Contributions. Except for the licenses granted in this Agreement, you retain all of your right, title and interest in and to your Contributions.

**Copyright License.** You hereby grant, and agree to grant, to Ultralytics a non-exclusive, perpetual, irrevocable, worldwide, fully-paid, royalty-free, transferable copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, and distribute your Contributions and such derivative works, with the right to sublicense the foregoing rights through multiple tiers of sublicensees.

**Patent License.** You hereby grant, and agree to grant, to Ultralytics a non-exclusive, perpetual, irrevocable, worldwide, fully-paid, royalty-free, transferable patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer your Contributions, where such license applies only to those patent claims licensable by you that are necessarily infringed by your Contributions alone or by combination of your Contributions with the Project to which such Contributions were submitted, with the right to sublicense the foregoing rights through multiple tiers of sublicensees.

**Moral Rights.** To the fullest extent permitted under applicable law, you hereby waive, and agree not to assert, all of your “moral rights” in or relating to your Contributions for the benefit of Ultralytics, its assigns, and their respective direct and indirect sublicensees.

**Third Party Content/Rights.** If your Contribution includes or is based on any source code, object code, bug fixes, configuration changes, tools, specifications, documentation, data, materials, feedback, information or other works of authorship that were not authored by you (“Third Party Content”) or if you are aware of any third party intellectual property or proprietary rights associated with your Contribution (“Third Party Rights”), then you agree to include with the submission of your Contribution full details respecting such Third Party Content and Third Party Rights, including, without limitation, identification of which aspects of your Contribution contain Third Party Content or are associated with Third Party Rights, the owner/author of the Third Party Content and Third Party Rights, where you obtained the Third Party Content, and any applicable third party license terms or restrictions respecting the Third Party Content and Third Party Rights. For greater certainty, the foregoing obligations respecting the identification of Third Party Content and Third Party Rights do not apply to any portion of a Project that is incorporated into your Contribution to that same Project.

**Representations.** You represent that, other than the Third Party Content and Third Party Rights identified by you in accordance with this Agreement, you are the sole author of your Contributions and are legally entitled to grant the foregoing licenses and waivers in respect of your Contributions. If your Contributions were created in the course of your employment with your past or present employer(s), you represent that such employer(s) has authorized you to make your Contributions on behalf of such employer(s) or such employer(s) has waived all of their right, title or interest in or to your Contributions.

**Disclaimer.** To the fullest extent permitted under applicable law, your Contributions are provided on an "as-is" basis, without any warranties or conditions, express or implied, including, without limitation, any implied warranties or conditions of non-infringement, merchantability or fitness for a particular purpose. You are not required to provide support for your Contributions, except to the extent you desire to provide support.

**No Obligation.** You acknowledge that Ultralytics is under no obligation to use or incorporate your Contributions into any of the Projects. The decision to use or incorporate your Contributions into any of the Projects will be made at the sole discretion of Ultralytics or its authorized delegates.

**Disputes.** This Agreement shall be governed by and construed in accordance with the laws of the State of New York, United States of America, without giving effect to its principles or rules regarding conflicts of laws, other than such principles directing application of New York law. The parties hereby submit to venue in, and jurisdiction of the courts located in New York, New York for purposes relating to this Agreement. In the event that any of the provisions of this Agreement shall be held by a court or other tribunal of competent jurisdiction to be unenforceable, the remaining portions hereof shall remain in full force and effect.

**Assignment.** You agree that Ultralytics may assign this Agreement, and all of its rights, obligations and licenses hereunder.


@@ -8,124 +8,78 @@ keywords: Ultralytics, code of conduct, community, contribution, behavior guidel
## Our Pledge

We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our community include:

- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience
- Focusing on what is best not just for us as individuals, but for the overall community

Examples of unacceptable behavior include:

- The use of sexualized language or imagery, and sexual attention or advances of any kind
- Trolling, insulting or derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email address, without their explicit permission
- Other conduct which could reasonably be considered inappropriate in a professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful.

Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at hello@ultralytics.com. All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of actions.

**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.0, available at https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).

For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at


@@ -152,7 +152,6 @@ This comparison shows the order-of-magnitude differences in the model sizes and
Tests run on a 2023 Apple M2 MacBook with 16GB of RAM. To reproduce this test:

!!! example ""

    === "Python"


@@ -12,8 +12,7 @@ keywords: Meituan YOLOv6, object detection, Ultralytics, YOLOv6 docs, Bi-directi
![Meituan YOLOv6](https://user-images.githubusercontent.com/26833433/240750495-4da954ce-8b3b-41c4-8afd-ddb74361d3c2.png)
![Model example image](https://user-images.githubusercontent.com/26833433/240750557-3e9ec4f0-0598-49a8-83ea-f33c91eb6d68.png)
**Overview of YOLOv6.** Model architecture diagram showing the redesigned network components and training strategies that have led to significant performance improvements. (a) The neck of YOLOv6 (N and S are shown). Note for M/L, RepBlocks is replaced with CSPStackRep. (b) The structure of a BiC module. (c) A SimCSPSPPF block. ([source](https://arxiv.org/pdf/2301.05586.pdf)).

### Key Features


@@ -20,7 +20,7 @@ Ultralytics provides various installation methods including pip, conda, and Dock
pip install ultralytics
```

You can also install the `ultralytics` package directly from the GitHub [repository](https://github.com/ultralytics/ultralytics). This might be useful if you want the latest development version. Make sure to have the Git command-line tool installed on your system. The `@main` command installs the `main` branch and may be modified to another branch, i.e. `@my-branch`, or removed entirely to default to `main` branch.

```bash
# Install the ultralytics package from GitHub


@@ -37,4 +37,3 @@ div.highlight {
    max-height: 20rem;
    overflow-y: auto; /* for adding a scrollbar when needed */
}


@@ -6,9 +6,7 @@ keywords: Ultralytics, YOLO, callbacks guide, training callback, validation call
## Callbacks

Ultralytics framework supports callbacks as entry points in strategic stages of train, val, export, and predict modes. Each callback accepts a `Trainer`, `Validator`, or `Predictor` object depending on the operation type. All properties of these objects can be found in the Reference section of the docs.
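As a quick illustration of the pattern (a sketch, not code from this diff), a custom callback can be attached to a model before training; `add_callback` and the `on_train_epoch_end` event are part of the documented callback interface, while the `trainer.epoch` attribute used here is an assumption for demonstration:

```python
from ultralytics import YOLO


def log_epoch(trainer):
    """Print the epoch index each time a training epoch finishes."""
    print(f'Finished epoch {trainer.epoch}')


model = YOLO('yolov8n.pt')
model.add_callback('on_train_epoch_end', log_epoch)
model.train(data='coco8.yaml', epochs=3, imgsz=640)
```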
## Examples


@@ -6,8 +6,7 @@ keywords: Ultralytics, YOLO, CLI, train, validation, prediction, command line in
# Command Line Interface Usage

The YOLO command line interface (CLI) allows for simple single-line commands without the need for a Python environment. CLI requires no customization or Python code. You can simply run all tasks from the terminal with the `yolo` command.

!!! example

@@ -65,11 +64,9 @@ CLI requires no customization or Python code. You can simply run all tasks from
Where:

- `TASK` (optional) is one of `[detect, segment, classify]`. If it is not passed explicitly YOLOv8 will try to guess the `TASK` from the model type.
- `MODE` (required) is one of `[train, val, predict, export, track]`
- `ARGS` (optional) are any number of custom `arg=value` pairs like `imgsz=320` that override defaults. For a full list of available `ARGS` see the [Configuration](cfg.md) page and `defaults.yaml` GitHub [source](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/default.yaml).

!!! warning "Warning"
@@ -82,8 +79,7 @@ Where:
## Train

Train YOLOv8n on the COCO128 dataset for 100 epochs at image size 640. For a full list of available arguments see the [Configuration](cfg.md) page.
!!! example "Example" !!! example "Example"
@ -103,8 +99,7 @@ the [Configuration](cfg.md) page.
## Val ## Val
Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need to passed as the `model` retains it's Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need to passed as the `model` retains it's training `data` and arguments as model attributes.
training `data` and arguments as model attributes.
!!! example "Example" !!! example "Example"
@ -162,8 +157,7 @@ Export a YOLOv8n model to a different format like ONNX, CoreML, etc.
yolo export model=path/to/best.pt format=onnx yolo export model=path/to/best.pt format=onnx
``` ```
Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, i.e. `format='onnx'` or `format='engine'`.
i.e. `format='onnx'` or `format='engine'`.
| Format | `format` Argument | Model | Metadata | Arguments | | Format | `format` Argument | Model | Metadata | Arguments |
|--------------------------------------------------------------------|-------------------|---------------------------|----------|-----------------------------------------------------| |--------------------------------------------------------------------|-------------------|---------------------------|----------|-----------------------------------------------------|
@ -207,13 +201,11 @@ Default arguments can be overridden by simply passing them as arguments in the C
## Overriding default config file ## Overriding default config file
You can override the `default.yaml` config file entirely by passing a new file with the `cfg` arguments, You can override the `default.yaml` config file entirely by passing a new file with the `cfg` arguments, i.e. `cfg=custom.yaml`.
i.e. `cfg=custom.yaml`.
To do this first create a copy of `default.yaml` in your current working dir with the `yolo copy-cfg` command. To do this first create a copy of `default.yaml` in your current working dir with the `yolo copy-cfg` command.
This will create `default_copy.yaml`, which you can then pass as `cfg=default_copy.yaml` along with any additional args, This will create `default_copy.yaml`, which you can then pass as `cfg=default_copy.yaml` along with any additional args, like `imgsz=320` in this example:
like `imgsz=320` in this example:
!!! example "" !!! example ""


@@ -4,18 +4,14 @@ description: Discover how to customize and extend base Ultralytics YOLO Trainer
keywords: Ultralytics, YOLO, trainer engines, BaseTrainer, DetectionTrainer, customizing trainers, extending trainers, custom model, custom dataloader
---

Both the Ultralytics YOLO command-line and Python interfaces are simply a high-level abstraction on the base engine executors. Let's take a look at the Trainer engine.

## BaseTrainer

BaseTrainer contains the generic boilerplate training routine. It can be customized for any task by overriding the required functions or operations, as long as the correct formats are followed. For example, you can support your own custom model and dataloader by just overriding these functions:

* `get_model(cfg, weights)` - The function that builds the model to be trained
* `get_dataloader()` - The function that builds the dataloader

More details and source code can be found in [`BaseTrainer` Reference](../reference/engine/trainer.md)

## DetectionTrainer

@@ -31,8 +27,7 @@ trained_model = trainer.best # get best model
### Customizing the DetectionTrainer

Let's customize the trainer **to train a custom detection model** that is not supported directly. You can do this by simply overloading the existing `get_model` functionality:

```python
from ultralytics.models.yolo.detect import DetectionTrainer


@@ -6,14 +6,9 @@ keywords: YOLOv8, Ultralytics, Python, object detection, segmentation, classific
# Python Usage

Welcome to the YOLOv8 Python Usage documentation! This guide is designed to help you seamlessly integrate YOLOv8 into your Python projects for object detection, segmentation, and classification. Here, you'll learn how to load and use pretrained models, train new models, and perform predictions on images. The easy-to-use Python interface is a valuable resource for anyone looking to incorporate YOLOv8 into their Python projects, allowing you to quickly implement advanced object detection capabilities. Let's get started!

For example, users can load a model, train it, evaluate its performance on a validation set, and even export it to ONNX format with just a few lines of code.
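A minimal sketch of that workflow is shown below (the full, canonical snippet lives in the Python example that follows; the weights file, dataset name, and epoch count here are illustrative choices):

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')                           # load a pretrained model
model.train(data='coco8.yaml', epochs=3, imgsz=640)  # train on a small sample dataset
metrics = model.val()                                # evaluate on the validation split
path = model.export(format='onnx')                   # export to ONNX format
```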
!!! example "Python" !!! example "Python"
@ -41,9 +36,7 @@ format with just a few lines of code.
## [Train](../modes/train.md) ## [Train](../modes/train.md)
Train mode is used for training a YOLOv8 model on a custom dataset. In this mode, the model is trained using the Train mode is used for training a YOLOv8 model on a custom dataset. In this mode, the model is trained using the specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can accurately predict the classes and locations of objects in an image.
specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can
accurately predict the classes and locations of objects in an image.
!!! example "Train" !!! example "Train"
@ -73,9 +66,7 @@ accurately predict the classes and locations of objects in an image.
## [Val](../modes/val.md) ## [Val](../modes/val.md)
Val mode is used for validating a YOLOv8 model after it has been trained. In this mode, the model is evaluated on a Val mode is used for validating a YOLOv8 model after it has been trained. In this mode, the model is evaluated on a validation set to measure its accuracy and generalization performance. This mode can be used to tune the hyperparameters of the model to improve its performance.
validation set to measure its accuracy and generalization performance. This mode can be used to tune the hyperparameters
of the model to improve its performance.
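As a rough sketch, validation can be run on a trained model in a couple of lines (the checkpoint path below is a placeholder):

```python
from ultralytics import YOLO

# Evaluate a trained model; the training dataset is reused if no data argument is given
model = YOLO('path/to/best.pt')  # placeholder checkpoint path
metrics = model.val()
print(metrics.box.map)  # mAP50-95
```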
!!! example "Val" !!! example "Val"
@ -103,9 +94,7 @@ of the model to improve its performance.
## [Predict](../modes/predict.md) ## [Predict](../modes/predict.md)
Predict mode is used for making predictions using a trained YOLOv8 model on new images or videos. In this mode, the Predict mode is used for making predictions using a trained YOLOv8 model on new images or videos. In this mode, the model is loaded from a checkpoint file, and the user can provide images or videos to perform inference. The model predicts the classes and locations of objects in the input images or videos.
model is loaded from a checkpoint file, and the user can provide images or videos to perform inference. The model
predicts the classes and locations of objects in the input images or videos.
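A minimal prediction sketch (the image URL is only an example source):

```python
from ultralytics import YOLO

# Run inference on a single image and inspect the detections
model = YOLO('yolov8n.pt')
results = model('https://ultralytics.com/images/bus.jpg')
for r in results:
    print(r.boxes.xyxy)  # box coordinates
    print(r.boxes.cls)   # class indices
```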
!!! example "Predict" !!! example "Predict"
@ -173,9 +162,7 @@ predicts the classes and locations of objects in the input images or videos.
## [Export](../modes/export.md) ## [Export](../modes/export.md)
Export mode is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the model is Export mode is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the model is converted to a format that can be used by other software applications or hardware devices. This mode is useful when deploying the model to production environments.
converted to a format that can be used by other software applications or hardware devices. This mode is useful when
deploying the model to production environments.
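For example, exporting a model to ONNX takes a single call on a loaded model (ONNX is just one of the supported formats):

```python
from ultralytics import YOLO

# Convert a trained model to ONNX for deployment
model = YOLO('yolov8n.pt')
model.export(format='onnx')
```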
!!! example "Export" !!! example "Export"
@ -203,9 +190,7 @@ deploying the model to production environments.
## [Track](../modes/track.md) ## [Track](../modes/track.md)
Track mode is used for tracking objects in real-time using a YOLOv8 model. In this mode, the model is loaded from a Track mode is used for tracking objects in real-time using a YOLOv8 model. In this mode, the model is loaded from a checkpoint file, and the user can provide a live video stream to perform real-time object tracking. This mode is useful for applications such as surveillance systems or self-driving cars.
checkpoint file, and the user can provide a live video stream to perform real-time object tracking. This mode is useful
for applications such as surveillance systems or self-driving cars.
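A minimal tracking sketch (the video path is a placeholder; ByteTrack is one of the bundled tracker configs):

```python
from ultralytics import YOLO

# Track objects across the frames of a video file
model = YOLO('yolov8n.pt')
results = model.track(source='path/to/video.mp4', tracker='bytetrack.yaml')
```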
!!! example "Track" !!! example "Track"
@ -228,11 +213,8 @@ for applications such as surveillance systems or self-driving cars.
## [Benchmark](../modes/benchmark.md) ## [Benchmark](../modes/benchmark.md)
Benchmark mode is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks provide Benchmark mode is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks provide information on the size of the exported format, its `mAP50-95` metrics (for object detection and segmentation)
information on the size of the exported format, its `mAP50-95` metrics (for object detection and segmentation) or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for their specific use case based on their requirements for speed and accuracy.
or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export
formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for
their specific use case based on their requirements for speed and accuracy.
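As a sketch, benchmarking can also be driven from Python via the benchmarks utility (the dataset and image size here are example values):

```python
from ultralytics.utils.benchmarks import benchmark

# Profile speed and accuracy of yolov8n.pt across export formats
benchmark(model='yolov8n.pt', data='coco8.yaml', imgsz=640, half=False)
```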
!!! example "Benchmark" !!! example "Benchmark"
@ -250,8 +232,7 @@ their specific use case based on their requirements for speed and accuracy.
## Using Trainers ## Using Trainers
`YOLO` model class is a high-level wrapper on the Trainer classes. Each YOLO task has its own trainer that inherits `YOLO` model class is a high-level wrapper on the Trainer classes. Each YOLO task has its own trainer that inherits from `BaseTrainer`.
from `BaseTrainer`.
!!! tip "Detection Trainer Example" !!! tip "Detection Trainer Example"
@ -276,8 +257,6 @@ from `BaseTrainer`.
trainer = detect.DetectionTrainer(overrides=overrides) trainer = detect.DetectionTrainer(overrides=overrides)
``` ```
You can easily customize Trainers to support custom tasks or explore R&D ideas. You can easily customize Trainers to support custom tasks or explore R&D ideas. Learn more about Customizing `Trainers`, `Validators` and `Predictors` to suit your project needs in the Customization Section.
Learn more about Customizing `Trainers`, `Validators` and `Predictors` to suit your project needs in the Customization
Section.
[Customization tutorials](engine.md){ .md-button .md-button--primary} [Customization tutorials](engine.md){ .md-button .md-button--primary}

View File

@ -53,8 +53,7 @@ Here's a compilation of comprehensive tutorials that will guide you through diff
YOLOv5 is designed to be run in the following up-to-date, verified environments, with all dependencies (including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/), and [PyTorch](https://pytorch.org/)) pre-installed: YOLOv5 is designed to be run in the following up-to-date, verified environments, with all dependencies (including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/), and [PyTorch](https://pytorch.org/)) pre-installed:
- **Notebooks** with free - **Notebooks** with free GPU: <a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a> <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
GPU: <a href="https://bit.ly/yolov5-paperspace-notebook"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run on Gradient"></a> <a href="https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a> <a href="https://www.kaggle.com/ultralytics/yolov5"><img src="https://kaggle.com/static/images/open-in-kaggle.svg" alt="Open In Kaggle"></a>
- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](environments/google_cloud_quickstart_tutorial.md) - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](environments/google_cloud_quickstart_tutorial.md)
- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](environments/aws_quickstart_tutorial.md) - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](environments/aws_quickstart_tutorial.md)
- **Azure** Azure Machine Learning. See [AzureML Quickstart Guide](environments/azureml_quickstart_tutorial.md) - **Azure** Azure Machine Learning. See [AzureML Quickstart Guide](environments/azureml_quickstart_tutorial.md)

View File

@ -22,8 +22,7 @@ pip install -r requirements.txt # install
## Inference ## Inference
YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [PyTorch Hub](https://docs.ultralytics.com/yolov5/tutorials/pytorch_hub_model_loading) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
YOLOv5 [release](https://github.com/ultralytics/yolov5/releases).
```python ```python
import torch import torch
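# Illustrative continuation of the PyTorch Hub workflow (a minimal sketch; the image URL is only an example)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')  # load a pretrained YOLOv5s model
results = model('https://ultralytics.com/images/zidane.jpg')  # run inference on an image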
@ -43,8 +42,7 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc.
## Inference with detect.py ## Inference with detect.py
`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from `detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.
the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`.
```bash ```bash
python detect.py --weights yolov5s.pt --source 0 # webcam python detect.py --weights yolov5s.pt --source 0 # webcam
@ -63,11 +61,7 @@ python detect.py --weights yolov5s.pt --source 0 #
The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh)
results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) results. [Models](https://github.com/ultralytics/yolov5/tree/master/models)
and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are 1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the largest `--batch-size` possible, or pass `--batch-size -1` for YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.
YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are
1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://docs.ultralytics.com/yolov5/tutorials/multi_gpu_training) times faster). Use the
largest `--batch-size` possible, or pass `--batch-size -1` for
YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB.
```bash ```bash
python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128

View File

@ -176,8 +176,7 @@ The revised formulas for calculating the predicted bounding box are as follows:
![bw](https://latex.codecogs.com/svg.image?b_w=p_w\cdot(2\cdot\sigma(t_w))^2) ![bw](https://latex.codecogs.com/svg.image?b_w=p_w\cdot(2\cdot\sigma(t_w))^2)
![bh](https://latex.codecogs.com/svg.image?b_h=p_h\cdot(2\cdot\sigma(t_h))^2) ![bh](https://latex.codecogs.com/svg.image?b_h=p_h\cdot(2\cdot\sigma(t_h))^2)
Compare the center point offset before and after scaling. The center point offset range is adjusted from (0, 1) to (-0.5, 1.5). Compare the center point offset before and after scaling. The center point offset range is adjusted from (0, 1) to (-0.5, 1.5). Therefore, the offset can easily reach 0 or 1.
Therefore, the offset can easily reach 0 or 1.
<img src="https://user-images.githubusercontent.com/31005897/158508052-c24bc5e8-05c1-4154-ac97-2e1ec71f582e.png#pic_center" width=40%> <img src="https://user-images.githubusercontent.com/31005897/158508052-c24bc5e8-05c1-4154-ac97-2e1ec71f582e.png#pic_center" width=40%>

View File

@ -64,8 +64,7 @@ pip install clearml>=1.2.0
This will enable integration with the YOLOv5 training script. Every training run from now on will be captured and stored by the ClearML experiment manager. This will enable integration with the YOLOv5 training script. Every training run from now on will be captured and stored by the ClearML experiment manager.
If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script; by default, the project will be called `YOLOv5` and the task `Training`. If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script; by default, the project will be called `YOLOv5` and the task `Training`. PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name!
PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name!
```bash ```bash
python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache
@ -92,8 +91,7 @@ This will capture:
- Validation images per epoch - Validation images per epoch
- ... - ...
That's a lot, right? 🤯 That's a lot, right? 🤯 Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (such as mAP_0.5) so you can easily sort on the best-performing model. Or select multiple experiments and directly compare them!
Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (such as mAP_0.5) so you can easily sort on the best-performing model. Or select multiple experiments and directly compare them!
There's even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works! There's even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works!
@ -187,8 +185,7 @@ python utils/loggers/clearml/hpo.py
## 🤯 Remote Execution (advanced) ## 🤯 Remote Execution (advanced)
Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs. Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs. This is where the ClearML Agent comes into play. Check out what the agent can do here:
This is where the ClearML Agent comes into play. Check out what the agent can do here:
- [YouTube video](https://youtu.be/MX3BrXnaULs) - [YouTube video](https://youtu.be/MX3BrXnaULs)
- [Documentation](https://clear.ml/docs/latest/docs/clearml_agent) - [Documentation](https://clear.ml/docs/latest/docs/clearml_agent)

View File

@ -90,8 +90,7 @@ By default, Comet will log the following items
# Configure Comet Logging # Configure Comet Logging
Comet can be configured to log additional data either through command line flags passed to the training script Comet can be configured to log additional data either through command line flags passed to the training script or through environment variables.
or through environment variables.
```shell ```shell
export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online
@ -106,8 +105,7 @@ export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model p
## Logging Checkpoints with Comet ## Logging Checkpoints with Comet
Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the logged checkpoints to Comet based on the interval value provided by `save-period`.
logged checkpoints to Comet based on the interval value provided by `save-period`.
```shell ```shell
python train.py \ python train.py \
@ -240,8 +238,7 @@ python utils/loggers/comet/hpo.py \
--comet_optimizer_config "utils/loggers/comet/optimizer_config.json" --comet_optimizer_config "utils/loggers/comet/optimizer_config.json"
``` ```
The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep, simply add them after The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep, simply add them after the script.
the script.
```shell ```shell
python utils/loggers/comet/hpo.py \ python utils/loggers/comet/hpo.py \

View File

@ -4,8 +4,7 @@ description: Learn how to ensemble YOLOv5 models for improved mAP and Recall! Cl
keywords: YOLOv5, object detection, ensemble learning, mAP, Recall keywords: YOLOv5, object detection, ensemble learning, mAP, Recall
--- ---
📚 This guide explains how to use YOLOv5 🚀 **model ensembling** during testing and inference for improved mAP and Recall. 📚 This guide explains how to use YOLOv5 🚀 **model ensembling** during testing and inference for improved mAP and Recall. UPDATED 25 September 2022.
UPDATED 25 September 2022.
From [https://en.wikipedia.org/wiki/Ensemble_learning](https://en.wikipedia.org/wiki/Ensemble_learning): From [https://en.wikipedia.org/wiki/Ensemble_learning](https://en.wikipedia.org/wiki/Ensemble_learning):
> Ensemble modeling is a process where multiple diverse models are created to predict an outcome, either by using many different modeling algorithms or using different training data sets. The ensemble model then aggregates the prediction of each base model and results in one final prediction for the unseen data. The motivation for using ensemble models is to reduce the generalization error of the prediction. As long as the base models are diverse and independent, the prediction error of the model decreases when the ensemble approach is used. The approach seeks the wisdom of crowds in making a prediction. Even though the ensemble model has multiple base models within the model, it acts and performs as a single model. > Ensemble modeling is a process where multiple diverse models are created to predict an outcome, either by using many different modeling algorithms or using different training data sets. The ensemble model then aggregates the prediction of each base model and results in one final prediction for the unseen data. The motivation for using ensemble models is to reduce the generalization error of the prediction. As long as the base models are diverse and independent, the prediction error of the model decreases when the ensemble approach is used. The approach seeks the wisdom of crowds in making a prediction. Even though the ensemble model has multiple base models within the model, it acts and performs as a single model.

View File

@ -6,8 +6,7 @@ keywords: Ultralytics, YOLOv5, model export, PyTorch, TorchScript, ONNX, OpenVIN
# TFLite, ONNX, CoreML, TensorRT Export # TFLite, ONNX, CoreML, TensorRT Export
📚 This guide explains how to export a trained YOLOv5 🚀 model from PyTorch to ONNX and TorchScript formats. 📚 This guide explains how to export a trained YOLOv5 🚀 model from PyTorch to ONNX and TorchScript formats. UPDATED 8 December 2022.
UPDATED 8 December 2022.
## Before You Start ## Before You Start
@ -25,8 +24,7 @@ For [TensorRT](https://developer.nvidia.com/tensorrt) export example (requires G
YOLOv5 inference is officially supported in 11 formats: YOLOv5 inference is officially supported in 11 formats:
💡 ProTip: Export to ONNX or OpenVINO for up to 3x CPU speedup. See [CPU Benchmarks](https://github.com/ultralytics/yolov5/pull/6613). 💡 ProTip: Export to ONNX or OpenVINO for up to 3x CPU speedup. See [CPU Benchmarks](https://github.com/ultralytics/yolov5/pull/6613). 💡 ProTip: Export to TensorRT for up to 5x GPU speedup. See [GPU Benchmarks](https://github.com/ultralytics/yolov5/pull/6963).
💡 ProTip: Export to TensorRT for up to 5x GPU speedup. See [GPU Benchmarks](https://github.com/ultralytics/yolov5/pull/6963).
| Format | `export.py --include` | Model | | Format | `export.py --include` | Model |
|:---------------------------------------------------------------------------|:----------------------|:--------------------------| |:---------------------------------------------------------------------------|:----------------------|:--------------------------|

View File

@ -4,8 +4,7 @@ description: Improve YOLOv5 model efficiency by pruning with Ultralytics. Unders
keywords: YOLOv5, YOLO, Ultralytics, model pruning, PyTorch, machine learning, deep learning, computer vision, object detection keywords: YOLOv5, YOLO, Ultralytics, model pruning, PyTorch, machine learning, deep learning, computer vision, object detection
--- ---
📚 This guide explains how to apply **pruning** to YOLOv5 🚀 models. 📚 This guide explains how to apply **pruning** to YOLOv5 🚀 models. UPDATED 25 September 2022.
UPDATED 25 September 2022.
## Before You Start ## Before You Start

View File

@ -4,8 +4,7 @@ description: Learn how to train datasets on single or multiple GPUs using YOLOv5
keywords: YOLOv5, multi-GPU Training, YOLOv5 training, deep learning, machine learning, object detection, Ultralytics keywords: YOLOv5, multi-GPU Training, YOLOv5 training, deep learning, machine learning, object detection, Ultralytics
--- ---
📚 This guide explains how to properly use **multiple** GPUs to train a dataset with YOLOv5 🚀 on single or multiple machine(s). 📚 This guide explains how to properly use **multiple** GPUs to train a dataset with YOLOv5 🚀 on single or multiple machine(s). UPDATED 25 December 2022.
UPDATED 25 December 2022.
## Before You Start ## Before You Start
@ -103,8 +102,7 @@ python -m torch.distributed.run --nproc_per_node G --nnodes N --node_rank 0 --ma
python -m torch.distributed.run --nproc_per_node G --nnodes N --node_rank R --master_addr "192.168.1.1" --master_port 1234 train.py --batch 64 --data coco.yaml --cfg yolov5s.yaml --weights '' python -m torch.distributed.run --nproc_per_node G --nnodes N --node_rank R --master_addr "192.168.1.1" --master_port 1234 train.py --batch 64 --data coco.yaml --cfg yolov5s.yaml --weights ''
``` ```
where `G` is the number of GPUs per machine, `N` is the number of machines, and `R` is the machine number from `0...(N-1)`. where `G` is the number of GPUs per machine, `N` is the number of machines, and `R` is the machine number from `0...(N-1)`. Let's say you have two machines with two GPUs each; it would be `G = 2`, `N = 2`, and `R = 1` on the second machine (with `R = 0` on the master).
Let's say you have two machines with two GPUs each; it would be `G = 2`, `N = 2`, and `R = 1` on the second machine (with `R = 0` on the master).
Training will not start until <b>all</b> `N` machines are connected. Output will only be shown on the master machine! Training will not start until <b>all</b> `N` machines are connected. Output will only be shown on the master machine!

View File

@ -30,8 +30,7 @@ DeepSparse is an inference runtime with exceptional performance on CPUs. For ins
<img width="60%" src="https://github.com/neuralmagic/deepsparse/raw/main/examples/ultralytics-yolo/ultralytics-readmes/performance-chart-5.8x.png"> <img width="60%" src="https://github.com/neuralmagic/deepsparse/raw/main/examples/ultralytics-yolo/ultralytics-readmes/performance-chart-5.8x.png">
</p> </p>
For the first time, your deep learning workloads can meet the performance demands of production without the complexity and costs of hardware accelerators. For the first time, your deep learning workloads can meet the performance demands of production without the complexity and costs of hardware accelerators. Put simply, DeepSparse gives you the performance of GPUs and the simplicity of software:
Put simply, DeepSparse gives you the performance of GPUs and the simplicity of software:
- **Flexible Deployments**: Run consistently across cloud, data center, and edge with any hardware provider from Intel to AMD to ARM - **Flexible Deployments**: Run consistently across cloud, data center, and edge with any hardware provider from Intel to AMD to ARM
- **Infinite Scalability**: Scale vertically to 100s of cores, out with standard Kubernetes, or fully-abstracted with Serverless - **Infinite Scalability**: Scale vertically to 100s of cores, out with standard Kubernetes, or fully-abstracted with Serverless
@ -41,10 +40,7 @@ Put simply, DeepSparse gives you the performance of GPUs and the simplicity of s
DeepSparse takes advantage of model sparsity to gain its performance speedup. DeepSparse takes advantage of model sparsity to gain its performance speedup.
Sparsification through pruning and quantization is a broadly studied technique, allowing order-of-magnitude reductions in the size and compute needed to Sparsification through pruning and quantization is a broadly studied technique, allowing order-of-magnitude reductions in the size and compute needed to execute a network, while maintaining high accuracy. DeepSparse is sparsity-aware, meaning it skips the zeroed-out parameters, shrinking the amount of compute in a forward pass. Since the sparse computation is now memory bound, DeepSparse executes the network depth-wise, breaking the problem into Tensor Columns, vertical stripes of computation that fit in cache.
execute a network, while maintaining high accuracy. DeepSparse is sparsity-aware, meaning it skips the zeroed-out parameters, shrinking the amount of compute
in a forward pass. Since the sparse computation is now memory bound, DeepSparse executes the network depth-wise, breaking the problem into Tensor Columns,
vertical stripes of computation that fit in cache.
<p align="center"> <p align="center">
<img width="60%" src="https://github.com/neuralmagic/deepsparse/raw/main/examples/ultralytics-yolo/ultralytics-readmes/tensor-columns.png"> <img width="60%" src="https://github.com/neuralmagic/deepsparse/raw/main/examples/ultralytics-yolo/ultralytics-readmes/tensor-columns.png">
@ -96,8 +92,7 @@ wget -O basilica.jpg https://raw.githubusercontent.com/neuralmagic/deepsparse/ma
#### Python API #### Python API
`Pipelines` wrap pre-processing and output post-processing around the runtime, providing a clean interface for adding DeepSparse to an application. `Pipelines` wrap pre-processing and output post-processing around the runtime, providing a clean interface for adding DeepSparse to an application. The DeepSparse-Ultralytics integration includes an out-of-the-box `Pipeline` that accepts raw images and outputs the bounding boxes.
The DeepSparse-Ultralytics integration includes an out-of-the-box `Pipeline` that accepts raw images and outputs the bounding boxes.
Create a `Pipeline` and run inference: Create a `Pipeline` and run inference:
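A rough sketch of that workflow (the SparseZoo stub, thresholds, and image filename below are placeholder values; consult the DeepSparse docs for current stubs and output fields):

```python
from deepsparse import Pipeline

# Placeholder SparseZoo stub for a pruned-quantized YOLOv5s model
model_stub = "zoo:cv/detection/yolov5-s/pytorch/ultralytics/coco/pruned65_quant-none"

yolo_pipeline = Pipeline.create(task="yolo", model_path=model_stub)
outputs = yolo_pipeline(images=["basilica.jpg"], iou_thres=0.6, conf_thres=0.001)
print(outputs.boxes)  # bounding boxes per input image
```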
@ -127,9 +122,7 @@ apt-get install libgl1-mesa-glx
#### HTTP Server #### HTTP Server
DeepSparse Server runs on top of the popular FastAPI web framework and Uvicorn web server. With just a single CLI command, you can easily set up a model DeepSparse Server runs on top of the popular FastAPI web framework and Uvicorn web server. With just a single CLI command, you can easily set up a model service endpoint with DeepSparse. The Server supports any Pipeline from DeepSparse, including object detection with YOLOv5, enabling you to send raw images to the endpoint and receive the bounding boxes.
service endpoint with DeepSparse. The Server supports any Pipeline from DeepSparse, including object detection with YOLOv5, enabling you to send raw
images to the endpoint and receive the bounding boxes.
Spin up the Server with the pruned-quantized YOLOv5s: Spin up the Server with the pruned-quantized YOLOv5s:

View File

@ -4,8 +4,7 @@ description: Detailed guide on loading YOLOv5 from PyTorch Hub. Includes example
keywords: Ultralytics, YOLOv5, PyTorch, loading YOLOv5, PyTorch Hub, inference, multi-GPU inference, training keywords: Ultralytics, YOLOv5, PyTorch, loading YOLOv5, PyTorch Hub, inference, multi-GPU inference, training
--- ---
📚 This guide explains how to load YOLOv5 🚀 from PyTorch Hub at [https://pytorch.org/hub/ultralytics_yolov5](https://pytorch.org/hub/ultralytics_yolov5). 📚 This guide explains how to load YOLOv5 🚀 from PyTorch Hub at [https://pytorch.org/hub/ultralytics_yolov5](https://pytorch.org/hub/ultralytics_yolov5). UPDATED 26 March 2023.
UPDATED 26 March 2023.
## Before You Start ## Before You Start

View File

@ -6,8 +6,7 @@ keywords: Ultralytics, YOLOv5, Roboflow, data organization, data labelling, data
# Roboflow Datasets # Roboflow Datasets
You can now use Roboflow to organize, label, prepare, version, and host your datasets for training YOLOv5 🚀 models. Roboflow is free to use with YOLOv5 if you make your workspace public. You can now use Roboflow to organize, label, prepare, version, and host your datasets for training YOLOv5 🚀 models. Roboflow is free to use with YOLOv5 if you make your workspace public. UPDATED 7 June 2023.
UPDATED 7 June 2023.
!!! warning !!! warning

View File

@ -6,8 +6,7 @@ keywords: TensorRT, NVIDIA Jetson, DeepStream SDK, deployment, Ultralytics, YOLO
# Deploy on NVIDIA Jetson using TensorRT and DeepStream SDK # Deploy on NVIDIA Jetson using TensorRT and DeepStream SDK
📚 This guide explains how to deploy a trained model into NVIDIA Jetson Platform and perform inference using TensorRT and DeepStream SDK. Here we use TensorRT to maximize the inference performance on the Jetson platform. 📚 This guide explains how to deploy a trained model into NVIDIA Jetson Platform and perform inference using TensorRT and DeepStream SDK. Here we use TensorRT to maximize the inference performance on the Jetson platform. UPDATED 18 November 2022.
UPDATED 18 November 2022.
## Hardware Verification ## Hardware Verification

View File

@ -6,8 +6,7 @@ keywords: YOLOv5, Ultralytics, Test-Time Augmentation, TTA, mAP, Recall, model p
# Test-Time Augmentation (TTA) # Test-Time Augmentation (TTA)
📚 This guide explains how to use Test Time Augmentation (TTA) during testing and inference for improved mAP and Recall with YOLOv5 🚀. 📚 This guide explains how to use Test Time Augmentation (TTA) during testing and inference for improved mAP and Recall with YOLOv5 🚀. UPDATED 25 September 2022.
UPDATED 25 September 2022.
## Before You Start ## Before You Start

View File

@ -4,8 +4,7 @@ description: Our comprehensive guide provides insights on how to train your YOLO
keywords: Ultralytics, YOLOv5, Training guide, dataset preparation, model selection, training settings, mAP results, Machine Learning, Object Detection keywords: Ultralytics, YOLOv5, Training guide, dataset preparation, model selection, training settings, mAP results, Machine Learning, Object Detection
--- ---
📚 This guide explains how to produce the best mAP and training results with YOLOv5 🚀. 📚 This guide explains how to produce the best mAP and training results with YOLOv5 🚀. UPDATED 25 May 2022.
UPDATED 25 May 2022.
Most of the time good results can be obtained with no changes to the models or training settings, **provided your dataset is sufficiently large and well labelled**. If at first you don't get good results, there are steps you might be able to take to improve, but we always recommend users **first train with all default settings** before considering any changes. This helps establish a performance baseline and spot areas for improvement. Most of the time good results can be obtained with no changes to the models or training settings, **provided your dataset is sufficiently large and well labelled**. If at first you don't get good results, there are steps you might be able to take to improve, but we always recommend users **first train with all default settings** before considering any changes. This helps establish a performance baseline and spot areas for improvement.

View File

@ -4,8 +4,7 @@ description: Learn how to train your data on custom datasets using YOLOv5. Simpl
keywords: YOLOv5, train on custom dataset, image collection, model training, object detection, image labelling, Ultralytics, PyTorch, machine learning keywords: YOLOv5, train on custom dataset, image collection, model training, object detection, image labelling, Ultralytics, PyTorch, machine learning
--- ---
📚 This guide explains how to train your own **custom dataset** with [YOLOv5](https://github.com/ultralytics/yolov5) 🚀. 📚 This guide explains how to train your own **custom dataset** with [YOLOv5](https://github.com/ultralytics/yolov5) 🚀. UPDATED 7 June 2023.
UPDATED 7 June 2023.
## Before You Start ## Before You Start
@ -49,35 +48,27 @@ Once you have collected images, you will need to annotate the objects of interes
<p align="center"><a href="https://app.roboflow.com/?model=yolov5&ref=ultralytics" title="Create a Free Roboflow Account"><img width="450" src="https://uploads-ssl.webflow.com/5f6bc60e665f54545a1e52a5/6152a275ad4b4ac20cd2e21a_roboflow-annotate.gif" /></a></p> <p align="center"><a href="https://app.roboflow.com/?model=yolov5&ref=ultralytics" title="Create a Free Roboflow Account"><img width="450" src="https://uploads-ssl.webflow.com/5f6bc60e665f54545a1e52a5/6152a275ad4b4ac20cd2e21a_roboflow-annotate.gif" /></a></p>
[Roboflow Annotate](https://roboflow.com/annotate?ref=ultralytics) is a simple [Roboflow Annotate](https://roboflow.com/annotate?ref=ultralytics) is a simple web-based tool for managing and labeling your images with your team and exporting them in [YOLOv5's annotation format](https://roboflow.com/formats/yolov5-pytorch-txt?ref=ultralytics).
web-based tool for managing and labeling your images with your team and exporting
them in [YOLOv5's annotation format](https://roboflow.com/formats/yolov5-pytorch-txt?ref=ultralytics).
### 1.3 Prepare Dataset for YOLOv5 ### 1.3 Prepare Dataset for YOLOv5
Whether you [label your images with Roboflow](https://roboflow.com/annotate?ref=ultralytics) or not, you can use it to convert your dataset into YOLO format, create a YOLOv5 YAML configuration file, and host it for importing into your training script. Whether you [label your images with Roboflow](https://roboflow.com/annotate?ref=ultralytics) or not, you can use it to convert your dataset into YOLO format, create a YOLOv5 YAML configuration file, and host it for importing into your training script.
[Create a free Roboflow account](https://app.roboflow.com/?model=yolov5&ref=ultralytics) [Create a free Roboflow account](https://app.roboflow.com/?model=yolov5&ref=ultralytics)
and upload your dataset to a `Public` workspace, label any unannotated images, and upload your dataset to a `Public` workspace, label any unannotated images, then generate and export a version of your dataset in `YOLOv5 Pytorch` format.
then generate and export a version of your dataset in `YOLOv5 Pytorch` format.
Note: YOLOv5 does online augmentation during training, so we do not recommend Note: YOLOv5 does online augmentation during training, so we do not recommend applying any augmentation steps in Roboflow for training with YOLOv5. But we recommend applying the following preprocessing steps:
applying any augmentation steps in Roboflow for training with YOLOv5. But we
recommend applying the following preprocessing steps:
<p align="center"><img width="450" src="https://uploads-ssl.webflow.com/5f6bc60e665f54545a1e52a5/6152a273477fccf42a0fd3d6_roboflow-preprocessing.png" title="Recommended Preprocessing Steps" /></p> <p align="center"><img width="450" src="https://uploads-ssl.webflow.com/5f6bc60e665f54545a1e52a5/6152a273477fccf42a0fd3d6_roboflow-preprocessing.png" title="Recommended Preprocessing Steps" /></p>
* **Auto-Orient** - to strip EXIF orientation from your images. * **Auto-Orient** - to strip EXIF orientation from your images.
* **Resize (Stretch)** - to the square input size of your model (640x640 is the YOLOv5 default). * **Resize (Stretch)** - to the square input size of your model (640x640 is the YOLOv5 default).
Generating a version will give you a point in time snapshot of your dataset so Generating a version will give you a point in time snapshot of your dataset so you can always go back and compare your future model training runs against it, even if you add more images or change its configuration later.
you can always go back and compare your future model training runs against it,
even if you add more images or change its configuration later.
<p align="center"><img width="450" src="https://uploads-ssl.webflow.com/5f6bc60e665f54545a1e52a5/6152a2733fd1da943619934e_roboflow-export.png" title="Export in YOLOv5 Format" /></p> <p align="center"><img width="450" src="https://uploads-ssl.webflow.com/5f6bc60e665f54545a1e52a5/6152a2733fd1da943619934e_roboflow-export.png" title="Export in YOLOv5 Format" /></p>
Export in `YOLOv5 Pytorch` format, then copy the snippet into your training Export in `YOLOv5 Pytorch` format, then copy the snippet into your training script or notebook to download your dataset.
script or notebook to download your dataset.
<p align="center"><img width="450" src="https://uploads-ssl.webflow.com/5f6bc60e665f54545a1e52a5/6152a273a92e4f5cb72594df_roboflow-snippet.png" title="Roboflow dataset download snippet" /></p> <p align="center"><img width="450" src="https://uploads-ssl.webflow.com/5f6bc60e665f54545a1e52a5/6152a273a92e4f5cb72594df_roboflow-snippet.png" title="Roboflow dataset download snippet" /></p>

View File

@ -4,8 +4,7 @@ description: Learn to freeze YOLOv5 layers for efficient transfer learning. Opti
keywords: YOLOv5, freeze layers, transfer learning, model retraining, Ultralytics keywords: YOLOv5, freeze layers, transfer learning, model retraining, Ultralytics
--- ---
📚 This guide explains how to **freeze** YOLOv5 🚀 layers when **transfer learning**. Transfer learning is a useful way to quickly retrain a model on new data without having to retrain the entire network. Instead, part of the initial weights are frozen in place, and the rest of the weights are used to compute loss and are updated by the optimizer. This requires less resources than normal training and allows for faster training times, though it may also result in reductions to final trained accuracy. 📚 This guide explains how to **freeze** YOLOv5 🚀 layers when **transfer learning**. Transfer learning is a useful way to quickly retrain a model on new data without having to retrain the entire network. Instead, part of the initial weights are frozen in place, and the rest of the weights are used to compute loss and are updated by the optimizer. This requires less resources than normal training and allows for faster training times, though it may also result in reductions to final trained accuracy. UPDATED 25 September 2022.
UPDATED 25 September 2022.
## Before You Start ## Before You Start

View File

@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license # Ultralytics YOLO 🚀, AGPL-3.0 license
__version__ = '8.0.195' __version__ = '8.0.196'
from ultralytics.models import RTDETR, SAM, YOLO from ultralytics.models import RTDETR, SAM, YOLO
from ultralytics.models.fastsam import FastSAM from ultralytics.models.fastsam import FastSAM

View File

@ -342,7 +342,7 @@ def entrypoint(debug=''):
'copy-cfg': copy_default_cfg} 'copy-cfg': copy_default_cfg}
full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special} full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special}
# Define common mis-uses of special commands, i.e. -h, -help, --help # Define common misuses of special commands, i.e. -h, -help, --help
special.update({k[0]: v for k, v in special.items()}) # singular special.update({k[0]: v for k, v in special.items()}) # singular
special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith('s')}) # singular special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith('s')}) # singular
special = {**special, **{f'-{k}': v for k, v in special.items()}, **{f'--{k}': v for k, v in special.items()}} special = {**special, **{f'-{k}': v for k, v in special.items()}, **{f'--{k}': v for k, v in special.items()}}

View File

@ -212,7 +212,6 @@ class v8SegmentationLoss(v8DetectionLoss):
def __init__(self, model): # model must be de-paralleled def __init__(self, model): # model must be de-paralleled
super().__init__(model) super().__init__(model)
self.nm = model.model[-1].nm # number of masks
self.overlap = model.args.overlap_mask self.overlap = model.args.overlap_mask
def __call__(self, preds, batch): def __call__(self, preds, batch):
@ -268,38 +267,108 @@ class v8SegmentationLoss(v8DetectionLoss):
if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample
masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0]
for i in range(batch_size): loss[1] = self.calculate_segmentation_loss(fg_mask, masks, target_gt_idx, target_bboxes, batch_idx, proto,
if fg_mask[i].sum(): pred_masks, imgsz, self.overlap)
mask_idx = target_gt_idx[i][fg_mask[i]]
if self.overlap:
gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0)
else:
gt_mask = masks[batch_idx.view(-1) == i][mask_idx]
xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]]
marea = xyxy2xywh(xyxyn)[:, 2:].prod(1)
mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)
loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy, marea) # seg
# WARNING: lines below prevents Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove
else:
loss[1] += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss
# WARNING: lines below prevent Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove # WARNING: lines below prevent Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove
else: else:
loss[1] += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss loss[1] += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss
loss[0] *= self.hyp.box # box gain loss[0] *= self.hyp.box # box gain
loss[1] *= self.hyp.box / batch_size # seg gain loss[1] *= self.hyp.box # seg gain
loss[2] *= self.hyp.cls # cls gain loss[2] *= self.hyp.cls # cls gain
loss[3] *= self.hyp.dfl # dfl gain loss[3] *= self.hyp.dfl # dfl gain
return loss.sum() * batch_size, loss.detach() # loss(box, seg, cls, dfl) return loss.sum() * batch_size, loss.detach() # loss(box, seg, cls, dfl)
def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): @staticmethod
"""Mask loss for one image.""" def single_mask_loss(gt_mask: torch.Tensor, pred: torch.Tensor, proto: torch.Tensor, xyxy: torch.Tensor,
pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n, 32) @ (32,80,80) -> (n,80,80) area: torch.Tensor) -> torch.Tensor:
"""
Compute the instance segmentation loss for a single image.
Args:
gt_mask (torch.Tensor): Ground truth mask of shape (n, H, W), where n is the number of objects.
pred (torch.Tensor): Predicted mask coefficients of shape (n, 32).
proto (torch.Tensor): Prototype masks of shape (32, H, W).
xyxy (torch.Tensor): Ground truth bounding boxes in xyxy format, normalized to [0, 1], of shape (n, 4).
area (torch.Tensor): Area of each ground truth bounding box of shape (n,).
Returns:
(torch.Tensor): The calculated mask loss for a single image.
Notes:
The function uses the equation pred_mask = torch.einsum('in,nhw->ihw', pred, proto) to produce the
predicted masks from the prototype masks and predicted mask coefficients.
"""
pred_mask = torch.einsum('in,nhw->ihw', pred, proto) # (n, 32) @ (32, 80, 80) -> (n, 80, 80)
loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none')
return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).sum()
def calculate_segmentation_loss(
self,
fg_mask: torch.Tensor,
masks: torch.Tensor,
target_gt_idx: torch.Tensor,
target_bboxes: torch.Tensor,
batch_idx: torch.Tensor,
proto: torch.Tensor,
pred_masks: torch.Tensor,
imgsz: torch.Tensor,
overlap: bool,
) -> torch.Tensor:
"""
Calculate the loss for instance segmentation.
Args:
fg_mask (torch.Tensor): A binary tensor of shape (BS, N_anchors) indicating which anchors are positive.
masks (torch.Tensor): Ground truth masks of shape (BS, H, W) if `overlap` is False, otherwise (BS, ?, H, W).
target_gt_idx (torch.Tensor): Indexes of ground truth objects for each anchor of shape (BS, N_anchors).
target_bboxes (torch.Tensor): Ground truth bounding boxes for each anchor of shape (BS, N_anchors, 4).
batch_idx (torch.Tensor): Batch indices of shape (N_labels_in_batch, 1).
proto (torch.Tensor): Prototype masks of shape (BS, 32, H, W).
pred_masks (torch.Tensor): Predicted masks for each anchor of shape (BS, N_anchors, 32).
imgsz (torch.Tensor): Size of the input image as a tensor of shape (2), i.e., (H, W).
overlap (bool): Whether the masks in `masks` tensor overlap.
Returns:
(torch.Tensor): The calculated loss for instance segmentation.
Notes:
The batch loss can be computed for improved speed at higher memory usage.
For example, pred_mask can be computed as follows:
pred_mask = torch.einsum('in,nhw->ihw', pred, proto) # (i, 32) @ (32, 160, 160) -> (i, 160, 160)
"""
_, _, mask_h, mask_w = proto.shape
loss = 0
# normalize to 0-1
target_bboxes_normalized = target_bboxes / imgsz[[1, 0, 1, 0]]
# areas of target bboxes
marea = xyxy2xywh(target_bboxes_normalized)[..., 2:].prod(2)
# normalize to mask size
mxyxy = target_bboxes_normalized * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=proto.device)
for i, single_i in enumerate(zip(fg_mask, target_gt_idx, pred_masks, proto, mxyxy, marea, masks)):
fg_mask_i, target_gt_idx_i, pred_masks_i, proto_i, mxyxy_i, marea_i, masks_i = single_i
if fg_mask_i.any():
mask_idx = target_gt_idx_i[fg_mask_i]
if overlap:
gt_mask = masks_i == (mask_idx + 1).view(-1, 1, 1)
gt_mask = gt_mask.float()
else:
gt_mask = masks[batch_idx.view(-1) == i][mask_idx]
loss += self.single_mask_loss(gt_mask, pred_masks_i[fg_mask_i], proto_i, mxyxy_i[fg_mask_i],
marea_i[fg_mask_i])
# WARNING: lines below prevent Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove
else:
loss += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss
return loss / fg_mask.sum()
class v8PoseLoss(v8DetectionLoss): class v8PoseLoss(v8DetectionLoss):

View File

@ -155,12 +155,12 @@ class Annotator:
masks = masks.unsqueeze(3) # shape(n,h,w,1) masks = masks.unsqueeze(3) # shape(n,h,w,1)
masks_color = masks * (colors * alpha) # shape(n,h,w,3) masks_color = masks * (colors * alpha) # shape(n,h,w,3)
inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) inv_alpha_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1)
mcs = masks_color.max(dim=0).values # shape(n,h,w,3) mcs = masks_color.max(dim=0).values # shape(n,h,w,3)
im_gpu = im_gpu.flip(dims=[0]) # flip channel im_gpu = im_gpu.flip(dims=[0]) # flip channel
im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3)
im_gpu = im_gpu * inv_alph_masks[-1] + mcs im_gpu = im_gpu * inv_alpha_masks[-1] + mcs
im_mask = (im_gpu * 255) im_mask = (im_gpu * 255)
im_mask_np = im_mask.byte().cpu().numpy() im_mask_np = im_mask.byte().cpu().numpy()
self.im[:] = im_mask_np if retina_masks else ops.scale_image(im_mask_np, self.im.shape) self.im[:] = im_mask_np if retina_masks else ops.scale_image(im_mask_np, self.im.shape)