Mirror of https://github.com/THU-MIG/yolov10.git, synced 2025-05-23 05:24:22 +08:00
Update docs building code (#7601)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: Muhammad Rizwan Munawar <chr043416@gmail.com>
Co-authored-by: Muhammad Rizwan Munawar <muhammadrizwanmunawar123@gmail.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
parent 5f5f5d08f2
commit d021524e85
.github/workflows/publish.yml (vendored): 3 changed lines
@@ -28,7 +28,7 @@ jobs:
       - name: Set up Python environment
         uses: actions/setup-python@v5
         with:
-          python-version: '3.10'
+          python-version: '3.11'
           cache: 'pip' # caching pip dependencies
       - name: Install dependencies
         run: |
@@ -66,6 +66,7 @@ jobs:
         env:
           PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }}
           INDEXNOW_KEY: ${{ secrets.INDEXNOW_KEY_DOCS }}
+          WEGLOT_KEY: ${{ secrets.WEGLOT_KEY_DOCS }}
         run: |
           python docs/build_docs.py
           git config --global user.name "Glenn Jocher"
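The new `WEGLOT_KEY` entry exposes the secret to the build step as an ordinary process environment variable. A minimal sketch of the consuming side, mirroring the `os.environ.get` call added to docs/build_docs.py below:

```python
import os

# A step's `env:` entries surface as plain environment variables, so the
# build script can pick up the key without any GitHub-specific tooling.
key = os.environ.get("WEGLOT_KEY")  # None when the secret is unset
print("Weglot key configured" if key else "No Weglot key; skipping head injection")
```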
docs/build_docs.py:

@@ -23,14 +23,17 @@ Usage:
 Note:
     - This script is built to be run in an environment where Python and MkDocs are installed and properly configured.
 """
+import os
 import re
 import shutil
 import subprocess
 from pathlib import Path
 
+from tqdm import tqdm
 
 DOCS = Path(__file__).parent.resolve()
 SITE = DOCS.parent / "site"
+LANGUAGES = True
 
 
 def build_docs():
@@ -44,9 +47,10 @@ def build_docs():
     subprocess.run(f"mkdocs build -f {DOCS}/mkdocs.yml", check=True, shell=True)
 
     # Build other localized documentations
-    for file in DOCS.glob("mkdocs_*.yml"):
-        print(f"Building MkDocs site with configuration file: {file}")
-        subprocess.run(f"mkdocs build -f {file}", check=True, shell=True)
+    if LANGUAGES:
+        for file in DOCS.glob("mkdocs_*.yml"):
+            print(f"Building MkDocs site with configuration file: {file}")
+            subprocess.run(f"mkdocs build -f {file}", check=True, shell=True)
     print(f"Site built at {SITE}")
 
 
@@ -100,19 +104,51 @@ def update_page_title(file_path: Path, new_title: str):
         file.write(updated_content)
 
 
+def update_html_head(key=""):
+    """Update the HTML head section of each file."""
+    html_files = Path(SITE).rglob("*.html")
+    for html_file in tqdm(html_files, desc="Processing HTML files"):
+        with html_file.open("r", encoding="utf-8") as file:
+            html_content = file.read()
+
+        script = f"""
+<script type="text/javascript" src="https://cdn.weglot.com/weglot.min.js"></script>
+<script>
+    Weglot.initialize({{
+        api_key: '{key}'
+    }});
+</script>
+"""
+        if script in html_content:  # script already in HTML file
+            return
+
+        head_end_index = html_content.lower().rfind("</head>")
+        if head_end_index != -1:
+            # Add the specified JavaScript to the HTML file just before the end of the head tag.
+            new_html_content = html_content[:head_end_index] + script + html_content[head_end_index:]
+            with html_file.open("w", encoding="utf-8") as file:
+                file.write(new_html_content)
+
+
 def main():
     # Build the docs
     build_docs()
 
+    # Update titles
+    update_page_title(SITE / "404.html", new_title="Ultralytics Docs - Not Found")
+
     # Update .md in href links
-    update_html_links()
+    if LANGUAGES:
+        update_html_links()
+
+    # Update HTML file head section
+    key = os.environ.get("WEGLOT_KEY")
+    if not LANGUAGES and key:
+        update_html_head(key)
 
     # Show command to serve built website
     print('Serve site at http://localhost:8000 with "python -m http.server --directory site"')
-
-    # Update titles
-    update_page_title(SITE / "404.html", new_title="Ultralytics Docs - Not Found")
 
 
 if __name__ == "__main__":
     main()
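To see what the new `update_html_head` helper does to a page, here is a minimal standalone sketch of the same splice-before-`</head>` technique, operating on an inline HTML string rather than the files on disk:

```python
# Minimal sketch of the technique update_html_head uses: find the closing
# </head> tag and splice a <script> block in just before it.
html = "<html><head><title>Ultralytics Docs</title></head><body></body></html>"
snippet = '<script src="https://cdn.weglot.com/weglot.min.js"></script>'

i = html.lower().rfind("</head>")
if i != -1 and snippet not in html:  # skip pages already carrying the snippet
    html = html[:i] + snippet + html[i:]

print(html)  # the script tag now sits at the end of <head>
```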
@@ -8,6 +8,15 @@ keywords: Open Images V7, object detection, segmentation masks, visual relationships
 
 [Open Images V7](https://storage.googleapis.com/openimages/web/index.html) is a versatile and expansive dataset championed by Google. Aimed at propelling research in the realm of computer vision, it boasts a vast collection of images annotated with a plethora of data, including image-level labels, object bounding boxes, object segmentation masks, visual relationships, and localized narratives.
 
+## Open Images V7 Pretrained Models
+
+| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
+|-------------------------------------------------------------------------------------------|-----------------------|----------------------|--------------------------------|-------------------------------------|--------------------|-------------------|
+| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8n-oiv7.pt) | 640 | 18.4 | 142.4 | 1.21 | 3.5 | 10.5 |
+| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8s-oiv7.pt) | 640 | 27.7 | 183.1 | 1.40 | 11.4 | 29.7 |
+| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8m-oiv7.pt) | 640 | 33.6 | 408.5 | 2.26 | 26.2 | 80.6 |
+| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8l-oiv7.pt) | 640 | 34.9 | 596.9 | 2.43 | 44.1 | 167.4 |
+| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.1.0/yolov8x-oiv7.pt) | 640 | 36.3 | 860.6 | 3.56 | 68.7 | 260.6 |
+
 
 
 ## Key Features
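A brief usage sketch for the new pretrained models, assuming the standard `ultralytics` Python API (the checkpoint name comes from the table above and is auto-downloaded by the library):

```python
from ultralytics import YOLO

# Load the Open Images V7 pretrained nano model from the table above
model = YOLO("yolov8n-oiv7.pt")

# Run inference on a sample image; weights and image are fetched automatically
results = model("https://ultralytics.com/images/bus.jpg")
results[0].show()  # visualize the detections
```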
@@ -174,21 +174,21 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/)
 
 | Name | Type | Default | Description |
 |---------------------|-------------|----------------------------|-----------------------------------------------|
-| view_img | `bool` | `False` | Display frames with counts |
+| `view_img` | `bool` | `False` | Display frames with counts |
-| view_in_counts | `bool` | `True` | Display incounts only on video frame |
+| `view_in_counts` | `bool` | `True` | Display incounts only on video frame |
-| view_out_counts | `bool` | `True` | Display outcounts only on video frame |
+| `view_out_counts` | `bool` | `True` | Display outcounts only on video frame |
-| line_thickness | `int` | `2` | Increase bounding boxes thickness |
+| `line_thickness` | `int` | `2` | Increase bounding boxes thickness |
-| reg_pts | `list` | `[(20, 400), (1260, 400)]` | Points defining the Region Area |
+| `reg_pts` | `list` | `[(20, 400), (1260, 400)]` | Points defining the Region Area |
-| classes_names | `dict` | `model.model.names` | Dictionary of Class Names |
+| `classes_names` | `dict` | `model.model.names` | Dictionary of Class Names |
-| region_color | `RGB Color` | `(255, 0, 255)` | Color of the Object counting Region or Line |
+| `region_color` | `RGB Color` | `(255, 0, 255)` | Color of the Object counting Region or Line |
-| track_thickness | `int` | `2` | Thickness of Tracking Lines |
+| `track_thickness` | `int` | `2` | Thickness of Tracking Lines |
-| draw_tracks | `bool` | `False` | Enable drawing Track lines |
+| `draw_tracks` | `bool` | `False` | Enable drawing Track lines |
-| track_color | `RGB Color` | `(0, 255, 0)` | Color for each track line |
+| `track_color` | `RGB Color` | `(0, 255, 0)` | Color for each track line |
-| line_dist_thresh | `int` | `15` | Euclidean Distance threshold for line counter |
+| `line_dist_thresh` | `int` | `15` | Euclidean Distance threshold for line counter |
-| count_txt_thickness | `int` | `2` | Thickness of Object counts text |
+| `count_txt_thickness` | `int` | `2` | Thickness of Object counts text |
-| count_txt_color | `RGB Color` | `(0, 0, 0)` | Foreground color for Object counts text |
+| `count_txt_color` | `RGB Color` | `(0, 0, 0)` | Foreground color for Object counts text |
-| count_color | `RGB Color` | `(255, 255, 255)` | Background color for Object counts text |
+| `count_color` | `RGB Color` | `(255, 255, 255)` | Background color for Object counts text |
-| region_thickness | `int` | `5` | Thickness for object counter region or line |
+| `region_thickness` | `int` | `5` | Thickness for object counter region or line |
 
 ### Arguments `model.track`
 
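For context, a hedged sketch of how these arguments are typically passed, assuming the v8.1-era `ultralytics.solutions.object_counter` API with its `set_args`/`start_counting` methods (the video path is a hypothetical placeholder):

```python
import cv2
from ultralytics import YOLO
from ultralytics.solutions import object_counter

model = YOLO("yolov8n.pt")
counter = object_counter.ObjectCounter()
counter.set_args(
    view_img=True,                     # display frames with counts
    reg_pts=[(20, 400), (1260, 400)],  # counting line, the table's default
    classes_names=model.names,         # dictionary of class names
    draw_tracks=True,                  # draw track lines
)

cap = cv2.VideoCapture("path/to/video.mp4")  # hypothetical input path
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    tracks = model.track(im0, persist=True, show=False)
    im0 = counter.start_counting(im0, tracks)
cap.release()
cv2.destroyAllWindows()
```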
@@ -34,10 +34,10 @@ FastSAM is designed to address the limitations of the [Segment Anything Model (SAM)]
 
 This table presents the available models with their specific pre-trained weights, the tasks they support, and their compatibility with different operating modes like [Inference](../modes/predict.md), [Validation](../modes/val.md), [Training](../modes/train.md), and [Export](../modes/export.md), indicated by ✅ emojis for supported modes and ❌ emojis for unsupported modes.
 
 | Model Type | Pre-trained Weights | Tasks Supported | Inference | Validation | Training | Export |
-|------------|---------------------|----------------------------------------------|-----------|------------|----------|--------|
+|------------|---------------------------------------------------------------------------------------------|----------------------------------------------|-----------|------------|----------|--------|
-| FastSAM-s | `FastSAM-s.pt` | [Instance Segmentation](../tasks/segment.md) | ✅ | ❌ | ❌ | ✅ |
+| FastSAM-s | [FastSAM-s.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/FastSAM-s.pt) | [Instance Segmentation](../tasks/segment.md) | ✅ | ❌ | ❌ | ✅ |
-| FastSAM-x | `FastSAM-x.pt` | [Instance Segmentation](../tasks/segment.md) | ✅ | ❌ | ❌ | ✅ |
+| FastSAM-x | [FastSAM-x.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/FastSAM-x.pt) | [Instance Segmentation](../tasks/segment.md) | ✅ | ❌ | ❌ | ✅ |
 
 ## Usage Examples
 
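A short inference sketch for the table above, assuming the standard `FastSAM` interface exported by the `ultralytics` package (checkpoints are auto-downloaded from the linked releases):

```python
from ultralytics import FastSAM

# Load a FastSAM checkpoint from the table above
model = FastSAM("FastSAM-s.pt")

# Segment everything in an image; retina_masks requests full-resolution masks
results = model("https://ultralytics.com/images/bus.jpg", retina_masks=True)
```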
@@ -20,9 +20,9 @@ MobileSAM is trained on a single GPU with a 100k dataset (1% of the original images)
 
 This table presents the available models with their specific pre-trained weights, the tasks they support, and their compatibility with different operating modes like [Inference](../modes/predict.md), [Validation](../modes/val.md), [Training](../modes/train.md), and [Export](../modes/export.md), indicated by ✅ emojis for supported modes and ❌ emojis for unsupported modes.
 
 | Model Type | Pre-trained Weights | Tasks Supported | Inference | Validation | Training | Export |
-|------------|---------------------|----------------------------------------------|-----------|------------|----------|--------|
+|------------|-----------------------------------------------------------------------------------------------|----------------------------------------------|-----------|------------|----------|--------|
-| MobileSAM | `mobile_sam.pt` | [Instance Segmentation](../tasks/segment.md) | ✅ | ❌ | ❌ | ❌ |
+| MobileSAM | [mobile_sam.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/mobile_sam.pt) | [Instance Segmentation](../tasks/segment.md) | ✅ | ❌ | ❌ | ❌ |
 
 ## Adapting from SAM to MobileSAM
 
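A usage sketch for MobileSAM, which loads through the same `SAM` interface (the point prompt below is an illustrative coordinate, not a value from this page):

```python
from ultralytics import SAM

# MobileSAM checkpoints load through the SAM interface
model = SAM("mobile_sam.pt")

# Segment the object at a point prompt (illustrative coordinates)
model.predict("https://ultralytics.com/images/bus.jpg", points=[900, 370], labels=[1])
```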
@@ -63,10 +63,10 @@ This example provides simple RT-DETR training and inference examples. For full
 
 This table presents the model types, the specific pre-trained weights, the tasks supported by each model, and the various modes ([Train](../modes/train.md), [Val](../modes/val.md), [Predict](../modes/predict.md), [Export](../modes/export.md)) that are supported, indicated by ✅ emojis.
 
 | Model Type | Pre-trained Weights | Tasks Supported | Inference | Validation | Training | Export |
-|---------------------|---------------------|----------------------------------------|-----------|------------|----------|--------|
+|---------------------|-------------------------------------------------------------------------------------------|----------------------------------------|-----------|------------|----------|--------|
-| RT-DETR Large | `rtdetr-l.pt` | [Object Detection](../tasks/detect.md) | ✅ | ✅ | ✅ | ✅ |
+| RT-DETR Large | [rtdetr-l.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/rtdetr-l.pt) | [Object Detection](../tasks/detect.md) | ✅ | ✅ | ✅ | ✅ |
-| RT-DETR Extra-Large | `rtdetr-x.pt` | [Object Detection](../tasks/detect.md) | ✅ | ✅ | ✅ | ✅ |
+| RT-DETR Extra-Large | [rtdetr-x.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/rtdetr-x.pt) | [Object Detection](../tasks/detect.md) | ✅ | ✅ | ✅ | ✅ |
 
 ## Citations and Acknowledgements
 
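A short inference sketch for the table above, assuming the `RTDETR` class exported by the `ultralytics` package:

```python
from ultralytics import RTDETR

# Load the RT-DETR Large checkpoint from the table above
model = RTDETR("rtdetr-l.pt")
model.info()  # print a model summary

# Run object detection on a sample image
results = model("https://ultralytics.com/images/bus.jpg")
```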
@@ -29,10 +29,10 @@ For an in-depth look at the Segment Anything Model and the SA-1B dataset, please
 
 This table presents the available models with their specific pre-trained weights, the tasks they support, and their compatibility with different operating modes like [Inference](../modes/predict.md), [Validation](../modes/val.md), [Training](../modes/train.md), and [Export](../modes/export.md), indicated by ✅ emojis for supported modes and ❌ emojis for unsupported modes.
 
 | Model Type | Pre-trained Weights | Tasks Supported | Inference | Validation | Training | Export |
-|------------|---------------------|----------------------------------------------|-----------|------------|----------|--------|
+|------------|-------------------------------------------------------------------------------------|----------------------------------------------|-----------|------------|----------|--------|
-| SAM base | `sam_b.pt` | [Instance Segmentation](../tasks/segment.md) | ✅ | ❌ | ❌ | ❌ |
+| SAM base | [sam_b.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/sam_b.pt) | [Instance Segmentation](../tasks/segment.md) | ✅ | ❌ | ❌ | ❌ |
-| SAM large | `sam_l.pt` | [Instance Segmentation](../tasks/segment.md) | ✅ | ❌ | ❌ | ❌ |
+| SAM large | [sam_l.pt](https://github.com/ultralytics/assets/releases/download/v8.1.0/sam_l.pt) | [Instance Segmentation](../tasks/segment.md) | ✅ | ❌ | ❌ | ❌ |
 
 ## How to Use SAM: Versatility and Power in Image Segmentation
 
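A prompted-inference sketch for the table above, assuming the `SAM` class from the `ultralytics` package (the box prompt is an illustrative coordinate):

```python
from ultralytics import SAM

# Load the SAM base checkpoint from the table above
model = SAM("sam_b.pt")

# Segment the region inside a bounding-box prompt (illustrative coordinates)
model.predict("https://ultralytics.com/images/bus.jpg", bboxes=[439, 437, 524, 709])
```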
@@ -547,7 +547,7 @@ def xywhr2xyxyxyxy(rboxes):
         be in degrees from 0 to 90.
 
     Args:
-        center (numpy.ndarray | torch.Tensor): Input data in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).
+        rboxes (numpy.ndarray | torch.Tensor): Input data in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).
 
     Returns:
         (numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 4, 2) or (b, n, 4, 2).
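To make the docstring's conversion concrete, here is a hedged numpy sketch of the same [cx, cy, w, h, rotation] → four-corner transform (rotation taken in radians here, and the helper name is hypothetical, not the library function):

```python
import numpy as np


def xywhr_to_corners(rboxes: np.ndarray) -> np.ndarray:
    """Convert (n, 5) [cx, cy, w, h, rotation(rad)] boxes to (n, 4, 2) corners."""
    cx, cy, w, h, r = (rboxes[:, i] for i in range(5))
    cos, sin = np.cos(r), np.sin(r)
    # Half-extent vectors along the box's rotated width and height axes
    vec1 = np.stack([w / 2 * cos, w / 2 * sin], axis=-1)
    vec2 = np.stack([-h / 2 * sin, h / 2 * cos], axis=-1)
    ctr = np.stack([cx, cy], axis=-1)
    # The four corners are the center offset by every sign combination
    return np.stack(
        [ctr + vec1 + vec2, ctr + vec1 - vec2, ctr - vec1 - vec2, ctr - vec1 + vec2], axis=1
    )


# Example: an axis-aligned 4x2 box centered at (10, 10)
print(xywhr_to_corners(np.array([[10.0, 10.0, 4.0, 2.0, 0.0]])))
```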