mirror of https://github.com/THU-MIG/yolov10.git
synced 2025-05-23 05:24:22 +08:00

Omit ultralytics/utils/callbacks from coverage (#4345)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

This commit is contained in:
parent d47718c367
commit c940d29d4f
@@ -44,7 +44,7 @@ This example provides simple inference code for YOLO, SAM and RTDETR models.
 model.info()
 
 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
 # Run inference with the YOLOv8n model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')
@@ -46,7 +46,7 @@ You can use RT-DETR for object detection tasks using the `ultralytics` pip package
 model.info()
 
 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
 # Run inference with the RT-DETR-l model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')
@@ -62,7 +62,7 @@ In this example we validate YOLO-NAS-s on the COCO8 dataset.
 model.info()
 
 # Validate the model on the COCO8 example dataset
-results model.val(data='coco8.yaml')
+results = model.val(data='coco8.yaml')
 
 # Run inference with the YOLO-NAS-s model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')
@@ -69,7 +69,7 @@ You can use YOLOv3 for object detection tasks using the Ultralytics repository.
 model.info()
 
 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
 # Run inference with the YOLOv3n model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')
@@ -74,7 +74,7 @@ You can use YOLOv5u for object detection tasks using the Ultralytics repository.
 model.info()
 
 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
 # Run inference with the YOLOv5n model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')
@@ -56,7 +56,7 @@ You can use YOLOv6 for object detection tasks using the Ultralytics pip package.
 model.info()
 
 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
 # Run inference with the YOLOv6n model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')
@@ -101,7 +101,7 @@ You can use YOLOv8 for object detection tasks using the Ultralytics pip package.
 model.info()
 
 # Train the model on the COCO8 example dataset for 100 epochs
-results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
 # Run inference with the YOLOv8n model on the 'bus.jpg' image
 results = model('path/to/bus.jpg')
@@ -31,6 +31,14 @@ Ultralytics provides various installation methods including pip, conda, and Docker
     conda install -c conda-forge ultralytics
     ```
+
+    !!! note
+
+        If you are installing in a CUDA environment, best practice is to install `ultralytics`, `pytorch` and `pytorch-cuda` in the same command to allow the conda package manager to resolve any conflicts, or else to install `pytorch-cuda` last to allow it to override the CPU-specific `pytorch` package if necessary.
+        ```bash
+        # Install all packages together using conda
+        conda install -c conda-forge -c pytorch -c nvidia ultralytics pytorch torchvision pytorch-cuda=11.8
+        ```
 
 === "Git clone"
     Clone the `ultralytics` repository if you are interested in contributing to the development or wish to experiment with the latest source code. After cloning, navigate into the directory and install the package in editable mode `-e` using pip.
     ```bash
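The hunk above is cut off at the opening fence; for orientation, the editable install it introduces typically looks like the following sketch (commands assumed from the standard `ultralytics` contributing docs, not part of this diff):

```bash
# Clone the repository and install in editable mode (assumed commands)
git clone https://github.com/ultralytics/ultralytics
cd ultralytics
pip install -e .
```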
@@ -259,7 +267,7 @@ The table below provides an overview of the settings available for adjustment within Ultralytics
 | `api_key` | `''` | `str` | Ultralytics HUB [API Key](https://hub.ultralytics.com/settings?tab=api+keys) |
 | `clearml` | `True` | `bool` | Whether to use ClearML logging |
 | `comet` | `True` | `bool` | Whether to use [Comet ML](https://bit.ly/yolov8-readme-comet) for experiment tracking and visualization |
-| `dvc` | `True` | `bool` | Whether to use [DVC for experiment tracking](https://dvc.org/doc/dvclive/ml-frameworks/yolo) and version control |
+| `dvc` | `True` | `bool` | Whether to use [DVC for experiment tracking](https://dvc.org/doc/dvclive/ml-frameworks/yolo) and version control |
 | `hub` | `True` | `bool` | Whether to use [Ultralytics HUB](https://hub.ultralytics.com) integration |
 | `mlflow` | `True` | `bool` | Whether to use MLFlow for experiment tracking |
 | `neptune` | `True` | `bool` | Whether to use Neptune for experiment tracking |
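For orientation, the boolean flags in this table are toggled at runtime through the settings object documented on the same page; a minimal sketch, assuming the `from ultralytics import settings` interface of this docs version:

```python
from ultralytics import settings

# Disable individual experiment-tracking integrations from the table above
settings.update({'clearml': False, 'neptune': False})

# Read a current value back
print(settings['mlflow'])
```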
@@ -9,10 +9,6 @@ keywords: Ultralytics, YOLO, callbacks, logger, training, pretraining, machine learning
 
 Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/dvc.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/dvc.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
 
----
-## ::: ultralytics.utils.callbacks.dvc._logger_disabled
-<br><br>
-
 ---
 ## ::: ultralytics.utils.callbacks.dvc._log_images
 <br><br>
@@ -15,6 +15,15 @@ addopts =
     --doctest-modules
     --durations=25
     --color=yes
+    --cov=ultralytics/
+    --cov-report=xml
+    --no-cov-on-fail
+
+[coverage:run]
+source = ultralytics/
+data_file = tests/.coverage
+omit =
+    ultralytics/utils/callbacks/*
 
 [flake8]
 max-line-length = 120
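With this configuration a plain `pytest` run inherits the coverage flags from `addopts`, and the `[coverage:run]` section keeps `ultralytics/utils/callbacks/*` out of the report — the point of this commit. The expanded equivalent of what `addopts` injects would be, as a sketch:

```bash
# pytest-cov flags that addopts applies automatically, shown explicitly
pytest --cov=ultralytics/ --cov-report=xml --no-cov-on-fail tests/
```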
@@ -53,9 +53,9 @@ def test_predict(task, model, data):
 @pytest.mark.parametrize('task,model,data', TASK_ARGS)
 def test_predict_online(task, model, data):
     mode = 'track' if task in ('detect', 'segment', 'pose') else 'predict'  # mode for video inference
-    run(f'yolo predict model={WEIGHT_DIR / model}.pt source=https://ultralytics.com/images/bus.jpg imgsz=32')
-    run(f'yolo {mode} model={WEIGHT_DIR / model}.pt source=https://ultralytics.com/assets/decelera_landscape_min.mov imgsz=32'
-        )
+    model = WEIGHT_DIR / model
+    run(f'yolo predict model={model}.pt source=https://ultralytics.com/images/bus.jpg imgsz=32')
+    run(f'yolo {mode} model={model}.pt source=https://ultralytics.com/assets/decelera_landscape_min.mov imgsz=32')
 
     # Run Python YouTube tracking because CLI is broken. TODO: fix CLI YouTube
     # run(f'yolo {mode} model={model}.pt source=https://youtu.be/G17sBkb38XQ imgsz=32 tracker=bytetrack.yaml')
@@ -74,7 +74,7 @@ def test_rtdetr(task='detect', model='yolov8n-rtdetr.yaml', data='coco8.yaml'):
     run(f"yolo predict {task} model={model} source={ROOT / 'assets/bus.jpg'} imgsz=640 save save_crop save_txt")
 
 
-def test_fastsam(task='segment', model='FastSAM-s.pt', data='coco8-seg.yaml'):
+def test_fastsam(task='segment', model=WEIGHT_DIR / 'FastSAM-s.pt', data='coco8-seg.yaml'):
     source = ROOT / 'assets/bus.jpg'
 
     run(f'yolo segment val {task} model={model} data={data} imgsz=32')
@@ -84,10 +84,10 @@ def test_fastsam(task='segment', model='FastSAM-s.pt', data='coco8-seg.yaml'):
     from ultralytics.models.fastsam import FastSAMPrompt
 
     # Create a FastSAM model
-    model = FastSAM('FastSAM-s.pt')  # or FastSAM-x.pt
+    sam_model = FastSAM(model)  # or FastSAM-x.pt
 
     # Run inference on an image
-    everything_results = model(source, device='cpu', retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)
+    everything_results = sam_model(source, device='cpu', retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)
 
     # Everything prompt
     prompt_process = FastSAMPrompt(source, everything_results, device='cpu')
@@ -110,13 +110,19 @@ def test_mobilesam():
     from ultralytics import SAM
 
     # Load the model
-    model = SAM('mobile_sam.pt')
+    model = SAM(WEIGHT_DIR / 'mobile_sam.pt')
 
+    # Source
+    source = ROOT / 'assets/zidane.jpg'
+
     # Predict a segment based on a point prompt
-    model.predict(ROOT / 'assets/zidane.jpg', points=[900, 370], labels=[1])
+    model.predict(source, points=[900, 370], labels=[1])
 
     # Predict a segment based on a box prompt
-    model.predict(ROOT / 'assets/zidane.jpg', bboxes=[439, 437, 524, 709])
+    model.predict(source, bboxes=[439, 437, 524, 709])
 
+    # Predict all
+    # model(source)
+
 
 # Slow Tests
@@ -212,8 +212,8 @@ def test_results():
     for r in results:
-        r = r.cpu().numpy()
+        r = r.to(device='cpu', dtype=torch.float32)
-        r.save_txt(txt_file='label.txt', save_conf=True)
-        r.save_crop(save_dir='crops/')
+        r.save_txt(txt_file='runs/tests/label.txt', save_conf=True)
+        r.save_crop(save_dir='runs/tests/crops/')
         r.tojson(normalize=True)
         r.plot(pil=True)
         r.plot(conf=True, boxes=True)
@@ -318,6 +318,7 @@ class Results(SimpleClass):
                 texts.append(('%g ' * len(line)).rstrip() % line)
 
         if texts:
+            Path(txt_file).parent.mkdir(parents=True, exist_ok=True)  # make directory
             with open(txt_file, 'a') as f:
                 f.writelines(text + '\n' for text in texts)
 
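The added `mkdir` line matters because `open(txt_file, 'a')` raises `FileNotFoundError` when the parent directory does not exist; a minimal sketch of the pattern (paths hypothetical):

```python
from pathlib import Path

txt_file = 'runs/tests/label.txt'  # hypothetical output path
Path(txt_file).parent.mkdir(parents=True, exist_ok=True)  # create runs/tests if missing, no-op otherwise
with open(txt_file, 'a') as f:
    f.write('0 0.5 0.5 0.1 0.1\n')  # example label line
```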
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 """
-Check a model's accuracy on a test or val split of a dataset
+Check a model's accuracy on a test or val split of a dataset.
 
 Usage:
     $ yolo mode=val model=yolov8n.pt data=coco128.yaml imgsz=640
@@ -12,16 +12,18 @@ class FastSAM(Model):
     """
     FastSAM model interface.
 
-    Usage - Predict:
+    Example:
+        ```python
         from ultralytics import FastSAM
 
         model = FastSAM('last.pt')
         results = model.predict('ultralytics/assets/bus.jpg')
+        ```
     """
 
     def __init__(self, model='FastSAM-x.pt'):
         """Call the __init__ method of the parent class (YOLO) with the updated default model"""
-        if model == 'FastSAM.pt':
+        if str(model) == 'FastSAM.pt':
             model = 'FastSAM-x.pt'
         assert Path(model).suffix not in ('.yaml', '.yml'), 'FastSAM models only support pre-trained models.'
         super().__init__(model=model, task='segment')
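The `str(model)` cast is needed because `test_fastsam` above now passes a `pathlib.Path`, and a `Path` never compares equal to a plain string:

```python
from pathlib import Path

print(Path('FastSAM.pt') == 'FastSAM.pt')       # False: Path.__eq__ rejects non-Path operands
print(str(Path('FastSAM.pt')) == 'FastSAM.pt')  # True
```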
@@ -2,11 +2,13 @@
 """
 YOLO-NAS model interface.
 
-Usage - Predict:
+Example:
+    ```python
     from ultralytics import NAS
 
     model = NAS('yolo_nas_s')
     results = model.predict('ultralytics/assets/bus.jpg')
+    ```
 """
 
 from pathlib import Path
@@ -3,6 +3,8 @@
 SAM model interface
 """
 
+from pathlib import Path
+
 from ultralytics.engine.model import Model
 from ultralytics.utils.torch_utils import model_info
 
@@ -16,9 +18,8 @@ class SAM(Model):
     """
 
     def __init__(self, model='sam_b.pt') -> None:
-        if model and not model.endswith('.pt') and not model.endswith('.pth'):
-            # Should raise AssertionError instead?
-            raise NotImplementedError('Segment anything prediction requires pre-trained checkpoint')
+        if model and Path(model).suffix not in ('.pt', '.pth'):
+            raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.')
         super().__init__(model=model, task='segment')
 
     def _load(self, weights: str, task=None):
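`Path(model).suffix` collapses the chained `endswith` checks into a single membership test and also works when `model` arrives as a `Path` rather than a `str`:

```python
from pathlib import Path

print(Path('sam_b.pt').suffix)                       # '.pt'
print(Path('sam_b.pth').suffix)                      # '.pth'
print(Path('sam_b.onnx').suffix in ('.pt', '.pth'))  # False -> NotImplementedError above
```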
@@ -36,7 +36,7 @@ def merge_matches(m1, m2, shape):
 
 
 def _indices_to_matches(cost_matrix, indices, thresh):
-    """_indices_to_matches: Return matched and unmatched indices given a cost matrix, indices, and a threshold."""
+    """Return matched and unmatched indices given a cost matrix, indices, and a threshold."""
     matched_cost = cost_matrix[tuple(zip(*indices))]
     matched_mask = (matched_cost <= thresh)
 
@@ -81,8 +81,12 @@ def ious(atlbrs, btlbrs):
     ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
     if ious.size == 0:
         return ious
 
     ious = bbox_ious(np.ascontiguousarray(atlbrs, dtype=np.float32), np.ascontiguousarray(btlbrs, dtype=np.float32))
+
+    # TODO: replace bbox_ious() with numpy-capable update of utils.metrics.box_iou
+    # from ...utils.metrics import box_iou
+    # ious = box_iou()
 
     return ious
 
@@ -102,8 +106,7 @@ def iou_distance(atracks, btracks):
     else:
         atlbrs = [track.tlbr for track in atracks]
         btlbrs = [track.tlbr for track in btracks]
-    _ious = ious(atlbrs, btlbrs)
-    return 1 - _ious  # cost matrix
+    return 1 - ious(atlbrs, btlbrs)  # cost matrix
 
 
 def v_iou_distance(atracks, btracks):
def v_iou_distance(atracks, btracks):
|
||||
@ -122,8 +125,7 @@ def v_iou_distance(atracks, btracks):
|
||||
else:
|
||||
atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks]
|
||||
btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks]
|
||||
_ious = ious(atlbrs, btlbrs)
|
||||
return 1 - _ious # cost matrix
|
||||
return 1 - ious(atlbrs, btlbrs) # cost matrix
|
||||
|
||||
|
||||
def embedding_distance(tracks, detections, metric='cosine'):
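Both refactors return the assignment cost matrix directly: the tracker treats cost as `1 - IoU`, so fully overlapping boxes cost 0 and disjoint boxes cost 1. An illustrative sketch with made-up values:

```python
import numpy as np

iou = np.array([[0.9, 0.0],
                [0.1, 0.8]], dtype=np.float32)  # hypothetical pairwise IoU matrix
cost = 1 - iou  # high overlap -> low cost for the matcher
```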