From c940d29d4fcfdeba130da3b8cb9bd0cf5bd4536a Mon Sep 17 00:00:00 2001
From: Glenn Jocher <glenn.jocher@ultralytics.com>
Date: Mon, 14 Aug 2023 03:25:51 +0200
Subject: [PATCH] Omit `ultralytics/utils/callbacks` from coverage (#4345)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 docs/models/index.md                   |  2 +-
 docs/models/rtdetr.md                  |  2 +-
 docs/models/yolo-nas.md                |  2 +-
 docs/models/yolov3.md                  |  2 +-
 docs/models/yolov5.md                  |  2 +-
 docs/models/yolov6.md                  |  2 +-
 docs/models/yolov8.md                  |  2 +-
 docs/quickstart.md                     | 10 +++++++++-
 docs/reference/utils/callbacks/dvc.md  |  4 ----
 setup.cfg                              |  9 +++++++++
 tests/test_cli.py                      | 24 +++++++++++++++---------
 tests/test_python.py                   |  4 ++--
 ultralytics/engine/results.py          |  1 +
 ultralytics/engine/validator.py        |  2 +-
 ultralytics/models/fastsam/model.py    |  6 ++++--
 ultralytics/models/nas/model.py        |  4 +++-
 ultralytics/models/sam/model.py        |  7 ++++---
 ultralytics/trackers/utils/matching.py | 14 ++++++++------
 18 files changed, 63 insertions(+), 36 deletions(-)
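Editor's note: the seven docs/models edits that follow all repair the same snippet, which had dropped the `=` in `results = model.train(...)`. For reference, a minimal sketch of the corrected pattern (the model class and weights file are illustrative; each docs page uses its own):

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')  # illustrative model; the pages also use RTDETR, NAS, etc.
model.info()

# The fix: keep the assignment so training metrics remain accessible afterwards
results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
results = model('path/to/bus.jpg')  # run inference on an image
```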
diff --git a/docs/models/index.md b/docs/models/index.md
index 8210aa90..afd8e871 100644
--- a/docs/models/index.md
+++ b/docs/models/index.md
@@ -44,7 +44,7 @@ This example provides simple inference code for YOLO, SAM and RTDETR models. For
         model.info()
 
         # Train the model on the COCO8 example dataset for 100 epochs
-        results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+        results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
         # Run inference with the YOLOv8n model on the 'bus.jpg' image
         results = model('path/to/bus.jpg')
diff --git a/docs/models/rtdetr.md b/docs/models/rtdetr.md
index 256f8c44..608e7aa6 100644
--- a/docs/models/rtdetr.md
+++ b/docs/models/rtdetr.md
@@ -46,7 +46,7 @@ You can use RT-DETR for object detection tasks using the `ultralytics` pip packa
         model.info()
 
         # Train the model on the COCO8 example dataset for 100 epochs
-        results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+        results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
         # Run inference with the RT-DETR-l model on the 'bus.jpg' image
         results = model('path/to/bus.jpg')
diff --git a/docs/models/yolo-nas.md b/docs/models/yolo-nas.md
index 4137ac00..e589884f 100644
--- a/docs/models/yolo-nas.md
+++ b/docs/models/yolo-nas.md
@@ -62,7 +62,7 @@ In this example we validate YOLO-NAS-s on the COCO8 dataset.
         model.info()
 
         # Validate the model on the COCO8 example dataset
-        results model.val(data='coco8.yaml')
+        results = model.val(data='coco8.yaml')
 
         # Run inference with the YOLO-NAS-s model on the 'bus.jpg' image
         results = model('path/to/bus.jpg')
diff --git a/docs/models/yolov3.md b/docs/models/yolov3.md
index 70676abe..a7f3148d 100644
--- a/docs/models/yolov3.md
+++ b/docs/models/yolov3.md
@@ -69,7 +69,7 @@ You can use YOLOv3 for object detection tasks using the Ultralytics repository.
         model.info()
 
         # Train the model on the COCO8 example dataset for 100 epochs
-        results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+        results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
         # Run inference with the YOLOv3n model on the 'bus.jpg' image
         results = model('path/to/bus.jpg')
diff --git a/docs/models/yolov5.md b/docs/models/yolov5.md
index feb86db7..e20b9b88 100644
--- a/docs/models/yolov5.md
+++ b/docs/models/yolov5.md
@@ -74,7 +74,7 @@ You can use YOLOv5u for object detection tasks using the Ultralytics repository.
         model.info()
 
         # Train the model on the COCO8 example dataset for 100 epochs
-        results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+        results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
         # Run inference with the YOLOv5n model on the 'bus.jpg' image
         results = model('path/to/bus.jpg')
diff --git a/docs/models/yolov6.md b/docs/models/yolov6.md
index a921612b..047b9d6c 100644
--- a/docs/models/yolov6.md
+++ b/docs/models/yolov6.md
@@ -56,7 +56,7 @@ You can use YOLOv6 for object detection tasks using the Ultralytics pip package.
         model.info()
 
         # Train the model on the COCO8 example dataset for 100 epochs
-        results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+        results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
         # Run inference with the YOLOv6n model on the 'bus.jpg' image
         results = model('path/to/bus.jpg')
diff --git a/docs/models/yolov8.md b/docs/models/yolov8.md
index ca9752dc..04982247 100644
--- a/docs/models/yolov8.md
+++ b/docs/models/yolov8.md
@@ -101,7 +101,7 @@ You can use YOLOv8 for object detection tasks using the Ultralytics pip package.
         model.info()
 
         # Train the model on the COCO8 example dataset for 100 epochs
-        results model.train(data='coco8.yaml', epochs=100, imgsz=640)
+        results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
 
         # Run inference with the YOLOv8n model on the 'bus.jpg' image
         results = model('path/to/bus.jpg')
diff --git a/docs/quickstart.md b/docs/quickstart.md
index 719d3c8b..3ce7f2b8 100644
--- a/docs/quickstart.md
+++ b/docs/quickstart.md
@@ -31,6 +31,14 @@ Ultralytics provides various installation methods including pip, conda, and Dock
         conda install -c conda-forge ultralytics
         ```
 
+        !!! note
+
+            If you are installing in a CUDA environment, best practice is to install `ultralytics`, `pytorch` and `pytorch-cuda` in the same command to allow the conda package manager to resolve any conflicts, or else to install `pytorch-cuda` last to allow it to override the CPU-specific `pytorch` package if necessary.
+            ```bash
+            # Install all packages together using conda
+            conda install -c conda-forge -c pytorch -c nvidia ultralytics pytorch torchvision pytorch-cuda=11.8
+            ```
+
     === "Git clone"
         Clone the `ultralytics` repository if you are interested in contributing to the development or wish to experiment with the latest source code. After cloning, navigate into the directory and install the package in editable mode `-e` using pip.
         ```bash
@@ -259,7 +267,7 @@ The table below provides an overview of the settings available for adjustment wi
 | `api_key` | `''`   | `str`  | Ultralytics HUB [API Key](https://hub.ultralytics.com/settings?tab=api+keys)                                      |
 | `clearml` | `True` | `bool` | Whether to use ClearML logging                                                                                     |
 | `comet`   | `True` | `bool` | Whether to use [Comet ML](https://bit.ly/yolov8-readme-comet) for experiment tracking and visualization            |
-| `dvc`     | `True` | `bool` | Whether to use [DVC for experiment tracking](https://dvc.org/doc/dvclive/ml-frameworks/yolo) and version control   |
+| `dvc`     | `True` | `bool` | Whether to use [DVC for experiment tracking](https://dvc.org/doc/dvclive/ml-frameworks/yolo) and version control   |
 | `hub`     | `True` | `bool` | Whether to use [Ultralytics HUB](https://hub.ultralytics.com) integration                                          |
 | `mlflow`  | `True` | `bool` | Whether to use MLFlow for experiment tracking                                                                      |
 | `neptune` | `True` | `bool` | Whether to use Neptune for experiment tracking                                                                     |
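Editor's note: the settings table above documents per-integration toggles, including the `dvc` flag whose row is touched here. A minimal sketch of flipping such a toggle at runtime, assuming the `settings` object that `ultralytics` exports (described in the quickstart's settings section):

```python
from ultralytics import settings

# Disable the DVC logging integration documented in the table above
settings.update({'dvc': False})
print(settings['dvc'])  # -> False
```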
diff --git a/docs/reference/utils/callbacks/dvc.md b/docs/reference/utils/callbacks/dvc.md
index e939db0d..634740d1 100644
--- a/docs/reference/utils/callbacks/dvc.md
+++ b/docs/reference/utils/callbacks/dvc.md
@@ -9,10 +9,6 @@ keywords: Ultralytics, YOLO, callbacks, logger, training, pretraining, machine l
 
 Full source code for this file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/dvc.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/utils/callbacks/dvc.py). Help us fix any issues you see by submitting a [Pull Request](https://docs.ultralytics.com/help/contributing/) 🛠️. Thank you 🙏!
 
----
-## ::: ultralytics.utils.callbacks.dvc._logger_disabled
-<br><br>
-
 ---
 ## ::: ultralytics.utils.callbacks.dvc._log_images
 <br><br>
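Editor's note: the `setup.cfg` change below adds coverage flags to the default pytest options and omits `ultralytics/utils/callbacks/*` from measurement, which is the point of this PR. A minimal sketch of running the suite with the same flags passed explicitly (assumes `pytest` and `pytest-cov` are installed and the working directory is the repo root):

```python
import pytest

# Mirror the new addopts from setup.cfg: measure coverage of ultralytics/,
# write coverage.xml, and skip the coverage report if the run fails
pytest.main(['--cov=ultralytics/', '--cov-report=xml', '--no-cov-on-fail', 'tests/'])
```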
diff --git a/setup.cfg b/setup.cfg
index 2cde6a49..1cac7ec7 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -15,6 +15,15 @@ addopts =
     --doctest-modules
     --durations=25
     --color=yes
+    --cov=ultralytics/
+    --cov-report=xml
+    --no-cov-on-fail
+
+[coverage:run]
+source = ultralytics/
+data_file = tests/.coverage
+omit =
+    ultralytics/utils/callbacks/*
 
 [flake8]
 max-line-length = 120
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 1ee412fc..a5dc8f17 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -53,9 +53,9 @@ def test_predict(task, model, data):
 @pytest.mark.parametrize('task,model,data', TASK_ARGS)
 def test_predict_online(task, model, data):
     mode = 'track' if task in ('detect', 'segment', 'pose') else 'predict'  # mode for video inference
-    run(f'yolo predict model={WEIGHT_DIR / model}.pt source=https://ultralytics.com/images/bus.jpg imgsz=32')
-    run(f'yolo {mode} model={WEIGHT_DIR / model}.pt source=https://ultralytics.com/assets/decelera_landscape_min.mov imgsz=32'
-        )
+    model = WEIGHT_DIR / model
+    run(f'yolo predict model={model}.pt source=https://ultralytics.com/images/bus.jpg imgsz=32')
+    run(f'yolo {mode} model={model}.pt source=https://ultralytics.com/assets/decelera_landscape_min.mov imgsz=32')
 
     # Run Python YouTube tracking because CLI is broken. TODO: fix CLI YouTube
     # run(f'yolo {mode} model={model}.pt source=https://youtu.be/G17sBkb38XQ imgsz=32 tracker=bytetrack.yaml')
@@ -74,7 +74,7 @@ def test_rtdetr(task='detect', model='yolov8n-rtdetr.yaml', data='coco8.yaml'):
     run(f"yolo predict {task} model={model} source={ROOT / 'assets/bus.jpg'} imgsz=640 save save_crop save_txt")
 
 
-def test_fastsam(task='segment', model='FastSAM-s.pt', data='coco8-seg.yaml'):
+def test_fastsam(task='segment', model=WEIGHT_DIR / 'FastSAM-s.pt', data='coco8-seg.yaml'):
     source = ROOT / 'assets/bus.jpg'
 
     run(f'yolo segment val {task} model={model} data={data} imgsz=32')
@@ -84,10 +84,10 @@ def test_fastsam(task='segment', model='FastSAM-s.pt', data='coco8-seg.yaml'):
     from ultralytics.models.fastsam import FastSAMPrompt
 
     # Create a FastSAM model
-    model = FastSAM('FastSAM-s.pt')  # or FastSAM-x.pt
+    sam_model = FastSAM(model)  # or FastSAM-x.pt
 
     # Run inference on an image
-    everything_results = model(source, device='cpu', retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)
+    everything_results = sam_model(source, device='cpu', retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)
 
     # Everything prompt
     prompt_process = FastSAMPrompt(source, everything_results, device='cpu')
@@ -110,13 +110,19 @@ def test_mobilesam():
     from ultralytics import SAM
 
     # Load the model
-    model = SAM('mobile_sam.pt')
+    model = SAM(WEIGHT_DIR / 'mobile_sam.pt')
+
+    # Source
+    source = ROOT / 'assets/zidane.jpg'
 
     # Predict a segment based on a point prompt
-    model.predict(ROOT / 'assets/zidane.jpg', points=[900, 370], labels=[1])
+    model.predict(source, points=[900, 370], labels=[1])
 
     # Predict a segment based on a box prompt
-    model.predict(ROOT / 'assets/zidane.jpg', bboxes=[439, 437, 524, 709])
+    model.predict(source, bboxes=[439, 437, 524, 709])
+
+    # Predict all
+    # model(source)
 
 
 # Slow Tests
diff --git a/tests/test_python.py b/tests/test_python.py
index 37fe7829..f5de1349 100644
--- a/tests/test_python.py
+++ b/tests/test_python.py
@@ -212,8 +212,8 @@ def test_results():
     for r in results:
         r = r.cpu().numpy()
         r = r.to(device='cpu', dtype=torch.float32)
-        r.save_txt(txt_file='label.txt', save_conf=True)
-        r.save_crop(save_dir='crops/')
+        r.save_txt(txt_file='runs/tests/label.txt', save_conf=True)
+        r.save_crop(save_dir='runs/tests/crops/')
         r.tojson(normalize=True)
         r.plot(pil=True)
         r.plot(conf=True, boxes=True)
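Editor's note: the test above now writes labels and crops under `runs/tests/`, which only works because `save_txt()` gains the directory-creation line shown in the next diff. A standalone sketch of that idiom (paths are illustrative):

```python
from pathlib import Path

txt_file = 'runs/tests/label.txt'  # illustrative nested output path
Path(txt_file).parent.mkdir(parents=True, exist_ok=True)  # create runs/tests/ if missing
with open(txt_file, 'a') as f:
    f.write('0 0.5 0.5 0.2 0.2\n')  # illustrative YOLO-format label line
```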
diff --git a/ultralytics/engine/results.py b/ultralytics/engine/results.py
index 45727c55..55ae280f 100644
--- a/ultralytics/engine/results.py
+++ b/ultralytics/engine/results.py
@@ -318,6 +318,7 @@ class Results(SimpleClass):
                 texts.append(('%g ' * len(line)).rstrip() % line)
 
         if texts:
+            Path(txt_file).parent.mkdir(parents=True, exist_ok=True)  # make directory
             with open(txt_file, 'a') as f:
                 f.writelines(text + '\n' for text in texts)
diff --git a/ultralytics/engine/validator.py b/ultralytics/engine/validator.py
index 175cb2dd..ff50b522 100644
--- a/ultralytics/engine/validator.py
+++ b/ultralytics/engine/validator.py
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 """
-Check a model's accuracy on a test or val split of a dataset
+Check a model's accuracy on a test or val split of a dataset.
 
 Usage:
     $ yolo mode=val model=yolov8n.pt data=coco128.yaml imgsz=640
diff --git a/ultralytics/models/fastsam/model.py b/ultralytics/models/fastsam/model.py
index a4bfada1..c1895fc6 100644
--- a/ultralytics/models/fastsam/model.py
+++ b/ultralytics/models/fastsam/model.py
@@ -12,16 +12,18 @@ class FastSAM(Model):
     """
     FastSAM model interface.
 
-    Usage - Predict:
+    Example:
+        ```python
         from ultralytics import FastSAM
 
         model = FastSAM('last.pt')
         results = model.predict('ultralytics/assets/bus.jpg')
+        ```
     """
 
     def __init__(self, model='FastSAM-x.pt'):
         """Call the __init__ method of the parent class (YOLO) with the updated default model"""
-        if model == 'FastSAM.pt':
+        if str(model) == 'FastSAM.pt':
             model = 'FastSAM-x.pt'
         assert Path(model).suffix not in ('.yaml', '.yml'), 'FastSAM models only support pre-trained models.'
         super().__init__(model=model, task='segment')
diff --git a/ultralytics/models/nas/model.py b/ultralytics/models/nas/model.py
index 1f7cd351..f848cc4b 100644
--- a/ultralytics/models/nas/model.py
+++ b/ultralytics/models/nas/model.py
@@ -2,11 +2,13 @@
 """
 YOLO-NAS model interface.
 
-Usage - Predict:
+Example:
+    ```python
     from ultralytics import NAS
 
     model = NAS('yolo_nas_s')
     results = model.predict('ultralytics/assets/bus.jpg')
+    ```
 """
 
 from pathlib import Path
diff --git a/ultralytics/models/sam/model.py b/ultralytics/models/sam/model.py
index 44b32689..2ca35011 100644
--- a/ultralytics/models/sam/model.py
+++ b/ultralytics/models/sam/model.py
@@ -3,6 +3,8 @@
 SAM model interface
 """
 
+from pathlib import Path
+
 from ultralytics.engine.model import Model
 from ultralytics.utils.torch_utils import model_info
 
@@ -16,9 +18,8 @@ class SAM(Model):
     """
 
     def __init__(self, model='sam_b.pt') -> None:
-        if model and not model.endswith('.pt') and not model.endswith('.pth'):
-            # Should raise AssertionError instead?
-            raise NotImplementedError('Segment anything prediction requires pre-trained checkpoint')
+        if model and Path(model).suffix not in ('.pt', '.pth'):
+            raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.')
         super().__init__(model=model, task='segment')
 
     def _load(self, weights: str, task=None):
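Editor's note: the rewritten SAM constructor validates checkpoints by suffix via `pathlib` instead of chained `endswith` calls, so `Path` objects now work as well as strings (the same reason `str(model)` was added in FastSAM above). A quick sketch of the check's behavior (filenames are illustrative):

```python
from pathlib import Path

for name in ('sam_b.pt', Path('mobile_sam.pth'), 'sam_b.yaml'):  # illustrative inputs
    ok = Path(name).suffix in ('.pt', '.pth')
    print(name, '->', 'accepted' if ok else 'raises NotImplementedError')
```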
diff --git a/ultralytics/trackers/utils/matching.py b/ultralytics/trackers/utils/matching.py
index 02a322d8..036b2c5b 100644
--- a/ultralytics/trackers/utils/matching.py
+++ b/ultralytics/trackers/utils/matching.py
@@ -36,7 +36,7 @@ def merge_matches(m1, m2, shape):
 
 
 def _indices_to_matches(cost_matrix, indices, thresh):
-    """_indices_to_matches: Return matched and unmatched indices given a cost matrix, indices, and a threshold."""
+    """Return matched and unmatched indices given a cost matrix, indices, and a threshold."""
     matched_cost = cost_matrix[tuple(zip(*indices))]
     matched_mask = (matched_cost <= thresh)
 
@@ -81,8 +81,12 @@ def ious(atlbrs, btlbrs):
     ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
     if ious.size == 0:
         return ious
-
     ious = bbox_ious(np.ascontiguousarray(atlbrs, dtype=np.float32), np.ascontiguousarray(btlbrs, dtype=np.float32))
+
+    # TODO: replace bbox_ious() with numpy-capable update of utils.metrics.box_iou
+    # from ...utils.metrics import box_iou
+    # ious = box_iou()
+
     return ious
 
 
@@ -102,8 +106,7 @@ def iou_distance(atracks, btracks):
     else:
         atlbrs = [track.tlbr for track in atracks]
         btlbrs = [track.tlbr for track in btracks]
-    _ious = ious(atlbrs, btlbrs)
-    return 1 - _ious  # cost matrix
+    return 1 - ious(atlbrs, btlbrs)  # cost matrix
 
 
 def v_iou_distance(atracks, btracks):
@@ -122,8 +125,7 @@ def v_iou_distance(atracks, btracks):
     else:
         atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks]
         btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks]
-    _ious = ious(atlbrs, btlbrs)
-    return 1 - _ious  # cost matrix
+    return 1 - ious(atlbrs, btlbrs)  # cost matrix
 
 
 def embedding_distance(tracks, detections, metric='cosine'):
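Editor's note: for context on the `iou_distance()` simplification above, the tracker builds its cost matrix as `1 - IoU`, so perfectly overlapping boxes cost 0. A self-contained numpy sketch of that computation (not the library's `bbox_ious` implementation; boxes are illustrative):

```python
import numpy as np

def iou_matrix(a, b):
    """Pairwise IoU of two box sets in (x1, y1, x2, y2) format, shapes (M, 4) and (N, 4)."""
    a, b = np.asarray(a, dtype=np.float32), np.asarray(b, dtype=np.float32)
    tl = np.maximum(a[:, None, :2], b[None, :, :2])     # (M, N, 2) intersection top-left
    br = np.minimum(a[:, None, 2:], b[None, :, 2:])     # (M, N, 2) intersection bottom-right
    inter = np.prod(np.clip(br - tl, 0, None), axis=2)  # (M, N) intersection areas
    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)       # (M,) box areas
    area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)       # (N,) box areas
    return inter / (area_a[:, None] + area_b[None, :] - inter + 1e-7)

# Cost matrix in exactly the form iou_distance() returns
a = [[0, 0, 10, 10], [5, 5, 15, 15]]
b = [[0, 0, 10, 10]]
print(1 - iou_matrix(a, b))  # low cost = strong overlap
```

The linear-assignment step elsewhere in `matching.py` then consumes this cost matrix, which is why returning `1 - ious(...)` directly is a safe simplification here.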