From b4dca690d44d0a95ab92ace7d3ecc1ac23879a76 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sat, 26 Aug 2023 03:45:19 +0200 Subject: [PATCH] `ultralytics 8.0.163` add new `gpu-latest` runner to CI actions (#4565) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maia Numerosky <17316848+maianumerosky@users.noreply.github.com> --- .github/workflows/ci.yaml | 11 ++++- .gitignore | 2 + docs/models/sam.md | 2 +- docs/reference/utils/checks.md | 8 ++++ tests/conftest.py | 2 +- tests/test_cli.py | 2 +- tests/test_cuda.py | 73 +++++++++++++++++++++++++++++++++ tests/test_python.py | 16 ++++++++ ultralytics/__init__.py | 2 +- ultralytics/cfg/__init__.py | 9 ++-- ultralytics/data/augment.py | 7 +++- ultralytics/engine/exporter.py | 5 ++- ultralytics/utils/benchmarks.py | 6 ++- ultralytics/utils/checks.py | 25 +++++++++++ 14 files changed, 153 insertions(+), 17 deletions(-) create mode 100644 tests/test_cuda.py diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 9411948c..45ccf57f 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -159,6 +159,7 @@ jobs: - os: ubuntu-latest python-version: '3.8' # torch 1.8.0 requires python >=3.6, <=3.8 torch: '1.8.0' # min torch version CI https://pypi.org/project/torchvision/ + - os: gpu-latest # do not pass python-version steps: - uses: actions/checkout@v3 - uses: actions/setup-python@v4 @@ -176,6 +177,7 @@ jobs: fi - name: Check environment run: | + yolo checks echo "RUNNER_OS is ${{ runner.os }}" echo "GITHUB_EVENT_NAME is ${{ github.event_name }}" echo "GITHUB_WORKFLOW is ${{ github.workflow }}" @@ -187,9 +189,14 @@ jobs: pip list - name: Pytest tests shell: bash # for Windows compatibility - run: pytest --cov=ultralytics/ --cov-report xml tests/ + run: | + if [ "${{ matrix.os }}" == "gpu-latest" ]; then + pytest --cov=ultralytics/ --cov-report xml tests/test_cuda.py + else + pytest --cov=ultralytics/ --cov-report xml tests/ + fi - name: Upload Coverage Reports to CodeCov - if: github.repository == 'ultralytics/ultralytics' && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' + if: github.repository == 'ultralytics/ultralytics' # && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11' uses: codecov/codecov-action@v3 with: flags: Tests diff --git a/.gitignore b/.gitignore index d197c74d..c8987d84 100644 --- a/.gitignore +++ b/.gitignore @@ -140,6 +140,7 @@ dmypy.json datasets/ runs/ wandb/ +tests/ .DS_Store # Neural Network weights ----------------------------------------------------------------------------------------------- @@ -158,6 +159,7 @@ weights/ *_web_model/ *_openvino_model/ *_paddle_model/ +pnnx* # Autogenerated files for tests /ultralytics/assets/ diff --git a/docs/models/sam.md b/docs/models/sam.md index d6ffd4d8..5b781cd2 100644 --- a/docs/models/sam.md +++ b/docs/models/sam.md @@ -51,7 +51,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t model('ultralytics/assets/zidane.jpg', bboxes=[439, 437, 524, 709]) # Run inference with points prompt - model.predict('ultralytics/assets/zidane.jpg', points=[900, 370], labels=[1]) + model('ultralytics/assets/zidane.jpg', points=[900, 370], labels=[1]) ``` !!! 
example "Segment everything" diff --git a/docs/reference/utils/checks.md b/docs/reference/utils/checks.md index 96c45080..e4493562 100644 --- a/docs/reference/utils/checks.md +++ b/docs/reference/utils/checks.md @@ -80,3 +80,11 @@ keywords: Ultralytics, utility checks, ASCII, check_version, pip_update, check_p --- ## ::: ultralytics.utils.checks.print_args

+
+---
+## ::: ultralytics.utils.checks.cuda_device_count
+<br><br>
+
+---
+## ::: ultralytics.utils.checks.cuda_is_available
+<br><br>
diff --git a/tests/conftest.py b/tests/conftest.py index 235e2b77..50ea353f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -44,5 +44,5 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config): Path(file).unlink(missing_ok=True) # Remove directories - for directory in ['.pytest_cache/', TMP]: + for directory in [ROOT / '../.pytest_cache', TMP]: shutil.rmtree(directory, ignore_errors=True) diff --git a/tests/test_cli.py b/tests/test_cli.py index 2a97918e..51d2c123 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -63,7 +63,7 @@ def test_export(model, format): def test_rtdetr(task='detect', model='yolov8n-rtdetr.yaml', data='coco8.yaml'): # Warning: MUST use imgsz=640 - run(f'yolo train {task} model={model} data={data} imgsz=640 epochs=1, cache = disk') # add coma, space to args + run(f'yolo train {task} model={model} data={data} --imgsz= 640 epochs =1, cache = disk') # add coma, spaces to args run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=640 save save_crop save_txt") diff --git a/tests/test_cuda.py b/tests/test_cuda.py new file mode 100644 index 00000000..7f893672 --- /dev/null +++ b/tests/test_cuda.py @@ -0,0 +1,73 @@ +# Ultralytics YOLO 🚀, AGPL-3.0 license +import subprocess +from pathlib import Path + +import pytest +import torch + +from ultralytics import YOLO +from ultralytics.utils import ASSETS, SETTINGS + +CUDA_IS_AVAILABLE = torch.cuda.is_available() +CUDA_DEVICE_COUNT = torch.cuda.device_count() + +WEIGHTS_DIR = Path(SETTINGS['weights_dir']) +MODEL = WEIGHTS_DIR / 'path with spaces' / 'yolov8n.pt' # test spaces in path +DATA = 'coco8.yaml' + + +def test_checks(): + from ultralytics.utils.checks import cuda_device_count, cuda_is_available + + assert cuda_device_count() == CUDA_DEVICE_COUNT + assert cuda_is_available() == CUDA_IS_AVAILABLE + + +@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available') +def test_train(): + YOLO(MODEL).train(data=DATA, imgsz=64, epochs=1, batch=-1, device=0) # also test AutoBatch, requires imgsz>=64 + + +@pytest.mark.skipif(CUDA_DEVICE_COUNT < 2, reason=f'DDP is not available, {CUDA_DEVICE_COUNT} device(s) found') +def test_train_ddp(): + YOLO(MODEL).train(data=DATA, imgsz=64, epochs=1, device=[0, 1]) # requires imgsz>=64 + + +@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available') +def test_utils_benchmarks(): + from ultralytics.utils.benchmarks import ProfileModels + + YOLO(MODEL).export(format='engine', imgsz=32, dynamic=True, batch=1) # pre-export engine model, auto-device + ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile() + + +@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available') +def test_predict_sam(): + from ultralytics import SAM + + # Load a model + model = SAM(WEIGHTS_DIR / 'sam_b.pt') + + # Display model information (optional) + model.info() + + # Run inference + model(ASSETS / 'bus.jpg', device=0) + + # Run inference with bboxes prompt + model(ASSETS / 'zidane.jpg', bboxes=[439, 437, 524, 709], device=0) + + # Run inference with points prompt + model(ASSETS / 'zidane.jpg', points=[900, 370], labels=[1], device=0) + + +@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available') +def test_model_tune(): + subprocess.run('pip install ray[tune]'.split(), check=True) + YOLO('yolov8n-cls.yaml').tune(data='imagenet10', + grace_period=1, + max_samples=1, + imgsz=32, + epochs=1, + plots=False, + device='cpu') diff --git a/tests/test_python.py b/tests/test_python.py 
index 1fc5fc81..eb418cd7 100644 --- a/tests/test_python.py +++ b/tests/test_python.py @@ -1,5 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license +import contextlib import shutil from copy import copy from pathlib import Path @@ -38,6 +39,8 @@ def test_model_methods(): model = model.load(MODEL) model.to('cpu') model.fuse() + model.clear_callback('on_train_start') + model._reset_callbacks() # Model properties _ = model.names @@ -314,6 +317,15 @@ def test_events(): events(cfg) +def test_cfg_init(): + from ultralytics.cfg import check_dict_alignment, copy_default_cfg, smart_value + + with contextlib.suppress(SyntaxError): + check_dict_alignment({'a': 1}, {'b': 2}) + copy_default_cfg() + [smart_value(x) for x in ['none', 'true', 'false']] + + def test_utils_init(): from ultralytics.utils import get_git_branch, get_git_origin_url, get_ubuntu_version, is_github_actions_ci @@ -354,6 +366,7 @@ def test_utils_torchutils(): time_sync() +@pytest.mark.skipif(not ONLINE, reason='environment is offline') def test_utils_downloads(): from ultralytics.utils.downloads import get_google_drive_file_info @@ -422,8 +435,11 @@ def test_nn_modules_block(): BottleneckCSP(c1, c2)(x) +@pytest.mark.skipif(not ONLINE, reason='environment is offline') def test_hub(): from ultralytics.hub import export_fmts_hub, logout + from ultralytics.hub.utils import smart_request export_fmts_hub() logout() + smart_request('GET', 'http://github.com', progress=True) diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py index 4686f988..190a63ee 100644 --- a/ultralytics/__init__.py +++ b/ultralytics/__init__.py @@ -1,6 +1,6 @@ # Ultralytics YOLO 🚀, AGPL-3.0 license -__version__ = '8.0.162' +__version__ = '8.0.163' from ultralytics.models import RTDETR, SAM, YOLO from ultralytics.models.fastsam import FastSAM diff --git a/ultralytics/cfg/__init__.py b/ultralytics/cfg/__init__.py index 2b17340c..19558ec7 100644 --- a/ultralytics/cfg/__init__.py +++ b/ultralytics/cfg/__init__.py @@ -4,7 +4,6 @@ import contextlib import re import shutil import sys -from difflib import get_close_matches from pathlib import Path from types import SimpleNamespace from typing import Dict, List, Union @@ -177,6 +176,8 @@ def check_dict_alignment(base: Dict, custom: Dict, e=None): base_keys, custom_keys = (set(x.keys()) for x in (base, custom)) mismatched = [k for k in custom_keys if k not in base_keys] if mismatched: + from difflib import get_close_matches + string = '' for x in mismatched: matches = get_close_matches(x, base_keys) # key list @@ -373,11 +374,7 @@ def entrypoint(debug=''): mode = DEFAULT_CFG.mode or 'predict' LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {MODES}. Using default 'mode={mode}'.") elif mode not in MODES: - if mode not in ('checks', checks): - raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}") - LOGGER.warning("WARNING ⚠️ 'yolo mode=checks' is deprecated. Use 'yolo checks' instead.") - checks.check_yolo() - return + raise ValueError(f"Invalid 'mode={mode}'. 
Valid modes are {MODES}.\n{CLI_HELP_MSG}") # Task task = overrides.pop('task', None) diff --git a/ultralytics/data/augment.py b/ultralytics/data/augment.py index 68160d44..d24f7982 100644 --- a/ultralytics/data/augment.py +++ b/ultralytics/data/augment.py @@ -483,7 +483,7 @@ class RandomHSV: self.vgain = vgain def __call__(self, labels): - """Applies random horizontal or vertical flip to an image with a given probability.""" + """Applies image HSV augmentation""" img = labels['img'] if self.hgain or self.sgain or self.vgain: r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1 # random gains @@ -501,6 +501,7 @@ class RandomHSV: class RandomFlip: + """Applies random horizontal or vertical flip to an image with a given probability.""" def __init__(self, p=0.5, direction='horizontal', flip_idx=None) -> None: assert direction in ['horizontal', 'vertical'], f'Support direction `horizontal` or `vertical`, got {direction}' @@ -643,7 +644,9 @@ class CopyPaste: class Albumentations: - """YOLOv8 Albumentations class (optional, only used if package is installed)""" + """Albumentations transformations. Optional, uninstall package to disable. + Applies Blur, Median Blur, convert to grayscale, Contrast Limited Adaptive Histogram Equalization, + random change of brightness and contrast, RandomGamma and lowering of image quality by compression.""" def __init__(self, p=1.0): """Initialize the transform object for YOLO bbox formatted params.""" diff --git a/ultralytics/engine/exporter.py b/ultralytics/engine/exporter.py index 4283f6a8..abe648e6 100644 --- a/ultralytics/engine/exporter.py +++ b/ultralytics/engine/exporter.py @@ -159,7 +159,10 @@ class Exporter: raise ValueError(f"Invalid export format='{format}'. Valid formats are {fmts}") jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn = flags # export booleans - # Load PyTorch model + # Device + if format == 'engine' and self.args.device is None: + LOGGER.warning('WARNING ⚠️ TensorRT requires GPU export, automatically assigning device=0') + self.args.device = '0' self.device = select_device('cpu' if self.args.device is None else self.args.device) # Checks diff --git a/ultralytics/utils/benchmarks.py b/ultralytics/utils/benchmarks.py index 8113c2dd..d92d0625 100644 --- a/ultralytics/utils/benchmarks.py +++ b/ultralytics/utils/benchmarks.py @@ -182,6 +182,7 @@ class ProfileModels: num_warmup_runs=10, min_time=60, imgsz=640, + half=True, trt=True, device=None): self.paths = paths @@ -189,6 +190,7 @@ class ProfileModels: self.num_warmup_runs = num_warmup_runs self.min_time = min_time self.imgsz = imgsz + self.half = half self.trt = trt # run TensorRT profiling self.device = device or torch.device(0 if torch.cuda.is_available() else 'cpu') @@ -209,12 +211,12 @@ class ProfileModels: model_info = model.info() if self.trt and self.device.type != 'cpu' and not engine_file.is_file(): engine_file = model.export(format='engine', - half=True, + half=self.half, imgsz=self.imgsz, device=self.device, verbose=False) onnx_file = model.export(format='onnx', - half=True, + half=self.half, imgsz=self.imgsz, simplify=True, device=self.device, diff --git a/ultralytics/utils/checks.py b/ultralytics/utils/checks.py index aa48430b..d1de180b 100644 --- a/ultralytics/utils/checks.py +++ b/ultralytics/utils/checks.py @@ -507,3 +507,28 @@ def print_args(args: Optional[dict] = None, show_file=True, show_func=False): file = Path(file).stem s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') 
LOGGER.info(colorstr(s) + ', '.join(f'{k}={strip_auth(v)}' for k, v in args.items())) + + +def cuda_device_count() -> int: + """Get the number of NVIDIA GPUs available in the environment. + + Returns: + (int): The number of NVIDIA GPUs available. + """ + try: + # Run the nvidia-smi command and capture its output + output = subprocess.check_output(['nvidia-smi', '--query-gpu=count', '--format=csv,noheader,nounits'], + encoding='utf-8') + return int(output.strip()) + except (subprocess.CalledProcessError, FileNotFoundError): + # If the command fails or nvidia-smi is not found, assume no GPUs are available + return 0 + + +def cuda_is_available() -> bool: + """Check if CUDA is available in the environment. + + Returns: + (bool): True if one or more NVIDIA GPUs are available, False otherwise. + """ + return cuda_device_count() > 0
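
A minimal usage sketch of the two helpers introduced above in `ultralytics/utils/checks.py` (illustrative only, not part of the patch):

```python
# Query GPU availability via the new nvidia-smi based helpers
from ultralytics.utils.checks import cuda_device_count, cuda_is_available

print(cuda_device_count())  # returns 0 if nvidia-smi is missing or fails
print(cuda_is_available())  # True only when cuda_device_count() > 0
```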