ultralytics 8.0.163 add new gpu-latest runner to CI actions (#4565)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Maia Numerosky <17316848+maianumerosky@users.noreply.github.com>
Glenn Jocher 2023-08-26 03:45:19 +02:00 committed by GitHub
parent 431cef3955
commit b4dca690d4
14 changed files with 153 additions and 17 deletions

.github/workflows/ci.yaml

@@ -159,6 +159,7 @@ jobs:
           - os: ubuntu-latest
             python-version: '3.8'  # torch 1.8.0 requires python >=3.6, <=3.8
             torch: '1.8.0'  # min torch version CI https://pypi.org/project/torchvision/
+          - os: gpu-latest  # do not pass python-version
     steps:
       - uses: actions/checkout@v3
       - uses: actions/setup-python@v4
@@ -176,6 +177,7 @@ jobs:
           fi
       - name: Check environment
         run: |
+          yolo checks
           echo "RUNNER_OS is ${{ runner.os }}"
           echo "GITHUB_EVENT_NAME is ${{ github.event_name }}"
           echo "GITHUB_WORKFLOW is ${{ github.workflow }}"
@@ -187,9 +189,14 @@
           pip list
       - name: Pytest tests
         shell: bash  # for Windows compatibility
-        run: pytest --cov=ultralytics/ --cov-report xml tests/
+        run: |
+          if [ "${{ matrix.os }}" == "gpu-latest" ]; then
+            pytest --cov=ultralytics/ --cov-report xml tests/test_cuda.py
+          else
+            pytest --cov=ultralytics/ --cov-report xml tests/
+          fi
       - name: Upload Coverage Reports to CodeCov
-        if: github.repository == 'ultralytics/ultralytics' && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'
+        if: github.repository == 'ultralytics/ultralytics'  # && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'
         uses: codecov/codecov-action@v3
         with:
           flags: Tests
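For readers who want to reproduce the new 'Check environment' step outside of Actions, a minimal local sketch follows; it assumes ultralytics is installed and that the `yolo checks` CLI command maps to the `check_yolo()` helper (the same call the deprecated `yolo mode=checks` path used before this commit removed it).

```python
# Local equivalent of the new 'yolo checks' CI step (assumption: the CLI maps to check_yolo()).
from ultralytics.utils import checks

checks.check_yolo()  # prints a short software/hardware environment summary
```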

.gitignore

@@ -140,6 +140,7 @@ dmypy.json
 datasets/
 runs/
 wandb/
+tests/
 .DS_Store

 # Neural Network weights -----------------------------------------------------------------------------------------------
@@ -158,6 +159,7 @@ weights/
 *_web_model/
 *_openvino_model/
 *_paddle_model/
+pnnx*

 # Autogenerated files for tests
 /ultralytics/assets/

docs/models/sam.md

@@ -51,7 +51,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
         model('ultralytics/assets/zidane.jpg', bboxes=[439, 437, 524, 709])

         # Run inference with points prompt
-        model.predict('ultralytics/assets/zidane.jpg', points=[900, 370], labels=[1])
+        model('ultralytics/assets/zidane.jpg', points=[900, 370], labels=[1])
         ```

 !!! example "Segment everything"

docs/reference/utils/checks.md

@@ -80,3 +80,11 @@ keywords: Ultralytics, utility checks, ASCII, check_version, pip_update, check_p
 ---
 ## ::: ultralytics.utils.checks.print_args
 <br><br>
+
+---
+## ::: ultralytics.utils.checks.cuda_device_count
+<br><br>
+
+---
+## ::: ultralytics.utils.checks.cuda_is_available
+<br><br>

tests/conftest.py

@@ -44,5 +44,5 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
         Path(file).unlink(missing_ok=True)

     # Remove directories
-    for directory in ['.pytest_cache/', TMP]:
+    for directory in [ROOT / '../.pytest_cache', TMP]:
         shutil.rmtree(directory, ignore_errors=True)

tests/test_cli.py

@@ -63,7 +63,7 @@ def test_export(model, format):
 def test_rtdetr(task='detect', model='yolov8n-rtdetr.yaml', data='coco8.yaml'):
     # Warning: MUST use imgsz=640
-    run(f'yolo train {task} model={model} data={data} imgsz=640 epochs=1, cache = disk')  # add coma, space to args
+    run(f'yolo train {task} model={model} data={data} --imgsz= 640 epochs =1, cache = disk')  # add coma, spaces to args
     run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=640 save save_crop save_txt")

tests/test_cuda.py (new file)

@@ -0,0 +1,73 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license

import subprocess
from pathlib import Path

import pytest
import torch

from ultralytics import YOLO
from ultralytics.utils import ASSETS, SETTINGS

CUDA_IS_AVAILABLE = torch.cuda.is_available()
CUDA_DEVICE_COUNT = torch.cuda.device_count()

WEIGHTS_DIR = Path(SETTINGS['weights_dir'])
MODEL = WEIGHTS_DIR / 'path with spaces' / 'yolov8n.pt'  # test spaces in path
DATA = 'coco8.yaml'


def test_checks():
    from ultralytics.utils.checks import cuda_device_count, cuda_is_available

    assert cuda_device_count() == CUDA_DEVICE_COUNT
    assert cuda_is_available() == CUDA_IS_AVAILABLE


@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
def test_train():
    YOLO(MODEL).train(data=DATA, imgsz=64, epochs=1, batch=-1, device=0)  # also test AutoBatch, requires imgsz>=64


@pytest.mark.skipif(CUDA_DEVICE_COUNT < 2, reason=f'DDP is not available, {CUDA_DEVICE_COUNT} device(s) found')
def test_train_ddp():
    YOLO(MODEL).train(data=DATA, imgsz=64, epochs=1, device=[0, 1])  # requires imgsz>=64


@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
def test_utils_benchmarks():
    from ultralytics.utils.benchmarks import ProfileModels

    YOLO(MODEL).export(format='engine', imgsz=32, dynamic=True, batch=1)  # pre-export engine model, auto-device
    ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()


@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
def test_predict_sam():
    from ultralytics import SAM

    # Load a model
    model = SAM(WEIGHTS_DIR / 'sam_b.pt')

    # Display model information (optional)
    model.info()

    # Run inference
    model(ASSETS / 'bus.jpg', device=0)

    # Run inference with bboxes prompt
    model(ASSETS / 'zidane.jpg', bboxes=[439, 437, 524, 709], device=0)

    # Run inference with points prompt
    model(ASSETS / 'zidane.jpg', points=[900, 370], labels=[1], device=0)


@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
def test_model_tune():
    subprocess.run('pip install ray[tune]'.split(), check=True)
    YOLO('yolov8n-cls.yaml').tune(data='imagenet10',
                                  grace_period=1,
                                  max_samples=1,
                                  imgsz=32,
                                  epochs=1,
                                  plots=False,
                                  device='cpu')
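For reference, a minimal sketch of how this new suite might be exercised outside CI, assuming pytest and pytest-cov are installed; the skipif markers above make it safe to run on CPU-only machines.

```python
# Minimal sketch: run the new CUDA test module the way the gpu-latest CI step does.
# Assumes pytest and pytest-cov are installed; CUDA-dependent tests skip themselves
# via the skipif markers above when no GPU is present.
import pytest
import torch

target = 'tests/test_cuda.py' if torch.cuda.is_available() else 'tests/'  # local stand-in for the CI matrix switch
exit_code = pytest.main(['--cov=ultralytics/', '--cov-report=xml', target])
print(f'pytest exit code: {exit_code}')
```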

tests/test_python.py

@@ -1,5 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

+import contextlib
 import shutil
 from copy import copy
 from pathlib import Path
@@ -38,6 +39,8 @@ def test_model_methods():
     model = model.load(MODEL)
     model.to('cpu')
     model.fuse()
+    model.clear_callback('on_train_start')
+    model._reset_callbacks()

     # Model properties
     _ = model.names
@@ -314,6 +317,15 @@ def test_events():
     events(cfg)


+def test_cfg_init():
+    from ultralytics.cfg import check_dict_alignment, copy_default_cfg, smart_value
+    with contextlib.suppress(SyntaxError):
+        check_dict_alignment({'a': 1}, {'b': 2})
+    copy_default_cfg()
+    [smart_value(x) for x in ['none', 'true', 'false']]
+
+
 def test_utils_init():
     from ultralytics.utils import get_git_branch, get_git_origin_url, get_ubuntu_version, is_github_actions_ci
@@ -354,6 +366,7 @@ def test_utils_torchutils():
     time_sync()


+@pytest.mark.skipif(not ONLINE, reason='environment is offline')
 def test_utils_downloads():
     from ultralytics.utils.downloads import get_google_drive_file_info
@@ -422,8 +435,11 @@ def test_nn_modules_block():
     BottleneckCSP(c1, c2)(x)


+@pytest.mark.skipif(not ONLINE, reason='environment is offline')
 def test_hub():
     from ultralytics.hub import export_fmts_hub, logout
+    from ultralytics.hub.utils import smart_request

     export_fmts_hub()
     logout()
+    smart_request('GET', 'http://github.com', progress=True)
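The clear_callback/_reset_callbacks lines added to test_model_methods above exercise the model's callback management. A minimal sketch of that pattern, assuming the public add_callback/clear_callback methods on YOLO (the underscore-prefixed _reset_callbacks is internal):

```python
# Sketch of the callback management exercised in test_model_methods above.
# Assumption: the public add_callback/clear_callback methods; _reset_callbacks is internal API.
from ultralytics import YOLO


def on_train_start(trainer):
    print('training started')


model = YOLO('yolov8n.pt')
model.add_callback('on_train_start', on_train_start)  # register a custom hook for an event
model.clear_callback('on_train_start')  # clear hooks registered for that event, as in the test
```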

ultralytics/__init__.py

@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = '8.0.162'
+__version__ = '8.0.163'

 from ultralytics.models import RTDETR, SAM, YOLO
 from ultralytics.models.fastsam import FastSAM

ultralytics/cfg/__init__.py

@@ -4,7 +4,6 @@ import contextlib
 import re
 import shutil
 import sys
-from difflib import get_close_matches
 from pathlib import Path
 from types import SimpleNamespace
 from typing import Dict, List, Union
@@ -177,6 +176,8 @@ def check_dict_alignment(base: Dict, custom: Dict, e=None):
     base_keys, custom_keys = (set(x.keys()) for x in (base, custom))
     mismatched = [k for k in custom_keys if k not in base_keys]
     if mismatched:
+        from difflib import get_close_matches
+
         string = ''
         for x in mismatched:
             matches = get_close_matches(x, base_keys)  # key list
@@ -373,11 +374,7 @@
         mode = DEFAULT_CFG.mode or 'predict'
         LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {MODES}. Using default 'mode={mode}'.")
     elif mode not in MODES:
-        if mode not in ('checks', checks):
-            raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}")
-        LOGGER.warning("WARNING ⚠️ 'yolo mode=checks' is deprecated. Use 'yolo checks' instead.")
-        checks.check_yolo()
-        return
+        raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}")

     # Task
     task = overrides.pop('task', None)

ultralytics/data/augment.py

@@ -483,7 +483,7 @@ class RandomHSV:
         self.vgain = vgain

     def __call__(self, labels):
-        """Applies random horizontal or vertical flip to an image with a given probability."""
+        """Applies image HSV augmentation"""
        img = labels['img']
         if self.hgain or self.sgain or self.vgain:
             r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1  # random gains
@@ -501,6 +501,7 @@
 class RandomFlip:
+    """Applies random horizontal or vertical flip to an image with a given probability."""

     def __init__(self, p=0.5, direction='horizontal', flip_idx=None) -> None:
         assert direction in ['horizontal', 'vertical'], f'Support direction `horizontal` or `vertical`, got {direction}'
@@ -643,7 +644,9 @@
 class Albumentations:
-    """YOLOv8 Albumentations class (optional, only used if package is installed)"""
+    """Albumentations transformations. Optional, uninstall package to disable.
+    Applies Blur, Median Blur, convert to grayscale, Contrast Limited Adaptive Histogram Equalization,
+    random change of brightness and contrast, RandomGamma and lowering of image quality by compression."""

     def __init__(self, p=1.0):
         """Initialize the transform object for YOLO bbox formatted params."""

ultralytics/engine/exporter.py

@@ -159,7 +159,10 @@
             raise ValueError(f"Invalid export format='{format}'. Valid formats are {fmts}")
         jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn = flags  # export booleans

-        # Load PyTorch model
+        # Device
+        if format == 'engine' and self.args.device is None:
+            LOGGER.warning('WARNING ⚠️ TensorRT requires GPU export, automatically assigning device=0')
+            self.args.device = '0'
         self.device = select_device('cpu' if self.args.device is None else self.args.device)

         # Checks
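In practice this means a TensorRT export no longer needs an explicit device argument. A minimal usage sketch, assuming a CUDA machine with TensorRT installed:

```python
# Sketch of the new exporter behavior: no explicit device needed for TensorRT.
# Assumes ultralytics on a CUDA machine with TensorRT installed; with no device argument
# the exporter now warns and assigns device=0 instead of defaulting to CPU.
from ultralytics import YOLO

engine_path = YOLO('yolov8n.pt').export(format='engine', imgsz=32)  # device=0 assigned automatically
print(engine_path)
```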

ultralytics/utils/benchmarks.py

@@ -182,6 +182,7 @@ class ProfileModels:
                  num_warmup_runs=10,
                  min_time=60,
                  imgsz=640,
+                 half=True,
                  trt=True,
                  device=None):
         self.paths = paths
@@ -189,6 +190,7 @@
         self.num_warmup_runs = num_warmup_runs
         self.min_time = min_time
         self.imgsz = imgsz
+        self.half = half
         self.trt = trt  # run TensorRT profiling
         self.device = device or torch.device(0 if torch.cuda.is_available() else 'cpu')
@@ -209,12 +211,12 @@
             model_info = model.info()
             if self.trt and self.device.type != 'cpu' and not engine_file.is_file():
                 engine_file = model.export(format='engine',
-                                           half=True,
+                                           half=self.half,
                                            imgsz=self.imgsz,
                                            device=self.device,
                                            verbose=False)
             onnx_file = model.export(format='onnx',
-                                     half=True,
+                                     half=self.half,
                                      imgsz=self.imgsz,
                                      simplify=True,
                                      device=self.device,
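The new half flag lets callers profile in FP32 instead of the previously hard-coded FP16 exports; the usage below mirrors the call in tests/test_cuda.py.

```python
# Profile a model with FP16 disabled (half=True remains the default), mirroring tests/test_cuda.py.
from ultralytics.utils.benchmarks import ProfileModels

ProfileModels(['yolov8n.pt'], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
```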

ultralytics/utils/checks.py

@@ -507,3 +507,28 @@ def print_args(args: Optional[dict] = None, show_file=True, show_func=False):
     file = Path(file).stem
     s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')
     LOGGER.info(colorstr(s) + ', '.join(f'{k}={strip_auth(v)}' for k, v in args.items()))
+
+
+def cuda_device_count() -> int:
+    """Get the number of NVIDIA GPUs available in the environment.
+
+    Returns:
+        (int): The number of NVIDIA GPUs available.
+    """
+    try:
+        # Run the nvidia-smi command and capture its output
+        output = subprocess.check_output(['nvidia-smi', '--query-gpu=count', '--format=csv,noheader,nounits'],
+                                         encoding='utf-8')
+        return int(output.strip())
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        # If the command fails or nvidia-smi is not found, assume no GPUs are available
+        return 0
+
+
+def cuda_is_available() -> bool:
+    """Check if CUDA is available in the environment.
+
+    Returns:
+        (bool): True if one or more NVIDIA GPUs are available, False otherwise.
+    """
+    return cuda_device_count() > 0
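Because these helpers shell out to nvidia-smi rather than going through torch.cuda, they can be used for lightweight environment probing; a short usage sketch:

```python
# Usage sketch: probe GPU availability via the new nvidia-smi-based helpers.
from ultralytics.utils.checks import cuda_device_count, cuda_is_available

print(f'NVIDIA GPUs detected: {cuda_device_count()}')
if not cuda_is_available():
    print('No CUDA devices found; the GPU-only tests in tests/test_cuda.py will be skipped.')
```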