Mirror of https://github.com/THU-MIG/yolov10.git (synced 2025-05-23 05:24:22 +08:00)
ultralytics 8.0.163 add new gpu-latest runner to CI actions (#4565)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Maia Numerosky <17316848+maianumerosky@users.noreply.github.com>
parent 431cef3955
commit b4dca690d4
.github/workflows/ci.yaml (11 changes)
@@ -159,6 +159,7 @@ jobs:
           - os: ubuntu-latest
             python-version: '3.8'  # torch 1.8.0 requires python >=3.6, <=3.8
             torch: '1.8.0'  # min torch version CI https://pypi.org/project/torchvision/
+          - os: gpu-latest  # do not pass python-version
     steps:
       - uses: actions/checkout@v3
       - uses: actions/setup-python@v4
@@ -176,6 +177,7 @@ jobs:
           fi
       - name: Check environment
         run: |
+          yolo checks
           echo "RUNNER_OS is ${{ runner.os }}"
           echo "GITHUB_EVENT_NAME is ${{ github.event_name }}"
           echo "GITHUB_WORKFLOW is ${{ github.workflow }}"
@@ -187,9 +189,14 @@ jobs:
           pip list
       - name: Pytest tests
         shell: bash  # for Windows compatibility
-        run: pytest --cov=ultralytics/ --cov-report xml tests/
+        run: |
+          if [ "${{ matrix.os }}" == "gpu-latest" ]; then
+            pytest --cov=ultralytics/ --cov-report xml tests/test_cuda.py
+          else
+            pytest --cov=ultralytics/ --cov-report xml tests/
+          fi
       - name: Upload Coverage Reports to CodeCov
-        if: github.repository == 'ultralytics/ultralytics' && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'
+        if: github.repository == 'ultralytics/ultralytics' # && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.11'
         uses: codecov/codecov-action@v3
         with:
           flags: Tests
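For orientation (not part of the diff): the gpu-latest branch above runs only the CUDA test file. A rough local equivalent, assuming pytest and pytest-cov are installed, is:

```python
# Rough local equivalent of the gpu-latest CI branch (assumes pytest and pytest-cov are installed)
import pytest

pytest.main(['--cov=ultralytics/', '--cov-report=xml', 'tests/test_cuda.py'])
```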
.gitignore (2 changes)
@@ -140,6 +140,7 @@ dmypy.json
 datasets/
 runs/
 wandb/
+tests/
 .DS_Store

 # Neural Network weights -----------------------------------------------------------------------------------------------
@@ -158,6 +159,7 @@ weights/
 *_web_model/
 *_openvino_model/
 *_paddle_model/
+pnnx*

 # Autogenerated files for tests
 /ultralytics/assets/
@@ -51,7 +51,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
         model('ultralytics/assets/zidane.jpg', bboxes=[439, 437, 524, 709])

         # Run inference with points prompt
-        model.predict('ultralytics/assets/zidane.jpg', points=[900, 370], labels=[1])
+        model('ultralytics/assets/zidane.jpg', points=[900, 370], labels=[1])
         ```

 !!! example "Segment everything"
@@ -80,3 +80,11 @@ keywords: Ultralytics, utility checks, ASCII, check_version, pip_update, check_p
 ---
 ## ::: ultralytics.utils.checks.print_args
 <br><br>
+
+---
+## ::: ultralytics.utils.checks.cuda_device_count
+<br><br>
+
+---
+## ::: ultralytics.utils.checks.cuda_is_available
+<br><br>
@@ -44,5 +44,5 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
         Path(file).unlink(missing_ok=True)

     # Remove directories
-    for directory in ['.pytest_cache/', TMP]:
+    for directory in [ROOT / '../.pytest_cache', TMP]:
         shutil.rmtree(directory, ignore_errors=True)
@@ -63,7 +63,7 @@ def test_export(model, format):

 def test_rtdetr(task='detect', model='yolov8n-rtdetr.yaml', data='coco8.yaml'):
     # Warning: MUST use imgsz=640
-    run(f'yolo train {task} model={model} data={data} imgsz=640 epochs=1, cache = disk')  # add coma, space to args
+    run(f'yolo train {task} model={model} data={data} --imgsz= 640 epochs =1, cache = disk')  # add coma, spaces to args
     run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=640 save save_crop save_txt")

tests/test_cuda.py (new file, 73 lines)
@@ -0,0 +1,73 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+import subprocess
+from pathlib import Path
+
+import pytest
+import torch
+
+from ultralytics import YOLO
+from ultralytics.utils import ASSETS, SETTINGS
+
+CUDA_IS_AVAILABLE = torch.cuda.is_available()
+CUDA_DEVICE_COUNT = torch.cuda.device_count()
+
+WEIGHTS_DIR = Path(SETTINGS['weights_dir'])
+MODEL = WEIGHTS_DIR / 'path with spaces' / 'yolov8n.pt'  # test spaces in path
+DATA = 'coco8.yaml'
+
+
+def test_checks():
+    from ultralytics.utils.checks import cuda_device_count, cuda_is_available
+
+    assert cuda_device_count() == CUDA_DEVICE_COUNT
+    assert cuda_is_available() == CUDA_IS_AVAILABLE
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
+def test_train():
+    YOLO(MODEL).train(data=DATA, imgsz=64, epochs=1, batch=-1, device=0)  # also test AutoBatch, requires imgsz>=64
+
+
+@pytest.mark.skipif(CUDA_DEVICE_COUNT < 2, reason=f'DDP is not available, {CUDA_DEVICE_COUNT} device(s) found')
+def test_train_ddp():
+    YOLO(MODEL).train(data=DATA, imgsz=64, epochs=1, device=[0, 1])  # requires imgsz>=64
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
+def test_utils_benchmarks():
+    from ultralytics.utils.benchmarks import ProfileModels
+
+    YOLO(MODEL).export(format='engine', imgsz=32, dynamic=True, batch=1)  # pre-export engine model, auto-device
+    ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
+def test_predict_sam():
+    from ultralytics import SAM
+
+    # Load a model
+    model = SAM(WEIGHTS_DIR / 'sam_b.pt')
+
+    # Display model information (optional)
+    model.info()
+
+    # Run inference
+    model(ASSETS / 'bus.jpg', device=0)
+
+    # Run inference with bboxes prompt
+    model(ASSETS / 'zidane.jpg', bboxes=[439, 437, 524, 709], device=0)
+
+    # Run inference with points prompt
+    model(ASSETS / 'zidane.jpg', points=[900, 370], labels=[1], device=0)
+
+
+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
+def test_model_tune():
+    subprocess.run('pip install ray[tune]'.split(), check=True)
+    YOLO('yolov8n-cls.yaml').tune(data='imagenet10',
+                                  grace_period=1,
+                                  max_samples=1,
+                                  imgsz=32,
+                                  epochs=1,
+                                  plots=False,
+                                  device='cpu')
@@ -1,5 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

+import contextlib
 import shutil
 from copy import copy
 from pathlib import Path
@@ -38,6 +39,8 @@ def test_model_methods():
     model = model.load(MODEL)
     model.to('cpu')
     model.fuse()
+    model.clear_callback('on_train_start')
+    model._reset_callbacks()

     # Model properties
     _ = model.names
@@ -314,6 +317,15 @@ def test_events():
     events(cfg)


+def test_cfg_init():
+    from ultralytics.cfg import check_dict_alignment, copy_default_cfg, smart_value
+
+    with contextlib.suppress(SyntaxError):
+        check_dict_alignment({'a': 1}, {'b': 2})
+    copy_default_cfg()
+    [smart_value(x) for x in ['none', 'true', 'false']]
+
+
 def test_utils_init():
     from ultralytics.utils import get_git_branch, get_git_origin_url, get_ubuntu_version, is_github_actions_ci

@@ -354,6 +366,7 @@ def test_utils_torchutils():
     time_sync()


+@pytest.mark.skipif(not ONLINE, reason='environment is offline')
 def test_utils_downloads():
     from ultralytics.utils.downloads import get_google_drive_file_info

@@ -422,8 +435,11 @@ def test_nn_modules_block():
     BottleneckCSP(c1, c2)(x)


+@pytest.mark.skipif(not ONLINE, reason='environment is offline')
 def test_hub():
     from ultralytics.hub import export_fmts_hub, logout
+    from ultralytics.hub.utils import smart_request

     export_fmts_hub()
     logout()
+    smart_request('GET', 'http://github.com', progress=True)
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = '8.0.162'
+__version__ = '8.0.163'

 from ultralytics.models import RTDETR, SAM, YOLO
 from ultralytics.models.fastsam import FastSAM
@@ -4,7 +4,6 @@ import contextlib
 import re
 import shutil
 import sys
-from difflib import get_close_matches
 from pathlib import Path
 from types import SimpleNamespace
 from typing import Dict, List, Union
@@ -177,6 +176,8 @@ def check_dict_alignment(base: Dict, custom: Dict, e=None):
     base_keys, custom_keys = (set(x.keys()) for x in (base, custom))
     mismatched = [k for k in custom_keys if k not in base_keys]
     if mismatched:
+        from difflib import get_close_matches
+
         string = ''
         for x in mismatched:
             matches = get_close_matches(x, base_keys)  # key list
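A small illustration (not taken from this diff) of why get_close_matches is now imported here: check_dict_alignment uses it to suggest valid keys for a mistyped override.

```python
# Illustration: fuzzy key suggestion as used by check_dict_alignment
from difflib import get_close_matches

base_keys = ['epochs', 'imgsz', 'batch', 'device']
print(get_close_matches('epohcs', base_keys))  # expected to suggest ['epochs']
```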
@@ -373,11 +374,7 @@ def entrypoint(debug=''):
         mode = DEFAULT_CFG.mode or 'predict'
         LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {MODES}. Using default 'mode={mode}'.")
     elif mode not in MODES:
-        if mode not in ('checks', checks):
-            raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}")
-        LOGGER.warning("WARNING ⚠️ 'yolo mode=checks' is deprecated. Use 'yolo checks' instead.")
-        checks.check_yolo()
-        return
+        raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}")

     # Task
     task = overrides.pop('task', None)
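A hedged sketch of the resulting behavior (not from the diff; assumes ultralytics is installed): an unrecognized mode now raises immediately, and 'yolo mode=checks' is no longer special-cased, so 'yolo checks' is the supported form.

```python
# Sketch: an invalid mode now raises a ValueError instead of redirecting to checks
from ultralytics.cfg import entrypoint

try:
    entrypoint('mode=checks')
except ValueError as e:
    print(e)
```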
@@ -483,7 +483,7 @@ class RandomHSV:
         self.vgain = vgain

     def __call__(self, labels):
-        """Applies random horizontal or vertical flip to an image with a given probability."""
+        """Applies image HSV augmentation"""
         img = labels['img']
         if self.hgain or self.sgain or self.vgain:
             r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1  # random gains
@@ -501,6 +501,7 @@ class RandomHSV:


 class RandomFlip:
+    """Applies random horizontal or vertical flip to an image with a given probability."""

     def __init__(self, p=0.5, direction='horizontal', flip_idx=None) -> None:
         assert direction in ['horizontal', 'vertical'], f'Support direction `horizontal` or `vertical`, got {direction}'
@@ -643,7 +644,9 @@ class CopyPaste:


 class Albumentations:
-    """YOLOv8 Albumentations class (optional, only used if package is installed)"""
+    """Albumentations transformations. Optional, uninstall package to disable.
+    Applies Blur, Median Blur, convert to grayscale, Contrast Limited Adaptive Histogram Equalization,
+    random change of brightness and contrast, RandomGamma and lowering of image quality by compression."""

     def __init__(self, p=1.0):
         """Initialize the transform object for YOLO bbox formatted params."""
@@ -159,7 +159,10 @@ class Exporter:
             raise ValueError(f"Invalid export format='{format}'. Valid formats are {fmts}")
         jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn = flags  # export booleans

-        # Load PyTorch model
+        # Device
+        if format == 'engine' and self.args.device is None:
+            LOGGER.warning('WARNING ⚠️ TensorRT requires GPU export, automatically assigning device=0')
+            self.args.device = '0'
         self.device = select_device('cpu' if self.args.device is None else self.args.device)

         # Checks
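A hedged usage sketch (assumes a CUDA-capable machine and a local yolov8n.pt): with this change, requesting a TensorRT export without an explicit device should fall back to device 0 with a warning rather than attempting a CPU export.

```python
# Sketch: TensorRT export with no device specified now auto-assigns device='0'
from ultralytics import YOLO

YOLO('yolov8n.pt').export(format='engine', imgsz=32)
```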
@@ -182,6 +182,7 @@ class ProfileModels:
                  num_warmup_runs=10,
                  min_time=60,
                  imgsz=640,
+                 half=True,
                  trt=True,
                  device=None):
         self.paths = paths
@@ -189,6 +190,7 @@ class ProfileModels:
         self.num_warmup_runs = num_warmup_runs
         self.min_time = min_time
         self.imgsz = imgsz
+        self.half = half
         self.trt = trt  # run TensorRT profiling
         self.device = device or torch.device(0 if torch.cuda.is_available() else 'cpu')

@@ -209,12 +211,12 @@ class ProfileModels:
         model_info = model.info()
         if self.trt and self.device.type != 'cpu' and not engine_file.is_file():
             engine_file = model.export(format='engine',
-                                       half=True,
+                                       half=self.half,
                                        imgsz=self.imgsz,
                                        device=self.device,
                                        verbose=False)
         onnx_file = model.export(format='onnx',
-                                 half=True,
+                                 half=self.half,
                                  imgsz=self.imgsz,
                                  simplify=True,
                                  device=self.device,
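For context, a minimal profiling call mirroring the new tests/test_cuda.py usage (assumes a CUDA device and a local yolov8n.pt): the half flag is now forwarded to both the TensorRT and ONNX exports.

```python
# Sketch: FP32 profiling is now possible by passing half=False
from ultralytics.utils.benchmarks import ProfileModels

ProfileModels(['yolov8n.pt'], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
```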
@@ -507,3 +507,28 @@ def print_args(args: Optional[dict] = None, show_file=True, show_func=False):
         file = Path(file).stem
     s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')
     LOGGER.info(colorstr(s) + ', '.join(f'{k}={strip_auth(v)}' for k, v in args.items()))
+
+
+def cuda_device_count() -> int:
+    """Get the number of NVIDIA GPUs available in the environment.
+
+    Returns:
+        (int): The number of NVIDIA GPUs available.
+    """
+    try:
+        # Run the nvidia-smi command and capture its output
+        output = subprocess.check_output(['nvidia-smi', '--query-gpu=count', '--format=csv,noheader,nounits'],
+                                         encoding='utf-8')
+        return int(output.strip())
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        # If the command fails or nvidia-smi is not found, assume no GPUs are available
+        return 0
+
+
+def cuda_is_available() -> bool:
+    """Check if CUDA is available in the environment.
+
+    Returns:
+        (bool): True if one or more NVIDIA GPUs are available, False otherwise.
+    """
+    return cuda_device_count() > 0
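A brief usage sketch (not from the diff): because these helpers shell out to nvidia-smi, they can report GPU availability without touching torch.cuda.

```python
# Sketch: choose a device string from the new nvidia-smi based helpers
from ultralytics.utils.checks import cuda_device_count, cuda_is_available

device = '0' if cuda_is_available() else 'cpu'
print(f'{cuda_device_count()} GPU(s) detected, using device={device}')
```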