ultralytics 8.0.188 fix .grad attribute leaf Tensor Warning (#5094)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Glenn Jocher 2023-09-26 20:28:45 +02:00, committed by GitHub
parent f2ed207571
commit 19c3314e68
11 changed files with 78 additions and 41 deletions

View File

@@ -6,7 +6,7 @@ keywords: Ultralytics, YOLOv8, object detection, image segmentation, machine lea
 <div align="center">
   <p>
-    <a href="https://github.com/ultralytics/ultralytics" target="_blank">
+    <a href="https://yolovision.ultralytics.com" target="_blank">
     <img width="1024" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png"></a>
   </p>
   <a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml/badge.svg" alt="Ultralytics CI"></a>

View File

@@ -563,6 +563,18 @@
       "Additional content below."
      ]
     },
+    {
+      "cell_type": "code",
+      "source": [
+        "# Pip install from source\n",
+        "!pip install git+https://github.com/ultralytics/ultralytics@main"
+      ],
+      "metadata": {
+        "id": "pIdE6i8C3LYp"
+      },
+      "execution_count": null,
+      "outputs": []
+    },
     {
       "cell_type": "code",
       "source": [

View File

@@ -16,6 +16,7 @@ DATASETS_DIR = Path(SETTINGS['datasets_dir'])
 WEIGHTS_DIR = Path(SETTINGS['weights_dir'])
 MODEL = WEIGHTS_DIR / 'path with spaces' / 'yolov8n.pt'  # test spaces in path
 DATA = 'coco8.yaml'
+BUS = ASSETS / 'bus.jpg'


 def test_checks():
@@ -29,6 +30,30 @@ def test_train():
     YOLO(MODEL).train(data=DATA, imgsz=64, epochs=1, device=device)  # requires imgsz>=64


+@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
+def test_predict_multiple_devices():
+    model = YOLO('yolov8n.pt')
+    model = model.cpu()
+    assert str(model.device) == 'cpu'
+    _ = model(BUS)  # CPU inference
+    assert str(model.device) == 'cpu'
+
+    model = model.to('cuda:0')
+    assert str(model.device) == 'cuda:0'
+    _ = model(BUS)  # CUDA inference
+    assert str(model.device) == 'cuda:0'
+
+    model = model.cpu()
+    assert str(model.device) == 'cpu'
+    _ = model(BUS)  # CPU inference
+    assert str(model.device) == 'cpu'
+
+    model = model.cuda()
+    assert str(model.device) == 'cuda:0'
+    _ = model(BUS)  # CUDA inference
+    assert str(model.device) == 'cuda:0'
+
+
 @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
 def test_autobatch():
     from ultralytics.utils.autobatch import check_train_batch_size
@@ -57,10 +82,10 @@ def test_predict_sam():
     model.info()

     # Run inference
-    model(ASSETS / 'bus.jpg', device=0)
+    model(BUS, device=0)

     # Run inference with bboxes prompt
-    model(ASSETS / 'zidane.jpg', bboxes=[439, 437, 524, 709], device=0)
+    model(BUS, bboxes=[439, 437, 524, 709], device=0)

     # Run inference with points prompt
     model(ASSETS / 'zidane.jpg', points=[900, 370], labels=[1], device=0)

View File

@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = '8.0.187'
+__version__ = '8.0.188'

 from ultralytics.models import RTDETR, SAM, YOLO
 from ultralytics.models.fastsam import FastSAM

View File

@@ -8,8 +8,7 @@ from typing import Union
 from ultralytics.cfg import TASK2DATA, get_cfg, get_save_dir
 from ultralytics.hub.utils import HUB_WEB_ROOT
 from ultralytics.nn.tasks import attempt_load_one_weight, guess_model_task, nn, yaml_model_load
-from ultralytics.utils import ASSETS, DEFAULT_CFG_DICT, LOGGER, RANK, callbacks, emojis, yaml_load
-from ultralytics.utils.checks import check_file, check_imgsz, check_pip_update_available, check_yaml
+from ultralytics.utils import ASSETS, DEFAULT_CFG_DICT, LOGGER, RANK, callbacks, checks, emojis, yaml_load
 from ultralytics.utils.downloads import GITHUB_ASSETS_STEMS

@@ -139,7 +138,7 @@ class Model(nn.Module):
             self.overrides = self.model.args = self._reset_ckpt_args(self.model.args)
             self.ckpt_path = self.model.pt_path
         else:
-            weights = check_file(weights)
+            weights = checks.check_file(weights)
             self.model, self.ckpt = weights, None
             self.task = task or guess_model_task(weights)
             self.ckpt_path = weights

@@ -251,8 +250,7 @@ class Model(nn.Module):
         if not hasattr(self.predictor, 'trackers'):
             from ultralytics.trackers import register_tracker
             register_tracker(self, persist)
-        # ByteTrack-based method needs low confidence predictions as input
-        kwargs['conf'] = kwargs.get('conf') or 0.1
+        kwargs['conf'] = kwargs.get('conf') or 0.1  # ByteTrack-based method needs low confidence predictions as input
         kwargs['mode'] = 'track'
         return self.predict(source=source, stream=stream, **kwargs)

@@ -266,7 +264,6 @@ class Model(nn.Module):
         """
         custom = {'rect': True}  # method defaults
         args = {**self.overrides, **custom, **kwargs, 'mode': 'val'}  # highest priority args on the right
-        args['imgsz'] = check_imgsz(args['imgsz'], max_dim=1)

         validator = (validator or self._smart_load('validator'))(args=args, _callbacks=self.callbacks)
         validator(model=self.model)

@@ -321,9 +318,9 @@ class Model(nn.Module):
             if any(kwargs):
                 LOGGER.warning('WARNING ⚠️ using HUB training arguments, ignoring local training arguments.')
             kwargs = self.session.train_args
-        check_pip_update_available()
+        checks.check_pip_update_available()

-        overrides = yaml_load(check_yaml(kwargs['cfg'])) if kwargs.get('cfg') else self.overrides
+        overrides = yaml_load(checks.check_yaml(kwargs['cfg'])) if kwargs.get('cfg') else self.overrides
         custom = {'data': TASK2DATA[self.task]}  # method defaults
         args = {**overrides, **custom, **kwargs, 'mode': 'train'}  # highest priority args on the right
         if args.get('resume'):

@@ -366,7 +363,7 @@ class Model(nn.Module):
         self._check_is_pytorch_model()
         self = super()._apply(fn)  # noqa
         self.predictor = None  # reset predictor as device may have changed
-        self.overrides['device'] = str(self.device)  # i.e. device(type='cuda', index=0) -> 'cuda:0'
+        self.overrides['device'] = self.device  # was str(self.device) i.e. device(type='cuda', index=0) -> 'cuda:0'
         return self

     @property
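
In PyTorch, .to(), .cpu() and .cuda() all funnel through nn.Module._apply, which is why overriding it is a single reliable place to invalidate device-bound state such as the cached predictor. A minimal sketch of the pattern, with CachedModel and its predictor attribute as illustrative stand-ins rather than the library class:

import torch.nn as nn


class CachedModel(nn.Module):
    """Illustrative stand-in: a module holding a device-bound cache."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(2, 2)
        self.predictor = object()  # pretend device-bound cache

    def _apply(self, fn):
        self = super()._apply(fn)  # moves all parameters/buffers via fn
        self.predictor = None  # device may have changed, so drop the cache
        return self


m = CachedModel()
m = m.cpu()  # .cpu()/.cuda()/.to() all route through _apply
assert m.predictor is None  # cache was reset by the device move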

View File

@@ -95,6 +95,7 @@ class BaseValidator:
         (self.save_dir / 'labels' if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)
         if self.args.conf is None:
             self.args.conf = 0.001  # default conf=0.001
+        self.args.imgsz = check_imgsz(self.args.imgsz, max_dim=1)

         self.plots = {}
         self.callbacks = _callbacks or callbacks.get_default_callbacks()
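
Moving the check_imgsz call into BaseValidator.__init__ means every validation entry point gets the same imgsz normalization, not just Model.val() (see the line removed from engine/model.py above). A simplified stand-in for what check_imgsz(imgsz, max_dim=1) enforces; the real helper also rounds sizes to stride multiples, which this sketch omits:

def check_imgsz_sketch(imgsz, max_dim=1):
    """Simplified stand-in: normalize imgsz and enforce a dimension limit."""
    sz = [imgsz] if isinstance(imgsz, int) else list(imgsz)
    if len(sz) > max_dim:
        raise ValueError(f'imgsz={imgsz} has {len(sz)} dims, but max_dim={max_dim}')
    return sz[0] if max_dim == 1 else sz


print(check_imgsz_sketch(640))    # 640
print(check_imgsz_sketch([640]))  # 640
# check_imgsz_sketch([640, 480]) raises: validation expects a single dimension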

View File

@@ -87,7 +87,7 @@ class FastSAMPrompt:
         pbar = TQDM(annotations, total=len(annotations))
         for ann in pbar:
             result_name = os.path.basename(ann.path)
-            image = ann.orig_img
+            image = ann.orig_img[..., ::-1]  # BGR to RGB
             original_h, original_w = ann.orig_shape
             # for macOS only
             # plt.switch_backend('TkAgg')

@@ -108,8 +108,7 @@ class FastSAMPrompt:
                     mask = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))
                     masks[i] = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8))

-                self.fast_show_mask(
-                    masks,
+                self.fast_show_mask(masks,
                     plt.gca(),
                     random_color=mask_random_color,
                     bbox=bbox,

@@ -117,8 +116,7 @@ class FastSAMPrompt:
                     pointlabel=point_label,
                     retinamask=retina,
                     target_height=original_h,
-                    target_width=original_w,
-                )
+                    target_width=original_w)

                 if with_contours:
                     contour_all = []

@@ -134,17 +132,11 @@ class FastSAMPrompt:
                     contour_mask = temp / 255 * color.reshape(1, 1, -1)
                     plt.imshow(contour_mask)

-            plt.axis('off')
-            fig = plt.gcf()
-
-            # Check if the canvas has been drawn
-            if fig.canvas.get_renderer() is None:  # macOS requires this or tests fail
-                fig.canvas.draw()
-
+            # Save the figure
             save_path = Path(output) / result_name
             save_path.parent.mkdir(exist_ok=True, parents=True)
-            image = Image.frombytes('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb())
-            image.save(save_path)
+            plt.axis('off')
+            plt.savefig(save_path, bbox_inches='tight', pad_inches=0, transparent=True)
             plt.close()
             pbar.set_description(f'Saving {result_name} to {save_path}')

@@ -263,8 +255,8 @@ class FastSAMPrompt:
             orig_masks_area = torch.sum(masks, dim=(1, 2))

             union = bbox_area + orig_masks_area - masks_area
-            IoUs = masks_area / union
-            max_iou_index = torch.argmax(IoUs)
+            iou = masks_area / union
+            max_iou_index = torch.argmax(iou)

             self.results[0].masks.data = torch.tensor(np.array([masks[max_iou_index].cpu().numpy()]))
         return self.results
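
The ann.orig_img[..., ::-1] change fixes the classic channel-order mismatch: OpenCV stores images as BGR while matplotlib renders RGB. A small NumPy check of what the slice does:

import numpy as np

bgr = np.zeros((2, 2, 3), dtype=np.uint8)
bgr[..., 0] = 255  # channel 0 is blue in OpenCV's BGR ordering

rgb = bgr[..., ::-1]  # reverse the channel axis: BGR -> RGB
assert rgb[0, 0, 2] == 255  # blue now sits in channel 2, as matplotlib expects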

View File

@@ -39,6 +39,7 @@ def check_class_names(names):

 class AutoBackend(nn.Module):

+    @torch.no_grad()
     def __init__(self,
                  weights='yolov8n.pt',
                  device=torch.device('cpu'),

@@ -309,6 +310,11 @@ class AutoBackend(nn.Module):
             names = self._apply_default_class_names(data)
         names = check_class_names(names)

+        # Disable gradients
+        if pt:
+            for p in model.parameters():
+                p.requires_grad = False
+
         self.__dict__.update(locals())  # assign all variables to self

     def forward(self, im, augment=False, visualize=False):
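
These two changes are the heart of the commit title: wrapping __init__ in torch.no_grad() keeps autograd from recording the weight-loading ops, and requires_grad = False marks the loaded PyTorch weights as frozen leaves, so later code touching .grad no longer trips the "non-leaf Tensor" warning. A minimal sketch of the same pattern, assuming only plain PyTorch (load_for_inference is a hypothetical helper, not the library API):

import torch
import torch.nn as nn


@torch.no_grad()  # no autograd graph is built while constructing/loading
def load_for_inference(build_fn):
    model = build_fn()
    for p in model.parameters():
        p.requires_grad = False  # frozen weights: nothing tracks or warns about .grad
    return model.eval()


model = load_for_inference(lambda: nn.Linear(4, 2))
out = model(torch.randn(1, 4))
assert not out.requires_grad  # outputs carry no autograd history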

View File

@@ -327,8 +327,9 @@ def yaml_save(file='data.yaml', data=None, header=''):
         file.parent.mkdir(parents=True, exist_ok=True)

     # Convert Path objects to strings
+    valid_types = int, float, str, bool, list, tuple, dict, type(None)
     for k, v in data.items():
-        if isinstance(v, Path):
+        if not isinstance(v, valid_types):
             data[k] = str(v)

     # Dump data to file in YAML format
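
The widened valid_types check stringifies anything YAML cannot serialize natively, not just Path objects; this is presumably what lets the _apply change above store a raw torch.device in overrides without breaking settings dumps. A hedged sketch of the coercion (demo_yaml_save is illustrative, not the library function):

from pathlib import Path

import yaml


def demo_yaml_save(file, data):
    valid_types = int, float, str, bool, list, tuple, dict, type(None)
    data = {k: v if isinstance(v, valid_types) else str(v) for k, v in data.items()}
    Path(file).write_text(yaml.safe_dump(data, sort_keys=False))


demo_yaml_save('demo.yaml', {'weights_dir': Path('weights'), 'epochs': 3})
# weights_dir is written as the string 'weights' instead of crashing safe_dump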

View File

@@ -55,7 +55,7 @@ def parse_requirements(file_path=ROOT.parent / 'requirements.txt', package=''):
         line = line.strip()
         if line and not line.startswith('#'):
             line = line.split('#')[0].strip()  # ignore inline comments
-            match = re.match(r'([a-zA-Z0-9-_]+)([<>!=~]+.*)?', line)
+            match = re.match(r'([a-zA-Z0-9-_]+)\s*([<>!=~]+.*)?', line)
             if match:
                 requirements.append(SimpleNamespace(name=match[1], specifier=match[2].strip() if match[2] else ''))
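
The added \s* lets a requirement line carry whitespace between the package name and its version specifier (e.g. 'numpy >=1.22.2'), which the old pattern silently collapsed into an empty specifier. A quick check:

import re

PATTERN = r'([a-zA-Z0-9-_]+)\s*([<>!=~]+.*)?'
for line in ('torch>=1.8.0', 'numpy >=1.22.2', 'requests'):
    m = re.match(PATTERN, line)
    print(m[1], m[2] or '')  # name, specifier ('' when none is given)
# torch >=1.8.0
# numpy >=1.22.2
# requests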

View File

@@ -44,6 +44,9 @@ def smart_inference_mode():

     def decorate(fn):
         """Applies appropriate torch decorator for inference mode based on torch version."""
-        return (torch.inference_mode if TORCH_1_9 else torch.no_grad)()(fn)
+        if TORCH_1_9 and torch.is_inference_mode_enabled():
+            return fn  # already in inference_mode, act as a pass-through
+        else:
+            return (torch.inference_mode if TORCH_1_9 else torch.no_grad)()(fn)

     return decorate
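
The new guard checks torch.is_inference_mode_enabled() at decoration time: when the decorator is applied while an inference_mode() context is already active, re-wrapping would be redundant, so the function is returned untouched. A self-contained sketch of the behavior, with TORCH_1_9 as a stand-in for the library's torch>=1.9 version check:

import torch

TORCH_1_9 = True  # stand-in: assume torch>=1.9


def smart_inference_mode():
    def decorate(fn):
        if TORCH_1_9 and torch.is_inference_mode_enabled():
            return fn  # already in inference_mode, act as a pass-through
        return (torch.inference_mode if TORCH_1_9 else torch.no_grad)()(fn)

    return decorate


@smart_inference_mode()  # decorated outside inference_mode -> gets wrapped
def double(x):
    return x * 2


print(double(torch.ones(2)).is_inference())  # True: ran under inference_mode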