mirror of https://github.com/THU-MIG/yolov10.git
synced 2025-05-23 05:24:22 +08:00

ultralytics 8.0.49 task, exports and metadata updates (#1197)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Mehran Ghandehari <mehran.maps@gmail.com>
Co-authored-by: Paul Guerrie <97041392+paulguerrie@users.noreply.github.com>

parent 74e4c94806
commit 3861e6c82a
.github/workflows/ci.yaml (vendored, 1 line changed)
@@ -13,6 +13,7 @@ on:
 
 jobs:
   HUB:
+    if: github.repository == 'ultralytics/ultralytics' && (github.event_name == 'schedule' || github.event_name == 'push')
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
@@ -18,7 +18,7 @@ RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx
 
 # Security updates
 # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796
-RUN apt upgrade --no-install-recommends -y openssl
+RUN apt upgrade --no-install-recommends -y openssl tar
 
 # Create working directory
 RUN mkdir -p /usr/src/ultralytics
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, GPL-3.0 license
 
-__version__ = '8.0.48'
+__version__ = '8.0.49'
 
 from ultralytics.yolo.engine.model import YOLO
 from ultralytics.yolo.utils.checks import check_yolo as checks
@@ -75,7 +75,7 @@ class AutoBackend(nn.Module):
         fp16 &= pt or jit or onnx or engine or nn_module  # FP16
         nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCWH)
         stride = 32  # default stride
-        model = None  # TODO: resolves ONNX inference, verify effect on other backends
+        model, metadata = None, None
         cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
         if not (pt or triton or nn_module):
             w = attempt_download_asset(w)  # download if not local
@@ -105,10 +105,7 @@ class AutoBackend(nn.Module):
             model = torch.jit.load(w, _extra_files=extra_files, map_location=device)
             model.half() if fp16 else model.float()
             if extra_files['config.txt']:  # load metadata dict
-                d = json.loads(extra_files['config.txt'],
-                               object_hook=lambda d: {int(k) if k.isdigit() else k: v
-                                                      for k, v in d.items()})
-                stride, names = int(d['stride']), d['names']
+                metadata = json.loads(extra_files['config.txt'], object_hook=lambda x: dict(x.items()))
         elif dnn:  # ONNX OpenCV DNN
             LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')
             check_requirements('opencv-python>=4.5.4')
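
Note: the TorchScript branch now loads the entire metadata dict from the archive's 'config.txt' extra file. A minimal load-side sketch (file name illustrative; JSON stringifies the integer keys of 'names', which is why the old code carried an object_hook to restore them):

    import json
    import torch

    extra_files = {'config.txt': ''}  # keys listed here are populated in place by torch.jit.load
    model = torch.jit.load('yolov8n.torchscript', _extra_files=extra_files, map_location='cpu')
    if extra_files['config.txt']:
        metadata = json.loads(extra_files['config.txt'])  # e.g. {'stride': 32, 'names': {'0': 'person', ...}}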
@@ -120,23 +117,23 @@ class AutoBackend(nn.Module):
             providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
             session = onnxruntime.InferenceSession(w, providers=providers)
             output_names = [x.name for x in session.get_outputs()]
-            meta = session.get_modelmeta().custom_metadata_map  # metadata
-            if 'stride' in meta:
-                stride, names = int(meta['stride']), eval(meta['names'])
+            metadata = session.get_modelmeta().custom_metadata_map  # metadata
         elif xml:  # OpenVINO
             LOGGER.info(f'Loading {w} for OpenVINO inference...')
             check_requirements('openvino')  # requires openvino-dev: https://pypi.org/project/openvino-dev/
             from openvino.runtime import Core, Layout, get_batch  # noqa
             ie = Core()
-            if not Path(w).is_file():  # if not *.xml
-                w = next(Path(w).glob('*.xml'))  # get *.xml file from *_openvino_model dir
-            network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))
+            w = Path(w)
+            if not w.is_file():  # if not *.xml
+                w = next(w.glob('*.xml'))  # get *.xml file from *_openvino_model dir
+            network = ie.read_model(model=str(w), weights=w.with_suffix('.bin'))
             if network.get_parameters()[0].get_layout().empty:
                 network.get_parameters()[0].set_layout(Layout('NCHW'))
             batch_dim = get_batch(network)
             if batch_dim.is_static:
                 batch_size = batch_dim.get_length()
             executable_network = ie.compile_model(network, device_name='CPU')  # device_name="MYRIAD" for NCS2
+            metadata = w.parent / 'metadata.yaml'
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
             import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
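
Note: ONNX exports carry their metadata in the model's custom metadata map; every value arrives as a string and is parsed later in the shared block at the end of __init__. A minimal sketch (file name illustrative):

    import onnxruntime

    session = onnxruntime.InferenceSession('yolov8n.onnx', providers=['CPUExecutionProvider'])
    metadata = session.get_modelmeta().custom_metadata_map  # e.g. {'stride': '32', 'task': 'detect', ...}
    stride = int(metadata['stride'])  # values stay strings until parsed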
@@ -148,7 +145,7 @@ class AutoBackend(nn.Module):
             # Read file
             with open(w, 'rb') as f, trt.Runtime(logger) as runtime:
                 meta_len = int.from_bytes(f.read(4), byteorder='little')  # read metadata length
-                meta = json.loads(f.read(meta_len).decode('utf-8'))  # read metadata
+                metadata = json.loads(f.read(meta_len).decode('utf-8'))  # read metadata
                 model = runtime.deserialize_cuda_engine(f.read())  # read engine
             context = model.create_execution_context()
             bindings = OrderedDict()
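
Note: the .engine layout read here is a 4-byte little-endian length, the JSON metadata dict, then the serialized engine. A self-contained sketch of that framing (helper names are illustrative, not library API):

    import json

    def write_engine_file(path, metadata, engine_bytes):
        meta = json.dumps(metadata).encode('utf-8')
        with open(path, 'wb') as f:
            f.write(len(meta).to_bytes(4, byteorder='little'))  # metadata length header
            f.write(meta)                                       # metadata JSON
            f.write(engine_bytes)                               # serialized engine

    def read_engine_file(path):
        with open(path, 'rb') as f:
            meta_len = int.from_bytes(f.read(4), byteorder='little')
            metadata = json.loads(f.read(meta_len).decode('utf-8'))
            return metadata, f.read()  # remaining bytes are the engine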
@@ -171,18 +168,17 @@ class AutoBackend(nn.Module):
                 bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
             binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
             batch_size = bindings['images'].shape[0]  # if dynamic, this is instead max batch size
-            stride, names = int(meta['stride']), meta['names']
         elif coreml:  # CoreML
             LOGGER.info(f'Loading {w} for CoreML inference...')
             import coremltools as ct
             model = ct.models.MLModel(w)
-            names, stride, task = (model.user_defined_metadata.get(k) for k in ('names', 'stride', 'task'))
-            names, stride = eval(names), int(stride)
+            metadata = model.user_defined_metadata
         elif saved_model:  # TF SavedModel
             LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')
             import tensorflow as tf
             keras = False  # assume TF1 saved_model
             model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)
+            metadata = Path(w) / 'metadata.yaml'
         elif pb:  # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt
             LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')
             import tensorflow as tf
@@ -221,23 +217,23 @@ class AutoBackend(nn.Module):
             with contextlib.suppress(zipfile.BadZipFile):
                 with zipfile.ZipFile(w, 'r') as model:
                     meta_file = model.namelist()[0]
-                    meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))
-                    stride, names = int(meta['stride']), meta['names']
+                    metadata = ast.literal_eval(model.read(meta_file).decode('utf-8'))
         elif tfjs:  # TF.js
             raise NotImplementedError('YOLOv8 TF.js inference is not supported')
         elif paddle:  # PaddlePaddle
             LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
             check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
             import paddle.inference as pdi
-            if not Path(w).is_file():  # if not *.pdmodel
-                w = next(Path(w).rglob('*.pdmodel'))  # get *.pdmodel file from *_paddle_model dir
-            weights = Path(w).with_suffix('.pdiparams')
-            config = pdi.Config(str(w), str(weights))
+            w = Path(w)
+            if not w.is_file():  # if not *.pdmodel
+                w = next(w.rglob('*.pdmodel'))  # get *.pdmodel file from *_paddle_model dir
+            config = pdi.Config(str(w), str(w.with_suffix('.pdiparams')))
             if cuda:
                 config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)
             predictor = pdi.create_predictor(config)
             input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
             output_names = predictor.get_output_names()
+            metadata = w.parents[1] / 'metadata.yaml'
         elif triton:  # NVIDIA Triton Inference Server
             LOGGER.info('Triton Inference Server not supported...')
             '''
@@ -254,14 +250,16 @@ class AutoBackend(nn.Module):
                                   f'\n\n{EXPORT_FORMATS_TABLE}')
 
         # Load external metadata YAML
-        w = Path(w)
-        if xml or saved_model or paddle:
-            metadata = (w if saved_model else w.parents[1] if paddle else w.parent) / 'metadata.yaml'
-            if metadata.exists():
-                metadata = yaml_load(metadata)
-                stride, names = int(metadata['stride']), metadata['names']  # load metadata
-            else:
-                LOGGER.warning(f"WARNING ⚠️ Metadata not found at '{metadata}'")
+        if isinstance(metadata, (str, Path)) and Path(metadata).exists():
+            metadata = yaml_load(metadata)
+        if metadata:
+            stride = int(metadata['stride'])
+            task = metadata['task']
+            batch = int(metadata['batch'])
+            imgsz = eval(metadata['imgsz']) if isinstance(metadata['imgsz'], str) else metadata['imgsz']
+            names = eval(metadata['names']) if isinstance(metadata['names'], str) else metadata['names']
+        elif not (pt or triton or nn_module):
+            LOGGER.warning(f"WARNING ⚠️ Metadata not found for 'model={weights}'")
 
         # Check names
         if 'names' not in locals():  # names missing
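
Note: every backend now converges on one 'metadata' value, either a dict already or a path to the metadata.yaml written at export time. A sketch of the normalization, assuming PyYAML as a stand-in for yaml_load (paths and values illustrative):

    from pathlib import Path

    import yaml

    metadata = Path('yolov8n_openvino_model') / 'metadata.yaml'  # as set by the OpenVINO branch
    if isinstance(metadata, (str, Path)) and Path(metadata).exists():
        metadata = yaml.safe_load(Path(metadata).read_text())
    if isinstance(metadata, dict):
        stride, task = int(metadata['stride']), metadata['task']
        names = eval(metadata['names']) if isinstance(metadata['names'], str) else metadata['names']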
@@ -257,7 +257,7 @@ class ClassificationModel(BaseModel):
                  cfg=None,
                  model=None,
                  ch=3,
-                 nc=1000,
+                 nc=None,
                  cutoff=10,
                  verbose=True):  # yaml, model, channels, number of classes, cutoff index, verbose flag
         super().__init__()
@@ -286,6 +286,8 @@ class ClassificationModel(BaseModel):
         if nc and nc != self.yaml['nc']:
             LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
             self.yaml['nc'] = nc  # override yaml value
+        elif not nc and not self.yaml.get('nc', None):
+            raise ValueError('nc not specified. Must specify nc in model.yaml or function arguments.')
         self.model, self.save = parse_model(deepcopy(self.yaml), ch=ch, verbose=verbose)  # model, savelist
         self.stride = torch.Tensor([1])  # no stride constraints
         self.names = {i: f'{i}' for i in range(self.yaml['nc'])}  # default names dict
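
Note: the net effect of the two hunks above is that nc no longer silently defaults to 1000; it must come from the YAML or the caller. A toy illustration of the rule (function name hypothetical):

    def resolve_nc(yaml_cfg, nc=None):
        if nc and nc != yaml_cfg.get('nc'):
            yaml_cfg['nc'] = nc  # explicit argument overrides the YAML value
        elif not nc and not yaml_cfg.get('nc', None):
            raise ValueError('nc not specified. Must specify nc in model.yaml or function arguments.')
        return yaml_cfg['nc']

    assert resolve_nc({'nc': 80}) == 80  # taken from the YAML
    assert resolve_nc({}, nc=10) == 10   # taken from the argument
    # resolve_nc({})                     -> ValueError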
@@ -1,12 +1,11 @@
 # Ultralytics YOLO 🚀, GPL-3.0 license
 
-from ultralytics.yolo.utils.checks import check_requirements, check_yaml
-
-check_requirements('lap')  # for linear_assignment
-
 import torch
 
 from ultralytics.yolo.utils import IterableSimpleNamespace, yaml_load
+from ultralytics.yolo.utils.checks import check_requirements, check_yaml
+
+check_requirements('lap')  # for linear_assignment
 
 from .trackers import BOTSORT, BYTETracker
 
@@ -65,17 +65,18 @@ class BOTrack(STrack):
 
     @staticmethod
     def multi_predict(stracks):
-        if len(stracks) > 0:
-            multi_mean = np.asarray([st.mean.copy() for st in stracks])
-            multi_covariance = np.asarray([st.covariance for st in stracks])
-            for i, st in enumerate(stracks):
-                if st.state != TrackState.Tracked:
-                    multi_mean[i][6] = 0
-                    multi_mean[i][7] = 0
-            multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
-            for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
-                stracks[i].mean = mean
-                stracks[i].covariance = cov
+        if len(stracks) <= 0:
+            return
+        multi_mean = np.asarray([st.mean.copy() for st in stracks])
+        multi_covariance = np.asarray([st.covariance for st in stracks])
+        for i, st in enumerate(stracks):
+            if st.state != TrackState.Tracked:
+                multi_mean[i][6] = 0
+                multi_mean[i][7] = 0
+        multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance)
+        for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)):
+            stracks[i].mean = mean
+            stracks[i].covariance = cov
 
     def convert_coords(self, tlwh):
         return self.tlwh_to_xywh(tlwh)
@@ -112,10 +113,9 @@ class BOTSORT(BYTETracker):
             return []
         if self.args.with_reid and self.encoder is not None:
             features_keep = self.encoder.inference(img, dets)
-            detections = [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)]
+            return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)]  # detections
         else:
-            detections = [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)]
-        return detections
+            return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)]  # detections
 
     def get_dists(self, tracks, detections):
         dists = matching.iou_distance(tracks, detections)
@@ -92,7 +92,6 @@ class STrack(BaseTrack):
         Update a matched track
         :type new_track: STrack
         :type frame_id: int
-        :type update_feature: bool
         :return:
         """
         self.frame_id = frame_id
@@ -71,7 +71,7 @@ class GMC:
 
     def apply(self, raw_frame, detections=None):
         if self.method in ['orb', 'sift']:
-            return self.applyFeaures(raw_frame, detections)
+            return self.applyFeatures(raw_frame, detections)
         elif self.method == 'ecc':
             return self.applyEcc(raw_frame, detections)
         elif self.method == 'sparseOptFlow':
@@ -116,7 +116,7 @@ class GMC:
 
         return H
 
-    def applyFeaures(self, raw_frame, detections=None):
+    def applyFeatures(self, raw_frame, detections=None):
 
         # Initialize
         height, width, _ = raw_frame.shape
@@ -190,13 +190,13 @@ class GMC:
         meanSpatialDistances = np.mean(spatialDistances, 0)
         stdSpatialDistances = np.std(spatialDistances, 0)
 
-        inliesrs = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances
+        inliers = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances
 
         goodMatches = []
         prevPoints = []
         currPoints = []
         for i in range(len(matches)):
-            if inliesrs[i, 0] and inliesrs[i, 1]:
+            if inliers[i, 0] and inliers[i, 1]:
                 goodMatches.append(matches[i])
                 prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt)
                 currPoints.append(keypoints[matches[i].trainIdx].pt)
@@ -226,7 +226,7 @@ class GMC:
 
         # Find rigid matrix
         if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)):
-            H, inliesrs = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)
+            H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)
 
             # Handle downscale
             if self.downscale > 1.0:
@@ -285,7 +285,7 @@ class GMC:
 
         # Find rigid matrix
        if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)):
-            H, inliesrs = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)
+            H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC)
 
             # Handle downscale
             if self.downscale > 1.0:
@@ -136,7 +136,7 @@ class KalmanFilterXYAH:
             The Nx8 dimensional mean matrix of the object states at the previous
             time step.
         covariance : ndarray
-            The Nx8x8 dimensional covariance matrics of the object states at the
+            The Nx8x8 dimensional covariance matrix of the object states at the
             previous time step.
         Returns
         -------
@@ -362,7 +362,7 @@ class KalmanFilterXYWH:
             The Nx8 dimensional mean matrix of the object states at the previous
             time step.
         covariance : ndarray
-            The Nx8x8 dimensional covariance matrics of the object states at the
+            The Nx8x8 dimensional covariance matrix of the object states at the
             previous time step.
         Returns
         -------
|
@ -119,7 +119,7 @@ def embedding_distance(tracks, detections, metric='cosine'):
|
||||
# for i, track in enumerate(tracks):
|
||||
# cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric))
|
||||
track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32)
|
||||
cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Nomalized features
|
||||
cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Normalized features
|
||||
return cost_matrix
|
||||
|
||||
|
||||
|
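
Note: the corrected comment sits on the vectorized path, where a single cdist call builds the whole track-by-detection cost matrix that the commented-out loop computed row by row. A runnable sketch with illustrative shapes:

    import numpy as np
    from scipy.spatial.distance import cdist

    track_features = np.random.rand(5, 128).astype(np.float32)  # 5 tracks, 128-dim embeddings
    det_features = np.random.rand(8, 128).astype(np.float32)    # 8 detections
    cost_matrix = np.maximum(0.0, cdist(track_features, det_features, 'cosine'))  # shape (5, 8)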
@@ -273,7 +273,7 @@ def entrypoint(debug=''):
         return
 
     # Task
-    task = overrides.get('task')
+    task = overrides.pop('task', None)
     if task and task not in TASKS:
         raise ValueError(f"Invalid 'task={task}'. Valid tasks are {TASKS}.\n{CLI_HELP_MSG}")
 
@@ -289,9 +289,8 @@ def entrypoint(debug=''):
     # Task Update
     if task and task != model.task:
         LOGGER.warning(f"WARNING ⚠️ conflicting 'task={task}' passed with 'task={model.task}' model. "
-                       f'This may produce errors.')
-    task = task or model.task
-    overrides['task'] = task
+                       f"Ignoring 'task={task}' and updating to 'task={model.task}' to match model.")
+    task = model.task
 
     # Mode
     if mode in {'predict', 'track'} and 'source' not in overrides:
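
Note: with this change a CLI task that conflicts with the loaded checkpoint is dropped in favor of the model's own task instead of being kept. An illustrative command (model and source names are examples):

    yolo task=detect mode=predict model=yolov8n-seg.pt source=bus.jpg  # warns, then runs as task=segment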
@@ -54,7 +54,7 @@ class _RepeatSampler:
             yield from iter(self.sampler)
 
 
-def seed_worker(worker_id):
+def seed_worker(worker_id):  # noqa
     # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader
     worker_seed = torch.initial_seed() % 2 ** 32
     np.random.seed(worker_seed)
@@ -134,7 +134,7 @@ def build_classification_dataloader(path,
 
 def check_source(source):
     webcam, screenshot, from_img, in_memory = False, False, False, False
-    if isinstance(source, (str, int, Path)):  # int for local usb carame
+    if isinstance(source, (str, int, Path)):  # int for local usb camera
         source = str(source)
         is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
         is_url = source.lower().startswith(('https://', 'http://', 'rtsp://', 'rtmp://'))
@@ -147,11 +147,10 @@ def check_source(source):
     elif isinstance(source, (list, tuple)):
         source = autocast_list(source)  # convert all list elements to PIL or np arrays
         from_img = True
-    elif isinstance(source, ((Image.Image, np.ndarray))):
+    elif isinstance(source, (Image.Image, np.ndarray)):
         from_img = True
     else:
-        raise Exception(
-            'Unsupported type encountered! See docs for supported types https://docs.ultralytics.com/predict')
+        raise TypeError('Unsupported image type. See docs for supported types https://docs.ultralytics.com/predict')
 
     return source, webcam, screenshot, from_img, in_memory
 
@@ -215,7 +215,7 @@ class Exporter:
         self.model = model
         self.file = file
         self.output_shape = tuple(y.shape) if isinstance(y, torch.Tensor) else tuple(tuple(x.shape) for x in y)
-        self.pretty_name = self.file.stem.replace('yolo', 'YOLO')
+        self.pretty_name = Path(self.model.yaml.get('yaml_file', self.file)).stem.replace('yolo', 'YOLO')
         description = f'Ultralytics {self.pretty_name} model ' + f'trained on {Path(self.args.data).name}' \
             if self.args.data else '(untrained)'
         self.metadata = {
@@ -225,6 +225,8 @@ class Exporter:
             'version': __version__,
             'stride': int(max(model.stride)),
             'task': model.task,
+            'batch': self.args.batch,
+            'imgsz': self.imgsz,
             'names': model.names}  # model metadata
 
         LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with input shape {tuple(im.shape)} BCHW and "
@@ -283,8 +285,7 @@ class Exporter:
         f = self.file.with_suffix('.torchscript')
 
         ts = torch.jit.trace(self.model, self.im, strict=False)
-        d = {'shape': self.im.shape, 'stride': int(max(self.model.stride)), 'names': self.model.names}
-        extra_files = {'config.txt': json.dumps(d)}  # torch._C.ExtraFilesMap()
+        extra_files = {'config.txt': json.dumps(self.metadata)}  # torch._C.ExtraFilesMap()
         if self.args.optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
             LOGGER.info(f'{prefix} optimizing for mobile...')
             from torch.utils.mobile_optimizer import optimize_for_mobile
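
Note: this is the export-side counterpart of the AutoBackend TorchScript change, serializing the full metadata dict instead of an ad-hoc shape/stride/names dict. A minimal sketch with a stand-in module and illustrative values:

    import json
    import torch

    ts = torch.jit.trace(torch.nn.Identity(), torch.zeros(1, 3, 640, 640), strict=False)  # stand-in model
    metadata = {'stride': 32, 'task': 'detect', 'batch': 1, 'imgsz': [640, 640]}
    torch.jit.save(ts, 'model.torchscript', _extra_files={'config.txt': json.dumps(metadata)})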
@@ -429,16 +430,18 @@ class Exporter:
                               classifier_config=classifier_config)
         bits, mode = (8, 'kmeans_lut') if self.args.int8 else (16, 'linear') if self.args.half else (32, None)
         if bits < 32:
             if 'kmeans' in mode:
                 check_requirements('scikit-learn')  # scikit-learn package required for k-means quantization
             ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
         if self.args.nms and self.model.task == 'detect':
             ct_model = self._pipeline_coreml(ct_model)
 
         m = self.metadata  # metadata dict
-        ct_model.short_description = m['description']
-        ct_model.author = m['author']
-        ct_model.license = m['license']
-        ct_model.version = m['version']
-        ct_model.user_defined_metadata.update({k: str(v) for k, v in m.items() if k in ('stride', 'task', 'names')})
+        ct_model.short_description = m.pop('description')
+        ct_model.author = m.pop('author')
+        ct_model.license = m.pop('license')
+        ct_model.version = m.pop('version')
+        ct_model.user_defined_metadata.update({k: str(v) for k, v in m.items()})
         ct_model.save(str(f))
         return f, ct_model
 
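
Note: popping the built-in fields first means everything left in the dict, including the new batch and imgsz keys, lands in user_defined_metadata as strings. Reading it back looks roughly like this (file name illustrative):

    import coremltools as ct

    ct_model = ct.models.MLModel('yolov8n.mlmodel')
    print(ct_model.short_description, ct_model.author, ct_model.version)
    metadata = dict(ct_model.user_defined_metadata)  # e.g. {'stride': '32', 'task': 'detect', ...}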
@@ -8,8 +8,8 @@ from ultralytics.nn.tasks import (ClassificationModel, DetectionModel, Segmentat
                                   guess_model_task, nn)
 from ultralytics.yolo.cfg import get_cfg
 from ultralytics.yolo.engine.exporter import Exporter
-from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, ROOT, callbacks,
-                                    is_git_dir, is_pip_package, yaml_load)
+from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, ONLINE, RANK, ROOT,
+                                    callbacks, is_git_dir, is_pip_package, yaml_load)
 from ultralytics.yolo.utils.checks import check_file, check_imgsz, check_pip_update, check_yaml
 from ultralytics.yolo.utils.downloads import GITHUB_ASSET_STEMS
 from ultralytics.yolo.utils.torch_utils import smart_inference_mode
@@ -157,7 +157,7 @@ class YOLO:
         """
         Inform user of ultralytics package update availability
         """
-        if is_pip_package():
+        if ONLINE and is_pip_package():
             check_pip_update()
 
     def reset(self):
@@ -5,6 +5,7 @@ Ultralytics Results, Boxes and Masks classes for handling inference results
 Usage: See https://docs.ultralytics.com/predict/
 """
 
+import pprint
 from copy import deepcopy
 from functools import lru_cache
 
@@ -96,10 +97,11 @@ class Results:
             return len(getattr(self, k))
 
     def __str__(self):
-        return ''.join(getattr(self, k).__str__() for k in self._keys)
+        attr = {k: v for k, v in vars(self).items() if not isinstance(v, type(self))}
+        return pprint.pformat(attr, indent=2, width=120, depth=10, compact=True)
 
     def __repr__(self):
-        return ''.join(getattr(self, k).__repr__() for k in self._keys)
+        return self.__str__()
 
     def __getattr__(self, attr):
         name = self.__class__.__name__
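
Note: the new __str__ pretty-prints the instance's attribute dict rather than concatenating per-key reprs, and __repr__ now simply delegates to it. The same pattern on a toy class:

    import pprint

    class Result:
        def __init__(self):
            self.boxes, self.orig_shape = [1, 2, 3], (640, 480)

        def __str__(self):
            attr = {k: v for k, v in vars(self).items() if not isinstance(v, type(self))}
            return pprint.pformat(attr, indent=2, width=120, depth=10, compact=True)

    print(Result())  # {'boxes': [1, 2, 3], 'orig_shape': (640, 480)}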
@@ -261,7 +263,7 @@ class Boxes:
         return self.boxes.__str__()
 
     def __repr__(self):
-        return (f'Ultralytics YOLO {self.__class__} masks\n' + f'type: {type(self.boxes)}\n' +
+        return (f'Ultralytics YOLO {self.__class__.__name__}\n' + f'type: {type(self.boxes)}\n' +
                 f'shape: {self.boxes.shape}\n' + f'dtype: {self.boxes.dtype}\n + {self.boxes.__repr__()}')
 
     def __getitem__(self, idx):
@@ -337,7 +339,7 @@ class Masks:
         return self.masks.__str__()
 
     def __repr__(self):
-        return (f'Ultralytics YOLO {self.__class__} masks\n' + f'type: {type(self.masks)}\n' +
+        return (f'Ultralytics YOLO {self.__class__.__name__}\n' + f'type: {type(self.masks)}\n' +
                 f'shape: {self.masks.shape}\n' + f'dtype: {self.masks.dtype}\n + {self.masks.__repr__()}')
 
     def __getitem__(self, idx):
|
@ -102,7 +102,7 @@ class BaseValidator:
|
||||
model = model.half() if self.args.half else model.float()
|
||||
self.model = model
|
||||
self.loss = torch.zeros_like(trainer.loss_items, device=trainer.device)
|
||||
self.args.plots = trainer.epoch == trainer.epochs - 1 # always plot final epoch
|
||||
self.args.plots = trainer.stopper.possible_stop or (trainer.epoch == trainer.epochs - 1)
|
||||
model.eval()
|
||||
else:
|
||||
callbacks.add_integration_callbacks(self)
|
||||
|
@@ -45,9 +45,10 @@ def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', imgsz=160, hal
     y = []
     t0 = time.time()
     for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows():  # index, (name, format, suffix, CPU, GPU)
-        emoji = '❌'  # indicates export failure
+        emoji, filename = '❌', None  # export defaults
         try:
-            assert i != 11, 'paddle exports coming soon'
+            if model.task == 'classify':
+                assert i != 11, 'paddle cls exports coming soon'
             assert i != 9 or LINUX, 'Edge TPU export only supported on Linux'
             if 'cpu' in device.type:
                 assert cpu, 'inference not supported on CPU'
@@ -86,7 +87,7 @@
             if hard_fail:
                 assert type(e) is AssertionError, f'Benchmark hard_fail for {name}: {e}'
             LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
-            y.append([name, emoji, None, None, None])  # mAP, t_inference
+            y.append([name, emoji, round(file_size(filename), 1), None, None])  # mAP, t_inference
 
     # Print results
     check_yolo(device=device)  # print system info
@@ -70,14 +70,14 @@ def file_date(path=__file__):
 
 def file_size(path):
     # Return file/dir size (MB)
-    mb = 1 << 20  # bytes to MiB (1024 ** 2)
-    path = Path(path)
-    if path.is_file():
-        return path.stat().st_size / mb
-    elif path.is_dir():
-        return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb
-    else:
-        return 0.0
+    if isinstance(path, (str, Path)):
+        mb = 1 << 20  # bytes to MiB (1024 ** 2)
+        path = Path(path)
+        if path.is_file():
+            return path.stat().st_size / mb
+        elif path.is_dir():
+            return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb
+    return 0.0
 
 
 def url2file(url):
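
Note: the isinstance guard matters because benchmark() above now records filename even for failed exports, and that value may be None; file_size(None) should yield 0.0 rather than a TypeError from Path(None). A usage sketch, assuming the module path of this version:

    from ultralytics.yolo.utils.files import file_size

    print(file_size(None))            # 0.0 instead of raising
    print(file_size('missing.file'))  # 0.0 for a path that is neither file nor directory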
@@ -77,11 +77,18 @@ class SegLoss(Loss):
         anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)
 
         # targets
-        batch_idx = batch['batch_idx'].view(-1, 1)
-        targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1)
-        targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
-        gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
-        mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
+        try:
+            batch_idx = batch['batch_idx'].view(-1, 1)
+            targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1)
+            targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
+            gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
+            mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
+        except RuntimeError as e:
+            raise TypeError('ERROR ❌ segment dataset incorrectly formatted or not a segment dataset.\n'
+                            "This error can occur when incorrectly training a 'segment' model on a 'detect' dataset, "
+                            "i.e. 'yolo train model=yolov8n-seg.pt data=coco128.yaml'.\nVerify your dataset is a "
+                            "correctly formatted 'segment' dataset using 'data=coco128-seg.yaml' "
+                            'as an example.\nSee https://docs.ultralytics.com/tasks/segmentation/ for help.') from e
 
         # pboxes
         pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)
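
Note: the wrapper is an exception-translation pattern: a low-level RuntimeError from tensor reshaping becomes an actionable TypeError, and 'from e' chains the original traceback. Stripped to a self-contained skeleton (toy function; KeyError stands in for the RuntimeError):

    def build_targets(batch):
        try:
            return batch['batch_idx'], batch['cls'], batch['bboxes']
        except KeyError as e:
            raise TypeError('dataset incorrectly formatted or not a segment dataset; '
                            'see https://docs.ultralytics.com/tasks/segmentation/ for help') from e

    # build_targets({})  -> TypeError chained to the original KeyError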