Mirror of https://github.com/THU-MIG/yolov10.git (synced 2025-05-23 13:34:23 +08:00)
ultralytics 8.0.32 HUB and TensorFlow fixes (#870)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

parent f5d003d05a
commit c9893810c7
docs/hub.md (59 changed lines)
@@ -1,30 +1,54 @@
 # Ultralytics HUB

-<div align="center">
-<a href="https://hub.ultralytics.com" target="_blank">
-<img width="1024" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png"></a>
+<a href="https://bit.ly/ultralytics_hub" target="_blank">
+<img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png"></a>
 <br>
+<br>
+<div align="center">
+<a href="https://github.com/ultralytics" style="text-decoration:none;">
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-github.png" width="2%" alt="" /></a>
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
+<a href="https://www.linkedin.com/company/ultralytics" style="text-decoration:none;">
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-linkedin.png" width="2%" alt="" /></a>
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
+<a href="https://twitter.com/ultralytics" style="text-decoration:none;">
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-twitter.png" width="2%" alt="" /></a>
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
+<a href="https://www.producthunt.com/@glenn_jocher" style="text-decoration:none;">
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-producthunt.png" width="2%" alt="" /></a>
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
+<a href="https://youtube.com/ultralytics" style="text-decoration:none;">
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-youtube.png" width="2%" alt="" /></a>
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
+<a href="https://www.facebook.com/ultralytics" style="text-decoration:none;">
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-facebook.png" width="2%" alt="" /></a>
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="2%" alt="" />
+<a href="https://www.instagram.com/ultralytics/" style="text-decoration:none;">
+<img src="https://github.com/ultralytics/assets/raw/main/social/logo-social-instagram.png" width="2%" alt="" /></a>
+<br>
+<br>
 <a href="https://github.com/ultralytics/hub/actions/workflows/ci.yaml">
 <img src="https://github.com/ultralytics/hub/actions/workflows/ci.yaml/badge.svg" alt="CI CPU"></a>
+<a href="https://colab.research.google.com/github/ultralytics/hub/blob/master/hub.ipynb">
+<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
 </div>
+<br>

 [Ultralytics HUB](https://hub.ultralytics.com) is a new no-code online tool developed
 by [Ultralytics](https://ultralytics.com), the creators of the popular [YOLOv5](https://github.com/ultralytics/yolov5)
-object detection and image segmentation models. With Ultralytics HUB, users can easily train and deploy YOLOv5 models
+object detection and image segmentation models. With Ultralytics HUB, users can easily train and deploy YOLO models
 without any coding or technical expertise.

 Ultralytics HUB is designed to be user-friendly and intuitive, with a drag-and-drop interface that allows users to
 easily upload their data and select their model configurations. It also offers a range of pre-trained models and
 templates to choose from, making it easy for users to get started with training their own models. Once a model is
 trained, it can be easily deployed and used for real-time object detection and image segmentation tasks. Overall,
-Ultralytics HUB is an essential tool for anyone looking to use YOLOv5 for their object detection and image segmentation
+Ultralytics HUB is an essential tool for anyone looking to use YOLO for their object detection and image segmentation
 projects.

 **[Get started now](https://hub.ultralytics.com)** and experience the power and simplicity of Ultralytics HUB for
-yourself. Sign up for a free account and
-start building, training, and deploying YOLOv5 and YOLOv8 models today.
+yourself. Sign up for a free account and start building, training, and deploying YOLOv5 and YOLOv8 models today.

 ## 1. Upload a Dataset

@@ -44,7 +68,9 @@ zip -r coco6.zip coco6
 The example [coco6.zip](https://github.com/ultralytics/hub/blob/master/coco6.zip) dataset in this repository can be
 downloaded and unzipped to see exactly how to structure your custom dataset.

-<p align="center"><img width="80%" src="https://user-images.githubusercontent.com/26833433/201424843-20fa081b-ad4b-4d6c-a095-e810775908d8.png" title="COCO6" /></p>
+<p align="center">
+<img width="80%" src="https://user-images.githubusercontent.com/26833433/201424843-20fa081b-ad4b-4d6c-a095-e810775908d8.png" title="COCO6" />
+</p>

 The dataset YAML is the same standard YOLOv5 YAML format. See
 the [YOLOv5 Train Custom Data tutorial](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) for full details.
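As a rough illustration of the standard YOLOv5-style keys such a dataset YAML carries, here is a minimal sketch written from Python; the paths, class count and class names below are placeholders, not the actual coco6 contents.

```python
# Illustrative sketch only: a minimal detection-dataset YAML in the standard YOLOv5 format.
import yaml

dataset = {
    'path': 'coco6',                         # dataset root dir (placeholder)
    'train': 'images/train',                 # train images, relative to 'path'
    'val': 'images/val',                     # val images, relative to 'path'
    'nc': 3,                                 # number of classes (placeholder)
    'names': ['person', 'bicycle', 'car'],   # class names (placeholder)
}

print(yaml.safe_dump(dataset, sort_keys=False))  # paste the output into e.g. coco6.yaml
```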
@@ -68,20 +94,21 @@ names:
 After zipping your dataset, sign in to [Ultralytics HUB](https://bit.ly/ultralytics_hub) and click the Datasets tab.
 Click 'Upload Dataset' to upload, scan and visualize your new dataset before training new YOLOv5 models on it!

-<img width="100%" alt="HUB Dataset Upload" src="https://user-images.githubusercontent.com/26833433/198611715-540c9856-49d7-4069-a2fd-7c9eb70e772e.png">
+<img width="100%" alt="HUB Dataset Upload" src="https://user-images.githubusercontent.com/26833433/216763338-9a8812c8-a4e5-4362-8102-40dad7818396.png">

 ## 2. Train a Model

-Connect to the Ultralytics HUB notebook and use your model API key to begin
-training! <a href="https://colab.research.google.com/github/ultralytics/hub/blob/master/hub.ipynb" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
+Connect to the Ultralytics HUB notebook and use your model API key to begin training!
+<a href="https://colab.research.google.com/github/ultralytics/hub/blob/master/hub.ipynb" target="_blank">
+<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>

 ## 3. Deploy to Real World

 Export your model to 13 different formats, including TensorFlow, ONNX, OpenVINO, CoreML, Paddle and many others. Run
-models directly on your mobile device by downloading the [Ultralytics App](https://ultralytics.com/app_install)!
-<a href="https://ultralytics.com/app_install" target="_blank">
-<img width="100%" alt="Ultralytics mobile app" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-app.png"></a>
+models directly on your [iOS](https://apps.apple.com/xk/app/ultralytics/id1583935240) or
+[Android](https://play.google.com/store/apps/details?id=com.ultralytics.ultralytics_app) mobile device by downloading
+the [Ultralytics App](https://ultralytics.com/app_install)!

 ## ❓ Issues

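The "Deploy to Real World" section above mentions exporting to 13 formats. A minimal sketch of a programmatic export with the YOLO API shipped in this package is shown below; the checkpoint name is only an example.

```python
# Sketch: export a trained model to one of the supported formats (e.g. ONNX).
# 'yolov8n.pt' is a placeholder checkpoint name; substitute your own trained weights.
from ultralytics import YOLO

model = YOLO('yolov8n.pt')   # load a model
model.export(format='onnx')  # other formats include openvino, coreml, tflite, paddle, ...
```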
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, GPL-3.0 license

-__version__ = "8.0.31"
+__version__ = "8.0.32"

 from ultralytics.yolo.engine.model import YOLO
 from ultralytics.yolo.utils import ops
@@ -12,7 +12,7 @@ from ultralytics.hub.utils import HUB_API_ROOT, check_dataset_disk_space, smart_
 from ultralytics.yolo.utils import is_colab, threaded, LOGGER, emojis, PREFIX
 from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params

-AGENT_NAME = (f"python-{__version__}-colab" if is_colab() else f"python-{__version__}-local")
+AGENT_NAME = f"python-{__version__}-colab" if is_colab() else f"python-{__version__}-local"
 session = None

@@ -95,7 +95,8 @@ class HubTrainingSession:

 if data.get("status", None) == "trained":
 raise ValueError(
-emojis(f"Model trained. View model at https://hub.ultralytics.com/models/{self.model_id} 🚀"))
+emojis(f"Model is already trained and uploaded to "
+f"https://hub.ultralytics.com/models/{self.model_id} 🚀"))

 if not data.get("data", None):
 raise ValueError("Dataset may still be processing. Please wait a minute and try again.") # RF fix
@@ -190,5 +190,4 @@ class Traces:

-
 # Run below code on hub/utils init -------------------------------------------------------------------------------------

 traces = Traces()
@@ -49,19 +49,19 @@ CLI_HELP_MSG = \
 GitHub: https://github.com/ultralytics/ultralytics
 """

-CFG_FLOAT_KEYS = {'warmup_epochs', 'box', 'cls', 'dfl'}
+CFG_FLOAT_KEYS = {'warmup_epochs', 'box', 'cls', 'dfl', 'degrees', 'shear'}
 CFG_FRACTION_KEYS = {
 'dropout', 'iou', 'lr0', 'lrf', 'momentum', 'weight_decay', 'warmup_momentum', 'warmup_bias_lr', 'fl_gamma',
-'label_smoothing', 'hsv_h', 'hsv_s', 'hsv_v', 'degrees', 'translate', 'scale', 'shear', 'perspective', 'flipud',
-'fliplr', 'mosaic', 'mixup', 'copy_paste', 'conf', 'iou'}
+'label_smoothing', 'hsv_h', 'hsv_s', 'hsv_v', 'translate', 'scale', 'perspective', 'flipud', 'fliplr', 'mosaic',
+'mixup', 'copy_paste', 'conf', 'iou'}
 CFG_INT_KEYS = {
 'epochs', 'patience', 'batch', 'workers', 'seed', 'close_mosaic', 'mask_ratio', 'max_det', 'vid_stride',
 'line_thickness', 'workspace', 'nbs'}
 CFG_BOOL_KEYS = {
-'save', 'cache', 'exist_ok', 'pretrained', 'verbose', 'deterministic', 'single_cls', 'image_weights', 'rect',
-'cos_lr', 'overlap_mask', 'val', 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show', 'save_txt',
-'save_conf', 'save_crop', 'hide_labels', 'hide_conf', 'visualize', 'augment', 'agnostic_nms', 'retina_masks',
-'boxes', 'keras', 'optimize', 'int8', 'dynamic', 'simplify', 'nms', 'v5loader'}
+'save', 'exist_ok', 'pretrained', 'verbose', 'deterministic', 'single_cls', 'image_weights', 'rect', 'cos_lr',
+'overlap_mask', 'val', 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show', 'save_txt', 'save_conf',
+'save_crop', 'hide_labels', 'hide_conf', 'visualize', 'augment', 'agnostic_nms', 'retina_masks', 'boxes', 'keras',
+'optimize', 'int8', 'dynamic', 'simplify', 'nms', 'v5loader'}


 def cfg2dict(cfg):
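The four key sets above drive type checking of user-supplied config overrides. A rough sketch of how such sets can be used to validate and coerce values follows; `check_cfg_types` is a hypothetical helper for illustration, not the repository's actual check function, and the sets are trimmed for brevity.

```python
# Illustrative sketch: coerce/validate override values against the key sets above.
CFG_FLOAT_KEYS = {'warmup_epochs', 'box', 'cls', 'dfl', 'degrees', 'shear'}
CFG_FRACTION_KEYS = {'dropout', 'iou', 'lr0', 'lrf', 'momentum'}  # trimmed
CFG_INT_KEYS = {'epochs', 'patience', 'batch', 'workers', 'seed'}  # trimmed
CFG_BOOL_KEYS = {'save', 'exist_ok', 'pretrained', 'verbose'}      # trimmed

def check_cfg_types(cfg: dict) -> dict:
    for k, v in cfg.items():
        if k in CFG_FLOAT_KEYS:
            cfg[k] = float(v)
        elif k in CFG_FRACTION_KEYS:
            v = float(v)
            assert 0.0 <= v <= 1.0, f"'{k}={v}' must be a fraction in [0, 1]"
            cfg[k] = v
        elif k in CFG_INT_KEYS:
            cfg[k] = int(v)
        elif k in CFG_BOOL_KEYS:
            cfg[k] = bool(v)
    return cfg

print(check_cfg_types({'epochs': '100', 'lr0': 0.01, 'save': 1}))
# {'epochs': 100, 'lr0': 0.01, 'save': True}
```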
@@ -28,7 +28,6 @@ class BaseDataset(Dataset):
 self,
 img_path,
 imgsz=640,
-label_path=None,
 cache=False,
 augment=True,
 hyp=None,
@@ -42,7 +41,6 @@ class BaseDataset(Dataset):
 super().__init__()
 self.img_path = img_path
 self.imgsz = imgsz
-self.label_path = label_path
 self.augment = augment
 self.single_cls = single_cls
 self.prefix = prefix
@@ -61,7 +61,7 @@ def seed_worker(worker_id):
 random.seed(worker_seed)


-def build_dataloader(cfg, batch_size, img_path, stride=32, rect=False, label_path=None, rank=-1, mode="train"):
+def build_dataloader(cfg, batch, img_path, stride=32, rect=False, names=None, rank=-1, mode="train"):
 assert mode in ["train", "val"]
 shuffle = mode == "train"
 if cfg.rect and shuffle:
@@ -70,9 +70,8 @@ def build_dataloader(cfg, batch_size, img_path, stride=32, rect=False, label_pat
 with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
 dataset = YOLODataset(
 img_path=img_path,
-label_path=label_path,
 imgsz=cfg.imgsz,
-batch_size=batch_size,
+batch_size=batch,
 augment=mode == "train", # augmentation
 hyp=cfg, # TODO: probably add a get_hyps_from_cfg function
 rect=cfg.rect or rect, # rectangular batches
@@ -82,18 +81,19 @@ def build_dataloader(cfg, batch_size, img_path, stride=32, rect=False, label_pat
 pad=0.0 if mode == "train" else 0.5,
 prefix=colorstr(f"{mode}: "),
 use_segments=cfg.task == "segment",
-use_keypoints=cfg.task == "keypoint")
+use_keypoints=cfg.task == "keypoint",
+names=names)

-batch_size = min(batch_size, len(dataset))
+batch = min(batch, len(dataset))
 nd = torch.cuda.device_count() # number of CUDA devices
 workers = cfg.workers if mode == "train" else cfg.workers * 2
-nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers
+nw = min([os.cpu_count() // max(nd, 1), batch if batch > 1 else 0, workers]) # number of workers
 sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)
 loader = DataLoader if cfg.image_weights or cfg.close_mosaic else InfiniteDataLoader # allow attribute updates
 generator = torch.Generator()
 generator.manual_seed(6148914691236517205 + RANK)
 return loader(dataset=dataset,
-batch_size=batch_size,
+batch_size=batch,
 shuffle=shuffle and sampler is None,
 num_workers=nw,
 sampler=sampler,
@@ -14,7 +14,7 @@ from .utils import HELP_URL, LOCAL_RANK, get_hash, img2label_paths, verify_image


 class YOLODataset(BaseDataset):
-cache_version = 1.0 # dataset labels *.cache version, >= 1.0 for YOLOv8
+cache_version = '1.0.1' # dataset labels *.cache version, >= 1.0.0 for YOLOv8
 rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]
 """YOLO Dataset.
 Args:
@@ -22,28 +22,26 @@ class YOLODataset(BaseDataset):
 prefix (str): prefix.
 """

-def __init__(
-self,
-img_path,
-imgsz=640,
-label_path=None,
-cache=False,
-augment=True,
-hyp=None,
-prefix="",
-rect=False,
-batch_size=None,
-stride=32,
-pad=0.0,
-single_cls=False,
-use_segments=False,
-use_keypoints=False,
-):
+def __init__(self,
+img_path,
+imgsz=640,
+cache=False,
+augment=True,
+hyp=None,
+prefix="",
+rect=False,
+batch_size=None,
+stride=32,
+pad=0.0,
+single_cls=False,
+use_segments=False,
+use_keypoints=False,
+names=None):
 self.use_segments = use_segments
 self.use_keypoints = use_keypoints
+self.names = names
 assert not (self.use_segments and self.use_keypoints), "Can not use both segments and keypoints."
-super().__init__(img_path, imgsz, label_path, cache, augment, hyp, prefix, rect, batch_size, stride, pad,
-single_cls)
+super().__init__(img_path, imgsz, cache, augment, hyp, prefix, rect, batch_size, stride, pad, single_cls)

 def cache_labels(self, path=Path("./labels.cache")):
 # Cache dataset labels, check images and read shapes
@@ -56,7 +54,7 @@ class YOLODataset(BaseDataset):
 with ThreadPool(NUM_THREADS) as pool:
 results = pool.imap(func=verify_image_label,
 iterable=zip(self.im_files, self.label_files, repeat(self.prefix),
-repeat(self.use_keypoints)))
+repeat(self.use_keypoints), repeat(len(self.names))))
 pbar = tqdm(results, desc=desc, total=total, bar_format=TQDM_BAR_FORMAT)
 for im_file, lb, shape, segments, keypoint, nm_f, nf_f, ne_f, nc_f, msg in pbar:
 nm += nm_f
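The extra `repeat(len(self.names))` rides along with every work item because `itertools.repeat` pairs a constant with each element produced by `zip`. A small self-contained sketch of that pattern (the file names and prefix are placeholders):

```python
# Sketch: itertools.repeat pairs constant arguments with every work item in zip().
from itertools import repeat
from multiprocessing.pool import ThreadPool

def verify(args):
    im_file, lb_file, prefix, num_cls = args
    return f"{prefix}{im_file}: checked against {num_cls} classes"

im_files = ['a.jpg', 'b.jpg']
lb_files = ['a.txt', 'b.txt']

with ThreadPool(2) as pool:
    for msg in pool.imap(verify, zip(im_files, lb_files, repeat('train: '), repeat(80))):
        print(msg)
```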
@@ -61,7 +61,7 @@ def exif_size(img):

 def verify_image_label(args):
 # Verify one image-label pair
-im_file, lb_file, prefix, keypoint = args
+im_file, lb_file, prefix, keypoint, num_cls = args
 # number (missing, found, empty, corrupt), message, segments, keypoints
 nm, nf, ne, nc, msg, segments, keypoints = 0, 0, 0, 0, "", [], None
 try:
@@ -97,16 +97,20 @@ def verify_image_label(args):
 assert (lb[:, 6::3] <= 1).all(), "non-normalized or out of bounds coordinate labels"
 kpts = np.zeros((lb.shape[0], 39))
 for i in range(len(lb)):
-kpt = np.delete(lb[i, 5:], np.arange(2, lb.shape[1] - 5,
-3))  # remove the occlusion parameter from the GT
+kpt = np.delete(lb[i, 5:], np.arange(2, lb.shape[1] - 5, 3))  # remove occlusion param from GT
 kpts[i] = np.hstack((lb[i, :5], kpt))
 lb = kpts
 assert lb.shape[1] == 39, "labels require 39 columns each after removing occlusion parameter"
 else:
 assert lb.shape[1] == 5, f"labels require 5 columns, {lb.shape[1]} columns detected"
-assert (lb >= 0).all(), f"negative label values {lb[lb < 0]}"
-assert (lb[:, 1:] <=
-1).all(), f"non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}"
+assert (lb[:, 1:] <= 1).all(), \
+f"non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}"
+# All labels
+max_cls = int(lb[:, 0].max())  # max label count
+assert max_cls <= num_cls, \
+f'Label class {max_cls} exceeds dataset class count {num_cls}. ' \
+f'Possible class labels are 0-{num_cls - 1}'
+assert (lb >= 0).all(), f"negative label values {lb[lb < 0]}"
 _, i = np.unique(lb, axis=0, return_index=True)
 if len(i) < nl:  # duplicate row check
 lb = lb[i]  # remove duplicates
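The new assertions bound both the normalized coordinates and the class column against the dataset class count. A standalone sketch of the same checks on a toy label array (an illustration, not the repository function itself):

```python
# Sketch: the added class-range check in isolation. lb holds [class, x, y, w, h] rows
# with normalized coordinates; num_cls is the dataset class count (values are made up).
import numpy as np

lb = np.array([[0, 0.5, 0.5, 0.2, 0.3],
               [2, 0.1, 0.9, 0.4, 0.1]], dtype=np.float32)
num_cls = 3

assert (lb[:, 1:] <= 1).all(), "non-normalized or out of bounds coordinates"
max_cls = int(lb[:, 0].max())
assert max_cls <= num_cls, f"Label class {max_cls} exceeds dataset class count {num_cls}"
assert (lb >= 0).all(), "negative label values"
```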
@@ -192,8 +196,8 @@ def check_det_dataset(dataset, autodownload=True):
 # Download (optional)
 extract_dir = ''
 if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):
-download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False, threads=1)
-data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml'))
+new_dir = safe_download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False)
+data = next((DATASETS_DIR / new_dir).rglob('*.yaml'))
 extract_dir, autodownload = data.parent, False

 # Read yaml (optional)
@@ -203,7 +203,7 @@ class Exporter:
 self.im = im
 self.model = model
 self.file = file
-self.output_shape = tuple(y.shape) if isinstance(y, torch.Tensor) else (x.shape for x in y)
+self.output_shape = tuple(y.shape) if isinstance(y, torch.Tensor) else tuple(tuple(x.shape) for x in y)
 self.pretty_name = self.file.stem.replace('yolo', 'YOLO')
 self.metadata = {
 'description': f"Ultralytics {self.pretty_name} model trained on {self.model.args['data']}",
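The `output_shape` change matters for the log line a couple of hunks below: a bare generator expression has no useful string form, so materialising nested tuples is what makes the printed shapes readable. A tiny sketch with made-up shapes:

```python
# Sketch: why tuple(tuple(x.shape) for x in y) beats a bare generator for logging.
shapes = [(1, 84, 8400), (1, 32, 160, 160)]  # hypothetical multi-output shapes

as_generator = (s for s in shapes)
as_tuple = tuple(tuple(s) for s in shapes)

print(f"output shape(s) {as_generator}")  # -> output shape(s) <generator object ...>
print(f"output shape(s) {as_tuple}")      # -> output shape(s) ((1, 84, 8400), (1, 32, 160, 160))
```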
@@ -213,8 +213,8 @@ class Exporter:
 'stride': int(max(model.stride)),
 'names': model.names} # model metadata

-LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with input shape {tuple(im.shape)} and "
-f"output shape {self.output_shape} ({file_size(file):.1f} MB)")
+LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with input shape {tuple(im.shape)} BCHW and "
+f"output shape(s) {self.output_shape} ({file_size(file):.1f} MB)")

 # Exports
 f = [''] * len(fmts) # exported filenames
@@ -234,19 +234,22 @@ class Exporter:
 nms = False
 f[5], s_model = self._export_saved_model(nms=nms or self.args.agnostic_nms or tfjs,
 agnostic_nms=self.args.agnostic_nms or tfjs)
-if pb or tfjs:  # pb prerequisite to tfjs
-f[6], _ = self._export_pb(s_model)
-if tflite or edgetpu:
-f[7], _ = self._export_tflite(s_model,
-int8=self.args.int8 or edgetpu,
-data=self.args.data,
-nms=nms,
-agnostic_nms=self.args.agnostic_nms)
-if edgetpu:
-f[8], _ = self._export_edgetpu()
-self._add_tflite_metadata(f[8] or f[7], num_outputs=len(self.output_shape))
-if tfjs:
-f[9], _ = self._export_tfjs()
+debug = False
+if debug:
+if pb or tfjs:  # pb prerequisite to tfjs
+f[6], _ = self._export_pb(s_model)
+if tflite or edgetpu:
+f[7], _ = self._export_tflite(s_model,
+int8=self.args.int8 or edgetpu,
+data=self.args.data,
+nms=nms,
+agnostic_nms=self.args.agnostic_nms)
+if edgetpu:
+f[8], _ = self._export_edgetpu()
+self._add_tflite_metadata(f[8] or f[7], num_outputs=len(self.output_shape))
+if tfjs:
+f[9], _ = self._export_tfjs()
 if paddle:  # PaddlePaddle
 f[10], _ = self._export_paddle()

@@ -120,7 +120,7 @@ class BaseValidator:
 if not pt:
 self.args.rect = False
 self.dataloader = self.dataloader or \
-self.get_dataloader(self.data.get("val") or self.data.set("test"), self.args.batch)
+self.get_dataloader(self.data.get("val") or self.data.get("test"), self.args.batch)

 model.eval()
 model.warmup(imgsz=(1 if pt else self.args.batch, 3, imgsz, imgsz)) # warmup
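The one-character fix above matters because plain dicts have a `get` method but no `set` method, so the old fallback raised instead of returning the test split. A minimal illustration with a made-up data dict:

```python
# Sketch: dict.get() returns None for a missing key; dict.set() does not exist.
data = {"test": "path/to/test"}

print(data.get("val") or data.get("test"))  # -> path/to/test
try:
    data.get("val") or data.set("test")     # old code path
except AttributeError as e:
    print(e)                                # 'dict' object has no attribute 'set'
```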
@@ -39,6 +39,7 @@ def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):
 for f in zipObj.namelist(): # list all archived filenames in the zip
 if all(x not in f for x in exclude):
 zipObj.extract(f, path=path)
+return zipObj.namelist()[0] # return unzip dir


 def safe_download(url,
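`unzip_file` now reports where it extracted to, and `safe_download` forwards that in the next hunk. A simplified sketch of the pattern, which drops the exclude filtering of the real helper and assumes the archive holds a single top-level folder:

```python
# Simplified sketch (not the real unzip_file): extract a zip and report its top-level folder.
from zipfile import ZipFile

def unzip_and_locate(file, path="."):
    with ZipFile(file) as z:
        z.extractall(path)      # the real helper extracts selectively, skipping excluded names
        return z.namelist()[0]  # first entry, e.g. 'coco6/', names the unzip dir

# usage: directory = unzip_and_locate('coco6.zip', path='datasets')
```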
@@ -112,13 +113,14 @@ def safe_download(url,
 unzip_dir = dir or f.parent # unzip to dir if provided else unzip in place
 LOGGER.info(f'Unzipping {f} to {unzip_dir}...')
 if f.suffix == '.zip':
-unzip_file(file=f, path=unzip_dir) # unzip
+unzip_dir = unzip_file(file=f, path=unzip_dir) # unzip
 elif f.suffix == '.tar':
 subprocess.run(['tar', 'xf', f, '--directory', unzip_dir], check=True) # unzip
 elif f.suffix == '.gz':
 subprocess.run(['tar', 'xfz', f, '--directory', unzip_dir], check=True) # unzip
 if delete:
 f.unlink() # remove zip
+return unzip_dir


 def attempt_download_asset(file, repo='ultralytics/assets', release='v0.0.0'):
@@ -41,7 +41,7 @@ class DetectionTrainer(BaseTrainer):
 shuffle=mode == "train",
 seed=self.args.seed)[0] if self.args.v5loader else \
 build_dataloader(self.args, batch_size, img_path=dataset_path, stride=gs, rank=rank, mode=mode,
-rect=mode == "val")[0]
+rect=mode == "val", names=self.data['names'])[0]

 def preprocess_batch(self, batch):
 batch["img"] = batch["img"].to(self.device, non_blocking=True).float() / 255
@@ -176,7 +176,8 @@ class DetectionValidator(BaseValidator):
 prefix=colorstr(f'{self.args.mode}: '),
 shuffle=False,
 seed=self.args.seed)[0] if self.args.v5loader else \
-build_dataloader(self.args, batch_size, img_path=dataset_path, stride=gs, mode="val")[0]
+build_dataloader(self.args, batch_size, img_path=dataset_path, stride=gs, names=self.data['names'],
+mode="val")[0]

 def plot_val_samples(self, batch, ni):
 plot_images(batch["img"],