Return metrics, Update docs (#846)
parent 3633d4c06b, commit 2e7a533ac3
@@ -98,8 +98,8 @@ model = YOLO("yolov8n.yaml") # build a new model from scratch
model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)

# Use the model
results = model.train(data="coco128.yaml", epochs=3)  # train the model
results = model.val()  # evaluate model performance on the validation set
model.train(data="coco128.yaml", epochs=3)  # train the model
metrics = model.val()  # evaluate model performance on the validation set
results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
success = model.export(format="onnx")  # export the model to ONNX format
```
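With `val()` now returning its metrics object, the quickstart snippet can read scores directly. A minimal sketch of doing so, assuming a detection model (the `box` attributes match the detect docs changes further down in this commit):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # load a pretrained model
model.train(data="coco128.yaml", epochs=3)  # train the model
metrics = model.val()  # evaluate model performance on the validation set

print(metrics.box.map)    # mAP50-95
print(metrics.box.map50)  # mAP50
print(metrics.box.maps)   # per-class mAP50-95 array
```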
docs/cfg.md (16 changed lines)
@@ -67,7 +67,7 @@ is important to carefully tune and experiment with these settings to achieve the
task.

| Key | Value | Description |
|-----------------|--------|-----------------------------------------------------------------------------|
|-----------------|--------|--------------------------------------------------------------------------------|
| model | null | path to model file, i.e. yolov8n.pt, yolov8n.yaml |
| data | null | path to data file, i.e. coco128.yaml |
| epochs | 100 | number of epochs to train for |
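A brief sketch of overriding a few of these training keys from Python, assuming the same keyword-override mechanism shown for `data`/`epochs`/`imgsz` elsewhere in this commit; the `dropout` and `val` values here are illustrative, and `dropout` applies to classify training only per the table above:

```python
from ultralytics import YOLO

model = YOLO("yolov8n-cls.pt")  # pretrained classification model
# illustrative overrides of the training keys listed above
model.train(data="mnist160", epochs=100, imgsz=64, dropout=0.2, val=True)
```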
@@ -109,6 +109,7 @@ task.
| mask_ratio | 4 | mask downsample ratio (segment train only) |
| dropout | 0.0 | use dropout regularization (classify train only) |
| val | True | validate/test during training |
| min_memory | False | minimize memory footprint loss function, choices=[False, True, <roll_out_thr>] |

### Prediction

@@ -121,14 +122,19 @@ for. It is important to carefully tune and experiment with these settings to ach
given task.

| Key | Value | Description |
|----------------|----------------------|---------------------------------------------------------|
|----------------|----------------------|----------------------------------------------------------|
| source | 'ultralytics/assets' | source directory for images or videos |
| conf | 0.25 | object confidence threshold for detection |
| iou | 0.7 | intersection over union (IoU) threshold for NMS |
| half | False | use half precision (FP16) |
| device | null | device to run on, i.e. cuda device=0/1/2/3 or device=cpu |
| show | False | show results if possible |
| save_txt | False | save results as .txt file |
| save_conf | False | save results with confidence scores |
| save_crop | False | save cropped images with results |
| hide_labels | False | hide labels |
| hide_conf | False | hide confidence scores |
| max_det | 300 | maximum number of detections per image |
| vid_stride | False | video frame-rate stride |
| line_thickness | 3 | bounding box thickness (pixels) |
| visualize | False | visualize model features |
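A short sketch of overriding a few of these prediction keys, assuming they can be passed as keyword arguments on the model call just like the training overrides (values are illustrative):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # pretrained detection model
# assumed: prediction keys accept keyword overrides, like the training keys
results = model("https://ultralytics.com/images/bus.jpg", conf=0.25, iou=0.7, max_det=300)
```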
@@ -136,6 +142,7 @@ given task.
| agnostic_nms | False | class-agnostic NMS |
| retina_masks | False | use high-resolution segmentation masks |
| classes | null | filter results by class, i.e. class=0, or class=[0,2,3] |
| box | True | Show boxes in segmentation predictions |

### Validation

@@ -148,13 +155,14 @@ is important to carefully tune and experiment with these settings to ensure that
validation dataset and to detect and prevent overfitting.

| Key | Value | Description |
|-------------|-------|-----------------------------------------------------------------------------|
|-------------|-------|-----------------------------------------------------------------|
| save_json | False | save results to JSON file |
| save_hybrid | False | save hybrid version of labels (labels + additional predictions) |
| conf | 0.001 | object confidence threshold for detection (default 0.25 predict, 0.001 val) |
| conf | 0.001 | object confidence threshold for detection |
| iou | 0.6 | intersection over union (IoU) threshold for NMS |
| max_det | 300 | maximum number of detections per image |
| half | True | use half precision (FP16) |
| device | null | device to run on, i.e. cuda device=0/1/2/3 or device=cpu |
| dnn | False | use OpenCV DNN for ONNX inference |
| plots | False | show plots during training |
| rect | False | support rectangular evaluation |
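Similarly, a hedged sketch of overriding validation keys, assuming `val()` accepts them as keyword arguments:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# assumed: validation keys such as conf, iou and half accept keyword overrides
metrics = model.val(data="coco128.yaml", conf=0.001, iou=0.6, half=True)
```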
docs/cli.md (64 changed lines)
@@ -35,25 +35,10 @@ the [Configuration](cfg.md) page.

!!! example ""

    === "CLI"

        ```bash
        yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640
        yolo detect train resume model=last.pt  # resume training
        ```

    === "Python"

        ```python
        from ultralytics import YOLO

        # Load a model
        model = YOLO("yolov8n.yaml")  # build a new model from scratch
        model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)

        # Train the model
        results = model.train(data="coco128.yaml", epochs=100, imgsz=640)
        ```

## Val

Validate trained YOLOv8n model accuracy on the COCO128 dataset. No arguments need to be passed as the `model` retains its
@@ -61,78 +46,31 @@ training `data` and arguments as model attributes.

!!! example ""

    === "CLI"

        ```bash
        yolo detect val model=yolov8n.pt  # val official model
        yolo detect val model=path/to/best.pt  # val custom model
        ```

    === "Python"

        ```python
        from ultralytics import YOLO

        # Load a model
        model = YOLO("yolov8n.pt")  # load an official model
        model = YOLO("path/to/best.pt")  # load a custom model

        # Validate the model
        results = model.val()  # no arguments needed, dataset and settings remembered
        ```

## Predict

Use a trained YOLOv8n model to run predictions on images.

!!! example ""

    === "CLI"

        ```bash
        yolo detect predict model=yolov8n.pt source="https://ultralytics.com/images/bus.jpg"  # predict with official model
        yolo detect predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg"  # predict with custom model
        ```

    === "Python"

        ```python
        from ultralytics import YOLO

        # Load a model
        model = YOLO("yolov8n.pt")  # load an official model
        model = YOLO("path/to/best.pt")  # load a custom model

        # Predict with the model
        results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
        ```

## Export

Export a YOLOv8n model to a different format like ONNX, CoreML, etc.

!!! example ""

    === "CLI"

        ```bash
        yolo export model=yolov8n.pt format=onnx  # export official model
        yolo export model=path/to/best.pt format=onnx  # export custom trained model
        ```

    === "Python"

        ```python
        from ultralytics import YOLO

        # Load a model
        model = YOLO("yolov8n.pt")  # load an official model
        model = YOLO("path/to/best.pt")  # load a custom trained model

        # Export the model
        model.export(format="onnx")
        ```

Available YOLOv8 export formats include:

| Format | `format=` | Model |
@@ -20,7 +20,9 @@ The simplest way of simply using YOLOv8 directly in a Python environment.

    === "Resume"
        ```python
        TODO: Resume feature is under development and should be released soon.
        # TODO: Resume feature is under development and should be released soon.
        model = YOLO("last.pt")
        model.train(resume=True)
        ```

!!! example "Val"
@@ -30,7 +30,7 @@ see the [Configuration](../cfg.md) page.
        model = YOLO("yolov8n-cls.pt")  # load a pretrained model (recommended for training)

        # Train the model
        results = model.train(data="mnist160", epochs=100, imgsz=64)
        model.train(data="mnist160", epochs=100, imgsz=64)
        ```
    === "CLI"

@@ -55,7 +55,9 @@ it's training `data` and arguments as model attributes.
        model = YOLO("path/to/best.pt")  # load a custom model

        # Validate the model
        results = model.val()  # no arguments needed, dataset and settings remembered
        metrics = model.val()  # no arguments needed, dataset and settings remembered
        metrics.top1  # top1 accuracy
        metrics.top5  # top5 accuracy
        ```
    === "CLI"

@@ -88,6 +90,7 @@ Use a trained YOLOv8n-cls model to run predictions on images.
        yolo classify predict model=yolov8n-cls.pt source="https://ultralytics.com/images/bus.jpg"  # predict with official model
        yolo classify predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg"  # predict with custom model
        ```
Read more details of `predict` in our [Predict](https://docs.ultralytics.com/predict/) page.

## Export

@@ -30,7 +30,7 @@ the [Configuration](../cfg.md) page.
        model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)

        # Train the model
        results = model.train(data="coco128.yaml", epochs=100, imgsz=640)
        model.train(data="coco128.yaml", epochs=100, imgsz=640)
        ```
    === "CLI"

@@ -55,7 +55,11 @@ training `data` and arguments as model attributes.
        model = YOLO("path/to/best.pt")  # load a custom model

        # Validate the model
        results = model.val()  # no arguments needed, dataset and settings remembered
        metrics = model.val()  # no arguments needed, dataset and settings remembered
        metrics.box.map  # map50-95
        metrics.box.map50  # map50
        metrics.box.map75  # map75
        metrics.box.maps  # a list containing map50-95 of each category
        ```
    === "CLI"

@@ -88,6 +92,7 @@ Use a trained YOLOv8n model to run predictions on images.
        yolo detect predict model=yolov8n.pt source="https://ultralytics.com/images/bus.jpg"  # predict with official model
        yolo detect predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg"  # predict with custom model
        ```
Read more details of `predict` in our [Predict](https://docs.ultralytics.com/predict/) page.

## Export

@@ -30,7 +30,7 @@ arguments see the [Configuration](../cfg.md) page.
        model = YOLO("yolov8n-seg.pt")  # load a pretrained model (recommended for training)

        # Train the model
        results = model.train(data="coco128-seg.yaml", epochs=100, imgsz=640)
        model.train(data="coco128-seg.yaml", epochs=100, imgsz=640)
        ```
    === "CLI"

@@ -55,7 +55,15 @@ retains it's training `data` and arguments as model attributes.
        model = YOLO("path/to/best.pt")  # load a custom model

        # Validate the model
        results = model.val()  # no arguments needed, dataset and settings remembered
        metrics = model.val()  # no arguments needed, dataset and settings remembered
        metrics.box.map  # map50-95(B)
        metrics.box.map50  # map50(B)
        metrics.box.map75  # map75(B)
        metrics.box.maps  # a list containing map50-95(B) of each category
        metrics.seg.map  # map50-95(M)
        metrics.seg.map50  # map50(M)
        metrics.seg.map75  # map75(M)
        metrics.seg.maps  # a list containing map50-95(M) of each category
        ```
    === "CLI"

@@ -88,6 +96,7 @@ Use a trained YOLOv8n-seg model to run predictions on images.
        yolo segment predict model=yolov8n-seg.pt source="https://ultralytics.com/images/bus.jpg"  # predict with official model
        yolo segment predict model=path/to/best.pt source="https://ultralytics.com/images/bus.jpg"  # predict with custom model
        ```
Read more details of `predict` in our [Predict](https://docs.ultralytics.com/predict/) page.

## Export

@@ -168,6 +168,7 @@ class YOLO:

        validator = self.ValidatorClass(args=args)
        validator(model=self.model)
        return validator.metrics

    @smart_inference_mode()
    def export(self, **kwargs):
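With `val()` returning `validator.metrics`, callers now receive the metrics object directly. A minimal sketch, assuming a detection model so the returned object is the `DetMetrics` instance updated below; the alignment of `mean_results()` with `keys` is assumed:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
metrics = model.val()          # DetMetrics instance for a detection model
print(metrics.keys)            # ["metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)"]
print(metrics.mean_results())  # assumed to align with keys: mean precision, recall, mAP50, mAP50-95
```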
@@ -418,6 +418,7 @@ class Metric:
        self.f1 = []  # (nc, )
        self.all_ap = []  # (nc, 10)
        self.ap_class_index = []  # (nc, )
        self.nc = 0

    @property
    def ap50(self):
@@ -459,6 +460,14 @@ class Metric:
        """
        return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0

    @property
    def map75(self):
        """Mean AP@0.75 of all classes.
        Return:
            float.
        """
        return self.all_ap[:, 5].mean() if len(self.all_ap) else 0.0

    @property
    def map(self):
        """Mean AP@0.5:0.95 of all classes.
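The new `map75` reads column 5 of `all_ap`. Given that `all_ap` has shape (nc, 10) and column 0 is AP@0.5 (see `ap50`), the columns are assumed to correspond to the ten COCO-style IoU thresholds 0.50:0.05:0.95, which puts 0.75 at index 5:

```python
import numpy as np

# assumed: the 10 columns of all_ap correspond to IoU thresholds 0.50:0.05:0.95
iou_thresholds = np.linspace(0.5, 0.95, 10)  # [0.50, 0.55, ..., 0.95]
idx = int(np.abs(iou_thresholds - 0.75).argmin())
print(idx)  # 5, so all_ap[:, 5] is AP at IoU 0.75, just as all_ap[:, 0] is AP at IoU 0.50
```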
@@ -475,8 +484,10 @@ class Metric:
        """class-aware result, return p[i], r[i], ap50[i], ap[i]"""
        return self.p[i], self.r[i], self.ap50[i], self.ap[i]

    def get_maps(self, nc):
        maps = np.zeros(nc) + self.map
    @property
    def maps(self):
        """mAP of each class"""
        maps = np.zeros(self.nc) + self.map
        for i, c in enumerate(self.ap_class_index):
            maps[c] = self.ap[i]
        return maps
@@ -500,33 +511,35 @@ class DetMetrics:
        self.save_dir = save_dir
        self.plot = plot
        self.names = names
        self.metric = Metric()
        self.box = Metric()

    def process(self, tp, conf, pred_cls, target_cls):
        results = ap_per_class(tp, conf, pred_cls, target_cls, plot=self.plot, save_dir=self.save_dir,
                               names=self.names)[2:]
        self.metric.update(results)
        self.box.nc = len(self.names)
        self.box.update(results)

    @property
    def keys(self):
        return ["metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)"]

    def mean_results(self):
        return self.metric.mean_results()
        return self.box.mean_results()

    def class_result(self, i):
        return self.metric.class_result(i)
        return self.box.class_result(i)

    def get_maps(self, nc):
        return self.metric.get_maps(nc)
    @property
    def maps(self):
        return self.box.maps

    @property
    def fitness(self):
        return self.metric.fitness()
        return self.box.fitness()

    @property
    def ap_class_index(self):
        return self.metric.ap_class_index
        return self.box.ap_class_index

    @property
    def results_dict(self):
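A short sketch of reading the per-class values now exposed through the `maps` property; `names` is assumed to map class indices to class names, as set in `__init__`:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
metrics = model.val()  # DetMetrics for a detection model
# classes not present in ap_class_index keep the overall mAP as their default (np.zeros(nc) + self.map)
for i, c in enumerate(metrics.box.ap_class_index):
    print(metrics.names[c], metrics.box.maps[c])
```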
@@ -539,8 +552,8 @@ class SegmentMetrics:
        self.save_dir = save_dir
        self.plot = plot
        self.names = names
        self.metric_box = Metric()
        self.metric_mask = Metric()
        self.box = Metric()
        self.seg = Metric()

    def process(self, tp_m, tp_b, conf, pred_cls, target_cls):
        results_mask = ap_per_class(tp_m,
@@ -551,7 +564,8 @@ class SegmentMetrics:
                                    save_dir=self.save_dir,
                                    names=self.names,
                                    prefix="Mask")[2:]
        self.metric_mask.update(results_mask)
        self.seg.nc = len(self.names)
        self.seg.update(results_mask)
        results_box = ap_per_class(tp_b,
                                   conf,
                                   pred_cls,
@@ -560,7 +574,8 @@ class SegmentMetrics:
                                   save_dir=self.save_dir,
                                   names=self.names,
                                   prefix="Box")[2:]
        self.metric_box.update(results_box)
        self.box.nc = len(self.names)
        self.box.update(results_box)

    @property
    def keys(self):
@@ -569,22 +584,23 @@ class SegmentMetrics:
                "metrics/precision(M)", "metrics/recall(M)", "metrics/mAP50(M)", "metrics/mAP50-95(M)"]

    def mean_results(self):
        return self.metric_box.mean_results() + self.metric_mask.mean_results()
        return self.box.mean_results() + self.seg.mean_results()

    def class_result(self, i):
        return self.metric_box.class_result(i) + self.metric_mask.class_result(i)
        return self.box.class_result(i) + self.seg.class_result(i)

    def get_maps(self, nc):
        return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)
    @property
    def maps(self):
        return self.box.maps + self.seg.maps

    @property
    def fitness(self):
        return self.metric_mask.fitness() + self.metric_box.fitness()
        return self.seg.fitness() + self.box.fitness()

    @property
    def ap_class_index(self):
        # boxes and masks have the same ap_class_index
        return self.metric_box.ap_class_index
        return self.box.ap_class_index

    @property
    def results_dict(self):
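For segmentation models the same pattern exposes two `Metric` objects, with `(B)` keys for boxes and `(M)` keys for masks. A short sketch mirroring the segmentation docs changes above:

```python
from ultralytics import YOLO

model = YOLO("yolov8n-seg.pt")  # pretrained segmentation model
metrics = model.val()           # SegmentMetrics
print(metrics.box.map)  # box mAP50-95, the (B) keys
print(metrics.seg.map)  # mask mAP50-95, the (M) keys
print(metrics.fitness)  # seg fitness + box fitness, per the new fitness property
```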