mirror of https://github.com/THU-MIG/yolov10.git
synced 2025-05-23 21:44:22 +08:00

ultralytics 8.0.183 RayTune and yolo checks fixes (#5002)

Co-authored-by: Kapil Raj <103250862+raj-kapil@users.noreply.github.com>
Co-authored-by: Muhammad Rizwan Munawar <62513924+RizwanMunawar@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>

parent 7f78fad8ba
commit 3223e71fea
@@ -189,7 +189,7 @@ Training settings for YOLO models refer to the various hyperparameters and confi
 | `project`    | `None`   | project name |
 | `name`       | `None`   | experiment name |
 | `exist_ok`   | `False`  | whether to overwrite existing experiment |
-| `pretrained` | `True`   | (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str) |
+| `pretrained` | `True`   | (bool \| str) whether to use a pretrained model (bool) or a model to load weights from (str) |
 | `optimizer`  | `'auto'` | optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto] |
 | `verbose`    | `False`  | whether to print verbose output |
 | `seed`       | `0`      | random seed for reproducibility |
@@ -202,7 +202,7 @@ Training settings for YOLO models refer to the various hyperparameters and confi
 | `amp`      | `True`  | Automatic Mixed Precision (AMP) training, choices=[True, False] |
 | `fraction` | `1.0`   | dataset fraction to train on (default is 1.0, all images in train set) |
 | `profile`  | `False` | profile ONNX and TensorRT speeds during training for loggers |
-| `freeze`   | `None`  | (int or list, optional) freeze first n layers, or freeze list of layer indices during training |
+| `freeze`   | `None`  | (int \| list, optional) freeze first n layers, or freeze list of layer indices during training |
 | `lr0`      | `0.01`  | initial learning rate (i.e. SGD=1E-2, Adam=1E-3) |
 | `lrf`      | `0.01`  | final learning rate (lr0 * lrf) |
 | `momentum` | `0.937` | SGD momentum/Adam beta1 |
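For orientation, the settings in the tables above map onto keyword arguments of `model.train()`; a minimal sketch, assuming a placeholder dataset file and epoch count that are not part of this diff:

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')  # pretrained weights
model.train(
    data='coco128.yaml',  # assumed dataset config, replace with your own
    epochs=3,             # assumed short run for illustration
    optimizer='auto',     # one of [SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto]
    pretrained=True,      # bool | str: True, False, or a path to weights
    freeze=10,            # int | list: freeze the first 10 layers
    lr0=0.01,             # initial learning rate
    lrf=0.01,             # final learning rate factor (lr0 * lrf)
    momentum=0.937,       # SGD momentum / Adam beta1
    amp=True,             # Automatic Mixed Precision
    fraction=1.0,         # train on the full dataset
    seed=0,               # reproducibility
    project='runs', name='exp', exist_ok=False,
    verbose=False,
)
```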
@@ -88,7 +88,7 @@ The training settings for YOLO models encompass various hyperparameters and conf
 | `project`    | `None`   | project name |
 | `name`       | `None`   | experiment name |
 | `exist_ok`   | `False`  | whether to overwrite existing experiment |
-| `pretrained` | `True`   | (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str) |
+| `pretrained` | `True`   | (bool \| str) whether to use a pretrained model (bool) or a model to load weights from (str) |
 | `optimizer`  | `'auto'` | optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto] |
 | `verbose`    | `False`  | whether to print verbose output |
 | `seed`       | `0`      | random seed for reproducibility |
@@ -101,7 +101,7 @@ The training settings for YOLO models encompass various hyperparameters and conf
 | `amp`      | `True`  | Automatic Mixed Precision (AMP) training, choices=[True, False] |
 | `fraction` | `1.0`   | dataset fraction to train on (default is 1.0, all images in train set) |
 | `profile`  | `False` | profile ONNX and TensorRT speeds during training for loggers |
-| `freeze`   | `None`  | (int or list, optional) freeze first n layers, or freeze list of layer indices during training |
+| `freeze`   | `None`  | (int \| list, optional) freeze first n layers, or freeze list of layer indices during training |
 | `lr0`      | `0.01`  | initial learning rate (i.e. SGD=1E-2, Adam=1E-3) |
 | `lrf`      | `0.01`  | final learning rate (lr0 * lrf) |
 | `momentum` | `0.937` | SGD momentum/Adam beta1 |
@@ -5,8 +5,8 @@

 <div>
 <p align="center">
-  <img src="https://github.com/RizwanMunawar/ultralytics/assets/62513924/978c8dd4-936d-468e-b41e-1046741ec323" width="45%"/>
-  <img src="https://github.com/RizwanMunawar/ultralytics/assets/62513924/069fd81b-8451-40f3-9f14-709a7ac097ca" width="45%"/>
+  <img src="https://github.com/RizwanMunawar/ultralytics/assets/62513924/5ab3bbd7-fd12-4849-928e-5f294d6c3fcf" width="45%"/>
+  <img src="https://github.com/RizwanMunawar/ultralytics/assets/62513924/e7c1aea7-474d-4d78-8d48-b50854ffe1ca" width="45%"/>

 </p>
 </div>
@@ -42,6 +42,9 @@ After the video begins playing, you can freely move the region anywhere within t
 # If you want to save results
 python yolov8_region_counter.py --source "path/to/video.mp4" --save-img --view-img

+# If you want to run model on CPU
+python yolov8_region_counter.py --source "path/to/video.mp4" --save-img --view-img --device cpu
+
 # If you want to change model file
 python yolov8_region_counter.py --source "path/to/video.mp4" --save-img --weights "path/to/model.pt"

@@ -52,10 +55,12 @@ python yolov8_region_counter.py --source "path/to/video.mp4" --view-img
 ## Usage Options

 - `--source`: Specifies the path to the video file you want to run inference on.
+- `--device`: Specifies the device `cpu` or `0`
 - `--save-img`: Flag to save the detection results as images.
 - `--weights`: Specifies a different YOLOv8 model file (e.g., `yolov8n.pt`, `yolov8s.pt`, `yolov8m.pt`, `yolov8l.pt`, `yolov8x.pt`).
 - `--line-thickness`: Specifies the bounding box thickness
-- `--region-thickness`: Specific the region boxes thickness
+- `--region-thickness`: Specifies the region boxes thickness
+- `--track-thickness`: Specifies the track line thickness

 ## FAQ

@@ -63,11 +68,40 @@

 Region counting is a computational method utilized to ascertain the quantity of objects within a specific area in recorded video or real-time streams. This technique finds frequent application in image processing, computer vision, and pattern recognition, facilitating the analysis and segmentation of objects or features based on their spatial relationships.

-**2. Why Combine Region Counting with YOLOv8?**
+**2. Is Friendly Region Plotting Supported by the Region Counter?**
+
+The Region Counter offers the capability to create regions in various formats, such as polygons and rectangles. You have the flexibility to modify region attributes, including coordinates, colors, and other details, as demonstrated in the following code:
+
+```python
+counting_regions = [
+    {
+        "name": "YOLOv8 Polygon Region",
+        "polygon": Polygon(
+            [(50, 80), (250, 20), (450, 80), (400, 350), (100, 350)]
+        ),  # Polygon with five points (Pentagon)
+        "counts": 0,
+        "dragging": False,
+        "region_color": (255, 42, 4),  # BGR Value
+        "text_color": (255, 255, 255),  # Region Text Color
+    },
+    {
+        "name": "YOLOv8 Rectangle Region",
+        "polygon": Polygon(
+            [(200, 250), (440, 250), (440, 550), (200, 550)]
+        ),  # Rectangle with four points
+        "counts": 0,
+        "dragging": False,
+        "region_color": (37, 255, 225),  # BGR Value
+        "text_color": (0, 0, 0),  # Region Text Color
+    },
+]
+```
+
+**3. Why Combine Region Counting with YOLOv8?**

 YOLOv8 specializes in the detection and tracking of objects in video streams. Region counting complements this by enabling object counting within designated areas, making it a valuable application of YOLOv8.

-**3. How Can I Troubleshoot Issues?**
+**4. How Can I Troubleshoot Issues?**

 To gain more insights during inference, you can include the `--debug` flag in your command:

@@ -75,10 +109,10 @@ To gain more insights during inference, you can include the `--debug` flag in yo
 python yolov8_region_counter.py --source "path to video file" --debug
 ```

-**4. Can I Employ Other YOLO Versions?**
+**5. Can I Employ Other YOLO Versions?**

 Certainly, you have the flexibility to specify different YOLO model weights using the `--weights` option.

-**5. Where Can I Access Additional Information?**
+**6. Where Can I Access Additional Information?**

 For a comprehensive guide on using YOLOv8 with Object Tracking, please refer to [Multi-Object Tracking with Ultralytics YOLO](https://docs.ultralytics.com/modes/track/).
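The regions shown in the FAQ snippet above are plain `shapely` geometries, so the counter's membership test reduces to a point-in-polygon check; a minimal standalone sketch with arbitrary demo coordinates:

```python
from shapely.geometry import Point, Polygon

# Pentagon region from the FAQ snippet above
region = Polygon([(50, 80), (250, 20), (450, 80), (400, 350), (100, 350)])

# (x, y) of a detection's box centre; demo values only
print(region.contains(Point(200, 150)))  # True, inside the pentagon
print(region.contains(Point(10, 10)))    # False, outside
```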
@@ -4,34 +4,37 @@ from pathlib import Path

 import cv2
 import numpy as np
+from shapely.geometry import Polygon
+from shapely.geometry.point import Point

 from ultralytics import YOLO

-track_history = defaultdict(lambda: [])
-
 from ultralytics.utils.files import increment_path
 from ultralytics.utils.plotting import Annotator, colors

-# Region utils
+track_history = defaultdict(lambda: [])
+
 current_region = None
-counting_regions = [{
-    'name': 'YOLOv8 Region A',
-    'roi': (50, 100, 240, 300),
-    'counts': 0,
-    'dragging': False,
-    'region_color': (0, 255, 0)}, {
-    'name': 'YOLOv8 Region B',
-    'roi': (200, 250, 240, 300),
-    'counts': 0,
-    'dragging': False,
-    'region_color': (255, 144, 31)}]
+counting_regions = [
+    {
+        'name': 'YOLOv8 Polygon Region',
+        'polygon': Polygon([(50, 80), (250, 20), (450, 80), (400, 350), (100, 350)]),  # Polygon points
+        'counts': 0,
+        'dragging': False,
+        'region_color': (255, 42, 4),  # BGR Value
+        'text_color': (255, 255, 255)  # Region Text Color
+    },
+    {
+        'name': 'YOLOv8 Rectangle Region',
+        'polygon': Polygon([(200, 250), (440, 250), (440, 550), (200, 550)]),  # Polygon points
+        'counts': 0,
+        'dragging': False,
+        'region_color': (37, 255, 225),  # BGR Value
+        'text_color': (0, 0, 0),  # Region Text Color
+    }, ]


-def is_inside_roi(box, roi):
-    """Compare bbox with region box."""
-    x, y, _, _ = box
-    roi_x, roi_y, roi_w, roi_h = roi
-    return roi_x < x < roi_x + roi_w and roi_y < y < roi_y + roi_h
+def is_inside_polygon(point, polygon):
+    return polygon.contains(Point(point))


 def mouse_callback(event, x, y, flags, param):
@@ -41,18 +44,21 @@ def mouse_callback(event, x, y, flags, param):
     # Mouse left button down event
     if event == cv2.EVENT_LBUTTONDOWN:
         for region in counting_regions:
-            roi_x, roi_y, roi_w, roi_h = region['roi']
-            if roi_x < x < roi_x + roi_w and roi_y < y < roi_y + roi_h:
+            if is_inside_polygon((x, y), region['polygon']):
                 current_region = region
                 current_region['dragging'] = True
-                current_region['offset_x'] = x - roi_x
-                current_region['offset_y'] = y - roi_y
+                current_region['offset_x'] = x
+                current_region['offset_y'] = y

     # Mouse move event
     elif event == cv2.EVENT_MOUSEMOVE:
         if current_region is not None and current_region['dragging']:
-            current_region['roi'] = (x - current_region['offset_x'], y - current_region['offset_y'],
-                                     current_region['roi'][2], current_region['roi'][3])
+            dx = x - current_region['offset_x']
+            dy = y - current_region['offset_y']
+            current_region['polygon'] = Polygon([
+                (p[0] + dx, p[1] + dy) for p in current_region['polygon'].exterior.coords])
+            current_region['offset_x'] = x
+            current_region['offset_y'] = y

     # Mouse left button up event
     elif event == cv2.EVENT_LBUTTONUP:
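As an aside, the drag handler above rebuilds the polygon from its shifted exterior coordinates; `shapely.affinity.translate` expresses the same rigid shift. A small illustrative sketch with assumed delta values, not code from this commit:

```python
from shapely.affinity import translate
from shapely.geometry import Polygon

region_polygon = Polygon([(50, 80), (250, 20), (450, 80), (400, 350), (100, 350)])
dx, dy = 30, -15  # assumed mouse delta since the drag started

# Equivalent to rebuilding the polygon from (p[0] + dx, p[1] + dy) pairs
moved = translate(region_polygon, xoff=dx, yoff=dy)
print(list(moved.exterior.coords)[0])  # (80.0, 65.0)
```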
@@ -60,26 +66,33 @@ def mouse_callback(event, x, y, flags, param):
         current_region['dragging'] = False


-def run(weights='yolov8n.pt',
-        source='test.mp4',
+def run(
+        weights='yolov8n.pt',
+        source=None,
+        device='cpu',
         view_img=False,
         save_img=False,
         exist_ok=False,
         line_thickness=2,
-        region_thickness=2):
+        track_thickness=2,
+        region_thickness=2,
+):
     """
     Run Region counting on a video using YOLOv8 and ByteTrack.

     Supports movable region for real time counting inside specific area.
     Supports multiple regions counting.
+    Regions can be Polygons or rectangle in shape

     Args:
         weights (str): Model weights path.
         source (str): Video file path.
+        device (str): processing device cpu, 0, 1
         view_img (bool): Show results.
         save_img (bool): Save results.
         exist_ok (bool): Overwrite existing files.
         line_thickness (int): Bounding box thickness.
+        track_thickness (int): Tracking line thickness
         region_thickness (int): Region thickness.
     """
     vid_frame_count = 0
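For reference, the widened signature can also be exercised directly from Python rather than through the CLI; a hypothetical call with a placeholder video path:

```python
# Hypothetical direct invocation; 'demo.mp4' is a placeholder path
run(
    weights='yolov8n.pt',
    source='demo.mp4',
    device='cpu',        # or '0' for the first CUDA GPU
    view_img=True,
    save_img=False,
    line_thickness=2,
    track_thickness=2,
    region_thickness=4,
)
```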
@@ -90,6 +103,7 @@ def run(weights='yolov8n.pt',

     # Setup Model
     model = YOLO(f'{weights}')
+    model.to('cuda') if device == '0' else model.to('cpu')

     # Video setup
     videocapture = cv2.VideoCapture(source)
@@ -122,40 +136,43 @@
                 label = str(names[cls])
                 xyxy = (x - w / 2), (y - h / 2), (x + w / 2), (y + h / 2)

-                # Bounding box
+                # Bounding box plot
                 bbox_color = colors(cls, True)
                 annotator.box_label(xyxy, label, color=bbox_color)

-                # Tracking Lines
+                # Tracking Lines plot
                 track = track_history[track_id]
                 track.append((float(x), float(y)))
                 if len(track) > 30:
                     track.pop(0)
                 points = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
-                cv2.polylines(frame, [points], isClosed=False, color=bbox_color, thickness=line_thickness)
+                cv2.polylines(frame, [points], isClosed=False, color=bbox_color, thickness=track_thickness)

-                # Check If detection inside region
+                # Check if detection inside region
                 for region in counting_regions:
-                    if is_inside_roi(box, region['roi']):
+                    if is_inside_polygon((x, y), region['polygon']):
                         region['counts'] += 1

-        # Draw region boxes
+        # Draw regions (Polygons/Rectangles)
         for region in counting_regions:
             region_label = str(region['counts'])
-            roi_x, roi_y, roi_w, roi_h = region['roi']
             region_color = region['region_color']
-            center_x = roi_x + roi_w // 2
-            center_y = roi_y + roi_h // 2
-            text_margin = 15
+            region_text_color = region['text_color']

-            # Region plotting
-            cv2.rectangle(frame, (roi_x, roi_y), (roi_x + roi_w, roi_y + roi_h), region_color, region_thickness)
-            t_size, _ = cv2.getTextSize(region_label, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.0, thickness=line_thickness)
-            text_x = center_x - t_size[0] // 2 - text_margin
-            text_y = center_y + t_size[1] // 2 + text_margin
-            cv2.rectangle(frame, (text_x - text_margin, text_y - t_size[1] - text_margin),
-                          (text_x + t_size[0] + text_margin, text_y + text_margin), region_color, -1)
-            cv2.putText(frame, region_label, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), line_thickness)
+            polygon_coords = np.array(region['polygon'].exterior.coords, dtype=np.int32)
+            centroid_x, centroid_y = int(region['polygon'].centroid.x), int(region['polygon'].centroid.y)
+
+            text_size, _ = cv2.getTextSize(region_label,
+                                           cv2.FONT_HERSHEY_SIMPLEX,
+                                           fontScale=0.7,
+                                           thickness=line_thickness)
+            text_x = centroid_x - text_size[0] // 2
+            text_y = centroid_y + text_size[1] // 2
+            cv2.rectangle(frame, (text_x - 5, text_y - text_size[1] - 5), (text_x + text_size[0] + 5, text_y + 5),
+                          region_color, -1)
+            cv2.putText(frame, region_label, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.7, region_text_color,
+                        line_thickness)
+            cv2.polylines(frame, [polygon_coords], isClosed=True, color=region_color, thickness=region_thickness)

         if view_img:
             if vid_frame_count == 1:
@@ -182,12 +199,15 @@ def parse_opt():
     """Parse command line arguments."""
     parser = argparse.ArgumentParser()
     parser.add_argument('--weights', type=str, default='yolov8n.pt', help='initial weights path')
+    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
     parser.add_argument('--source', type=str, required=True, help='video file path')
     parser.add_argument('--view-img', action='store_true', help='show results')
     parser.add_argument('--save-img', action='store_true', help='save results')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
     parser.add_argument('--line-thickness', type=int, default=2, help='bounding box thickness')
+    parser.add_argument('--track-thickness', type=int, default=2, help='Tracking line thickness')
     parser.add_argument('--region-thickness', type=int, default=4, help='Region thickness')

     return parser.parse_args()

@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = '8.0.182'
+__version__ = '8.0.183'

 from ultralytics.models import RTDETR, SAM, YOLO
 from ultralytics.models.fastsam import FastSAM
@@ -493,9 +493,15 @@ def collect_system_info():
                 f"{'CPU':<20}{get_cpu_info()}\n"
                 f"{'CUDA':<20}{torch.version.cuda if torch and torch.cuda.is_available() else None}\n")

-    for r in parse_requirements():
+    if (ROOT.parent / 'requirements.txt').exists():  # pip install
+        requirements = parse_requirements()
+    else:  # git install
+        from pkg_resources import get_distribution
+        requirements = get_distribution('ultralytics').requires()
+
+    for r in requirements:
         current = version(r.name)
-        is_met = '✅ ' if check_version(current, r.specifier) else '❌ '
+        is_met = '✅ ' if check_version(current, str(r.specifier)) else '❌ '
         LOGGER.info(f'{r.name:<20}{is_met}{current}{r.specifier}')

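The per-requirement loop above boils down to asking whether the installed version satisfies the specifier; a standalone sketch of that idea using only `importlib.metadata` and `packaging`, not the ultralytics helpers from the diff:

```python
from importlib.metadata import PackageNotFoundError, version

from packaging.specifiers import SpecifierSet


def meets(name: str, spec: str = '') -> bool:
    """Return True if installed package `name` satisfies `spec`, e.g. '>=1.22.2'."""
    try:
        current = version(name)
    except PackageNotFoundError:
        return False  # not installed at all
    # An empty SpecifierSet accepts any version, mirroring unpinned requirements
    return SpecifierSet(spec).contains(current, prereleases=True)


print(meets('numpy', '>=1.22.2'))  # e.g. True on a typical install
```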
@@ -124,7 +124,7 @@ def run_ray_tune(model,
     tuner_callbacks = [WandbLoggerCallback(project='YOLOv8-tune')] if wandb else []

     # Create the Ray Tune hyperparameter search tuner
-    tune_dir = get_save_dir(DEFAULT_CFG, name='tune')
+    tune_dir = get_save_dir(DEFAULT_CFG, name='tune').resolve()  # must be absolute dir
     tune_dir.mkdir(parents=True, exist_ok=True)
     tuner = tune.Tuner(trainable_with_resources,
                        param_space=space,
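The single change here makes the tuning directory absolute before it is handed to Ray Tune, as the in-line comment notes; a tiny sketch of the difference, with an arbitrary directory name:

```python
from pathlib import Path

tune_dir = Path('runs/tune')             # relative, meaning depends on the process CWD
print(tune_dir.is_absolute())            # False
print(tune_dir.resolve().is_absolute())  # True, anchored at the current working directory
```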