ultralytics 8.0.111 refactored model.loss() method (#2911)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Snyk bot <snyk-bot@snyk.io>
Glenn Jocher 2023-05-30 20:23:30 +02:00 committed by GitHub
parent 305cde69d0
commit fd94d312da
14 changed files with 46 additions and 35 deletions
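The hunks below cover documentation references, the version bump, and `__all__` cleanups; the `model.loss()` refactor named in the title is not itself visible in this excerpt. As orientation only, a hedged sketch of how the refactored entry point is meant to be called, assuming the task model accepts a YOLO-format batch dict and returns a (total_loss, loss_items) pair; the exact signature is not shown here:

```python
# Hedged sketch -- the loss() signature is assumed, not taken from this diff.
from ultralytics import YOLO

model = YOLO('yolov8n.yaml').model  # underlying DetectionModel built from config (no download)
# batch = ...                                  # a YOLO-format training batch dict (not built here)
# total_loss, loss_items = model.loss(batch)   # loss now lives on the model, not on the trainer
```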

@@ -27,6 +27,11 @@ description: Learn how to work with Ultralytics YOLO Detection, Segmentation & C
 :::ultralytics.nn.tasks.ClassificationModel
 <br><br>
+# RTDETRDetectionModel
+---
+:::ultralytics.nn.tasks.RTDETRDetectionModel
+<br><br>
 # Ensemble
 ---
 :::ultralytics.nn.tasks.Ensemble
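The RTDETRDetectionModel entry added above pairs with the RTDETR import that appears later in the ultralytics/__init__.py hunk. A hedged usage sketch; the checkpoint name and predict call are assumptions, so they are left commented:

```python
# Hedged sketch: RTDETRDetectionModel is the task class documented above; RTDETR is
# the user-facing wrapper imported in ultralytics/__init__.py (see that hunk below).
from ultralytics.vit.rtdetr import RTDETR

# model = RTDETR('rtdetr-l.pt')                            # assumed checkpoint name; downloads weights
# results = model.predict('https://ultralytics.com/images/bus.jpg')
```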

@@ -36,3 +36,8 @@ description: 'Ultralytics YOLO Docs: Learn about stream loaders for image and te
 ---
 :::ultralytics.yolo.data.dataloaders.stream_loaders.autocast_list
 <br><br>
+# get_best_youtube_url
+---
+:::ultralytics.yolo.data.dataloaders.stream_loaders.get_best_youtube_url
+<br><br>
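The get_best_youtube_url reference added above is the only trace of that helper in this excerpt. A hedged sketch of its presumed use, mapping a YouTube page URL to a direct stream URL; the signature is an assumption, so the call stays commented:

```python
# Hedged sketch -- signature assumed, not shown in this diff.
from ultralytics.yolo.data.dataloaders.stream_loaders import get_best_youtube_url

# stream_url = get_best_youtube_url('https://www.youtube.com/watch?v=LNwODJXcvt4')  # example URL
# print(stream_url)  # direct stream URL, e.g. for cv2.VideoCapture
```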

@@ -16,3 +16,23 @@ description: Learn about Varifocal Loss and Keypoint Loss in Ultralytics YOLO fo
 ---
 :::ultralytics.yolo.utils.loss.KeypointLoss
 <br><br>
+# v8DetectionLoss
+---
+:::ultralytics.yolo.utils.loss.v8DetectionLoss
+<br><br>
+# v8SegmentationLoss
+---
+:::ultralytics.yolo.utils.loss.v8SegmentationLoss
+<br><br>
+# v8PoseLoss
+---
+:::ultralytics.yolo.utils.loss.v8PoseLoss
+<br><br>
+# v8ClassificationLoss
+---
+:::ultralytics.yolo.utils.loss.v8ClassificationLoss
+<br><br>
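The four reference entries above document the loss criteria at their new home, ultralytics.yolo.utils.loss; the next three hunks remove the per-trainer Loss, PoseLoss and SegLoss pages they replace. A small, hedged check that the new public names resolve; constructor arguments are not shown in this excerpt, so none are passed:

```python
# Hedged sketch: only checks that the relocated loss classes are importable on 8.0.111+.
from ultralytics.yolo.utils import loss as yolo_loss

for name in ('v8DetectionLoss', 'v8SegmentationLoss', 'v8PoseLoss', 'v8ClassificationLoss'):
    print(name, hasattr(yolo_loss, name))  # expected: True for each
```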

@@ -7,11 +7,6 @@ description: Train and optimize custom object detection models with Ultralytics
 :::ultralytics.yolo.v8.detect.train.DetectionTrainer
 <br><br>
-# Loss
----
-:::ultralytics.yolo.v8.detect.train.Loss
-<br><br>
 # train
 ---
 :::ultralytics.yolo.v8.detect.train.train

@@ -7,11 +7,6 @@ description: Boost posture detection using PoseTrainer and train models using tr
 :::ultralytics.yolo.v8.pose.train.PoseTrainer
 <br><br>
-# PoseLoss
----
-:::ultralytics.yolo.v8.pose.train.PoseLoss
-<br><br>
 # train
 ---
 :::ultralytics.yolo.v8.pose.train.train

@@ -7,11 +7,6 @@ description: Learn about SegmentationTrainer and Train in Ultralytics YOLO v8 fo
 :::ultralytics.yolo.v8.segment.train.SegmentationTrainer
 <br><br>
-# SegLoss
----
-:::ultralytics.yolo.v8.segment.train.SegLoss
-<br><br>
 # train
 ---
 :::ultralytics.yolo.v8.segment.train.train

@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-__version__ = '8.0.110'
+__version__ = '8.0.111'
 from ultralytics.hub import start
 from ultralytics.vit.rtdetr import RTDETR
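A quick way to confirm which release is installed after the bump above:

```python
# Prints '8.0.111' once this commit's release is installed.
import ultralytics

print(ultralytics.__version__)
```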

@@ -21,10 +21,9 @@ from .head import Classify, Detect, Pose, RTDETRDecoder, Segment
 from .transformer import (AIFI, MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer, LayerNorm2d,
                           MLPBlock, MSDeformAttn, TransformerBlock, TransformerEncoderLayer, TransformerLayer)
-__all__ = [
-    'Conv', 'Conv2', 'LightConv', 'RepConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', 'GhostConv',
-    'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'TransformerLayer', 'TransformerBlock', 'MLPBlock',
-    'LayerNorm2d', 'DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3', 'C2f', 'C3x', 'C3TR', 'C3Ghost',
-    'GhostBottleneck', 'Bottleneck', 'BottleneckCSP', 'Proto', 'Detect', 'Segment', 'Pose', 'Classify',
-    'TransformerEncoderLayer', 'RepC3', 'RTDETRDecoder', 'AIFI', 'DeformableTransformerDecoder',
-    'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP']
+__all__ = ('Conv', 'Conv2', 'LightConv', 'RepConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus',
+           'GhostConv', 'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'TransformerLayer',
+           'TransformerBlock', 'MLPBlock', 'LayerNorm2d', 'DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3',
+           'C2f', 'C3x', 'C3TR', 'C3Ghost', 'GhostBottleneck', 'Bottleneck', 'BottleneckCSP', 'Proto', 'Detect',
+           'Segment', 'Pose', 'Classify', 'TransformerEncoderLayer', 'RepC3', 'RTDETRDecoder', 'AIFI',
+           'DeformableTransformerDecoder', 'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP')
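This hunk and the remaining module hunks only switch `__all__` from a list literal to a tuple; the exported names are unchanged. A standalone illustration, not taken from the commit, showing that star-imports treat the two forms identically:

```python
# Standalone demo with a throwaway module object; nothing here is ultralytics code.
import sys
import types

demo = types.ModuleType('demo')
demo.__all__ = ('public_fn',)          # tuple form, as adopted in these hunks
demo.public_fn = lambda: 'exported'
demo._private = lambda: 'hidden from *'
sys.modules['demo'] = demo             # register so the star-import below can resolve it

namespace = {}
exec('from demo import *', namespace)  # honors tuple __all__ exactly like a list
print('public_fn' in namespace, '_private' in namespace)  # True False
```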

@@ -10,9 +10,8 @@ import torch.nn.functional as F
 from .conv import Conv, DWConv, GhostConv, LightConv, RepConv
 from .transformer import TransformerBlock
-__all__ = [
-    'DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3', 'C2f', 'C3x', 'C3TR', 'C3Ghost', 'GhostBottleneck',
-    'Bottleneck', 'BottleneckCSP', 'Proto', 'RepC3']
+__all__ = ('DFL', 'HGBlock', 'HGStem', 'SPP', 'SPPF', 'C1', 'C2', 'C3', 'C2f', 'C3x', 'C3TR', 'C3Ghost',
+           'GhostBottleneck', 'Bottleneck', 'BottleneckCSP', 'Proto', 'RepC3')
 class DFL(nn.Module):

@@ -9,9 +9,8 @@ import numpy as np
 import torch
 import torch.nn as nn
-__all__ = [
-    'Conv', 'LightConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', 'GhostConv', 'ChannelAttention',
-    'SpatialAttention', 'CBAM', 'Concat', 'RepConv']
+__all__ = ('Conv', 'LightConv', 'DWConv', 'DWConvTranspose2d', 'ConvTranspose', 'Focus', 'GhostConv',
+           'ChannelAttention', 'SpatialAttention', 'CBAM', 'Concat', 'RepConv')
 def autopad(k, p=None, d=1):  # kernel, padding, dilation

@@ -16,7 +16,7 @@ from .conv import Conv
 from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
 from .utils import bias_init_with_prob, linear_init_
-__all__ = ['Detect', 'Segment', 'Pose', 'Classify', 'RTDETRDecoder']
+__all__ = 'Detect', 'Segment', 'Pose', 'Classify', 'RTDETRDecoder'
 class Detect(nn.Module):

@@ -13,9 +13,8 @@ from torch.nn.init import constant_, xavier_uniform_
 from .conv import Conv
 from .utils import _get_clones, inverse_sigmoid, multi_scale_deformable_attn_pytorch
-__all__ = [
-    'TransformerEncoderLayer', 'TransformerLayer', 'TransformerBlock', 'MLPBlock', 'LayerNorm2d', 'AIFI',
-    'DeformableTransformerDecoder', 'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP']
+__all__ = ('TransformerEncoderLayer', 'TransformerLayer', 'TransformerBlock', 'MLPBlock', 'LayerNorm2d', 'AIFI',
+           'DeformableTransformerDecoder', 'DeformableTransformerDecoderLayer', 'MSDeformAttn', 'MLP')
 class TransformerEncoderLayer(nn.Module):

@@ -12,7 +12,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 from torch.nn.init import uniform_
-__all__ = ['multi_scale_deformable_attn_pytorch', 'inverse_sigmoid']
+__all__ = 'multi_scale_deformable_attn_pytorch', 'inverse_sigmoid'
 def _get_clones(module, n):

@@ -9,7 +9,7 @@ from ultralytics.yolo.data.augment import Compose, Format, LetterBox
 from ultralytics.yolo.utils import colorstr, ops
 from ultralytics.yolo.v8.detect import DetectionValidator
-__all__ = ['RTDETRValidator']
+__all__ = 'RTDETRValidator',  # tuple or list
 # TODO: Temporarily, RT-DETR does not need padding.
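The `# tuple or list` note in the hunk above flags the one subtlety of the parenthesis-free style: the trailing comma is what makes a single export a tuple. A standalone reminder, not from the commit:

```python
# The comma, not parentheses, creates the tuple.
as_tuple = 'RTDETRValidator',    # ('RTDETRValidator',) -- what the hunk above defines
as_string = 'RTDETRValidator'    # plain str; as __all__, a star-import would iterate it per character
print(type(as_tuple).__name__, type(as_string).__name__)  # tuple str
```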