mirror of
https://github.com/THU-MIG/yolov10.git
synced 2025-05-23 21:44:22 +08:00
Make optimizer static method inside trainer (#103)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
This commit is contained in:
parent
5c6d11bdb2
commit
8028e2b1b8
@ -142,11 +142,11 @@ class BaseTrainer:
|
|||||||
# Optimizer
|
# Optimizer
|
||||||
self.accumulate = max(round(self.args.nbs / self.batch_size), 1) # accumulate loss before optimizing
|
self.accumulate = max(round(self.args.nbs / self.batch_size), 1) # accumulate loss before optimizing
|
||||||
self.args.weight_decay *= self.batch_size * self.accumulate / self.args.nbs # scale weight_decay
|
self.args.weight_decay *= self.batch_size * self.accumulate / self.args.nbs # scale weight_decay
|
||||||
self.optimizer = build_optimizer(model=self.model,
|
self.optimizer = self.build_optimizer(model=self.model,
|
||||||
name=self.args.optimizer,
|
name=self.args.optimizer,
|
||||||
lr=self.args.lr0,
|
lr=self.args.lr0,
|
||||||
momentum=self.args.momentum,
|
momentum=self.args.momentum,
|
||||||
decay=self.args.weight_decay)
|
decay=self.args.weight_decay)
|
||||||
# Scheduler
|
# Scheduler
|
||||||
if self.args.cos_lr:
|
if self.args.cos_lr:
|
||||||
self.lf = one_cycle(1, self.args.lrf, self.epochs) # cosine 1->hyp['lrf']
|
self.lf = one_cycle(1, self.args.lrf, self.epochs) # cosine 1->hyp['lrf']
|
||||||
@ -459,33 +459,31 @@ class BaseTrainer:
|
|||||||
self.best_fitness = best_fitness
|
self.best_fitness = best_fitness
|
||||||
self.start_epoch = start_epoch
|
self.start_epoch = start_epoch
|
||||||
|
|
||||||
|
@staticmethod
def build_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5):
    """
    Build an optimizer for `model` using the YOLOv5 3-parameter-group
    convention: group 0) weights with decay, 1) normalization-layer
    weights without decay, 2) biases without decay.

    Args:
        model (nn.Module): model whose parameters are collected via
            ``model.modules()``.
        name (str): optimizer name; one of 'Adam', 'AdamW', 'RMSProp', 'SGD'.
        lr (float): initial learning rate.
        momentum (float): momentum for SGD/RMSprop, or beta1 for Adam/AdamW.
        decay (float): weight decay, applied only to group 0 (conv/linear weights).

    Returns:
        torch.optim.Optimizer: optimizer with the three parameter groups attached.

    Raises:
        NotImplementedError: if `name` is not a supported optimizer.
    """
    g = [], [], []  # optimizer parameter groups
    # Collect every nn.*Norm* class so BatchNorm/LayerNorm/... weights are exempt from decay
    bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k)  # normalization layers, i.e. BatchNorm2d()
    for v in model.modules():
        if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):  # bias (no decay)
            g[2].append(v.bias)
        if isinstance(v, bn):  # weight (no decay)
            g[1].append(v.weight)
        elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):  # weight (with decay)
            g[0].append(v.weight)

    # Optimizer is constructed on the bias group; the other groups are added below
    if name == 'Adam':
        optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999))  # adjust beta1 to momentum
    elif name == 'AdamW':
        optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
    elif name == 'RMSProp':
        optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum)
    elif name == 'SGD':
        optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)
    else:
        raise NotImplementedError(f'Optimizer {name} not implemented.')

    optimizer.add_param_group({'params': g[0], 'weight_decay': decay})  # add g0 with weight_decay
    optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0})  # add g1 (BatchNorm2d weights)
    LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups "
                f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias")
    return optimizer
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user