Fix automatic optimizer LR with DDP training (#18842)
parent e411337778
commit 60d6d011fd
1 changed file with 1 addition and 2 deletions
@@ -271,7 +271,6 @@ class BaseTrainer:
         )
         if world_size > 1:
             self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
-            self.set_model_attributes()  # set again after DDP wrapper

         # Check imgsz
         gs = max(int(self.model.stride.max() if hasattr(self.model, "stride") else 32), 32)  # grid size (max stride)
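Background on why the DDP wrap above interacts with the optimizer change in the next hunk: DistributedDataParallel replaces self.model with a wrapper module, and plain Python attributes such as a detection model's nc are not forwarded by that wrapper, so getattr(model, "nc", 10) silently returns the default once the model is wrapped. A minimal sketch of that behaviour, assuming a single-process gloo group and a toy nn.Linear with an nc attribute (illustrative only, not the trainer's actual code):

import os
import torch.distributed as dist
import torch.nn as nn

# Single-process process group, just so DDP can be constructed for the demo.
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

model = nn.Linear(8, 3)
model.nc = 3  # plain attribute, like a detection model's class count (assumed example)

ddp_model = nn.parallel.DistributedDataParallel(model)

# nn.Module.__getattr__ only resolves parameters, buffers and submodules,
# so the DDP wrapper does not expose the inner model's plain "nc" attribute.
print(getattr(ddp_model, "nc", 10))         # -> 10, the fallback default
print(getattr(ddp_model.module, "nc", 10))  # -> 3, the real value lives on .module

dist.destroy_process_group()

The commit sidesteps this entirely by reading the class count from the dataset dict (self.data) instead of from the possibly wrapped model, as shown in the hunk below.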
@@ -782,7 +781,7 @@ class BaseTrainer:
                 f"ignoring 'lr0={self.args.lr0}' and 'momentum={self.args.momentum}' and "
                 f"determining best 'optimizer', 'lr0' and 'momentum' automatically... "
             )
-            nc = getattr(model, "nc", 10)  # number of classes
+            nc = self.data.get("nc", 10)  # number of classes
             lr_fit = round(0.002 * 5 / (4 + nc), 6)  # lr0 fit equation to 6 decimal places
             name, lr, momentum = ("SGD", 0.01, 0.9) if iterations > 10000 else ("AdamW", lr_fit, 0.9)
             self.args.warmup_bias_lr = 0.0  # no higher than 0.01 for Adam
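As a quick sanity check on the change, the snippet below re-derives the automatic lr0 from the lr_fit equation shown in the diff. The nc=80 value is only an assumed example dataset; the point is that before the fix a DDP run always fell back to the nc=10 default and therefore got the same lr0 regardless of the dataset.

def auto_lr0(nc: int) -> float:
    # lr0 fit equation copied from the diff above
    return round(0.002 * 5 / (4 + nc), 6)

print(auto_lr0(10))  # 0.000714 -> value every DDP run got before the fix (default fallback)
print(auto_lr0(80))  # 0.000119 -> value with the dataset's real class count (assumed nc=80)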