Fix automatic optimizer LR with DDP training (#18842)

This commit is contained in:
Laughing 2025-01-23 20:26:29 +08:00 committed by GitHub
parent e411337778
commit 60d6d011fd
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -271,7 +271,6 @@ class BaseTrainer:
)
if world_size > 1:
self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
self.set_model_attributes() # set again after DDP wrapper
# Check imgsz
gs = max(int(self.model.stride.max() if hasattr(self.model, "stride") else 32), 32) # grid size (max stride)
@@ -782,7 +781,7 @@ class BaseTrainer:
f"ignoring 'lr0={self.args.lr0}' and 'momentum={self.args.momentum}' and "
f"determining best 'optimizer', 'lr0' and 'momentum' automatically... "
)
nc = getattr(model, "nc", 10) # number of classes
nc = self.data.get("nc", 10) # number of classes
lr_fit = round(0.002 * 5 / (4 + nc), 6) # lr0 fit equation to 6 decimal places
name, lr, momentum = ("SGD", 0.01, 0.9) if iterations > 10000 else ("AdamW", lr_fit, 0.9)
self.args.warmup_bias_lr = 0.0 # no higher than 0.01 for Adam