Fix automatic optimizer LR with DDP training (#18641)
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com>
parent daaebba220
commit d9c374c69b
1 changed file with 1 addition and 0 deletions
ultralytics/engine/trainer.py
@@ -271,6 +271,7 @@ class BaseTrainer:
         )
         if world_size > 1:
             self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
+        self.set_model_attributes()  # set again after DDP wrapper
 
         # Check imgsz
         gs = max(int(self.model.stride.max() if hasattr(self.model, "stride") else 32), 32)  # grid size (max stride)
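Why the one-line change fixes the automatic LR: with optimizer="auto", BaseTrainer derives the initial learning rate from the model's nc (class count) attribute, and set_model_attributes() is what attaches nc, names, and args to the model. Before this commit those attributes were set only on the bare module, but nn.parallel.DistributedDataParallel wraps the model, and nn.Module attribute lookup does not forward custom attributes through a wrapper, so the auto-LR code silently fell back to its default class count under DDP. Re-calling set_model_attributes() after wrapping sets the attributes on the wrapper itself. Below is a minimal sketch of that shadowing behavior, using a plain nn.Module as a hypothetical stand-in for DDP; only BaseTrainer, set_model_attributes, and the diff lines above come from the commit, while Wrapper and the nc value are illustrative assumptions:

import torch.nn as nn

class Wrapper(nn.Module):
    """Hypothetical stand-in for nn.parallel.DistributedDataParallel,
    which likewise stores the wrapped network as a child module."""

    def __init__(self, module):
        super().__init__()
        self.module = module

net = nn.Linear(4, 2)
net.nc = 80  # custom attribute set on the bare model, as set_model_attributes() does

wrapped = Wrapper(net)

# nn.Module.__getattr__ only resolves parameters, buffers, and submodules,
# so an attribute set before wrapping is invisible on the wrapper and a
# guarded lookup falls back to its default:
print(getattr(wrapped, "nc", 10))  # -> 10 (wrong class count)

# The fix's effect: setting the attribute again on the wrapper itself,
# where later lookups actually find it.
wrapped.nc = 80
print(getattr(wrapped, "nc", 10))  # -> 80

With real DDP the behavior is the same: attributes set before wrapping are only reachable via self.model.module, so anything that reads them through self.model, such as the automatic optimizer setup, needs them re-applied to the wrapper, which is exactly what the added line does.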