From 60d6d011fd65d87ef47ae54673ca8cfcd06ef40b Mon Sep 17 00:00:00 2001
From: Laughing <61612323+Laughing-q@users.noreply.github.com>
Date: Thu, 23 Jan 2025 20:26:29 +0800
Subject: [PATCH] Fix automatic optimizer LR with DDP training (#18842)

---
 ultralytics/engine/trainer.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/ultralytics/engine/trainer.py b/ultralytics/engine/trainer.py
index 2c083d1e..8c3c9d77 100644
--- a/ultralytics/engine/trainer.py
+++ b/ultralytics/engine/trainer.py
@@ -271,7 +271,6 @@ class BaseTrainer:
         )
         if world_size > 1:
             self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
-            self.set_model_attributes()  # set again after DDP wrapper
 
         # Check imgsz
         gs = max(int(self.model.stride.max() if hasattr(self.model, "stride") else 32), 32)  # grid size (max stride)
@@ -782,7 +781,7 @@ class BaseTrainer:
                 f"ignoring 'lr0={self.args.lr0}' and 'momentum={self.args.momentum}' and "
                 f"determining best 'optimizer', 'lr0' and 'momentum' automatically... "
             )
-            nc = getattr(model, "nc", 10)  # number of classes
+            nc = self.data.get("nc", 10)  # number of classes
             lr_fit = round(0.002 * 5 / (4 + nc), 6)  # lr0 fit equation to 6 decimal places
             name, lr, momentum = ("SGD", 0.01, 0.9) if iterations > 10000 else ("AdamW", lr_fit, 0.9)
             self.args.warmup_bias_lr = 0.0  # no higher than 0.01 for Adam
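
Note: a minimal sketch of why the old lookup broke under DDP. The TinyModule
class and the nn.Sequential stand-in below are illustrative only, not part of
the patch: nn.parallel.DistributedDataParallel does not forward arbitrary
attributes such as `nc` to the wrapped module, so once the model was wrapped,
`getattr(model, "nc", 10)` silently returned the fallback of 10 and skewed the
auto-fitted lr0. Reading `nc` from the dataset dict sidesteps the wrapper.

    import torch.nn as nn

    class TinyModule(nn.Module):
        """Illustrative stand-in for a detection model carrying an `nc` attribute."""

        def __init__(self, nc: int = 80):
            super().__init__()
            self.nc = nc  # set by set_model_attributes() in the real trainer
            self.linear = nn.Linear(8, nc)

    model = TinyModule(nc=80)
    print(getattr(model, "nc", 10))  # 80: attribute visible on the bare module

    # Real DDP needs an initialized process group, so emulate the wrapping with
    # a plain container; like DDP, it does not expose the child's `nc`.
    wrapped = nn.Sequential(model)
    print(getattr(wrapped, "nc", 10))  # 10: the buggy fallback after wrapping

    # The fix reads nc from the dataset dict, which DDP never touches:
    data = {"nc": 80}  # illustrative; self.data in the trainer
    nc = data.get("nc", 10)
    lr_fit = round(0.002 * 5 / (4 + nc), 6)  # 0.000119 vs 0.000714 with nc=10

With the lr0 fit equation 0.002 * 5 / (4 + nc), the silent fallback to nc=10
yields lr0=0.000714 instead of the correct 0.000119 for an 80-class dataset,
roughly a 6x overshoot, which is why the fix sources nc from self.data rather
than from the (possibly DDP-wrapped) model.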