Fix torch.cuda.amp.GradScaler warning (#15978)

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Laughing, 2024-09-03 18:03:01 +08:00, committed by GitHub
parent 5a6db149e4
commit 7c7f456710
2 changed files with 5 additions and 1 deletion

ultralytics/engine/trainer.py

@@ -42,6 +42,7 @@ from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_m
 from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
 from ultralytics.utils.files import get_latest_run
 from ultralytics.utils.torch_utils import (
+    TORCH_2_4,
     EarlyStopping,
     ModelEMA,
     autocast,
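
The new TORCH_2_4 symbol is imported from ultralytics.utils.torch_utils (the second changed file, whose diff is not shown here). As a rough sketch only, assuming the flag is a plain boolean derived from torch.__version__ (illustrative, not the library's actual definition):

import torch

# Illustrative: True on torch >= 2.4.0; strips any local suffix such as "+cu121".
TORCH_2_4 = tuple(int(v) for v in torch.__version__.split("+")[0].split(".")[:2]) >= (2, 4)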
@@ -265,7 +266,9 @@ class BaseTrainer:
         if RANK > -1 and world_size > 1:  # DDP
             dist.broadcast(self.amp, src=0)  # broadcast the tensor from rank 0 to all other ranks (returns None)
         self.amp = bool(self.amp)  # as boolean
-        self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp)
+        self.scaler = (
+            torch.amp.GradScaler("cuda", enabled=self.amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=self.amp)
+        )
         if world_size > 1:
             self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
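
For context on the warning being fixed: starting with torch 2.4, constructing torch.cuda.amp.GradScaler(...) emits a deprecation warning pointing to the device-agnostic torch.amp.GradScaler("cuda", ...) API, which is why the patch gates on TORCH_2_4. A minimal standalone sketch of the same pattern inside a scaled training step (the model, optimizer, and data below are placeholders, not from this PR; running it requires a CUDA device):

import torch
import torch.nn as nn

TORCH_2_4 = tuple(int(v) for v in torch.__version__.split("+")[0].split(".")[:2]) >= (2, 4)

model = nn.Linear(10, 1).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
amp = True  # mixed precision enabled

# Same construction as the patch: new API on torch >= 2.4, legacy API otherwise.
scaler = (
    torch.amp.GradScaler("cuda", enabled=amp)
    if TORCH_2_4
    else torch.cuda.amp.GradScaler(enabled=amp)
)

x, y = torch.randn(4, 10, device="cuda"), torch.randn(4, 1, device="cuda")
with torch.autocast("cuda", enabled=amp):  # run the forward pass in mixed precision
    loss = nn.functional.mse_loss(model(x), y)
scaler.scale(loss).backward()  # scale the loss so fp16 gradients do not underflow
scaler.step(optimizer)         # unscales gradients, then calls optimizer.step()
scaler.update()                # adapt the scale factor for the next iteration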