Fix HUB session with DDP training (#13103)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: Burhan <62214284+Burhan-Q@users.noreply.github.com>
Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
This commit is contained in:
Laughing 2024-06-24 02:00:34 +08:00 committed by GitHub
parent 68720288d3
commit 169602442c
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 11 additions and 5 deletions

View file

@@ -48,6 +48,7 @@ from ultralytics.utils.torch_utils import (
one_cycle,
select_device,
strip_optimizer,
torch_distributed_zero_first,
)
@@ -127,7 +128,8 @@ class BaseTrainer:
# Model and Dataset
self.model = check_model_file_from_stem(self.args.model) # add suffix, i.e. yolov8n -> yolov8n.pt
self.trainset, self.testset = self.get_dataset()
with torch_distributed_zero_first(RANK): # avoid auto-downloading dataset multiple times
self.trainset, self.testset = self.get_dataset()
self.ema = None
# Optimization utils init
@@ -143,6 +145,9 @@ class BaseTrainer:
self.csv = self.save_dir / "results.csv"
self.plot_idx = [0, 1, 2]
# HUB
self.hub_session = None
# Callbacks
self.callbacks = _callbacks or callbacks.get_default_callbacks()
if RANK in {-1, 0}: