ultralytics 8.0.162 Multi-GPU DDP fix (#4544)

Co-authored-by: Yonghye Kwon <developer.0hye@gmail.com>
Co-authored-by: andresinsitu <andres.rodriguez@ingenieriainsitu.com>
This commit is contained in:
Glenn Jocher 2023-08-24 13:13:49 +02:00 committed by GitHub
parent 1db9afc2e5
commit 2bcee56e70
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 24 additions and 14 deletions

View file

@@ -23,7 +23,7 @@ def _log_scalars(scalars, step=0):
def _log_tensorboard_graph(trainer):
# Log model graph to TensorBoard
"""Log model graph to TensorBoard."""
try:
import warnings
@@ -48,11 +48,16 @@ def on_pretrain_routine_start(trainer):
WRITER = SummaryWriter(str(trainer.save_dir))
prefix = colorstr('TensorBoard: ')
LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
_log_tensorboard_graph(trainer)
except Exception as e:
LOGGER.warning(f'WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}')
def on_train_start(trainer):
    """Log the model graph to TensorBoard once training begins, if a writer was initialized."""
    if not WRITER:  # writer is created in on_pretrain_routine_start; skip if init failed
        return
    _log_tensorboard_graph(trainer)
def on_batch_end(trainer):
    """Logs scalar statistics at the end of a training batch."""
    # Label the running training loss tensor, then write it at the 1-based epoch step.
    labeled_losses = trainer.label_loss_items(trainer.tloss, prefix='train')
    _log_scalars(labeled_losses, trainer.epoch + 1)
@@ -65,5 +70,6 @@ def on_fit_epoch_end(trainer):
# Registry mapping trainer hook names to this module's TensorBoard handlers;
# the trainer looks up and fires these at the corresponding lifecycle points.
callbacks = {
'on_pretrain_routine_start': on_pretrain_routine_start,
'on_train_start': on_train_start,
'on_fit_epoch_end': on_fit_epoch_end,
'on_batch_end': on_batch_end}