Save optimizer as FP16 for smaller checkpoints (#9435)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Glenn Jocher 2024-03-31 15:11:42 +02:00 committed by GitHub
parent b530a3004e
commit 1703025e8e
3 changed files with 20 additions and 1 deletion


@@ -42,6 +42,7 @@ from ultralytics.utils.files import get_latest_run
 from ultralytics.utils.torch_utils import (
     EarlyStopping,
     ModelEMA,
+    convert_optimizer_state_dict_to_fp16,
     init_seeds,
     one_cycle,
     select_device,
@@ -488,7 +489,7 @@ class BaseTrainer:
             "model": None,  # resume and final checkpoints derive from EMA
             "ema": deepcopy(self.ema.ema).half(),
             "updates": self.ema.updates,
-            "optimizer": self.optimizer.state_dict(),
+            "optimizer": convert_optimizer_state_dict_to_fp16(deepcopy(self.optimizer.state_dict())),
             "train_args": vars(self.args),  # save as dict
             "train_metrics": {**self.metrics, **{"fitness": self.fitness}},
             "train_results": {k.strip(): v for k, v in pd.read_csv(self.csv).to_dict(orient="list").items()},