Save optimizer as FP16 for smaller checkpoints (#9435)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Glenn Jocher 2024-03-31 15:11:42 +02:00 committed by GitHub
parent b530a3004e
commit 1703025e8e
3 changed files with 20 additions and 1 deletion

@@ -505,6 +505,20 @@ def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "") -> None:
LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB")
def convert_optimizer_state_dict_to_fp16(state_dict):
"""
Converts the state_dict of a given optimizer to FP16, focusing on the 'state' key for tensor conversions.
This method aims to reduce storage size without altering 'param_groups' as they contain non-tensor data.
"""
for state in state_dict["state"].values():
for k, v in state.items():
if isinstance(v, torch.Tensor) and v.dtype is torch.float32:
state[k] = v.half()
return state_dict
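
Below is a minimal, self-contained usage sketch (an illustration assumed for this note, not code from this diff): it builds a toy model and optimizer so the optimizer holds FP32 state tensors, then converts that state to FP16 before saving, which is how the new helper reduces checkpoint size.

    # Usage sketch (assumption, not part of this commit): populate an optimizer's
    # FP32 state, convert it to FP16, and save a smaller checkpoint.
    import torch

    model = torch.nn.Linear(10, 10)                       # toy model for illustration
    optimizer = torch.optim.Adam(model.parameters())
    loss = model(torch.randn(4, 10)).sum()
    loss.backward()
    optimizer.step()                                      # creates FP32 state tensors (exp_avg, exp_avg_sq)

    state_dict = convert_optimizer_state_dict_to_fp16(optimizer.state_dict())
    torch.save({"optimizer": state_dict}, "last.pt")      # optimizer state now stored as FP16
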
def profile(input, ops, n=10, device=None):
    """
    Ultralytics speed, memory and FLOPs profiler.