Disable FP16 val on AMP fail and improve AMP checks (#16306)

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com>
Mohammed Yasin 2024-09-18 00:44:56 +08:00 committed by GitHub
parent ba438aea5a
commit 6f2bb65953
2 changed files with 5 additions and 3 deletions

@@ -656,9 +656,10 @@ def check_amp(model):
     def amp_allclose(m, im):
         """All close FP32 vs AMP results."""
-        a = m(im, device=device, verbose=False)[0].boxes.data  # FP32 inference
+        batch = [im] * 8
+        a = m(batch, imgsz=128, device=device, verbose=False)[0].boxes.data  # FP32 inference
         with autocast(enabled=True):
-            b = m(im, device=device, verbose=False)[0].boxes.data  # AMP inference
+            b = m(batch, imgsz=128, device=device, verbose=False)[0].boxes.data  # AMP inference
         del m
         return a.shape == b.shape and torch.allclose(a, b.float(), atol=0.5)  # close to 0.5 absolute tolerance
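
Note: below is a minimal standalone sketch of the FP32-vs-AMP comparison this hunk modifies, for readers outside the Ultralytics codebase. `amp_allclose_sketch`, `net`, and `x` are hypothetical names, not part of the Ultralytics API; a CUDA device is assumed, since autocast targets GPU inference.

# Sketch of the same acceptance test the diff applies: run the model once in
# FP32 and once under AMP autocast, then require equal output shapes and
# agreement within a 0.5 absolute tolerance.
import torch

def amp_allclose_sketch(model, im, atol=0.5):
    """Return True if AMP inference matches FP32 inference within atol."""
    with torch.inference_mode():
        a = model(im)  # FP32 inference
        with torch.cuda.amp.autocast(enabled=True):
            b = model(im)  # AMP inference (outputs may be FP16)
    return a.shape == b.shape and torch.allclose(a, b.float(), atol=atol)

if torch.cuda.is_available():
    net = torch.nn.Conv2d(3, 8, 3, padding=1).cuda().eval()
    x = torch.randn(8, 3, 128, 128, device="cuda")  # batch of 8 at imgsz=128, mirroring the hunk
    print("AMP matches FP32:", amp_allclose_sketch(net, x))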