ultralytics 8.0.31 updates and fixes (#857)

Co-authored-by: Yonghye Kwon <developer.0hye@gmail.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Kalen Michael <kalenmike@gmail.com>
Glenn Jocher 2023-02-08 03:27:59 +04:00 committed by GitHub
parent 2e7a533ac3
commit f5d003d05a
9 changed files with 285 additions and 131 deletions

@@ -24,6 +24,10 @@ LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable
RANK = int(os.getenv('RANK', -1))
WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
+TORCH_1_9 = check_version(torch.__version__, '1.9.0')
+TORCH_1_11 = check_version(torch.__version__, '1.11.0')
+TORCH_1_12 = check_version(torch.__version__, '1.12.0')
@contextmanager
def torch_distributed_zero_first(local_rank: int):
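
The hunk above replaces repeated inline check_version() calls with module-level booleans evaluated once at import time. A minimal sketch of what these flags amount to, using packaging.version for the comparison (this is not the actual ultralytics check_version(), which also supports names, pinned versions and warnings; _at_least is a hypothetical helper):

```python
# Sketch only: approximate the new module-level version flags.
from packaging import version

import torch


def _at_least(minimum: str) -> bool:  # hypothetical helper, not in ultralytics
    # True when the installed torch release is >= the given minimum version
    return version.parse(torch.__version__) >= version.parse(minimum)


TORCH_1_9 = _at_least('1.9.0')
TORCH_1_11 = _at_least('1.11.0')
TORCH_1_12 = _at_least('1.12.0')
```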
@@ -36,10 +40,10 @@ def torch_distributed_zero_first(local_rank: int):
        dist.barrier(device_ids=[0])

-def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):
+def smart_inference_mode():
    # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator
    def decorate(fn):
-        return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)
+        return (torch.inference_mode if TORCH_1_9 else torch.no_grad)()(fn)
    return decorate
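
smart_inference_mode() now reads the cached TORCH_1_9 flag instead of evaluating check_version() in its signature. It is a decorator factory: calling it returns a decorator that runs the wrapped function under torch.inference_mode() on torch>=1.9.0 and under torch.no_grad() otherwise. A usage sketch (the model and predict function are placeholders; the import path is an assumption based on the 8.0.x layout):

```python
import torch
import torch.nn as nn

# Import path is an assumption; smart_inference_mode is defined in the hunk above.
from ultralytics.yolo.utils.torch_utils import smart_inference_mode

model = nn.Linear(4, 2).eval()  # placeholder model


@smart_inference_mode()
def predict(x):
    # Autograd is disabled here (inference_mode on torch>=1.9.0, no_grad otherwise)
    return model(x)


out = predict(torch.randn(1, 4))
print(out.requires_grad)  # False: no gradient graph was recorded
```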
@@ -49,7 +53,7 @@ def DDP_model(model):
    assert not check_version(torch.__version__, '1.12.0', pinned=True), \
        'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \
        'Please upgrade or downgrade torch to use DDP. See https://github.com/ultralytics/yolov5/issues/8395'
-    if check_version(torch.__version__, '1.11.0'):
+    if TORCH_1_11:
        return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True)
    else:
        return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
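
DDP_model() keeps the hard assert against the broken torch==1.12.0/torchvision==0.13.0 combination and now branches on TORCH_1_11: static_graph=True (available since torch 1.11) lets DistributedDataParallel skip the unused-parameter search when the graph does not change between iterations. A rough sketch of how the wrapper would be called from a process launched with torchrun (the process-group setup is illustrative context, and the import path is an assumption):

```python
import os

import torch
import torch.distributed as dist
import torch.nn as nn

# Import path is an assumption; DDP_model is defined in the hunk above.
from ultralytics.yolo.utils.torch_utils import DDP_model

LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))

# Typical per-process setup under `torchrun --nproc_per_node=N train.py`
dist.init_process_group(backend='nccl' if torch.cuda.is_available() else 'gloo')
torch.cuda.set_device(LOCAL_RANK)

model = nn.Linear(10, 2).to(LOCAL_RANK)  # placeholder model
ddp = DDP_model(model)  # static_graph=True is passed automatically on torch>=1.11
```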
@@ -267,7 +271,7 @@ def init_seeds(seed=0, deterministic=False):
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # for Multi-GPU, exception safe
    # torch.backends.cudnn.benchmark = True  # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287
-    if deterministic and check_version(torch.__version__, '1.12.0'):  # https://github.com/ultralytics/yolov5/pull/8213
+    if deterministic and TORCH_1_12:  # https://github.com/ultralytics/yolov5/pull/8213
        torch.use_deterministic_algorithms(True)
        torch.backends.cudnn.deterministic = True
        os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'
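
init_seeds() gates the deterministic path on the cached TORCH_1_12 flag: with deterministic=True and torch>=1.12 it enables torch.use_deterministic_algorithms(True), forces cuDNN into deterministic mode and sets CUBLAS_WORKSPACE_CONFIG as cuBLAS requires. A small sketch of the repeatability this provides, re-seeding before each run (the import path is an assumption; init_seeds is the function patched above):

```python
import torch

# Import path is an assumption; init_seeds is defined in the hunk above.
from ultralytics.yolo.utils.torch_utils import init_seeds

# Re-seed, sample, re-seed with the same arguments, sample again
init_seeds(seed=3, deterministic=True)
a = torch.rand(4)

init_seeds(seed=3, deterministic=True)
b = torch.rand(4)

print(torch.equal(a, b))  # True: the RNG state was reset to the same point
```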