Unset CUBLAS_WORKSPACE_CONFIG for non-deterministic training and inference (#19138)
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
parent 31e96e20d3 · commit ff1a04609f
5 changed files with 16 additions and 3 deletions
@@ -128,7 +128,6 @@ torch.set_printoptions(linewidth=320, precision=4, profile="default")
 np.set_printoptions(linewidth=320, formatter={"float_kind": "{:11.5g}".format})  # format short g, %precision=5
 cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
 os.environ["NUMEXPR_MAX_THREADS"] = str(NUM_THREADS)  # NumExpr max threads
-os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"  # for deterministic training to avoid CUDA warning
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # suppress verbose TF compiler warnings in Colab
 os.environ["TORCH_CPP_LOG_LEVEL"] = "ERROR"  # suppress "NNPACK.cpp could not initialize NNPACK" warnings
 os.environ["KINETO_LOG_LEVEL"] = "5"  # suppress verbose PyTorch profiler output when computing FLOPs
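Context for the removed line (not part of the diff): PyTorch requires CUBLAS_WORKSPACE_CONFIG to be set (":4096:8" or ":16:8") whenever torch.use_deterministic_algorithms(True) is active on CUDA 10.2 or newer, otherwise cuBLAS operations raise a RuntimeError. The sketch below illustrates gating the variable on the deterministic flag instead of exporting it unconditionally at import time; the helper name configure_cublas_workspace is hypothetical and not part of this commit.

```python
import os

import torch


def configure_cublas_workspace(deterministic: bool) -> None:
    """Hypothetical helper: only export the cuBLAS workspace setting when determinism is requested."""
    if deterministic:
        # cuBLAS needs ":4096:8" (or ":16:8") when torch.use_deterministic_algorithms(True) runs on CUDA >= 10.2.
        os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
        torch.use_deterministic_algorithms(True, warn_only=True)
    else:
        # No determinism requested: drop the variable so cuBLAS can use its faster default workspaces.
        os.environ.pop("CUBLAS_WORKSPACE_CONFIG", None)
        torch.use_deterministic_algorithms(False)
```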
@@ -488,8 +488,15 @@ def init_seeds(seed=0, deterministic=False):
         else:
             LOGGER.warning("WARNING ⚠️ Upgrade to torch>=2.0.0 for deterministic training.")
     else:
-        torch.use_deterministic_algorithms(False)
-        torch.backends.cudnn.deterministic = False
+        unset_deterministic()
+
+
+def unset_deterministic():
+    """Unsets all the configurations applied for deterministic training."""
+    torch.use_deterministic_algorithms(False)
+    torch.backends.cudnn.deterministic = False
+    os.environ.pop("CUBLAS_WORKSPACE_CONFIG", None)
+    os.environ.pop("PYTHONHASHSEED", None)
 
 
 class ModelEMA:
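A minimal usage sketch of the new behaviour. Two assumptions not shown in this excerpt: the hunk above lives in ultralytics/utils/torch_utils.py (hence the import path), and init_seeds(deterministic=True) exports CUBLAS_WORKSPACE_CONFIG and PYTHONHASHSEED as in earlier releases.

```python
import os

# Import path is an assumption based on where init_seeds and ModelEMA normally live.
from ultralytics.utils.torch_utils import init_seeds, unset_deterministic

init_seeds(seed=0, deterministic=True)  # assumed to export CUBLAS_WORKSPACE_CONFIG and PYTHONHASHSEED

init_seeds(seed=0, deterministic=False)  # now calls unset_deterministic() internally
assert "CUBLAS_WORKSPACE_CONFIG" not in os.environ
assert "PYTHONHASHSEED" not in os.environ

unset_deterministic()  # can also be called directly, e.g. before non-deterministic inference
```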