From ccda7ff973cfae1c6d8ece8d7ecce34d2917988d Mon Sep 17 00:00:00 2001
From: Olivier Jolly
Date: Thu, 17 Oct 2024 02:53:14 +0200
Subject: [PATCH] Fix Triton inference without explicit metadata (#16938)

Co-authored-by: UltralyticsAssistant
Co-authored-by: Glenn Jocher
---
 ultralytics/nn/autobackend.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py
index 3d9fbe24..7d4dbb8c 100644
--- a/ultralytics/nn/autobackend.py
+++ b/ultralytics/nn/autobackend.py
@@ -126,7 +126,7 @@ class AutoBackend(nn.Module):
         fp16 &= pt or jit or onnx or xml or engine or nn_module or triton  # FP16
         nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCWH)
         stride = 32  # default stride
-        model, metadata = None, None
+        model, metadata, task = None, None, None
 
         # Set device
         cuda = torch.cuda.is_available() and device.type != "cpu"  # use CUDA
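
Note on the change (illustrative, not part of the patch): the patch title indicates that Triton inference failed when the model exposed no explicit metadata; since the fix is to pre-initialize task to None alongside model and metadata, the failure was presumably that task was only assigned inside the metadata-parsing branch of AutoBackend.__init__ and was left undefined otherwise. The sketch below shows the pattern only; resolve_task and the "detect" default are hypothetical names used for illustration, not the actual AutoBackend code.

    def resolve_task(metadata=None):
        # Initialize up front so task always exists, even when no metadata is supplied
        # (e.g. a Triton endpoint that ships no model config). Mirrors the patched line:
        #   model, metadata, task = None, None, None
        task = None

        if metadata:  # only runs when the backend provides explicit metadata
            task = metadata.get("task")

        if task is None:  # no metadata: fall back to a default ("detect" is assumed here)
            task = "detect"
        return task

    print(resolve_task())                     # -> "detect"
    print(resolve_task({"task": "segment"}))  # -> "segment"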