Fix Triton inference without explicit metadata (#16938)
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
This commit is contained in:
parent
1aebe6ffed
commit
ccda7ff973
1 changed file with 1 addition and 1 deletion
@@ -126,7 +126,7 @@ class AutoBackend(nn.Module):
         fp16 &= pt or jit or onnx or xml or engine or nn_module or triton  # FP16
         nhwc = coreml or saved_model or pb or tflite or edgetpu  # BHWC formats (vs torch BCWH)
         stride = 32  # default stride
-        model, metadata = None, None
+        model, metadata, task = None, None, None
 
         # Set device
         cuda = torch.cuda.is_available() and device.type != "cpu"  # use CUDA
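For context on why this one-line change matters: `task` was previously bound only inside the branch that parses model metadata. A model served from a Triton endpoint can arrive without explicit metadata, so that branch is skipped and a later read of `task` would presumably fail with an UnboundLocalError. Below is a minimal, self-contained sketch of that failure pattern and the fix; `resolve_task` is a hypothetical helper, not the actual AutoBackend code.

from typing import Optional


def resolve_task(metadata: Optional[dict]) -> Optional[str]:
    # After the fix: 'task' is pre-initialized alongside 'model', so the name
    # exists on every code path, including the no-metadata Triton path.
    model, task = None, None

    if metadata:  # a Triton-served model may provide no metadata, skipping this branch
        task = metadata["task"]

    # Before the fix, 'task' was only assigned inside the metadata branch, so
    # reading it here without metadata raised:
    # UnboundLocalError: local variable 'task' referenced before assignment
    return task


print(resolve_task(None))                 # None -> caller can fall back to a default
print(resolve_task({"task": "segment"}))  # "segment"

With the pre-initialization in place, a metadata-less Triton load simply leaves `task` as `None`, and downstream code can apply its usual default instead of crashing.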