ultralytics 8.3.56 PaddlePaddle GPU Inference support (#18468)

Co-authored-by: Laughing-q <1185102784@qq.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Jiacong Fang 2024-12-31 20:57:54 +08:00 committed by GitHub
parent 306c5f1d3e
commit 5d8e15800e
3 changed files with 3 additions and 3 deletions

@@ -133,7 +133,7 @@ class AutoBackend(nn.Module):
 # Set device
 cuda = torch.cuda.is_available() and device.type != "cpu"  # use CUDA
-if cuda and not any([nn_module, pt, jit, engine, onnx]):  # GPU dataloader formats
+if cuda and not any([nn_module, pt, jit, engine, onnx, paddle]):  # GPU dataloader formats
     device = torch.device("cpu")
     cuda = False
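
For context, a minimal sketch of how this change is exercised end to end: export a checkpoint to the Paddle format, then run inference with a CUDA device, which after this fix is no longer forced back to CPU by AutoBackend. The model name and source image below are illustrative, not part of the commit.

from ultralytics import YOLO

# Export a PyTorch checkpoint to PaddlePaddle format (illustrative model name)
model = YOLO("yolo11n.pt")
model.export(format="paddle")  # writes yolo11n_paddle_model/

# Load the exported Paddle model and request GPU 0;
# with paddle in the GPU dataloader formats list, the device is kept as CUDA
paddle_model = YOLO("yolo11n_paddle_model/")
results = paddle_model.predict("https://ultralytics.com/images/bus.jpg", device=0)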