ultralytics 8.3.56 PaddlePaddle GPU Inference support (#18468)
Co-authored-by: Laughing-q <1185102784@qq.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
This commit is contained in:
parent
306c5f1d3e
commit
5d8e15800e
3 changed files with 3 additions and 3 deletions
|
|
@ -133,7 +133,7 @@ class AutoBackend(nn.Module):
|
|||
|
||||
# Set device
|
||||
cuda = torch.cuda.is_available() and device.type != "cpu" # use CUDA
|
||||
if cuda and not any([nn_module, pt, jit, engine, onnx]): # GPU dataloader formats
|
||||
if cuda and not any([nn_module, pt, jit, engine, onnx, paddle]): # GPU dataloader formats
|
||||
device = torch.device("cpu")
|
||||
cuda = False
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue