ultralytics 8.3.56 PaddlePaddle GPU Inference support (#18468)
Co-authored-by: Laughing-q <1185102784@qq.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
parent 306c5f1d3e
commit 5d8e15800e

3 changed files with 3 additions and 3 deletions
ultralytics/__init__.py
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.3.55"
+__version__ = "8.3.56"
 
 import os
ultralytics/engine/exporter.py
@@ -603,7 +603,7 @@ class Exporter:
     @try_export
     def export_paddle(self, prefix=colorstr("PaddlePaddle:")):
         """YOLO Paddle export."""
-        check_requirements(("paddlepaddle", "x2paddle"))
+        check_requirements(("paddlepaddle-gpu" if torch.cuda.is_available() else "paddlepaddle", "x2paddle"))
         import x2paddle  # noqa
         from x2paddle.convert import pytorch2paddle  # noqa
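The export path itself is unchanged; the requirement check now simply installs the GPU build of PaddlePaddle whenever CUDA is visible to torch. A minimal usage sketch, assuming a standard ultralytics install and the yolo11n.pt weights (illustrative, not part of this commit):

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    # export_paddle() runs the CUDA-aware check above, so paddlepaddle-gpu is
    # pulled in automatically on machines where torch.cuda.is_available() is True
    model.export(format="paddle")  # writes a yolo11n_paddle_model/ directory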
ultralytics/nn/autobackend.py
@@ -133,7 +133,7 @@ class AutoBackend(nn.Module):
 
         # Set device
         cuda = torch.cuda.is_available() and device.type != "cpu"  # use CUDA
-        if cuda and not any([nn_module, pt, jit, engine, onnx]):  # GPU dataloader formats
+        if cuda and not any([nn_module, pt, jit, engine, onnx, paddle]):  # GPU dataloader formats
             device = torch.device("cpu")
             cuda = False
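With paddle added to the GPU-capable formats, AutoBackend no longer forces the device back to CPU when a PaddlePaddle export is loaded on a CUDA machine. A short inference sketch, assuming the model was exported as in the previous example (illustrative, not part of this commit):

    from ultralytics import YOLO

    # load the exported PaddlePaddle model; AutoBackend now keeps it on the GPU
    model = YOLO("yolo11n_paddle_model")
    results = model.predict("https://ultralytics.com/images/bus.jpg", device=0)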