Fix ONNX GPU inference bug (#6840)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
parent
d74a5a9499
commit
0f5406ec21
1 changed file with 1 addition and 1 deletion
@@ -101,7 +101,7 @@ class AutoBackend(nn.Module):
         # Set device
         cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
-        if cuda and not any([nn_module, pt, jit, engine]):  # GPU dataloader formats
+        if cuda and not any([nn_module, pt, jit, engine, onnx]):  # GPU dataloader formats
             device = torch.device('cpu')
             cuda = False
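Before this change, AutoBackend fell back to CPU whenever the loaded model was not one of the in-process PyTorch formats (nn_module, pt, jit) or TensorRT (engine), so ONNX models capable of GPU inference were fed CPU-side dataloading. The one-token fix adds onnx to the GPU-capable list, since ONNX Runtime can run on the GPU via its CUDAExecutionProvider. Below is a minimal standalone sketch of the patched device-selection logic; the helper name select_backend_device is hypothetical and not part of the Ultralytics API.

    import torch

    def select_backend_device(device, nn_module=False, pt=False, jit=False,
                              engine=False, onnx=False):
        # Hypothetical sketch of the patched AutoBackend logic, not the library API.
        # CUDA is usable only when a GPU is present and the caller did not ask for CPU.
        cuda = torch.cuda.is_available() and device.type != 'cpu'
        # These formats can consume GPU tensors directly; onnx is now included
        # because ONNX Runtime supports GPU inference (e.g. CUDAExecutionProvider).
        if cuda and not any([nn_module, pt, jit, engine, onnx]):
            device = torch.device('cpu')  # all other formats get CPU dataloading
            cuda = False
        return device, cuda

With onnx=True and a CUDA device, this sketch now returns the CUDA device instead of silently falling back to CPU, which is the behavior change this commit makes in AutoBackend.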