Fix ONNX GPU inference bug (#6840)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Authored by Awsome on 2023-12-08 01:44:11 +08:00, committed by GitHub
parent d74a5a9499
commit 0f5406ec21

@@ -101,7 +101,7 @@ class AutoBackend(nn.Module):
         # Set device
         cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
-        if cuda and not any([nn_module, pt, jit, engine]):  # GPU dataloader formats
+        if cuda and not any([nn_module, pt, jit, engine, onnx]):  # GPU dataloader formats
             device = torch.device('cpu')
             cuda = False
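
For context on why adding onnx to that list matters, below is a minimal sketch of how the cuda flag feeds ONNX Runtime provider selection further down in AutoBackend. It assumes the onnxruntime-gpu package is installed and uses a placeholder 'model.onnx' path; the flag setup is illustrative stand-in code, not the actual AutoBackend source.

import onnxruntime as ort
import torch

# Illustrative stand-ins for AutoBackend's format flags; only ONNX is active.
nn_module = pt = jit = engine = False
onnx = True

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
if cuda and not any([nn_module, pt, jit, engine, onnx]):  # GPU dataloader formats
    device = torch.device('cpu')
    cuda = False

# With onnx in the list, cuda stays True here, so the session is created
# with the CUDA execution provider. Before the fix, cuda was reset to
# False and the session was built with CPUExecutionProvider only, so ONNX
# models never ran on the GPU even with onnxruntime-gpu installed.
providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']
session = ort.InferenceSession('model.onnx', providers=providers)

In effect, the one-line change treats ONNX like the other GPU-capable backends checked in the same condition rather than like the CPU-only export formats.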