Mirror of https://github.com/THU-MIG/yolov10.git
Fix ONNX GPU inference bug (#6840)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
parent d74a5a9499
commit 0f5406ec21
@@ -101,7 +101,7 @@ class AutoBackend(nn.Module):
 
         # Set device
         cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
-        if cuda and not any([nn_module, pt, jit, engine]):  # GPU dataloader formats
+        if cuda and not any([nn_module, pt, jit, engine, onnx]):  # GPU dataloader formats
             device = torch.device('cpu')
             cuda = False
 
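The guard above falls back to the CPU for any export format that is not whitelisted as GPU-capable, so before this change an ONNX model requested on a CUDA device was silently downgraded to CPU inference. The sketch below only illustrates that fallback logic; the standalone `pick_device` helper and its keyword flags are assumptions for the example, not part of the library's API.

```python
import torch


def pick_device(requested: str, *, nn_module=False, pt=False, jit=False,
                engine=False, onnx=False):
    """Illustrative sketch of the AutoBackend device-fallback check.

    Any format whose flag is missing from the GPU-capable list is forced
    onto the CPU, which is why `onnx` had to be added for ONNX Runtime
    GPU inference to keep its CUDA device.
    """
    device = torch.device(requested)
    cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
    if cuda and not any([nn_module, pt, jit, engine, onnx]):  # GPU dataloader formats
        device = torch.device('cpu')  # format not GPU-capable: fall back to CPU
        cuda = False
    return device, cuda


# Before the fix, `onnx` was absent from the list, so an ONNX model requested
# on 'cuda:0' was rerouted to the CPU even when a CUDA-enabled ONNX Runtime
# provider was available:
# print(pick_device('cuda:0', onnx=True))
```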