From 0f5406ec21966a05e46329b6f5b9d912b425589b Mon Sep 17 00:00:00 2001
From: Awsome <1579093407@qq.com>
Date: Fri, 8 Dec 2023 01:44:11 +0800
Subject: [PATCH] Fix ONNX GPU inference bug (#6840)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher
---
 ultralytics/nn/autobackend.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ultralytics/nn/autobackend.py b/ultralytics/nn/autobackend.py
index 596d9bda..da7e8bd4 100644
--- a/ultralytics/nn/autobackend.py
+++ b/ultralytics/nn/autobackend.py
@@ -101,7 +101,7 @@ class AutoBackend(nn.Module):
 
         # Set device
         cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
-        if cuda and not any([nn_module, pt, jit, engine]):  # GPU dataloader formats
+        if cuda and not any([nn_module, pt, jit, engine, onnx]):  # GPU dataloader formats
             device = torch.device('cpu')
             cuda = False
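
For context, here is a minimal sketch of what the one-line change does. `resolve_device` is a hypothetical standalone helper that mirrors the patched guard in `AutoBackend.__init__`, not an ultralytics API; the format flags and example values are illustrative only. Before the patch, `onnx` was missing from the list of GPU-capable formats, so an ONNX model requested on CUDA was silently forced back to CPU.

```python
# Illustrative sketch (not part of the patch): behavior of the corrected guard.
import torch


def resolve_device(device: torch.device, nn_module: bool, pt: bool, jit: bool,
                   engine: bool, onnx: bool) -> tuple:
    """Mimic the patched device-selection guard in AutoBackend.__init__."""
    cuda = torch.cuda.is_available() and device.type != 'cpu'  # use CUDA
    if cuda and not any([nn_module, pt, jit, engine, onnx]):  # GPU dataloader formats
        device = torch.device('cpu')  # fall back to CPU for CPU-only formats
        cuda = False
    return device, cuda


# With the fix, an ONNX model (onnx=True, all other flags False) keeps the
# requested CUDA device; previously it hit the fallback and ran on CPU even
# when CUDA was available.
device, cuda = resolve_device(torch.device('cuda:0'), nn_module=False,
                              pt=False, jit=False, engine=False, onnx=True)
```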