mirror of
https://github.com/THU-MIG/yolov10.git
synced 2025-05-23 21:44:22 +08:00
Add verbosity flag for quantization info (#4151)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
This commit is contained in:
parent
6abab4d9ca
commit
620b726fee
@ -301,7 +301,7 @@ class Exporter:
|
|||||||
"""YOLOv8 ONNX export."""
|
"""YOLOv8 ONNX export."""
|
||||||
requirements = ['onnx>=1.12.0']
|
requirements = ['onnx>=1.12.0']
|
||||||
if self.args.simplify:
|
if self.args.simplify:
|
||||||
requirements += ['onnxsim>=0.4.17', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime']
|
requirements += ['onnxsim>=0.4.33', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime']
|
||||||
check_requirements(requirements)
|
check_requirements(requirements)
|
||||||
import onnx # noqa
|
import onnx # noqa
|
||||||
|
|
||||||
@ -572,15 +572,16 @@ class Exporter:
|
|||||||
@try_export
|
@try_export
|
||||||
def export_saved_model(self, prefix=colorstr('TensorFlow SavedModel:')):
|
def export_saved_model(self, prefix=colorstr('TensorFlow SavedModel:')):
|
||||||
"""YOLOv8 TensorFlow SavedModel export."""
|
"""YOLOv8 TensorFlow SavedModel export."""
|
||||||
|
cuda = torch.cuda.is_available()
|
||||||
try:
|
try:
|
||||||
import tensorflow as tf # noqa
|
import tensorflow as tf # noqa
|
||||||
except ImportError:
|
except ImportError:
|
||||||
cuda = torch.cuda.is_available()
|
|
||||||
check_requirements(f"tensorflow{'-macos' if MACOS else '-aarch64' if ARM64 else '' if cuda else '-cpu'}")
|
check_requirements(f"tensorflow{'-macos' if MACOS else '-aarch64' if ARM64 else '' if cuda else '-cpu'}")
|
||||||
import tensorflow as tf # noqa
|
import tensorflow as tf # noqa
|
||||||
check_requirements(('onnx', 'onnx2tf>=1.9.1', 'sng4onnx>=1.0.1', 'onnxsim>=0.4.17', 'onnx_graphsurgeon>=0.3.26',
|
check_requirements(
|
||||||
'tflite_support', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime'),
|
('onnx', 'onnx2tf>=1.15.4', 'sng4onnx>=1.0.1', 'onnxsim>=0.4.33', 'onnx_graphsurgeon>=0.3.26',
|
||||||
cmds='--extra-index-url https://pypi.ngc.nvidia.com')
|
'tflite_support', 'onnxruntime-gpu' if cuda else 'onnxruntime'),
|
||||||
|
cmds='--extra-index-url https://pypi.ngc.nvidia.com') # onnx_graphsurgeon only on NVIDIA
|
||||||
|
|
||||||
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
|
LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...')
|
||||||
f = Path(str(self.file).replace(self.file.suffix, '_saved_model'))
|
f = Path(str(self.file).replace(self.file.suffix, '_saved_model'))
|
||||||
@ -595,6 +596,7 @@ class Exporter:
|
|||||||
# Export to TF
|
# Export to TF
|
||||||
tmp_file = f / 'tmp_tflite_int8_calibration_images.npy' # int8 calibration images file
|
tmp_file = f / 'tmp_tflite_int8_calibration_images.npy' # int8 calibration images file
|
||||||
if self.args.int8:
|
if self.args.int8:
|
||||||
|
verbosity = '--verbosity info'
|
||||||
if self.args.data:
|
if self.args.data:
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
|
||||||
@ -620,9 +622,10 @@ class Exporter:
|
|||||||
else:
|
else:
|
||||||
int8 = '-oiqt -qt per-tensor'
|
int8 = '-oiqt -qt per-tensor'
|
||||||
else:
|
else:
|
||||||
|
verbosity = '--non_verbose'
|
||||||
int8 = ''
|
int8 = ''
|
||||||
|
|
||||||
cmd = f'onnx2tf -i "{f_onnx}" -o "{f}" -nuo --non_verbose {int8}'.strip()
|
cmd = f'onnx2tf -i "{f_onnx}" -o "{f}" -nuo {verbosity} {int8}'.strip()
|
||||||
LOGGER.info(f"{prefix} running '{cmd}'")
|
LOGGER.info(f"{prefix} running '{cmd}'")
|
||||||
subprocess.run(cmd, shell=True)
|
subprocess.run(cmd, shell=True)
|
||||||
yaml_save(f / 'metadata.yaml', self.metadata) # add metadata.yaml
|
yaml_save(f / 'metadata.yaml', self.metadata) # add metadata.yaml
|
||||||
|
Loading…
x
Reference in New Issue
Block a user