Replace onnx2tf usage from CLI to Python (#8429)

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Author: Andrew
Date:   2024-03-02 13:29:53 -07:00
Committed by: GitHub
parent 28a9d41663
commit e0b8b36967
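In short: the exporter previously assembled an onnx2tf CLI command and ran it through a shell; after this change it calls the onnx2tf Python API directly, so no shell quoting is involved. A minimal before/after sketch of the two invocation styles (file paths are placeholders):

# Before: build a CLI string and run it in a shell
import subprocess
cmd = 'onnx2tf -i "model.onnx" -o "model_saved_model" -nuo --non_verbose'
subprocess.run(cmd, shell=True)

# After: call the library directly
import onnx2tf
onnx2tf.convert(
    input_onnx_file_path="model.onnx",
    output_folder_path="model_saved_model",
    not_use_onnxsim=True,  # equivalent of the -nuo flag
    verbosity="error",  # replaces --non_verbose
)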


@@ -41,6 +41,7 @@ Inference:
         yolov8n.tflite             # TensorFlow Lite
         yolov8n_edgetpu.tflite     # TensorFlow Edge TPU
         yolov8n_paddle_model       # PaddlePaddle
+        yolov8n_ncnn_model         # NCNN

 TensorFlow.js:
     $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
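The docstring hunk above only records that an exported NCNN model loads like the other formats. A hedged usage sketch with the public Ultralytics API, where the model directory and test image are just examples:

from ultralytics import YOLO

model = YOLO("yolov8n_ncnn_model")  # directory produced by a prior NCNN export
results = model("https://ultralytics.com/images/bus.jpg")  # run inference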
@@ -743,10 +744,10 @@ class Exporter:
             verbose=True,
             msg="https://github.com/ultralytics/ultralytics/issues/5161",
         )
+        import onnx2tf
+
         f = Path(str(self.file).replace(self.file.suffix, "_saved_model"))
         if f.is_dir():
-            import shutil
-
             shutil.rmtree(f)  # delete output folder

         # Pre-download calibration file to fix https://github.com/PINTO0309/onnx2tf/issues/545
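onnx2tf writes its output into a SavedModel directory, so the exporter first removes any stale "<model>_saved_model" folder; with the local import gone, shutil is presumably resolved from a module-level import. A standalone sketch of the same cleanup, folder name hypothetical:

from pathlib import Path
import shutil

f = Path("yolov8n_saved_model")  # hypothetical output folder
if f.is_dir():
    shutil.rmtree(f)  # delete stale output so the conversion starts clean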
@@ -760,8 +761,9 @@ class Exporter:
         # Export to TF
         tmp_file = f / "tmp_tflite_int8_calibration_images.npy"  # int8 calibration images file
+        np_data = None
         if self.args.int8:
-            verbosity = "--verbosity info"
+            verbosity = "info"
             if self.args.data:
                 # Generate calibration data for integer quantization
                 LOGGER.info(f"{prefix} collecting INT8 calibration images from 'data={self.args.data}'")
@@ -778,16 +780,20 @@ class Exporter:
                 # mean = images.view(-1, 3).mean(0)  # imagenet mean [123.675, 116.28, 103.53]
                 # std = images.view(-1, 3).std(0)  # imagenet std [58.395, 57.12, 57.375]
                 np.save(str(tmp_file), images.numpy())  # BHWC
-                int8 = f'-oiqt -qt per-tensor -cind images "{tmp_file}" "[[[[0, 0, 0]]]]" "[[[[255, 255, 255]]]]"'
-            else:
-                int8 = "-oiqt -qt per-tensor"
+                np_data = [["images", tmp_file, [[[[0, 0, 0]]]], [[[[255, 255, 255]]]]]]
         else:
-            verbosity = "--non_verbose"
-            int8 = ""
+            verbosity = "error"

-        cmd = f'onnx2tf -i "{f_onnx}" -o "{f}" -nuo {verbosity} {int8}'.strip()
-        LOGGER.info(f"{prefix} running '{cmd}'")
-        subprocess.run(cmd, shell=True)
+        LOGGER.info(f"{prefix} starting TFLite export with onnx2tf {onnx2tf.__version__}...")
+        onnx2tf.convert(
+            input_onnx_file_path=f_onnx,
+            output_folder_path=str(f),
+            not_use_onnxsim=True,
+            verbosity=verbosity,
+            output_integer_quantized_tflite=self.args.int8,
+            quant_type="per-tensor",  # "per-tensor" (faster) or "per-channel" (slower but more accurate)
+            custom_input_op_name_np_data_path=np_data,
+        )

         yaml_save(f / "metadata.yaml", self.metadata)  # add metadata.yaml

         # Remove/rename TFLite models
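For INT8 export, the calibration batch is saved as a BHWC float32 .npy file and handed to onnx2tf.convert via custom_input_op_name_np_data_path, where each entry is [input_op_name, npy_path, mean, std], mirroring the old -cind flag; a mean of 0 and std of 255 effectively rescale 0-255 pixels to 0-1 during calibration. A minimal standalone sketch under those assumptions, with placeholder file names:

import numpy as np
import onnx2tf

# 100 synthetic calibration images in BHWC layout, 0-255 range
images = np.random.randint(0, 256, (100, 640, 640, 3)).astype(np.float32)
np.save("calib.npy", images)

onnx2tf.convert(
    input_onnx_file_path="model.onnx",
    output_folder_path="model_saved_model",
    not_use_onnxsim=True,
    verbosity="info",
    output_integer_quantized_tflite=True,  # emit INT8 .tflite alongside float models
    quant_type="per-tensor",
    custom_input_op_name_np_data_path=[["images", "calib.npy", [[[[0, 0, 0]]]], [[[[255, 255, 255]]]]]],
)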
@@ -884,7 +890,10 @@ class Exporter:
         quantization = "--quantize_float16" if self.args.half else "--quantize_uint8" if self.args.int8 else ""
         with spaces_in_path(f_pb) as fpb_, spaces_in_path(f) as f_:  # exporter can not handle spaces in path
-            cmd = f'tensorflowjs_converter --input_format=tf_frozen_model {quantization} --output_node_names={outputs} "{fpb_}" "{f_}"'
+            cmd = (
+                "tensorflowjs_converter "
+                f'--input_format=tf_frozen_model {quantization} --output_node_names={outputs} "{fpb_}" "{f_}"'
+            )
             LOGGER.info(f"{prefix} running '{cmd}'")
             subprocess.run(cmd, shell=True)
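For reference, both converters are reached through the normal export entry point; a hedged usage sketch (model weights and dataset YAML are just examples):

from ultralytics import YOLO

model = YOLO("yolov8n.pt")
model.export(format="tflite", int8=True, data="coco128.yaml")  # exercises the onnx2tf Python API path
model.export(format="tfjs")  # exercises the tensorflowjs_converter CLI path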