Fix TFLite INT8 quant bug (#13082)

Glenn Jocher 2024-05-24 01:00:17 +02:00 committed by GitHub
parent cb99f71728
commit 11623eeb00
4 changed files with 32 additions and 37 deletions
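
Note: of the 4 changed files, only the test file is shown below (apparently tests/test_exports.py; the file name was not preserved in this excerpt). It renames the local variable f to file and corrects a copy-pasted docstring; the INT8 quantization fix itself presumably lives in the exporter code, which is not shown here. As a minimal, hedged sketch of how the affected TFLite INT8 path is exercised through the public YOLO.export API (the model, dataset and image paths below are placeholders, not part of this commit):

from ultralytics import YOLO

# Export with INT8 post-training quantization; `data` points at a small
# calibration dataset used to gather activation ranges during conversion.
file = YOLO("yolov8n.pt").export(format="tflite", int8=True, imgsz=32, data="coco8.yaml")

# Run the exported INT8 TFLite model to confirm inference still works.
YOLO(file)("path/to/image.jpg", imgsz=32)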

@@ -23,22 +23,22 @@ from tests import MODEL, SOURCE

 def test_export_torchscript():
     """Test YOLO exports to TorchScript format."""
-    f = YOLO(MODEL).export(format="torchscript", optimize=False, imgsz=32)
-    YOLO(f)(SOURCE, imgsz=32)  # exported model inference
+    file = YOLO(MODEL).export(format="torchscript", optimize=False, imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference


 def test_export_onnx():
     """Test YOLO exports to ONNX format."""
-    f = YOLO(MODEL).export(format="onnx", dynamic=True, imgsz=32)
-    YOLO(f)(SOURCE, imgsz=32)  # exported model inference
+    file = YOLO(MODEL).export(format="onnx", dynamic=True, imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference


 @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="OpenVINO not supported in Python 3.12")
 @pytest.mark.skipif(not TORCH_1_13, reason="OpenVINO requires torch>=1.13")
 def test_export_openvino():
     """Test YOLO exports to OpenVINO format."""
-    f = YOLO(MODEL).export(format="openvino", imgsz=32)
-    YOLO(f)(SOURCE, imgsz=32)  # exported model inference
+    file = YOLO(MODEL).export(format="openvino", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference


 @pytest.mark.slow
@@ -118,7 +118,7 @@ def test_export_torchscript_matrix(task, dynamic, int8, half, batch):
     ],
 )
 def test_export_coreml_matrix(task, dynamic, int8, half, batch):
-    """Test YOLO exports to TorchScript format."""
+    """Test YOLO exports to CoreML format."""
     file = YOLO(TASK2MODEL[task]).export(
         format="coreml",
         imgsz=32,
@@ -138,8 +138,8 @@ def test_export_coreml_matrix(task, dynamic, int8, half, batch):
 def test_export_coreml():
     """Test YOLO exports to CoreML format."""
     if MACOS:
-        f = YOLO(MODEL).export(format="coreml", imgsz=32)
-        YOLO(f)(SOURCE, imgsz=32)  # model prediction only supported on macOS for nms=False models
+        file = YOLO(MODEL).export(format="coreml", imgsz=32)
+        YOLO(file)(SOURCE, imgsz=32)  # model prediction only supported on macOS for nms=False models
     else:
         YOLO(MODEL).export(format="coreml", nms=True, imgsz=32)
@@ -152,8 +152,8 @@ def test_export_tflite():
     Note TF suffers from install conflicts on Windows and macOS.
     """
     model = YOLO(MODEL)
-    f = model.export(format="tflite", imgsz=32)
-    YOLO(f)(SOURCE, imgsz=32)
+    file = model.export(format="tflite", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)


 @pytest.mark.skipif(True, reason="Test disabled")
@@ -165,8 +165,8 @@ def test_export_pb():
     Note TF suffers from install conflicts on Windows and macOS.
     """
     model = YOLO(MODEL)
-    f = model.export(format="pb", imgsz=32)
-    YOLO(f)(SOURCE, imgsz=32)
+    file = model.export(format="pb", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)


 @pytest.mark.skipif(True, reason="Test disabled as Paddle protobuf and ONNX protobuf requirements conflict.")
@@ -182,5 +182,5 @@ def test_export_paddle():
 @pytest.mark.slow
 def test_export_ncnn():
     """Test YOLO exports to NCNN format."""
-    f = YOLO(MODEL).export(format="ncnn", imgsz=32)
-    YOLO(f)(SOURCE, imgsz=32)  # exported model inference
+    file = YOLO(MODEL).export(format="ncnn", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
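
To exercise the updated export tests locally, a standard pytest invocation works (the path is inferred from the test names above and is not confirmed by this excerpt; adjust as needed):

pytest tests/test_exports.py -v -k "tflite or onnx or torchscript"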