Add support for single export format benchmark (#18740)

Signed-off-by: Muhammad Rizwan Munawar <muhammadrizwanmunawar123@gmail.com>
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com>
Co-authored-by: Laughing-q <1185102784@qq.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Muhammad Rizwan Munawar 2025-01-29 18:17:19 +05:00 committed by GitHub
parent 74b647828e
commit 3274fe7f2b
4 changed files with 24 additions and 4 deletions


@@ -327,6 +327,7 @@ jobs:
yolo train model=yolo11n.pt data=coco8.yaml epochs=1 imgsz=32
yolo val model=yolo11n.pt data=coco8.yaml imgsz=32
yolo export model=yolo11n.pt format=torchscript imgsz=160
yolo benchmark model=yolo11n.pt data='coco8.yaml' imgsz=640 format=onnx
yolo solutions
- name: Test Python
# Note this step must use the updated default bash environment, not a python environment


@@ -72,17 +72,23 @@ Run YOLO11n benchmarks on all supported export formats including ONNX, TensorRT
# Benchmark on GPU
benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, half=False, device=0)
# Benchmark specific export format
benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, format="onnx")
```
=== "CLI"
```bash
yolo benchmark model=yolo11n.pt data='coco8.yaml' imgsz=640 half=False device=0
# Benchmark specific export format
yolo benchmark model=yolo11n.pt data='coco8.yaml' imgsz=640 format=onnx
```
## Arguments
Arguments such as `model`, `data`, `imgsz`, `half`, `device`, and `verbose` provide users with the flexibility to fine-tune the benchmarks to their specific needs and compare the performance of different export formats with ease.
Arguments such as `model`, `data`, `imgsz`, `half`, `device`, `verbose`, and `format` provide users with the flexibility to fine-tune the benchmarks to their specific needs and compare the performance of different export formats with ease; see the sketch after the table below.
| Key | Default Value | Description |
| --------- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -93,10 +99,11 @@ Arguments such as `model`, `data`, `imgsz`, `half`, `device`, and `verbose` prov
| `int8` | `False` | Activates INT8 quantization for further optimized performance on supported devices, especially useful for edge devices. Set `int8=True` to use. |
| `device` | `None` | Defines the computation device(s) for benchmarking, such as `"cpu"` or `"cuda:0"`. |
| `verbose` | `False` | Controls the level of detail in logging output. Set `verbose=True` for detailed logs, or pass a float to set the minimum metric that benchmarks must meet. |
| `format` | `''` | Benchmarks the model on a single export format, e.g. `format=onnx`. |
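For illustration, here is a minimal sketch combining these arguments in one call. The values are examples only, and the import is the `from ultralytics.utils.benchmarks import benchmark` line used with the Python examples earlier on this page:

```python
from ultralytics.utils.benchmarks import benchmark

# Example values only; adjust to your hardware and dataset
benchmark(
    model="yolo11n.pt",  # weights to export and benchmark
    data="coco8.yaml",   # dataset used for the validation step
    imgsz=640,           # inference image size
    half=False,          # FP16 precision disabled
    int8=False,          # INT8 quantization disabled
    device="cpu",        # or device=0 for the first CUDA GPU
    verbose=False,       # True for detailed logs, or a float metric threshold
    format="onnx",       # benchmark only the ONNX export; omit to run all formats
)
```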
## Export Formats
Benchmarks will attempt to run automatically on all possible export formats below.
Benchmarks will attempt to run automatically on all possible export formats listed below. Alternatively, you can benchmark a single format by passing the `format` argument, which accepts any format from the table.
{% include "macros/export-table.md" %}


@@ -661,6 +661,7 @@ class Model(nn.Module):
- int8 (bool): Whether to use int8 precision mode.
- device (str): Device to run the benchmark on (e.g., 'cpu', 'cuda').
- verbose (bool): Whether to print detailed benchmark information.
- format (str): Export format name to restrict benchmarking to a single format, e.g. 'onnx'.
Returns:
(Dict): A dictionary containing the results of the benchmarking process, including metrics for
@@ -686,7 +687,8 @@ class Model(nn.Module):
half=args["half"],
int8=args["int8"],
device=args["device"],
verbose=kwargs.get("verbose"),
verbose=kwargs.get("verbose", False),
format=kwargs.get("format", ""),
)
def export(
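At the user level the new keyword plumbs straight through `Model.benchmark` into the `benchmark()` function. A minimal sketch, assuming the usual `YOLO` entry point and that `data` and `imgsz` are accepted as overrides here just as in the function-level API:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# The format kwarg is forwarded via kwargs.get("format", ""); omit it to benchmark all formats
results = model.benchmark(data="coco8.yaml", imgsz=640, format="onnx")
```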


@@ -57,6 +57,7 @@ def benchmark(
device="cpu",
verbose=False,
eps=1e-3,
format="",
):
"""
Benchmark a YOLO model across different formats for speed and accuracy.
@@ -70,6 +71,7 @@
device (str): Device to run the benchmark on, either 'cpu' or 'cuda'.
verbose (bool | float): If True or a float, assert benchmarks pass with given metric.
eps (float): Epsilon value for divide by zero prevention.
format (str): Export format for benchmarking. If not supplied, all formats are benchmarked.
Returns:
(pandas.DataFrame): A pandas DataFrame with benchmark results for each format, including file size, metric,
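Because the return value is a pandas DataFrame, a single-format run yields a one-row frame that can be inspected directly; a minimal sketch:

```python
from ultralytics.utils.benchmarks import benchmark

df = benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, format="onnx")
print(df)          # with format="onnx", a single row
print(df.iloc[0])  # file size, metric, and inference time for the ONNX export
```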
@@ -94,9 +96,17 @@
y = []
t0 = time.time()
format_arg = format.lower()
if format_arg:
formats = frozenset(export_formats()["Argument"])
assert format_arg in formats, f"Expected format to be one of {formats}, but got '{format_arg}'."
for i, (name, format, suffix, cpu, gpu, _) in enumerate(zip(*export_formats().values())):
emoji, filename = "", None # export defaults
try:
if format_arg and format_arg != format:
continue
# Checks
if i == 7: # TF GraphDef
assert model.task != "obb", "TensorFlow GraphDef not supported for OBB task"
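The format guard added at the top of this hunk fails fast on unknown names before any export work starts; a sketch of the observable behavior:

```python
from ultralytics.utils.benchmarks import benchmark

try:
    # "onnxx" is not a valid entry in export_formats()["Argument"], so the assert trips
    benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, format="onnxx")
except AssertionError as err:
    print(err)  # Expected format to be one of {...}, but got 'onnxx'
```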
@@ -155,10 +165,10 @@
# Validate
data = data or TASK2DATA[model.task] # task to dataset, i.e. coco8.yaml for task=detect
key = TASK2METRIC[model.task] # task to metric, i.e. metrics/mAP50-95(B) for task=detect
results = exported_model.val(
data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, int8=int8, verbose=False
)
key = TASK2METRIC[model.task] # task to metric, i.e. metrics/mAP50-95(B) for task=detect
metric, speed = results.results_dict[key], results.speed["inference"]
fps = round(1000 / (speed + eps), 2) # frames per second
y.append([name, "", round(file_size(filename), 1), round(metric, 4), round(speed, 2), fps])
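For reference, the FPS figure above is simply the inverse of the per-image inference time in milliseconds, with `eps` guarding against division by zero:

```python
# speed is reported in ms per image, so 1000 / speed gives frames per second
speed, eps = 8.0, 1e-3                # e.g. 8 ms per image
fps = round(1000 / (speed + eps), 2)  # -> 124.98
```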