ultralytics 8.0.239 Ultralytics Actions and hub-sdk adoption (#7431)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Burhan <62214284+Burhan-Q@users.noreply.github.com>
Co-authored-by: Kayzwer <68285002+Kayzwer@users.noreply.github.com>
parent e795277391
commit fe27db2f6e
139 changed files with 6870 additions and 5125 deletions
ultralytics/utils/benchmarks.py

@@ -42,13 +42,9 @@ from ultralytics.utils.files import file_size
 from ultralytics.utils.torch_utils import select_device


-def benchmark(model=WEIGHTS_DIR / 'yolov8n.pt',
-              data=None,
-              imgsz=160,
-              half=False,
-              int8=False,
-              device='cpu',
-              verbose=False):
+def benchmark(
+    model=WEIGHTS_DIR / "yolov8n.pt", data=None, imgsz=160, half=False, int8=False, device="cpu", verbose=False
+):
     """
     Benchmark a YOLO model across different formats for speed and accuracy.

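Call sites are unchanged by this reformat. For reference, a minimal usage sketch (downloads yolov8n.pt on first run; coco8.yaml is the detect-task default per TASK2DATA below):

    from ultralytics.utils.benchmarks import benchmark

    # Benchmark YOLOv8n across all export formats on CPU, mirroring the defaults above
    df = benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=160, half=False, int8=False, device="cpu")
    print(df)  # pandas DataFrame: Format, Status, Size (MB), metric, inference time (ms/im)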
@@ -76,6 +72,7 @@ def benchmark(model=WEIGHTS_DIR / 'yolov8n.pt',
     """

     import pandas as pd
+
     pd.options.display.max_columns = 10
     pd.options.display.width = 120
     device = select_device(device, verbose=False)
@@ -85,67 +82,62 @@ def benchmark(model=WEIGHTS_DIR / 'yolov8n.pt',
     y = []
     t0 = time.time()
     for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows():  # index, (name, format, suffix, CPU, GPU)
-        emoji, filename = '❌', None  # export defaults
+        emoji, filename = "❌", None  # export defaults
         try:
-            assert i != 9 or LINUX, 'Edge TPU export only supported on Linux'
+            assert i != 9 or LINUX, "Edge TPU export only supported on Linux"
             if i == 10:
-                assert MACOS or LINUX, 'TF.js export only supported on macOS and Linux'
+                assert MACOS or LINUX, "TF.js export only supported on macOS and Linux"
             elif i == 11:
-                assert sys.version_info < (3, 11), 'PaddlePaddle export only supported on Python<=3.10'
-            if 'cpu' in device.type:
-                assert cpu, 'inference not supported on CPU'
-            if 'cuda' in device.type:
-                assert gpu, 'inference not supported on GPU'
+                assert sys.version_info < (3, 11), "PaddlePaddle export only supported on Python<=3.10"
+            if "cpu" in device.type:
+                assert cpu, "inference not supported on CPU"
+            if "cuda" in device.type:
+                assert gpu, "inference not supported on GPU"

             # Export
-            if format == '-':
+            if format == "-":
                 filename = model.ckpt_path or model.cfg
                 exported_model = model  # PyTorch format
             else:
                 filename = model.export(imgsz=imgsz, format=format, half=half, int8=int8, device=device, verbose=False)
                 exported_model = YOLO(filename, task=model.task)
-            assert suffix in str(filename), 'export failed'
-            emoji = '❎'  # indicates export succeeded
+            assert suffix in str(filename), "export failed"
+            emoji = "❎"  # indicates export succeeded

             # Predict
-            assert model.task != 'pose' or i != 7, 'GraphDef Pose inference is not supported'
-            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
-            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML
-            exported_model.predict(ASSETS / 'bus.jpg', imgsz=imgsz, device=device, half=half)
+            assert model.task != "pose" or i != 7, "GraphDef Pose inference is not supported"
+            assert i not in (9, 10), "inference not supported"  # Edge TPU and TF.js are unsupported
+            assert i != 5 or platform.system() == "Darwin", "inference only supported on macOS>=10.13"  # CoreML
+            exported_model.predict(ASSETS / "bus.jpg", imgsz=imgsz, device=device, half=half)

             # Validate
             data = data or TASK2DATA[model.task]  # task to dataset, i.e. coco8.yaml for task=detect
             key = TASK2METRIC[model.task]  # task to metric, i.e. metrics/mAP50-95(B) for task=detect
-            results = exported_model.val(data=data,
-                                         batch=1,
-                                         imgsz=imgsz,
-                                         plots=False,
-                                         device=device,
-                                         half=half,
-                                         int8=int8,
-                                         verbose=False)
-            metric, speed = results.results_dict[key], results.speed['inference']
-            y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
+            results = exported_model.val(
+                data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, int8=int8, verbose=False
+            )
+            metric, speed = results.results_dict[key], results.speed["inference"]
+            y.append([name, "✅", round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
         except Exception as e:
             if verbose:
-                assert type(e) is AssertionError, f'Benchmark failure for {name}: {e}'
-            LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
+                assert type(e) is AssertionError, f"Benchmark failure for {name}: {e}"
+            LOGGER.warning(f"ERROR ❌️ Benchmark failure for {name}: {e}")
             y.append([name, emoji, round(file_size(filename), 1), None, None])  # mAP, t_inference

     # Print results
     check_yolo(device=device)  # print system info
-    df = pd.DataFrame(y, columns=['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)'])
+    df = pd.DataFrame(y, columns=["Format", "Status❔", "Size (MB)", key, "Inference time (ms/im)"])

     name = Path(model.ckpt_path).name
-    s = f'\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n'
+    s = f"\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n"
     LOGGER.info(s)
-    with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f:
+    with open("benchmarks.log", "a", errors="ignore", encoding="utf-8") as f:
         f.write(s)

     if verbose and isinstance(verbose, float):
         metrics = df[key].array  # values to compare to floor
         floor = verbose  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
-        assert all(x > floor for x in metrics if pd.notna(x)), f'Benchmark failure: metric(s) < floor {floor}'
+        assert all(x > floor for x in metrics if pd.notna(x)), f"Benchmark failure: metric(s) < floor {floor}"

     return df
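A float passed as verbose doubles as a pass/fail gate, per the final assertion above: unexpected (non-assertion) failures are escalated, and every format's metric must clear the floor. A sketch (the 0.26 floor is illustrative):

    from ultralytics.utils.benchmarks import benchmark

    # Fail loudly if any export format scores below 0.26 on the task metric (mAP50-95 for detect)
    benchmark(model="yolov8n.pt", imgsz=160, device="cpu", verbose=0.26)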
@@ -175,15 +167,17 @@ class ProfileModels:
         ```
     """

-    def __init__(self,
-                 paths: list,
-                 num_timed_runs=100,
-                 num_warmup_runs=10,
-                 min_time=60,
-                 imgsz=640,
-                 half=True,
-                 trt=True,
-                 device=None):
+    def __init__(
+        self,
+        paths: list,
+        num_timed_runs=100,
+        num_warmup_runs=10,
+        min_time=60,
+        imgsz=640,
+        half=True,
+        trt=True,
+        device=None,
+    ):
         """
         Initialize the ProfileModels class for profiling models.

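The trailing-comma style changes nothing at call sites; construction stays keyword-based. A usage sketch (profiles ONNX on CPU, plus TensorRT when CUDA is available):

    from ultralytics.utils.benchmarks import ProfileModels

    # Profile one or more models; paths may be files, directories, or glob patterns
    ProfileModels(["yolov8n.pt"], imgsz=640, half=True, trt=True).profile()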
@@ -204,37 +198,32 @@ class ProfileModels:
         self.imgsz = imgsz
         self.half = half
         self.trt = trt  # run TensorRT profiling
-        self.device = device or torch.device(0 if torch.cuda.is_available() else 'cpu')
+        self.device = device or torch.device(0 if torch.cuda.is_available() else "cpu")

     def profile(self):
         """Logs the benchmarking results of a model, checks metrics against floor and returns the results."""
         files = self.get_files()

         if not files:
-            print('No matching *.pt or *.onnx files found.')
+            print("No matching *.pt or *.onnx files found.")
             return

         table_rows = []
         output = []
         for file in files:
-            engine_file = file.with_suffix('.engine')
-            if file.suffix in ('.pt', '.yaml', '.yml'):
+            engine_file = file.with_suffix(".engine")
+            if file.suffix in (".pt", ".yaml", ".yml"):
                 model = YOLO(str(file))
                 model.fuse()  # to report correct params and GFLOPs in model.info()
                 model_info = model.info()
-                if self.trt and self.device.type != 'cpu' and not engine_file.is_file():
-                    engine_file = model.export(format='engine',
-                                               half=self.half,
-                                               imgsz=self.imgsz,
-                                               device=self.device,
-                                               verbose=False)
-                onnx_file = model.export(format='onnx',
-                                         half=self.half,
-                                         imgsz=self.imgsz,
-                                         simplify=True,
-                                         device=self.device,
-                                         verbose=False)
-            elif file.suffix == '.onnx':
+                if self.trt and self.device.type != "cpu" and not engine_file.is_file():
+                    engine_file = model.export(
+                        format="engine", half=self.half, imgsz=self.imgsz, device=self.device, verbose=False
+                    )
+                onnx_file = model.export(
+                    format="onnx", half=self.half, imgsz=self.imgsz, simplify=True, device=self.device, verbose=False
+                )
+            elif file.suffix == ".onnx":
                 model_info = self.get_onnx_model_info(file)
                 onnx_file = file
             else:
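The condensed calls above use the standard model.export() API. The same export standalone, as a sketch (format="engine" needs a CUDA device, so ONNX is shown here):

    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    # ONNX export with graph simplification, matching the profile() call above
    onnx_file = model.export(format="onnx", imgsz=640, simplify=True, verbose=False)
    print(onnx_file)  # path to the exported .onnx file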
@@ -254,14 +243,14 @@ class ProfileModels:
         for path in self.paths:
             path = Path(path)
             if path.is_dir():
-                extensions = ['*.pt', '*.onnx', '*.yaml']
+                extensions = ["*.pt", "*.onnx", "*.yaml"]
                 files.extend([file for ext in extensions for file in glob.glob(str(path / ext))])
-            elif path.suffix in {'.pt', '.yaml', '.yml'}:  # add non-existing
+            elif path.suffix in {".pt", ".yaml", ".yml"}:  # add non-existing
                 files.append(str(path))
             else:
                 files.extend(glob.glob(str(path)))

-        print(f'Profiling: {sorted(files)}')
+        print(f"Profiling: {sorted(files)}")
         return [Path(file) for file in sorted(files)]

     def get_onnx_model_info(self, onnx_file: str):
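get_files resolves paths three ways: directories expand to *.pt/*.onnx/*.yaml, bare model stems are kept even if the file does not exist locally yet, and anything else is treated as a glob pattern. A usage sketch (the example paths are illustrative):

    from ultralytics.utils.benchmarks import ProfileModels

    # Directories, concrete files, and glob patterns may be mixed freely
    profiler = ProfileModels(["weights/", "yolov8n.pt", "runs/*/best.onnx"])
    print(profiler.get_files())  # sorted list of Path objects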
@@ -306,7 +295,7 @@ class ProfileModels:
         run_times = []
         for _ in TQDM(range(num_runs), desc=engine_file):
             results = model(input_data, imgsz=self.imgsz, verbose=False)
-            run_times.append(results[0].speed['inference'])  # Convert to milliseconds
+            run_times.append(results[0].speed["inference"])  # Convert to milliseconds

         run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=3)  # sigma clipping
         return np.mean(run_times), np.std(run_times)
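iterative_sigma_clipping (defined elsewhere in this file) stabilizes the timing statistics by repeatedly discarding samples more than sigma standard deviations from the current mean, which suppresses warm-up spikes. A minimal NumPy sketch of the idea, assuming the same signature:

    import numpy as np

    def iterative_sigma_clipping(data, sigma=2, max_iters=3):
        # Repeatedly keep only samples within sigma std-devs of the current mean
        data = np.array(data)
        for _ in range(max_iters):
            mean, std = np.mean(data), np.std(data)
            clipped = data[(data > mean - sigma * std) & (data < mean + sigma * std)]
            if len(clipped) == len(data):
                break
            data = clipped
        return data

    print(iterative_sigma_clipping([5.1, 5.0, 5.2] * 3 + [9.8]))  # the 9.8 outlier is dropped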
@@ -315,31 +304,31 @@ class ProfileModels:
         """Profiles an ONNX model by executing it multiple times and returns the mean and standard deviation of run
         times.
         """
-        check_requirements('onnxruntime')
+        check_requirements("onnxruntime")
         import onnxruntime as ort

         # Session with either 'TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'
         sess_options = ort.SessionOptions()
         sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
         sess_options.intra_op_num_threads = 8  # Limit the number of threads
-        sess = ort.InferenceSession(onnx_file, sess_options, providers=['CPUExecutionProvider'])
+        sess = ort.InferenceSession(onnx_file, sess_options, providers=["CPUExecutionProvider"])

         input_tensor = sess.get_inputs()[0]
         input_type = input_tensor.type

         # Mapping ONNX datatype to numpy datatype
-        if 'float16' in input_type:
+        if "float16" in input_type:
             input_dtype = np.float16
-        elif 'float' in input_type:
+        elif "float" in input_type:
             input_dtype = np.float32
-        elif 'double' in input_type:
+        elif "double" in input_type:
             input_dtype = np.float64
-        elif 'int64' in input_type:
+        elif "int64" in input_type:
             input_dtype = np.int64
-        elif 'int32' in input_type:
+        elif "int32" in input_type:
             input_dtype = np.int32
         else:
-            raise ValueError(f'Unsupported ONNX datatype {input_type}')
+            raise ValueError(f"Unsupported ONNX datatype {input_type}")

         input_data = np.random.rand(*input_tensor.shape).astype(input_dtype)
         input_name = input_tensor.name
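With the session and a random input of the right dtype prepared, the method's timed loop (outside this hunk) calls the session repeatedly. A sketch of that pattern using the names established above (warm-up/timed counts mirror the constructor defaults):

    import time

    for _ in range(10):  # num_warmup_runs
        sess.run(None, {input_name: input_data})

    run_times = []
    for _ in range(100):  # num_timed_runs
        t0 = time.time()
        sess.run(None, {input_name: input_data})
        run_times.append((time.time() - t0) * 1000)  # milliseconds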
@@ -369,25 +358,26 @@ class ProfileModels:
     def generate_table_row(self, model_name, t_onnx, t_engine, model_info):
         """Generates a formatted string for a table row that includes model performance and metric details."""
         layers, params, gradients, flops = model_info
-        return f'| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± {t_engine[1]:.2f} ms | {params / 1e6:.1f} | {flops:.1f} |'
+        return f"| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± {t_engine[1]:.2f} ms | {params / 1e6:.1f} | {flops:.1f} |"

     def generate_results_dict(self, model_name, t_onnx, t_engine, model_info):
         """Generates a dictionary of model details including name, parameters, GFLOPS and speed metrics."""
         layers, params, gradients, flops = model_info
         return {
-            'model/name': model_name,
-            'model/parameters': params,
-            'model/GFLOPs': round(flops, 3),
-            'model/speed_ONNX(ms)': round(t_onnx[0], 3),
-            'model/speed_TensorRT(ms)': round(t_engine[0], 3)}
+            "model/name": model_name,
+            "model/parameters": params,
+            "model/GFLOPs": round(flops, 3),
+            "model/speed_ONNX(ms)": round(t_onnx[0], 3),
+            "model/speed_TensorRT(ms)": round(t_engine[0], 3),
+        }

     def print_table(self, table_rows):
         """Formats and prints a comparison table for different models with given statistics and performance data."""
-        gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'GPU'
-        header = f'| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>{gpu} TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |'
-        separator = '|-------------|---------------------|--------------------|------------------------------|-----------------------------------|------------------|-----------------|'
+        gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else "GPU"
+        header = f"| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>{gpu} TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |"
+        separator = "|-------------|---------------------|--------------------|------------------------------|-----------------------------------|------------------|-----------------|"

-        print(f'\n\n{header}')
+        print(f"\n\n{header}")
         print(separator)
         for row in table_rows:
             print(row)
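generate_results_dict returns plain dictionaries keyed as above, so results are easy to consume downstream. A sketch (the summarize helper is hypothetical; keys are from the method above):

    def summarize(results):
        # results: list of dicts as returned by generate_results_dict()
        for r in results:
            print(
                f"{r['model/name']}: {r['model/parameters'] / 1e6:.1f}M params, "
                f"{r['model/GFLOPs']} GFLOPs, ONNX {r['model/speed_ONNX(ms)']} ms, "
                f"TensorRT {r['model/speed_TensorRT(ms)']} ms"
            )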