ultralytics 8.0.108 add Meituan YOLOv6 models (#2811)

Co-authored-by: Michael Currie <mcurrie@gmail.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Hicham Talaoubrid <98521878+HichTala@users.noreply.github.com>
Co-authored-by: Zlobin Vladimir <vladimir.zlobin@intel.com>
Co-authored-by: Szymon Mikler <sjmikler@gmail.com>
Authored by Glenn Jocher on 2023-05-25 00:43:32 +02:00, committed by GitHub
parent 07b57c03c8
commit ffc0e8ccf7
18 changed files with 233 additions and 45 deletions

ultralytics/yolo/utils/benchmarks.py

@@ -3,7 +3,7 @@
 Benchmark a YOLO model formats for speed and accuracy

 Usage:
-    from ultralytics.yolo.utils.benchmarks import ProfileModels, run_benchmarks
+    from ultralytics.yolo.utils.benchmarks import ProfileModels, benchmark
     ProfileModels(['yolov8n.yaml', 'yolov8s.yaml'])
     run_benchmarks(model='yolov8n.pt', imgsz=160)

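For orientation, the docstring's entry point is renamed from run_benchmarks to benchmark. A minimal sketch of calling it, assuming it keeps the keyword arguments shown in the usage lines above:

    # Hypothetical driver for the renamed benchmark() entry point
    from ultralytics.yolo.utils.benchmarks import benchmark

    benchmark(model='yolov8n.pt', imgsz=160)  # exports supported formats, then times each one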
@@ -163,7 +163,7 @@ class ProfileModels:
         profile(): Profiles the models and prints the result.
     """

-    def __init__(self, paths: list, num_timed_runs=100, num_warmup_runs=3, imgsz=640, trt=True):
+    def __init__(self, paths: list, num_timed_runs=100, num_warmup_runs=10, imgsz=640, trt=True):
         self.paths = paths
         self.num_timed_runs = num_timed_runs
         self.num_warmup_runs = num_warmup_runs
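The only functional change here is raising num_warmup_runs from 3 to 10. Early inference passes are usually slower (CUDA context setup, kernel autotuning, cold caches), so discarding more of them steadies the timed statistics. A generic sketch of the warmup-then-measure pattern this class follows; infer is a placeholder callable, not part of the commit:

    import time

    def timed_runs(infer, num_warmup_runs=10, num_timed_runs=100):
        for _ in range(num_warmup_runs):
            infer()  # warmup: results discarded so one-time costs don't skew the stats
        times = []
        for _ in range(num_timed_runs):
            start = time.time()
            infer()
            times.append((time.time() - start) * 1000)  # milliseconds
        return times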
@@ -181,22 +181,22 @@ class ProfileModels:

         table_rows = []
         device = 0 if torch.cuda.is_available() else 'cpu'
         for file in files:
-            engine_file = ''
+            engine_file = file.with_suffix('.engine')
             if file.suffix in ('.pt', '.yaml'):
                 model = YOLO(str(file))
-                num_params, num_flops = model.info()
-                if self.trt and device == 0:
+                model_info = model.info()
+                if self.trt and device == 0 and not engine_file.is_file():
                     engine_file = model.export(format='engine', half=True, imgsz=self.imgsz, device=device)
                 onnx_file = model.export(format='onnx', half=True, imgsz=self.imgsz, simplify=True, device=device)
             elif file.suffix == '.onnx':
-                num_params, num_flops = self.get_onnx_model_info(file)
+                model_info = self.get_onnx_model_info(file)
                 onnx_file = file
             else:
                 continue

             t_engine = self.profile_tensorrt_model(str(engine_file))
             t_onnx = self.profile_onnx_model(str(onnx_file))
-            table_rows.append(self.generate_table_row(file.stem, t_onnx, t_engine, num_params, num_flops))
+            table_rows.append(self.generate_table_row(file.stem, t_onnx, t_engine, model_info))

         self.print_table(table_rows)
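The 2-tuple (num_params, num_flops) is replaced by a single model_info value because model.info() now returns four fields; the comment added to get_onnx_model_info() in the next hunk spells out the assumed ordering (num_layers, num_params, num_gradients, num_flops). A short sketch of consuming it on that assumption:

    from ultralytics import YOLO

    model_info = YOLO('yolov8n.pt').info()  # assumed 4-tuple, per the comment below
    layers, params, gradients, flops = model_info
    print(f'{params / 1e6:.1f}M params, {flops:.1f}B FLOPs')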
@@ -216,10 +216,21 @@ class ProfileModels:
         return [Path(file) for file in sorted(files)]

     def get_onnx_model_info(self, onnx_file: str):
-        return 0.0, 0.0
+        # return (num_layers, num_params, num_gradients, num_flops)
+        return 0.0, 0.0, 0.0, 0.0
+
+    def iterative_sigma_clipping(self, data, sigma=2, max_iters=5):
+        data = np.array(data)
+        for _ in range(max_iters):
+            mean, std = np.mean(data), np.std(data)
+            clipped_data = data[(data > mean - sigma * std) & (data < mean + sigma * std)]
+            if len(clipped_data) == len(data):
+                break
+            data = clipped_data
+        return data

     def profile_tensorrt_model(self, engine_file: str):
-        if not Path(engine_file).is_file():
+        if not self.trt or not Path(engine_file).is_file():
             return 0.0, 0.0

         # Warmup runs
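The new iterative_sigma_clipping() method is the core addition of this hunk: it repeatedly drops samples more than sigma standard deviations from the mean until the set stabilizes (or max_iters is hit), so a handful of outlier timings from GC pauses or thermal throttling no longer inflate the reported mean and std. A quick demonstration on synthetic timings:

    import numpy as np

    def iterative_sigma_clipping(data, sigma=2, max_iters=5):  # copy of the method above
        data = np.array(data)
        for _ in range(max_iters):
            mean, std = np.mean(data), np.std(data)
            clipped = data[(data > mean - sigma * std) & (data < mean + sigma * std)]
            if len(clipped) == len(data):
                break
            data = clipped
        return data

    times = [9.9, 10.1, 10.0, 10.2, 9.8, 10.1, 9.9, 10.0, 10.3, 9.7, 100.0]  # one 100 ms outlier
    print(np.mean(times))                            # ~18.2 ms, skewed by the outlier
    print(np.mean(iterative_sigma_clipping(times)))  # ~10.0 ms once the outlier is clipped

Note that clipping needs enough samples to work: the largest possible z-score of one point among n is (n - 1) / sqrt(n), so with only five samples no single outlier can ever exceed 2 sigma. That is one reason the timed-run count is multiplied by 30 in the next hunk.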
@@ -230,10 +241,11 @@ class ProfileModels:

         # Timed runs
         run_times = []
-        for _ in tqdm(range(self.num_timed_runs), desc=engine_file):
+        for _ in tqdm(range(self.num_timed_runs * 30), desc=engine_file):
             results = model(input_data, verbose=False)
             run_times.append(results[0].speed['inference'])  # Convert to milliseconds

+        run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=3)  # sigma clipping
         return np.mean(run_times), np.std(run_times)

     def profile_onnx_model(self, onnx_file: str):
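get_onnx_model_info() above remains a stub returning zeros. For a standalone .onnx file the parameter count, at least, is recoverable from the graph initializers; a possible sketch using the onnx package (not part of this commit):

    import numpy as np
    import onnx

    def onnx_num_params(onnx_file: str) -> int:
        model = onnx.load(onnx_file)
        # Each initializer is a stored weight tensor; sum their element counts
        return int(sum(np.prod(init.dims) for init in model.graph.initializer))

FLOPs are harder, since they depend on per-node activation shapes, which is presumably why the stub still returns zeros.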
@@ -246,7 +258,23 @@ class ProfileModels:

         sess = ort.InferenceSession(onnx_file, sess_options, providers=['CPUExecutionProvider'])

         input_tensor = sess.get_inputs()[0]
-        input_data = np.random.rand(*input_tensor.shape).astype(np.float16 if torch.cuda.is_available() else np.float32)
+        input_type = input_tensor.type
+
+        # Mapping ONNX datatype to numpy datatype
+        if 'float16' in input_type:
+            input_dtype = np.float16
+        elif 'float' in input_type:
+            input_dtype = np.float32
+        elif 'double' in input_type:
+            input_dtype = np.float64
+        elif 'int64' in input_type:
+            input_dtype = np.int64
+        elif 'int32' in input_type:
+            input_dtype = np.int32
+        else:
+            raise ValueError(f'Unsupported ONNX datatype {input_type}')
+
+        input_data = np.random.rand(*input_tensor.shape).astype(input_dtype)
         input_name = input_tensor.name
         output_name = sess.get_outputs()[0].name
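The branch order in the mapping above matters: ONNX Runtime reports types as strings like 'tensor(float)' or 'tensor(float16)', and since 'float' is a substring of 'tensor(float16)', the float16 check must come first. A quick way to inspect what a session reports (the model path is illustrative):

    import onnxruntime as ort

    sess = ort.InferenceSession('yolov8n.onnx', providers=['CPUExecutionProvider'])
    inp = sess.get_inputs()[0]
    print(inp.name, inp.shape, inp.type)  # e.g. images [1, 3, 640, 640] tensor(float)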
@@ -261,17 +289,19 @@ class ProfileModels:
             sess.run([output_name], {input_name: input_data})
             run_times.append((time.time() - start_time) * 1000)  # Convert to milliseconds

+        run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=3)  # sigma clipping
         return np.mean(run_times), np.std(run_times)

-    def generate_table_row(self, model_name, t_onnx, t_engine, num_params, num_flops):
-        return f'| {model_name} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± {t_engine[1]:.2f} ms | {num_params / 1e6:.1f} | {num_flops:.1f} |'
+    def generate_table_row(self, model_name, t_onnx, t_engine, model_info):
+        layers, params, gradients, flops = model_info
+        return f'| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± {t_engine[1]:.2f} ms | {params / 1e6:.1f} | {flops:.1f} |'

     def print_table(self, table_rows):
         gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'GPU'
         header = f'| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>{gpu} TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |'
         separator = '|-------------|---------------------|--------------------|------------------------------|-----------------------------------|------------------|-----------------|'

-        print(header)
+        print(f'\n\n{header}')
         print(separator)
         for row in table_rows:
             print(row)
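End to end, a minimal driver for the profiler as revised in this commit (model files are illustrative):

    from ultralytics.yolo.utils.benchmarks import ProfileModels

    # Profiles each model and prints one markdown table row per file
    ProfileModels(['yolov8n.pt', 'yolov8s.pt'], imgsz=640).profile()

Each row fills the params (M) and FLOPs (B) columns from model_info and leaves mAP as '-', since accuracy is not measured by this profiler; the '\n\n' added to the header print presumably separates the table from the preceding tqdm progress output.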