Model coverage cleanup (#4585)

Glenn Jocher 2023-08-27 04:19:41 +02:00 committed by GitHub
parent c635418a27
commit deac7575b1
12 changed files with 132 additions and 175 deletions


@@ -40,14 +40,6 @@ def test_train(task, model, data):
@pytest.mark.parametrize('task,model,data', TASK_ARGS)
def test_val(task, model, data):
    # Download annotations to run pycocotools eval
    # from ultralytics.utils import SETTINGS, Path
    # from ultralytics.utils.downloads import download
    # url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/'
    # download(f'{url}instances_val2017.json', dir=Path(SETTINGS['datasets_dir']) / 'coco8/annotations')
    # download(f'{url}person_keypoints_val2017.json', dir=Path(SETTINGS['datasets_dir']) / 'coco8-pose/annotations')
    # Validate
    run(f'yolo val {task} model={WEIGHTS_DIR / model}.pt data={data} imgsz=32 save_txt save_json')
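
For context, the same validation can also be driven through the Python API instead of the CLI `run()` helper. A minimal sketch, assuming the yolov8n.pt weights and coco8.yaml dataset used elsewhere in these tests:

# Hedged sketch only, not part of this commit: Python-API equivalent of the
# 'yolo val ...' CLI call above, assuming yolov8n.pt and coco8.yaml.
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
metrics = model.val(data='coco8.yaml', imgsz=32, save_txt=True, save_json=True)
print(metrics.box.map)  # mAP50-95 reported by the validator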


@@ -1,16 +1,18 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
import contextlib
import subprocess
from pathlib import Path
import pytest
import torch
from ultralytics import YOLO
from ultralytics import YOLO, download
from ultralytics.utils import ASSETS, SETTINGS
CUDA_IS_AVAILABLE = torch.cuda.is_available()
CUDA_DEVICE_COUNT = torch.cuda.device_count()
DATASETS_DIR = Path(SETTINGS['datasets_dir'])
WEIGHTS_DIR = Path(SETTINGS['weights_dir'])
MODEL = WEIGHTS_DIR / 'path with spaces' / 'yolov8n.pt' # test spaces in path
DATA = 'coco8.yaml'
@@ -37,13 +39,15 @@ def test_train_ddp():
def test_utils_benchmarks():
    from ultralytics.utils.benchmarks import ProfileModels
    YOLO(MODEL).export(format='engine', imgsz=32, dynamic=True, batch=1)  # pre-export engine model, auto-device
    # Pre-export a dynamic engine model to use dynamic inference
    YOLO(MODEL).export(format='engine', imgsz=32, dynamic=True, batch=1)
    ProfileModels([MODEL], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
def test_predict_sam():
    from ultralytics import SAM
    from ultralytics.models.sam import Predictor as SAMPredictor
    # Load a model
    model = SAM(WEIGHTS_DIR / 'sam_b.pt')
@@ -60,14 +64,63 @@ def test_predict_sam():
    # Run inference with points prompt
    model(ASSETS / 'zidane.jpg', points=[900, 370], labels=[1], device=0)
    # Create SAMPredictor
    overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024, model='mobile_sam.pt')
    predictor = SAMPredictor(overrides=overrides)
    # Set image
    predictor.set_image('ultralytics/assets/zidane.jpg')  # set with image file
    # predictor(bboxes=[439, 437, 524, 709])
    # predictor(points=[900, 370], labels=[1])
    # Reset image
    predictor.reset_image()
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
def test_model_tune():
    subprocess.run('pip install ray[tune]'.split(), check=True)
    YOLO('yolov8n-cls.yaml').tune(data='imagenet10',
                                  grace_period=1,
                                  max_samples=1,
                                  imgsz=32,
                                  epochs=1,
                                  plots=False,
                                  device='cpu')
    with contextlib.suppress(RuntimeError):  # RuntimeError may be caused by out-of-memory
        YOLO('yolov8n-cls.yaml').tune(data='imagenet10',
                                      grace_period=1,
                                      max_samples=1,
                                      imgsz=32,
                                      epochs=1,
                                      plots=False,
                                      device='cpu')
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason='CUDA is not available')
def test_pycocotools():
    from ultralytics.models.yolo.detect import DetectionValidator
    from ultralytics.models.yolo.pose import PoseValidator
    from ultralytics.models.yolo.segment import SegmentationValidator
    # Download annotations after each dataset downloads first
    url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/'
    validator = DetectionValidator(args={'model': 'yolov8n.pt', 'data': 'coco8.yaml', 'save_json': True, 'imgsz': 64})
    validator()
    validator.is_coco = True
    download(f'{url}instances_val2017.json', dir=DATASETS_DIR / 'coco8/annotations')
    _ = validator.eval_json(validator.stats)
    validator = SegmentationValidator(args={
        'model': 'yolov8n-seg.pt',
        'data': 'coco8-seg.yaml',
        'save_json': True,
        'imgsz': 64})
    validator()
    validator.is_coco = True
    download(f'{url}instances_val2017.json', dir=DATASETS_DIR / 'coco8-seg/annotations')
    _ = validator.eval_json(validator.stats)
    validator = PoseValidator(args={
        'model': 'yolov8n-pose.pt',
        'data': 'coco8-pose.yaml',
        'save_json': True,
        'imgsz': 64})
    validator()
    validator.is_coco = True
    download(f'{url}person_keypoints_val2017.json', dir=DATASETS_DIR / 'coco8-pose/annotations')
    _ = validator.eval_json(validator.stats)
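
The three validator checks above share one pattern: run validation with save_json, flag the run as COCO, download the ground-truth annotation JSON, then score the written predictions with pycocotools via eval_json. A minimal sketch of that pattern as a hypothetical helper (the helper name and signature are illustrative, not part of the commit; DetectionValidator, download() and eval_json() are the calls used in the test):

# Hedged sketch only: shared pattern behind the three checks above.
from ultralytics import download

def coco_eval(validator_cls, weights, data, ann_url, ann_dir):
    validator = validator_cls(args={'model': weights, 'data': data, 'save_json': True, 'imgsz': 64})
    validator()  # run validation, which also writes predictions.json
    validator.is_coco = True  # score against COCO-format ground truth via pycocotools
    download(ann_url, dir=ann_dir)  # fetch the ground-truth annotation JSON
    return validator.eval_json(validator.stats)

# e.g. coco_eval(DetectionValidator, 'yolov8n.pt', 'coco8.yaml',
#                f'{url}instances_val2017.json', DATASETS_DIR / 'coco8/annotations')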


@@ -1,7 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
import contextlib
import shutil
from copy import copy
from pathlib import Path
@@ -15,7 +14,7 @@ from torchvision.transforms import ToTensor
from ultralytics import RTDETR, YOLO
from ultralytics.cfg import TASK2DATA
from ultralytics.data.build import load_inference_source
from ultralytics.utils import ASSETS, DEFAULT_CFG, LINUX, ONLINE, ROOT, SETTINGS, WINDOWS
from ultralytics.utils import ASSETS, DEFAULT_CFG, LINUX, MACOS, ONLINE, ROOT, SETTINGS, WINDOWS
from ultralytics.utils.downloads import download
from ultralytics.utils.torch_utils import TORCH_1_9
@@ -50,14 +49,22 @@ def test_model_methods():
    _ = model.task_map
def test_model_profile():
    # Test profile=True model argument
    from ultralytics.nn.tasks import DetectionModel
    model = DetectionModel()  # build model
    im = torch.randn(1, 3, 64, 64)  # requires min imgsz=64
    _ = model.predict(im, profile=True)
def test_predict_txt():
    # Write a list of sources (file, dir, glob, recursive glob) to a txt file
    txt_file = TMP / 'sources.txt'
    with open(txt_file, 'w') as f:
        for x in [ASSETS / 'bus.jpg', ASSETS, ASSETS / '*', ASSETS / '**/*.jpg']:
            f.write(f'{x}\n')
    model = YOLO(MODEL)
    model(source=txt_file, imgsz=32)
    _ = YOLO(MODEL)(source=txt_file, imgsz=32)
def test_predict_img():
@@ -143,8 +150,7 @@ def test_track_stream():
def test_val():
    model = YOLO(MODEL)
    model.val(data='coco8.yaml', imgsz=32, save_hybrid=True)
    YOLO(MODEL).val(data='coco8.yaml', imgsz=32, save_hybrid=True)
def test_train_scratch():
@@ -160,29 +166,25 @@ def test_train_pretrained():
def test_export_torchscript():
    model = YOLO(MODEL)
    f = model.export(format='torchscript', optimize=True)
    f = YOLO(MODEL).export(format='torchscript', optimize=True)
    YOLO(f)(SOURCE)  # exported model inference
def test_export_onnx():
    model = YOLO(MODEL)
    f = model.export(format='onnx', dynamic=True)
    f = YOLO(MODEL).export(format='onnx', dynamic=True)
    YOLO(f)(SOURCE)  # exported model inference
def test_export_openvino():
    model = YOLO(MODEL)
    f = model.export(format='openvino')
    f = YOLO(MODEL).export(format='openvino')
    YOLO(f)(SOURCE)  # exported model inference
def test_export_coreml():
    if not WINDOWS:  # RuntimeError: BlobWriter not loaded with coremltools 7.0 on windows
        model = YOLO(MODEL)
        model.export(format='coreml', nms=True)
        # if MACOS:
        #     YOLO(f)(SOURCE)  # model prediction only supported on macOS
        f = YOLO(MODEL).export(format='coreml', nms=True)
        if MACOS:
            YOLO(f)(SOURCE)  # model prediction only supported on macOS
def test_export_tflite(enabled=False):
@@ -204,13 +206,11 @@ def test_export_pb(enabled=False):
def test_export_paddle(enabled=False):
    # Paddle protobuf requirements conflicting with onnx protobuf requirements
    if enabled:
        model = YOLO(MODEL)
        model.export(format='paddle')
        YOLO(MODEL).export(format='paddle')
def test_export_ncnn():
    model = YOLO(MODEL)
    f = model.export(format='ncnn')
    f = YOLO(MODEL).export(format='ncnn')
    YOLO(f)(SOURCE)  # exported model inference
@@ -218,14 +218,14 @@ def test_all_model_yamls():
    for m in (ROOT / 'cfg' / 'models').rglob('*.yaml'):
        if 'rtdetr' in m.name:
            if TORCH_1_9:  # torch<=1.8 issue - TypeError: __init__() got an unexpected keyword argument 'batch_first'
                RTDETR(m.name)(SOURCE, imgsz=640)  # must be 640
                _ = RTDETR(m.name)(SOURCE, imgsz=640)  # must be 640
        else:
            YOLO(m.name)
def test_workflow():
    model = YOLO(MODEL)
    model.train(data='coco8.yaml', epochs=1, imgsz=32)
    model.train(data='coco8.yaml', epochs=1, imgsz=32, optimizer='SGD')
    model.val(imgsz=32)
    model.predict(SOURCE, imgsz=32)
    model.export(format='onnx')  # export a model to ONNX format
@@ -254,8 +254,7 @@ def test_predict_callback_and_setup():
def test_results():
    for m in 'yolov8n-pose.pt', 'yolov8n-seg.pt', 'yolov8n.pt', 'yolov8n-cls.pt':
        model = YOLO(m)
        results = model([SOURCE, SOURCE], imgsz=160)
        results = YOLO(m)([SOURCE, SOURCE], imgsz=160)
        for r in results:
            r = r.cpu().numpy()
            r = r.to(device='cpu', dtype=torch.float32)
@@ -278,8 +277,7 @@ def test_data_utils():
    for task in 'detect', 'segment', 'pose':
        file = Path(TASK2DATA[task]).with_suffix('.zip')  # i.e. coco8.zip
        download(f'https://github.com/ultralytics/hub/raw/main/example_datasets/{file}', unzip=False)
        shutil.move(str(file), TMP)  # Python 3.8 requires string input to shutil.move()
        download(f'https://github.com/ultralytics/hub/raw/main/example_datasets/{file}', unzip=False, dir=TMP)
        stats = HUBDatasetStats(TMP / file, task=task)
        stats.get_json(save=True)
        stats.process_images()
@@ -294,8 +292,7 @@ def test_data_converter():
    from ultralytics.data.converter import coco80_to_coco91_class, convert_coco
    file = 'instances_val2017.json'
    download(f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{file}')
    shutil.move(file, TMP)
    download(f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{file}', dir=TMP)
    convert_coco(labels_dir=TMP, use_segments=True, use_keypoints=False, cls91to80=True)
    coco80_to_coco91_class()
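
The last two changes both rely on download() writing straight into a target directory via its dir argument rather than downloading and then calling shutil.move(). A minimal standalone sketch of that usage (the target directory name is illustrative):

# Hedged sketch only: download directly into a chosen directory with dir=.
from pathlib import Path
from ultralytics.utils.downloads import download

tmp_dir = Path('coco_annotations')  # illustrative target directory
download('https://github.com/ultralytics/yolov5/releases/download/v1.0/instances_val2017.json', dir=tmp_dir)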