ultralytics 8.3.0 YOLO11 Models Release (#16539)

Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Laughing-q <1185102784@qq.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Glenn Jocher authored 2024-09-30 02:59:20 +02:00, committed by GitHub
parent efb0c17881
commit 6e43d1e1e5
50 changed files with 1154 additions and 407 deletions
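
The diffs below consist almost entirely of swapping the default test assets from the YOLOv8 nano family to the new YOLO11 nano family (yolo11n.pt, yolo11n-seg.pt, yolo11n-pose.pt, yolo11n-cls.pt). A minimal sketch of the renamed weights in use, assuming the ultralytics package is installed and weights auto-download on first use:

    # Minimal sketch: load a renamed YOLO11 nano checkpoint and run one inference.
    # Assumes ultralytics>=8.3.0; "yolo11n.pt" is downloaded automatically if missing.
    from ultralytics import YOLO
    from ultralytics.utils import ASSETS

    model = YOLO("yolo11n.pt")                   # detection weights introduced in this release
    results = model(ASSETS / "bus.jpg")          # same sample image used throughout the tests
    print(results[0].boxes.cls.int().tolist())   # predicted class indices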

View file

@@ -3,8 +3,8 @@
from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks
# Constants used in tests
-MODEL = WEIGHTS_DIR / "path with spaces" / "yolov8n.pt" # test spaces in path
-CFG = "yolov8n.yaml"
+MODEL = WEIGHTS_DIR / "path with spaces" / "yolo11n.pt" # test spaces in path
+CFG = "yolo11n.yaml"
SOURCE = ASSETS / "bus.jpg"
SOURCES_LIST = [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]
TMP = (ROOT / "../tests/tmp").resolve() # temp directory for test files
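
For readers new to these fixtures: CFG points at an architecture YAML (builds an untrained model), while MODEL points at released weights placed under a directory containing spaces to exercise path handling. A small sketch of the distinction, assuming standard ultralytics loading behaviour:

    # CFG vs MODEL (sketch; assumes standard ultralytics loading behaviour).
    from ultralytics import YOLO

    untrained = YOLO("yolo11n.yaml")   # CFG: build a fresh, untrained model from the YAML definition
    pretrained = YOLO("yolo11n.pt")    # MODEL: load the released YOLO11n weights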

View file

@@ -74,7 +74,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
# Remove files
models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)]
for file in ["bus.jpg", "yolov8n.onnx", "yolov8n.torchscript"] + models:
for file in ["bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models:
Path(file).unlink(missing_ok=True)
# Remove directories

View file

@@ -60,7 +60,7 @@ def test_train():
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
def test_predict_multiple_devices():
"""Validate model prediction consistency across CPU and CUDA devices."""
model = YOLO("yolov8n.pt")
model = YOLO("yolo11n.pt")
model = model.cpu()
assert str(model.device) == "cpu"
_ = model(SOURCE) # CPU inference
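
The hunk above only renames the checkpoint; the pattern under test is moving one model between devices. A sketch of that flow, assuming a CUDA-capable machine (the test itself is skipped otherwise):

    # Device-switching sketch (assumes torch with CUDA support; skipped on CPU-only machines).
    import torch
    from ultralytics import YOLO
    from ultralytics.utils import ASSETS

    model = YOLO("yolo11n.pt").cpu()
    assert str(model.device) == "cpu"
    _ = model(ASSETS / "bus.jpg")        # CPU inference

    if torch.cuda.is_available():
        model = model.to("cuda:0")
        assert str(model.device) == "cuda:0"
        _ = model(ASSETS / "bus.jpg")    # CUDA inference on the same weights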

View file

@@ -21,13 +21,13 @@ def test_export():
exporter = Exporter()
exporter.add_callback("on_export_start", test_func)
assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
f = exporter(model=YOLO("yolov8n.yaml").model)
f = exporter(model=YOLO("yolo11n.yaml").model)
YOLO(f)(ASSETS) # exported model inference
def test_detect():
"""Test YOLO object detection training, validation, and prediction functionality."""
overrides = {"data": "coco8.yaml", "model": "yolov8n.yaml", "imgsz": 32, "epochs": 1, "save": False}
overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1, "save": False}
cfg = get_cfg(DEFAULT_CFG)
cfg.data = "coco8.yaml"
cfg.imgsz = 32
@@ -66,7 +66,7 @@ def test_detect():
def test_segment():
"""Tests image segmentation training, validation, and prediction pipelines using YOLO models."""
overrides = {"data": "coco8-seg.yaml", "model": "yolov8n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
overrides = {"data": "coco8-seg.yaml", "model": "yolo11n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
cfg = get_cfg(DEFAULT_CFG)
cfg.data = "coco8-seg.yaml"
cfg.imgsz = 32
@@ -88,7 +88,7 @@ def test_segment():
pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
pred.add_callback("on_predict_start", test_func)
assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
-result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolov8n-seg.pt")
+result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolo11n-seg.pt")
assert len(result), "predictor test failed"
# Test resume
@@ -105,7 +105,7 @@ def test_segment():
def test_classify():
"""Test image classification including training, validation, and prediction phases."""
overrides = {"data": "imagenet10", "model": "yolov8n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
overrides = {"data": "imagenet10", "model": "yolo11n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
cfg = get_cfg(DEFAULT_CFG)
cfg.data = "imagenet10"
cfg.imgsz = 32
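
These engine tests exercise the callback hooks on the low-level Exporter/Predictor objects. The same hook is available on the high-level YOLO API; a sketch of registering one, assuming the standard event name used above ("on_predict_start"):

    # Callback-hook sketch using the high-level API (assumes the standard ultralytics event names).
    from ultralytics import YOLO
    from ultralytics.utils import ASSETS

    def on_predict_start(predictor):
        # Runs once before inference begins; receives the predictor instance.
        print(f"starting prediction with {type(predictor).__name__}")

    model = YOLO("yolo11n-seg.pt")
    model.add_callback("on_predict_start", on_predict_start)
    results = model(ASSETS, imgsz=64)
    assert len(results), "predictor test failed"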

View file

@@ -30,7 +30,7 @@ def test_similarity():
@pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
def test_det():
"""Test detection functionalities and verify embedding table includes bounding boxes."""
exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
exp = Explorer(data="coco8.yaml", model="yolo11n.pt")
exp.create_embeddings_table(force=True)
assert len(exp.table.head()["bboxes"]) > 0
similar = exp.get_similar(idx=[1, 2], limit=10)
@@ -44,7 +44,7 @@ def test_det():
@pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
def test_seg():
"""Test segmentation functionalities and ensure the embedding table includes segmentation masks."""
exp = Explorer(data="coco8-seg.yaml", model="yolov8n-seg.pt")
exp = Explorer(data="coco8-seg.yaml", model="yolo11n-seg.pt")
exp.create_embeddings_table(force=True)
assert len(exp.table.head()["masks"]) > 0
similar = exp.get_similar(idx=[1, 2], limit=10)
@@ -57,7 +57,7 @@ def test_seg():
@pytest.mark.skipif(not TORCH_1_13, reason="Explorer requires torch>=1.13")
def test_pose():
"""Test pose estimation functionality and verify the embedding table includes keypoints."""
exp = Explorer(data="coco8-pose.yaml", model="yolov8n-pose.pt")
exp = Explorer(data="coco8-pose.yaml", model="yolo11n-pose.pt")
exp.create_embeddings_table(force=True)
assert len(exp.table.head()["keypoints"]) > 0
similar = exp.get_similar(idx=[1, 2], limit=10)
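
For context, the Explorer workflow these tests rely on is unchanged apart from the model name. A sketch mirroring the calls above, assuming torch>=1.13 and the optional lancedb dependency are installed:

    # Explorer similarity-search sketch (assumes torch>=1.13 and lancedb are available).
    from ultralytics import Explorer

    exp = Explorer(data="coco8.yaml", model="yolo11n.pt")
    exp.create_embeddings_table(force=True)           # embed every image in the dataset
    similar = exp.get_similar(idx=[1, 2], limit=10)   # nearest neighbours of images 1 and 2
    print(len(similar))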

View file

@@ -17,7 +17,7 @@ from ultralytics.utils.checks import check_requirements
@pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed")
def test_model_ray_tune():
"""Tune YOLO model using Ray for hyperparameter optimization."""
YOLO("yolov8n-cls.yaml").tune(
YOLO("yolo11n-cls.yaml").tune(
use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu"
)
@@ -26,7 +26,7 @@ def test_model_ray_tune():
def test_mlflow():
"""Test training with MLflow tracking enabled (see https://mlflow.org/ for details)."""
SETTINGS["mlflow"] = True
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
SETTINGS["mlflow"] = False
@@ -42,7 +42,7 @@ def test_mlflow_keep_run_active():
# Test with MLFLOW_KEEP_RUN_ACTIVE=True
os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True"
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
status = mlflow.active_run().info.status
assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True"
@@ -50,13 +50,13 @@ def test_mlflow_keep_run_active():
# Test with MLFLOW_KEEP_RUN_ACTIVE=False
os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False"
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
status = mlflow.get_run(run_id=run_id).info.status
assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False"
# Test with MLFLOW_KEEP_RUN_ACTIVE not set
os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None)
YOLO("yolov8n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
status = mlflow.get_run(run_id=run_id).info.status
assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set"
SETTINGS["mlflow"] = False
@@ -126,23 +126,23 @@ def test_pycocotools():
from ultralytics.models.yolo.segment import SegmentationValidator
# Download annotations after each dataset downloads first
url = "https://github.com/ultralytics/assets/releases/download/v8.2.0/"
url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
args = {"model": "yolov8n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
validator = DetectionValidator(args=args)
validator()
validator.is_coco = True
download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
_ = validator.eval_json(validator.stats)
args = {"model": "yolov8n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
args = {"model": "yolo11n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
validator = SegmentationValidator(args=args)
validator()
validator.is_coco = True
download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
_ = validator.eval_json(validator.stats)
args = {"model": "yolov8n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
args = {"model": "yolo11n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
validator = PoseValidator(args=args)
validator()
validator.is_coco = True
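
The pycocotools hunk above combines a standalone validator with a downloaded COCO annotation file. A sketch of the detection path only, assuming pycocotools is installed and the coco8 annotations download succeeds:

    # Standalone-validator sketch for COCO-style mAP (assumes pycocotools is installed;
    # DATASETS_DIR resolves to the configured datasets directory).
    from ultralytics.models.yolo.detect import DetectionValidator
    from ultralytics.utils import DATASETS_DIR
    from ultralytics.utils.downloads import download

    url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
    args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
    validator = DetectionValidator(args=args)
    validator()                                   # run validation and write the predictions JSON
    validator.is_coco = True                      # force the COCO evaluation branch
    download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
    _ = validator.eval_json(validator.stats)      # score the JSON with pycocotools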

View file

@@ -211,7 +211,7 @@ def test_train_scratch():
def test_train_pretrained():
"""Test training of the YOLO model starting from a pre-trained checkpoint."""
model = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
model.train(data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0)
model(SOURCE)
@@ -281,13 +281,13 @@ def test_results(model):
def test_labels_and_crops():
"""Test output from prediction args for saving YOLO detection labels and crops; ensures accurate saving."""
imgs = [SOURCE, ASSETS / "zidane.jpg"]
-results = YOLO(WEIGHTS_DIR / "yolov8n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
+results = YOLO(WEIGHTS_DIR / "yolo11n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
save_path = Path(results[0].save_dir)
for r in results:
im_name = Path(r.path).stem
cls_idxs = r.boxes.cls.int().tolist()
# Check correct detections
-assert cls_idxs == ([0, 0, 5, 0, 7] if r.path.endswith("bus.jpg") else [0, 0]) # bus.jpg and zidane.jpg classes
+assert cls_idxs == ([0, 7, 0, 0] if r.path.endswith("bus.jpg") else [0, 0, 0]) # bus.jpg and zidane.jpg classes
# Check label path
labels = save_path / f"labels/{im_name}.txt"
assert labels.exists()
@@ -339,7 +339,7 @@ def test_data_annotator():
auto_annotate(
ASSETS,
det_model=WEIGHTS_DIR / "yolov8n.pt",
det_model=WEIGHTS_DIR / "yolo11n.pt",
sam_model=WEIGHTS_DIR / "mobile_sam.pt",
output_dir=TMP / "auto_annotate_labels",
)
@@ -393,7 +393,7 @@ def test_utils_benchmarks():
"""Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'."""
from ultralytics.utils.benchmarks import ProfileModels
ProfileModels(["yolov8n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
ProfileModels(["yolo11n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
def test_utils_torchutils():
@@ -568,14 +568,14 @@ def test_classify_transforms_train(image, auto_augment, erasing, force_color_jit
@pytest.mark.skipif(not ONLINE, reason="environment is offline")
def test_model_tune():
"""Tune YOLO model for performance improvement."""
YOLO("yolov8n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
YOLO("yolov8n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
YOLO("yolo11n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
YOLO("yolo11n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
def test_model_embeddings():
"""Test YOLO model embeddings."""
model_detect = YOLO(MODEL)
model_segment = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
model_segment = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
for batch in [SOURCE], [SOURCE, SOURCE]: # test batch size 1 and 2
assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
@@ -585,11 +585,11 @@ def test_model_embeddings():
@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
def test_yolo_world():
"""Tests YOLO world models with CLIP support, including detection and training scenarios."""
model = YOLO("yolov8s-world.pt") # no YOLOv8n-world model yet
model = YOLO("yolov8s-world.pt") # no YOLO11n-world model yet
model.set_classes(["tree", "window"])
model(SOURCE, conf=0.01)
model = YOLO("yolov8s-worldv2.pt") # no YOLOv8n-world model yet
model = YOLO("yolov8s-worldv2.pt") # no YOLO11n-world model yet
# Training from a pretrained model. Eval is included at the final stage of training.
# Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model
model.train(
@@ -603,7 +603,7 @@ def test_yolo_world():
# test WorWorldTrainerFromScratch
from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
model = YOLO("yolov8s-worldv2.yaml") # no YOLOv8n-world model yet
model = YOLO("yolov8s-worldv2.yaml") # no YOLO11n-world model yet
model.train(
data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}},
epochs=1,
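
Note that the YOLO-World lines keep the yolov8s-world weights because no YOLO11 world variant exists at this release; only the comments change. A sketch of the open-vocabulary flow the test covers, assuming the CLIP dependency is available (the test is skipped on Python 3.12):

    # Open-vocabulary detection sketch (assumes the CLIP dependency installs; not Python 3.12).
    from ultralytics import YOLO
    from ultralytics.utils import ASSETS

    model = YOLO("yolov8s-world.pt")            # no YOLO11 world variant yet
    model.set_classes(["tree", "window"])       # restrict the vocabulary via CLIP text embeddings
    results = model(ASSETS / "bus.jpg", conf=0.01)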

View file

@@ -14,7 +14,7 @@ WORKOUTS_SOLUTION_DEMO = "https://github.com/ultralytics/assets/releases/downloa
def test_major_solutions():
"""Test the object counting, heatmap, speed estimation and queue management solution."""
safe_download(url=MAJOR_SOLUTIONS_DEMO)
model = YOLO("yolov8n.pt")
model = YOLO("yolo11n.pt")
names = model.names
cap = cv2.VideoCapture("solutions_ci_demo.mp4")
assert cap.isOpened(), "Error reading video file"
@@ -41,7 +41,7 @@ def test_major_solutions():
def test_aigym():
"""Test the workouts monitoring solution."""
safe_download(url=WORKOUTS_SOLUTION_DEMO)
model = YOLO("yolov8n-pose.pt")
model = YOLO("yolo11n-pose.pt")
cap = cv2.VideoCapture("solution_ci_pose_demo.mp4")
assert cap.isOpened(), "Error reading video file"
gym_object = solutions.AIGym(line_thickness=2, pose_type="squat", kpts_to_check=[5, 11, 13])
@@ -60,7 +60,7 @@ def test_instance_segmentation():
"""Test the instance segmentation solution."""
from ultralytics.utils.plotting import Annotator, colors
model = YOLO("yolov8n-seg.pt")
model = YOLO("yolo11n-seg.pt")
names = model.names
cap = cv2.VideoCapture("solutions_ci_demo.mp4")
assert cap.isOpened(), "Error reading video file"
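
All three solution tests above share the same per-frame loop; only the checkpoint name changes. A sketch of that loop using the generic Results.plot() helper instead of the solution-specific annotators, assuming OpenCV and the downloaded demo video:

    # Per-frame inference sketch (assumes opencv-python and the downloaded demo video).
    import cv2
    from ultralytics import YOLO

    model = YOLO("yolo11n-seg.pt")
    cap = cv2.VideoCapture("solutions_ci_demo.mp4")
    assert cap.isOpened(), "Error reading video file"

    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            break
        results = model(frame, verbose=False)
        annotated = results[0].plot()   # draw boxes and masks on the frame
    cap.release()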