ultralytics 8.3.0 YOLO11 Models Release (#16539)
Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Laughing-q <1185102784@qq.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
parent efb0c17881
commit 6e43d1e1e5
50 changed files with 1154 additions and 407 deletions
@@ -211,7 +211,7 @@ def test_train_scratch():
 
 def test_train_pretrained():
     """Test training of the YOLO model starting from a pre-trained checkpoint."""
-    model = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
+    model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
     model.train(data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0)
     model(SOURCE)
 
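For context, the hunk above swaps the pretrained checkpoint from yolov8n-seg.pt to yolo11n-seg.pt. A minimal standalone sketch of the same fine-tune-then-predict flow, assuming ultralytics>=8.3.0 with automatic weight download; the image URL and epoch count are illustrative:

from ultralytics import YOLO

# Load the pretrained YOLO11 nano segmentation checkpoint (auto-downloaded on first use)
model = YOLO("yolo11n-seg.pt")

# Brief fine-tune on the bundled coco8-seg sample dataset
model.train(data="coco8-seg.yaml", epochs=1, imgsz=32)

# Predict on a sample image
results = model("https://ultralytics.com/images/bus.jpg")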
@@ -281,13 +281,13 @@ def test_results(model):
 def test_labels_and_crops():
     """Test output from prediction args for saving YOLO detection labels and crops; ensures accurate saving."""
     imgs = [SOURCE, ASSETS / "zidane.jpg"]
-    results = YOLO(WEIGHTS_DIR / "yolov8n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
+    results = YOLO(WEIGHTS_DIR / "yolo11n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
     save_path = Path(results[0].save_dir)
     for r in results:
         im_name = Path(r.path).stem
         cls_idxs = r.boxes.cls.int().tolist()
         # Check correct detections
-        assert cls_idxs == ([0, 0, 5, 0, 7] if r.path.endswith("bus.jpg") else [0, 0])  # bus.jpg and zidane.jpg classes
+        assert cls_idxs == ([0, 7, 0, 0] if r.path.endswith("bus.jpg") else [0, 0, 0])  # bus.jpg and zidane.jpg classes
         # Check label path
         labels = save_path / f"labels/{im_name}.txt"
         assert labels.exists()
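The updated assertions reflect the detections yolo11n.pt produces on the two test images. A minimal sketch of the save_txt/save_crop behavior the test checks, assuming the same auto-downloaded weights; the image URL is illustrative:

from pathlib import Path

from ultralytics import YOLO

# Predict and persist YOLO-format label files plus per-detection crops
results = YOLO("yolo11n.pt")("https://ultralytics.com/images/bus.jpg", imgsz=160, save_txt=True, save_crop=True)

# Labels are written to <save_dir>/labels/<image_stem>.txt
save_dir = Path(results[0].save_dir)
for r in results:
    stem = Path(r.path).stem
    print((save_dir / "labels" / f"{stem}.txt").read_text())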
@@ -339,7 +339,7 @@ def test_data_annotator():
 
     auto_annotate(
         ASSETS,
-        det_model=WEIGHTS_DIR / "yolov8n.pt",
+        det_model=WEIGHTS_DIR / "yolo11n.pt",
         sam_model=WEIGHTS_DIR / "mobile_sam.pt",
         output_dir=TMP / "auto_annotate_labels",
     )
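For reference, auto_annotate chains a detector with SAM to generate segmentation labels automatically. A minimal sketch using the YOLO11 detector the hunk switches to; the input and output paths are hypothetical placeholders:

from ultralytics.data.annotator import auto_annotate

# Detect objects with YOLO11, then prompt MobileSAM with the resulting boxes to
# produce segmentation labels for every image under the source directory
auto_annotate(
    data="path/to/images",  # hypothetical image directory
    det_model="yolo11n.pt",
    sam_model="mobile_sam.pt",
    output_dir="path/to/auto_annotate_labels",  # hypothetical output directory
)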
@@ -393,7 +393,7 @@ def test_utils_benchmarks():
     """Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'."""
     from ultralytics.utils.benchmarks import ProfileModels
 
-    ProfileModels(["yolov8n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
+    ProfileModels(["yolo11n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
 
 
 def test_utils_torchutils():
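ProfileModels builds a model from its config and benchmarks inference speed. A minimal sketch matching the updated test; the tiny imgsz and run counts only keep profiling fast and are not representative benchmark settings:

from ultralytics.utils.benchmarks import ProfileModels

# Build yolo11n from its YAML config and profile inference speed;
# min_time / num_timed_runs / num_warmup_runs bound how long profiling takes
ProfileModels(["yolo11n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()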
@@ -568,14 +568,14 @@ def test_classify_transforms_train(image, auto_augment, erasing, force_color_jit
 @pytest.mark.skipif(not ONLINE, reason="environment is offline")
 def test_model_tune():
     """Tune YOLO model for performance improvement."""
-    YOLO("yolov8n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
-    YOLO("yolov8n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+    YOLO("yolo11n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+    YOLO("yolo11n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
 
 
 def test_model_embeddings():
     """Test YOLO model embeddings."""
     model_detect = YOLO(MODEL)
-    model_segment = YOLO(WEIGHTS_DIR / "yolov8n-seg.pt")
+    model_segment = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
 
     for batch in [SOURCE], [SOURCE, SOURCE]:  # test batch size 1 and 2
         assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
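test_model_tune exercises hyperparameter evolution via .tune(), and test_model_embeddings checks that .embed() returns one feature vector per input image. A minimal embedding sketch, assuming yolo11n.pt auto-downloads; the image URL is illustrative:

from ultralytics import YOLO

model = YOLO("yolo11n.pt")

# embed() returns a list with one embedding tensor per source image
embeddings = model.embed(source=["https://ultralytics.com/images/bus.jpg"], imgsz=32)
assert len(embeddings) == 1
print(embeddings[0].shape)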
@@ -585,11 +585,11 @@ def test_model_embeddings():
 @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
 def test_yolo_world():
     """Tests YOLO world models with CLIP support, including detection and training scenarios."""
-    model = YOLO("yolov8s-world.pt")  # no YOLOv8n-world model yet
+    model = YOLO("yolov8s-world.pt")  # no YOLO11n-world model yet
     model.set_classes(["tree", "window"])
     model(SOURCE, conf=0.01)
 
-    model = YOLO("yolov8s-worldv2.pt")  # no YOLOv8n-world model yet
+    model = YOLO("yolov8s-worldv2.pt")  # no YOLO11n-world model yet
     # Training from a pretrained model. Eval is included at the final stage of training.
     # Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model
     model.train(
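Only the comments change in this hunk: there is still no YOLO11 world variant, so the test keeps the yolov8s-world weights. A minimal open-vocabulary sketch of the set_classes flow the test exercises; the prompt vocabulary and image URL are illustrative:

from ultralytics import YOLO

# Restrict the open-vocabulary detector to a custom prompt vocabulary
model = YOLO("yolov8s-world.pt")
model.set_classes(["tree", "window"])

# Very low confidence threshold, as in the test, to surface weak open-set matches
results = model("https://ultralytics.com/images/bus.jpg", conf=0.01)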
@@ -603,7 +603,7 @@ def test_yolo_world():
     # test WorldTrainerFromScratch
     from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
 
-    model = YOLO("yolov8s-worldv2.yaml")  # no YOLOv8n-world model yet
+    model = YOLO("yolov8s-worldv2.yaml")  # no YOLO11n-world model yet
     model.train(
         data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}},
         epochs=1,
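A minimal sketch of training a world model from a YAML config with the dedicated trainer, mirroring the hunk above; passing the trainer class via the trainer= keyword is an assumption based on how the full test invokes it, and imgsz/epochs are illustrative:

from ultralytics import YOLO
from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch

# Build the world model from its config (random init, no pretrained weights)
model = YOLO("yolov8s-worldv2.yaml")

# dota8.yaml is a tiny sample dataset, which keeps CLIP text-encoder work cheap
model.train(
    data={"train": {"yolo_data": ["dota8.yaml"]}, "val": {"yolo_data": ["dota8.yaml"]}},
    epochs=1,
    imgsz=32,
    trainer=WorldTrainerFromScratch,  # assumption: trainer class passed explicitly
)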