Restrict ONNX ExecutionProviders (#18400)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Authored by Glenn Jocher on 2024-12-26 03:32:39 +01:00, committed by GitHub
parent 9935b45377
commit c9a48b281e
6 changed files with 23 additions and 20 deletions

@@ -102,11 +102,11 @@ jobs:
         python-version: ["3.11"]
         model: [yolo11n]
     steps:
+      - uses: astral-sh/setup-uv@v5
       - uses: actions/checkout@v4
       - uses: actions/setup-python@v5
         with:
           python-version: ${{ matrix.python-version }}
-      - uses: astral-sh/setup-uv@v5
       - name: Install requirements
         shell: bash # for Windows compatibility
         run: |

@@ -74,10 +74,10 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
     # Remove files
     models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)]
-    for file in ["bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models:
+    for file in ["decelera_portrait_min.mov", "bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models:
         Path(file).unlink(missing_ok=True)

     # Remove directories
     models = [path for x in ["*.mlpackage", "*_openvino_model"] for path in WEIGHTS_DIR.rglob(x)]
-    for directory in [TMP.parents[1] / ".pytest_cache", TMP] + models:
+    for directory in [WEIGHTS_DIR / "path with spaces", TMP.parents[1] / ".pytest_cache", TMP] + models:
         shutil.rmtree(directory, ignore_errors=True)

@@ -59,7 +59,8 @@ def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
     run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")
     run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
     if TORCH_1_9:
-        run(f"yolo predict {task} model='rtdetr-l.pt' source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
+        weights = WEIGHTS_DIR / "rtdetr-l.pt"
+        run(f"yolo predict {task} model={weights} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")


 @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")

@@ -576,11 +576,11 @@ def test_model_embeddings():

 @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
 def test_yolo_world():
     """Tests YOLO world models with CLIP support, including detection and training scenarios."""
-    model = YOLO("yolov8s-world.pt")  # no YOLO11n-world model yet
+    model = YOLO(WEIGHTS_DIR / "yolov8s-world.pt")  # no YOLO11n-world model yet
     model.set_classes(["tree", "window"])
     model(SOURCE, conf=0.01)
-    model = YOLO("yolov8s-worldv2.pt")  # no YOLO11n-world model yet
+    model = YOLO(WEIGHTS_DIR / "yolov8s-worldv2.pt")  # no YOLO11n-world model yet
     # Training from a pretrained model. Eval is included at the final stage of training.
     # Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model
     model.train(

@@ -3,18 +3,20 @@
 import cv2
 import pytest

+from tests import TMP
 from ultralytics import YOLO, solutions
+from ultralytics.utils import ASSETS_URL, WEIGHTS_DIR
 from ultralytics.utils.downloads import safe_download

-MAJOR_SOLUTIONS_DEMO = "https://github.com/ultralytics/assets/releases/download/v0.0.0/solutions_ci_demo.mp4"
-WORKOUTS_SOLUTION_DEMO = "https://github.com/ultralytics/assets/releases/download/v0.0.0/solution_ci_pose_demo.mp4"
+DEMO_VIDEO = "solutions_ci_demo.mp4"
+POSE_VIDEO = "solution_ci_pose_demo.mp4"


 @pytest.mark.slow
 def test_major_solutions():
     """Test the object counting, heatmap, speed estimation and queue management solution."""
-    safe_download(url=MAJOR_SOLUTIONS_DEMO)
-    cap = cv2.VideoCapture("solutions_ci_demo.mp4")
+    safe_download(url=f"{ASSETS_URL}/{DEMO_VIDEO}", dir=TMP)
+    cap = cv2.VideoCapture(str(TMP / DEMO_VIDEO))
     assert cap.isOpened(), "Error reading video file"
     region_points = [(20, 400), (1080, 400), (1080, 360), (20, 360)]
     counter = solutions.ObjectCounter(region=region_points, model="yolo11n.pt", show=False)  # Test object counter
@@ -42,8 +44,8 @@ def test_major_solutions():
     cap.release()
     # Test workouts monitoring
-    safe_download(url=WORKOUTS_SOLUTION_DEMO)
-    cap1 = cv2.VideoCapture("solution_ci_pose_demo.mp4")
+    safe_download(url=f"{ASSETS_URL}/{POSE_VIDEO}", dir=TMP)
+    cap1 = cv2.VideoCapture(str(TMP / POSE_VIDEO))
     assert cap1.isOpened(), "Error reading video file"
     gym = solutions.AIGym(line_width=2, kpts=[5, 11, 13], show=False)
     while cap1.isOpened():
@@ -59,9 +61,9 @@ def test_instance_segmentation():
     """Test the instance segmentation solution."""
     from ultralytics.utils.plotting import Annotator, colors

-    model = YOLO("yolo11n-seg.pt")
+    model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
     names = model.names
-    cap = cv2.VideoCapture("solutions_ci_demo.mp4")
+    cap = cv2.VideoCapture(TMP / DEMO_VIDEO)
     assert cap.isOpened(), "Error reading video file"
     while cap.isOpened():
         success, im0 = cap.read()
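
The solutions tests now pull the demo clips into the pytest TMP directory with safe_download(..., dir=TMP) and open them from that path instead of downloading into the current working directory. A minimal standalone sketch of the same pattern, assuming ultralytics and opencv-python are installed (the scratch directory name below is illustrative, not the tests' actual TMP):

    from pathlib import Path

    import cv2
    from ultralytics.utils import ASSETS_URL
    from ultralytics.utils.downloads import safe_download

    tmp = Path("tmp_videos")  # illustrative scratch directory standing in for the tests' TMP
    video = "solutions_ci_demo.mp4"
    safe_download(url=f"{ASSETS_URL}/{video}", dir=tmp)  # download into tmp rather than the CWD
    cap = cv2.VideoCapture(str(tmp / video))  # pass a str; not every cv2 build accepts pathlib.Path
    assert cap.isOpened(), "Error reading video file"
    cap.release()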

@@ -192,14 +192,14 @@ class AutoBackend(nn.Module):
                 check_requirements("numpy==1.23.5")
             import onnxruntime

-            providers = onnxruntime.get_available_providers()
-            if not cuda and "CUDAExecutionProvider" in providers:
-                providers.remove("CUDAExecutionProvider")
-            elif cuda and "CUDAExecutionProvider" not in providers:
-                LOGGER.warning("WARNING ⚠️ Failed to start ONNX Runtime session with CUDA. Falling back to CPU...")
+            providers = ["CPUExecutionProvider"]
+            if cuda and "CUDAExecutionProvider" in onnxruntime.get_available_providers():
+                providers.insert(0, "CUDAExecutionProvider")
+            elif cuda:  # Only log warning if CUDA was requested but unavailable
+                LOGGER.warning("WARNING ⚠️ Failed to start ONNX Runtime with CUDA. Using CPU...")
                 device = torch.device("cpu")
                 cuda = False
-            LOGGER.info(f"Preferring ONNX Runtime {providers[0]}")
+            LOGGER.info(f"Using ONNX Runtime {providers[0]}")
             if onnx:
                 session = onnxruntime.InferenceSession(w, providers=providers)
             else:
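
The net effect of this change: instead of handing the session every provider that onnxruntime reports as available, AutoBackend now restricts it to CPUExecutionProvider and prepends CUDAExecutionProvider only when CUDA was requested and is actually available, warning and falling back to CPU otherwise. A minimal standalone sketch of the same selection logic, assuming onnxruntime is installed (build_session and "model.onnx" are illustrative names, not part of the library):

    import onnxruntime

    def build_session(model_path: str, cuda: bool = False) -> onnxruntime.InferenceSession:
        """Create an ONNX Runtime session restricted to CPU, adding CUDA only if requested and available."""
        providers = ["CPUExecutionProvider"]  # always keep CPU as the fallback provider
        if cuda and "CUDAExecutionProvider" in onnxruntime.get_available_providers():
            providers.insert(0, "CUDAExecutionProvider")  # list CUDA first so it is preferred
        elif cuda:
            print("CUDA requested but CUDAExecutionProvider is unavailable, using CPU")
        return onnxruntime.InferenceSession(model_path, providers=providers)

    # session = build_session("model.onnx", cuda=True)  # hypothetical model path
    # print(session.get_providers())  # providers the session actually accepted, in priority order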