Threaded inference docs improvements (#16313)

Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Glenn Jocher 2024-09-16 20:47:37 +02:00 committed by GitHub
parent 617d58d430
commit 7b19e0daa0
5 changed files with 70 additions and 95 deletions


@@ -61,7 +61,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m
model = YOLO("yolov8n.pt") # pretrained YOLOv8n model
# Run batched inference on a list of images
results = model(["im1.jpg", "im2.jpg"]) # return a list of Results objects
results = model(["image1.jpg", "image2.jpg"]) # return a list of Results objects
# Process results list
for result in results:
@@ -83,7 +83,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m
model = YOLO("yolov8n.pt") # pretrained YOLOv8n model
# Run batched inference on a list of images
results = model(["im1.jpg", "im2.jpg"], stream=True) # return a generator of Results objects
results = model(["image1.jpg", "image2.jpg"], stream=True) # return a generator of Results objects
# Process results generator
for result in results:
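
Taken together, the two hunks above only rename the example files; below is a minimal sketch of the streaming form after the change (the `boxes`/`masks` reads and the `result.show()` call are illustrative `Results` API usage, not part of this diff):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # pretrained YOLOv8n model

# stream=True returns a generator, so Results are yielded one image at a time
for result in model(["image1.jpg", "image2.jpg"], stream=True):
    boxes = result.boxes  # bounding boxes for this image
    masks = result.masks  # segmentation masks (None for a detection model)
    result.show()  # display the annotated image
```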
@@ -109,8 +109,8 @@ YOLOv8 can process different types of input sources for inference, as shown in t
| image | `'image.jpg'` | `str` or `Path` | Single image file. |
| URL | `'https://ultralytics.com/images/bus.jpg'` | `str` | URL to an image. |
| screenshot | `'screen'` | `str` | Capture a screenshot. |
| PIL | `Image.open('im.jpg')` | `PIL.Image` | HWC format with RGB channels. |
| OpenCV | `cv2.imread('im.jpg')` | `np.ndarray` | HWC format with BGR channels `uint8 (0-255)`. |
| PIL | `Image.open('image.jpg')` | `PIL.Image` | HWC format with RGB channels. |
| OpenCV | `cv2.imread('image.jpg')` | `np.ndarray` | HWC format with BGR channels `uint8 (0-255)`. |
| numpy | `np.zeros((640,1280,3))` | `np.ndarray` | HWC format with BGR channels `uint8 (0-255)`. |
| torch | `torch.zeros(16,3,320,640)` | `torch.Tensor` | BCHW format with RGB channels `float32 (0.0-1.0)`. |
| CSV | `'sources.csv'` | `str` or `Path` | CSV file containing paths to images, videos, or directories. |
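
For context on the renamed table entries, a minimal sketch (the file name is a placeholder) that passes a PIL image and an OpenCV array to the model in the formats described above:

```python
import cv2
from PIL import Image

from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# PIL source: HWC format with RGB channels
results_pil = model(Image.open("image.jpg"))

# OpenCV source: HWC format with BGR channels, uint8 (0-255)
results_cv2 = model(cv2.imread("image.jpg"))
```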
@@ -710,16 +710,16 @@ When using YOLO models in a multi-threaded application, it's important to instan
from ultralytics import YOLO
def thread_safe_predict(image_path):
def thread_safe_predict(model, image_path):
"""Performs thread-safe prediction on an image using a locally instantiated YOLO model."""
local_model = YOLO("yolov8n.pt")
results = local_model.predict(image_path)
model = YOLO(model)
results = model.predict(image_path)
# Process results
# Starting threads that each have their own model instance
Thread(target=thread_safe_predict, args=("image1.jpg",)).start()
Thread(target=thread_safe_predict, args=("image2.jpg",)).start()
Thread(target=thread_safe_predict, args=("yolov8n.pt", "image1.jpg")).start()
Thread(target=thread_safe_predict, args=("yolov8n.pt", "image2.jpg")).start()
```
For an in-depth look at thread-safe inference with YOLO models and step-by-step instructions, please refer to our [YOLO Thread-Safe Inference Guide](../guides/yolo-thread-safe-inference.md). This guide will provide you with all the necessary information to avoid common pitfalls and ensure that your multi-threaded inference runs smoothly.
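
Reassembled from the added lines in this hunk, a runnable sketch of the updated thread-safe pattern; the trailing `join()` calls are added here for completeness and are not part of the diff:

```python
from threading import Thread

from ultralytics import YOLO


def thread_safe_predict(model, image_path):
    """Performs thread-safe prediction on an image using a locally instantiated YOLO model."""
    model = YOLO(model)  # each thread builds its own model from the weights name
    results = model.predict(image_path)
    # Process results


# Starting threads that each have their own model instance
threads = [
    Thread(target=thread_safe_predict, args=("yolov8n.pt", "image1.jpg")),
    Thread(target=thread_safe_predict, args=("yolov8n.pt", "image2.jpg")),
]
for t in threads:
    t.start()
for t in threads:
    t.join()  # wait for both predictions to finish
```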


@@ -290,63 +290,50 @@ Finally, after all threads have completed their task, the windows displaying the
from ultralytics import YOLO
# Define model names and video sources
MODEL_NAMES = ["yolov8n.pt", "yolov8n-seg.pt"]
SOURCES = ["path/to/video1.mp4", 0] # local video, 0 for webcam
def run_tracker_in_thread(filename, model, file_index):
def run_tracker_in_thread(model_name, filename, index):
"""
Runs a video file or webcam stream concurrently with the YOLOv8 model using threading.
This function captures video frames from a given file or camera source and utilizes the YOLOv8 model for object
tracking. The function runs in its own thread for concurrent processing.
Runs a video file or webcam stream concurrently with the YOLOv8 model using threading. This function captures video
frames from a given file or camera source and utilizes the YOLOv8 model for object tracking. The function runs in
its own thread for concurrent processing.
Args:
filename (str): The path to the video file or the identifier for the webcam/external camera source.
model (obj): The YOLOv8 model object.
file_index (int): An index to uniquely identify the file being processed, used for display purposes.
Note:
Press 'q' to quit the video display window.
index (int): An index to uniquely identify the file being processed, used for display purposes.
"""
video = cv2.VideoCapture(filename) # Read the video file
model = YOLO(model_name)
video = cv2.VideoCapture(filename)
while True:
ret, frame = video.read() # Read the video frames
# Exit the loop if no more frames in either video
ret, frame = video.read()
if not ret:
break
# Track objects in frames if available
results = model.track(frame, persist=True)
res_plotted = results[0].plot()
cv2.imshow(f"Tracking_Stream_{file_index}", res_plotted)
cv2.imshow(f"Tracking_Stream_{index}", res_plotted)
key = cv2.waitKey(1)
if key == ord("q"):
if cv2.waitKey(1) == ord("q"):
break
# Release video sources
video.release()
# Load the models
model1 = YOLO("yolov8n.pt")
model2 = YOLO("yolov8n-seg.pt")
# Create and start tracker threads using a for loop
tracker_threads = []
for i, (video_file, model_name) in enumerate(zip(SOURCES, MODEL_NAMES), start=1):
thread = threading.Thread(target=run_tracker_in_thread, args=(model_name, video_file, i), daemon=True)
tracker_threads.append(thread)
thread.start()
# Define the video files for the trackers
video_file1 = "path/to/video1.mp4" # Path to video file, 0 for webcam
video_file2 = 0 # Path to video file, 0 for webcam, 1 for external camera
# Create the tracker threads
tracker_thread1 = threading.Thread(target=run_tracker_in_thread, args=(video_file1, model1, 1), daemon=True)
tracker_thread2 = threading.Thread(target=run_tracker_in_thread, args=(video_file2, model2, 2), daemon=True)
# Start the tracker threads
tracker_thread1.start()
tracker_thread2.start()
# Wait for the tracker threads to finish
tracker_thread1.join()
tracker_thread2.join()
# Wait for all tracker threads to finish
for thread in tracker_threads:
thread.join()
# Clean up and close windows
cv2.destroyAllWindows()
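
Since old and new lines are interleaved in this hunk, the sketch below reassembles only the post-change code; the shortened docstring and the `threading`/`cv2` imports are inferred from usage rather than shown in the hunk:

```python
import threading

import cv2

from ultralytics import YOLO

# Define model names and video sources
MODEL_NAMES = ["yolov8n.pt", "yolov8n-seg.pt"]
SOURCES = ["path/to/video1.mp4", 0]  # local video, 0 for webcam


def run_tracker_in_thread(model_name, filename, index):
    """Run YOLOv8 tracking on a video file or webcam stream in its own thread."""
    model = YOLO(model_name)  # each thread instantiates its own model
    video = cv2.VideoCapture(filename)
    while True:
        ret, frame = video.read()
        if not ret:
            break
        results = model.track(frame, persist=True)
        res_plotted = results[0].plot()
        cv2.imshow(f"Tracking_Stream_{index}", res_plotted)
        if cv2.waitKey(1) == ord("q"):
            break
    video.release()


# Create and start tracker threads using a for loop
tracker_threads = []
for i, (video_file, model_name) in enumerate(zip(SOURCES, MODEL_NAMES), start=1):
    thread = threading.Thread(target=run_tracker_in_thread, args=(model_name, video_file, i), daemon=True)
    tracker_threads.append(thread)
    thread.start()

# Wait for all tracker threads to finish
for thread in tracker_threads:
    thread.join()

# Clean up and close windows
cv2.destroyAllWindows()
```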


@@ -44,8 +44,8 @@ Harness `detect.py` for versatile inference on various sources. It automatically
```bash
python detect.py --weights yolov5s.pt --source 0 # webcam
img.jpg # image
vid.mp4 # video
image.jpg # image
video.mp4 # video
screen # screenshot
path/ # directory
list.txt # list of images