Threaded inference docs improvements (#16313)

Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Glenn Jocher 2024-09-16 20:47:37 +02:00 committed by GitHub
parent 617d58d430
commit 7b19e0daa0
5 changed files with 70 additions and 95 deletions

@@ -290,63 +290,50 @@ Finally, after all threads have completed their task, the windows displaying the
 from ultralytics import YOLO
+# Define model names and video sources
+MODEL_NAMES = ["yolov8n.pt", "yolov8n-seg.pt"]
+SOURCES = ["path/to/video1.mp4", 0]  # local video, 0 for webcam
-def run_tracker_in_thread(filename, model, file_index):
+def run_tracker_in_thread(model_name, filename, index):
     """
-    Runs a video file or webcam stream concurrently with the YOLOv8 model using threading.
-    This function captures video frames from a given file or camera source and utilizes the YOLOv8 model for object
-    tracking. The function runs in its own thread for concurrent processing.
+    Runs a video file or webcam stream concurrently with the YOLOv8 model using threading. This function captures video
+    frames from a given file or camera source and utilizes the YOLOv8 model for object tracking. The function runs in
+    its own thread for concurrent processing.
     Args:
         filename (str): The path to the video file or the identifier for the webcam/external camera source.
-        model (obj): The YOLOv8 model object.
-        file_index (int): An index to uniquely identify the file being processed, used for display purposes.
-    Note:
-        Press 'q' to quit the video display window.
+        index (int): An index to uniquely identify the file being processed, used for display purposes.
     """
-    video = cv2.VideoCapture(filename)  # Read the video file
+    model = YOLO(model_name)
+    video = cv2.VideoCapture(filename)
     while True:
-        ret, frame = video.read()  # Read the video frames
-        # Exit the loop if no more frames in either video
+        ret, frame = video.read()
         if not ret:
             break
-        # Track objects in frames if available
         results = model.track(frame, persist=True)
         res_plotted = results[0].plot()
-        cv2.imshow(f"Tracking_Stream_{file_index}", res_plotted)
+        cv2.imshow(f"Tracking_Stream_{index}", res_plotted)
-        key = cv2.waitKey(1)
-        if key == ord("q"):
+        if cv2.waitKey(1) == ord("q"):
             break
-    # Release video sources
     video.release()
-# Load the models
-model1 = YOLO("yolov8n.pt")
-model2 = YOLO("yolov8n-seg.pt")
+# Create and start tracker threads using a for loop
+tracker_threads = []
+for i, (video_file, model_name) in enumerate(zip(SOURCES, MODEL_NAMES), start=1):
+    thread = threading.Thread(target=run_tracker_in_thread, args=(model_name, video_file, i), daemon=True)
+    tracker_threads.append(thread)
+    thread.start()
-# Define the video files for the trackers
-video_file1 = "path/to/video1.mp4"  # Path to video file, 0 for webcam
-video_file2 = 0  # Path to video file, 0 for webcam, 1 for external camera
-# Create the tracker threads
-tracker_thread1 = threading.Thread(target=run_tracker_in_thread, args=(video_file1, model1, 1), daemon=True)
-tracker_thread2 = threading.Thread(target=run_tracker_in_thread, args=(video_file2, model2, 2), daemon=True)
-# Start the tracker threads
-tracker_thread1.start()
-tracker_thread2.start()
-# Wait for the tracker threads to finish
-tracker_thread1.join()
-tracker_thread2.join()
+# Wait for all tracker threads to finish
+for thread in tracker_threads:
+    thread.join()
 # Clean up and close windows
 cv2.destroyAllWindows()
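
For readability, here is the refactored example assembled from the added ("+") lines of this hunk — a sketch of how the snippet reads once the change is applied, not an additional modification. The import threading and import cv2 lines are assumed from the surrounding docs example (they sit outside this hunk), and the model files and video path are placeholders.

import threading

import cv2

from ultralytics import YOLO

# Define model names and video sources
MODEL_NAMES = ["yolov8n.pt", "yolov8n-seg.pt"]
SOURCES = ["path/to/video1.mp4", 0]  # local video, 0 for webcam


def run_tracker_in_thread(model_name, filename, index):
    """
    Runs a video file or webcam stream concurrently with the YOLOv8 model using threading.

    Args:
        filename (str): The path to the video file or the identifier for the webcam/external camera source.
        index (int): An index to uniquely identify the file being processed, used for display purposes.
    """
    model = YOLO(model_name)  # each thread loads its own model instance
    video = cv2.VideoCapture(filename)

    while True:
        ret, frame = video.read()
        if not ret:
            break

        results = model.track(frame, persist=True)
        res_plotted = results[0].plot()
        cv2.imshow(f"Tracking_Stream_{index}", res_plotted)

        if cv2.waitKey(1) == ord("q"):
            break

    video.release()


# Create and start tracker threads using a for loop
tracker_threads = []
for i, (video_file, model_name) in enumerate(zip(SOURCES, MODEL_NAMES), start=1):
    thread = threading.Thread(target=run_tracker_in_thread, args=(model_name, video_file, i), daemon=True)
    tracker_threads.append(thread)
    thread.start()

# Wait for all tracker threads to finish
for thread in tracker_threads:
    thread.join()

# Clean up and close windows
cv2.destroyAllWindows()

Compared with the removed version, each thread now constructs its own YOLO model from a name in MODEL_NAMES instead of receiving a shared model object, and the per-source threads are created, started, and joined in loops rather than as hard-coded tracker_thread1/tracker_thread2 variables.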