Threaded inference docs improvements (#16313)
Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
parent 617d58d430
commit 7b19e0daa0

5 changed files with 70 additions and 95 deletions
README.md
@@ -87,14 +87,25 @@ YOLOv8 may also be used directly in a Python environment, and accepts the same [
 from ultralytics import YOLO
 
 # Load a model
-model = YOLO("yolov8n.yaml")  # build a new model from scratch
-model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)
+model = YOLO("yolov8n.pt")
 
-# Use the model
-model.train(data="coco8.yaml", epochs=3)  # train the model
-metrics = model.val()  # evaluate model performance on the validation set
-results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
-path = model.export(format="onnx")  # export the model to ONNX format
+# Train the model
+train_results = model.train(
+    data="coco8.yaml",  # path to dataset YAML
+    epochs=100,  # number of training epochs
+    imgsz=640,  # training image size
+    device="cpu",  # device to run on, i.e. device=0 or device=0,1,2,3 or device=cpu
+)
+
+# Evaluate model performance on the validation set
+metrics = model.val()
+
+# Perform object detection on an image
+results = model("path/to/image.jpg")
+results[0].show()
+
+# Export the model to ONNX format
+path = model.export(format="onnx")  # return path to exported model
 ```
 
 See YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python/) for more examples.
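A note alongside this hunk: the comment on the new `device` argument shows the string forms, and per the Ultralytics docs the Python API also accepts an int or a list of GPU indices. A minimal sketch, not part of this diff (the multi-GPU line assumes GPUs 0 and 1 exist):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# Single GPU by index; "cpu" works as in the README example above
model.train(data="coco8.yaml", epochs=100, imgsz=640, device=0)

# Multi-GPU training takes a list of indices (assumes GPUs 0 and 1 are present)
# model.train(data="coco8.yaml", epochs=100, imgsz=640, device=[0, 1])
```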
@@ -139,23 +150,6 @@ See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examp
 
 </details>
 
-<details><summary>Detection (Open Image V7)</summary>
-
-See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examples with these models trained on [Open Image V7](https://docs.ultralytics.com/datasets/detect/open-images-v7/), which include 600 pre-trained classes.
-
-| Model                                                                                     | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
-| ----------------------------------------------------------------------------------------- | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
-| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-oiv7.pt) | 640                   | 18.4                 | 142.4                          | 1.21                                | 3.5                | 10.5              |
-| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-oiv7.pt) | 640                   | 27.7                 | 183.1                          | 1.40                                | 11.4               | 29.7              |
-| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-oiv7.pt) | 640                   | 33.6                 | 408.5                          | 2.26                                | 26.2               | 80.6              |
-| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-oiv7.pt) | 640                   | 34.9                 | 596.9                          | 2.43                                | 44.1               | 167.4             |
-| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-oiv7.pt) | 640                   | 36.3                 | 860.6                          | 3.56                                | 68.7               | 260.6             |
-
-- **mAP<sup>val</sup>** values are for single-model single-scale on [Open Image V7](https://docs.ultralytics.com/datasets/detect/open-images-v7/) dataset. <br>Reproduce by `yolo val detect data=open-images-v7.yaml device=0`
-- **Speed** averaged over Open Image V7 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val detect data=open-images-v7.yaml batch=1 device=0|cpu`
-
-</details>
-
 <details><summary>Segmentation (COCO)</summary>
 
 See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage examples with these models trained on [COCO-Seg](https://docs.ultralytics.com/datasets/segment/coco/), which include 80 pre-trained classes.
@@ -89,14 +89,25 @@ YOLOv8 也可以在 Python 环境中直接使用,并接受与上述 CLI 示例
 from ultralytics import YOLO
 
 # 加载模型
-model = YOLO("yolov8n.yaml")  # 从头开始构建新模型
-model = YOLO("yolov8n.pt")  # 加载预训练模型(建议用于训练)
+model = YOLO("yolov8n.pt")
 
-# 使用模型
-model.train(data="coco8.yaml", epochs=3)  # 训练模型
-metrics = model.val()  # 在验证集上评估模型性能
-results = model("https://ultralytics.com/images/bus.jpg")  # 对图像进行预测
-success = model.export(format="onnx")  # 将模型导出为 ONNX 格式
+# 训练模型
+train_results = model.train(
+    data="coco8.yaml",  # 数据配置文件的路径
+    epochs=100,  # 训练的轮数
+    imgsz=640,  # 训练图像大小
+    device="cpu",  # 运行的设备,例如 device=0 或 device=0,1,2,3 或 device=cpu
+)
+
+# 在验证集上评估模型性能
+metrics = model.val()
+
+# 对图像进行目标检测
+results = model("path/to/image.jpg")
+results[0].show()
+
+# 将模型导出为 ONNX 格式
+path = model.export(format="onnx")  # 返回导出的模型路径
 ```
 
 查看 YOLOv8 [Python 文档](https://docs.ultralytics.com/usage/python/)以获取更多示例。
@@ -141,23 +152,6 @@ Ultralytics 提供了 YOLOv8 的交互式笔记本,涵盖训练、验证、跟
 
 </details>
 
-<details><summary>检测(Open Image V7)</summary>
-
-查看[检测文档](https://docs.ultralytics.com/tasks/detect/)以获取这些在[Open Image V7](https://docs.ultralytics.com/datasets/detect/open-images-v7/)上训练的模型的使用示例,其中包括600个预训练类别。
-
-| 模型                                                                                       | 尺寸<br><sup>(像素) | mAP<sup>验证<br>50-95 | 速度<br><sup>CPU ONNX<br>(毫秒) | 速度<br><sup>A100 TensorRT<br>(毫秒) | 参数<br><sup>(M) | 浮点运算<br><sup>(B) |
-| ----------------------------------------------------------------------------------------- | ------------------- | --------------------- | ------------------------------- | ------------------------------------ | ---------------- | -------------------- |
-| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-oiv7.pt) | 640                 | 18.4                  | 142.4                           | 1.21                                 | 3.5              | 10.5                 |
-| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-oiv7.pt) | 640                 | 27.7                  | 183.1                           | 1.40                                 | 11.4             | 29.7                 |
-| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-oiv7.pt) | 640                 | 33.6                  | 408.5                           | 2.26                                 | 26.2             | 80.6                 |
-| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-oiv7.pt) | 640                 | 34.9                  | 596.9                           | 2.43                                 | 44.1             | 167.4                |
-| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-oiv7.pt) | 640                 | 36.3                  | 860.6                           | 3.56                                 | 68.7             | 260.6                |
-
-- **mAP<sup>验证</sup>** 值适用于在[Open Image V7](https://docs.ultralytics.com/datasets/detect/open-images-v7/)数据集上的单模型单尺度。 <br>通过 `yolo val detect data=open-images-v7.yaml device=0` 以复现。
-- **速度** 在使用[Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/)实例对Open Image V7验证图像进行平均测算。 <br>通过 `yolo val detect data=open-images-v7.yaml batch=1 device=0|cpu` 以复现。
-
-</details>
-
 <details><summary>分割 (COCO)</summary>
 
 查看[分割文档](https://docs.ultralytics.com/tasks/segment/)以获取这些在[COCO-Seg](https://docs.ultralytics.com/datasets/segment/coco/)上训练的模型的使用示例,其中包括80个预训练类别。
@@ -61,7 +61,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m
 model = YOLO("yolov8n.pt")  # pretrained YOLOv8n model
 
 # Run batched inference on a list of images
-results = model(["im1.jpg", "im2.jpg"])  # return a list of Results objects
+results = model(["image1.jpg", "image2.jpg"])  # return a list of Results objects
 
 # Process results list
 for result in results:
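For readers following the renamed files, a short sketch of consuming the returned list, assuming the current `Results` API (`boxes`, `save()`) and placeholder image names:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # pretrained YOLOv8n model
results = model(["image1.jpg", "image2.jpg"])  # list of Results objects

for i, result in enumerate(results):
    boxes = result.boxes  # one Boxes object per image
    print(f"image{i + 1}.jpg: {len(boxes)} detections")
    print(boxes.xyxy)  # (N, 4) tensor of box corners
    print(boxes.conf)  # per-box confidence scores
    print(boxes.cls)  # per-box class indices
    result.save(filename=f"result{i + 1}.jpg")  # write annotated image to disk
```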
@@ -83,7 +83,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m
 model = YOLO("yolov8n.pt")  # pretrained YOLOv8n model
 
 # Run batched inference on a list of images
-results = model(["im1.jpg", "im2.jpg"], stream=True)  # return a generator of Results objects
+results = model(["image1.jpg", "image2.jpg"], stream=True)  # return a generator of Results objects
 
 # Process results generator
 for result in results:
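The `stream=True` variant matters most for long sources. A minimal sketch on a video path (placeholder filename), where the generator yields one frame's `Results` at a time instead of accumulating a list:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# Each iteration pulls one frame's Results; nothing accumulates in memory
for result in model("path/to/video.mp4", stream=True):
    print(f"{len(result.boxes)} objects in this frame")
```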
@@ -109,8 +109,8 @@ YOLOv8 can process different types of input sources for inference, as shown in t
 | image      | `'image.jpg'`                              | `str` or `Path` | Single image file.                                            |
 | URL        | `'https://ultralytics.com/images/bus.jpg'` | `str`           | URL to an image.                                              |
 | screenshot | `'screen'`                                 | `str`           | Capture a screenshot.                                         |
-| PIL        | `Image.open('im.jpg')`                     | `PIL.Image`     | HWC format with RGB channels.                                 |
-| OpenCV     | `cv2.imread('im.jpg')`                     | `np.ndarray`    | HWC format with BGR channels `uint8 (0-255)`.                 |
+| PIL        | `Image.open('image.jpg')`                  | `PIL.Image`     | HWC format with RGB channels.                                 |
+| OpenCV     | `cv2.imread('image.jpg')`                  | `np.ndarray`    | HWC format with BGR channels `uint8 (0-255)`.                 |
 | numpy      | `np.zeros((640,1280,3))`                   | `np.ndarray`    | HWC format with BGR channels `uint8 (0-255)`.                 |
 | torch      | `torch.zeros(16,3,320,640)`                | `torch.Tensor`  | BCHW format with RGB channels `float32 (0.0-1.0)`.            |
 | CSV        | `'sources.csv'`                            | `str` or `Path` | CSV file containing paths to images, videos, or directories.  |
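A quick sketch exercising two of the table's source types, assuming only that the placeholder files exist; the numpy shape and dtype follow the table's HWC `uint8` convention:

```python
import numpy as np
from PIL import Image

from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# numpy source: HWC layout, BGR channels, uint8 (0-255)
frame = np.zeros((640, 1280, 3), dtype=np.uint8)
results = model(frame)

# PIL source: HWC layout with RGB channels (placeholder filename)
results = model(Image.open("image.jpg"))
```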
@@ -710,16 +710,16 @@ When using YOLO models in a multi-threaded application, it's important to instan
 from ultralytics import YOLO
 
 
-def thread_safe_predict(image_path):
+def thread_safe_predict(model, image_path):
     """Performs thread-safe prediction on an image using a locally instantiated YOLO model."""
-    local_model = YOLO("yolov8n.pt")
-    results = local_model.predict(image_path)
+    model = YOLO(model)
+    results = model.predict(image_path)
     # Process results
 
 
 # Starting threads that each have their own model instance
-Thread(target=thread_safe_predict, args=("image1.jpg",)).start()
-Thread(target=thread_safe_predict, args=("image2.jpg",)).start()
+Thread(target=thread_safe_predict, args=("yolov8n.pt", "image1.jpg")).start()
+Thread(target=thread_safe_predict, args=("yolov8n.pt", "image2.jpg")).start()
 ```
 
 For an in-depth look at thread-safe inference with YOLO models and step-by-step instructions, please refer to our [YOLO Thread-Safe Inference Guide](../guides/yolo-thread-safe-inference.md). This guide will provide you with all the necessary information to avoid common pitfalls and ensure that your multi-threaded inference runs smoothly.
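The pass-the-weights-path pattern above scales past two hand-started threads. A sketch with `concurrent.futures` under the same assumptions (local `yolov8n.pt`, placeholder image names); the executor is illustrative, not part of the diff:

```python
from concurrent.futures import ThreadPoolExecutor

from ultralytics import YOLO


def thread_safe_predict(model_path, image_path):
    """Instantiate a fresh model inside the worker so no state is shared across threads."""
    model = YOLO(model_path)
    return model.predict(image_path)


# Each task builds its own model instance, mirroring the docs' thread-safe pattern
with ThreadPoolExecutor(max_workers=4) as pool:
    futures = [pool.submit(thread_safe_predict, "yolov8n.pt", f"image{i}.jpg") for i in range(1, 5)]
    results = [f.result() for f in futures]
```

Reloading weights per call keeps the snippet as minimal as the doc's example; a per-thread cache (e.g. `threading.local()`) would avoid repeated loads at the cost of more code.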
@@ -290,63 +290,50 @@ Finally, after all threads have completed their task, the windows displaying the
 
 from ultralytics import YOLO
 
+# Define model names and video sources
+MODEL_NAMES = ["yolov8n.pt", "yolov8n-seg.pt"]
+SOURCES = ["path/to/video1.mp4", 0]  # local video, 0 for webcam
 
-def run_tracker_in_thread(filename, model, file_index):
+
+def run_tracker_in_thread(model_name, filename, index):
     """
-    Runs a video file or webcam stream concurrently with the YOLOv8 model using threading.
-
-    This function captures video frames from a given file or camera source and utilizes the YOLOv8 model for object
-    tracking. The function runs in its own thread for concurrent processing.
+    Runs a video file or webcam stream concurrently with the YOLOv8 model using threading. This function captures video
+    frames from a given file or camera source and utilizes the YOLOv8 model for object tracking. The function runs in
+    its own thread for concurrent processing.
 
     Args:
         filename (str): The path to the video file or the identifier for the webcam/external camera source.
         model (obj): The YOLOv8 model object.
-        file_index (int): An index to uniquely identify the file being processed, used for display purposes.
-
-    Note:
-        Press 'q' to quit the video display window.
+        index (int): An index to uniquely identify the file being processed, used for display purposes.
     """
-    video = cv2.VideoCapture(filename)  # Read the video file
+    model = YOLO(model_name)
+    video = cv2.VideoCapture(filename)
 
     while True:
-        ret, frame = video.read()  # Read the video frames
+        ret, frame = video.read()
 
-        # Exit the loop if no more frames in either video
         if not ret:
             break
 
-        # Track objects in frames if available
         results = model.track(frame, persist=True)
         res_plotted = results[0].plot()
-        cv2.imshow(f"Tracking_Stream_{file_index}", res_plotted)
+        cv2.imshow(f"Tracking_Stream_{index}", res_plotted)
 
-        key = cv2.waitKey(1)
-        if key == ord("q"):
+        if cv2.waitKey(1) == ord("q"):
             break
 
-    # Release video sources
     video.release()
 
 
-# Load the models
-model1 = YOLO("yolov8n.pt")
-model2 = YOLO("yolov8n-seg.pt")
-
-# Define the video files for the trackers
-video_file1 = "path/to/video1.mp4"  # Path to video file, 0 for webcam
-video_file2 = 0  # Path to video file, 0 for webcam, 1 for external camera
-
-# Create the tracker threads
-tracker_thread1 = threading.Thread(target=run_tracker_in_thread, args=(video_file1, model1, 1), daemon=True)
-tracker_thread2 = threading.Thread(target=run_tracker_in_thread, args=(video_file2, model2, 2), daemon=True)
-
-# Start the tracker threads
-tracker_thread1.start()
-tracker_thread2.start()
-
-# Wait for the tracker threads to finish
-tracker_thread1.join()
-tracker_thread2.join()
+# Create and start tracker threads using a for loop
+tracker_threads = []
+for i, (video_file, model_name) in enumerate(zip(SOURCES, MODEL_NAMES), start=1):
+    thread = threading.Thread(target=run_tracker_in_thread, args=(model_name, video_file, i), daemon=True)
+    tracker_threads.append(thread)
+    thread.start()
+
+# Wait for all tracker threads to finish
+for thread in tracker_threads:
+    thread.join()
 
 # Clean up and close windows
 cv2.destroyAllWindows()
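Since the new fan-out pairs sources with models via `zip`, extending it is a list edit. The dependency-free sketch below shows the pairing and join logic with a stand-in worker; the third model name and the paths are illustrative, not part of the diff:

```python
import threading

# zip() stops at the shorter list, so keep the two lists the same length
MODEL_NAMES = ["yolov8n.pt", "yolov8n-seg.pt", "yolov8n-pose.pt"]
SOURCES = ["path/to/video1.mp4", 0, "path/to/video2.mp4"]  # files or camera indices


def run_tracker_in_thread(model_name, filename, index):
    """Stand-in for the tracker worker defined in the diff above."""
    print(f"stream {index}: {model_name} on {filename}")


tracker_threads = []
for i, (video_file, model_name) in enumerate(zip(SOURCES, MODEL_NAMES), start=1):
    thread = threading.Thread(target=run_tracker_in_thread, args=(model_name, video_file, i), daemon=True)
    tracker_threads.append(thread)
    thread.start()

for thread in tracker_threads:
    thread.join()
```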
@@ -44,8 +44,8 @@ Harness `detect.py` for versatile inference on various sources. It automatically
 
 ```bash
 python detect.py --weights yolov5s.pt --source 0  # webcam
-                                                  img.jpg  # image
-                                                  vid.mp4  # video
+                                                  image.jpg  # image
+                                                  video.mp4  # video
                                                   screen  # screenshot
                                                   path/  # directory
                                                   list.txt  # list of images