Simplify Solutions Docs code examples (#17493)

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>

parent 7da74600ad · commit cece2ee2cf

8 changed files with 74 additions and 501 deletions
````diff
@@ -45,6 +45,12 @@ This guide provides a comprehensive overview of three fundamental types of [data
 # generate the pie chart
 yolo solutions analytics analytics_type="pie" show=True

+# generate the bar plots
+yolo solutions analytics analytics_type="bar" show=True
+
+# generate the area plots
+yolo solutions analytics analytics_type="area" show=True
+
 ```

 === "Python"
````
````diff
@@ -56,9 +62,9 @@ This guide provides a comprehensive overview of three fundamental types of [data
 cap = cv2.VideoCapture("Path/to/video/file.mp4")
 assert cap.isOpened(), "Error reading video file"

 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

+# Video writer
 out = cv2.VideoWriter(
     "ultralytics_analytics.avi",
     cv2.VideoWriter_fourcc(*"MJPG"),
@@ -66,128 +72,15 @@ This guide provides a comprehensive overview of three fundamental types of [data
     (1920, 1080),  # This is fixed
 )

+# Init analytics
 analytics = solutions.Analytics(
-    analytics_type="line",
-    show=True,
-)
+    show=True,  # Display the output
+    analytics_type="line",  # Pass the analytics type, could be "pie", "bar" or "area".
+    model="yolo11n.pt",  # Path to the YOLO11 model file
+    # classes=[0, 2],  # If you want to count specific classes i.e person and car with COCO pretrained model.
+)

+# Process video
 frame_count = 0
 while cap.isOpened():
     success, im0 = cap.read()
````

The remainder of this hunk deletes the separate "Pie Chart", "Bar Plot", and "Area chart" Python tabs, each of which repeated the same video-capture, `cv2.VideoWriter`, and `analytics.process_data()` loop with only the `analytics_type` argument changed; the single commented example above now covers all four chart types.
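For reference, the consolidated analytics example this hunk converges on reads roughly as the sketch below, reassembled from the context and deleted lines shown above (the video path is a placeholder, and the loop body follows the `process_data` pattern visible in the removed tabs):

```python
import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("Path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"

w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

# Video writer with the fixed 1920x1080 output size used by the analytics plots
out = cv2.VideoWriter("ultralytics_analytics.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (1920, 1080))

# Init analytics; analytics_type can be "line", "pie", "bar" or "area"
analytics = solutions.Analytics(show=True, analytics_type="line", model="yolo11n.pt")

frame_count = 0
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    frame_count += 1
    im0 = analytics.process_data(im0, frame_count)  # update analytics graph every frame
    out.write(im0)  # write the annotated frame

cap.release()
out.release()
cv2.destroyAllWindows()
```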
````diff
@@ -55,6 +55,7 @@ Measuring the gap between two objects is known as distance calculation within a
 # Init distance-calculation obj
 distance = solutions.DistanceCalculation(model="yolo11n.pt", show=True)

+# Process video
 while cap.isOpened():
     success, im0 = cap.read()
     if not success:
````
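A minimal end-to-end sketch of how this initializer is typically driven (the video path is a placeholder, and the per-frame call `distance.calculate(im0)` is an assumption about the method name in this release, mirroring the other solutions' per-frame methods):

```python
import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("Path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"

# Init distance-calculation obj
distance = solutions.DistanceCalculation(model="yolo11n.pt", show=True)

# Process video
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    im0 = distance.calculate(im0)  # assumed per-frame API, see note above

cap.release()
cv2.destroyAllWindows()
```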
````diff
@@ -47,6 +47,9 @@ A heatmap generated with [Ultralytics YOLO11](https://github.com/ultralytics/ult

 # Pass a custom colormap
 yolo solutions heatmap colormap=cv2.COLORMAP_INFERNO
+
+# Heatmaps + object counting
+yolo solutions heatmap region=[(20, 400), (1080, 404), (1080, 360), (20, 360)]
 ```

 === "Python"
````
````diff
@@ -63,161 +66,24 @@ A heatmap generated with [Ultralytics YOLO11](https://github.com/ultralytics/ult
 # Video writer
 video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

+# In case you want to apply object counting + heatmaps, you can pass region points.
+# region_points = [(20, 400), (1080, 404)]  # Define line points
+# region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]  # Define region points
+# region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360), (20, 400)]  # Define polygon points
+
 # Init heatmap
 heatmap = solutions.Heatmap(
-    show=True,
-    model="yolo11n.pt",
-    colormap=cv2.COLORMAP_PARULA,
-    region=line_points,
-)
+    show=True,  # Display the output
+    model="yolo11n.pt",  # Path to the YOLO11 model file
+    colormap=cv2.COLORMAP_PARULA,  # Colormap of heatmap
+    # region=region_points,  # If you want to do object counting with heatmaps, you can pass region_points
+    # classes=[0, 2],  # If you want to generate heatmap for specific classes i.e person and car.
+    # show_in=True,  # Display in counts
+    # show_out=True,  # Display out counts
+    # line_width=2,  # Adjust the line width for bounding boxes and text display
+)

+# Process video
 while cap.isOpened():
     success, im0 = cap.read()
     if not success:
````

The remainder of this hunk deletes the separate "Line Counting", "Polygon Counting", "Region Counting", and "Specific Classes" Python tabs, each of which repeated the same capture, writer, and `heatmap.generate_heatmap()` loop with only the `region` or `classes` arguments changed; the single commented example above now covers those variants.
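Reassembled for reference, the consolidated heatmap example reads roughly as follows (the video path is a placeholder; the loop body uses the `generate_heatmap` call visible in the deleted tabs):

```python
import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("Path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

# Video writer
video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

# Init heatmap; pass region=... to combine heatmaps with object counting
heatmap = solutions.Heatmap(show=True, model="yolo11n.pt", colormap=cv2.COLORMAP_PARULA)

# Process video
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        print("Video frame is empty or video processing has been successfully completed.")
        break
    im0 = heatmap.generate_heatmap(im0)  # overlay the accumulated heatmap on the frame
    video_writer.write(im0)

cap.release()
video_writer.release()
cv2.destroyAllWindows()
```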
````diff
@@ -73,165 +73,22 @@ Object counting with [Ultralytics YOLO11](https://github.com/ultralytics/ultraly
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Define region points
-region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
+# region_points = [(20, 400), (1080, 400)]  # For line counting
+region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]  # For rectangle region counting
+# region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360), (20, 400)]  # For polygon region counting

 # Video writer
 video_writer = cv2.VideoWriter("object_counting_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 # Init Object Counter
 counter = solutions.ObjectCounter(
-    show=True,
-    region=region_points,
-    model="yolo11n.pt",
-)
+    show=True,  # Display the output
+    region=region_points,  # Pass region points
+    model="yolo11n.pt",  # model="yolo11n-obb.pt" for object counting using YOLO11 OBB model.
+    # classes=[0, 2],  # If you want to count specific classes i.e person and car with COCO pretrained model.
+    # show_in=True,  # Display in counts
+    # show_out=True,  # Display out counts
+    # line_width=2,  # Adjust the line width for bounding boxes and text display
+)

 # Process video
````

The remainder of this hunk deletes the separate "OBB Object Counting", "Count in Polygon", "Count in Line", and "Specific Classes" Python tabs, each of which repeated the same capture, writer, and `counter.count()` loop with only the `region`, `model`, or `classes` arguments changed; the commented example above now covers those variants.
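Reassembled for reference, the consolidated counting example reads roughly as follows (the video path is a placeholder; the loop body uses the `counter.count()` call visible in the deleted tabs):

```python
import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

# Define region points (rectangle); a 2-point line or a closed polygon also works
region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]

# Video writer
video_writer = cv2.VideoWriter("object_counting_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

# Init Object Counter
counter = solutions.ObjectCounter(show=True, region=region_points, model="yolo11n.pt")

# Process video
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        print("Video frame is empty or video processing has been successfully completed.")
        break
    im0 = counter.count(im0)  # count objects crossing/inside the region and annotate the frame
    video_writer.write(im0)

cap.release()
video_writer.release()
cv2.destroyAllWindows()
```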
````diff
@@ -60,53 +60,23 @@ Queue management using [Ultralytics YOLO11](https://github.com/ultralytics/ultra
 assert cap.isOpened(), "Error reading video file"
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

+# Video writer
 video_writer = cv2.VideoWriter("queue_management.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

-queue_region = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
+# Define queue region points
+queue_region = [(20, 400), (1080, 404), (1080, 360), (20, 360)]  # Define queue region points
+# queue_region = [(20, 400), (1080, 404), (1080, 360), (20, 360), (20, 400)]  # Define queue polygon points

+# Init Queue Manager
 queue = solutions.QueueManager(
-    model="yolo11n.pt",
-    region=queue_region,
-)
+    show=True,  # Display the output
+    model="yolo11n.pt",  # Path to the YOLO11 model file
+    region=queue_region,  # Pass queue region points
+    # classes=[0, 2],  # If you want to count specific classes i.e person and car with COCO pretrained model.
+    # line_width=2,  # Adjust the line width for bounding boxes and text display
+)

+# Process video
 while cap.isOpened():
     success, im0 = cap.read()
````

The remainder of this hunk deletes the original processing loop and the "Queue Manager Specific Classes" tab, which repeated the same capture, writer, and `queue.process_queue()` loop with a `classes=3` filter.
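Reassembled for reference, the consolidated queue-management example reads roughly as follows (the video path is a placeholder; the loop mirrors the `queue.process_queue()` call visible in the deleted lines):

```python
import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("Path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

# Video writer
video_writer = cv2.VideoWriter("queue_management.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

# Define queue region points
queue_region = [(20, 400), (1080, 404), (1080, 360), (20, 360)]

# Init Queue Manager
queue = solutions.QueueManager(show=True, model="yolo11n.pt", region=queue_region)

# Process video
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        print("Video frame is empty or video processing has been successfully completed.")
        break
    out = queue.process_queue(im0)  # annotate im0 with queue counts for the region
    video_writer.write(im0)

cap.release()
video_writer.release()
cv2.destroyAllWindows()
```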
````diff
@@ -61,16 +61,24 @@ keywords: Ultralytics YOLO11, speed estimation, object tracking, computer vision
 from ultralytics import solutions

 cap = cv2.VideoCapture("Path/to/video/file.mp4")

 assert cap.isOpened(), "Error reading video file"
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

+# Video writer
 video_writer = cv2.VideoWriter("speed_management.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

+# Define speed region points
 speed_region = [(20, 400), (1080, 404), (1080, 360), (20, 360)]

-speed = solutions.SpeedEstimator(model="yolo11n.pt", region=speed_region, show=True)
+speed = solutions.SpeedEstimator(
+    show=True,  # Display the output
+    model="yolo11n-pose.pt",  # Path to the YOLO11 model file.
+    region=speed_region,  # Pass region points
+    # classes=[0, 2],  # If you want to estimate speed of specific classes.
+    # line_width=2,  # Adjust the line width for bounding boxes and text display
+)

+# Process video
 while cap.isOpened():
     success, im0 = cap.read()
````
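A minimal sketch of the rest of this loop, assuming the `SpeedEstimator.estimate_speed()` per-frame method of this release (the method name is an assumption, it is not shown in the hunk):

```python
# Process video
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        print("Video frame is empty or video processing has been successfully completed.")
        break
    im0 = speed.estimate_speed(im0)  # assumed per-frame API; annotates speeds for tracked objects
    video_writer.write(im0)

cap.release()
video_writer.release()
cv2.destroyAllWindows()
```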
````diff
@@ -40,6 +40,12 @@ Streamlit makes it simple to build and deploy interactive web applications. Comb

 !!! example "Streamlit Application"

+    === "CLI"
+
+        ```bash
+        yolo streamlit-predict
+        ```
+
     === "Python"

         ```python
@@ -50,12 +56,6 @@ Streamlit makes it simple to build and deploy interactive web applications. Comb
         ### Make sure to run the file using command `streamlit run <file-name.py>`
         ```

-    === "CLI"
-
-        ```bash
-        yolo streamlit-predict
-        ```
-
 This will launch the Streamlit application in your default web browser. You will see the main title, subtitle, and the sidebar with configuration options. Select your desired YOLO11 model, set the confidence and NMS thresholds, and click the "Start" button to begin the real-time object detection.

 You can optionally supply a specific model in Python:
````
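A short sketch of what that looks like, assuming the `solutions.inference()` helper of this release (the exact entry point and the model path are assumptions):

```python
from ultralytics import solutions

# Launch the Streamlit inference app with a specific model.
# Run this file with: streamlit run <file-name.py>
solutions.inference(model="yolo11n.pt")  # assumed helper; any YOLO11 weights path can be passed
```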
````diff
@@ -60,40 +60,18 @@ Monitoring workouts through pose estimation with [Ultralytics YOLO11](https://gi
 assert cap.isOpened(), "Error reading video file"
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

+# Video writer
 video_writer = cv2.VideoWriter("workouts.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

+# Init AIGym
 gym = solutions.AIGym(
-    model="yolo11n-pose.pt",
-    show=True,
-    kpts=[6, 8, 10],
-)
+    show=True,  # Display the frame
+    kpts=[6, 8, 10],  # keypoints index of person for monitoring specific exercise, by default it's for pushup
+    model="yolo11n-pose.pt",  # Path to the YOLO11 pose estimation model file
+    # line_width=2,  # Adjust the line width for bounding boxes and text display
+)

+# Process video
 while cap.isOpened():
     success, im0 = cap.read()
     if not success:
````

The remainder of this hunk deletes the first AIGym example's processing loop and the separate "Workouts Monitoring with Save Output" tab, which repeated the same capture, writer, and `gym.monitor()` loop; the single example above, which includes the video writer, replaces both.
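Reassembled for reference, the consolidated workouts-monitoring example reads roughly as follows (the video path is a placeholder; the loop uses the `gym.monitor()` call visible in the deleted lines):

```python
import cv2

from ultralytics import solutions

cap = cv2.VideoCapture("path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

# Video writer
video_writer = cv2.VideoWriter("workouts.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

# Init AIGym; kpts=[6, 8, 10] are the keypoints for the monitored exercise (pushup by default)
gym = solutions.AIGym(show=True, kpts=[6, 8, 10], model="yolo11n-pose.pt")

# Process video
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        print("Video frame is empty or video processing has been successfully completed.")
        break
    im0 = gym.monitor(im0)  # count reps from pose keypoints and annotate the frame
    video_writer.write(im0)

cap.release()
video_writer.release()
cv2.destroyAllWindows()
```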