Add real-world projects in Ultralytics + guides in Docs (#6695)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
parent 9618025416
commit 8c4094e7d9
13 changed files with 869 additions and 23 deletions
130 ultralytics/solutions/ai_gym.py Normal file
@@ -0,0 +1,130 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license

import cv2

from ultralytics.utils.plotting import Annotator


class AIGym:
    """A class to manage the gym steps of people in a real-time video stream based on their poses."""

    def __init__(self):
        """Initializes the AIGym with default values for visual and image parameters."""

        # Image and line thickness
        self.im0 = None
        self.tf = None

        # Keypoints and count information
        self.keypoints = None
        self.poseup_angle = None
        self.posedown_angle = None
        self.threshold = 0.001

        # Store stage, count and angle information
        self.angle = None
        self.count = None
        self.stage = None
        self.pose_type = 'pushup'
        self.kpts_to_check = None

        # Visual information
        self.view_img = False
        self.annotator = None

    def set_args(self,
                 kpts_to_check,
                 line_thickness=2,
                 view_img=False,
                 pose_up_angle=145.0,
                 pose_down_angle=90.0,
                 pose_type='pullup'):
        """
        Configures the AIGym line thickness, display, angle thresholds and pose type.

        Args:
            kpts_to_check (list): 3 keypoint indices used to compute the joint angle for counting.
            line_thickness (int): Line thickness for bounding boxes.
            view_img (bool): Whether to display the annotated frame.
            pose_up_angle (float): Angle threshold that marks the "up" pose position.
            pose_down_angle (float): Angle threshold that marks the "down" pose position.
            pose_type (str): Exercise type, one of "pushup", "pullup" or "abworkout".
        """
        self.kpts_to_check = kpts_to_check
        self.tf = line_thickness
        self.view_img = view_img
        self.poseup_angle = pose_up_angle
        self.posedown_angle = pose_down_angle
        self.pose_type = pose_type

    def start_counting(self, im0, results, frame_count):
        """
        Counts exercise repetitions from pose estimation results for the current frame.

        Args:
            im0 (ndarray): Current frame from the video stream.
            results (list): Pose estimation results for the current frame.
            frame_count (int): Current frame count.
        """
        self.im0 = im0
        if frame_count == 1:
            self.count = [0] * len(results[0])
            self.angle = [0] * len(results[0])
            self.stage = ['-' for _ in results[0]]
        self.keypoints = results[0].keypoints.data
        self.annotator = Annotator(im0, line_width=2)

        for ind, k in enumerate(reversed(self.keypoints)):
            if self.pose_type in ('pushup', 'pullup'):
                self.angle[ind] = self.annotator.estimate_pose_angle(k[int(self.kpts_to_check[0])].cpu(),
                                                                     k[int(self.kpts_to_check[1])].cpu(),
                                                                     k[int(self.kpts_to_check[2])].cpu())
                self.im0 = self.annotator.draw_specific_points(k, self.kpts_to_check, shape=(640, 640), radius=10)

            if self.pose_type == 'abworkout':
                self.angle[ind] = self.annotator.estimate_pose_angle(k[int(self.kpts_to_check[0])].cpu(),
                                                                     k[int(self.kpts_to_check[1])].cpu(),
                                                                     k[int(self.kpts_to_check[2])].cpu())
                self.im0 = self.annotator.draw_specific_points(k, self.kpts_to_check, shape=(640, 640), radius=10)
                if self.angle[ind] > self.poseup_angle:
                    self.stage[ind] = 'down'
                if self.angle[ind] < self.posedown_angle and self.stage[ind] == 'down':
                    self.stage[ind] = 'up'
                    self.count[ind] += 1
                self.annotator.plot_angle_and_count_and_stage(angle_text=self.angle[ind],
                                                              count_text=self.count[ind],
                                                              stage_text=self.stage[ind],
                                                              center_kpt=k[int(self.kpts_to_check[1])],
                                                              line_thickness=self.tf)

            if self.pose_type == 'pushup':
                if self.angle[ind] > self.poseup_angle:
                    self.stage[ind] = 'up'
                if self.angle[ind] < self.posedown_angle and self.stage[ind] == 'up':
                    self.stage[ind] = 'down'
                    self.count[ind] += 1
                self.annotator.plot_angle_and_count_and_stage(angle_text=self.angle[ind],
                                                              count_text=self.count[ind],
                                                              stage_text=self.stage[ind],
                                                              center_kpt=k[int(self.kpts_to_check[1])],
                                                              line_thickness=self.tf)
            if self.pose_type == 'pullup':
                if self.angle[ind] > self.poseup_angle:
                    self.stage[ind] = 'down'
                if self.angle[ind] < self.posedown_angle and self.stage[ind] == 'down':
                    self.stage[ind] = 'up'
                    self.count[ind] += 1
                self.annotator.plot_angle_and_count_and_stage(angle_text=self.angle[ind],
                                                              count_text=self.count[ind],
                                                              stage_text=self.stage[ind],
                                                              center_kpt=k[int(self.kpts_to_check[1])],
                                                              line_thickness=self.tf)

            self.annotator.kpts(k, shape=(640, 640), radius=1, kpt_line=True)

        if self.view_img:
            cv2.imshow('Ultralytics YOLOv8 AI GYM', self.im0)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                return


if __name__ == '__main__':
    AIGym()
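A minimal usage sketch for the new AIGym solution, not part of the committed file: it assumes a YOLOv8 pose checkpoint ('yolov8n-pose.pt'), an illustrative local video path ('workout.mp4') and example keypoint indices [6, 8, 10]; these names and values are placeholders, not taken from this commit.

# Hypothetical AIGym usage sketch (assumes a YOLOv8 pose model and a local video file)
import cv2
from ultralytics import YOLO
from ultralytics.solutions.ai_gym import AIGym

model = YOLO('yolov8n-pose.pt')  # pose model; checkpoint name is an assumption
cap = cv2.VideoCapture('workout.mp4')  # illustrative video path

gym = AIGym()
gym.set_args(kpts_to_check=[6, 8, 10],  # example keypoint indices for one arm
             line_thickness=2,
             view_img=True,
             pose_type='pushup')

frame_count = 0
while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    frame_count += 1
    results = model.predict(im0, verbose=False)  # pose estimation for the current frame
    gym.start_counting(im0, results, frame_count)  # update angle, stage and rep count

cap.release()
cv2.destroyAllWindows()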
165 ultralytics/solutions/object_counter.py Normal file
@@ -0,0 +1,165 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license

from collections import defaultdict

import cv2

from ultralytics.utils.checks import check_requirements
from ultralytics.utils.plotting import Annotator, colors

check_requirements('shapely>=2.0.0')

from shapely.geometry import Polygon
from shapely.geometry.point import Point


class ObjectCounter:
    """A class to manage the counting of objects in a real-time video stream based on their tracks."""

    def __init__(self):
        """Initializes the Counter with default values for various tracking and counting parameters."""

        # Mouse events
        self.is_drawing = False
        self.selected_point = None

        # Region information
        self.reg_pts = None
        self.counting_region = None
        self.region_color = (255, 255, 255)

        # Image and annotation information
        self.im0 = None
        self.tf = None
        self.view_img = False

        self.names = None  # Class names
        self.annotator = None  # Annotator

        # Object counting information
        self.in_counts = 0
        self.out_counts = 0
        self.counting_list = []

        # Tracks info
        self.track_history = defaultdict(list)
        self.track_thickness = 2
        self.draw_tracks = False

    def set_args(self,
                 classes_names,
                 reg_pts,
                 region_color=None,
                 line_thickness=2,
                 track_thickness=2,
                 view_img=False,
                 draw_tracks=False):
        """
        Configures the Counter's image, bounding box line thickness, and counting region points.

        Args:
            classes_names (dict): Class names used to label the bounding boxes.
            reg_pts (list): Initial list of points defining the counting region.
            region_color (tuple): Color of the counting region line.
            line_thickness (int): Line thickness for bounding boxes.
            track_thickness (int): Thickness of the drawn track lines.
            view_img (bool): Flag to control whether to display the video stream.
            draw_tracks (bool): Flag to control whether to draw the object tracks.
        """
        self.tf = line_thickness
        self.view_img = view_img
        self.track_thickness = track_thickness
        self.draw_tracks = draw_tracks
        self.reg_pts = reg_pts
        self.counting_region = Polygon(self.reg_pts)
        self.names = classes_names
        self.region_color = region_color if region_color else self.region_color

    def mouse_event_for_region(self, event, x, y, flags, params):
        """
        Moves the counting region in response to mouse events in a real-time video stream.

        Args:
            event (int): The type of mouse event (e.g., cv2.EVENT_MOUSEMOVE, cv2.EVENT_LBUTTONDOWN, etc.).
            x (int): The x-coordinate of the mouse pointer.
            y (int): The y-coordinate of the mouse pointer.
            flags (int): Any flags associated with the event (e.g., cv2.EVENT_FLAG_CTRLKEY,
                cv2.EVENT_FLAG_SHIFTKEY, etc.).
            params (dict): Additional parameters passed to the callback.
        """
        if event == cv2.EVENT_LBUTTONDOWN:
            for i, point in enumerate(self.reg_pts):
                if isinstance(point, (tuple, list)) and len(point) >= 2:
                    if abs(x - point[0]) < 10 and abs(y - point[1]) < 10:
                        self.selected_point = i
                        self.is_drawing = True
                        break

        elif event == cv2.EVENT_MOUSEMOVE:
            if self.is_drawing and self.selected_point is not None:
                self.reg_pts[self.selected_point] = (x, y)
                self.counting_region = Polygon(self.reg_pts)

        elif event == cv2.EVENT_LBUTTONUP:
            self.is_drawing = False
            self.selected_point = None

    def extract_and_process_tracks(self, tracks):
        """Extracts track data, annotates boxes and trails, and updates the in/out counts."""
        boxes = tracks[0].boxes.xyxy.cpu()
        clss = tracks[0].boxes.cls.cpu().tolist()
        track_ids = tracks[0].boxes.id.int().cpu().tolist()

        self.annotator = Annotator(self.im0, self.tf, self.names)
        self.annotator.draw_region(reg_pts=self.reg_pts, color=(0, 255, 0))

        for box, track_id, cls in zip(boxes, track_ids, clss):
            self.annotator.box_label(box, label=self.names[cls], color=colors(int(cls), True))  # Draw bounding box

            # Draw tracks
            track_line = self.track_history[track_id]
            track_line.append((float((box[0] + box[2]) / 2), float((box[1] + box[3]) / 2)))
            if len(track_line) > 30:
                track_line.pop(0)

            if self.draw_tracks:
                self.annotator.draw_centroid_and_tracks(track_line,
                                                        color=(0, 255, 0),
                                                        track_thickness=self.track_thickness)

            # Count objects
            if self.counting_region.contains(Point(track_line[-1])):
                if track_id not in self.counting_list:
                    self.counting_list.append(track_id)
                    if box[0] < self.counting_region.centroid.x:
                        self.out_counts += 1
                    else:
                        self.in_counts += 1

        if self.view_img:
            incount_label = f'InCount : {self.in_counts}'
            outcount_label = f'OutCount : {self.out_counts}'
            self.annotator.count_labels(in_count=incount_label, out_count=outcount_label)
            cv2.namedWindow('Ultralytics YOLOv8 Object Counter')
            cv2.setMouseCallback('Ultralytics YOLOv8 Object Counter', self.mouse_event_for_region,
                                 {'region_points': self.reg_pts})
            cv2.imshow('Ultralytics YOLOv8 Object Counter', self.im0)
            # Close the window when 'q' is pressed
            if cv2.waitKey(1) & 0xFF == ord('q'):
                return

    def start_counting(self, im0, tracks):
        """
        Main function to start the object counting process.

        Args:
            im0 (ndarray): Current frame from the video stream.
            tracks (list): List of tracks obtained from the object tracking process.
        """
        self.im0 = im0  # Store image
        if tracks[0].boxes.id is None:
            return
        self.extract_and_process_tracks(tracks)


if __name__ == '__main__':
    ObjectCounter()
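A minimal usage sketch for the new ObjectCounter solution, not part of the committed file: it assumes a YOLOv8 detection checkpoint ('yolov8n.pt'), an illustrative video path ('traffic.mp4') and example region points; these names and values are placeholders, not taken from this commit.

# Hypothetical ObjectCounter usage sketch (assumes a YOLOv8 detection model and a local video file)
import cv2
from ultralytics import YOLO
from ultralytics.solutions.object_counter import ObjectCounter

model = YOLO('yolov8n.pt')  # detection model; checkpoint name is an assumption
cap = cv2.VideoCapture('traffic.mp4')  # illustrative video path

region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]  # example counting region polygon
counter = ObjectCounter()
counter.set_args(classes_names=model.names,
                 reg_pts=region_points,
                 view_img=True,
                 draw_tracks=True)

while cap.isOpened():
    success, im0 = cap.read()
    if not success:
        break
    tracks = model.track(im0, persist=True, show=False)  # tracking results with persistent IDs
    counter.start_counting(im0, tracks)  # annotate frame and update in/out counts

cap.release()
cv2.destroyAllWindows()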