Add https://youtu.be/LGGxqLZtvuw to docs & bbox dimension retrieval utilities. (#9679)

Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
This commit is contained in:
Muhammad Rizwan Munawar 2024-04-05 18:25:58 +05:00 committed by GitHub
parent e597bed2f2
commit 99c61d6f7b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 65 additions and 11 deletions

View file

@ -8,6 +8,18 @@ keywords: Ultralytics, YOLOv8, Object Detection, Pose Estimation, PushUps, PullU
Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics/) enhances exercise assessment by accurately tracking key body landmarks and joints in real-time. This technology provides instant feedback on exercise form, tracks workout routines, and measures performance metrics, optimizing training sessions for users and trainers alike.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/LGGxqLZtvuw"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Workouts Monitoring using Ultralytics YOLOv8 | Pushups, Pullups, Ab Workouts
</p>
## Advantages of Workouts Monitoring?
- **Optimized Performance:** Tailoring workouts based on monitoring data for better results.

View file

@ -59,6 +59,28 @@ convert_coco(#(1)!
For additional information about the `convert_coco` function, [visit the reference page](../reference/data/converter.md#ultralytics.data.converter.convert_coco)
### Get Bounding Box Dimensions
```{.py .annotate }
import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator

model = YOLO('yolov8n.pt')  # Load pretrained or fine-tuned model

# Process the image
source = cv2.imread('path/to/image.jpg')
results = model(source)

# Extract results
annotator = Annotator(source, example=model.names)

for box in results[0].boxes.xyxy.cpu():
    width, height, area = annotator.get_bbox_dimension(box)
    print("Bounding Box Width {}, Height {}, Area {}".format(
        width.item(), height.item(), area.item()))
```
### Convert Bounding Boxes to Segments
With existing `x y w h` bounding box data, convert to segments using the `yolo_bbox2segment` function. The files for images and annotations need to be organized like this:

View file

@ -339,6 +339,21 @@ class Annotator:
"""Save the annotated image to 'filename'.""" """Save the annotated image to 'filename'."""
cv2.imwrite(filename, np.asarray(self.im)) cv2.imwrite(filename, np.asarray(self.im))
def get_bbox_dimension(self, bbox=None):
    """
    Calculate the width, height, and area of a bounding box.

    Args:
        bbox (tuple): Bounding box coordinates in the format (x_min, y_min, x_max, y_max).
            NOTE(review): the default of None cannot be unpacked and will raise a
            TypeError; callers must always supply a 4-element box.

    Returns:
        (tuple): A 3-tuple of (width, height, area) where width = x_max - x_min,
            height = y_max - y_min, and area = width * height. Element types mirror
            the input coordinate types (e.g. floats or tensor scalars).
    """
    # Unpack corner coordinates; raises if bbox is None or not length 4.
    x_min, y_min, x_max, y_max = bbox
    width = x_max - x_min
    height = y_max - y_min
    return width, height, width * height
def draw_region(self, reg_pts=None, color=(0, 255, 0), thickness=5): def draw_region(self, reg_pts=None, color=(0, 255, 0), thickness=5):
""" """
Draw region line. Draw region line.
@ -364,13 +379,22 @@ class Annotator:
cv2.circle(self.im, (int(track[-1][0]), int(track[-1][1])), track_thickness * 2, color, -1) cv2.circle(self.im, (int(track[-1][0]), int(track[-1][1])), track_thickness * 2, color, -1)
def queue_counts_display(self, label, points=None, region_color=(255, 255, 255), txt_color=(0, 0, 0), fontsize=0.7): def queue_counts_display(self, label, points=None, region_color=(255, 255, 255), txt_color=(0, 0, 0), fontsize=0.7):
"""Displays queue counts on an image centered at the points with customizable font size and colors.""" """
Displays queue counts on an image centered at the points with customizable font size and colors.
Args:
label (str): queue counts label
points (tuple): region points for center point calculation to display text
region_color (RGB): queue region color
txt_color (RGB): text display color
fontsize (float): text fontsize
"""
x_values = [point[0] for point in points] x_values = [point[0] for point in points]
y_values = [point[1] for point in points] y_values = [point[1] for point in points]
center_x = sum(x_values) // len(points) center_x = sum(x_values) // len(points)
center_y = sum(y_values) // len(points) center_y = sum(y_values) // len(points)
text_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, fontScale=fontsize, thickness=self.tf)[0] text_size = cv2.getTextSize(label, 0, fontScale=fontsize, thickness=self.tf)[0]
text_width = text_size[0] text_width = text_size[0]
text_height = text_size[1] text_height = text_size[1]
@ -388,7 +412,7 @@ class Annotator:
self.im, self.im,
label, label,
(text_x, text_y), (text_x, text_y),
cv2.FONT_HERSHEY_SIMPLEX, 0,
fontScale=fontsize, fontScale=fontsize,
color=txt_color, color=txt_color,
thickness=self.tf, thickness=self.tf,
@ -595,30 +619,26 @@ class Annotator:
line_color (RGB): Distance line color. line_color (RGB): Distance line color.
centroid_color (RGB): Bounding box centroid color. centroid_color (RGB): Bounding box centroid color.
""" """
(text_width_m, text_height_m), _ = cv2.getTextSize( (text_width_m, text_height_m), _ = cv2.getTextSize(f"Distance M: {distance_m:.2f}m", 0, 0.8, 2)
f"Distance M: {distance_m:.2f}m", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2
)
cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 10, 25 + text_height_m + 20), (255, 255, 255), -1) cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 10, 25 + text_height_m + 20), (255, 255, 255), -1)
cv2.putText( cv2.putText(
self.im, self.im,
f"Distance M: {distance_m:.2f}m", f"Distance M: {distance_m:.2f}m",
(20, 50), (20, 50),
cv2.FONT_HERSHEY_SIMPLEX, 0,
0.8, 0.8,
(0, 0, 0), (0, 0, 0),
2, 2,
cv2.LINE_AA, cv2.LINE_AA,
) )
(text_width_mm, text_height_mm), _ = cv2.getTextSize( (text_width_mm, text_height_mm), _ = cv2.getTextSize(f"Distance MM: {distance_mm:.2f}mm", 0, 0.8, 2)
f"Distance MM: {distance_mm:.2f}mm", cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2
)
cv2.rectangle(self.im, (15, 75), (15 + text_width_mm + 10, 75 + text_height_mm + 20), (255, 255, 255), -1) cv2.rectangle(self.im, (15, 75), (15 + text_width_mm + 10, 75 + text_height_mm + 20), (255, 255, 255), -1)
cv2.putText( cv2.putText(
self.im, self.im,
f"Distance MM: {distance_mm:.2f}mm", f"Distance MM: {distance_mm:.2f}mm",
(20, 100), (20, 100),
cv2.FONT_HERSHEY_SIMPLEX, 0,
0.8, 0.8,
(0, 0, 0), (0, 0, 0),
2, 2,