ultralytics 8.3.16 PyTorch 2.5.0 support (#16998)
Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: RizwanMunawar <chr043416@gmail.com>
Co-authored-by: Muhammad Rizwan Munawar <muhammadrizwanmunawar123@gmail.com>

parent ef28f1078c
commit 8d7d1fe390
17 changed files with 570 additions and 144 deletions
ultralytics/solutions/speed_estimation.py
@@ -4,15 +4,43 @@ from time import time
 
 import numpy as np
 
-from ultralytics.solutions.solutions import BaseSolution, LineString
+from ultralytics.solutions.solutions import BaseSolution
 from ultralytics.utils.plotting import Annotator, colors
 
 
 class SpeedEstimator(BaseSolution):
-    """A class to estimate the speed of objects in a real-time video stream based on their tracks."""
+    """
+    A class to estimate the speed of objects in a real-time video stream based on their tracks.
+
+    This class extends the BaseSolution class and provides functionality for estimating object speeds using
+    tracking data in video streams.
+
+    Attributes:
+        spd (Dict[int, float]): Dictionary storing speed data for tracked objects.
+        trkd_ids (List[int]): List of tracked object IDs that have already been speed-estimated.
+        trk_pt (Dict[int, float]): Dictionary storing previous timestamps for tracked objects.
+        trk_pp (Dict[int, Tuple[float, float]]): Dictionary storing previous positions for tracked objects.
+        annotator (Annotator): Annotator object for drawing on images.
+        region (List[Tuple[int, int]]): List of points defining the speed estimation region.
+        track_line (List[Tuple[float, float]]): List of points representing the object's track.
+        r_s (LineString): LineString object representing the speed estimation region.
+
+    Methods:
+        initialize_region: Initializes the speed estimation region.
+        estimate_speed: Estimates the speed of objects based on tracking data.
+        store_tracking_history: Stores the tracking history for an object.
+        extract_tracks: Extracts tracks from the current frame.
+        display_output: Displays the output with annotations.
+
+    Examples:
+        >>> estimator = SpeedEstimator()
+        >>> frame = cv2.imread("frame.jpg")
+        >>> processed_frame = estimator.estimate_speed(frame)
+        >>> cv2.imshow("Speed Estimation", processed_frame)
+    """
 
     def __init__(self, **kwargs):
-        """Initializes the SpeedEstimator with the given parameters."""
+        """Initializes the SpeedEstimator object with speed estimation parameters and data structures."""
         super().__init__(**kwargs)
 
         self.initialize_region()  # Initialize speed region
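The new class docstring documents the intended end-to-end workflow. A minimal usage sketch of that workflow follows; the video path, the default constructor arguments, and the ultralytics.solutions import path are assumptions for illustration, not part of this diff.

import cv2

from ultralytics.solutions import SpeedEstimator  # assumed public import path

estimator = SpeedEstimator()  # assumption: default region/model are resolved by BaseSolution
cap = cv2.VideoCapture("traffic.mp4")  # hypothetical input video
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    annotated = estimator.estimate_speed(frame)  # frame annotated with per-track speeds
    cv2.imshow("Speed Estimation", annotated)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()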
@@ -27,9 +55,15 @@ class SpeedEstimator(BaseSolution):
         Estimates the speed of objects based on tracking data.
 
         Args:
-            im0 (ndarray): The input image that will be used for processing
-        Returns
-            im0 (ndarray): The processed image for more usage
+            im0 (np.ndarray): Input image for processing. Shape is typically (H, W, C) for RGB images.
+
+        Returns:
+            (np.ndarray): Processed image with speed estimations and annotations.
+
+        Examples:
+            >>> estimator = SpeedEstimator()
+            >>> image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
+            >>> processed_image = estimator.estimate_speed(image)
         """
         self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
         self.extract_tracks(im0)  # Extract tracks
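The revised Args/Returns sections pin both the input and the return value to np.ndarray. A quick, hedged check of that contract on a synthetic frame; the constructor defaults, model resolution, and the in-place-annotation assumption behind the shape check are not stated in this diff.

import numpy as np

from ultralytics.solutions import SpeedEstimator  # assumed public import path

estimator = SpeedEstimator()  # assumed default configuration
frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # synthetic BGR frame
processed = estimator.estimate_speed(frame)
assert isinstance(processed, np.ndarray)  # docstring: returns the processed image
assert processed.shape == frame.shape  # assumes annotation keeps the input shape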
@@ -56,7 +90,7 @@ class SpeedEstimator(BaseSolution):
             )
 
             # Calculate object speed and direction based on region intersection
-            if LineString([self.trk_pp[track_id], self.track_line[-1]]).intersects(self.l_s):
+            if self.LineString([self.trk_pp[track_id], self.track_line[-1]]).intersects(self.r_s):
                 direction = "known"
             else:
                 direction = "unknown"
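The intersection test now resolves LineString through self.LineString (presumably attached by BaseSolution, matching the removed module-level import above) and checks against self.r_s. The geometry itself can be reproduced with shapely directly; the coordinates below are made-up illustrations.

from shapely.geometry import LineString

region_line = LineString([(20, 400), (1080, 400)])  # hypothetical speed-estimation region
prev_center, curr_center = (500, 380), (510, 420)  # previous vs. current track positions

# A track becomes eligible for speed estimation once the segment between its
# previous and current positions crosses the region line.
crossed = LineString([prev_center, curr_center]).intersects(region_line)
direction = "known" if crossed else "unknown"
print(direction)  # -> "known": the segment crosses y=400 between the region endpoints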