Update notebooks (#17065)
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
parent b9747791df
commit 71624018e2
2 changed files with 11 additions and 28 deletions
File 1 of 2 (heatmap notebook):

@@ -96,10 +96,7 @@
 "source": [
 "import cv2\n",
 "\n",
-"from ultralytics import YOLO, solutions\n",
-"\n",
-"# Load YOLO model\n",
-"model = YOLO(\"yolo11n.pt\")\n",
+"from ultralytics import solutions\n",
 "\n",
 "# Open video file\n",
 "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
@@ -113,10 +110,9 @@
 "\n",
 "# Initialize heatmap object\n",
 "heatmap_obj = solutions.Heatmap(\n",
-"    colormap=cv2.COLORMAP_PARULA,\n",
-"    view_img=True,\n",
-"    shape=\"circle\",\n",
-"    names=model.names,\n",
+"    colormap=cv2.COLORMAP_PARULA,  # Color of the heatmap\n",
+"    show=True,  # Display the image during processing\n",
+"    model=yolo11n.pt,  # Ultralytics YOLO11 model file\n",
 ")\n",
 "\n",
 "while cap.isOpened():\n",
@@ -125,11 +121,8 @@
 "        print(\"Video frame is empty or video processing has been successfully completed.\")\n",
 "        break\n",
 "\n",
-"    # Perform tracking on the current frame\n",
-"    tracks = model.track(im0, persist=True, show=False)\n",
-"\n",
 "    # Generate heatmap on the frame\n",
-"    im0 = heatmap_obj.generate_heatmap(im0, tracks)\n",
+"    im0 = heatmap_obj.generate_heatmap(im0)\n",
 "\n",
 "    # Write the frame to the output video\n",
 "    video_writer.write(im0)\n",
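For reference, the heatmap cell after this change reads roughly as in the minimal sketch below, assembled from the hunks above. The `success, im0 = cap.read()` step, the `w`/`h`/`fps` capture properties, the `heatmap_output.avi` filename, and the final release calls are assumed boilerplate that this diff does not show, and the model path is written as a quoted string here even though the raw notebook line above appears unquoted.

import cv2

from ultralytics import solutions

# Open video file (placeholder path, as in the notebook)
cap = cv2.VideoCapture("path/to/video/file.mp4")

# Frame size and FPS for the output writer: assumed setup, not shown in this diff
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))  # assumed filename

# Initialize heatmap object; the solution now loads and tracks with the model itself
heatmap_obj = solutions.Heatmap(
    colormap=cv2.COLORMAP_PARULA,  # Color of the heatmap
    show=True,  # Display the image during processing
    model="yolo11n.pt",  # Ultralytics YOLO11 model file (quoted path assumed)
)

while cap.isOpened():
    success, im0 = cap.read()  # assumed read step, outside the hunks above
    if not success:
        print("Video frame is empty or video processing has been successfully completed.")
        break

    # Generate heatmap on the frame; no separate model.track() call is needed
    im0 = heatmap_obj.generate_heatmap(im0)

    # Write the frame to the output video
    video_writer.write(im0)

cap.release()
video_writer.release()
cv2.destroyAllWindows()

The key difference from the previous cell is that the explicit YOLO("yolo11n.pt") instance and the model.track(...) call are removed, and generate_heatmap takes only the frame.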
File 2 of 2 (object counting notebook):

@@ -104,10 +104,7 @@
 "source": [
 "import cv2\n",
 "\n",
-"from ultralytics import YOLO, solutions\n",
-"\n",
-"# Load the pre-trained YOLO11 model\n",
-"model = YOLO(\"yolo11n.pt\")\n",
+"from ultralytics import solutions\n",
 "\n",
 "# Open the video file\n",
 "cap = cv2.VideoCapture(\"path/to/video/file.mp4\")\n",
@@ -119,19 +116,15 @@
 "# Define points for a line or region of interest in the video frame\n",
 "line_points = [(20, 400), (1080, 400)]  # Line coordinates\n",
 "\n",
-"# Specify classes to count, for example: person (0) and car (2)\n",
-"classes_to_count = [0, 2]  # Class IDs for person and car\n",
-"\n",
 "# Initialize the video writer to save the output video\n",
 "video_writer = cv2.VideoWriter(\"object_counting_output.avi\", cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (w, h))\n",
 "\n",
 "# Initialize the Object Counter with visualization options and other parameters\n",
 "counter = solutions.ObjectCounter(\n",
-"    view_img=True,  # Display the image during processing\n",
-"    reg_pts=line_points,  # Region of interest points\n",
-"    names=model.names,  # Class names from the YOLO model\n",
-"    draw_tracks=True,  # Draw tracking lines for objects\n",
-"    line_thickness=2,  # Thickness of the lines drawn\n",
+"    show=True,  # Display the image during processing\n",
+"    region=line_points,  # Region of interest points\n",
+"    model=yolo11n.pt,  # Ultralytics YOLO11 model file\n",
+"    line_width=2,  # Thickness of the lines and bounding boxes\n",
 ")\n",
 "\n",
 "# Process video frames in a loop\n",
@@ -141,11 +134,8 @@
 "        print(\"Video frame is empty or video processing has been successfully completed.\")\n",
 "        break\n",
 "\n",
-"    # Perform object tracking on the current frame, filtering by specified classes\n",
-"    tracks = model.track(im0, persist=True, show=False, classes=classes_to_count)\n",
-"\n",
 "    # Use the Object Counter to count objects in the frame and get the annotated image\n",
-"    im0 = counter.start_counting(im0, tracks)\n",
+"    im0 = counter.count(im0)\n",
 "\n",
 "    # Write the annotated frame to the output video\n",
 "    video_writer.write(im0)\n",
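Likewise, the object counting cell after this change looks roughly like the sketch below, assembled from the hunks above. The `success, im0 = cap.read()` step, the `w`/`h`/`fps` capture properties, and the final release calls are assumed boilerplate not shown in this diff, and the model path is quoted here even though the raw notebook line above appears unquoted.

import cv2

from ultralytics import solutions

# Open the video file (placeholder path, as in the notebook)
cap = cv2.VideoCapture("path/to/video/file.mp4")

# Frame size and FPS for the output writer: assumed setup, not shown in this diff
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

# Define points for a line or region of interest in the video frame
line_points = [(20, 400), (1080, 400)]  # Line coordinates

# Initialize the video writer to save the output video
video_writer = cv2.VideoWriter("object_counting_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

# Initialize the Object Counter; it loads and tracks with the YOLO11 model internally
counter = solutions.ObjectCounter(
    show=True,  # Display the image during processing
    region=line_points,  # Region of interest points
    model="yolo11n.pt",  # Ultralytics YOLO11 model file (quoted path assumed)
    line_width=2,  # Thickness of the lines and bounding boxes
)

# Process video frames in a loop
while cap.isOpened():
    success, im0 = cap.read()  # assumed read step, outside the hunks above
    if not success:
        print("Video frame is empty or video processing has been successfully completed.")
        break

    # Count objects in the frame and get the annotated image; no separate model.track() call
    im0 = counter.count(im0)

    # Write the annotated frame to the output video
    video_writer.write(im0)

cap.release()
video_writer.release()
cv2.destroyAllWindows()

Note that the per-cell classes_to_count filter is removed by this commit, so the sketch follows suit and counts all detected classes.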