Reformat Markdown code blocks (#12795)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>

This commit is contained in:
parent 2af71d15a6
commit fceea033ad

128 changed files with 1067 additions and 1018 deletions
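The edits below are consistent with running a Black-style formatter over the Python snippets embedded in the docs Markdown: double quotes, two-space inline comments, sorted imports, and magic-trailing-comma expansion. As a rough sketch of how such a pass can be automated — the use of the `black` package, the 120-character line length, and the fence-matching regex are assumptions for illustration, not details confirmed by this commit:

```python
# Minimal sketch: reformat every ```python fence in a Markdown file with Black.
# Assumes `pip install black`; regex and entry point are illustrative only.
import re
import sys
from pathlib import Path

import black

FENCE = re.compile(r"(?ms)^```python\n(.*?)^```$")


def format_markdown(text: str) -> str:
    """Return `text` with each ```python code fence reformatted by Black."""

    def repl(match: re.Match) -> str:
        code = match.group(1)
        try:
            formatted = black.format_str(code, mode=black.Mode(line_length=120))
        except black.InvalidInput:
            return match.group(0)  # leave non-parseable snippets untouched
        return f"```python\n{formatted}```"

    return FENCE.sub(repl, text)


if __name__ == "__main__":
    for path in sys.argv[1:]:
        p = Path(path)
        p.write_text(format_markdown(p.read_text()))
```

The reconstructed hunks follow, one file after another.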
@@ -70,8 +70,8 @@ With Ultralytics installed, you can now start using its robust features for obje
 ```python
 from ultralytics import YOLO

-model = YOLO('yolov8n.pt') # initialize model
-results = model('path/to/image.jpg') # perform inference
+model = YOLO("yolov8n.pt")  # initialize model
+results = model("path/to/image.jpg")  # perform inference
 results[0].show()  # display results for the first image
 ```

@@ -82,10 +82,10 @@ To use the Edge TPU, you need to convert your model into a compatible format. It
 from ultralytics import YOLO

 # Load a model
-model = YOLO('path/to/model.pt')  # Load an official model or custom model
+model = YOLO("path/to/model.pt")  # Load an official model or custom model

 # Export the model
-model.export(format='edgetpu')
+model.export(format="edgetpu")
 ```

 === "CLI"

@@ -108,7 +108,7 @@ After exporting your model, you can run inference with it using the following co
 from ultralytics import YOLO

 # Load a model
-model = YOLO('path/to/edgetpu_model.tflite')  # Load an official model or custom model
+model = YOLO("path/to/edgetpu_model.tflite")  # Load an official model or custom model

 # Run Prediction
 model.predict("path/to/source.png")

@@ -42,8 +42,8 @@ Measuring the gap between two objects is known as distance calculation within a
 === "Video Stream"

 ```python
-from ultralytics import YOLO, solutions
 import cv2
+from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 names = model.model.names

@@ -53,7 +53,7 @@ Measuring the gap between two objects is known as distance calculation within a
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Video writer
-video_writer = cv2.VideoWriter("distance_calculation.avi", cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+video_writer = cv2.VideoWriter("distance_calculation.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 # Init distance-calculation obj
 dist_obj = solutions.DistanceCalculation(names=names, view_img=True)

@@ -71,7 +71,6 @@ Measuring the gap between two objects is known as distance calculation within a
 cap.release()
 video_writer.release()
 cv2.destroyAllWindows()
-
 ```

 ???+ tip "Note"

@@ -44,8 +44,8 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
 === "Heatmap"

 ```python
-from ultralytics import YOLO, solutions
 import cv2
+from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")

@@ -53,13 +53,15 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Video writer
-video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 # Init heatmap
-heatmap_obj = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA,
-                                view_img=True,
-                                shape="circle",
-                                classes_names=model.names)
+heatmap_obj = solutions.Heatmap(
+    colormap=cv2.COLORMAP_PARULA,
+    view_img=True,
+    shape="circle",
+    classes_names=model.names,
+)

 while cap.isOpened():
     success, im0 = cap.read()

@@ -74,14 +76,13 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
 cap.release()
 video_writer.release()
 cv2.destroyAllWindows()
-
 ```

 === "Line Counting"

 ```python
-from ultralytics import YOLO, solutions
 import cv2
+from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")

@@ -89,16 +90,18 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Video writer
-video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 line_points = [(20, 400), (1080, 404)]  # line for object counting

 # Init heatmap
-heatmap_obj = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA,
-                                view_img=True,
-                                shape="circle",
-                                count_reg_pts=line_points,
-                                classes_names=model.names)
+heatmap_obj = solutions.Heatmap(
+    colormap=cv2.COLORMAP_PARULA,
+    view_img=True,
+    shape="circle",
+    count_reg_pts=line_points,
+    classes_names=model.names,
+)

 while cap.isOpened():
     success, im0 = cap.read()

@@ -117,30 +120,29 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
 === "Polygon Counting"
 ```python
-from ultralytics import YOLO, solutions
 import cv2
+
+from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")
 assert cap.isOpened(), "Error reading video file"
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Video writer
-video_writer = cv2.VideoWriter("heatmap_output.avi",
-                               cv2.VideoWriter_fourcc(*'mp4v'),
-                               fps,
-                               (w, h))
+video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 # Define polygon points
 region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360), (20, 400)]

 # Init heatmap
-heatmap_obj = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA,
-                                view_img=True,
-                                shape="circle",
-                                count_reg_pts=region_points,
-                                classes_names=model.names)
+heatmap_obj = solutions.Heatmap(
+    colormap=cv2.COLORMAP_PARULA,
+    view_img=True,
+    shape="circle",
+    count_reg_pts=region_points,
+    classes_names=model.names,
+)

 while cap.isOpened():
     success, im0 = cap.read()
     if not success:

@@ -150,7 +152,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
     tracks = model.track(im0, persist=True, show=False)
     im0 = heatmap_obj.generate_heatmap(im0, tracks)
     video_writer.write(im0)

 cap.release()
 video_writer.release()
 cv2.destroyAllWindows()

@@ -159,8 +161,8 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
 === "Region Counting"

 ```python
-from ultralytics import YOLO, solutions
 import cv2
+from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")

@@ -168,24 +170,26 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Video writer
-video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 # Define region points
 region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]

 # Init heatmap
-heatmap_obj = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA,
-                                view_img=True,
-                                shape="circle",
-                                count_reg_pts=region_points,
-                                classes_names=model.names)
+heatmap_obj = solutions.Heatmap(
+    colormap=cv2.COLORMAP_PARULA,
+    view_img=True,
+    shape="circle",
+    count_reg_pts=region_points,
+    classes_names=model.names,
+)

 while cap.isOpened():
     success, im0 = cap.read()
     if not success:
         print("Video frame is empty or video processing has been successfully completed.")
         break

     tracks = model.track(im0, persist=True, show=False)
     im0 = heatmap_obj.generate_heatmap(im0, tracks)
     video_writer.write(im0)

@@ -198,19 +202,21 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
 === "Im0"

 ```python
-from ultralytics import YOLO, solutions
 import cv2
+from ultralytics import YOLO, solutions

-model = YOLO("yolov8s.pt") # YOLOv8 custom/pretrained model
+model = YOLO("yolov8s.pt")  # YOLOv8 custom/pretrained model

 im0 = cv2.imread("path/to/image.png")  # path to image file
 h, w = im0.shape[:2]  # image height and width

 # Heatmap Init
-heatmap_obj = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA,
-                                view_img=True,
-                                shape="circle",
-                                classes_names=model.names)
+heatmap_obj = solutions.Heatmap(
+    colormap=cv2.COLORMAP_PARULA,
+    view_img=True,
+    shape="circle",
+    classes_names=model.names,
+)

 results = model.track(im0, persist=True)
 im0 = heatmap_obj.generate_heatmap(im0, tracks=results)

@@ -220,8 +226,8 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
 === "Specific Classes"

 ```python
-from ultralytics import YOLO, solutions
 import cv2
+from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")

@@ -229,23 +235,24 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Video writer
-video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+video_writer = cv2.VideoWriter("heatmap_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 classes_for_heatmap = [0, 2]  # classes for heatmap

 # Init heatmap
-heatmap_obj = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA,
-                                view_img=True,
-                                shape="circle",
-                                classes_names=model.names)
+heatmap_obj = solutions.Heatmap(
+    colormap=cv2.COLORMAP_PARULA,
+    view_img=True,
+    shape="circle",
+    classes_names=model.names,
+)

 while cap.isOpened():
     success, im0 = cap.read()
     if not success:
         print("Video frame is empty or video processing has been successfully completed.")
         break
-    tracks = model.track(im0, persist=True, show=False,
-                         classes=classes_for_heatmap)
+    tracks = model.track(im0, persist=True, show=False, classes=classes_for_heatmap)

     im0 = heatmap_obj.generate_heatmap(im0, tracks)
     video_writer.write(im0)

@@ -77,10 +77,10 @@ Here's how to use the `model.tune()` method to utilize the `Tuner` class for hyp
 from ultralytics import YOLO

 # Initialize the YOLO model
-model = YOLO('yolov8n.pt')
+model = YOLO("yolov8n.pt")

 # Tune hyperparameters on COCO8 for 30 epochs
-model.tune(data='coco8.yaml', epochs=30, iterations=300, optimizer='AdamW', plots=False, save=False, val=False)
+model.tune(data="coco8.yaml", epochs=30, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
 ```

 ## Results

@@ -48,7 +48,7 @@ There are two types of instance segmentation tracking available in the Ultralyti
 cap = cv2.VideoCapture("path/to/video/file.mp4")
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

-out = cv2.VideoWriter('instance-segmentation.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (w, h))
+out = cv2.VideoWriter("instance-segmentation.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h))

 while True:
     ret, im0 = cap.read()

@@ -63,38 +63,35 @@ There are two types of instance segmentation tracking available in the Ultralyti
     clss = results[0].boxes.cls.cpu().tolist()
     masks = results[0].masks.xy
     for mask, cls in zip(masks, clss):
-        annotator.seg_bbox(mask=mask,
-                           mask_color=colors(int(cls), True),
-                           det_label=names[int(cls)])
+        annotator.seg_bbox(mask=mask, mask_color=colors(int(cls), True), det_label=names[int(cls)])

     out.write(im0)
     cv2.imshow("instance-segmentation", im0)

-    if cv2.waitKey(1) & 0xFF == ord('q'):
+    if cv2.waitKey(1) & 0xFF == ord("q"):
         break

 out.release()
 cap.release()
 cv2.destroyAllWindows()
-
 ```

 === "Instance Segmentation with Object Tracking"

 ```python
+from collections import defaultdict
+
 import cv2
 from ultralytics import YOLO
 from ultralytics.utils.plotting import Annotator, colors

-from collections import defaultdict
-
 track_history = defaultdict(lambda: [])

-model = YOLO("yolov8n-seg.pt") # segmentation model
+model = YOLO("yolov8n-seg.pt")  # segmentation model
 cap = cv2.VideoCapture("path/to/video/file.mp4")
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

-out = cv2.VideoWriter('instance-segmentation-object-tracking.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (w, h))
+out = cv2.VideoWriter("instance-segmentation-object-tracking.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h))

 while True:
     ret, im0 = cap.read()

@@ -111,14 +108,12 @@ There are two types of instance segmentation tracking available in the Ultralyti
         track_ids = results[0].boxes.id.int().cpu().tolist()

         for mask, track_id in zip(masks, track_ids):
-            annotator.seg_bbox(mask=mask,
-                               mask_color=colors(track_id, True),
-                               track_label=str(track_id))
+            annotator.seg_bbox(mask=mask, mask_color=colors(track_id, True), track_label=str(track_id))

     out.write(im0)
     cv2.imshow("instance-segmentation-object-tracking", im0)

-    if cv2.waitKey(1) & 0xFF == ord('q'):
+    if cv2.waitKey(1) & 0xFF == ord("q"):
         break

 out.release()

@@ -36,7 +36,7 @@ After performing the [Segment Task](../tasks/segment.md), it's sometimes desirab
 from ultralytics import YOLO

 # Load a model
-model = YOLO('yolov8n-seg.pt')
+model = YOLO("yolov8n-seg.pt")

 # Run inference
 results = model.predict()

@@ -159,7 +159,6 @@ After performing the [Segment Task](../tasks/segment.md), it's sometimes desirab
 # Isolate object with binary mask
 isolated = cv2.bitwise_and(mask3ch, img)
-
 ```

 ??? question "How does this work?"

@@ -209,7 +208,6 @@ After performing the [Segment Task](../tasks/segment.md), it's sometimes desirab
 ```py
 # Isolate object with transparent background (when saved as PNG)
 isolated = np.dstack([img, b_mask])
-
 ```

 ??? question "How does this work?"

@@ -266,7 +264,7 @@ After performing the [Segment Task](../tasks/segment.md), it's sometimes desirab
 ```py
 # Save isolated object to file
-_ = cv2.imwrite(f'{img_name}_{label}-{ci}.png', iso_crop)
+_ = cv2.imwrite(f"{img_name}_{label}-{ci}.png", iso_crop)
 ```

 - In this example, the `img_name` is the base-name of the source image file, `label` is the detected class-name, and `ci` is the index of the object detection (in case of multiple instances with the same class name).

@@ -62,36 +62,36 @@ Without further ado, let's dive in!
 ```python
 import datetime
 import shutil
-from pathlib import Path
 from collections import Counter
+from pathlib import Path

-import yaml
 import numpy as np
 import pandas as pd
-from ultralytics import YOLO
+import yaml
 from sklearn.model_selection import KFold
+from ultralytics import YOLO
 ```

 2. Proceed to retrieve all label files for your dataset.

 ```python
-dataset_path = Path('./Fruit-detection') # replace with 'path/to/dataset' for your custom data
-labels = sorted(dataset_path.rglob("*labels/*.txt")) # all data in 'labels'
+dataset_path = Path("./Fruit-detection")  # replace with 'path/to/dataset' for your custom data
+labels = sorted(dataset_path.rglob("*labels/*.txt"))  # all data in 'labels'
 ```

 3. Now, read the contents of the dataset YAML file and extract the indices of the class labels.

 ```python
-yaml_file = 'path/to/data.yaml' # your data YAML with data directories and names dictionary
-with open(yaml_file, 'r', encoding="utf8") as y:
-    classes = yaml.safe_load(y)['names']
+yaml_file = "path/to/data.yaml"  # your data YAML with data directories and names dictionary
+with open(yaml_file, "r", encoding="utf8") as y:
+    classes = yaml.safe_load(y)["names"]
 cls_idx = sorted(classes.keys())
 ```

 4. Initialize an empty `pandas` DataFrame.

 ```python
-indx = [l.stem for l in labels] # uses base filename as ID (no extension)
+indx = [l.stem for l in labels]  # uses base filename as ID (no extension)
 labels_df = pd.DataFrame([], columns=cls_idx, index=indx)
 ```

@@ -101,16 +101,16 @@ Without further ado, let's dive in!
 for label in labels:
     lbl_counter = Counter()

-    with open(label,'r') as lf:
+    with open(label, "r") as lf:
         lines = lf.readlines()

     for l in lines:
         # classes for YOLO label uses integer at first position of each line
-        lbl_counter[int(l.split(' ')[0])] += 1
+        lbl_counter[int(l.split(" ")[0])] += 1

     labels_df.loc[label.stem] = lbl_counter

-labels_df = labels_df.fillna(0.0) # replace `nan` values with `0.0`
+labels_df = labels_df.fillna(0.0)  # replace `nan` values with `0.0`
 ```

 6. The following is a sample view of the populated DataFrame:

@@ -142,7 +142,7 @@ The rows index the label files, each corresponding to an image in your dataset,
 ```python
 ksplit = 5
-kf = KFold(n_splits=ksplit, shuffle=True, random_state=20) # setting random_state for repeatable results
+kf = KFold(n_splits=ksplit, shuffle=True, random_state=20)  # setting random_state for repeatable results

 kfolds = list(kf.split(labels_df))
 ```

@@ -150,12 +150,12 @@ The rows index the label files, each corresponding to an image in your dataset,
 2. The dataset has now been split into `k` folds, each having a list of `train` and `val` indices. We will construct a DataFrame to display these results more clearly.

 ```python
-folds = [f'split_{n}' for n in range(1, ksplit + 1)]
+folds = [f"split_{n}" for n in range(1, ksplit + 1)]
 folds_df = pd.DataFrame(index=indx, columns=folds)

 for idx, (train, val) in enumerate(kfolds, start=1):
-    folds_df[f'split_{idx}'].loc[labels_df.iloc[train].index] = 'train'
-    folds_df[f'split_{idx}'].loc[labels_df.iloc[val].index] = 'val'
+    folds_df[f"split_{idx}"].loc[labels_df.iloc[train].index] = "train"
+    folds_df[f"split_{idx}"].loc[labels_df.iloc[val].index] = "val"
 ```

 3. Now we will calculate the distribution of class labels for each fold as a ratio of the classes present in `val` to those present in `train`.

@@ -168,8 +168,8 @@ The rows index the label files, each corresponding to an image in your dataset,
     val_totals = labels_df.iloc[val_indices].sum()

     # To avoid division by zero, we add a small value (1E-7) to the denominator
-    ratio = val_totals / (train_totals + 1E-7)
-    fold_lbl_distrb.loc[f'split_{n}'] = ratio
+    ratio = val_totals / (train_totals + 1e-7)
+    fold_lbl_distrb.loc[f"split_{n}"] = ratio
 ```

 The ideal scenario is for all class ratios to be reasonably similar for each split and across classes. This, however, will be subject to the specifics of your dataset.

@@ -177,17 +177,17 @@ The rows index the label files, each corresponding to an image in your dataset,
 4. Next, we create the directories and dataset YAML files for each split.

 ```python
-supported_extensions = ['.jpg', '.jpeg', '.png']
+supported_extensions = [".jpg", ".jpeg", ".png"]

 # Initialize an empty list to store image file paths
 images = []

 # Loop through supported extensions and gather image files
 for ext in supported_extensions:
-    images.extend(sorted((dataset_path / 'images').rglob(f"*{ext}")))
+    images.extend(sorted((dataset_path / "images").rglob(f"*{ext}")))

 # Create the necessary directories and dataset YAML files (unchanged)
-save_path = Path(dataset_path / f'{datetime.date.today().isoformat()}_{ksplit}-Fold_Cross-val')
+save_path = Path(dataset_path / f"{datetime.date.today().isoformat()}_{ksplit}-Fold_Cross-val")
 save_path.mkdir(parents=True, exist_ok=True)
 ds_yamls = []

@@ -195,22 +195,25 @@ The rows index the label files, each corresponding to an image in your dataset,
     # Create directories
     split_dir = save_path / split
     split_dir.mkdir(parents=True, exist_ok=True)
-    (split_dir / 'train' / 'images').mkdir(parents=True, exist_ok=True)
-    (split_dir / 'train' / 'labels').mkdir(parents=True, exist_ok=True)
-    (split_dir / 'val' / 'images').mkdir(parents=True, exist_ok=True)
-    (split_dir / 'val' / 'labels').mkdir(parents=True, exist_ok=True)
+    (split_dir / "train" / "images").mkdir(parents=True, exist_ok=True)
+    (split_dir / "train" / "labels").mkdir(parents=True, exist_ok=True)
+    (split_dir / "val" / "images").mkdir(parents=True, exist_ok=True)
+    (split_dir / "val" / "labels").mkdir(parents=True, exist_ok=True)

     # Create dataset YAML files
-    dataset_yaml = split_dir / f'{split}_dataset.yaml'
+    dataset_yaml = split_dir / f"{split}_dataset.yaml"
     ds_yamls.append(dataset_yaml)

-    with open(dataset_yaml, 'w') as ds_y:
-        yaml.safe_dump({
-            'path': split_dir.as_posix(),
-            'train': 'train',
-            'val': 'val',
-            'names': classes
-        }, ds_y)
+    with open(dataset_yaml, "w") as ds_y:
+        yaml.safe_dump(
+            {
+                "path": split_dir.as_posix(),
+                "train": "train",
+                "val": "val",
+                "names": classes,
+            },
+            ds_y,
+        )
 ```

 5. Lastly, copy images and labels into the respective directory ('train' or 'val') for each split.

@@ -221,8 +224,8 @@ The rows index the label files, each corresponding to an image in your dataset,
 for image, label in zip(images, labels):
     for split, k_split in folds_df.loc[image.stem].items():
         # Destination directory
-        img_to_path = save_path / split / k_split / 'images'
-        lbl_to_path = save_path / split / k_split / 'labels'
+        img_to_path = save_path / split / k_split / "images"
+        lbl_to_path = save_path / split / k_split / "labels"

         # Copy image and label files to new directory (SamefileError if file already exists)
         shutil.copy(image, img_to_path / image.name)

@@ -243,8 +246,8 @@ fold_lbl_distrb.to_csv(save_path / "kfold_label_distribution.csv")
 1. First, load the YOLO model.

 ```python
-weights_path = 'path/to/weights.pt'
-model = YOLO(weights_path, task='detect')
+weights_path = "path/to/weights.pt"
+model = YOLO(weights_path, task="detect")
 ```

 2. Next, iterate over the dataset YAML files to run training. The results will be saved to a directory specified by the `project` and `name` arguments. By default, this directory is 'exp/runs#' where # is an integer index.

@@ -254,12 +257,12 @@ fold_lbl_distrb.to_csv(save_path / "kfold_label_distribution.csv")
 # Define your additional arguments here
 batch = 16
-project = 'kfold_demo'
+project = "kfold_demo"
 epochs = 100

 for k in range(ksplit):
     dataset_yaml = ds_yamls[k]
-    model.train(data=dataset_yaml,epochs=epochs, batch=batch, project=project) # include any train arguments
+    model.train(data=dataset_yaml, epochs=epochs, batch=batch, project=project)  # include any train arguments
     results[k] = model.metrics  # save output metrics for further analysis
 ```

@@ -158,16 +158,16 @@ The YOLOv8n model in PyTorch format is converted to TensorRT to run inference wi
 from ultralytics import YOLO

 # Load a YOLOv8n PyTorch model
-model = YOLO('yolov8n.pt')
+model = YOLO("yolov8n.pt")

 # Export the model
-model.export(format='engine') # creates 'yolov8n.engine'
+model.export(format="engine")  # creates 'yolov8n.engine'

 # Load the exported TensorRT model
-trt_model = YOLO('yolov8n.engine')
+trt_model = YOLO("yolov8n.engine")

 # Run inference
-results = trt_model('https://ultralytics.com/images/bus.jpg')
+results = trt_model("https://ultralytics.com/images/bus.jpg")
 ```
 === "CLI"

@@ -290,10 +290,10 @@ To reproduce the above Ultralytics benchmarks on all export [formats](../modes/e
 from ultralytics import YOLO

 # Load a YOLOv8n PyTorch model
-model = YOLO('yolov8n.pt')
+model = YOLO("yolov8n.pt")

 # Benchmark YOLOv8n speed and accuracy on the COCO8 dataset for all all export formats
-results = model.benchmarks(data='coco8.yaml', imgsz=640)
+results = model.benchmarks(data="coco8.yaml", imgsz=640)
 ```
 === "CLI"

@@ -21,9 +21,9 @@ Object blurring with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
 === "Object Blurring"

 ```python
+import cv2
 from ultralytics import YOLO
 from ultralytics.utils.plotting import Annotator, colors
-import cv2

 model = YOLO("yolov8n.pt")
 names = model.names

@@ -36,9 +36,7 @@ Object blurring with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
 blur_ratio = 50

 # Video writer
-video_writer = cv2.VideoWriter("object_blurring_output.avi",
-                               cv2.VideoWriter_fourcc(*'mp4v'),
-                               fps, (w, h))
+video_writer = cv2.VideoWriter("object_blurring_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 while cap.isOpened():
     success, im0 = cap.read()

@@ -55,14 +53,14 @@ Object blurring with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
 for box, cls in zip(boxes, clss):
     annotator.box_label(box, color=colors(int(cls), True), label=names[int(cls)])

-    obj = im0[int(box[1]):int(box[3]), int(box[0]):int(box[2])]
+    obj = im0[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])]
     blur_obj = cv2.blur(obj, (blur_ratio, blur_ratio))

-    im0[int(box[1]):int(box[3]), int(box[0]):int(box[2])] = blur_obj
+    im0[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] = blur_obj

 cv2.imshow("ultralytics", im0)
 video_writer.write(im0)
-if cv2.waitKey(1) & 0xFF == ord('q'):
+if cv2.waitKey(1) & 0xFF == ord("q"):
     break

 cap.release()

@@ -53,18 +53,18 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
 ```python
 import cv2
 from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")
 assert cap.isOpened(), "Error reading video file"
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Define region points
 region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360)]

 # Video writer
 video_writer = cv2.VideoWriter("object_counting_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 # Init Object Counter
 counter = solutions.ObjectCounter(
     view_img=True,

@@ -73,17 +73,17 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
     draw_tracks=True,
     line_thickness=2,
 )

 while cap.isOpened():
     success, im0 = cap.read()
     if not success:
         print("Video frame is empty or video processing has been successfully completed.")
         break
     tracks = model.track(im0, persist=True, show=False)

     im0 = counter.start_counting(im0, tracks)
     video_writer.write(im0)

 cap.release()
 video_writer.release()
 cv2.destroyAllWindows()

@@ -94,18 +94,18 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
 ```python
 import cv2
 from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")
 assert cap.isOpened(), "Error reading video file"
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Define region points as a polygon with 5 points
 region_points = [(20, 400), (1080, 404), (1080, 360), (20, 360), (20, 400)]

 # Video writer
 video_writer = cv2.VideoWriter("object_counting_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 # Init Object Counter
 counter = solutions.ObjectCounter(
     view_img=True,

@@ -114,17 +114,17 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
     draw_tracks=True,
     line_thickness=2,
 )

 while cap.isOpened():
     success, im0 = cap.read()
     if not success:
         print("Video frame is empty or video processing has been successfully completed.")
         break
     tracks = model.track(im0, persist=True, show=False)

     im0 = counter.start_counting(im0, tracks)
     video_writer.write(im0)

 cap.release()
 video_writer.release()
 cv2.destroyAllWindows()

@@ -135,18 +135,18 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
 ```python
 import cv2
 from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")
 assert cap.isOpened(), "Error reading video file"
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Define line points
 line_points = [(20, 400), (1080, 400)]

 # Video writer
 video_writer = cv2.VideoWriter("object_counting_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 # Init Object Counter
 counter = solutions.ObjectCounter(
     view_img=True,

@@ -155,17 +155,17 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
     draw_tracks=True,
     line_thickness=2,
 )

 while cap.isOpened():
     success, im0 = cap.read()
     if not success:
         print("Video frame is empty or video processing has been successfully completed.")
         break
     tracks = model.track(im0, persist=True, show=False)

     im0 = counter.start_counting(im0, tracks)
     video_writer.write(im0)

 cap.release()
 video_writer.release()
 cv2.destroyAllWindows()

@@ -176,18 +176,18 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
 ```python
 import cv2
 from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")
 assert cap.isOpened(), "Error reading video file"
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 line_points = [(20, 400), (1080, 400)]  # line or region points
 classes_to_count = [0, 2]  # person and car classes for count

 # Video writer
 video_writer = cv2.VideoWriter("object_counting_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 # Init Object Counter
 counter = solutions.ObjectCounter(
     view_img=True,

@@ -196,17 +196,17 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
     draw_tracks=True,
     line_thickness=2,
 )

 while cap.isOpened():
     success, im0 = cap.read()
     if not success:
         print("Video frame is empty or video processing has been successfully completed.")
         break
     tracks = model.track(im0, persist=True, show=False, classes=classes_to_count)

     im0 = counter.start_counting(im0, tracks)
     video_writer.write(im0)

 cap.release()
 video_writer.release()
 cv2.destroyAllWindows()

@@ -28,10 +28,11 @@ Object cropping with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
 === "Object Cropping"

 ```python
+import os
+
+import cv2
 from ultralytics import YOLO
 from ultralytics.utils.plotting import Annotator, colors
-import cv2
-import os

 model = YOLO("yolov8n.pt")
 names = model.names

@@ -45,9 +46,7 @@ Object cropping with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
 os.mkdir(crop_dir_name)

 # Video writer
-video_writer = cv2.VideoWriter("object_cropping_output.avi",
-                               cv2.VideoWriter_fourcc(*'mp4v'),
-                               fps, (w, h))
+video_writer = cv2.VideoWriter("object_cropping_output.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 idx = 0
 while cap.isOpened():

@@ -66,14 +65,14 @@ Object cropping with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly
         idx += 1
         annotator.box_label(box, color=colors(int(cls), True), label=names[int(cls)])

-        crop_obj = im0[int(box[1]):int(box[3]), int(box[0]):int(box[2])]
+        crop_obj = im0[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])]

-        cv2.imwrite(os.path.join(crop_dir_name, str(idx)+".png"), crop_obj)
+        cv2.imwrite(os.path.join(crop_dir_name, str(idx) + ".png"), crop_obj)

 cv2.imshow("ultralytics", im0)
 video_writer.write(im0)

-if cv2.waitKey(1) & 0xFF == ord('q'):
+if cv2.waitKey(1) & 0xFF == ord("q"):
     break

 cap.release()

@@ -62,36 +62,34 @@ root.mainloop()
 # Path to json file, that created with above point selection app
 polygon_json_path = "bounding_boxes.json"

 # Video capture
 cap = cv2.VideoCapture("Path/to/video/file.mp4")
 assert cap.isOpened(), "Error reading video file"
-w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH,
-                                       cv2.CAP_PROP_FRAME_HEIGHT,
-                                       cv2.CAP_PROP_FPS))
+w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Video writer
-video_writer = cv2.VideoWriter("parking management.avi", cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+video_writer = cv2.VideoWriter("parking management.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 # Initialize parking management object
 management = solutions.ParkingManagement(model_path="yolov8n.pt")

 while cap.isOpened():
     ret, im0 = cap.read()
     if not ret:
         break

     json_data = management.parking_regions_extraction(polygon_json_path)
     results = management.model.track(im0, persist=True, show=False)

     if results[0].boxes.id is not None:
         boxes = results[0].boxes.xyxy.cpu().tolist()
         clss = results[0].boxes.cls.cpu().tolist()
         management.process_data(json_data, im0, boxes, clss)

     management.display_frames(im0)
     video_writer.write(im0)

 cap.release()
 video_writer.release()
 cv2.destroyAllWindows()

@@ -29,39 +29,40 @@ Queue management using [Ultralytics YOLOv8](https://github.com/ultralytics/ultra
 ```python
 import cv2
 from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")

 assert cap.isOpened(), "Error reading video file"
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

-video_writer = cv2.VideoWriter("queue_management.avi", cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+video_writer = cv2.VideoWriter("queue_management.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 queue_region = [(20, 400), (1080, 404), (1080, 360), (20, 360)]

-queue = solutions.QueueManager(classes_names=model.names,
-                               reg_pts=queue_region,
-                               line_thickness=3,
-                               fontsize=1.0,
-                               region_color=(255, 144, 31))
+queue = solutions.QueueManager(
+    classes_names=model.names,
+    reg_pts=queue_region,
+    line_thickness=3,
+    fontsize=1.0,
+    region_color=(255, 144, 31),
+)

 while cap.isOpened():
     success, im0 = cap.read()

     if success:
-        tracks = model.track(im0, show=False, persist=True,
-                             verbose=False)
+        tracks = model.track(im0, show=False, persist=True, verbose=False)
         out = queue.process_queue(im0, tracks)

         video_writer.write(im0)
-        if cv2.waitKey(1) & 0xFF == ord('q'):
+        if cv2.waitKey(1) & 0xFF == ord("q"):
             break
         continue

     print("Video frame is empty or video processing has been successfully completed.")
     break

 cap.release()
 cv2.destroyAllWindows()
 ```

@@ -71,39 +72,40 @@ Queue management using [Ultralytics YOLOv8](https://github.com/ultralytics/ultra
 ```python
 import cv2
 from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")

 assert cap.isOpened(), "Error reading video file"
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

-video_writer = cv2.VideoWriter("queue_management.avi", cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+video_writer = cv2.VideoWriter("queue_management.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 queue_region = [(20, 400), (1080, 404), (1080, 360), (20, 360)]

-queue = solutions.QueueManager(classes_names=model.names,
-                               reg_pts=queue_region,
-                               line_thickness=3,
-                               fontsize=1.0,
-                               region_color=(255, 144, 31))
+queue = solutions.QueueManager(
+    classes_names=model.names,
+    reg_pts=queue_region,
+    line_thickness=3,
+    fontsize=1.0,
+    region_color=(255, 144, 31),
+)

 while cap.isOpened():
     success, im0 = cap.read()

     if success:
-        tracks = model.track(im0, show=False, persist=True,
-                             verbose=False, classes=0)  # Only person class
+        tracks = model.track(im0, show=False, persist=True, verbose=False, classes=0)  # Only person class
         out = queue.process_queue(im0, tracks)

         video_writer.write(im0)
-        if cv2.waitKey(1) & 0xFF == ord('q'):
+        if cv2.waitKey(1) & 0xFF == ord("q"):
             break
         continue

     print("Video frame is empty or video processing has been successfully completed.")
     break

 cap.release()
 cv2.destroyAllWindows()
 ```

@@ -108,16 +108,16 @@ The YOLOv8n model in PyTorch format is converted to NCNN to run inference with t
 from ultralytics import YOLO

 # Load a YOLOv8n PyTorch model
-model = YOLO('yolov8n.pt')
+model = YOLO("yolov8n.pt")

 # Export the model to NCNN format
-model.export(format='ncnn') # creates 'yolov8n_ncnn_model'
+model.export(format="ncnn")  # creates 'yolov8n_ncnn_model'

 # Load the exported NCNN model
-ncnn_model = YOLO('yolov8n_ncnn_model')
+ncnn_model = YOLO("yolov8n_ncnn_model")

 # Run inference
-results = ncnn_model('https://ultralytics.com/images/bus.jpg')
+results = ncnn_model("https://ultralytics.com/images/bus.jpg")
 ```
 === "CLI"

@@ -231,10 +231,10 @@ To reproduce the above Ultralytics benchmarks on all [export formats](../modes/e
 from ultralytics import YOLO

 # Load a YOLOv8n PyTorch model
-model = YOLO('yolov8n.pt')
+model = YOLO("yolov8n.pt")

 # Benchmark YOLOv8n speed and accuracy on the COCO8 dataset for all all export formats
-results = model.benchmarks(data='coco8.yaml', imgsz=640)
+results = model.benchmarks(data="coco8.yaml", imgsz=640)
 ```
 === "CLI"

@@ -293,10 +293,10 @@ With the TCP stream initiated, you can perform YOLOv8 inference.
 from ultralytics import YOLO

 # Load a YOLOv8n PyTorch model
-model = YOLO('yolov8n.pt')
+model = YOLO("yolov8n.pt")

 # Run inference
-results = model('tcp://127.0.0.1:8888')
+results = model("tcp://127.0.0.1:8888")
 ```
 === "CLI"

@@ -60,21 +60,28 @@ pip install -U ultralytics sahi
 Here's how to import the necessary modules and download a YOLOv8 model and some test images:

 ```python
-from sahi.utils.yolov8 import download_yolov8s_model
+from pathlib import Path
+
+from IPython.display import Image
 from sahi import AutoDetectionModel
+from sahi.predict import get_prediction, get_sliced_prediction, predict
 from sahi.utils.cv import read_image
 from sahi.utils.file import download_from_url
-from sahi.predict import get_prediction, get_sliced_prediction, predict
-from pathlib import Path
-from IPython.display import Image
+from sahi.utils.yolov8 import download_yolov8s_model

 # Download YOLOv8 model
 yolov8_model_path = "models/yolov8s.pt"
 download_yolov8s_model(yolov8_model_path)

 # Download test images
-download_from_url('https://raw.githubusercontent.com/obss/sahi/main/demo/demo_data/small-vehicles1.jpeg', 'demo_data/small-vehicles1.jpeg')
-download_from_url('https://raw.githubusercontent.com/obss/sahi/main/demo/demo_data/terrain2.png', 'demo_data/terrain2.png')
+download_from_url(
+    "https://raw.githubusercontent.com/obss/sahi/main/demo/demo_data/small-vehicles1.jpeg",
+    "demo_data/small-vehicles1.jpeg",
+)
+download_from_url(
+    "https://raw.githubusercontent.com/obss/sahi/main/demo/demo_data/terrain2.png",
+    "demo_data/terrain2.png",
+)
 ```

 ## Standard Inference with YOLOv8

@@ -85,7 +92,7 @@ You can instantiate a YOLOv8 model for object detection like this:
 ```python
 detection_model = AutoDetectionModel.from_pretrained(
-    model_type='yolov8',
+    model_type="yolov8",
     model_path=yolov8_model_path,
     confidence_threshold=0.3,
     device="cpu",  # or 'cuda:0'

@@ -124,7 +131,7 @@ result = get_sliced_prediction(
     slice_height=256,
     slice_width=256,
     overlap_height_ratio=0.2,
-    overlap_width_ratio=0.2
+    overlap_width_ratio=0.2,
 )
 ```

@@ -30,15 +30,16 @@ The Security Alarm System Project utilizing Ultralytics YOLOv8 integrates advanc
 #### Import Libraries

 ```python
-import torch
-import numpy as np
-import cv2
-from time import time
-from ultralytics import YOLO
-from ultralytics.utils.plotting import Annotator, colors
 import smtplib
 from email.mime.multipart import MIMEMultipart
 from email.mime.text import MIMEText
+from time import time
+
+import cv2
+import numpy as np
+import torch
+from ultralytics import YOLO
+from ultralytics.utils.plotting import Annotator, colors
 ```

 #### Set up the parameters of the message

@@ -58,7 +59,7 @@ to_email = ""  # receiver email
 #### Server creation and authentication

 ```python
-server = smtplib.SMTP('smtp.gmail.com: 587')
+server = smtplib.SMTP("smtp.gmail.com: 587")
 server.starttls()
 server.login(from_email, password)
 ```

@@ -69,13 +70,13 @@ server.login(from_email, password)
 def send_email(to_email, from_email, object_detected=1):
     """Sends an email notification indicating the number of objects detected; defaults to 1 object."""
     message = MIMEMultipart()
-    message['From'] = from_email
-    message['To'] = to_email
-    message['Subject'] = "Security Alert"
+    message["From"] = from_email
+    message["To"] = to_email
+    message["Subject"] = "Security Alert"
     # Add in the message body
-    message_body = f'ALERT - {object_detected} objects has been detected!!'
+    message_body = f"ALERT - {object_detected} objects has been detected!!"

-    message.attach(MIMEText(message_body, 'plain'))
+    message.attach(MIMEText(message_body, "plain"))
     server.sendmail(from_email, to_email, message.as_string())
 ```

@@ -97,7 +98,7 @@ class ObjectDetection:
         self.end_time = 0

         # device information
-        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
+        self.device = "cuda" if torch.cuda.is_available() else "cpu"

     def predict(self, im0):
         """Run prediction using a YOLO model for the input image `im0`."""

@@ -108,10 +109,16 @@ class ObjectDetection:
         """Displays the FPS on an image `im0` by calculating and overlaying as white text on a black rectangle."""
         self.end_time = time()
         fps = 1 / np.round(self.end_time - self.start_time, 2)
-        text = f'FPS: {int(fps)}'
+        text = f"FPS: {int(fps)}"
         text_size = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 1.0, 2)[0]
         gap = 10
-        cv2.rectangle(im0, (20 - gap, 70 - text_size[1] - gap), (20 + text_size[0] + gap, 70 + gap), (255, 255, 255), -1)
+        cv2.rectangle(
+            im0,
+            (20 - gap, 70 - text_size[1] - gap),
+            (20 + text_size[0] + gap, 70 + gap),
+            (255, 255, 255),
+            -1,
+        )
         cv2.putText(im0, text, (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 0), 2)

     def plot_bboxes(self, results, im0):

@@ -148,7 +155,7 @@ class ObjectDetection:
                 self.email_sent = False

             self.display_fps(im0)
-            cv2.imshow('YOLOv8 Detection', im0)
+            cv2.imshow("YOLOv8 Detection", im0)
             frame_count += 1
             if cv2.waitKey(5) & 0xFF == 27:
                 break

@@ -39,8 +39,8 @@ Speed estimation is the process of calculating the rate of movement of an object
 === "Speed Estimation"

 ```python
-from ultralytics import YOLO, solutions
 import cv2
+from ultralytics import YOLO, solutions

 model = YOLO("yolov8n.pt")
 names = model.model.names

@@ -50,17 +50,18 @@ Speed estimation is the process of calculating the rate of movement of an object
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

 # Video writer
-video_writer = cv2.VideoWriter("speed_estimation.avi", cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
+video_writer = cv2.VideoWriter("speed_estimation.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

 line_pts = [(0, 360), (1280, 360)]

 # Init speed-estimation obj
-speed_obj = solutions.SpeedEstimator(reg_pts=line_pts,
-                                     names=names,
-                                     view_img=True)
+speed_obj = solutions.SpeedEstimator(
+    reg_pts=line_pts,
+    names=names,
+    view_img=True,
+)

 while cap.isOpened():
     success, im0 = cap.read()
     if not success:
         print("Video frame is empty or video processing has been successfully completed.")

@@ -74,7 +75,6 @@ Speed estimation is the process of calculating the rate of movement of an object
 cap.release()
 video_writer.release()
 cv2.destroyAllWindows()
-
 ```

 ???+ warning "Speed is Estimate"

@@ -46,10 +46,10 @@ Before deploying the model on Triton, it must be exported to the ONNX format. ON
 from ultralytics import YOLO

 # Load a model
-model = YOLO('yolov8n.pt') # load an official model
+model = YOLO("yolov8n.pt")  # load an official model

 # Export the model
-onnx_file = model.export(format='onnx', dynamic=True)
+onnx_file = model.export(format="onnx", dynamic=True)
 ```

 ## Setting Up Triton Model Repository

@@ -62,11 +62,11 @@ The Triton Model Repository is a storage location where Triton can access and lo
 from pathlib import Path

 # Define paths
-triton_repo_path = Path('tmp') / 'triton_repo'
-triton_model_path = triton_repo_path / 'yolo'
+triton_repo_path = Path("tmp") / "triton_repo"
+triton_model_path = triton_repo_path / "yolo"

 # Create directories
-(triton_model_path / '1').mkdir(parents=True, exist_ok=True)
+(triton_model_path / "1").mkdir(parents=True, exist_ok=True)
 ```

 2. Move the exported ONNX model to the Triton repository:

@@ -75,10 +75,10 @@ The Triton Model Repository is a storage location where Triton can access and lo
 from pathlib import Path

 # Move ONNX model to Triton Model path
-Path(onnx_file).rename(triton_model_path / '1' / 'model.onnx')
+Path(onnx_file).rename(triton_model_path / "1" / "model.onnx")

 # Create config file
-(triton_model_path / 'config.pbtxt').touch()
+(triton_model_path / "config.pbtxt").touch()
 ```

 ## Running Triton Inference Server

@@ -92,18 +92,23 @@ import time
 from tritonclient.http import InferenceServerClient

 # Define image https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver
-tag = 'nvcr.io/nvidia/tritonserver:23.09-py3' # 6.4 GB
+tag = "nvcr.io/nvidia/tritonserver:23.09-py3"  # 6.4 GB

 # Pull the image
-subprocess.call(f'docker pull {tag}', shell=True)
+subprocess.call(f"docker pull {tag}", shell=True)

 # Run the Triton server and capture the container ID
-container_id = subprocess.check_output(
-    f'docker run -d --rm -v {triton_repo_path}:/models -p 8000:8000 {tag} tritonserver --model-repository=/models',
-    shell=True).decode('utf-8').strip()
+container_id = (
+    subprocess.check_output(
+        f"docker run -d --rm -v {triton_repo_path}:/models -p 8000:8000 {tag} tritonserver --model-repository=/models",
+        shell=True,
+    )
+    .decode("utf-8")
+    .strip()
+)

 # Wait for the Triton server to start
-triton_client = InferenceServerClient(url='localhost:8000', verbose=False, ssl=False)
+triton_client = InferenceServerClient(url="localhost:8000", verbose=False, ssl=False)

 # Wait until model is ready
 for _ in range(10):

@@ -119,17 +124,17 @@ Then run inference using the Triton Server model:
 from ultralytics import YOLO

 # Load the Triton Server model
-model = YOLO(f'http://localhost:8000/yolo', task='detect')
+model = YOLO(f"http://localhost:8000/yolo", task="detect")

 # Run inference on the server
-results = model('path/to/image.jpg')
+results = model("path/to/image.jpg")
 ```

 Cleanup the container:

 ```python
 # Kill and remove the container at the end of the test
-subprocess.call(f'docker kill {container_id}', shell=True)
+subprocess.call(f"docker kill {container_id}", shell=True)
 ```

 ---

@@ -47,9 +47,8 @@ The VSCode compatible protocols for viewing images using the integrated terminal
 import io

 import cv2 as cv
-
-from ultralytics import YOLO
 from sixel import SixelWriter
+from ultralytics import YOLO
 ```

 1. Load a model and execute inference, then plot the results and store in a variable. See more about inference arguments and working with results on the [predict mode](../modes/predict.md) page.

@@ -24,14 +24,14 @@ keywords: Ultralytics, YOLOv8, Object Detection, Object Tracking, IDetection, Vi
 ```python
 import cv2
 from ultralytics import YOLO
-from ultralytics.utils.plotting import colors, Annotator
+from ultralytics.utils.plotting import Annotator, colors

 model = YOLO("yolov8n.pt")
 names = model.model.names
 cap = cv2.VideoCapture("path/to/video/file.mp4")
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

-out = cv2.VideoWriter('visioneye-pinpoint.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (w, h))
+out = cv2.VideoWriter("visioneye-pinpoint.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h))

 center_point = (-10, h)

@@ -54,7 +54,7 @@ keywords: Ultralytics, YOLOv8, Object Detection, Object Tracking, IDetection, Vi
     out.write(im0)
     cv2.imshow("visioneye-pinpoint", im0)

-    if cv2.waitKey(1) & 0xFF == ord('q'):
+    if cv2.waitKey(1) & 0xFF == ord("q"):
         break

 out.release()

@@ -67,13 +67,13 @@ keywords: Ultralytics, YOLOv8, Object Detection, Object Tracking, IDetection, Vi
 ```python
 import cv2
 from ultralytics import YOLO
-from ultralytics.utils.plotting import colors, Annotator
+from ultralytics.utils.plotting import Annotator, colors

 model = YOLO("yolov8n.pt")
 cap = cv2.VideoCapture("path/to/video/file.mp4")
 w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

-out = cv2.VideoWriter('visioneye-pinpoint.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (w, h))
+out = cv2.VideoWriter("visioneye-pinpoint.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h))

 center_point = (-10, h)

@@ -98,7 +98,7 @@ keywords: Ultralytics, YOLOv8, Object Detection, Object Tracking, IDetection, Vi

        out.write(im0)
        cv2.imshow("visioneye-pinpoint", im0)

        if cv2.waitKey(1) & 0xFF == ord('q'):
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    out.release()
@@ -109,55 +109,56 @@ keywords: Ultralytics, YOLOv8, Object Detection, Object Tracking, IDetection, Vi

=== "VisionEye with Distance Calculation"

    ```python
    import cv2
    import math

    import cv2
    from ultralytics import YOLO
    from ultralytics.utils.plotting import Annotator, colors

    model = YOLO("yolov8s.pt")
    cap = cv2.VideoCapture("Path/to/video/file.mp4")

    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

    out = cv2.VideoWriter('visioneye-distance-calculation.avi', cv2.VideoWriter_fourcc(*'MJPG'), fps, (w, h))
    out = cv2.VideoWriter("visioneye-distance-calculation.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h))

    center_point = (0, h)
    pixel_per_meter = 10

    txt_color, txt_background, bbox_clr = ((0, 0, 0), (255, 255, 255), (255, 0, 255))

    while True:
        ret, im0 = cap.read()
        if not ret:
            print("Video frame is empty or video processing has been successfully completed.")
            break

        annotator = Annotator(im0, line_width=2)

        results = model.track(im0, persist=True)
        boxes = results[0].boxes.xyxy.cpu()

        if results[0].boxes.id is not None:
            track_ids = results[0].boxes.id.int().cpu().tolist()

            for box, track_id in zip(boxes, track_ids):
                annotator.box_label(box, label=str(track_id), color=bbox_clr)
                annotator.visioneye(box, center_point)

                x1, y1 = int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)  # Bounding box centroid

                distance = (math.sqrt((x1 - center_point[0]) ** 2 + (y1 - center_point[1]) ** 2))/pixel_per_meter

                text_size, _ = cv2.getTextSize(f"Distance: {distance:.2f} m", cv2.FONT_HERSHEY_SIMPLEX,1.2, 3)
                cv2.rectangle(im0, (x1, y1 - text_size[1] - 10),(x1 + text_size[0] + 10, y1), txt_background, -1)
                cv2.putText(im0, f"Distance: {distance:.2f} m",(x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 1.2,txt_color, 3)

                x1, y1 = int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)  # Bounding box centroid

                distance = (math.sqrt((x1 - center_point[0]) ** 2 + (y1 - center_point[1]) ** 2)) / pixel_per_meter

                text_size, _ = cv2.getTextSize(f"Distance: {distance:.2f} m", cv2.FONT_HERSHEY_SIMPLEX, 1.2, 3)
                cv2.rectangle(im0, (x1, y1 - text_size[1] - 10), (x1 + text_size[0] + 10, y1), txt_background, -1)
                cv2.putText(im0, f"Distance: {distance:.2f} m", (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 1.2, txt_color, 3)

        out.write(im0)
        cv2.imshow("visioneye-distance-calculation", im0)

        if cv2.waitKey(1) & 0xFF == ord('q'):
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

    out.release()
    cap.release()
    cv2.destroyAllWindows()
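A quick sanity check on the pixel-to-meter conversion used above: with `pixel_per_meter = 10`, a centroid lying 300 px from the reference point reports 30 m. A tiny standalone sketch with hypothetical coordinates:

```python
import math

center_point = (0, 1080)  # assumed bottom-left reference for a 1080p frame
pixel_per_meter = 10  # calibration constant: pixels spanned by one real-world meter

x1, y1 = 180, 840  # hypothetical bounding-box centroid
distance = math.sqrt((x1 - center_point[0]) ** 2 + (y1 - center_point[1]) ** 2) / pixel_per_meter
print(f"Distance: {distance:.2f} m")  # sqrt(180**2 + 240**2) = 300 px, i.e. 30.00 m
```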
@@ -39,28 +39,30 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi

=== "Workouts Monitoring"

    ```python
    from ultralytics import YOLO, solutions
    import cv2
    from ultralytics import YOLO, solutions

    model = YOLO("yolov8n-pose.pt")
    cap = cv2.VideoCapture("path/to/video/file.mp4")
    assert cap.isOpened(), "Error reading video file"
    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

    gym_object = solutions.AIGym(line_thickness=2,
                                 view_img=True,
                                 pose_type="pushup",
                                 kpts_to_check=[6, 8, 10])
    gym_object = solutions.AIGym(
        line_thickness=2,
        view_img=True,
        pose_type="pushup",
        kpts_to_check=[6, 8, 10],
    )

    frame_count = 0
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
          print("Video frame is empty or video processing has been successfully completed.")
          break
            print("Video frame is empty or video processing has been successfully completed.")
            break
        frame_count += 1
        results = model.track(im0, verbose=False)  # Tracking recommended
        #results = model.predict(im0)  # Prediction also supported
        # results = model.predict(im0)  # Prediction also supported
        im0 = gym_object.start_counting(im0, results, frame_count)

    cv2.destroyAllWindows()
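A note on `kpts_to_check=[6, 8, 10]`: in COCO-pose keypoint ordering these indices correspond, to our reading, to the right shoulder, elbow, and wrist. Swapping in the left-arm indices is then a one-line change; the sketch below labels that mapping as an assumption:

```python
from ultralytics import solutions

# Assumed COCO-pose indices: 5/6 shoulders, 7/8 elbows, 9/10 wrists (left/right)
gym_object = solutions.AIGym(
    line_thickness=2,
    view_img=True,
    pose_type="pushup",
    kpts_to_check=[5, 7, 9],  # hypothetical left-arm configuration
)
```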
@@ -69,30 +71,32 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi

=== "Workouts Monitoring with Save Output"

    ```python
    from ultralytics import YOLO, solutions
    import cv2
    from ultralytics import YOLO, solutions

    model = YOLO("yolov8n-pose.pt")
    cap = cv2.VideoCapture("path/to/video/file.mp4")
    assert cap.isOpened(), "Error reading video file"
    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

    video_writer = cv2.VideoWriter("workouts.avi", cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    video_writer = cv2.VideoWriter("workouts.avi", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

    gym_object = solutions.AIGym(line_thickness=2,
                                 view_img=True,
                                 pose_type="pushup",
                                 kpts_to_check=[6, 8, 10])
    gym_object = solutions.AIGym(
        line_thickness=2,
        view_img=True,
        pose_type="pushup",
        kpts_to_check=[6, 8, 10],
    )

    frame_count = 0
    while cap.isOpened():
        success, im0 = cap.read()
        if not success:
          print("Video frame is empty or video processing has been successfully completed.")
          break
            print("Video frame is empty or video processing has been successfully completed.")
            break
        frame_count += 1
        results = model.track(im0, verbose=False)  # Tracking recommended
        #results = model.predict(im0)  # Prediction also supported
        # results = model.predict(im0)  # Prediction also supported
        im0 = gym_object.start_counting(im0, results, frame_count)
        video_writer.write(im0)
@@ -79,7 +79,7 @@ This section will address common issues faced while training and their respectiv

- Make sure you pass the path to your `.yaml` file as the `data` argument when calling `model.train()`, as shown below:

    ```python
    model.train(data='/path/to/your/data.yaml', batch=4)
    model.train(data="/path/to/your/data.yaml", batch=4)
    ```

#### Accelerating Training with Multiple GPUs
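This heading covers multi-GPU training; for orientation, a minimal sketch of the usual approach, passing a list of device indices to `train()` (the dataset path is a placeholder):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# Distribute training across two GPUs by listing their device indices
model.train(data="/path/to/your/data.yaml", batch=32, device=[0, 1])
```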
@@ -98,7 +98,7 @@ model.train(data='/path/to/your/data.yaml', batch=4)

```python
# Adjust the batch size and other settings as needed to optimize training speed
model.train(data='/path/to/your/data.yaml', batch=32, multi_scale=True)
model.train(data="/path/to/your/data.yaml", batch=32, multi_scale=True)
```

#### Continuous Monitoring Parameters
@@ -221,10 +221,10 @@ yolo task=detect mode=segment model=yolov8n-seg.pt source='path/to/car.mp4' show

from ultralytics import YOLO

# Load a pre-trained YOLOv8 model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")

# Specify the source image
source = 'https://ultralytics.com/images/bus.jpg'
source = "https://ultralytics.com/images/bus.jpg"

# Make predictions
results = model.predict(source, save=True, imgsz=320, conf=0.5)
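For readers following along, the returned list of `Results` objects can be inspected directly. A short sketch of pulling out classes, confidences, and box coordinates via the standard `Results`/`Boxes` attributes:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
results = model.predict("https://ultralytics.com/images/bus.jpg", imgsz=320, conf=0.5)

# Each Results object carries the detections for one image
for box in results[0].boxes:
    cls_id = int(box.cls)  # predicted class index
    score = float(box.conf)  # confidence score
    x1, y1, x2, y2 = box.xyxy[0].tolist()  # corner coordinates
    print(f"{results[0].names[cls_id]}: {score:.2f} at ({x1:.0f}, {y1:.0f}, {x2:.0f}, {y2:.0f})")
```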
@@ -28,9 +28,10 @@ When using threads in Python, it's important to recognize patterns that can lead

```python
# Unsafe: Sharing a single model instance across threads
from ultralytics import YOLO
from threading import Thread

from ultralytics import YOLO

# Instantiate the model outside the thread
shared_model = YOLO("yolov8n.pt")
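The hunk cuts off before the threads are launched; the anti-pattern it illustrates typically continues as below. This is a self-contained sketch with placeholder image paths:

```python
from threading import Thread

from ultralytics import YOLO

# Instantiate the model outside the thread (the unsafe part)
shared_model = YOLO("yolov8n.pt")


def predict(image_path):
    """Run inference with the module-level shared model; unsafe when called from several threads."""
    return shared_model.predict(image_path)


# Both threads hit the same model instance concurrently, which can corrupt its internal state
Thread(target=predict, args=("image1.jpg",)).start()
Thread(target=predict, args=("image2.jpg",)).start()
```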
@@ -54,9 +55,10 @@ Similarly, here is an unsafe pattern with multiple YOLO model instances:

```python
# Unsafe: Sharing multiple model instances across threads can still lead to issues
from ultralytics import YOLO
from threading import Thread

from ultralytics import YOLO

# Instantiate multiple models outside the thread
shared_model_1 = YOLO("yolov8n_1.pt")
shared_model_2 = YOLO("yolov8n_2.pt")
@@ -85,9 +87,10 @@ Here's how to instantiate a YOLO model inside each thread for safe parallel infe

```python
# Safe: Instantiating a single model inside each thread
from ultralytics import YOLO
from threading import Thread

from ultralytics import YOLO


def thread_safe_predict(image_path):
    """Predict on an image using a new YOLO model instance in a thread-safe manner; takes image path as input."""
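The function body and thread launch fall outside the hunk; the safe pattern completes roughly as follows, as a sketch with placeholder image paths:

```python
from threading import Thread

from ultralytics import YOLO


def thread_safe_predict(image_path):
    """Predict on an image using a new YOLO model instance in a thread-safe manner; takes image path as input."""
    local_model = YOLO("yolov8n.pt")  # a fresh instance per thread, so no state is shared
    return local_model.predict(image_path)


# Each thread owns its own model, avoiding cross-thread interference
Thread(target=thread_safe_predict, args=("image1.jpg",)).start()
Thread(target=thread_safe_predict, args=("image2.jpg",)).start()
```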