ultralytics 8.2.30 automated tags and release notes (#13164)
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
This commit is contained in:
parent 6367ff4748
commit 59eedcc3fa

29 changed files with 135 additions and 22 deletions
@@ -28,6 +28,7 @@ This guide provides a comprehensive overview of three fundamental types of data

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8s.pt")
@@ -76,6 +77,7 @@ This guide provides a comprehensive overview of three fundamental types of data

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8s.pt")
@@ -136,6 +138,7 @@ This guide provides a comprehensive overview of three fundamental types of data

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8s.pt")
@@ -185,6 +188,7 @@ This guide provides a comprehensive overview of three fundamental types of data

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8s.pt")
@@ -234,52 +238,53 @@ This guide provides a comprehensive overview of three fundamental types of data

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8s.pt")

cap = cv2.VideoCapture("path/to/video/file.mp4")
assert cap.isOpened(), "Error reading video file"
w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))

out = cv2.VideoWriter("area_plot.avi", cv2.VideoWriter_fourcc(*"MJPG"), fps, (w, h))

analytics = solutions.Analytics(
    type="area",
    writer=out,
    im0_shape=(w, h),
    view_img=True,
)

clswise_count = {}
frame_count = 0

while cap.isOpened():
    success, frame = cap.read()
    if success:
        frame_count += 1
        results = model.track(frame, persist=True, verbose=True)

        if results[0].boxes.id is not None:
            boxes = results[0].boxes.xyxy.cpu()
            clss = results[0].boxes.cls.cpu().tolist()

            for box, cls in zip(boxes, clss):
                if model.names[int(cls)] in clswise_count:
                    clswise_count[model.names[int(cls)]] += 1
                else:
                    clswise_count[model.names[int(cls)]] = 1

        analytics.update_area(frame_count, clswise_count)
        clswise_count = {}
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
```

### Argument `Analytics`
@@ -43,6 +43,7 @@ Measuring the gap between two objects is known as distance calculation within a

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")
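The distance-calculation hunk above is truncated after the model load. As a rough, hedged illustration of the idea behind that guide (not the `solutions.DistanceCalculation` API the docs themselves use), the sketch below measures the pixel distance between the centroids of the first two tracked boxes; the video path is a placeholder.

```python
import math

import cv2

from ultralytics import YOLO

model = YOLO("yolov8n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")  # hypothetical input video

while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break

    results = model.track(frame, persist=True)
    if results[0].boxes.id is not None:
        boxes = results[0].boxes.xyxy.cpu().tolist()
        # Centroid of each tracked box
        centroids = [((x1 + x2) / 2, (y1 + y2) / 2) for x1, y1, x2, y2 in boxes]
        if len(centroids) >= 2:
            # Pixel distance between the first two tracked objects
            (ax, ay), (bx, by) = centroids[0], centroids[1]
            pixel_dist = math.hypot(ax - bx, ay - by)
            print(f"Distance between first two objects: {pixel_dist:.1f} px")

cap.release()
```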
@@ -45,6 +45,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")

@@ -82,6 +83,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")

@@ -121,6 +123,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult

=== "Polygon Counting"

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")

@@ -162,6 +165,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")

@@ -203,6 +207,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8s.pt")  # YOLOv8 custom/pretrained model

@@ -227,6 +232,7 @@ A heatmap generated with [Ultralytics YOLOv8](https://github.com/ultralytics/ult

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")
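The heatmap hunks above all stop at the model load. Below is a hedged, manual sketch of the underlying idea — accumulating detection footprints in a numpy array and blending a colormapped overlay onto each frame — rather than the `solutions.Heatmap` class the guide itself uses; the video path is a placeholder.

```python
import cv2
import numpy as np

from ultralytics import YOLO

model = YOLO("yolov8n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")  # hypothetical input video
heat = None

while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    if heat is None:
        heat = np.zeros(frame.shape[:2], dtype=np.float32)

    results = model.track(frame, persist=True, verbose=False)
    for x1, y1, x2, y2 in results[0].boxes.xyxy.cpu().numpy().astype(int):
        heat[y1:y2, x1:x2] += 1.0  # accumulate presence inside each detected box

    # Normalize, colorize and blend the accumulated heat onto the current frame
    norm = cv2.normalize(heat, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
    colored = cv2.applyColorMap(norm, cv2.COLORMAP_JET)
    blended = cv2.addWeighted(frame, 0.6, colored, 0.4, 0)
    cv2.imshow("heatmap", blended)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
```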
@@ -40,6 +40,7 @@ There are two types of instance segmentation tracking available in the Ultralyti

```python
import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors

@@ -82,6 +83,7 @@ There are two types of instance segmentation tracking available in the Ultralyti

from collections import defaultdict

import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors
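The instance-segmentation-with-tracking hunks end at the imports. As a minimal sketch of the technique, assuming a segmentation checkpoint and a placeholder video path, the snippet below draws each tracked mask outline from `results[0].masks.xy` together with its track ID.

```python
import cv2
import numpy as np

from ultralytics import YOLO

model = YOLO("yolov8n-seg.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")  # hypothetical input video

while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break

    results = model.track(frame, persist=True)
    if results[0].masks is not None and results[0].boxes.id is not None:
        track_ids = results[0].boxes.id.int().cpu().tolist()
        for polygon, track_id in zip(results[0].masks.xy, track_ids):
            pts = polygon.astype(np.int32)
            # Outline the mask polygon and label it with its track ID
            cv2.polylines(frame, [pts], isClosed=True, color=(0, 255, 0), thickness=2)
            cv2.putText(frame, f"id {track_id}", tuple(pts[0]), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    cv2.imshow("instance-segmentation-tracking", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
```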
@@ -21,6 +21,7 @@ After performing the [Segment Task](../tasks/segment.md), it's sometimes desirab

import cv2
import numpy as np

from ultralytics import YOLO
```

@@ -272,6 +273,7 @@ from pathlib import Path

import cv2
import numpy as np

from ultralytics import YOLO

m = YOLO("yolov8n-seg.pt")  # (4)!
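The isolating-segmentation-objects hunks likewise show only the imports. A minimal sketch of the binary-mask approach that guide describes, with a placeholder image path: rasterize each predicted polygon with `cv2.fillPoly`, then mask the original image so everything outside the object goes black.

```python
import cv2
import numpy as np

from ultralytics import YOLO

model = YOLO("yolov8n-seg.pt")
results = model("path/to/image.jpg")  # hypothetical input image

for result in results:
    if result.masks is None:
        continue
    img = result.orig_img
    for i, polygon in enumerate(result.masks.xy):
        # Rasterize the predicted polygon into a binary mask
        mask = np.zeros(img.shape[:2], dtype=np.uint8)
        cv2.fillPoly(mask, [polygon.astype(np.int32)], 255)

        # Keep only the masked pixels; background becomes black
        isolated = cv2.bitwise_and(img, img, mask=mask)
        cv2.imwrite(f"isolated_{i}.jpg", isolated)
```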
@@ -69,6 +69,7 @@ Without further ado, let's dive in!

import pandas as pd
import yaml
from sklearn.model_selection import KFold

from ultralytics import YOLO
```
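The K-Fold hunk shows only the imports used in that guide. As a small sketch of the splitting step alone (the dataset path and fold count here are assumptions, and the dataset-YAML bookkeeping from the guide is omitted):

```python
from pathlib import Path

from sklearn.model_selection import KFold

# Hypothetical dataset layout: images collected under a single folder
image_paths = sorted(Path("datasets/my_dataset/images").glob("*.jpg"))

kf = KFold(n_splits=5, shuffle=True, random_state=20)
for fold_idx, (train_idx, val_idx) in enumerate(kf.split(image_paths)):
    train_files = [image_paths[i] for i in train_idx]
    val_files = [image_paths[i] for i in val_idx]
    print(f"fold {fold_idx}: {len(train_files)} train / {len(val_files)} val images")
```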
@@ -22,6 +22,7 @@ Object blurring with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly

```python
import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors
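The object-blurring hunk is cut off after the imports. A minimal sketch of the technique, assuming a placeholder video path: blur each detected box region in place with `cv2.blur`.

```python
import cv2

from ultralytics import YOLO

model = YOLO("yolov8n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")  # hypothetical input video

while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break

    results = model.predict(frame)
    for x1, y1, x2, y2 in results[0].boxes.xyxy.cpu().numpy().astype(int):
        # Replace each detected region with a blurred copy of itself
        roi = frame[y1:y2, x1:x2]
        frame[y1:y2, x1:x2] = cv2.blur(roi, (35, 35))

    cv2.imshow("object-blurring", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
```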
@@ -52,6 +52,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")

@@ -93,6 +94,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")

@@ -134,6 +136,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")

@@ -175,6 +178,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")
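The object-counting hunks stop at the model load. As a hedged, manual alternative to the `solutions.ObjectCounter` class the guide itself uses, the sketch below counts each track ID once when its box centroid falls inside a hypothetical polygonal region; the video path and region coordinates are placeholders.

```python
import cv2
import numpy as np

from ultralytics import YOLO

model = YOLO("yolov8n.pt")
cap = cv2.VideoCapture("path/to/video/file.mp4")  # hypothetical input video

# Hypothetical counting region (polygon in pixel coordinates)
region = np.array([[100, 400], [1000, 400], [1000, 700], [100, 700]], dtype=np.int32)
counted_ids = set()

while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break

    results = model.track(frame, persist=True)
    if results[0].boxes.id is not None:
        ids = results[0].boxes.id.int().cpu().tolist()
        boxes = results[0].boxes.xyxy.cpu().tolist()
        for (x1, y1, x2, y2), track_id in zip(boxes, ids):
            cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
            # Centroid inside the region -> count this track exactly once
            if cv2.pointPolygonTest(region, (cx, cy), False) >= 0:
                counted_ids.add(track_id)

    print(f"objects counted so far: {len(counted_ids)}")

cap.release()
```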
@@ -31,6 +31,7 @@ Object cropping with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly

import os

import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors
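The object-cropping hunk shows only the imports. A minimal sketch, assuming a placeholder image path and an output folder named `crops`: slice each detected box out of the image and save it under its class name.

```python
import os

import cv2

from ultralytics import YOLO

model = YOLO("yolov8n.pt")
image = cv2.imread("path/to/image.jpg")  # hypothetical input image
os.makedirs("crops", exist_ok=True)

results = model.predict(image)
boxes = results[0].boxes.xyxy.cpu().numpy().astype(int)
clss = results[0].boxes.cls.cpu().tolist()

for idx, ((x1, y1, x2, y2), cls) in enumerate(zip(boxes, clss)):
    crop = image[y1:y2, x1:x2]
    # File name combines class name and detection index
    cv2.imwrite(os.path.join("crops", f"{model.names[int(cls)]}_{idx}.jpg"), crop)
```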
@@ -60,6 +60,7 @@ Parking management with [Ultralytics YOLOv8](https://github.com/ultralytics/ultr

```python
import cv2

from ultralytics import solutions

# Path to json file, that created with above point selection app
@@ -28,6 +28,7 @@ Queue management using [Ultralytics YOLOv8](https://github.com/ultralytics/ultra

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")

@@ -71,6 +72,7 @@ Queue management using [Ultralytics YOLOv8](https://github.com/ultralytics/ultra

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")
@@ -286,6 +286,7 @@ There are 2 methods of using the Raspberry Pi Camera to inference YOLOv8 models.

```python
import cv2
from picamera2 import Picamera2

from ultralytics import YOLO

# Initialize the Picamera2
@@ -38,6 +38,7 @@ from time import time

import cv2
import numpy as np
import torch

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors
```
@@ -44,6 +44,7 @@ keywords: Ultralytics YOLOv8, speed estimation, object tracking, computer vision

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n.pt")
@@ -48,6 +48,7 @@ The VSCode compatible protocols for viewing images using the integrated terminal

import cv2 as cv
from sixel import SixelWriter

from ultralytics import YOLO
```

@@ -111,6 +112,7 @@ import io

import cv2 as cv
from sixel import SixelWriter

from ultralytics import YOLO

# Load a model
@@ -23,6 +23,7 @@ keywords: VisionEye, YOLOv8, Ultralytics, object mapping, object tracking, dista

```python
import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors

@@ -66,6 +67,7 @@ keywords: VisionEye, YOLOv8, Ultralytics, object mapping, object tracking, dista

```python
import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors

@@ -112,6 +114,7 @@ keywords: VisionEye, YOLOv8, Ultralytics, object mapping, object tracking, dista

import math

import cv2

from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors
@@ -40,6 +40,7 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n-pose.pt")

@@ -72,6 +73,7 @@ Monitoring workouts through pose estimation with [Ultralytics YOLOv8](https://gi

```python
import cv2

from ultralytics import YOLO, solutions

model = YOLO("yolov8n-pose.pt")
@@ -50,6 +50,7 @@ When running inference on a 0-channel image, I get an error related to the dimen

```python
import torch

from ultralytics import YOLO

# Load the model
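The FAQ hunk above is truncated after the imports. As a hedged illustration of the dimensionality point (the 640×640 size and the grayscale example are assumptions), YOLOv8 expects a 3-channel BCHW tensor, so single-channel data should be expanded before inference:

```python
import torch

from ultralytics import YOLO

# Load the model
model = YOLO("yolov8n.pt")

# A single-channel (or 0-channel) tensor triggers a dimension error,
# so replicate the channel to reach the expected (batch, 3, H, W) shape.
gray = torch.rand(1, 1, 640, 640)       # e.g. a grayscale frame, values in [0, 1]
rgb_like = gray.repeat(1, 3, 1, 1)      # shape becomes (1, 3, 640, 640)

results = model(rgb_like)
print(len(results[0].boxes))
```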
@@ -64,6 +64,7 @@ Before diving into the usage instructions, be sure to check out the range of [YO

```python
from clearml import Task

from ultralytics import YOLO

# Step 1: Creating a ClearML Task
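The ClearML hunk stops at the task-creation comment. A minimal sketch of how the pieces typically fit together, assuming ClearML is installed and configured; the project/task names and training arguments are placeholders.

```python
from clearml import Task

from ultralytics import YOLO

# Step 1: create a ClearML task so the run is logged to the ClearML server
task = Task.init(project_name="my-yolov8-project", task_name="yolov8n-coco8-demo")

# Step 2: train as usual; run details are captured under the task created above
model = YOLO("yolov8n.pt")
model.train(data="coco8.yaml", epochs=3, imgsz=640)
```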
@@ -57,6 +57,7 @@ This section provides the Python code used to create the Gradio interface with t

```python
import gradio as gr
import PIL.Image as Image

from ultralytics import ASSETS, YOLO

model = YOLO("yolov8n.pt")
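The Gradio hunk ends at the model load. A minimal sketch of a working interface, assuming the annotated frame returned by `results[0].plot()` (a BGR array) is converted to RGB for display:

```python
import gradio as gr
import PIL.Image as Image

from ultralytics import YOLO

model = YOLO("yolov8n.pt")


def predict(img: Image.Image) -> Image.Image:
    """Run YOLOv8 on a PIL image and return the annotated result."""
    results = model(img)
    annotated = results[0].plot()                    # BGR numpy array
    return Image.fromarray(annotated[..., ::-1].copy())  # convert to RGB


demo = gr.Interface(fn=predict, inputs=gr.Image(type="pil"), outputs=gr.Image(type="pil"))

if __name__ == "__main__":
    demo.launch()
```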
@@ -403,6 +403,7 @@ Expand sections below for information on how these models were exported and test

```py
import cv2

from ultralytics import YOLO

model = YOLO("yolov8n.engine")
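The hunk above loads a `yolov8n.engine` file. A minimal sketch of the export-then-infer round trip, assuming TensorRT is available on the device; the sample image URL is the one used throughout the Ultralytics docs.

```python
from ultralytics import YOLO

# Export a PyTorch checkpoint to a TensorRT engine (requires TensorRT on the device)
model = YOLO("yolov8n.pt")
model.export(format="engine")  # writes a yolov8n.engine file

# Reload the engine and run inference with the same predict API
trt_model = YOLO("yolov8n.engine")
results = trt_model("https://ultralytics.com/images/bus.jpg")
print(len(results[0].boxes))
```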
@@ -64,9 +64,10 @@ Before diving into the usage instructions for YOLOv8 model training with Weights

=== "Python"

```python
import wandb
from wandb.integration.ultralytics import add_wandb_callback

from ultralytics import YOLO

# Step 1: Initialize a Weights & Biases run
wandb.init(project="ultralytics", job_type="training")
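The Weights & Biases hunk shows the imports and the `wandb.init` call. A hedged sketch of how the callback is typically attached before training; the dataset and epoch count are placeholders.

```python
import wandb
from wandb.integration.ultralytics import add_wandb_callback

from ultralytics import YOLO

# Step 1: Initialize a Weights & Biases run
wandb.init(project="ultralytics", job_type="training")

# Step 2: Load the model and attach the W&B callback before training
model = YOLO("yolov8n.pt")
add_wandb_callback(model)

# Step 3: Train; metrics are streamed to the active W&B run
model.train(data="coco8.yaml", epochs=3, imgsz=640)

# Step 4: Close the run
wandb.finish()
```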
@@ -178,6 +178,7 @@ Below are code examples for using each source type:

Run inference on an image opened with Python Imaging Library (PIL).

```python
from PIL import Image

from ultralytics import YOLO

# Load a pretrained YOLOv8n model

@@ -195,6 +196,7 @@ Below are code examples for using each source type:

Run inference on an image read with OpenCV.

```python
import cv2

from ultralytics import YOLO

# Load a pretrained YOLOv8n model

@@ -212,6 +214,7 @@ Below are code examples for using each source type:

Run inference on an image represented as a numpy array.

```python
import numpy as np

from ultralytics import YOLO

# Load a pretrained YOLOv8n model

@@ -229,6 +232,7 @@ Below are code examples for using each source type:

Run inference on an image represented as a PyTorch tensor.

```python
import torch

from ultralytics import YOLO

# Load a pretrained YOLOv8n model

@@ -246,6 +250,7 @@ Below are code examples for using each source type:

Run inference on a collection of images, URLs, videos and directories listed in a CSV file.

```python
import torch

from ultralytics import YOLO

# Load a pretrained YOLOv8n model

@@ -673,6 +678,7 @@ The `plot()` method in `Results` objects facilitates visualization of prediction

```python
from PIL import Image

from ultralytics import YOLO

# Load a pretrained YOLOv8n model

@@ -754,6 +760,7 @@ Here's a Python script using OpenCV (`cv2`) and YOLOv8 to run inference on video

```python
import cv2

from ultralytics import YOLO

# Load the YOLOv8 model
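The prediction hunks above enumerate several input source types but are each truncated at the model load. A minimal combined sketch, with placeholder image paths, showing that the same model call accepts a PIL image, an OpenCV BGR array, and a raw numpy array:

```python
import cv2
import numpy as np
from PIL import Image

from ultralytics import YOLO

# Load a pretrained YOLOv8n model
model = YOLO("yolov8n.pt")

# PIL image source
pil_img = Image.open("path/to/image.jpg")  # hypothetical image path
results_pil = model(pil_img)

# OpenCV (BGR numpy array) source
cv_img = cv2.imread("path/to/image.jpg")
results_cv = model(cv_img)

# Raw numpy array source (HWC, uint8)
np_img = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
results_np = model(np_img)

print(len(results_pil[0].boxes), len(results_cv[0].boxes), len(results_np[0].boxes))
```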
@@ -159,6 +159,7 @@ Here is a Python script using OpenCV (`cv2`) and YOLOv8 to run object tracking o

```python
import cv2

from ultralytics import YOLO

# Load the YOLOv8 model

@@ -210,6 +211,7 @@ In the following example, we demonstrate how to utilize YOLOv8's tracking capabi

import cv2
import numpy as np

from ultralytics import YOLO

# Load the YOLOv8 model

@@ -285,6 +287,7 @@ Finally, after all threads have completed their task, the windows displaying the

import threading

import cv2

from ultralytics import YOLO
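The multithreaded-tracking hunk ends at the imports. A minimal sketch of the pattern that section describes — one thread and one model instance per video — with placeholder video file names:

```python
import threading

import cv2

from ultralytics import YOLO


def run_tracker(video_path: str, model_path: str) -> None:
    """Track objects in one video with a dedicated model instance."""
    model = YOLO(model_path)  # each thread gets its own model to avoid shared state
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        success, frame = cap.read()
        if not success:
            break
        model.track(frame, persist=True, verbose=False)
    cap.release()


# Hypothetical video files processed in parallel
threads = [
    threading.Thread(target=run_tracker, args=(path, "yolov8n.pt"), daemon=True)
    for path in ("video1.mp4", "video2.mp4")
]
for t in threads:
    t.start()
for t in threads:
    t.join()

cv2.destroyAllWindows()
```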
@@ -127,6 +127,7 @@ Predict mode is used for making predictions using a trained YOLOv8 model on new

```python
import cv2
from PIL import Image

from ultralytics import YOLO

model = YOLO("model.pt")
@@ -130,6 +130,7 @@ If you have a dataset that uses the [segmentation dataset format](../datasets/se

```python
import numpy as np

from ultralytics.utils.ops import segments2boxes

segments = np.array(

@@ -194,6 +195,7 @@ Convert a single polygon (as list) to a binary mask of the specified image size.

```python
import numpy as np

from ultralytics.data.utils import polygon2mask

imgsz = (1080, 810)

@@ -215,6 +217,7 @@ To manage bounding box data, the `Bboxes` class will help to convert between box

```python
import numpy as np

from ultralytics.utils.instance import Bboxes

boxes = Bboxes(

@@ -259,6 +262,7 @@ When scaling and image up or down, corresponding bounding box coordinates can be

```{ .py .annotate }
import cv2 as cv
import numpy as np

from ultralytics.utils.ops import scale_boxes

image = cv.imread("ultralytics/assets/bus.jpg")

@@ -307,6 +311,7 @@ Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, h

```python
import numpy as np

from ultralytics.utils.ops import xyxy2xywh

xyxy_boxes = np.array(

@@ -359,6 +364,7 @@ Ultralytics includes an Annotator class that can be used to annotate any kind of

```{ .py .annotate }
import cv2 as cv
import numpy as np

from ultralytics.utils.plotting import Annotator, colors

names = { # (1)!

@@ -402,6 +408,7 @@ image_with_bboxes = ann.result()

```python
import cv2 as cv
import numpy as np

from ultralytics.utils.plotting import Annotator, colors

obb_names = {10: "small vehicle"}
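The utility hunks above each stop right after the imports. As one small, self-contained example of the box-format conversion named in the `xyxy2xywh` hunk (the coordinate values are made up):

```python
import numpy as np

from ultralytics.utils.ops import xyxy2xywh

# Two illustrative boxes in (x1, y1, x2, y2) pixel coordinates
xyxy_boxes = np.array(
    [
        [50.0, 60.0, 150.0, 200.0],
        [300.0, 120.0, 420.0, 260.0],
    ]
)

# Convert to (x_center, y_center, width, height)
xywh_boxes = xyxy2xywh(xyxy_boxes)
print(xywh_boxes)
```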