Docs Ruff codeblocks reformat and fix (#12847)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>

Parent: be5cf7a033
Commit: 68031133fd

9 changed files with 167 additions and 178 deletions
@@ -36,10 +36,10 @@ Dataset annotation is a very resource intensive and time-consuming process. If y
 ```{ .py .annotate }
 from ultralytics.data.annotator import auto_annotate

-auto_annotate(#(1)!
-    data='path/to/new/data',
-    det_model='yolov8n.pt',
-    sam_model='mobile_sam.pt',
+auto_annotate(  # (1)!
+    data="path/to/new/data",
+    det_model="yolov8n.pt",
+    sam_model="mobile_sam.pt",
     device="cuda",
     output_dir="path/to/save_labels",
 )
@@ -58,9 +58,9 @@ Use to convert COCO JSON annotations into proper YOLO format. For object detecti
 ```{ .py .annotate }
 from ultralytics.data.converter import convert_coco

-convert_coco(#(1)!
-    '../datasets/coco/annotations/',
-    use_segments=False,
+convert_coco(  # (1)!
+    "../datasets/coco/annotations/",
+    use_segments=False,
     use_keypoints=False,
     cls91to80=True,
 )
@@ -113,10 +113,10 @@ data
 ```{ .py .annotate }
 from ultralytics.data.converter import yolo_bbox2segment

-yolo_bbox2segment(#(1)!
+yolo_bbox2segment(  # (1)!
     im_dir="path/to/images",
-    save_dir=None, # saved to "labels-segment" in images directory
-    sam_model="sam_b.pt"
+    save_dir=None,  # saved to "labels-segment" in images directory
+    sam_model="sam_b.pt",
 )
 ```

@@ -129,20 +129,22 @@ yolo_bbox2segment(#(1)!
 If you have a dataset that uses the [segmentation dataset format](../datasets/segment/index.md) you can easily convert these into up-right (or horizontal) bounding boxes (`x y w h` format) with this function.

 ```python
 import numpy as np
 from ultralytics.utils.ops import segments2boxes

 segments = np.array(
-    [[805, 392, 797, 400, ..., 808, 714, 808, 392],
-    [115, 398, 113, 400, ..., 150, 400, 149, 298],
-    [267, 412, 265, 413, ..., 300, 413, 299, 412],
+    [
+        [805, 392, 797, 400, ..., 808, 714, 808, 392],
+        [115, 398, 113, 400, ..., 150, 400, 149, 298],
+        [267, 412, 265, 413, ..., 300, 413, 299, 412],
+    ]
 )

-segments2boxes([s.reshape(-1,2) for s in segments])
->>> array([[ 741.66, 631.12, 133.31, 479.25],
-    [ 146.81, 649.69, 185.62, 502.88],
-    [ 281.81, 636.19, 118.12, 448.88]],
-    dtype=float32) # xywh bounding boxes
+segments2boxes([s.reshape(-1, 2) for s in segments])
+# >>> array([[ 741.66, 631.12, 133.31, 479.25],
+#     [ 146.81, 649.69, 185.62, 502.88],
+#     [ 281.81, 636.19, 118.12, 448.88]],
+#     dtype=float32)  # xywh bounding boxes
 ```

 To understand how this function works, visit the [reference page](../reference/utils/ops.md#ultralytics.utils.ops.segments2boxes)
@@ -155,10 +157,11 @@ Compresses a single image file to reduced size while preserving its aspect ratio

 ```{ .py .annotate }
 from pathlib import Path
+
 from ultralytics.data.utils import compress_one_image

-for f in Path('path/to/dataset').rglob('*.jpg'):
-    compress_one_image(f)#(1)!
+for f in Path("path/to/dataset").rglob("*.jpg"):
+    compress_one_image(f)  # (1)!
 ```

 1. Nothing returns from this function
@@ -170,10 +173,10 @@ Automatically split a dataset into `train`/`val`/`test` splits and save the resu
 ```{ .py .annotate }
 from ultralytics.data.utils import autosplit

-autosplit( #(1)!
+autosplit(  # (1)!
     path="path/to/images",
-    weights=(0.9, 0.1, 0.0), # (train, validation, test) fractional splits
-    annotated_only=False # split only images with annotation file when True
+    weights=(0.9, 0.1, 0.0),  # (train, validation, test) fractional splits
+    annotated_only=False,  # split only images with annotation file when True
 )
 ```

@@ -194,9 +197,7 @@ import numpy as np
 from ultralytics.data.utils import polygon2mask

 imgsz = (1080, 810)
-polygon = np.array(
-    [805, 392, 797, 400, ..., 808, 714, 808, 392], # (238, 2)
-)
+polygon = np.array([805, 392, 797, 400, ..., 808, 714, 808, 392])  # (238, 2)

 mask = polygon2mask(
     imgsz,  # tuple
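The diff view truncates the `polygon2mask` call above. Purely as an illustrative sketch of a complete call, with made-up rectangle coordinates and with the keyword names `color` and `downsample_ratio` assumed rather than confirmed here, check the reference docs before relying on them:

```python
import numpy as np

from ultralytics.data.utils import polygon2mask

imgsz = (1080, 810)
# an illustrative rectangle as flattened x, y pairs (not data from the docs)
polygon = np.array([100, 100, 700, 100, 700, 500, 100, 500])

mask = polygon2mask(
    imgsz,  # target mask size as a tuple
    [polygon],  # assumed to accept a list of polygons
    color=255,  # assumed fill value for the mask
    downsample_ratio=1,  # assumed keyword; keep full resolution
)
print(mask.shape)
```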
@@ -213,32 +214,36 @@ mask = polygon2mask(
 To manage bounding box data, the `Bboxes` class will help to convert between box coordinate formatting, scale box dimensions, calculate areas, include offsets, and more!

 ```python
 import numpy as np
 from ultralytics.utils.instance import Bboxes

 boxes = Bboxes(
     bboxes=np.array(
-        [[ 22.878, 231.27, 804.98, 756.83,],
-        [ 48.552, 398.56, 245.35, 902.71,],
-        [ 669.47, 392.19, 809.72, 877.04,],
-        [ 221.52, 405.8, 344.98, 857.54,],
-        [ 0, 550.53, 63.01, 873.44,],
-        [ 0.0584, 254.46, 32.561, 324.87,]]
+        [
+            [22.878, 231.27, 804.98, 756.83],
+            [48.552, 398.56, 245.35, 902.71],
+            [669.47, 392.19, 809.72, 877.04],
+            [221.52, 405.8, 344.98, 857.54],
+            [0, 550.53, 63.01, 873.44],
+            [0.0584, 254.46, 32.561, 324.87],
+        ]
     ),
     format="xyxy",
 )

 boxes.areas()
->>> array([ 4.1104e+05, 99216, 68000, 55772, 20347, 2288.5])
+# >>> array([ 4.1104e+05, 99216, 68000, 55772, 20347, 2288.5])

 boxes.convert("xywh")
-boxes.bboxes
->>> array(
-    [[ 413.93, 494.05, 782.1, 525.56],
-    [ 146.95, 650.63, 196.8, 504.15],
-    [ 739.6, 634.62, 140.25, 484.85],
-    [ 283.25, 631.67, 123.46, 451.74],
-    [ 31.505, 711.99, 63.01, 322.91],
-    [ 16.31, 289.67, 32.503, 70.41]]
-)
+print(boxes.bboxes)
+# >>> array(
+#     [[ 413.93, 494.05, 782.1, 525.56],
+#     [ 146.95, 650.63, 196.8, 504.15],
+#     [ 739.6, 634.62, 140.25, 484.85],
+#     [ 283.25, 631.67, 123.46, 451.74],
+#     [ 31.505, 711.99, 63.01, 322.91],
+#     [ 16.31, 289.67, 32.503, 70.41]]
+# )
 ```

 See the [`Bboxes` reference section](../reference/utils/instance.md#ultralytics.utils.instance.Bboxes) for more attributes and methods available.
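The prose in the hunk above also mentions scaling box dimensions and including offsets, which the example does not show. A minimal sketch of those helpers, assuming `Bboxes.mul()` and `Bboxes.add()` accept a scalar or a 4-tuple; verify against the reference section linked above:

```python
import numpy as np

from ultralytics.utils.instance import Bboxes

boxes = Bboxes(bboxes=np.array([[22.878, 231.27, 804.98, 756.83]]), format="xyxy")

boxes.mul(1.25)  # scale every coordinate by the same factor (assumed scalar broadcast)
boxes.add((10, 10, 10, 10))  # shift (x1, y1, x2, y2) by 10 pixels each (assumed 4-tuple offset)
print(boxes.bboxes)
```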
@@ -257,37 +262,39 @@ import numpy as np
 from ultralytics.utils.ops import scale_boxes

 image = cv.imread("ultralytics/assets/bus.jpg")
-*(h, w), c = image.shape
+h, w, c = image.shape
 resized = cv.resize(image, None, (), fx=1.2, fy=1.2)
-*(new_h, new_w), _ = resized.shape
+new_h, new_w, _ = resized.shape

 xyxy_boxes = np.array(
-    [[ 22.878, 231.27, 804.98, 756.83,],
-    [ 48.552, 398.56, 245.35, 902.71,],
-    [ 669.47, 392.19, 809.72, 877.04,],
-    [ 221.52, 405.8, 344.98, 857.54,],
-    [ 0, 550.53, 63.01, 873.44,],
-    [ 0.0584, 254.46, 32.561, 324.87,]]
+    [
+        [22.878, 231.27, 804.98, 756.83],
+        [48.552, 398.56, 245.35, 902.71],
+        [669.47, 392.19, 809.72, 877.04],
+        [221.52, 405.8, 344.98, 857.54],
+        [0, 550.53, 63.01, 873.44],
+        [0.0584, 254.46, 32.561, 324.87],
+    ]
 )

 new_boxes = scale_boxes(
-    img1_shape=(h, w), # original image dimensions
-    boxes=xyxy_boxes, # boxes from original image
+    img1_shape=(h, w),  # original image dimensions
+    boxes=xyxy_boxes,  # boxes from original image
     img0_shape=(new_h, new_w),  # resized image dimensions (scale to)
     ratio_pad=None,
     padding=False,
     xywh=False,
 )

-new_boxes#(1)!
->>> array(
-    [[ 27.454, 277.52, 965.98, 908.2],
-    [ 58.262, 478.27, 294.42, 1083.3],
-    [ 803.36, 470.63, 971.66, 1052.4],
-    [ 265.82, 486.96, 413.98, 1029],
-    [ 0, 660.64, 75.612, 1048.1],
-    [ 0.0701, 305.35, 39.073, 389.84]]
-)
+print(new_boxes)  # (1)!
+# >>> array(
+#     [[ 27.454, 277.52, 965.98, 908.2],
+#     [ 58.262, 478.27, 294.42, 1083.3],
+#     [ 803.36, 470.63, 971.66, 1052.4],
+#     [ 265.82, 486.96, 413.98, 1029],
+#     [ 0, 660.64, 75.612, 1048.1],
+#     [ 0.0701, 305.35, 39.073, 389.84]]
+# )
 ```

 1. Bounding boxes scaled for the new image size
@@ -303,24 +310,26 @@ import numpy as np
 from ultralytics.utils.ops import xyxy2xywh

 xyxy_boxes = np.array(
-    [[ 22.878, 231.27, 804.98, 756.83,],
-    [ 48.552, 398.56, 245.35, 902.71,],
-    [ 669.47, 392.19, 809.72, 877.04,],
-    [ 221.52, 405.8, 344.98, 857.54,],
-    [ 0, 550.53, 63.01, 873.44,],
-    [ 0.0584, 254.46, 32.561, 324.87,]]
+    [
+        [22.878, 231.27, 804.98, 756.83],
+        [48.552, 398.56, 245.35, 902.71],
+        [669.47, 392.19, 809.72, 877.04],
+        [221.52, 405.8, 344.98, 857.54],
+        [0, 550.53, 63.01, 873.44],
+        [0.0584, 254.46, 32.561, 324.87],
+    ]
 )
 xywh = xyxy2xywh(xyxy_boxes)

-xywh
->>> array(
-    [[ 413.93, 494.05, 782.1, 525.56],
-    [ 146.95, 650.63, 196.8, 504.15],
-    [ 739.6, 634.62, 140.25, 484.85],
-    [ 283.25, 631.67, 123.46, 451.74],
-    [ 31.505, 711.99, 63.01, 322.91],
-    [ 16.31, 289.67, 32.503, 70.41]]
-)
+print(xywh)
+# >>> array(
+#     [[ 413.93, 494.05, 782.1, 525.56],
+#     [ 146.95, 650.63, 196.8, 504.15],
+#     [ 739.6, 634.62, 140.25, 484.85],
+#     [ 283.25, 631.67, 123.46, 451.74],
+#     [ 31.505, 711.99, 63.01, 322.91],
+#     [ 16.31, 289.67, 32.503, 70.41]]
+# )
 ```

 ### All Bounding Box Conversions
@@ -352,9 +361,9 @@ import cv2 as cv
 import numpy as np
 from ultralytics.utils.plotting import Annotator, colors

-names { #(1)!
-    0: "person",
-    5: "bus",
+names = {  # (1)!
+    0: "person",
+    5: "bus",
     11: "stop sign",
 }

@@ -362,18 +371,20 @@ image = cv.imread("ultralytics/assets/bus.jpg")
 ann = Annotator(
     image,
     line_width=None,  # default auto-size
-    font_size=None, # default auto-size
-    font="Arial.ttf", # must be ImageFont compatible
-    pil=False, # use PIL, otherwise uses OpenCV
+    font_size=None,  # default auto-size
+    font="Arial.ttf",  # must be ImageFont compatible
+    pil=False,  # use PIL, otherwise uses OpenCV
 )

 xyxy_boxes = np.array(
-    [[ 5, 22.878, 231.27, 804.98, 756.83,], # class-idx x1 y1 x2 y2
-    [ 0, 48.552, 398.56, 245.35, 902.71,],
-    [ 0, 669.47, 392.19, 809.72, 877.04,],
-    [ 0, 221.52, 405.8, 344.98, 857.54,],
-    [ 0, 0, 550.53, 63.01, 873.44,],
-    [11, 0.0584, 254.46, 32.561, 324.87,]]
+    [
+        [5, 22.878, 231.27, 804.98, 756.83],  # class-idx x1 y1 x2 y2
+        [0, 48.552, 398.56, 245.35, 902.71],
+        [0, 669.47, 392.19, 809.72, 877.04],
+        [0, 221.52, 405.8, 344.98, 857.54],
+        [0, 0, 550.53, 63.01, 873.44],
+        [11, 0.0584, 254.46, 32.561, 324.87],
+    ]
 )

 for nb, box in enumerate(xyxy_boxes):
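The hunk above stops right where the drawing loop begins. As a hedged, self-contained sketch of how such a loop is typically completed (not necessarily the file's actual continuation), using `Annotator.box_label`, the `colors` palette callable, and `Annotator.result`:

```python
import cv2 as cv
import numpy as np

from ultralytics.utils.plotting import Annotator, colors

names = {0: "person", 5: "bus", 11: "stop sign"}

image = cv.imread("ultralytics/assets/bus.jpg")
ann = Annotator(image)

xyxy_boxes = np.array(
    [
        [5, 22.878, 231.27, 804.98, 756.83],  # class-idx x1 y1 x2 y2
        [0, 48.552, 398.56, 245.35, 902.71],
    ]
)

for nb, box in enumerate(xyxy_boxes):
    c_idx, *box = box  # split the class index from the xyxy coordinates
    label = f"{names.get(int(c_idx))}"
    ann.box_label(box, label, color=colors(int(c_idx), True))  # assumed BGR palette lookup

annotated = ann.result()  # annotated image as a NumPy array
cv.imwrite("image_with_boxes.jpg", annotated)
```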
@@ -412,7 +423,7 @@ ann = Annotator(
 for obb in obb_boxes:
     c_idx, *obb = obb
     obb = np.array(obb).reshape(-1, 4, 2).squeeze()
-    label = f"{names.get(int(c_idx))}"
+    label = f"{obb_names.get(int(c_idx))}"
     ann.box_label(
         obb,
         label,
@@ -434,11 +445,11 @@ Check duration for code to run/process either using `with` or as a decorator.
 ```python
 from ultralytics.utils.ops import Profile

-with Profile(device=device) as dt:
+with Profile(device="cuda:0") as dt:
     pass  # operation to measure

 print(dt)
->>> "Elapsed time is 9.5367431640625e-07 s"
+# >>> "Elapsed time is 9.5367431640625e-07 s"
 ```

 ### Ultralytics Supported Formats
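The context line above also mentions decorator usage, which the hunk does not show. A minimal sketch of that form, assuming `Profile` behaves as a standard `contextlib.ContextDecorator` and accumulates elapsed time on the instance:

```python
from ultralytics.utils.ops import Profile

profiler = Profile()  # keep a reference so the accumulated time can be read back


@profiler  # decorator form; each call to the function is timed
def load_batch():
    pass  # operation to measure


load_batch()
print(profiler)  # e.g. "Elapsed time is ... s"
```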
@@ -446,11 +457,10 @@ print(dt)
 Want or need to use the formats of [images or videos types supported](../modes/predict.md#image-and-video-formats) by Ultralytics programmatically? Use these constants if you need.

 ```python
-from ultralytics.data.utils import IMG_FORMATS
-from ultralytics.data.utils import VID_FORMATS
+from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS

 print(IMG_FORMATS)
->>> ('bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm')
+# >>> ('bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm')
 ```

 ### Make Divisible
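As a small complementary sketch of using the constants touched above (the dataset path is a placeholder), filtering a directory down to files with a supported image extension:

```python
from pathlib import Path

from ultralytics.data.utils import IMG_FORMATS

# keep only files whose extension (without the dot) appears in IMG_FORMATS
images = [
    f
    for f in Path("path/to/dataset").rglob("*")
    if f.suffix.lstrip(".").lower() in IMG_FORMATS
]
print(f"{len(images)} supported image files found")
```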
@@ -461,7 +471,7 @@ Calculates the nearest whole number to `x` to make evenly divisible when divided
 from ultralytics.utils.ops import make_divisible

 make_divisible(7, 3)
->>> 9
+# >>> 9
 make_divisible(7, 2)
->>> 8
+# >>> 8
 ```
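A short, hedged illustration of a typical reason to need this: rounding a scaled channel count up to a hardware-friendly multiple. The 0.33 width multiplier is invented for the example:

```python
from ultralytics.utils.ops import make_divisible

width_multiple = 0.33  # hypothetical model width multiplier
channels = make_divisible(int(256 * width_multiple), 8)  # 84 rounds up to the nearest multiple of 8
print(channels)
# >>> 88
```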