Ruff format docstring Python code (#15792)

Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Glenn Jocher authored on 2024-08-25 01:08:07 +08:00, committed by GitHub
parent c1882a4327
commit d27664216b
63 changed files with 370 additions and 374 deletions
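
These diffs are the output of Ruff's docstring code formatting: Python examples embedded in docstrings (doctests and fenced code blocks) are rewritten in the same style as ordinary source, which is where the wholesale switch from single to double quotes and the reflowed long lines come from. The repository's exact configuration is not shown in this commit; a minimal pyproject.toml sketch that enables this behavior would be:

```toml
[tool.ruff.format]
# Opt in to formatting code examples inside docstrings (off by default).
docstring-code-format = true
# Let embedded code wrap to fit the surrounding docstring indentation.
docstring-code-line-length = "dynamic"
```

With these settings, a plain `ruff format .` rewrites the embedded examples in the same pass as regular code.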

View file

@@ -198,15 +198,15 @@ def cfg2dict(cfg):
Examples:
Convert a YAML file path to a dictionary:
>>> config_dict = cfg2dict('config.yaml')
>>> config_dict = cfg2dict("config.yaml")
Convert a SimpleNamespace to a dictionary:
>>> from types import SimpleNamespace
>>> config_sn = SimpleNamespace(param1='value1', param2='value2')
>>> config_sn = SimpleNamespace(param1="value1", param2="value2")
>>> config_dict = cfg2dict(config_sn)
Pass through an already existing dictionary:
>>> config_dict = cfg2dict({'param1': 'value1', 'param2': 'value2'})
>>> config_dict = cfg2dict({"param1": "value1", "param2": "value2"})
Notes:
- If cfg is a path or string, it's loaded as YAML and converted to a dictionary.
@@ -235,7 +235,7 @@ def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, ove
Examples:
>>> from ultralytics.cfg import get_cfg
>>> config = get_cfg() # Load default configuration
>>> config = get_cfg('path/to/config.yaml', overrides={'epochs': 50, 'batch_size': 16})
>>> config = get_cfg("path/to/config.yaml", overrides={"epochs": 50, "batch_size": 16})
Notes:
- If both `cfg` and `overrides` are provided, the values in `overrides` will take precedence.
@@ -282,10 +282,10 @@ def check_cfg(cfg, hard=True):
Examples:
>>> config = {
... 'epochs': 50, # valid integer
... 'lr0': 0.01, # valid float
... 'momentum': 1.2, # invalid float (out of 0.0-1.0 range)
... 'save': 'true', # invalid bool
... "epochs": 50, # valid integer
... "lr0": 0.01, # valid float
... "momentum": 1.2, # invalid float (out of 0.0-1.0 range)
... "save": "true", # invalid bool
... }
>>> check_cfg(config, hard=False)
>>> print(config)
@@ -345,7 +345,7 @@ def get_save_dir(args, name=None):
Examples:
>>> from types import SimpleNamespace
>>> args = SimpleNamespace(project='my_project', task='detect', mode='train', exist_ok=True)
>>> args = SimpleNamespace(project="my_project", task="detect", mode="train", exist_ok=True)
>>> save_dir = get_save_dir(args)
>>> print(save_dir)
my_project/detect/train
@@ -413,8 +413,8 @@ def check_dict_alignment(base: Dict, custom: Dict, e=None):
SystemExit: If mismatched keys are found between the custom and base dictionaries.
Examples:
>>> base_cfg = {'epochs': 50, 'lr0': 0.01, 'batch_size': 16}
>>> custom_cfg = {'epoch': 100, 'lr': 0.02, 'batch_size': 32}
>>> base_cfg = {"epochs": 50, "lr0": 0.01, "batch_size": 16}
>>> custom_cfg = {"epoch": 100, "lr": 0.02, "batch_size": 32}
>>> try:
... check_dict_alignment(base_cfg, custom_cfg)
... except SystemExit:

View file

@@ -21,7 +21,7 @@ def auto_annotate(data, det_model="yolov8x.pt", sam_model="sam_b.pt", device="",
Examples:
>>> from ultralytics.data.annotator import auto_annotate
>>> auto_annotate(data='ultralytics/assets', det_model='yolov8n.pt', sam_model='mobile_sam.pt')
>>> auto_annotate(data="ultralytics/assets", det_model="yolov8n.pt", sam_model="mobile_sam.pt")
Notes:
- The function creates a new directory for output if not specified.

View file

@@ -38,7 +38,7 @@ class BaseTransform:
Examples:
>>> transform = BaseTransform()
>>> labels = {'image': np.array(...), 'instances': [...], 'semantic': np.array(...)}
>>> labels = {"image": np.array(...), "instances": [...], "semantic": np.array(...)}
>>> transformed_labels = transform(labels)
"""
@@ -93,7 +93,7 @@ class BaseTransform:
Examples:
>>> transform = BaseTransform()
>>> labels = {'instances': Instances(xyxy=torch.rand(5, 4), cls=torch.randint(0, 80, (5,)))}
>>> labels = {"instances": Instances(xyxy=torch.rand(5, 4), cls=torch.randint(0, 80, (5,)))}
>>> transformed_labels = transform.apply_instances(labels)
"""
pass
@@ -135,7 +135,7 @@ class BaseTransform:
Examples:
>>> transform = BaseTransform()
>>> labels = {'img': np.random.rand(640, 640, 3), 'instances': []}
>>> labels = {"img": np.random.rand(640, 640, 3), "instances": []}
>>> transformed_labels = transform(labels)
"""
self.apply_image(labels)
@@ -338,6 +338,7 @@ class BaseMixTransform:
... def _mix_transform(self, labels):
... # Implement custom mix logic here
... return labels
...
... def get_indexes(self):
... return [random.randint(0, len(self.dataset) - 1) for _ in range(3)]
>>> dataset = YourDataset()
@@ -421,7 +422,7 @@ class BaseMixTransform:
Examples:
>>> transform = BaseMixTransform(dataset)
>>> labels = {'image': img, 'bboxes': boxes, 'mix_labels': [{'image': img2, 'bboxes': boxes2}]}
>>> labels = {"image": img, "bboxes": boxes, "mix_labels": [{"image": img2, "bboxes": boxes2}]}
>>> augmented_labels = transform._mix_transform(labels)
"""
raise NotImplementedError
@@ -456,20 +457,17 @@ class BaseMixTransform:
Examples:
>>> labels = {
... 'texts': [['cat'], ['dog']],
... 'cls': torch.tensor([[0], [1]]),
... 'mix_labels': [{
... 'texts': [['bird'], ['fish']],
... 'cls': torch.tensor([[0], [1]])
... }]
... "texts": [["cat"], ["dog"]],
... "cls": torch.tensor([[0], [1]]),
... "mix_labels": [{"texts": [["bird"], ["fish"]], "cls": torch.tensor([[0], [1]])}],
... }
>>> updated_labels = self._update_label_text(labels)
>>> print(updated_labels['texts'])
>>> print(updated_labels["texts"])
[['cat'], ['dog'], ['bird'], ['fish']]
>>> print(updated_labels['cls'])
>>> print(updated_labels["cls"])
tensor([[0],
[1]])
>>> print(updated_labels['mix_labels'][0]['cls'])
>>> print(updated_labels["mix_labels"][0]["cls"])
tensor([[2],
[3]])
"""
@@ -616,9 +614,12 @@ class Mosaic(BaseMixTransform):
Examples:
>>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=3)
>>> labels = {'img': np.random.rand(480, 640, 3), 'mix_labels': [{'img': np.random.rand(480, 640, 3)} for _ in range(2)]}
>>> labels = {
... "img": np.random.rand(480, 640, 3),
... "mix_labels": [{"img": np.random.rand(480, 640, 3)} for _ in range(2)],
... }
>>> result = mosaic._mosaic3(labels)
>>> print(result['img'].shape)
>>> print(result["img"].shape)
(640, 640, 3)
"""
mosaic_labels = []
@@ -670,9 +671,10 @@ class Mosaic(BaseMixTransform):
Examples:
>>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=4)
>>> labels = {"img": np.random.rand(480, 640, 3), "mix_labels": [
... {"img": np.random.rand(480, 640, 3)} for _ in range(3)
... ]}
>>> labels = {
... "img": np.random.rand(480, 640, 3),
... "mix_labels": [{"img": np.random.rand(480, 640, 3)} for _ in range(3)],
... }
>>> result = mosaic._mosaic4(labels)
>>> assert result["img"].shape == (1280, 1280, 3)
"""
@@ -734,7 +736,7 @@ class Mosaic(BaseMixTransform):
>>> mosaic = Mosaic(dataset, imgsz=640, p=1.0, n=9)
>>> input_labels = dataset[0]
>>> mosaic_result = mosaic._mosaic9(input_labels)
>>> mosaic_image = mosaic_result['img']
>>> mosaic_image = mosaic_result["img"]
"""
mosaic_labels = []
s = self.imgsz
@@ -898,7 +900,7 @@ class MixUp(BaseMixTransform):
Examples:
>>> from ultralytics.data.dataset import YOLODataset
>>> dataset = YOLODataset('path/to/data.yaml')
>>> dataset = YOLODataset("path/to/data.yaml")
>>> mixup = MixUp(dataset, pre_transform=None, p=0.5)
"""
super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)
@@ -974,10 +976,10 @@ class RandomPerspective:
Examples:
>>> transform = RandomPerspective(degrees=10, translate=0.1, scale=0.1, shear=10)
>>> image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
>>> labels = {'img': image, 'cls': np.array([0, 1]), 'instances': Instances(...)}
>>> labels = {"img": image, "cls": np.array([0, 1]), "instances": Instances(...)}
>>> result = transform(labels)
>>> transformed_image = result['img']
>>> transformed_instances = result['instances']
>>> transformed_image = result["img"]
>>> transformed_instances = result["instances"]
"""
def __init__(
@@ -1209,12 +1211,12 @@ class RandomPerspective:
>>> transform = RandomPerspective()
>>> image = np.random.randint(0, 255, (640, 640, 3), dtype=np.uint8)
>>> labels = {
... 'img': image,
... 'cls': np.array([0, 1, 2]),
... 'instances': Instances(bboxes=np.array([[10, 10, 50, 50], [100, 100, 150, 150]]))
... "img": image,
... "cls": np.array([0, 1, 2]),
... "instances": Instances(bboxes=np.array([[10, 10, 50, 50], [100, 100, 150, 150]])),
... }
>>> result = transform(labels)
>>> assert result['img'].shape[:2] == result['resized_shape']
>>> assert result["img"].shape[:2] == result["resized_shape"]
"""
if self.pre_transform and "mosaic_border" not in labels:
labels = self.pre_transform(labels)
@@ -1358,9 +1360,9 @@ class RandomHSV:
Examples:
>>> hsv_augmenter = RandomHSV(hgain=0.5, sgain=0.5, vgain=0.5)
>>> labels = {'img': np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)}
>>> labels = {"img": np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8)}
>>> hsv_augmenter(labels)
>>> augmented_img = labels['img']
>>> augmented_img = labels["img"]
"""
img = labels["img"]
if self.hgain or self.sgain or self.vgain:
@@ -1394,7 +1396,7 @@ class RandomFlip:
__call__: Applies the random flip transformation to an image and its annotations.
Examples:
>>> transform = RandomFlip(p=0.5, direction='horizontal')
>>> transform = RandomFlip(p=0.5, direction="horizontal")
>>> result = transform({"img": image, "instances": instances})
>>> flipped_image = result["img"]
>>> flipped_instances = result["instances"]
@@ -1416,8 +1418,8 @@ class RandomFlip:
AssertionError: If direction is not 'horizontal' or 'vertical', or if p is not between 0 and 1.
Examples:
>>> flip = RandomFlip(p=0.5, direction='horizontal')
>>> flip = RandomFlip(p=0.7, direction='vertical', flip_idx=[1, 0, 3, 2, 5, 4])
>>> flip = RandomFlip(p=0.5, direction="horizontal")
>>> flip = RandomFlip(p=0.7, direction="vertical", flip_idx=[1, 0, 3, 2, 5, 4])
"""
assert direction in {"horizontal", "vertical"}, f"Support direction `horizontal` or `vertical`, got {direction}"
assert 0 <= p <= 1.0, f"The probability should be in range [0, 1], but got {p}."
@@ -1446,8 +1448,8 @@ class RandomFlip:
'instances' (ultralytics.utils.instance.Instances): Updated instances matching the flipped image.
Examples:
>>> labels = {'img': np.random.rand(640, 640, 3), 'instances': Instances(...)}
>>> random_flip = RandomFlip(p=0.5, direction='horizontal')
>>> labels = {"img": np.random.rand(640, 640, 3), "instances": Instances(...)}
>>> random_flip = RandomFlip(p=0.5, direction="horizontal")
>>> flipped_labels = random_flip(labels)
"""
img = labels["img"]
@@ -1493,8 +1495,8 @@ class LetterBox:
Examples:
>>> transform = LetterBox(new_shape=(640, 640))
>>> result = transform(labels)
>>> resized_img = result['img']
>>> updated_instances = result['instances']
>>> resized_img = result["img"]
>>> updated_instances = result["instances"]
"""
def __init__(self, new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, center=True, stride=32):
@@ -1548,9 +1550,9 @@ class LetterBox:
Examples:
>>> letterbox = LetterBox(new_shape=(640, 640))
>>> result = letterbox(labels={'img': np.zeros((480, 640, 3)), 'instances': Instances(...)})
>>> resized_img = result['img']
>>> updated_instances = result['instances']
>>> result = letterbox(labels={"img": np.zeros((480, 640, 3)), "instances": Instances(...)})
>>> resized_img = result["img"]
>>> updated_instances = result["instances"]
"""
if labels is None:
labels = {}
@@ -1616,7 +1618,7 @@ class LetterBox:
Examples:
>>> letterbox = LetterBox(new_shape=(640, 640))
>>> labels = {'instances': Instances(...)}
>>> labels = {"instances": Instances(...)}
>>> ratio = (0.5, 0.5)
>>> padw, padh = 10, 20
>>> updated_labels = letterbox._update_labels(labels, ratio, padw, padh)
@@ -1643,7 +1645,7 @@ class CopyPaste:
Examples:
>>> copypaste = CopyPaste(p=0.5)
>>> augmented_labels = copypaste(labels)
>>> augmented_image = augmented_labels['img']
>>> augmented_image = augmented_labels["img"]
"""
def __init__(self, p=0.5) -> None:
@@ -1680,7 +1682,7 @@ class CopyPaste:
(Dict): Dictionary with augmented image and updated instances under 'img', 'cls', and 'instances' keys.
Examples:
>>> labels = {'img': np.random.rand(640, 640, 3), 'cls': np.array([0, 1, 2]), 'instances': Instances(...)}
>>> labels = {"img": np.random.rand(640, 640, 3), "cls": np.array([0, 1, 2]), "instances": Instances(...)}
>>> augmenter = CopyPaste(p=0.5)
>>> augmented_labels = augmenter(labels)
"""
@@ -1765,8 +1767,8 @@ class Albumentations:
Examples:
>>> transform = Albumentations(p=0.5)
>>> augmented = transform(image=image, bboxes=bboxes, class_labels=classes)
>>> augmented_image = augmented['image']
>>> augmented_bboxes = augmented['bboxes']
>>> augmented_image = augmented["image"]
>>> augmented_bboxes = augmented["bboxes"]
Notes:
- Requires Albumentations version 1.0.3 or higher.
@@ -1871,7 +1873,7 @@ class Albumentations:
>>> labels = {
... "img": np.random.rand(640, 640, 3),
... "cls": np.array([0, 1]),
... "instances": Instances(bboxes=np.array([[0, 0, 1, 1], [0.5, 0.5, 0.8, 0.8]]))
... "instances": Instances(bboxes=np.array([[0, 0, 1, 1], [0.5, 0.5, 0.8, 0.8]])),
... }
>>> augmented = transform(labels)
>>> assert augmented["img"].shape == (640, 640, 3)
@@ -1927,11 +1929,11 @@ class Format:
_format_segments: Converts polygon points to bitmap masks.
Examples:
>>> formatter = Format(bbox_format='xywh', normalize=True, return_mask=True)
>>> formatter = Format(bbox_format="xywh", normalize=True, return_mask=True)
>>> formatted_labels = formatter(labels)
>>> img = formatted_labels['img']
>>> bboxes = formatted_labels['bboxes']
>>> masks = formatted_labels['masks']
>>> img = formatted_labels["img"]
>>> bboxes = formatted_labels["bboxes"]
>>> masks = formatted_labels["masks"]
"""
def __init__(
@ -1975,7 +1977,7 @@ class Format:
bgr (float): The probability to return BGR images.
Examples:
>>> format = Format(bbox_format='xyxy', return_mask=True, return_keypoint=False)
>>> format = Format(bbox_format="xyxy", return_mask=True, return_keypoint=False)
>>> print(format.bbox_format)
xyxy
"""
@@ -2013,8 +2015,8 @@ class Format:
- 'batch_idx': Batch index tensor (if batch_idx is True).
Examples:
>>> formatter = Format(bbox_format='xywh', normalize=True, return_mask=True)
>>> labels = {'img': np.random.rand(640, 640, 3), 'cls': np.array([0, 1]), 'instances': Instances(...)}
>>> formatter = Format(bbox_format="xywh", normalize=True, return_mask=True)
>>> labels = {"img": np.random.rand(640, 640, 3), "cls": np.array([0, 1]), "instances": Instances(...)}
>>> formatted_labels = formatter(labels)
>>> print(formatted_labels.keys())
"""
@@ -2275,8 +2277,8 @@ def v8_transforms(dataset, imgsz, hyp, stretch=False):
Examples:
>>> from ultralytics.data.dataset import YOLODataset
>>> dataset = YOLODataset(img_path='path/to/images', imgsz=640)
>>> hyp = {'mosaic': 1.0, 'copy_paste': 0.5, 'degrees': 10.0, 'translate': 0.2, 'scale': 0.9}
>>> dataset = YOLODataset(img_path="path/to/images", imgsz=640)
>>> hyp = {"mosaic": 1.0, "copy_paste": 0.5, "degrees": 10.0, "translate": 0.2, "scale": 0.9}
>>> transforms = v8_transforms(dataset, imgsz=640, hyp=hyp)
>>> augmented_data = transforms(dataset[0])
"""
@@ -2343,7 +2345,7 @@ def classify_transforms(
Examples:
>>> transforms = classify_transforms(size=224)
>>> img = Image.open('path/to/image.jpg')
>>> img = Image.open("path/to/image.jpg")
>>> transformed_img = transforms(img)
"""
import torchvision.transforms as T # scope for faster 'import ultralytics'
@@ -2415,7 +2417,7 @@ def classify_augmentations(
(torchvision.transforms.Compose): A composition of image augmentation transforms.
Examples:
>>> transforms = classify_augmentations(size=224, auto_augment='randaugment')
>>> transforms = classify_augmentations(size=224, auto_augment="randaugment")
>>> augmented_image = transforms(original_image)
"""
# Transforms to apply if Albumentations not installed

View file

@@ -298,10 +298,10 @@ class BaseDataset(Dataset):
im_file=im_file,
shape=shape, # format: (height, width)
cls=cls,
bboxes=bboxes, # xywh
bboxes=bboxes, # xywh
segments=segments, # xy
keypoints=keypoints, # xy
normalized=True, # or False
keypoints=keypoints, # xy
normalized=True, # or False
bbox_format="xyxy", # or xywh, ltwh
)
```
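
The only change in the hunk above is inline-comment spacing: the formatter enforces two spaces before a trailing `#` comment, per PEP 8. Several later hunks (for example, the `TryExcept` and `Retry` docstrings) show the same whitespace-only adjustment.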

View file

@@ -123,8 +123,8 @@ def coco80_to_coco91_class():
```python
import numpy as np
a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
a = np.loadtxt("data/coco.names", dtype="str", delimiter="\n")
b = np.loadtxt("data/coco_paper.names", dtype="str", delimiter="\n")
x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco
x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet
```
@@ -236,8 +236,8 @@ def convert_coco(
```python
from ultralytics.data.converter import convert_coco
convert_coco('../datasets/coco/annotations/', use_segments=True, use_keypoints=False, cls91to80=True)
convert_coco('../datasets/lvis/annotations/', use_segments=True, use_keypoints=False, cls91to80=False, lvis=True)
convert_coco("../datasets/coco/annotations/", use_segments=True, use_keypoints=False, cls91to80=True)
convert_coco("../datasets/lvis/annotations/", use_segments=True, use_keypoints=False, cls91to80=False, lvis=True)
```
Output:
@@ -351,7 +351,7 @@ def convert_segment_masks_to_yolo_seg(masks_dir, output_dir, classes):
from ultralytics.data.converter import convert_segment_masks_to_yolo_seg
# The classes here is the total classes in the dataset, for COCO dataset we have 80 classes
convert_segment_masks_to_yolo_seg('path/to/masks_directory', 'path/to/output/directory', classes=80)
convert_segment_masks_to_yolo_seg("path/to/masks_directory", "path/to/output/directory", classes=80)
```
Notes:
@@ -429,7 +429,7 @@ def convert_dota_to_yolo_obb(dota_root_path: str):
```python
from ultralytics.data.converter import convert_dota_to_yolo_obb
convert_dota_to_yolo_obb('path/to/DOTA')
convert_dota_to_yolo_obb("path/to/DOTA")
```
Notes:

View file

@@ -163,7 +163,7 @@ class Explorer:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.query(img='https://ultralytics.com/images/zidane.jpg')
similar = exp.query(img="https://ultralytics.com/images/zidane.jpg")
```
"""
if self.table is None:
@@ -271,7 +271,7 @@ class Explorer:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.get_similar(img='https://ultralytics.com/images/zidane.jpg')
similar = exp.get_similar(img="https://ultralytics.com/images/zidane.jpg")
```
"""
assert return_type in {"pandas", "arrow"}, f"Return type should be `pandas` or `arrow`, but got {return_type}"
@@ -306,7 +306,7 @@ class Explorer:
```python
exp = Explorer()
exp.create_embeddings_table()
similar = exp.plot_similar(img='https://ultralytics.com/images/zidane.jpg')
similar = exp.plot_similar(img="https://ultralytics.com/images/zidane.jpg")
```
"""
similar = self.get_similar(img, idx, limit, return_type="arrow")
@@ -395,8 +395,8 @@ class Explorer:
exp.create_embeddings_table()
similarity_idx_plot = exp.plot_similarity_index()
similarity_idx_plot.show() # view image preview
similarity_idx_plot.save('path/to/save/similarity_index_plot.png') # save contents to file
similarity_idx_plot.show() # view image preview
similarity_idx_plot.save("path/to/save/similarity_index_plot.png") # save contents to file
```
"""
sim_idx = self.similarity_index(max_dist=max_dist, top_k=top_k, force=force)
@@ -447,7 +447,7 @@ class Explorer:
```python
exp = Explorer()
exp.create_embeddings_table()
answer = exp.ask_ai('Show images with 1 person and 2 dogs')
answer = exp.ask_ai("Show images with 1 person and 2 dogs")
```
"""
result = prompt_sql_query(query)

View file

@@ -438,11 +438,11 @@ class HUBDatasetStats:
```python
from ultralytics.data.utils import HUBDatasetStats
stats = HUBDatasetStats('path/to/coco8.zip', task='detect') # detect dataset
stats = HUBDatasetStats('path/to/coco8-seg.zip', task='segment') # segment dataset
stats = HUBDatasetStats('path/to/coco8-pose.zip', task='pose') # pose dataset
stats = HUBDatasetStats('path/to/dota8.zip', task='obb') # OBB dataset
stats = HUBDatasetStats('path/to/imagenet10.zip', task='classify') # classification dataset
stats = HUBDatasetStats("path/to/coco8.zip", task="detect") # detect dataset
stats = HUBDatasetStats("path/to/coco8-seg.zip", task="segment") # segment dataset
stats = HUBDatasetStats("path/to/coco8-pose.zip", task="pose") # pose dataset
stats = HUBDatasetStats("path/to/dota8.zip", task="obb") # OBB dataset
stats = HUBDatasetStats("path/to/imagenet10.zip", task="classify") # classification dataset
stats.get_json(save=True)
stats.process_images()
@@ -598,7 +598,7 @@ def compress_one_image(f, f_new=None, max_dim=1920, quality=50):
from pathlib import Path
from ultralytics.data.utils import compress_one_image
for f in Path('path/to/dataset').rglob('*.jpg'):
for f in Path("path/to/dataset").rglob("*.jpg"):
compress_one_image(f)
```
"""

View file

@@ -72,11 +72,11 @@ class Model(nn.Module):
Examples:
>>> from ultralytics import YOLO
>>> model = YOLO('yolov8n.pt')
>>> results = model.predict('image.jpg')
>>> model.train(data='coco128.yaml', epochs=3)
>>> model = YOLO("yolov8n.pt")
>>> results = model.predict("image.jpg")
>>> model.train(data="coco128.yaml", epochs=3)
>>> metrics = model.val()
>>> model.export(format='onnx')
>>> model.export(format="onnx")
"""
def __init__(
@@ -166,8 +166,8 @@ class Model(nn.Module):
Results object.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> results = model('https://ultralytics.com/images/bus.jpg')
>>> model = YOLO("yolov8n.pt")
>>> results = model("https://ultralytics.com/images/bus.jpg")
>>> for r in results:
... print(f"Detected {len(r)} objects in image")
"""
@@ -188,9 +188,9 @@ class Model(nn.Module):
(bool): True if the model string is a valid Triton Server URL, False otherwise.
Examples:
>>> Model.is_triton_model('http://localhost:8000/v2/models/yolov8n')
>>> Model.is_triton_model("http://localhost:8000/v2/models/yolov8n")
True
>>> Model.is_triton_model('yolov8n.pt')
>>> Model.is_triton_model("yolov8n.pt")
False
"""
from urllib.parse import urlsplit
@@ -253,7 +253,7 @@ class Model(nn.Module):
Examples:
>>> model = Model()
>>> model._new('yolov8n.yaml', task='detect', verbose=True)
>>> model._new("yolov8n.yaml", task="detect", verbose=True)
"""
cfg_dict = yaml_model_load(cfg)
self.cfg = cfg
@@ -284,8 +284,8 @@ class Model(nn.Module):
Examples:
>>> model = Model()
>>> model._load('yolov8n.pt')
>>> model._load('path/to/weights.pth', task='detect')
>>> model._load("yolov8n.pt")
>>> model._load("path/to/weights.pth", task="detect")
"""
if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")):
weights = checks.check_file(weights, download_dir=SETTINGS["weights_dir"]) # download and return local file
@@ -348,7 +348,7 @@ class Model(nn.Module):
AssertionError: If the model is not a PyTorch model.
Examples:
>>> model = Model('yolov8n.pt')
>>> model = Model("yolov8n.pt")
>>> model.reset_weights()
"""
self._check_is_pytorch_model()
@@ -377,8 +377,8 @@ class Model(nn.Module):
Examples:
>>> model = Model()
>>> model.load('yolov8n.pt')
>>> model.load(Path('path/to/weights.pt'))
>>> model.load("yolov8n.pt")
>>> model.load(Path("path/to/weights.pt"))
"""
self._check_is_pytorch_model()
if isinstance(weights, (str, Path)):
@@ -402,8 +402,8 @@ class Model(nn.Module):
AssertionError: If the model is not a PyTorch model.
Examples:
>>> model = Model('yolov8n.pt')
>>> model.save('my_model.pt')
>>> model = Model("yolov8n.pt")
>>> model.save("my_model.pt")
"""
self._check_is_pytorch_model()
from copy import deepcopy
@@ -439,7 +439,7 @@ class Model(nn.Module):
TypeError: If the model is not a PyTorch model.
Examples:
>>> model = Model('yolov8n.pt')
>>> model = Model("yolov8n.pt")
>>> model.info() # Prints model summary
>>> info_list = model.info(detailed=True, verbose=False) # Returns detailed info as a list
"""
@@ -494,8 +494,8 @@ class Model(nn.Module):
AssertionError: If the model is not a PyTorch model.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> image = 'https://ultralytics.com/images/bus.jpg'
>>> model = YOLO("yolov8n.pt")
>>> image = "https://ultralytics.com/images/bus.jpg"
>>> embeddings = model.embed(image)
>>> print(embeddings[0].shape)
"""
@@ -531,8 +531,8 @@ class Model(nn.Module):
Results object.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> results = model.predict(source='path/to/image.jpg', conf=0.25)
>>> model = YOLO("yolov8n.pt")
>>> results = model.predict(source="path/to/image.jpg", conf=0.25)
>>> for r in results:
... print(r.boxes.data) # print detection bounding boxes
@@ -592,8 +592,8 @@ class Model(nn.Module):
AttributeError: If the predictor does not have registered trackers.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> results = model.track(source='path/to/video.mp4', show=True)
>>> model = YOLO("yolov8n.pt")
>>> results = model.track(source="path/to/video.mp4", show=True)
>>> for r in results:
... print(r.boxes.id) # print tracking IDs
@@ -635,8 +635,8 @@ class Model(nn.Module):
AssertionError: If the model is not a PyTorch model.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> results = model.val(data='coco128.yaml', imgsz=640)
>>> model = YOLO("yolov8n.pt")
>>> results = model.val(data="coco128.yaml", imgsz=640)
>>> print(results.box.map) # Print mAP50-95
"""
custom = {"rect": True} # method defaults
@@ -677,8 +677,8 @@ class Model(nn.Module):
AssertionError: If the model is not a PyTorch model.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> results = model.benchmark(data='coco8.yaml', imgsz=640, half=True)
>>> model = YOLO("yolov8n.pt")
>>> results = model.benchmark(data="coco8.yaml", imgsz=640, half=True)
>>> print(results)
"""
self._check_is_pytorch_model()
@@ -727,8 +727,8 @@ class Model(nn.Module):
RuntimeError: If the export process fails due to errors.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> model.export(format='onnx', dynamic=True, simplify=True)
>>> model = YOLO("yolov8n.pt")
>>> model.export(format="onnx", dynamic=True, simplify=True)
'path/to/exported/model.onnx'
"""
self._check_is_pytorch_model()
@@ -782,8 +782,8 @@ class Model(nn.Module):
ModuleNotFoundError: If the HUB SDK is not installed.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> results = model.train(data='coco128.yaml', epochs=3)
>>> model = YOLO("yolov8n.pt")
>>> results = model.train(data="coco128.yaml", epochs=3)
"""
self._check_is_pytorch_model()
if hasattr(self.session, "model") and self.session.model.id: # Ultralytics HUB session with loaded model
@@ -847,7 +847,7 @@ class Model(nn.Module):
AssertionError: If the model is not a PyTorch model.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> model = YOLO("yolov8n.pt")
>>> results = model.tune(use_ray=True, iterations=20)
>>> print(results)
"""
@@ -907,7 +907,7 @@ class Model(nn.Module):
AttributeError: If the model or predictor does not have a 'names' attribute.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> model = YOLO("yolov8n.pt")
>>> print(model.names)
{0: 'person', 1: 'bicycle', 2: 'car', ...}
"""
@@ -957,7 +957,7 @@ class Model(nn.Module):
(object | None): The transform object of the model if available, otherwise None.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> model = YOLO("yolov8n.pt")
>>> transforms = model.transforms
>>> if transforms:
... print(f"Model transforms: {transforms}")
@@ -986,9 +986,9 @@ class Model(nn.Module):
Examples:
>>> def on_train_start(trainer):
... print("Training is starting!")
>>> model = YOLO('yolov8n.pt')
>>> model = YOLO("yolov8n.pt")
>>> model.add_callback("on_train_start", on_train_start)
>>> model.train(data='coco128.yaml', epochs=1)
>>> model.train(data="coco128.yaml", epochs=1)
"""
self.callbacks[event].append(func)
@@ -1005,9 +1005,9 @@ class Model(nn.Module):
recognized by the Ultralytics callback system.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> model.add_callback('on_train_start', lambda: print('Training started'))
>>> model.clear_callback('on_train_start')
>>> model = YOLO("yolov8n.pt")
>>> model.add_callback("on_train_start", lambda: print("Training started"))
>>> model.clear_callback("on_train_start")
>>> # All callbacks for 'on_train_start' are now removed
Notes:
@@ -1035,8 +1035,8 @@ class Model(nn.Module):
modifications, ensuring consistent behavior across different runs or experiments.
Examples:
>>> model = YOLO('yolov8n.pt')
>>> model.add_callback('on_train_start', custom_function)
>>> model = YOLO("yolov8n.pt")
>>> model.add_callback("on_train_start", custom_function)
>>> model.reset_callbacks()
# All callbacks are now reset to their default functions
"""
@@ -1059,7 +1059,7 @@ class Model(nn.Module):
(dict): A new dictionary containing only the specified include keys from the input arguments.
Examples:
>>> original_args = {'imgsz': 640, 'data': 'coco.yaml', 'task': 'detect', 'batch': 16, 'epochs': 100}
>>> original_args = {"imgsz": 640, "data": "coco.yaml", "task": "detect", "batch": 16, "epochs": 100}
>>> reset_args = Model._reset_ckpt_args(original_args)
>>> print(reset_args)
{'imgsz': 640, 'data': 'coco.yaml', 'task': 'detect'}
@@ -1090,9 +1090,9 @@ class Model(nn.Module):
NotImplementedError: If the specified key is not supported for the current task.
Examples:
>>> model = Model(task='detect')
>>> predictor = model._smart_load('predictor')
>>> trainer = model._smart_load('trainer')
>>> model = Model(task="detect")
>>> predictor = model._smart_load("predictor")
>>> trainer = model._smart_load("trainer")
Notes:
- This method is typically used internally by other methods of the Model class.
@@ -1128,8 +1128,8 @@ class Model(nn.Module):
Examples:
>>> model = Model()
>>> task_map = model.task_map
>>> detect_class_map = task_map['detect']
>>> segment_class_map = task_map['segment']
>>> detect_class_map = task_map["detect"]
>>> segment_class_map = task_map["segment"]
Note:
The actual implementation of this method may vary depending on the specific tasks and

View file

@@ -143,7 +143,7 @@ class BaseTensor(SimpleClass):
Examples:
>>> base_tensor = BaseTensor(torch.randn(3, 4), orig_shape=(480, 640))
>>> cuda_tensor = base_tensor.to('cuda')
>>> cuda_tensor = base_tensor.to("cuda")
>>> float16_tensor = base_tensor.to(dtype=torch.float16)
"""
return self.__class__(torch.as_tensor(self.data).to(*args, **kwargs), self.orig_shape)
@@ -223,7 +223,7 @@ class Results(SimpleClass):
>>> for result in results:
... print(result.boxes) # Print detection boxes
... result.show() # Display the annotated image
... result.save(filename='result.jpg') # Save annotated image
... result.save(filename="result.jpg") # Save annotated image
"""
def __init__(
@@ -280,7 +280,7 @@ class Results(SimpleClass):
(Results): A new Results object containing the specified subset of inference results.
Examples:
>>> results = model('path/to/image.jpg') # Perform inference
>>> results = model("path/to/image.jpg") # Perform inference
>>> single_result = results[0] # Get the first result
>>> subset_results = results[1:4] # Get a slice of results
"""
@@ -319,7 +319,7 @@ class Results(SimpleClass):
obb (torch.Tensor | None): A tensor of shape (N, 5) containing oriented bounding box coordinates.
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> new_boxes = torch.tensor([[100, 100, 200, 200, 0.9, 0]])
>>> results[0].update(boxes=new_boxes)
"""
@@ -370,7 +370,7 @@ class Results(SimpleClass):
(Results): A new Results object with all tensor attributes on CPU memory.
Examples:
>>> results = model('path/to/image.jpg') # Perform inference
>>> results = model("path/to/image.jpg") # Perform inference
>>> cpu_result = results[0].cpu() # Move the first result to CPU
>>> print(cpu_result.boxes.device) # Output: cpu
"""
@@ -384,7 +384,7 @@ class Results(SimpleClass):
(Results): A new Results object with all tensors converted to numpy arrays.
Examples:
>>> results = model('path/to/image.jpg')
>>> results = model("path/to/image.jpg")
>>> numpy_result = results[0].numpy()
>>> type(numpy_result.boxes.data)
<class 'numpy.ndarray'>
@@ -488,7 +488,7 @@ class Results(SimpleClass):
(np.ndarray): Annotated image as a numpy array.
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> for result in results:
... im = result.plot()
... im.show()
@@ -578,7 +578,7 @@ class Results(SimpleClass):
**kwargs (Any): Arbitrary keyword arguments to be passed to the `plot()` method.
Examples:
>>> results = model('path/to/image.jpg')
>>> results = model("path/to/image.jpg")
>>> results[0].show() # Display the first result
>>> for result in results:
... result.show() # Display all results
@@ -599,12 +599,12 @@ class Results(SimpleClass):
**kwargs (Any): Arbitrary keyword arguments to be passed to the `plot` method.
Examples:
>>> results = model('path/to/image.jpg')
>>> results = model("path/to/image.jpg")
>>> for result in results:
... result.save('annotated_image.jpg')
... result.save("annotated_image.jpg")
>>> # Or with custom plot arguments
>>> for result in results:
... result.save('annotated_image.jpg', conf=False, line_width=2)
... result.save("annotated_image.jpg", conf=False, line_width=2)
"""
if not filename:
filename = f"results_{Path(self.path).name}"
@@ -623,7 +623,7 @@ class Results(SimpleClass):
number of detections per class. For classification tasks, it includes the top 5 class probabilities.
Examples:
>>> results = model('path/to/image.jpg')
>>> results = model("path/to/image.jpg")
>>> for result in results:
... print(result.verbose())
2 persons, 1 car, 3 traffic lights,
@@ -660,7 +660,7 @@ class Results(SimpleClass):
Examples:
>>> from ultralytics import YOLO
>>> model = YOLO('yolov8n.pt')
>>> model = YOLO("yolov8n.pt")
>>> results = model("path/to/image.jpg")
>>> for result in results:
... result.save_txt("output.txt")
@@ -757,7 +757,7 @@ class Results(SimpleClass):
task type (classification or detection) and available information (boxes, masks, keypoints).
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> summary = results[0].summary()
>>> print(summary)
"""
@@ -919,7 +919,7 @@ class Boxes(BaseTensor):
coordinates in [x1, y1, x2, y2] format, where n is the number of boxes.
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> boxes = results[0].boxes
>>> xyxy = boxes.xyxy
>>> print(xyxy)
@@ -953,7 +953,7 @@ class Boxes(BaseTensor):
The shape is (N,), where N is the number of boxes.
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> boxes = results[0].boxes
>>> class_ids = boxes.cls
>>> print(class_ids) # tensor([0., 2., 1.])
@@ -970,7 +970,7 @@ class Boxes(BaseTensor):
otherwise None. Shape is (N,) where N is the number of boxes.
Examples:
>>> results = model.track('path/to/video.mp4')
>>> results = model.track("path/to/video.mp4")
>>> for result in results:
... boxes = result.boxes
... if boxes.is_track:
@@ -1116,7 +1116,7 @@ class Masks(BaseTensor):
mask contour.
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> masks = results[0].masks
>>> normalized_coords = masks.xyn
>>> print(normalized_coords[0]) # Normalized coordinates of the first mask
@@ -1141,7 +1141,7 @@ class Masks(BaseTensor):
number of points in the segment.
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> masks = results[0].masks
>>> xy_coords = masks.xy
>>> print(len(xy_coords)) # Number of masks
@@ -1223,7 +1223,7 @@ class Keypoints(BaseTensor):
the number of detections and K is the number of keypoints per detection.
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> keypoints = results[0].keypoints
>>> xy = keypoints.xy
>>> print(xy.shape) # (N, K, 2)
@@ -1388,7 +1388,7 @@ class Probs(BaseTensor):
(torch.Tensor | numpy.ndarray): A tensor containing the confidence score of the top 1 class.
Examples:
>>> results = model('image.jpg') # classify an image
>>> results = model("image.jpg") # classify an image
>>> probs = results[0].probs # get classification probabilities
>>> top1_confidence = probs.top1conf # get confidence of top 1 class
>>> print(f"Top 1 class confidence: {top1_confidence.item():.4f}")
@@ -1410,7 +1410,7 @@ class Probs(BaseTensor):
top 5 predicted classes, sorted in descending order of probability.
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> probs = results[0].probs
>>> top5_conf = probs.top5conf
>>> print(top5_conf) # Prints confidence scores for top 5 classes
@@ -1497,7 +1497,7 @@ class OBB(BaseTensor):
[x_center, y_center, width, height, rotation]. The shape is (N, 5) where N is the number of boxes.
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> obb = results[0].obb
>>> xywhr = obb.xywhr
>>> print(xywhr.shape)
@@ -1518,7 +1518,7 @@ class OBB(BaseTensor):
for N detections, where each score is in the range [0, 1].
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> obb_result = results[0].obb
>>> confidence_scores = obb_result.conf
>>> print(confidence_scores)
@@ -1535,7 +1535,7 @@ class OBB(BaseTensor):
bounding box. The shape is (N,), where N is the number of boxes.
Examples:
>>> results = model('image.jpg')
>>> results = model("image.jpg")
>>> result = results[0]
>>> obb = result.obb
>>> class_values = obb.cls
@@ -1553,7 +1553,7 @@ class OBB(BaseTensor):
oriented bounding box. Returns None if tracking IDs are not available.
Examples:
>>> results = model('image.jpg', tracker=True) # Run inference with tracking
>>> results = model("image.jpg", tracker=True) # Run inference with tracking
>>> for result in results:
... if result.obb is not None:
... track_ids = result.obb.id
@@ -1620,8 +1620,8 @@ class OBB(BaseTensor):
Examples:
>>> import torch
>>> from ultralytics import YOLO
>>> model = YOLO('yolov8n-obb.pt')
>>> results = model('path/to/image.jpg')
>>> model = YOLO("yolov8n-obb.pt")
>>> results = model("path/to/image.jpg")
>>> for result in results:
... obb = result.obb
... if obb is not None:

View file

@@ -12,8 +12,8 @@ Example:
```python
from ultralytics import YOLO
model = YOLO('yolov8n.pt')
model.tune(data='coco8.yaml', epochs=10, iterations=300, optimizer='AdamW', plots=False, save=False, val=False)
model = YOLO("yolov8n.pt")
model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
```
"""
@@ -54,15 +54,15 @@ class Tuner:
```python
from ultralytics import YOLO
model = YOLO('yolov8n.pt')
model.tune(data='coco8.yaml', epochs=10, iterations=300, optimizer='AdamW', plots=False, save=False, val=False)
model = YOLO("yolov8n.pt")
model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
```
Tune with custom search space.
```python
from ultralytics import YOLO
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
model.tune(space={key1: val1, key2: val2}) # custom search space dictionary
```
"""

View file

@@ -136,11 +136,11 @@ def check_dataset(path: str, task: str) -> None:
```python
from ultralytics.hub import check_dataset
check_dataset('path/to/coco8.zip', task='detect') # detect dataset
check_dataset('path/to/coco8-seg.zip', task='segment') # segment dataset
check_dataset('path/to/coco8-pose.zip', task='pose') # pose dataset
check_dataset('path/to/dota8.zip', task='obb') # OBB dataset
check_dataset('path/to/imagenet10.zip', task='classify') # classification dataset
check_dataset("path/to/coco8.zip", task="detect") # detect dataset
check_dataset("path/to/coco8-seg.zip", task="segment") # segment dataset
check_dataset("path/to/coco8-pose.zip", task="pose") # pose dataset
check_dataset("path/to/dota8.zip", task="obb") # OBB dataset
check_dataset("path/to/imagenet10.zip", task="classify") # classification dataset
```
"""
HUBDatasetStats(path=path, task=task).get_json()

View file

@@ -16,8 +16,8 @@ class FastSAM(Model):
```python
from ultralytics import FastSAM
model = FastSAM('last.pt')
results = model.predict('ultralytics/assets/bus.jpg')
model = FastSAM("last.pt")
results = model.predict("ultralytics/assets/bus.jpg")
```
"""

View file

@@ -92,8 +92,8 @@ class FastSAMPredictor(SegmentationPredictor):
if labels.sum() == 0 # all negative points
else torch.zeros(len(result), dtype=torch.bool, device=self.device)
)
for p, l in zip(points, labels):
point_idx[torch.nonzero(masks[:, p[1], p[0]], as_tuple=True)[0]] = True if l else False
for point, label in zip(points, labels):
point_idx[torch.nonzero(masks[:, point[1], point[0]], as_tuple=True)[0]] = True if label else False
idx |= point_idx
if texts is not None:
if isinstance(texts, str):
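
Unlike most hunks in this commit, this one renames code rather than reformatting it: the loop variables `p` and `l` become `point` and `label`. A lone `l` is easily misread as `1` and is exactly what lint rules such as E741 (ambiguous variable name) flag, so the rename improves readability beyond what the formatter alone would do.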

View file

@@ -6,8 +6,8 @@ Example:
```python
from ultralytics import NAS
model = NAS('yolo_nas_s')
results = model.predict('ultralytics/assets/bus.jpg')
model = NAS("yolo_nas_s")
results = model.predict("ultralytics/assets/bus.jpg")
```
"""
@@ -34,8 +34,8 @@ class NAS(Model):
```python
from ultralytics import NAS
model = NAS('yolo_nas_s')
results = model.predict('ultralytics/assets/bus.jpg')
model = NAS("yolo_nas_s")
results = model.predict("ultralytics/assets/bus.jpg")
```
Attributes:

View file

@@ -22,7 +22,7 @@ class NASPredictor(BasePredictor):
```python
from ultralytics import NAS
model = NAS('yolo_nas_s')
model = NAS("yolo_nas_s")
predictor = model.predictor
# Assumes that raw_preds, img, orig_imgs are available
results = predictor.postprocess(raw_preds, img, orig_imgs)

View file

@@ -24,7 +24,7 @@ class NASValidator(DetectionValidator):
```python
from ultralytics import NAS
model = NAS('yolo_nas_s')
model = NAS("yolo_nas_s")
validator = model.validator
# Assumes that raw_preds are available
final_preds = validator.postprocess(raw_preds)

View file

@@ -21,7 +21,7 @@ class RTDETRPredictor(BasePredictor):
from ultralytics.utils import ASSETS
from ultralytics.models.rtdetr import RTDETRPredictor
args = dict(model='rtdetr-l.pt', source=ASSETS)
args = dict(model="rtdetr-l.pt", source=ASSETS)
predictor = RTDETRPredictor(overrides=args)
predictor.predict_cli()
```

View file

@@ -25,7 +25,7 @@ class RTDETRTrainer(DetectionTrainer):
```python
from ultralytics.models.rtdetr.train import RTDETRTrainer
args = dict(model='rtdetr-l.yaml', data='coco8.yaml', imgsz=640, epochs=3)
args = dict(model="rtdetr-l.yaml", data="coco8.yaml", imgsz=640, epochs=3)
trainer = RTDETRTrainer(overrides=args)
trainer.train()
```

View file

@@ -62,7 +62,7 @@ class RTDETRValidator(DetectionValidator):
```python
from ultralytics.models.rtdetr import RTDETRValidator
args = dict(model='rtdetr-l.pt', data='coco8.yaml')
args = dict(model="rtdetr-l.pt", data="coco8.yaml")
validator = RTDETRValidator(args=args)
validator()
```

View file

@@ -41,8 +41,8 @@ class SAM(Model):
info: Logs information about the SAM model.
Examples:
>>> sam = SAM('sam_b.pt')
>>> results = sam.predict('image.jpg', points=[[500, 375]])
>>> sam = SAM("sam_b.pt")
>>> results = sam.predict("image.jpg", points=[[500, 375]])
>>> for r in results:
>>> print(f"Detected {len(r.masks)} masks")
"""
@@ -58,7 +58,7 @@ class SAM(Model):
NotImplementedError: If the model file extension is not .pt or .pth.
Examples:
>>> sam = SAM('sam_b.pt')
>>> sam = SAM("sam_b.pt")
>>> print(sam.is_sam2)
"""
if model and Path(model).suffix not in {".pt", ".pth"}:
@@ -78,8 +78,8 @@ class SAM(Model):
task (str | None): Task name. If provided, it specifies the particular task the model is being loaded for.
Examples:
>>> sam = SAM('sam_b.pt')
>>> sam._load('path/to/custom_weights.pt')
>>> sam = SAM("sam_b.pt")
>>> sam._load("path/to/custom_weights.pt")
"""
self.model = build_sam(weights)
@@ -100,8 +100,8 @@ class SAM(Model):
(List): The model predictions.
Examples:
>>> sam = SAM('sam_b.pt')
>>> results = sam.predict('image.jpg', points=[[500, 375]])
>>> sam = SAM("sam_b.pt")
>>> results = sam.predict("image.jpg", points=[[500, 375]])
>>> for r in results:
... print(f"Detected {len(r.masks)} masks")
"""
@@ -130,8 +130,8 @@ class SAM(Model):
(List): The model predictions, typically containing segmentation masks and other relevant information.
Examples:
>>> sam = SAM('sam_b.pt')
>>> results = sam('image.jpg', points=[[500, 375]])
>>> sam = SAM("sam_b.pt")
>>> results = sam("image.jpg", points=[[500, 375]])
>>> print(f"Detected {len(results[0].masks)} masks")
"""
return self.predict(source, stream, bboxes, points, labels, **kwargs)
@@ -151,7 +151,7 @@ class SAM(Model):
(Tuple): A tuple containing the model's information (string representations of the model).
Examples:
>>> sam = SAM('sam_b.pt')
>>> sam = SAM("sam_b.pt")
>>> info = sam.info()
>>> print(info[0]) # Print summary information
"""
@@ -167,7 +167,7 @@ class SAM(Model):
class. For SAM2 models, it maps to SAM2Predictor, otherwise to the standard Predictor.
Examples:
>>> sam = SAM('sam_b.pt')
>>> sam = SAM("sam_b.pt")
>>> task_map = sam.task_map
>>> print(task_map)
{'segment': <class 'ultralytics.models.sam.predict.Predictor'>}

View file

@@ -32,8 +32,9 @@ class MaskDecoder(nn.Module):
Examples:
>>> decoder = MaskDecoder(transformer_dim=256, transformer=transformer_module)
>>> masks, iou_pred = decoder(image_embeddings, image_pe, sparse_prompt_embeddings,
... dense_prompt_embeddings, multimask_output=True)
>>> masks, iou_pred = decoder(
... image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, multimask_output=True
... )
>>> print(f"Predicted masks shape: {masks.shape}, IoU predictions shape: {iou_pred.shape}")
"""
@@ -213,7 +214,8 @@ class SAM2MaskDecoder(nn.Module):
>>> dense_prompt_embeddings = torch.rand(1, 256, 64, 64)
>>> decoder = SAM2MaskDecoder(256, transformer)
>>> masks, iou_pred, sam_tokens_out, obj_score_logits = decoder.forward(
... image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, True, False)
... image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, True, False
... )
"""
def __init__(
@@ -345,7 +347,8 @@ class SAM2MaskDecoder(nn.Module):
>>> dense_prompt_embeddings = torch.rand(1, 256, 64, 64)
>>> decoder = SAM2MaskDecoder(256, transformer)
>>> masks, iou_pred, sam_tokens_out, obj_score_logits = decoder.forward(
... image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, True, False)
... image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, True, False
... )
"""
masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks(
image_embeddings=image_embeddings,

View file

@@ -417,7 +417,15 @@ class SAM2Model(torch.nn.Module):
>>> point_inputs = {"point_coords": torch.rand(1, 2, 2), "point_labels": torch.tensor([[1, 0]])}
>>> mask_inputs = torch.rand(1, 1, 512, 512)
>>> results = model._forward_sam_heads(backbone_features, point_inputs, mask_inputs)
>>> low_res_multimasks, high_res_multimasks, ious, low_res_masks, high_res_masks, obj_ptr, object_score_logits = results
>>> (
... low_res_multimasks,
... high_res_multimasks,
... ious,
... low_res_masks,
... high_res_masks,
... obj_ptr,
... object_score_logits,
... ) = results
"""
B = backbone_features.size(0)
device = backbone_features.device

View file

@@ -716,7 +716,7 @@ class BasicLayer(nn.Module):
Examples:
>>> layer = BasicLayer(dim=96, input_resolution=(56, 56), depth=2, num_heads=3, window_size=7)
>>> x = torch.randn(1, 56*56, 96)
>>> x = torch.randn(1, 56 * 56, 96)
>>> output = layer(x)
>>> print(output.shape)
"""

View file

@@ -22,7 +22,7 @@ def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num
Examples:
>>> frame_idx = 5
>>> cond_frame_outputs = {1: 'a', 3: 'b', 7: 'c', 9: 'd'}
>>> cond_frame_outputs = {1: "a", 3: "b", 7: "c", 9: "d"}
>>> max_cond_frame_num = 2
>>> selected, unselected = select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num)
>>> print(selected)

View file

@@ -69,8 +69,8 @@ class Predictor(BasePredictor):
Examples:
>>> predictor = Predictor()
>>> predictor.setup_model(model_path='sam_model.pt')
>>> predictor.set_image('image.jpg')
>>> predictor.setup_model(model_path="sam_model.pt")
>>> predictor.set_image("image.jpg")
>>> masks, scores, boxes = predictor.generate()
>>> results = predictor.postprocess((masks, scores, boxes), im, orig_img)
"""
@@ -90,8 +90,8 @@ class Predictor(BasePredictor):
Examples:
>>> predictor = Predictor(cfg=DEFAULT_CFG)
>>> predictor = Predictor(overrides={'imgsz': 640})
>>> predictor = Predictor(_callbacks={'on_predict_start': custom_callback})
>>> predictor = Predictor(overrides={"imgsz": 640})
>>> predictor = Predictor(_callbacks={"on_predict_start": custom_callback})
"""
if overrides is None:
overrides = {}
@@ -188,8 +188,8 @@ class Predictor(BasePredictor):
Examples:
>>> predictor = Predictor()
>>> predictor.setup_model(model_path='sam_model.pt')
>>> predictor.set_image('image.jpg')
>>> predictor.setup_model(model_path="sam_model.pt")
>>> predictor.set_image("image.jpg")
>>> masks, scores, logits = predictor.inference(im, bboxes=[[0, 0, 100, 100]])
"""
# Override prompts if any stored in self.prompts
@@ -475,8 +475,8 @@ class Predictor(BasePredictor):
Examples:
>>> predictor = Predictor()
>>> predictor.setup_source('path/to/images')
>>> predictor.setup_source('video.mp4')
>>> predictor.setup_source("path/to/images")
>>> predictor.setup_source("video.mp4")
>>> predictor.setup_source(None) # Uses default source if available
Notes:
@@ -504,8 +504,8 @@ class Predictor(BasePredictor):
Examples:
>>> predictor = Predictor()
>>> predictor.set_image('path/to/image.jpg')
>>> predictor.set_image(cv2.imread('path/to/image.jpg'))
>>> predictor.set_image("path/to/image.jpg")
>>> predictor.set_image(cv2.imread("path/to/image.jpg"))
Notes:
- This method should be called before performing inference on a new image.

View file

@@ -21,7 +21,7 @@ class ClassificationPredictor(BasePredictor):
from ultralytics.utils import ASSETS
from ultralytics.models.yolo.classify import ClassificationPredictor
args = dict(model='yolov8n-cls.pt', source=ASSETS)
args = dict(model="yolov8n-cls.pt", source=ASSETS)
predictor = ClassificationPredictor(overrides=args)
predictor.predict_cli()
```

View file

@@ -22,7 +22,7 @@ class ClassificationTrainer(BaseTrainer):
```python
from ultralytics.models.yolo.classify import ClassificationTrainer
args = dict(model='yolov8n-cls.pt', data='imagenet10', epochs=3)
args = dict(model="yolov8n-cls.pt", data="imagenet10", epochs=3)
trainer = ClassificationTrainer(overrides=args)
trainer.train()
```

View file

@@ -20,7 +20,7 @@ class ClassificationValidator(BaseValidator):
```python
from ultralytics.models.yolo.classify import ClassificationValidator
args = dict(model='yolov8n-cls.pt', data='imagenet10')
args = dict(model="yolov8n-cls.pt", data="imagenet10")
validator = ClassificationValidator(args=args)
validator()
```

View file

@@ -14,7 +14,7 @@ class DetectionPredictor(BasePredictor):
from ultralytics.utils import ASSETS
from ultralytics.models.yolo.detect import DetectionPredictor
args = dict(model='yolov8n.pt', source=ASSETS)
args = dict(model="yolov8n.pt", source=ASSETS)
predictor = DetectionPredictor(overrides=args)
predictor.predict_cli()
```

View file

@@ -24,7 +24,7 @@ class DetectionTrainer(BaseTrainer):
```python
from ultralytics.models.yolo.detect import DetectionTrainer
args = dict(model='yolov8n.pt', data='coco8.yaml', epochs=3)
args = dict(model="yolov8n.pt", data="coco8.yaml", epochs=3)
trainer = DetectionTrainer(overrides=args)
trainer.train()
```

View file

@@ -22,7 +22,7 @@ class DetectionValidator(BaseValidator):
```python
from ultralytics.models.yolo.detect import DetectionValidator
args = dict(model='yolov8n.pt', data='coco8.yaml')
args = dict(model="yolov8n.pt", data="coco8.yaml")
validator = DetectionValidator(args=args)
validator()
```

View file

@@ -16,7 +16,7 @@ class OBBPredictor(DetectionPredictor):
from ultralytics.utils import ASSETS
from ultralytics.models.yolo.obb import OBBPredictor
args = dict(model='yolov8n-obb.pt', source=ASSETS)
args = dict(model="yolov8n-obb.pt", source=ASSETS)
predictor = OBBPredictor(overrides=args)
predictor.predict_cli()
```

View file

@@ -15,7 +15,7 @@ class OBBTrainer(yolo.detect.DetectionTrainer):
```python
from ultralytics.models.yolo.obb import OBBTrainer
args = dict(model='yolov8n-obb.pt', data='dota8.yaml', epochs=3)
args = dict(model="yolov8n-obb.pt", data="dota8.yaml", epochs=3)
trainer = OBBTrainer(overrides=args)
trainer.train()
```

View file

@@ -18,9 +18,9 @@ class OBBValidator(DetectionValidator):
```python
from ultralytics.models.yolo.obb import OBBValidator
args = dict(model='yolov8n-obb.pt', data='dota8.yaml')
args = dict(model="yolov8n-obb.pt", data="dota8.yaml")
validator = OBBValidator(args=args)
validator(model=args['model'])
validator(model=args["model"])
```
"""

View file

@@ -14,7 +14,7 @@ class PosePredictor(DetectionPredictor):
from ultralytics.utils import ASSETS
from ultralytics.models.yolo.pose import PosePredictor
args = dict(model='yolov8n-pose.pt', source=ASSETS)
args = dict(model="yolov8n-pose.pt", source=ASSETS)
predictor = PosePredictor(overrides=args)
predictor.predict_cli()
```

View file

@@ -16,7 +16,7 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
```python
from ultralytics.models.yolo.pose import PoseTrainer
args = dict(model='yolov8n-pose.pt', data='coco8-pose.yaml', epochs=3)
args = dict(model="yolov8n-pose.pt", data="coco8-pose.yaml", epochs=3)
trainer = PoseTrainer(overrides=args)
trainer.train()
```

View file

@@ -20,7 +20,7 @@ class PoseValidator(DetectionValidator):
```python
from ultralytics.models.yolo.pose import PoseValidator
args = dict(model='yolov8n-pose.pt', data='coco8-pose.yaml')
args = dict(model="yolov8n-pose.pt", data="coco8-pose.yaml")
validator = PoseValidator(args=args)
validator()
```

View file

@@ -14,7 +14,7 @@ class SegmentationPredictor(DetectionPredictor):
from ultralytics.utils import ASSETS
from ultralytics.models.yolo.segment import SegmentationPredictor
args = dict(model='yolov8n-seg.pt', source=ASSETS)
args = dict(model="yolov8n-seg.pt", source=ASSETS)
predictor = SegmentationPredictor(overrides=args)
predictor.predict_cli()
```

View file

@@ -16,7 +16,7 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
```python
from ultralytics.models.yolo.segment import SegmentationTrainer
args = dict(model='yolov8n-seg.pt', data='coco8-seg.yaml', epochs=3)
args = dict(model="yolov8n-seg.pt", data="coco8-seg.yaml", epochs=3)
trainer = SegmentationTrainer(overrides=args)
trainer.train()
```

View file

@@ -22,7 +22,7 @@ class SegmentationValidator(DetectionValidator):
```python
from ultralytics.models.yolo.segment import SegmentationValidator
args = dict(model='yolov8n-seg.pt', data='coco8-seg.yaml')
args = dict(model="yolov8n-seg.pt", data="coco8-seg.yaml")
validator = SegmentationValidator(args=args)
validator()
```

View file

@@ -29,7 +29,7 @@ class WorldTrainer(yolo.detect.DetectionTrainer):
```python
from ultralytics.models.yolo.world import WorldModel
args = dict(model='yolov8s-world.pt', data='coco8.yaml', epochs=3)
args = dict(model="yolov8s-world.pt", data="coco8.yaml", epochs=3)
trainer = WorldTrainer(overrides=args)
trainer.train()
```

View file

@@ -11,9 +11,9 @@ Example:
x = torch.ones(1, 128, 40, 40)
m = Conv(128, 128)
f = f'{m._get_name()}.onnx'
f = f"{m._get_name()}.onnx"
torch.onnx.export(m, x, f)
os.system(f'onnxslim {f} {f} && open {f}') # pip install onnxslim
os.system(f"onnxslim {f} {f} && open {f}") # pip install onnxslim
```
"""

View file

@@ -713,7 +713,7 @@ def temporary_modules(modules=None, attributes=None):
Example:
```python
with temporary_modules({'old.module': 'new.module'}, {'old.module.attribute': 'new.module.attribute'}):
with temporary_modules({"old.module": "new.module"}, {"old.module.attribute": "new.module.attribute"}):
import old.module # this will now import new.module
from old.module import attribute # this will now import new.module.attribute
```

View file

@@ -20,4 +20,5 @@ __all__ = (
"QueueManager",
"SpeedEstimator",
"Analytics",
"inference",
)
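
This hunk is also a functional change rather than a formatting one: it adds `"inference"` to the module's public `__all__` exports.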

View file

@@ -42,7 +42,7 @@ class STrack(BaseTrack):
Examples:
Initialize and activate a new track
>>> track = STrack(xywh=[100, 200, 50, 80, 0], score=0.9, cls='person')
>>> track = STrack(xywh=[100, 200, 50, 80, 0], score=0.9, cls="person")
>>> track.activate(kalman_filter=KalmanFilterXYAH(), frame_id=1)
"""
@@ -61,7 +61,7 @@ class STrack(BaseTrack):
Examples:
>>> xywh = [100.0, 150.0, 50.0, 75.0, 1]
>>> score = 0.9
>>> cls = 'person'
>>> cls = "person"
>>> track = STrack(xywh, score, cls)
"""
super().__init__()

View file

@@ -33,7 +33,7 @@ class GMC:
Examples:
Create a GMC object and apply it to a frame
>>> gmc = GMC(method='sparseOptFlow', downscale=2)
>>> gmc = GMC(method="sparseOptFlow", downscale=2)
>>> frame = np.array([[1, 2, 3], [4, 5, 6]])
>>> processed_frame = gmc.apply(frame)
>>> print(processed_frame)
@@ -51,7 +51,7 @@ class GMC:
Examples:
Initialize a GMC object with the 'sparseOptFlow' method and a downscale factor of 2
>>> gmc = GMC(method='sparseOptFlow', downscale=2)
>>> gmc = GMC(method="sparseOptFlow", downscale=2)
"""
super().__init__()
@@ -101,7 +101,7 @@ class GMC:
(np.ndarray): Processed frame with applied object detection.
Examples:
>>> gmc = GMC(method='sparseOptFlow')
>>> gmc = GMC(method="sparseOptFlow")
>>> raw_frame = np.random.rand(480, 640, 3)
>>> processed_frame = gmc.apply(raw_frame)
>>> print(processed_frame.shape)
@@ -127,7 +127,7 @@ class GMC:
(np.ndarray): The processed frame with the applied ECC transformation.
Examples:
>>> gmc = GMC(method='ecc')
>>> gmc = GMC(method="ecc")
>>> processed_frame = gmc.applyEcc(np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]))
>>> print(processed_frame)
[[1. 0. 0.]
@@ -173,7 +173,7 @@ class GMC:
(np.ndarray): Processed frame.
Examples:
>>> gmc = GMC(method='orb')
>>> gmc = GMC(method="orb")
>>> raw_frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
>>> processed_frame = gmc.applyFeatures(raw_frame)
>>> print(processed_frame.shape)
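A hedged end-to-end sketch of feeding video frames through GMC follows; `video.mp4` is a placeholder path and the import location is assumed from the package layout:

```python
import cv2

from ultralytics.trackers.utils.gmc import GMC

gmc = GMC(method="sparseOptFlow", downscale=2)
cap = cv2.VideoCapture("video.mp4")  # hypothetical input video
while True:
    ok, frame = cap.read()
    if not ok:
        break
    warp = gmc.apply(frame)  # per-frame camera-motion estimate
    print(warp.shape)
cap.release()
```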

View file

@@ -268,7 +268,7 @@ class KalmanFilterXYAH:
>>> mean = np.array([0, 0, 1, 1, 0, 0, 0, 0])
>>> covariance = np.eye(8)
>>> measurements = np.array([[1, 1, 1, 1], [2, 2, 1, 1]])
>>> distances = kf.gating_distance(mean, covariance, measurements, only_position=False, metric='maha')
>>> distances = kf.gating_distance(mean, covariance, measurements, only_position=False, metric="maha")
"""
mean, covariance = self.project(mean, covariance)
if only_position:
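In practice these gating distances are compared against a chi-square quantile; a hedged sketch follows, assuming the standard 0.95 threshold for 4 degrees of freedom and the import path shown:

```python
import numpy as np

from ultralytics.trackers.utils.kalman_filter import KalmanFilterXYAH

kf = KalmanFilterXYAH()
mean = np.array([0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0])
covariance = np.eye(8)
measurements = np.array([[1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 1.0, 1.0]])

CHI2INV95_4DOF = 9.4877  # 0.95 chi-square quantile, 4 degrees of freedom
d = kf.gating_distance(mean, covariance, measurements, only_position=False, metric="maha")
feasible = measurements[d <= CHI2INV95_4DOF]  # drop statistically implausible matches
```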

View file

@@ -121,7 +121,7 @@ def embedding_distance(tracks: list, detections: list, metric: str = "cosine") -
Compute the embedding distance between tracks and detections using cosine metric
>>> tracks = [STrack(...), STrack(...)] # List of track objects with embedding features
>>> detections = [BaseTrack(...), BaseTrack(...)] # List of detection objects with embedding features
>>> cost_matrix = embedding_distance(tracks, detections, metric='cosine')
>>> cost_matrix = embedding_distance(tracks, detections, metric="cosine")
"""
cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
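The underlying computation is a plain cosine cost matrix; a self-contained sketch with random features (shapes are illustrative only) is:

```python
import numpy as np
from scipy.spatial.distance import cdist

track_feats = np.random.rand(3, 128).astype(np.float32)  # 3 tracks, 128-d embeddings
det_feats = np.random.rand(5, 128).astype(np.float32)  # 5 detections
cost_matrix = np.maximum(0.0, cdist(track_feats, det_feats, metric="cosine"))  # clip negatives to zero
print(cost_matrix.shape)  # (3, 5)
```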

View file

@@ -765,8 +765,8 @@ def remove_colorstr(input_string):
(str): A new string with all ANSI escape codes removed.
Examples:
>>> remove_colorstr(colorstr('blue', 'bold', 'hello world'))
>>> 'hello world'
>>> remove_colorstr(colorstr("blue", "bold", "hello world"))
>>> "hello world"
"""
ansi_escape = re.compile(r"\x1B\[[0-9;]*[A-Za-z]")
return ansi_escape.sub("", input_string)
@@ -780,12 +780,12 @@ class TryExcept(contextlib.ContextDecorator):
As a decorator:
>>> @TryExcept(msg="Error occurred in func", verbose=True)
>>> def func():
>>> # Function logic here
>>> # Function logic here
>>> pass
As a context manager:
>>> with TryExcept(msg="Error occurred in block", verbose=True):
>>> # Code block here
>>> # Code block here
>>> pass
"""
@@ -816,7 +816,7 @@ class Retry(contextlib.ContextDecorator):
Example usage as a decorator:
>>> @Retry(times=3, delay=2)
>>> def test_func():
>>> # Replace with function logic that may raise exceptions
>>> # Replace with function logic that may raise exceptions
>>> return True
"""

View file

@@ -71,7 +71,7 @@ def benchmark(
```python
from ultralytics.utils.benchmarks import benchmark
benchmark(model='yolov8n.pt', imgsz=640)
benchmark(model="yolov8n.pt", imgsz=640)
```
"""
import pandas as pd # scope for faster 'import ultralytics'
@@ -302,7 +302,7 @@ class ProfileModels:
```python
from ultralytics.utils.benchmarks import ProfileModels
ProfileModels(['yolov8n.yaml', 'yolov8s.yaml'], imgsz=640).profile()
ProfileModels(["yolov8n.yaml", "yolov8s.yaml"], imgsz=640).profile()
```
"""

View file

@@ -62,7 +62,7 @@ def parse_requirements(file_path=ROOT.parent / "requirements.txt", package=""):
```python
from ultralytics.utils.checks import parse_requirements
parse_requirements(package='ultralytics')
parse_requirements(package="ultralytics")
```
"""
@@ -197,16 +197,16 @@ def check_version(
Example:
```python
# Check if current version is exactly 22.04
check_version(current='22.04', required='==22.04')
check_version(current="22.04", required="==22.04")
# Check if current version is greater than or equal to 22.04
check_version(current='22.10', required='22.04') # assumes '>=' inequality if none passed
check_version(current="22.10", required="22.04") # assumes '>=' inequality if none passed
# Check if current version is less than or equal to 22.04
check_version(current='22.04', required='<=22.04')
check_version(current="22.04", required="<=22.04")
# Check if current version is between 20.04 (inclusive) and 22.04 (exclusive)
check_version(current='21.10', required='>20.04,<22.04')
check_version(current="21.10", required=">20.04,<22.04")
```
"""
if not current: # if current is '' or None
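For comparison, the same checks can be expressed with the third-party `packaging` library; this sketch assumes, as above, that a bare requirement implies '>=':

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version


def version_ok(current: str, required: str) -> bool:
    """Return True if `current` satisfies `required`; bare versions imply '>='."""
    spec = required if required[0] in "<>=!~" else f">={required}"
    return Version(current) in SpecifierSet(spec)


print(version_ok("22.10", "22.04"))  # True
print(version_ok("21.10", ">20.04,<22.04"))  # True
```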
@@ -353,13 +353,13 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
from ultralytics.utils.checks import check_requirements
# Check a requirements.txt file
check_requirements('path/to/requirements.txt')
check_requirements("path/to/requirements.txt")
# Check a single package
check_requirements('ultralytics>=8.0.0')
check_requirements("ultralytics>=8.0.0")
# Check multiple packages
check_requirements(['numpy', 'ultralytics>=8.0.0'])
check_requirements(["numpy", "ultralytics>=8.0.0"])
```
"""
@@ -634,7 +634,7 @@ def check_amp(model):
from ultralytics import YOLO
from ultralytics.utils.checks import check_amp
model = YOLO('yolov8n.pt').model.cuda()
model = YOLO("yolov8n.pt").model.cuda()
check_amp(model)
```

View file

@@ -75,7 +75,7 @@ def delete_dsstore(path, files_to_delete=(".DS_Store", "__MACOSX")):
```python
from ultralytics.utils.downloads import delete_dsstore
delete_dsstore('path/to/dir')
delete_dsstore("path/to/dir")
```
Note:
@@ -107,7 +107,7 @@ def zip_directory(directory, compress=True, exclude=(".DS_Store", "__MACOSX"), p
```python
from ultralytics.utils.downloads import zip_directory
file = zip_directory('path/to/dir')
file = zip_directory("path/to/dir")
```
"""
from zipfile import ZIP_DEFLATED, ZIP_STORED, ZipFile
@@ -153,7 +153,7 @@ def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX"), exist_ok=Fals
```python
from ultralytics.utils.downloads import unzip_file
dir = unzip_file('path/to/file.zip')
dir = unzip_file("path/to/file.zip")
```
"""
from zipfile import BadZipFile, ZipFile, is_zipfile
@@ -392,7 +392,7 @@ def get_github_assets(repo="ultralytics/assets", version="latest", retry=False):
Example:
```python
tag, assets = get_github_assets(repo='ultralytics/assets', version='latest')
tag, assets = get_github_assets(repo="ultralytics/assets", version="latest")
```
"""
@@ -425,7 +425,7 @@ def attempt_download_asset(file, repo="ultralytics/assets", release="v8.2.0", **
Example:
```python
file_path = attempt_download_asset('yolov8n.pt', repo='ultralytics/assets', release='latest')
file_path = attempt_download_asset("yolov8n.pt", repo="ultralytics/assets", release="latest")
```
"""
from ultralytics.utils import SETTINGS # scoped for circular import
@@ -480,7 +480,7 @@ def download(url, dir=Path.cwd(), unzip=True, delete=False, curl=False, threads=
Example:
```python
download('https://ultralytics.com/assets/example.zip', dir='path/to/dir', unzip=True)
download("https://ultralytics.com/assets/example.zip", dir="path/to/dir", unzip=True)
```
"""
dir = Path(dir)
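A hedged round-trip for the zip helpers above; the directory path is a placeholder, and the return values (archive path, extraction directory) are taken from the docstring examples shown:

```python
from ultralytics.utils.downloads import unzip_file, zip_directory

archive = zip_directory("path/to/dir")  # compress a folder, returns the created .zip path
restored = unzip_file(archive)  # extract it back, returns the output directory
print(archive, restored)
```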

View file

@@ -28,13 +28,13 @@ class WorkingDirectory(contextlib.ContextDecorator):
Examples:
Using as a context manager:
>>> with WorkingDirectory('/path/to/new/dir'):
>>> # Perform operations in the new directory
>>> # Perform operations in the new directory
>>> pass
Using as a decorator:
>>> @WorkingDirectory('/path/to/new/dir')
>>> def some_function():
>>> # Perform operations in the new directory
>>> # Perform operations in the new directory
>>> pass
"""
@@ -69,7 +69,7 @@ def spaces_in_path(path):
Use the context manager to handle paths with spaces:
>>> from ultralytics.utils.files import spaces_in_path
>>> with spaces_in_path('/path/with spaces') as new_path:
>>> # Your code here
>>> # Your code here
"""
# If path has spaces, replace them with underscores
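A minimal sketch of the ContextDecorator pattern behind WorkingDirectory, with a hypothetical class name and no claim to match the library source:

```python
import contextlib
import os


class ChdirGuard(contextlib.ContextDecorator):
    """Illustrative sketch: switch cwd on entry, always restore on exit."""

    def __init__(self, new_dir):
        self.dir = new_dir
        self.cwd = None

    def __enter__(self):
        self.cwd = os.getcwd()  # remember the original directory
        os.chdir(self.dir)

    def __exit__(self, *exc):
        os.chdir(self.cwd)  # restore even if the body raised
```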

View file

@@ -199,7 +199,7 @@ class Instances:
instances = Instances(
bboxes=np.array([[10, 10, 30, 30], [20, 20, 40, 40]]),
segments=[np.array([[5, 5], [10, 10]]), np.array([[15, 15], [20, 20]])],
keypoints=np.array([[[5, 5, 1], [10, 10, 1]], [[15, 15, 1], [20, 20, 1]]])
keypoints=np.array([[[5, 5, 1], [10, 10, 1]], [[15, 15, 1], [20, 20, 1]]]),
)
```

View file

@@ -902,8 +902,8 @@ def save_one_box(xyxy, im, file=Path("im.jpg"), gain=1.02, pad=10, square=False,
from ultralytics.utils.plotting import save_one_box
xyxy = [50, 50, 150, 150]
im = cv2.imread('image.jpg')
cropped_im = save_one_box(xyxy, im, file='cropped.jpg', square=True)
im = cv2.imread("image.jpg")
cropped_im = save_one_box(xyxy, im, file="cropped.jpg", square=True)
```
"""
@@ -1109,7 +1109,7 @@ def plot_results(file="path/to/results.csv", dir="", segment=False, pose=False,
```python
from ultralytics.utils.plotting import plot_results
plot_results('path/to/results.csv', segment=True)
plot_results("path/to/results.csv", segment=True)
```
"""
import pandas as pd # scope for faster 'import ultralytics'
@@ -1195,7 +1195,7 @@ def plot_tune_results(csv_file="tune_results.csv"):
csv_file (str, optional): Path to the CSV file containing the tuning results. Defaults to 'tune_results.csv'.
Examples:
>>> plot_tune_results('path/to/tune_results.csv')
>>> plot_tune_results("path/to/tune_results.csv")
"""
import pandas as pd # scope for faster 'import ultralytics'

View file

@@ -137,10 +137,10 @@ def select_device(device="", batch=0, newline=False, verbose=True):
devices when using multiple GPUs.
Examples:
>>> select_device('cuda:0')
>>> select_device("cuda:0")
device(type='cuda', index=0)
>>> select_device('cpu')
>>> select_device("cpu")
device(type='cpu')
Note:
@@ -331,11 +331,13 @@ def model_info_for_loggers(trainer):
Example:
YOLOv8n info for loggers
```python
results = {'model/parameters': 3151904,
'model/GFLOPs': 8.746,
'model/speed_ONNX(ms)': 41.244,
'model/speed_TensorRT(ms)': 3.211,
'model/speed_PyTorch(ms)': 18.755}
results = {
"model/parameters": 3151904,
"model/GFLOPs": 8.746,
"model/speed_ONNX(ms)": 41.244,
"model/speed_TensorRT(ms)": 3.211,
"model/speed_PyTorch(ms)": 18.755,
}
```
"""
if trainer.args.profile: # profile ONNX and TensorRT times
@@ -542,7 +544,7 @@ def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "") -> None:
from pathlib import Path
from ultralytics.utils.torch_utils import strip_optimizer
for f in Path('path/to/model/checkpoints').rglob('*.pt'):
for f in Path("path/to/model/checkpoints").rglob("*.pt"):
strip_optimizer(f)
```
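A hedged follow-up that reports the on-disk size reduction, assuming a real "best.pt" checkpoint exists locally:

```python
from pathlib import Path

from ultralytics.utils.torch_utils import strip_optimizer

f = Path("best.pt")
before = f.stat().st_size / 1e6  # size in MB before stripping
strip_optimizer(f)
after = f.stat().st_size / 1e6  # size in MB after stripping
print(f"{f.name}: {before:.1f} MB -> {after:.1f} MB")
```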

View file

@@ -28,10 +28,10 @@ def run_ray_tune(
from ultralytics import YOLO
# Load a YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Start tuning hyperparameters for YOLOv8n training on the COCO8 dataset
result_grid = model.tune(data='coco8.yaml', use_ray=True)
result_grid = model.tune(data="coco8.yaml", use_ray=True)
```
"""