Reformat Markdown code blocks (#12795)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
parent 2af71d15a6
commit fceea033ad
128 changed files with 1067 additions and 1018 deletions
@@ -41,13 +41,13 @@ def on_predict_batch_end(predictor):

 # Create a YOLO model instance
-model = YOLO(f'yolov8n.pt')
+model = YOLO(f"yolov8n.pt")

 # Add the custom callback to the model
 model.add_callback("on_predict_batch_end", on_predict_batch_end)

 # Iterate through the results and frames
-for (result, frame) in model.predict():  # or model.track()
+for result, frame in model.predict():  # or model.track()
     pass
 ```
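For context, the docs snippet edited in this hunk belongs to a callbacks example in which `on_predict_batch_end` pairs each result with its source frame. A minimal runnable sketch under that assumption; the callback body and the `source` value are illustrative, not shown in this diff:

```python
from ultralytics import YOLO


def on_predict_batch_end(predictor):
    """Pair each prediction result with the frame it came from (sketch)."""
    _, image, _, _ = predictor.batch  # assumed batch layout: (paths, images, ...)
    image = image if isinstance(image, list) else [image]
    predictor.results = zip(predictor.results, image)


# Create a YOLO model instance
model = YOLO("yolov8n.pt")

# Add the custom callback to the model
model.add_callback("on_predict_batch_end", on_predict_batch_end)

# Iterate through the results and frames
for result, frame in model.predict(source="video.mp4", stream=True):  # hypothetical source
    pass
```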
@@ -33,7 +33,7 @@ Ultralytics commands use the following syntax:
 from ultralytics import YOLO

 # Load a YOLOv8 model from a pre-trained weights file
-model = YOLO('yolov8n.pt')
+model = YOLO("yolov8n.pt")

 # Run MODE mode using the custom arguments ARGS (guess TASK)
 model.MODE(ARGS)
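`MODE` and `ARGS` above are placeholders in the docs syntax. A hedged concrete instance, substituting the predict mode with common arguments (the values are illustrative):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# MODE = predict, ARGS = source/conf; any YOLO predict argument works here
results = model.predict(source="https://ultralytics.com/images/bus.jpg", conf=0.5)
```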
@@ -27,22 +27,22 @@ For example, users can load a model, train it, evaluate its performance on a val
 from ultralytics import YOLO

 # Create a new YOLO model from scratch
-model = YOLO('yolov8n.yaml')
+model = YOLO("yolov8n.yaml")

 # Load a pretrained YOLO model (recommended for training)
-model = YOLO('yolov8n.pt')
+model = YOLO("yolov8n.pt")

 # Train the model using the 'coco8.yaml' dataset for 3 epochs
-results = model.train(data='coco8.yaml', epochs=3)
+results = model.train(data="coco8.yaml", epochs=3)

 # Evaluate the model's performance on the validation set
 results = model.val()

 # Perform object detection on an image using the model
-results = model('https://ultralytics.com/images/bus.jpg')
+results = model("https://ultralytics.com/images/bus.jpg")

 # Export the model to ONNX format
-success = model.export(format='onnx')
+success = model.export(format="onnx")
 ```

 ## [Train](../modes/train.md)
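The workflow above returns objects worth inspecting. A short sketch, assuming the current `ultralytics` return types (`metrics.box.map` for mAP50-95 on detection metrics, and `export` returning the output file path):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")

metrics = model.val(data="coco8.yaml")  # validation metrics object
print(metrics.box.map)  # mAP50-95, assuming the DetMetrics interface

path = model.export(format="onnx")  # export returns the path of the exported file
print(path)
```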
@@ -56,7 +56,7 @@ Train mode is used for training a YOLOv8 model on a custom dataset. In this mode
 ```python
 from ultralytics import YOLO

-model = YOLO('yolov8n.pt')  # pass any model type
+model = YOLO("yolov8n.pt")  # pass any model type
 results = model.train(epochs=5)
 ```
@@ -65,8 +65,8 @@ Train mode is used for training a YOLOv8 model on a custom dataset. In this mode
 ```python
 from ultralytics import YOLO

-model = YOLO('yolov8n.yaml')
-results = model.train(data='coco8.yaml', epochs=5)
+model = YOLO("yolov8n.yaml")
+results = model.train(data="coco8.yaml", epochs=5)
 ```

 === "Resume"
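The `=== "Resume"` tab that this hunk ends on continues the training examples. A minimal sketch of resuming, assuming the documented `resume=True` pattern with a saved checkpoint (the checkpoint path is illustrative):

```python
from ultralytics import YOLO

model = YOLO("path/to/last.pt")  # load a partially trained checkpoint
results = model.train(resume=True)  # continue from the checkpoint's saved state
```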
@@ -117,14 +117,14 @@ Predict mode is used for making predictions using a trained YOLOv8 model on new
 === "From source"

     ```python
-    from ultralytics import YOLO
-    from PIL import Image
     import cv2
+    from PIL import Image
+    from ultralytics import YOLO

     model = YOLO("model.pt")
     # accepts all formats - image/dir/Path/URL/video/PIL/ndarray. 0 for webcam
     results = model.predict(source="0")
-    results = model.predict(source="folder", show=True) # Display preds. Accepts all YOLO predict arguments
+    results = model.predict(source="folder", show=True)  # Display preds. Accepts all YOLO predict arguments

     # from PIL
     im1 = Image.open("bus.jpg")
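The example in this hunk continues past the shown context with PIL and ndarray sources. A sketch of those branches, assuming a local `bus.jpg` and the documented `save`/`save_txt` arguments:

```python
import cv2
from PIL import Image

from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# from PIL
im1 = Image.open("bus.jpg")
results = model.predict(source=im1, save=True)  # save plotted images

# from ndarray
im2 = cv2.imread("bus.jpg")
results = model.predict(source=im2, save=True, save_txt=True)  # save predictions as label files
```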
@@ -153,20 +153,20 @@ Predict mode is used for making predictions using a trained YOLOv8 model on new

 for result in results:
     # Detection
-    result.boxes.xyxy   # box with xyxy format, (N, 4)
-    result.boxes.xywh   # box with xywh format, (N, 4)
+    result.boxes.xyxy  # box with xyxy format, (N, 4)
+    result.boxes.xywh  # box with xywh format, (N, 4)
     result.boxes.xyxyn  # box with xyxy format but normalized, (N, 4)
     result.boxes.xywhn  # box with xywh format but normalized, (N, 4)
-    result.boxes.conf   # confidence score, (N, 1)
-    result.boxes.cls    # cls, (N, 1)
+    result.boxes.conf  # confidence score, (N, 1)
+    result.boxes.cls  # cls, (N, 1)

     # Segmentation
-    result.masks.data   # masks, (N, H, W)
-    result.masks.xy     # x,y segments (pixels), List[segment] * N
-    result.masks.xyn    # x,y segments (normalized), List[segment] * N
+    result.masks.data  # masks, (N, H, W)
+    result.masks.xy  # x,y segments (pixels), List[segment] * N
+    result.masks.xyn  # x,y segments (normalized), List[segment] * N

     # Classification
-    result.probs        # cls prob, (num_class, )
+    result.probs  # cls prob, (num_class, )

     # Each result is composed of torch.Tensor by default,
     # in which you can easily use following functionality:
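The trailing comment refers to the tensor operations each `result` attribute supports. A short sketch of that functionality, assuming the standard `torch.Tensor`-backed attributes:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
results = model("https://ultralytics.com/images/bus.jpg")

for result in results:
    boxes = result.boxes.xyxy.cpu().numpy()  # move to CPU and convert to NumPy
    confs = result.boxes.conf.tolist()  # plain Python floats
    print(boxes.shape, confs)
```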
@@ -218,9 +218,9 @@ Track mode is used for tracking objects in real-time using a YOLOv8 model. In th
 from ultralytics import YOLO

 # Load a model
-model = YOLO('yolov8n.pt')  # load an official detection model
-model = YOLO('yolov8n-seg.pt')  # load an official segmentation model
-model = YOLO('path/to/best.pt')  # load a custom model
+model = YOLO("yolov8n.pt")  # load an official detection model
+model = YOLO("yolov8n-seg.pt")  # load an official segmentation model
+model = YOLO("path/to/best.pt")  # load a custom model

 # Track with the model
 results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True)
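A hedged variant of the tracking call above, selecting an explicit tracker configuration per the documented `track` API (the `tracker` argument accepts the bundled BoT-SORT and ByteTrack YAMLs):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# Track with an explicit tracker config; the source URL is the same illustrative one
results = model.track(source="https://youtu.be/LNwODJXcvt4", tracker="bytetrack.yaml", show=True)
```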
@@ -242,7 +242,7 @@ Benchmark mode is used to profile the speed and accuracy of various export forma
 from ultralytics.utils.benchmarks import benchmark

 # Benchmark
-benchmark(model='yolov8n.pt', data='coco8.yaml', imgsz=640, half=False, device=0)
+benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device=0)
 ```

 [Benchmark Examples](../modes/benchmark.md){ .md-button }
@@ -259,18 +259,16 @@ Explorer API can be used to explore datasets with advanced semantic, vector-simi
 from ultralytics import Explorer

 # create an Explorer object
-exp = Explorer(data='coco8.yaml', model='yolov8n.pt')
+exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
 exp.create_embeddings_table()

-similar = exp.get_similar(img='https://ultralytics.com/images/bus.jpg', limit=10)
+similar = exp.get_similar(img="https://ultralytics.com/images/bus.jpg", limit=10)
 print(similar.head())

 # Search using multiple indices
 similar = exp.get_similar(
-    img=['https://ultralytics.com/images/bus.jpg',
-         'https://ultralytics.com/images/bus.jpg'],
-    limit=10
-)
+    img=["https://ultralytics.com/images/bus.jpg", "https://ultralytics.com/images/bus.jpg"], limit=10
+)
 print(similar.head())
 ```
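Alongside `get_similar`, the Explorer API can render matches directly. A short sketch, assuming the documented `plot_similar` method, which returns a PIL image:

```python
from ultralytics import Explorer

exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
exp.create_embeddings_table()

# Render the most similar images instead of returning a table
plot = exp.plot_similar(img="https://ultralytics.com/images/bus.jpg", limit=10)
plot.show()
```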
@@ -280,14 +278,14 @@ Explorer API can be used to explore datasets with advanced semantic, vector-simi
 from ultralytics import Explorer

 # create an Explorer object
-exp = Explorer(data='coco8.yaml', model='yolov8n.pt')
+exp = Explorer(data="coco8.yaml", model="yolov8n.pt")
 exp.create_embeddings_table()

 similar = exp.get_similar(idx=1, limit=10)
 print(similar.head())

 # Search using multiple indices
-similar = exp.get_similar(idx=[1,10], limit=10)
+similar = exp.get_similar(idx=[1, 10], limit=10)
 print(similar.head())
 ```
@@ -300,7 +298,7 @@ Explorer API can be used to explore datasets with advanced semantic, vector-simi
 !!! Tip "Detection Trainer Example"

     ```python
-    from ultralytics.models.yolo import DetectionTrainer, DetectionValidator, DetectionPredictor
+    from ultralytics.models.yolo import DetectionPredictor, DetectionTrainer, DetectionValidator

     # trainer
     trainer = DetectionTrainer(overrides={})
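The trainer created above is typically run and then queried for its best checkpoint. A sketch following the customization docs pattern; the `overrides` values are illustrative:

```python
from ultralytics.models.yolo import DetectionTrainer

trainer = DetectionTrainer(overrides={"data": "coco8.yaml", "epochs": 1})
trainer.train()
trained_model = trainer.best  # path to the best checkpoint
```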
@@ -195,15 +195,15 @@ from ultralytics.data.utils import polygon2mask

 imgsz = (1080, 810)
 polygon = np.array(
-    [805, 392, 797, 400, ..., 808, 714, 808, 392], # (238, 2)
+    [805, 392, 797, 400, ..., 808, 714, 808, 392],  # (238, 2)
 )

 mask = polygon2mask(
-    imgsz, # tuple
-    [polygon], # input as list
-    color=255, # 8-bit binary
-    downsample_ratio=1
-)
+    imgsz,  # tuple
+    [polygon],  # input as list
+    color=255,  # 8-bit binary
+    downsample_ratio=1,
+)
 ```

 ## Bounding Boxes
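The returned mask is a NumPy array whose shape follows `imgsz` and `downsample_ratio`. A small self-contained sketch with a simple square polygon in place of the elided 238-point one (all coordinate values illustrative):

```python
import numpy as np

from ultralytics.data.utils import polygon2mask

imgsz = (1080, 810)
polygon = np.array([100, 100, 700, 100, 700, 600, 100, 600])  # flat x, y pairs of a square

mask = polygon2mask(imgsz, [polygon], color=255, downsample_ratio=1)
print(mask.shape)  # (1080, 810); dimensions shrink by downsample_ratio when it is > 1
```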
@@ -326,13 +326,15 @@ xywh
 ### All Bounding Box Conversions

 ```python
-from ultralytics.utils.ops import xywh2xyxy
-from ultralytics.utils.ops import xywhn2xyxy  # normalized → pixel
-from ultralytics.utils.ops import xyxy2xywhn  # pixel → normalized
-from ultralytics.utils.ops import xywh2ltwh  # xywh → top-left corner, w, h
-from ultralytics.utils.ops import xyxy2ltwh  # xyxy → top-left corner, w, h
-from ultralytics.utils.ops import ltwh2xywh
-from ultralytics.utils.ops import ltwh2xyxy
+from ultralytics.utils.ops import (
+    ltwh2xywh,
+    ltwh2xyxy,
+    xywh2ltwh,  # xywh → top-left corner, w, h
+    xywh2xyxy,
+    xywhn2xyxy,  # normalized → pixel
+    xyxy2ltwh,  # xyxy → top-left corner, w, h
+    xyxy2xywhn,  # pixel → normalized
+)
 ```

 See docstring for each function or visit the `ultralytics.utils.ops` [reference page](../reference/utils/ops.md) to read more about each function.
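Each converter operates elementwise on (N, 4) arrays or tensors. A quick check of `xywh2xyxy` with hand-computed values: a center at (50, 50) with width 20 and height 10 has corners at center ± half the size.

```python
import numpy as np

from ultralytics.utils.ops import xywh2xyxy

boxes = np.array([[50.0, 50.0, 20.0, 10.0]])  # center x, center y, width, height
print(xywh2xyxy(boxes))  # expected [[40. 45. 60. 55.]]
```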
@@ -394,17 +396,18 @@ from ultralytics.utils.plotting import Annotator, colors
 obb_names = {10: "small vehicle"}
 obb_image = cv.imread("datasets/dota8/images/train/P1142__1024__0___824.jpg")
 obb_boxes = np.array(
-    [[ 0, 635, 560, 919, 719, 1087, 420, 803, 261,],  # class-idx x1 y1 x2 y2 x3 y2 x4 y4
-    [ 0, 331, 19, 493, 260, 776, 70, 613, -171,],
-    [ 9, 869, 161, 886, 147, 851, 101, 833, 115,]
+    [
+        [0, 635, 560, 919, 719, 1087, 420, 803, 261],  # class-idx x1 y1 x2 y2 x3 y2 x4 y4
+        [0, 331, 19, 493, 260, 776, 70, 613, -171],
+        [9, 869, 161, 886, 147, 851, 101, 833, 115],
+    ]
 )
 ann = Annotator(
     obb_image,
     line_width=None,  # default auto-size
-    font_size=None, # default auto-size
-    font="Arial.ttf", # must be ImageFont compatible
-    pil=False, # use PIL, otherwise uses OpenCV
+    font_size=None,  # default auto-size
+    font="Arial.ttf",  # must be ImageFont compatible
+    pil=False,  # use PIL, otherwise uses OpenCV
 )
 for obb in obb_boxes:
     c_idx, *obb = obb
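The loop in this last hunk continues beyond the shown context by drawing each rotated box. A sketch of a plausible completion, assuming `Annotator.box_label` accepts four-point boxes via `rotated=True` as in recent `ultralytics` releases; a blank canvas stands in for the dataset image:

```python
import cv2 as cv
import numpy as np

from ultralytics.utils.plotting import Annotator, colors

obb_names = {10: "small vehicle"}
obb_image = np.zeros((1024, 1024, 3), dtype=np.uint8)  # placeholder image instead of the dataset file
obb_boxes = np.array(
    [
        [0, 635, 560, 919, 719, 1087, 420, 803, 261],
        [9, 869, 161, 886, 147, 851, 101, 833, 115],
    ]
)
ann = Annotator(obb_image, line_width=None, font_size=None, pil=False)
for obb in obb_boxes:
    c_idx, *obb = obb
    pts = np.array(obb).reshape(4, 2)  # four (x, y) corner points
    ann.box_label(pts, obb_names.get(int(c_idx), str(c_idx)), color=colors(int(c_idx), True), rotated=True)

image_with_obb = ann.result()
cv.imwrite("obb_annotated.jpg", image_with_obb)
```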