Reformat Markdown code blocks (#12795)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
This commit is contained in:
Glenn Jocher 2024-05-18 18:58:06 +02:00 committed by GitHub
parent 2af71d15a6
commit fceea033ad
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
128 changed files with 1067 additions and 1018 deletions

View file

@ -60,7 +60,7 @@ Run YOLOv8n benchmarks on all supported export formats including ONNX, TensorRT
from ultralytics.utils.benchmarks import benchmark
# Benchmark on GPU
benchmark(model='yolov8n.pt', data='coco8.yaml', imgsz=640, half=False, device=0)
benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device=0)
```
=== "CLI"

View file

@ -56,11 +56,11 @@ Export a YOLOv8n model to a different format like ONNX or TensorRT. See Argument
from ultralytics import YOLO
# Load a model
model = YOLO('yolov8n.pt') # load an official model
model = YOLO('path/to/best.pt') # load a custom trained model
model = YOLO("yolov8n.pt") # load an official model
model = YOLO("path/to/best.pt") # load a custom trained model
# Export the model
model.export(format='onnx')
model.export(format="onnx")
```
=== "CLI"

View file

@ -58,10 +58,10 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m
from ultralytics import YOLO
# Load a model
model = YOLO('yolov8n.pt') # pretrained YOLOv8n model
model = YOLO("yolov8n.pt") # pretrained YOLOv8n model
# Run batched inference on a list of images
results = model(['im1.jpg', 'im2.jpg']) # return a list of Results objects
results = model(["im1.jpg", "im2.jpg"]) # return a list of Results objects
# Process results list
for result in results:
@ -71,7 +71,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m
probs = result.probs # Probs object for classification outputs
obb = result.obb # Oriented boxes object for OBB outputs
result.show() # display to screen
result.save(filename='result.jpg') # save to disk
result.save(filename="result.jpg") # save to disk
```
=== "Return a generator with `stream=True`"
@ -80,10 +80,10 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m
from ultralytics import YOLO
# Load a model
model = YOLO('yolov8n.pt') # pretrained YOLOv8n model
model = YOLO("yolov8n.pt") # pretrained YOLOv8n model
# Run batched inference on a list of images
results = model(['im1.jpg', 'im2.jpg'], stream=True) # return a generator of Results objects
results = model(["im1.jpg", "im2.jpg"], stream=True) # return a generator of Results objects
# Process results generator
for result in results:
@ -93,7 +93,7 @@ Ultralytics YOLO models return either a Python list of `Results` objects, or a m
probs = result.probs # Probs object for classification outputs
obb = result.obb # Oriented boxes object for OBB outputs
result.show() # display to screen
result.save(filename='result.jpg') # save to disk
result.save(filename="result.jpg") # save to disk
```
## Inference Sources
@ -132,10 +132,10 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Define path to the image file
source = 'path/to/image.jpg'
source = "path/to/image.jpg"
# Run inference on the source
results = model(source) # list of Results objects
@ -148,10 +148,10 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Define current screenshot as source
source = 'screen'
source = "screen"
# Run inference on the source
results = model(source) # list of Results objects
@ -164,10 +164,10 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Define remote image or video URL
source = 'https://ultralytics.com/images/bus.jpg'
source = "https://ultralytics.com/images/bus.jpg"
# Run inference on the source
results = model(source) # list of Results objects
@ -181,10 +181,10 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Open an image using PIL
source = Image.open('path/to/image.jpg')
source = Image.open("path/to/image.jpg")
# Run inference on the source
results = model(source) # list of Results objects
@ -198,10 +198,10 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Read an image using OpenCV
source = cv2.imread('path/to/image.jpg')
source = cv2.imread("path/to/image.jpg")
# Run inference on the source
results = model(source) # list of Results objects
@ -215,10 +215,10 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Create a random numpy array of HWC shape (640, 640, 3) with values in range [0, 255] and type uint8
source = np.random.randint(low=0, high=255, size=(640, 640, 3), dtype='uint8')
source = np.random.randint(low=0, high=255, size=(640, 640, 3), dtype="uint8")
# Run inference on the source
results = model(source) # list of Results objects
@ -232,7 +232,7 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Create a random torch tensor of BCHW shape (1, 3, 640, 640) with values in range [0, 1] and type float32
source = torch.rand(1, 3, 640, 640, dtype=torch.float32)
@ -249,10 +249,10 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Define a path to a CSV file with images, URLs, videos and directories
source = 'path/to/file.csv'
source = "path/to/file.csv"
# Run inference on the source
results = model(source) # list of Results objects
@ -265,10 +265,10 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Define path to video file
source = 'path/to/video.mp4'
source = "path/to/video.mp4"
# Run inference on the source
results = model(source, stream=True) # generator of Results objects
@ -281,10 +281,10 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Define path to directory containing images and videos for inference
source = 'path/to/dir'
source = "path/to/dir"
# Run inference on the source
results = model(source, stream=True) # generator of Results objects
@ -297,13 +297,13 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Define a glob search for all JPG files in a directory
source = 'path/to/dir/*.jpg'
source = "path/to/dir/*.jpg"
# OR define a recursive glob search for all JPG files including subdirectories
source = 'path/to/dir/**/*.jpg'
source = "path/to/dir/**/*.jpg"
# Run inference on the source
results = model(source, stream=True) # generator of Results objects
@ -316,10 +316,10 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Define source as YouTube video URL
source = 'https://youtu.be/LNwODJXcvt4'
source = "https://youtu.be/LNwODJXcvt4"
# Run inference on the source
results = model(source, stream=True) # generator of Results objects
@ -332,13 +332,13 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Single stream with batch-size 1 inference
source = 'rtsp://example.com/media.mp4' # RTSP, RTMP, TCP or IP streaming address
source = "rtsp://example.com/media.mp4" # RTSP, RTMP, TCP or IP streaming address
# Multiple streams with batched inference (i.e. batch-size 8 for 8 streams)
source = 'path/to/list.streams' # *.streams text file with one streaming address per row
source = "path/to/list.streams" # *.streams text file with one streaming address per row
# Run inference on the source
results = model(source, stream=True) # generator of Results objects
@ -354,10 +354,10 @@ Below are code examples for using each source type:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Run inference on 'bus.jpg' with arguments
model.predict('bus.jpg', save=True, imgsz=320, conf=0.5)
model.predict("bus.jpg", save=True, imgsz=320, conf=0.5)
```
Inference arguments:
@ -445,11 +445,11 @@ All Ultralytics `predict()` calls will return a list of `Results` objects:
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Run inference on an image
results = model('bus.jpg') # list of 1 Results object
results = model(['bus.jpg', 'zidane.jpg']) # list of 2 Results objects
results = model("bus.jpg") # list of 1 Results object
results = model(["bus.jpg", "zidane.jpg"]) # list of 2 Results objects
```
`Results` objects have the following attributes:
@ -497,10 +497,10 @@ For more details see the [`Results` class documentation](../reference/engine/res
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Run inference on an image
results = model('bus.jpg') # results list
results = model("bus.jpg") # results list
# View results
for r in results:
@ -535,10 +535,10 @@ For more details see the [`Boxes` class documentation](../reference/engine/resul
from ultralytics import YOLO
# Load a pretrained YOLOv8n-seg Segment model
model = YOLO('yolov8n-seg.pt')
model = YOLO("yolov8n-seg.pt")
# Run inference on an image
results = model('bus.jpg') # results list
results = model("bus.jpg") # results list
# View results
for r in results:
@ -568,10 +568,10 @@ For more details see the [`Masks` class documentation](../reference/engine/resul
from ultralytics import YOLO
# Load a pretrained YOLOv8n-pose Pose model
model = YOLO('yolov8n-pose.pt')
model = YOLO("yolov8n-pose.pt")
# Run inference on an image
results = model('bus.jpg') # results list
results = model("bus.jpg") # results list
# View results
for r in results:
@ -602,10 +602,10 @@ For more details see the [`Keypoints` class documentation](../reference/engine/r
from ultralytics import YOLO
# Load a pretrained YOLOv8n-cls Classify model
model = YOLO('yolov8n-cls.pt')
model = YOLO("yolov8n-cls.pt")
# Run inference on an image
results = model('bus.jpg') # results list
results = model("bus.jpg") # results list
# View results
for r in results:
@ -637,10 +637,10 @@ For more details see the [`Probs` class documentation](../reference/engine/resul
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n-obb.pt')
model = YOLO("yolov8n-obb.pt")
# Run inference on an image
results = model('bus.jpg') # results list
results = model("bus.jpg") # results list
# View results
for r in results:
@ -676,22 +676,22 @@ The `plot()` method in `Results` objects facilitates visualization of prediction
from ultralytics import YOLO
# Load a pretrained YOLOv8n model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Run inference on 'bus.jpg'
results = model(['bus.jpg', 'zidane.jpg']) # results list
results = model(["bus.jpg", "zidane.jpg"]) # results list
# Visualize the results
for i, r in enumerate(results):
# Plot results image
im_bgr = r.plot() # BGR-order numpy array
im_rgb = Image.fromarray(im_bgr[..., ::-1]) # RGB-order PIL image
# Show results to screen (in supported environments)
r.show()
# Save results to disk
r.save(filename=f'results{i}.jpg')
r.save(filename=f"results{i}.jpg")
```
### `plot()` Method Parameters
@ -727,9 +727,11 @@ When using YOLO models in a multi-threaded application, it's important to instan
Instantiate a single model inside each thread for thread-safe inference:
```python
from ultralytics import YOLO
from threading import Thread
from ultralytics import YOLO
def thread_safe_predict(image_path):
"""Performs thread-safe prediction on an image using a locally instantiated YOLO model."""
local_model = YOLO("yolov8n.pt")
@ -755,7 +757,7 @@ Here's a Python script using OpenCV (`cv2`) and YOLOv8 to run inference on video
from ultralytics import YOLO
# Load the YOLOv8 model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Open the video file
video_path = "path/to/your/video/file.mp4"

View file

@ -70,14 +70,14 @@ To run the tracker on video streams, use a trained Detect, Segment or Pose model
from ultralytics import YOLO
# Load an official or custom model
model = YOLO('yolov8n.pt') # Load an official Detect model
model = YOLO('yolov8n-seg.pt') # Load an official Segment model
model = YOLO('yolov8n-pose.pt') # Load an official Pose model
model = YOLO('path/to/best.pt') # Load a custom trained model
model = YOLO("yolov8n.pt") # Load an official Detect model
model = YOLO("yolov8n-seg.pt") # Load an official Segment model
model = YOLO("yolov8n-pose.pt") # Load an official Pose model
model = YOLO("path/to/best.pt") # Load a custom trained model
# Perform tracking with the model
results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True) # Tracking with default tracker
results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True, tracker="bytetrack.yaml") # Tracking with ByteTrack tracker
results = model.track("https://youtu.be/LNwODJXcvt4", show=True) # Tracking with default tracker
results = model.track("https://youtu.be/LNwODJXcvt4", show=True, tracker="bytetrack.yaml") # with ByteTrack
```
=== "CLI"
@ -113,7 +113,7 @@ Tracking configuration shares properties with Predict mode, such as `conf`, `iou
from ultralytics import YOLO
# Configure the tracking parameters and run the tracker
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
results = model.track(source="https://youtu.be/LNwODJXcvt4", conf=0.3, iou=0.5, show=True)
```
@ -136,8 +136,8 @@ Ultralytics also allows you to use a modified tracker configuration file. To do
from ultralytics import YOLO
# Load the model and run the tracker with a custom configuration file
model = YOLO('yolov8n.pt')
results = model.track(source="https://youtu.be/LNwODJXcvt4", tracker='custom_tracker.yaml')
model = YOLO("yolov8n.pt")
results = model.track(source="https://youtu.be/LNwODJXcvt4", tracker="custom_tracker.yaml")
```
=== "CLI"
@ -162,7 +162,7 @@ Here is a Python script using OpenCV (`cv2`) and YOLOv8 to run object tracking o
from ultralytics import YOLO
# Load the YOLOv8 model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Open the video file
video_path = "path/to/video.mp4"
@ -210,11 +210,10 @@ In the following example, we demonstrate how to utilize YOLOv8's tracking capabi
import cv2
import numpy as np
from ultralytics import YOLO
# Load the YOLOv8 model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Open the video file
video_path = "path/to/video.mp4"
@ -284,6 +283,7 @@ Finally, after all threads have completed their task, the windows displaying the
```python
import threading
import cv2
from ultralytics import YOLO
@ -318,7 +318,7 @@ Finally, after all threads have completed their task, the windows displaying the
cv2.imshow(f"Tracking_Stream_{file_index}", res_plotted)
key = cv2.waitKey(1)
if key == ord('q'):
if key == ord("q"):
break
# Release video sources
@ -326,8 +326,8 @@ Finally, after all threads have completed their task, the windows displaying the
# Load the models
model1 = YOLO('yolov8n.pt')
model2 = YOLO('yolov8n-seg.pt')
model1 = YOLO("yolov8n.pt")
model2 = YOLO("yolov8n-seg.pt")
# Define the video files for the trackers
video_file1 = "path/to/video1.mp4" # Path to video file, 0 for webcam

View file

@ -59,12 +59,12 @@ Train YOLOv8n on the COCO8 dataset for 100 epochs at image size 640. The trainin
from ultralytics import YOLO
# Load a model
model = YOLO('yolov8n.yaml') # build a new model from YAML
model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training)
model = YOLO('yolov8n.yaml').load('yolov8n.pt') # build from YAML and transfer weights
model = YOLO("yolov8n.yaml") # build a new model from YAML
model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training)
model = YOLO("yolov8n.yaml").load("yolov8n.pt") # build from YAML and transfer weights
# Train the model
results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
results = model.train(data="coco8.yaml", epochs=100, imgsz=640)
```
=== "CLI"
@ -94,10 +94,10 @@ Multi-GPU training allows for more efficient utilization of available hardware r
from ultralytics import YOLO
# Load a model
model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training)
model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training)
# Train the model with 2 GPUs
results = model.train(data='coco8.yaml', epochs=100, imgsz=640, device=[0, 1])
results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device=[0, 1])
```
=== "CLI"
@@ -121,10 +121,10 @@ To enable training on Apple M1 and M2 chips, you should specify 'mps' as your device
from ultralytics import YOLO
# Load a model
model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training)
model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training)
# Train the model with the Apple-silicon MPS device
results = model.train(data='coco8.yaml', epochs=100, imgsz=640, device='mps')
results = model.train(data="coco8.yaml", epochs=100, imgsz=640, device="mps")
```
=== "CLI"
@ -154,7 +154,7 @@ Below is an example of how to resume an interrupted training using Python and vi
from ultralytics import YOLO
# Load a model
model = YOLO('path/to/last.pt') # load a partially trained model
model = YOLO("path/to/last.pt") # load a partially trained model
# Resume training
results = model.train(resume=True)

View file

@ -57,15 +57,15 @@ Validate trained YOLOv8n model accuracy on the COCO8 dataset. No argument need t
from ultralytics import YOLO
# Load a model
model = YOLO('yolov8n.pt') # load an official model
model = YOLO('path/to/best.pt') # load a custom model
model = YOLO("yolov8n.pt") # load an official model
model = YOLO("path/to/best.pt") # load a custom model
# Validate the model
metrics = model.val() # no arguments needed, dataset and settings remembered
metrics.box.map # map50-95
metrics.box.map # map50-95
metrics.box.map50 # map50
metrics.box.map75 # map75
metrics.box.maps # a list contains map50-95 of each category
metrics.box.maps # a list contains map50-95 of each category
```
=== "CLI"
@ -108,17 +108,12 @@ The below examples showcase YOLO model validation with custom arguments in Pytho
```python
from ultralytics import YOLO
# Load a model
model = YOLO('yolov8n.pt')
model = YOLO("yolov8n.pt")
# Customize validation settings
validation_results = model.val(data='coco8.yaml',
imgsz=640,
batch=16,
conf=0.25,
iou=0.6,
device='0')
validation_results = model.val(data="coco8.yaml", imgsz=640, batch=16, conf=0.25, iou=0.6, device="0")
```
=== "CLI"