Reformat Markdown code blocks (#12795)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Author: Glenn Jocher
Date: 2024-05-18 18:58:06 +02:00 (committed by GitHub)
parent 2af71d15a6
commit fceea033ad
128 changed files with 1067 additions and 1018 deletions
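
The diff below is mechanical: string literals in the docs' Python snippets move from single to double quotes, and a couple of imports are reordered. The new style matches Black's default quoting; as a rough illustration (assuming a Black-style formatter — the commit itself does not name the tooling), the same normalization can be reproduced like this:

```python
# Sketch: normalize a docs snippet with Black (assumed tooling, not confirmed by the commit)
import black

snippet = "model = FastSAM('FastSAM-s.pt')\n"
print(black.format_str(snippet, mode=black.Mode()))
# -> model = FastSAM("FastSAM-s.pt")
```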

@@ -56,16 +56,16 @@ To perform object detection on an image, use the `predict` method as shown below
from ultralytics.models.fastsam import FastSAMPrompt

# Define an inference source
-source = 'path/to/bus.jpg'
+source = "path/to/bus.jpg"

# Create a FastSAM model
-model = FastSAM('FastSAM-s.pt') # or FastSAM-x.pt
+model = FastSAM("FastSAM-s.pt") # or FastSAM-x.pt

# Run inference on an image
-everything_results = model(source, device='cpu', retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)
+everything_results = model(source, device="cpu", retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)

# Prepare a Prompt Process object
-prompt_process = FastSAMPrompt(source, everything_results, device='cpu')
+prompt_process = FastSAMPrompt(source, everything_results, device="cpu")

# Everything prompt
ann = prompt_process.everything_prompt()
@@ -74,13 +74,13 @@ To perform object detection on an image, use the `predict` method as shown below
ann = prompt_process.box_prompt(bbox=[200, 200, 300, 300])

# Text prompt
-ann = prompt_process.text_prompt(text='a photo of a dog')
+ann = prompt_process.text_prompt(text="a photo of a dog")

# Point prompt
# points default [[0,0]] [[x1,y1],[x2,y2]]
# point_label default [0] [1,0] 0:background, 1:foreground
ann = prompt_process.point_prompt(points=[[200, 200]], pointlabel=[1])
-prompt_process.plot(annotations=ann, output='./')
+prompt_process.plot(annotations=ann, output="./")
```

=== "CLI"
@@ -104,10 +104,10 @@ Validation of the model on a dataset can be done as follows:
from ultralytics import FastSAM

# Create a FastSAM model
-model = FastSAM('FastSAM-s.pt') # or FastSAM-x.pt
+model = FastSAM("FastSAM-s.pt") # or FastSAM-x.pt

# Validate the model
-results = model.val(data='coco8-seg.yaml')
+results = model.val(data="coco8-seg.yaml")
```

=== "CLI"
@@ -131,7 +131,7 @@ To perform object tracking on an image, use the `track` method as shown below:
from ultralytics import FastSAM

# Create a FastSAM model
-model = FastSAM('FastSAM-s.pt') # or FastSAM-x.pt
+model = FastSAM("FastSAM-s.pt") # or FastSAM-x.pt

# Track with a FastSAM model on a video
results = model.track(source="path/to/video.mp4", imgsz=640)
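
Stitched together from the hunks above, the reformatted FastSAM predict example reads roughly as follows; the `FastSAM` import is not visible in this diff and is assumed here:

```python
from ultralytics import FastSAM  # assumed: import not shown in the hunk
from ultralytics.models.fastsam import FastSAMPrompt

# Run FastSAM over the whole image, then refine with prompts
source = "path/to/bus.jpg"
model = FastSAM("FastSAM-s.pt")  # or FastSAM-x.pt
everything_results = model(source, device="cpu", retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)

prompt_process = FastSAMPrompt(source, everything_results, device="cpu")
ann = prompt_process.everything_prompt()  # everything prompt
ann = prompt_process.box_prompt(bbox=[200, 200, 300, 300])  # box prompt
ann = prompt_process.text_prompt(text="a photo of a dog")  # text prompt
ann = prompt_process.point_prompt(points=[[200, 200]], pointlabel=[1])  # point prompt
prompt_process.plot(annotations=ann, output="./")
```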

@@ -53,16 +53,16 @@ Note the below example is for YOLOv8 [Detect](../tasks/detect.md) models for obj
from ultralytics import YOLO

# Load a COCO-pretrained YOLOv8n model
-model = YOLO('yolov8n.pt')
+model = YOLO("yolov8n.pt")

# Display model information (optional)
model.info()

# Train the model on the COCO8 example dataset for 100 epochs
-results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data="coco8.yaml", epochs=100, imgsz=640)

# Run inference with the YOLOv8n model on the 'bus.jpg' image
-results = model('path/to/bus.jpg')
+results = model("path/to/bus.jpg")
```

=== "CLI"

@@ -77,10 +77,10 @@ You can download the model [here](https://github.com/ChaoningZhang/MobileSAM/blo
from ultralytics import SAM

# Load the model
-model = SAM('mobile_sam.pt')
+model = SAM("mobile_sam.pt")

# Predict a segment based on a point prompt
-model.predict('ultralytics/assets/zidane.jpg', points=[900, 370], labels=[1])
+model.predict("ultralytics/assets/zidane.jpg", points=[900, 370], labels=[1])
```

### Box Prompt
@@ -93,10 +93,10 @@ You can download the model [here](https://github.com/ChaoningZhang/MobileSAM/blo
from ultralytics import SAM

# Load the model
-model = SAM('mobile_sam.pt')
+model = SAM("mobile_sam.pt")

# Predict a segment based on a box prompt
-model.predict('ultralytics/assets/zidane.jpg', bboxes=[439, 437, 524, 709])
+model.predict("ultralytics/assets/zidane.jpg", bboxes=[439, 437, 524, 709])
```

We have implemented `MobileSAM` and `SAM` using the same API. For more usage information, please see the [SAM page](sam.md).
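
The two MobileSAM hunks differ only in the prompt argument; combined into one sketch (coordinates are the docs' placeholders):

```python
from ultralytics import SAM

model = SAM("mobile_sam.pt")

# Point prompt: label 1 marks the point as foreground, 0 as background
model.predict("ultralytics/assets/zidane.jpg", points=[900, 370], labels=[1])

# Box prompt: [x1, y1, x2, y2] in pixels
model.predict("ultralytics/assets/zidane.jpg", bboxes=[439, 437, 524, 709])
```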

@@ -48,16 +48,16 @@ This example provides simple RT-DETR training and inference examples. For full d
from ultralytics import RTDETR

# Load a COCO-pretrained RT-DETR-l model
-model = RTDETR('rtdetr-l.pt')
+model = RTDETR("rtdetr-l.pt")

# Display model information (optional)
model.info()

# Train the model on the COCO8 example dataset for 100 epochs
-results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data="coco8.yaml", epochs=100, imgsz=640)

# Run inference with the RT-DETR-l model on the 'bus.jpg' image
-results = model('path/to/bus.jpg')
+results = model("path/to/bus.jpg")
```

=== "CLI"

@@ -50,16 +50,16 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
from ultralytics import SAM

# Load a model
-model = SAM('sam_b.pt')
+model = SAM("sam_b.pt")

# Display model information (optional)
model.info()

# Run inference with bboxes prompt
-model('ultralytics/assets/zidane.jpg', bboxes=[439, 437, 524, 709])
+model("ultralytics/assets/zidane.jpg", bboxes=[439, 437, 524, 709])

# Run inference with points prompt
-model('ultralytics/assets/zidane.jpg', points=[900, 370], labels=[1])
+model("ultralytics/assets/zidane.jpg", points=[900, 370], labels=[1])
```

!!! Example "Segment everything"
@@ -72,13 +72,13 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
from ultralytics import SAM

# Load a model
-model = SAM('sam_b.pt')
+model = SAM("sam_b.pt")

# Display model information (optional)
model.info()

# Run inference
-model('path/to/image.jpg')
+model("path/to/image.jpg")
```

=== "CLI"
@@ -100,7 +100,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
from ultralytics.models.sam import Predictor as SAMPredictor

# Create SAMPredictor
-overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024, model="mobile_sam.pt")
+overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model="mobile_sam.pt")
predictor = SAMPredictor(overrides=overrides)

# Set image
@@ -121,7 +121,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
from ultralytics.models.sam import Predictor as SAMPredictor

# Create SAMPredictor
-overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024, model="mobile_sam.pt")
+overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model="mobile_sam.pt")
predictor = SAMPredictor(overrides=overrides)

# Segment with additional args
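
Both `SAMPredictor` hunks stop right after the overrides; the set-image and prompt calls that follow in the docs are not shown in this diff, so the completion below is an assumption based on the surrounding SAM documentation:

```python
from ultralytics.models.sam import Predictor as SAMPredictor

# Create SAMPredictor with inference-time overrides
overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model="mobile_sam.pt")
predictor = SAMPredictor(overrides=overrides)

# Assumed continuation (from the docs, not this hunk): embed the image once,
# reuse it across prompts, then reset
predictor.set_image("ultralytics/assets/zidane.jpg")
results = predictor(bboxes=[439, 437, 524, 709])
predictor.reset_image()
```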
@@ -150,27 +150,27 @@ Tests run on a 2023 Apple M2 Macbook with 16GB of RAM. To reproduce this test:
=== "Python"

```python
-from ultralytics import FastSAM, SAM, YOLO
+from ultralytics import SAM, YOLO, FastSAM

# Profile SAM-b
-model = SAM('sam_b.pt')
+model = SAM("sam_b.pt")
model.info()
-model('ultralytics/assets')
+model("ultralytics/assets")

# Profile MobileSAM
-model = SAM('mobile_sam.pt')
+model = SAM("mobile_sam.pt")
model.info()
-model('ultralytics/assets')
+model("ultralytics/assets")

# Profile FastSAM-s
-model = FastSAM('FastSAM-s.pt')
+model = FastSAM("FastSAM-s.pt")
model.info()
-model('ultralytics/assets')
+model("ultralytics/assets")

# Profile YOLOv8n-seg
-model = YOLO('yolov8n-seg.pt')
+model = YOLO("yolov8n-seg.pt")
model.info()
-model('ultralytics/assets')
+model("ultralytics/assets")
```
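
The profiling block above prints model info and runs each model over `ultralytics/assets`, but produces no timings by itself; a minimal wall-clock wrapper (our addition, not part of the docs) could look like:

```python
import time

from ultralytics import SAM


def timed_run(model, source="ultralytics/assets"):
    """Return seconds elapsed for one pass of `model` over `source`."""
    start = time.perf_counter()
    model(source)
    return time.perf_counter() - start


print(f"SAM-b: {timed_run(SAM('sam_b.pt')):.2f}s")
```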
## Auto-Annotation: A Quick Path to Segmentation Datasets
@@ -188,7 +188,7 @@ To auto-annotate your dataset with the Ultralytics framework, use the `auto_anno
```python
from ultralytics.data.annotator import auto_annotate

-auto_annotate(data="path/to/images", det_model="yolov8x.pt", sam_model='sam_b.pt')
+auto_annotate(data="path/to/images", det_model="yolov8x.pt", sam_model="sam_b.pt")
```

| Argument | Type | Description | Default |
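
`auto_annotate` chains a detector with SAM: detector boxes become SAM box prompts, and the resulting masks are written out as segmentation labels. A condensed sketch of that flow (simplified from the annotator source around this commit; label writing omitted):

```python
from ultralytics import SAM, YOLO

det_model = YOLO("yolov8x.pt")
sam_model = SAM("sam_b.pt")

for result in det_model("path/to/images", stream=True):
    boxes = result.boxes.xyxy  # detector proposals
    if len(boxes):
        # Use the boxes as prompts; masks come back as normalized polygons
        sam_results = sam_model(result.orig_img, bboxes=boxes, verbose=False)
        segments = sam_results[0].masks.xyn
```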

@@ -55,16 +55,16 @@ In this example we validate YOLO-NAS-s on the COCO8 dataset.
from ultralytics import NAS

# Load a COCO-pretrained YOLO-NAS-s model
-model = NAS('yolo_nas_s.pt')
+model = NAS("yolo_nas_s.pt")

# Display model information (optional)
model.info()

# Validate the model on the COCO8 example dataset
-results = model.val(data='coco8.yaml')
+results = model.val(data="coco8.yaml")

# Run inference with the YOLO-NAS-s model on the 'bus.jpg' image
-results = model('path/to/bus.jpg')
+results = model("path/to/bus.jpg")
```

=== "CLI"

@@ -92,13 +92,13 @@ Object detection is straightforward with the `train` method, as illustrated below
from ultralytics import YOLOWorld

# Load a pretrained YOLOv8s-worldv2 model
-model = YOLOWorld('yolov8s-worldv2.pt')
+model = YOLOWorld("yolov8s-worldv2.pt")

# Train the model on the COCO8 example dataset for 100 epochs
-results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data="coco8.yaml", epochs=100, imgsz=640)

# Run inference with the YOLOv8n model on the 'bus.jpg' image
-results = model('path/to/bus.jpg')
+results = model("path/to/bus.jpg")
```

=== "CLI"
@@ -120,10 +120,10 @@ Object detection is straightforward with the `predict` method, as illustrated below
from ultralytics import YOLOWorld

# Initialize a YOLO-World model
-model = YOLOWorld('yolov8s-world.pt') # or select yolov8m/l-world.pt for different sizes
+model = YOLOWorld("yolov8s-world.pt") # or select yolov8m/l-world.pt for different sizes

# Execute inference with the YOLOv8s-world model on the specified image
-results = model.predict('path/to/image.jpg')
+results = model.predict("path/to/image.jpg")

# Show results
results[0].show()
@@ -150,10 +150,10 @@ Model validation on a dataset is streamlined as follows:
from ultralytics import YOLO

# Create a YOLO-World model
-model = YOLO('yolov8s-world.pt') # or select yolov8m/l-world.pt for different sizes
+model = YOLO("yolov8s-world.pt") # or select yolov8m/l-world.pt for different sizes

# Conduct model validation on the COCO8 example dataset
-metrics = model.val(data='coco8.yaml')
+metrics = model.val(data="coco8.yaml")
```

=== "CLI"
@@ -175,7 +175,7 @@ Object tracking with YOLO-World model on a video/images is streamlined as follows:
from ultralytics import YOLO

# Create a YOLO-World model
-model = YOLO('yolov8s-world.pt') # or select yolov8m/l-world.pt for different sizes
+model = YOLO("yolov8s-world.pt") # or select yolov8m/l-world.pt for different sizes

# Track with a YOLO-World model on a video
results = model.track(source="path/to/video.mp4")
@@ -208,13 +208,13 @@ For instance, if your application only requires detecting 'person' and 'bus' obj
from ultralytics import YOLO

# Initialize a YOLO-World model
-model = YOLO('yolov8s-world.pt') # or choose yolov8m/l-world.pt
+model = YOLO("yolov8s-world.pt") # or choose yolov8m/l-world.pt

# Define custom classes
model.set_classes(["person", "bus"])

# Execute prediction for specified categories on an image
-results = model.predict('path/to/image.jpg')
+results = model.predict("path/to/image.jpg")

# Show results
results[0].show()
@@ -232,8 +232,8 @@ You can also save a model after setting custom classes. By doing this you create
from ultralytics import YOLO

# Initialize a YOLO-World model
-model = YOLO('yolov8s-world.pt') # or select yolov8m/l-world.pt
+model = YOLO("yolov8s-world.pt") # or select yolov8m/l-world.pt

# Define custom classes
model.set_classes(["person", "bus"])
@@ -247,10 +247,10 @@ You can also save a model after setting custom classes. By doing this you create
from ultralytics import YOLO

# Load your custom model
-model = YOLO('custom_yolov8s.pt')
+model = YOLO("custom_yolov8s.pt")

# Run inference to detect your custom classes
-results = model.predict('path/to/image.jpg')
+results = model.predict("path/to/image.jpg")

# Show results
results[0].show()
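
The last two hunks describe a save-then-reload cycle; end to end it is roughly the following (the `save` call sits between the two hunks and is not visible in this diff, so it is assumed here):

```python
from ultralytics import YOLO

# Restrict the open-vocabulary model to a fixed set of classes and persist it
model = YOLO("yolov8s-world.pt")
model.set_classes(["person", "bus"])
model.save("custom_yolov8s.pt")  # assumed: not shown in the hunks

# The saved checkpoint now behaves like a two-class detector
model = YOLO("custom_yolov8s.pt")
results = model.predict("path/to/image.jpg")
results[0].show()
```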
@@ -294,8 +294,8 @@ This approach provides a powerful means of customizing state-of-the-art object d
=== "Python"

```python
-from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
from ultralytics import YOLOWorld
+from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch

data = dict(
    train=dict(
@@ -315,7 +315,6 @@ This approach provides a powerful means of customizing state-of-the-art object d
)

model = YOLOWorld("yolov8s-worldv2.yaml")
model.train(data=data, batch=128, epochs=100, trainer=WorldTrainerFromScratch)
```
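
The `data` dict is cut off in the hunk above; in the YOLO-World docs it mixes standard detection YAMLs with grounding annotations. The shape below is reconstructed from those docs and should be treated as an assumption, not part of this diff:

```python
from ultralytics import YOLOWorld
from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch

# Assumed structure of the elided dict: detection datasets plus grounding data
data = dict(
    train=dict(
        yolo_data=["Objects365.yaml"],
        grounding_data=[
            dict(
                img_path="flickr30k/images",
                json_file="flickr30k/final_flickr_separateGT_train.json",
            ),
        ],
    ),
    val=dict(yolo_data=["lvis.yaml"]),
)

model = YOLOWorld("yolov8s-worldv2.yaml")
model.train(data=data, batch=128, epochs=100, trainer=WorldTrainerFromScratch)
```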
## Citations and Acknowledgements

@@ -54,16 +54,16 @@ This example provides simple YOLOv3 training and inference examples. For full do
from ultralytics import YOLO

# Load a COCO-pretrained YOLOv3n model
-model = YOLO('yolov3n.pt')
+model = YOLO("yolov3n.pt")

# Display model information (optional)
model.info()

# Train the model on the COCO8 example dataset for 100 epochs
-results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data="coco8.yaml", epochs=100, imgsz=640)

# Run inference with the YOLOv3n model on the 'bus.jpg' image
-results = model('path/to/bus.jpg')
+results = model("path/to/bus.jpg")
```

=== "CLI"

@@ -66,16 +66,16 @@ This example provides simple YOLOv5 training and inference examples. For full do
from ultralytics import YOLO

# Load a COCO-pretrained YOLOv5n model
-model = YOLO('yolov5n.pt')
+model = YOLO("yolov5n.pt")

# Display model information (optional)
model.info()

# Train the model on the COCO8 example dataset for 100 epochs
-results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data="coco8.yaml", epochs=100, imgsz=640)

# Run inference with the YOLOv5n model on the 'bus.jpg' image
-results = model('path/to/bus.jpg')
+results = model("path/to/bus.jpg")
```

=== "CLI"

@@ -46,16 +46,16 @@ This example provides simple YOLOv6 training and inference examples. For full do
from ultralytics import YOLO

# Build a YOLOv6n model from scratch
-model = YOLO('yolov6n.yaml')
+model = YOLO("yolov6n.yaml")

# Display model information (optional)
model.info()

# Train the model on the COCO8 example dataset for 100 epochs
-results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data="coco8.yaml", epochs=100, imgsz=640)

# Run inference with the YOLOv6n model on the 'bus.jpg' image
-results = model('path/to/bus.jpg')
+results = model("path/to/bus.jpg")
```

=== "CLI"

@@ -139,16 +139,16 @@ Note the below example is for YOLOv8 [Detect](../tasks/detect.md) models for obj
from ultralytics import YOLO

# Load a COCO-pretrained YOLOv8n model
-model = YOLO('yolov8n.pt')
+model = YOLO("yolov8n.pt")

# Display model information (optional)
model.info()

# Train the model on the COCO8 example dataset for 100 epochs
-results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data="coco8.yaml", epochs=100, imgsz=640)

# Run inference with the YOLOv8n model on the 'bus.jpg' image
-results = model('path/to/bus.jpg')
+results = model("path/to/bus.jpg")
```

=== "CLI"

@@ -110,19 +110,19 @@ This example provides simple YOLOv9 training and inference examples. For full do
from ultralytics import YOLO

# Build a YOLOv9c model from scratch
-model = YOLO('yolov9c.yaml')
+model = YOLO("yolov9c.yaml")

# Build a YOLOv9c model from pretrained weight
-model = YOLO('yolov9c.pt')
+model = YOLO("yolov9c.pt")

# Display model information (optional)
model.info()

# Train the model on the COCO8 example dataset for 100 epochs
-results = model.train(data='coco8.yaml', epochs=100, imgsz=640)
+results = model.train(data="coco8.yaml", epochs=100, imgsz=640)

# Run inference with the YOLOv9c model on the 'bus.jpg' image
-results = model('path/to/bus.jpg')
+results = model("path/to/bus.jpg")
```

=== "CLI"