ultralytics 8.0.141 create new SettingsManager (#3790)
parent 42afe772d5
commit 20f5efd40a
215 changed files with 917 additions and 749 deletions
@@ -37,10 +37,10 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
Segment image with given prompts.
=== "Python"
```python
from ultralytics import SAM
# Load a model
model = SAM('sam_b.pt')
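The hunk above cuts the Python example off right after the model is loaded. As a minimal sketch of the prompt-based call this section describes, assuming the `bboxes`/`points`/`labels` keyword arguments and the coordinate values shown here purely for illustration:

```python
from ultralytics import SAM

# Load a model
model = SAM('sam_b.pt')

# Segment with a bounding-box prompt (x1, y1, x2, y2); coordinates are illustrative
model('path/to/image.jpg', bboxes=[439, 437, 524, 709])

# Segment with a point prompt; label 1 marks the point as foreground
model('path/to/image.jpg', points=[900, 370], labels=[1])
```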
@@ -59,10 +59,10 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
Segment the whole image.
=== "Python"
```python
from ultralytics import SAM
# Load a model
model = SAM('sam_b.pt')
@@ -73,7 +73,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
model('path/to/image.jpg')
```
=== "CLI"
```bash
# Run inference with a SAM model
yolo predict model=sam_b.pt source=path/to/image.jpg
@@ -86,7 +86,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
This way you can set the image once and run prompt inference multiple times without running the image encoder multiple times.
=== "Prompt inference"
```python
from ultralytics.models.sam import Predictor as SAMPredictor
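The hunk stops at the import, so here is a sketch of the set-the-image-once workflow described above; the `overrides` values, image path, and prompt coordinates are illustrative assumptions rather than lines taken from the diff:

```python
from ultralytics.models.sam import Predictor as SAMPredictor

# Create SAMPredictor with inference overrides (values are illustrative)
overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024, model='sam_b.pt')
predictor = SAMPredictor(overrides=overrides)

# Run the image encoder once and cache the embedding
predictor.set_image('path/to/image.jpg')

# Reuse the cached embedding for several prompts without re-encoding the image
results_box = predictor(bboxes=[439, 437, 524, 709])
results_point = predictor(points=[900, 370], labels=[1])

# Reset the cached image before moving on to a new one
predictor.reset_image()
```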
@@ -106,7 +106,7 @@ The Segment Anything Model can be employed for a multitude of downstream tasks t
Segment everything with additional args.
=== "Segment everything"
```python
from ultralytics.models.sam import Predictor as SAMPredictor
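Again the hunk ends at the import; a sketch of "segment everything with additional args" might look like the following, where `crop_n_layers` and `points_stride` are assumed automatic-mask-generation knobs and their values are illustrative, not copied from the diff:

```python
from ultralytics.models.sam import Predictor as SAMPredictor

# Create SAMPredictor (override values are illustrative)
overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024, model='sam_b.pt')
predictor = SAMPredictor(overrides=overrides)

# Segment the whole image; the extra args control the prompt grid density
# and multi-crop behaviour of automatic mask generation
results = predictor(source='path/to/image.jpg', crop_n_layers=1, points_stride=64)
```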
@@ -207,7 +207,7 @@ If you find SAM useful in your research or development work, please consider cit
```bibtex
@misc{kirillov2023segment,
title={Segment Anything},
author={Alexander Kirillov and Eric Mintun and Nikhila Ravi and Hanzi Mao and Chloe Rolland and Laura Gustafson and Tete Xiao and Spencer Whitehead and Alexander C. Berg and Wan-Yen Lo and Piotr Dollár and Ross Girshick},
year={2023},
eprint={2304.02643},
@@ -218,4 +218,4 @@ If you find SAM useful in your research or development work, please consider cit
We would like to express our gratitude to Meta AI for creating and maintaining this valuable resource for the computer vision community.
*keywords: Segment Anything, Segment Anything Model, SAM, Meta SAM, image segmentation, promptable segmentation, zero-shot performance, SA-1B dataset, advanced architecture, auto-annotation, Ultralytics, pre-trained models, SAM base, SAM large, instance segmentation, computer vision, AI, artificial intelligence, machine learning, data annotation, segmentation masks, detection model, YOLO detection model, bibtex, Meta AI.*