ultralytics 8.0.93 HUB docs and JSON2YOLO converter (#2431)

Co-authored-by: Ayush Chaurasia <ayush.chaurarsia@gmail.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: 李际朝 <tubkninght@gmail.com>
Co-authored-by: Danny Kim <imbird0312@gmail.com>
Glenn Jocher 2023-05-06 01:12:43 +02:00 committed by GitHub
parent 0ebd3f2959
commit ddb354ce5e
34 changed files with 1107 additions and 759 deletions

ultralytics/yolo/data/annotator.py

@@ -8,7 +8,6 @@ from ultralytics.yolo.utils.torch_utils import select_device
 def auto_annotate(data, det_model='yolov8x.pt', sam_model='sam_b.pt', device='', output_dir=None):
     """
     Automatically annotates images using a YOLO object detection model and a SAM segmentation model.
-
     Args:
         data (str): Path to a folder containing images to be annotated.
         det_model (str, optional): Pre-trained YOLO detection model. Defaults to 'yolov8x.pt'.
@@ -16,7 +15,6 @@ def auto_annotate(data, det_model='yolov8x.pt', sam_model='sam_b.pt', device='',
         device (str, optional): Device to run the models on. Defaults to an empty string (CPU or GPU, if available).
         output_dir (str, None, optional): Directory to save the annotated results.
             Defaults to a 'labels' folder in the same directory as 'data'.
-
     """
     device = select_device(device)
     det_model = YOLO(det_model)
@@ -34,21 +32,22 @@ def auto_annotate(data, det_model='yolov8x.pt', sam_model='sam_b.pt', device='',
     for result in det_results:
         boxes = result.boxes.xyxy  # Boxes object for bbox outputs
         class_ids = result.boxes.cls.int().tolist()  # noqa
-        prompt_predictor.set_image(result.orig_img)
-        masks, _, _ = prompt_predictor.predict_torch(
-            point_coords=None,
-            point_labels=None,
-            boxes=prompt_predictor.transform.apply_boxes_torch(boxes, result.orig_shape[:2]),
-            multimask_output=False,
-        )
+        if len(class_ids):
+            prompt_predictor.set_image(result.orig_img)
+            masks, _, _ = prompt_predictor.predict_torch(
+                point_coords=None,
+                point_labels=None,
+                boxes=prompt_predictor.transform.apply_boxes_torch(boxes, result.orig_shape[:2]),
+                multimask_output=False,
+            )
-        result.update(masks=masks.squeeze(1))
-        segments = result.masks.xyn  # noqa
+            result.update(masks=masks.squeeze(1))
+            segments = result.masks.xyn  # noqa
-        with open(f'{str(Path(output_dir) / Path(result.path).stem)}.txt', 'w') as f:
-            for i in range(len(segments)):
-                s = segments[i]
-                if len(s) == 0:
-                    continue
-                segment = map(str, segments[i].reshape(-1).tolist())
-                f.write(f'{class_ids[i]} ' + ' '.join(segment) + '\n')
+            with open(str(Path(output_dir) / Path(result.path).stem) + '.txt', 'w') as f:
+                for i in range(len(segments)):
+                    s = segments[i]
+                    if len(s) == 0:
+                        continue
+                    segment = map(str, segments[i].reshape(-1).tolist())
+                    f.write(f'{class_ids[i]} ' + ' '.join(segment) + '\n')
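Note for readers arriving from the docs: the function this diff touches can be driven end to end in a few lines. This is a minimal usage sketch, assuming the 8.0.93 import path (ultralytics.yolo.data.annotator) and placeholder image paths; check your installed version for the current module location.

    from ultralytics.yolo.data.annotator import auto_annotate

    # Detect with YOLOv8, prompt SAM with the resulting boxes, and write one
    # YOLO-format segmentation label file per image. With output_dir=None the
    # labels land in a 'labels' folder next to the data folder (see docstring).
    auto_annotate(data='path/to/images', det_model='yolov8x.pt', sam_model='sam_b.pt')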
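The writer loop above emits one line per instance: a class id followed by the flattened, normalized polygon coordinates from result.masks.xyn. A small sketch of reading that format back, using only what the diff shows; read_segments is a hypothetical helper, not part of ultralytics.

    from pathlib import Path

    def read_segments(label_file):
        """Parse '<class_id> x1 y1 x2 y2 ...' lines into (class_id, polygon) pairs."""
        annotations = []
        for line in Path(label_file).read_text().splitlines():
            parts = line.split()
            if not parts:  # skip blank lines
                continue
            class_id = int(parts[0])
            coords = list(map(float, parts[1:]))             # flat, normalized to 0-1
            polygon = list(zip(coords[0::2], coords[1::2]))  # [(x1, y1), (x2, y2), ...]
            annotations.append((class_id, polygon))
        return annotations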