ultralytics 8.0.239 Ultralytics Actions and hub-sdk adoption (#7431)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Burhan <62214284+Burhan-Q@users.noreply.github.com>
Co-authored-by: Kayzwer <68285002+Kayzwer@users.noreply.github.com>
This commit is contained in:
parent e795277391
commit fe27db2f6e

139 changed files with 6870 additions and 5125 deletions
ultralytics/models/fastsam/__init__.py

@@ -5,4 +5,4 @@ from .predict import FastSAMPredictor
 from .prompt import FastSAMPrompt
 from .val import FastSAMValidator

-__all__ = 'FastSAMPredictor', 'FastSAM', 'FastSAMPrompt', 'FastSAMValidator'
+__all__ = "FastSAMPredictor", "FastSAM", "FastSAMPrompt", "FastSAMValidator"
ultralytics/models/fastsam/model.py

@@ -21,14 +21,14 @@ class FastSAM(Model):
         ```
     """

-    def __init__(self, model='FastSAM-x.pt'):
+    def __init__(self, model="FastSAM-x.pt"):
         """Call the __init__ method of the parent class (YOLO) with the updated default model."""
-        if str(model) == 'FastSAM.pt':
-            model = 'FastSAM-x.pt'
-        assert Path(model).suffix not in ('.yaml', '.yml'), 'FastSAM models only support pre-trained models.'
-        super().__init__(model=model, task='segment')
+        if str(model) == "FastSAM.pt":
+            model = "FastSAM-x.pt"
+        assert Path(model).suffix not in (".yaml", ".yml"), "FastSAM models only support pre-trained models."
+        super().__init__(model=model, task="segment")

     @property
     def task_map(self):
         """Returns a dictionary mapping segment task to corresponding predictor and validator classes."""
-        return {'segment': {'predictor': FastSAMPredictor, 'validator': FastSAMValidator}}
+        return {"segment": {"predictor": FastSAMPredictor, "validator": FastSAMValidator}}
ultralytics/models/fastsam/predict.py

@@ -33,7 +33,7 @@ class FastSAMPredictor(DetectionPredictor):
             _callbacks (dict, optional): Optional list of callback functions to be invoked during prediction.
         """
         super().__init__(cfg, overrides, _callbacks)
-        self.args.task = 'segment'
+        self.args.task = "segment"

     def postprocess(self, preds, img, orig_imgs):
         """
@@ -55,7 +55,8 @@ class FastSAMPredictor(DetectionPredictor):
             agnostic=self.args.agnostic_nms,
             max_det=self.args.max_det,
             nc=1,  # set to 1 class since SAM has no class predictions
-            classes=self.args.classes)
+            classes=self.args.classes,
+        )
         full_box = torch.zeros(p[0].shape[1], device=p[0].device)
         full_box[2], full_box[3], full_box[4], full_box[6:] = img.shape[3], img.shape[2], 1.0, 1.0
         full_box = full_box.view(1, -1)
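The full-image box built in the context lines above can be reproduced standalone; the 38-column layout (4 box coords + 1 confidence + 1 class + 32 mask coefficients) is an assumption about this model's prediction tensor, not something the diff states:

```python
# Sketch of the full_box construction, assuming 38 prediction attributes.
import torch

img_h, img_w = 640, 640  # hypothetical network input size
full_box = torch.zeros(38)  # 4 bbox + conf + cls + 32 mask coefficients (assumed)
full_box[2], full_box[3], full_box[4], full_box[6:] = img_w, img_h, 1.0, 1.0
full_box = full_box.view(1, -1)  # one candidate box spanning the whole image
```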
ultralytics/models/fastsam/prompt.py

@@ -23,7 +23,7 @@ class FastSAMPrompt:
         clip: CLIP model for linear assignment.
     """

-    def __init__(self, source, results, device='cuda') -> None:
+    def __init__(self, source, results, device="cuda") -> None:
         """Initializes FastSAMPrompt with given source, results and device, and assigns clip for linear assignment."""
         self.device = device
         self.results = results
@@ -34,7 +34,8 @@ class FastSAMPrompt:
             import clip  # for linear_assignment
         except ImportError:
             from ultralytics.utils.checks import check_requirements
-            check_requirements('git+https://github.com/openai/CLIP.git')
+
+            check_requirements("git+https://github.com/openai/CLIP.git")
             import clip
         self.clip = clip
@@ -46,11 +47,11 @@ class FastSAMPrompt:
         x1, y1, x2, y2 = bbox
         segmented_image_array[y1:y2, x1:x2] = image_array[y1:y2, x1:x2]
         segmented_image = Image.fromarray(segmented_image_array)
-        black_image = Image.new('RGB', image.size, (255, 255, 255))
+        black_image = Image.new("RGB", image.size, (255, 255, 255))
         # transparency_mask = np.zeros_like((), dtype=np.uint8)
         transparency_mask = np.zeros((image_array.shape[0], image_array.shape[1]), dtype=np.uint8)
         transparency_mask[y1:y2, x1:x2] = 255
-        transparency_mask_image = Image.fromarray(transparency_mask, mode='L')
+        transparency_mask_image = Image.fromarray(transparency_mask, mode="L")
         black_image.paste(segmented_image, mask=transparency_mask_image)
         return black_image
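A self-contained reproduction of this crop-to-canvas logic with a synthetic image (note that the variable named black_image is actually a white canvas):

```python
# Paste only the bbox region of a source image onto a white canvas.
import numpy as np
from PIL import Image

image = Image.new("RGB", (100, 80), (120, 30, 200))  # stand-in source image
image_array = np.array(image)
x1, y1, x2, y2 = 10, 10, 60, 50

segmented_image_array = np.zeros_like(image_array)
segmented_image_array[y1:y2, x1:x2] = image_array[y1:y2, x1:x2]

canvas = Image.new("RGB", image.size, (255, 255, 255))
mask = np.zeros(image_array.shape[:2], dtype=np.uint8)
mask[y1:y2, x1:x2] = 255  # opaque only inside the bbox
canvas.paste(Image.fromarray(segmented_image_array), mask=Image.fromarray(mask, mode="L"))
```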
@@ -65,11 +66,12 @@ class FastSAMPrompt:
             mask = result.masks.data[i] == 1.0
             if torch.sum(mask) >= filter:
                 annotation = {
-                    'id': i,
-                    'segmentation': mask.cpu().numpy(),
-                    'bbox': result.boxes.data[i],
-                    'score': result.boxes.conf[i]}
-                annotation['area'] = annotation['segmentation'].sum()
+                    "id": i,
+                    "segmentation": mask.cpu().numpy(),
+                    "bbox": result.boxes.data[i],
+                    "score": result.boxes.conf[i],
+                }
+                annotation["area"] = annotation["segmentation"].sum()
                 annotations.append(annotation)
         return annotations
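The resulting annotation dicts have this shape, shown here with dummy data ("segmentation" is an HxW boolean array and "area" its pixel count):

```python
# Toy annotation matching the keys produced by the _format_results hunk above.
import numpy as np

seg = np.zeros((480, 640), dtype=bool)
seg[100:200, 150:300] = True
annotation = {"id": 0, "segmentation": seg, "bbox": [150, 100, 300, 200], "score": 0.9}
annotation["area"] = annotation["segmentation"].sum()  # 100 * 150 = 15000 pixels
```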
@@ -91,16 +93,18 @@ class FastSAMPrompt:
             y2 = max(y2, y_t + h_t)
         return [x1, y1, x2, y2]

-    def plot(self,
-             annotations,
-             output,
-             bbox=None,
-             points=None,
-             point_label=None,
-             mask_random_color=True,
-             better_quality=True,
-             retina=False,
-             with_contours=True):
+    def plot(
+        self,
+        annotations,
+        output,
+        bbox=None,
+        points=None,
+        point_label=None,
+        mask_random_color=True,
+        better_quality=True,
+        retina=False,
+        with_contours=True,
+    ):
         """
         Plots annotations, bounding boxes, and points on images and saves the output.
@@ -139,15 +143,17 @@ class FastSAMPrompt:
                     mask = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((3, 3), np.uint8))
                     masks[i] = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_OPEN, np.ones((8, 8), np.uint8))

-            self.fast_show_mask(masks,
-                                plt.gca(),
-                                random_color=mask_random_color,
-                                bbox=bbox,
-                                points=points,
-                                pointlabel=point_label,
-                                retinamask=retina,
-                                target_height=original_h,
-                                target_width=original_w)
+            self.fast_show_mask(
+                masks,
+                plt.gca(),
+                random_color=mask_random_color,
+                bbox=bbox,
+                points=points,
+                pointlabel=point_label,
+                retinamask=retina,
+                target_height=original_h,
+                target_width=original_w,
+            )

             if with_contours:
                 contour_all = []
@@ -166,10 +172,10 @@ class FastSAMPrompt:
             # Save the figure
             save_path = Path(output) / result_name
             save_path.parent.mkdir(exist_ok=True, parents=True)
-            plt.axis('off')
-            plt.savefig(save_path, bbox_inches='tight', pad_inches=0, transparent=True)
+            plt.axis("off")
+            plt.savefig(save_path, bbox_inches="tight", pad_inches=0, transparent=True)
             plt.close()
-            pbar.set_description(f'Saving {result_name} to {save_path}')
+            pbar.set_description(f"Saving {result_name} to {save_path}")

     @staticmethod
     def fast_show_mask(
@@ -212,26 +218,26 @@ class FastSAMPrompt:
         mask_image = np.expand_dims(annotation, -1) * visual

         show = np.zeros((h, w, 4))
-        h_indices, w_indices = np.meshgrid(np.arange(h), np.arange(w), indexing='ij')
+        h_indices, w_indices = np.meshgrid(np.arange(h), np.arange(w), indexing="ij")
         indices = (index[h_indices, w_indices], h_indices, w_indices, slice(None))

         show[h_indices, w_indices, :] = mask_image[indices]
         if bbox is not None:
             x1, y1, x2, y2 = bbox
-            ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor='b', linewidth=1))
+            ax.add_patch(plt.Rectangle((x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor="b", linewidth=1))
         # Draw point
         if points is not None:
             plt.scatter(
                 [point[0] for i, point in enumerate(points) if pointlabel[i] == 1],
                 [point[1] for i, point in enumerate(points) if pointlabel[i] == 1],
                 s=20,
-                c='y',
+                c="y",
             )
             plt.scatter(
                 [point[0] for i, point in enumerate(points) if pointlabel[i] == 0],
                 [point[1] for i, point in enumerate(points) if pointlabel[i] == 0],
                 s=20,
-                c='m',
+                c="m",
             )

         if not retinamask:
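The meshgrid gather in this hunk distills to a few lines: for each pixel, it selects the RGBA value of the mask index recorded in `index`. A simplified sketch with toy shapes:

```python
# Per-pixel gather across mask layers via NumPy fancy indexing.
import numpy as np

h, w, n = 4, 5, 2
mask_image = np.random.rand(n, h, w, 4)  # per-mask RGBA layers
index = np.random.randint(0, n, size=(h, w))  # chosen mask id per pixel

h_idx, w_idx = np.meshgrid(np.arange(h), np.arange(w), indexing="ij")
show = mask_image[index[h_idx, w_idx], h_idx, w_idx, :]  # winning colour per pixel
assert show.shape == (h, w, 4)
```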
@@ -258,7 +264,7 @@ class FastSAMPrompt:
         image = Image.fromarray(cv2.cvtColor(self.results[0].orig_img, cv2.COLOR_BGR2RGB))
         ori_w, ori_h = image.size
         annotations = format_results
-        mask_h, mask_w = annotations[0]['segmentation'].shape
+        mask_h, mask_w = annotations[0]["segmentation"].shape
         if ori_w != mask_w or ori_h != mask_h:
             image = image.resize((mask_w, mask_h))
         cropped_boxes = []
@@ -266,19 +272,19 @@ class FastSAMPrompt:
         not_crop = []
         filter_id = []
         for _, mask in enumerate(annotations):
-            if np.sum(mask['segmentation']) <= 100:
+            if np.sum(mask["segmentation"]) <= 100:
                 filter_id.append(_)
                 continue
-            bbox = self._get_bbox_from_mask(mask['segmentation'])  # mask 的 bbox
-            cropped_boxes.append(self._segment_image(image, bbox))  # 保存裁剪的图片
-            cropped_images.append(bbox)  # 保存裁剪的图片的bbox
+            bbox = self._get_bbox_from_mask(mask["segmentation"])  # bbox from mask
+            cropped_boxes.append(self._segment_image(image, bbox))  # save cropped image
+            cropped_images.append(bbox)  # save cropped image bbox

         return cropped_boxes, cropped_images, not_crop, filter_id, annotations

     def box_prompt(self, bbox):
         """Modifies the bounding box properties and calculates IoU between masks and bounding box."""
         if self.results[0].masks is not None:
-            assert (bbox[2] != 0 and bbox[3] != 0)
+            assert bbox[2] != 0 and bbox[3] != 0
             if os.path.isdir(self.source):
                 raise ValueError(f"'{self.source}' is a directory, not a valid source for this function.")
             masks = self.results[0].masks.data
@@ -290,7 +296,8 @@ class FastSAMPrompt:
                 int(bbox[0] * w / target_width),
                 int(bbox[1] * h / target_height),
                 int(bbox[2] * w / target_width),
-                int(bbox[3] * h / target_height), ]
+                int(bbox[3] * h / target_height),
+            ]
             bbox[0] = max(round(bbox[0]), 0)
             bbox[1] = max(round(bbox[1]), 0)
             bbox[2] = min(round(bbox[2]), w)
@@ -299,7 +306,7 @@ class FastSAMPrompt:
             # IoUs = torch.zeros(len(masks), dtype=torch.float32)
             bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])

-            masks_area = torch.sum(masks[:, bbox[1]:bbox[3], bbox[0]:bbox[2]], dim=(1, 2))
+            masks_area = torch.sum(masks[:, bbox[1] : bbox[3], bbox[0] : bbox[2]], dim=(1, 2))
             orig_masks_area = torch.sum(masks, dim=(1, 2))

             union = bbox_area + orig_masks_area - masks_area
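A numeric sketch of the box-vs-mask IoU computed here, with toy tensors; the intersection is approximated by the mask area falling inside the box:

```python
# Toy version of the IoU computed in box_prompt above.
import torch

masks = torch.zeros(2, 8, 8)
masks[0, 2:6, 2:6] = 1  # 16-pixel mask exactly matching the box
masks[1, 0:3, 0:3] = 1  # 9-pixel mask barely overlapping it
bbox = [2, 2, 6, 6]  # x1, y1, x2, y2 -> area 16

bbox_area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])
masks_area = torch.sum(masks[:, bbox[1] : bbox[3], bbox[0] : bbox[2]], dim=(1, 2))
orig_masks_area = torch.sum(masks, dim=(1, 2))
union = bbox_area + orig_masks_area - masks_area
iou = masks_area / union  # tensor([1.0000, 0.0417])
```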
@@ -316,13 +323,13 @@ class FastSAMPrompt:
                 raise ValueError(f"'{self.source}' is a directory, not a valid source for this function.")
             masks = self._format_results(self.results[0], 0)
             target_height, target_width = self.results[0].orig_shape
-            h = masks[0]['segmentation'].shape[0]
-            w = masks[0]['segmentation'].shape[1]
+            h = masks[0]["segmentation"].shape[0]
+            w = masks[0]["segmentation"].shape[1]
             if h != target_height or w != target_width:
                 points = [[int(point[0] * w / target_width), int(point[1] * h / target_height)] for point in points]
             onemask = np.zeros((h, w))
             for annotation in masks:
-                mask = annotation['segmentation'] if isinstance(annotation, dict) else annotation
+                mask = annotation["segmentation"] if isinstance(annotation, dict) else annotation
                 for i, point in enumerate(points):
                     if mask[point[1], point[0]] == 1 and pointlabel[i] == 1:
                         onemask += mask
@@ -337,12 +344,12 @@ class FastSAMPrompt:
         if self.results[0].masks is not None:
             format_results = self._format_results(self.results[0], 0)
             cropped_boxes, cropped_images, not_crop, filter_id, annotations = self._crop_image(format_results)
-            clip_model, preprocess = self.clip.load('ViT-B/32', device=self.device)
+            clip_model, preprocess = self.clip.load("ViT-B/32", device=self.device)
             scores = self.retrieve(clip_model, preprocess, cropped_boxes, text, device=self.device)
             max_idx = scores.argsort()
             max_idx = max_idx[-1]
             max_idx += sum(np.array(filter_id) <= int(max_idx))
-            self.results[0].masks.data = torch.tensor(np.array([annotations[max_idx]['segmentation']]))
+            self.results[0].masks.data = torch.tensor(np.array([annotations[max_idx]["segmentation"]]))
             return self.results

     def everything_prompt(self):
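A hedged end-to-end sketch of the prompt API touched in this file, mirroring the documented FastSAM workflow; the paths are illustrative and device="cpu" is chosen only for portability:

```python
# Run FastSAM, then refine the masks with box, text, or everything prompts.
from ultralytics import FastSAM
from ultralytics.models.fastsam import FastSAMPrompt

model = FastSAM("FastSAM-x.pt")
everything = model("path/to/image.jpg", device="cpu", retina_masks=True)

prompt = FastSAMPrompt("path/to/image.jpg", everything, device="cpu")
ann = prompt.box_prompt(bbox=[200, 200, 300, 300])  # keep the best-IoU mask
ann = prompt.text_prompt(text="a photo of a dog")  # CLIP-ranked mask
ann = prompt.everything_prompt()  # all masks
prompt.plot(annotations=ann, output="./output/")
```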
ultralytics/models/fastsam/val.py

@@ -35,6 +35,6 @@ class FastSAMValidator(SegmentationValidator):
         Plots for ConfusionMatrix and other related metrics are disabled in this class to avoid errors.
         """
         super().__init__(dataloader, save_dir, pbar, args, _callbacks)
-        self.args.task = 'segment'
+        self.args.task = "segment"
         self.args.plots = False  # disable ConfusionMatrix and other plots to avoid errors
         self.metrics = SegmentMetrics(save_dir=self.save_dir, on_plot=self.on_plot)