Replace enumerate + index with zip() (#14574)
parent 68f1f326f5
commit e59376b55f
4 changed files with 7 additions and 15 deletions
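For context, a minimal sketch of the pattern this commit applies (variable names here are illustrative, not taken from the predictor code):

# Before: enumerate + manual index lookups into parallel sequences
preds = ["p0", "p1"]          # hypothetical per-image predictions
orig_imgs = ["img0", "img1"]  # hypothetical original images
paths = ["a.jpg", "b.jpg"]    # hypothetical source paths

for i, pred in enumerate(preds):
    orig_img = orig_imgs[i]
    img_path = paths[i]
    print(pred, orig_img, img_path)

# After: zip() yields the aligned items directly, no index bookkeeping
for pred, orig_img, img_path in zip(preds, orig_imgs, paths):
    print(pred, orig_img, img_path)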
@@ -71,9 +71,7 @@ class FastSAMPredictor(DetectionPredictor):
         results = []
         proto = preds[1][-1] if len(preds[1]) == 3 else preds[1]  # second output is len 3 if pt, but only 1 if exported
-        for i, pred in enumerate(p):
-            orig_img = orig_imgs[i]
-            img_path = self.batch[0][i]
+        for i, (pred, orig_img, img_path) in enumerate(zip(p, orig_imgs, self.batch[0])):
             if not len(pred):  # save empty boxes
                 masks = None
             elif self.args.retina_masks:
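Note that the FastSAM hunk keeps enumerate() wrapped around zip(), presumably because the index i is still used later in the loop body (not visible in this hunk). A self-contained sketch of that combined form:

items = ["x", "y"]
names = ["n0", "n1"]
for i, (item, name) in enumerate(zip(items, names)):
    print(i, item, name)  # positional index stays available alongside the unpacked pair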
@@ -52,9 +52,7 @@ class NASPredictor(BasePredictor):
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)

         results = []
-        for i, pred in enumerate(preds):
-            orig_img = orig_imgs[i]
+        for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
             pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
-            img_path = self.batch[0][i]
             results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred))
         return results
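One design note on plain zip(): it silently stops at the shortest input, so the refactor assumes preds, orig_imgs, and self.batch[0] always have equal lengths. On Python 3.10+ a stricter variant would fail loudly on a mismatch (a hedged alternative, not what this commit does):

preds = ["p0", "p1"]
imgs = ["img0"]  # deliberately mismatched length
try:
    for pred, img in zip(preds, imgs, strict=True):  # strict= requires Python 3.10+
        print(pred, img)
except ValueError as e:
    print(e)  # zip() argument 2 is shorter than argument 1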
@@ -56,18 +56,16 @@ class RTDETRPredictor(BasePredictor):
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)

         results = []
-        for i, bbox in enumerate(bboxes):  # (300, 4)
+        for bbox, score, orig_img, img_path in zip(bboxes, scores, orig_imgs, self.batch[0]):  # (300, 4)
             bbox = ops.xywh2xyxy(bbox)
-            score, cls = scores[i].max(-1, keepdim=True)  # (300, 1)
-            idx = score.squeeze(-1) > self.args.conf  # (300, )
+            max_score, cls = score.max(-1, keepdim=True)  # (300, 1)
+            idx = max_score.squeeze(-1) > self.args.conf  # (300, )
             if self.args.classes is not None:
                 idx = (cls == torch.tensor(self.args.classes, device=cls.device)).any(1) & idx
-            pred = torch.cat([bbox, score, cls], dim=-1)[idx]  # filter
-            orig_img = orig_imgs[i]
+            pred = torch.cat([bbox, max_score, cls], dim=-1)[idx]  # filter
             oh, ow = orig_img.shape[:2]
             pred[..., [0, 2]] *= ow
             pred[..., [1, 3]] *= oh
-            img_path = self.batch[0][i]
             results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred))
         return results
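In the RT-DETR hunk the per-image tensor now arrives as the loop variable score, so the result of max() is renamed max_score; reassigning score in place would clobber the zipped value. A toy illustration of the hazard the rename avoids:

scores = [[0.2, 0.9], [0.7, 0.1]]  # toy per-image class scores
for score in scores:
    max_score = max(score)          # renamed result; `score` still holds the full row
    cls = score.index(max_score)
    print(max_score, cls)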
@@ -372,8 +372,7 @@ class Predictor(BasePredictor):
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)

         results = []
-        for i, masks in enumerate([pred_masks]):
-            orig_img = orig_imgs[i]
+        for masks, orig_img, img_path in zip([pred_masks], orig_imgs, self.batch[0]):
             if pred_bboxes is not None:
                 pred_bboxes = ops.scale_boxes(img.shape[2:], pred_bboxes.float(), orig_img.shape, padding=False)
                 cls = torch.arange(len(pred_masks), dtype=torch.int32, device=pred_masks.device)
@@ -381,7 +380,6 @@ class Predictor(BasePredictor):

             masks = ops.scale_masks(masks[None].float(), orig_img.shape[:2], padding=False)[0]
             masks = masks > self.model.mask_threshold  # to bool
-            img_path = self.batch[0][i]
             results.append(Results(orig_img, path=img_path, names=names, masks=masks, boxes=pred_bboxes))
         # Reset segment-all mode.
         self.segment_all = False
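In the SAM hunks, zip([pred_masks], orig_imgs, self.batch[0]) runs at most one iteration, since zip() stops at its shortest input; the single-element list pairs the whole mask batch with the first image and path, matching the old enumerate([pred_masks]) behavior. A quick demonstration:

masks_batch = ["all_masks"]   # single element, like [pred_masks]
imgs = ["img0", "img1"]
paths = ["a.jpg", "b.jpg"]
print(list(zip(masks_batch, imgs, paths)))  # [('all_masks', 'img0', 'a.jpg')]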