From db82d1c6ae9a6379a04b787deaa653ce5138dcd3 Mon Sep 17 00:00:00 2001
From: Kayzwer <68285002+Kayzwer@users.noreply.github.com>
Date: Tue, 23 Jul 2024 01:52:43 +0800
Subject: [PATCH] Replace enumerate with zip in models/yolo (#14599)

---
 ultralytics/models/yolo/classify/predict.py | 4 +---
 ultralytics/models/yolo/detect/predict.py   | 4 +---
 ultralytics/models/yolo/pose/predict.py     | 4 +---
 ultralytics/models/yolo/segment/predict.py  | 4 +---
 4 files changed, 4 insertions(+), 12 deletions(-)

diff --git a/ultralytics/models/yolo/classify/predict.py b/ultralytics/models/yolo/classify/predict.py
index 853ef048..1ca42fe8 100644
--- a/ultralytics/models/yolo/classify/predict.py
+++ b/ultralytics/models/yolo/classify/predict.py
@@ -54,8 +54,6 @@ class ClassificationPredictor(BasePredictor):
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
 
         results = []
-        for i, pred in enumerate(preds):
-            orig_img = orig_imgs[i]
-            img_path = self.batch[0][i]
+        for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
             results.append(Results(orig_img, path=img_path, names=self.model.names, probs=pred))
         return results
diff --git a/ultralytics/models/yolo/detect/predict.py b/ultralytics/models/yolo/detect/predict.py
index 3a0c6287..9842928d 100644
--- a/ultralytics/models/yolo/detect/predict.py
+++ b/ultralytics/models/yolo/detect/predict.py
@@ -35,9 +35,7 @@ class DetectionPredictor(BasePredictor):
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
 
         results = []
-        for i, pred in enumerate(preds):
-            orig_img = orig_imgs[i]
+        for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
             pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
-            img_path = self.batch[0][i]
             results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred))
         return results
diff --git a/ultralytics/models/yolo/pose/predict.py b/ultralytics/models/yolo/pose/predict.py
index 7c55709f..911c424f 100644
--- a/ultralytics/models/yolo/pose/predict.py
+++ b/ultralytics/models/yolo/pose/predict.py
@@ -46,12 +46,10 @@ class PosePredictor(DetectionPredictor):
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
 
         results = []
-        for i, pred in enumerate(preds):
-            orig_img = orig_imgs[i]
+        for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
             pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape).round()
             pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:]
             pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, orig_img.shape)
-            img_path = self.batch[0][i]
             results.append(
                 Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], keypoints=pred_kpts)
             )
diff --git a/ultralytics/models/yolo/segment/predict.py b/ultralytics/models/yolo/segment/predict.py
index 9d7015ff..d007eeea 100644
--- a/ultralytics/models/yolo/segment/predict.py
+++ b/ultralytics/models/yolo/segment/predict.py
@@ -42,9 +42,7 @@ class SegmentationPredictor(DetectionPredictor):
 
         results = []
         proto = preds[1][-1] if isinstance(preds[1], tuple) else preds[1]  # tuple if PyTorch model or array if exported
-        for i, pred in enumerate(p):
-            orig_img = orig_imgs[i]
-            img_path = self.batch[0][i]
+        for i, (pred, orig_img, img_path) in enumerate(zip(p, orig_imgs, self.batch[0])):
             if not len(pred):  # save empty boxes
                 masks = None
             elif self.args.retina_masks:
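
Note on the pattern (illustration only, not part of the patch): each predictor loops over three parallel, equal-length sequences (per-image predictions, original images, and the image paths held in self.batch[0]), so index lookups via enumerate can be replaced by direct tuple unpacking with zip. A minimal standalone sketch of that refactor, using made-up placeholder lists rather than the real predictor state:

    # Placeholder data standing in for preds, orig_imgs and self.batch[0].
    preds = ["pred0", "pred1"]
    orig_imgs = ["img0", "img1"]
    img_paths = ["a.jpg", "b.jpg"]

    # Before: fetch each companion item by index.
    old = []
    for i, pred in enumerate(preds):
        old.append((pred, orig_imgs[i], img_paths[i]))

    # After: unpack the parallel sequences directly.
    new = []
    for pred, orig_img, img_path in zip(preds, orig_imgs, img_paths):
        new.append((pred, orig_img, img_path))

    assert old == new  # identical output when the sequences have the same length

SegmentationPredictor keeps enumerate wrapped around zip because the loop index i is presumably still needed later in its loop body.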