Apply Ruff 0.9.0 (#18622)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
This commit is contained in:
Glenn Jocher 2025-01-10 00:05:09 +01:00 committed by GitHub
parent cc1e77138c
commit 3902e740cf
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
22 changed files with 69 additions and 65 deletions

View file

@@ -87,9 +87,9 @@ class FastSAMPredictor(SegmentationPredictor):
if labels is None:
labels = torch.ones(points.shape[0])
labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
assert len(labels) == len(
points
), f"Excepted `labels` got same size as `point`, but got {len(labels)} and {len(points)}"
assert len(labels) == len(points), (
f"Excepted `labels` got same size as `point`, but got {len(labels)} and {len(points)}"
)
point_idx = (
torch.ones(len(result), dtype=torch.bool, device=self.device)
if labels.sum() == 0 # all negative points

View file

@@ -479,9 +479,9 @@ class ImageEncoder(nn.Module):
self.trunk = trunk
self.neck = neck
self.scalp = scalp
assert (
self.trunk.channel_list == self.neck.backbone_channel_list
), f"Channel dims of trunk {self.trunk.channel_list} and neck {self.neck.backbone_channel_list} do not match."
assert self.trunk.channel_list == self.neck.backbone_channel_list, (
f"Channel dims of trunk {self.trunk.channel_list} and neck {self.neck.backbone_channel_list} do not match."
)
def forward(self, sample: torch.Tensor):
"""Encodes input through patch embedding, positional embedding, transformer blocks, and neck module."""

View file

@@ -279,9 +279,9 @@ class Predictor(BasePredictor):
if labels is None:
labels = np.ones(points.shape[:-1])
labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
assert (
points.shape[-2] == labels.shape[-1]
), f"Number of points {points.shape[-2]} should match number of labels {labels.shape[-1]}."
assert points.shape[-2] == labels.shape[-1], (
f"Number of points {points.shape[-2]} should match number of labels {labels.shape[-1]}."
)
points *= r
if points.ndim == 2:
# (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)
@@ -552,9 +552,9 @@ class Predictor(BasePredictor):
def get_im_features(self, im):
"""Extracts image features using the SAM model's image encoder for subsequent mask prediction."""
assert (
isinstance(self.imgsz, (tuple, list)) and self.imgsz[0] == self.imgsz[1]
), f"SAM models only support square image size, but got {self.imgsz}."
assert isinstance(self.imgsz, (tuple, list)) and self.imgsz[0] == self.imgsz[1], (
f"SAM models only support square image size, but got {self.imgsz}."
)
self.model.set_imgsz(self.imgsz)
return self.model.image_encoder(im)
@@ -795,9 +795,9 @@ class SAM2Predictor(Predictor):
def get_im_features(self, im):
"""Extracts image features from the SAM image encoder for subsequent processing."""
assert (
isinstance(self.imgsz, (tuple, list)) and self.imgsz[0] == self.imgsz[1]
), f"SAM 2 models only support square image size, but got {self.imgsz}."
assert isinstance(self.imgsz, (tuple, list)) and self.imgsz[0] == self.imgsz[1], (
f"SAM 2 models only support square image size, but got {self.imgsz}."
)
self.model.set_imgsz(self.imgsz)
self._bb_feat_sizes = [[x // (4 * i) for x in self.imgsz] for i in [1, 2, 4]]

View file

@@ -168,7 +168,7 @@ class DetectionValidator(BaseValidator):
predn,
self.args.save_conf,
pbatch["ori_shape"],
self.save_dir / "labels" / f'{Path(batch["im_file"][si]).stem}.txt',
self.save_dir / "labels" / f"{Path(batch['im_file'][si]).stem}.txt",
)
def finalize_metrics(self, *args, **kwargs):

View file

@@ -163,7 +163,7 @@ class OBBValidator(DetectionValidator):
classname = self.names[d["category_id"] - 1].replace(" ", "-")
p = d["poly"]
with open(f'{pred_txt / f"Task1_{classname}"}.txt', "a") as f:
with open(f"{pred_txt / f'Task1_{classname}'}.txt", "a") as f:
f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
# Save merged results, this could result slightly lower map than using official merging script,
# because of the probiou calculation.
@@ -197,7 +197,7 @@ class OBBValidator(DetectionValidator):
p = [round(i, 3) for i in x[:-2]] # poly
score = round(x[-2], 3)
with open(f'{pred_merged_txt / f"Task1_{classname}"}.txt', "a") as f:
with open(f"{pred_merged_txt / f'Task1_{classname}'}.txt", "a") as f:
f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
return stats

View file

@@ -153,7 +153,7 @@ class PoseValidator(DetectionValidator):
pred_kpts,
self.args.save_conf,
pbatch["ori_shape"],
self.save_dir / "labels" / f'{Path(batch["im_file"][si]).stem}.txt',
self.save_dir / "labels" / f"{Path(batch['im_file'][si]).stem}.txt",
)
def _process_batch(self, detections, gt_bboxes, gt_cls, pred_kpts=None, gt_kpts=None):

View file

@@ -162,7 +162,7 @@ class SegmentationValidator(DetectionValidator):
pred_masks,
self.args.save_conf,
pbatch["ori_shape"],
self.save_dir / "labels" / f'{Path(batch["im_file"][si]).stem}.txt',
self.save_dir / "labels" / f"{Path(batch['im_file'][si]).stem}.txt",
)
def finalize_metrics(self, *args, **kwargs):