Apply Ruff 0.9.0 (#18622)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com> Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
This commit is contained in:
parent
cc1e77138c
commit
3902e740cf
22 changed files with 69 additions and 65 deletions
|
|
@@ -271,9 +271,9 @@ class Compose:
|
|||
"""
|
||||
assert isinstance(index, (int, list)), f"The indices should be either list or int type but got {type(index)}"
|
||||
if isinstance(index, list):
|
||||
assert isinstance(
|
||||
value, list
|
||||
), f"The indices should be the same type as values, but got {type(index)} and {type(value)}"
|
||||
assert isinstance(value, list), (
|
||||
f"The indices should be the same type as values, but got {type(index)} and {type(value)}"
|
||||
)
|
||||
if isinstance(index, int):
|
||||
index, value = [index], [value]
|
||||
for i, v in zip(index, value):
|
||||
|
|
|
|||
|
|
@@ -242,7 +242,9 @@ def convert_coco(
|
|||
from ultralytics.data.converter import convert_coco
|
||||
|
||||
convert_coco("../datasets/coco/annotations/", use_segments=True, use_keypoints=False, cls91to80=False)
|
||||
convert_coco("../datasets/lvis/annotations/", use_segments=True, use_keypoints=False, cls91to80=False, lvis=True)
|
||||
convert_coco(
|
||||
"../datasets/lvis/annotations/", use_segments=True, use_keypoints=False, cls91to80=False, lvis=True
|
||||
)
|
||||
```
|
||||
|
||||
Output:
|
||||
|
|
@@ -270,7 +272,7 @@ def convert_coco(
|
|||
data = json.load(f)
|
||||
|
||||
# Create image dict
|
||||
images = {f'{x["id"]:d}': x for x in data["images"]}
|
||||
images = {f"{x['id']:d}": x for x in data["images"]}
|
||||
# Create image-annotations dict
|
||||
imgToAnns = defaultdict(list)
|
||||
for ann in data["annotations"]:
|
||||
|
|
|
|||
|
|
@@ -299,7 +299,7 @@ class GroundingDataset(YOLODataset):
|
|||
LOGGER.info("Loading annotation file...")
|
||||
with open(self.json_file) as f:
|
||||
annotations = json.load(f)
|
||||
images = {f'{x["id"]:d}': x for x in annotations["images"]}
|
||||
images = {f"{x['id']:d}": x for x in annotations["images"]}
|
||||
img_to_anns = defaultdict(list)
|
||||
for ann in annotations["annotations"]:
|
||||
img_to_anns[ann["image_id"]].append(ann)
|
||||
|
|
|
|||
|
|
@@ -451,7 +451,7 @@ def check_cls_dataset(dataset, split=""):
|
|||
|
||||
# Print to console
|
||||
for k, v in {"train": train_set, "val": val_set, "test": test_set}.items():
|
||||
prefix = f'{colorstr(f"{k}:")} {v}...'
|
||||
prefix = f"{colorstr(f'{k}:')} {v}..."
|
||||
if v is None:
|
||||
LOGGER.info(prefix)
|
||||
else:
|
||||
|
|
@@ -519,7 +519,7 @@ class HUBDatasetStats:
|
|||
except Exception as e:
|
||||
raise Exception("error/HUB/dataset_stats/init") from e
|
||||
|
||||
self.hub_dir = Path(f'{data["path"]}-hub')
|
||||
self.hub_dir = Path(f"{data['path']}-hub")
|
||||
self.im_dir = self.hub_dir / "images"
|
||||
self.stats = {"nc": len(data["names"]), "names": list(data["names"].values())} # statistics dictionary
|
||||
self.data = data
|
||||
|
|
@@ -531,7 +531,7 @@ class HUBDatasetStats:
|
|||
return False, None, path
|
||||
unzip_dir = unzip_file(path, path=path.parent)
|
||||
assert unzip_dir.is_dir(), (
|
||||
f"Error unzipping {path}, {unzip_dir} not found. " f"path/to/abc.zip MUST unzip to path/to/abc/"
|
||||
f"Error unzipping {path}, {unzip_dir} not found. path/to/abc.zip MUST unzip to path/to/abc/"
|
||||
)
|
||||
return True, str(unzip_dir), find_dataset_yaml(unzip_dir) # zipped, data_dir, yaml_path
|
||||
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue