PyUpgrade 3.8 updates (#15941)
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
parent ea13dc6208
commit 9ec8e9acbf
13 changed files with 25 additions and 25 deletions
@@ -164,7 +164,7 @@ def update_docs_html():
     # Convert plaintext links to HTML hyperlinks
     files_modified = 0
     for html_file in tqdm(SITE.rglob("*.html"), desc="Converting plaintext links"):
-        with open(html_file, "r", encoding="utf-8") as file:
+        with open(html_file, encoding="utf-8") as file:
             content = file.read()
         updated_content = convert_plaintext_links_to_html(content)
         if updated_content != content:
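Note: the hunk above applies pyupgrade's redundant-open-mode rule: `"r"` is already `open()`'s default mode, so dropping it changes nothing. A minimal sketch of the equivalence (the file name and contents are hypothetical, not from this commit):

```python
# Illustrative only: "r" is open()'s default mode, so both calls behave identically.
from pathlib import Path

path = Path("example.txt")  # hypothetical file, created just for this demo
path.write_text("hello", encoding="utf-8")

with open(path, "r", encoding="utf-8") as f:  # before the rewrite
    before = f.read()
with open(path, encoding="utf-8") as f:  # after the rewrite
    after = f.read()
assert before == after == "hello"
```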
@@ -490,7 +490,7 @@ def convert_dota_to_yolo_obb(dota_root_path: str):
                 normalized_coords = [
                     coords[i] / image_width if i % 2 == 0 else coords[i] / image_height for i in range(8)
                 ]
-                formatted_coords = ["{:.6g}".format(coord) for coord in normalized_coords]
+                formatted_coords = [f"{coord:.6g}" for coord in normalized_coords]
                 g.write(f"{class_idx} {' '.join(formatted_coords)}\n")
 
     for phase in ["train", "val"]:
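Note: this is pyupgrade's `str.format`-to-f-string conversion; both spell the same `:.6g` format spec (at most 6 significant digits). A small sketch with a made-up value:

```python
# Illustrative only: the f-string reuses the identical :.6g format spec.
coord = 0.123456789  # hypothetical normalized coordinate
old = "{:.6g}".format(coord)
new = f"{coord:.6g}"
assert old == new == "0.123457"
```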
@@ -296,7 +296,7 @@ class GroundingDataset(YOLODataset):
         """Loads annotations from a JSON file, filters, and normalizes bounding boxes for each image."""
         labels = []
         LOGGER.info("Loading annotation file...")
-        with open(self.json_file, "r") as f:
+        with open(self.json_file) as f:
             annotations = json.load(f)
         images = {f'{x["id"]:d}': x for x in annotations["images"]}
         img_to_anns = defaultdict(list)
@@ -193,7 +193,7 @@ def crop_and_save(anno, windows, window_objs, im_dir, lb_dir, allow_background_images
 
             with open(Path(lb_dir) / f"{new_name}.txt", "w") as f:
                 for lb in label:
-                    formatted_coords = ["{:.6g}".format(coord) for coord in lb[1:]]
+                    formatted_coords = [f"{coord:.6g}" for coord in lb[1:]]
                     f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")
 
 
@@ -328,7 +328,7 @@ class BasePredictor:
             frame = int(match[1]) if match else None  # 0 if frame undetermined
 
         self.txt_path = self.save_dir / "labels" / (p.stem + ("" if self.dataset.mode == "image" else f"_{frame}"))
-        string += "%gx%g " % im.shape[2:]
+        string += "{:g}x{:g} ".format(*im.shape[2:])
         result = self.results[i]
         result.save_dir = self.save_dir.__str__()  # used in other locations
         string += f"{result.verbose()}{result.speed['inference']:.1f}ms"
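Note: a `%g`-style template with arguments unpacked from a sequence can't become an f-string directly, so pyupgrade targets `str.format`, whose `:g` spec matches printf `%g`. A sketch with a hypothetical shape tuple:

```python
# Illustrative only: :g in str.format mirrors printf-style %g, so the
# rewrite is behavior-preserving; the shape tuple here is made up.
shape = (1, 3, 640, 480)  # e.g. a (batch, channels, height, width) tensor shape
old = "%gx%g " % shape[2:]
new = "{:g}x{:g} ".format(*shape[2:])
assert old == new == "640x480 "
```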
@@ -202,8 +202,9 @@ class BaseValidator:
             return {k: round(float(v), 5) for k, v in results.items()}  # return results as 5 decimal place floats
         else:
             LOGGER.info(
-                "Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image"
-                % tuple(self.speed.values())
+                "Speed: {:.1f}ms preprocess, {:.1f}ms inference, {:.1f}ms loss, {:.1f}ms postprocess per image".format(
+                    *tuple(self.speed.values())
+                )
             )
             if self.args.save_json and self.jdict:
                 with open(str(self.save_dir / "predictions.json"), "w") as f:
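Note: same rule as the predictor hunk above: the `%`-tuple interpolation becomes `str.format` with the tuple unpacked by `*`, since `{:.1f}` mirrors `%.1f`. A self-contained sketch with made-up timings:

```python
# Illustrative only: both templates render identically; the timing dict is hypothetical.
speed = {"preprocess": 0.2, "inference": 1.5, "loss": 0.0, "postprocess": 0.3}
old = "Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image" % tuple(speed.values())
new = "Speed: {:.1f}ms preprocess, {:.1f}ms inference, {:.1f}ms loss, {:.1f}ms postprocess per image".format(
    *tuple(speed.values())
)
assert old == new
```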
@@ -55,23 +55,22 @@ def request_with_credentials(url: str) -> any:
 
     display.display(
         display.Javascript(
-            """
-            window._hub_tmp = new Promise((resolve, reject) => {
+            f"""
+            window._hub_tmp = new Promise((resolve, reject) => {{
                 const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000)
-                fetch("%s", {
+                fetch("{url}", {{
                     method: 'POST',
                     credentials: 'include'
-                })
+                }})
                     .then((response) => resolve(response.json()))
-                    .then((json) => {
+                    .then((json) => {{
                         clearTimeout(timeout);
-                    }).catch((err) => {
+                    }}).catch((err) => {{
                         clearTimeout(timeout);
                         reject(err);
-                    });
-            });
+                    }});
+            }});
             """
-            % url
         )
     )
     return output.eval_js("_hub_tmp")
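Note: converting the JavaScript template from `%s`-interpolation to an f-string requires doubling every literal brace (`{{` / `}}`) so that only `{url}` is interpolated. A reduced sketch of the before/after with a hypothetical URL:

```python
# Illustrative only: in an f-string, {{ and }} produce literal braces,
# while {url} substitutes the variable; the endpoint is made up.
url = "https://example.com/auth"
old = 'fetch("%s", { method: "POST" })' % url
new = f'fetch("{url}", {{ method: "POST" }})'
assert old == new
```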
@@ -100,7 +100,7 @@ class FastSAMPredictor(SegmentationPredictor):
             texts = [texts]
         crop_ims, filter_idx = [], []
         for i, b in enumerate(result.boxes.xyxy.tolist()):
-            x1, y1, x2, y2 = [int(x) for x in b]
+            x1, y1, x2, y2 = (int(x) for x in b)
             if masks[i].sum() <= 100:
                 filter_idx.append(i)
                 continue
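Note: pyupgrade replaces the throwaway list comprehension with a generator expression, since tuple unpacking accepts any iterable and the intermediate list is never reused. Sketch with made-up box coordinates:

```python
# Illustrative only: unpacking consumes any iterable, so no list is needed.
b = [10.6, 20.2, 30.9, 40.1]  # hypothetical xyxy box coordinates
x1, y1, x2, y2 = (int(x) for x in b)
assert (x1, y1, x2, y2) == (10, 20, 30, 40)
```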
@@ -35,7 +35,7 @@ class DropPath(nn.Module):
 
     def __init__(self, drop_prob=0.0, scale_by_keep=True):
         """Initialize DropPath module for stochastic depth regularization during training."""
-        super(DropPath, self).__init__()
+        super().__init__()
         self.drop_prob = drop_prob
         self.scale_by_keep = scale_by_keep
 
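Note: this and the two `nn.Module` hunks below apply PEP 3135's zero-argument `super()`, which inside a class body resolves to the same bound proxy as the explicit Python 2-style `super(Class, self)`. A standalone sketch with made-up classes:

```python
# Illustrative only: zero-argument super() is equivalent to the two-argument form.
class Base:
    def __init__(self):
        self.ready = True


class Child(Base):
    def __init__(self):
        super().__init__()  # same effect as super(Child, self).__init__()


assert Child().ready
```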
@@ -672,7 +672,7 @@ class CBLinear(nn.Module):
 
     def __init__(self, c1, c2s, k=1, s=1, p=None, g=1):
         """Initializes the CBLinear module, passing inputs unchanged."""
-        super(CBLinear, self).__init__()
+        super().__init__()
         self.c2s = c2s
         self.conv = nn.Conv2d(c1, sum(c2s), k, s, autopad(k, p), groups=g, bias=True)
 
@@ -686,7 +686,7 @@ class CBFuse(nn.Module):
 
     def __init__(self, idx):
         """Initializes CBFuse module with layer index for selective feature fusion."""
-        super(CBFuse, self).__init__()
+        super().__init__()
         self.idx = idx
 
     def forward(self, xs):
@@ -210,7 +210,7 @@ class ParkingManagement:
         Args:
             json_file (str): file that have all parking slot points
         """
-        with open(json_file, "r") as f:
+        with open(json_file) as f:
             return json.load(f)
 
     def process_data(self, json_data, im0, boxes, clss):
@@ -198,7 +198,7 @@ class RF100Benchmark:
         os.mkdir("ultralytics-benchmarks")
         safe_download("https://github.com/ultralytics/assets/releases/download/v0.0.0/datasets_links.txt")
 
-        with open(ds_link_txt, "r") as file:
+        with open(ds_link_txt) as file:
             for line in file:
                 try:
                     _, url, workspace, project, version = re.split("/+", line.strip())
@@ -222,7 +222,7 @@ class RF100Benchmark:
         Args:
             path (str): YAML file path.
         """
-        with open(path, "r") as file:
+        with open(path) as file:
             yaml_data = yaml.safe_load(file)
             yaml_data["train"] = "train/images"
             yaml_data["val"] = "valid/images"
@@ -242,7 +242,7 @@ class RF100Benchmark:
         skip_symbols = ["🚀", "⚠️", "💡", "❌"]
         with open(yaml_path) as stream:
             class_names = yaml.safe_load(stream)["names"]
-        with open(val_log_file, "r", encoding="utf-8") as f:
+        with open(val_log_file, encoding="utf-8") as f:
             lines = f.readlines()
         eval_lines = []
         for line in lines:
@@ -460,7 +460,7 @@ def plot_pr_curve(px, py, ap, save_dir=Path("pr_curve.png"), names={}, on_plot=None):
     else:
         ax.plot(px, py, linewidth=1, color="grey")  # plot(recall, precision)
 
-    ax.plot(px, py.mean(1), linewidth=3, color="blue", label="all classes %.3f mAP@0.5" % ap[:, 0].mean())
+    ax.plot(px, py.mean(1), linewidth=3, color="blue", label=f"all classes {ap[:, 0].mean():.3f} mAP@0.5")
     ax.set_xlabel("Recall")
     ax.set_ylabel("Precision")
     ax.set_xlim(0, 1)
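Note: here the `%`-interpolated expression moves straight into the f-string with the same `.3f` precision. A sketch assuming NumPy (which the surrounding plotting code already uses); the AP values are made up:

```python
# Illustrative only: the expression is inlined into the f-string unchanged.
import numpy as np

ap = np.array([[0.65, 0.5], [0.75, 0.6]])  # hypothetical per-class AP values
old = "all classes %.3f mAP@0.5" % ap[:, 0].mean()
new = f"all classes {ap[:, 0].mean():.3f} mAP@0.5"
assert old == new == "all classes 0.700 mAP@0.5"
```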