diff --git a/README.zh-CN.md b/README.zh-CN.md
index 418652ed..aec15a2e 100644
--- a/README.zh-CN.md
+++ b/README.zh-CN.md
@@ -8,7 +8,7 @@

-

+

diff --git a/docs/en/hub/models.md b/docs/en/hub/models.md
index 1533a19f..45cc7881 100644
--- a/docs/en/hub/models.md
+++ b/docs/en/hub/models.md
@@ -66,7 +66,7 @@ In this step, you have to choose the project in which you want to create your mo
!!! info
- You can read more about the available [YOLO models](https://docs.ultralytics.com/models) and architectures in our documentation.
+ You can read more about the available [YOLO models](https://docs.ultralytics.com/models/) and architectures in our documentation.
By default, your model will use a pre-trained model (trained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset) to reduce training time. You can change this behavior and tweak your model's configuration by opening the **Advanced Model Configuration** accordion.
diff --git a/docs/en/index.md b/docs/en/index.md
index 3794403c..1d52a315 100644
--- a/docs/en/index.md
+++ b/docs/en/index.md
@@ -20,7 +20,7 @@ keywords: Ultralytics, YOLO, YOLO11, object detection, image segmentation, deep

-

+

diff --git a/docs/overrides/javascript/extra.js b/docs/overrides/javascript/extra.js
index 916cc8da..08771104 100644
--- a/docs/overrides/javascript/extra.js
+++ b/docs/overrides/javascript/extra.js
@@ -30,7 +30,9 @@ checkAutoTheme();
document.addEventListener("DOMContentLoaded", () => {
const autoThemeInput = document.getElementById("__palette_1");
autoThemeInput?.addEventListener("click", () => {
- if (autoThemeInput.checked) setTimeout(checkAutoTheme);
+ if (autoThemeInput.checked) {
+ setTimeout(checkAutoTheme);
+ }
});
});
diff --git a/ultralytics/nn/tasks.py b/ultralytics/nn/tasks.py
index 1e69a8f2..c1a24c34 100644
--- a/ultralytics/nn/tasks.py
+++ b/ultralytics/nn/tasks.py
@@ -960,10 +960,8 @@ def parse_model(d, ch, verbose=True): # model_dict, input_channels(3)
m = getattr(torch.nn, m[3:]) if "nn." in m else globals()[m] # get module
for j, a in enumerate(args):
if isinstance(a, str):
- try:
+ with contextlib.suppress(ValueError):
args[j] = locals()[a] if a in locals() else ast.literal_eval(a)
- except ValueError:
- pass
n = n_ = max(round(n * depth), 1) if n > 1 else n # depth gain
if m in {
Classify,
@@ -1141,24 +1139,16 @@ def guess_model_task(model):
# Guess from model cfg
if isinstance(model, dict):
- try:
+ with contextlib.suppress(Exception):
return cfg2task(model)
- except Exception:
- pass
-
# Guess from PyTorch model
if isinstance(model, nn.Module): # PyTorch model
for x in "model.args", "model.model.args", "model.model.model.args":
- try:
+ with contextlib.suppress(Exception):
return eval(x)["task"]
- except Exception:
- pass
for x in "model.yaml", "model.model.yaml", "model.model.model.yaml":
- try:
+ with contextlib.suppress(Exception):
return cfg2task(eval(x))
- except Exception:
- pass
-
for m in model.modules():
if isinstance(m, Segment):
return "segment"
diff --git a/ultralytics/solutions/object_counter.py b/ultralytics/solutions/object_counter.py
index 94e7d87c..d52acda7 100644
--- a/ultralytics/solutions/object_counter.py
+++ b/ultralytics/solutions/object_counter.py
@@ -80,37 +80,33 @@ class ObjectCounter(BaseSolution):
else: # Moving left
self.out_count += 1
self.classwise_counts[self.names[cls]]["OUT"] += 1
- else:
- # Horizontal region: Compare y-coordinates to determine direction
- if current_centroid[1] > prev_position[1]: # Moving downward
- self.in_count += 1
- self.classwise_counts[self.names[cls]]["IN"] += 1
- else: # Moving upward
- self.out_count += 1
- self.classwise_counts[self.names[cls]]["OUT"] += 1
+ # Horizontal region: Compare y-coordinates to determine direction
+ elif current_centroid[1] > prev_position[1]: # Moving downward
+ self.in_count += 1
+ self.classwise_counts[self.names[cls]]["IN"] += 1
+ else: # Moving upward
+ self.out_count += 1
+ self.classwise_counts[self.names[cls]]["OUT"] += 1
self.counted_ids.append(track_id)
elif len(self.region) > 2: # Polygonal region
polygon = self.Polygon(self.region)
if polygon.contains(self.Point(current_centroid)):
# Determine motion direction for vertical or horizontal polygons
- region_width = max([p[0] for p in self.region]) - min([p[0] for p in self.region])
- region_height = max([p[1] for p in self.region]) - min([p[1] for p in self.region])
+ region_width = max(p[0] for p in self.region) - min(p[0] for p in self.region)
+ region_height = max(p[1] for p in self.region) - min(p[1] for p in self.region)
- if region_width < region_height: # Vertical-oriented polygon
- if current_centroid[0] > prev_position[0]: # Moving right
- self.in_count += 1
- self.classwise_counts[self.names[cls]]["IN"] += 1
- else: # Moving left
- self.out_count += 1
- self.classwise_counts[self.names[cls]]["OUT"] += 1
- else: # Horizontal-oriented polygon
- if current_centroid[1] > prev_position[1]: # Moving downward
- self.in_count += 1
- self.classwise_counts[self.names[cls]]["IN"] += 1
- else: # Moving upward
- self.out_count += 1
- self.classwise_counts[self.names[cls]]["OUT"] += 1
+ if (
+ region_width < region_height
+ and current_centroid[0] > prev_position[0]
+ or region_width >= region_height
+ and current_centroid[1] > prev_position[1]
+ ): # Vertical polygon: moving right, or horizontal polygon: moving downward
+ self.in_count += 1
+ self.classwise_counts[self.names[cls]]["IN"] += 1
+ else: # Vertical polygon: moving left, or horizontal polygon: moving upward
+ self.out_count += 1
+ self.classwise_counts[self.names[cls]]["OUT"] += 1
self.counted_ids.append(track_id)
def store_classwise_counts(self, cls):
diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py
index 4bacc79a..b413297b 100644
--- a/ultralytics/utils/torch_utils.py
+++ b/ultralytics/utils/torch_utils.py
@@ -675,7 +675,7 @@ def profile(input, ops, n=10, device=None, max_num_obj=0):
torch.randn(
x.shape[0],
max_num_obj,
- int(sum([(x.shape[-1] / s) * (x.shape[-2] / s) for s in m.stride.tolist()])),
+ int(sum((x.shape[-1] / s) * (x.shape[-2] / s) for s in m.stride.tolist())),
device=device,
dtype=torch.float32,
)