PyCharm Code Inspect fixes (#18392)

Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>

This commit is contained in:
parent d35860d4a1
commit e5e91967d9

31 changed files with 72 additions and 72 deletions
@@ -86,7 +86,7 @@ SOLUTIONS_HELP_MSG = f"""
         yolo solutions count source="path/to/video/file.mp4" region=[(20, 400), (1080, 400), (1080, 360), (20, 360)]

     2. Call heatmaps solution
-        yolo solutions heatmap colormap=cv2.COLORMAP_PARAULA model=yolo11n.pt
+        yolo solutions heatmap colormap=cv2.COLORMAP_PARULA model=yolo11n.pt

     3. Call queue management solution
         yolo solutions queue region=[(20, 400), (1080, 400), (1080, 360), (20, 360)] model=yolo11n.pt
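The dropped line referenced cv2.COLORMAP_PARAULA, which is not an OpenCV constant; the valid name is cv2.COLORMAP_PARULA. A minimal sketch, assuming opencv-python and NumPy are installed (the grayscale array is synthetic, not repo data), that would raise an AttributeError under the old spelling:

import cv2
import numpy as np

gray = np.random.randint(0, 256, (64, 64), dtype=np.uint8)  # synthetic single-channel image
heat = cv2.applyColorMap(gray, cv2.COLORMAP_PARULA)  # cv2.COLORMAP_PARAULA does not exist
print(heat.shape)  # (64, 64, 3) BGR heatmap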
@@ -11,7 +11,7 @@
 path: ../datasets/lvis # dataset root dir
 train: train.txt # train images (relative to 'path') 100170 images
 val: val.txt # val images (relative to 'path') 19809 images
-minival: minival.txt # minval images (relative to 'path') 5000 images
+minival: minival.txt # minival images (relative to 'path') 5000 images

 names:
   0: aerosol can/spray can
@@ -12,7 +12,7 @@ colormap: # (int | str) colormap for heatmap, Only OPENCV supported colormaps c
 # Workouts monitoring settings -----------------------------------------------------------------------------------------
 up_angle: 145.0 # (float) Workouts up_angle for counts, 145.0 is default value.
 down_angle: 90 # (float) Workouts down_angle for counts, 90 is default value. Y
-kpts: [6, 8, 10] # (list[int]) keypoints for workouts monitoring, i.e. for pushups kpts have values of [6, 8, 10].
+kpts: [6, 8, 10] # (list[int]) keypoints for workouts monitoring, i.e. for push-ups kpts have values of [6, 8, 10].

 # Analytics settings ---------------------------------------------------------------------------------------------------
 analytics_type: "line" # (str) analytics type i.e "line", "pie", "bar" or "area" charts.
@@ -441,7 +441,8 @@ class BaseMixTransform:
         """
         raise NotImplementedError

-    def _update_label_text(self, labels):
+    @staticmethod
+    def _update_label_text(labels):
         """
         Updates label text and class IDs for mixed labels in image augmentation.

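This is the first of several hunks applying PyCharm's "Method may be static" inspection: a method that never reads or writes self loses its self parameter and gains @staticmethod. A minimal sketch of the pattern with made-up names (not the repo's classes), showing that call sites keep working:

class Transform:
    def apply(self, labels):
        """Regular method: it uses self, so it stays an instance method."""
        return self._merge(labels)

    @staticmethod
    def _merge(labels):
        """Touches no instance state, so it can be a staticmethod with no self argument."""
        return {"texts": sorted({t for lb in labels for t in lb.get("texts", [])})}

# Both Transform()._merge([...]) and Transform._merge([...]) resolve to the same function.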
@@ -1259,7 +1260,8 @@ class RandomPerspective:
         labels["resized_shape"] = img.shape[:2]
         return labels

-    def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
+    @staticmethod
+    def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
         """
         Compute candidate boxes for further processing based on size and aspect ratio criteria.

@@ -1598,7 +1600,8 @@ class LetterBox:
         else:
             return img

-    def _update_labels(self, labels, ratio, padw, padh):
+    @staticmethod
+    def _update_labels(labels, ratio, padw, padh):
         """
         Updates labels after applying letterboxing to an image.

@@ -68,7 +68,7 @@ class YOLODataset(BaseDataset):
         Cache dataset labels, check images and read shapes.

         Args:
-            path (Path): Path where to save the cache file. Default is Path('./labels.cache').
+            path (Path): Path where to save the cache file. Default is Path("./labels.cache").

         Returns:
             (dict): labels.
@@ -219,7 +219,7 @@ class YOLODataset(BaseDataset):
         segment_resamples = 100 if self.use_obb else 1000
         if len(segments) > 0:
             # make sure segments interpolate correctly if original length is greater than segment_resamples
-            max_len = max([len(s) for s in segments])
+            max_len = max(len(s) for s in segments)
             segment_resamples = (max_len + 1) if segment_resamples < max_len else segment_resamples
             # list[np.array(segment_resamples, 2)] * num_samples
             segments = np.stack(resample_segments(segments, n=segment_resamples), axis=0)
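max([...]) builds a throwaway list just to find its maximum; max() accepts a generator directly, which is what the inspection suggests. A quick check with made-up segment arrays (NumPy assumed):

import numpy as np

segments = [np.zeros((n, 2)) for n in (3, 7, 5)]  # fake polygon segments of varying length
assert max(len(s) for s in segments) == max([len(s) for s in segments]) == 7  # same result, no temporary list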
@@ -11,8 +11,8 @@
 python - <<EOF
 from ultralytics.utils.downloads import attempt_download_asset

-assets = [f'yolov8{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '-cls', '-seg', '-pose')]
+assets = [f"yolov8{size}{suffix}.pt" for size in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose")]
 for x in assets:
-    attempt_download_asset(f'weights/{x}')
+    attempt_download_asset(f"weights/{x}")

 EOF
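The Dockerfile change is purely quote style inside the heredoc, so the downloaded asset list is unchanged. For reference, a standalone run of that comprehension (no download involved) yields 20 names, 5 sizes by 4 suffixes:

assets = [f"yolov8{size}{suffix}.pt" for size in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose")]
print(len(assets))  # 20
print(assets[:4])   # ['yolov8n.pt', 'yolov8n-cls.pt', 'yolov8n-seg.pt', 'yolov8n-pose.pt']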
@@ -813,7 +813,7 @@ class Exporter:
         workspace = int(self.args.workspace * (1 << 30)) if self.args.workspace is not None else 0
         if is_trt10 and workspace > 0:
             config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace)
-        elif workspace > 0 and not is_trt10:  # TensorRT versions 7, 8
+        elif workspace > 0:  # TensorRT versions 7, 8
             config.max_workspace_size = workspace
         flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
         network = builder.create_network(flag)
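Dropping `and not is_trt10` does not change behaviour: the elif is only reached when the first branch failed, and with workspace > 0 that can only happen when is_trt10 is False. A tiny exhaustive check of the two formulations with plain booleans (no TensorRT required; function names are made up):

def old_branch(is_trt10, workspace):
    if is_trt10 and workspace > 0:
        return "trt10"
    elif workspace > 0 and not is_trt10:
        return "trt7_8"
    return "none"

def new_branch(is_trt10, workspace):
    if is_trt10 and workspace > 0:
        return "trt10"
    elif workspace > 0:
        return "trt7_8"
    return "none"

# The same branch is taken for every combination of inputs.
assert all(old_branch(t, w) == new_branch(t, w) for t in (True, False) for w in (0, 1 << 30))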
@@ -1170,6 +1170,4 @@ class Model(nn.Module):
             >>> print(model.stride)
             >>> print(model.task)
         """
-        if name == "model":
-            return self._modules["model"]
-        return getattr(self.model, name)
+        return self._modules["model"] if name == "model" else getattr(self.model, name)
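Collapsing the if/return pair into one conditional expression keeps __getattr__'s delegation behaviour identical. A stripped-down sketch of the same pattern using toy classes (not the repo's Model/nn.Module machinery):

class Engine:
    stride = 32
    task = "detect"

class Wrapper:
    def __init__(self):
        self._modules = {"model": Engine()}

    def __getattr__(self, name):
        # One expression instead of if/return: same lookups, same results.
        return self._modules["model"] if name == "model" else getattr(self._modules["model"], name)

w = Wrapper()
print(w.stride, w.task)  # 32 detect
print(w.model)           # the wrapped Engine instance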
@@ -245,7 +245,7 @@ class BaseValidator:

                 cost_matrix = iou * (iou >= threshold)
                 if cost_matrix.any():
-                    labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix, maximize=True)
+                    labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix)
                     valid = cost_matrix[labels_idx, detections_idx] > 0
                     if valid.any():
                         correct[detections_idx[valid], i] = True
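For context, scipy.optimize.linear_sum_assignment solves the assignment problem over a cost matrix and returns matched row and column indices; with maximize=True it picks the pairing with the largest total value, while the default minimizes. A small standalone example on a synthetic IoU matrix (not repo data):

import numpy as np
import scipy.optimize

iou = np.array([[0.9, 0.1], [0.2, 0.8]])  # fake label-vs-detection IoU matrix
cost_matrix = iou * (iou >= 0.5)          # zero out pairs below the IoU threshold
rows, cols = scipy.optimize.linear_sum_assignment(cost_matrix, maximize=True)
print(rows, cols)  # [0 1] [0 1]: label 0 -> detection 0, label 1 -> detection 1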
@@ -955,7 +955,8 @@ class TinyViT(nn.Module):

         self.apply(_check_lr_scale)

-    def _init_weights(self, m):
+    @staticmethod
+    def _init_weights(m):
         """Initializes weights for linear and normalization layers in the TinyViT model."""
         if isinstance(m, nn.Linear):
             # NOTE: This initialization is needed only for training.
@@ -1377,7 +1377,7 @@ class SAM2VideoPredictor(SAM2Predictor):
         if "maskmem_pos_enc" not in model_constants:
             assert isinstance(out_maskmem_pos_enc, list)
             # only take the slice for one object, since it's same across objects
-            maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc]
+            maskmem_pos_enc = [x[:1].clone() for x in out_maskmem_pos_enc]
             model_constants["maskmem_pos_enc"] = maskmem_pos_enc
         else:
             maskmem_pos_enc = model_constants["maskmem_pos_enc"]
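x[:1] is simply the idiomatic spelling of x[0:1]; both take a length-1 slice along the first dimension without copying. A one-line sanity check with a plain tensor (PyTorch assumed; any array library behaves the same):

import torch

x = torch.arange(12).reshape(3, 4)
assert torch.equal(x[0:1], x[:1])  # identical 1x4 views, only the spelling differs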
@@ -429,10 +429,7 @@ class AutoBackend(nn.Module):

            import MNN

-           config = {}
-           config["precision"] = "low"
-           config["backend"] = "CPU"
-           config["numThread"] = (os.cpu_count() + 1) // 2
+           config = {"precision": "low", "backend": "CPU", "numThread": (os.cpu_count() + 1) // 2}
            rt = MNN.nn.create_runtime_manager((config,))
            net = MNN.nn.load_module_from_file(w, [], [], runtime_manager=rt, rearrange=True)

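Building the config in a single literal is what PyCharm's "dictionary creation could be rewritten as a dictionary literal" inspection proposes; the resulting object is identical. A quick equivalence check in plain Python (no MNN needed):

import os

config_a = {}
config_a["precision"] = "low"
config_a["backend"] = "CPU"
config_a["numThread"] = (os.cpu_count() + 1) // 2

config_b = {"precision": "low", "backend": "CPU", "numThread": (os.cpu_count() + 1) // 2}
assert config_a == config_b  # same contents, one statement instead of four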
@@ -181,12 +181,8 @@ class Inference:
 if __name__ == "__main__":
     import sys  # Import the sys module for accessing command-line arguments

-    model = None  # Initialize the model variable as None
-
     # Check if a model name is provided as a command-line argument
     args = len(sys.argv)
-    if args > 1:
-        model = args  # Assign the first argument as the model name
-
+    model = args if args > 1 else None
     # Create an instance of the Inference class and run inference
     Inference(model=model).inference()
@@ -440,7 +440,8 @@ class ProfileModels:
         print(f"Profiling: {sorted(files)}")
         return [Path(file) for file in sorted(files)]

-    def get_onnx_model_info(self, onnx_file: str):
+    @staticmethod
+    def get_onnx_model_info(onnx_file: str):
         """Extracts metadata from an ONNX model file including parameters, GFLOPs, and input shape."""
         return 0.0, 0.0, 0.0, 0.0  # return (num_layers, num_params, num_gradients, num_flops)

@@ -138,7 +138,7 @@ def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX"), exist_ok=Fals
     If a path is not provided, the function will use the parent directory of the zipfile as the default path.

     Args:
-        file (str): The path to the zipfile to be extracted.
+        file (str | Path): The path to the zipfile to be extracted.
         path (str, optional): The path to extract the zipfile to. Defaults to None.
         exclude (tuple, optional): A tuple of filename strings to be excluded. Defaults to ('.DS_Store', '__MACOSX').
         exist_ok (bool, optional): Whether to overwrite existing contents if they exist. Defaults to False.
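The widened `str | Path` annotation documents that the function already works with either type. A minimal sketch of the usual normalization pattern (hypothetical helper, not the repo's unzip_file implementation):

from pathlib import Path
from zipfile import ZipFile

def list_zip_contents(file: "str | Path") -> list:
    """Hypothetical helper: accept a str or a pathlib.Path by normalizing up front."""
    file = Path(file)  # Path(Path(...)) is a no-op, so both input types converge here
    with ZipFile(file) as zf:
        return zf.namelist()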
@@ -28,7 +28,7 @@ to_4tuple = _ntuple(4)
 # `ltwh` means left top and width, height(COCO format)
 _formats = ["xyxy", "xywh", "ltwh"]

-__all__ = ("Bboxes",)  # tuple or list
+__all__ = ("Bboxes", "Instances")  # tuple or list


 class Bboxes:
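Adding Instances to __all__ matters because __all__ defines what a star-import exposes, and PyCharm flags public names missing from it. A self-contained illustration with a hypothetical module (names made up):

# shapes.py (hypothetical module)
__all__ = ("Bboxes", "Instances")  # names exported by `from shapes import *`

class Bboxes: ...
class Instances: ...

# In another file, `from shapes import *` now binds both classes;
# with the old __all__ = ("Bboxes",), Instances would have been silently skipped.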
@@ -545,7 +545,8 @@ class Annotator:
         """Save the annotated image to 'filename'."""
         cv2.imwrite(filename, np.asarray(self.im))

-    def get_bbox_dimension(self, bbox=None):
+    @staticmethod
+    def get_bbox_dimension(bbox=None):
         """
         Calculate the area of a bounding box.
