Add docformatter to pre-commit (#5279)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Burhan <62214284+Burhan-Q@users.noreply.github.com>
parent c7aa83da31
commit 7517667a33
90 changed files with 1396 additions and 497 deletions
ultralytics/nn/autobackend.py
@@ -20,7 +20,11 @@ from ultralytics.utils.downloads import attempt_download_asset, is_url
 
 
 def check_class_names(names):
-    """Check class names. Map imagenet class codes to human-readable names if required. Convert lists to dicts."""
+    """
+    Check class names.
+
+    Map imagenet class codes to human-readable names if required. Convert lists to dicts.
+    """
     if isinstance(names, list):  # names is a list
         names = dict(enumerate(names))  # convert to dict
     if isinstance(names, dict):
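For reference, the behavior the reflowed docstring describes is unchanged by this hunk: list inputs are converted to index-keyed dicts before any further name mapping. A minimal sketch of that list-to-dict step, using hypothetical class names chosen only for illustration:

# Minimal sketch of the list-to-dict conversion shown in the context lines above
# (class names are hypothetical, not taken from the commit).
names = ["person", "bicycle", "car"]
if isinstance(names, list):  # names is a list
    names = dict(enumerate(names))  # convert to dict
print(names)  # {0: 'person', 1: 'bicycle', 2: 'car'}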
@@ -37,6 +41,32 @@ def check_class_names(names):
 
 
 class AutoBackend(nn.Module):
+    """
+    Handles dynamic backend selection for running inference using Ultralytics YOLO models.
+
+    The AutoBackend class is designed to provide an abstraction layer for various inference engines. It supports a wide
+    range of formats, each with specific naming conventions as outlined below:
+
+        Supported Formats and Naming Conventions:
+            | Format                | File Suffix      |
+            |-----------------------|------------------|
+            | PyTorch               | *.pt             |
+            | TorchScript           | *.torchscript    |
+            | ONNX Runtime          | *.onnx           |
+            | ONNX OpenCV DNN       | *.onnx (dnn=True)|
+            | OpenVINO              | *openvino_model/ |
+            | CoreML                | *.mlpackage      |
+            | TensorRT              | *.engine         |
+            | TensorFlow SavedModel | *_saved_model    |
+            | TensorFlow GraphDef   | *.pb             |
+            | TensorFlow Lite       | *.tflite         |
+            | TensorFlow Edge TPU   | *_edgetpu.tflite |
+            | PaddlePaddle          | *_paddle_model   |
+            | ncnn                  | *_ncnn_model     |
+
+    This class offers dynamic backend switching capabilities based on the input model format, making it easier to deploy
+    models across various platforms.
+    """
 
     @torch.no_grad()
     def __init__(self,
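As the new class docstring says, the backend is inferred from the weights path, so switching inference engines is mostly a matter of pointing at a different export artifact. A hedged sketch of that idea; the file names are hypothetical and the exported artifacts are assumed to already exist on disk:

# Hedged sketch: each path selects a different backend per the suffix table above.
# File names are hypothetical and the exports are assumed to exist already.
from ultralytics.nn.autobackend import AutoBackend

pt_model   = AutoBackend("yolov8n.pt")      # PyTorch
onnx_model = AutoBackend("yolov8n.onnx")    # ONNX Runtime (dnn=False by default)
trt_model  = AutoBackend("yolov8n.engine")  # TensorRT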
@@ -48,33 +78,16 @@ class AutoBackend(nn.Module):
                  fuse=True,
                  verbose=True):
         """
-        MultiBackend class for python inference on various platforms using Ultralytics YOLO.
+        Initialize the AutoBackend for inference.
 
         Args:
-            weights (str): The path to the weights file. Default: 'yolov8n.pt'
-            device (torch.device): The device to run the model on.
-            dnn (bool): Use OpenCV DNN module for inference if True, defaults to False.
-            data (str | Path | optional): Additional data.yaml file for class names.
-            fp16 (bool): If True, use half precision. Default: False
-            fuse (bool): Whether to fuse the model or not. Default: True
-            verbose (bool): Whether to run in verbose mode or not. Default: True
-
-        Supported formats and their naming conventions:
-            | Format                | Suffix           |
-            |-----------------------|------------------|
-            | PyTorch               | *.pt             |
-            | TorchScript           | *.torchscript    |
-            | ONNX Runtime          | *.onnx           |
-            | ONNX OpenCV DNN       | *.onnx dnn=True  |
-            | OpenVINO              | *.xml            |
-            | CoreML                | *.mlpackage      |
-            | TensorRT              | *.engine         |
-            | TensorFlow SavedModel | *_saved_model    |
-            | TensorFlow GraphDef   | *.pb             |
-            | TensorFlow Lite       | *.tflite         |
-            | TensorFlow Edge TPU   | *_edgetpu.tflite |
-            | PaddlePaddle          | *_paddle_model   |
-            | ncnn                  | *_ncnn_model     |
+            weights (str): Path to the model weights file. Defaults to 'yolov8n.pt'.
+            device (torch.device): Device to run the model on. Defaults to CPU.
+            dnn (bool): Use OpenCV DNN module for ONNX inference. Defaults to False.
+            data (str | Path | optional): Path to the additional data.yaml file containing class names. Optional.
+            fp16 (bool): Enable half-precision inference. Supported only on specific backends. Defaults to False.
+            fuse (bool): Fuse Conv2D + BatchNorm layers for optimization. Defaults to True.
+            verbose (bool): Enable verbose logging. Defaults to True.
         """
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
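Putting the documented constructor arguments together, a typical initialize-then-infer sequence might look like the sketch below. It is an assumption-laden illustration (default yolov8n.pt weights, CPU device, dummy input tensor), not code from the commit:

# Sketch of constructing AutoBackend with the documented arguments and running a
# dummy forward pass. Input shape and values are placeholders, not from this diff.
import torch
from ultralytics.nn.autobackend import AutoBackend

model = AutoBackend(weights="yolov8n.pt", device=torch.device("cpu"),
                    dnn=False, fp16=False, fuse=True, verbose=True)
model.warmup(imgsz=(1, 3, 640, 640))  # warm up with a dummy pass, see warmup() below
im = torch.zeros(1, 3, 640, 640)      # placeholder image batch (N, C, H, W)
preds = model(im)                     # backend selected from the *.pt suffix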
@@ -440,14 +453,14 @@ class AutoBackend(nn.Module):
 
     def from_numpy(self, x):
         """
-         Convert a numpy array to a tensor.
+        Convert a numpy array to a tensor.
 
-         Args:
-             x (np.ndarray): The array to be converted.
+        Args:
+            x (np.ndarray): The array to be converted.
 
-         Returns:
-             (torch.Tensor): The converted tensor
-         """
+        Returns:
+            (torch.Tensor): The converted tensor
+        """
         return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x
 
     def warmup(self, imgsz=(1, 3, 640, 640)):
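The from_numpy docstring covers a small helper: numpy arrays are wrapped as torch tensors on the model's device, anything else is returned untouched. Continuing the earlier sketch (the output shape is a hypothetical example, not taken from this diff):

# from_numpy() wraps numpy arrays as tensors on model.device; non-arrays pass through.
# The (1, 84, 8400) shape is a hypothetical example output, not taken from this diff.
import numpy as np
import torch

raw = np.zeros((1, 84, 8400), dtype=np.float32)
t = model.from_numpy(raw)                # -> torch.Tensor on model.device
same = model.from_numpy(torch.zeros(3))  # already a tensor, returned unchanged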
@@ -476,7 +489,7 @@ class AutoBackend(nn.Module):
     @staticmethod
     def _model_type(p='path/to/model.pt'):
         """
-        This function takes a path to a model file and returns the model type
+        This function takes a path to a model file and returns the model type.
 
         Args:
             p: path to the model file. Defaults to path/to/model.pt
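The hunk is truncated here, but the helper it documents is a static method: given a weights path it reports which of the supported formats the path corresponds to. A heavily hedged sketch of calling it; the boolean-list return value and its ordering are assumptions based on the format table above, not shown in this diff:

# Assumption: _model_type() returns one boolean per supported format, ordered like
# the format table in the class docstring; neither detail is shown in this hunk.
from ultralytics.nn.autobackend import AutoBackend

flags = AutoBackend._model_type("path/to/model.onnx")
print(flags)  # expected: a single True flag, at the position for ONNX Runtime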