ultralytics 8.3.65 Rockchip RKNN Integration for Ultralytics YOLO models (#16308)
Signed-off-by: Francesco Mattioli <Francesco.mttl@gmail.com>
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: Burhan <62214284+Burhan-Q@users.noreply.github.com>
Co-authored-by: Lakshantha Dissanayake <lakshantha@ultralytics.com>
Co-authored-by: Burhan <Burhan-Q@users.noreply.github.com>
Co-authored-by: Laughing-q <1185102784@qq.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com>
Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com>
Co-authored-by: Lakshantha Dissanayake <lakshanthad@yahoo.com>
Co-authored-by: Francesco Mattioli <Francesco.mttl@gmail.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
This commit is contained in:
parent 617dea8e25, commit b5e0cee943
41 changed files with 390 additions and 118 deletions
@@ -19,6 +19,7 @@ PaddlePaddle | `paddle` | yolo11n_paddle_model/
 MNN                     | `mnn`                 | yolo11n.mnn
 NCNN                    | `ncnn`                | yolo11n_ncnn_model/
 IMX                     | `imx`                 | yolo11n_imx_model/
+RKNN                    | `rknn`                | yolo11n_rknn_model/

 Requirements:
     $ pip install "ultralytics[export]"

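To make the new table row concrete, here is a minimal export sketch using the RKNN format added by this commit. The output directory name follows the `_rknn_model` suffix in the table above; running it assumes an environment where `rknn-toolkit2` can be installed.

```python
from ultralytics import YOLO

# Export a PyTorch checkpoint to Rockchip RKNN format.
# 'name' selects the target Rockchip processor; the exporter defaults to 'rk3588' when it is omitted.
model = YOLO("yolo11n.pt")
rknn_dir = model.export(format="rknn", name="rk3588")  # -> 'yolo11n_rknn_model/'
print(rknn_dir)
```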
@@ -78,11 +79,13 @@ from ultralytics.nn.tasks import DetectionModel, SegmentationModel, WorldModel
 from ultralytics.utils import (
     ARM64,
     DEFAULT_CFG,
+    IS_COLAB,
     IS_JETSON,
     LINUX,
     LOGGER,
     MACOS,
     PYTHON_VERSION,
+    RKNN_CHIPS,
     ROOT,
     WINDOWS,
     __version__,

@@ -122,6 +125,7 @@ def export_formats():
         ["MNN", "mnn", ".mnn", True, True, ["batch", "half", "int8"]],
         ["NCNN", "ncnn", "_ncnn_model", True, True, ["batch", "half"]],
         ["IMX", "imx", "_imx_model", True, True, ["int8"]],
+        ["RKNN", "rknn", "_rknn_model", False, False, ["batch", "name"]],
     ]
     return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"], zip(*x)))

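As a quick check of the new row, `export_formats()` can be inspected at runtime. A sketch; the dict-of-columns shape follows the `return dict(zip(...))` line above.

```python
from ultralytics.engine.exporter import export_formats

fmts = export_formats()  # parallel columns keyed by "Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"
for name, arg, suffix, cpu, gpu, args in zip(*fmts.values()):
    if arg == "rknn":
        # New RKNN entry: suffix '_rknn_model', CPU/GPU inference flags False, arguments 'batch' and 'name'
        print(name, arg, suffix, cpu, gpu, args)
```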
@@ -226,22 +230,10 @@ class Exporter:
         flags = [x == fmt for x in fmts]
         if sum(flags) != 1:
             raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
-        (
-            jit,
-            onnx,
-            xml,
-            engine,
-            coreml,
-            saved_model,
-            pb,
-            tflite,
-            edgetpu,
-            tfjs,
-            paddle,
-            mnn,
-            ncnn,
-            imx,
-        ) = flags  # export booleans
+        (jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, mnn, ncnn, imx, rknn) = (
+            flags  # export booleans
+        )
+
         is_tf_format = any((saved_model, pb, tflite, edgetpu, tfjs))

         # Device

@@ -277,6 +269,16 @@ class Exporter:
         if self.args.optimize:
             assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
             assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
+        if rknn:
+            if not self.args.name:
+                LOGGER.warning(
+                    "WARNING ⚠️ Rockchip RKNN export requires a missing 'name' arg for processor type. Using default name='rk3588'."
+                )
+                self.args.name = "rk3588"
+            self.args.name = self.args.name.lower()
+            assert self.args.name in RKNN_CHIPS, (
+                f"Invalid processor name '{self.args.name}' for Rockchip RKNN export. Valid names are {RKNN_CHIPS}."
+            )
         if self.args.int8 and tflite:
             assert not getattr(model, "end2end", False), "TFLite INT8 export not supported for end2end models."
         if edgetpu:

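The new guard above validates `name` against `RKNN_CHIPS` from `ultralytics.utils`. A small sketch of what a caller can check before exporting; the exact chip list is whatever that constant contains.

```python
from ultralytics.utils import RKNN_CHIPS

print(sorted(RKNN_CHIPS))      # accepted values for the 'name' export argument
assert "rk3588" in RKNN_CHIPS  # the default the exporter falls back to when 'name' is missing
```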
@@ -417,6 +419,8 @@ class Exporter:
             f[12], _ = self.export_ncnn()
         if imx:
             f[13], _ = self.export_imx()
+        if rknn:
+            f[14], _ = self.export_rknn()

         # Finish
         f = [str(x) for x in f if x]  # filter out '' and None

@@ -746,7 +750,7 @@ class Exporter:
             model = IOSDetectModel(self.model, self.im) if self.args.nms else self.model
         else:
             if self.args.nms:
-                LOGGER.warning(f"{prefix} WARNING ⚠️ 'nms=True' is only available for Detect models like 'yolov8n.pt'.")
+                LOGGER.warning(f"{prefix} WARNING ⚠️ 'nms=True' is only available for Detect models like 'yolo11n.pt'.")
                 # TODO CoreML Segment and Pose model pipelining
             model = self.model

@@ -1141,6 +1145,35 @@ class Exporter:
         return f, None

+    @try_export
+    def export_rknn(self, prefix=colorstr("RKNN:")):
+        """YOLO RKNN model export."""
+        LOGGER.info(f"\n{prefix} starting export with rknn-toolkit2...")
+
+        check_requirements("rknn-toolkit2")
+        if IS_COLAB:
+            # Prevent 'exit' from closing the notebook https://github.com/airockchip/rknn-toolkit2/issues/259
+            import builtins
+
+            builtins.exit = lambda: None
+
+        from rknn.api import RKNN
+
+        f, _ = self.export_onnx()
+
+        platform = self.args.name
+
+        export_path = Path(f"{Path(f).stem}_rknn_model")
+        export_path.mkdir(exist_ok=True)
+
+        rknn = RKNN(verbose=False)
+        rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform=platform)
+        _ = rknn.load_onnx(model=f)
+        _ = rknn.build(do_quantization=False)  # TODO: Add quantization support
+        f = f.replace(".onnx", f"-{platform}.rknn")
+        _ = rknn.export_rknn(f"{export_path / f}")
+        yaml_save(export_path / "metadata.yaml", self.metadata)
+        return export_path, None
+
     def export_imx(self, prefix=colorstr("IMX:")):
         """YOLO IMX export."""
         gptq = False

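Once `export_rknn()` has produced the `*_rknn_model` directory (ONNX is converted through the `rknn-toolkit2` `load_onnx`/`build`/`export_rknn` pipeline shown above), the directory can be loaded back for inference. A sketch, assuming the companion AutoBackend changes elsewhere in this commit and a Rockchip device with the RKNN runtime installed.

```python
from ultralytics import YOLO

# Load the exported directory (contains the .rknn file plus the metadata.yaml written by yaml_save above)
rknn_model = YOLO("yolo11n_rknn_model")

# Run inference on the Rockchip NPU
results = rknn_model("https://ultralytics.com/images/bus.jpg", imgsz=640)
print(results[0].boxes)
```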
@@ -194,7 +194,7 @@ class Model(nn.Module):
             (bool): True if the model string is a valid Triton Server URL, False otherwise.

         Examples:
-            >>> Model.is_triton_model("http://localhost:8000/v2/models/yolov8n")
+            >>> Model.is_triton_model("http://localhost:8000/v2/models/yolo11n")
             True
             >>> Model.is_triton_model("yolo11n.pt")
             False

@@ -247,7 +247,7 @@ class Model(nn.Module):

         Examples:
             >>> model = Model()
-            >>> model._new("yolov8n.yaml", task="detect", verbose=True)
+            >>> model._new("yolo11n.yaml", task="detect", verbose=True)
         """
         cfg_dict = yaml_model_load(cfg)
         self.cfg = cfg

@@ -283,7 +283,7 @@ class Model(nn.Module):
         """
         if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")):
             weights = checks.check_file(weights, download_dir=SETTINGS["weights_dir"])  # download and return local file
-        weights = checks.check_model_file_from_stem(weights)  # add suffix, i.e. yolov8n -> yolov8n.pt
+        weights = checks.check_model_file_from_stem(weights)  # add suffix, i.e. yolo11n -> yolo11n.pt

         if Path(weights).suffix == ".pt":
             self.model, self.ckpt = attempt_load_one_weight(weights)

@@ -313,7 +313,7 @@ class Model(nn.Module):
         Examples:
             >>> model = Model("yolo11n.pt")
             >>> model._check_is_pytorch_model()  # No error raised
-            >>> model = Model("yolov8n.onnx")
+            >>> model = Model("yolo11n.onnx")
             >>> model._check_is_pytorch_model()  # Raises TypeError
         """
         pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == ".pt"

@@ -323,7 +323,7 @@ class Model(nn.Module):
                 f"model='{self.model}' should be a *.pt PyTorch model to run this method, but is a different format. "
                 f"PyTorch models can train, val, predict and export, i.e. 'model.train(data=...)', but exported "
                 f"formats like ONNX, TensorRT etc. only support 'predict' and 'val' modes, "
-                f"i.e. 'yolo predict model=yolov8n.onnx'.\nTo run CUDA or MPS inference please pass the device "
+                f"i.e. 'yolo predict model=yolo11n.onnx'.\nTo run CUDA or MPS inference please pass the device "
                 f"argument directly in your inference command, i.e. 'model.predict(source=..., device=0)'"
             )

@@ -3,7 +3,7 @@
 Run prediction on images, videos, directories, globs, YouTube, webcam, streams, etc.

 Usage - sources:
-    $ yolo mode=predict model=yolov8n.pt source=0        # webcam
+    $ yolo mode=predict model=yolo11n.pt source=0        # webcam
          img.jpg        # image
          vid.mp4        # video
          screen         # screenshot

@@ -15,19 +15,21 @@ Usage - sources:
          'rtsp://example.com/media.mp4'  # RTSP, RTMP, HTTP, TCP stream

 Usage - formats:
-    $ yolo mode=predict model=yolov8n.pt          # PyTorch
-          yolov8n.torchscript                     # TorchScript
-          yolov8n.onnx                            # ONNX Runtime or OpenCV DNN with dnn=True
-          yolov8n_openvino_model                  # OpenVINO
-          yolov8n.engine                          # TensorRT
-          yolov8n.mlpackage                       # CoreML (macOS-only)
-          yolov8n_saved_model                     # TensorFlow SavedModel
-          yolov8n.pb                              # TensorFlow GraphDef
-          yolov8n.tflite                          # TensorFlow Lite
-          yolov8n_edgetpu.tflite                  # TensorFlow Edge TPU
-          yolov8n_paddle_model                    # PaddlePaddle
-          yolov8n.mnn                             # MNN
-          yolov8n_ncnn_model                      # NCNN
+    $ yolo mode=predict model=yolo11n.pt          # PyTorch
+          yolo11n.torchscript                     # TorchScript
+          yolo11n.onnx                            # ONNX Runtime or OpenCV DNN with dnn=True
+          yolo11n_openvino_model                  # OpenVINO
+          yolo11n.engine                          # TensorRT
+          yolo11n.mlpackage                       # CoreML (macOS-only)
+          yolo11n_saved_model                     # TensorFlow SavedModel
+          yolo11n.pb                              # TensorFlow GraphDef
+          yolo11n.tflite                          # TensorFlow Lite
+          yolo11n_edgetpu.tflite                  # TensorFlow Edge TPU
+          yolo11n_paddle_model                    # PaddlePaddle
+          yolo11n.mnn                             # MNN
+          yolo11n_ncnn_model                      # NCNN
+          yolo11n_imx_model                       # Sony IMX
+          yolo11n_rknn_model                      # Rockchip RKNN
 """

 import platform

@@ -1718,7 +1718,7 @@ class OBB(BaseTensor):
         Examples:
             >>> import torch
             >>> from ultralytics import YOLO
-            >>> model = YOLO("yolov8n-obb.pt")
+            >>> model = YOLO("yolo11n-obb.pt")
             >>> results = model("path/to/image.jpg")
             >>> for result in results:
             ...     obb = result.obb

@@ -3,7 +3,7 @@
 Train a model on a dataset.

 Usage:
-    $ yolo mode=train model=yolov8n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
+    $ yolo mode=train model=yolo11n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
 """

 import gc

@@ -128,7 +128,7 @@ class BaseTrainer:
             self.args.workers = 0  # faster CPU training as time dominated by inference, not dataloading

         # Model and Dataset
-        self.model = check_model_file_from_stem(self.args.model)  # add suffix, i.e. yolov8n -> yolov8n.pt
+        self.model = check_model_file_from_stem(self.args.model)  # add suffix, i.e. yolo11n -> yolo11n.pt
         with torch_distributed_zero_first(LOCAL_RANK):  # avoid auto-downloading dataset multiple times
             self.trainset, self.testset = self.get_dataset()
         self.ema = None

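The renamed comment above describes `check_model_file_from_stem` resolving a bare model stem to a `.pt` filename. A tiny illustration of that assumed behavior, matching the comment rather than adding anything new:

```python
from ultralytics.utils.checks import check_model_file_from_stem

print(check_model_file_from_stem("yolo11n"))     # -> yolo11n.pt (suffix added for a known model stem)
print(check_model_file_from_stem("yolo11n.pt"))  # already suffixed, returned unchanged
```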
@@ -8,7 +8,7 @@ that yield the best model performance. This is particularly crucial in deep lear
 where small changes in hyperparameters can lead to significant differences in model accuracy and efficiency.

 Example:
-    Tune hyperparameters for YOLOv8n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations.
+    Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations.
     ```python
     from ultralytics import YOLO

@@ -50,7 +50,7 @@ class Tuner:
         Executes the hyperparameter evolution across multiple iterations.

         Example:
-            Tune hyperparameters for YOLOv8n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations.
+            Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations.
             ```python
             from ultralytics import YOLO

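The docstring examples above are truncated by the diff context. For orientation, a hyperparameter-tuning call of the kind they describe might look as follows; the specific arguments are assumptions, not text from this commit.

```python
from ultralytics import YOLO

# Evolve hyperparameters for YOLO11n on COCO8: 300 tuning iterations of 30 epochs each (assumed settings)
model = YOLO("yolo11n.pt")
model.tune(data="coco8.yaml", epochs=30, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
```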
@@ -3,22 +3,24 @@
 Check a model's accuracy on a test or val split of a dataset.

 Usage:
-    $ yolo mode=val model=yolov8n.pt data=coco8.yaml imgsz=640
+    $ yolo mode=val model=yolo11n.pt data=coco8.yaml imgsz=640

 Usage - formats:
-    $ yolo mode=val model=yolov8n.pt              # PyTorch
-          yolov8n.torchscript                     # TorchScript
-          yolov8n.onnx                            # ONNX Runtime or OpenCV DNN with dnn=True
-          yolov8n_openvino_model                  # OpenVINO
-          yolov8n.engine                          # TensorRT
-          yolov8n.mlpackage                       # CoreML (macOS-only)
-          yolov8n_saved_model                     # TensorFlow SavedModel
-          yolov8n.pb                              # TensorFlow GraphDef
-          yolov8n.tflite                          # TensorFlow Lite
-          yolov8n_edgetpu.tflite                  # TensorFlow Edge TPU
-          yolov8n_paddle_model                    # PaddlePaddle
-          yolov8n.mnn                             # MNN
-          yolov8n_ncnn_model                      # NCNN
+    $ yolo mode=val model=yolo11n.pt              # PyTorch
+          yolo11n.torchscript                     # TorchScript
+          yolo11n.onnx                            # ONNX Runtime or OpenCV DNN with dnn=True
+          yolo11n_openvino_model                  # OpenVINO
+          yolo11n.engine                          # TensorRT
+          yolo11n.mlpackage                       # CoreML (macOS-only)
+          yolo11n_saved_model                     # TensorFlow SavedModel
+          yolo11n.pb                              # TensorFlow GraphDef
+          yolo11n.tflite                          # TensorFlow Lite
+          yolo11n_edgetpu.tflite                  # TensorFlow Edge TPU
+          yolo11n_paddle_model                    # PaddlePaddle
+          yolo11n.mnn                             # MNN
+          yolo11n_ncnn_model                      # NCNN
+          yolo11n_imx_model                       # Sony IMX
+          yolo11n_rknn_model                      # Rockchip RKNN
 """

 import json

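Completing the picture from the validator docstring, the two new formats go through the same val entry point. A sketch; the dataset choice and the metric attribute shown are assumptions.

```python
from ultralytics import YOLO

# Validate an exported Rockchip RKNN model directory on COCO8 (assumed dataset)
metrics = YOLO("yolo11n_rknn_model").val(data="coco8.yaml", imgsz=640)
print(metrics.box.map)  # mAP50-95 for the detection task
```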