Fix undefined ‘im_array’ bug in predict.md (#8565)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com> Co-authored-by: fang_chenfang <1217690899@qq.com>
This commit is contained in:
parent
aa592efda5
commit
6bdf8dfaa2
13 changed files with 23 additions and 23 deletions
|
|
@@ -16,7 +16,7 @@ TensorFlow Lite | `tflite` | yolov8n.tflite
|
|||
TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite
|
||||
TensorFlow.js | `tfjs` | yolov8n_web_model/
|
||||
PaddlePaddle | `paddle` | yolov8n_paddle_model/
|
||||
ncnn | `ncnn` | yolov8n_ncnn_model/
|
||||
NCNN | `ncnn` | yolov8n_ncnn_model/
|
||||
|
||||
Requirements:
|
||||
$ pip install "ultralytics[export]"
|
||||
|
|
@@ -293,7 +293,7 @@ class Exporter:
|
|||
f[9], _ = self.export_tfjs()
|
||||
if paddle: # PaddlePaddle
|
||||
f[10], _ = self.export_paddle()
|
||||
if ncnn: # ncnn
|
||||
if ncnn: # NCNN
|
||||
f[11], _ = self.export_ncnn()
|
||||
|
||||
# Finish
|
||||
|
|
@@ -496,14 +496,14 @@ class Exporter:
|
|||
return f, None
|
||||
|
||||
@try_export
|
||||
def export_ncnn(self, prefix=colorstr("ncnn:")):
|
||||
def export_ncnn(self, prefix=colorstr("NCNN:")):
|
||||
"""
|
||||
YOLOv8 ncnn export using PNNX https://github.com/pnnx/pnnx.
|
||||
YOLOv8 NCNN export using PNNX https://github.com/pnnx/pnnx.
|
||||
"""
|
||||
check_requirements("ncnn")
|
||||
import ncnn # noqa
|
||||
|
||||
LOGGER.info(f"\n{prefix} starting export with ncnn {ncnn.__version__}...")
|
||||
LOGGER.info(f"\n{prefix} starting export with NCNN {ncnn.__version__}...")
|
||||
f = Path(str(self.file).replace(self.file.suffix, f"_ncnn_model{os.sep}"))
|
||||
f_ts = self.file.with_suffix(".torchscript")
|
||||
|
||||
|
|
|
|||
|
|
@@ -72,7 +72,7 @@ class AutoBackend(nn.Module):
|
|||
| TensorFlow Lite | *.tflite |
|
||||
| TensorFlow Edge TPU | *_edgetpu.tflite |
|
||||
| PaddlePaddle | *_paddle_model |
|
||||
| ncnn | *_ncnn_model |
|
||||
| NCNN | *_ncnn_model |
|
||||
|
||||
This class offers dynamic backend switching capabilities based on the input model format, making it easier to deploy
|
||||
models across various platforms.
|
||||
|
|
@@ -304,9 +304,9 @@ class AutoBackend(nn.Module):
|
|||
input_handle = predictor.get_input_handle(predictor.get_input_names()[0])
|
||||
output_names = predictor.get_output_names()
|
||||
metadata = w.parents[1] / "metadata.yaml"
|
||||
elif ncnn: # ncnn
|
||||
LOGGER.info(f"Loading {w} for ncnn inference...")
|
||||
check_requirements("git+https://github.com/Tencent/ncnn.git" if ARM64 else "ncnn") # requires ncnn
|
||||
elif ncnn: # NCNN
|
||||
LOGGER.info(f"Loading {w} for NCNN inference...")
|
||||
check_requirements("git+https://github.com/Tencent/ncnn.git" if ARM64 else "ncnn") # requires NCNN
|
||||
import ncnn as pyncnn
|
||||
|
||||
net = pyncnn.Net()
|
||||
|
|
@@ -431,7 +431,7 @@ class AutoBackend(nn.Module):
|
|||
self.input_handle.copy_from_cpu(im)
|
||||
self.predictor.run()
|
||||
y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]
|
||||
elif self.ncnn: # ncnn
|
||||
elif self.ncnn: # NCNN
|
||||
mat_in = self.pyncnn.Mat(im[0].cpu().numpy())
|
||||
ex = self.net.create_extractor()
|
||||
input_names, output_names = self.net.input_names(), self.net.output_names()
|
||||
|
|
|
|||
|
|
@@ -21,7 +21,7 @@ TensorFlow Lite | `tflite` | yolov8n.tflite
|
|||
TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite
|
||||
TensorFlow.js | `tfjs` | yolov8n_web_model/
|
||||
PaddlePaddle | `paddle` | yolov8n_paddle_model/
|
||||
ncnn | `ncnn` | yolov8n_ncnn_model/
|
||||
NCNN | `ncnn` | yolov8n_ncnn_model/
|
||||
"""
|
||||
|
||||
import glob
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue