Raspberry Pi 5 self-hosted CI (#8828)
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Ultralytics AI Assistant <135830346+UltralyticsAssistant@users.noreply.github.com>
This commit is contained in:
parent
c54b013188
commit
b76400a06b
5 changed files with 45 additions and 11 deletions
|
|
@@ -14,7 +14,7 @@ import torch
|
|||
import torch.nn as nn
|
||||
from PIL import Image
|
||||
|
||||
from ultralytics.utils import ARM64, LINUX, LOGGER, ROOT, yaml_load
|
||||
from ultralytics.utils import ARM64, IS_JETSON, IS_RASPBERRYPI, LINUX, LOGGER, ROOT, yaml_load
|
||||
from ultralytics.utils.checks import check_requirements, check_suffix, check_version, check_yaml
|
||||
from ultralytics.utils.downloads import attempt_download_asset, is_url
|
||||
|
||||
|
|
@@ -183,6 +183,9 @@ class AutoBackend(nn.Module):
|
|||
elif onnx:
|
||||
LOGGER.info(f"Loading {w} for ONNX Runtime inference...")
|
||||
check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime"))
|
||||
if IS_RASPBERRYPI or IS_JETSON:
|
||||
# Fix error: module 'numpy.linalg._umath_linalg' has no attribute '_ilp64' when exporting to Tensorflow SavedModel on RPi and Jetson
|
||||
check_requirements("numpy==1.23.5")
|
||||
import onnxruntime
|
||||
|
||||
providers = ["CUDAExecutionProvider", "CPUExecutionProvider"] if cuda else ["CPUExecutionProvider"]
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue