ultralytics 8.0.51 add assets and CI actions (#1296)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Paul Kehrer <paulhkehrer@gmail.com>
Glenn Jocher 2023-03-07 20:25:10 +01:00 committed by GitHub
parent f0d8e4718b
commit 790f9c067c
21 changed files with 272 additions and 107 deletions

@@ -14,7 +14,7 @@ import torch
 import torch.nn as nn
 from PIL import Image
 
-from ultralytics.yolo.utils import LOGGER, ROOT, yaml_load
+from ultralytics.yolo.utils import LINUX, LOGGER, ROOT, yaml_load
 from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_version, check_yaml
 from ultralytics.yolo.utils.downloads import attempt_download_asset, is_url
 from ultralytics.yolo.utils.ops import xywh2xyxy
@@ -143,7 +143,12 @@ class AutoBackend(nn.Module):
             metadata = w.parent / 'metadata.yaml'
         elif engine:  # TensorRT
             LOGGER.info(f'Loading {w} for TensorRT inference...')
-            import tensorrt as trt  # https://developer.nvidia.com/nvidia-tensorrt-download
+            try:
+                import tensorrt as trt  # noqa https://developer.nvidia.com/nvidia-tensorrt-download
+            except ImportError:
+                if LINUX:
+                    check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
+                import tensorrt as trt  # noqa
             check_version(trt.__version__, '7.0.0', hard=True)  # require tensorrt>=7.0.0
             if device.type == 'cpu':
                 device = torch.device('cuda:0')
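
Read outside the diff, the hunk above is a guarded optional-dependency import: try the import first and, on ImportError, attempt an install on the fly (Linux only, from NVIDIA's index) before retrying. A minimal standalone sketch of that pattern, reusing only the package name, index URL, and helpers that appear in the hunk:

    from ultralytics.yolo.utils import LINUX
    from ultralytics.yolo.utils.checks import check_requirements

    try:
        import tensorrt as trt  # optional; only needed for TensorRT engine inference
    except ImportError:
        if LINUX:  # the pip install is only attempted on Linux, as in the hunk above
            check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com')
        import tensorrt as trt  # retry; still raises if the install was skipped or failed

Gating the install behind LINUX mirrors the hunk, presumably because the NGC wheel targets Linux.
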
@@ -230,7 +235,7 @@ class AutoBackend(nn.Module):
         elif paddle:  # PaddlePaddle
             LOGGER.info(f'Loading {w} for PaddlePaddle inference...')
             check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')
-            import paddle.inference as pdi
+            import paddle.inference as pdi  # noqa
             w = Path(w)
             if not w.is_file():  # if not *.pdmodel
                 w = next(w.rglob('*.pdmodel'))  # get *.pdmodel file from *_paddle_model dir
@@ -260,11 +265,16 @@ class AutoBackend(nn.Module):
         if isinstance(metadata, (str, Path)) and Path(metadata).exists():
             metadata = yaml_load(metadata)
         if metadata:
-            stride = int(metadata['stride'])
+            for k, v in metadata.items():
+                if k in ('stride', 'batch'):
+                    metadata[k] = int(v)
+                elif k in ('imgsz', 'names') and isinstance(v, str):
+                    metadata[k] = eval(v)
+            stride = metadata['stride']
             task = metadata['task']
-            batch = int(metadata['batch'])
-            imgsz = eval(metadata['imgsz']) if isinstance(metadata['imgsz'], str) else metadata['imgsz']
-            names = eval(metadata['names']) if isinstance(metadata['names'], str) else metadata['names']
+            batch = metadata['batch']
+            imgsz = metadata['imgsz']
+            names = metadata['names']
         elif not (pt or triton or nn_module):
             LOGGER.warning(f"WARNING ⚠️ Metadata not found for 'model={weights}'")
@@ -285,7 +295,7 @@ class AutoBackend(nn.Module):
             visualize (bool): whether to visualize the output predictions, defaults to False
 
         Returns:
-            (tuple): Tuple containing the raw output tensor, and the processed output for visualization (if visualize=True)
+            (tuple): Tuple containing the raw output tensor, and processed output for visualization (if visualize=True)
         """
         b, ch, h, w = im.shape  # batch, channel, height, width
         if self.fp16 and im.dtype != torch.float16:
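
For context on the surrounding lines: when the backend was loaded in half precision (self.fp16), forward() casts the incoming image tensor to float16 before running inference. A minimal standalone sketch of that cast, with a placeholder tensor and flag standing in for the instance attributes:

    import torch

    fp16 = True                       # stands in for self.fp16 on the AutoBackend instance
    im = torch.zeros(1, 3, 640, 640)  # placeholder batch: (batch, channels, height, width)
    if fp16 and im.dtype != torch.float16:
        im = im.half()  # match the model's half-precision weights
    b, ch, h, w = im.shape  # batch, channel, height, width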