ultralytics 8.2.42 NVIDIA TensorRT 10 default (#13943)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: DeepDiver <zhaoxu1015@gmail.com>
Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com>

Parent: 1d7eeae325
Commit: 3bb0c5afa3

10 changed files with 23 additions and 22 deletions
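
Per the commit title, this release makes NVIDIA TensorRT 10 the default target for TensorRT engine exports. A minimal sketch of the export call that exercises that path, using the public Ultralytics Python API (the model file and arguments below are illustrative, not part of this diff):

from ultralytics import YOLO

# Load a pretrained detection model (any supported weights file works here)
model = YOLO("yolov8n.pt")

# Export to a TensorRT engine; per the commit title, the export now targets TensorRT 10 by default
model.export(format="engine", half=True, device=0)
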
@@ -20,7 +20,7 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
 # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
 # libsm6 required by libqxcb to create QT-based windows for visualization; set 'QT_DEBUG_PLUGINS=1' to test in docker
 RUN apt update \
-    && apt install --no-install-recommends -y gcc git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 libsm6
+    && apt install --no-install-recommends -y gcc git zip unzip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0 libsm6

 # Security updates
 # https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796

@@ -62,7 +62,7 @@ RUN rm -rf tmp
 # t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus '"device=2,3"' $t

 # Pull and Run with local directory access
-# t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t
+# t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/shared/datasets:/usr/src/datasets $t

 # Kill all
 # sudo docker kill $(sudo docker ps -q)

@@ -18,7 +18,7 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
 # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
 # pkg-config and libhdf5-dev (not included) are needed to build 'h5py==3.11.0' aarch64 wheel required by 'tensorflow'
 RUN apt update \
-    && apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
+    && apt install --no-install-recommends -y python3-pip git zip unzip curl htop gcc libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0

 # Create working directory
 WORKDIR $APP_HOME

@@ -52,4 +52,4 @@ RUN ln -sf /usr/bin/python3 /usr/bin/python
 # t=ultralytics/ultralytics:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host $t

 # Pull and Run with local volume mounted
-# t=ultralytics/ultralytics:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
+# t=ultralytics/ultralytics:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/usr/src/datasets $t

@@ -22,7 +22,7 @@ ADD https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt .
 RUN conda config --set solver libmamba && \
     conda install pytorch torchvision pytorch-cuda=12.1 -c pytorch -c nvidia && \
     conda install -c conda-forge ultralytics mkl
-# conda install -c pytorch -c nvidia -c conda-forge pytorch torchvision pytorch-cuda=11.8 ultralytics mkl
+# conda install -c pytorch -c nvidia -c conda-forge pytorch torchvision pytorch-cuda=12.1 ultralytics mkl


 # Usage Examples -------------------------------------------------------------------------------------------------------

@@ -37,4 +37,4 @@ RUN conda config --set solver libmamba && \
 # t=ultralytics/ultralytics:latest-conda && sudo docker pull $t && sudo docker run -it --ipc=host $t

 # Pull and Run with local volume mounted
-# t=ultralytics/ultralytics:latest-conda && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
+# t=ultralytics/ultralytics:latest-conda && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/usr/src/datasets $t

@@ -16,7 +16,7 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
 # Install linux packages
 # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
 RUN apt update \
-    && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
+    && apt install --no-install-recommends -y python3-pip git zip unzip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0

 # Create working directory
 WORKDIR $APP_HOME

@@ -57,4 +57,4 @@ RUN ln -sf /usr/bin/python3 /usr/bin/python
 # t=ultralytics/ultralytics:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host --name NAME $t

 # Pull and Run with local volume mounted
-# t=ultralytics/ultralytics:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
+# t=ultralytics/ultralytics:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/usr/src/datasets $t

@@ -18,7 +18,7 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
 # libusb-1.0-0 required for 'tflite_support' package when exporting to TFLite
 # pkg-config and libhdf5-dev (not included) are needed to build 'h5py==3.11.0' aarch64 wheel required by 'tensorflow'
 RUN apt update \
-    && apt install --no-install-recommends -y gcc git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
+    && apt install --no-install-recommends -y gcc git zip unzip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0

 # Create working directory
 WORKDIR $APP_HOME

@@ -16,7 +16,7 @@ ADD https://github.com/ultralytics/assets/releases/download/v0.0.0/Arial.ttf \
 # Install linux packages
 # g++ required to build 'tflite_support' and 'lap' packages, libusb-1.0-0 required for 'tflite_support' package
 RUN apt update \
-    && apt install --no-install-recommends -y python3-pip git zip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0
+    && apt install --no-install-recommends -y python3-pip git zip unzip curl htop libgl1 libglib2.0-0 libpython3-dev gnupg g++ libusb-1.0-0

 # Create working directory
 WORKDIR $APP_HOME

@@ -54,4 +54,4 @@ RUN rm -rf tmp
 # t=ultralytics/ultralytics:latest-python && sudo docker pull $t && sudo docker run -it --ipc=host $t

 # Pull and Run with local volume mounted
-# t=ultralytics/ultralytics:latest-python && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t
+# t=ultralytics/ultralytics:latest-python && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/shared/datasets:/usr/src/datasets $t

@@ -9,14 +9,15 @@ FROM ultralytics/ultralytics:latest
 WORKDIR /actions-runner

 # Download and unpack the latest runner from https://github.com/actions/runner
-RUN FILENAME=actions-runner-linux-x64-2.309.0.tar.gz && \
-    curl -o $FILENAME -L https://github.com/actions/runner/releases/download/v2.309.0/$FILENAME && \
+RUN FILENAME=actions-runner-linux-x64-2.317.0.tar.gz && \
+    curl -o $FILENAME -L https://github.com/actions/runner/releases/download/v2.317.0/$FILENAME && \
     tar xzf $FILENAME && \
     rm $FILENAME

 # Install runner dependencies
 ENV RUNNER_ALLOW_RUNASROOT=1
 ENV DEBIAN_FRONTEND=noninteractive
+RUN pip install --no-cache-dir pytest-cov
 RUN ./bin/installdependencies.sh && \
     apt-get -y install libicu-dev

@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = "8.2.41"
+__version__ = "8.2.42"

 import os

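A quick way to confirm that the bumped release is the one installed, using the version string shown above:

import ultralytics

print(ultralytics.__version__)  # expected to print "8.2.42" for this release
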
@@ -822,13 +822,13 @@ class Exporter:
 import tensorflow as tf  # noqa
 check_requirements(
     (
-        "keras",  # required by onnx2tf package
-        "tf_keras",  # required by onnx2tf package
+        "keras",  # required by 'onnx2tf' package
+        "tf_keras",  # required by 'onnx2tf' package
+        "sng4onnx>=1.0.1",  # required by 'onnx2tf' package
+        "onnx_graphsurgeon>=0.3.26",  # required by 'onnx2tf' package
         "onnx>=1.12.0",
         "onnx2tf>1.17.5,<=1.22.3",
-        "sng4onnx>=1.0.1",
         "onnxslim>=0.1.31",
-        "onnx_graphsurgeon>=0.3.26",
         "tflite_support<=0.4.3" if IS_JETSON else "tflite_support",  # fix ImportError 'GLIBCXX_3.4.29'
         "flatbuffers>=23.5.26,<100",  # update old 'flatbuffers' included inside tensorflow package
         "onnxruntime-gpu" if cuda else "onnxruntime",

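The tuple above is what check_requirements() verifies before the Exporter's TensorFlow-based export path runs. A hedged sketch of the user-facing call that reaches this code (the model file and argument are illustrative):

from ultralytics import YOLO

model = YOLO("yolov8n.pt")

# A TFLite export goes through the TensorFlow path, which runs the dependency check shown above,
# pulling in 'onnx2tf' and its helpers such as 'sng4onnx' and 'onnx_graphsurgeon' if they are missing
model.export(format="tflite")
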
@@ -214,7 +214,7 @@ class v8DetectionLoss:
 targets = torch.cat((batch["batch_idx"].view(-1, 1), batch["cls"].view(-1, 1), batch["bboxes"]), 1)
 targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
 gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
-mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
+mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0.0)

 # Pboxes
 pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)

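The only functional change in this and the following loss hunks is the mask_gt threshold literal (0 → 0.0). A minimal standalone sketch of what that line computes, with made-up tensor values for illustration:

import torch

# Ground-truth boxes after preprocess(): shape (batch, max_boxes, 4), rows of zeros are padding
gt_bboxes = torch.tensor([[[10.0, 20.0, 30.0, 40.0],
                           [0.0, 0.0, 0.0, 0.0]]])

# Sum the four coordinates per box, then mark rows with any positive extent in place
mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0.0)
print(mask_gt)  # tensor([[[1.], [0.]]]) -> 1.0 for real boxes, 0.0 for padded rows
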
@@ -280,7 +280,7 @@ class v8SegmentationLoss(v8DetectionLoss):
     targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"]), 1)
     targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
     gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
-    mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
+    mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0.0)
 except RuntimeError as e:
     raise TypeError(
         "ERROR ❌ segment dataset incorrectly formatted or not a segment dataset.\n"

@@ -467,7 +467,7 @@ class v8PoseLoss(v8DetectionLoss):
 targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"]), 1)
 targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
 gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
-mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
+mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0.0)

 # Pboxes
 pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)

@@ -652,7 +652,7 @@ class v8OBBLoss(v8DetectionLoss):
     targets = targets[(rw >= 2) & (rh >= 2)]  # filter rboxes of tiny size to stabilize training
     targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
     gt_labels, gt_bboxes = targets.split((1, 5), 2)  # cls, xywhr
-    mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)
+    mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0.0)
 except RuntimeError as e:
     raise TypeError(
         "ERROR ❌ OBB dataset incorrectly formatted or not a OBB dataset.\n"