diff --git a/.github/workflows/merge-main-into-prs.yml b/.github/workflows/merge-main-into-prs.yml index 001cef95..c7216682 100644 --- a/.github/workflows/merge-main-into-prs.yml +++ b/.github/workflows/merge-main-into-prs.yml @@ -9,50 +9,50 @@ on: # push: # branches: # - main - + jobs: Merge: if: github.repository == 'ultralytics/ultralytics' runs-on: ubuntu-latest steps: - - name: Checkout repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - ref: main - - uses: actions/setup-python@v5 - with: - python-version: "3.11" - cache: "pip" # caching pip dependencies - - name: Install requirements - run: | - pip install pygithub - - name: Merge main into PRs - shell: python - run: | - from github import Github - import os - - # Authenticate with the GitHub Token - g = Github(os.getenv('GITHUB_TOKEN')) - - # Get the repository dynamically - repo = g.get_repo(os.getenv('GITHUB_REPOSITORY')) - - # List all open pull requests - open_pulls = repo.get_pulls(state='open', sort='created') - - for pr in open_pulls: - try: - # Compare PR head with main to see if it's behind - comparison = repo.compare(pr.base.ref, pr.head.ref) # ensure correct order of base and head - if comparison.behind_by > 0: - # Merge main into the PR branch - success = pr.update_branch() - assert success, "Branch update failed" - print(f"Merged 'main' into PR #{pr.number} ({pr.head.ref}) successfully.") - except Exception as e: - print(f"Could not merge 'main' into PR #{pr.number} ({pr.head.ref}): {e}") - env: - GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} - GITHUB_REPOSITORY: ${{ github.repository }} + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: main + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + cache: "pip" # caching pip dependencies + - name: Install requirements + run: | + pip install pygithub + - name: Merge main into PRs + shell: python + run: | + from github import Github + import os + + # Authenticate with the GitHub Token + g = Github(os.getenv('GITHUB_TOKEN')) + + # Get the repository dynamically + repo = g.get_repo(os.getenv('GITHUB_REPOSITORY')) + + # List all open pull requests + open_pulls = repo.get_pulls(state='open', sort='created') + + for pr in open_pulls: + try: + # Compare PR head with main to see if it's behind + comparison = repo.compare(pr.base.ref, pr.head.ref) # ensure correct order of base and head + if comparison.behind_by > 0: + # Merge main into the PR branch + success = pr.update_branch() + assert success, "Branch update failed" + print(f"Merged 'main' into PR #{pr.number} ({pr.head.ref}) successfully.") + except Exception as e: + print(f"Could not merge 'main' into PR #{pr.number} ({pr.head.ref}): {e}") + env: + GITHUB_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + GITHUB_REPOSITORY: ${{ github.repository }} diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 9a3e0fe1..1dd3748a 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -40,23 +40,23 @@ jobs: import os import ultralytics from ultralytics.utils.checks import check_latest_pypi_version - + v_local = tuple(map(int, ultralytics.__version__.split('.'))) v_pypi = tuple(map(int, check_latest_pypi_version().split('.'))) - + print(f'Local version is {v_local}') print(f'PyPI version is {v_pypi}') - + d = [a - b for a, b in zip(v_local, v_pypi)] # diff - + increment_patch = (d[0] == d[1] == 0) and (0 < d[2] < 3) # publish if patch version increments by 1 or 2 increment_minor = (d[0] == 0) and (d[1] == 1) and 
v_local[2] == 0 # publish if minor version increments - + increment = increment_patch or increment_minor - + os.system(f'echo "increment={increment}" >> $GITHUB_OUTPUT') os.system(f'echo "version={ultralytics.__version__}" >> $GITHUB_OUTPUT') - + if increment: print('Local version is higher than PyPI version. Publishing new version to PyPI ✅.') id: check_pypi diff --git a/docs/en/guides/docker-quickstart.md b/docs/en/guides/docker-quickstart.md index 00918d40..59e80c68 100644 --- a/docs/en/guides/docker-quickstart.md +++ b/docs/en/guides/docker-quickstart.md @@ -93,12 +93,16 @@ sudo docker pull $t ## Running Ultralytics in Docker Container Here's how to execute the Ultralytics Docker container: + ### Using only the CPU + ```bash # Run with all GPUs sudo docker run -it --ipc=host $t ``` + ### Using GPUs + ```bash # Run with all GPUs sudo docker run -it --ipc=host --gpus all $t @@ -109,7 +113,6 @@ sudo docker run -it --ipc=host --gpus '"device=2,3"' $t The `-it` flag assigns a pseudo-TTY and keeps stdin open, allowing you to interact with the container. The `--ipc=host` flag enables sharing of host's IPC namespace, essential for sharing memory between processes. The `--gpus` flag allows the container to access the host's GPUs. - ## Running Ultralytics in Docker Container Here's how to execute the Ultralytics Docker container: diff --git a/docs/en/guides/nvidia-jetson.md b/docs/en/guides/nvidia-jetson.md index 7803fae6..06f188ee 100644 --- a/docs/en/guides/nvidia-jetson.md +++ b/docs/en/guides/nvidia-jetson.md @@ -23,7 +23,7 @@ NVIDIA Jetson is a series of embedded computing boards designed to bring acceler [Jetson Orin](https://www.nvidia.com/en-us/autonomous-machines/embedded-systems/jetson-orin/) is the latest iteration of the NVIDIA Jetson family based on NVIDIA Ampere architecture which brings drastically improved AI performance when compared to the previous generations. Below table compared few of the Jetson devices in the ecosystem. 
| | Jetson AGX Orin 64GB | Jetson Orin NX 16GB | Jetson Orin Nano 8GB | Jetson AGX Xavier | Jetson Xavier NX | Jetson Nano | -| ----------------- | ---------------------------------------------------------------- | --------------------------------------------------------------- | ------------------------------------------------------------- | ----------------------------------------------------------- | ------------------------------------------------------------ | ------------------------------------------- | +|-------------------|------------------------------------------------------------------|-----------------------------------------------------------------|---------------------------------------------------------------|-------------------------------------------------------------|--------------------------------------------------------------|---------------------------------------------| | AI Performance | 275 TOPS | 100 TOPS | 40 TOPs | 32 TOPS | 21 TOPS | 472 GFLOPS | | GPU | 2048-core NVIDIA Ampere architecture GPU with 64 Tensor Cores | 1024-core NVIDIA Ampere architecture GPU with 32 Tensor Cores | 1024-core NVIDIA Ampere architecture GPU with 32 Tensor Cores | 512-core NVIDIA Volta architecture GPU with 64 Tensor Cores | 384-core NVIDIA Volta™ architecture GPU with 48 Tensor Cores | 128-core NVIDIA Maxwell™ architecture GPU | | GPU Max Frequency | 1.3 GHz | 918 MHz | 625 MHz | 1377 MHz | 1100 MHz | 921MHz | diff --git a/docs/en/guides/raspberry-pi.md b/docs/en/guides/raspberry-pi.md index 97cfe51d..c5560357 100644 --- a/docs/en/guides/raspberry-pi.md +++ b/docs/en/guides/raspberry-pi.md @@ -130,7 +130,7 @@ The YOLOv8n model in PyTorch format is converted to NCNN to run inference with t ``` !!! Tip - + For more details about supported export options, visit the [Ultralytics documentation page on deployment options](https://docs.ultralytics.com/guides/model-deployment-options). ## Raspberry Pi 5 vs Raspberry Pi 4 YOLOv8 Benchmarks diff --git a/docs/en/guides/vision-eye.md b/docs/en/guides/vision-eye.md index eba2a6d1..545a91c3 100644 --- a/docs/en/guides/vision-eye.md +++ b/docs/en/guides/vision-eye.md @@ -166,10 +166,10 @@ keywords: Ultralytics, YOLOv8, Object Detection, Object Tracking, IDetection, Vi ### `visioneye` Arguments -| Name | Type | Default | Description | -|---------------|---------|------------------|--------------------------------------------------| -| `color` | `tuple` | `(235, 219, 11)` | Line and object centroid color | -| `pin_color` | `tuple` | `(255, 0, 255)` | VisionEye pinpoint color | +| Name | Type | Default | Description | +|-------------|---------|------------------|--------------------------------| +| `color` | `tuple` | `(235, 219, 11)` | Line and object centroid color | +| `pin_color` | `tuple` | `(255, 0, 255)` | VisionEye pinpoint color | ## Note diff --git a/docs/en/hub/index.md b/docs/en/hub/index.md index 7bc06a46..5c53c27c 100644 --- a/docs/en/hub/index.md +++ b/docs/en/hub/index.md @@ -62,5 +62,5 @@ We hope that the resources here will help you get the most out of HUB. Please br - [**Teams**](teams.md): Collaborate effortlessly with your team. - [**Integrations**](integrations.md): Explore different integration options. - [**Ultralytics HUB App**](app/index.md): Learn about the Ultralytics HUB App, which allows you to run models directly on your mobile device. - - [**iOS**](app/ios.md): Explore CoreML acceleration on iPhones and iPads. - - [**Android**](app/android.md): Explore TFLite acceleration on Android devices. 
\ No newline at end of file + - [**iOS**](app/ios.md): Explore CoreML acceleration on iPhones and iPads. + - [**Android**](app/android.md): Explore TFLite acceleration on Android devices. diff --git a/docs/en/hub/inference-api.md b/docs/en/hub/inference-api.md index 89520aed..d3b269c5 100644 --- a/docs/en/hub/inference-api.md +++ b/docs/en/hub/inference-api.md @@ -70,7 +70,7 @@ curl -X POST "https://api.ultralytics.com/v1/predict/MODEL_ID" \ See the table below for a full list of available inference arguments. | Argument | Default | Type | Description | -| ------------ | ------- | ------- | -------------------------------------- | +|--------------|---------|---------|----------------------------------------| | `image` | | `image` | image file | | `url` | | `str` | URL of the image if not passing a file | | `size` | `640` | `int` | valid range `32` - `1280` pixels | @@ -91,10 +91,10 @@ The [Ultralytics HUB](https://bit.ly/ultralytics_hub) Inference API returns a JS from ultralytics import YOLO # Load model - model = YOLO('yolov8n-cls.pt') + model = YOLO("yolov8n-cls.pt") # Run inference - results = model('image.jpg') + results = model("image.jpg") # Print image.jpg results in JSON format print(results[0].tojson()) @@ -159,10 +159,10 @@ The [Ultralytics HUB](https://bit.ly/ultralytics_hub) Inference API returns a JS from ultralytics import YOLO # Load model - model = YOLO('yolov8n.pt') + model = YOLO("yolov8n.pt") # Run inference - results = model('image.jpg') + results = model("image.jpg") # Print image.jpg results in JSON format print(results[0].tojson()) @@ -231,10 +231,10 @@ The [Ultralytics HUB](https://bit.ly/ultralytics_hub) Inference API returns a JS from ultralytics import YOLO # Load model - model = YOLO('yolov8n-obb.pt') + model = YOLO("yolov8n-obb.pt") # Run inference - results = model('image.jpg') + results = model("image.jpg") # Print image.jpg results in JSON format print(results[0].tojson()) @@ -305,10 +305,10 @@ The [Ultralytics HUB](https://bit.ly/ultralytics_hub) Inference API returns a JS from ultralytics import YOLO # Load model - model = YOLO('yolov8n-seg.pt') + model = YOLO("yolov8n-seg.pt") # Run inference - results = model('image.jpg') + results = model("image.jpg") # Print image.jpg results in JSON format print(results[0].tojson()) @@ -374,10 +374,10 @@ The [Ultralytics HUB](https://bit.ly/ultralytics_hub) Inference API returns a JS from ultralytics import YOLO # Load model - model = YOLO('yolov8n-pose.pt') + model = YOLO("yolov8n-pose.pt") # Run inference - results = model('image.jpg') + results = model("image.jpg") # Print image.jpg results in JSON format print(results[0].tojson()) diff --git a/docs/en/integrations/google-colab.md b/docs/en/integrations/google-colab.md index 05707936..610eb9fc 100644 --- a/docs/en/integrations/google-colab.md +++ b/docs/en/integrations/google-colab.md @@ -14,7 +14,7 @@ You can use Google Colab to work on projects related to [Ultralytics YOLOv8](htt Google Colaboratory, commonly known as Google Colab, was developed by Google Research in 2017. It is a free online cloud-based Jupyter Notebook environment that allows you to train your machine learning and deep learning models on CPUs, GPUs, and TPUs. The motivation behind developing Google Colab was Google's broader goals to advance AI technology and educational tools, and encourage the use of cloud services. -You can use Google Colab regardless of the specifications and configurations of your local computer. All you need is a Google account and a web browser, and you’re good to go. 
+You can use Google Colab regardless of the specifications and configurations of your local computer. All you need is a Google account and a web browser, and you’re good to go. ## Training YOLOv8 Using Google Colaboratory @@ -39,7 +39,7 @@ Learn how to train a YOLOv8 model with custom data on YouTube with Nicolai. Chec ### Common Questions While Working with Google Colab -When working with Google Colab, you might have a few common questions. Let’s answer them. +When working with Google Colab, you might have a few common questions. Let’s answer them. **Q: Why does my Google Colab session timeout?** A: Google Colab sessions can timeout due to inactivity, especially for free users who have a limited session duration. diff --git a/docs/en/integrations/index.md b/docs/en/integrations/index.md index 5d5aeb6b..d6dcc95f 100644 --- a/docs/en/integrations/index.md +++ b/docs/en/integrations/index.md @@ -87,19 +87,19 @@ We also support a variety of model export formats for deployment in different en | Format | `format` Argument | Model | Metadata | Arguments | |---------------------------------------------------|-------------------|---------------------------|----------|----------------------------------------------------------------------| -| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | -| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | -| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | -| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | -| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | -| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n.pb` | ❌ | `imgsz`, `batch` | -| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz`, `batch` | -| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz`, `batch` | -| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | +| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | +| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | +| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | +| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | 
`imgsz`, `keras`, `int8`, `batch` | +| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n.pb` | ❌ | `imgsz`, `batch` | +| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz`, `batch` | +| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz`, `batch` | +| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | Explore the links to learn more about each integration and how to get the most out of them with Ultralytics. See full `export` details in the [Export](../modes/export.md) page. diff --git a/docs/en/integrations/paperspace.md b/docs/en/integrations/paperspace.md index cf8f48ee..7563125f 100644 --- a/docs/en/integrations/paperspace.md +++ b/docs/en/integrations/paperspace.md @@ -16,7 +16,7 @@ This is where a platform like Paperspace Gradient can make things simpler. Paper Paperspace Overview

-[Paperspace](https://www.paperspace.com/), launched in 2014 by University of Michigan graduates and acquired by DigitalOcean in 2023, is a cloud platform specifically designed for machine learning. It provides users with powerful GPUs, collaborative Jupyter notebooks, a container service for deployments, automated workflows for machine learning tasks, and high-performance virtual machines. These features aim to streamline the entire machine learning development process, from coding to deployment. +[Paperspace](https://www.paperspace.com/), launched in 2014 by University of Michigan graduates and acquired by DigitalOcean in 2023, is a cloud platform specifically designed for machine learning. It provides users with powerful GPUs, collaborative Jupyter notebooks, a container service for deployments, automated workflows for machine learning tasks, and high-performance virtual machines. These features aim to streamline the entire machine learning development process, from coding to deployment. ## Paperspace Gradient diff --git a/docs/en/integrations/tensorrt.md b/docs/en/integrations/tensorrt.md index af53d9f7..577d4e8a 100644 --- a/docs/en/integrations/tensorrt.md +++ b/docs/en/integrations/tensorrt.md @@ -111,7 +111,7 @@ For more details about the export process, visit the [Ultralytics documentation ### Exporting TensorRT with INT8 Quantization -Exporting Ultralytics YOLO models using TensorRT with INT8 precision executes post-training quantization (PTQ). TensorRT uses calibration for PTQ, which measures the distribution of activations within each activation tensor as the YOLO model processes inference on representative input data, and then uses that distribution to estimate scale values for each tensor. Each activation tensor that is a candidate for quantization has an associated scale that is deduced by a calibration process. +Exporting Ultralytics YOLO models using TensorRT with INT8 precision executes post-training quantization (PTQ). TensorRT uses calibration for PTQ, which measures the distribution of activations within each activation tensor as the YOLO model processes inference on representative input data, and then uses that distribution to estimate scale values for each tensor. Each activation tensor that is a candidate for quantization has an associated scale that is deduced by a calibration process. When processing implicitly quantized networks TensorRT uses INT8 opportunistically to optimize layer execution time. If a layer runs faster in INT8 and has assigned quantization scales on its data inputs and outputs, then a kernel with INT8 precision is assigned to that layer, otherwise TensorRT selects a precision of either FP32 or FP16 for the kernel based on whichever results in faster execution time for that layer. @@ -123,20 +123,20 @@ When processing implicitly quantized networks TensorRT uses INT8 opportunistical The arguments provided when using [export](../modes/export.md) for an Ultralytics YOLO model will **greatly** influence the performance of the exported model. They will also need to be selected based on the device resources available, however the default arguments _should_ work for most [Ampere (or newer) NVIDIA discrete GPUs](https://developer.nvidia.com/blog/nvidia-ampere-architecture-in-depth/). The calibration algorithm used is `"ENTROPY_CALIBRATION_2"` and you can read more details about the options available [in the TensorRT Developer Guide](https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#enable_int8_c). 
Ultralytics tests found that `"ENTROPY_CALIBRATION_2"` was the best choice and exports are fixed to using this algorithm. - - `workspace` : Controls the size (in GiB) of the device memory allocation while converting the model weights. - +- `workspace` : Controls the size (in GiB) of the device memory allocation while converting the model weights. + - Aim to use the minimum `workspace` value required as this prevents testing algorithms that require more `workspace` from being considered by the TensorRT builder. Setting a higher value for `workspace` may take **considerably longer** to calibrate and export. - Default is `workspace=4` (GiB), this value may need to be increased if calibration crashes (exits without warning). - + - TensorRT will report `UNSUPPORTED_STATE` during export if the value for `workspace` is larger than the memory available to the device, which means the value for `workspace` should be lowered. - + - If `workspace` is set to max value and calibration fails/crashes, consider reducing the values for `imgsz` and `batch` to reduce memory requirements. - Remember calibration for INT8 is specific to each device, borrowing a "high-end" GPU for calibration, might result in poor performance when inference is run on another device. - - `batch` : The maximum batch-size that will be used for inference. During inference smaller batches can be used, but inference will not accept batches any larger than what is specified. - +- `batch` : The maximum batch-size that will be used for inference. During inference smaller batches can be used, but inference will not accept batches any larger than what is specified. + !!! note During calibration, twice the `batch` size provided will be used. Using small batches can lead to inaccurate scaling during calibration. This is because the process adjusts based on the data it sees. Small batches might not capture the full range of values, leading to issues with the final calibration, so the `batch` size is doubled automatically. If no batch size is specified `batch=1`, calibration will be run at `batch=1 * 2` to reduce calibration scaling errors. @@ -182,7 +182,6 @@ Experimentation by NVIDIA led them to recommend using at least 500 calibration i yolo predict model=yolov8n.engine source='https://ultralytics.com/images/bus.jpg' ``` - ???+ warning "Calibration Cache" TensorRT will generate a calibration `.cache` which can be re-used to speed up export of future model weights using the same data, but this may result in poor calibration when the data is vastly different or if the `batch` value is changed drastically. In these circumstances, the existing `.cache` should be renamed and moved to a different directory or deleted entirely. @@ -467,7 +466,6 @@ Having successfully exported your Ultralytics YOLOv8 models to TensorRT format, - **[GitHub Repository for NVIDIA TensorRT:](https://github.com/NVIDIA/TensorRT)**: This is the official GitHub repository that contains the source code and documentation for NVIDIA TensorRT. - ## Summary In this guide, we focused on converting Ultralytics YOLOv8 models to NVIDIA's TensorRT model format. This conversion step is crucial for improving the efficiency and speed of YOLOv8 models, making them more effective and suitable for diverse deployment environments. 
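As a rough illustration of the INT8 TensorRT export arguments discussed in the `tensorrt.md` changes above, here is a minimal sketch assuming the standard `ultralytics` Python API; the `data`, `batch`, `workspace`, and `imgsz` values are illustrative placeholders rather than recommendations.

```python
from ultralytics import YOLO

# Load a pretrained detection model (any YOLOv8 weights file would work here)
model = YOLO("yolov8n.pt")

# Export to a TensorRT engine with INT8 post-training quantization.
# `data` points at a representative calibration dataset, `batch` is the maximum
# inference batch size (calibration itself runs at 2x this value), and
# `workspace` is the TensorRT builder memory budget in GiB, as described above.
model.export(
    format="engine",
    int8=True,
    data="coco8.yaml",  # illustrative calibration dataset
    batch=8,
    workspace=4,
    imgsz=640,
)

# Run inference with the exported engine to confirm it loads
trt_model = YOLO("yolov8n.engine")
results = trt_model("https://ultralytics.com/images/bus.jpg")
```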
diff --git a/docs/en/modes/benchmark.md b/docs/en/modes/benchmark.md index 9af9d140..a3fe5b8f 100644 --- a/docs/en/modes/benchmark.md +++ b/docs/en/modes/benchmark.md @@ -89,18 +89,18 @@ Benchmarks will attempt to run automatically on all possible export formats belo | Format | `format` Argument | Model | Metadata | Arguments | |---------------------------------------------------|-------------------|---------------------------|----------|----------------------------------------------------------------------| -| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | -| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | -| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | -| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | -| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | -| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n.pb` | ❌ | `imgsz`, `batch` | -| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz`, `batch` | -| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz`, `batch` | -| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | +| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | +| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | +| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | +| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | +| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n.pb` | ❌ | `imgsz`, `batch` | +| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz`, `batch` | +| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz`, `batch` | +| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | See full `export` details in the [Export](../modes/export.md) 
page. diff --git a/docs/en/modes/export.md b/docs/en/modes/export.md index c9e7237d..569b523b 100644 --- a/docs/en/modes/export.md +++ b/docs/en/modes/export.md @@ -97,16 +97,16 @@ Available YOLOv8 export formats are in the table below. You can export to any fo | Format | `format` Argument | Model | Metadata | Arguments | |---------------------------------------------------|-------------------|---------------------------|----------|----------------------------------------------------------------------| -| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | -| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | -| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | -| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | -| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | -| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n.pb` | ❌ | `imgsz`, `batch` | -| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz`, `batch` | -| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz`, `batch` | -| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | +| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | +| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | +| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | +| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | +| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n.pb` | ❌ | `imgsz`, `batch` | +| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz`, `batch` | +| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz`, `batch` | +| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | diff --git a/docs/en/modes/predict.md b/docs/en/modes/predict.md index 
ee5ef58c..216b6c25 100644 --- a/docs/en/modes/predict.md +++ b/docs/en/modes/predict.md @@ -403,18 +403,18 @@ YOLOv8 supports various image and video formats, as specified in [ultralytics/da The below table contains valid Ultralytics image formats. -| Image Suffixes | Example Predict Command | Reference | -|----------------|----------------------------------|-------------------------------------------------------------------------------| -| `.bmp` | `yolo predict source=image.bmp` | [Microsoft BMP File Format](https://en.wikipedia.org/wiki/BMP_file_format) | -| `.dng` | `yolo predict source=image.dng` | [Adobe DNG](https://helpx.adobe.com/camera-raw/digital-negative.html) | -| `.jpeg` | `yolo predict source=image.jpeg` | [JPEG](https://en.wikipedia.org/wiki/JPEG) | -| `.jpg` | `yolo predict source=image.jpg` | [JPEG](https://en.wikipedia.org/wiki/JPEG) | -| `.mpo` | `yolo predict source=image.mpo` | [Multi Picture Object](https://fileinfo.com/extension/mpo) | -| `.png` | `yolo predict source=image.png` | [Portable Network Graphics](https://en.wikipedia.org/wiki/PNG) | -| `.tif` | `yolo predict source=image.tif` | [Tag Image File Format](https://en.wikipedia.org/wiki/TIFF) | -| `.tiff` | `yolo predict source=image.tiff` | [Tag Image File Format](https://en.wikipedia.org/wiki/TIFF) | -| `.webp` | `yolo predict source=image.webp` | [WebP](https://en.wikipedia.org/wiki/WebP) | -| `.pfm` | `yolo predict source=image.pfm` | [Portable FloatMap](https://en.wikipedia.org/wiki/Netpbm#File_formats) | +| Image Suffixes | Example Predict Command | Reference | +|----------------|----------------------------------|----------------------------------------------------------------------------| +| `.bmp` | `yolo predict source=image.bmp` | [Microsoft BMP File Format](https://en.wikipedia.org/wiki/BMP_file_format) | +| `.dng` | `yolo predict source=image.dng` | [Adobe DNG](https://helpx.adobe.com/camera-raw/digital-negative.html) | +| `.jpeg` | `yolo predict source=image.jpeg` | [JPEG](https://en.wikipedia.org/wiki/JPEG) | +| `.jpg` | `yolo predict source=image.jpg` | [JPEG](https://en.wikipedia.org/wiki/JPEG) | +| `.mpo` | `yolo predict source=image.mpo` | [Multi Picture Object](https://fileinfo.com/extension/mpo) | +| `.png` | `yolo predict source=image.png` | [Portable Network Graphics](https://en.wikipedia.org/wiki/PNG) | +| `.tif` | `yolo predict source=image.tif` | [Tag Image File Format](https://en.wikipedia.org/wiki/TIFF) | +| `.tiff` | `yolo predict source=image.tiff` | [Tag Image File Format](https://en.wikipedia.org/wiki/TIFF) | +| `.webp` | `yolo predict source=image.webp` | [WebP](https://en.wikipedia.org/wiki/WebP) | +| `.pfm` | `yolo predict source=image.pfm` | [Portable FloatMap](https://en.wikipedia.org/wiki/Netpbm#File_formats) | ### Videos diff --git a/docs/en/modes/track.md b/docs/en/modes/track.md index a72c5578..2e9e307b 100644 --- a/docs/en/modes/track.md +++ b/docs/en/modes/track.md @@ -57,7 +57,7 @@ The default tracker is BoT-SORT. ## Tracking !!! Warning "Tracker Threshold Information" - + If object confidence score will be low, i.e lower than [`track_high_thresh`](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/trackers/bytetrack.yaml#L5), then there will be no tracks successfully returned and updated. To run the tracker on video streams, use a trained Detect, Segment or Pose model such as YOLOv8n, YOLOv8n-seg and YOLOv8n-pose. @@ -98,7 +98,7 @@ As can be seen in the above usage, tracking is available for all Detect, Segment ## Configuration !!! 
Warning "Tracker Threshold Information" - + If object confidence score will be low, i.e lower than [`track_high_thresh`](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/cfg/trackers/bytetrack.yaml#L5), then there will be no tracks successfully returned and updated. ### Tracking Arguments diff --git a/docs/en/tasks/classify.md b/docs/en/tasks/classify.md index fcc76842..38809b27 100644 --- a/docs/en/tasks/classify.md +++ b/docs/en/tasks/classify.md @@ -164,18 +164,18 @@ Available YOLOv8-cls export formats are in the table below. You can export to an | Format | `format` Argument | Model | Metadata | Arguments | |---------------------------------------------------|-------------------|-------------------------------|----------|----------------------------------------------------------------------| -| [PyTorch](https://pytorch.org/) | - | `yolov8n-cls.pt` | ✅ | - | -| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-cls.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | -| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-cls.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-cls_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-cls.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | -| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-cls.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | -| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-cls_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | -| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n-cls.pb` | ❌ | `imgsz`, `batch` | -| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n-cls.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n-cls_edgetpu.tflite` | ✅ | `imgsz`, `batch` | -| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n-cls_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n-cls_paddle_model/` | ✅ | `imgsz`, `batch` | -| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n-cls_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | +| [PyTorch](https://pytorch.org/) | - | `yolov8n-cls.pt` | ✅ | - | +| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-cls.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | +| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-cls.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-cls_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-cls.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | +| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-cls.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | +| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-cls_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | +| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n-cls.pb` | ❌ | `imgsz`, `batch` | +| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n-cls.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n-cls_edgetpu.tflite` | ✅ | `imgsz`, 
`batch` | +| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n-cls_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n-cls_paddle_model/` | ✅ | `imgsz`, `batch` | +| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n-cls_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | See full `export` details in the [Export](../modes/export.md) page. diff --git a/docs/en/tasks/detect.md b/docs/en/tasks/detect.md index 7782e66c..a9cfbe2a 100644 --- a/docs/en/tasks/detect.md +++ b/docs/en/tasks/detect.md @@ -165,18 +165,18 @@ Available YOLOv8 export formats are in the table below. You can export to any fo | Format | `format` Argument | Model | Metadata | Arguments | |---------------------------------------------------|-------------------|---------------------------|----------|----------------------------------------------------------------------| -| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | -| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | -| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | -| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | -| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | -| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n.pb` | ❌ | `imgsz`, `batch` | -| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz`, `batch` | -| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz`, `batch` | -| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | +| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | +| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | +| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | +| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | +| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n.pb` | ❌ | `imgsz`, `batch` | +| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | 
`imgsz`, `batch` | +| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz`, `batch` | +| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | See full `export` details in the [Export](../modes/export.md) page. diff --git a/docs/en/tasks/obb.md b/docs/en/tasks/obb.md index 93d14ea1..0955f072 100644 --- a/docs/en/tasks/obb.md +++ b/docs/en/tasks/obb.md @@ -186,18 +186,18 @@ Available YOLOv8-obb export formats are in the table below. You can export to an | Format | `format` Argument | Model | Metadata | Arguments | |---------------------------------------------------|-------------------|-------------------------------|----------|----------------------------------------------------------------------| -| [PyTorch](https://pytorch.org/) | - | `yolov8n-obb.pt` | ✅ | - | -| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-obb.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | -| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-obb.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-obb_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-obb.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | -| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-obb.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | -| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-obb_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | -| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n-obb.pb` | ❌ | `imgsz`, `batch` | -| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n-obb.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n-obb_edgetpu.tflite` | ✅ | `imgsz`, `batch` | -| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n-obb_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n-obb_paddle_model/` | ✅ | `imgsz`, `batch` | -| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n-obb_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | +| [PyTorch](https://pytorch.org/) | - | `yolov8n-obb.pt` | ✅ | - | +| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-obb.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | +| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-obb.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-obb_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-obb.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | +| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-obb.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | +| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-obb_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | +| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n-obb.pb` | ❌ | `imgsz`, `batch` | +| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n-obb.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TF Edge 
TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n-obb_edgetpu.tflite` | ✅ | `imgsz`, `batch` | +| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n-obb_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n-obb_paddle_model/` | ✅ | `imgsz`, `batch` | +| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n-obb_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | See full `export` details in the [Export](../modes/export.md) page. diff --git a/docs/en/tasks/pose.md b/docs/en/tasks/pose.md index d8c5ed34..c345dd99 100644 --- a/docs/en/tasks/pose.md +++ b/docs/en/tasks/pose.md @@ -180,18 +180,18 @@ Available YOLOv8-pose export formats are in the table below. You can export to a | Format | `format` Argument | Model | Metadata | Arguments | |---------------------------------------------------|-------------------|--------------------------------|----------|----------------------------------------------------------------------| -| [PyTorch](https://pytorch.org/) | - | `yolov8n-pose.pt` | ✅ | - | -| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-pose.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | -| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-pose.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-pose_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-pose.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | -| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-pose.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | -| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-pose_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | -| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n-pose.pb` | ❌ | `imgsz`, `batch` | -| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n-pose.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n-pose_edgetpu.tflite` | ✅ | `imgsz`, `batch` | -| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n-pose_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n-pose_paddle_model/` | ✅ | `imgsz`, `batch` | -| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n-pose_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | +| [PyTorch](https://pytorch.org/) | - | `yolov8n-pose.pt` | ✅ | - | +| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-pose.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | +| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-pose.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-pose_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-pose.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | +| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-pose.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | +| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-pose_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | +| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n-pose.pb` | ❌ | `imgsz`, `batch` | +| [TF 
Lite](../integrations/tflite.md) | `tflite` | `yolov8n-pose.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n-pose_edgetpu.tflite` | ✅ | `imgsz`, `batch` | +| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n-pose_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n-pose_paddle_model/` | ✅ | `imgsz`, `batch` | +| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n-pose_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | See full `export` details in the [Export](../modes/export.md) page. diff --git a/docs/en/tasks/segment.md b/docs/en/tasks/segment.md index a73ddb3f..30c6d9a3 100644 --- a/docs/en/tasks/segment.md +++ b/docs/en/tasks/segment.md @@ -170,18 +170,18 @@ Available YOLOv8-seg export formats are in the table below. You can export to an | Format | `format` Argument | Model | Metadata | Arguments | |---------------------------------------------------|-------------------|-------------------------------|----------|----------------------------------------------------------------------| -| [PyTorch](https://pytorch.org/) | - | `yolov8n-seg.pt` | ✅ | - | -| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-seg.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | -| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-seg.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-seg_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-seg.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | -| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-seg.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | -| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-seg_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | -| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n-seg.pb` | ❌ | `imgsz`, `batch` | -| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n-seg.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n-seg_edgetpu.tflite` | ✅ | `imgsz`, `batch` | -| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n-seg_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n-seg_paddle_model/` | ✅ | `imgsz`, `batch` | -| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n-seg_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | +| [PyTorch](https://pytorch.org/) | - | `yolov8n-seg.pt` | ✅ | - | +| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n-seg.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | +| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n-seg.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n-seg_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n-seg.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | +| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n-seg.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | +| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n-seg_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` 
| +| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n-seg.pb` | ❌ | `imgsz`, `batch` | +| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n-seg.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n-seg_edgetpu.tflite` | ✅ | `imgsz`, `batch` | +| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n-seg_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n-seg_paddle_model/` | ✅ | `imgsz`, `batch` | +| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n-seg_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | See full `export` details in the [Export](../modes/export.md) page. diff --git a/docs/en/usage/cli.md b/docs/en/usage/cli.md index 0df7828f..35dd45da 100644 --- a/docs/en/usage/cli.md +++ b/docs/en/usage/cli.md @@ -172,19 +172,19 @@ Available YOLOv8 export formats are in the table below. You can export to any fo | Format | `format` Argument | Model | Metadata | Arguments | |---------------------------------------------------|-------------------|---------------------------|----------|----------------------------------------------------------------------| -| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | -| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | -| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | -| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | -| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | -| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, `batch` | -| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n.pb` | ❌ | `imgsz`, `batch` | -| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz`, `batch` | -| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | -| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz`, `batch` | -| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | - | +| [TorchScript](../integrations/torchscript.md) | `torchscript` | `yolov8n.torchscript` | ✅ | `imgsz`, `optimize`, `batch` | +| [ONNX](../integrations/onnx.md) | `onnx` | `yolov8n.onnx` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `opset`, `batch` | +| [OpenVINO](../integrations/openvino.md) | `openvino` | `yolov8n_openvino_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TensorRT](../integrations/tensorrt.md) | `engine` | `yolov8n.engine` | ✅ | `imgsz`, `half`, `dynamic`, `simplify`, `workspace`, `int8`, `batch` | +| [CoreML](../integrations/coreml.md) | `coreml` | `yolov8n.mlpackage` | ✅ | `imgsz`, `half`, `int8`, `nms`, `batch` | +| [TF SavedModel](../integrations/tf-savedmodel.md) | `saved_model` | `yolov8n_saved_model/` | ✅ | `imgsz`, `keras`, `int8`, 
`batch` | +| [TF GraphDef](../integrations/tf-graphdef.md) | `pb` | `yolov8n.pb` | ❌ | `imgsz`, `batch` | +| [TF Lite](../integrations/tflite.md) | `tflite` | `yolov8n.tflite` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [TF Edge TPU](../integrations/edge-tpu.md) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | `imgsz`, `batch` | +| [TF.js](../integrations/tfjs.md) | `tfjs` | `yolov8n_web_model/` | ✅ | `imgsz`, `half`, `int8`, `batch` | +| [PaddlePaddle](../integrations/paddlepaddle.md) | `paddle` | `yolov8n_paddle_model/` | ✅ | `imgsz`, `batch` | +| [NCNN](../integrations/ncnn.md) | `ncnn` | `yolov8n_ncnn_model/` | ✅ | `imgsz`, `half`, `batch` | See full `export` details in the [Export](../modes/export.md) page. diff --git a/ultralytics/cfg/models/v9/yolov9c-seg.yaml b/ultralytics/cfg/models/v9/yolov9c-seg.yaml index 784a2dca..e1bf982b 100644 --- a/ultralytics/cfg/models/v9/yolov9c-seg.yaml +++ b/ultralytics/cfg/models/v9/yolov9c-seg.yaml @@ -3,36 +3,36 @@ # 654 layers, 27897120 parameters, 159.4 GFLOPs # parameters -nc: 80 # number of classes +nc: 80 # number of classes # gelan backbone backbone: - - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 - - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 - - [-1, 1, RepNCSPELAN4, [256, 128, 64, 1]] # 2 - - [-1, 1, ADown, [256]] # 3-P3/8 - - [-1, 1, RepNCSPELAN4, [512, 256, 128, 1]] # 4 - - [-1, 1, ADown, [512]] # 5-P4/16 - - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 6 - - [-1, 1, ADown, [512]] # 7-P5/32 - - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 8 - - [-1, 1, SPPELAN, [512, 256]] # 9 + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 1, RepNCSPELAN4, [256, 128, 64, 1]] # 2 + - [-1, 1, ADown, [256]] # 3-P3/8 + - [-1, 1, RepNCSPELAN4, [512, 256, 128, 1]] # 4 + - [-1, 1, ADown, [512]] # 5-P4/16 + - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 6 + - [-1, 1, ADown, [512]] # 7-P5/32 + - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 8 + - [-1, 1, SPPELAN, [512, 256]] # 9 head: - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 6], 1, Concat, [1]] # cat backbone P4 - - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 12 + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 12 - - [-1, 1, nn.Upsample, [None, 2, 'nearest']] - - [[-1, 4], 1, Concat, [1]] # cat backbone P3 - - [-1, 1, RepNCSPELAN4, [256, 256, 128, 1]] # 15 (P3/8-small) + - [-1, 1, nn.Upsample, [None, 2, "nearest"]] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 1, RepNCSPELAN4, [256, 256, 128, 1]] # 15 (P3/8-small) - [-1, 1, ADown, [256]] - - [[-1, 12], 1, Concat, [1]] # cat head P4 - - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 18 (P4/16-medium) + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 18 (P4/16-medium) - [-1, 1, ADown, [512]] - - [[-1, 9], 1, Concat, [1]] # cat head P5 - - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 21 (P5/32-large) + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 21 (P5/32-large) - - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5) + - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5) diff --git a/ultralytics/cfg/models/v9/yolov9c.yaml b/ultralytics/cfg/models/v9/yolov9c.yaml index 4bea1876..eeacd0d6 100644 --- a/ultralytics/cfg/models/v9/yolov9c.yaml +++ b/ultralytics/cfg/models/v9/yolov9c.yaml @@ -3,36 +3,36 @@ # 618 layers, 25590912 parameters, 104.0 GFLOPs # parameters -nc: 80 # number of classes +nc: 80 # number of classes # gelan 
diff --git a/ultralytics/cfg/models/v9/yolov9c.yaml b/ultralytics/cfg/models/v9/yolov9c.yaml
index 4bea1876..eeacd0d6 100644
--- a/ultralytics/cfg/models/v9/yolov9c.yaml
+++ b/ultralytics/cfg/models/v9/yolov9c.yaml
@@ -3,36 +3,36 @@
 # 618 layers, 25590912 parameters, 104.0 GFLOPs

 # parameters
-nc: 80  # number of classes
+nc: 80 # number of classes

 # gelan backbone
 backbone:
-  - [-1, 1, Conv, [64, 3, 2]]  # 0-P1/2
-  - [-1, 1, Conv, [128, 3, 2]]  # 1-P2/4
-  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 1]]  # 2
-  - [-1, 1, ADown, [256]]  # 3-P3/8
-  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 1]]  # 4
-  - [-1, 1, ADown, [512]]  # 5-P4/16
-  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]]  # 6
-  - [-1, 1, ADown, [512]]  # 7-P5/32
-  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]]  # 8
-  - [-1, 1, SPPELAN, [512, 256]]  # 9
+  - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 1]] # 2
+  - [-1, 1, ADown, [256]] # 3-P3/8
+  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 1]] # 4
+  - [-1, 1, ADown, [512]] # 5-P4/16
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 6
+  - [-1, 1, ADown, [512]] # 7-P5/32
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 8
+  - [-1, 1, SPPELAN, [512, 256]] # 9

 head:
-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 6], 1, Concat, [1]]  # cat backbone P4
-  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]]  # 12
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 12

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 4], 1, Concat, [1]]  # cat backbone P3
-  - [-1, 1, RepNCSPELAN4, [256, 256, 128, 1]]  # 15 (P3/8-small)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+  - [-1, 1, RepNCSPELAN4, [256, 256, 128, 1]] # 15 (P3/8-small)

   - [-1, 1, ADown, [256]]
-  - [[-1, 12], 1, Concat, [1]]  # cat head P4
-  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]]  # 18 (P4/16-medium)
+  - [[-1, 12], 1, Concat, [1]] # cat head P4
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 18 (P4/16-medium)

   - [-1, 1, ADown, [512]]
-  - [[-1, 9], 1, Concat, [1]]  # cat head P5
-  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]]  # 21 (P5/32-large)
+  - [[-1, 9], 1, Concat, [1]] # cat head P5
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 1]] # 21 (P5/32-large)

-  - [[15, 18, 21], 1, Detect, [nc]]  # Detect(P3, P4, P5)
+  - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
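These GELAN configs can be built directly by file name. A minimal sketch assuming the standard `ultralytics` Python API, where `YOLO(...)` accepts a model YAML and `coco8.yaml` is just an illustrative dataset name; building from a bare `.yaml` yields randomly initialized weights:

```python
from ultralytics import YOLO

# Build the detector from its architecture definition (random init)
model = YOLO("yolov9c.yaml")

# The segmentation variant only swaps the final Detect head for a Segment head
seg_model = YOLO("yolov9c-seg.yaml")

# Training and inference then follow the usual API, for example:
# model.train(data="coco8.yaml", epochs=1, imgsz=640)
# seg_model("https://ultralytics.com/images/bus.jpg")
```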
diff --git a/ultralytics/cfg/models/v9/yolov9e-seg.yaml b/ultralytics/cfg/models/v9/yolov9e-seg.yaml
index 636b78a8..7a72b29f 100644
--- a/ultralytics/cfg/models/v9/yolov9e-seg.yaml
+++ b/ultralytics/cfg/models/v9/yolov9e-seg.yaml
@@ -3,20 +3,20 @@
 # 1261 layers, 60512800 parameters, 248.4 GFLOPs

 # parameters
-nc: 80  # number of classes
+nc: 80 # number of classes

 # gelan backbone
 backbone:
   - [-1, 1, Silence, []]
-  - [-1, 1, Conv, [64, 3, 2]]  # 1-P1/2
-  - [-1, 1, Conv, [128, 3, 2]]  # 2-P2/4
-  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 2]]  # 3
-  - [-1, 1, ADown, [256]]  # 4-P3/8
-  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 2]]  # 5
-  - [-1, 1, ADown, [512]]  # 6-P4/16
-  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]]  # 7
-  - [-1, 1, ADown, [1024]]  # 8-P5/32
-  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]]  # 9
+  - [-1, 1, Conv, [64, 3, 2]] # 1-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 2-P2/4
+  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 2]] # 3
+  - [-1, 1, ADown, [256]] # 4-P3/8
+  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 2]] # 5
+  - [-1, 1, ADown, [512]] # 6-P4/16
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 7
+  - [-1, 1, ADown, [1024]] # 8-P5/32
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 9

   - [1, 1, CBLinear, [[64]]] # 10
   - [3, 1, CBLinear, [[64, 128]]] # 11
@@ -24,38 +24,38 @@ backbone:
   - [7, 1, CBLinear, [[64, 128, 256, 512]]] # 13
   - [9, 1, CBLinear, [[64, 128, 256, 512, 1024]]] # 14

-  - [0, 1, Conv, [64, 3, 2]]  # 15-P1/2
+  - [0, 1, Conv, [64, 3, 2]] # 15-P1/2
   - [[10, 11, 12, 13, 14, -1], 1, CBFuse, [[0, 0, 0, 0, 0]]] # 16
-  - [-1, 1, Conv, [128, 3, 2]]  # 17-P2/4
-  - [[11, 12, 13, 14, -1], 1, CBFuse, [[1, 1, 1, 1]]]  # 18
-  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 2]]  # 19
-  - [-1, 1, ADown, [256]]  # 20-P3/8
-  - [[12, 13, 14, -1], 1, CBFuse, [[2, 2, 2]]]  # 21
-  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 2]]  # 22
-  - [-1, 1, ADown, [512]]  # 23-P4/16
-  - [[13, 14, -1], 1, CBFuse, [[3, 3]]]  # 24
-  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]]  # 25
-  - [-1, 1, ADown, [1024]]  # 26-P5/32
+  - [-1, 1, Conv, [128, 3, 2]] # 17-P2/4
+  - [[11, 12, 13, 14, -1], 1, CBFuse, [[1, 1, 1, 1]]] # 18
+  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 2]] # 19
+  - [-1, 1, ADown, [256]] # 20-P3/8
+  - [[12, 13, 14, -1], 1, CBFuse, [[2, 2, 2]]] # 21
+  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 2]] # 22
+  - [-1, 1, ADown, [512]] # 23-P4/16
+  - [[13, 14, -1], 1, CBFuse, [[3, 3]]] # 24
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 25
+  - [-1, 1, ADown, [1024]] # 26-P5/32
   - [[14, -1], 1, CBFuse, [[4]]] # 27
-  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]]  # 28
-  - [-1, 1, SPPELAN, [512, 256]]  # 29
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 28
+  - [-1, 1, SPPELAN, [512, 256]] # 29

 # gelan head
 head:
-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 25], 1, Concat, [1]]  # cat backbone P4
-  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 2]]  # 32
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 25], 1, Concat, [1]] # cat backbone P4
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 2]] # 32

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 22], 1, Concat, [1]]  # cat backbone P3
-  - [-1, 1, RepNCSPELAN4, [256, 256, 128, 2]]  # 35 (P3/8-small)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 22], 1, Concat, [1]] # cat backbone P3
+  - [-1, 1, RepNCSPELAN4, [256, 256, 128, 2]] # 35 (P3/8-small)

   - [-1, 1, ADown, [256]]
-  - [[-1, 32], 1, Concat, [1]]  # cat head P4
-  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 2]]  # 38 (P4/16-medium)
+  - [[-1, 32], 1, Concat, [1]] # cat head P4
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 2]] # 38 (P4/16-medium)

   - [-1, 1, ADown, [512]]
-  - [[-1, 29], 1, Concat, [1]]  # cat head P5
-  - [-1, 1, RepNCSPELAN4, [512, 1024, 512, 2]]  # 41 (P5/32-large)
+  - [[-1, 29], 1, Concat, [1]] # cat head P5
+  - [-1, 1, RepNCSPELAN4, [512, 1024, 512, 2]] # 41 (P5/32-large)

-  - [[35, 38, 41], 1, Segment, [nc, 32, 256]]  # Segment (P3, P4, P5)
+  - [[35, 38, 41], 1, Segment, [nc, 32, 256]] # Segment (P3, P4, P5)
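Each row in these configs is a `[from, repeats, module, args]` entry, and the inline comments give the global layer index over `backbone` + `head`; in the `e` variants the extra `Silence`/`CBLinear`/`CBFuse` branch shifts those indices, which is why the `Segment` head reads from layers 35/38/41 here versus 15/18/21 in `yolov9c-seg.yaml`. A minimal sketch of inspecting that structure with PyYAML (the path assumes a local checkout of the repo; adjust as needed):

```python
import yaml

cfg_path = "ultralytics/cfg/models/v9/yolov9e-seg.yaml"  # assumed local path

with open(cfg_path) as f:
    cfg = yaml.safe_load(f)

# Every row is [from, repeats, module, args]; negative "from" values are
# relative (-1 = previous layer), and a list means multiple inputs
# (Concat / CBFuse). The enumerate index matches the "# N" comments.
for i, (frm, repeats, module, args) in enumerate(cfg["backbone"] + cfg["head"]):
    print(f"{i:3d}  from={frm!s:<26} {module}({args})")
```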
diff --git a/ultralytics/cfg/models/v9/yolov9e.yaml b/ultralytics/cfg/models/v9/yolov9e.yaml
index ed2594bf..b05a5ff4 100644
--- a/ultralytics/cfg/models/v9/yolov9e.yaml
+++ b/ultralytics/cfg/models/v9/yolov9e.yaml
@@ -3,20 +3,20 @@
 # 1225 layers, 58206592 parameters, 193.0 GFLOPs

 # parameters
-nc: 80  # number of classes
+nc: 80 # number of classes

 # gelan backbone
 backbone:
   - [-1, 1, Silence, []]
-  - [-1, 1, Conv, [64, 3, 2]]  # 1-P1/2
-  - [-1, 1, Conv, [128, 3, 2]]  # 2-P2/4
-  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 2]]  # 3
-  - [-1, 1, ADown, [256]]  # 4-P3/8
-  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 2]]  # 5
-  - [-1, 1, ADown, [512]]  # 6-P4/16
-  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]]  # 7
-  - [-1, 1, ADown, [1024]]  # 8-P5/32
-  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]]  # 9
+  - [-1, 1, Conv, [64, 3, 2]] # 1-P1/2
+  - [-1, 1, Conv, [128, 3, 2]] # 2-P2/4
+  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 2]] # 3
+  - [-1, 1, ADown, [256]] # 4-P3/8
+  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 2]] # 5
+  - [-1, 1, ADown, [512]] # 6-P4/16
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 7
+  - [-1, 1, ADown, [1024]] # 8-P5/32
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 9

   - [1, 1, CBLinear, [[64]]] # 10
   - [3, 1, CBLinear, [[64, 128]]] # 11
@@ -24,38 +24,38 @@ backbone:
   - [7, 1, CBLinear, [[64, 128, 256, 512]]] # 13
   - [9, 1, CBLinear, [[64, 128, 256, 512, 1024]]] # 14

-  - [0, 1, Conv, [64, 3, 2]]  # 15-P1/2
+  - [0, 1, Conv, [64, 3, 2]] # 15-P1/2
   - [[10, 11, 12, 13, 14, -1], 1, CBFuse, [[0, 0, 0, 0, 0]]] # 16
-  - [-1, 1, Conv, [128, 3, 2]]  # 17-P2/4
-  - [[11, 12, 13, 14, -1], 1, CBFuse, [[1, 1, 1, 1]]]  # 18
-  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 2]]  # 19
-  - [-1, 1, ADown, [256]]  # 20-P3/8
-  - [[12, 13, 14, -1], 1, CBFuse, [[2, 2, 2]]]  # 21
-  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 2]]  # 22
-  - [-1, 1, ADown, [512]]  # 23-P4/16
-  - [[13, 14, -1], 1, CBFuse, [[3, 3]]]  # 24
-  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]]  # 25
-  - [-1, 1, ADown, [1024]]  # 26-P5/32
+  - [-1, 1, Conv, [128, 3, 2]] # 17-P2/4
+  - [[11, 12, 13, 14, -1], 1, CBFuse, [[1, 1, 1, 1]]] # 18
+  - [-1, 1, RepNCSPELAN4, [256, 128, 64, 2]] # 19
+  - [-1, 1, ADown, [256]] # 20-P3/8
+  - [[12, 13, 14, -1], 1, CBFuse, [[2, 2, 2]]] # 21
+  - [-1, 1, RepNCSPELAN4, [512, 256, 128, 2]] # 22
+  - [-1, 1, ADown, [512]] # 23-P4/16
+  - [[13, 14, -1], 1, CBFuse, [[3, 3]]] # 24
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 25
+  - [-1, 1, ADown, [1024]] # 26-P5/32
   - [[14, -1], 1, CBFuse, [[4]]] # 27
-  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]]  # 28
-  - [-1, 1, SPPELAN, [512, 256]]  # 29
+  - [-1, 1, RepNCSPELAN4, [1024, 512, 256, 2]] # 28
+  - [-1, 1, SPPELAN, [512, 256]] # 29

 # gelan head
 head:
-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 25], 1, Concat, [1]]  # cat backbone P4
-  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 2]]  # 32
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 25], 1, Concat, [1]] # cat backbone P4
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 2]] # 32

-  - [-1, 1, nn.Upsample, [None, 2, 'nearest']]
-  - [[-1, 22], 1, Concat, [1]]  # cat backbone P3
-  - [-1, 1, RepNCSPELAN4, [256, 256, 128, 2]]  # 35 (P3/8-small)
+  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+  - [[-1, 22], 1, Concat, [1]] # cat backbone P3
+  - [-1, 1, RepNCSPELAN4, [256, 256, 128, 2]] # 35 (P3/8-small)

   - [-1, 1, ADown, [256]]
-  - [[-1, 32], 1, Concat, [1]]  # cat head P4
-  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 2]]  # 38 (P4/16-medium)
+  - [[-1, 32], 1, Concat, [1]] # cat head P4
+  - [-1, 1, RepNCSPELAN4, [512, 512, 256, 2]] # 38 (P4/16-medium)

   - [-1, 1, ADown, [512]]
-  - [[-1, 29], 1, Concat, [1]]  # cat head P5
-  - [-1, 1, RepNCSPELAN4, [512, 1024, 512, 2]]  # 41 (P5/32-large)
+  - [[-1, 29], 1, Concat, [1]] # cat head P5
+  - [-1, 1, RepNCSPELAN4, [512, 1024, 512, 2]] # 41 (P5/32-large)

-  - [[35, 38, 41], 1, Detect, [nc]]  # Detect(P3, P4, P5)
+  - [[35, 38, 41], 1, Detect, [nc]] # Detect(P3, P4, P5)
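The header comments in each config (e.g. 1225 layers, 58206592 parameters for `yolov9e.yaml`) can be sanity-checked after a reformatting like this by rebuilding the models and printing their summaries. A minimal sketch, assuming the `ultralytics` `YOLO` class resolves these config names and that `model.info()` prints a layer/parameter summary:

```python
from ultralytics import YOLO

# Build each reformatted config and print its summary; layer and parameter
# counts should still match the header comments, since the changes above
# are whitespace- and quoting-only.
for cfg in ("yolov9c.yaml", "yolov9c-seg.yaml", "yolov9e.yaml", "yolov9e-seg.yaml"):
    model = YOLO(cfg)
    model.info()
```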