From 064e2fd282ed37acb6339afa631e16270e4823e6 Mon Sep 17 00:00:00 2001 From: Glenn Jocher Date: Sun, 2 Jun 2024 14:07:14 +0200 Subject: [PATCH] Docs spelling and grammar fixes (#13307) Signed-off-by: Glenn Jocher Co-authored-by: RainRat --- docs/build_docs.py | 18 ++++++-- docs/en/datasets/detect/roboflow-100.md | 2 +- .../guides/data-collection-and-annotation.md | 12 ++--- docs/en/guides/defining-project-goals.md | 16 +++---- docs/en/guides/docker-quickstart.md | 2 +- docs/en/guides/model-deployment-options.md | 28 +++++------ docs/en/guides/nvidia-jetson.md | 10 ++-- docs/en/guides/object-counting.md | 2 +- docs/en/guides/object-cropping.md | 2 +- ...ng-openvino-latency-vs-throughput-modes.md | 1 - docs/en/guides/parking-management.md | 2 +- .../en/guides/preprocessing_annotated_data.md | 18 ++++---- docs/en/guides/raspberry-pi.md | 8 ++-- docs/en/guides/steps-of-a-cv-project.md | 46 +++++++++---------- docs/en/guides/yolo-common-issues.md | 4 +- docs/en/guides/yolo-performance-metrics.md | 6 +-- docs/en/help/code_of_conduct.md | 2 +- docs/en/help/environmental-health-safety.md | 2 +- docs/en/hub/pro.md | 4 +- docs/en/integrations/amazon-sagemaker.md | 6 +-- docs/en/integrations/clearml.md | 4 +- docs/en/integrations/comet.md | 4 +- docs/en/integrations/coreml.md | 6 +-- docs/en/integrations/dvc.md | 2 +- docs/en/integrations/edge-tpu.md | 6 +-- docs/en/integrations/google-colab.md | 20 ++++---- docs/en/integrations/ncnn.md | 2 +- docs/en/integrations/neural-magic.md | 22 ++++----- docs/en/integrations/onnx.md | 6 +-- docs/en/integrations/paddlepaddle.md | 4 +- docs/en/integrations/paperspace.md | 6 +-- docs/en/integrations/ray-tune.md | 4 +- docs/en/integrations/roboflow.md | 2 +- docs/en/integrations/tensorboard.md | 10 ++-- docs/en/integrations/tensorrt.md | 6 +-- docs/en/integrations/tf-graphdef.md | 2 +- docs/en/integrations/tf-savedmodel.md | 4 +- docs/en/integrations/tfjs.md | 6 +-- docs/en/integrations/tflite.md | 4 +- docs/en/integrations/torchscript.md | 10 ++-- docs/en/integrations/weights-biases.md | 12 ++--- docs/en/models/rtdetr.md | 2 +- docs/en/modes/val.md | 2 +- docs/en/reference/data/annotator.md | 2 +- docs/en/reference/data/utils.md | 2 +- docs/en/tasks/detect.md | 2 +- docs/en/usage/cli.md | 2 +- .../google_cloud_quickstart_tutorial.md | 6 +-- 48 files changed, 179 insertions(+), 172 deletions(-) diff --git a/docs/build_docs.py b/docs/build_docs.py index da09705c..566c1f93 100644 --- a/docs/build_docs.py +++ b/docs/build_docs.py @@ -116,13 +116,21 @@ def update_subdir_edit_links(subdir="", docs_url=""): file.write(str(soup)) -def add_frontmatter(md_filepath: Path): +def update_page(md_filepath: Path): """Creates or updates a Markdown file, ensuring frontmatter is present.""" if md_filepath.exists(): - existing_content = md_filepath.read_text() - if not existing_content.strip().startswith("---\n"): + content = md_filepath.read_text() + + # Replace apostrophes + content = content.replace("‘", "'").replace("’", "'") + + # Add frontmatter if missing + if not content.strip().startswith("---\n"): header = "---\ncomments: true\ndescription: TODO ADD DESCRIPTION\nkeywords: TODO ADD KEYWORDS\n---\n\n" - md_filepath.write_text(header + existing_content) + content = header + content + + # Save page + md_filepath.write_text(content) return @@ -132,7 +140,7 @@ def main(): # Add frontmatter for file in tqdm((DOCS / "en").rglob("*.md"), desc="Adding frontmatter"): - add_frontmatter(file) + update_page(file) # Update titles update_page_title(SITE / "404.html", 
new_title="Ultralytics Docs - Not Found") diff --git a/docs/en/datasets/detect/roboflow-100.md b/docs/en/datasets/detect/roboflow-100.md index 9d17fbc0..583591fe 100644 --- a/docs/en/datasets/detect/roboflow-100.md +++ b/docs/en/datasets/detect/roboflow-100.md @@ -101,7 +101,7 @@ You can access it directly from the Roboflow 100 GitHub repository. In addition, ## Sample Data and Annotations -Roboflow 100 consists of datasets with diverse images and videos captured from various angles and domains. Here’s a look at examples of annotated images in the RF100 benchmark. +Roboflow 100 consists of datasets with diverse images and videos captured from various angles and domains. Here's a look at examples of annotated images in the RF100 benchmark.
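As a quick sanity check of the `update_page` helper introduced in `build_docs.py` above, here is a minimal sketch that mirrors its logic on a throwaway file (the temporary path and sample text are illustrative, not part of the patch):

```python
import tempfile
from pathlib import Path


def update_page(md_filepath: Path):
    """Mirror of the patched helper: normalize curly apostrophes and ensure frontmatter."""
    if md_filepath.exists():
        content = md_filepath.read_text()
        content = content.replace("‘", "'").replace("’", "'")  # straighten curly apostrophes
        if not content.strip().startswith("---\n"):
            header = "---\ncomments: true\ndescription: TODO ADD DESCRIPTION\nkeywords: TODO ADD KEYWORDS\n---\n\n"
            content = header + content  # prepend placeholder frontmatter
        md_filepath.write_text(content)


md_file = Path(tempfile.mkdtemp()) / "page.md"
md_file.write_text("Here’s a page without frontmatter.\n")
update_page(md_file)
print(md_file.read_text().startswith("---\ncomments: true"))  # True
print("’" in md_file.read_text())  # False, apostrophes normalized
```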

Sample Data and Annotations diff --git a/docs/en/guides/data-collection-and-annotation.md b/docs/en/guides/data-collection-and-annotation.md index cf0b18bb..c12fb065 100644 --- a/docs/en/guides/data-collection-and-annotation.md +++ b/docs/en/guides/data-collection-and-annotation.md @@ -8,7 +8,7 @@ keywords: What is Data Annotation, Data Annotation Tools, Annotating Data, Avoid ## Introduction -The key to success in any [computer vision project](./steps-of-a-cv-project.md) starts with effective data collection and annotation strategies. The quality of the data directly impacts model performance, so it’s important to understand the best practices related to data collection and data annotation. +The key to success in any [computer vision project](./steps-of-a-cv-project.md) starts with effective data collection and annotation strategies. The quality of the data directly impacts model performance, so it's important to understand the best practices related to data collection and data annotation. Every consideration regarding the data should closely align with [your project's goals](./defining-project-goals.md). Changes in your annotation strategies could shift the project's focus or effectiveness and vice versa. With this in mind, let's take a closer look at the best ways to approach data collection and annotation. @@ -22,7 +22,7 @@ One of the first questions when starting a computer vision project is how many c For example, if you want to monitor traffic, your classes might include "car," "truck," "bus," "motorcycle," and "bicycle." On the other hand, for tracking items in a store, your classes could be "fruits," "vegetables," "beverages," and "snacks." Defining classes based on your project goals helps keep your dataset relevant and focused. -When you define your classes, another important distinction to make is whether to choose coarse or fine class counts. ‘Count' refers to the number of distinct classes you are interested in. This decision influences the granularity of your data and the complexity of your model. Here are the considerations for each approach: +When you define your classes, another important distinction to make is whether to choose coarse or fine class counts. 'Count' refers to the number of distinct classes you are interested in. This decision influences the granularity of your data and the complexity of your model. Here are the considerations for each approach: - **Coarse Class-Count**: These are broader, more inclusive categories, such as "vehicle" and "non-vehicle." They simplify annotation and require fewer computational resources but provide less detailed information, potentially limiting the model's effectiveness in complex scenarios. - **Fine Class-Count**: More categories with finer distinctions, such as "sedan," "SUV," "pickup truck," and "motorcycle." They capture more detailed information, improving model accuracy and performance. However, they are more time-consuming and labor-intensive to annotate and require more computational resources. @@ -67,9 +67,9 @@ Depending on the specific requirements of a [computer vision task](../tasks/inde ### Common Annotation Formats -After selecting a type of annotation, it’s important to choose the appropriate format for storing and sharing annotations. +After selecting a type of annotation, it's important to choose the appropriate format for storing and sharing annotations. 
-Commonly used formats include [COCO](../datasets/detect/coco.md), which supports various annotation types like object detection, keypoint detection, stuff segmentation, panoptic segmentation, and image captioning, stored in JSON. [Pascal VOC](../datasets/detect/voc.md)) uses XML files and is popular for object detection tasks. YOLO, on the other hand, creates a .txt file for each image, containing annotations like object class, coordinates, height, and width, making it suitable for object detection. +Commonly used formats include [COCO](../datasets/detect/coco.md), which supports various annotation types like object detection, keypoint detection, stuff segmentation, panoptic segmentation, and image captioning, stored in JSON. [Pascal VOC](../datasets/detect/voc.md) uses XML files and is popular for object detection tasks. YOLO, on the other hand, creates a .txt file for each image, containing annotations like object class, coordinates, height, and width, making it suitable for object detection. ### Techniques of Annotation @@ -78,7 +78,7 @@ Now, assuming you've chosen a type of annotation and format, it's time to establ - **Clarity and Detail**: Make sure your instructions are clear. Use examples and illustrations to understand what's expected. - **Consistency**: Keep your annotations uniform. Set standard criteria for annotating different types of data, so all annotations follow the same rules. - **Reducing Bias**: Stay neutral. Train yourself to be objective and minimize personal biases to ensure fair annotations. -- **Efficiency**: Work smarter, not harder. Use tools and workflows that automate repetitive tasks, making the annotation process faster and more efficient.. +- **Efficiency**: Work smarter, not harder. Use tools and workflows that automate repetitive tasks, making the annotation process faster and more efficient. Regularly reviewing and updating your labeling rules will help keep your annotations accurate, consistent, and aligned with your project goals. @@ -86,7 +86,7 @@ Regularly reviewing and updating your labeling rules will help keep your annotat Let's say you are ready to annotate now. There are several open-source tools available to help streamline the data annotation process. Here are some useful open annotation tools: -- **[LabeI Studio](https://github.com/HumanSignal/label-studio)**: A flexible tool that supports a wide range of annotation tasks and includes features for managing projects and quality control. +- **[Label Studio](https://github.com/HumanSignal/label-studio)**: A flexible tool that supports a wide range of annotation tasks and includes features for managing projects and quality control. - **[CVAT](https://github.com/cvat-ai/cvat)**: A powerful tool that supports various annotation formats and customizable workflows, making it suitable for complex projects. - **[Labelme](https://github.com/labelmeai/labelme)**: A simple and easy-to-use tool that allows for quick annotation of images with polygons, making it ideal for straightforward tasks. diff --git a/docs/en/guides/defining-project-goals.md b/docs/en/guides/defining-project-goals.md index 16ef6dc6..6827dee7 100644 --- a/docs/en/guides/defining-project-goals.md +++ b/docs/en/guides/defining-project-goals.md @@ -10,13 +10,13 @@ keywords: Computer Vision Project, Defining Problems, Setting Objectives, SMART The first step in any computer vision project is defining what you want to achieve. 
It's crucial to have a clear roadmap from the start, which includes everything from data collection to deploying your model. -If you need a quick refresher on the basics of a computer vision project, take a moment to read our guide on [the key steps in a computer vision project](./steps-of-a-cv-project.md). It’ll give you a solid overview of the whole process. Once you’re caught up, come back here so we can dive into how exactly you can define and refine the goals for your project. +If you need a quick refresher on the basics of a computer vision project, take a moment to read our guide on [the key steps in a computer vision project](./steps-of-a-cv-project.md). It'll give you a solid overview of the whole process. Once you're caught up, come back here to dive into how exactly you can define and refine the goals for your project. -Now, let’s get to the heart of defining a clear problem statement for your project and exploring the key decisions you’ll need to make along the way. +Now, let's get to the heart of defining a clear problem statement for your project and exploring the key decisions you'll need to make along the way. ## Defining A Clear Problem Statement -Setting clear goals and objectives for your project is the first big step toward finding the most effective solutions. Let’s understand how you can clearly define your project’s problem statement: +Setting clear goals and objectives for your project is the first big step toward finding the most effective solutions. Let's understand how you can clearly define your project's problem statement: - **Identify the Core Issue:** Pinpoint the specific challenge your computer vision project aims to solve. - **Determine the Scope:** Define the boundaries of your problem. @@ -25,7 +25,7 @@ Setting clear goals and objectives for your project is the first big step toward ### Example of a Business Problem Statement -Let’s walk through an example. +Let's walk through an example. Consider a computer vision project where you want to [estimate the speed of vehicles](./speed-estimation.md) on a highway. The core issue is that current speed monitoring methods are inefficient and error-prone due to outdated radar systems and manual processes. The project aims to develop a real-time computer vision system that can replace legacy [speed estimation](https://www.ultralytics.com/blog/ultralytics-yolov8-for-speed-estimation-in-computer-vision-projects) systems. @@ -56,7 +56,7 @@ For example, if your problem is monitoring vehicle speeds on a highway, the rele Example of Object Tracking

-Other tasks, like [object detection](../tasks/detect.md), are not suitable as they don't provide continuous location or movement information. Once you’ve identified the appropriate computer vision task, it guides several critical aspects of your project, like model selection, dataset preparation, and model training approaches. +Other tasks, like [object detection](../tasks/detect.md), are not suitable as they don't provide continuous location or movement information. Once you've identified the appropriate computer vision task, it guides several critical aspects of your project, like model selection, dataset preparation, and model training approaches. ## Which Comes First: Model Selection, Dataset Preparation, or Model Training Approach? @@ -93,14 +93,14 @@ No, pre-trained models don't "remember" classes in the traditional sense. They l Overview of Transfer Learning

-If you want to use the classes the model was pre-trained on, a practical approach is to use two models: one retains the original performance, and the other is fine-tuned for your specific task. This way, you can combine the outputs of both models. There are also other options like freezing layers, using the pre-trained model as a feature extractor, and task-specific branching, but these are more complex solutions and require more expertise. +If you want to use the classes the model was pre-trained on, a practical approach is to use two models: one retains the original performance, and the other is fine-tuned for your specific task. This way, you can combine the outputs of both models. There are other options like freezing layers, using the pre-trained model as a feature extractor, and task-specific branching, but these are more complex solutions and require more expertise. ### How Do Deployment Options Affect My Computer Vision Project? [Model deployment options](./model-deployment-options.md) critically impact the performance of your computer vision project. For instance, the deployment environment must handle the computational load of your model. Here are some practical examples: - **Edge Devices**: Deploying on edge devices like smartphones or IoT devices requires lightweight models due to their limited computational resources. Example technologies include [TensorFlow Lite](../integrations/tflite.md) and [ONNX Runtime](../integrations/onnx.md), which are optimized for such environments. -- **Cloud Servers**: Cloud deployments can handle more complex models with larger computational demands. Cloud platforms like [AWS](../integrations/amazon-sagemaker.md), Google Cloud, and Azure offer robust hardware options that can scale based on the project’s needs. +- **Cloud Servers**: Cloud deployments can handle more complex models with larger computational demands. Cloud platforms like [AWS](../integrations/amazon-sagemaker.md), Google Cloud, and Azure offer robust hardware options that can scale based on the project's needs. - **On-Premise Servers**: For scenarios requiring high data privacy and security, deploying on-premise might be necessary. This involves significant upfront hardware investment but allows full control over the data and infrastructure. - **Hybrid Solutions**: Some projects might benefit from a hybrid approach, where some processing is done on the edge, while more complex analyses are offloaded to the cloud. This can balance performance needs with cost and latency considerations. @@ -138,4 +138,4 @@ Connecting with other computer vision enthusiasts can be incredibly helpful for ## Conclusion -Defining a clear problem and setting measurable goals is key to a successful computer vision project. We’ve highlighted the importance of being clear and focused from the start. Having specific goals helps avoid oversight. Also, staying connected with others in the community through platforms like GitHub or Discord is important for learning and staying current. In short, good planning and engaging with the community is a huge part of successful computer vision projects. +Defining a clear problem and setting measurable goals is key to a successful computer vision project. We've highlighted the importance of being clear and focused from the start. Having specific goals helps avoid oversight. Also, staying connected with others in the community through platforms like GitHub or Discord is important for learning and staying current. 
In short, good planning and engaging with the community are a huge part of successful computer vision projects. diff --git a/docs/en/guides/docker-quickstart.md b/docs/en/guides/docker-quickstart.md index 8fd01b20..dd9eddab 100644 --- a/docs/en/guides/docker-quickstart.md +++ b/docs/en/guides/docker-quickstart.md @@ -154,7 +154,7 @@ Replace `/path/on/host` with the directory path on your local machine and `/path The following instructions are experimental. Sharing a X11 socket with a Docker container poses potential security risks. Therefore, it's recommended to test this solution only in a controlled environment. For more information, refer to these resources on how to use `xhost`[(1)](http://users.stat.umn.edu/~geyer/secure.html)[(2)](https://linux.die.net/man/1/xhost). -Docker is primarily used to containerize background applications and CLI programs, but it can also run graphical programs. In the Linux world, two main graphic servers handle graphical display: [X11](https://www.x.org/wiki/) (also known as the X Window System) and [Wayland](https://wayland.freedesktop.org/). Before starting it's essential to determine which graphics server you're currently using. Just run this command to find out: +Docker is primarily used to containerize background applications and CLI programs, but it can also run graphical programs. In the Linux world, two main graphic servers handle graphical display: [X11](https://www.x.org/wiki/) (also known as the X Window System) and [Wayland](https://wayland.freedesktop.org/). Before starting, it's essential to determine which graphics server you are currently using. Run this command to find out: ```bash env | grep -E -i 'x11|xorg|wayland' ``` diff --git a/docs/en/guides/model-deployment-options.md b/docs/en/guides/model-deployment-options.md index a487ea4d..9b988bda 100644 --- a/docs/en/guides/model-deployment-options.md +++ b/docs/en/guides/model-deployment-options.md @@ -4,13 +4,13 @@ description: A guide to help determine which deployment option to choose for you keywords: YOLOv8, Deployment, PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, CoreML, TensorFlow, Export --- -# Understanding YOLOv8’s Deployment Options +# Understanding YOLOv8's Deployment Options ## Introduction -You've come a long way on your journey with YOLOv8. You've diligently collected data, meticulously annotated it, and put in the hours to train and rigorously evaluate your custom YOLOv8 model. Now, it’s time to put your model to work for your specific application, use case, or project. But there's a critical decision that stands before you: how to export and deploy your model effectively. +You've come a long way on your journey with YOLOv8. You've diligently collected data, meticulously annotated it, and put in the hours to train and rigorously evaluate your custom YOLOv8 model. Now, it's time to put your model to work for your specific application, use case, or project. But there's a critical decision that stands before you: how to export and deploy your model effectively. -This guide walks you through YOLOv8’s deployment options and the essential factors to consider to choose the right option for your project. +This guide walks you through YOLOv8's deployment options and the essential factors to consider when choosing the right option for your project.
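Every format discussed below is produced through the same `export` entry point of the `ultralytics` package; a minimal sketch (ONNX is just one example target):

```python
from ultralytics import YOLO

# Load a trained model and export it to the desired deployment format
model = YOLO("yolov8n.pt")
model.export(format="onnx")  # other targets: "torchscript", "openvino", "engine", "coreml", ...
```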
## How to Select the Right Deployment Option for Your YOLOv8 Model @@ -18,9 +18,9 @@ When it's time to deploy your YOLOv8 model, selecting a suitable export format i The ideal format depends on your model's intended operational context, balancing speed, hardware constraints, and ease of integration. In the following section, we'll take a closer look at each export option, understanding when to choose each one. -### YOLOv8’s Deployment Options +### YOLOv8's Deployment Options -Let’s walk through the different YOLOv8 deployment options. For a detailed walkthrough of the export process, visit the [Ultralytics documentation page on exporting](../modes/export.md). +Let's walk through the different YOLOv8 deployment options. For a detailed walkthrough of the export process, visit the [Ultralytics documentation page on exporting](../modes/export.md). #### PyTorch @@ -36,27 +36,27 @@ PyTorch is an open-source machine learning library widely used for applications - **Maintenance and Updates**: Regular updates with active development and support for new features. -- **Security Considerations**: Regular patches for security issues, but security is largely dependent on the overall environment it’s deployed in. +- **Security Considerations**: Regular patches for security issues, but security is largely dependent on the overall environment it's deployed in. - **Hardware Acceleration**: Supports CUDA for GPU acceleration, essential for speeding up model training and inference. #### TorchScript -TorchScript extends PyTorch’s capabilities by allowing the exportation of models to be run in a C++ runtime environment. This makes it suitable for production environments where Python is unavailable. +TorchScript extends PyTorch's capabilities by allowing the exportation of models to be run in a C++ runtime environment. This makes it suitable for production environments where Python is unavailable. - **Performance Benchmarks**: Can offer improved performance over native PyTorch, especially in production environments. - **Compatibility and Integration**: Designed for seamless transition from PyTorch to C++ production environments, though some advanced features might not translate perfectly. -- **Community Support and Ecosystem**: Benefits from PyTorch’s large community but has a narrower scope of specialized developers. +- **Community Support and Ecosystem**: Benefits from PyTorch's large community but has a narrower scope of specialized developers. -- **Case Studies**: Widely used in industry settings where Python’s performance overhead is a bottleneck. +- **Case Studies**: Widely used in industry settings where Python's performance overhead is a bottleneck. - **Maintenance and Updates**: Maintained alongside PyTorch with consistent updates. - **Security Considerations**: Offers improved security by enabling the running of models in environments without full Python installations. -- **Hardware Acceleration**: Inherits PyTorch’s CUDA support, ensuring efficient GPU utilization. +- **Hardware Acceleration**: Inherits PyTorch's CUDA support, ensuring efficient GPU utilization. #### ONNX @@ -104,7 +104,7 @@ TensorRT is a high-performance deep learning inference optimizer and runtime fro - **Compatibility and Integration**: Best suited for NVIDIA hardware, with limited support outside this environment. -- **Community Support and Ecosystem**: Strong support network through NVIDIA’s developer forums and documentation. 
+- **Community Support and Ecosystem**: Strong support network through NVIDIA's developer forums and documentation. - **Case Studies**: Widely adopted in industries requiring real-time inference on video and image data. @@ -116,7 +116,7 @@ TensorRT is a high-performance deep learning inference optimizer and runtime fro #### CoreML -CoreML is Apple’s machine learning framework, optimized for on-device performance in the Apple ecosystem, including iOS, macOS, watchOS, and tvOS. +CoreML is Apple's machine learning framework, optimized for on-device performance in the Apple ecosystem, including iOS, macOS, watchOS, and tvOS. - **Performance Benchmarks**: Optimized for on-device performance on Apple hardware with minimal battery usage. @@ -134,7 +134,7 @@ CoreML is Apple’s machine learning framework, optimized for on-device performa #### TF SavedModel -TF SavedModel is TensorFlow’s format for saving and serving machine learning models, particularly suited for scalable server environments. +TF SavedModel is TensorFlow's format for saving and serving machine learning models, particularly suited for scalable server environments. - **Performance Benchmarks**: Offers scalable performance in server environments, especially when used with TensorFlow Serving. @@ -170,7 +170,7 @@ TF GraphDef is a TensorFlow format that represents the model as a graph, which i #### TF Lite -TF Lite is TensorFlow’s solution for mobile and embedded device machine learning, providing a lightweight library for on-device inference. +TF Lite is TensorFlow's solution for mobile and embedded device machine learning, providing a lightweight library for on-device inference. - **Performance Benchmarks**: Designed for speed and efficiency on mobile and embedded devices. diff --git a/docs/en/guides/nvidia-jetson.md b/docs/en/guides/nvidia-jetson.md index 06f188ee..44a139f4 100644 --- a/docs/en/guides/nvidia-jetson.md +++ b/docs/en/guides/nvidia-jetson.md @@ -73,7 +73,7 @@ After this is done, skip to [Use TensorRT on NVIDIA Jetson section](#use-tensorr #### Install Ultralytics Package -Here we will install ultralyics package on the Jetson with optional dependencies so that we can export the PyTorch models to other different formats. We will mainly focus on [NVIDIA TensorRT exports](../integrations/tensorrt.md) because TensoRT will make sure we can get the maximum performance out of the Jetson devices. +Here we will install the Ultralytics package on the Jetson with optional dependencies so that we can export the PyTorch models to various other formats. We will mainly focus on [NVIDIA TensorRT exports](../integrations/tensorrt.md) because TensorRT will make sure we can get the maximum performance out of the Jetson devices. 1. Update packages list, install pip and upgrade to latest @@ -129,7 +129,7 @@ Visit the [PyTorch for Jetson page](https://forums.developer.nvidia.com/t/pytorc The [onnxruntime-gpu](https://pypi.org/project/onnxruntime-gpu/) package hosted in PyPI does not have `aarch64` binaries for the Jetson. So we need to manually install this package. This package is needed for some of the exports. -All different `onnxruntime-gpu` packages corresponsing to different JetPack and Python versions are listed [here](https://elinux.org/Jetson_Zoo#ONNX_Runtime). However, here we will download and install `onnxruntime-gpu 1.17.0` with `Python3.8` support for the JetPack we are using for this guide.
+All different `onnxruntime-gpu` packages corresponding to different JetPack and Python versions are listed [here](https://elinux.org/Jetson_Zoo#ONNX_Runtime). However, here we will download and install `onnxruntime-gpu 1.17.0` with `Python3.8` support for the JetPack we are using for this guide. ```bash wget https://nvidia.box.com/shared/static/zostg6agm00fb6t5uisw51qi6kpcuwzd.whl -O onnxruntime_gpu-1.17.0-cp38-cp38-linux_aarch64.whl @@ -185,11 +185,11 @@ The YOLOv8n model in PyTorch format is converted to TensorRT to run inference wi ## NVIDIA Jetson Orin YOLOv8 Benchmarks -YOLOv8 benchmarks were run by the Ultralytics team on 10 different model formats measuring speed and accuracy: PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, TF SavedModel, TF Graphdef, TF Lite, PaddlePaddle, NCNN. Benchmarks were run on Seeed Studio reComputer J4012 powered by Jetson Orin NX 16GB device at FP32 precision with default input image size of 640. +YOLOv8 benchmarks were run by the Ultralytics team on 10 different model formats measuring speed and accuracy: PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, TF SavedModel, TF GraphDef, TF Lite, PaddlePaddle, NCNN. Benchmarks were run on Seeed Studio reComputer J4012 powered by Jetson Orin NX 16GB device at FP32 precision with default input image size of 640. ### Comparison Chart -Eventhough all model exports are working with NVIDIA Jetson, we have only included **PyTorch, TorchScript, TensorRT** for the comparison chart below because, they make use of the GPU on the Jetson and are guaranteed to produce the best results. All the other exports only utilize the CPU and the performance is not as good as the above three. You can find benchmarks for all exports in the section after this chart. +Even though all model exports are working with NVIDIA Jetson, we have only included **PyTorch, TorchScript, TensorRT** for the comparison chart below because they make use of the GPU on the Jetson and are guaranteed to produce the best results. All the other exports only utilize the CPU, and the performance is not as good as the above three. You can find benchmarks for all exports in the section after this chart.
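For reference, numbers like these can be reproduced with the Ultralytics benchmark utility; a minimal sketch (the dataset and device index are assumptions to adapt to your setup):

```python
from ultralytics.utils.benchmarks import benchmark

# Benchmark YOLOv8n across all supported export formats on GPU device 0
benchmark(model="yolov8n.pt", data="coco8.yaml", imgsz=640, half=False, device=0)
```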
NVIDIA Jetson Ecosystem @@ -197,7 +197,7 @@ Eventhough all model exports are working with NVIDIA Jetson, we have only includ ### Detailed Comparison Table -The below table represents the benchmark results for five different models (YOLOv8n, YOLOv8s, YOLOv8m, YOLOv8l, YOLOv8x) across ten different formats (PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, TF SavedModel, TF Graphdef, TF Lite, PaddlePaddle, NCNN), giving us the status, size, mAP50-95(B) metric, and inference time for each combination. +The below table represents the benchmark results for five different models (YOLOv8n, YOLOv8s, YOLOv8m, YOLOv8l, YOLOv8x) across ten different formats (PyTorch, TorchScript, ONNX, OpenVINO, TensorRT, TF SavedModel, TF GraphDef, TF Lite, PaddlePaddle, NCNN), giving us the status, size, mAP50-95(B) metric, and inference time for each combination. !!! Performance diff --git a/docs/en/guides/object-counting.md b/docs/en/guides/object-counting.md index e625544b..934e91c6 100644 --- a/docs/en/guides/object-counting.md +++ b/docs/en/guides/object-counting.md @@ -28,7 +28,7 @@ Object counting with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly allowfullscreen>
- Watch: Classwise Object Counting using Ultralytics YOLOv8 + Watch: Class-wise Object Counting using Ultralytics YOLOv8 diff --git a/docs/en/guides/object-cropping.md b/docs/en/guides/object-cropping.md index 3f2823b7..c03b478e 100644 --- a/docs/en/guides/object-cropping.md +++ b/docs/en/guides/object-cropping.md @@ -98,4 +98,4 @@ Object cropping with [Ultralytics YOLOv8](https://github.com/ultralytics/ultraly | `agnostic_nms` | `bool` | `False` | Enables class-agnostic Non-Maximum Suppression (NMS), which merges overlapping boxes of different classes. Useful in multi-class detection scenarios where class overlap is common. | | `classes` | `list[int]` | `None` | Filters predictions to a set of class IDs. Only detections belonging to the specified classes will be returned. Useful for focusing on relevant objects in multi-class detection tasks. | | `retina_masks` | `bool` | `False` | Uses high-resolution segmentation masks if available in the model. This can enhance mask quality for segmentation tasks, providing finer detail. | -| `embed` | `list[int]` | `None` | Specifies the layers from which to extract feature vectors or embeddings. Useful for downstream tasks like clustering or similarity search. +| `embed` | `list[int]` | `None` | Specifies the layers from which to extract feature vectors or embeddings. Useful for downstream tasks like clustering or similarity search. | diff --git a/docs/en/guides/optimizing-openvino-latency-vs-throughput-modes.md b/docs/en/guides/optimizing-openvino-latency-vs-throughput-modes.md index 8231be2f..395c96c9 100644 --- a/docs/en/guides/optimizing-openvino-latency-vs-throughput-modes.md +++ b/docs/en/guides/optimizing-openvino-latency-vs-throughput-modes.md @@ -37,7 +37,6 @@ Throughput optimization is crucial for scenarios serving numerous inference requ 1. **OpenVINO Performance Hints:** A high-level, future-proof method to enhance throughput across devices using performance hints. 
```python - import openvino.properties as props import openvino.properties.hint as hints config = {hints.performance_mode: hints.PerformanceMode.THROUGHPUT} diff --git a/docs/en/guides/parking-management.md b/docs/en/guides/parking-management.md index 803df0f0..92741e4d 100644 --- a/docs/en/guides/parking-management.md +++ b/docs/en/guides/parking-management.md @@ -21,7 +21,7 @@ Parking management with [Ultralytics YOLOv8](https://github.com/ultralytics/ultr | Parking Management System | Parking Management System | |:-------------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------:| | ![Parking lots Analytics Using Ultralytics YOLOv8](https://github.com/RizwanMunawar/RizwanMunawar/assets/62513924/e3d4bc3e-cf4a-4da9-b42e-0da55cc74ad6) | ![Parking management top view using Ultralytics YOLOv8](https://github.com/RizwanMunawar/RizwanMunawar/assets/62513924/fe186719-1aca-43c9-b388-1ded91280eb5) | -| Parking management Aeriel View using Ultralytics YOLOv8 | Parking management Top View using Ultralytics YOLOv8 | +| Parking management Aerial View using Ultralytics YOLOv8 | Parking management Top View using Ultralytics YOLOv8 | ## Parking Management System Code Workflow diff --git a/docs/en/guides/preprocessing_annotated_data.md b/docs/en/guides/preprocessing_annotated_data.md index 7c25e72a..6173ec14 100644 --- a/docs/en/guides/preprocessing_annotated_data.md +++ b/docs/en/guides/preprocessing_annotated_data.md @@ -8,9 +8,9 @@ keywords: What is Data Preprocessing, Data Preprocessing Techniques, What is Dat ## Introduction -After you’ve defined your computer vision [project’s goals](./defining-project-goals.md) and [collected and annotated data](./data-collection-and-annotation.md), the next step is to preprocess annotated data and prepare it for model training. Clean and consistent data are vital to creating a model that performs well. +After you've defined your computer vision [project's goals](./defining-project-goals.md) and [collected and annotated data](./data-collection-and-annotation.md), the next step is to preprocess annotated data and prepare it for model training. Clean and consistent data are vital to creating a model that performs well. -Preprocessing is a step in the [computer vision project workflow](./steps-of-a-cv-project.md) that includes resizing images, normalizing pixel values, augmenting the dataset, and splitting the data into training, validation, and test sets. Let’s explore the essential techniques and best practices for cleaning your data! +Preprocessing is a step in the [computer vision project workflow](./steps-of-a-cv-project.md) that includes resizing images, normalizing pixel values, augmenting the dataset, and splitting the data into training, validation, and test sets. Let's explore the essential techniques and best practices for cleaning your data! ## Importance of Data Preprocessing @@ -36,7 +36,7 @@ To make resizing a simpler task, you can use the following tools: - **OpenCV**: A popular computer vision library with extensive functions for image processing. - **PIL (Pillow)**: A Python Imaging Library for opening, manipulating, and saving image files. -With respect to YOLOv8, the ‘imgsz’ parameter during [model training](../modes/train.md) allows for flexible input sizes. 
When set to a specific size, such as 640, the model will resize input images so their largest dimension is 640 pixels while maintaining the original aspect ratio. +With respect to YOLOv8, the 'imgsz' parameter during [model training](../modes/train.md) allows for flexible input sizes. When set to a specific size, such as 640, the model will resize input images so their largest dimension is 640 pixels while maintaining the original aspect ratio. By evaluating your model's and dataset's specific needs, you can determine whether resizing is a necessary preprocessing step or if your model can efficiently handle images of varying sizes. @@ -51,12 +51,12 @@ With respect to YOLOv8, normalization is seamlessly handled as part of its prepr ### Splitting the Dataset -Once you’ve cleaned the data, you are ready to split the dataset. Splitting the data into training, validation, and test sets is done to ensure that the model can be evaluated on unseen data to assess its generalization performance. A common split is 70% for training, 20% for validation, and 10% for testing. There are various tools and libraries that you can use to split your data like scikit-learn or TensorFlow. +Once you've cleaned the data, you are ready to split the dataset. Splitting the data into training, validation, and test sets is done to ensure that the model can be evaluated on unseen data to assess its generalization performance. A common split is 70% for training, 20% for validation, and 10% for testing. There are various tools and libraries that you can use to split your data, such as scikit-learn or TensorFlow. Consider the following when splitting your dataset: - **Maintaining Data Distribution**: Ensure that the data distribution of classes is maintained across training, validation, and test sets. - **Avoiding Data Leakage**: Typically, data augmentation is done after the dataset is split. Data augmentation and any other preprocessing should only be applied to the training set to prevent information from the validation or test sets from influencing the model training. --**Balancing Classes**: For imbalanced datasets, consider techniques such as oversampling the minority class or undersampling the majority class within the training set. +- **Balancing Classes**: For imbalanced datasets, consider techniques such as oversampling the minority class or under-sampling the majority class within the training set. ### What is Data Augmentation? @@ -73,7 +73,7 @@ Here are some other benefits of data augmentation: Common augmentation techniques include flipping, rotation, scaling, and color adjustments. Several libraries, such as Albumentations, Imgaug, and TensorFlow's ImageDataGenerator, can generate these augmentations.
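As a side note to the splitting guidance above, the common 70/20/10 split can be scripted with scikit-learn; a minimal sketch (`image_paths` is a placeholder for your own file list):

```python
from sklearn.model_selection import train_test_split

image_paths = [f"images/img_{i:04d}.jpg" for i in range(1000)]  # placeholder file list

# Hold out 30% first, then split that remainder 2:1 into validation and test sets
train_paths, rest = train_test_split(image_paths, test_size=0.3, random_state=42)
val_paths, test_paths = train_test_split(rest, test_size=1 / 3, random_state=42)

print(len(train_paths), len(val_paths), len(test_paths))  # 700 200 100
```

When per-image class labels are available, passing `stratify=` to both calls helps maintain the class distribution across the splits.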

- Overview of Data Augmentationsr + Overview of Data Augmentations

With respect to YOLOv8, you can [augment your custom dataset](../modes/train.md) by modifying the dataset configuration file, a .yaml file. In this file, you can add an augmentation section with parameters that specify how you want to augment your data. @@ -89,11 +89,11 @@ Also, you can adjust the intensity of these augmentation techniques through spec ## A Case Study of Preprocessing -Consider a project aimed at developing a model to detect and classify different types of vehicles in traffic images using YOLOv8. We’ve collected traffic images and annotated them with bounding boxes and labels. +Consider a project aimed at developing a model to detect and classify different types of vehicles in traffic images using YOLOv8. We've collected traffic images and annotated them with bounding boxes and labels. -Here’s what each step of preprocessing would look like for this project: +Here's what each step of preprocessing would look like for this project: -- Resizing Images: Since YOLOv8 handles flexible input sizes and performs resizing automatically, manual resizing is not required. The model will adjust the image size according to the specified ‘imgsz’ parameter during training. +- Resizing Images: Since YOLOv8 handles flexible input sizes and performs resizing automatically, manual resizing is not required. The model will adjust the image size according to the specified 'imgsz' parameter during training. - Normalizing Pixel Values: YOLOv8 automatically normalizes pixel values to a range of 0 to 1 during preprocessing, so it's not required. - Splitting the Dataset: Divide the dataset into training (70%), validation (20%), and test (10%) sets using tools like scikit-learn. - Data Augmentation: Modify the dataset configuration file (.yaml) to include data augmentation techniques such as random crops, horizontal flips, and brightness adjustments. diff --git a/docs/en/guides/raspberry-pi.md b/docs/en/guides/raspberry-pi.md index 7fb41963..159a6596 100644 --- a/docs/en/guides/raspberry-pi.md +++ b/docs/en/guides/raspberry-pi.md @@ -70,7 +70,7 @@ After this is done, skip to [Use NCNN on Raspberry Pi section](#use-ncnn-on-rasp #### Install Ultralytics Package -Here we will install Ultralyics package on the Raspberry Pi with optional dependencies so that we can export the PyTorch models to other different formats. +Here we will install the Ultralytics package on the Raspberry Pi with optional dependencies so that we can export the PyTorch models to various other formats. 1. Update packages list, install pip and upgrade to latest @@ -94,7 +94,7 @@ Here we will install Ultralyics package on the Raspberry Pi with optional depend ## Use NCNN on Raspberry Pi -Out of all the model export formats supported by Ultralytics, [NCNN](https://docs.ultralytics.com/integrations/ncnn) delivers the best inference performance when working with Raspberry Pi devices because NCNN is highly optimized for mobile/ embedded platforms (such as ARM architecture). Therefor our recommendation is to use NCNN with Raspberry Pi. +Out of all the model export formats supported by Ultralytics, [NCNN](https://docs.ultralytics.com/integrations/ncnn) delivers the best inference performance when working with Raspberry Pi devices because NCNN is highly optimized for mobile/embedded platforms (such as ARM architecture). Therefore, our recommendation is to use NCNN with Raspberry Pi.
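A minimal sketch of that recommendation using the documented Ultralytics export/predict API (the test image URL is illustrative):

```python
from ultralytics import YOLO

# Export the PyTorch model to NCNN, then reload the exported model for inference
model = YOLO("yolov8n.pt")
model.export(format="ncnn")  # creates the 'yolov8n_ncnn_model' directory

ncnn_model = YOLO("yolov8n_ncnn_model")
results = ncnn_model("https://ultralytics.com/images/bus.jpg")
```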
## Convert Model to NCNN and Run Inference @@ -135,7 +135,7 @@ The YOLOv8n model in PyTorch format is converted to NCNN to run inference with t ## Raspberry Pi 5 vs Raspberry Pi 4 YOLOv8 Benchmarks -YOLOv8 benchmarks were run by the Ultralytics team on nine different model formats measuring speed and accuracy: PyTorch, TorchScript, ONNX, OpenVINO, TF SavedModel, TF Graphdef, TF Lite, PaddlePaddle, NCNN. Benchmarks were run on both Raspberry Pi 5 and Raspberry Pi 4 at FP32 precision with default input image size of 640. +YOLOv8 benchmarks were run by the Ultralytics team on nine different model formats measuring speed and accuracy: PyTorch, TorchScript, ONNX, OpenVINO, TF SavedModel, TF GraphDef, TF Lite, PaddlePaddle, NCNN. Benchmarks were run on both Raspberry Pi 5 and Raspberry Pi 4 at FP32 precision with default input image size of 640. !!! Note @@ -159,7 +159,7 @@ YOLOv8 benchmarks were run by the Ultralytics team on nine different model forma ### Detailed Comparison Table -The below table represents the benchmark results for two different models (YOLOv8n, YOLOv8s) across nine different formats (PyTorch, TorchScript, ONNX, OpenVINO, TF SavedModel, TF Graphdef, TF Lite, PaddlePaddle, NCNN), running on both Raspberry Pi 4 and Raspberry Pi 5, giving us the status, size, mAP50-95(B) metric, and inference time for each combination. +The below table represents the benchmark results for two different models (YOLOv8n, YOLOv8s) across nine different formats (PyTorch, TorchScript, ONNX, OpenVINO, TF SavedModel, TF GraphDef, TF Lite, PaddlePaddle, NCNN), running on both Raspberry Pi 4 and Raspberry Pi 5, giving us the status, size, mAP50-95(B) metric, and inference time for each combination. !!! tip "Performance" diff --git a/docs/en/guides/steps-of-a-cv-project.md b/docs/en/guides/steps-of-a-cv-project.md index fb9c103d..11d52a5c 100644 --- a/docs/en/guides/steps-of-a-cv-project.md +++ b/docs/en/guides/steps-of-a-cv-project.md @@ -16,17 +16,17 @@ Computer vision techniques like [object detection](../tasks/detect.md), [image c Overview of computer vision techniques

-Working on your own computer vision projects is a great way to understand and learn more about computer vision. However, a computer vision project can consist of many steps, and it might seem confusing at first. By the end of this guide, you’ll be familiar with the steps involved in a computer vision project. We’ll walk through everything from the beginning to the end of a project, explaining why each part is important. Let’s get started and make your computer vision project a success! +Working on your own computer vision projects is a great way to understand and learn more about computer vision. However, a computer vision project can consist of many steps, and it might seem confusing at first. By the end of this guide, you'll be familiar with the steps involved in a computer vision project. We'll walk through everything from the beginning to the end of a project, explaining why each part is important. Let's get started and make your computer vision project a success! ## An Overview of a Computer Vision Project -Before discussing the details of each step involved in a computer vision project, let's look at the overall process. If you started a computer vision project today, you’d take the following steps: +Before discussing the details of each step involved in a computer vision project, let's look at the overall process. If you started a computer vision project today, you'd take the following steps: -- Your first priority would be to understand your project’s requirements. -- Then, you’d collect and accurately label the images that will help train your model. -- Next, you’d clean your data and apply augmentation techniques to prepare it for model training. -- After model training, you’d thoroughly test and evaluate your model to make sure it performs consistently under different conditions. -- Finally, you’d deploy your model into the real world and update it based on new insights and feedback. +- Your first priority would be to understand your project's requirements. +- Then, you'd collect and accurately label the images that will help train your model. +- Next, you'd clean your data and apply augmentation techniques to prepare it for model training. +- After model training, you'd thoroughly test and evaluate your model to make sure it performs consistently under different conditions. +- Finally, you'd deploy your model into the real world and update it based on new insights and feedback.

Computer Vision Project Steps Overview @@ -34,9 +34,9 @@ Before discussing the details of each step involved in a computer vision project Now that we know what to expect, let's dive right into the steps and get your project moving forward. -## Step 1: Defining Your Project’s Goals +## Step 1: Defining Your Project's Goals -The first step in any computer vision project is clearly defining the problem you’re trying to solve. Knowing the end goal helps you start to build a solution. This is especially true when it comes to computer vision because your project’s objective will directly affect which computer vision task you need to focus on. +The first step in any computer vision project is clearly defining the problem you're trying to solve. Knowing the end goal helps you start to build a solution. This is especially true when it comes to computer vision because your project's objective will directly affect which computer vision task you need to focus on. Here are some examples of project objectives and the computer vision tasks that can be used to reach these objectives: @@ -55,17 +55,17 @@ After understanding the project objective and suitable computer vision tasks, an Depending on the objective, you might choose to select the model first or after seeing what data you are able to collect in Step 2. For example, suppose your project is highly dependent on the availability of specific types of data. In that case, it may be more practical to gather and analyze the data first before selecting a model. On the other hand, if you have a clear understanding of the model requirements, you can choose the model first and then collect data that fits those specifications. -Choosing between training from scratch or using transfer learning affects how you prepare your data. Training from scratch requires a diverse dataset to build the model’s understanding from the ground up. Transfer learning, on the other hand, allows you to use a pre-trained model and adapt it with a smaller, more specific dataset. Also, choosing a specific model to train will determine how you need to prepare your data, such as resizing images or adding annotations, according to the model’s specific requirements. +Choosing between training from scratch or using transfer learning affects how you prepare your data. Training from scratch requires a diverse dataset to build the model's understanding from the ground up. Transfer learning, on the other hand, allows you to use a pre-trained model and adapt it with a smaller, more specific dataset. Also, choosing a specific model to train will determine how you need to prepare your data, such as resizing images or adding annotations, according to the model's specific requirements.
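As a sketch of the transfer-learning route with Ultralytics (the dataset YAML path is a placeholder):

```python
from ultralytics import YOLO

# Transfer learning: start from COCO-pretrained weights and adapt to a smaller custom dataset
model = YOLO("yolov8n.pt")
model.train(data="path/to/custom_dataset.yaml", epochs=50, imgsz=640)

# Training from scratch would instead start from a bare architecture definition:
# model = YOLO("yolov8n.yaml")
```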

Training From Scratch Vs. Using Transfer Learning

-Note: When choosing a model, consider its [deployment](./model-deployment-options.md) to ensure compatibility and performance. For example, lightweight models are ideal for edge computing due to their efficiency on resource-constrained devices. To learn more about the key points related to defining your project, read [our guide](./defining-project-goals.md) on defining your project’s goals and selecting the right model. +Note: When choosing a model, consider its [deployment](./model-deployment-options.md) to ensure compatibility and performance. For example, lightweight models are ideal for edge computing due to their efficiency on resource-constrained devices. To learn more about the key points related to defining your project, read [our guide](./defining-project-goals.md) on defining your project's goals and selecting the right model. Before getting into the hands-on work of a computer vision project, it's important to have a clear understanding of these details. Double-check that you've considered the following before moving on to Step 2: -- Clearly define the problem you’re trying to solve. +- Clearly define the problem you're trying to solve. - Determine the end goal of your project. - Identify the specific computer vision task needed (e.g., object detection, image classification, image segmentation). - Decide whether to train a model from scratch or use transfer learning. @@ -77,11 +77,11 @@ The quality of your computer vision models depend on the quality of your dataset Some libraries, like Ultralytics, provide [built-in support for various datasets](../datasets/index.md), making it easier to get started with high-quality data. These libraries often include utilities for using popular datasets seamlessly, which can save you a lot of time and effort in the initial stages of your project. -However, if you choose to collect images or take your own pictures, you’ll need to annotate your data. Data annotation is the process of labeling your data to impart knowledge to your model. The type of data annotation you’ll work with depends on your specific computer vision technique. Here are some examples: +However, if you choose to collect images or take your own pictures, you'll need to annotate your data. Data annotation is the process of labeling your data to impart knowledge to your model. The type of data annotation you'll work with depends on your specific computer vision technique. Here are some examples: -- **Image Classification:** You’ll label the entire image as a single class. -- **Object Detection:** You’ll draw bounding boxes around each object in the image and label each box. -- **Image Segmentation:** You’ll label each pixel in the image according to the object it belongs to, creating detailed object boundaries. +- **Image Classification:** You'll label the entire image as a single class. +- **Object Detection:** You'll draw bounding boxes around each object in the image and label each box. +- **Image Segmentation:** You'll label each pixel in the image according to the object it belongs to, creating detailed object boundaries.
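For the object-detection case, for example, a YOLO-format label file stores one `class x_center y_center width height` row per box, with all coordinates normalized to the image dimensions; a minimal sketch (the values are illustrative):

```python
from pathlib import Path

# One row per object: class index, then box center and size normalized to [0, 1]
label_row = "0 0.481 0.634 0.690 0.713\n"  # e.g. class 0, illustrative box
Path("img_0001.txt").write_text(label_row)

cls, xc, yc, w, h = label_row.split()
print(f"class={cls}, center=({xc}, {yc}), size=({w}, {h})")
```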

Different Types of Image Annotation @@ -119,7 +119,7 @@ By properly [understanding, splitting, and augmenting your data](./preprocessing Once your dataset is ready for training, you can focus on setting up the necessary environment, managing your datasets, and training your model. -First, you’ll need to make sure your environment is configured correctly. Typically, this includes the following: +First, you'll need to make sure your environment is configured correctly. Typically, this includes the following: - Installing essential libraries and frameworks like TensorFlow, PyTorch, or [Ultralytics](../quickstart.md). - If you are using a GPU, installing libraries like CUDA and cuDNN will help enable GPU acceleration and speed up the training process. @@ -132,9 +132,9 @@ It's important to keep in mind that proper dataset management is vital for effic ## Step 5: Model Evaluation and Model Finetuning -It’s important to assess your model's performance using various metrics and refine it to improve accuracy. [Evaluating](../modes/val.md) helps identify areas where the model excels and where it may need improvement. Fine-tuning ensures the model is optimized for the best possible performance. +It's important to assess your model's performance using various metrics and refine it to improve accuracy. [Evaluating](../modes/val.md) helps identify areas where the model excels and where it may need improvement. Fine-tuning ensures the model is optimized for the best possible performance. -- **[Performance Metrics](./yolo-performance-metrics.md):** Use metrics like accuracy, precision, recall, and F1-score to evaluate your model’s performance. These metrics provide insights into how well your model is making predictions. +- **[Performance Metrics](./yolo-performance-metrics.md):** Use metrics like accuracy, precision, recall, and F1-score to evaluate your model's performance. These metrics provide insights into how well your model is making predictions. - **[Hyperparameter Tuning](./hyperparameter-tuning.md):** Adjust hyperparameters to optimize model performance. Techniques like grid search or random search can help find the best hyperparameter values. - Fine-Tuning: Make small adjustments to the model architecture or training process to enhance performance. This might involve tweaking learning rates, batch sizes, or other model parameters. @@ -159,7 +159,7 @@ Once your model has been thoroughly tested, it's time to deploy it. Deployment i ## Step 8: Monitoring, Maintenance, and Documentation -Once your model is deployed, it’s important to continuously monitor its performance, maintain it to handle any issues, and document the entire process for future reference and improvements. +Once your model is deployed, it's important to continuously monitor its performance, maintain it to handle any issues, and document the entire process for future reference and improvements. Monitoring tools can help you track key performance indicators (KPIs) and detect anomalies or drops in accuracy. By monitoring the model, you can be aware of model drift, where the model's performance declines over time due to changes in the input data. Periodically retrain the model with updated data to maintain accuracy and relevance. @@ -174,12 +174,12 @@ In addition to monitoring and maintenance, documentation is also key. Thoroughly Here are some common questions that might arise during a computer vision project: - **Q1:** How do the steps change if I already have a dataset or data when starting a computer vision project? 
- - **A1:** Starting with a pre-existing dataset or data affects the initial steps of your project. In Step 1, along with deciding the computer vision task and model, you’ll also need to explore your dataset thoroughly. Understanding its quality, variety, and limitations will guide your choice of model and training approach. Your approach should align closely with the data's characteristics for more effective outcomes. Depending on your data or dataset, you may be able to skip Step 2 as well. + - **A1:** Starting with a pre-existing dataset or data affects the initial steps of your project. In Step 1, along with deciding the computer vision task and model, you'll also need to explore your dataset thoroughly. Understanding its quality, variety, and limitations will guide your choice of model and training approach. Your approach should align closely with the data's characteristics for more effective outcomes. Depending on your data or dataset, you may be able to skip Step 2 as well. -- **Q2:** I’m not sure what computer vision project to start my AI learning journey with. +- **Q2:** I'm not sure what computer vision project to start my AI learning journey with. - **A2:** Check out our [guides on Real-World Projects](./index.md) for inspiration and guidance. -- **Q3:** I don’t want to train a model. I just want to try running a model on an image. How can I do that? +- **Q3:** I don't want to train a model. I just want to try running a model on an image. How can I do that? - **A3:** You can use a pre-trained model to run predictions on an image without training a new model. Check out the [YOLOv8 predict docs page](../modes/predict.md) for instructions on how to use a pre-trained YOLOv8 model to make predictions on your images. - **Q4:** Where can I find more detailed articles and updates about computer vision applications and YOLOv8? diff --git a/docs/en/guides/yolo-common-issues.md b/docs/en/guides/yolo-common-issues.md index bca0e3cd..427b6d37 100644 --- a/docs/en/guides/yolo-common-issues.md +++ b/docs/en/guides/yolo-common-issues.md @@ -183,7 +183,7 @@ This section will address common issues faced during model prediction. **Solution**: -- Coordinate Format: YOLOv8 provides bounding box coordinates in absolute pixel values. To convert these to relative coordinates (ranging from 0 to 1), you need to divide by the image dimensions. For example, let’s say your image size is 640x640. Then you would do the following: +- Coordinate Format: YOLOv8 provides bounding box coordinates in absolute pixel values. To convert these to relative coordinates (ranging from 0 to 1), you need to divide by the image dimensions. For example, let's say your image size is 640x640. Then you would do the following: ```python # Convert absolute coordinates to relative coordinates @@ -268,7 +268,7 @@ Engaging with a community of like-minded individuals can significantly enhance y ### Forums and Channels for Getting Help -**GitHub Issues:** The YOLOv8 repository on GitHub has an [Issues tab](https://github.com/ultralytics/ultralytics/issues) where you can ask questions, report bugs, and suggest new features. The community and maintainers are active here, and it’s a great place to get help with specific problems. +**GitHub Issues:** The YOLOv8 repository on GitHub has an [Issues tab](https://github.com/ultralytics/ultralytics/issues) where you can ask questions, report bugs, and suggest new features. The community and maintainers are active here, and it's a great place to get help with specific problems. 
**Ultralytics Discord Server:** Ultralytics has a [Discord server](https://ultralytics.com/discord/) where you can interact with other users and the developers. diff --git a/docs/en/guides/yolo-performance-metrics.md b/docs/en/guides/yolo-performance-metrics.md index a34f161e..5024fa4c 100644 --- a/docs/en/guides/yolo-performance-metrics.md +++ b/docs/en/guides/yolo-performance-metrics.md @@ -23,7 +23,7 @@ Performance metrics are key tools to evaluate the accuracy and efficiency of obj ## Object Detection Metrics -Let’s start by discussing some metrics that are not only important to YOLOv8 but are broadly applicable across different object detection models. +Let's start by discussing some metrics that are not only important to YOLOv8 but are broadly applicable across different object detection models. - **Intersection over Union (IoU):** IoU is a measure that quantifies the overlap between a predicted bounding box and a ground truth bounding box. It plays a fundamental role in evaluating the accuracy of object localization. @@ -115,7 +115,7 @@ For real-time applications, speed metrics like FPS (Frames Per Second) and laten ## Interpretation of Results -It’s important to understand the metrics. Here's what some of the commonly observed lower scores might suggest: +It's important to understand the metrics. Here's what some of the commonly observed lower scores might suggest: - **Low mAP:** Indicates the model may need general refinements. @@ -157,7 +157,7 @@ Tapping into a community of enthusiasts and experts can amplify your journey wit ### Engage with the Broader Community -- **GitHub Issues:** The YOLOv8 repository on GitHub has an [Issues tab](https://github.com/ultralytics/ultralytics/issues) where you can ask questions, report bugs, and suggest new features. The community and maintainers are active here, and it’s a great place to get help with specific problems. +- **GitHub Issues:** The YOLOv8 repository on GitHub has an [Issues tab](https://github.com/ultralytics/ultralytics/issues) where you can ask questions, report bugs, and suggest new features. The community and maintainers are active here, and it's a great place to get help with specific problems. - **Ultralytics Discord Server:** Ultralytics has a [Discord server](https://ultralytics.com/discord/) where you can interact with other users and the developers. diff --git a/docs/en/help/code_of_conduct.md b/docs/en/help/code_of_conduct.md index 46313c4e..ca895fe8 100644 --- a/docs/en/help/code_of_conduct.md +++ b/docs/en/help/code_of_conduct.md @@ -1,6 +1,6 @@ --- comments: true -description: Explore Ultralytics community’s Code of Conduct, ensuring a supportive, inclusive environment for contributors & members at all levels. Find our guidelines on acceptable behavior & enforcement. +description: Explore Ultralytics community's Code of Conduct, ensuring a supportive, inclusive environment for contributors & members at all levels. Find our guidelines on acceptable behavior & enforcement. keywords: Ultralytics, code of conduct, community, contribution, behavior guidelines, enforcement, open source contributions --- diff --git a/docs/en/help/environmental-health-safety.md b/docs/en/help/environmental-health-safety.md index 9fee240b..2cf5d8bb 100644 --- a/docs/en/help/environmental-health-safety.md +++ b/docs/en/help/environmental-health-safety.md @@ -1,6 +1,6 @@ --- comments: false -description: Discover Ultralytics’ EHS policy principles and implementation measures. 
Committed to safety, environment, and continuous improvement for a sustainable future. +description: Discover Ultralytics' EHS policy principles and implementation measures. Committed to safety, environment, and continuous improvement for a sustainable future. keywords: Ultralytics policy, EHS, environment, health and safety, compliance, prevention, continuous improvement, risk management, emergency preparedness, resource allocation, communication --- diff --git a/docs/en/hub/pro.md b/docs/en/hub/pro.md index c301e7bd..30887aa5 100644 --- a/docs/en/hub/pro.md +++ b/docs/en/hub/pro.md @@ -47,7 +47,7 @@ That's it! The account balance is used to pay for [Ultralytics Cloud Training](./cloud-training.md) resources. -In order to top-up your account balance, simply click on the **Top-Up** button. +In order to top up your account balance, simply click on the **Top-Up** button. ![Ultralytics HUB screenshot of the Settings page Billing & License tab with an arrow pointing to the Top-Up button](https://raw.githubusercontent.com/ultralytics/assets/main/docs/hub/pro/hub_pro_account_balance_1.jpg) @@ -57,4 +57,4 @@ Next, set the amount you want to top-up. That's it! -![Ultralytics HUB screenshot of the Payment Successful dialog](https://raw.githubusercontent.com/ultralytics/assets/main/docs/hub/pro/hub_pro_account_balance_3.jpg) \ No newline at end of file +![Ultralytics HUB screenshot of the Payment Successful dialog](https://raw.githubusercontent.com/ultralytics/assets/main/docs/hub/pro/hub_pro_account_balance_3.jpg) diff --git a/docs/en/integrations/amazon-sagemaker.md b/docs/en/integrations/amazon-sagemaker.md index c57d95c4..ef071d75 100644 --- a/docs/en/integrations/amazon-sagemaker.md +++ b/docs/en/integrations/amazon-sagemaker.md @@ -6,7 +6,7 @@ keywords: YOLOv8, Amazon SageMaker, deploy YOLOv8, AWS deployment, machine learn # A Guide to Deploying YOLOv8 on Amazon SageMaker Endpoints -Deploying advanced computer vision models like [Ultralytics’ YOLOv8](https://github.com/ultralytics/ultralytics) on Amazon SageMaker Endpoints opens up a wide range of possibilities for various machine learning applications. The key to effectively using these models lies in understanding their setup, configuration, and deployment processes. YOLOv8 becomes even more powerful when integrated seamlessly with Amazon SageMaker, a robust and scalable machine learning service by AWS. +Deploying advanced computer vision models like [Ultralytics' YOLOv8](https://github.com/ultralytics/ultralytics) on Amazon SageMaker Endpoints opens up a wide range of possibilities for various machine learning applications. The key to effectively using these models lies in understanding their setup, configuration, and deployment processes. YOLOv8 becomes even more powerful when integrated seamlessly with Amazon SageMaker, a robust and scalable machine learning service by AWS. This guide will take you through the process of deploying YOLOv8 PyTorch models on Amazon SageMaker Endpoints step by step. You'll learn the essentials of preparing your AWS environment, configuring the model appropriately, and using tools like AWS CloudFormation and the AWS Cloud Development Kit (CDK) for deployment. @@ -32,7 +32,7 @@ First, ensure you have the following prerequisites in place: - An AWS Account: If you don't already have one, sign up for an AWS account. -- Configured IAM Roles: You’ll need an IAM role with the necessary permissions for Amazon SageMaker, AWS CloudFormation, and Amazon S3. 
This role should have policies that allow it to access these services. +- Configured IAM Roles: You'll need an IAM role with the necessary permissions for Amazon SageMaker, AWS CloudFormation, and Amazon S3. This role should have policies that allow it to access these services. - AWS CLI: If not already installed, download and install the AWS Command Line Interface (CLI) and configure it with your account details. Follow [the AWS CLI instructions](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) for installation. @@ -144,7 +144,7 @@ Now that your YOLOv8 model is deployed, it's important to test its performance a - Open the Test Notebook: In the same Jupyter environment, locate and open the 2_TestEndpoint.ipynb notebook, also in the sm-notebook directory. -- Run the Test Notebook: Follow the instructions within the notebook to test the deployed SageMaker endpoint. This includes sending an image to the endpoint and running inferences. Then, you’ll plot the output to visualize the model’s performance and accuracy, as shown below. +- Run the Test Notebook: Follow the instructions within the notebook to test the deployed SageMaker endpoint. This includes sending an image to the endpoint and running inferences. Then, you'll plot the output to visualize the model's performance and accuracy, as shown below.

Testing Results YOLOv8 diff --git a/docs/en/integrations/clearml.md b/docs/en/integrations/clearml.md index b59a7828..6b480729 100644 --- a/docs/en/integrations/clearml.md +++ b/docs/en/integrations/clearml.md @@ -41,7 +41,7 @@ For detailed instructions and best practices related to the installation process Once you have installed the necessary packages, the next step is to initialize and configure your ClearML SDK. This involves setting up your ClearML account and obtaining the necessary credentials for a seamless connection between your development environment and the ClearML server. -Begin by initializing the ClearML SDK in your environment. The ‘clearml-init’ command starts the setup process and prompts you for the necessary credentials. +Begin by initializing the ClearML SDK in your environment. The 'clearml-init' command starts the setup process and prompts you for the necessary credentials. !!! Tip "Initial SDK Setup" @@ -86,7 +86,7 @@ Before diving into the usage instructions, be sure to check out the range of [YO ### Understanding the Code -Let’s understand the steps showcased in the usage code snippet above. +Let's understand the steps showcased in the usage code snippet above. **Step 1: Creating a ClearML Task**: A new task is initialized in ClearML, specifying your project and task names. This task will track and manage your model's training. diff --git a/docs/en/integrations/comet.md b/docs/en/integrations/comet.md index e395d202..5ce8a875 100644 --- a/docs/en/integrations/comet.md +++ b/docs/en/integrations/comet.md @@ -37,7 +37,7 @@ To install the required packages, run: ## Configuring Comet ML -After installing the required packages, you’ll need to sign up, get a [Comet API Key](https://www.comet.com/signup), and configure it. +After installing the required packages, you'll need to sign up, get a [Comet API Key](https://www.comet.com/signup), and configure it. !!! Tip "Configuring Comet ML" @@ -89,7 +89,7 @@ Comet automatically logs the following data with no additional configuration: me ## Understanding Your Model's Performance with Comet ML Visualizations -Let's dive into what you'll see on the Comet ML dashboard once your YOLOv8 model begins training. The dashboard is where all the action happens, presenting a range of automatically logged information through visuals and statistics. Here’s a quick tour: +Let's dive into what you'll see on the Comet ML dashboard once your YOLOv8 model begins training. The dashboard is where all the action happens, presenting a range of automatically logged information through visuals and statistics. Here's a quick tour: **Experiment Panels** diff --git a/docs/en/integrations/coreml.md b/docs/en/integrations/coreml.md index 8c76cd61..71cf08d8 100644 --- a/docs/en/integrations/coreml.md +++ b/docs/en/integrations/coreml.md @@ -40,7 +40,7 @@ Apple's CoreML framework offers robust features for on-device machine learning. ## CoreML Deployment Options -Before we look at the code for exporting YOLOv8 models to the CoreML format, let’s understand where CoreML models are usually used. +Before we look at the code for exporting YOLOv8 models to the CoreML format, let's understand where CoreML models are usually used. CoreML offers various deployment options for machine learning models, including: @@ -50,7 +50,7 @@ CoreML offers various deployment options for machine learning models, including: - **Downloaded Models**: These models are fetched from a server as needed. This approach is suitable for larger models or those needing regular updates. 
It helps keep the app bundle size smaller.
 
-- **Cloud-Based Deployment**: CoreML models are hosted on servers and accessed by the iOS app through API requests. This scalable and flexible option enables easy model updates without app revisions. It’s ideal for complex models or large-scale apps requiring regular updates. However, it does require an internet connection and may pose latency and security issues.
+- **Cloud-Based Deployment**: CoreML models are hosted on servers and accessed by the iOS app through API requests. This scalable and flexible option enables easy model updates without app revisions. It's ideal for complex models or large-scale apps requiring regular updates. However, it does require an internet connection and may pose latency and security issues.
 
 ## Exporting YOLOv8 Models to CoreML
 
@@ -123,4 +123,4 @@ In this guide, we went over how to export Ultralytics YOLOv8 models to CoreML fo
 
 For further details on usage, visit the [CoreML official documentation](https://developer.apple.com/documentation/coreml).
 
-Also, if you’d like to know more about other Ultralytics YOLOv8 integrations, visit our [integration guide page](../integrations/index.md). You'll find plenty of valuable resources and insights there.
+Also, if you'd like to know more about other Ultralytics YOLOv8 integrations, visit our [integration guide page](../integrations/index.md). You'll find plenty of valuable resources and insights there.
diff --git a/docs/en/integrations/dvc.md b/docs/en/integrations/dvc.md
index b723a1a2..087079a0 100644
--- a/docs/en/integrations/dvc.md
+++ b/docs/en/integrations/dvc.md
@@ -166,6 +166,6 @@ Based on your analysis, iterate on your experiments. Adjust model configurations
 
 This guide has led you through the process of integrating DVCLive with Ultralytics' YOLOv8. You have learned how to harness the power of DVCLive for detailed experiment monitoring, effective visualization, and insightful analysis in your machine learning endeavors.
 
-For further details on usage, visit [DVCLive’s official documentation](https://dvc.org/doc/dvclive/ml-frameworks/yolo).
+For further details on usage, visit [DVCLive's official documentation](https://dvc.org/doc/dvclive/ml-frameworks/yolo).
 
 Additionally, explore more integrations and capabilities of Ultralytics by visiting the [Ultralytics integration guide page](../integrations/index.md), which is a collection of great resources and insights.
diff --git a/docs/en/integrations/edge-tpu.md b/docs/en/integrations/edge-tpu.md
index 98a22309..715e0475 100644
--- a/docs/en/integrations/edge-tpu.md
+++ b/docs/en/integrations/edge-tpu.md
@@ -32,7 +32,7 @@ Here are the key features that make TFLite Edge TPU a great model format choice
 
 ## Deployment Options with TFLite Edge TPU
 
-Before we jump into how to export YOLOv8 models to the TFLite Edge TPU format, let’s understand where TFLite Edge TPU models are usually used.
+Before we jump into how to export YOLOv8 models to the TFLite Edge TPU format, let's understand where TFLite Edge TPU models are usually used.
TFLite Edge TPU offers various deployment options for machine learning models, including: @@ -76,7 +76,7 @@ Before diving into the usage instructions, it's important to note that while all model = YOLO("yolov8n.pt") # Export the model to TFLite Edge TPU format - model.export(format="edgetpu") # creates 'yolov8n_full_integer_quant_edgetpu.tflite’ + model.export(format="edgetpu") # creates 'yolov8n_full_integer_quant_edgetpu.tflite' # Load the exported TFLite Edge TPU model edgetpu_model = YOLO("yolov8n_full_integer_quant_edgetpu.tflite") @@ -111,7 +111,7 @@ However, for in-depth instructions on deploying your TFLite Edge TPU models, tak ## Summary -In this guide, we’ve learned how to export Ultralytics YOLOv8 models to TFLite Edge TPU format. By following the steps mentioned above, you can increase the speed and power of your computer vision applications. +In this guide, we've learned how to export Ultralytics YOLOv8 models to TFLite Edge TPU format. By following the steps mentioned above, you can increase the speed and power of your computer vision applications. For further details on usage, visit the [Edge TPU official website](https://cloud.google.com/edge-tpu). diff --git a/docs/en/integrations/google-colab.md b/docs/en/integrations/google-colab.md index 610eb9fc..73af1032 100644 --- a/docs/en/integrations/google-colab.md +++ b/docs/en/integrations/google-colab.md @@ -6,15 +6,15 @@ keywords: Ultralytics YOLOv8, Google Colab, CPU, GPU, TPU, Browser-based, Hardwa # Accelerating YOLOv8 Projects with Google Colab -Many developers lack the powerful computing resources needed to build deep learning models. Acquiring high-end hardware or renting a decent GPU can be expensive. Google Colab is a great solution to this. It’s a browser-based platform that allows you to work with large datasets, develop complex models, and share your work with others without a huge cost. +Many developers lack the powerful computing resources needed to build deep learning models. Acquiring high-end hardware or renting a decent GPU can be expensive. Google Colab is a great solution to this. It's a browser-based platform that allows you to work with large datasets, develop complex models, and share your work with others without a huge cost. -You can use Google Colab to work on projects related to [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models. Google Colab’s user-friendly environment is well suited for efficient model development and experimentation. Let’s learn more about Google Colab, its key features, and how you can use it to train YOLOv8 models. +You can use Google Colab to work on projects related to [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models. Google Colab's user-friendly environment is well suited for efficient model development and experimentation. Let's learn more about Google Colab, its key features, and how you can use it to train YOLOv8 models. ## Google Colaboratory Google Colaboratory, commonly known as Google Colab, was developed by Google Research in 2017. It is a free online cloud-based Jupyter Notebook environment that allows you to train your machine learning and deep learning models on CPUs, GPUs, and TPUs. The motivation behind developing Google Colab was Google's broader goals to advance AI technology and educational tools, and encourage the use of cloud services. -You can use Google Colab regardless of the specifications and configurations of your local computer. All you need is a Google account and a web browser, and you’re good to go. 
+You can use Google Colab regardless of the specifications and configurations of your local computer. All you need is a Google account and a web browser, and you're good to go. ## Training YOLOv8 Using Google Colaboratory @@ -39,10 +39,10 @@ Learn how to train a YOLOv8 model with custom data on YouTube with Nicolai. Chec ### Common Questions While Working with Google Colab -When working with Google Colab, you might have a few common questions. Let’s answer them. +When working with Google Colab, you might have a few common questions. Let's answer them. **Q: Why does my Google Colab session timeout?** -A: Google Colab sessions can timeout due to inactivity, especially for free users who have a limited session duration. +A: Google Colab sessions can time out due to inactivity, especially for free users who have a limited session duration. **Q: Can I increase the session duration in Google Colab?** A: Free users face limits, but Google Colab Pro offers extended session durations. @@ -85,7 +85,7 @@ There are many options for training and evaluating YOLOv8 models, so what makes - **Integration with Google Drive:** Colab seamlessly integrates with Google Drive to make data storage, access, and management simple. Datasets and models can be stored and retrieved directly from Google Drive. -- **Markdown Support:** You can use markdown format for enhanced documentation within notebooks. +- **Markdown Support:** You can use Markdown format for enhanced documentation within notebooks. - **Scheduled Execution:** Developers can set notebooks to run automatically at specified times. @@ -93,18 +93,18 @@ There are many options for training and evaluating YOLOv8 models, so what makes ## Keep Learning about Google Colab -If you’d like to dive deeper into Google Colab, here are a few resources to guide you. +If you'd like to dive deeper into Google Colab, here are a few resources to guide you. - **[Training Custom Datasets with Ultralytics YOLOv8 in Google Colab](https://www.ultralytics.com/blog/training-custom-datasets-with-ultralytics-yolov8-in-google-colab)**: Learn how to train custom datasets with Ultralytics YOLOv8 on Google Colab. This comprehensive blog post will take you through the entire process, from initial setup to the training and evaluation stages. - **[Curated Notebooks](https://colab.google/notebooks/)**: Here you can explore a series of organized and educational notebooks, each grouped by specific topic areas. -- **[Google Colab’s Medium Page](https://medium.com/google-colab)**: You can find tutorials, updates, and community contributions here that can help you better understand and utilize this tool. +- **[Google Colab's Medium Page](https://medium.com/google-colab)**: You can find tutorials, updates, and community contributions here that can help you better understand and utilize this tool. ## Summary -We’ve discussed how you can easily experiment with Ultralytics YOLOv8 models on Google Colab. You can use Google Colab to train and evaluate your models on GPUs and TPUs with a few clicks. +We've discussed how you can easily experiment with Ultralytics YOLOv8 models on Google Colab. You can use Google Colab to train and evaluate your models on GPUs and TPUs with a few clicks. -For more details, visit [Google Colab’s FAQ page](https://research.google.com/colaboratory/intl/en-GB/faq.html). +For more details, visit [Google Colab's FAQ page](https://research.google.com/colaboratory/intl/en-GB/faq.html). Interested in more YOLOv8 integrations? 
Visit the [Ultralytics integration guide page](index.md) to explore additional tools and capabilities that can improve your machine-learning projects. diff --git a/docs/en/integrations/ncnn.md b/docs/en/integrations/ncnn.md index a2841bc7..e6ae1e6e 100644 --- a/docs/en/integrations/ncnn.md +++ b/docs/en/integrations/ncnn.md @@ -34,7 +34,7 @@ NCNN models offer a wide range of key features that enable on-device machine lea ## Deployment Options with NCNN -Before we look at the code for exporting YOLOv8 models to the NCNN format, let’s understand how NCNN models are normally used. +Before we look at the code for exporting YOLOv8 models to the NCNN format, let's understand how NCNN models are normally used. NCNN models, designed for efficiency and performance, are compatible with a variety of deployment platforms: diff --git a/docs/en/integrations/neural-magic.md b/docs/en/integrations/neural-magic.md index 3e9e0e38..16fbf892 100644 --- a/docs/en/integrations/neural-magic.md +++ b/docs/en/integrations/neural-magic.md @@ -1,26 +1,26 @@ --- comments: true -description: Learn how to deploy your YOLOv8 models rapidly using Neural Magic’s DeepSparse. This guide focuses on integrating Ultralytics YOLOv8 with the DeepSparse Engine for high-speed, CPU-based inference, leveraging advanced neural network sparsity techniques. +description: Learn how to deploy your YOLOv8 models rapidly using Neural Magic's DeepSparse. This guide focuses on integrating Ultralytics YOLOv8 with the DeepSparse Engine for high-speed, CPU-based inference, leveraging advanced neural network sparsity techniques. keywords: YOLOv8, DeepSparse Engine, Ultralytics, CPU Inference, Neural Network Sparsity, Object Detection, Model Optimization --- -# Optimizing YOLOv8 Inferences with Neural Magic’s DeepSparse Engine +# Optimizing YOLOv8 Inferences with Neural Magic's DeepSparse Engine -When deploying object detection models like [Ultralytics YOLOv8](https://ultralytics.com) on various hardware, you can bump into unique issues like optimization. This is where YOLOv8’s integration with Neural Magic’s DeepSparse Engine steps in. It transforms the way YOLOv8 models are executed and enables GPU-level performance directly on CPUs. +When deploying object detection models like [Ultralytics YOLOv8](https://ultralytics.com) on various hardware, you can bump into unique issues like optimization. This is where YOLOv8's integration with Neural Magic's DeepSparse Engine steps in. It transforms the way YOLOv8 models are executed and enables GPU-level performance directly on CPUs. This guide shows you how to deploy YOLOv8 using Neural Magic's DeepSparse, how to run inferences, and also how to benchmark performance to ensure it is optimized. -## Neural Magic’s DeepSparse +## Neural Magic's DeepSparse

- Neural Magic’s DeepSparse Overview + Neural Magic's DeepSparse Overview

-[Neural Magic’s DeepSparse](https://neuralmagic.com/deepsparse/) is an inference run-time designed to optimize the execution of neural networks on CPUs. It applies advanced techniques like sparsity, pruning, and quantization to dramatically reduce computational demands while maintaining accuracy. DeepSparse offers an agile solution for efficient and scalable neural network execution across various devices.
+[Neural Magic's DeepSparse](https://neuralmagic.com/deepsparse/) is an inference run-time designed to optimize the execution of neural networks on CPUs. It applies advanced techniques like sparsity, pruning, and quantization to dramatically reduce computational demands while maintaining accuracy. DeepSparse offers an agile solution for efficient and scalable neural network execution across various devices.
 
-## Benefits of Integrating Neural Magic’s DeepSparse with YOLOv8
+## Benefits of Integrating Neural Magic's DeepSparse with YOLOv8
 
-Before diving into how to deploy YOLOV8 using DeepSparse, let’s understand the benefits of using DeepSparse. Some key advantages include:
+Before diving into how to deploy YOLOv8 using DeepSparse, let's understand the benefits of using DeepSparse. Some key advantages include:
 
 - **Enhanced Inference Speed**: Achieves up to 525 FPS (on YOLOv8n), significantly speeding up YOLOv8's inference capabilities compared to traditional methods.
 
@@ -44,7 +44,7 @@ Before diving into how to deploy YOLOV8 using DeepSparse, let’s understand the
 
 ## How Does Neural Magic's DeepSparse Technology Works?
 
-Neural Magic’s Deep Sparse technology is inspired by the human brain’s efficiency in neural network computation. It adopts two key principles from the brain as follows:
+Neural Magic's Deep Sparse technology is inspired by the human brain's efficiency in neural network computation. It adopts two key principles from the brain as follows:
 
 - **Sparsity**: The process of sparsification involves pruning redundant information from deep learning networks, leading to smaller and faster models without compromising accuracy. This technique reduces the network's size and computational needs significantly.
 
@@ -155,8 +155,8 @@ After running the eval command, you will receive detailed output metrics such as
 
 ## Summary
 
-This guide explored integrating Ultralytics’ YOLOv8 with Neural Magic's DeepSparse Engine. It highlighted how this integration enhances YOLOv8's performance on CPU platforms, offering GPU-level efficiency and advanced neural network sparsity techniques.
+This guide explored integrating Ultralytics' YOLOv8 with Neural Magic's DeepSparse Engine. It highlighted how this integration enhances YOLOv8's performance on CPU platforms, offering GPU-level efficiency and advanced neural network sparsity techniques.
 
-For more detailed information and advanced usage, visit [Neural Magic’s DeepSparse documentation](https://docs.neuralmagic.com/products/deepsparse/). Also, check out Neural Magic’s documentation on the integration with YOLOv8 [here](https://github.com/neuralmagic/deepsparse/tree/main/src/deepsparse/yolov8#yolov8-inference-pipelines) and watch a great session on it [here](https://www.youtube.com/watch?v=qtJ7bdt52x8).
+For more detailed information and advanced usage, visit [Neural Magic's DeepSparse documentation](https://docs.neuralmagic.com/products/deepsparse/). 
Also, check out Neural Magic's documentation on the integration with YOLOv8 [here](https://github.com/neuralmagic/deepsparse/tree/main/src/deepsparse/yolov8#yolov8-inference-pipelines) and watch a great session on it [here](https://www.youtube.com/watch?v=qtJ7bdt52x8). Additionally, for a broader understanding of various YOLOv8 integrations, visit the [Ultralytics integration guide page](../integrations/index.md), where you can discover a range of other exciting integration possibilities. diff --git a/docs/en/integrations/onnx.md b/docs/en/integrations/onnx.md index a050b198..f2612b42 100644 --- a/docs/en/integrations/onnx.md +++ b/docs/en/integrations/onnx.md @@ -6,7 +6,7 @@ keywords: Ultralytics, YOLOv8, ONNX Format, Export YOLOv8, CUDA Support, Model D # ONNX Export for YOLOv8 Models -Often, when deploying computer vision models, you’ll need a model format that's both flexible and compatible with multiple platforms. +Often, when deploying computer vision models, you'll need a model format that's both flexible and compatible with multiple platforms. Exporting [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models to ONNX format streamlines deployment and ensures optimal performance across various environments. This guide will show you how to easily convert your YOLOv8 models to ONNX and enhance their scalability and effectiveness in real-world applications. @@ -44,7 +44,7 @@ The ability of ONNX to handle various formats can be attributed to the following ## Common Usage of ONNX -Before we jump into how to export YOLOv8 models to the ONNX format, let’s take a look at where ONNX models are usually used. +Before we jump into how to export YOLOv8 models to the ONNX format, let's take a look at where ONNX models are usually used. ### CPU Deployment @@ -131,4 +131,4 @@ In this guide, you've learned how to export Ultralytics YOLOv8 models to ONNX fo For further details on usage, visit the [ONNX official documentation](https://onnx.ai/onnx/intro/). -Also, if you’d like to know more about other Ultralytics YOLOv8 integrations, visit our [integration guide page](../integrations/index.md). You'll find plenty of useful resources and insights there. +Also, if you'd like to know more about other Ultralytics YOLOv8 integrations, visit our [integration guide page](../integrations/index.md). You'll find plenty of useful resources and insights there. diff --git a/docs/en/integrations/paddlepaddle.md b/docs/en/integrations/paddlepaddle.md index 97e3e698..8dc6e118 100644 --- a/docs/en/integrations/paddlepaddle.md +++ b/docs/en/integrations/paddlepaddle.md @@ -16,11 +16,11 @@ The ability to export to PaddlePaddle model format allows you to optimize your [ PaddlePaddle Logo

-Developed by Baidu, [PaddlePaddle](https://www.paddlepaddle.org.cn/en) (**PA**rallel **D**istributed **D**eep **LE**arning) is China's first open-source deep learning platform. Unlike some frameworks built mainly for research, PaddlePaddle prioritizes ease of use and smooth integration across industries.
+Developed by Baidu, [PaddlePaddle](https://www.paddlepaddle.org.cn/en) (**PA**rallel **D**istributed **D**eep **LE**arning) is China's first open-source deep learning platform. Unlike some frameworks built mainly for research, PaddlePaddle prioritizes ease of use and smooth integration across industries.
 
 It offers tools and resources similar to popular frameworks like TensorFlow and PyTorch, making it accessible for developers of all experience levels. From farming and factories to service businesses, PaddlePaddle's large developer community of over 4.77 million is helping create and deploy AI applications.
 
-By exporting your Ultralytics YOLOv8 models to PaddlePaddle format, you can tap into PaddlePaddle’s strengths in performance optimization. PaddlePaddle prioritizes efficient model execution and reduced memory usage. As a result, your YOLOv8 models can potentially achieve even better performance, delivering top-notch results in practical scenarios.
+By exporting your Ultralytics YOLOv8 models to PaddlePaddle format, you can tap into PaddlePaddle's strengths in performance optimization. PaddlePaddle prioritizes efficient model execution and reduced memory usage. As a result, your YOLOv8 models can potentially achieve even better performance, delivering top-notch results in practical scenarios.
 
 ## Key Features of PaddlePaddle Models
 
diff --git a/docs/en/integrations/paperspace.md b/docs/en/integrations/paperspace.md
index 7563125f..3ce8a1b3 100644
--- a/docs/en/integrations/paperspace.md
+++ b/docs/en/integrations/paperspace.md
@@ -46,12 +46,12 @@ Explore more capabilities of YOLOv8 and Paperspace Gradient in a discussion with
         allowfullscreen>
     </iframe>
- Watch: Ultralytics Live Session 7: It’s All About the Environment: Optimizing YOLOv8 Training With Gradient + Watch: Ultralytics Live Session 7: It's All About the Environment: Optimizing YOLOv8 Training With Gradient

## Key Features of Paperspace Gradient -As you explore the Paperspace console, you’ll see how each step of the machine-learning workflow is supported and enhanced. Here are some things to look out for: +As you explore the Paperspace console, you'll see how each step of the machine-learning workflow is supported and enhanced. Here are some things to look out for: - **One-Click Notebooks:** Gradient provides pre-configured Jupyter Notebooks specifically tailored for YOLOv8, eliminating the need for environment setup and dependency management. Simply choose the desired notebook and start experimenting immediately. @@ -81,6 +81,6 @@ While many options are available for training, deploying, and evaluating YOLOv8 This guide explored the Paperspace Gradient integration for training YOLOv8 models. Gradient provides the tools and infrastructure to accelerate your AI development journey from effortless model training and evaluation to streamlined deployment options. -For further exploration, visit [PaperSpace’s official documentation](https://docs.digitalocean.com/products/paperspace/). +For further exploration, visit [PaperSpace's official documentation](https://docs.digitalocean.com/products/paperspace/). Also, visit the [Ultralytics integration guide page](index.md) to learn more about different YOLOv8 integrations. It's full of insights and tips to take your computer vision projects to the next level. diff --git a/docs/en/integrations/ray-tune.md b/docs/en/integrations/ray-tune.md index 65f6f771..5cd63ecc 100644 --- a/docs/en/integrations/ray-tune.md +++ b/docs/en/integrations/ray-tune.md @@ -6,7 +6,7 @@ keywords: Ultralytics, YOLOv8, Ray Tune, hyperparameter tuning, machine learning # Efficient Hyperparameter Tuning with Ray Tune and YOLOv8 -Hyperparameter tuning is vital in achieving peak model performance by discovering the optimal set of hyperparameters. This involves running trials with different hyperparameters and evaluating each trial’s performance. +Hyperparameter tuning is vital in achieving peak model performance by discovering the optimal set of hyperparameters. This involves running trials with different hyperparameters and evaluating each trial's performance. ## Accelerate Tuning with Ultralytics YOLOv8 and Ray Tune @@ -182,4 +182,4 @@ plt.show() In this documentation, we covered common workflows to analyze the results of experiments run with Ray Tune using Ultralytics. The key steps include loading the experiment results from a directory, performing basic experiment-level and trial-level analysis and plotting metrics. -Explore further by looking into Ray Tune’s [Analyze Results](https://docs.ray.io/en/latest/tune/examples/tune_analyze_results.html) docs page to get the most out of your hyperparameter tuning experiments. +Explore further by looking into Ray Tune's [Analyze Results](https://docs.ray.io/en/latest/tune/examples/tune_analyze_results.html) docs page to get the most out of your hyperparameter tuning experiments. diff --git a/docs/en/integrations/roboflow.md b/docs/en/integrations/roboflow.md index a4a07d9c..19b20a62 100644 --- a/docs/en/integrations/roboflow.md +++ b/docs/en/integrations/roboflow.md @@ -6,7 +6,7 @@ keywords: Ultralytics, YOLOv8, Roboflow, vector analysis, confusion matrix, data # Roboflow -[Roboflow](https://roboflow.com/?ref=ultralytics) has everything you need to build and deploy computer vision models. 
Connect Roboflow at any step in your pipeline with APIs and SDKs, or use the end-to-end interface to automate the entire process from image to inference. Whether you’re in need of [data labeling](https://roboflow.com/annotate?ref=ultralytics), [model training](https://roboflow.com/train?ref=ultralytics), or [model deployment](https://roboflow.com/deploy?ref=ultralytics), Roboflow gives you building blocks to bring custom computer vision solutions to your project. +[Roboflow](https://roboflow.com/?ref=ultralytics) has everything you need to build and deploy computer vision models. Connect Roboflow at any step in your pipeline with APIs and SDKs, or use the end-to-end interface to automate the entire process from image to inference. Whether you're in need of [data labeling](https://roboflow.com/annotate?ref=ultralytics), [model training](https://roboflow.com/train?ref=ultralytics), or [model deployment](https://roboflow.com/deploy?ref=ultralytics), Roboflow gives you building blocks to bring custom computer vision solutions to your project. !!! Question "Licensing" diff --git a/docs/en/integrations/tensorboard.md b/docs/en/integrations/tensorboard.md index 1473da9c..32e8886e 100644 --- a/docs/en/integrations/tensorboard.md +++ b/docs/en/integrations/tensorboard.md @@ -4,9 +4,9 @@ description: Walk through the integration of YOLOv8 with TensorBoard to be able keywords: TensorBoard, YOLOv8, Visualization, TensorFlow, Training Analysis, Metric Tracking, Model Graphs, Experimentation, Ultralytics --- -# Gain Visual Insights with YOLOv8’s Integration with TensorBoard +# Gain Visual Insights with YOLOv8's Integration with TensorBoard -Understanding and fine-tuning computer vision models like [Ultralytics’ YOLOv8](https://ultralytics.com) becomes more straightforward when you take a closer look at their training processes. Model training visualization helps with getting insights into the model's learning patterns, performance metrics, and overall behavior. YOLOv8's integration with TensorBoard makes this process of visualization and analysis easier and enables more efficient and informed adjustments to the model. +Understanding and fine-tuning computer vision models like [Ultralytics' YOLOv8](https://ultralytics.com) becomes more straightforward when you take a closer look at their training processes. Model training visualization helps with getting insights into the model's learning patterns, performance metrics, and overall behavior. YOLOv8's integration with TensorBoard makes this process of visualization and analysis easier and enables more efficient and informed adjustments to the model. This guide covers how to use TensorBoard with YOLOv8. You'll learn about various visualizations, from tracking metrics to analyzing model graphs. These tools will help you understand your YOLOv8 model's performance better. @@ -82,7 +82,7 @@ For more information related to the model training process, be sure to check our ## Understanding Your TensorBoard for YOLOv8 Training -Now, let’s focus on understanding the various features and components of TensorBoard in the context of YOLOv8 training. The three key sections of the TensorBoard are Time Series, Scalars, and Graphs. +Now, let's focus on understanding the various features and components of TensorBoard in the context of YOLOv8 training. The three key sections of the TensorBoard are Time Series, Scalars, and Graphs. 
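As a minimal sketch of how such a run might be produced (assuming a recent Ultralytics release where TensorBoard logging can be toggled through the package settings), a short training job like the following generates logs that populate all three sections:

```python
from ultralytics import YOLO, settings

# Enable the TensorBoard logger for Ultralytics training runs
settings.update({"tensorboard": True})

# A brief training run produces metrics for TensorBoard to display
model = YOLO("yolov8n.pt")
model.train(data="coco8.yaml", epochs=3, imgsz=640)

# Then, from a terminal, point TensorBoard at the run directory:
#   tensorboard --logdir runs/detect/train
```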
### Time Series
 
@@ -102,7 +102,7 @@ The Time Series feature in the TensorBoard offers a dynamic and detailed perspec
 
 #### Importance of Time Series in YOLOv8 Training
 
-The Time Series section is essential for a thorough analysis of the YOLOv8 model's training progress. It lets you track the metrics in real time to promptly identify and solve issues. It also offers a detailed view of each metric's progression, which is crucial for fine-tuning the model and enhancing its performance.
+The Time Series section is essential for a thorough analysis of the YOLOv8 model's training progress. It lets you track the metrics in real time to promptly identify and solve issues. It also offers a detailed view of each metric's progression, which is crucial for fine-tuning the model and enhancing its performance.
 
 ### Scalars
 
@@ -148,6 +148,6 @@ Graphs are particularly useful for debugging the model, especially in complex ar
 
 This guide aims to help you use TensorBoard with YOLOv8 for visualization and analysis of machine learning model training. It focuses on explaining how key TensorBoard features can provide insights into training metrics and model performance during YOLOv8 training sessions.
 
-For a more detailed exploration of these features and effective utilization strategies, you can refer to TensorFlow’s official [TensorBoard documentation](https://www.tensorflow.org/tensorboard/get_started) and their [GitHub repository](https://github.com/tensorflow/tensorboard).
+For a more detailed exploration of these features and effective utilization strategies, you can refer to TensorFlow's official [TensorBoard documentation](https://www.tensorflow.org/tensorboard/get_started) and their [GitHub repository](https://github.com/tensorflow/tensorboard).
 
 Want to learn more about the various integrations of Ultralytics? Check out the [Ultralytics integrations guide page](../integrations/index.md) to see what other exciting capabilities are waiting to be discovered!
diff --git a/docs/en/integrations/tensorrt.md b/docs/en/integrations/tensorrt.md
index 75eaa2e7..43446cfd 100644
--- a/docs/en/integrations/tensorrt.md
+++ b/docs/en/integrations/tensorrt.md
@@ -16,7 +16,7 @@ By using the TensorRT export format, you can enhance your [Ultralytics YOLOv8](h
 TensorRT Overview
</div>

-[TensorRT](https://developer.nvidia.com/tensorrt), developed by NVIDIA, is an advanced software development kit (SDK) designed for high-speed deep learning inference. It’s well-suited for real-time applications like object detection. +[TensorRT](https://developer.nvidia.com/tensorrt), developed by NVIDIA, is an advanced software development kit (SDK) designed for high-speed deep learning inference. It's well-suited for real-time applications like object detection. This toolkit optimizes deep learning models for NVIDIA GPUs and results in faster and more efficient operations. TensorRT models undergo TensorRT optimization, which includes techniques like layer fusion, precision calibration (INT8 and FP16), dynamic tensor memory management, and kernel auto-tuning. Converting deep learning models into the TensorRT format allows developers to realize the potential of NVIDIA GPUs fully. @@ -40,7 +40,7 @@ TensorRT models offer a range of key features that contribute to their efficienc ## Deployment Options in TensorRT -Before we look at the code for exporting YOLOv8 models to the TensorRT format, let’s understand where TensorRT models are normally used. +Before we look at the code for exporting YOLOv8 models to the TensorRT format, let's understand where TensorRT models are normally used. TensorRT offers several deployment options, and each option balances ease of integration, performance optimization, and flexibility differently: @@ -205,7 +205,7 @@ Experimentation by NVIDIA led them to recommend using at least 500 calibration i - **Increased development times:** Finding the "optimal" settings for INT8 calibration for dataset and device can take a significant amount of testing. -- **Hardware dependency:** Calibration and performance gains could be highly hardware dependent and model weights are less transferrable. +- **Hardware dependency:** Calibration and performance gains could be highly hardware dependent and model weights are less transferable. ## Ultralytics YOLO TensorRT Export Performance diff --git a/docs/en/integrations/tf-graphdef.md b/docs/en/integrations/tf-graphdef.md index 95d6f392..eb396ac3 100644 --- a/docs/en/integrations/tf-graphdef.md +++ b/docs/en/integrations/tf-graphdef.md @@ -107,7 +107,7 @@ For more details about supported export options, visit the [Ultralytics document ## Deploying Exported YOLOv8 TF GraphDef Models -Once you’ve exported your YOLOv8 model to the TF GraphDef format, the next step is deployment. The primary and recommended first step for running a TF GraphDef model is to use the YOLO("model.pb") method, as previously shown in the usage code snippet. +Once you've exported your YOLOv8 model to the TF GraphDef format, the next step is deployment. The primary and recommended first step for running a TF GraphDef model is to use the YOLO("model.pb") method, as previously shown in the usage code snippet. However, for more information on deploying your TF GraphDef models, take a look at the following resources: diff --git a/docs/en/integrations/tf-savedmodel.md b/docs/en/integrations/tf-savedmodel.md index 4fdbd001..e77a95ce 100644 --- a/docs/en/integrations/tf-savedmodel.md +++ b/docs/en/integrations/tf-savedmodel.md @@ -42,7 +42,7 @@ TF SavedModel provides a range of options to deploy your machine learning models - **Mobile and Embedded Devices:** TensorFlow Lite, a lightweight solution for running machine learning models on mobile, embedded, and IoT devices, supports converting TF SavedModels to the TensorFlow Lite format. 
This allows you to deploy your models on a wide range of devices, from smartphones and tablets to microcontrollers and edge devices. -- **TensorFlow Runtime:** TensorFlow Runtime (tfrt) is a high-performance runtime for executing TensorFlow graphs. It provides lower-level APIs for loading and running TF SavedModels in C++ environments. TensorFlow Runtime offers better performance compared to the standard TensorFlow runtime. It is suitable for deployment scenarios that require low-latency inference and tight integration with existing C++ codebases. +- **TensorFlow Runtime:** TensorFlow Runtime (`tfrt`) is a high-performance runtime for executing TensorFlow graphs. It provides lower-level APIs for loading and running TF SavedModels in C++ environments. TensorFlow Runtime offers better performance compared to the standard TensorFlow runtime. It is suitable for deployment scenarios that require low-latency inference and tight integration with existing C++ codebases. ## Exporting YOLOv8 Models to TF SavedModel @@ -105,7 +105,7 @@ Now that you have exported your YOLOv8 model to the TF SavedModel format, the ne However, for in-depth instructions on deploying your TF SavedModel models, take a look at the following resources: -- **[TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving)**: Here’s the developer documentation for how to deploy your TF SavedModel models using TensorFlow Serving. +- **[TensorFlow Serving](https://www.tensorflow.org/tfx/guide/serving)**: Here's the developer documentation for how to deploy your TF SavedModel models using TensorFlow Serving. - **[Run a TensorFlow SavedModel in Node.js](https://blog.tensorflow.org/2020/01/run-tensorflow-savedmodel-in-nodejs-directly-without-conversion.html)**: A TensorFlow blog post on running a TensorFlow SavedModel in Node.js directly without conversion. diff --git a/docs/en/integrations/tfjs.md b/docs/en/integrations/tfjs.md index 513adefb..474cd617 100644 --- a/docs/en/integrations/tfjs.md +++ b/docs/en/integrations/tfjs.md @@ -6,9 +6,9 @@ keywords: Ultralytics YOLOv8, TensorFlow.js, TF.js, Model Deployment, Node.js, M # Export to TF.js Model Format From a YOLOv8 Model Format -Deploying machine learning models directly in the browser or on Node.js can be tricky. You’ll need to make sure your model format is optimized for faster performance so that the model can be used to run interactive applications locally on the user’s device. The TensorFlow.js, or TF.js, model format is designed to use minimal power while delivering fast performance. +Deploying machine learning models directly in the browser or on Node.js can be tricky. You'll need to make sure your model format is optimized for faster performance so that the model can be used to run interactive applications locally on the user's device. The TensorFlow.js, or TF.js, model format is designed to use minimal power while delivering fast performance. -The ‘export to TF.js model format’ feature allows you to optimize your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models for high-speed and locally-run object detection inference. In this guide, we'll walk you through converting your models to the TF.js format, making it easier for your models to perform well on various local browsers and Node.js applications. +The 'export to TF.js model format' feature allows you to optimize your [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) models for high-speed and locally-run object detection inference. 
In this guide, we'll walk you through converting your models to the TF.js format, making it easier for your models to perform well on various local browsers and Node.js applications. ## Why Should You Export to TF.js? @@ -103,7 +103,7 @@ Now that you have exported your YOLOv8 model to the TF.js format, the next step However, for in-depth instructions on deploying your TF.js models, take a look at the following resources: -- **[Chrome Extension](https://www.tensorflow.org/js/tutorials/deployment/web_ml_in_chrome)**: Here’s the developer documentation for how to deploy your TF.js models to a Chrome extension. +- **[Chrome Extension](https://www.tensorflow.org/js/tutorials/deployment/web_ml_in_chrome)**: Here's the developer documentation for how to deploy your TF.js models to a Chrome extension. - **[Run TensorFlow.js in Node.js](https://www.tensorflow.org/js/guide/nodejs)**: A TensorFlow blog post on running TensorFlow.js in Node.js directly. diff --git a/docs/en/integrations/tflite.md b/docs/en/integrations/tflite.md index 5a39185b..d88223c4 100644 --- a/docs/en/integrations/tflite.md +++ b/docs/en/integrations/tflite.md @@ -34,7 +34,7 @@ TFLite models offer a wide range of key features that enable on-device machine l ## Deployment Options in TFLite -Before we look at the code for exporting YOLOv8 models to the TFLite format, let’s understand how TFLite models are normally used. +Before we look at the code for exporting YOLOv8 models to the TFLite format, let's understand how TFLite models are normally used. TFLite offers various on-device deployment options for machine learning models, including: @@ -117,6 +117,6 @@ After successfully exporting your Ultralytics YOLOv8 models to TFLite format, yo In this guide, we focused on how to export to TFLite format. By converting your Ultralytics YOLOv8 models to TFLite model format, you can improve the efficiency and speed of YOLOv8 models, making them more effective and suitable for edge computing environments. -For further details on usage, visit [TFLite’s official documentation](https://www.tensorflow.org/lite/guide). +For further details on usage, visit the [TFLite official documentation](https://www.tensorflow.org/lite/guide). Also, if you're curious about other Ultralytics YOLOv8 integrations, make sure to check out our [integration guide page](../integrations/index.md). You'll find tons of helpful info and insights waiting for you there. diff --git a/docs/en/integrations/torchscript.md b/docs/en/integrations/torchscript.md index 61ba35af..2f536a85 100644 --- a/docs/en/integrations/torchscript.md +++ b/docs/en/integrations/torchscript.md @@ -30,11 +30,11 @@ TorchScript, a key part of the PyTorch ecosystem, provides powerful features for Here are the key features that make TorchScript a valuable tool for developers: -- **Static Graph Execution**: TorchScript uses a static graph representation of the model’s computation, which is different from PyTorch’s dynamic graph execution. In static graph execution, the computational graph is defined and compiled once before the actual execution, resulting in improved performance during inference. +- **Static Graph Execution**: TorchScript uses a static graph representation of the model's computation, which is different from PyTorch's dynamic graph execution. In static graph execution, the computational graph is defined and compiled once before the actual execution, resulting in improved performance during inference. 
- **Model Serialization**: TorchScript allows you to serialize PyTorch models into a platform-independent format. Serialized models can be loaded without requiring the original Python code, enabling deployment in different runtime environments. -- **JIT Compilation**: TorchScript uses Just-In-Time (JIT) compilation to convert PyTorch models into an optimized intermediate representation. JIT compiles the model’s computational graph, enabling efficient execution on target devices. +- **JIT Compilation**: TorchScript uses Just-In-Time (JIT) compilation to convert PyTorch models into an optimized intermediate representation. JIT compiles the model's computational graph, enabling efficient execution on target devices. - **Cross-Language Integration**: With TorchScript, you can export PyTorch models to other languages such as C++, Java, and JavaScript. This makes it easier to integrate PyTorch models into existing software systems written in different languages. @@ -42,7 +42,7 @@ Here are the key features that make TorchScript a valuable tool for developers: ## Deployment Options in TorchScript -Before we look at the code for exporting YOLOv8 models to the TorchScript format, let’s understand where TorchScript models are normally used. +Before we look at the code for exporting YOLOv8 models to the TorchScript format, let's understand where TorchScript models are normally used. TorchScript offers various deployment options for machine learning models, such as: @@ -121,6 +121,6 @@ After successfully exporting your Ultralytics YOLOv8 models to TorchScript forma In this guide, we explored the process of exporting Ultralytics YOLOv8 models to the TorchScript format. By following the provided instructions, you can optimize YOLOv8 models for performance and gain the flexibility to deploy them across various platforms and environments. -For further details on usage, visit [TorchScript’s official documentation](https://pytorch.org/docs/stable/jit.html). +For further details on usage, visit [TorchScript's official documentation](https://pytorch.org/docs/stable/jit.html). -Also, if you’d like to know more about other Ultralytics YOLOv8 integrations, visit our [integration guide page](../integrations/index.md). You'll find plenty of useful resources and insights there. +Also, if you'd like to know more about other Ultralytics YOLOv8 integrations, visit our [integration guide page](../integrations/index.md). You'll find plenty of useful resources and insights there. diff --git a/docs/en/integrations/weights-biases.md b/docs/en/integrations/weights-biases.md index 847172ca..edb9b15a 100644 --- a/docs/en/integrations/weights-biases.md +++ b/docs/en/integrations/weights-biases.md @@ -8,7 +8,7 @@ keywords: Ultralytics, YOLOv8, Object Detection, Weights & Biases, Model Trainin Object detection models like [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) have become integral to many computer vision applications. However, training, evaluating, and deploying these complex models introduces several challenges. Tracking key training metrics, comparing model variants, analyzing model behavior, and detecting issues require substantial instrumentation and experiment management. -This guide showcases Ultralytics YOLOv8 integration with Weights & Biases’ for enhanced experiment tracking, model-checkpointing, and visualization of model performance. It also includes instructions for setting up the integration, training, fine-tuning, and visualizing results using Weights & Biases’ interactive features. 
+This guide showcases Ultralytics YOLOv8 integration with Weights & Biases for enhanced experiment tracking, model-checkpointing, and visualization of model performance. It also includes instructions for setting up the integration, training, fine-tuning, and visualizing results using Weights & Biases' interactive features.

## Weights & Biases

@@ -93,7 +93,7 @@ Before diving into the usage instructions for YOLOv8 model training with Weights

 ### Understanding the Code

-Let’s understand the steps showcased in the usage code snippet above.
+Let's understand the steps showcased in the usage code snippet above (a consolidated sketch of these steps appears further below).

 - **Step 1: Initialize a Weights & Biases Run**: Start by initializing a Weights & Biases run, specifying the project name and the job type. This run will track and manage the training and validation processes of your model.

@@ -114,7 +114,7 @@ Let’s understand the steps showcased in the usage code snippet above.
 Upon running the usage code snippet above, you can expect the following key outputs:

 - The setup of a new run with its unique ID, indicating the start of the training process.
-- A concise summary of the model’s structure, including the number of layers and parameters.
+- A concise summary of the model's structure, including the number of layers and parameters.
 - Regular updates on important metrics such as box loss, cls loss, dfl loss, precision, recall, and mAP scores during each training epoch.
 - At the end of training, detailed metrics including the model's inference speed, and overall accuracy metrics are displayed.
 - Links to the Weights & Biases dashboard for in-depth analysis and visualization of the training process, along with information on local log file locations.

@@ -141,15 +141,15 @@ After running the usage code snippet, you can access the Weights & Biases (W&B) 

 - **Model Artifacts Management**: Access and share model checkpoints, facilitating easy deployment and collaboration.

-- **Viewing Inference Results with Image Overlay**: Visualize the prediction results on images using interactive overlays in Weights & Biases, providing a clear and detailed view of model performance on real-world data. For more detailed information on Weights & Biases’ image overlay capabilities, check out this [link](https://docs.wandb.ai/guides/track/log/media#image-overlays).
+- **Viewing Inference Results with Image Overlay**: Visualize the prediction results on images using interactive overlays in Weights & Biases, providing a clear and detailed view of model performance on real-world data. For more detailed information on Weights & Biases' image overlay capabilities, check out this [link](https://docs.wandb.ai/guides/track/log/media#image-overlays).
-Take a look at how Weights & Biases’ image overlays helps visualize model inferences.
+Take a look at how Weights & Biases' image overlays help visualize model inferences.
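
To tie the steps above together, here is a minimal end-to-end sketch of the workflow described under "Understanding the Code". This is an illustrative outline rather than the guide's exact snippet: it uses the documented `add_wandb_callback` helper from `wandb.integration.ultralytics`, and the project name, dataset (`coco8.yaml`), and epoch count are placeholder choices.

```python
import wandb
from ultralytics import YOLO
from wandb.integration.ultralytics import add_wandb_callback

# Step 1: Initialize a Weights & Biases run with a project name and job type
wandb.init(project="ultralytics", job_type="training")

# Step 2: Load a pretrained YOLOv8 model
model = YOLO("yolov8n.pt")

# Step 3: Attach the W&B callback to log metrics, checkpoints, and image overlays
add_wandb_callback(model, enable_model_checkpointing=True)

# Step 4: Train; box/cls/dfl losses, precision, recall, and mAP stream to the dashboard
model.train(project="ultralytics", data="coco8.yaml", epochs=5, imgsz=640)

# Step 5: Close the run so all logs and artifacts finish syncing
wandb.finish()
```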

By using these features, you can effectively track, analyze, and optimize your YOLOv8 model's training, ensuring the best possible performance and efficiency.

## Summary

-This guide helped you explore Ultralytics’ YOLOv8 integration with Weights & Biases. It illustrates the ability of this integration to efficiently track and visualize model training and prediction results.
+This guide helped you explore Ultralytics' YOLOv8 integration with Weights & Biases. It illustrates the ability of this integration to efficiently track and visualize model training and prediction results.

For further details on usage, visit [Weights & Biases' official documentation](https://docs.wandb.ai/guides/integrations/ultralytics).
diff --git a/docs/en/models/rtdetr.md b/docs/en/models/rtdetr.md
index e93d036f..5dd55567 100644
--- a/docs/en/models/rtdetr.md
+++ b/docs/en/models/rtdetr.md
@@ -1,6 +1,6 @@
 ---
 comments: true
-description: Discover the features and benefits of RT-DETR, Baidu’s efficient and adaptable real-time object detector powered by Vision Transformers, including pre-trained models.
+description: Discover the features and benefits of RT-DETR, Baidu's efficient and adaptable real-time object detector powered by Vision Transformers, including pre-trained models.
 keywords: RT-DETR, Baidu, Vision Transformers, object detection, real-time performance, CUDA, TensorRT, IoU-aware query selection, Ultralytics, Python API, PaddlePaddle
---

diff --git a/docs/en/modes/val.md b/docs/en/modes/val.md
index 98f6fd00..006937ba 100644
--- a/docs/en/modes/val.md
+++ b/docs/en/modes/val.md
@@ -47,7 +47,7 @@ These are the notable functionalities offered by YOLOv8's Val mode:

 ## Usage Examples

-Validate trained YOLOv8n model accuracy on the COCO8 dataset. No argument need to passed as the `model` retains it's training `data` and arguments as model attributes. See Arguments section below for a full list of export arguments.
+Validate trained YOLOv8n model accuracy on the COCO8 dataset. No arguments need to be passed, as the `model` retains its training `data` and arguments as model attributes. See the Arguments section below for a full list of validation arguments.

 !!! Example

diff --git a/docs/en/reference/data/annotator.md b/docs/en/reference/data/annotator.md
index 8e9309ec..568fbe9d 100644
--- a/docs/en/reference/data/annotator.md
+++ b/docs/en/reference/data/annotator.md
@@ -1,5 +1,5 @@
 ---
-description: Enhance your machine learning model with Ultralytics’ auto_annotate function. Simplify data annotation for improved model training.
+description: Enhance your machine learning model with Ultralytics' auto_annotate function. Simplify data annotation for improved model training.
 keywords: Ultralytics, Auto-Annotate, Machine Learning, AI, Annotation, Data Processing, Model Training
 ---
 
diff --git a/docs/en/reference/data/utils.md b/docs/en/reference/data/utils.md
index 7ac3add6..a157ce8c 100644
--- a/docs/en/reference/data/utils.md
+++ b/docs/en/reference/data/utils.md
@@ -1,5 +1,5 @@
 ---
-description: Uncover a detailed guide to Ultralytics data utilities. Learn functions from img2label_paths to autosplit, all boosting your YOLO model’s efficiency.
+description: Uncover a detailed guide to Ultralytics data utilities. Learn functions from img2label_paths to autosplit, all boosting your YOLO model's efficiency.
keywords: Ultralytics, data utils, YOLO, img2label_paths, exif_size, polygon2mask, polygons2masks_overlap, check_cls_dataset, delete_dsstore, autosplit
 ---
 
diff --git a/docs/en/tasks/detect.md b/docs/en/tasks/detect.md
index a9cfbe2a..ae13a00d 100644
--- a/docs/en/tasks/detect.md
+++ b/docs/en/tasks/detect.md
@@ -82,7 +82,7 @@ YOLO detection dataset format can be found in detail in the [Dataset Guide](../d

 ## Val

-Validate trained YOLOv8n model accuracy on the COCO8 dataset. No argument need to passed as the `model` retains it's training `data` and arguments as model attributes.
+Validate trained YOLOv8n model accuracy on the COCO8 dataset. No arguments need to be passed, as the `model` retains its training `data` and arguments as model attributes.

 !!! Example

diff --git a/docs/en/usage/cli.md b/docs/en/usage/cli.md
index 35dd45da..596c624a 100644
--- a/docs/en/usage/cli.md
+++ b/docs/en/usage/cli.md
@@ -110,7 +110,7 @@ Train YOLOv8n on the COCO8 dataset for 100 epochs at image size 640. For a full

 ## Val

-Validate trained YOLOv8n model accuracy on the COCO8 dataset. No argument need to passed as the `model` retains it's training `data` and arguments as model attributes.
+Validate trained YOLOv8n model accuracy on the COCO8 dataset. No arguments need to be passed, as the `model` retains its training `data` and arguments as model attributes.

 !!! Example "Example"

diff --git a/docs/en/yolov5/environments/google_cloud_quickstart_tutorial.md b/docs/en/yolov5/environments/google_cloud_quickstart_tutorial.md
index 45754445..1574dedd 100644
--- a/docs/en/yolov5/environments/google_cloud_quickstart_tutorial.md
+++ b/docs/en/yolov5/environments/google_cloud_quickstart_tutorial.md
@@ -6,15 +6,15 @@ keywords: YOLOv5, Google Cloud Platform, GCP, Deep Learning VM, ML model trainin

 # Mastering YOLOv5 🚀 Deployment on Google Cloud Platform (GCP) Deep Learning Virtual Machine (VM) ⭐

-Embarking on the journey of artificial intelligence and machine learning can be exhilarating, especially when you leverage the power and flexibility of a cloud platform. Google Cloud Platform (GCP) offers robust tools tailored for machine learning enthusiasts and professionals alike. One such tool is the Deep Learning VM that is preconfigured for data science and ML tasks. In this tutorial, we will navigate through the process of setting up YOLOv5 on a GCP Deep Learning VM. Whether you’re taking your first steps in ML or you’re a seasoned practitioner, this guide is designed to provide you with a clear pathway to implementing object detection models powered by YOLOv5.
+Embarking on the journey of artificial intelligence and machine learning can be exhilarating, especially when you leverage the power and flexibility of a cloud platform. Google Cloud Platform (GCP) offers robust tools tailored for machine learning enthusiasts and professionals alike. One such tool is the Deep Learning VM that is preconfigured for data science and ML tasks. In this tutorial, we will navigate through the process of setting up YOLOv5 on a GCP Deep Learning VM. Whether you're taking your first steps in ML or you're a seasoned practitioner, this guide is designed to provide you with a clear pathway to implementing object detection models powered by YOLOv5.

-🆓 Plus, if you're a fresh GCP user, you’re in luck with a [$300 free credit offer](https://cloud.google.com/free/docs/gcp-free-tier#free-trial) to kickstart your projects. 
+🆓 Plus, if you're a fresh GCP user, you're in luck with a [$300 free credit offer](https://cloud.google.com/free/docs/gcp-free-tier#free-trial) to kickstart your projects.

In addition to GCP, explore other accessible quickstart options for YOLOv5, like our [Colab Notebook](https://colab.research.google.com/github/ultralytics/yolov5/blob/master/tutorial.ipynb) for a browser-based experience, or the scalability of [Amazon AWS](./aws_quickstart_tutorial.md). Furthermore, container aficionados can utilize our official Docker image at [Docker Hub](https://hub.docker.com/r/ultralytics/yolov5) for an encapsulated environment.

## Step 1: Create and Configure Your Deep Learning VM

-Let’s begin by creating a virtual machine that’s tuned for deep learning:
+Let's begin by creating a virtual machine that's tuned for deep learning:

1. Head over to the [GCP marketplace](https://console.cloud.google.com/marketplace/details/click-to-deploy-images/deeplearning) and select the **Deep Learning VM**.
2. Opt for a **n1-standard-8** instance; it offers a balance of 8 vCPUs and 30 GB of memory, ideally suited for our needs.
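
As an aside before the remaining configuration steps, here is a quick sanity check you could run once the VM is up. This sketch rests on stated assumptions: it presumes the Deep Learning VM image ships with a CUDA-enabled PyTorch build, and it pulls YOLOv5s via Torch Hub rather than the cloned repository.

```python
import torch

# Confirm the VM's GPU is visible to PyTorch (Deep Learning VM images bundle CUDA)
print(f"CUDA available: {torch.cuda.is_available()}")

# Pull a pretrained YOLOv5s model from Torch Hub and run a test inference
model = torch.hub.load("ultralytics/yolov5", "yolov5s")
results = model("https://ultralytics.com/images/zidane.jpg")
results.print()  # prints detections, inference speed, and image size
```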