Add visualize_image_annotations function (#18430)
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: Ultralytics Assistant <135830346+UltralyticsAssistant@users.noreply.github.com>

parent 7f1a50e893
commit 990424cb63

3 changed files with 73 additions and 0 deletions
@@ -35,6 +35,10 @@ keywords: Ultralytics, dataset utils, data handling, image verification, Python,

<br><br><hr><br>

## ::: ultralytics.data.utils.visualize_image_annotations

<br><br><hr><br>

## ::: ultralytics.data.utils.polygon2mask

<br><br><hr><br>
@@ -46,6 +46,26 @@ This function does not return any value. For further details on how the function

- [See the reference section for `annotator.auto_annotate`](../reference/data/annotator.md#ultralytics.data.annotator.auto_annotate) for more insight on how the function operates.
- Use in combination with the [function `segments2boxes`](#convert-segments-to-bounding-boxes) to generate object detection bounding boxes as well

### Visualize Dataset Annotations

This function visualizes YOLO annotations on an image before training, helping to identify and correct any wrong annotations that could lead to incorrect detection results. It draws bounding boxes, labels objects with class names, and adjusts text color based on the background's luminance for better readability.

```{ .py .annotate }
from ultralytics.data.utils import visualize_image_annotations

label_map = {  # Define the label map with all annotated class labels.
    0: "person",
    1: "car",
}

# Visualize
visualize_image_annotations(
    "path/to/image.jpg",  # Input image path.
    "path/to/annotations.txt",  # Annotation file path for the image.
    label_map,
)
```
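
The `path/to/...` arguments above are placeholders. As a minimal, self-contained sketch (the file names `sample.jpg` and `sample.txt` are arbitrary), the snippet below first writes a small test image and a two-line label file in the normalized YOLO format `class_id x_center y_center width height`, then draws the annotations:

```python
from pathlib import Path

from PIL import Image

from ultralytics.data.utils import visualize_image_annotations

# Create a plain 640x480 test image (any PIL-readable format works).
Image.new("RGB", (640, 480), color=(64, 64, 64)).save("sample.jpg")

# Two objects in normalized YOLO format: class_id x_center y_center width height.
Path("sample.txt").write_text("0 0.5 0.5 0.25 0.4\n1 0.2 0.3 0.1 0.2\n")

# Boxes are drawn in a class-specific color; label text flips to white on dark boxes.
visualize_image_annotations("sample.jpg", "sample.txt", {0: "person", 1: "car"})
```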

### Convert Segmentation Masks into YOLO Format

![Segmentation Masks to YOLO Format](https://github.com/ultralytics/docs/releases/download/0/segmentation-masks-to-yolo-format.avif)
@@ -167,6 +167,55 @@ def verify_image_label(args):
        return [None, None, None, None, None, nm, nf, ne, nc, msg]


def visualize_image_annotations(image_path, txt_path, label_map):
    """
    Visualizes YOLO annotations (bounding boxes and class labels) on an image.

    This function reads an image and its corresponding annotation file in YOLO format, then draws bounding boxes
    around the annotated objects and labels them with their class names. Box colors are assigned based on the class
    ID, and the label text color is adjusted for readability based on the background color's luminance.

    Args:
        image_path (str): Path to the image file to annotate; any format supported by PIL (e.g., .jpg, .png) works.
        txt_path (str): Path to the annotation file in YOLO format, containing one line per object with:
            - class_id (int): The class index.
            - x_center (float): The X center of the bounding box (relative to image width).
            - y_center (float): The Y center of the bounding box (relative to image height).
            - width (float): The width of the bounding box (relative to image width).
            - height (float): The height of the bounding box (relative to image height).
        label_map (dict): A dictionary that maps class IDs (integers) to class labels (strings).

    Example:
        >>> label_map = {0: "cat", 1: "dog", 2: "bird"}  # Should include every annotated class ID
        >>> visualize_image_annotations("path/to/image.jpg", "path/to/annotations.txt", label_map)
    """
    import matplotlib.pyplot as plt

    from ultralytics.utils.plotting import colors

    img = np.array(Image.open(image_path))
    img_height, img_width = img.shape[:2]
    annotations = []
    with open(txt_path) as file:
        for line in file:
            class_id, x_center, y_center, width, height = map(float, line.split())
            x = (x_center - width / 2) * img_width  # Convert normalized center to top-left corner in pixels
            y = (y_center - height / 2) * img_height
            w = width * img_width  # Convert normalized size to pixels
            h = height * img_height
            annotations.append((x, y, w, h, int(class_id)))
    fig, ax = plt.subplots(1)  # Plot the image and annotations
    for x, y, w, h, label in annotations:
        color = tuple(c / 255 for c in colors(label, True))  # Get and normalize the RGB color
        rect = plt.Rectangle((x, y), w, h, linewidth=2, edgecolor=color, facecolor="none")  # Create a rectangle
        ax.add_patch(rect)
        luminance = 0.2126 * color[0] + 0.7152 * color[1] + 0.0722 * color[2]  # Formula for luminance
        ax.text(x, y - 5, label_map[label], color="white" if luminance < 0.5 else "black", backgroundcolor=color)
    ax.imshow(img)
    plt.show()
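
# Worked example with hypothetical numbers: a label row "0 0.50 0.40 0.20 0.30" on a 640x480 image
# maps to x = (0.50 - 0.20 / 2) * 640 = 256.0, y = (0.40 - 0.30 / 2) * 480 = 120.0,
# w = 0.20 * 640 = 128.0, h = 0.30 * 480 = 144.0. A normalized box color of (0.1, 0.2, 0.8) has
# luminance 0.2126 * 0.1 + 0.7152 * 0.2 + 0.0722 * 0.8 ≈ 0.22 < 0.5, so its label text is drawn in white.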


def polygon2mask(imgsz, polygons, color=1, downsample_ratio=1):
    """
    Convert a list of polygons to a binary mask of the specified image size.