Add docformatter to pre-commit (#5279)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Burhan <62214284+Burhan-Q@users.noreply.github.com>
This commit is contained in:
parent c7aa83da31
commit 7517667a33
90 changed files with 1396 additions and 497 deletions
@@ -9,11 +9,12 @@ from ultralytics.utils import ASSETS, yaml_load
 from ultralytics.utils.checks import check_requirements, check_yaml
 
 
-class Yolov8:
+class YOLOv8:
+    """YOLOv8 object detection model class for handling inference and visualization."""
 
     def __init__(self, onnx_model, input_image, confidence_thres, iou_thres):
         """
-        Initializes an instance of the Yolov8 class.
+        Initializes an instance of the YOLOv8 class.
 
         Args:
             onnx_model: Path to the ONNX model.

@@ -213,8 +214,8 @@ if __name__ == '__main__':
     # Check the requirements and select the appropriate backend (CPU or GPU)
     check_requirements('onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime')
 
-    # Create an instance of the Yolov8 class with the specified arguments
-    detection = Yolov8(args.model, args.img, args.conf_thres, args.iou_thres)
+    # Create an instance of the YOLOv8 class with the specified arguments
+    detection = YOLOv8(args.model, args.img, args.conf_thres, args.iou_thres)
 
     # Perform object detection and obtain the output image
     output_image = detection.main()
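The rename is purely cosmetic; the constructor signature and the main() entry point are unchanged. As a rough usage sketch, assuming it runs inside the example script where YOLOv8 is defined (the paths and threshold values here are illustrative, not from the diff):

    import torch

    from ultralytics.utils.checks import check_requirements

    # Pick an ONNX Runtime backend that matches the available hardware, as the example does
    check_requirements('onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime')

    # Instantiate the renamed class: YOLOv8(onnx_model, input_image, confidence_thres, iou_thres)
    detection = YOLOv8('yolov8n.onnx', 'bus.jpg', confidence_thres=0.5, iou_thres=0.5)

    # Run inference and get the annotated output image
    output_image = detection.main()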
@@ -7,11 +7,22 @@ from ultralytics.utils import ASSETS, yaml_load
 from ultralytics.utils.checks import check_yaml
 
 CLASSES = yaml_load(check_yaml('coco128.yaml'))['names']
 
 colors = np.random.uniform(0, 255, size=(len(CLASSES), 3))
 
 
 def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
+    """
+    Draws bounding boxes on the input image based on the provided arguments.
+
+    Args:
+        img (numpy.ndarray): The input image to draw the bounding box on.
+        class_id (int): Class ID of the detected object.
+        confidence (float): Confidence score of the detected object.
+        x (int): X-coordinate of the top-left corner of the bounding box.
+        y (int): Y-coordinate of the top-left corner of the bounding box.
+        x_plus_w (int): X-coordinate of the bottom-right corner of the bounding box.
+        y_plus_h (int): Y-coordinate of the bottom-right corner of the bounding box.
+    """
     label = f'{CLASSES[class_id]} ({confidence:.2f})'
     color = colors[class_id]
     cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2)
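Note that the new docstring documents the coordinates as two corner points, (x, y) and (x_plus_w, y_plus_h), rather than a corner plus width/height. A hypothetical call, with made-up values, just to show the convention:

    # Box for class 0 at 91% confidence, spanning (50, 40) to (200, 220)
    draw_bounding_box(img, class_id=0, confidence=0.91, x=50, y=40, x_plus_w=200, y_plus_h=220)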
@@ -19,18 +30,39 @@ def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h):
 
 
 def main(onnx_model, input_image):
+    """
+    Main function to load ONNX model, perform inference, draw bounding boxes, and display the output image.
+
+    Args:
+        onnx_model (str): Path to the ONNX model.
+        input_image (str): Path to the input image.
+
+    Returns:
+        list: List of dictionaries containing detection information such as class_id, class_name, confidence, etc.
+    """
+    # Load the ONNX model
     model: cv2.dnn.Net = cv2.dnn.readNetFromONNX(onnx_model)
 
+    # Read the input image
     original_image: np.ndarray = cv2.imread(input_image)
     [height, width, _] = original_image.shape
 
+    # Prepare a square image for inference
     length = max((height, width))
     image = np.zeros((length, length, 3), np.uint8)
     image[0:height, 0:width] = original_image
 
+    # Calculate scale factor
     scale = length / 640
 
+    # Preprocess the image and prepare blob for model
     blob = cv2.dnn.blobFromImage(image, scalefactor=1 / 255, size=(640, 640), swapRB=True)
     model.setInput(blob)
 
+    # Perform inference
     outputs = model.forward()
 
+    # Prepare output array
     outputs = np.array([cv2.transpose(outputs[0])])
     rows = outputs.shape[1]
 
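The newly commented preprocessing is worth spelling out: the image is padded bottom/right to a square before the fixed 640x640 blob is built, so the aspect ratio survives resizing, and a single factor scale = length / 640 later maps box coordinates back to the original image. A self-contained sketch of just that step (no model needed; the image path is illustrative):

    import cv2
    import numpy as np

    original_image = cv2.imread('bus.jpg')
    height, width, _ = original_image.shape

    # Pad to a square canvas so resizing to 640x640 keeps the aspect ratio
    length = max(height, width)
    image = np.zeros((length, length, 3), np.uint8)
    image[0:height, 0:width] = original_image

    # One factor maps 640-space coordinates back to the original image
    scale = length / 640

    blob = cv2.dnn.blobFromImage(image, scalefactor=1 / 255, size=(640, 640), swapRB=True)
    print(blob.shape)  # (1, 3, 640, 640): NCHW, scaled to [0, 1], BGR -> RGB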
@@ -38,6 +70,7 @@ def main(onnx_model, input_image):
     scores = []
     class_ids = []
 
+    # Iterate through output to collect bounding boxes, confidence scores, and class IDs
     for i in range(rows):
         classes_scores = outputs[0][i][4:]
         (minScore, maxScore, minClassLoc, (x, maxClassIndex)) = cv2.minMaxLoc(classes_scores)

@@ -49,9 +82,12 @@ def main(onnx_model, input_image):
             scores.append(maxScore)
             class_ids.append(maxClassIndex)
 
+    # Apply NMS (Non-maximum suppression)
     result_boxes = cv2.dnn.NMSBoxes(boxes, scores, 0.25, 0.45, 0.5)
 
     detections = []
+
+    # Iterate through NMS results to draw bounding boxes and labels
     for i in range(len(result_boxes)):
         index = result_boxes[i]
         box = boxes[index]
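For reference on the NMS call above: cv2.dnn.NMSBoxes takes boxes as (x, y, w, h) with matching scores and returns the indices of the boxes that survive suppression; the positional arguments 0.25, 0.45 and 0.5 are the score threshold, the NMS IoU threshold and the optional eta parameter. A tiny standalone sketch with made-up boxes:

    import cv2

    # Two heavily overlapping boxes and one separate box, as (x, y, w, h)
    boxes = [[10, 10, 100, 100], [12, 12, 100, 100], [300, 300, 80, 80]]
    scores = [0.9, 0.6, 0.8]

    # Keep boxes scoring >= 0.25; suppress overlaps above the (eta-adapted) 0.45 IoU threshold
    kept = cv2.dnn.NMSBoxes(boxes, scores, 0.25, 0.45, 0.5)
    print(kept)  # indices of the surviving boxes, here the 0.9 and 0.8 detections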
@@ -65,6 +101,7 @@ def main(onnx_model, input_image):
         draw_bounding_box(original_image, class_ids[index], scores[index], round(box[0] * scale), round(box[1] * scale),
                           round((box[0] + box[2]) * scale), round((box[1] + box[3]) * scale))
 
+    # Display the image with bounding boxes
     cv2.imshow('image', original_image)
     cv2.waitKey(0)
     cv2.destroyAllWindows()

@@ -74,7 +111,7 @@ def main(onnx_model, input_image):
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--model', default='yolov8n.onnx', help='Input your onnx model.')
+    parser.add_argument('--model', default='yolov8n.onnx', help='Input your ONNX model.')
     parser.add_argument('--img', default=str(ASSETS / 'bus.jpg'), help='Path to input image.')
     args = parser.parse_args()
     main(args.model, args.img)
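The help-string fix does not change how the script is invoked. Typical usage (the main.py filename and the paths are assumptions):

    python main.py --model yolov8n.onnx --img bus.jpg

or called directly where main() is defined, since the returned keys follow its new docstring:

    # Note: main() also opens a preview window (cv2.imshow) before returning
    detections = main('yolov8n.onnx', 'bus.jpg')
    print(detections[0]['class_name'], detections[0]['confidence'])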
@@ -33,10 +33,6 @@ counting_regions = [
     }, ]
 
 
-def is_inside_polygon(point, polygon):
-    return polygon.contains(Point(point))
-
-
 def mouse_callback(event, x, y, flags, param):
     """Mouse call back event."""
     global current_region

@@ -44,7 +40,7 @@ def mouse_callback(event, x, y, flags, param):
     # Mouse left button down event
     if event == cv2.EVENT_LBUTTONDOWN:
         for region in counting_regions:
-            if is_inside_polygon((x, y), region['polygon']):
+            if region['polygon'].contains(Point((x, y))):
                 current_region = region
                 current_region['dragging'] = True
                 current_region['offset_x'] = x

@@ -150,7 +146,7 @@ def run(
 
         # Check if detection inside region
         for region in counting_regions:
-            if is_inside_polygon((x, y), region['polygon']):
+            if region['polygon'].contains(Point((x, y))):
                 region['counts'] += 1
 
         # Draw regions (Polygons/Rectangles)
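Dropping the one-line is_inside_polygon wrapper means both call sites now use Shapely directly; the behavior is identical. A minimal sketch of the inlined pattern (the toy region here stands in for an entry of counting_regions):

    from shapely.geometry import Point, Polygon

    region_polygon = Polygon([(0, 0), (100, 0), (100, 100), (0, 100)])

    x, y = 40, 60  # e.g. a detection's center point
    if region_polygon.contains(Point((x, y))):
        print('inside region')  # the example increments region['counts'] here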