ultralytics 8.0.197 save P, R, F1 curves to metrics (#5354)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: erminkev1 <83356055+erminkev1@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Andy <39454881+yermandy@users.noreply.github.com>

parent 7fd5dcbd86 · commit 12e3eef844 · 33 changed files with 337 additions and 195 deletions
@@ -38,7 +38,7 @@ def on_pretrain_routine_end(trainer):
        if not active_run:
            active_run = mlflow.start_run(experiment_id=experiment.experiment_id, run_name=run_name)
        LOGGER.info(f'{prefix}Using run_id({active_run.info.run_id}) at {mlflow_location}')
-       run.log_params(vars(trainer.model.args))
+       run.log_params(trainer.args)
    except Exception as err:
        LOGGER.error(f'{prefix}Failing init - {repr(err)}')
        LOGGER.warning(f'{prefix}Continuing without Mlflow')
@@ -8,7 +8,10 @@ try:
    assert SETTINGS['wandb'] is True  # verify integration is enabled
    import wandb as wb

-   assert hasattr(wb, '__version__')
+   assert hasattr(wb, '__version__')  # verify package is not directory

    import numpy as np
    import pandas as pd

    _processed_plots = {}

@@ -16,6 +19,83 @@ except (ImportError, AssertionError):
    wb = None


def _custom_table(x, y, classes, title='Precision Recall Curve', x_axis_title='Recall', y_axis_title='Precision'):
    """
    Create and log a custom metric visualization to wandb.plot.pr_curve.

    This function crafts a custom metric visualization that mimics the behavior of wandb's default precision-recall
    curve while allowing for enhanced customization. The visual metric is useful for monitoring model performance
    across different classes.

    Args:
        x (List): Values for the x-axis; expected to have length N.
        y (List): Corresponding values for the y-axis; also expected to have length N.
        classes (List): Labels identifying the class of each point; length N.
        title (str, optional): Title for the plot; defaults to 'Precision Recall Curve'.
        x_axis_title (str, optional): Label for the x-axis; defaults to 'Recall'.
        y_axis_title (str, optional): Label for the y-axis; defaults to 'Precision'.

    Returns:
        (wandb.Object): A wandb object suitable for logging, showcasing the crafted metric visualization.
    """
    df = pd.DataFrame({'class': classes, 'y': y, 'x': x}).round(3)
    fields = {'x': 'x', 'y': 'y', 'class': 'class'}
    string_fields = {'title': title, 'x-axis-title': x_axis_title, 'y-axis-title': y_axis_title}
    return wb.plot_table('wandb/area-under-curve/v0',
                         wb.Table(dataframe=df),
                         fields=fields,
                         string_fields=string_fields)


def _plot_curve(x,
                y,
                names=None,
                id='precision-recall',
                title='Precision Recall Curve',
                x_title='Recall',
                y_title='Precision',
                num_x=100,
                only_mean=False):
    """
    Log a metric curve visualization.

    This function generates a metric curve based on input data and logs the visualization to wandb.
    The curve can represent aggregated data (mean) or individual class data, depending on the 'only_mean' flag.

    Args:
        x (np.ndarray): Data points for the x-axis with length N.
        y (np.ndarray): Corresponding data points for the y-axis with shape CxN, where C is the number of classes.
        names (list, optional): Names of the classes corresponding to the y-axis data; length C. Defaults to an empty list.
        id (str, optional): Unique identifier for the logged data in wandb. Defaults to 'precision-recall'.
        title (str, optional): Title for the visualization plot. Defaults to 'Precision Recall Curve'.
        x_title (str, optional): Label for the x-axis. Defaults to 'Recall'.
        y_title (str, optional): Label for the y-axis. Defaults to 'Precision'.
        num_x (int, optional): Number of interpolated data points for visualization. Defaults to 100.
        only_mean (bool, optional): Flag to indicate if only the mean curve should be plotted. Defaults to False.

    Note:
        The function leverages the '_custom_table' function to generate the actual visualization.
    """
    # Create new x
    if names is None:
        names = []
    x_new = np.linspace(x[0], x[-1], num_x).round(5)

    # Create arrays for logging
    x_log = x_new.tolist()
    y_log = np.interp(x_new, x, np.mean(y, axis=0)).round(3).tolist()

    if only_mean:
        table = wb.Table(data=list(zip(x_log, y_log)), columns=[x_title, y_title])
        wb.run.log({title: wb.plot.line(table, x_title, y_title, title=title)})
    else:
        classes = ['mean'] * len(x_log)
        for i, yi in enumerate(y):
            x_log.extend(x_new)  # add new x
            y_log.extend(np.interp(x_new, x, yi))  # interpolate y to new x
            classes.extend([names[i]] * len(x_new))  # add class names
        wb.log({id: _custom_table(x_log, y_log, classes, title, x_title, y_title)}, commit=False)


def _log_plots(plots, step):
    """Logs plots from the input dictionary if they haven't been logged already at the specified step."""
    for name, params in plots.items():
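The resampling at the core of `_plot_curve` can be hard to visualize from the diff alone. A minimal standalone sketch of that interpolation step, using made-up toy curves in place of real validator output:

```python
import numpy as np

# Toy stand-ins for one x vector and C=2 per-class y curves (shape CxN)
x = np.array([0.0, 0.5, 1.0])
y = np.array([[1.0, 0.8, 0.2],
              [0.9, 0.6, 0.1]])

x_new = np.linspace(x[0], x[-1], 5).round(5)           # common grid (num_x=5 here)
y_mean = np.interp(x_new, x, y.mean(axis=0)).round(3)  # the 'mean' series
per_class = [np.interp(x_new, x, yi) for yi in y]      # one series per class
print(x_new, y_mean, per_class, sep='\n')
```

Every class (plus the mean) then shares the same x grid, which is what lets `_custom_table` pack them into a single long-format wandb table.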
@@ -55,6 +135,17 @@ def on_train_end(trainer):
    if trainer.best.exists():
        art.add_file(trainer.best)
        wb.run.log_artifact(art, aliases=['best'])
    for curve_name, curve_values in zip(trainer.validator.metrics.curves, trainer.validator.metrics.curves_results):
        x, y, x_title, y_title = curve_values
        _plot_curve(
            x,
            y,
            names=list(trainer.validator.metrics.names.values()),
            id=f'curves/{curve_name}',
            title=curve_name,
            x_title=x_title,
            y_title=y_title,
        )
    wb.run.finish()  # required or run continues on dashboard
@@ -165,16 +165,16 @@ def check_version(current: str = '0.0.0',

    Example:
        ```python
-       # check if current version is exactly 22.04
+       # Check if current version is exactly 22.04
        check_version(current='22.04', required='==22.04')

-       # check if current version is greater than or equal to 22.04
+       # Check if current version is greater than or equal to 22.04
        check_version(current='22.10', required='22.04')  # assumes '>=' inequality if none passed

-       # check if current version is less than or equal to 22.04
+       # Check if current version is less than or equal to 22.04
        check_version(current='22.04', required='<=22.04')

-       # check if current version is between 20.04 (inclusive) and 22.04 (exclusive)
+       # Check if current version is between 20.04 (inclusive) and 22.04 (exclusive)
        check_version(current='21.10', required='>20.04,<22.04')
        ```
    """
@@ -45,7 +45,7 @@ def spaces_in_path(path):
        from ultralytics.utils.files import spaces_in_path

        with spaces_in_path('/path/with spaces') as new_path:
-           # your code here
+           # Your code here
        ```
    """
@@ -219,7 +219,7 @@ class Instances:
        self.normalized = normalized

        if len(segments) > 0:
-           # list[np.array(1000, 2)] * num_samples
+           # List[np.array(1000, 2)] * num_samples
            segments = resample_segments(segments)
            # (N, 1000, 2)
            segments = np.stack(segments, axis=0)
@@ -176,13 +176,13 @@ class v8DetectionLoss:
        imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0]  # image size (h,w)
        anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)

-       # targets
+       # Targets
        targets = torch.cat((batch['batch_idx'].view(-1, 1), batch['cls'].view(-1, 1), batch['bboxes']), 1)
        targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
        gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
        mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)

-       # pboxes
+       # Pboxes
        pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)

        _, target_bboxes, target_scores, fg_mask, _ = self.assigner(
@@ -191,11 +191,11 @@ class v8DetectionLoss:

        target_scores_sum = max(target_scores.sum(), 1)

-       # cls loss
+       # Cls loss
        # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum  # VFL way
        loss[1] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum  # BCE

-       # bbox loss
+       # Bbox loss
        if fg_mask.sum():
            target_bboxes /= stride_tensor
            loss[0], loss[2] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores,
@@ -224,7 +224,7 @@ class v8SegmentationLoss(v8DetectionLoss):
        pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
            (self.reg_max * 4, self.nc), 1)

-       # b, grids, ..
+       # B, grids, ..
        pred_scores = pred_scores.permute(0, 2, 1).contiguous()
        pred_distri = pred_distri.permute(0, 2, 1).contiguous()
        pred_masks = pred_masks.permute(0, 2, 1).contiguous()
@@ -233,7 +233,7 @@ class v8SegmentationLoss(v8DetectionLoss):
        imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0]  # image size (h,w)
        anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)

-       # targets
+       # Targets
        try:
            batch_idx = batch['batch_idx'].view(-1, 1)
            targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1)
@@ -247,7 +247,7 @@ class v8SegmentationLoss(v8DetectionLoss):
                "correctly formatted 'segment' dataset using 'data=coco128-seg.yaml' "
                'as an example.\nSee https://docs.ultralytics.com/tasks/segment/ for help.') from e

-       # pboxes
+       # Pboxes
        pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)

        _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner(
@@ -256,15 +256,15 @@ class v8SegmentationLoss(v8DetectionLoss):

        target_scores_sum = max(target_scores.sum(), 1)

-       # cls loss
+       # Cls loss
        # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum  # VFL way
        loss[2] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum  # BCE

        if fg_mask.sum():
-           # bbox loss
+           # Bbox loss
            loss[0], loss[3] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes / stride_tensor,
                                              target_scores, target_scores_sum, fg_mask)
-           # masks loss
+           # Masks loss
            masks = batch['masks'].to(self.device).float()
            if tuple(masks.shape[-2:]) != (mask_h, mask_w):  # downsample
                masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0]
@@ -344,13 +344,13 @@ class v8SegmentationLoss(v8DetectionLoss):
        _, _, mask_h, mask_w = proto.shape
        loss = 0

-       # normalize to 0-1
+       # Normalize to 0-1
        target_bboxes_normalized = target_bboxes / imgsz[[1, 0, 1, 0]]

-       # areas of target bboxes
+       # Areas of target bboxes
        marea = xyxy2xywh(target_bboxes_normalized)[..., 2:].prod(2)

-       # normalize to mask size
+       # Normalize to mask size
        mxyxy = target_bboxes_normalized * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=proto.device)

        for i, single_i in enumerate(zip(fg_mask, target_gt_idx, pred_masks, proto, mxyxy, marea, masks)):
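The three normalizations above move ground-truth boxes from pixel space into the mask grid. A toy version with one box, assuming a square 640-pixel image and a 160x160 prototype mask:

```python
import torch
from ultralytics.utils.ops import xyxy2xywh  # same helper used above

target_bboxes = torch.tensor([[100.0, 150.0, 300.0, 450.0]])  # xyxy, pixels
imgsz_whwh = torch.tensor([640.0, 640.0, 640.0, 640.0])       # stands in for imgsz[[1, 0, 1, 0]]
mask_h, mask_w = 160, 160

normalized = target_bboxes / imgsz_whwh                 # 0-1 coordinates
marea = xyxy2xywh(normalized)[..., 2:].prod(-1)         # normalized box areas (w * h)
mxyxy = normalized * torch.tensor([mask_w, mask_h, mask_w, mask_h])  # mask-grid xyxy
print(marea, mxyxy, sep='\n')
```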
@@ -393,7 +393,7 @@ class v8PoseLoss(v8DetectionLoss):
        pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
            (self.reg_max * 4, self.nc), 1)

-       # b, grids, ..
+       # B, grids, ..
        pred_scores = pred_scores.permute(0, 2, 1).contiguous()
        pred_distri = pred_distri.permute(0, 2, 1).contiguous()
        pred_kpts = pred_kpts.permute(0, 2, 1).contiguous()
@@ -402,7 +402,7 @@ class v8PoseLoss(v8DetectionLoss):
        imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0]  # image size (h,w)
        anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5)

-       # targets
+       # Targets
        batch_size = pred_scores.shape[0]
        batch_idx = batch['batch_idx'].view(-1, 1)
        targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes']), 1)
@@ -410,7 +410,7 @@ class v8PoseLoss(v8DetectionLoss):
        gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
        mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0)

-       # pboxes
+       # Pboxes
        pred_bboxes = self.bbox_decode(anchor_points, pred_distri)  # xyxy, (b, h*w, 4)
        pred_kpts = self.kpts_decode(anchor_points, pred_kpts.view(batch_size, -1, *self.kpt_shape))  # (b, h*w, 17, 3)

@@ -420,11 +420,11 @@ class v8PoseLoss(v8DetectionLoss):

        target_scores_sum = max(target_scores.sum(), 1)

-       # cls loss
+       # Cls loss
        # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum  # VFL way
        loss[3] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum  # BCE

-       # bbox loss
+       # Bbox loss
        if fg_mask.sum():
            target_bboxes /= stride_tensor
            loss[0], loss[4] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores,
@@ -36,7 +36,7 @@ def bbox_ioa(box1, box2, iou=False, eps=1e-7):
    inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \
                 (np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0)

-   # box2 area
+   # Box2 area
    area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)
    if iou:
        box1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1)
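A quick numeric check of the intersection-over-area computation above, with two overlapping boxes (4 px² of overlap against a 16 px² box2):

```python
import numpy as np

b1 = np.array([[0, 0, 4, 4]], dtype=float)  # box1, xyxy
b2 = np.array([[2, 2, 6, 6]], dtype=float)  # box2, xyxy
(b1_x1, b1_y1, b1_x2, b1_y2), (b2_x1, b2_y1, b2_x2, b2_y2) = b1.T, b2.T

inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \
             (np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0)
area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)  # box2 area
print(inter_area / area)  # [[0.25]]
```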
@@ -440,13 +440,18 @@ def ap_per_class(tp,

    Returns:
-       (tuple): A tuple of six arrays and one array of unique classes, where:
-           tp (np.ndarray): True positive counts for each class.
-           fp (np.ndarray): False positive counts for each class.
-           p (np.ndarray): Precision values at each confidence threshold.
-           r (np.ndarray): Recall values at each confidence threshold.
-           f1 (np.ndarray): F1-score values at each confidence threshold.
-           ap (np.ndarray): Average precision for each class at different IoU thresholds.
-           unique_classes (np.ndarray): An array of unique classes that have data.
+       (tuple): A tuple of twelve arrays, where:
+           tp (np.ndarray): True positive counts at threshold given by max F1 metric for each class. Shape: (nc,).
+           fp (np.ndarray): False positive counts at threshold given by max F1 metric for each class. Shape: (nc,).
+           p (np.ndarray): Precision values at threshold given by max F1 metric for each class. Shape: (nc,).
+           r (np.ndarray): Recall values at threshold given by max F1 metric for each class. Shape: (nc,).
+           f1 (np.ndarray): F1-score values at threshold given by max F1 metric for each class. Shape: (nc,).
+           ap (np.ndarray): Average precision for each class at different IoU thresholds. Shape: (nc, 10).
+           unique_classes (np.ndarray): An array of unique classes that have data. Shape: (nc,).
+           p_curve (np.ndarray): Precision curves for each class. Shape: (nc, 1000).
+           r_curve (np.ndarray): Recall curves for each class. Shape: (nc, 1000).
+           f1_curve (np.ndarray): F1-score curves for each class. Shape: (nc, 1000).
+           x (np.ndarray): X-axis values for the curves. Shape: (1000,).
+           prec_values (np.ndarray): Precision values at mAP@0.5 for each class. Shape: (nc, 1000).
    """

    # Sort by objectness
@@ -458,8 +463,10 @@ def ap_per_class(tp,
    nc = unique_classes.shape[0]  # number of classes, number of detections

    # Create Precision-Recall curve and compute AP for each class
-   px, py = np.linspace(0, 1, 1000), []  # for plotting
-   ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
+   x, prec_values = np.linspace(0, 1, 1000), []
+
+   # Average precision, precision and recall curves
+   ap, p_curve, r_curve = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000))
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c
        n_l = nt[ci]  # number of labels
@@ -473,33 +480,35 @@ def ap_per_class(tp,

        # Recall
        recall = tpc / (n_l + eps)  # recall curve
-       r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases
+       r_curve[ci] = np.interp(-x, -conf[i], recall[:, 0], left=0)  # negative x, xp because xp decreases

        # Precision
        precision = tpc / (tpc + fpc)  # precision curve
-       p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1)  # p at pr_score
+       p_curve[ci] = np.interp(-x, -conf[i], precision[:, 0], left=1)  # p at pr_score

        # AP from recall-precision curve
        for j in range(tp.shape[1]):
            ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
            if plot and j == 0:
-               py.append(np.interp(px, mrec, mpre))  # precision at mAP@0.5
+               prec_values.append(np.interp(x, mrec, mpre))  # precision at mAP@0.5

+   prec_values = np.array(prec_values)  # (nc, 1000)

    # Compute F1 (harmonic mean of precision and recall)
-   f1 = 2 * p * r / (p + r + eps)
+   f1_curve = 2 * p_curve * r_curve / (p_curve + r_curve + eps)
    names = [v for k, v in names.items() if k in unique_classes]  # list: only classes that have data
    names = dict(enumerate(names))  # to dict
    if plot:
-       plot_pr_curve(px, py, ap, save_dir / f'{prefix}PR_curve.png', names, on_plot=on_plot)
-       plot_mc_curve(px, f1, save_dir / f'{prefix}F1_curve.png', names, ylabel='F1', on_plot=on_plot)
-       plot_mc_curve(px, p, save_dir / f'{prefix}P_curve.png', names, ylabel='Precision', on_plot=on_plot)
-       plot_mc_curve(px, r, save_dir / f'{prefix}R_curve.png', names, ylabel='Recall', on_plot=on_plot)
+       plot_pr_curve(x, prec_values, ap, save_dir / f'{prefix}PR_curve.png', names, on_plot=on_plot)
+       plot_mc_curve(x, f1_curve, save_dir / f'{prefix}F1_curve.png', names, ylabel='F1', on_plot=on_plot)
+       plot_mc_curve(x, p_curve, save_dir / f'{prefix}P_curve.png', names, ylabel='Precision', on_plot=on_plot)
+       plot_mc_curve(x, r_curve, save_dir / f'{prefix}R_curve.png', names, ylabel='Recall', on_plot=on_plot)

-   i = smooth(f1.mean(0), 0.1).argmax()  # max F1 index
-   p, r, f1 = p[:, i], r[:, i], f1[:, i]
+   i = smooth(f1_curve.mean(0), 0.1).argmax()  # max F1 index
+   p, r, f1 = p_curve[:, i], r_curve[:, i], f1_curve[:, i]  # max-F1 precision, recall, F1 values
    tp = (r * nt).round()  # true positives
    fp = (tp / (p + eps) - tp).round()  # false positives
-   return tp, fp, p, r, f1, ap, unique_classes.astype(int)
+   return tp, fp, p, r, f1, ap, unique_classes.astype(int), p_curve, r_curve, f1_curve, x, prec_values


class Metric(SimpleClass):
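Since `ap_per_class` now returns twelve values instead of seven, downstream callers unpack accordingly; the max-F1 selection at the end reduces each curve to a single per-class value. A sketch of that reduction with random stand-in curves (the real code smooths the mean F1 first):

```python
import numpy as np

eps = 1e-16
nc = 3  # number of classes
p_curve = np.random.rand(nc, 1000)  # stand-ins for the (nc, 1000) curves above
r_curve = np.random.rand(nc, 1000)
f1_curve = 2 * p_curve * r_curve / (p_curve + r_curve + eps)

i = f1_curve.mean(0).argmax()  # confidence index of max mean F1
p, r, f1 = p_curve[:, i], r_curve[:, i], f1_curve[:, i]  # per-class values at that threshold
print(i, p, r, f1)
```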
@@ -645,7 +654,19 @@ class Metric(SimpleClass):
        Updates the class attributes `self.p`, `self.r`, `self.f1`, `self.all_ap`, and `self.ap_class_index` based
        on the values provided in the `results` tuple.
        """
-       self.p, self.r, self.f1, self.all_ap, self.ap_class_index = results
+       (self.p, self.r, self.f1, self.all_ap, self.ap_class_index, self.p_curve, self.r_curve, self.f1_curve,
+        self.px, self.prec_values) = results

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return []

    @property
    def curves_results(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return [[self.px, self.prec_values, 'Recall', 'Precision'], [self.px, self.f1_curve, 'Confidence', 'F1'],
                [self.px, self.p_curve, 'Confidence', 'Precision'], [self.px, self.r_curve, 'Confidence', 'Recall']]


class DetMetrics(SimpleClass):
@@ -676,6 +697,8 @@ class DetMetrics(SimpleClass):
        fitness: Computes the fitness score based on the computed detection metrics.
        ap_class_index: Returns a list of class indices sorted by their average precision (AP) values.
        results_dict: Returns a dictionary that maps detection metric keys to their computed values.
        curves: Returns a list of curve names for accessing specific metrics curves.
        curves_results: Returns the computed metric curves, each as an [x, y, x_title, y_title] list.
    """

    def __init__(self, save_dir=Path('.'), plot=False, on_plot=None, names=()) -> None:
@@ -686,6 +709,7 @@ class DetMetrics(SimpleClass):
        self.names = names
        self.box = Metric()
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}
        self.task = 'detect'

    def process(self, tp, conf, pred_cls, target_cls):
        """Process predicted results for object detection and update metrics."""
@@ -733,6 +757,16 @@ class DetMetrics(SimpleClass):
        """Returns dictionary of computed performance metrics and statistics."""
        return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness]))

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return ['Precision-Recall(B)', 'F1-Confidence(B)', 'Precision-Confidence(B)', 'Recall-Confidence(B)']

    @property
    def curves_results(self):
        """Returns a list of computed metric curves."""
        return self.box.curves_results


class SegmentMetrics(SimpleClass):
    """
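With these properties in place, the curve data saved to metrics becomes directly accessible after validation. A hypothetical usage sketch (model and dataset names are illustrative):

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')
metrics = model.val(data='coco128.yaml')  # returns a DetMetrics instance

# Each curve name pairs with an [x, y, x_title, y_title] entry
for name, (x, y, x_title, y_title) in zip(metrics.curves, metrics.curves_results):
    print(f'{name}: {x_title} vs {y_title}, y shape {y.shape}')
```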
@@ -772,6 +806,7 @@ class SegmentMetrics(SimpleClass):
        self.box = Metric()
        self.seg = Metric()
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}
        self.task = 'segment'

    def process(self, tp_b, tp_m, conf, pred_cls, target_cls):
        """
@@ -843,6 +878,18 @@ class SegmentMetrics(SimpleClass):
        """Returns results of object detection model for evaluation."""
        return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness]))

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return [
            'Precision-Recall(B)', 'F1-Confidence(B)', 'Precision-Confidence(B)', 'Recall-Confidence(B)',
            'Precision-Recall(M)', 'F1-Confidence(M)', 'Precision-Confidence(M)', 'Recall-Confidence(M)']

    @property
    def curves_results(self):
        """Returns a list of computed metric curves."""
        return self.box.curves_results + self.seg.curves_results


class PoseMetrics(SegmentMetrics):
    """
@@ -883,6 +930,7 @@ class PoseMetrics(SegmentMetrics):
        self.box = Metric()
        self.pose = Metric()
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}
        self.task = 'pose'

    def process(self, tp_b, tp_p, conf, pred_cls, target_cls):
        """
@@ -944,6 +992,18 @@ class PoseMetrics(SegmentMetrics):
        """Returns the combined fitness of the pose and box metrics."""
        return self.pose.fitness() + self.box.fitness()

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return [
            'Precision-Recall(B)', 'F1-Confidence(B)', 'Precision-Confidence(B)', 'Recall-Confidence(B)',
            'Precision-Recall(P)', 'F1-Confidence(P)', 'Precision-Confidence(P)', 'Recall-Confidence(P)']

    @property
    def curves_results(self):
        """Returns a list of computed metric curves."""
        return self.box.curves_results + self.pose.curves_results


class ClassifyMetrics(SimpleClass):
    """
@@ -968,6 +1028,7 @@ class ClassifyMetrics(SimpleClass):
        self.top1 = 0
        self.top5 = 0
        self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0}
        self.task = 'classify'

    def process(self, targets, pred):
        """Processes target classes and predicted classes to compute top-1 and top-5 accuracy."""
@@ -990,3 +1051,13 @@ class ClassifyMetrics(SimpleClass):
    def keys(self):
        """Returns a list of keys for the results_dict property."""
        return ['metrics/accuracy_top1', 'metrics/accuracy_top5']

    @property
    def curves(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return []

    @property
    def curves_results(self):
        """Returns a list of curves for accessing specific metrics curves."""
        return []
@@ -193,7 +193,7 @@ class TaskAlignedAssigner(nn.Module):
            # Expand topk_idxs for each value of k and add 1 at the specified positions
            count_tensor.scatter_add_(-1, topk_idxs[:, :, k:k + 1], ones)
        # count_tensor.scatter_add_(-1, topk_idxs, torch.ones_like(topk_idxs, dtype=torch.int8, device=topk_idxs.device))
-       # filter invalid bboxes
+       # Filter invalid bboxes
        count_tensor.masked_fill_(count_tensor > 1, 0)

        return count_tensor.to(metrics.dtype)
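The counting trick above deduplicates anchor selections: `scatter_add_` tallies how often each anchor index appears in `topk_idxs`, and any count above 1 (a repeated index from top-k padding) is zeroed out as invalid. A toy illustration with 4 anchors:

```python
import torch

topk_idxs = torch.tensor([[[0, 2], [2, 2]]])  # (b=1, n_boxes=2, topk=2) anchor ids
count_tensor = torch.zeros(1, 2, 4, dtype=torch.int8)  # 4 anchors total
ones = torch.ones_like(topk_idxs, dtype=torch.int8)

count_tensor.scatter_add_(-1, topk_idxs, ones)
count_tensor.masked_fill_(count_tensor > 1, 0)  # drop duplicated selections
print(count_tensor)  # box 0 keeps anchors 0 and 2; box 1's repeated anchor 2 is zeroed
```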
@@ -45,6 +45,7 @@ def run_ray_tune(model,
    try:
        subprocess.run('pip install ray[tune]'.split(), check=True)

        import ray
        from ray import tune
        from ray.air import RunConfig
        from ray.air.integrations.wandb import WandbLoggerCallback
@@ -83,6 +84,10 @@ def run_ray_tune(model,
        'mixup': tune.uniform(0.0, 1.0),  # image mixup (probability)
        'copy_paste': tune.uniform(0.0, 1.0)}  # segment copy-paste (probability)

    # Put the model in ray store
    task = model.task
    model_in_store = ray.put(model)

    def _tune(config):
        """
        Trains the YOLO model with the specified hyperparameters and additional arguments.
@@ -93,9 +98,10 @@ def run_ray_tune(model,
        Returns:
            None.
        """
-       model.reset_callbacks()
+       model_to_train = ray.get(model_in_store)  # get the model from ray store for tuning
+       model_to_train.reset_callbacks()
        config.update(train_args)
-       results = model.train(**config)
+       results = model_to_train.train(**config)
        return results.results_dict

    # Get search space
@@ -104,7 +110,7 @@ def run_ray_tune(model,
        LOGGER.warning('WARNING ⚠️ search space not provided, using default search space.')

    # Get dataset
-   data = train_args.get('data', TASK2DATA[model.task])
+   data = train_args.get('data', TASK2DATA[task])
    space['data'] = data
    if 'data' not in train_args:
        LOGGER.warning(f'WARNING ⚠️ data not provided, using default "data={data}".')
@@ -114,7 +120,7 @@ def run_ray_tune(model,

    # Define the ASHA scheduler for hyperparameter search
    asha_scheduler = ASHAScheduler(time_attr='epoch',
-                                  metric=TASK2METRIC[model.task],
+                                  metric=TASK2METRIC[task],
                                   mode='max',
                                   max_t=train_args.get('epochs') or DEFAULT_CFG_DICT['epochs'] or 100,
                                   grace_period=grace_period,