ultralytics 8.0.239 Ultralytics Actions and hub-sdk adoption (#7431)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: Burhan <62214284+Burhan-Q@users.noreply.github.com>
Co-authored-by: Kayzwer <68285002+Kayzwer@users.noreply.github.com>
Parent commit: e795277391
This commit: fe27db2f6e
139 changed files with 6870 additions and 5125 deletions
@@ -4,4 +4,4 @@ from .model import RTDETR
 from .predict import RTDETRPredictor
 from .val import RTDETRValidator
 
-__all__ = 'RTDETRPredictor', 'RTDETRValidator', 'RTDETR'
+__all__ = "RTDETRPredictor", "RTDETRValidator", "RTDETR"
@@ -24,7 +24,7 @@ class RTDETR(Model):
         model (str): Path to the pre-trained model. Defaults to 'rtdetr-l.pt'.
     """
 
-    def __init__(self, model='rtdetr-l.pt') -> None:
+    def __init__(self, model="rtdetr-l.pt") -> None:
         """
         Initializes the RT-DETR model with the given pre-trained model file. Supports .pt and .yaml formats.
 
@@ -34,9 +34,9 @@ class RTDETR(Model):
         Raises:
             NotImplementedError: If the model file extension is not 'pt', 'yaml', or 'yml'.
         """
-        if model and model.split('.')[-1] not in ('pt', 'yaml', 'yml'):
-            raise NotImplementedError('RT-DETR only supports creating from *.pt, *.yaml, or *.yml files.')
-        super().__init__(model=model, task='detect')
+        if model and model.split(".")[-1] not in ("pt", "yaml", "yml"):
+            raise NotImplementedError("RT-DETR only supports creating from *.pt, *.yaml, or *.yml files.")
+        super().__init__(model=model, task="detect")
 
     @property
     def task_map(self) -> dict:
@@ -47,8 +47,10 @@ class RTDETR(Model):
             dict: A dictionary mapping task names to Ultralytics task classes for the RT-DETR model.
         """
         return {
-            'detect': {
-                'predictor': RTDETRPredictor,
-                'validator': RTDETRValidator,
-                'trainer': RTDETRTrainer,
-                'model': RTDETRDetectionModel}}
+            "detect": {
+                "predictor": RTDETRPredictor,
+                "validator": RTDETRValidator,
+                "trainer": RTDETRTrainer,
+                "model": RTDETRDetectionModel,
+            }
+        }
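For reference, a minimal usage sketch of the RTDETR class touched by these hunks, assuming the standard Ultralytics Model API; the weights file and image path are illustrative, not part of this commit:

from ultralytics import RTDETR

# Load a pretrained RT-DETR-l checkpoint (only .pt, .yaml, or .yml sources are accepted).
model = RTDETR("rtdetr-l.pt")

# task_map routes the 'detect' task to the RT-DETR predictor, validator, trainer, and model classes.
print(model.task_map["detect"])

# Run inference on an example image (path is illustrative).
results = model("bus.jpg")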
@@ -43,12 +43,12 @@ class RTDETRTrainer(DetectionTrainer):
         Returns:
             (RTDETRDetectionModel): Initialized model.
         """
-        model = RTDETRDetectionModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1)
+        model = RTDETRDetectionModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1)
         if weights:
             model.load(weights)
         return model
 
-    def build_dataset(self, img_path, mode='val', batch=None):
+    def build_dataset(self, img_path, mode="val", batch=None):
         """
         Build and return an RT-DETR dataset for training or validation.
 
@@ -60,15 +60,17 @@ class RTDETRTrainer(DetectionTrainer):
         Returns:
             (RTDETRDataset): Dataset object for the specific mode.
         """
-        return RTDETRDataset(img_path=img_path,
-                             imgsz=self.args.imgsz,
-                             batch_size=batch,
-                             augment=mode == 'train',
-                             hyp=self.args,
-                             rect=False,
-                             cache=self.args.cache or None,
-                             prefix=colorstr(f'{mode}: '),
-                             data=self.data)
+        return RTDETRDataset(
+            img_path=img_path,
+            imgsz=self.args.imgsz,
+            batch_size=batch,
+            augment=mode == "train",
+            hyp=self.args,
+            rect=False,
+            cache=self.args.cache or None,
+            prefix=colorstr(f"{mode}: "),
+            data=self.data,
+        )
 
     def get_validator(self):
         """
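Since get_model() and build_dataset() are invoked internally by the trainer picked up from task_map, the usual entry point is the high-level train call; a minimal sketch, with the dataset YAML and hyperparameters chosen for illustration only:

from ultralytics import RTDETR

# Training is normally launched through the Model API; RTDETRTrainer.get_model()
# and build_dataset() are called for you during this run.
model = RTDETR("rtdetr-l.pt")
model.train(data="coco8.yaml", epochs=3, imgsz=640)  # dataset and epochs are illustrative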
@@ -77,7 +79,7 @@ class RTDETRTrainer(DetectionTrainer):
         Returns:
             (RTDETRValidator): Validator object for model validation.
         """
-        self.loss_names = 'giou_loss', 'cls_loss', 'l1_loss'
+        self.loss_names = "giou_loss", "cls_loss", "l1_loss"
         return RTDETRValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
 
     def preprocess_batch(self, batch):
@@ -91,10 +93,10 @@ class RTDETRTrainer(DetectionTrainer):
             (dict): Preprocessed batch.
         """
         batch = super().preprocess_batch(batch)
-        bs = len(batch['img'])
-        batch_idx = batch['batch_idx']
+        bs = len(batch["img"])
+        batch_idx = batch["batch_idx"]
         gt_bbox, gt_class = [], []
         for i in range(bs):
-            gt_bbox.append(batch['bboxes'][batch_idx == i].to(batch_idx.device))
-            gt_class.append(batch['cls'][batch_idx == i].to(device=batch_idx.device, dtype=torch.long))
+            gt_bbox.append(batch["bboxes"][batch_idx == i].to(batch_idx.device))
+            gt_class.append(batch["cls"][batch_idx == i].to(device=batch_idx.device, dtype=torch.long))
         return batch
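The preprocess_batch loop above regroups the flattened ground-truth boxes and classes into per-image lists using batch_idx; a standalone sketch of that grouping logic, with illustrative tensor shapes:

import torch

# Flattened targets for a batch of 2 images; batch_idx says which image each box belongs to.
batch_idx = torch.tensor([0, 0, 1])        # 3 boxes total
bboxes = torch.rand(3, 4)                  # normalized xywh boxes
cls = torch.tensor([[2.0], [5.0], [2.0]])  # class ids, one per box

gt_bbox, gt_class = [], []
for i in range(2):  # bs = 2
    gt_bbox.append(bboxes[batch_idx == i])
    gt_class.append(cls[batch_idx == i].to(dtype=torch.long))

print([b.shape for b in gt_bbox])  # [torch.Size([2, 4]), torch.Size([1, 4])]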
@@ -7,7 +7,7 @@ from ultralytics.data.augment import Compose, Format, v8_transforms
 from ultralytics.models.yolo.detect import DetectionValidator
 from ultralytics.utils import colorstr, ops
 
-__all__ = 'RTDETRValidator',  # tuple or list
+__all__ = ("RTDETRValidator",)  # tuple or list
 
 
 class RTDETRDataset(YOLODataset):
@@ -37,13 +37,16 @@ class RTDETRDataset(YOLODataset):
             # transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), auto=False, scaleFill=True)])
             transforms = Compose([])
         transforms.append(
-            Format(bbox_format='xywh',
-                   normalize=True,
-                   return_mask=self.use_segments,
-                   return_keypoint=self.use_keypoints,
-                   batch_idx=True,
-                   mask_ratio=hyp.mask_ratio,
-                   mask_overlap=hyp.overlap_mask))
+            Format(
+                bbox_format="xywh",
+                normalize=True,
+                return_mask=self.use_segments,
+                return_keypoint=self.use_keypoints,
+                batch_idx=True,
+                mask_ratio=hyp.mask_ratio,
+                mask_overlap=hyp.overlap_mask,
+            )
+        )
         return transforms
 
 
@@ -68,7 +71,7 @@ class RTDETRValidator(DetectionValidator):
     For further details on the attributes and methods, refer to the parent DetectionValidator class.
     """
 
-    def build_dataset(self, img_path, mode='val', batch=None):
+    def build_dataset(self, img_path, mode="val", batch=None):
         """
         Build an RTDETR Dataset.
 
@@ -85,8 +88,9 @@ class RTDETRValidator(DetectionValidator):
             hyp=self.args,
             rect=False,  # no rect
             cache=self.args.cache or None,
-            prefix=colorstr(f'{mode}: '),
-            data=self.data)
+            prefix=colorstr(f"{mode}: "),
+            data=self.data,
+        )
 
     def postprocess(self, preds):
         """Apply Non-maximum suppression to prediction outputs."""
@@ -108,12 +112,12 @@ class RTDETRValidator(DetectionValidator):
 
     def _prepare_batch(self, si, batch):
         """Prepares a batch for training or inference by applying transformations."""
-        idx = batch['batch_idx'] == si
-        cls = batch['cls'][idx].squeeze(-1)
-        bbox = batch['bboxes'][idx]
-        ori_shape = batch['ori_shape'][si]
-        imgsz = batch['img'].shape[2:]
-        ratio_pad = batch['ratio_pad'][si]
+        idx = batch["batch_idx"] == si
+        cls = batch["cls"][idx].squeeze(-1)
+        bbox = batch["bboxes"][idx]
+        ori_shape = batch["ori_shape"][si]
+        imgsz = batch["img"].shape[2:]
+        ratio_pad = batch["ratio_pad"][si]
         if len(cls):
             bbox = ops.xywh2xyxy(bbox)  # target boxes
             bbox[..., [0, 2]] *= ori_shape[1]  # native-space pred
@@ -124,6 +128,6 @@ class RTDETRValidator(DetectionValidator):
     def _prepare_pred(self, pred, pbatch):
         """Prepares and returns a batch with transformed bounding boxes and class labels."""
         predn = pred.clone()
-        predn[..., [0, 2]] *= pbatch['ori_shape'][1] / self.args.imgsz  # native-space pred
-        predn[..., [1, 3]] *= pbatch['ori_shape'][0] / self.args.imgsz  # native-space pred
+        predn[..., [0, 2]] *= pbatch["ori_shape"][1] / self.args.imgsz  # native-space pred
+        predn[..., [1, 3]] *= pbatch["ori_shape"][0] / self.args.imgsz  # native-space pred
         return predn.float()
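The _prepare_pred scaling above maps predictions expressed in imgsz pixel space back to the original image resolution; a standalone sketch of the same arithmetic, with illustrative shapes and values:

import torch

imgsz = 640                     # validator input size
ori_shape = (480, 810)          # original image (height, width), illustrative
pred = torch.tensor([[320.0, 160.0, 480.0, 320.0, 0.9, 0.0]])  # x1, y1, x2, y2, conf, cls in imgsz space

predn = pred.clone()
predn[..., [0, 2]] *= ori_shape[1] / imgsz  # rescale x coordinates to native width
predn[..., [1, 3]] *= ori_shape[0] / imgsz  # rescale y coordinates to native height
print(predn)  # boxes now expressed in the 810x480 original image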