Update YOLOv3 and YOLOv5 YAMLs (#7574)

Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Author: Glenn Jocher, 2024-01-14 20:10:32 +01:00 (committed by GitHub)
parent 596c068b18
commit d762496989
51 changed files with 284 additions and 304 deletions

ultralytics/nn/modules/head.py

@@ -11,7 +11,7 @@ from ultralytics.utils.tal import TORCH_1_10, dist2bbox, dist2rbox, make_anchors
 from .block import DFL, Proto
 from .conv import Conv
 from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
-from .utils import bias_init_with_prob, linear_init_
+from .utils import bias_init_with_prob, linear_init

 __all__ = "Detect", "Segment", "Pose", "Classify", "OBB", "RTDETRDecoder"
@@ -417,18 +417,18 @@ class RTDETRDecoder(nn.Module):
         """Initializes or resets the parameters of the model's various components with predefined weights and biases."""
         # Class and bbox head init
         bias_cls = bias_init_with_prob(0.01) / 80 * self.nc
-        # NOTE: the weight initialization in `linear_init_` would cause NaN when training with custom datasets.
-        # linear_init_(self.enc_score_head)
+        # NOTE: the weight initialization in `linear_init` would cause NaN when training with custom datasets.
+        # linear_init(self.enc_score_head)
         constant_(self.enc_score_head.bias, bias_cls)
         constant_(self.enc_bbox_head.layers[-1].weight, 0.0)
         constant_(self.enc_bbox_head.layers[-1].bias, 0.0)
         for cls_, reg_ in zip(self.dec_score_head, self.dec_bbox_head):
-            # linear_init_(cls_)
+            # linear_init(cls_)
             constant_(cls_.bias, bias_cls)
             constant_(reg_.layers[-1].weight, 0.0)
             constant_(reg_.layers[-1].bias, 0.0)
-        linear_init_(self.enc_output[0])
+        linear_init(self.enc_output[0])
         xavier_uniform_(self.enc_output[0].weight)
         if self.learnt_init_query:
             xavier_uniform_(self.tgt_embed.weight)
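
The `bias_cls` line in this hunk is the standard focal-loss prior initialization: `bias_init_with_prob(p)` (defined in the next file) returns the logit whose sigmoid equals p, and the result is rescaled from the 80-class COCO baseline to the head's own `self.nc`. A minimal sketch of the arithmetic, using `math` instead of `np` and a hypothetical `nc = 20`:

import math


def bias_init_with_prob(prior_prob=0.01):
    """Return bias b with sigmoid(b) == prior_prob, so initial class scores start near prior_prob."""
    return float(-math.log((1 - prior_prob) / prior_prob))


b = bias_init_with_prob(0.01)
print(round(b, 5), round(1 / (1 + math.exp(-b)), 5))  # -4.59512 0.01

nc = 20  # hypothetical class count; the head above uses self.nc
bias_cls = b / 80 * nc  # rescale the 80-class (COCO) prior to nc classes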

ultralytics/nn/modules/utils.py

@@ -23,7 +23,7 @@ def bias_init_with_prob(prior_prob=0.01):
     return float(-np.log((1 - prior_prob) / prior_prob))  # return bias_init


-def linear_init_(module):
+def linear_init(module):
     """Initialize the weights and biases of a linear module."""
     bound = 1 / math.sqrt(module.weight.shape[0])
     uniform_(module.weight, -bound, bound)
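
The hunk's context window ends at the weight init, so for reference here is a self-contained sketch of the renamed helper; the bias branch and the `nn.Linear` usage below are assumptions added for completeness, not part of this diff. Note that the bound comes from the first weight dimension (out_features for `nn.Linear`), not PyTorch's default fan_in. Callers that previously imported `linear_init_` just drop the trailing underscore.

import math

from torch import nn
from torch.nn.init import uniform_


def linear_init(module):
    """Initialize the weights (and bias, if present) of a linear module uniformly in [-bound, bound]."""
    bound = 1 / math.sqrt(module.weight.shape[0])  # first weight dim: out_features for nn.Linear
    uniform_(module.weight, -bound, bound)
    if hasattr(module, "bias") and module.bias is not None:  # assumed bias handling, for illustration
        uniform_(module.bias, -bound, bound)


layer = nn.Linear(256, 80)
linear_init(layer)
print(bool(layer.weight.abs().max() <= 1 / math.sqrt(80)))  # True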