ultralytics 8.0.58 new SimpleClass, fixes and updates (#1636)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com>
parent ef03e6732a
commit ec10002a4a
30 changed files with 351 additions and 314 deletions
ultralytics/yolo/utils/torch_utils.py

```diff
@@ -8,6 +8,7 @@ import time
 from contextlib import contextmanager
 from copy import deepcopy
 from pathlib import Path
+from typing import Union
 
 import numpy as np
 import thop
```
```diff
@@ -15,15 +16,10 @@ import torch
 import torch.distributed as dist
 import torch.nn as nn
 import torch.nn.functional as F
-from torch.nn.parallel import DistributedDataParallel as DDP
 
-from ultralytics.yolo.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, __version__
+from ultralytics.yolo.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, __version__
 from ultralytics.yolo.utils.checks import check_version
 
-LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1))  # https://pytorch.org/docs/stable/elastic/run.html
-RANK = int(os.getenv('RANK', -1))
-WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1))
 
 TORCH_1_9 = check_version(torch.__version__, '1.9.0')
 TORCH_1_11 = check_version(torch.__version__, '1.11.0')
 TORCH_1_12 = check_version(torch.__version__, '1.12.0')
```
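The TORCH_1_x flags above gate behavior on the installed torch build, and RANK is now imported from ultralytics.yolo.utils rather than re-read from the environment here. Below is a minimal sketch of the gating pattern; the packaging-based comparison is an illustration-only assumption (the real check_version lives in ultralytics.yolo.utils.checks):

```python
# Sketch only: illustrates version-gated behavior, not check_version() itself.
import torch
from packaging.version import parse  # assumed dependency for this sketch

TORCH_1_9 = parse(torch.__version__.split('+')[0]) >= parse('1.9.0')

def inference_ctx():
    # Prefer torch.inference_mode() on torch>=1.9, else fall back to no_grad().
    return torch.inference_mode() if TORCH_1_9 else torch.no_grad()

with inference_ctx():
    y = torch.nn.Linear(4, 2)(torch.randn(1, 4))  # forward pass, no autograd tape
```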
```diff
@@ -49,17 +45,6 @@ def smart_inference_mode():
     return decorate
 
 
-def DDP_model(model):
-    # Model DDP creation with checks
-    assert not check_version(torch.__version__, '1.12.0', pinned=True), \
-        'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \
-        'Please upgrade or downgrade torch to use DDP. See https://github.com/ultralytics/yolov5/issues/8395'
-    if TORCH_1_11:
-        return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True)
-    else:
-        return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK)
-
-
 def select_device(device='', batch=0, newline=False, verbose=True):
     # device = None or 'cpu' or 0 or '0' or '0,1,2,3'
     s = f'Ultralytics YOLOv{__version__} 🚀 Python-{platform.python_version()} torch-{torch.__version__} '
```
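DDP_model() is deleted outright here; the wrap presumably moves to the call site, which is not shown in this excerpt. A hedged sketch of equivalent logic, with illustrative names (wrap_model_ddp and local_rank are not from the commit):

```python
# Hedged sketch reproducing the deleted helper's behavior; running it requires
# an initialized process group (torch.distributed.init_process_group).
import torch
from torch.nn.parallel import DistributedDataParallel as DDP

def wrap_model_ddp(model: torch.nn.Module, local_rank: int) -> DDP:
    # torch==1.12.0 + torchvision==0.13.0 DDP training had a known issue:
    # https://github.com/ultralytics/yolov5/issues/8395
    assert not torch.__version__.startswith('1.12.0'), 'torch 1.12.0 DDP is unsupported'
    # static_graph=True (torch>=1.11) can reduce overhead when the graph is fixed.
    return DDP(model, device_ids=[local_rank], output_device=local_rank)
```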
```diff
@@ -141,6 +126,7 @@ def fuse_conv_and_bn(conv, bn):
 
 
 def fuse_deconv_and_bn(deconv, bn):
+    # Fuse ConvTranspose2d() and BatchNorm2d() layers
     fuseddconv = nn.ConvTranspose2d(deconv.in_channels,
                                     deconv.out_channels,
                                     kernel_size=deconv.kernel_size,
```
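The hunk stops mid-call, so here is a hedged sketch of the complete fusion for reference: BN(y) = γ·(y − μ)/√(σ² + ε) + β is folded into the transposed convolution by scaling each output channel and adjusting the bias (assumes groups == 1; not the library's exact code):

```python
import torch
import torch.nn as nn

@torch.no_grad()
def fuse_deconv_and_bn_sketch(deconv: nn.ConvTranspose2d, bn: nn.BatchNorm2d) -> nn.ConvTranspose2d:
    # Bias-enabled copy of the deconv; BN's affine transform is folded in below.
    fused = nn.ConvTranspose2d(deconv.in_channels,
                               deconv.out_channels,
                               kernel_size=deconv.kernel_size,
                               stride=deconv.stride,
                               padding=deconv.padding,
                               output_padding=deconv.output_padding,
                               dilation=deconv.dilation,
                               bias=True).requires_grad_(False).to(deconv.weight.device)
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)  # per-channel gamma / sigma
    # ConvTranspose2d weight is (in_channels, out_channels, kH, kW) for groups=1,
    # so the output-channel axis to scale is dim=1.
    fused.weight.copy_(deconv.weight * scale.reshape(1, -1, 1, 1))
    b = deconv.bias if deconv.bias is not None else torch.zeros(deconv.out_channels, device=deconv.weight.device)
    fused.bias.copy_((b - bn.running_mean) * scale + bn.bias)
    return fused

# Quick check: the fused module matches deconv -> BN in eval mode.
d, b = nn.ConvTranspose2d(4, 8, 3), nn.BatchNorm2d(8)
b.running_mean.uniform_(-1, 1), b.running_var.uniform_(0.5, 1.5)
b.eval()
x = torch.randn(1, 4, 16, 16)
print(torch.allclose(b(d(x)), fuse_deconv_and_bn_sketch(d, b)(x), atol=1e-5))  # True
```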
```diff
@@ -186,14 +172,17 @@ def model_info(model, detailed=False, verbose=True, imgsz=640):
 
 
 def get_num_params(model):
+    # Return the total number of parameters in a YOLO model
     return sum(x.numel() for x in model.parameters())
 
 
 def get_num_gradients(model):
+    # Return the total number of parameters with gradients in a YOLO model
     return sum(x.numel() for x in model.parameters() if x.requires_grad)
 
 
 def get_flops(model, imgsz=640):
+    # Return a YOLO model's FLOPs
     try:
         model = de_parallel(model)
         p = next(model.parameters())
```
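get_flops() is truncated above; the idea is a thop-profiled forward pass. A hedged sketch (the 3-channel input and the FLOPs = 2 × MACs convention are assumptions here):

```python
import torch
import thop  # https://github.com/Lyken17/pytorch-OpCounter

def flops_sketch(model: torch.nn.Module, imgsz: int = 640) -> float:
    # thop.profile returns (MACs, params); report GFLOPs as 2 * MACs / 1e9.
    p = next(model.parameters())
    im = torch.empty((1, 3, imgsz, imgsz), device=p.device)  # assumes 3-ch input
    macs, _params = thop.profile(model, inputs=(im,), verbose=False)
    return macs * 2 / 1e9
```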
```diff
@@ -208,6 +197,7 @@ def get_flops(model, imgsz=640):
 
 
 def initialize_weights(model):
+    # Initialize model weights to random values
    for m in model.modules():
        t = type(m)
        if t is nn.Conv2d:
```
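The loop body is cut off after the Conv2d branch; a hedged completion following the long-standing YOLOv5 convention (the eps/momentum values are an assumption, not read from this diff):

```python
import torch.nn as nn

def initialize_weights_sketch(model: nn.Module) -> None:
    # Assumed completion: keep PyTorch's default conv init, retune BN
    # running-stat hyperparameters, and mark activations in-place.
    for m in model.modules():
        t = type(m)
        if t is nn.Conv2d:
            pass  # default kaiming-uniform init is kept
        elif t is nn.BatchNorm2d:
            m.eps = 1e-3
            m.momentum = 0.03
        elif t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU):
            m.inplace = True
```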
```diff
@@ -239,7 +229,7 @@ def make_divisible(x, divisor):
 
 
 def copy_attr(a, b, include=(), exclude=()):
-    # Copy attributes from b to a, options to only include [...] and to exclude [...]
+    # Copy attributes from 'b' to 'a', options to only include [...] and to exclude [...]
     for k, v in b.__dict__.items():
         if (len(include) and k not in include) or k.startswith('_') or k in exclude:
             continue
```
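Only the final assignment of copy_attr() falls outside the hunk; a completed sketch plus a tiny usage example (Cfg and the attribute names are illustrative):

```python
def copy_attr(a, b, include=(), exclude=()):
    # Copy attributes from 'b' to 'a', options to only include [...] and to exclude [...]
    for k, v in b.__dict__.items():
        if (len(include) and k not in include) or k.startswith('_') or k in exclude:
            continue
        setattr(a, k, v)  # assumed final statement; the hunk ends at 'continue'

class Cfg:  # illustrative empty container
    pass

src, dst = Cfg(), Cfg()
src.names, src.stride, src._cache = ['person'], 32, {}
copy_attr(dst, src, exclude=('stride',))
print(vars(dst))  # {'names': ['person']}: '_cache' and 'stride' are skipped
```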
```diff
@@ -322,7 +312,7 @@ class ModelEMA:
         copy_attr(self.ema, model, include, exclude)
 
 
-def strip_optimizer(f='best.pt', s=''):
+def strip_optimizer(f: Union[str, Path] = 'best.pt', s: str = '') -> None:
     """
     Strip optimizer from 'f' to finalize training, optionally save as 's'.
 
```
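The retyped signature accepts str or Path (hence the typing.Union import added at the top of this diff). A hedged sketch of what stripping typically does, based on the docstring; the exact checkpoint keys are an assumption:

```python
from pathlib import Path
from typing import Union

import torch

def strip_optimizer_sketch(f: Union[str, Path] = 'best.pt', s: str = '') -> None:
    # Assumed behavior: drop training-only state and halve weights to FP16 so
    # the finalized checkpoint is smaller; save to 's' if given, else overwrite 'f'.
    x = torch.load(f, map_location=torch.device('cpu'))
    for k in ('optimizer', 'best_fitness', 'ema', 'updates'):  # assumed keys
        x[k] = None
    x['epoch'] = -1
    if x.get('model') is not None:
        x['model'].half()
        for p in x['model'].parameters():
            p.requires_grad = False
    torch.save(x, s or f)
```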