ultralytics 8.0.34 security updates and fixes (#924)
Co-authored-by: Laughing <61612323+Laughing-q@users.noreply.github.com>
Co-authored-by: Fabian Greavu <fabiangreavu@gmail.com>
Co-authored-by: Yonghye Kwon <developer.0hye@gmail.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
parent 254adfa652
commit d32b339373

31 changed files with 134 additions and 120 deletions
@@ -20,12 +20,11 @@ from torch.nn.parallel import DistributedDataParallel as DDP
 from torch.optim import lr_scheduler
 from tqdm import tqdm
 
-from ultralytics import __version__
 from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights
 from ultralytics.yolo.cfg import get_cfg
 from ultralytics.yolo.data.utils import check_cls_dataset, check_det_dataset
-from ultralytics.yolo.utils import (DEFAULT_CFG, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, callbacks, colorstr, emojis,
-                                    yaml_save)
+from ultralytics.yolo.utils import (DEFAULT_CFG, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, __version__, callbacks,
+                                    colorstr, emojis, yaml_save)
 from ultralytics.yolo.utils.autobatch import check_train_batch_size
 from ultralytics.yolo.utils.checks import check_file, check_imgsz, print_args
 from ultralytics.yolo.utils.dist import ddp_cleanup, generate_ddp_command

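Note: the import hunk above drops the top-level `from ultralytics import __version__` and instead pulls `__version__` in through the existing grouped `ultralytics.yolo.utils` import, so the trainer no longer imports the package root from a submodule. A quick sanity check of the consolidated path, assuming an ultralytics 8.0.x install (this module path was reorganized in later releases):

# Verify the version string is re-exported by ultralytics.yolo.utils,
# as the new import line in the hunk above relies on.
from ultralytics.yolo.utils import __version__

print(f'ultralytics {__version__}')
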
@@ -51,6 +50,7 @@ class BaseTrainer:
         wdir (Path): Directory to save weights.
         last (Path): Path to last checkpoint.
         best (Path): Path to best checkpoint.
+        save_period (int): Save checkpoint every x epochs (disabled if < 1).
         batch_size (int): Batch size for training.
         epochs (int): Number of epochs to train for.
         start_epoch (int): Starting epoch for training.

@@ -101,6 +101,7 @@ class BaseTrainer:
         self.args.save_dir = str(self.save_dir)
         yaml_save(self.save_dir / 'args.yaml', vars(self.args))  # save run args
         self.last, self.best = self.wdir / 'last.pt', self.wdir / 'best.pt'  # checkpoint paths
+        self.save_period = self.args.save_period
 
         self.batch_size = self.args.batch
         self.epochs = self.args.epochs

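Note: `self.save_period` is populated from the training args, so the periodic-checkpoint behaviour added in this commit is driven by an ordinary train-time override. A minimal usage sketch, assuming `save_period` is exposed in the default config and accepted by `YOLO.train()` like the other arguments (the weights and dataset names below are just the usual examples):

from ultralytics import YOLO

# Keep an extra checkpoint every 10 epochs alongside last.pt / best.pt;
# a value < 1 leaves the periodic saves disabled.
model = YOLO('yolov8n.pt')
model.train(data='coco128.yaml', epochs=100, save_period=10)
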
@@ -392,6 +393,8 @@ class BaseTrainer:
         torch.save(ckpt, self.last)
         if self.best_fitness == self.fitness:
             torch.save(ckpt, self.best)
+        if (self.epoch > 0) and (self.save_period > 0) and (self.epoch % self.save_period == 0):
+            torch.save(ckpt, self.wdir / f'epoch{self.epoch}.pt')
         del ckpt
 
     def get_dataset(self, data):

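Note: the added condition writes wdir/epoch{N}.pt only on positive epochs that are exact multiples of `save_period`, and never when the period is disabled. A standalone sketch of that predicate (plain Python, no ultralytics dependency; the helper name is illustrative):

def periodic_save_epochs(epochs: int, save_period: int) -> list[int]:
    # Epochs on which the trainer would write an intermediate epoch{N}.pt checkpoint.
    return [e for e in range(epochs) if e > 0 and save_period > 0 and e % save_period == 0]

print(periodic_save_epochs(epochs=10, save_period=3))   # -> [3, 6, 9]
print(periodic_save_epochs(epochs=10, save_period=-1))  # -> [] (disabled)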