ultralytics 8.0.173 fix missing val plots and new Conda Guide (#4783)

Glenn Jocher 2023-09-07 19:24:54 +02:00 committed by GitHub
parent 602022a56e
commit dfe6dfb1d2
8 changed files with 188 additions and 23 deletions


@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = '8.0.172'
+__version__ = '8.0.173'
 
 from ultralytics.models import RTDETR, SAM, YOLO
 from ultralytics.models.fastsam import FastSAM


@@ -571,6 +571,7 @@ class BaseTrainer:
                 strip_optimizer(f)  # strip optimizers
                 if f is self.best:
                     LOGGER.info(f'\nValidating {f}...')
+                    self.validator.args.plots = self.args.plots
                     self.metrics = self.validator(model=f)
                     self.metrics.pop('fitness', None)
                     self.run_callbacks('on_fit_epoch_end')
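
A minimal sketch of how this fix surfaces for users (the weights file, dataset YAML, epoch count and image size below are illustrative assumptions, not part of this commit): with `plots=True` passed to `train()`, the end-of-training re-validation of `best.pt` now inherits the plotting flag instead of silently dropping it.

```python
from ultralytics import YOLO

# Illustrative only: any detection weights and dataset YAML would do.
model = YOLO('yolov8n.pt')

# With this fix, the final validation of best.pt inherits plots=True from
# the trainer, so validation plots are written to the run directory
# alongside the training plots after the last epoch.
model.train(data='coco128.yaml', epochs=1, imgsz=640, plots=True)
```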


@@ -102,8 +102,8 @@ class BaseValidator:
     @smart_inference_mode()
     def __call__(self, trainer=None, model=None):
         """
-        Supports validation of a pre-trained model if passed or a model being trained
-        if trainer is passed (trainer gets priority).
+        Supports validation of a pre-trained model if passed or a model being trained if trainer is passed (trainer
+        gets priority).
         """
         self.training = trainer is not None
         augment = self.args.augment and (not self.training)


@@ -58,7 +58,7 @@ class ClassificationTrainer(BaseTrainer):
         return model
 
     def setup_model(self):
-        """load/create/download model for any task"""
+        """Load, create or download model for any task."""
         if isinstance(self.model, torch.nn.Module):  # if model is loaded beforehand. No setup needed
             return

@@ -131,13 +131,13 @@ class ClassificationTrainer(BaseTrainer):
         for f in self.last, self.best:
             if f.exists():
                 strip_optimizer(f)  # strip optimizers
-                # TODO: validate best.pt after training completes
-                # if f is self.best:
-                #     LOGGER.info(f'\nValidating {f}...')
-                #     self.validator.args.save_json = True
-                #     self.metrics = self.validator(model=f)
-                #     self.metrics.pop('fitness', None)
-                #     self.run_callbacks('on_fit_epoch_end')
+                if f is self.best:
+                    LOGGER.info(f'\nValidating {f}...')
+                    self.validator.args.data = self.args.data
+                    self.validator.args.plots = self.args.plots
+                    self.metrics = self.validator(model=f)
+                    self.metrics.pop('fitness', None)
+                    self.run_callbacks('on_fit_epoch_end')
         LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")
 
     def plot_training_samples(self, batch, ni):
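
The classification trainer now performs the same end-of-training validation instead of leaving it behind a TODO comment. A hedged usage sketch, assuming classification weights, a small sample dataset name and image size purely for illustration:

```python
from ultralytics import YOLO

# Illustrative only: any classification weights and dataset would do.
model = YOLO('yolov8n-cls.pt')

# After the last epoch, best.pt is now validated with the training data
# and plot settings forwarded to the validator, so classification metrics
# and plots for best.pt appear in the run directory.
model.train(data='mnist160', epochs=1, imgsz=64, plots=True)
```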