diff --git a/docs/en/reference/utils/torch_utils.md b/docs/en/reference/utils/torch_utils.md
index 4f8f3d1b..ac31ec2c 100644
--- a/docs/en/reference/utils/torch_utils.md
+++ b/docs/en/reference/utils/torch_utils.md
@@ -35,6 +35,10 @@ keywords: Ultralytics, torch utils, model optimization, device selection, infere
+## ::: ultralytics.utils.torch_utils.get_gpu_info
+
+
+
## ::: ultralytics.utils.torch_utils.select_device
diff --git a/ultralytics/__init__.py b/ultralytics/__init__.py
index b5e68098..ce089ca6 100644
--- a/ultralytics/__init__.py
+++ b/ultralytics/__init__.py
@@ -1,6 +1,6 @@
# Ultralytics YOLO 🚀, AGPL-3.0 license
-__version__ = "8.3.5"
+__version__ = "8.3.6"
import os
diff --git a/ultralytics/engine/trainer.py b/ultralytics/engine/trainer.py
index 9fcc6970..aadf63b0 100644
--- a/ultralytics/engine/trainer.py
+++ b/ultralytics/engine/trainer.py
@@ -469,11 +469,11 @@ class BaseTrainer:
if RANK in {-1, 0}:
# Do final val with best.pt
- LOGGER.info(
- f"\n{epoch - self.start_epoch + 1} epochs completed in "
- f"{(time.time() - self.train_time_start) / 3600:.3f} hours."
- )
+ epochs = epoch - self.start_epoch + 1 # total training epochs
+ seconds = time.time() - self.train_time_start # total training seconds
+ LOGGER.info(f"\n{epochs} epochs completed in {seconds / 3600:.3f} hours.")
self.final_eval()
+ self.validator.metrics.training = {"epochs": epochs, "seconds": seconds} # add training speed
if self.args.plots:
self.plot_metrics()
self.run_callbacks("on_train_end")
diff --git a/ultralytics/utils/checks.py b/ultralytics/utils/checks.py
index 85eccf67..2a461b03 100644
--- a/ultralytics/utils/checks.py
+++ b/ultralytics/utils/checks.py
@@ -593,20 +593,29 @@ def collect_system_info():
import psutil
from ultralytics.utils import ENVIRONMENT # scope to avoid circular import
- from ultralytics.utils.torch_utils import get_cpu_info
+ from ultralytics.utils.torch_utils import get_cpu_info, get_gpu_info
- ram_info = psutil.virtual_memory().total / (1024**3) # Convert bytes to GB
+ gib = 1 << 30 # bytes per GiB
+ cuda = torch and torch.cuda.is_available()
check_yolo()
- LOGGER.info(
- f"\n{'OS':<20}{platform.platform()}\n"
- f"{'Environment':<20}{ENVIRONMENT}\n"
- f"{'Python':<20}{PYTHON_VERSION}\n"
- f"{'Install':<20}{'git' if IS_GIT_DIR else 'pip' if IS_PIP_PACKAGE else 'other'}\n"
- f"{'RAM':<20}{ram_info:.2f} GB\n"
- f"{'CPU':<20}{get_cpu_info()}\n"
- f"{'CUDA':<20}{torch.version.cuda if torch and torch.cuda.is_available() else None}\n"
- )
+ total, used, free = shutil.disk_usage("/")
+ info_dict = {
+ "OS": platform.platform(),
+ "Environment": ENVIRONMENT,
+ "Python": PYTHON_VERSION,
+ "Install": "git" if IS_GIT_DIR else "pip" if IS_PIP_PACKAGE else "other",
+ "RAM": f"{psutil.virtual_memory().total / gib:.2f} GB",
+ "Disk": f"{(total - free) / gib:.1f}/{total / gib:.1f} GB",
+ "CPU": get_cpu_info(),
+ "CPU count": os.cpu_count(),
+ "GPU": get_gpu_info(index=0) if cuda else None,
+ "GPU count": torch.cuda.device_count() if cuda else None,
+ "CUDA": torch.version.cuda if cuda else None,
+ }
+ LOGGER.info("\n" + "\n".join(f"{k:<20}{v}" for k, v in info_dict.items()) + "\n")
+
+ package_info = {}
for r in parse_requirements(package="ultralytics"):
try:
current = metadata.version(r.name)
@@ -614,17 +623,24 @@ def collect_system_info():
except metadata.PackageNotFoundError:
current = "(not installed)"
is_met = "❌ "
- LOGGER.info(f"{r.name:<20}{is_met}{current}{r.specifier}")
+ package_info[r.name] = f"{is_met}{current}{r.specifier}"
+ LOGGER.info(f"{r.name:<20}{package_info[r.name]}")
+
+ info_dict["Package Info"] = package_info
if is_github_action_running():
- LOGGER.info(
- f"\nRUNNER_OS: {os.getenv('RUNNER_OS')}\n"
- f"GITHUB_EVENT_NAME: {os.getenv('GITHUB_EVENT_NAME')}\n"
- f"GITHUB_WORKFLOW: {os.getenv('GITHUB_WORKFLOW')}\n"
- f"GITHUB_ACTOR: {os.getenv('GITHUB_ACTOR')}\n"
- f"GITHUB_REPOSITORY: {os.getenv('GITHUB_REPOSITORY')}\n"
- f"GITHUB_REPOSITORY_OWNER: {os.getenv('GITHUB_REPOSITORY_OWNER')}\n"
- )
+ github_info = {
+ "RUNNER_OS": os.getenv("RUNNER_OS"),
+ "GITHUB_EVENT_NAME": os.getenv("GITHUB_EVENT_NAME"),
+ "GITHUB_WORKFLOW": os.getenv("GITHUB_WORKFLOW"),
+ "GITHUB_ACTOR": os.getenv("GITHUB_ACTOR"),
+ "GITHUB_REPOSITORY": os.getenv("GITHUB_REPOSITORY"),
+ "GITHUB_REPOSITORY_OWNER": os.getenv("GITHUB_REPOSITORY_OWNER"),
+ }
+ LOGGER.info("\n" + "\n".join(f"{k}: {v}" for k, v in github_info.items()))
+ info_dict["GitHub Info"] = github_info
+
+ return info_dict
def check_amp(model):
diff --git a/ultralytics/utils/torch_utils.py b/ultralytics/utils/torch_utils.py
index 00176d30..db84ed69 100644
--- a/ultralytics/utils/torch_utils.py
+++ b/ultralytics/utils/torch_utils.py
@@ -123,6 +123,12 @@ def get_cpu_info():
return PERSISTENT_CACHE.get("cpu_info", "unknown")
+def get_gpu_info(index):
+ """Return a string with system GPU information, e.g. 'Tesla T4, 15102MiB'."""
+ properties = torch.cuda.get_device_properties(index)
+ return f"{properties.name}, {properties.total_memory / (1 << 20):.0f}MiB"
+
+
def select_device(device="", batch=0, newline=False, verbose=True):
"""
Selects the appropriate PyTorch device based on the provided arguments.
@@ -208,8 +214,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
)
space = " " * (len(s) + 1)
for i, d in enumerate(devices):
- p = torch.cuda.get_device_properties(i)
- s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB
+ s += f"{'' if i == 0 else space}CUDA:{d} ({get_gpu_info(i)})\n"  # GPU name and total memory in MiB
arg = "cuda:0"
elif mps and TORCH_2_0 and torch.backends.mps.is_available():
# Prefer MPS if available