ultralytics 8.3.6 improve collect_system_info (#16722)

Signed-off-by: UltralyticsAssistant <web@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>

parent 4884311991
commit d90efbd297

5 changed files with 52 additions and 27 deletions
docs/en/reference/utils/torch_utils.md
@@ -35,6 +35,10 @@ keywords: Ultralytics, torch utils, model optimization, device selection, inference

 <br><br><hr><br>

+## ::: ultralytics.utils.torch_utils.get_gpu_info
+
+<br><br><hr><br>
+
 ## ::: ultralytics.utils.torch_utils.select_device

 <br><br><hr><br>
ultralytics/__init__.py
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = "8.3.5"
+__version__ = "8.3.6"

 import os
ultralytics/engine/trainer.py
@@ -469,11 +469,11 @@ class BaseTrainer:
         if RANK in {-1, 0}:
             # Do final val with best.pt
-            LOGGER.info(
-                f"\n{epoch - self.start_epoch + 1} epochs completed in "
-                f"{(time.time() - self.train_time_start) / 3600:.3f} hours."
-            )
+            epochs = epoch - self.start_epoch + 1  # total training epochs
+            seconds = time.time() - self.train_time_start  # total training seconds
+            LOGGER.info(f"\n{epochs} epochs completed in {seconds / 3600:.3f} hours.")
             self.final_eval()
+            self.validator.metrics.training = {"epochs": epochs, "seconds": seconds}  # add training speed
             if self.args.plots:
                 self.plot_metrics()
             self.run_callbacks("on_train_end")
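The new `training` entry attached to the validator metrics makes training speed queryable rather than log-only. A minimal sketch of consuming that dict, assuming you can reach the metrics object from your training entry point (values are illustrative):

training = {"epochs": 100, "seconds": 7200.0}  # shape per the diff above; values illustrative
epochs_per_hour = training["epochs"] / (training["seconds"] / 3600)
print(f"{epochs_per_hour:.1f} epochs/hour")  # -> 50.0 epochs/hour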
ultralytics/utils/checks.py
@@ -593,20 +593,29 @@ def collect_system_info():
     import psutil

     from ultralytics.utils import ENVIRONMENT  # scope to avoid circular import
-    from ultralytics.utils.torch_utils import get_cpu_info
+    from ultralytics.utils.torch_utils import get_cpu_info, get_gpu_info

-    ram_info = psutil.virtual_memory().total / (1024**3)  # Convert bytes to GB
+    gib = 1 << 30  # bytes per GiB
+    cuda = torch and torch.cuda.is_available()
     check_yolo()
-    LOGGER.info(
-        f"\n{'OS':<20}{platform.platform()}\n"
-        f"{'Environment':<20}{ENVIRONMENT}\n"
-        f"{'Python':<20}{PYTHON_VERSION}\n"
-        f"{'Install':<20}{'git' if IS_GIT_DIR else 'pip' if IS_PIP_PACKAGE else 'other'}\n"
-        f"{'RAM':<20}{ram_info:.2f} GB\n"
-        f"{'CPU':<20}{get_cpu_info()}\n"
-        f"{'CUDA':<20}{torch.version.cuda if torch and torch.cuda.is_available() else None}\n"
-    )
+    total, used, free = shutil.disk_usage("/")
+
+    info_dict = {
+        "OS": platform.platform(),
+        "Environment": ENVIRONMENT,
+        "Python": PYTHON_VERSION,
+        "Install": "git" if IS_GIT_DIR else "pip" if IS_PIP_PACKAGE else "other",
+        "RAM": f"{psutil.virtual_memory().total / gib:.2f} GB",
+        "Disk": f"{(total - free) / gib:.1f}/{total / gib:.1f} GB",
+        "CPU": get_cpu_info(),
+        "CPU count": os.cpu_count(),
+        "GPU": get_gpu_info(index=0) if cuda else None,
+        "GPU count": torch.cuda.device_count() if cuda else None,
+        "CUDA": torch.version.cuda if cuda else None,
+    }
+    LOGGER.info("\n" + "\n".join(f"{k:<20}{v}" for k, v in info_dict.items()) + "\n")

+    package_info = {}
     for r in parse_requirements(package="ultralytics"):
         try:
             current = metadata.version(r.name)
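The core refactor here swaps one long f-string log for a build-the-dict-then-log pattern, so the same data can be both printed and returned. A self-contained sketch of that pattern, standard library only (keys illustrative):

import os
import platform

info = {
    "OS": platform.platform(),
    "Python": platform.python_version(),
    "CPU count": os.cpu_count(),
}
print("\n" + "\n".join(f"{k:<20}{v}" for k, v in info.items()) + "\n")  # same aligned layout as the diff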
@@ -614,17 +623,24 @@ def collect_system_info():
         except metadata.PackageNotFoundError:
             current = "(not installed)"
             is_met = "❌ "
-        LOGGER.info(f"{r.name:<20}{is_met}{current}{r.specifier}")
+        package_info[r.name] = f"{is_met}{current}{r.specifier}"
+        LOGGER.info(f"{r.name:<20}{package_info[r.name]}")
+
+    info_dict["Package Info"] = package_info

     if is_github_action_running():
-        LOGGER.info(
-            f"\nRUNNER_OS: {os.getenv('RUNNER_OS')}\n"
-            f"GITHUB_EVENT_NAME: {os.getenv('GITHUB_EVENT_NAME')}\n"
-            f"GITHUB_WORKFLOW: {os.getenv('GITHUB_WORKFLOW')}\n"
-            f"GITHUB_ACTOR: {os.getenv('GITHUB_ACTOR')}\n"
-            f"GITHUB_REPOSITORY: {os.getenv('GITHUB_REPOSITORY')}\n"
-            f"GITHUB_REPOSITORY_OWNER: {os.getenv('GITHUB_REPOSITORY_OWNER')}\n"
-        )
+        github_info = {
+            "RUNNER_OS": os.getenv("RUNNER_OS"),
+            "GITHUB_EVENT_NAME": os.getenv("GITHUB_EVENT_NAME"),
+            "GITHUB_WORKFLOW": os.getenv("GITHUB_WORKFLOW"),
+            "GITHUB_ACTOR": os.getenv("GITHUB_ACTOR"),
+            "GITHUB_REPOSITORY": os.getenv("GITHUB_REPOSITORY"),
+            "GITHUB_REPOSITORY_OWNER": os.getenv("GITHUB_REPOSITORY_OWNER"),
+        }
+        LOGGER.info("\n" + "\n".join(f"{k}: {v}" for k, v in github_info.items()))
+        info_dict["GitHub Info"] = github_info
+
+    return info_dict


 def check_amp(model):
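With the added `return info_dict`, callers can consume the report programmatically instead of scraping logs. A hedged usage sketch (the keys come from the diff above; printed values are environment-dependent):

from ultralytics.utils.checks import collect_system_info

info = collect_system_info()  # logs the report and returns it as a dict
print(info["OS"], info["RAM"])
print(info["Package Info"].get("torch"))  # e.g. "✅ 2.4.0>=1.8.0" (illustrative value)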
ultralytics/utils/torch_utils.py
@@ -123,6 +123,12 @@ def get_cpu_info():
     return PERSISTENT_CACHE.get("cpu_info", "unknown")


+def get_gpu_info(index):
+    """Return a string with system GPU information, i.e. 'Tesla T4, 15102MiB'."""
+    properties = torch.cuda.get_device_properties(index)
+    return f"{properties.name}, {properties.total_memory / (1 << 20):.0f}MiB"
+
+
 def select_device(device="", batch=0, newline=False, verbose=True):
     """
     Selects the appropriate PyTorch device based on the provided arguments.
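A quick usage sketch for the new helper; it requires a visible CUDA device, and the sample output string is the one from its docstring:

import torch
from ultralytics.utils.torch_utils import get_gpu_info

if torch.cuda.is_available():
    print(get_gpu_info(index=0))  # e.g. 'Tesla T4, 15102MiB'
else:
    print("no CUDA device visible")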
@@ -208,8 +214,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
         )
         space = " " * (len(s) + 1)
         for i, d in enumerate(devices):
-            p = torch.cuda.get_device_properties(i)
-            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n"  # bytes to MB
+            s += f"{'' if i == 0 else space}CUDA:{d} ({get_gpu_info(i)})\n"
         arg = "cuda:0"
     elif mps and TORCH_2_0 and torch.backends.mps.is_available():
         # Prefer MPS if available
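select_device's public behavior is unchanged; only the multi-GPU banner now delegates to get_gpu_info(i). A hedged sketch of exercising it (the "0,1" form assumes two visible GPUs):

from ultralytics.utils.torch_utils import select_device

device = select_device("cpu")  # always available; returns a torch.device
# device = select_device("0,1")  # multi-GPU form whose banner uses get_gpu_info(i)
print(device)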