ultralytics 8.3.6 improve collect_system_info (#16722)
Signed-off-by: UltralyticsAssistant <web@ultralytics.com> Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
This commit is contained in:
parent
4884311991
commit
d90efbd297
5 changed files with 52 additions and 27 deletions
|
|
@ -593,20 +593,29 @@ def collect_system_info():
|
|||
import psutil
|
||||
|
||||
from ultralytics.utils import ENVIRONMENT # scope to avoid circular import
|
||||
from ultralytics.utils.torch_utils import get_cpu_info
|
||||
from ultralytics.utils.torch_utils import get_cpu_info, get_gpu_info
|
||||
|
||||
ram_info = psutil.virtual_memory().total / (1024**3) # Convert bytes to GB
|
||||
gib = 1 << 30 # bytes per GiB
|
||||
cuda = torch and torch.cuda.is_available()
|
||||
check_yolo()
|
||||
LOGGER.info(
|
||||
f"\n{'OS':<20}{platform.platform()}\n"
|
||||
f"{'Environment':<20}{ENVIRONMENT}\n"
|
||||
f"{'Python':<20}{PYTHON_VERSION}\n"
|
||||
f"{'Install':<20}{'git' if IS_GIT_DIR else 'pip' if IS_PIP_PACKAGE else 'other'}\n"
|
||||
f"{'RAM':<20}{ram_info:.2f} GB\n"
|
||||
f"{'CPU':<20}{get_cpu_info()}\n"
|
||||
f"{'CUDA':<20}{torch.version.cuda if torch and torch.cuda.is_available() else None}\n"
|
||||
)
|
||||
total, used, free = shutil.disk_usage("/")
|
||||
|
||||
info_dict = {
|
||||
"OS": platform.platform(),
|
||||
"Environment": ENVIRONMENT,
|
||||
"Python": PYTHON_VERSION,
|
||||
"Install": "git" if IS_GIT_DIR else "pip" if IS_PIP_PACKAGE else "other",
|
||||
"RAM": f"{psutil.virtual_memory().total / gib:.2f} GB",
|
||||
"Disk": f"{(total - free) / gib:.1f}/{total / gib:.1f} GB",
|
||||
"CPU": get_cpu_info(),
|
||||
"CPU count": os.cpu_count(),
|
||||
"GPU": get_gpu_info(index=0) if cuda else None,
|
||||
"GPU count": torch.cuda.device_count() if cuda else None,
|
||||
"CUDA": torch.version.cuda if cuda else None,
|
||||
}
|
||||
LOGGER.info("\n" + "\n".join(f"{k:<20}{v}" for k, v in info_dict.items()) + "\n")
|
||||
|
||||
package_info = {}
|
||||
for r in parse_requirements(package="ultralytics"):
|
||||
try:
|
||||
current = metadata.version(r.name)
|
||||
|
|
@ -614,17 +623,24 @@ def collect_system_info():
|
|||
except metadata.PackageNotFoundError:
|
||||
current = "(not installed)"
|
||||
is_met = "❌ "
|
||||
LOGGER.info(f"{r.name:<20}{is_met}{current}{r.specifier}")
|
||||
package_info[r.name] = f"{is_met}{current}{r.specifier}"
|
||||
LOGGER.info(f"{r.name:<20}{package_info[r.name]}")
|
||||
|
||||
info_dict["Package Info"] = package_info
|
||||
|
||||
if is_github_action_running():
|
||||
LOGGER.info(
|
||||
f"\nRUNNER_OS: {os.getenv('RUNNER_OS')}\n"
|
||||
f"GITHUB_EVENT_NAME: {os.getenv('GITHUB_EVENT_NAME')}\n"
|
||||
f"GITHUB_WORKFLOW: {os.getenv('GITHUB_WORKFLOW')}\n"
|
||||
f"GITHUB_ACTOR: {os.getenv('GITHUB_ACTOR')}\n"
|
||||
f"GITHUB_REPOSITORY: {os.getenv('GITHUB_REPOSITORY')}\n"
|
||||
f"GITHUB_REPOSITORY_OWNER: {os.getenv('GITHUB_REPOSITORY_OWNER')}\n"
|
||||
)
|
||||
github_info = {
|
||||
"RUNNER_OS": os.getenv("RUNNER_OS"),
|
||||
"GITHUB_EVENT_NAME": os.getenv("GITHUB_EVENT_NAME"),
|
||||
"GITHUB_WORKFLOW": os.getenv("GITHUB_WORKFLOW"),
|
||||
"GITHUB_ACTOR": os.getenv("GITHUB_ACTOR"),
|
||||
"GITHUB_REPOSITORY": os.getenv("GITHUB_REPOSITORY"),
|
||||
"GITHUB_REPOSITORY_OWNER": os.getenv("GITHUB_REPOSITORY_OWNER"),
|
||||
}
|
||||
LOGGER.info("\n" + "\n".join(f"{k}: {v}" for k, v in github_info.items()))
|
||||
info_dict["GitHub Info"] = github_info
|
||||
|
||||
return info_dict
|
||||
|
||||
|
||||
def check_amp(model):
|
||||
|
|
|
|||
|
|
@ -123,6 +123,12 @@ def get_cpu_info():
|
|||
return PERSISTENT_CACHE.get("cpu_info", "unknown")
|
||||
|
||||
|
||||
def get_gpu_info(index=0):
    """
    Return a human-readable description of a CUDA device.

    Args:
        index (int): CUDA device index to query (default 0, the first visible GPU).

    Returns:
        (str): Device name and total memory in MiB, e.g. 'Tesla T4, 15102MiB'.

    Note:
        Assumes a CUDA-capable device is available at `index`; torch raises if not.
    """
    properties = torch.cuda.get_device_properties(index)
    # total_memory is in bytes; 1 << 20 converts to MiB to match nvidia-smi output
    return f"{properties.name}, {properties.total_memory / (1 << 20):.0f}MiB"
|
||||
|
||||
|
||||
def select_device(device="", batch=0, newline=False, verbose=True):
|
||||
"""
|
||||
Selects the appropriate PyTorch device based on the provided arguments.
|
||||
|
|
@ -208,8 +214,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
|
|||
)
|
||||
space = " " * (len(s) + 1)
|
||||
for i, d in enumerate(devices):
|
||||
p = torch.cuda.get_device_properties(i)
|
||||
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB
|
||||
s += f"{'' if i == 0 else space}CUDA:{d} ({get_gpu_info(i)})\n"  # get_gpu_info returns 'name, NNNNMiB'
|
||||
arg = "cuda:0"
|
||||
elif mps and TORCH_2_0 and torch.backends.mps.is_available():
|
||||
# Prefer MPS if available
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue