ultralytics 8.0.163 add new gpu-latest runner to CI actions (#4565)
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Maia Numerosky <17316848+maianumerosky@users.noreply.github.com>
Parent: 431cef3955
Commit: b4dca690d4
14 changed files with 153 additions and 17 deletions
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = '8.0.162'
+__version__ = '8.0.163'
 
 from ultralytics.models import RTDETR, SAM, YOLO
 from ultralytics.models.fastsam import FastSAM
@@ -4,7 +4,6 @@ import contextlib
 import re
 import shutil
 import sys
-from difflib import get_close_matches
 from pathlib import Path
 from types import SimpleNamespace
 from typing import Dict, List, Union
@@ -177,6 +176,8 @@ def check_dict_alignment(base: Dict, custom: Dict, e=None):
     base_keys, custom_keys = (set(x.keys()) for x in (base, custom))
     mismatched = [k for k in custom_keys if k not in base_keys]
     if mismatched:
+        from difflib import get_close_matches
+
         string = ''
         for x in mismatched:
             matches = get_close_matches(x, base_keys)  # key list
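For readers unfamiliar with it, `difflib.get_close_matches` is what turns a mistyped config key into a "did you mean" suggestion. A standalone stdlib sketch (example keys only, not the full ultralytics config set):

```python
from difflib import get_close_matches

base_keys = ['epochs', 'imgsz', 'batch', 'device']  # illustrative subset of valid keys
print(get_close_matches('epoch', base_keys))  # ['epochs'] -> used to build the mismatch hint
```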
@@ -373,11 +374,7 @@ def entrypoint(debug=''):
         mode = DEFAULT_CFG.mode or 'predict'
         LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {MODES}. Using default 'mode={mode}'.")
     elif mode not in MODES:
-        if mode not in ('checks', checks):
-            raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}")
-        LOGGER.warning("WARNING ⚠️ 'yolo mode=checks' is deprecated. Use 'yolo checks' instead.")
-        checks.check_yolo()
-        return
+        raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}")
 
     # Task
     task = overrides.pop('task', None)
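The removed branch was the deprecation shim for `yolo mode=checks`; after this hunk any mode outside MODES fails fast with the ValueError above. A minimal sketch of the resulting behavior, assuming `entrypoint` is importable from `ultralytics.cfg` as in current releases:

```python
from ultralytics.cfg import entrypoint

entrypoint('checks')       # supported spelling, runs the environment checks
entrypoint('mode=checks')  # now raises ValueError listing the valid modes
```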
@@ -483,7 +483,7 @@ class RandomHSV:
         self.vgain = vgain
 
     def __call__(self, labels):
-        """Applies random horizontal or vertical flip to an image with a given probability."""
+        """Applies image HSV augmentation"""
         img = labels['img']
         if self.hgain or self.sgain or self.vgain:
             r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1  # random gains
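The gain line deserves a note: each of the H, S and V channels receives an independent multiplicative gain drawn uniformly from [1 - gain, 1 + gain]. A standalone numpy sketch of just that draw (gain values are illustrative, not necessarily the class defaults):

```python
import numpy as np

hgain, sgain, vgain = 0.015, 0.7, 0.4  # illustrative gains
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # per-channel multipliers
# e.g. r ~ [0.99, 1.32, 0.81]: hue barely moves, saturation and value vary more widely
```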
@@ -501,6 +501,7 @@ class RandomHSV:
 
 
 class RandomFlip:
+    """Applies random horizontal or vertical flip to an image with a given probability."""
 
     def __init__(self, p=0.5, direction='horizontal', flip_idx=None) -> None:
         assert direction in ['horizontal', 'vertical'], f'Support direction `horizontal` or `vertical`, got {direction}'
@@ -643,7 +644,9 @@ class CopyPaste:
 
 
 class Albumentations:
-    """YOLOv8 Albumentations class (optional, only used if package is installed)"""
+    """Albumentations transformations. Optional, uninstall package to disable.
+    Applies Blur, Median Blur, convert to grayscale, Contrast Limited Adaptive Histogram Equalization,
+    random change of brightness and contrast, RandomGamma and lowering of image quality by compression."""
 
     def __init__(self, p=1.0):
         """Initialize the transform object for YOLO bbox formatted params."""
@@ -159,7 +159,10 @@ class Exporter:
             raise ValueError(f"Invalid export format='{format}'. Valid formats are {fmts}")
         jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, ncnn = flags  # export booleans
 
-        # Load PyTorch model
+        # Device
+        if format == 'engine' and self.args.device is None:
+            LOGGER.warning('WARNING ⚠️ TensorRT requires GPU export, automatically assigning device=0')
+            self.args.device = '0'
         self.device = select_device('cpu' if self.args.device is None else self.args.device)
 
         # Checks
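The effect of the added branch: a TensorRT export with no device specified no longer drops through to `select_device('cpu')`; it warns and pins device 0. A usage sketch with the high-level API (the weights path is a placeholder):

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')  # placeholder weights
model.export(format='engine')  # no device passed: exporter now warns and sets device=0 itself
```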
@@ -182,6 +182,7 @@ class ProfileModels:
                  num_warmup_runs=10,
                  min_time=60,
                  imgsz=640,
+                 half=True,
                  trt=True,
                  device=None):
         self.paths = paths
@@ -189,6 +190,7 @@ class ProfileModels:
         self.num_warmup_runs = num_warmup_runs
         self.min_time = min_time
         self.imgsz = imgsz
+        self.half = half
         self.trt = trt  # run TensorRT profiling
         self.device = device or torch.device(0 if torch.cuda.is_available() else 'cpu')
 
@@ -209,12 +211,12 @@ class ProfileModels:
         model_info = model.info()
         if self.trt and self.device.type != 'cpu' and not engine_file.is_file():
             engine_file = model.export(format='engine',
-                                       half=True,
+                                       half=self.half,
                                        imgsz=self.imgsz,
                                        device=self.device,
                                        verbose=False)
         onnx_file = model.export(format='onnx',
-                                 half=True,
+                                 half=self.half,
                                  imgsz=self.imgsz,
                                  simplify=True,
                                  device=self.device,
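With `half` and `device` threaded through to both exports, profiling can be pinned to a device and run at full precision. A usage sketch (the model path is a placeholder, and it assumes the profiling entry point is the class's `profile()` method):

```python
import torch
from ultralytics.utils.benchmarks import ProfileModels

# FP32 TensorRT + ONNX profiling on the first CUDA GPU. Pass a torch.device rather
# than 0, since the `device or torch.device(...)` default above treats 0 as falsy.
ProfileModels(['yolov8n.pt'], half=False, trt=True, device=torch.device('cuda:0')).profile()
```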
@@ -507,3 +507,28 @@ def print_args(args: Optional[dict] = None, show_file=True, show_func=False):
         file = Path(file).stem
     s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')
     LOGGER.info(colorstr(s) + ', '.join(f'{k}={strip_auth(v)}' for k, v in args.items()))
+
+
+def cuda_device_count() -> int:
+    """Get the number of NVIDIA GPUs available in the environment.
+
+    Returns:
+        (int): The number of NVIDIA GPUs available.
+    """
+    try:
+        # Run the nvidia-smi command and capture its output
+        output = subprocess.check_output(['nvidia-smi', '--query-gpu=count', '--format=csv,noheader,nounits'],
+                                         encoding='utf-8')
+        return int(output.strip())
+    except (subprocess.CalledProcessError, FileNotFoundError):
+        # If the command fails or nvidia-smi is not found, assume no GPUs are available
+        return 0
+
+
+def cuda_is_available() -> bool:
+    """Check if CUDA is available in the environment.
+
+    Returns:
+        (bool): True if one or more NVIDIA GPUs are available, False otherwise.
+    """
+    return cuda_device_count() > 0
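Because these helpers shell out to `nvidia-smi` rather than importing torch.cuda, they can answer "is there a GPU here?" very early, which is presumably what the new gpu-latest CI runner needs. A quick sketch, assuming they land in `ultralytics.utils` alongside `print_args`:

```python
from ultralytics.utils import cuda_device_count, cuda_is_available

print(cuda_device_count())  # 0 on a CPU-only runner, >=1 when nvidia-smi reports GPUs
print(cuda_is_available())  # True if at least one NVIDIA GPU is visible
```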