ultralytics 8.2.13 enable --slow CUDA tests (#12511)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
parent 756a224bcd
commit 274d6e7e74
3 changed files with 10 additions and 12 deletions
@@ -19,14 +19,6 @@ def test_checks():
     assert torch.cuda.device_count() == CUDA_DEVICE_COUNT
 
 
-@pytest.mark.slow
-@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
-def test_export_engine():
-    """Test exporting the YOLO model to NVIDIA TensorRT format."""
-    f = YOLO(MODEL).export(format="engine", device=0)
-    YOLO(f)(SOURCE, device=0)
-
-
 @pytest.mark.slow
 @pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
 @pytest.mark.parametrize(
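This hunk removes the single-case TensorRT export test, since the slow-marked test_export_engine_matrix below it covers the same export path. Tests carrying @pytest.mark.slow only run when pytest is invoked with the --slow option named in the commit title. Below is a minimal conftest.py sketch of how such a --slow gate is commonly wired; the option name follows the commit title, but the exact Ultralytics implementation may differ.

# conftest.py -- sketch of a --slow gate for pytest (assumption: Ultralytics uses a similar pattern)
import pytest


def pytest_addoption(parser):
    # Register a --slow command-line option, off by default
    parser.addoption("--slow", action="store_true", default=False, help="run tests marked @pytest.mark.slow")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--slow"):
        return  # --slow passed: keep slow tests in the run
    skip_slow = pytest.mark.skip(reason="use --slow to run this test")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)  # skip slow-marked tests in normal runs

With a gate like this in place, running pytest tests/test_cuda.py --slow on a CUDA machine exercises the engine export matrix, while a plain pytest run skips it.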
@@ -49,6 +41,7 @@ def test_export_engine_matrix(task, dynamic, int8, half, batch):
         half=half,
         batch=batch,
         data=TASK2DATA[task],
+        workspace=1,  # reduce workspace GB for less resource utilization during testing
     )
     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
     Path(file).unlink()  # cleanup
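The only change in this hunk is the added workspace=1 argument, which caps the TensorRT builder workspace at about 1 GB so the matrix of export tests uses less GPU memory. A minimal sketch of the same export call outside the test harness, with assumed weights and an assumed image in place of the test's MODEL and SOURCE constants:

from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # assumed checkpoint; the test uses its MODEL constant
engine_file = model.export(
    format="engine",  # NVIDIA TensorRT
    dynamic=True,     # allow dynamic input shapes
    half=True,        # FP16 engine
    batch=2,          # maximum batch size
    workspace=1,      # builder workspace in GB, as added in the diff above
    device=0,         # build on the first CUDA device
)
YOLO(engine_file)(["bus.jpg"] * 2, imgsz=64)  # batched inference with the exported engine (assumed local image)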