Implement gc.collect() to free memory (#10129)
Signed-off-by: Glenn Jocher <glenn.jocher@ultralytics.com>
Co-authored-by: UltralyticsAssistant <web@ultralytics.com>
parent 37ffebd690
commit 1f4bed233a
3 changed files with 7 additions and 0 deletions
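Each of the seven added lines applies the same pattern: drop the last Python reference to the large object, run gc.collect() so anything kept alive only by reference cycles is actually destroyed, then call torch.cuda.empty_cache() so the caching allocator returns the now-unused blocks to the driver. A minimal standalone sketch of the idea (the Linear model is a placeholder, not code from this diff, and it assumes a CUDA device is available):

```python
import gc

import torch

model = torch.nn.Linear(4096, 4096).cuda()  # stand-in for an exported/trained model

del model                 # 1. drop the last reference to the CUDA tensors
gc.collect()              # 2. destroy objects kept alive only by reference cycles
torch.cuda.empty_cache()  # 3. hand the allocator's now-unused cached blocks back to the driver
```

empty_cache() can only release memory that is no longer allocated, which is why the gc.collect() call in front of it matters.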
@@ -50,6 +50,7 @@ TensorFlow.js:
         $ npm start
 """
 
+import gc
 import json
 import os
 import shutil
@@ -713,6 +714,7 @@ class Exporter:
 
         # Free CUDA memory
         del self.model
+        gc.collect()
         torch.cuda.empty_cache()
 
         # Write file
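To check what this export-time cleanup actually frees, PyTorch's allocator counters can be read before and after. A rough, standalone sketch (not Exporter code; the model and helper below are illustrative, and a CUDA device is assumed):

```python
import gc

import torch


def cuda_mem_mib():
    """Return (allocated, reserved) CUDA memory in MiB."""
    return torch.cuda.memory_allocated() / 2**20, torch.cuda.memory_reserved() / 2**20


model = torch.nn.Sequential(*[torch.nn.Linear(2048, 2048) for _ in range(8)]).cuda()
print("after load   :", cuda_mem_mib())

del model
gc.collect()              # without this, cyclic references can keep the weights allocated
torch.cuda.empty_cache()  # releases cached blocks that are no longer allocated
print("after cleanup:", cuda_mem_mib())
```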
@@ -6,6 +6,7 @@ Usage:
     $ yolo mode=train model=yolov8n.pt data=coco128.yaml imgsz=640 epochs=100 batch=16
 """
 
+import gc
 import math
 import os
 import subprocess
@@ -437,6 +438,7 @@ class BaseTrainer:
             self.scheduler.last_epoch = self.epoch  # do not move
             self.stop |= epoch >= self.epochs  # stop if exceeded epochs
             self.run_callbacks("on_fit_epoch_end")
+            gc.collect()
             torch.cuda.empty_cache()  # clear GPU memory at end of epoch, may help reduce CUDA out of memory errors
 
             # Early Stopping
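The end-of-epoch cleanup in the hunk above is easy to mirror in a plain PyTorch training loop; the sketch below is generic code, not BaseTrainer itself:

```python
import gc

import torch


def train(model, loader, optimizer, loss_fn, epochs, device="cuda"):
    """Minimal training loop with the same end-of-epoch memory cleanup."""
    model.to(device)
    for epoch in range(epochs):
        model.train()
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            optimizer.zero_grad()
            loss = loss_fn(model(x), y)
            loss.backward()
            optimizer.step()
        # End-of-epoch cleanup, mirroring the trainer change above
        gc.collect()
        torch.cuda.empty_cache()  # may help reduce CUDA out-of-memory errors between epochs
```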
@@ -458,6 +460,7 @@ class BaseTrainer:
             if self.args.plots:
                 self.plot_metrics()
             self.run_callbacks("on_train_end")
+            gc.collect()
             torch.cuda.empty_cache()
             self.run_callbacks("teardown")
 
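Because the trainer fires named callbacks at these points ("on_fit_epoch_end", "on_train_end", "teardown" in the hunks above), equivalent cleanup can also be attached from user code. The sketch below assumes the Ultralytics YOLO.add_callback() API and is illustrative only, since this commit already performs the cleanup inside the trainer:

```python
import gc

import torch
from ultralytics import YOLO


def free_memory(trainer):
    """Extra cleanup hook; the trainer instance is passed in by the callback system."""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()


model = YOLO("yolov8n.pt")
model.add_callback("on_fit_epoch_end", free_memory)  # assumes add_callback(event, fn); event name from the diff above
model.train(data="coco128.yaml", imgsz=640, epochs=100, batch=16)
```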
@@ -1,5 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
+import gc
 import math
 import os
 import random
@@ -581,6 +582,7 @@ def profile(input, ops, n=10, device=None):
             except Exception as e:
                 LOGGER.info(e)
                 results.append(None)
+            gc.collect()  # attempt to free unused memory
             torch.cuda.empty_cache()
     return results
 
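In profile() the cleanup now runs after every measured op, so one op that raises (for example a CUDA out-of-memory error) does not leave cached allocations behind for the next one. The same guard works for ad-hoc benchmarking; try_forward below is a hypothetical helper, not a library function:

```python
import gc

import torch


def try_forward(model, x):
    """Run one forward pass; on failure return None instead of leaving cached CUDA memory behind."""
    try:
        with torch.no_grad():
            return model(x)
    except Exception as e:  # e.g. torch.cuda.OutOfMemoryError
        print(e)
        return None
    finally:
        gc.collect()  # attempt to free unused memory, as in profile()
        torch.cuda.empty_cache()
```

Putting the cleanup in a finally block ensures it runs on both the success and failure paths, matching its placement after the except block in profile().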