feat: add --total-ram option for controlling visible system RAM in Comfy
Adds a new command-line argument `--total-ram` to limit the amount of system RAM that ComfyUI considers available, allowing users to simulate lower-memory environments. This enables more predictable behavior when testing or when running on systems with limited resources.

Rationale: I run Comfy inside a Docker container. Using `mem_limit` doesn't hide the host's total system RAM from Comfy, so its cache-pressure limits are computed against memory that isn't actually available and out-of-memory errors occur frequently. Adding this flag allows precise control over the memory visible to Comfy.

Signed-off-by: blob42 <contact@blob42.xyz>
parent 8402c8700a
commit 6f4d889053
comfy/cli_args.py
@@ -90,6 +90,7 @@ parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE"
 parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
 parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
 parser.add_argument("--supports-fp8-compute", action="store_true", help="ComfyUI will act like if the device supports fp8 compute.")
+parser.add_argument("--total-ram", type=float, default=0, help="Maximum system RAM visible to comfy in GB (default 0: all)")
 
 class LatentPreviewMethod(enum.Enum):
     NoPreviews = "none"
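For a quick sanity check of the new flag, a hypothetical invocation (values are illustrative; `main.py` is ComfyUI's usual entry point): running `python main.py --total-ram 8` would cap the RAM Comfy reports at 8 GB, while the default of 0 leaves all detected system RAM visible.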
comfy/model_management.py
@@ -192,8 +192,12 @@ def get_total_memory(dev=None, torch_total_too=False):
     if dev is None:
         dev = get_torch_device()
 
-    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
-        mem_total = psutil.virtual_memory().total
+    if hasattr(dev, "type") and (dev.type == "cpu" or dev.type == "mps"):
+        mem_total = 0
+        if args.total_ram != 0:
+            mem_total = args.total_ram * 1024 * 1024
+        else:
+            mem_total = psutil.virtual_memory().total
         mem_total_torch = mem_total
     else:
         if directml_enabled:
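One unit-related observation, with a minimal sketch rather than a definitive fix: `psutil.virtual_memory().total` reports bytes, so if `get_total_memory()` is meant to return bytes on this CPU/MPS path, a GB argument converts with `1024 ** 3`; the hunk's `args.total_ram * 1024 * 1024` would scale the value as MiB instead. The helper name below is hypothetical, not from the patch:

    # Sketch only: GB -> bytes for the CPU/MPS branch, assuming
    # get_total_memory() returns bytes like the psutil fallback does.
    def visible_ram_bytes(total_ram_gb: float, detected_bytes: int) -> int:
        if total_ram_gb != 0:
            return int(total_ram_gb * (1024 ** 3))  # GiB -> bytes
        return detected_bytes  # 0 means "use all detected RAM"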
@@ -236,8 +240,15 @@ def mac_version():
         return None
 
 total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
-total_ram = psutil.virtual_memory().total / (1024 * 1024)
-logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
+total_ram = 0
+if args.total_ram != 0:
+    total_ram = args.total_ram * (1024) # arg in GB
+else:
+    total_ram = psutil.virtual_memory().total / (1024 * 1024)
+logging.info(
+    "Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram)
+)
 
 try:
     logging.info("pytorch version: {}".format(torch_version))