Merge 6f4d889053b61b378d8d0c1fc1265b5bcfd674a2 into fd271dedfde6e192a1f1a025521070876e89e04a
commit 0d7a82ddbb
comfy/cli_args.py
@@ -90,6 +90,7 @@ parser.add_argument("--directml", type=int, nargs="?", metavar="DIRECTML_DEVICE"
 parser.add_argument("--oneapi-device-selector", type=str, default=None, metavar="SELECTOR_STRING", help="Sets the oneAPI device(s) this instance will use.")
 parser.add_argument("--disable-ipex-optimize", action="store_true", help="Disables ipex.optimize default when loading models with Intel's Extension for Pytorch.")
 parser.add_argument("--supports-fp8-compute", action="store_true", help="ComfyUI will act like if the device supports fp8 compute.")
+parser.add_argument("--total-ram", type=float, default=0, help="Maximum system RAM visible to comfy in GB (default 0: all)")
 
 class LatentPreviewMethod(enum.Enum):
     NoPreviews = "none"
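For reference, the new flag can be exercised in isolation; a minimal sketch assuming a bare argparse parser (only the add_argument line comes from the commit, the rest is illustrative):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--total-ram", type=float, default=0,
                    help="Maximum system RAM visible to comfy in GB (default 0: all)")

# argparse exposes "--total-ram" as the attribute args.total_ram.
args = parser.parse_args(["--total-ram", "8"])
print(args.total_ram)                   # 8.0 (GB)
print(int(args.total_ram * 1024 ** 3))  # 8589934592 bytes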
comfy/model_management.py
@@ -192,8 +192,12 @@ def get_total_memory(dev=None, torch_total_too=False):
     if dev is None:
         dev = get_torch_device()
 
-    if hasattr(dev, 'type') and (dev.type == 'cpu' or dev.type == 'mps'):
-        mem_total = psutil.virtual_memory().total
+    if hasattr(dev, "type") and (dev.type == "cpu" or dev.type == "mps"):
+        mem_total = 0
+        if args.total_ram != 0:
+            mem_total = args.total_ram * 1024 * 1024 * 1024  # GB -> bytes, matching psutil's units
+        else:
+            mem_total = psutil.virtual_memory().total
         mem_total_torch = mem_total
     else:
         if directml_enabled:
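Pulled out of context, the new CPU/MPS branch amounts to the following; a self-contained sketch assuming the GB-to-bytes conversion noted above (psutil is the real dependency, while cpu_total_memory is a hypothetical name standing in for the branch inside get_total_memory):

import psutil

def cpu_total_memory(total_ram_gb: float = 0) -> int:
    # Honor the --total-ram cap when set; otherwise report all system RAM.
    if total_ram_gb != 0:
        return int(total_ram_gb * 1024 ** 3)  # GB -> bytes
    return psutil.virtual_memory().total

print(cpu_total_memory(4))  # 4294967296 (capped at 4 GB)
print(cpu_total_memory())   # full system RAM in bytes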
@@ -236,8 +240,15 @@ def mac_version():
         return None
 
 total_vram = get_total_memory(get_torch_device()) / (1024 * 1024)
-total_ram = psutil.virtual_memory().total / (1024 * 1024)
-logging.info("Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram))
+
+total_ram = 0
+if args.total_ram != 0:
+    total_ram = args.total_ram * (1024)  # arg in GB
+else:
+    total_ram = psutil.virtual_memory().total / (1024 * 1024)
+logging.info(
+    "Total VRAM {:0.0f} MB, total RAM {:0.0f} MB".format(total_vram, total_ram)
+)
 
 try:
     logging.info("pytorch version: {}".format(torch_version))
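The startup banner picks the capped figure the same way; a rough standalone equivalent of the new logging path (total_ram_gb stands in for args.total_ram, and the VRAM half is omitted since it needs get_total_memory):

import logging
import psutil

logging.basicConfig(level=logging.INFO)

total_ram_gb = 8.0  # stand-in for args.total_ram
if total_ram_gb != 0:
    total_ram = total_ram_gb * 1024  # GB -> MB
else:
    total_ram = psutil.virtual_memory().total / (1024 * 1024)
logging.info("total RAM {:0.0f} MB".format(total_ram))  # -> "total RAM 8192 MB"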