Merge f43ff9fcbdd183b0ec5bd18524c10061b2619eec into fd271dedfde6e192a1f1a025521070876e89e04a

rattus 2025-12-08 20:34:51 +09:00 committed by GitHub
commit 92747943b4


@@ -132,14 +132,14 @@ class LowVramPatch:
     def __call__(self, weight):
         return comfy.lora.calculate_weight(self.patches[self.key], weight, self.key, intermediate_dtype=weight.dtype)
 
-#The above patch logic may cast up the weight to fp32, and do math. Go with fp32 x 3
-LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR = 3
+LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR = 2
 
 def low_vram_patch_estimate_vram(model, key):
     weight, set_func, convert_func = get_key_weight(model, key)
     if weight is None:
         return 0
-    return weight.numel() * torch.float32.itemsize * LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR
+    model_dtype = getattr(model, "manual_cast_dtype", torch.float32)
+    return weight.numel() * model_dtype.itemsize * LOWVRAM_PATCH_ESTIMATE_MATH_FACTOR
 
 def get_key_weight(model, key):
     set_func = None
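
For a sense of what the new estimate changes in practice, here is a minimal standalone sketch comparing the old and new formulas; the weight shape and cast dtype below are hypothetical examples, not values taken from the commit.

import torch

# Hypothetical layer weight; only numel() matters for the estimate.
weight = torch.empty(4096, 4096, dtype=torch.float16)

# Old estimate: always assumes fp32 math, with a factor of 3.
old_estimate = weight.numel() * torch.float32.itemsize * 3

# New estimate: uses the model's cast dtype (fp16 assumed here), with a factor of 2.
model_dtype = torch.float16  # stands in for model.manual_cast_dtype
new_estimate = weight.numel() * model_dtype.itemsize * 2

print(f"old: {old_estimate / 1024**2:.1f} MiB")  # old: 192.0 MiB
print(f"new: {new_estimate / 1024**2:.1f} MiB")  # new: 64.0 MiB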