Merge pull request #202 from DocShotgun/main

Fix nightly torch check for fp16 accumulation
This commit is contained in:
Jukka Seppänen 2025-02-28 22:20:19 +02:00 committed by GitHub
commit 6a16be7f21
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -197,13 +197,14 @@ class DiffusionModelLoaderKJ(BaseLoaderKJ):
model_options["dtype"] = torch.float8_e4m3fn
model_options["fp8_optimizations"] = True
try:
if enable_fp16_accumulation:
if enable_fp16_accumulation:
if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"):
torch.backends.cuda.matmul.allow_fp16_accumulation = True
else:
raise RuntimeError("Failed to set fp16 accumulation, this requires pytorch 2.7.0 nightly currently")
else:
if hasattr(torch.backends.cuda.matmul, "allow_fp16_accumulation"):
torch.backends.cuda.matmul.allow_fp16_accumulation = False
except:
raise RuntimeError("Failed to set fp16 accumulation, this requires pytorch 2.7.0 nightly currently")
unet_path = folder_paths.get_full_path_or_raise("diffusion_models", model_name)
model = comfy.sd.load_diffusion_model(unet_path, model_options=model_options)
@@ -673,4 +674,4 @@ class TorchCompileCosmosModel:
except:
raise RuntimeError("Failed to compile model")
return (m, )
return (m, )