From 276b3b86d94d442be4d57490a95bb8083e25e9db Mon Sep 17 00:00:00 2001
From: kijai <40791699+kijai@users.noreply.github.com>
Date: Sat, 12 Oct 2024 02:24:53 +0300
Subject: [PATCH] possible fix for NaNs when using LoRA and fp8

---
 nodes.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/nodes.py b/nodes.py
index 82c593b..e1caadd 100644
--- a/nodes.py
+++ b/nodes.py
@@ -372,7 +372,10 @@ class DownloadAndLoadCogVideoModel:
                     if "patch_embed" not in name:
                         param.data = param.data.to(torch.float8_e4m3fn)
             else:
-                transformer.to(torch.float8_e4m3fn)
+                #transformer.to(torch.float8_e4m3fn)
+                for name, param in transformer.named_parameters():
+                    if "lora" not in name:
+                        param.data = param.data.to(torch.float8_e4m3fn)
 
         if fp8_transformer == "fastmode":
             from .fp8_optimization import convert_fp8_linear