From 8643d75a6b98dfd1f39eb97ea53e1c927314200a Mon Sep 17 00:00:00 2001
From: Dango233
Date: Tue, 28 Oct 2025 22:40:05 -0400
Subject: [PATCH] Extend fp8 diff path when either model is scaled

---
 nodes/lora_nodes.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/nodes/lora_nodes.py b/nodes/lora_nodes.py
index 543757f..6b60cfc 100644
--- a/nodes/lora_nodes.py
+++ b/nodes/lora_nodes.py
@@ -237,11 +237,14 @@ class LoraExtractKJ:
         scaled_fp8_ft = getattr(getattr(finetuned_model.model, "model_config", None), "scaled_fp8", None)
         scaled_fp8_orig = getattr(getattr(original_model.model, "model_config", None), "scaled_fp8", None)
+        scaled_fp8_present = scaled_fp8_ft is not None or scaled_fp8_orig is not None
 
-        if scaled_fp8_ft is not None and scaled_fp8_orig is not None:
+        if scaled_fp8_present:
             comfy.model_management.load_models_gpu([finetuned_model, original_model], force_patch_weights=True)
             logging.info(
-                "LoraExtractKJ: detected scaled fp8 weights on both models; using high-precision diff path."
+                "LoraExtractKJ: detected scaled fp8 weights (finetuned=%s, original=%s); using high-precision diff path.",
+                scaled_fp8_ft is not None,
+                scaled_fp8_orig is not None,
             )
             sd_override = _build_scaled_fp8_diff(
                 finetuned_model, original_model, "diffusion_model.", bias_diff