Log when scaled fp8 diff path is used

This commit is contained in:
Dango233 2025-10-28 22:30:26 -04:00
parent cedea47902
commit e6ee59b4c2

View File

@@ -240,6 +240,9 @@ class LoraExtractKJ:
         if scaled_fp8_ft is not None and scaled_fp8_orig is not None:
             comfy.model_management.load_models_gpu([finetuned_model, original_model], force_patch_weights=True)
+            logging.info(
+                "LoraExtractKJ: detected scaled fp8 weights on both models; using high-precision diff path."
+            )
             sd_override = _build_scaled_fp8_diff(
                 finetuned_model, original_model, "diffusion_model.", bias_diff
             )