From e52dc36bc57209951deb064520d0b08cc8c20f33 Mon Sep 17 00:00:00 2001
From: kijai <40791699+kijai@users.noreply.github.com>
Date: Wed, 20 Nov 2024 21:28:53 +0200
Subject: [PATCH] Update model_loading.py

---
 model_loading.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/model_loading.py b/model_loading.py
index 8308b68..c2883ff 100644
--- a/model_loading.py
+++ b/model_loading.py
@@ -270,13 +270,13 @@ class DownloadAndLoadCogVideoModel:
                     transformer = merge_lora(transformer, l["path"], l["strength"])
                 except:
                     raise ValueError(f"Can't recognize LoRA {l['path']}")
-
-            pipe.set_adapters(adapter_list, adapter_weights=adapter_weights)
-            if fuse:
-                lora_scale = 1
-                if dimensionx_lora:
-                    lora_scale = lora_scale / lora_rank
-                pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"])
+            if adapter_list:
+                pipe.set_adapters(adapter_list, adapter_weights=adapter_weights)
+                if fuse:
+                    lora_scale = 1
+                    if dimensionx_lora:
+                        lora_scale = lora_scale / lora_rank
+                    pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"])
         if "fused" in attention_mode:
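
Note: the following is a minimal standalone sketch of the control flow this patch introduces (skip set_adapters/fuse_lora when no adapters were registered). DummyPipe, apply_loras, and the sample adapter name and rank are illustrative stand-ins, not part of the repository.

    # Sketch of the guarded adapter setup after the patch, under the
    # assumption that adapter_list is empty when no LoRA was loaded.
    class DummyPipe:
        # Stand-in for the CogVideoX pipeline; only the call order matters here.
        def set_adapters(self, adapter_list, adapter_weights=None):
            print(f"set_adapters({adapter_list}, weights={adapter_weights})")

        def fuse_lora(self, lora_scale=1.0, components=None):
            print(f"fuse_lora(lora_scale={lora_scale}, components={components})")


    def apply_loras(pipe, adapter_list, adapter_weights, fuse, dimensionx_lora, lora_rank):
        # After the patch, both calls only run when at least one adapter exists,
        # so LoRA-free model loads no longer hit this code path.
        if adapter_list:
            pipe.set_adapters(adapter_list, adapter_weights=adapter_weights)
            if fuse:
                lora_scale = 1
                if dimensionx_lora:
                    # DimensionX LoRAs are scaled down by their rank before fusing.
                    lora_scale = lora_scale / lora_rank
                pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"])


    # Illustrative values only.
    apply_loras(DummyPipe(), ["orbit_left"], [1.0], fuse=True,
                dimensionx_lora=True, lora_rank=256)
    apply_loras(DummyPipe(), [], [], fuse=True,
                dimensionx_lora=False, lora_rank=None)  # no adapters: both calls skipped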