Mirror of https://git.datalinker.icu/kijai/ComfyUI-CogVideoXWrapper.git (synced 2025-12-08 20:34:23 +08:00)
Allow orbit LoRAs with Fun models as well

parent f606d745e9
commit 6302e4b668

@@ -84,7 +84,7 @@
    },
    "widgets_values": [
      49,
      50,
      25,
      6,
      458091243358272,
      "randomize",

@@ -268,7 +268,7 @@
    },
    "widgets_values": [
      49,
      false,
      true,
      0
    ]
  },

@@ -240,12 +240,12 @@ class DownloadAndLoadCogVideoModel:
 
         #LoRAs
         if lora is not None:
-            from .lora_utils import merge_lora#, load_lora_into_transformer
-            if "fun" in model.lower():
-                for l in lora:
-                    log.info(f"Merging LoRA weights from {l['path']} with strength {l['strength']}")
-                    transformer = merge_lora(transformer, l["path"], l["strength"])
-            else:
+            # from .lora_utils import merge_lora#, load_lora_into_transformer
+            # if "fun" in model.lower():
+            #     for l in lora:
+            #         log.info(f"Merging LoRA weights from {l['path']} with strength {l['strength']}")
+            #         transformer = merge_lora(transformer, l["path"], l["strength"])
+            #else:
                 adapter_list = []
                 adapter_weights = []
                 for l in lora:
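
With the Fun-specific merge branch commented out, every model type, Fun included, falls through to the adapter_list / adapter_weights path, which is what lets the orbit LoRAs named in the commit title be applied to Fun checkpoints. As a rough illustration of that adapter-style flow (not the node code itself), here is a minimal sketch using the diffusers LoRA loader API, assuming a diffusers build where CogVideoXPipeline exposes load_lora_weights and set_adapters; the file path and adapter name are placeholders:

# Sketch only: loading an orbit-style LoRA as a named adapter instead of merging it.
import torch
from diffusers import CogVideoXPipeline

pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)

# Register the LoRA as a named adapter on the pipeline (placeholder path/name).
pipe.load_lora_weights("loras/orbit_left_lora.safetensors", adapter_name="orbit_left")

# Activate it with a per-adapter strength, mirroring adapter_list / adapter_weights above.
pipe.set_adapters(["orbit_left"], adapter_weights=[1.0])
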
@@ -653,27 +653,22 @@ class CogVideoXModelLoader:
         with open(transformer_config_path) as f:
             transformer_config = json.load(f)
 
-        with init_empty_weights():
             if model_type in ["I2V", "I2V_5b", "fun_5b_pose", "5b_I2V_1_5"]:
                 transformer_config["in_channels"] = 32
                 if "1_5" in model_type:
                     transformer_config["ofs_embed_dim"] = 512
-            elif "fun" in model_type:
-                transformer_config["in_channels"] = 33
-            else:
-                transformer_config["in_channels"] = 16
-                if "1_5" in model_type:
-                    transformer_config["use_learned_positional_embeddings"] = False
-                    transformer_config["patch_size_t"] = 2
-                    transformer_config["patch_bias"] = False
-                    transformer_config["sample_height"] = 300
-                    transformer_config["sample_width"] = 300
+            elif "fun" in model_type:
+                transformer_config["in_channels"] = 33
+            else:
+                if "1_5" in model_type:
+                    transformer_config["use_learned_positional_embeddings"] = False
+                    transformer_config["patch_size_t"] = 2
+                    transformer_config["patch_bias"] = False
+                    #transformer_config["sample_height"] = 300 todo: check if this is needed
+                    #transformer_config["sample_width"] = 300
+                transformer_config["in_channels"] = 16
 
+        with init_empty_weights():
             transformer = CogVideoXTransformer3DModel.from_config(transformer_config)
 
         #load weights
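
In both versions of this block the transformer is built from the edited config dict under accelerate's init_empty_weights, so the module is created on the meta device and only receives real tensors when the checkpoint weights are loaded afterwards. A minimal standalone sketch of that pattern, with a placeholder config path and a single example override:

# Sketch of the empty-init pattern (placeholder config path, one example override).
import json
from accelerate import init_empty_weights
from diffusers import CogVideoXTransformer3DModel

with open("transformer_config.json") as f:
    transformer_config = json.load(f)

# Adjust the config for the chosen variant before instantiation,
# e.g. 16 latent channels for a plain text-to-video checkpoint.
transformer_config["in_channels"] = 16

# No weight memory is allocated here; real tensors are loaded into the
# module afterwards from the checkpoint file.
with init_empty_weights():
    transformer = CogVideoXTransformer3DModel.from_config(transformer_config)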