Mirror of https://git.datalinker.icu/kijai/ComfyUI-CogVideoXWrapper.git, synced 2025-12-08 20:34:23 +08:00

Commit feeff366b5 ("update"), parent 6302e4b668
examples/cogvideox_Fun_180_orbit_01.json (new file, 1922 additions)
File diff suppressed because it is too large.
@@ -240,37 +240,37 @@ class DownloadAndLoadCogVideoModel:
         #LoRAs
         if lora is not None:
-            # from .lora_utils import merge_lora#, load_lora_into_transformer
-            # if "fun" in model.lower():
-            #     for l in lora:
-            #         log.info(f"Merging LoRA weights from {l['path']} with strength {l['strength']}")
-            #         transformer = merge_lora(transformer, l["path"], l["strength"])
-            #else:
-            adapter_list = []
-            adapter_weights = []
-            for l in lora:
-                fuse = True if l["fuse_lora"] else False
-                lora_sd = load_torch_file(l["path"])
-                for key, val in lora_sd.items():
-                    if "lora_B" in key:
-                        lora_rank = val.shape[1]
-                        break
-                log.info(f"Merging rank {lora_rank} LoRA weights from {l['path']} with strength {l['strength']}")
-                adapter_name = l['path'].split("/")[-1].split(".")[0]
-                adapter_weight = l['strength']
-                pipe.load_lora_weights(l['path'], weight_name=l['path'].split("/")[-1], lora_rank=lora_rank, adapter_name=adapter_name)
-
-                #transformer = load_lora_into_transformer(lora, transformer)
-                adapter_list.append(adapter_name)
-                adapter_weights.append(adapter_weight)
-            for l in lora:
-                pipe.set_adapters(adapter_list, adapter_weights=adapter_weights)
-                if fuse:
-                    lora_scale = 1
-                    dimension_loras = ["orbit", "dimensionx"] # for now dimensionx loras need scaling
-                    if any(item in lora[-1]["path"].lower() for item in dimension_loras):
-                        lora_scale = lora_scale / lora_rank
-                    pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"])
+            try:
+                adapter_list = []
+                adapter_weights = []
+                for l in lora:
+                    fuse = True if l["fuse_lora"] else False
+                    lora_sd = load_torch_file(l["path"])
+                    for key, val in lora_sd.items():
+                        if "lora_B" in key:
+                            lora_rank = val.shape[1]
+                            break
+                    log.info(f"Merging rank {lora_rank} LoRA weights from {l['path']} with strength {l['strength']}")
+                    adapter_name = l['path'].split("/")[-1].split(".")[0]
+                    adapter_weight = l['strength']
+                    pipe.load_lora_weights(l['path'], weight_name=l['path'].split("/")[-1], lora_rank=lora_rank, adapter_name=adapter_name)
+
+                    #transformer = load_lora_into_transformer(lora, transformer)
+                    adapter_list.append(adapter_name)
+                    adapter_weights.append(adapter_weight)
+                for l in lora:
+                    pipe.set_adapters(adapter_list, adapter_weights=adapter_weights)
+                    if fuse:
+                        lora_scale = 1
+                        dimension_loras = ["orbit", "dimensionx"] # for now dimensionx loras need scaling
+                        if any(item in lora[-1]["path"].lower() for item in dimension_loras):
+                            lora_scale = lora_scale / lora_rank
+                        pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"])
+            except: #Fun trainer LoRAs are loaded differently
+                from .lora_utils import merge_lora
+                for l in lora:
+                    log.info(f"Merging LoRA weights from {l['path']} with strength {l['strength']}")
+                    transformer = merge_lora(transformer, l["path"], l["strength"])
 
         if "fused" in attention_mode:
             from diffusers.models.attention import Attention
 
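For readers skimming the hunk above: the node infers the LoRA rank from the second dimension of any `lora_B` matrix in the checkpoint, and DimensionX/orbit LoRAs get their fuse scale divided by that rank before `pipe.fuse_lora()` is called. Below is a minimal standalone sketch of just that logic; the state dict contents and the filename are hypothetical stand-ins for a real checkpoint loaded via `load_torch_file`:

```python
import torch

# Hypothetical LoRA state dict standing in for load_torch_file(l["path"]);
# real checkpoints pair each lora_A (rank x in_dim) with a lora_B (out_dim x rank).
lora_sd = {
    "transformer.blocks.0.attn1.to_q.lora_A.weight": torch.zeros(16, 3072),
    "transformer.blocks.0.attn1.to_q.lora_B.weight": torch.zeros(3072, 16),
}

# Rank detection as in the node: the second dim of the first lora_B hit is the rank.
lora_rank = None
for key, val in lora_sd.items():
    if "lora_B" in key:
        lora_rank = val.shape[1]
        break

# DimensionX/orbit LoRAs currently need the fuse scale divided by the rank.
lora_path = "DimensionX_orbit_left.safetensors"  # hypothetical filename
lora_scale = 1
if any(item in lora_path.lower() for item in ["orbit", "dimensionx"]):
    lora_scale = lora_scale / lora_rank

print(lora_rank, lora_scale)  # 16 0.0625
```

Conceptually, the `merge_lora` fallback in the `except` branch skips this adapter bookkeeping and instead bakes the low-rank update straight into the base weights (W' = W + strength * B @ A), which is why Fun-trainer LoRAs take a separate path.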
readme.md (27 additions)
@@ -1,5 +1,32 @@
 # WORK IN PROGRESS
 
+## BREAKING Update8
+
+This is a big one, and unfortunately the necessary cleanup and refactoring will break every old workflow as it is.
+I apologize for the inconvenience; if I didn't do this now I'd keep making it worse until maintenance became too much of a chore, so from my point of view there was no choice.
+
+*Please either use the new workflows or fix the nodes in your old ones before posting issue reports!*
+
+The old version will be kept in a legacy branch, but will not be maintained.
+
+- Support CogVideoX 1.5 models
+- Major code cleanup (it was bad, it still isn't great, wip)
+- Merge Fun-model functionality into the main pipeline:
+  - All Fun-specific nodes are gone, besides the image encode node for Fun-InP models
+  - The main CogVideo Sampler works with Fun models
+  - DimensionX LoRAs now work with Fun models as well
+
+- Remove width/height from the sampler widgets and detect them from the input instead; this means text2vid now requires using empty latents
+- Separate the VAE from the model, allowing use of an fp32 VAE
+- Add the ability to load some of the non-GGUF models as single files (only a few available for now: https://huggingface.co/Kijai/CogVideoX-comfy)
+- Add some torchao quantizations as options
+- Add interpolation as an option for the main encode node; the old interpolation-specific node is gone
+- torch.compile optimizations
+- Remove PAB in favor of FasterCache and cleaner code
+- Other smaller things I've forgotten about at this point
+
+For Fun-model-based workflows the change is more drastic; for others, migrating generally means setting up many of the nodes again.
+
 ## Update7
 
 - Refactored the Fun version's sampler to accept any resolution; this should make it a lot simpler to use with Tora. **BREAKS OLD WORKFLOWS**, old FunSampler nodes need to be remade.
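On the "torch.compile optimizations" bullet: the general pattern is to compile the denoising module once and let the first forward call trigger graph capture. A toy sketch of that pattern, with an `nn.Sequential` standing in for the CogVideoX transformer; the `mode` and `fullgraph` settings are assumptions, not the wrapper's actual flags:

```python
import torch
import torch.nn as nn

# Toy stand-in for the CogVideoX transformer module the wrapper would compile.
transformer = nn.Sequential(nn.Linear(64, 256), nn.GELU(), nn.Linear(256, 64))

# Compile the forward pass; mode="default" and fullgraph=False are assumptions.
transformer = torch.compile(transformer, mode="default", fullgraph=False)

x = torch.randn(2, 64)
out = transformer(x)  # first call triggers compilation, later calls reuse it
print(out.shape)  # torch.Size([2, 64])
```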