Mirror of https://git.datalinker.icu/kijai/ComfyUI-CogVideoXWrapper.git
Synced 2026-04-30 21:52:19 +08:00
attention compile works with higher cache_size_limit

commit fb246f95ef (parent a630bb3314)
@@ -71,7 +71,7 @@ class CogVideoXAttnProcessor2_0:
         if not hasattr(F, "scaled_dot_product_attention"):
             raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
 
-    @torch.compiler.disable()
+    #@torch.compiler.disable()
     def __call__(
         self,
         attn: Attention,
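The hunk above removes @torch.compiler.disable() from the attention processor, which ties in with the commit message: attention can now be compiled once TorchDynamo's recompile cache limit is raised. Below is a minimal sketch of that setting, assuming a recent PyTorch 2.x; the stand-in module and the value 64 are illustrative, not code from this repository.

import torch
import torch._dynamo

# Raise TorchDynamo's recompile cache limit (the default is small, 8 in recent
# releases) so repeated recompilations do not fall back to eager execution.
torch._dynamo.config.cache_size_limit = 64

# Stand-in attention module; in the wrapper this role is played by the CogVideoX
# transformer whose processor no longer carries @torch.compiler.disable().
attn = torch.nn.MultiheadAttention(embed_dim=64, num_heads=4, batch_first=True)
compiled_attn = torch.compile(attn)

x = torch.randn(1, 16, 64)
out, _ = compiled_attn(x, x, x)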
@@ -262,7 +262,10 @@ class DownloadAndLoadCogVideoModel:
             for l in lora:
                 pipe.set_adapters(adapter_list, adapter_weights=adapter_weights)
                 if fuse:
-                    pipe.fuse_lora(lora_scale=1 / lora_rank, components=["transformer"])
+                    lora_scale = 1
+                    if "dimensionx" in lora[-1]["path"].lower():
+                        lora_scale = lora_scale / lora_rank
+                    pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"])
 
         if enable_sequential_cpu_offload:
             pipe.enable_sequential_cpu_offload()
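The hunk above changes how LoRA weights are fused: the scale is divided by the LoRA rank only when the last loaded LoRA path looks like a DimensionX one; other LoRAs are fused at full strength. A minimal sketch of the arithmetic lora_scale controls in a generic LoRA merge follows (not code from this repository; the tensor names and rank value are made up for illustration).

import torch

lora_rank = 128                        # hypothetical rank read from the LoRA file
base_weight = torch.randn(64, 64)
lora_A = torch.randn(lora_rank, 64)    # LoRA "down" projection
lora_B = torch.randn(64, lora_rank)    # LoRA "up" projection

# Fusing folds the LoRA delta (B @ A) into the base weight, scaled by lora_scale.
fused_full = base_weight + 1.0 * (lora_B @ lora_A)                      # lora_scale = 1
fused_dimensionx = base_weight + (1.0 / lora_rank) * (lora_B @ lora_A)  # lora_scale = 1 / lora_rank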