mirror of
https://git.datalinker.icu/kijai/ComfyUI-KJNodes.git
synced 2026-05-02 09:40:05 +08:00
Add TorchCompileModelWanVideoV2
This commit is contained in:
parent
16f60e53e5
commit
07b804cb3f
@ -182,6 +182,7 @@ NODE_CONFIG = {
|
|||||||
"TorchCompileLTXModel": {"class": TorchCompileLTXModel, "name": "TorchCompileLTXModel"},
|
"TorchCompileLTXModel": {"class": TorchCompileLTXModel, "name": "TorchCompileLTXModel"},
|
||||||
"TorchCompileCosmosModel": {"class": TorchCompileCosmosModel, "name": "TorchCompileCosmosModel"},
|
"TorchCompileCosmosModel": {"class": TorchCompileCosmosModel, "name": "TorchCompileCosmosModel"},
|
||||||
"TorchCompileModelWanVideo": {"class": TorchCompileModelWanVideo, "name": "TorchCompileModelWanVideo"},
|
"TorchCompileModelWanVideo": {"class": TorchCompileModelWanVideo, "name": "TorchCompileModelWanVideo"},
|
||||||
|
"TorchCompileModelWanVideoV2": {"class": TorchCompileModelWanVideoV2, "name": "TorchCompileModelWanVideoV2"},
|
||||||
"PathchSageAttentionKJ": {"class": PathchSageAttentionKJ, "name": "Patch Sage Attention KJ"},
|
"PathchSageAttentionKJ": {"class": PathchSageAttentionKJ, "name": "Patch Sage Attention KJ"},
|
||||||
"LeapfusionHunyuanI2VPatcher": {"class": LeapfusionHunyuanI2V, "name": "Leapfusion Hunyuan I2V Patcher"},
|
"LeapfusionHunyuanI2VPatcher": {"class": LeapfusionHunyuanI2V, "name": "Leapfusion Hunyuan I2V Patcher"},
|
||||||
"VAELoaderKJ": {"class": VAELoaderKJ, "name": "VAELoader KJ"},
|
"VAELoaderKJ": {"class": VAELoaderKJ, "name": "VAELoader KJ"},
|
||||||
|
|||||||
@ -612,11 +612,11 @@ class TorchCompileModelFluxAdvancedV2:
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
if double_blocks:
|
if double_blocks:
|
||||||
for block in diffusion_model.double_blocks:
|
for i, block in enumerate(diffusion_model.double_blocks):
|
||||||
compile_key_list.append(block)
|
compile_key_list.append(f"diffusion_model.double_blocks.{i}")
|
||||||
if single_blocks:
|
if single_blocks:
|
||||||
for block in diffusion_model.single_blocks:
|
for i, block in enumerate(diffusion_model.single_blocks):
|
||||||
compile_key_list.append(block)
|
compile_key_list.append(f"diffusion_model.single_blocks.{i}")
|
||||||
|
|
||||||
set_torch_compile_wrapper(model=m, keys=compile_key_list, backend=backend, mode=mode, dynamic=dynamic, fullgraph=fullgraph)
|
set_torch_compile_wrapper(model=m, keys=compile_key_list, backend=backend, mode=mode, dynamic=dynamic, fullgraph=fullgraph)
|
||||||
except:
|
except:
|
||||||
@ -744,6 +744,48 @@ class TorchCompileModelWanVideo:
|
|||||||
raise RuntimeError("Failed to compile model")
|
raise RuntimeError("Failed to compile model")
|
||||||
return (m, )
|
return (m, )
|
||||||
|
|
||||||
|
class TorchCompileModelWanVideoV2:
    """ComfyUI node: wraps a WanVideo model's transformer with torch.compile.

    V2 uses comfy_api's set_torch_compile_wrapper with string key paths
    (e.g. "diffusion_model.blocks.3") instead of compiling modules in place,
    so the compile is applied on a clone and the input model is untouched.
    """

    def __init__(self):
        # Tracks whether this node instance has already compiled a model.
        # NOTE(review): not read anywhere in this class as shown — presumably
        # kept for parity with the V1 node; confirm before removing.
        self._compiled = False

    @classmethod
    def INPUT_TYPES(s):
        """Declare the node's input sockets for the ComfyUI graph editor."""
        return {
            "required": {
                "model": ("MODEL",),
                "backend": (["inductor","cudagraphs"], {"default": "inductor"}),
                "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
                "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
                "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}),
                "compile_transformer_blocks_only": ("BOOLEAN", {"default": True, "tooltip": "Compile only transformer blocks, faster compile and less error prone"}),
                "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}),
            },
        }
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"

    CATEGORY = "KJNodes/torchcompile"
    EXPERIMENTAL = True

    def patch(self, model, backend, fullgraph, mode, dynamic, dynamo_cache_size_limit, compile_transformer_blocks_only):
        """Return a clone of `model` whose diffusion model is torch.compile'd.

        Args:
            model: ComfyUI MODEL patcher; cloned, never mutated.
            backend / fullgraph / mode / dynamic: forwarded to torch.compile
                via set_torch_compile_wrapper.
            dynamo_cache_size_limit: written to
                torch._dynamo.config.cache_size_limit (global, process-wide).
            compile_transformer_blocks_only: when True, compile each
                transformer block individually (faster, more robust compile);
                when False, compile the whole diffusion model as one graph.

        Raises:
            RuntimeError: if wrapping the model for compilation fails; the
                original exception is chained as the cause.
        """
        # Imported lazily: comfy_api is only available inside a ComfyUI runtime.
        from comfy_api.torch_helpers import set_torch_compile_wrapper
        m = model.clone()
        diffusion_model = m.get_model_object("diffusion_model")
        # Raise dynamo's recompile cache ceiling; per-block compilation of many
        # blocks easily exceeds the default limit.
        torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit
        try:
            if compile_transformer_blocks_only:
                # One key path per transformer block; set_torch_compile_wrapper
                # resolves these dotted paths against the cloned model.
                compile_key_list = [
                    f"diffusion_model.blocks.{i}"
                    for i in range(len(diffusion_model.blocks))
                ]
            else:
                compile_key_list = ["diffusion_model"]

            set_torch_compile_wrapper(model=m, keys=compile_key_list, backend=backend, mode=mode, dynamic=dynamic, fullgraph=fullgraph)
        except Exception as e:
            # Was a bare `except:` that discarded the cause (and would even
            # trap KeyboardInterrupt); chain it so the real error is visible.
            raise RuntimeError("Failed to compile model") from e
        return (m, )
|
||||||
|
|
||||||
class TorchCompileVAE:
|
class TorchCompileVAE:
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self._compiled_encoder = False
|
self._compiled_encoder = False
|
||||||
|
|||||||
Loading…
x
Reference in New Issue
Block a user