Mirror of https://git.datalinker.icu/kijai/ComfyUI-KJNodes.git
Add TorchCompileLTXModel

parent e2e8e7ebe5
commit fb5aa296ae
@@ -158,6 +158,7 @@ NODE_CONFIG = {
     "TorchCompileVAE": {"class": TorchCompileVAE, "name": "TorchCompileVAE"},
     "TorchCompileControlNet": {"class": TorchCompileControlNet, "name": "TorchCompileControlNet"},
     "PatchModelPatcherOrder": {"class": PatchModelPatcherOrder, "name": "Patch Model Patcher Order"},
+    "TorchCompileLTXModel": {"class": TorchCompileLTXModel, "name": "TorchCompileLTXModel"},
     #instance diffusion
     "CreateInstanceDiffusionTracking": {"class": CreateInstanceDiffusionTracking},
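
The first hunk only registers the new node: an entry in NODE_CONFIG is what makes TorchCompileLTXModel visible to ComfyUI. As a rough, hypothetical sketch (the helper name and the two-entry config below are assumptions for illustration, not taken from this commit), a config dict of this shape is typically flattened into the NODE_CLASS_MAPPINGS and NODE_DISPLAY_NAME_MAPPINGS that ComfyUI reads when loading a custom-node package:

# Stand-in classes so the sketch runs on its own; in the repository these are the real node classes.
class TorchCompileLTXModel: pass
class CreateInstanceDiffusionTracking: pass

NODE_CONFIG = {
    "TorchCompileLTXModel": {"class": TorchCompileLTXModel, "name": "TorchCompileLTXModel"},
    "CreateInstanceDiffusionTracking": {"class": CreateInstanceDiffusionTracking},
}

def generate_node_mappings(node_config):
    # Node key -> implementing class.
    class_mappings = {key: info["class"] for key, info in node_config.items()}
    # Display name falls back to the key when the config omits "name".
    display_names = {key: info.get("name", key) for key, info in node_config.items()}
    return class_mappings, display_names

NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = generate_node_mappings(NODE_CONFIG)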
@@ -2180,6 +2180,7 @@ class CheckpointLoaderKJ:
         if sage_attention:
             from sageattention import sageattn
 
+            @torch.compiler.disable()
             def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False):
                 if skip_reshape:
                     b, _, _, dim_head = q.shape
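
The second hunk wraps the SageAttention override in @torch.compiler.disable(), so Dynamo skips tracing that function and it runs eagerly even inside a compiled model, presumably to avoid graph breaks or tracing failures in the custom kernel. A minimal, self-contained illustration of the mechanism (the toy module and function names below are assumptions, not this repository's code):

import torch

@torch.compiler.disable()
def fancy_attention(q, k, v):
    # Stand-in for a custom kernel (e.g. sageattn) that the compiler should not trace;
    # because of the decorator this call always runs eagerly.
    return torch.softmax(q @ k.transpose(-1, -2), dim=-1) @ v

class TinyBlock(torch.nn.Module):
    def forward(self, x):
        # The surrounding module is compiled, but fancy_attention is excluded from the graph.
        return fancy_attention(x, x, x)

compiled = torch.compile(TinyBlock())
print(compiled(torch.randn(2, 4, 8)).shape)  # torch.Size([2, 4, 8])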
@@ -2483,6 +2484,47 @@ class TorchCompileControlNet:
 
         return (controlnet, )
 
+class TorchCompileLTXModel:
+    def __init__(self):
+        self._compiled = False
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "model": ("MODEL",),
+            "backend": (["inductor", "cudagraphs"],),
+            "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
+            "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
+            "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}),
+        }}
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "patch"
+
+    CATEGORY = "KJNodes/experimental"
+    EXPERIMENTAL = True
+
+    def patch(self, model, backend, mode, fullgraph, dynamic):
+        m = model.clone()
+        diffusion_model = m.get_model_object("diffusion_model")
+
+        if not self._compiled:
+            try:
+                for i, block in enumerate(diffusion_model.transformer_blocks):
+                    #print("Compiling double_block", i)
+                    m.add_object_patch(f"diffusion_model.transformer_blocks.{i}", torch.compile(block, mode=mode, dynamic=dynamic, fullgraph=fullgraph, backend=backend))
+                self._compiled = True
+                compile_settings = {
+                    "backend": backend,
+                    "mode": mode,
+                    "fullgraph": fullgraph,
+                    "dynamic": dynamic,
+                }
+                setattr(m.model, "compile_settings", compile_settings)
+            except:
+                raise RuntimeError("Failed to compile model")
+
+        return (m, )
+
 class StyleModelApplyAdvanced:
     @classmethod
     def INPUT_TYPES(s):
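
The new node compiles each entry of diffusion_model.transformer_blocks separately and swaps it back in via add_object_patch on a cloned model, rather than passing the whole diffusion model to a single torch.compile call; the chosen settings are also stored on the model as compile_settings. The same per-block pattern, reduced to a standalone toy transformer so it can be run outside ComfyUI (the ToyTransformer model below is an assumption for illustration, not LTX-Video code):

import torch
import torch.nn as nn

class ToyTransformer(nn.Module):
    def __init__(self, dim=64, depth=2):
        super().__init__()
        self.transformer_blocks = nn.ModuleList(
            nn.TransformerEncoderLayer(d_model=dim, nhead=4, batch_first=True)
            for _ in range(depth)
        )

    def forward(self, x):
        for block in self.transformer_blocks:
            x = block(x)
        return x

model = ToyTransformer()
for i, block in enumerate(model.transformer_blocks):
    # Mirrors m.add_object_patch(f"diffusion_model.transformer_blocks.{i}", ...):
    # each block is replaced by its compiled counterpart.
    model.transformer_blocks[i] = torch.compile(
        block, backend="inductor", mode="default", fullgraph=False, dynamic=False
    )

out = model(torch.randn(1, 16, 64))  # first call triggers compilation, block by block

Compiling at block granularity keeps each graph small and leaves the outer model object untouched, which fits ComfyUI's add_object_patch mechanism of replacing individual submodules on a cloned model.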