Mirror of https://git.datalinker.icu/kijai/ComfyUI-KJNodes.git (synced 2025-12-09 04:44:30 +08:00)

Compare commits: 8ed1655f3b ... 300721ea68 (7 commits)
| SHA1 |
|---|
| 300721ea68 |
| 06a60ac3fe |
| a7ce03e735 |
| f37df472df |
| 390d05fe7e |
| f0ed965cd9 |
| 6717a58837 |
@@ -211,6 +211,7 @@ NODE_CONFIG = {
     "GGUFLoaderKJ": {"class": GGUFLoaderKJ, "name": "GGUF Loader KJ"},
     "LatentInpaintTTM": {"class": LatentInpaintTTM, "name": "Latent Inpaint TTM"},
     "NABLA_AttentionKJ": {"class": NABLA_AttentionKJ, "name": "NABLA Attention KJ"},
+    "TorchCompileModelAdvanced": {"class": TorchCompileModelAdvanced, "name": "TorchCompileModelAdvanced"},

     #instance diffusion
     "CreateInstanceDiffusionTracking": {"class": CreateInstanceDiffusionTracking},
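Note: NODE_CONFIG is the single registration table the pack builds its ComfyUI mappings from. As a rough, hypothetical sketch (not necessarily the repo's exact helper), a dict in this shape expands into the NODE_CLASS_MAPPINGS and NODE_DISPLAY_NAME_MAPPINGS that ComfyUI reads:

```python
# Hypothetical helper illustrating how a NODE_CONFIG-style dict can be turned
# into the two module-level mappings ComfyUI discovers nodes through.
def build_mappings(node_config):
    class_mappings = {}
    display_name_mappings = {}
    for key, info in node_config.items():
        class_mappings[key] = info["class"]
        # "name" is optional; fall back to the registration key.
        display_name_mappings[key] = info.get("name", key)
    return class_mappings, display_name_mappings

# NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = build_mappings(NODE_CONFIG)
```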
@@ -176,7 +176,7 @@ Saves an image and mask as .PNG with the mask as the alpha channel.
         def file_counter():
             max_counter = 0
             # Loop through the existing files
-            for existing_file in os.listdir(full_output_folder):
+            for existing_file in sorted(os.listdir(full_output_folder)):
                 # Check if the file matches the expected format
                 match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file)
                 if match:
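Note: several hunks in this compare wrap os.listdir() in sorted(). os.listdir() returns entries in an arbitrary, filesystem-dependent order, so sorting makes file counting and loading order reproducible across runs and platforms. A minimal sketch (folder path and file names are hypothetical):

```python
import os

def list_files_deterministically(folder):
    # os.listdir() order is arbitrary; sorting makes iteration reproducible.
    return sorted(os.listdir(folder))

# The sort is lexicographic, so zero-padded frame numbers order as expected,
# while unpadded ones do not:
print(sorted(["frame_10.png", "frame_2.png", "frame_01.png"]))
# ['frame_01.png', 'frame_10.png', 'frame_2.png']
```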
@@ -785,8 +785,8 @@ class GetImageSizeAndCount:
             "image": ("IMAGE",),
         }}

-    RETURN_TYPES = ("IMAGE","INT", "INT", "INT",)
-    RETURN_NAMES = ("image", "width", "height", "count",)
+    RETURN_TYPES = ("IMAGE","INT", "INT", "INT", "INT",)
+    RETURN_NAMES = ("image", "width", "height", "count", "channels",)
     FUNCTION = "getsize"
     CATEGORY = "KJNodes/image"
     DESCRIPTION = """
@@ -799,9 +799,10 @@ and passes it through unchanged.
         width = image.shape[2]
         height = image.shape[1]
         count = image.shape[0]
+        channels = image.shape[3]
         return {"ui": {
-            "text": [f"{count}x{width}x{height}"]},
-            "result": (image, width, height, count)
+            "text": [f"{count}x{width}x{height}x{channels}"]},
+            "result": (image, width, height, count, channels)
         }

 class GetLatentSizeAndCount:
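Note: ComfyUI IMAGE tensors are batched [batch, height, width, channels] float tensors, which is why the node reads count, height, width, and the new channels output from shape[0], shape[1], shape[2], and shape[3]. A minimal sketch with a hypothetical tensor:

```python
import torch

image = torch.zeros(4, 512, 768, 3)  # hypothetical batch of 4 RGB frames, [B, H, W, C]

count, height, width, channels = image.shape[0], image.shape[1], image.shape[2], image.shape[3]
print(f"{count}x{width}x{height}x{channels}")  # 4x768x512x3, matching the node's UI text
```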
@@ -2981,7 +2982,7 @@ class LoadImagesFromFolderKJ:
                     except OSError:
                         pass
         else:
-            for file in os.listdir(folder):
+            for file in sorted(os.listdir(folder)):
                 if any(file.lower().endswith(ext) for ext in valid_extensions):
                     path = os.path.join(folder, file)
                     try:
@@ -3043,7 +3044,7 @@ class LoadImagesFromFolderKJ:
                     if any(file.lower().endswith(ext) for ext in valid_extensions):
                         image_paths.append(os.path.join(root, file))
         else:
-            for file in os.listdir(folder):
+            for file in sorted(os.listdir(folder)):
                 if any(file.lower().endswith(ext) for ext in valid_extensions):
                     image_paths.append(os.path.join(folder, file))

@@ -3964,7 +3965,7 @@ class LoadVideosFromFolder:
             raise ImportError("This node requires ComfyUI-VideoHelperSuite to be installed.")
         videos_list = []
         filenames = []
-        for f in os.listdir(kwargs['video']):
+        for f in sorted(os.listdir(kwargs['video'])):
             if os.path.isfile(os.path.join(kwargs['video'], f)):
                 file_parts = f.split('.')
                 if len(file_parts) > 1 and (file_parts[-1].lower() in ['webm', 'mp4', 'mkv', 'gif', 'mov']):
@@ -73,6 +73,9 @@ def get_sage_func(sage_attention, allow_compile=False):

     @wrap_attn
     def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False, **kwargs):
+        in_dtype = v.dtype
+        if q.dtype == torch.float32 or k.dtype == torch.float32 or v.dtype == torch.float32:
+            q, k, v = q.to(torch.float16), k.to(torch.float16), v.to(torch.float16)
         if skip_reshape:
             b, _, _, dim_head = q.shape
             tensor_layout="HND"
@@ -91,7 +94,7 @@ def get_sage_func(sage_attention, allow_compile=False):
                 # add a heads dimension if there isn't already one
                 if mask.ndim == 3:
                     mask = mask.unsqueeze(1)
-        out = sage_func(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout)
+        out = sage_func(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout).to(in_dtype)
         if tensor_layout == "HND":
             if not skip_output_reshape:
                 out = (
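Note: SageAttention kernels run in reduced precision, so the change above downcasts float32 q/k/v to float16 and casts the kernel output back to the caller's original dtype. A minimal sketch of that dtype round-trip, using PyTorch's scaled_dot_product_attention as a stand-in for the actual sage_func:

```python
import torch
import torch.nn.functional as F

def attention_fp16_roundtrip(q, k, v, mask=None):
    # Remember the caller's dtype, run the kernel in fp16, cast the result back.
    in_dtype = v.dtype
    if q.dtype == torch.float32 or k.dtype == torch.float32 or v.dtype == torch.float32:
        q, k, v = q.to(torch.float16), k.to(torch.float16), v.to(torch.float16)
    out = F.scaled_dot_product_attention(q, k, v, attn_mask=mask)  # stand-in for sage_func
    return out.to(in_dtype)
```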
@@ -853,7 +856,60 @@ class TorchCompileModelWanVideoV2:
             raise RuntimeError("Failed to compile model")

         return (m, )

+class TorchCompileModelAdvanced:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("MODEL",),
+                "backend": (["inductor","cudagraphs"], {"default": "inductor"}),
+                "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
+                "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
+                "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}),
+                "compile_transformer_blocks_only": ("BOOLEAN", {"default": True, "tooltip": "Compile only transformer blocks, faster compile and less error prone"}),
+                "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}),
+                "debug_compile_keys": ("BOOLEAN", {"default": False, "tooltip": "Print the compile keys used for torch.compile"}),
+            },
+        }
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "patch"
+    CATEGORY = "KJNodes/torchcompile"
+    DESCRIPTION = "Advanced torch.compile patching for diffusion models."
+    EXPERIMENTAL = True
+
+    def patch(self, model, backend, fullgraph, mode, dynamic, dynamo_cache_size_limit, compile_transformer_blocks_only, debug_compile_keys):
+        from comfy_api.torch_helpers import set_torch_compile_wrapper
+        m = model.clone()
+        diffusion_model = m.get_model_object("diffusion_model")
+        torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit
+
+        try:
+            if compile_transformer_blocks_only:
+                layer_types = ["double_blocks", "single_blocks", "layers", "transformer_blocks", "blocks", "visual_transformer_blocks", "text_transformer_blocks"]
+                compile_key_list = []
+                for layer_name in layer_types:
+                    if hasattr(diffusion_model, layer_name):
+                        blocks = getattr(diffusion_model, layer_name)
+                        for i in range(len(blocks)):
+                            compile_key_list.append(f"diffusion_model.{layer_name}.{i}")
+                if not compile_key_list:
+                    logging.warning("No known transformer blocks found to compile, compiling entire diffusion model instead")
+                elif debug_compile_keys:
+                    logging.info("TorchCompileModelAdvanced: Compile key list:")
+                    for key in compile_key_list:
+                        logging.info(f" - {key}")
+            if not compile_key_list:
+                compile_key_list = ["diffusion_model"]
+
+            set_torch_compile_wrapper(model=m, keys=compile_key_list, backend=backend, mode=mode, dynamic=dynamic, fullgraph=fullgraph)
+        except:
+            raise RuntimeError("Failed to compile model")
+
+        return (m, )
+
+
 class TorchCompileModelQwenImage:
     @classmethod
     def INPUT_TYPES(s):
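Note: with compile_transformer_blocks_only enabled, the new node avoids compiling the whole diffusion model and instead collects per-block compile keys such as diffusion_model.blocks.0 for whichever block container the model exposes. A minimal sketch of that key collection against a hypothetical stand-in module:

```python
import torch.nn as nn

class DummyDiT(nn.Module):
    # Hypothetical stand-in for a diffusion transformer exposing a "blocks" ModuleList.
    def __init__(self):
        super().__init__()
        self.blocks = nn.ModuleList(nn.Linear(8, 8) for _ in range(3))

diffusion_model = DummyDiT()
layer_types = ["double_blocks", "single_blocks", "layers", "transformer_blocks", "blocks",
               "visual_transformer_blocks", "text_transformer_blocks"]

compile_key_list = []
for layer_name in layer_types:
    if hasattr(diffusion_model, layer_name):
        blocks = getattr(diffusion_model, layer_name)
        compile_key_list.extend(f"diffusion_model.{layer_name}.{i}" for i in range(len(blocks)))

if not compile_key_list:
    compile_key_list = ["diffusion_model"]  # fall back to compiling the whole model

print(compile_key_list)
# ['diffusion_model.blocks.0', 'diffusion_model.blocks.1', 'diffusion_model.blocks.2']
```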
@@ -175,6 +175,7 @@ app.registerExtension({
                     this.outputs[1]["label"] = "width"
                     this.outputs[2]["label"] = "height"
                     this.outputs[3]["label"] = "count"
+                    this.outputs[4]["label"] = "channels"
                     return v;
                 }
                 //const onGetImageSizeExecuted = nodeType.prototype.onExecuted;
@@ -187,6 +188,7 @@ app.registerExtension({
                     this.outputs[1]["label"] = values[1] + " width"
                     this.outputs[2]["label"] = values[2] + " height"
                     this.outputs[3]["label"] = values[0] + " count"
+                    this.outputs[4]["label"] = values[3] + " channels"
                     return r
                 }
                 break;