Compare commits

...

7 Commits

Author SHA1 Message Date
Makki Shizu
300721ea68
Merge 6717a58837544c64a7966818466e368c31ca1483 into 06a60ac3fec854909f35aba20aa5be39ff59a6e3 2025-12-02 01:30:55 +08:00
Jukka Seppänen
06a60ac3fe
Merge pull request #450 from m-sokes/patch-1
Update image_nodes.py
2025-12-01 11:13:51 +02:00
Sokes
a7ce03e735
Update image_nodes.py
sorted() is needed around os.listdir for proper Linux file sorting.

The reason your files are not in order is that os.listdir() returns filenames in an arbitrary order (usually based on how they are stored in the file system's inode table), not alphabetically or numerically.
On Windows, os.listdir sometimes appears sorted due to how NTFS works, but on Ubuntu (Linux), the raw directory listing is almost never sorted by name.

The Fix

You need to sort the list of files before iterating through them.

Change this line:

    for f in os.listdir(kwargs['video']):

To this:

    for f in sorted(os.listdir(kwargs['video'])):
2025-11-30 18:35:29 -05:00
kijai
f37df472df Kandinsky5 blocks for compile too 2025-11-27 17:57:52 +02:00
kijai
390d05fe7e Add generic TorchCompileModelAdvanced node to handle advanced compile options for all diffusion models
Avoids needing different nodes for different models
2025-11-27 13:59:31 +02:00
kijai
f0ed965cd9 Allow fp32 input for sageattn function 2025-11-27 13:33:41 +02:00
MakkiShizu
6717a58837 Add channels to GetImageSizeAndCount 2025-04-24 18:59:53 +08:00
4 changed files with 70 additions and 10 deletions

View File

@@ -211,6 +211,7 @@ NODE_CONFIG = {
     "GGUFLoaderKJ": {"class": GGUFLoaderKJ, "name": "GGUF Loader KJ"},
     "LatentInpaintTTM": {"class": LatentInpaintTTM, "name": "Latent Inpaint TTM"},
     "NABLA_AttentionKJ": {"class": NABLA_AttentionKJ, "name": "NABLA Attention KJ"},
+    "TorchCompileModelAdvanced": {"class": TorchCompileModelAdvanced, "name": "TorchCompileModelAdvanced"},
     #instance diffusion
     "CreateInstanceDiffusionTracking": {"class": CreateInstanceDiffusionTracking},

View File

@@ -176,7 +176,7 @@ Saves an image and mask as .PNG with the mask as the alpha channel.
         def file_counter():
             max_counter = 0
             # Loop through the existing files
-            for existing_file in os.listdir(full_output_folder):
+            for existing_file in sorted(os.listdir(full_output_folder)):
                 # Check if the file matches the expected format
                 match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file)
                 if match:
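
A caveat that applies to every sorted(os.listdir(...)) change in this diff: sorted() compares strings lexicographically, so unpadded frame numbers still order as 1, 10, 2. A minimal standalone sketch, not code from this repo; the filenames and the natural_key helper are illustrative assumptions:

    import re

    # Literal list standing in for os.listdir() output, whose order is
    # arbitrary on Linux filesystems.
    files = ['frame_10.png', 'frame_1.png', 'frame_2.png']

    print(sorted(files))  # ['frame_1.png', 'frame_10.png', 'frame_2.png']

    # Natural sort: compare digit runs numerically instead of character by
    # character. Assumes filenames share a common pattern; mixed patterns
    # can make the key lists incomparable and raise TypeError.
    def natural_key(name):
        return [int(tok) if tok.isdigit() else tok for tok in re.split(r"(\d+)", name)]

    print(sorted(files, key=natural_key))  # ['frame_1.png', 'frame_2.png', 'frame_10.png']

Zero-padded names (frame_001.png) avoid the issue with plain sorted().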
@@ -785,8 +785,8 @@ class GetImageSizeAndCount:
             "image": ("IMAGE",),
         }}
-    RETURN_TYPES = ("IMAGE","INT", "INT", "INT",)
-    RETURN_NAMES = ("image", "width", "height", "count",)
+    RETURN_TYPES = ("IMAGE","INT", "INT", "INT", "INT",)
+    RETURN_NAMES = ("image", "width", "height", "count", "channels",)
     FUNCTION = "getsize"
     CATEGORY = "KJNodes/image"
     DESCRIPTION = """
@@ -799,9 +799,10 @@ and passes it through unchanged.
         width = image.shape[2]
         height = image.shape[1]
         count = image.shape[0]
+        channels = image.shape[3]
         return {"ui": {
-            "text": [f"{count}x{width}x{height}"]},
-            "result": (image, width, height, count)
+            "text": [f"{count}x{width}x{height}x{channels}"]},
+            "result": (image, width, height, count, channels)
         }
 
 class GetLatentSizeAndCount:
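
The new channels output follows directly from ComfyUI's IMAGE tensor layout, [batch, height, width, channels], which is exactly how the hunk above indexes image.shape. A standalone sketch with toy data:

    import torch

    # Toy IMAGE tensor in ComfyUI's [batch, height, width, channels] layout:
    # two RGB frames of height 512 and width 768; only the shape matters here.
    image = torch.zeros(2, 512, 768, 3)

    count, height, width, channels = image.shape
    print(f"{count}x{width}x{height}x{channels}")  # 2x768x512x3, matching the node's UI text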
@@ -2981,7 +2982,7 @@ class LoadImagesFromFolderKJ:
             except OSError:
                 pass
         else:
-            for file in os.listdir(folder):
+            for file in sorted(os.listdir(folder)):
                 if any(file.lower().endswith(ext) for ext in valid_extensions):
                     path = os.path.join(folder, file)
                     try:
@@ -3043,7 +3044,7 @@ class LoadImagesFromFolderKJ:
                     if any(file.lower().endswith(ext) for ext in valid_extensions):
                         image_paths.append(os.path.join(root, file))
         else:
-            for file in os.listdir(folder):
+            for file in sorted(os.listdir(folder)):
                 if any(file.lower().endswith(ext) for ext in valid_extensions):
                     image_paths.append(os.path.join(folder, file))
@@ -3964,7 +3965,7 @@ class LoadVideosFromFolder:
             raise ImportError("This node requires ComfyUI-VideoHelperSuite to be installed.")
         videos_list = []
         filenames = []
-        for f in os.listdir(kwargs['video']):
+        for f in sorted(os.listdir(kwargs['video'])):
             if os.path.isfile(os.path.join(kwargs['video'], f)):
                 file_parts = f.split('.')
                 if len(file_parts) > 1 and (file_parts[-1].lower() in ['webm', 'mp4', 'mkv', 'gif', 'mov']):

View File

@@ -73,6 +73,9 @@ def get_sage_func(sage_attention, allow_compile=False):
     @wrap_attn
     def attention_sage(q, k, v, heads, mask=None, attn_precision=None, skip_reshape=False, skip_output_reshape=False, **kwargs):
         in_dtype = v.dtype
+        if q.dtype == torch.float32 or k.dtype == torch.float32 or v.dtype == torch.float32:
+            q, k, v = q.to(torch.float16), k.to(torch.float16), v.to(torch.float16)
         if skip_reshape:
             b, _, _, dim_head = q.shape
             tensor_layout="HND"
@@ -91,7 +94,7 @@ def get_sage_func(sage_attention, allow_compile=False):
             # add a heads dimension if there isn't already one
             if mask.ndim == 3:
                 mask = mask.unsqueeze(1)
-        out = sage_func(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout)
+        out = sage_func(q, k, v, attn_mask=mask, is_causal=False, tensor_layout=tensor_layout).to(in_dtype)
         if tensor_layout == "HND":
             if not skip_output_reshape:
                 out = (
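
The two hunks above implement a single dtype round-trip: remember the incoming dtype, downcast fp32 q/k/v to fp16 for a kernel that only accepts half precision, and cast the output back. A minimal sketch of the pattern, where kernel is a stand-in for sage_func rather than the real SageAttention API:

    import torch

    def fp32_safe_attention(q, k, v, kernel):
        # Remember the caller's dtype so the output can be restored to it.
        in_dtype = v.dtype
        # Downcast fp32 inputs; `kernel` stands in for a half-precision-only op.
        if torch.float32 in (q.dtype, k.dtype, v.dtype):
            q, k, v = (t.to(torch.float16) for t in (q, k, v))
        # Run the kernel, then cast the result back to the original dtype.
        return kernel(q, k, v).to(in_dtype)

    # Usage with a trivial stand-in kernel:
    q = k = v = torch.randn(1, 8, 16, 64, dtype=torch.float32)
    out = fp32_safe_attention(q, k, v, kernel=lambda q, k, v: q + k + v)
    assert out.dtype == torch.float32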
@@ -853,7 +856,60 @@ class TorchCompileModelWanVideoV2:
             raise RuntimeError("Failed to compile model")
         return (m, )
 
+class TorchCompileModelAdvanced:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("MODEL",),
+                "backend": (["inductor","cudagraphs"], {"default": "inductor"}),
+                "fullgraph": ("BOOLEAN", {"default": False, "tooltip": "Enable full graph mode"}),
+                "mode": (["default", "max-autotune", "max-autotune-no-cudagraphs", "reduce-overhead"], {"default": "default"}),
+                "dynamic": ("BOOLEAN", {"default": False, "tooltip": "Enable dynamic mode"}),
+                "compile_transformer_blocks_only": ("BOOLEAN", {"default": True, "tooltip": "Compile only transformer blocks, faster compile and less error prone"}),
+                "dynamo_cache_size_limit": ("INT", {"default": 64, "min": 0, "max": 1024, "step": 1, "tooltip": "torch._dynamo.config.cache_size_limit"}),
+                "debug_compile_keys": ("BOOLEAN", {"default": False, "tooltip": "Print the compile keys used for torch.compile"}),
+            },
+        }
+
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "patch"
+    CATEGORY = "KJNodes/torchcompile"
+    DESCRIPTION = "Advanced torch.compile patching for diffusion models."
+    EXPERIMENTAL = True
+
+    def patch(self, model, backend, fullgraph, mode, dynamic, dynamo_cache_size_limit, compile_transformer_blocks_only, debug_compile_keys):
+        from comfy_api.torch_helpers import set_torch_compile_wrapper
+        m = model.clone()
+        diffusion_model = m.get_model_object("diffusion_model")
+        torch._dynamo.config.cache_size_limit = dynamo_cache_size_limit
+        try:
+            if compile_transformer_blocks_only:
+                layer_types = ["double_blocks", "single_blocks", "layers", "transformer_blocks", "blocks", "visual_transformer_blocks", "text_transformer_blocks"]
+                compile_key_list = []
+                for layer_name in layer_types:
+                    if hasattr(diffusion_model, layer_name):
+                        blocks = getattr(diffusion_model, layer_name)
+                        for i in range(len(blocks)):
+                            compile_key_list.append(f"diffusion_model.{layer_name}.{i}")
+                if not compile_key_list:
+                    logging.warning("No known transformer blocks found to compile, compiling entire diffusion model instead")
+                elif debug_compile_keys:
+                    logging.info("TorchCompileModelAdvanced: Compile key list:")
+                    for key in compile_key_list:
+                        logging.info(f" - {key}")
+            if not compile_key_list:
+                compile_key_list = ["diffusion_model"]
+            set_torch_compile_wrapper(model=m, keys=compile_key_list, backend=backend, mode=mode, dynamic=dynamic, fullgraph=fullgraph)
+        except:
+            raise RuntimeError("Failed to compile model")
+        return (m, )
+
 class TorchCompileModelQwenImage:
     @classmethod
     def INPUT_TYPES(s):
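
The node above delegates the actual patching to ComfyUI's set_torch_compile_wrapper. The underlying idea, probing for known block-list attributes and compiling each block instead of the whole model, can be sketched with plain torch.compile on a toy module; the compile_blocks helper and the toy model are illustrative assumptions, not this repo's code:

    import torch
    import torch.nn as nn

    def compile_blocks(model: nn.Module, layer_types=("blocks", "transformer_blocks")):
        # Collect per-block compile targets by attribute name, as the node does.
        compiled = []
        for name in layer_types:
            blocks = getattr(model, name, None)
            if isinstance(blocks, nn.ModuleList):
                for i in range(len(blocks)):
                    blocks[i] = torch.compile(blocks[i])  # compile one block at a time
                    compiled.append(f"{name}.{i}")
        if not compiled:
            model = torch.compile(model)  # no known blocks: compile everything
        return model

    # Toy model with a `blocks` ModuleList, mimicking a transformer layout.
    toy = nn.Module()
    toy.blocks = nn.ModuleList([nn.Linear(8, 8) for _ in range(3)])
    toy = compile_blocks(toy)

Compiling block by block keeps any recompilation localized to small units, which matches the node's tooltip for compile_transformer_blocks_only: faster compile and less error prone.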

View File

@@ -175,6 +175,7 @@ app.registerExtension({
                     this.outputs[1]["label"] = "width"
                     this.outputs[2]["label"] = "height"
                     this.outputs[3]["label"] = "count"
+                    this.outputs[4]["label"] = "channels"
                     return v;
                 }
                 //const onGetImageSizeExecuted = nodeType.prototype.onExecuted;
@@ -187,6 +188,7 @@ app.registerExtension({
                     this.outputs[1]["label"] = values[1] + " width"
                     this.outputs[2]["label"] = values[2] + " height"
                     this.outputs[3]["label"] = values[0] + " count"
+                    this.outputs[4]["label"] = values[3] + " channels"
                     return r
                 }
                 break;