Compare commits

...

6 Commits

Author SHA1 Message Date
Dango233
21dd6170d8
Merge 8643d75a6b98dfd1f39eb97ea53e1c927314200a into 06a60ac3fec854909f35aba20aa5be39ff59a6e3 2025-12-01 22:21:35 +08:00
Jukka Seppänen
06a60ac3fe
Merge pull request #450 from m-sokes/patch-1
Update image_nodes.py
2025-12-01 11:13:51 +02:00
Sokes
a7ce03e735
Update image_nodes.py
sorted() is needed around os.listdir() for proper Linux file sorting.

The reason your files are not in order is that os.listdir() returns filenames in arbitrary order (typically whatever order the entries sit in the on-disk directory structure), not alphabetically or numerically.
On Windows, os.listdir() often appears sorted because NTFS stores directory entries in name-ordered B-trees, but on Ubuntu (Linux) the raw directory listing is almost never sorted by name.
The Fix
You need to sort the list of files before iterating through them.
Change this line:

    for f in os.listdir(kwargs['video']):

To this:

    for f in sorted(os.listdir(kwargs['video'])):
2025-11-30 18:35:29 -05:00
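
One caveat the commit message above does not mention: sorted() is lexicographic, so frame_10.png sorts before frame_2.png. If the filenames carry unpadded numeric suffixes, a natural-sort key is needed. A minimal sketch (the file names are made up for illustration):

    import re

    def natural_key(name):
        # Split into digit and non-digit runs so "frame_2" < "frame_10".
        return [int(part) if part.isdigit() else part.lower()
                for part in re.split(r"(\d+)", name)]

    files = ["frame_10.png", "frame_2.png", "frame_1.png"]
    print(sorted(files))                   # ['frame_1.png', 'frame_10.png', 'frame_2.png']
    print(sorted(files, key=natural_key))  # ['frame_1.png', 'frame_2.png', 'frame_10.png']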
Dango233
8643d75a6b Extend fp8 diff path when either model is scaled 2025-10-28 22:40:05 -04:00
Dango233
e6ee59b4c2 Log when scaled fp8 diff path is used 2025-10-28 22:30:26 -04:00
Dango233
cedea47902 Fix LoRA extraction for scaled fp8 models 2025-10-28 22:28:43 -04:00
2 changed files with 88 additions and 18 deletions
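
For background on the fp8 commits above (this is context, not code from the PR): a "scaled fp8" checkpoint stores quantized float8 weights together with per-tensor scales, so a meaningful weight diff has to dequantize both models to a high-precision dtype before subtracting; diffing the raw fp8 payloads would ignore the scales entirely. A toy sketch of the idea (448.0 is the fp8 e4m3 maximum representable value; requires a PyTorch build with float8 support):

    import torch

    # Toy scaled-fp8 representation: quantized weight plus a per-tensor scale.
    w = torch.randn(4, 4)
    scale = w.abs().max() / 448.0              # map the tensor into fp8 e4m3 range
    w_q = (w / scale).to(torch.float8_e4m3fn)  # quantized payload

    # Dequantize to float32 before any arithmetic; fp8 tensors do not
    # support subtraction directly, and the scale must be reapplied.
    w_deq = w_q.to(torch.float32) * scale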

View File

@@ -176,7 +176,7 @@ Saves an image and mask as .PNG with the mask as the alpha channel.
         def file_counter():
             max_counter = 0
             # Loop through the existing files
-            for existing_file in os.listdir(full_output_folder):
+            for existing_file in sorted(os.listdir(full_output_folder)):
                 # Check if the file matches the expected format
                 match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file)
                 if match:
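
For context, file_counter scans existing files for the highest numeric suffix; the hunk above cuts off after `if match:`, so the body below is an assumed reconstruction of the obvious max-tracking logic, shown standalone:

    import os
    import re

    def file_counter(full_output_folder, filename):
        max_counter = 0
        for existing_file in sorted(os.listdir(full_output_folder)):
            match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file)
            if match:
                # Assumed body: track the largest counter seen. Note that
                # max() is order-independent, so sorting does not change this
                # result; it only makes the traversal deterministic.
                max_counter = max(max_counter, int(match.group(1)))
        return max_counter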
@@ -2981,7 +2981,7 @@ class LoadImagesFromFolderKJ:
         except OSError:
             pass
         else:
-            for file in os.listdir(folder):
+            for file in sorted(os.listdir(folder)):
                 if any(file.lower().endswith(ext) for ext in valid_extensions):
                     path = os.path.join(folder, file)
                     try:
@@ -3043,7 +3043,7 @@ class LoadImagesFromFolderKJ:
                 if any(file.lower().endswith(ext) for ext in valid_extensions):
                     image_paths.append(os.path.join(root, file))
         else:
-            for file in os.listdir(folder):
+            for file in sorted(os.listdir(folder)):
                 if any(file.lower().endswith(ext) for ext in valid_extensions):
                     image_paths.append(os.path.join(folder, file))
@@ -3964,7 +3964,7 @@ class LoadVideosFromFolder:
             raise ImportError("This node requires ComfyUI-VideoHelperSuite to be installed.")
         videos_list = []
         filenames = []
-        for f in os.listdir(kwargs['video']):
+        for f in sorted(os.listdir(kwargs['video'])):
             if os.path.isfile(os.path.join(kwargs['video'], f)):
                 file_parts = f.split('.')
                 if len(file_parts) > 1 and (file_parts[-1].lower() in ['webm', 'mp4', 'mkv', 'gif', 'mov']):
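
As an aside (not part of this PR), the extension check above splits on '.' and inspects the last part; a close equivalent uses os.path.splitext:

    import os

    VIDEO_EXTS = {".webm", ".mp4", ".mkv", ".gif", ".mov"}

    def list_videos(folder):
        # Deterministic, name-sorted listing of video files; splitext
        # returns the final extension including the dot ("" if none).
        return [
            f for f in sorted(os.listdir(folder))
            if os.path.isfile(os.path.join(folder, f))
            and os.path.splitext(f)[1].lower() in VIDEO_EXTS
        ]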

View File

@@ -1,6 +1,7 @@
 import torch
 import comfy.model_management
 import comfy.utils
+import comfy.lora
 import folder_paths
 import os
 import logging
@@ -11,6 +12,50 @@ device = comfy.model_management.get_torch_device()
 
 CLAMP_QUANTILE = 0.99
 
+def _resolve_weight_from_patches(patches, key):
+    base_weight, convert_func = patches[0]
+    weight_tensor = comfy.model_management.cast_to_device(
+        base_weight, torch.device("cpu"), torch.float32, copy=True
+    )
+    try:
+        weight_tensor = convert_func(weight_tensor, inplace=True)
+    except TypeError:
+        weight_tensor = convert_func(weight_tensor)
+    if len(patches) > 1:
+        weight_tensor = comfy.lora.calculate_weight(
+            patches[1:],
+            weight_tensor,
+            key,
+            intermediate_dtype=torch.float32,
+            original_weights={key: patches},
+        )
+    return weight_tensor
+
+def _build_scaled_fp8_diff(finetuned_model, original_model, prefix, bias_diff):
+    finetuned_patches = finetuned_model.get_key_patches(prefix)
+    original_patches = original_model.get_key_patches(prefix)
+    common_keys = set(finetuned_patches.keys()).intersection(original_patches.keys())
+    diff_sd = {}
+    for key in common_keys:
+        is_weight = key.endswith(".weight")
+        is_bias = key.endswith(".bias")
+        if not is_weight and not (bias_diff and is_bias):
+            continue
+        ft_tensor = _resolve_weight_from_patches(finetuned_patches[key], key)
+        orig_tensor = _resolve_weight_from_patches(original_patches[key], key)
+        diff_sd[key] = ft_tensor.sub(orig_tensor)
+    return diff_sd
+
 def extract_lora(diff, key, rank, algorithm, lora_type, lowrank_iters=7, adaptive_param=1.0, clamp_quantile=True):
     """
     Extracts LoRA weights from a weight difference tensor using SVD.
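
The docstring above names the core technique: factor the weight difference with an SVD and keep only the top-rank components as the LoRA up/down matrices, matching the (U, Vh) return seen in the next hunk. A minimal standalone sketch of that idea (not the exact body of extract_lora; the clamping mirrors the CLAMP_QUANTILE constant above):

    import torch

    def svd_extract(diff, rank, clamp_quantile=0.99):
        # Flatten conv kernels (O, I, kh, kw) to 2-D before the SVD.
        mat = diff.reshape(diff.shape[0], -1).float()
        U, S, Vh = torch.linalg.svd(mat, full_matrices=False)
        U = U[:, :rank] * S[:rank]   # fold singular values into the up matrix
        Vh = Vh[:rank, :]
        # Clamp outliers so a few extreme values do not dominate the factors.
        hi = torch.quantile(torch.cat([U.flatten(), Vh.flatten()]).abs(), clamp_quantile)
        return U.clamp(-hi, hi), Vh.clamp(-hi, hi)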
@@ -99,15 +144,18 @@ def extract_lora(diff, key, rank, algorithm, lora_type, lowrank_iters=7, adaptive_param=1.0, clamp_quantile=True):
     return (U, Vh)
 
-def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora_type, algorithm, lowrank_iters, out_dtype, bias_diff=False, adaptive_param=1.0, clamp_quantile=True):
-    comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True)
-    model_diff.model.diffusion_model.cpu()
-    sd = model_diff.model_state_dict(filter_prefix=prefix_model)
-    del model_diff
-    comfy.model_management.soft_empty_cache()
-    for k, v in sd.items():
-        if isinstance(v, torch.Tensor):
-            sd[k] = v.cpu()
+def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora_type, algorithm, lowrank_iters, out_dtype, bias_diff=False, adaptive_param=1.0, clamp_quantile=True, sd_override=None):
+    if sd_override is None:
+        comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True)
+        model_diff.model.diffusion_model.cpu()
+        sd = model_diff.model_state_dict(filter_prefix=prefix_model)
+        del model_diff
+        comfy.model_management.soft_empty_cache()
+        for k, v in sd.items():
+            if isinstance(v, torch.Tensor):
+                sd[k] = v.cpu()
+    else:
+        sd = sd_override
 
     # Get total number of keys to process for progress bar
     total_keys = len([k for k in sd if k.endswith(".weight") or (bias_diff and k.endswith(".bias"))])
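
With the new sd_override parameter, a caller can hand calc_lora_model a precomputed diff state dict instead of a ModelPatcher. A hedged calling sketch (the tensor, the rank, and the "standard"/"svd" argument values are illustrative assumptions, not values confirmed by this diff):

    import torch

    # Hypothetical precomputed diff, e.g. the output of _build_scaled_fp8_diff.
    diff_sd = {"diffusion_model.blocks.0.attn.qkv.weight": torch.randn(64, 32)}

    output_sd = calc_lora_model(
        None,                    # no ModelPatcher needed on this path
        16,                      # rank (illustrative)
        "diffusion_model.",      # prefix_model
        "diffusion_model.",      # prefix_lora
        {},                      # output_sd accumulator
        "standard", "svd", 7,    # lora_type, algorithm, lowrank_iters (assumed values)
        torch.float16,           # out_dtype
        sd_override=diff_sd,
    )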
@@ -183,17 +231,39 @@ class LoraExtractKJ:
             raise ValueError("svd_lowrank algorithm is only supported for standard LoRA extraction.")
         dtype = {"fp8_e4m3fn": torch.float8_e4m3fn, "bf16": torch.bfloat16, "fp16": torch.float16, "fp16_fast": torch.float16, "fp32": torch.float32}[output_dtype]
 
-        m = finetuned_model.clone()
-        kp = original_model.get_key_patches("diffusion_model.")
-        for k in kp:
-            m.add_patches({k: kp[k]}, - 1.0, 1.0)
-        model_diff = m
+        model_diff = None
+        sd_override = None
+        scaled_fp8_ft = getattr(getattr(finetuned_model.model, "model_config", None), "scaled_fp8", None)
+        scaled_fp8_orig = getattr(getattr(original_model.model, "model_config", None), "scaled_fp8", None)
+        scaled_fp8_present = scaled_fp8_ft is not None or scaled_fp8_orig is not None
+        if scaled_fp8_present:
+            comfy.model_management.load_models_gpu([finetuned_model, original_model], force_patch_weights=True)
+            logging.info(
+                "LoraExtractKJ: detected scaled fp8 weights (finetuned=%s, original=%s); using high-precision diff path.",
+                scaled_fp8_ft is not None,
+                scaled_fp8_orig is not None,
+            )
+            sd_override = _build_scaled_fp8_diff(
+                finetuned_model, original_model, "diffusion_model.", bias_diff
+            )
+            comfy.model_management.soft_empty_cache()
+        else:
+            m = finetuned_model.clone()
+            kp = original_model.get_key_patches("diffusion_model.")
+            for k in kp:
+                m.add_patches({k: kp[k]}, - 1.0, 1.0)
+            model_diff = m
 
         full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir)
         output_sd = {}
-        output_sd = calc_lora_model(model_diff, rank, "diffusion_model.", "diffusion_model.", output_sd, lora_type, algorithm, lowrank_iters, dtype, bias_diff=bias_diff, adaptive_param=adaptive_param, clamp_quantile=clamp_quantile)
+        if model_diff is not None:
+            output_sd = calc_lora_model(model_diff, rank, "diffusion_model.", "diffusion_model.", output_sd, lora_type, algorithm, lowrank_iters, dtype, bias_diff=bias_diff, adaptive_param=adaptive_param, clamp_quantile=clamp_quantile)
+        elif sd_override is not None:
+            output_sd = calc_lora_model(None, rank, "diffusion_model.", "diffusion_model.", output_sd, lora_type, algorithm, lowrank_iters, dtype, bias_diff=bias_diff, adaptive_param=adaptive_param, clamp_quantile=clamp_quantile, sd_override=sd_override)
 
         if "adaptive" in lora_type:
             rank_str = f"{lora_type}_{adaptive_param:.2f}"
         else: