Mirror of https://git.datalinker.icu/kijai/ComfyUI-KJNodes.git
(synced 2025-12-09 12:54:40 +08:00)

Compare commits: a83a1029ee ... 21dd6170d8 (6 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 21dd6170d8 | |
| | 06a60ac3fe | |
| | a7ce03e735 | |
| | 8643d75a6b | |
| | e6ee59b4c2 | |
| | cedea47902 | |
```diff
@@ -176,7 +176,7 @@ Saves an image and mask as .PNG with the mask as the alpha channel.
     def file_counter():
         max_counter = 0
         # Loop through the existing files
-        for existing_file in os.listdir(full_output_folder):
+        for existing_file in sorted(os.listdir(full_output_folder)):
             # Check if the file matches the expected format
             match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file)
             if match:
```
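The only change here is wrapping `os.listdir` in `sorted()`: `os.listdir` returns entries in arbitrary, filesystem-dependent order, so sorting makes the scan deterministic across platforms. A minimal standalone sketch of the pattern this hunk touches (the folder and filename parameters are assumptions, since the enclosing method is not shown):

```python
import os
import re

def file_counter(full_output_folder: str, filename: str) -> int:
    """Highest counter among existing '<filename>_<n>_.<ext>' files."""
    max_counter = 0
    # sorted() gives a deterministic scan order; os.listdir alone does not.
    for existing_file in sorted(os.listdir(full_output_folder)):
        # Check if the file matches the expected format
        match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file)
        if match:
            max_counter = max(max_counter, int(match.group(1)))
    return max_counter
```

Note that the result does not depend on iteration order (only the maximum is kept), so the `sorted()` call buys reproducible traversal rather than a different answer.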
```diff
@@ -2981,7 +2981,7 @@ class LoadImagesFromFolderKJ:
             except OSError:
                 pass
         else:
-            for file in os.listdir(folder):
+            for file in sorted(os.listdir(folder)):
                 if any(file.lower().endswith(ext) for ext in valid_extensions):
                     path = os.path.join(folder, file)
                     try:
```
```diff
@@ -3043,7 +3043,7 @@ class LoadImagesFromFolderKJ:
                 if any(file.lower().endswith(ext) for ext in valid_extensions):
                     image_paths.append(os.path.join(root, file))
         else:
-            for file in os.listdir(folder):
+            for file in sorted(os.listdir(folder)):
                 if any(file.lower().endswith(ext) for ext in valid_extensions):
                     image_paths.append(os.path.join(folder, file))
```
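Both `LoadImagesFromFolderKJ` hunks apply the same fix, so image batches now load in a stable order. One caveat: plain `sorted()` is lexicographic, so unpadded frame numbers sort unintuitively (`img10.png` before `img2.png`). A natural-sort key handles that; `natural_key` below is a hypothetical helper using only the standard library, not something the node provides:

```python
import re

def natural_key(name: str):
    # Split into digit and non-digit runs so "img2" < "img10" numerically.
    return [int(part) if part.isdigit() else part.lower()
            for part in re.split(r"(\d+)", name)]

files = ["img10.png", "img2.png", "img1.png"]
print(sorted(files))                   # ['img1.png', 'img10.png', 'img2.png']
print(sorted(files, key=natural_key))  # ['img1.png', 'img2.png', 'img10.png']
```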
```diff
@@ -3964,7 +3964,7 @@ class LoadVideosFromFolder:
             raise ImportError("This node requires ComfyUI-VideoHelperSuite to be installed.")
         videos_list = []
         filenames = []
-        for f in os.listdir(kwargs['video']):
+        for f in sorted(os.listdir(kwargs['video'])):
             if os.path.isfile(os.path.join(kwargs['video'], f)):
                 file_parts = f.split('.')
                 if len(file_parts) > 1 and (file_parts[-1].lower() in ['webm', 'mp4', 'mkv', 'gif', 'mov']):
```
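The extension check here splits the name on every dot and tests the last piece. `os.path.splitext` expresses the same filter a little more directly; a small equivalent sketch (the names are illustrative, not part of the node):

```python
import os

VIDEO_EXTS = {'.webm', '.mp4', '.mkv', '.gif', '.mov'}

def is_video_file(path: str) -> bool:
    # splitext returns ('stem', '.ext'), with '.ext' empty when there is
    # no dot, which mirrors the len(file_parts) > 1 guard in the node.
    return os.path.splitext(path)[1].lower() in VIDEO_EXTS
```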
```diff
@@ -1,6 +1,7 @@
 import torch
 import comfy.model_management
 import comfy.utils
+import comfy.lora
 import folder_paths
 import os
 import logging
```
```diff
@@ -11,6 +12,50 @@ device = comfy.model_management.get_torch_device()

 CLAMP_QUANTILE = 0.99


+def _resolve_weight_from_patches(patches, key):
+    base_weight, convert_func = patches[0]
+    weight_tensor = comfy.model_management.cast_to_device(
+        base_weight, torch.device("cpu"), torch.float32, copy=True
+    )
+    try:
+        weight_tensor = convert_func(weight_tensor, inplace=True)
+    except TypeError:
+        weight_tensor = convert_func(weight_tensor)
+
+    if len(patches) > 1:
+        weight_tensor = comfy.lora.calculate_weight(
+            patches[1:],
+            weight_tensor,
+            key,
+            intermediate_dtype=torch.float32,
+            original_weights={key: patches},
+        )
+
+    return weight_tensor
+
+
+def _build_scaled_fp8_diff(finetuned_model, original_model, prefix, bias_diff):
+    finetuned_patches = finetuned_model.get_key_patches(prefix)
+    original_patches = original_model.get_key_patches(prefix)
+
+    common_keys = set(finetuned_patches.keys()).intersection(original_patches.keys())
+    diff_sd = {}
+
+    for key in common_keys:
+        is_weight = key.endswith(".weight")
+        is_bias = key.endswith(".bias")
+
+        if not is_weight and not (bias_diff and is_bias):
+            continue
+
+        ft_tensor = _resolve_weight_from_patches(finetuned_patches[key], key)
+        orig_tensor = _resolve_weight_from_patches(original_patches[key], key)
+
+        diff_sd[key] = ft_tensor.sub(orig_tensor)
+
+    return diff_sd
+
+
 def extract_lora(diff, key, rank, algorithm, lora_type, lowrank_iters=7, adaptive_param=1.0, clamp_quantile=True):
     """
     Extracts LoRA weights from a weight difference tensor using SVD.
```
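One detail in `_resolve_weight_from_patches` is worth spelling out: the `try`/`except TypeError` around `convert_func` is a compatibility shim that prefers an in-place conversion when the callable accepts an `inplace` keyword and falls back to the plain call otherwise. A self-contained sketch of that pattern, with made-up converters standing in for whatever ComfyUI passes:

```python
import torch

def convert_old(t: torch.Tensor) -> torch.Tensor:
    # Legacy signature: no 'inplace' keyword.
    return t * 2.0

def convert_new(t: torch.Tensor, inplace: bool = False) -> torch.Tensor:
    return t.mul_(2.0) if inplace else t * 2.0

def apply_convert(convert_func, t: torch.Tensor) -> torch.Tensor:
    try:
        # Prefer in-place to avoid a second full-size allocation.
        return convert_func(t, inplace=True)
    except TypeError:
        # Older signature: fall back to the copying path.
        return convert_func(t)

x = torch.ones(3)
print(apply_convert(convert_new, x.clone()))  # tensor([2., 2., 2.])
print(apply_convert(convert_old, x.clone()))  # tensor([2., 2., 2.])
```

A caveat of this idiom: a `TypeError` raised inside a new-style converter for an unrelated reason is also swallowed and triggers the fallback.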
```diff
@@ -99,7 +144,8 @@ def extract_lora(diff, key, rank, algorithm, lora_type, lowrank_iters=7, adaptiv
     return (U, Vh)


-def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora_type, algorithm, lowrank_iters, out_dtype, bias_diff=False, adaptive_param=1.0, clamp_quantile=True):
-    comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True)
-    model_diff.model.diffusion_model.cpu()
-    sd = model_diff.model_state_dict(filter_prefix=prefix_model)
+def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora_type, algorithm, lowrank_iters, out_dtype, bias_diff=False, adaptive_param=1.0, clamp_quantile=True, sd_override=None):
+    if sd_override is None:
+        comfy.model_management.load_models_gpu([model_diff], force_patch_weights=True)
+        model_diff.model.diffusion_model.cpu()
+        sd = model_diff.model_state_dict(filter_prefix=prefix_model)
```
```diff
@@ -108,6 +154,8 @@ def calc_lora_model(model_diff, rank, prefix_model, prefix_lora, output_sd, lora
-    for k, v in sd.items():
-        if isinstance(v, torch.Tensor):
-            sd[k] = v.cpu()
+        for k, v in sd.items():
+            if isinstance(v, torch.Tensor):
+                sd[k] = v.cpu()
+    else:
+        sd = sd_override

     # Get total number of keys to process for progress bar
     total_keys = len([k for k in sd if k.endswith(".weight") or (bias_diff and k.endswith(".bias"))])
```
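With `sd_override`, a caller can hand `calc_lora_model` a precomputed diff state dict and skip the load/offload path entirely; passing nothing preserves the old behavior. A stripped-down sketch of this optional-input pattern (names are illustrative, not the node's API):

```python
from typing import Optional

import torch

def get_diff_sd(model=None, sd_override: Optional[dict] = None) -> dict:
    if sd_override is not None:
        # Caller already built the diff (e.g. the scaled-fp8 path): use as-is.
        return sd_override
    # Default path: pull tensors off the model and move them to CPU.
    assert model is not None, "a model is required when no override is given"
    return {k: v.cpu() for k, v in model.state_dict().items()
            if isinstance(v, torch.Tensor)}
```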
```diff
@@ -183,6 +231,26 @@ class LoraExtractKJ:
             raise ValueError("svd_lowrank algorithm is only supported for standard LoRA extraction.")

         dtype = {"fp8_e4m3fn": torch.float8_e4m3fn, "bf16": torch.bfloat16, "fp16": torch.float16, "fp16_fast": torch.float16, "fp32": torch.float32}[output_dtype]

+        model_diff = None
+        sd_override = None
+
+        scaled_fp8_ft = getattr(getattr(finetuned_model.model, "model_config", None), "scaled_fp8", None)
+        scaled_fp8_orig = getattr(getattr(original_model.model, "model_config", None), "scaled_fp8", None)
+        scaled_fp8_present = scaled_fp8_ft is not None or scaled_fp8_orig is not None
+
+        if scaled_fp8_present:
+            comfy.model_management.load_models_gpu([finetuned_model, original_model], force_patch_weights=True)
+            logging.info(
+                "LoraExtractKJ: detected scaled fp8 weights (finetuned=%s, original=%s); using high-precision diff path.",
+                scaled_fp8_ft is not None,
+                scaled_fp8_orig is not None,
+            )
+            sd_override = _build_scaled_fp8_diff(
+                finetuned_model, original_model, "diffusion_model.", bias_diff
+            )
+            comfy.model_management.soft_empty_cache()
+        else:
-        m = finetuned_model.clone()
-        kp = original_model.get_key_patches("diffusion_model.")
-        for k in kp:
+            m = finetuned_model.clone()
+            kp = original_model.get_key_patches("diffusion_model.")
+            for k in kp:
```
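The detection leans on a null-safe attribute chain: `getattr(getattr(obj, "model_config", None), "scaled_fp8", None)` yields `None` if either hop is missing, instead of raising `AttributeError`. A tiny illustration with made-up classes:

```python
class Config:
    scaled_fp8 = "e4m3fn"  # marker attribute; absent on non-fp8 configs

class Model:
    def __init__(self, config=None):
        if config is not None:
            self.model_config = config

for m in (Model(Config()), Model()):
    # Each getattr supplies a None default, so neither hop can raise.
    flag = getattr(getattr(m, "model_config", None), "scaled_fp8", None)
    print(flag is not None)  # True, then False
```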
```diff
@@ -194,6 +262,8 @@ class LoraExtractKJ:
         output_sd = {}
         if model_diff is not None:
             output_sd = calc_lora_model(model_diff, rank, "diffusion_model.", "diffusion_model.", output_sd, lora_type, algorithm, lowrank_iters, dtype, bias_diff=bias_diff, adaptive_param=adaptive_param, clamp_quantile=clamp_quantile)
+        elif sd_override is not None:
+            output_sd = calc_lora_model(None, rank, "diffusion_model.", "diffusion_model.", output_sd, lora_type, algorithm, lowrank_iters, dtype, bias_diff=bias_diff, adaptive_param=adaptive_param, clamp_quantile=clamp_quantile, sd_override=sd_override)
         if "adaptive" in lora_type:
             rank_str = f"{lora_type}_{adaptive_param:.2f}"
         else:
```
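For reference, the core operation these changes feed, SVD-based LoRA extraction from a weight diff as described in the `extract_lora` docstring, reduces to a few lines. A minimal standalone illustration, not the node's implementation (it omits the algorithm variants, quantile clamping, and dtype handling):

```python
import torch

def lora_from_diff(diff: torch.Tensor, rank: int):
    # diff: (out_features, in_features) difference W_finetuned - W_original
    U, S, Vh = torch.linalg.svd(diff.float(), full_matrices=False)
    U, S, Vh = U[:, :rank], S[:rank], Vh[:rank, :]
    # Split singular values across both factors so that up @ down ≈ diff.
    sqrt_s = torch.sqrt(S)
    up = U * sqrt_s               # (out_features, rank)
    down = sqrt_s[:, None] * Vh   # (rank, in_features)
    return up, down

diff = torch.randn(64, 32)
up, down = lora_from_diff(diff, rank=8)
print((up @ down).shape)  # torch.Size([64, 32]) -- rank-8 approximation
```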