Mirror of https://git.datalinker.icu/kijai/ComfyUI-KJNodes.git (synced 2025-12-09 12:54:40 +08:00)
Compare commits: 4a1235bf54 ... f91e7c53e3 (16 commits)
| Author | SHA1 | Date |
|---|---|---|
| | f91e7c53e3 | |
| | 4dfb85dcc5 | |
| | 8660778ea1 | |
| | 3b9c1b49ab | |
| | 246920d8b9 | |
| | 7aafce8578 | |
| | d03f2e975a | |
| | a4a2ecc14d | |
| | a69d7c1a95 | |
| | e5935115cf | |
| | 5c0564a47f | |
| | 5325ea9cc1 | |
| | 3a8786c206 | |
| | aa89c147d0 | |
| | b4af62112c | |
| | 61ffeb548e | |
@@ -209,6 +209,7 @@ NODE_CONFIG = {
    "ModelPatchTorchSettings": {"class": ModelPatchTorchSettings, "name": "Model Patch Torch Settings"},
    "WanVideoNAG": {"class": WanVideoNAG, "name": "WanVideoNAG"},
    "GGUFLoaderKJ": {"class": GGUFLoaderKJ, "name": "GGUF Loader KJ"},
    "LatentInpaintTTM": {"class": LatentInpaintTTM, "name": "Latent Inpaint TTM"},

    #instance diffusion
    "CreateInstanceDiffusionTracking": {"class": CreateInstanceDiffusionTracking},
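For context, KJNodes registers every node class through this single NODE_CONFIG table, so the added "LatentInpaintTTM" entry is what makes the new node visible in ComfyUI. Below is a minimal sketch of how such a config table can be expanded into the NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS pair that ComfyUI looks for; the helper name and the placeholder class are assumptions for illustration, not the repository's exact code.

```python
# Hypothetical sketch: expanding a NODE_CONFIG-style table into the two
# mapping dicts ComfyUI reads when importing a custom node package.
class LatentInpaintTTM:  # placeholder class, for illustration only
    pass

NODE_CONFIG = {
    "LatentInpaintTTM": {"class": LatentInpaintTTM, "name": "Latent Inpaint TTM"},
}

def generate_node_mappings(node_config):
    class_mappings = {}
    display_name_mappings = {}
    for node_name, node_info in node_config.items():
        class_mappings[node_name] = node_info["class"]
        # Fall back to the internal key when no display name is given.
        display_name_mappings[node_name] = node_info.get("name", node_name)
    return class_mappings, display_name_mappings

NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = generate_node_mappings(NODE_CONFIG)
```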
@@ -20,7 +20,8 @@ except:
from PIL import ImageGrab, ImageDraw, ImageFont, Image, ImageOps

from nodes import MAX_RESOLUTION, SaveImage
from comfy_extras.nodes_mask import ImageCompositeMasked
from comfy_extras.nodes_mask import composite
import node_helpers
from comfy.cli_args import args
from comfy.utils import ProgressBar, common_upscale
import folder_paths
@@ -97,6 +98,10 @@ https://github.com/hahnec/color-matcher/
"""

    def colormatch(self, image_ref, image_target, method, strength=1.0, multithread=True):
        # Skip unnecessary processing
        if strength == 0:
            return (image_target,)

        try:
            from color_matcher import ColorMatcher
        except:
@@ -117,9 +122,12 @@ https://github.com/hahnec/color-matcher/
            image_target_np_i = images_target_np if batch_size == 1 else images_target[i].numpy()
            image_ref_np_i = image_ref_np if image_ref.size(0) == 1 else images_ref[i].numpy()
            try:
                image_result = cm.transfer(src=image_target_np_i, ref=image_ref_np_i, method=method)
                image_result = image_target_np_i + strength * (image_result - image_target_np_i)
                image_result = cm.transfer(src=image_target_np_i, ref=image_ref_np_i, method=method) # Avoid potential blur when only the fully color-matched image is used
                if strength != 1:
                    image_result = image_target_np_i + strength * (image_result - image_target_np_i)

                return torch.from_numpy(image_result)

            except Exception as e:
                print(f"Thread {i} error: {e}")
                return torch.from_numpy(image_target_np_i)  # fallback
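The change above puts the strength blend behind an `if strength != 1` guard, so a full-strength match returns the color-matcher output untouched (per the inline comment, this avoids potential blur from an unnecessary re-blend), and a strength of 0 now returns early without running the matcher at all. The blend itself is a plain linear interpolation between the target frame and the matched frame; here is a minimal standalone NumPy sketch of that idea, not the node's own code.

```python
import numpy as np

def blend_color_match(target: np.ndarray, matched: np.ndarray, strength: float) -> np.ndarray:
    """Linear interpolation: strength=0 keeps the target, strength=1 keeps the matched result."""
    if strength == 0:
        return target   # mirrors the early return added at the top of colormatch()
    if strength == 1:
        return matched  # skip the blend entirely, as the patch does
    return target + strength * (matched - target)

# Tiny usage example with dummy image data
target = np.zeros((4, 4, 3), dtype=np.float32)
matched = np.ones((4, 4, 3), dtype=np.float32)
print(blend_color_match(target, matched, 0.5).mean())  # 0.5
```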
@@ -1284,8 +1292,11 @@ nodes for example.
        mask_image[:, :, :, 0] = color_list[0] / 255 # Red channel
        mask_image[:, :, :, 1] = color_list[1] / 255 # Green channel
        mask_image[:, :, :, 2] = color_list[2] / 255 # Blue channel

        preview, = ImageCompositeMasked.composite(self, image, mask_image, 0, 0, True, mask_adjusted)

        destination, source = node_helpers.image_alpha_fix(image, mask_image)
        destination = destination.clone().movedim(-1, 1)
        preview = composite(destination, source.movedim(-1, 1), 0, 0, mask_adjusted, 1, True).movedim(1, -1)

        if pass_through:
            return (preview, )
        return(self.save_images(preview, filename_prefix, prompt, extra_pnginfo))
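The preview here is now built with the lower-level `composite` helper instead of instantiating the `ImageCompositeMasked` node, which means shuffling the channel axis from ComfyUI's BHWC image layout to BCHW and back with `movedim`. The following is a small self-contained PyTorch sketch of that layout shuffle plus a straightforward masked blend; it illustrates the idea only and does not reproduce the helper's exact signature.

```python
import torch

def masked_overlay(image_bhwc: torch.Tensor, overlay_bhwc: torch.Tensor, mask_bhw: torch.Tensor) -> torch.Tensor:
    """Blend an overlay onto an image where mask==1, in ComfyUI's BHWC layout."""
    dst = image_bhwc.movedim(-1, 1)   # BHWC -> BCHW, as in destination.clone().movedim(-1, 1)
    src = overlay_bhwc.movedim(-1, 1)
    m = mask_bhw.unsqueeze(1)         # B,1,H,W so it broadcasts over the channel axis
    out = dst * (1.0 - m) + src * m
    return out.movedim(1, -1)         # back to BHWC

image = torch.rand(1, 64, 64, 3)
overlay = torch.ones(1, 64, 64, 3)
mask = torch.zeros(1, 64, 64)
mask[:, 16:48, 16:48] = 1.0
preview = masked_overlay(image, overlay, mask)
print(preview.shape)  # torch.Size([1, 64, 64, 3])
```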
@@ -794,8 +794,8 @@ class CreateShapeMask:
    FUNCTION = "createshapemask"
    CATEGORY = "KJNodes/masking/generate"
    DESCRIPTION = """
Creates a mask or batch of masks with the specified shape.
Locations are center locations.
Creates a mask or batch of masks with the specified shape and movement trajectory.
Locations are the starting center locations.
Grow value is the amount to grow the shape on each frame, creating animated masks.
"""
@@ -804,25 +804,71 @@ Grow value is the amount to grow the shape on each frame, creating animated mask
        return {
            "required": {
                "shape": (
                    [ 'circle',
                      'square',
                      'triangle',
                    ],
                    {
                        "default": 'circle'
                    }),
                "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}),
                "location_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}),
                "location_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}),
                    ['circle', 'square', 'triangle'],
                    {"default": 'circle'}
                ),
                "frames": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}),
                "start_location_x": ("INT", {"default": 256, "min": 0, "max": 4096, "step": 1}),
                "start_location_y": ("INT", {"default": 256, "min": 0, "max": 4096, "step": 1}),
                "grow": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}),
                "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
                "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}),
                "shape_width": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}),
                "shape_height": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}),
            },
        }
                "frame_width": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "frame_height": ("INT", {"default": 512, "min": 16, "max": 4096, "step": 1}),
                "shape_width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 1}),
                "shape_height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 1}),
                "movement_type": (
                    ['none', 'linear', 'circular', 'zigzag', 'bounce', 'sinusoidal', 'spiral', 'random'],
                    {"default": 'none'}
                ),
                "end_location_x": ("INT", {"default": 256, "min": 0, "max": 4096, "step": 1}),
                "end_location_y": ("INT", {"default": 256, "min": 0, "max": 4096, "step": 1}),
                "radius": ("INT", {"default": 128, "min": 0, "max": 4096, "step": 1}),
                "angle_step": ("FLOAT", {"default": 10.0, "min": 0.1, "max": 360.0, "step": 0.1}),
                "zigzag_amplitude": ("INT", {"default": 20, "min": 1, "max": 512, "step": 1}),
                "bounce_height": ("INT", {"default": 50, "min": 1, "max": 512, "step": 1}),
                "sin_amplitude": ("INT", {"default": 50, "min": 1, "max": 512, "step": 1}),
                "spiral_tightness": ("FLOAT", {"default": 0.1, "min": 0.01, "max": 1.0, "step": 0.01}),
            },
        }

    def createshapemask(self, frames, frame_width, frame_height, location_x, location_y, shape_width, shape_height, grow, shape):
    def createshapemask(self, frames, frame_width, frame_height, start_location_x, start_location_y, shape_width, shape_height, grow, shape, movement_type, end_location_x, end_location_y, radius, angle_step, zigzag_amplitude, bounce_height, sin_amplitude, spiral_tightness):
        from math import sin, cos, radians, pi
        import random

        def get_position(i, movement_type, start_x, start_y, end_x, end_y, radius, angle_step, zigzag_amplitude, bounce_height, sin_amplitude, spiral_tightness):
            if movement_type == 'linear':
                fraction = i / (frames - 1)
                return start_x + fraction * (end_x - start_x), start_y + fraction * (end_y - start_y)
            elif movement_type == 'circular':
                angle = angle_step * i
                return start_x + radius * cos(radians(angle)), start_y + radius * sin(radians(angle))
            elif movement_type == 'zigzag':
                fraction = i / (frames - 1)
                zigzag_offset = zigzag_amplitude * sin(2 * pi * fraction * 10)
                return start_x + fraction * (end_x - start_x), start_y + zigzag_offset
            elif movement_type == 'bounce':
                fraction = i / (frames - 1)
                bounce_offset = bounce_height * abs(sin(pi * fraction * 2))
                return start_x + fraction * (end_x - start_x), start_y + bounce_offset
            elif movement_type == 'sinusoidal':
                fraction = i / (frames - 1)
                sin_offset = sin_amplitude * sin(2 * pi * fraction)
                return start_x + fraction * (end_x - start_x), start_y + sin_offset
            elif movement_type == 'spiral':
                angle = i * spiral_tightness
                r = radius * (i / frames)
                return start_x + r * cos(angle), start_y + r * sin(angle)
            elif movement_type == 'random':
                if i == 0: # initialize previous_positions if first frame
                    self.previous_positions = (start_x, start_y)
                else:
                    prev_x, prev_y = self.previous_positions
                    new_x = max(0, min(frame_width, prev_x + random.randint(-10, 10)))
                    new_y = max(0, min(frame_height, prev_y + random.randint(-10, 10)))
                    self.previous_positions = (new_x, new_y)
                return self.previous_positions
            else:
                return start_x, start_y

        # Define the number of images in the batch
        batch_size = frames
        out = []
@@ -834,6 +880,9 @@ Grow value is the amount to grow the shape on each frame, creating animated mask
            # Calculate the size for this frame and ensure it's not less than 0
            current_width = max(0, shape_width + i*grow)
            current_height = max(0, shape_height + i*grow)

            # Get the current position based on the movement type
            location_x, location_y = get_position(i, movement_type, start_location_x, start_location_y, end_location_x, end_location_y, radius, angle_step, zigzag_amplitude, bounce_height, sin_amplitude, spiral_tightness)

            if shape == 'circle' or shape == 'square':
                # Define the bounding box for the shape
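All of the new movement types reduce to computing a per-frame (x, y) center before the shape is drawn. The sketch below shows the two simplest trajectories, linear and circular, as standalone functions using the same parameter names as the node; it is a simplified illustration that omits the stateful 'random' walk and guards the frames == 1 case, which the node's own fraction formula divides by (frames - 1).

```python
from math import sin, cos, radians

def linear_position(i, frames, start_x, start_y, end_x, end_y):
    # Fraction of the way through the animation; a single frame stays at the start.
    fraction = i / (frames - 1) if frames > 1 else 0.0
    return (start_x + fraction * (end_x - start_x),
            start_y + fraction * (end_y - start_y))

def circular_position(i, start_x, start_y, radius, angle_step):
    # The center orbits the start location, advancing angle_step degrees per frame.
    angle = angle_step * i
    return (start_x + radius * cos(radians(angle)),
            start_y + radius * sin(radians(angle)))

for i in range(4):
    print(linear_position(i, 4, 0, 0, 300, 0), circular_position(i, 256, 256, 128, 90))
```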
@@ -391,6 +391,14 @@ class DiffusionModelLoaderKJ(BaseLoaderKJ):

        sd = comfy.utils.load_torch_file(unet_path)
        if extra_state_dict is not None:
            # If the model is a checkpoint, strip additional non-diffusion model entries before adding extra state dict
            from comfy import model_detection
            diffusion_model_prefix = model_detection.unet_prefix_from_state_dict(sd)
            if diffusion_model_prefix == "model.diffusion_model.":
                temp_sd = comfy.utils.state_dict_prefix_replace(sd, {diffusion_model_prefix: ""}, filter_keys=True)
                if len(temp_sd) > 0:
                    sd = temp_sd

            extra_sd = comfy.utils.load_torch_file(extra_state_dict)
            sd.update(extra_sd)
            del extra_sd
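This loader change lets a full checkpoint be used where a bare diffusion model is expected: when the keys carry the "model.diffusion_model." prefix, everything outside that prefix (VAE, text encoder, and so on) is dropped before the extra state dict is merged in. Here is a plain-dict sketch of that filtering step, illustrative only; the real code delegates to comfy.utils.state_dict_prefix_replace with filter_keys=True.

```python
def strip_prefix(state_dict: dict, prefix: str = "model.diffusion_model.") -> dict:
    """Keep only keys under `prefix`, with the prefix removed; return the input unchanged if nothing matches."""
    filtered = {k[len(prefix):]: v for k, v in state_dict.items() if k.startswith(prefix)}
    return filtered if filtered else state_dict

checkpoint = {
    "model.diffusion_model.input_blocks.0.weight": "unet tensor",
    "first_stage_model.decoder.weight": "vae tensor",        # dropped
    "cond_stage_model.transformer.weight": "text encoder",   # dropped
}
print(strip_prefix(checkpoint))  # {'input_blocks.0.weight': 'unet tensor'}
```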
@@ -669,12 +669,12 @@ Converts any type to a string.
"""

    def stringify(self, input, prefix="", suffix=""):
        if isinstance(input, (int, float, bool)):
        if isinstance(input, (int, float, bool, str)):
            stringified = str(input)
        elif isinstance(input, list):
            stringified = ', '.join(str(item) for item in input)
        else:
            return
            return input,
        if prefix: # Check if prefix is not empty
            stringified = prefix + stringified # Add the prefix
        if suffix: # Check if suffix is not empty
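Two behavioral fixes are visible in this hunk: `str` inputs are now stringified like numbers (so prefix and suffix get applied to them), and unsupported types are passed through as a tuple instead of silently returning None. The standalone sketch below illustrates that intended behavior; it is not the node class itself, and since the hunk cuts off after the suffix check, the tail of the function here is an assumption.

```python
def stringify(value, prefix="", suffix=""):
    if isinstance(value, (int, float, bool, str)):
        stringified = str(value)
    elif isinstance(value, list):
        stringified = ', '.join(str(item) for item in value)
    else:
        return (value,)          # pass unsupported types through unchanged
    if prefix:                   # only prepend when a prefix was given
        stringified = prefix + stringified
    if suffix:                   # assumed symmetric handling of the suffix
        stringified = stringified + suffix
    return (stringified,)

print(stringify("hello", prefix="[", suffix="]"))   # ('[hello]',)
print(stringify([1, 2, 3], suffix="!"))             # ('1, 2, 3!',)
```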
@@ -2622,4 +2622,81 @@ class LazySwitchKJ:

    def switch(self, switch, on_false = None, on_true=None):
        value = on_true if switch else on_false
        return (value,)
        return (value,)


from comfy.patcher_extension import WrappersMP
from comfy.sampler_helpers import prepare_mask

class TTM_SampleWrapper:
    def __init__(self, mask, steps):
        self.mask = mask
        self.steps = steps

    def __call__(self, sampler, guider, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar):
        model_options = extra_args["model_options"]
        wrappers = model_options["transformer_options"]["wrappers"]
        w = wrappers.setdefault(WrappersMP.APPLY_MODEL, {})

        if self.mask is not None:
            motion_mask = self.mask.reshape((-1, 1, self.mask.shape[-2], self.mask.shape[-1]))
            motion_mask = prepare_mask(motion_mask, noise.shape, noise.device)

        scale_latent_inpaint = guider.model_patcher.model.scale_latent_inpaint
        w["TTM_ApplyModel_Wrapper"] = [TTM_ApplyModel_Wrapper(latent_image, noise, motion_mask, self.steps, scale_latent_inpaint)]

        out = sampler(guider, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)

        return out


class TTM_ApplyModel_Wrapper:
    def __init__(self, reference_samples, noise, motion_mask, steps, scale_latent_inpaint):
        self.reference_samples = reference_samples
        self.noise = noise
        self.motion_mask = motion_mask
        self.steps = steps
        self.scale_latent_inpaint = scale_latent_inpaint

    def __call__(self, executor, x, t, c_concat, c_crossattn, control, transformer_options, **kwargs):
        sigmas = transformer_options["sample_sigmas"]

        matched = (sigmas == t).nonzero(as_tuple=True)[0]
        if matched.numel() > 0:
            current_step_index = matched.item()
        else:
            crossing = ((sigmas[:-1] - t) * (sigmas[1:] - t) <= 0).nonzero(as_tuple=True)[0]
            current_step_index = crossing.item() if crossing.numel() > 0 else 0

        next_sigma = sigmas[current_step_index + 1] if current_step_index < len(sigmas) - 1 else sigmas[current_step_index]

        if current_step_index != 0 and current_step_index < self.steps:
            noisy_latent = self.scale_latent_inpaint(x=x, sigma=torch.tensor([next_sigma]), noise=self.noise.to(x), latent_image=self.reference_samples.to(x))
            if self.motion_mask is not None:
                x = x * (1-self.motion_mask).to(x) + noisy_latent * self.motion_mask.to(x)
            else:
                x = noisy_latent

        return executor(x, t, c_concat, c_crossattn, control, transformer_options, **kwargs)


class LatentInpaintTTM:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "model": ("MODEL", ),
            "steps": ("INT", {"default": 7, "min": 0, "max": 888, "step": 1, "tooltip": "Number of steps to apply TTM inpainting for."}),
            },
            "optional": {
                "mask": ("MASK", {"tooltip": "Latent mask where white (1.0) is the area to inpaint and black (0.0) is the area to keep unchanged."}),
            }
        }
    RETURN_TYPES = ("MODEL",)
    FUNCTION = "patch"
    EXPERIMENTAL = True
    DESCRIPTION = "https://github.com/time-to-move/TTM"
    CATEGORY = "KJNodes/experimental"

    def patch(self, model, steps, mask=None):
        m = model.clone()
        m.add_wrapper_with_key(WrappersMP.SAMPLER_SAMPLE, "TTM_SampleWrapper", TTM_SampleWrapper(mask, steps))
        return (m, )
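The TTM wrappers hinge on two small pieces of logic: recovering the current step index from the sampler's sigma schedule (an exact match first, then a sign-change search between neighbouring sigmas), and re-noising the reference latent into the masked region for the first `steps` steps via `x * (1 - mask) + noisy_latent * mask`. The following is a trimmed, self-contained sketch of just the step lookup with dummy tensors; inside the node the schedule comes from transformer_options["sample_sigmas"].

```python
import torch

def find_step_index(sigmas: torch.Tensor, t: torch.Tensor) -> int:
    """Locate t within a descending sigma schedule."""
    matched = (sigmas == t).nonzero(as_tuple=True)[0]
    if matched.numel() > 0:
        return matched.item()
    # No exact hit: find the interval whose endpoints straddle t
    # (product of the signed distances is <= 0 at the crossing).
    crossing = ((sigmas[:-1] - t) * (sigmas[1:] - t) <= 0).nonzero(as_tuple=True)[0]
    return crossing.item() if crossing.numel() > 0 else 0

sigmas = torch.tensor([14.6, 7.0, 3.5, 1.2, 0.0])
print(find_step_index(sigmas, torch.tensor(3.5)))   # 2 (exact match)
print(find_step_index(sigmas, torch.tensor(2.0)))   # 2 (falls between 3.5 and 1.2)
```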
@@ -4,4 +4,4 @@ color-matcher
matplotlib
huggingface_hub
mss
opencv-python
opencv-python-headless