From f259e062c7fef42fd868dafa5705bf9bd8c66f22 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Wed, 1 May 2024 18:44:30 +0300 Subject: [PATCH] New nodes and continue restructuring --- __init__.py | 2 + nodes/curve_nodes.py | 30 +- nodes/image_nodes.py | 1032 +++++++++++++++++++++++++++++++++++++++ nodes/nodes.py | 1100 +++--------------------------------------- web/js/jsnodes.js | 22 + 5 files changed, 1139 insertions(+), 1047 deletions(-) create mode 100644 nodes/image_nodes.py diff --git a/__init__.py b/__init__.py index 0c435a1..fd2bba9 100644 --- a/__init__.py +++ b/__init__.py @@ -2,6 +2,7 @@ from .nodes.nodes import * from .nodes.curve_nodes import * from .nodes.batchcrop_nodes import * from .nodes.audioscheduler_nodes import * +from .nodes.image_nodes import * NODE_CLASS_MAPPINGS = { #constants "INTConstant": INTConstant, @@ -80,6 +81,7 @@ NODE_CLASS_MAPPINGS = { "ScaleBatchPromptSchedule": ScaleBatchPromptSchedule, "CameraPoseVisualizer": CameraPoseVisualizer, "JoinStrings": JoinStrings, + "JoinStringMulti": JoinStringMulti, "Sleep": Sleep, "VRAM_Debug" : VRAM_Debug, "SomethingToString" : SomethingToString, diff --git a/nodes/curve_nodes.py b/nodes/curve_nodes.py index ccb9199..668ed23 100644 --- a/nodes/curve_nodes.py +++ b/nodes/curve_nodes.py @@ -217,6 +217,7 @@ class MaskOrImageToWeight: 'list', 'pandas series', 'tensor', + 'string' ], { "default": 'list' @@ -228,7 +229,7 @@ class MaskOrImageToWeight: }, } - RETURN_TYPES = ("FLOAT",) + RETURN_TYPES = ("FLOAT", "STRING",) FUNCTION = "execute" CATEGORY = "KJNodes" DESCRIPTION = """ @@ -249,18 +250,17 @@ and returns that as the selected output type. # Convert mean_values to the specified output_type if output_type == 'list': - return mean_values, + out = mean_values, elif output_type == 'pandas series': try: import pandas as pd except: raise Exception("MaskOrImageToWeight: pandas is not installed. Please install pandas to use this output_type") - return pd.Series(mean_values), + out = pd.Series(mean_values), elif output_type == 'tensor': - return torch.tensor(mean_values, dtype=torch.float32), - else: - raise ValueError(f"Unsupported output_type: {output_type}") - + out = torch.tensor(mean_values, dtype=torch.float32), + return (out, [str(value) for value in mean_values],) + class WeightScheduleConvert: @classmethod @@ -287,7 +287,7 @@ class WeightScheduleConvert: }, } - RETURN_TYPES = ("FLOAT",) + RETURN_TYPES = ("FLOAT", "STRING",) FUNCTION = "execute" CATEGORY = "KJNodes" DESCRIPTION = """ @@ -344,18 +344,18 @@ Converts different value lists/series to another type. 
float_values = float_values * repeat if output_type == 'list': - return float_values, + out = float_values, elif output_type == 'pandas series': - return pd.Series(float_values), + out = pd.Series(float_values), elif output_type == 'tensor': if input_type == 'pandas series': - return torch.tensor(float_values.values, dtype=torch.float32), + out = torch.tensor(float_values.values, dtype=torch.float32), else: - return torch.tensor(float_values, dtype=torch.float32), + out = torch.tensor(float_values, dtype=torch.float32), elif output_type == 'match_input': - return float_values, - else: - raise ValueError(f"Unsupported output_type: {output_type}") + out = float_values, + return (out, [str(value) for value in float_values],) + class FloatToMask: diff --git a/nodes/image_nodes.py b/nodes/image_nodes.py new file mode 100644 index 0000000..042a612 --- /dev/null +++ b/nodes/image_nodes.py @@ -0,0 +1,1032 @@ +import numpy as np +import time +import torch +import torch.nn.functional as F +import random +import math +import os +import re +import json +from PIL import ImageGrab, ImageDraw, ImageFont, Image + +from nodes import MAX_RESOLUTION, SaveImage +from comfy_extras.nodes_mask import ImageCompositeMasked +from comfy.cli_args import args +from comfy.utils import ProgressBar +import folder_paths +import model_management + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +class ImagePass: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + }, + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "passthrough" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ +Passes the image through without modifying it. +""" + + def passthrough(self, image): + return image, + +class ColorMatch: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image_ref": ("IMAGE",), + "image_target": ("IMAGE",), + "method": ( + [ + 'mkl', + 'hm', + 'reinhard', + 'mvgd', + 'hm-mvgd-hm', + 'hm-mkl-hm', + ], { + "default": 'mkl' + }), + + }, + } + + CATEGORY = "KJNodes/image" + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "colormatch" + DESCRIPTION = """ +color-matcher enables color transfer across images which comes in handy for automatic +color-grading of photographs, paintings and film sequences as well as light-field +and stopmotion corrections. + +The methods behind the mappings are based on the approach from Reinhard et al., +the Monge-Kantorovich Linearization (MKL) as proposed by Pitie et al. and our analytical solution +to a Multi-Variate Gaussian Distribution (MVGD) transfer in conjunction with classical histogram +matching. As shown below our HM-MVGD-HM compound outperforms existing methods. +https://github.com/hahnec/color-matcher/ + +""" + + def colormatch(self, image_ref, image_target, method): + try: + from color_matcher import ColorMatcher + except: + raise Exception("Can't import color-matcher, did you install requirements.txt? 
Manual install: pip install color-matcher") + cm = ColorMatcher() + image_ref = image_ref.cpu() + image_target = image_target.cpu() + batch_size = image_target.size(0) + out = [] + images_target = image_target.squeeze() + images_ref = image_ref.squeeze() + + image_ref_np = images_ref.numpy() + images_target_np = images_target.numpy() + + if image_ref.size(0) > 1 and image_ref.size(0) != batch_size: + raise ValueError("ColorMatch: Use either single reference image or a matching batch of reference images.") + + for i in range(batch_size): + image_target_np = images_target_np if batch_size == 1 else images_target[i].numpy() + image_ref_np_i = image_ref_np if image_ref.size(0) == 1 else images_ref[i].numpy() + try: + image_result = cm.transfer(src=image_target_np, ref=image_ref_np_i, method=method) + except BaseException as e: + print(f"Error occurred during transfer: {e}") + break + out.append(torch.from_numpy(image_result)) + return (torch.stack(out, dim=0).to(torch.float32), ) + +class SaveImageWithAlpha: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), + "mask": ("MASK", ), + "filename_prefix": ("STRING", {"default": "ComfyUI"})}, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + RETURN_TYPES = () + FUNCTION = "save_images_alpha" + OUTPUT_NODE = True + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Saves an image and mask as .PNG with the mask as the alpha channel. +""" + + def save_images_alpha(self, images, mask, filename_prefix="ComfyUI_image_with_alpha", prompt=None, extra_pnginfo=None): + from PIL.PngImagePlugin import PngInfo + filename_prefix += self.prefix_append + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) + results = list() + if mask.dtype == torch.float16: + mask = mask.to(torch.float32) + def file_counter(): + max_counter = 0 + # Loop through the existing files + for existing_file in os.listdir(full_output_folder): + # Check if the file matches the expected format + match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file) + if match: + # Extract the numeric portion of the filename + file_counter = int(match.group(1)) + # Update the maximum counter value if necessary + if file_counter > max_counter: + max_counter = file_counter + return max_counter + + for image, alpha in zip(images, mask): + i = 255. * image.cpu().numpy() + a = 255. 
* alpha.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + + # Resize the mask to match the image size + a_resized = Image.fromarray(a).resize(img.size, Image.LANCZOS) + a_resized = np.clip(a_resized, 0, 255).astype(np.uint8) + img.putalpha(Image.fromarray(a_resized, mode='L')) + metadata = None + if not args.disable_metadata: + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + # Increment the counter by 1 to get the next available value + counter = file_counter() + 1 + file = f"{filename}_{counter:05}.png" + img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + return { "ui": { "images": results } } + +class ImageConcanate: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "direction": ( + [ 'right', + 'down', + 'left', + 'up', + ], + { + "default": 'right' + }), + "match_image_size": ("BOOLEAN", {"default": False}), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "concanate" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the image2 to image1 in the specified direction. +""" + + def concanate(self, image1, image2, direction, match_image_size): + if match_image_size: + image2 = torch.nn.functional.interpolate(image2, size=(image1.shape[2], image1.shape[3]), mode="bilinear") + if direction == 'right': + row = torch.cat((image1, image2), dim=2) + elif direction == 'down': + row = torch.cat((image1, image2), dim=1) + elif direction == 'left': + row = torch.cat((image2, image1), dim=2) + elif direction == 'up': + row = torch.cat((image2, image1), dim=1) + return (row,) + +class ImageGridComposite2x2: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "image3": ("IMAGE",), + "image4": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "compositegrid" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the 4 input images into a 2x2 grid. +""" + + def compositegrid(self, image1, image2, image3, image4): + top_row = torch.cat((image1, image2), dim=2) + bottom_row = torch.cat((image3, image4), dim=2) + grid = torch.cat((top_row, bottom_row), dim=1) + return (grid,) + +class ImageGridComposite3x3: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "image3": ("IMAGE",), + "image4": ("IMAGE",), + "image5": ("IMAGE",), + "image6": ("IMAGE",), + "image7": ("IMAGE",), + "image8": ("IMAGE",), + "image9": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "compositegrid" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the 9 input images into a 3x3 grid. 
+""" + + def compositegrid(self, image1, image2, image3, image4, image5, image6, image7, image8, image9): + top_row = torch.cat((image1, image2, image3), dim=2) + mid_row = torch.cat((image4, image5, image6), dim=2) + bottom_row = torch.cat((image7, image8, image9), dim=2) + grid = torch.cat((top_row, mid_row, bottom_row), dim=1) + return (grid,) + +class ImageBatchTestPattern: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "batch_size": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + "start_from": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "text_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "text_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "font_size": ("INT", {"default": 255,"min": 8, "max": 4096, "step": 1}), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "generatetestpattern" + CATEGORY = "KJNodes/text" + + def generatetestpattern(self, batch_size, font, font_size, start_from, width, height, text_x, text_y): + out = [] + # Generate the sequential numbers for each image + numbers = np.arange(start_from, start_from + batch_size) + font_path = folder_paths.get_full_path("kjnodes_fonts", font) + + for number in numbers: + # Create a black image with the number as a random color text + image = Image.new("RGB", (width, height), color='black') + draw = ImageDraw.Draw(image) + + # Generate a random color for the text + font_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) + + font = ImageFont.truetype(font_path, font_size) + + # Get the size of the text and position it in the center + text = str(number) + + try: + draw.text((text_x, text_y), text, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, text_y), text, font=font, fill=font_color,) + + # Convert the image to a numpy array and normalize the pixel values + image_np = np.array(image).astype(np.float32) / 255.0 + image_tensor = torch.from_numpy(image_np).unsqueeze(0) + out.append(image_tensor) + out_tensor = torch.cat(out, dim=0) + + return (out_tensor,) + +class ImageGrabPIL: + + @classmethod + def IS_CHANGED(cls): + + return + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "screencap" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Captures an area specified by screen coordinates. +Can be used for realtime diffusion with autoqueue. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "width": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + "delay": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.01}), + }, + } + + def screencap(self, x, y, width, height, num_frames, delay): + captures = [] + bbox = (x, y, x + width, y + height) + + for _ in range(num_frames): + # Capture screen + screen_capture = ImageGrab.grab(bbox=bbox) + screen_capture_torch = torch.tensor(np.array(screen_capture), dtype=torch.float32) / 255.0 + screen_capture_torch = screen_capture_torch.unsqueeze(0) + captures.append(screen_capture_torch) + + # Wait for a short delay if more than one frame is to be captured + if num_frames > 1: + time.sleep(delay) + + return (torch.cat(captures, dim=0),) + +class AddLabel: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image":("IMAGE",), + "text_x": ("INT", {"default": 10, "min": 0, "max": 4096, "step": 1}), + "text_y": ("INT", {"default": 2, "min": 0, "max": 4096, "step": 1}), + "height": ("INT", {"default": 48, "min": 0, "max": 4096, "step": 1}), + "font_size": ("INT", {"default": 32, "min": 0, "max": 4096, "step": 1}), + "font_color": ("STRING", {"default": "white"}), + "label_color": ("STRING", {"default": "black"}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "text": ("STRING", {"default": "Text"}), + "direction": ( + [ 'up', + 'down', + 'left', + 'right', + 'overlay' + ], + { + "default": 'up' + }), + }, + "optional":{ + "caption": ("STRING", {"default": "", "forceInput": True}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "addlabel" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Creates a new with the given text, and concatenates it to +either above or below the input image. +Note that this changes the input image's height! 
+Fonts are loaded from this folder: +ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts +""" + + def addlabel(self, image, text_x, text_y, text, height, font_size, font_color, label_color, font, direction, caption=""): + batch_size = image.shape[0] + width = image.shape[2] + + font_path = os.path.join(script_directory, "fonts", "TTNorms-Black.otf") if font == "TTNorms-Black.otf" else folder_paths.get_full_path("kjnodes_fonts", font) + + def process_image(input_image, caption_text): + if direction == 'overlay': + pil_image = Image.fromarray((input_image.cpu().numpy() * 255).astype(np.uint8)) + draw = ImageDraw.Draw(pil_image) + font = ImageFont.truetype(font_path, font_size) + try: + draw.text((text_x, text_y), caption_text, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, text_y), caption_text, font=font, fill=font_color) + processed_image = torch.from_numpy(np.array(pil_image).astype(np.float32) / 255.0).unsqueeze(0) + else: + label_image = Image.new("RGB", (width, height), label_color) + draw = ImageDraw.Draw(label_image) + font = ImageFont.truetype(font_path, font_size) + try: + draw.text((text_x, text_y), caption_text, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, text_y), caption_text, font=font, fill=font_color) + processed_image = torch.from_numpy(np.array(label_image).astype(np.float32) / 255.0)[None, :, :, :] + return processed_image + + if caption == "": + processed_images = [process_image(img, text) for img in image] + else: + assert len(caption) == batch_size, "Number of captions does not match number of images" + processed_images = [process_image(img, cap) for img, cap in zip(image, caption)] + processed_batch = torch.cat(processed_images, dim=0) + + # Combine images based on direction + if direction == 'down': + combined_images = torch.cat((image, processed_batch), dim=1) + elif direction == 'up': + combined_images = torch.cat((processed_batch, image), dim=1) + elif direction == 'left': + processed_batch = torch.rot90(processed_batch, 3, (2, 3)).permute(0, 3, 1, 2) + combined_images = torch.cat((processed_batch, image), dim=2) + elif direction == 'right': + processed_batch = torch.rot90(processed_batch, 3, (2, 3)).permute(0, 3, 1, 2) + combined_images = torch.cat((image, processed_batch), dim=2) + else: + combined_images = processed_batch + + return (combined_images,) + +class ImageBatchRepeatInterleaving: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "repeat" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Repeats each image in a batch by the specified number of times. +Example batch of 5 images: 0, 1 ,2, 3, 4 +with repeats 2 becomes batch of 10 images: 0, 0, 1, 1, 2, 2, 3, 3, 4, 4 +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "repeats": ("INT", {"default": 1, "min": 1, "max": 4096}), + }, + } + + def repeat(self, images, repeats): + + repeated_images = torch.repeat_interleave(images, repeats=repeats, dim=0) + return (repeated_images, ) + +class ImageUpscaleWithModelBatched: + @classmethod + def INPUT_TYPES(s): + return {"required": { "upscale_model": ("UPSCALE_MODEL",), + "images": ("IMAGE",), + "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "upscale" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Same as ComfyUI native model upscaling node, +but allows setting sub-batches for reduced VRAM usage. 
+""" + def upscale(self, upscale_model, images, per_batch): + + device = model_management.get_torch_device() + upscale_model.to(device) + in_img = images.movedim(-1,-3).to(device) + + steps = in_img.shape[0] + pbar = ProgressBar(steps) + t = [] + + for start_idx in range(0, in_img.shape[0], per_batch): + sub_images = upscale_model(in_img[start_idx:start_idx+per_batch]) + t.append(sub_images.cpu()) + # Calculate the number of images processed in this batch + batch_count = sub_images.shape[0] + # Update the progress bar by the number of images processed in this batch + pbar.update(batch_count) + upscale_model.cpu() + + t = torch.cat(t, dim=0).permute(0, 2, 3, 1).cpu() + + return (t,) + +class ImageNormalize_Neg1_To_1: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE",), + + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "normalize" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ +Normalize the images to be in the range [-1, 1] +""" + + def normalize(self,images): + images = images * 2.0 - 1.0 + return (images,) + +class RemapImageRange: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}), + "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}), + "clamp": ("BOOLEAN", {"default": True}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "remap" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Remaps the image values to the specified range. +""" + + def remap(self, image, min, max, clamp): + if image.dtype == torch.float16: + image = image.to(torch.float32) + image = min + image * (max - min) + if clamp: + image = torch.clamp(image, min=0.0, max=1.0) + return (image, ) + +class SplitImageChannels: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + }, + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK") + RETURN_NAMES = ("red", "green", "blue", "mask") + FUNCTION = "split" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Splits image channels into images where the selected channel +is repeated for all channels, and the alpha as a mask. +""" + + def split(self, image): + red = image[:, :, :, 0:1] # Red channel + green = image[:, :, :, 1:2] # Green channel + blue = image[:, :, :, 2:3] # Blue channel + alpha = image[:, :, :, 3:4] # Alpha channel + alpha = alpha.squeeze(-1) + + # Repeat the selected channel for all channels + red = torch.cat([red, red, red], dim=3) + green = torch.cat([green, green, green], dim=3) + blue = torch.cat([blue, blue, blue], dim=3) + return (red, green, blue, alpha) + +class MergeImageChannels: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "red": ("IMAGE",), + "green": ("IMAGE",), + "blue": ("IMAGE",), + + }, + "optional": { + "mask": ("MASK", {"default": None}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "merge" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Merges channel data into an image. 
+""" + + def merge(self, red, green, blue, alpha=None): + image = torch.stack([ + red[..., 0, None], # Red channel + green[..., 1, None], # Green channel + blue[..., 2, None] # Blue channel + ], dim=-1) + image = image.squeeze(-2) + if alpha is not None: + image = torch.cat([image, alpha], dim=-1) + return (image,) + +class ImagePadForOutpaintMasked: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + }, + "optional": { + "mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "expand_image" + + CATEGORY = "image" + + def expand_image(self, image, left, top, right, bottom, feathering, mask=None): + B, H, W, C = image.size() + + new_image = torch.ones( + (B, H + top + bottom, W + left + right, C), + dtype=torch.float32, + ) * 0.5 + + new_image[:, top:top + H, left:left + W, :] = image + + if mask is None: + new_mask = torch.ones( + (H + top + bottom, W + left + right), + dtype=torch.float32, + ) + + t = torch.zeros( + (H, W), + dtype=torch.float32 + ) + else: + # If a mask is provided, pad it to fit the new image size + mask = F.pad(mask, (left, right, top, bottom), mode='constant', value=0) + mask = 1 - mask + t = torch.zeros_like(mask) + + + + if feathering > 0 and feathering * 2 < H and feathering * 2 < W: + + for i in range(H): + for j in range(W): + dt = i if top != 0 else H + db = H - i if bottom != 0 else H + + dl = j if left != 0 else W + dr = W - j if right != 0 else W + + d = min(dt, db, dl, dr) + + if d >= feathering: + continue + + v = (feathering - d) / feathering + + if mask is None: + t[i, j] = v * v + else: + t[:, top + i, left + j] = v * v + + if mask is None: + mask = new_mask.squeeze(0) + mask[top:top + H, left:left + W] = t + mask = mask.unsqueeze(0) + + return (new_image, mask,) + +class ImageAndMaskPreview(SaveImage): + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + self.compress_level = 4 + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask_opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "mask_color": ("STRING", {"default": "255, 255, 255"}), + "pass_through": ("BOOLEAN", {"default": False}), + }, + "optional": { + "image": ("IMAGE",), + "mask": ("MASK",), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("composite",) + FUNCTION = "execute" + CATEGORY = "KJNodes" + DESCRIPTION = """ +Preview an image or a mask, when both inputs are used +composites the mask on top of the image. +with pass_through on the preview is disabled and the +composite is returned from the composite slot instead, +this allows for the preview to be passed for video combine +nodes for example. 
+""" + + def execute(self, mask_opacity, mask_color, pass_through, filename_prefix="ComfyUI", image=None, mask=None, prompt=None, extra_pnginfo=None): + if mask is not None and image is None: + preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) + elif mask is None and image is not None: + preview = image + elif mask is not None and image is not None: + mask_adjusted = mask * mask_opacity + mask_image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3).clone() + + color_list = list(map(int, mask_color.split(', '))) + print(color_list[0]) + mask_image[:, :, :, 0] = color_list[0] // 255 # Red channel + mask_image[:, :, :, 1] = color_list[1] // 255 # Green channel + mask_image[:, :, :, 2] = color_list[2] // 255 # Blue channel + + preview, = ImageCompositeMasked.composite(self, image, mask_image, 0, 0, True, mask_adjusted) + if pass_through: + return (preview, ) + return(self.save_images(preview, filename_prefix, prompt, extra_pnginfo)) + +class CrossFadeImages: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "crossfadeimages" + CATEGORY = "KJNodes/image" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images_1": ("IMAGE",), + "images_2": ("IMAGE",), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), + "transition_start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + "start_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}), + "end_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + def crossfadeimages(self, images_1, images_2, transition_start_index, transitioning_frames, interpolation, start_level, end_level): + + def crossfade(images_1, images_2, alpha): + crossfade = (1 - alpha) * images_1 + alpha * images_2 + return crossfade + def ease_in(t): + return t * t + def ease_out(t): + return 1 - (1 - t) * (1 - t) + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + def bounce(t): + if t < 0.5: + return self.ease_out(t * 2) * 0.5 + else: + return self.ease_in((t - 0.5) * 2) * 0.5 + 0.5 + def elastic(t): + return math.sin(13 * math.pi / 2 * t) * math.pow(2, 10 * (t - 1)) + def glitchy(t): + return t + 0.1 * math.sin(40 * t) + def exponential_ease_out(t): + return 1 - (1 - t) ** 4 + + easing_functions = { + "linear": lambda t: t, + "ease_in": ease_in, + "ease_out": ease_out, + "ease_in_out": ease_in_out, + "bounce": bounce, + "elastic": elastic, + "glitchy": glitchy, + "exponential_ease_out": exponential_ease_out, + } + + crossfade_images = [] + + alphas = torch.linspace(start_level, end_level, transitioning_frames) + for i in range(transitioning_frames): + alpha = alphas[i] + image1 = images_1[i + transition_start_index] + image2 = images_2[i + transition_start_index] + easing_function = easing_functions.get(interpolation) + alpha = easing_function(alpha) # Apply the easing function to the alpha value + + crossfade_image = crossfade(image1, image2, alpha) + crossfade_images.append(crossfade_image) + + # Convert crossfade_images to tensor + crossfade_images = torch.stack(crossfade_images, dim=0) + # Get the last frame result of the interpolation + last_frame = crossfade_images[-1] + # Calculate the number of remaining frames from images_2 + remaining_frames = len(images_2) - (transition_start_index + transitioning_frames) + # Crossfade the remaining 
frames with the last used alpha value + for i in range(remaining_frames): + alpha = alphas[-1] + image1 = images_1[i + transition_start_index + transitioning_frames] + image2 = images_2[i + transition_start_index + transitioning_frames] + easing_function = easing_functions.get(interpolation) + alpha = easing_function(alpha) # Apply the easing function to the alpha value + + crossfade_image = crossfade(image1, image2, alpha) + crossfade_images = torch.cat([crossfade_images, crossfade_image.unsqueeze(0)], dim=0) + # Append the beginning of images_1 + beginning_images_1 = images_1[:transition_start_index] + crossfade_images = torch.cat([beginning_images_1, crossfade_images], dim=0) + return (crossfade_images, ) + +class GetImageRangeFromBatch: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "imagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates a new batch using images from the input, +batch, starting from start_index. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "start_index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + }, + } + + def imagesfrombatch(self, images, start_index, num_frames): + if start_index == -1: + start_index = len(images) - num_frames + if start_index < 0 or start_index >= len(images): + raise ValueError("GetImageRangeFromBatch: Start index is out of range") + end_index = start_index + num_frames + if end_index > len(images): + raise ValueError("GetImageRangeFromBatch: End index is out of range") + chosen_images = images[start_index:end_index] + return (chosen_images, ) + +class GetImagesFromBatchIndexed: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "indexedimagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Selects and returns the images at the specified indices as an image batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), + }, + } + + def indexedimagesfrombatch(self, images, indexes): + + # Parse the indexes string into a list of integers + index_list = [int(index.strip()) for index in indexes.split(',')] + + # Convert list of indices to a PyTorch tensor + indices_tensor = torch.tensor(index_list, dtype=torch.long) + + # Select the images at the specified indices + chosen_images = images[indices_tensor] + + return (chosen_images,) + +class InsertImagesToBatchIndexed: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "insertimagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Inserts images at the specified indices into the original image batch. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "original_images": ("IMAGE",), + "images_to_insert": ("IMAGE",), + "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), + }, + } + + def insertimagesfrombatch(self, original_images, images_to_insert, indexes): + + # Parse the indexes string into a list of integers + index_list = [int(index.strip()) for index in indexes.split(',')] + + # Convert list of indices to a PyTorch tensor + indices_tensor = torch.tensor(index_list, dtype=torch.long) + + # Ensure the images_to_insert is a tensor + if not isinstance(images_to_insert, torch.Tensor): + images_to_insert = torch.tensor(images_to_insert) + + # Insert the images at the specified indices + for index, image in zip(indices_tensor, images_to_insert): + original_images[index] = image + + return (original_images,) + +class ReplaceImagesInBatch: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "replace" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Replaces the images in a batch, starting from the specified start index, +with the replacement images. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "original_images": ("IMAGE",), + "replacement_images": ("IMAGE",), + "start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + }, + } + + def replace(self, original_images, replacement_images, start_index): + images = None + if start_index >= len(original_images): + raise ValueError("GetImageRangeFromBatch: Start index is out of range") + end_index = start_index + len(replacement_images) + if end_index > len(original_images): + raise ValueError("GetImageRangeFromBatch: End index is out of range") + # Create a copy of the original_images tensor + original_images_copy = original_images.clone() + original_images_copy[start_index:end_index] = replacement_images + images = original_images_copy + return (images, ) + + +class ReverseImageBatch: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "reverseimagebatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Reverses the order of the images in a batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + }, + } + + def reverseimagebatch(self, images): + reversed_images = torch.flip(images, [0]) + return (reversed_images, ) + +class ImageBatchMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE", ), + "image_2": ("IMAGE", ), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "combine" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates an image batch from multiple images. +You can set how many inputs the node has, +with the **inputcount** and clicking update. 
+""" + + def combine(self, inputcount, **kwargs): + from nodes import ImageBatch + image_batch_node = ImageBatch() + image = kwargs["image_1"] + for c in range(1, inputcount): + new_image = kwargs[f"image_{c + 1}"] + image, = image_batch_node.batch(image, new_image) + return (image,) diff --git a/nodes/nodes.py b/nodes/nodes.py index 9c2bab9..725d5e8 100644 --- a/nodes/nodes.py +++ b/nodes/nodes.py @@ -12,12 +12,10 @@ import json import re import os import io -import random -import math import model_management -from nodes import MAX_RESOLUTION, SaveImage, CLIPTextEncode -from comfy_extras.nodes_mask import ImageCompositeMasked +from nodes import MAX_RESOLUTION, CLIPTextEncode + import comfy.sample import folder_paths from ..utility.utility import tensor2pil, pil2tensor @@ -100,24 +98,6 @@ class StringConstantMultiline: new_string = "\n".join(new_string) return (new_string, ) - -class JoinStrings: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "string1": ("STRING", {"default": '', "forceInput": True}), - "string2": ("STRING", {"default": '', "forceInput": True}), - "delimiter": ("STRING", {"default": ' ', "multiline": False}), - } - } - RETURN_TYPES = ("STRING",) - FUNCTION = "joinstring" - CATEGORY = "KJNodes/constants" - - def joinstring(self, string1, string2, delimiter): - joined_string = string1 + delimiter + string2 - return (joined_string, ) class CreateFluidMask: @@ -501,193 +481,7 @@ to a different frame count. print(output_str) return (output_str,) -class CrossFadeImages: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "crossfadeimages" - CATEGORY = "KJNodes/image" - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "images_1": ("IMAGE",), - "images_2": ("IMAGE",), - "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), - "transition_start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), - "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), - "start_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}), - "end_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}), - }, - } - - def crossfadeimages(self, images_1, images_2, transition_start_index, transitioning_frames, interpolation, start_level, end_level): - - def crossfade(images_1, images_2, alpha): - crossfade = (1 - alpha) * images_1 + alpha * images_2 - return crossfade - def ease_in(t): - return t * t - def ease_out(t): - return 1 - (1 - t) * (1 - t) - def ease_in_out(t): - return 3 * t * t - 2 * t * t * t - def bounce(t): - if t < 0.5: - return self.ease_out(t * 2) * 0.5 - else: - return self.ease_in((t - 0.5) * 2) * 0.5 + 0.5 - def elastic(t): - return math.sin(13 * math.pi / 2 * t) * math.pow(2, 10 * (t - 1)) - def glitchy(t): - return t + 0.1 * math.sin(40 * t) - def exponential_ease_out(t): - return 1 - (1 - t) ** 4 - - easing_functions = { - "linear": lambda t: t, - "ease_in": ease_in, - "ease_out": ease_out, - "ease_in_out": ease_in_out, - "bounce": bounce, - "elastic": elastic, - "glitchy": glitchy, - "exponential_ease_out": exponential_ease_out, - } - - crossfade_images = [] - - alphas = torch.linspace(start_level, end_level, transitioning_frames) - for i in range(transitioning_frames): - alpha = alphas[i] - image1 = images_1[i + transition_start_index] - image2 = images_2[i + transition_start_index] - easing_function = easing_functions.get(interpolation) - alpha = easing_function(alpha) # Apply the easing function to the alpha 
value - - crossfade_image = crossfade(image1, image2, alpha) - crossfade_images.append(crossfade_image) - - # Convert crossfade_images to tensor - crossfade_images = torch.stack(crossfade_images, dim=0) - # Get the last frame result of the interpolation - last_frame = crossfade_images[-1] - # Calculate the number of remaining frames from images_2 - remaining_frames = len(images_2) - (transition_start_index + transitioning_frames) - # Crossfade the remaining frames with the last used alpha value - for i in range(remaining_frames): - alpha = alphas[-1] - image1 = images_1[i + transition_start_index + transitioning_frames] - image2 = images_2[i + transition_start_index + transitioning_frames] - easing_function = easing_functions.get(interpolation) - alpha = easing_function(alpha) # Apply the easing function to the alpha value - - crossfade_image = crossfade(image1, image2, alpha) - crossfade_images = torch.cat([crossfade_images, crossfade_image.unsqueeze(0)], dim=0) - # Append the beginning of images_1 - beginning_images_1 = images_1[:transition_start_index] - crossfade_images = torch.cat([beginning_images_1, crossfade_images], dim=0) - return (crossfade_images, ) - -class GetImageRangeFromBatch: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "imagesfrombatch" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Creates a new batch using images from the input, -batch, starting from start_index. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "images": ("IMAGE",), - "start_index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}), - "num_frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), - }, - } - - def imagesfrombatch(self, images, start_index, num_frames): - if start_index == -1: - start_index = len(images) - num_frames - if start_index < 0 or start_index >= len(images): - raise ValueError("GetImageRangeFromBatch: Start index is out of range") - end_index = start_index + num_frames - if end_index > len(images): - raise ValueError("GetImageRangeFromBatch: End index is out of range") - chosen_images = images[start_index:end_index] - return (chosen_images, ) - -class GetImagesFromBatchIndexed: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "indexedimagesfrombatch" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Selects and returns the images at the specified indices as an image batch. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "images": ("IMAGE",), - "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), - }, - } - - def indexedimagesfrombatch(self, images, indexes): - - # Parse the indexes string into a list of integers - index_list = [int(index.strip()) for index in indexes.split(',')] - - # Convert list of indices to a PyTorch tensor - indices_tensor = torch.tensor(index_list, dtype=torch.long) - - # Select the images at the specified indices - chosen_images = images[indices_tensor] - - return (chosen_images,) - -class InsertImagesToBatchIndexed: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "insertimagesfrombatch" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Inserts images at the specified indices into the original image batch. 
-""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "original_images": ("IMAGE",), - "images_to_insert": ("IMAGE",), - "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), - }, - } - - def insertimagesfrombatch(self, original_images, images_to_insert, indexes): - - # Parse the indexes string into a list of integers - index_list = [int(index.strip()) for index in indexes.split(',')] - - # Convert list of indices to a PyTorch tensor - indices_tensor = torch.tensor(index_list, dtype=torch.long) - - # Ensure the images_to_insert is a tensor - if not isinstance(images_to_insert, torch.Tensor): - images_to_insert = torch.tensor(images_to_insert) - - # Insert the images at the specified indices - for index, image in zip(indices_tensor, images_to_insert): - original_images[index] = image - - return (original_images,) - class GetLatentsFromBatchIndexed: RETURN_TYPES = ("LATENT",) @@ -723,63 +517,6 @@ Selects and returns the latents at the specified indices as an latent batch. samples["samples"] = chosen_latents return (samples,) -class ReplaceImagesInBatch: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "replace" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Replaces the images in a batch, starting from the specified start index, -with the replacement images. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "original_images": ("IMAGE",), - "replacement_images": ("IMAGE",), - "start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), - }, - } - - def replace(self, original_images, replacement_images, start_index): - images = None - if start_index >= len(original_images): - raise ValueError("GetImageRangeFromBatch: Start index is out of range") - end_index = start_index + len(replacement_images) - if end_index > len(original_images): - raise ValueError("GetImageRangeFromBatch: End index is out of range") - # Create a copy of the original_images tensor - original_images_copy = original_images.clone() - original_images_copy[start_index:end_index] = replacement_images - images = original_images_copy - return (images, ) - - -class ReverseImageBatch: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "reverseimagebatch" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Reverses the order of the images in a batch. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "images": ("IMAGE",), - }, - } - - def reverseimagebatch(self, images): - reversed_images = torch.flip(images, [0]) - return (reversed_images, ) - - - class CreateTextMask: RETURN_TYPES = ("IMAGE", "MASK",) @@ -1068,35 +805,7 @@ Combines multiple conditioning nodes into one cond = cond_combine_node.combine(new_cond, cond)[0] return (cond, inputcount,) -class ImageBatchMulti: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), - "image_1": ("IMAGE", ), - "image_2": ("IMAGE", ), - }, - } - RETURN_TYPES = ("IMAGE",) - RETURN_NAMES = ("images",) - FUNCTION = "combine" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Creates an image batch from multiple images. -You can set how many inputs the node has, -with the **inputcount** and clicking update. 
-""" - - def combine(self, inputcount, **kwargs): - from nodes import ImageBatch - image_batch_node = ImageBatch() - image = kwargs["image_1"] - for c in range(1, inputcount): - new_image = kwargs[f"image_{c + 1}"] - image, = image_batch_node.batch(image, new_image) - return (image,) class MaskBatchMulti: @classmethod @@ -1128,6 +837,63 @@ with the **inputcount** and clicking update. mask = torch.cat((mask, new_mask), dim=0) return (mask,) +class JoinStrings: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string1": ("STRING", {"default": '', "forceInput": True}), + "string2": ("STRING", {"default": '', "forceInput": True}), + "delimiter": ("STRING", {"default": ' ', "multiline": False}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "joinstring" + CATEGORY = "KJNodes/constants" + + def joinstring(self, string1, string2, delimiter): + joined_string = string1 + delimiter + string2 + return (joined_string, ) + +class JoinStringMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "string_1": ("STRING", {"default": '', "forceInput": True}), + "string_2": ("STRING", {"default": '', "forceInput": True}), + "delimiter": ("STRING", {"default": ' ', "multiline": False}), + "return_list": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("string",) + FUNCTION = "combine" + CATEGORY = "KJNodes" + DESCRIPTION = """ +Creates single string, or a list of strings, from +multiple input strings. +You can set how many inputs the node has, +with the **inputcount** and clicking update. +""" + + def combine(self, inputcount, delimiter, **kwargs): + string = kwargs["string_1"] + return_list = kwargs["return_list"] + strings = [string] # Initialize a list with the first string + for c in range(1, inputcount): + new_string = kwargs[f"string_{c + 1}"] + if return_list: + strings.append(new_string) # Add new string to the list + else: + string = string + delimiter + new_string + if return_list: + return (strings,) # Return the list of strings + else: + return (string,) # Return the combined string + class CondPassThrough: @classmethod def INPUT_TYPES(s): @@ -1463,33 +1229,19 @@ Converts any type to a string. """ def stringify(self, input, prefix="", suffix=""): - if isinstance(input, (int, float, bool)): + if isinstance(input, (int, float, bool)): stringified = str(input) - if prefix: # Check if prefix is not empty - stringified = prefix + stringified # Add the prefix - if suffix: # Check if suffix is not empty - stringified = stringified + suffix # Add the suffix + elif isinstance(input, list): + print("input is a list") + stringified = ', '.join(str(item) for item in input) else: return - return (stringified,) - -class ImagePass: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - }, - } - RETURN_TYPES = ("IMAGE",) - FUNCTION = "passthrough" - CATEGORY = "KJNodes/misc" - DESCRIPTION = """ -Passes the image through without modifying it. 
-""" + if prefix: # Check if prefix is not empty + stringified = prefix + stringified # Add the prefix + if suffix: # Check if suffix is not empty + stringified = stringified + suffix # Add the suffix - def passthrough(self, image): - return image, + return (stringified,) class Sleep: @classmethod @@ -1561,291 +1313,6 @@ class EmptyLatentImagePresets: return (latent, int(width), int(height),) -class ColorMatch: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "image_ref": ("IMAGE",), - "image_target": ("IMAGE",), - "method": ( - [ - 'mkl', - 'hm', - 'reinhard', - 'mvgd', - 'hm-mvgd-hm', - 'hm-mkl-hm', - ], { - "default": 'mkl' - }), - - }, - } - - CATEGORY = "KJNodes/image" - - RETURN_TYPES = ("IMAGE",) - RETURN_NAMES = ("image",) - FUNCTION = "colormatch" - DESCRIPTION = """ -color-matcher enables color transfer across images which comes in handy for automatic -color-grading of photographs, paintings and film sequences as well as light-field -and stopmotion corrections. - -The methods behind the mappings are based on the approach from Reinhard et al., -the Monge-Kantorovich Linearization (MKL) as proposed by Pitie et al. and our analytical solution -to a Multi-Variate Gaussian Distribution (MVGD) transfer in conjunction with classical histogram -matching. As shown below our HM-MVGD-HM compound outperforms existing methods. -https://github.com/hahnec/color-matcher/ - -""" - - def colormatch(self, image_ref, image_target, method): - try: - from color_matcher import ColorMatcher - except: - raise Exception("Can't import color-matcher, did you install requirements.txt? Manual install: pip install color-matcher") - cm = ColorMatcher() - image_ref = image_ref.cpu() - image_target = image_target.cpu() - batch_size = image_target.size(0) - out = [] - images_target = image_target.squeeze() - images_ref = image_ref.squeeze() - - image_ref_np = images_ref.numpy() - images_target_np = images_target.numpy() - - if image_ref.size(0) > 1 and image_ref.size(0) != batch_size: - raise ValueError("ColorMatch: Use either single reference image or a matching batch of reference images.") - - for i in range(batch_size): - image_target_np = images_target_np if batch_size == 1 else images_target[i].numpy() - image_ref_np_i = image_ref_np if image_ref.size(0) == 1 else images_ref[i].numpy() - try: - image_result = cm.transfer(src=image_target_np, ref=image_ref_np_i, method=method) - except BaseException as e: - print(f"Error occurred during transfer: {e}") - break - out.append(torch.from_numpy(image_result)) - return (torch.stack(out, dim=0).to(torch.float32), ) - -class SaveImageWithAlpha: - def __init__(self): - self.output_dir = folder_paths.get_output_directory() - self.type = "output" - self.prefix_append = "" - - @classmethod - def INPUT_TYPES(s): - return {"required": - {"images": ("IMAGE", ), - "mask": ("MASK", ), - "filename_prefix": ("STRING", {"default": "ComfyUI"})}, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } - - RETURN_TYPES = () - FUNCTION = "save_images_alpha" - OUTPUT_NODE = True - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Saves an image and mask as .PNG with the mask as the alpha channel. 
-""" - - def save_images_alpha(self, images, mask, filename_prefix="ComfyUI_image_with_alpha", prompt=None, extra_pnginfo=None): - from comfy.cli_args import args - from PIL.PngImagePlugin import PngInfo - filename_prefix += self.prefix_append - full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) - results = list() - if mask.dtype == torch.float16: - mask = mask.to(torch.float32) - def file_counter(): - max_counter = 0 - # Loop through the existing files - for existing_file in os.listdir(full_output_folder): - # Check if the file matches the expected format - match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file) - if match: - # Extract the numeric portion of the filename - file_counter = int(match.group(1)) - # Update the maximum counter value if necessary - if file_counter > max_counter: - max_counter = file_counter - return max_counter - - for image, alpha in zip(images, mask): - i = 255. * image.cpu().numpy() - a = 255. * alpha.cpu().numpy() - img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) - - # Resize the mask to match the image size - a_resized = Image.fromarray(a).resize(img.size, Image.LANCZOS) - a_resized = np.clip(a_resized, 0, 255).astype(np.uint8) - img.putalpha(Image.fromarray(a_resized, mode='L')) - metadata = None - if not args.disable_metadata: - metadata = PngInfo() - if prompt is not None: - metadata.add_text("prompt", json.dumps(prompt)) - if extra_pnginfo is not None: - for x in extra_pnginfo: - metadata.add_text(x, json.dumps(extra_pnginfo[x])) - - # Increment the counter by 1 to get the next available value - counter = file_counter() + 1 - file = f"{filename}_{counter:05}.png" - img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4) - results.append({ - "filename": file, - "subfolder": subfolder, - "type": self.type - }) - - return { "ui": { "images": results } } - -class ImageConcanate: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "image1": ("IMAGE",), - "image2": ("IMAGE",), - "direction": ( - [ 'right', - 'down', - 'left', - 'up', - ], - { - "default": 'right' - }), - "match_image_size": ("BOOLEAN", {"default": False}), - }} - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "concanate" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Concatenates the image2 to image1 in the specified direction. -""" - - def concanate(self, image1, image2, direction, match_image_size): - if match_image_size: - image2 = torch.nn.functional.interpolate(image2, size=(image1.shape[2], image1.shape[3]), mode="bilinear") - if direction == 'right': - row = torch.cat((image1, image2), dim=2) - elif direction == 'down': - row = torch.cat((image1, image2), dim=1) - elif direction == 'left': - row = torch.cat((image2, image1), dim=2) - elif direction == 'up': - row = torch.cat((image2, image1), dim=1) - return (row,) - -class ImageGridComposite2x2: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "image1": ("IMAGE",), - "image2": ("IMAGE",), - "image3": ("IMAGE",), - "image4": ("IMAGE",), - }} - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "compositegrid" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Concatenates the 4 input images into a 2x2 grid. 
-""" - - def compositegrid(self, image1, image2, image3, image4): - top_row = torch.cat((image1, image2), dim=2) - bottom_row = torch.cat((image3, image4), dim=2) - grid = torch.cat((top_row, bottom_row), dim=1) - return (grid,) - -class ImageGridComposite3x3: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "image1": ("IMAGE",), - "image2": ("IMAGE",), - "image3": ("IMAGE",), - "image4": ("IMAGE",), - "image5": ("IMAGE",), - "image6": ("IMAGE",), - "image7": ("IMAGE",), - "image8": ("IMAGE",), - "image9": ("IMAGE",), - }} - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "compositegrid" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Concatenates the 9 input images into a 3x3 grid. -""" - - def compositegrid(self, image1, image2, image3, image4, image5, image6, image7, image8, image9): - top_row = torch.cat((image1, image2, image3), dim=2) - mid_row = torch.cat((image4, image5, image6), dim=2) - bottom_row = torch.cat((image7, image8, image9), dim=2) - grid = torch.cat((top_row, mid_row, bottom_row), dim=1) - return (grid,) - -class ImageBatchTestPattern: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "batch_size": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), - "start_from": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), - "text_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), - "text_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), - "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), - "font_size": ("INT", {"default": 255,"min": 8, "max": 4096, "step": 1}), - }} - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "generatetestpattern" - CATEGORY = "KJNodes/text" - - def generatetestpattern(self, batch_size, font, font_size, start_from, width, height, text_x, text_y): - out = [] - # Generate the sequential numbers for each image - numbers = np.arange(start_from, start_from + batch_size) - font_path = folder_paths.get_full_path("kjnodes_fonts", font) - - for number in numbers: - # Create a black image with the number as a random color text - image = Image.new("RGB", (width, height), color='black') - draw = ImageDraw.Draw(image) - - # Generate a random color for the text - font_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) - - font = ImageFont.truetype(font_path, font_size) - - # Get the size of the text and position it in the center - text = str(number) - - try: - draw.text((text_x, text_y), text, font=font, fill=font_color, features=['-liga']) - except: - draw.text((text_x, text_y), text, font=font, fill=font_color,) - - # Convert the image to a numpy array and normalize the pixel values - image_np = np.array(image).astype(np.float32) / 255.0 - image_tensor = torch.from_numpy(image_np).unsqueeze(0) - out.append(image_tensor) - out_tensor = torch.cat(out, dim=0) - - return (out_tensor,) - class BatchCLIPSeg: def __init__(self): @@ -2432,54 +1899,6 @@ Visualizes the specified bbox on the image. return (torch.cat(image_list, dim=0),) -from PIL import ImageGrab -import time -class ImageGrabPIL: - - @classmethod - def IS_CHANGED(cls): - - return - - RETURN_TYPES = ("IMAGE",) - RETURN_NAMES = ("image",) - FUNCTION = "screencap" - CATEGORY = "KJNodes/experimental" - DESCRIPTION = """ -Captures an area specified by screen coordinates. -Can be used for realtime diffusion with autoqueue. 
-""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), - "y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), - "width": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), - "height": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), - "num_frames": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), - "delay": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.01}), - }, - } - - def screencap(self, x, y, width, height, num_frames, delay): - captures = [] - bbox = (x, y, x + width, y + height) - - for _ in range(num_frames): - # Capture screen - screen_capture = ImageGrab.grab(bbox=bbox) - screen_capture_torch = torch.tensor(np.array(screen_capture), dtype=torch.float32) / 255.0 - screen_capture_torch = screen_capture_torch.unsqueeze(0) - captures.append(screen_capture_torch) - - # Wait for a short delay if more than one frame is to be captured - if num_frames > 1: - time.sleep(delay) - - return (torch.cat(captures, dim=0),) - class DummyLatentOut: @classmethod @@ -2634,91 +2053,6 @@ class InjectNoiseToLatent: rand_noise) / ((mix_randn_amount**2 + (1-mix_randn_amount)**2) ** 0.5) samples["samples"] = noised return (samples,) - -class AddLabel: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "image":("IMAGE",), - "text_x": ("INT", {"default": 10, "min": 0, "max": 4096, "step": 1}), - "text_y": ("INT", {"default": 2, "min": 0, "max": 4096, "step": 1}), - "height": ("INT", {"default": 48, "min": 0, "max": 4096, "step": 1}), - "font_size": ("INT", {"default": 32, "min": 0, "max": 4096, "step": 1}), - "font_color": ("STRING", {"default": "white"}), - "label_color": ("STRING", {"default": "black"}), - "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), - "text": ("STRING", {"default": "Text"}), - "direction": ( - [ 'up', - 'down', - ], - { - "default": 'up' - }), - }, - "optional":{ - "caption": ("STRING", {"default": "", "forceInput": True}), - } - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "addlabel" - CATEGORY = "KJNodes/text" - DESCRIPTION = """ -Creates a new with the given text, and concatenates it to -either above or below the input image. -Note that this changes the input image's height! 
-Fonts are loaded from this folder: -ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts -""" - - def addlabel(self, image, text_x, text_y, text, height, font_size, font_color, label_color, font, direction, caption=""): - batch_size = image.shape[0] - width = image.shape[2] - - if font == "TTNorms-Black.otf": - font_path = os.path.join(script_directory, "fonts", "TTNorms-Black.otf") - else: - font_path = folder_paths.get_full_path("kjnodes_fonts", font) - - if caption == "": - label_image = Image.new("RGB", (width, height), label_color) - draw = ImageDraw.Draw(label_image) - font = ImageFont.truetype(font_path, font_size) - try: - draw.text((text_x, text_y), text, font=font, fill=font_color, features=['-liga']) - except: - draw.text((text_x, text_y), text, font=font, fill=font_color) - - label_image = np.array(label_image).astype(np.float32) / 255.0 - label_image = torch.from_numpy(label_image)[None, :, :, :] - # Duplicate the label image for the entire batch - label_batch = label_image.repeat(batch_size, 1, 1, 1) - else: - label_list = [] - assert len(caption) == batch_size, "Number of captions does not match number of images" - for cap in caption: - label_image = Image.new("RGB", (width, height), label_color) - draw = ImageDraw.Draw(label_image) - font = ImageFont.truetype(font_path, font_size) - try: - draw.text((text_x, text_y), cap, font=font, fill=font_color, features=['-liga']) - except: - draw.text((text_x, text_y), cap, font=font, fill=font_color) - - label_image = np.array(label_image).astype(np.float32) / 255.0 - label_image = torch.from_numpy(label_image) - label_list.append(label_image) - label_batch = torch.stack(label_list) - print(label_batch.shape) - - if direction == 'down': - combined_images = torch.cat((image, label_batch), dim=1) - elif direction == 'up': - combined_images = torch.cat((label_batch, image), dim=1) - - return (combined_images,) - class SoundReactive: @classmethod @@ -3077,30 +2411,7 @@ https://huggingface.co/stabilityai/sv3d latent = torch.zeros([batch_size, 4, height // 8, width // 8]) return (final_positive, final_negative, {"samples": latent}) -class ImageBatchRepeatInterleaving: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "repeat" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Repeats each image in a batch by the specified number of times. -Example batch of 5 images: 0, 1 ,2, 3, 4 -with repeats 2 becomes batch of 10 images: 0, 0, 1, 1, 2, 2, 3, 3, 4, 4 -""" - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "images": ("IMAGE",), - "repeats": ("INT", {"default": 1, "min": 1, "max": 4096}), - }, - } - - def repeat(self, images, repeats): - - repeated_images = torch.repeat_interleave(images, repeats=repeats, dim=0) - return (repeated_images, ) def parse_coordinates(coordinates_str): coordinates = {} @@ -3252,60 +2563,7 @@ Experimental, does not function yet as ComfyUI base changes are needed return (c, plot_image_tensor,) -class ImageUpscaleWithModelBatched: - @classmethod - def INPUT_TYPES(s): - return {"required": { "upscale_model": ("UPSCALE_MODEL",), - "images": ("IMAGE",), - "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), - }} - RETURN_TYPES = ("IMAGE",) - FUNCTION = "upscale" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Same as ComfyUI native model upscaling node, -but allows setting sub-batches for reduced VRAM usage. 
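# ImageBatchRepeatInterleaving above relies on torch.repeat_interleave along the
# batch dimension, which duplicates each frame in place (0,0,1,1,...) rather than
# appending a copy of the whole batch (0,1,...,0,1,...) as .repeat() would. A tiny
# comparison with integer stand-ins for frames:
import torch

frames = torch.arange(5).view(5, 1, 1, 1)                  # stand-in for a 5-image batch
interleaved = torch.repeat_interleave(frames, repeats=2, dim=0)
tiled = frames.repeat(2, 1, 1, 1)

print(interleaved.flatten().tolist())  # [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
print(tiled.flatten().tolist())        # [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]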
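# The batched upscale node described above walks the batch dimension in slices of
# `per_batch` frames so that only one slice goes through the model at a time, and
# collects the results on the CPU. A hedged sketch of that loop with a dummy
# `process` callable standing in for the upscale model; the real node also moves
# between BHWC and BCHW layouts, which is omitted here.
import torch

def run_in_sub_batches(process, images, per_batch):
    chunks = []
    for start in range(0, images.shape[0], per_batch):
        chunks.append(process(images[start:start + per_batch]).cpu())
    return torch.cat(chunks, dim=0)

out = run_in_sub_batches(lambda x: x * 2.0, torch.rand(10, 3, 64, 64), per_batch=4)
print(out.shape)  # torch.Size([10, 3, 64, 64])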
-""" - def upscale(self, upscale_model, images, per_batch): - - device = model_management.get_torch_device() - upscale_model.to(device) - in_img = images.movedim(-1,-3).to(device) - - steps = in_img.shape[0] - pbar = comfy.utils.ProgressBar(steps) - t = [] - - for start_idx in range(0, in_img.shape[0], per_batch): - sub_images = upscale_model(in_img[start_idx:start_idx+per_batch]) - t.append(sub_images.cpu()) - # Calculate the number of images processed in this batch - batch_count = sub_images.shape[0] - # Update the progress bar by the number of images processed in this batch - pbar.update(batch_count) - upscale_model.cpu() - - t = torch.cat(t, dim=0).permute(0, 2, 3, 1).cpu() - return (t,) - -class ImageNormalize_Neg1_To_1: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "images": ("IMAGE",), - - }} - RETURN_TYPES = ("IMAGE",) - FUNCTION = "normalize" - CATEGORY = "KJNodes/misc" - DESCRIPTION = """ -Normalize the images to be in the range [-1, 1] -""" - - def normalize(self,images): - images = images * 2.0 - 1.0 - return (images,) folder_paths.add_model_folder_path("intristic_loras", os.path.join(script_directory, "intristic_loras")) @@ -3533,94 +2791,7 @@ https://huggingface.co/roborovski/superprompt-v1 return (out, ) -class RemapImageRange: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "image": ("IMAGE",), - "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}), - "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}), - "clamp": ("BOOLEAN", {"default": True}), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "remap" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Remaps the image values to the specified range. -""" - - def remap(self, image, min, max, clamp): - if image.dtype == torch.float16: - image = image.to(torch.float32) - image = min + image * (max - min) - if clamp: - image = torch.clamp(image, min=0.0, max=1.0) - return (image, ) -class SplitImageChannels: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "image": ("IMAGE",), - }, - } - - RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK") - RETURN_NAMES = ("red", "green", "blue", "mask") - FUNCTION = "split" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Splits image channels into images where the selected channel -is repeated for all channels, and the alpha as a mask. -""" - - def split(self, image): - red = image[:, :, :, 0:1] # Red channel - green = image[:, :, :, 1:2] # Green channel - blue = image[:, :, :, 2:3] # Blue channel - alpha = image[:, :, :, 3:4] # Alpha channel - alpha = alpha.squeeze(-1) - - # Repeat the selected channel for all channels - red = torch.cat([red, red, red], dim=3) - green = torch.cat([green, green, green], dim=3) - blue = torch.cat([blue, blue, blue], dim=3) - return (red, green, blue, alpha) - -class MergeImageChannels: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "red": ("IMAGE",), - "green": ("IMAGE",), - "blue": ("IMAGE",), - - }, - "optional": { - "mask": ("MASK", {"default": None}), - }, - } - - RETURN_TYPES = ("IMAGE",) - RETURN_NAMES = ("image",) - FUNCTION = "merge" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Merges channel data into an image. 
-""" - - def merge(self, red, green, blue, alpha=None): - image = torch.stack([ - red[..., 0, None], # Red channel - green[..., 1, None], # Green channel - blue[..., 2, None] # Blue channel - ], dim=-1) - image = image.squeeze(-2) - if alpha is not None: - image = torch.cat([image, alpha], dim=-1) - return (image,) class CameraPoseVisualizer: @@ -3776,141 +2947,6 @@ or a .txt file with RealEstate camera intrinsics and coordinates, in a 3D plot. ret_poses = [transform_matrix @ x for x in ret_poses] return np.array(ret_poses, dtype=np.float32) -class ImagePadForOutpaintMasked: - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - }, - "optional": { - "mask": ("MASK",), - } - } - - RETURN_TYPES = ("IMAGE", "MASK") - FUNCTION = "expand_image" - - CATEGORY = "image" - - def expand_image(self, image, left, top, right, bottom, feathering, mask=None): - B, H, W, C = image.size() - - new_image = torch.ones( - (B, H + top + bottom, W + left + right, C), - dtype=torch.float32, - ) * 0.5 - - new_image[:, top:top + H, left:left + W, :] = image - - if mask is None: - new_mask = torch.ones( - (H + top + bottom, W + left + right), - dtype=torch.float32, - ) - - t = torch.zeros( - (H, W), - dtype=torch.float32 - ) - else: - # If a mask is provided, pad it to fit the new image size - mask = F.pad(mask, (left, right, top, bottom), mode='constant', value=0) - mask = 1 - mask - t = torch.zeros_like(mask) - - - - if feathering > 0 and feathering * 2 < H and feathering * 2 < W: - - for i in range(H): - for j in range(W): - dt = i if top != 0 else H - db = H - i if bottom != 0 else H - - dl = j if left != 0 else W - dr = W - j if right != 0 else W - - d = min(dt, db, dl, dr) - - if d >= feathering: - continue - - v = (feathering - d) / feathering - - if mask is None: - t[i, j] = v * v - else: - t[:, top + i, left + j] = v * v - - if mask is None: - mask = new_mask.squeeze(0) - mask[top:top + H, left:left + W] = t - mask = mask.unsqueeze(0) - - return (new_image, mask,) - -class ImageAndMaskPreview(SaveImage): - def __init__(self): - self.output_dir = folder_paths.get_temp_directory() - self.type = "temp" - self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) - self.compress_level = 4 - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "mask_opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - "mask_color": ("STRING", {"default": "255, 255, 255"}), - "pass_through": ("BOOLEAN", {"default": False}), - }, - "optional": { - "image": ("IMAGE",), - "mask": ("MASK",), - }, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } - RETURN_TYPES = ("IMAGE",) - RETURN_NAMES = ("composite",) - FUNCTION = "execute" - CATEGORY = "KJNodes" - DESCRIPTION = """ -Preview an image or a mask, when both inputs are used -composites the mask on top of the image. -with pass_through on the preview is disabled and the -composite is returned from the composite slot instead, -this allows for the preview to be passed for video combine -nodes for example. 
-""" - - def execute(self, mask_opacity, mask_color, pass_through, filename_prefix="ComfyUI", image=None, mask=None, prompt=None, extra_pnginfo=None): - if mask is not None and image is None: - preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) - elif mask is None and image is not None: - preview = image - elif mask is not None and image is not None: - mask_adjusted = mask * mask_opacity - mask_image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3).clone() - - color_list = list(map(int, mask_color.split(', '))) - print(color_list[0]) - mask_image[:, :, :, 0] = color_list[0] // 255 # Red channel - mask_image[:, :, :, 1] = color_list[1] // 255 # Green channel - mask_image[:, :, :, 2] = color_list[2] // 255 # Blue channel - - preview, = ImageCompositeMasked.composite(self, image, mask_image, 0, 0, True, mask_adjusted) - if pass_through: - return (preview, ) - return(self.save_images(preview, filename_prefix, prompt, extra_pnginfo)) - - class StabilityAPI_SD3: diff --git a/web/js/jsnodes.js b/web/js/jsnodes.js index a20a7a7..9a9483e 100644 --- a/web/js/jsnodes.js +++ b/web/js/jsnodes.js @@ -73,6 +73,28 @@ app.registerExtension({ }); } break; + case "JoinStringMulti": + nodeType.prototype.onNodeCreated = function () { + this._type = "STRING" + this.inputs_offset = nodeData.name.includes("selective")?1:0 + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + if(target_number_of_inputs===this.inputs.length)return; // already set, do nothing + + if(target_number_of_inputs < this.inputs.length){ + for(let i = this.inputs.length; i>=this.inputs_offset+target_number_of_inputs; i--) + this.removeInput(i) + } + else{ + for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i) + this.addInput(`string_${i}`, this._type) + } + }); + } + break; case "SoundReactive": nodeType.prototype.onNodeCreated = function () { let audioContext;