diff --git a/__init__.py b/__init__.py
index 95d3414..1f5de7b 100644
--- a/__init__.py
+++ b/__init__.py
@@ -51,6 +51,7 @@ NODE_CONFIG = {
     "ImageBatchRepeatInterleaving": {"class": ImageBatchRepeatInterleaving},
     "ImageBatchTestPattern": {"class": ImageBatchTestPattern, "name": "Image Batch Test Pattern"},
     "ImageConcanate": {"class": ImageConcanate, "name": "Image Concatenate"},
+    "ImageConcatMulti": {"class": ImageConcatMulti, "name": "Image Concatenate Multi"},
     "ImageGrabPIL": {"class": ImageGrabPIL, "name": "Image Grab PIL"},
     "ImageGridComposite2x2": {"class": ImageGridComposite2x2, "name": "Image Grid Composite 2x2"},
     "ImageGridComposite3x3": {"class": ImageGridComposite3x3, "name": "Image Grid Composite 3x3"},
diff --git a/nodes/image_nodes.py b/nodes/image_nodes.py
index 5b724fa..2d4cd23 100644
--- a/nodes/image_nodes.py
+++ b/nodes/image_nodes.py
@@ -209,11 +209,11 @@ class ImageConcanate:
 Concatenates the image2 to image1 in the specified direction.
 """
 
-    def concanate(self, image1, image2, direction, match_image_size):
+    def concanate(self, image1, image2, direction, match_image_size, first_image_shape=None):
         # Check if the batch sizes are different
         batch_size1 = image1.size(0)
         batch_size2 = image2.size(0)
-        
+
         if batch_size1 != batch_size2:
             # Calculate the number of repetitions needed
             max_batch_size = max(batch_size1, batch_size2)
@@ -224,15 +224,20 @@
             image1 = image1.repeat(repeats1, 1, 1, 1)
             image2 = image2.repeat(repeats2, 1, 1, 1)
         if match_image_size:
+            # Resize image2 to the reference shape; fall back to image1's shape when none is given (direct node use)
+            target_shape = first_image_shape if first_image_shape is not None else image1.shape
-            image2 = torch.nn.functional.interpolate(image2, size=(image1.shape[2], image1.shape[3]), mode="bilinear")
+            image2_resized = image2.movedim(-1,1)
+            image2_resized = common_upscale(image2_resized, target_shape[2], target_shape[1], "lanczos", "disabled").movedim(1,-1)
+        else:
+            image2_resized = image2
         if direction == 'right':
-            row = torch.cat((image1, image2), dim=2)
+            row = torch.cat((image1, image2_resized), dim=2)
         elif direction == 'down':
-            row = torch.cat((image1, image2), dim=1)
+            row = torch.cat((image1, image2_resized), dim=1)
         elif direction == 'left':
-            row = torch.cat((image2, image1), dim=2)
+            row = torch.cat((image2_resized, image1), dim=2)
         elif direction == 'up':
-            row = torch.cat((image2, image1), dim=1)
+            row = torch.cat((image2_resized, image1), dim=1)
         return (row,)
 
 class ImageGridComposite2x2:
@@ -1198,6 +1201,46 @@ with the **inputcount** and clicking update.
         image = torch.sub(image, new_image)
         return (image,)
 
+class ImageConcatMulti:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}),
+                "image_1": ("IMAGE", ),
+                "image_2": ("IMAGE", ),
+                "direction": (
+                    ['right',
+                     'down',
+                     'left',
+                     'up',
+                    ],
+                    {
+                        "default": 'right'
+                    }),
+                "match_image_size": ("BOOLEAN", {"default": False}),
+            },
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    RETURN_NAMES = ("images",)
+    FUNCTION = "combine"
+    CATEGORY = "KJNodes/image"
+    DESCRIPTION = """
+Concatenates multiple images into one.
+You can set how many inputs the node has,
+with the **inputcount** and clicking update.
+""" + + def combine(self, inputcount, direction, match_image_size, **kwargs): + image = kwargs["image_1"] + first_image_shape = None + if first_image_shape is None: + first_image_shape = image.shape + for c in range(1, inputcount): + new_image = kwargs[f"image_{c + 1}"] + image, = ImageConcanate.concanate(self, image, new_image, direction, match_image_size, first_image_shape=first_image_shape) + first_image_shape = None + return (image,) + class PreviewAnimation: def __init__(self): self.output_dir = folder_paths.get_temp_directory() diff --git a/web/js/jsnodes.js b/web/js/jsnodes.js index c8fe533..1770745 100644 --- a/web/js/jsnodes.js +++ b/web/js/jsnodes.js @@ -31,6 +31,7 @@ app.registerExtension({ break; case "ImageBatchMulti": case "ImageAddMulti": + case "ImageConcatMulti": nodeType.prototype.onNodeCreated = function () { this._type = "IMAGE" this.inputs_offset = nodeData.name.includes("selective")?1:0