convert nodes_post_processing to V3 schema (#9491)

Author: Alexander Piskun
Date:   2025-09-27 00:14:42 +03:00 (committed by GitHub)
Parent: d20576e6a3
Commit: 2103e39335

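The change follows a single mechanical pattern. A V1 node describes itself through the INPUT_TYPES classmethod plus the RETURN_TYPES, FUNCTION, and CATEGORY class attributes, and is registered via the module-level NODE_CLASS_MAPPINGS dict. A V3 node subclasses io.ComfyNode, declares everything in one define_schema() classmethod returning an io.Schema, implements a classmethod execute() that returns io.NodeOutput instead of a bare tuple, and is registered by a ComfyExtension exposed through an async comfy_entrypoint(). A minimal sketch of the converted shape, using a hypothetical ExampleInvert node rather than any node from this commit:

# Sketch of the V3 node pattern applied throughout the diff below.
# "ExampleInvert" and "ExampleExtension" are illustrative names, not part of the commit.
import torch
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io

class ExampleInvert(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        # Replaces INPUT_TYPES / RETURN_TYPES / FUNCTION / CATEGORY.
        return io.Schema(
            node_id="ExampleInvert",
            category="image/postprocessing",
            inputs=[io.Image.Input("image")],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image: torch.Tensor) -> io.NodeOutput:
        # A classmethod instead of a bound method; the result is wrapped in
        # io.NodeOutput instead of a one-element tuple.
        return io.NodeOutput(1.0 - image)

class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        # Replaces the NODE_CLASS_MAPPINGS dict.
        return [ExampleInvert]

async def comfy_entrypoint() -> ExampleExtension:
    return ExampleExtension()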

comfy_extras/nodes_post_processing.py
@@ -1,3 +1,4 @@
+from typing_extensions import override
 import numpy as np
 import torch
 import torch.nn.functional as F
@@ -7,33 +8,27 @@ import math
 import comfy.utils
 import comfy.model_management
 import node_helpers
+from comfy_api.latest import ComfyExtension, io
 
-class Blend:
-    def __init__(self):
-        pass
+class Blend(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageBlend",
+            category="image/postprocessing",
+            inputs=[
+                io.Image.Input("image1"),
+                io.Image.Input("image2"),
+                io.Float.Input("blend_factor", default=0.5, min=0.0, max=1.0, step=0.01),
+                io.Combo.Input("blend_mode", options=["normal", "multiply", "screen", "overlay", "soft_light", "difference"]),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )
 
     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image1": ("IMAGE",),
-                "image2": ("IMAGE",),
-                "blend_factor": ("FLOAT", {
-                    "default": 0.5,
-                    "min": 0.0,
-                    "max": 1.0,
-                    "step": 0.01
-                }),
-                "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light", "difference"],),
-            },
-        }
-
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "blend_images"
-
-    CATEGORY = "image/postprocessing"
-
-    def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str):
+    def execute(cls, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str) -> io.NodeOutput:
         image1, image2 = node_helpers.image_alpha_fix(image1, image2)
         image2 = image2.to(image1.device)
         if image1.shape != image2.shape:
@@ -41,12 +36,13 @@ class Blend:
         image2 = comfy.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center')
         image2 = image2.permute(0, 2, 3, 1)
 
-        blended_image = self.blend_mode(image1, image2, blend_mode)
+        blended_image = cls.blend_mode(image1, image2, blend_mode)
         blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor
         blended_image = torch.clamp(blended_image, 0, 1)
-        return (blended_image,)
+        return io.NodeOutput(blended_image)
 
-    def blend_mode(self, img1, img2, mode):
+    @classmethod
+    def blend_mode(cls, img1, img2, mode):
         if mode == "normal":
             return img2
         elif mode == "multiply":
@@ -56,13 +52,13 @@ class Blend:
         elif mode == "overlay":
             return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2))
         elif mode == "soft_light":
-            return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1))
+            return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (cls.g(img1) - img1))
         elif mode == "difference":
             return img1 - img2
-        else:
-            raise ValueError(f"Unsupported blend mode: {mode}")
+        raise ValueError(f"Unsupported blend mode: {mode}")
 
-    def g(self, x):
+    @classmethod
+    def g(cls, x):
         return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x))
 
 def gaussian_kernel(kernel_size: int, sigma: float, device=None):
@@ -71,38 +67,26 @@ def gaussian_kernel(kernel_size: int, sigma: float, device=None):
     g = torch.exp(-(d * d) / (2.0 * sigma * sigma))
     return g / g.sum()
 
-class Blur:
-    def __init__(self):
-        pass
+class Blur(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageBlur",
+            category="image/postprocessing",
+            inputs=[
+                io.Image.Input("image"),
+                io.Int.Input("blur_radius", default=1, min=1, max=31, step=1),
+                io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.1),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )
 
     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": ("IMAGE",),
-                "blur_radius": ("INT", {
-                    "default": 1,
-                    "min": 1,
-                    "max": 31,
-                    "step": 1
-                }),
-                "sigma": ("FLOAT", {
-                    "default": 1.0,
-                    "min": 0.1,
-                    "max": 10.0,
-                    "step": 0.1
-                }),
-            },
-        }
-
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "blur"
-
-    CATEGORY = "image/postprocessing"
-
-    def blur(self, image: torch.Tensor, blur_radius: int, sigma: float):
+    def execute(cls, image: torch.Tensor, blur_radius: int, sigma: float) -> io.NodeOutput:
         if blur_radius == 0:
-            return (image,)
+            return io.NodeOutput(image)
 
         image = image.to(comfy.model_management.get_torch_device())
         batch_size, height, width, channels = image.shape
@@ -115,31 +99,24 @@ class Blur:
         blurred = F.conv2d(padded_image, kernel, padding=kernel_size // 2, groups=channels)[:,:,blur_radius:-blur_radius, blur_radius:-blur_radius]
         blurred = blurred.permute(0, 2, 3, 1)
-        return (blurred.to(comfy.model_management.intermediate_device()),)
+        return io.NodeOutput(blurred.to(comfy.model_management.intermediate_device()))
 
-class Quantize:
-    def __init__(self):
-        pass
-
+class Quantize(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": ("IMAGE",),
-                "colors": ("INT", {
-                    "default": 256,
-                    "min": 1,
-                    "max": 256,
-                    "step": 1
-                }),
-                "dither": (["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"],),
-            },
-        }
-
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "quantize"
-
-    CATEGORY = "image/postprocessing"
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageQuantize",
+            category="image/postprocessing",
+            inputs=[
+                io.Image.Input("image"),
+                io.Int.Input("colors", default=256, min=1, max=256, step=1),
+                io.Combo.Input("dither", options=["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"]),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )
 
     @staticmethod
     def bayer(im, pal_im, order):
@@ -167,7 +144,8 @@ class Quantize:
         im = im.quantize(palette=pal_im, dither=Image.Dither.NONE)
         return im
 
-    def quantize(self, image: torch.Tensor, colors: int, dither: str):
+    @classmethod
+    def execute(cls, image: torch.Tensor, colors: int, dither: str) -> io.NodeOutput:
         batch_size, height, width, _ = image.shape
         result = torch.zeros_like(image)
@@ -187,46 +165,29 @@ class Quantize:
             quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255
             result[b] = quantized_array
 
-        return (result,)
+        return io.NodeOutput(result)
 
-class Sharpen:
-    def __init__(self):
-        pass
+class Sharpen(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageSharpen",
+            category="image/postprocessing",
+            inputs=[
+                io.Image.Input("image"),
+                io.Int.Input("sharpen_radius", default=1, min=1, max=31, step=1),
+                io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.01),
+                io.Float.Input("alpha", default=1.0, min=0.0, max=5.0, step=0.01),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )
 
     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": ("IMAGE",),
-                "sharpen_radius": ("INT", {
-                    "default": 1,
-                    "min": 1,
-                    "max": 31,
-                    "step": 1
-                }),
-                "sigma": ("FLOAT", {
-                    "default": 1.0,
-                    "min": 0.1,
-                    "max": 10.0,
-                    "step": 0.01
-                }),
-                "alpha": ("FLOAT", {
-                    "default": 1.0,
-                    "min": 0.0,
-                    "max": 5.0,
-                    "step": 0.01
-                }),
-            },
-        }
-
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "sharpen"
-
-    CATEGORY = "image/postprocessing"
-
-    def sharpen(self, image: torch.Tensor, sharpen_radius: int, sigma:float, alpha: float):
+    def execute(cls, image: torch.Tensor, sharpen_radius: int, sigma:float, alpha: float) -> io.NodeOutput:
         if sharpen_radius == 0:
-            return (image,)
+            return io.NodeOutput(image)
 
         batch_size, height, width, channels = image.shape
         image = image.to(comfy.model_management.get_torch_device())
@@ -245,23 +206,29 @@ class Sharpen:
         result = torch.clamp(sharpened, 0, 1)
-        return (result.to(comfy.model_management.intermediate_device()),)
+        return io.NodeOutput(result.to(comfy.model_management.intermediate_device()))
 
-class ImageScaleToTotalPixels:
+class ImageScaleToTotalPixels(io.ComfyNode):
     upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
     crop_methods = ["disabled", "center"]
 
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
-                              "megapixels": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 16.0, "step": 0.01}),
-                            }}
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "upscale"
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageScaleToTotalPixels",
+            category="image/upscaling",
+            inputs=[
+                io.Image.Input("image"),
+                io.Combo.Input("upscale_method", options=cls.upscale_methods),
+                io.Float.Input("megapixels", default=1.0, min=0.01, max=16.0, step=0.01),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )
 
-    CATEGORY = "image/upscaling"
-
-    def upscale(self, image, upscale_method, megapixels):
+    @classmethod
+    def execute(cls, image, upscale_method, megapixels) -> io.NodeOutput:
         samples = image.movedim(-1,1)
         total = int(megapixels * 1024 * 1024)
@@ -271,12 +238,18 @@ class ImageScaleToTotalPixels:
         s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
         s = s.movedim(1,-1)
-        return (s,)
+        return io.NodeOutput(s)
 
-NODE_CLASS_MAPPINGS = {
-    "ImageBlend": Blend,
-    "ImageBlur": Blur,
-    "ImageQuantize": Quantize,
-    "ImageSharpen": Sharpen,
-    "ImageScaleToTotalPixels": ImageScaleToTotalPixels,
-}
+class PostProcessingExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            Blend,
+            Blur,
+            Quantize,
+            Sharpen,
+            ImageScaleToTotalPixels,
+        ]
+
+async def comfy_entrypoint() -> PostProcessingExtension:
+    return PostProcessingExtension()
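The converted nodes keep their original math; only the wrapper changed. ImageBlur, for example, is still a grouped 2D convolution with a normalized Gaussian kernel. A self-contained sketch of that operation in plain PyTorch, runnable without a ComfyUI environment (the node's reflect-pad with padding=kernel_size // 2 followed by cropping blur_radius pixels is equivalent to the valid convolution used here):

# Standalone sketch of the blur that Blur.execute performs.
import torch
import torch.nn.functional as F

def gaussian_kernel(kernel_size: int, sigma: float) -> torch.Tensor:
    # Mirrors the gaussian_kernel helper above: a 2D Gaussian over a
    # [-1, 1] x [-1, 1] grid, normalized to sum to 1.
    x, y = torch.meshgrid(
        torch.linspace(-1, 1, kernel_size),
        torch.linspace(-1, 1, kernel_size),
        indexing="ij",
    )
    g = torch.exp(-(x * x + y * y) / (2.0 * sigma * sigma))
    return g / g.sum()

image = torch.rand(1, 64, 64, 3)   # NHWC layout in [0, 1], like ComfyUI IMAGE tensors
radius, sigma = 3, 1.0
kernel_size = radius * 2 + 1
channels = image.shape[-1]

# One copy of the kernel per channel, applied as a grouped convolution.
kernel = gaussian_kernel(kernel_size, sigma).repeat(channels, 1, 1).unsqueeze(1)

nchw = image.permute(0, 3, 1, 2)
padded = F.pad(nchw, (radius, radius, radius, radius), mode="reflect")
blurred = F.conv2d(padded, kernel, groups=channels)
print(blurred.permute(0, 2, 3, 1).shape)   # back to NHWC: torch.Size([1, 64, 64, 3])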