Merge branch 'master' into asset-management

Jedrzej Kosinski 2025-09-26 20:24:25 -07:00
commit 9f4c0f3afe
13 changed files with 404 additions and 356 deletions

View File

@@ -10,7 +10,7 @@ jobs:
test:
strategy:
matrix:
os: [ubuntu-latest, windows-latest, macos-latest]
os: [ubuntu-latest, windows-2022, macos-latest]
runs-on: ${{ matrix.os }}
continue-on-error: true
steps:

View File

@@ -1,25 +1,3 @@
# Admins
* @comfyanonymous
# Note: Github teams syntax cannot be used here as the repo is not owned by Comfy-Org.
# Inlined the team members for now.
# Maintainers
*.md @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
/tests/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
/tests-unit/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
/notebooks/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
/script_examples/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
/.github/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
/requirements.txt @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
/pyproject.toml @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
# Python web server
/api_server/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill
/app/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill
/utils/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill
# Node developers
/comfy_extras/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill
/comfy/comfy_types/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill
/comfy_api_nodes/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill
* @kosinkadink

View File

@@ -360,7 +360,7 @@ def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options):
def cfg_function(model, cond_pred, uncond_pred, cond_scale, x, timestep, model_options={}, cond=None, uncond=None):
if "sampler_cfg_function" in model_options:
args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep,
"cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options}
"cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options, "input_cond": cond, "input_uncond": uncond}
cfg_result = x - model_options["sampler_cfg_function"](args)
else:
cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale
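
This hunk widens the contract of custom sampler_cfg_function hooks by adding the raw "input_cond"/"input_uncond" entries. A minimal hook sketch, assuming the usual ModelPatcher.set_model_sampler_cfg_function patching path (the hook name and surrounding setup are illustrative):

def my_cfg_function(args):
    # "cond"/"uncond" are noise-space terms (x - denoised); the return value is
    # consumed as cfg_result = x - my_cfg_function(args).
    cond_eps, uncond_eps = args["cond"], args["uncond"]
    scale = args["cond_scale"]
    raw_cond = args["input_cond"]      # new key from this hunk; may be None
    raw_uncond = args["input_uncond"]  # new key from this hunk; may be None
    return uncond_eps + (cond_eps - uncond_eps) * scale

m = model.clone()
m.set_model_sampler_cfg_function(my_cfg_function)
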

View File

@@ -63,7 +63,13 @@ class HunyuanImageTEModel(QwenImageTEModel):
self.byt5_small = None
def encode_token_weights(self, token_weight_pairs):
cond, p, extra = super().encode_token_weights(token_weight_pairs)
tok_pairs = token_weight_pairs["qwen25_7b"][0]
template_end = -1
if tok_pairs[0][0] == 27:
if len(tok_pairs) > 36: # refiner prompt uses a fixed 36 template_end
template_end = 36
cond, p, extra = super().encode_token_weights(token_weight_pairs, template_end=template_end)
if self.byt5_small is not None and "byt5" in token_weight_pairs:
out = self.byt5_small.encode_token_weights(token_weight_pairs["byt5"])
extra["conditioning_byt5small"] = out[0]

View File

@@ -18,6 +18,15 @@ class QwenImageTokenizer(sd1_clip.SD1Tokenizer):
self.llama_template_images = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n"
def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, images=[], **kwargs):
skip_template = False
if text.startswith('<|im_start|>'):
skip_template = True
if text.startswith('<|start_header_id|>'):
skip_template = True
if skip_template:
llama_text = text
else:
if llama_template is None:
if len(images) > 0:
llama_text = self.llama_template_images.format(text)
@@ -47,10 +56,11 @@ class QwenImageTEModel(sd1_clip.SD1ClipModel):
def __init__(self, device="cpu", dtype=None, model_options={}):
super().__init__(device=device, dtype=dtype, name="qwen25_7b", clip_model=Qwen25_7BVLIModel, model_options=model_options)
def encode_token_weights(self, token_weight_pairs):
def encode_token_weights(self, token_weight_pairs, template_end=-1):
out, pooled, extra = super().encode_token_weights(token_weight_pairs)
tok_pairs = token_weight_pairs["qwen25_7b"][0]
count_im_start = 0
if template_end == -1:
for i, v in enumerate(tok_pairs):
elem = v[0]
if not torch.is_tensor(elem):
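
The new skip_template path means a prompt that already begins with its chat-template header is tokenized verbatim rather than being wrapped in llama_template again. A usage sketch, assuming clip is a loaded wrapper around this tokenizer:

prompt = ("<|im_start|>user\n"
          "A red fox in watercolor<|im_end|>\n"
          "<|im_start|>assistant\n")
tokens = clip.tokenize(prompt)  # starts with <|im_start|>, so no second template is applied
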

View File

@@ -1,43 +1,52 @@
from nodes import MAX_RESOLUTION
from typing_extensions import override
class CLIPTextEncodeSDXLRefiner:
import nodes
from comfy_api.latest import ComfyExtension, io
class CLIPTextEncodeSDXLRefiner(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": {
"ascore": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 1000.0, "step": 0.01}),
"width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
"height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
"text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "encode"
def define_schema(cls):
return io.Schema(
node_id="CLIPTextEncodeSDXLRefiner",
category="advanced/conditioning",
inputs=[
io.Float.Input("ascore", default=6.0, min=0.0, max=1000.0, step=0.01),
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION),
io.String.Input("text", multiline=True, dynamic_prompts=True),
io.Clip.Input("clip"),
],
outputs=[io.Conditioning.Output()],
)
CATEGORY = "advanced/conditioning"
def encode(self, clip, ascore, width, height, text):
@classmethod
def execute(cls, clip, ascore, width, height, text) -> io.NodeOutput:
tokens = clip.tokenize(text)
return (clip.encode_from_tokens_scheduled(tokens, add_dict={"aesthetic_score": ascore, "width": width, "height": height}), )
return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"aesthetic_score": ascore, "width": width, "height": height}))
class CLIPTextEncodeSDXL:
class CLIPTextEncodeSDXL(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": {
"clip": ("CLIP", ),
"width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
"height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
"crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
"crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
"target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
"target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
"text_g": ("STRING", {"multiline": True, "dynamicPrompts": True}),
"text_l": ("STRING", {"multiline": True, "dynamicPrompts": True}),
}}
RETURN_TYPES = ("CONDITIONING",)
FUNCTION = "encode"
def define_schema(cls):
return io.Schema(
node_id="CLIPTextEncodeSDXL",
category="advanced/conditioning",
inputs=[
io.Clip.Input("clip"),
io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION),
io.Int.Input("crop_w", default=0, min=0, max=nodes.MAX_RESOLUTION),
io.Int.Input("crop_h", default=0, min=0, max=nodes.MAX_RESOLUTION),
io.Int.Input("target_width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
io.Int.Input("target_height", default=1024, min=0, max=nodes.MAX_RESOLUTION),
io.String.Input("text_g", multiline=True, dynamic_prompts=True),
io.String.Input("text_l", multiline=True, dynamic_prompts=True),
],
outputs=[io.Conditioning.Output()],
)
CATEGORY = "advanced/conditioning"
def encode(self, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l):
@classmethod
def execute(cls, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l) -> io.NodeOutput:
tokens = clip.tokenize(text_g)
tokens["l"] = clip.tokenize(text_l)["l"]
if len(tokens["l"]) != len(tokens["g"]):
@@ -46,9 +55,17 @@ class CLIPTextEncodeSDXL:
tokens["l"] += empty["l"]
while len(tokens["l"]) > len(tokens["g"]):
tokens["g"] += empty["g"]
return (clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}), )
return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}))
NODE_CLASS_MAPPINGS = {
"CLIPTextEncodeSDXLRefiner": CLIPTextEncodeSDXLRefiner,
"CLIPTextEncodeSDXL": CLIPTextEncodeSDXL,
}
class ClipSdxlExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
CLIPTextEncodeSDXLRefiner,
CLIPTextEncodeSDXL,
]
async def comfy_entrypoint() -> ClipSdxlExtension:
return ClipSdxlExtension()
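
The same V1-to-V3 migration repeats through the remaining node files: INPUT_TYPES/RETURN_TYPES/FUNCTION/CATEGORY collapse into a single define_schema, instance methods become classmethods returning io.NodeOutput, and NODE_CLASS_MAPPINGS gives way to a ComfyExtension plus an async comfy_entrypoint. A minimal skeleton distilled from these hunks (node and class names are placeholders):

import torch
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io

class ExampleGain(io.ComfyNode):
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ExampleGain",  # hypothetical node id
            category="example",
            inputs=[
                io.Image.Input("image"),
                io.Float.Input("gain", default=1.0, min=0.0, max=10.0, step=0.01),
            ],
            outputs=[io.Image.Output()],
        )

    @classmethod
    def execute(cls, image, gain) -> io.NodeOutput:
        return io.NodeOutput(torch.clamp(image * gain, 0.0, 1.0))

class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [ExampleGain]

async def comfy_entrypoint() -> ExampleExtension:
    return ExampleExtension()
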

View File

@@ -1,6 +1,8 @@
# Code based on https://github.com/WikiChao/FreSca (MIT License)
import torch
import torch.fft as fft
from typing_extensions import override
from comfy_api.latest import ComfyExtension, io
def Fourier_filter(x, scale_low=1.0, scale_high=1.5, freq_cutoff=20):
@@ -51,25 +53,31 @@ def Fourier_filter(x, scale_low=1.0, scale_high=1.5, freq_cutoff=20):
return x_filtered
class FreSca:
class FreSca(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL",),
"scale_low": ("FLOAT", {"default": 1.0, "min": 0, "max": 10, "step": 0.01,
"tooltip": "Scaling factor for low-frequency components"}),
"scale_high": ("FLOAT", {"default": 1.25, "min": 0, "max": 10, "step": 0.01,
"tooltip": "Scaling factor for high-frequency components"}),
"freq_cutoff": ("INT", {"default": 20, "min": 1, "max": 10000, "step": 1,
"tooltip": "Number of frequency indices around center to consider as low-frequency"}),
}
}
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
CATEGORY = "_for_testing"
DESCRIPTION = "Applies frequency-dependent scaling to the guidance"
def patch(self, model, scale_low, scale_high, freq_cutoff):
def define_schema(cls):
return io.Schema(
node_id="FreSca",
display_name="FreSca",
category="_for_testing",
description="Applies frequency-dependent scaling to the guidance",
inputs=[
io.Model.Input("model"),
io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01,
tooltip="Scaling factor for low-frequency components"),
io.Float.Input("scale_high", default=1.25, min=0, max=10, step=0.01,
tooltip="Scaling factor for high-frequency components"),
io.Int.Input("freq_cutoff", default=20, min=1, max=10000, step=1,
tooltip="Number of frequency indices around center to consider as low-frequency"),
],
outputs=[
io.Model.Output(),
],
is_experimental=True,
)
@classmethod
def execute(cls, model, scale_low, scale_high, freq_cutoff):
def custom_cfg_function(args):
conds_out = args["conds_out"]
if len(conds_out) <= 1 or None in args["conds"][:2]:
@@ -91,13 +99,16 @@ class FreSca:
m = model.clone()
m.set_model_sampler_pre_cfg_function(custom_cfg_function)
return (m,)
return io.NodeOutput(m)
NODE_CLASS_MAPPINGS = {
"FreSca": FreSca,
}
class FreScaExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
FreSca,
]
NODE_DISPLAY_NAME_MAPPINGS = {
"FreSca": "FreSca",
}
async def comfy_entrypoint() -> FreScaExtension:
return FreScaExtension()
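
FreSca's core is Fourier_filter: FFT the guidance delta, scale the freq_cutoff lowest-frequency indices by scale_low and everything else by scale_high, then invert the transform. A hedged usage sketch against the signature shown above (the tensor shape is an assumption):

import torch

delta = torch.randn(1, 4, 64, 64)  # a guidance-like (batch, channels, H, W) tensor
filtered = Fourier_filter(delta, scale_low=1.0, scale_high=1.25, freq_cutoff=20)
assert filtered.shape == delta.shape  # the filter is shape-preserving
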

View File

@@ -12,35 +12,38 @@ from nodes import MAX_RESOLUTION
def composite(destination, source, x, y, mask = None, multiplier = 8, resize_source = False):
source = source.to(destination.device)
if resize_source:
source = torch.nn.functional.interpolate(source, size=(destination.shape[2], destination.shape[3]), mode="bilinear")
source = torch.nn.functional.interpolate(source, size=(destination.shape[-2], destination.shape[-1]), mode="bilinear")
source = comfy.utils.repeat_to_batch_size(source, destination.shape[0])
x = max(-source.shape[3] * multiplier, min(x, destination.shape[3] * multiplier))
y = max(-source.shape[2] * multiplier, min(y, destination.shape[2] * multiplier))
x = max(-source.shape[-1] * multiplier, min(x, destination.shape[-1] * multiplier))
y = max(-source.shape[-2] * multiplier, min(y, destination.shape[-2] * multiplier))
left, top = (x // multiplier, y // multiplier)
right, bottom = (left + source.shape[3], top + source.shape[2],)
right, bottom = (left + source.shape[-1], top + source.shape[-2],)
if mask is None:
mask = torch.ones_like(source)
else:
mask = mask.to(destination.device, copy=True)
mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[2], source.shape[3]), mode="bilinear")
mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[-2], source.shape[-1]), mode="bilinear")
mask = comfy.utils.repeat_to_batch_size(mask, source.shape[0])
# calculate the bounds of the source that will be overlapping the destination
# this prevents the source trying to overwrite latent pixels that are out of bounds
# of the destination
visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),)
visible_width, visible_height = (destination.shape[-1] - left + min(0, x), destination.shape[-2] - top + min(0, y),)
mask = mask[:, :, :visible_height, :visible_width]
if mask.ndim < source.ndim:
mask = mask.unsqueeze(1)
inverse_mask = torch.ones_like(mask) - mask
source_portion = mask * source[:, :, :visible_height, :visible_width]
destination_portion = inverse_mask * destination[:, :, top:bottom, left:right]
source_portion = mask * source[..., :visible_height, :visible_width]
destination_portion = inverse_mask * destination[..., top:bottom, left:right]
destination[:, :, top:bottom, left:right] = source_portion + destination_portion
destination[..., top:bottom, left:right] = source_portion + destination_portion
return destination
class LatentCompositeMasked:
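
The switch from shape[2]/shape[3] and [:, :, ...] slicing to negative axes and ellipsis indexing makes composite treat the trailing two dimensions as height/width regardless of tensor rank, so the same code now handles 5D video latents. A quick illustration (shapes are assumptions):

import torch

img_latent = torch.zeros(1, 4, 64, 64)      # (batch, channels, H, W)
vid_latent = torch.zeros(1, 4, 16, 64, 64)  # (batch, channels, frames, H, W)
for t in (img_latent, vid_latent):
    h, w = t.shape[-2], t.shape[-1]  # rank-agnostic, unlike shape[2]/shape[3]
    t[..., :h // 2, :w // 2] = 1.0   # the same slice works for 4D and 5D
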

View File

@@ -1,3 +1,4 @@
from typing_extensions import override
import numpy as np
import torch
import torch.nn.functional as F
@@ -7,33 +8,27 @@ import math
import comfy.utils
import comfy.model_management
import node_helpers
from comfy_api.latest import ComfyExtension, io
class Blend:
def __init__(self):
pass
class Blend(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ImageBlend",
category="image/postprocessing",
inputs=[
io.Image.Input("image1"),
io.Image.Input("image2"),
io.Float.Input("blend_factor", default=0.5, min=0.0, max=1.0, step=0.01),
io.Combo.Input("blend_mode", options=["normal", "multiply", "screen", "overlay", "soft_light", "difference"]),
],
outputs=[
io.Image.Output(),
],
)
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"image1": ("IMAGE",),
"image2": ("IMAGE",),
"blend_factor": ("FLOAT", {
"default": 0.5,
"min": 0.0,
"max": 1.0,
"step": 0.01
}),
"blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light", "difference"],),
},
}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "blend_images"
CATEGORY = "image/postprocessing"
def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str):
def execute(cls, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str) -> io.NodeOutput:
image1, image2 = node_helpers.image_alpha_fix(image1, image2)
image2 = image2.to(image1.device)
if image1.shape != image2.shape:
@@ -41,12 +36,13 @@ class Blend:
image2 = comfy.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center')
image2 = image2.permute(0, 2, 3, 1)
blended_image = self.blend_mode(image1, image2, blend_mode)
blended_image = cls.blend_mode(image1, image2, blend_mode)
blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor
blended_image = torch.clamp(blended_image, 0, 1)
return (blended_image,)
return io.NodeOutput(blended_image)
def blend_mode(self, img1, img2, mode):
@classmethod
def blend_mode(cls, img1, img2, mode):
if mode == "normal":
return img2
elif mode == "multiply":
@@ -56,13 +52,13 @@ class Blend:
elif mode == "overlay":
return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2))
elif mode == "soft_light":
return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1))
return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (cls.g(img1) - img1))
elif mode == "difference":
return img1 - img2
else:
raise ValueError(f"Unsupported blend mode: {mode}")
def g(self, x):
@classmethod
def g(cls, x):
return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x))
def gaussian_kernel(kernel_size: int, sigma: float, device=None):
@@ -71,38 +67,26 @@ def gaussian_kernel(kernel_size: int, sigma: float, device=None):
g = torch.exp(-(d * d) / (2.0 * sigma * sigma))
return g / g.sum()
class Blur:
def __init__(self):
pass
class Blur(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ImageBlur",
category="image/postprocessing",
inputs=[
io.Image.Input("image"),
io.Int.Input("blur_radius", default=1, min=1, max=31, step=1),
io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.1),
],
outputs=[
io.Image.Output(),
],
)
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"image": ("IMAGE",),
"blur_radius": ("INT", {
"default": 1,
"min": 1,
"max": 31,
"step": 1
}),
"sigma": ("FLOAT", {
"default": 1.0,
"min": 0.1,
"max": 10.0,
"step": 0.1
}),
},
}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "blur"
CATEGORY = "image/postprocessing"
def blur(self, image: torch.Tensor, blur_radius: int, sigma: float):
def execute(cls, image: torch.Tensor, blur_radius: int, sigma: float) -> io.NodeOutput:
if blur_radius == 0:
return (image,)
return io.NodeOutput(image)
image = image.to(comfy.model_management.get_torch_device())
batch_size, height, width, channels = image.shape
@@ -115,31 +99,24 @@ class Blur:
blurred = F.conv2d(padded_image, kernel, padding=kernel_size // 2, groups=channels)[:,:,blur_radius:-blur_radius, blur_radius:-blur_radius]
blurred = blurred.permute(0, 2, 3, 1)
return (blurred.to(comfy.model_management.intermediate_device()),)
return io.NodeOutput(blurred.to(comfy.model_management.intermediate_device()))
class Quantize:
def __init__(self):
pass
class Quantize(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"image": ("IMAGE",),
"colors": ("INT", {
"default": 256,
"min": 1,
"max": 256,
"step": 1
}),
"dither": (["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"],),
},
}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "quantize"
CATEGORY = "image/postprocessing"
def define_schema(cls):
return io.Schema(
node_id="ImageQuantize",
category="image/postprocessing",
inputs=[
io.Image.Input("image"),
io.Int.Input("colors", default=256, min=1, max=256, step=1),
io.Combo.Input("dither", options=["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"]),
],
outputs=[
io.Image.Output(),
],
)
@staticmethod
def bayer(im, pal_im, order):
@@ -167,7 +144,8 @@ class Quantize:
im = im.quantize(palette=pal_im, dither=Image.Dither.NONE)
return im
def quantize(self, image: torch.Tensor, colors: int, dither: str):
@classmethod
def execute(cls, image: torch.Tensor, colors: int, dither: str) -> io.NodeOutput:
batch_size, height, width, _ = image.shape
result = torch.zeros_like(image)
@@ -187,46 +165,29 @@ class Quantize:
quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255
result[b] = quantized_array
return (result,)
return io.NodeOutput(result)
class Sharpen:
def __init__(self):
pass
class Sharpen(io.ComfyNode):
@classmethod
def define_schema(cls):
return io.Schema(
node_id="ImageSharpen",
category="image/postprocessing",
inputs=[
io.Image.Input("image"),
io.Int.Input("sharpen_radius", default=1, min=1, max=31, step=1),
io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.01),
io.Float.Input("alpha", default=1.0, min=0.0, max=5.0, step=0.01),
],
outputs=[
io.Image.Output(),
],
)
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"image": ("IMAGE",),
"sharpen_radius": ("INT", {
"default": 1,
"min": 1,
"max": 31,
"step": 1
}),
"sigma": ("FLOAT", {
"default": 1.0,
"min": 0.1,
"max": 10.0,
"step": 0.01
}),
"alpha": ("FLOAT", {
"default": 1.0,
"min": 0.0,
"max": 5.0,
"step": 0.01
}),
},
}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "sharpen"
CATEGORY = "image/postprocessing"
def sharpen(self, image: torch.Tensor, sharpen_radius: int, sigma:float, alpha: float):
def execute(cls, image: torch.Tensor, sharpen_radius: int, sigma:float, alpha: float) -> io.NodeOutput:
if sharpen_radius == 0:
return (image,)
return io.NodeOutput(image)
batch_size, height, width, channels = image.shape
image = image.to(comfy.model_management.get_torch_device())
@@ -245,23 +206,29 @@ class Sharpen:
result = torch.clamp(sharpened, 0, 1)
return (result.to(comfy.model_management.intermediate_device()),)
return io.NodeOutput(result.to(comfy.model_management.intermediate_device()))
class ImageScaleToTotalPixels:
class ImageScaleToTotalPixels(io.ComfyNode):
upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
crop_methods = ["disabled", "center"]
@classmethod
def INPUT_TYPES(s):
return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
"megapixels": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 16.0, "step": 0.01}),
}}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "upscale"
def define_schema(cls):
return io.Schema(
node_id="ImageScaleToTotalPixels",
category="image/upscaling",
inputs=[
io.Image.Input("image"),
io.Combo.Input("upscale_method", options=cls.upscale_methods),
io.Float.Input("megapixels", default=1.0, min=0.01, max=16.0, step=0.01),
],
outputs=[
io.Image.Output(),
],
)
CATEGORY = "image/upscaling"
def upscale(self, image, upscale_method, megapixels):
@classmethod
def execute(cls, image, upscale_method, megapixels) -> io.NodeOutput:
samples = image.movedim(-1,1)
total = int(megapixels * 1024 * 1024)
@@ -271,12 +238,18 @@ class ImageScaleToTotalPixels:
s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
s = s.movedim(1,-1)
return (s,)
return io.NodeOutput(s)
NODE_CLASS_MAPPINGS = {
"ImageBlend": Blend,
"ImageBlur": Blur,
"ImageQuantize": Quantize,
"ImageSharpen": Sharpen,
"ImageScaleToTotalPixels": ImageScaleToTotalPixels,
}
class PostProcessingExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
Blend,
Blur,
Quantize,
Sharpen,
ImageScaleToTotalPixels,
]
async def comfy_entrypoint() -> PostProcessingExtension:
return PostProcessingExtension()
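
A side effect of execute and its helpers becoming classmethods is that node logic can be exercised without instantiation, e.g. in a quick test inside a ComfyUI environment (the call below is a sketch; unwrapping the returned io.NodeOutput is left out):

import torch

img1 = torch.rand(1, 8, 8, 3)  # ComfyUI images are (batch, H, W, channels) in [0, 1]
img2 = torch.rand(1, 8, 8, 3)
out = Blend.execute(img1, img2, blend_factor=0.5, blend_mode="multiply")  # io.NodeOutput
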

View File

@@ -1,18 +1,25 @@
from typing_extensions import override
import torch
class LatentRebatch:
from comfy_api.latest import ComfyExtension, io
class LatentRebatch(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": { "latents": ("LATENT",),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
}}
RETURN_TYPES = ("LATENT",)
INPUT_IS_LIST = True
OUTPUT_IS_LIST = (True, )
FUNCTION = "rebatch"
CATEGORY = "latent/batch"
def define_schema(cls):
return io.Schema(
node_id="RebatchLatents",
display_name="Rebatch Latents",
category="latent/batch",
is_input_list=True,
inputs=[
io.Latent.Input("latents"),
io.Int.Input("batch_size", default=1, min=1, max=4096),
],
outputs=[
io.Latent.Output(is_output_list=True),
],
)
@staticmethod
def get_batch(latents, list_ind, offset):
@@ -53,7 +60,8 @@ class LatentRebatch:
result = [torch.cat((b1, b2)) if torch.is_tensor(b1) else b1 + b2 for b1, b2 in zip(batch1, batch2)]
return result
def rebatch(self, latents, batch_size):
@classmethod
def execute(cls, latents, batch_size):
batch_size = batch_size[0]
output_list = []
@@ -63,24 +71,24 @@
for i in range(len(latents)):
# fetch new entry of list
#samples, masks, indices = self.get_batch(latents, i)
next_batch = self.get_batch(latents, i, processed)
next_batch = cls.get_batch(latents, i, processed)
processed += len(next_batch[2])
# set to current if current is None
if current_batch[0] is None:
current_batch = next_batch
# add previous to list if dimensions do not match
elif next_batch[0].shape[-1] != current_batch[0].shape[-1] or next_batch[0].shape[-2] != current_batch[0].shape[-2]:
sliced, _ = self.slice_batch(current_batch, 1, batch_size)
sliced, _ = cls.slice_batch(current_batch, 1, batch_size)
output_list.append({'samples': sliced[0][0], 'noise_mask': sliced[1][0], 'batch_index': sliced[2][0]})
current_batch = next_batch
# cat if everything checks out
else:
current_batch = self.cat_batch(current_batch, next_batch)
current_batch = cls.cat_batch(current_batch, next_batch)
# add to list if dimensions gone above target batch size
if current_batch[0].shape[0] > batch_size:
num = current_batch[0].shape[0] // batch_size
sliced, remainder = self.slice_batch(current_batch, num, batch_size)
sliced, remainder = cls.slice_batch(current_batch, num, batch_size)
for i in range(num):
output_list.append({'samples': sliced[0][i], 'noise_mask': sliced[1][i], 'batch_index': sliced[2][i]})
@@ -89,7 +97,7 @@
#add remainder
if current_batch[0] is not None:
sliced, _ = self.slice_batch(current_batch, 1, batch_size)
sliced, _ = cls.slice_batch(current_batch, 1, batch_size)
output_list.append({'samples': sliced[0][0], 'noise_mask': sliced[1][0], 'batch_index': sliced[2][0]})
#get rid of empty masks
@@ -97,23 +105,27 @@
if s['noise_mask'].mean() == 1.0:
del s['noise_mask']
return (output_list,)
return io.NodeOutput(output_list)
class ImageRebatch:
class ImageRebatch(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": { "images": ("IMAGE",),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
}}
RETURN_TYPES = ("IMAGE",)
INPUT_IS_LIST = True
OUTPUT_IS_LIST = (True, )
def define_schema(cls):
return io.Schema(
node_id="RebatchImages",
display_name="Rebatch Images",
category="image/batch",
is_input_list=True,
inputs=[
io.Image.Input("images"),
io.Int.Input("batch_size", default=1, min=1, max=4096),
],
outputs=[
io.Image.Output(is_output_list=True),
],
)
FUNCTION = "rebatch"
CATEGORY = "image/batch"
def rebatch(self, images, batch_size):
@classmethod
def execute(cls, images, batch_size):
batch_size = batch_size[0]
output_list = []
@@ -125,14 +137,17 @@
for i in range(0, len(all_images), batch_size):
output_list.append(torch.cat(all_images[i:i+batch_size], dim=0))
return (output_list,)
return io.NodeOutput(output_list)
NODE_CLASS_MAPPINGS = {
"RebatchLatents": LatentRebatch,
"RebatchImages": ImageRebatch,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"RebatchLatents": "Rebatch Latents",
"RebatchImages": "Rebatch Images",
}
class RebatchExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
LatentRebatch,
ImageRebatch,
]
async def comfy_entrypoint() -> RebatchExtension:
return RebatchExtension()
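
Here INPUT_IS_LIST/OUTPUT_IS_LIST map to is_input_list=True on the schema and is_output_list=True on the output declaration. With the former, every argument reaches execute as a list, which is why the scalar widget still needs batch_size[0]. A sketch of the contract (class and logic are illustrative):

class RebatchSketch(io.ComfyNode):
    @classmethod
    def execute(cls, latents, batch_size):
        batch_size = batch_size[0]           # scalar widgets arrive as one-element lists
        return io.NodeOutput(list(latents))  # is_output_list=True: one LATENT dict per entry
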

View File

@@ -2,10 +2,13 @@ import torch
from torch import einsum
import torch.nn.functional as F
import math
from typing_extensions import override
from einops import rearrange, repeat
from comfy.ldm.modules.attention import optimized_attention
import comfy.samplers
from comfy_api.latest import ComfyExtension, io
# from comfy/ldm/modules/attention.py
# but modified to return attention scores as well as output
@@ -104,19 +107,26 @@ def gaussian_blur_2d(img, kernel_size, sigma):
img = F.conv2d(img, kernel2d, groups=img.shape[-3])
return img
class SelfAttentionGuidance:
class SelfAttentionGuidance(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": { "model": ("MODEL",),
"scale": ("FLOAT", {"default": 0.5, "min": -2.0, "max": 5.0, "step": 0.01}),
"blur_sigma": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 10.0, "step": 0.1}),
}}
RETURN_TYPES = ("MODEL",)
FUNCTION = "patch"
def define_schema(cls):
return io.Schema(
node_id="SelfAttentionGuidance",
display_name="Self-Attention Guidance",
category="_for_testing",
inputs=[
io.Model.Input("model"),
io.Float.Input("scale", default=0.5, min=-2.0, max=5.0, step=0.01),
io.Float.Input("blur_sigma", default=2.0, min=0.0, max=10.0, step=0.1),
],
outputs=[
io.Model.Output(),
],
is_experimental=True,
)
CATEGORY = "_for_testing"
def patch(self, model, scale, blur_sigma):
@classmethod
def execute(cls, model, scale, blur_sigma):
m = model.clone()
attn_scores = None
@@ -170,12 +180,16 @@ class SelfAttentionGuidance:
# unet.mid_block.attentions[0].transformer_blocks[0].attn1.patch
m.set_model_attn1_replace(attn_and_record, "middle", 0, 0)
return (m, )
return io.NodeOutput(m)
NODE_CLASS_MAPPINGS = {
"SelfAttentionGuidance": SelfAttentionGuidance,
}
NODE_DISPLAY_NAME_MAPPINGS = {
"SelfAttentionGuidance": "Self-Attention Guidance",
}
class SagExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
SelfAttentionGuidance,
]
async def comfy_entrypoint() -> SagExtension:
return SagExtension()
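
set_model_attn1_replace swaps in a custom self-attention implementation for the named block; SAG uses it to record attention scores on the middle block. A hedged sketch of the hook shape, following the call above and ComfyUI's attn-replace convention (treat the extra_options key as an assumption):

from comfy.ldm.modules.attention import optimized_attention

def attn_and_record(q, k, v, extra_options):
    # a real hook would also stash the attention scores here
    return optimized_attention(q, k, v, heads=extra_options["n_heads"])

m = model.clone()
m.set_model_attn1_replace(attn_and_record, "middle", 0, 0)  # block name, block number, transformer index
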

View File

@@ -1,23 +1,31 @@
from typing_extensions import override
import torch
import comfy.utils
from comfy_api.latest import ComfyExtension, io
class SD_4XUpscale_Conditioning:
class SD_4XUpscale_Conditioning(io.ComfyNode):
@classmethod
def INPUT_TYPES(s):
return {"required": { "images": ("IMAGE",),
"positive": ("CONDITIONING",),
"negative": ("CONDITIONING",),
"scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
}}
RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
RETURN_NAMES = ("positive", "negative", "latent")
def define_schema(cls):
return io.Schema(
node_id="SD_4XUpscale_Conditioning",
category="conditioning/upscale_diffusion",
inputs=[
io.Image.Input("images"),
io.Conditioning.Input("positive"),
io.Conditioning.Input("negative"),
io.Float.Input("scale_ratio", default=4.0, min=0.0, max=10.0, step=0.01),
io.Float.Input("noise_augmentation", default=0.0, min=0.0, max=1.0, step=0.001),
],
outputs=[
io.Conditioning.Output(display_name="positive"),
io.Conditioning.Output(display_name="negative"),
io.Latent.Output(display_name="latent"),
],
)
FUNCTION = "encode"
CATEGORY = "conditioning/upscale_diffusion"
def encode(self, images, positive, negative, scale_ratio, noise_augmentation):
@classmethod
def execute(cls, images, positive, negative, scale_ratio, noise_augmentation):
width = max(1, round(images.shape[-2] * scale_ratio))
height = max(1, round(images.shape[-3] * scale_ratio))
@@ -39,8 +47,16 @@ class SD_4XUpscale_Conditioning:
out_cn.append(n)
latent = torch.zeros([images.shape[0], 4, height // 4, width // 4])
return (out_cp, out_cn, {"samples":latent})
return io.NodeOutput(out_cp, out_cn, {"samples":latent})
NODE_CLASS_MAPPINGS = {
"SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning,
}
class SdUpscaleExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
SD_4XUpscale_Conditioning,
]
async def comfy_entrypoint() -> SdUpscaleExtension:
return SdUpscaleExtension()
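
Multi-output nodes declare ordered outputs whose display_name only labels the socket in the UI; wiring is positional. The io.NodeOutput values must therefore line up with the schema order, as in this condensed sketch:

def execute_sketch(out_cp, out_cn, latent):
    # positional values match the declared outputs: positive, negative, latent
    return io.NodeOutput(out_cp, out_cn, {"samples": latent})
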

View File

@@ -1,8 +1,9 @@
# TCFG: Tangential Damping Classifier-free Guidance - (arXiv: https://arxiv.org/abs/2503.18137)
from typing_extensions import override
import torch
from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict
from comfy_api.latest import ComfyExtension, io
def score_tangential_damping(cond_score: torch.Tensor, uncond_score: torch.Tensor) -> torch.Tensor:
@@ -26,23 +27,24 @@ def score_tangential_damping(cond_score: torch.Tensor, uncond_score: torch.Tenso
return uncond_score_td.reshape_as(uncond_score).to(uncond_score.dtype)
class TCFG(ComfyNodeABC):
class TCFG(io.ComfyNode):
@classmethod
def INPUT_TYPES(cls) -> InputTypeDict:
return {
"required": {
"model": (IO.MODEL, {}),
}
}
def define_schema(cls):
return io.Schema(
node_id="TCFG",
display_name="Tangential Damping CFG",
category="advanced/guidance",
description="TCFG Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality.",
inputs=[
io.Model.Input("model"),
],
outputs=[
io.Model.Output(display_name="patched_model"),
],
)
RETURN_TYPES = (IO.MODEL,)
RETURN_NAMES = ("patched_model",)
FUNCTION = "patch"
CATEGORY = "advanced/guidance"
DESCRIPTION = "TCFG Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality."
def patch(self, model):
@classmethod
def execute(cls, model):
m = model.clone()
def tangential_damping_cfg(args):
@@ -59,13 +61,16 @@ class TCFG(ComfyNodeABC):
return [cond_pred, uncond_pred_td] + conds_out[2:]
m.set_model_sampler_pre_cfg_function(tangential_damping_cfg)
return (m,)
return io.NodeOutput(m)
NODE_CLASS_MAPPINGS = {
"TCFG": TCFG,
}
class TcfgExtension(ComfyExtension):
@override
async def get_node_list(self) -> list[type[io.ComfyNode]]:
return [
TCFG,
]
NODE_DISPLAY_NAME_MAPPINGS = {
"TCFG": "Tangential Damping CFG",
}
async def comfy_entrypoint() -> TcfgExtension:
return TcfgExtension()
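
FreSca and TCFG both hang off the same pre-CFG hook: the function receives the denoised cond/uncond predictions before mixing and must return a list of the same length. A minimal pass-through sketch (key names follow the hunks above):

def my_pre_cfg(args):
    conds_out = args["conds_out"]  # [cond_pred, uncond_pred, ...]
    if len(conds_out) <= 1 or None in args["conds"][:2]:
        return conds_out           # nothing to refine without an uncond
    cond_pred, uncond_pred = conds_out[0], conds_out[1]
    # a real hook (FreSca, TCFG) modifies one or both predictions here
    return [cond_pred, uncond_pred] + conds_out[2:]

m = model.clone()
m.set_model_sampler_pre_cfg_function(my_pre_cfg)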