Mirror of https://git.datalinker.icu/comfyanonymous/ComfyUI
Synced 2025-12-14 16:34:36 +08:00

Merge branch 'master' into asset-management

Commit 9f4c0f3afe

.github/workflows/test-unit.yml (vendored, 2 changes)
@@ -10,7 +10,7 @@ jobs:
   test:
     strategy:
       matrix:
-        os: [ubuntu-latest, windows-latest, macos-latest]
+        os: [ubuntu-latest, windows-2022, macos-latest]
     runs-on: ${{ matrix.os }}
     continue-on-error: true
     steps:
CODEOWNERS (24 changes)
@@ -1,25 +1,3 @@
 # Admins
 * @comfyanonymous
+* @kosinkadink
-# Note: Github teams syntax cannot be used here as the repo is not owned by Comfy-Org.
-# Inlined the team members for now.
-
-# Maintainers
-*.md @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
-/tests/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
-/tests-unit/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
-/notebooks/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
-/script_examples/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
-/.github/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
-/requirements.txt @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
-/pyproject.toml @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @Kosinkadink @christian-byrne @guill
-
-# Python web server
-/api_server/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill
-/app/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill
-/utils/ @yoland68 @robinjhuang @webfiltered @pythongosssss @ltdrdata @christian-byrne @guill
-
-# Node developers
-/comfy_extras/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill
-/comfy/comfy_types/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill
-/comfy_api_nodes/ @yoland68 @robinjhuang @pythongosssss @ltdrdata @Kosinkadink @webfiltered @christian-byrne @guill
@@ -360,7 +360,7 @@ def calc_cond_uncond_batch(model, cond, uncond, x_in, timestep, model_options):
 def cfg_function(model, cond_pred, uncond_pred, cond_scale, x, timestep, model_options={}, cond=None, uncond=None):
     if "sampler_cfg_function" in model_options:
         args = {"cond": x - cond_pred, "uncond": x - uncond_pred, "cond_scale": cond_scale, "timestep": timestep, "input": x, "sigma": timestep,
-                "cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options}
+                "cond_denoised": cond_pred, "uncond_denoised": uncond_pred, "model": model, "model_options": model_options, "input_cond": cond, "input_uncond": uncond}
         cfg_result = x - model_options["sampler_cfg_function"](args)
     else:
         cfg_result = uncond_pred + (cond_pred - uncond_pred) * cond_scale
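Note: the widened args dict means a "sampler_cfg_function" override can now inspect the raw conditioning lists, not just the predictions. A minimal sketch of such an override, assuming only what is visible in the hunk above (the mixing rule here is the plain CFG formula from the else branch, chosen for illustration):

def my_cfg_function(args):
    x = args["input"]
    cond_denoised = args["cond_denoised"]
    uncond_denoised = args["uncond_denoised"]
    scale = args["cond_scale"]
    # New in this commit: the raw conditioning lists (may be None); a custom
    # function could inspect them to vary behavior per conditioning entry.
    raw_cond = args["input_cond"]
    raw_uncond = args["input_uncond"]
    # Standard CFG mix on the denoised predictions (illustrative choice).
    denoised = uncond_denoised + (cond_denoised - uncond_denoised) * scale
    # cfg_function computes `x - returned_value`, so return x - denoised.
    return x - denoised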
@@ -63,7 +63,13 @@ class HunyuanImageTEModel(QwenImageTEModel):
         self.byt5_small = None

     def encode_token_weights(self, token_weight_pairs):
-        cond, p, extra = super().encode_token_weights(token_weight_pairs)
+        tok_pairs = token_weight_pairs["qwen25_7b"][0]
+        template_end = -1
+        if tok_pairs[0][0] == 27:
+            if len(tok_pairs) > 36:  # refiner prompt uses a fixed 36 template_end
+                template_end = 36
+
+        cond, p, extra = super().encode_token_weights(token_weight_pairs, template_end=template_end)
         if self.byt5_small is not None and "byt5" in token_weight_pairs:
             out = self.byt5_small.encode_token_weights(token_weight_pairs["byt5"])
             extra["conditioning_byt5small"] = out[0]
@@ -18,6 +18,15 @@ class QwenImageTokenizer(sd1_clip.SD1Tokenizer):
         self.llama_template_images = "<|im_start|>system\nDescribe the key features of the input image (color, shape, size, texture, objects, background), then explain how the user's text instruction should alter or modify the image. Generate a new image that meets the user's requirements while maintaining consistency with the original input where appropriate.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>{}<|im_end|>\n<|im_start|>assistant\n"

     def tokenize_with_weights(self, text, return_word_ids=False, llama_template=None, images=[], **kwargs):
+        skip_template = False
+        if text.startswith('<|im_start|>'):
+            skip_template = True
+        if text.startswith('<|start_header_id|>'):
+            skip_template = True
+
+        if skip_template:
+            llama_text = text
+        else:
         if llama_template is None:
             if len(images) > 0:
                 llama_text = self.llama_template_images.format(text)
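The intent of the added branch: a prompt that already begins with a chat-template control token is passed through verbatim instead of being wrapped in the default Llama template. The same check, restated as a hypothetical standalone helper:

def resolve_llama_text(text: str, default_template: str) -> str:
    # Prompts that already carry a chat template are used as-is.
    if text.startswith('<|im_start|>') or text.startswith('<|start_header_id|>'):
        return text
    return default_template.format(text)

assert resolve_llama_text('<|im_start|>system\nhi', '{}') == '<|im_start|>system\nhi'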
@@ -47,10 +56,11 @@ class QwenImageTEModel(sd1_clip.SD1ClipModel):
     def __init__(self, device="cpu", dtype=None, model_options={}):
         super().__init__(device=device, dtype=dtype, name="qwen25_7b", clip_model=Qwen25_7BVLIModel, model_options=model_options)

-    def encode_token_weights(self, token_weight_pairs):
+    def encode_token_weights(self, token_weight_pairs, template_end=-1):
         out, pooled, extra = super().encode_token_weights(token_weight_pairs)
         tok_pairs = token_weight_pairs["qwen25_7b"][0]
         count_im_start = 0
+        if template_end == -1:
         for i, v in enumerate(tok_pairs):
             elem = v[0]
             if not torch.is_tensor(elem):
@@ -1,43 +1,52 @@
-from nodes import MAX_RESOLUTION
+from typing_extensions import override

-class CLIPTextEncodeSDXLRefiner:
+import nodes
+from comfy_api.latest import ComfyExtension, io
+
+
+class CLIPTextEncodeSDXLRefiner(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {
-            "ascore": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 1000.0, "step": 0.01}),
-            "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
-            "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
-            "text": ("STRING", {"multiline": True, "dynamicPrompts": True}), "clip": ("CLIP", ),
-            }}
-    RETURN_TYPES = ("CONDITIONING",)
-    FUNCTION = "encode"
-
-    CATEGORY = "advanced/conditioning"
-
-    def encode(self, clip, ascore, width, height, text):
+    def define_schema(cls):
+        return io.Schema(
+            node_id="CLIPTextEncodeSDXLRefiner",
+            category="advanced/conditioning",
+            inputs=[
+                io.Float.Input("ascore", default=6.0, min=0.0, max=1000.0, step=0.01),
+                io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
+                io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION),
+                io.String.Input("text", multiline=True, dynamic_prompts=True),
+                io.Clip.Input("clip"),
+            ],
+            outputs=[io.Conditioning.Output()],
+        )
+
+    @classmethod
+    def execute(cls, clip, ascore, width, height, text) -> io.NodeOutput:
         tokens = clip.tokenize(text)
-        return (clip.encode_from_tokens_scheduled(tokens, add_dict={"aesthetic_score": ascore, "width": width, "height": height}), )
+        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"aesthetic_score": ascore, "width": width, "height": height}))

-class CLIPTextEncodeSDXL:
+class CLIPTextEncodeSDXL(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": {
-            "clip": ("CLIP", ),
-            "width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
-            "height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
-            "crop_w": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
-            "crop_h": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION}),
-            "target_width": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
-            "target_height": ("INT", {"default": 1024.0, "min": 0, "max": MAX_RESOLUTION}),
-            "text_g": ("STRING", {"multiline": True, "dynamicPrompts": True}),
-            "text_l": ("STRING", {"multiline": True, "dynamicPrompts": True}),
-            }}
-    RETURN_TYPES = ("CONDITIONING",)
-    FUNCTION = "encode"
-
-    CATEGORY = "advanced/conditioning"
-
-    def encode(self, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l):
+    def define_schema(cls):
+        return io.Schema(
+            node_id="CLIPTextEncodeSDXL",
+            category="advanced/conditioning",
+            inputs=[
+                io.Clip.Input("clip"),
+                io.Int.Input("width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
+                io.Int.Input("height", default=1024, min=0, max=nodes.MAX_RESOLUTION),
+                io.Int.Input("crop_w", default=0, min=0, max=nodes.MAX_RESOLUTION),
+                io.Int.Input("crop_h", default=0, min=0, max=nodes.MAX_RESOLUTION),
+                io.Int.Input("target_width", default=1024, min=0, max=nodes.MAX_RESOLUTION),
+                io.Int.Input("target_height", default=1024, min=0, max=nodes.MAX_RESOLUTION),
+                io.String.Input("text_g", multiline=True, dynamic_prompts=True),
+                io.String.Input("text_l", multiline=True, dynamic_prompts=True),
+            ],
+            outputs=[io.Conditioning.Output()],
+        )
+
+    @classmethod
+    def execute(cls, clip, width, height, crop_w, crop_h, target_width, target_height, text_g, text_l) -> io.NodeOutput:
         tokens = clip.tokenize(text_g)
         tokens["l"] = clip.tokenize(text_l)["l"]
         if len(tokens["l"]) != len(tokens["g"]):
@@ -46,9 +55,17 @@ class CLIPTextEncodeSDXL:
             tokens["l"] += empty["l"]
         while len(tokens["l"]) > len(tokens["g"]):
             tokens["g"] += empty["g"]
-        return (clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}), )
+        return io.NodeOutput(clip.encode_from_tokens_scheduled(tokens, add_dict={"width": width, "height": height, "crop_w": crop_w, "crop_h": crop_h, "target_width": target_width, "target_height": target_height}))

-NODE_CLASS_MAPPINGS = {
-    "CLIPTextEncodeSDXLRefiner": CLIPTextEncodeSDXLRefiner,
-    "CLIPTextEncodeSDXL": CLIPTextEncodeSDXL,
-}
+
+class ClipSdxlExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            CLIPTextEncodeSDXLRefiner,
+            CLIPTextEncodeSDXL,
+        ]
+
+
+async def comfy_entrypoint() -> ClipSdxlExtension:
+    return ClipSdxlExtension()
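The pattern repeated across this and the following files: NODE_CLASS_MAPPINGS / NODE_DISPLAY_NAME_MAPPINGS dictionaries are replaced by io.Schema-based node classes, a ComfyExtension subclass, and an async comfy_entrypoint(). A minimal sketch of a custom node in the new style, modeled directly on the converted classes above; the node itself is hypothetical, and io.String.Output is assumed to mirror the Output types visible in the diffs:

from typing_extensions import override

from comfy_api.latest import ComfyExtension, io


class ExampleNode(io.ComfyNode):  # hypothetical node, for illustration only
    @classmethod
    def define_schema(cls):
        return io.Schema(
            node_id="ExampleNode",
            display_name="Example Node",
            category="_for_testing",
            inputs=[
                io.String.Input("text", multiline=True),
            ],
            outputs=[io.String.Output()],  # assumed analogue of io.Conditioning.Output()
        )

    @classmethod
    def execute(cls, text) -> io.NodeOutput:
        return io.NodeOutput(text.upper())


class ExampleExtension(ComfyExtension):
    @override
    async def get_node_list(self) -> list[type[io.ComfyNode]]:
        return [ExampleNode]


async def comfy_entrypoint() -> ExampleExtension:
    return ExampleExtension()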
@@ -1,6 +1,8 @@
 # Code based on https://github.com/WikiChao/FreSca (MIT License)
 import torch
 import torch.fft as fft
+from typing_extensions import override
+from comfy_api.latest import ComfyExtension, io


 def Fourier_filter(x, scale_low=1.0, scale_high=1.5, freq_cutoff=20):
@@ -51,25 +53,31 @@ def Fourier_filter(x, scale_low=1.0, scale_high=1.5, freq_cutoff=20):
     return x_filtered


-class FreSca:
+class FreSca(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "model": ("MODEL",),
-                "scale_low": ("FLOAT", {"default": 1.0, "min": 0, "max": 10, "step": 0.01,
-                                        "tooltip": "Scaling factor for low-frequency components"}),
-                "scale_high": ("FLOAT", {"default": 1.25, "min": 0, "max": 10, "step": 0.01,
-                                         "tooltip": "Scaling factor for high-frequency components"}),
-                "freq_cutoff": ("INT", {"default": 20, "min": 1, "max": 10000, "step": 1,
-                                        "tooltip": "Number of frequency indices around center to consider as low-frequency"}),
-            }
-        }
-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "patch"
-    CATEGORY = "_for_testing"
-    DESCRIPTION = "Applies frequency-dependent scaling to the guidance"
-    def patch(self, model, scale_low, scale_high, freq_cutoff):
+    def define_schema(cls):
+        return io.Schema(
+            node_id="FreSca",
+            display_name="FreSca",
+            category="_for_testing",
+            description="Applies frequency-dependent scaling to the guidance",
+            inputs=[
+                io.Model.Input("model"),
+                io.Float.Input("scale_low", default=1.0, min=0, max=10, step=0.01,
+                               tooltip="Scaling factor for low-frequency components"),
+                io.Float.Input("scale_high", default=1.25, min=0, max=10, step=0.01,
+                               tooltip="Scaling factor for high-frequency components"),
+                io.Int.Input("freq_cutoff", default=20, min=1, max=10000, step=1,
+                             tooltip="Number of frequency indices around center to consider as low-frequency"),
+            ],
+            outputs=[
+                io.Model.Output(),
+            ],
+            is_experimental=True,
+        )

+    @classmethod
+    def execute(cls, model, scale_low, scale_high, freq_cutoff):
         def custom_cfg_function(args):
             conds_out = args["conds_out"]
             if len(conds_out) <= 1 or None in args["conds"][:2]:
@@ -91,13 +99,16 @@ class FreSca:
         m = model.clone()
         m.set_model_sampler_pre_cfg_function(custom_cfg_function)

-        return (m,)
+        return io.NodeOutput(m)


-NODE_CLASS_MAPPINGS = {
-    "FreSca": FreSca,
-}
+class FreScaExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            FreSca,
+        ]

-NODE_DISPLAY_NAME_MAPPINGS = {
-    "FreSca": "FreSca",
-}
+
+async def comfy_entrypoint() -> FreScaExtension:
+    return FreScaExtension()
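For context, Fourier_filter (whose body sits outside this hunk) scales low- and high-frequency components of the guidance separately. A self-contained sketch of that idea, assuming the standard centered-FFT formulation; the file's actual implementation may differ in detail:

import torch
import torch.fft as fft

def fourier_filter_sketch(x: torch.Tensor, scale_low=1.0, scale_high=1.5, freq_cutoff=20):
    # Centered 2D FFT over the last two (spatial) dimensions.
    x_freq = fft.fftshift(fft.fft2(x.float()), dim=(-2, -1))
    h, w = x_freq.shape[-2:]
    cy, cx = h // 2, w // 2
    # Scale everything by scale_high, then overwrite the square of
    # freq_cutoff indices around the center with scale_low.
    scale = torch.full((h, w), scale_high, device=x.device)
    scale[cy - freq_cutoff:cy + freq_cutoff, cx - freq_cutoff:cx + freq_cutoff] = scale_low
    x_filtered = fft.ifft2(fft.ifftshift(x_freq * scale, dim=(-2, -1))).real
    return x_filtered.to(x.dtype)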
@@ -12,35 +12,38 @@ from nodes import MAX_RESOLUTION
 def composite(destination, source, x, y, mask = None, multiplier = 8, resize_source = False):
     source = source.to(destination.device)
     if resize_source:
-        source = torch.nn.functional.interpolate(source, size=(destination.shape[2], destination.shape[3]), mode="bilinear")
+        source = torch.nn.functional.interpolate(source, size=(destination.shape[-2], destination.shape[-1]), mode="bilinear")

     source = comfy.utils.repeat_to_batch_size(source, destination.shape[0])

-    x = max(-source.shape[3] * multiplier, min(x, destination.shape[3] * multiplier))
-    y = max(-source.shape[2] * multiplier, min(y, destination.shape[2] * multiplier))
+    x = max(-source.shape[-1] * multiplier, min(x, destination.shape[-1] * multiplier))
+    y = max(-source.shape[-2] * multiplier, min(y, destination.shape[-2] * multiplier))

     left, top = (x // multiplier, y // multiplier)
-    right, bottom = (left + source.shape[3], top + source.shape[2],)
+    right, bottom = (left + source.shape[-1], top + source.shape[-2],)

     if mask is None:
         mask = torch.ones_like(source)
     else:
         mask = mask.to(destination.device, copy=True)
-        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[2], source.shape[3]), mode="bilinear")
+        mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(source.shape[-2], source.shape[-1]), mode="bilinear")
         mask = comfy.utils.repeat_to_batch_size(mask, source.shape[0])

     # calculate the bounds of the source that will be overlapping the destination
     # this prevents the source trying to overwrite latent pixels that are out of bounds
     # of the destination
-    visible_width, visible_height = (destination.shape[3] - left + min(0, x), destination.shape[2] - top + min(0, y),)
+    visible_width, visible_height = (destination.shape[-1] - left + min(0, x), destination.shape[-2] - top + min(0, y),)

     mask = mask[:, :, :visible_height, :visible_width]
+    if mask.ndim < source.ndim:
+        mask = mask.unsqueeze(1)
+
     inverse_mask = torch.ones_like(mask) - mask

-    source_portion = mask * source[:, :, :visible_height, :visible_width]
-    destination_portion = inverse_mask * destination[:, :, top:bottom, left:right]
+    source_portion = mask * source[..., :visible_height, :visible_width]
+    destination_portion = inverse_mask * destination[..., top:bottom, left:right]

-    destination[:, :, top:bottom, left:right] = source_portion + destination_portion
+    destination[..., top:bottom, left:right] = source_portion + destination_portion
     return destination

 class LatentCompositeMasked:
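The switch from shape[2]/shape[3] and [:, :, ...] to shape[-2]/shape[-1] and [..., ...] makes composite rank-agnostic, so the same code path can serve 4-D image latents and 5-D latents with an extra leading axis. A small demonstration of why that matters:

import torch

img_latent = torch.zeros(1, 4, 64, 64)      # NCHW
vid_latent = torch.zeros(1, 4, 16, 64, 64)  # hypothetical NCTHW, extra time axis

# Rank-agnostic: the last two axes are always height and width.
for t in (img_latent, vid_latent):
    h, w = t.shape[-2], t.shape[-1]
    region = t[..., :h // 2, :w // 2]        # works for both ranks
    print(tuple(region.shape))
# (1, 4, 32, 32)
# (1, 4, 16, 32, 32)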
@@ -1,3 +1,4 @@
+from typing_extensions import override
 import numpy as np
 import torch
 import torch.nn.functional as F
@@ -7,33 +8,27 @@ import math
 import comfy.utils
 import comfy.model_management
 import node_helpers
+from comfy_api.latest import ComfyExtension, io

-class Blend:
-    def __init__(self):
-        pass
+class Blend(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageBlend",
+            category="image/postprocessing",
+            inputs=[
+                io.Image.Input("image1"),
+                io.Image.Input("image2"),
+                io.Float.Input("blend_factor", default=0.5, min=0.0, max=1.0, step=0.01),
+                io.Combo.Input("blend_mode", options=["normal", "multiply", "screen", "overlay", "soft_light", "difference"]),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image1": ("IMAGE",),
-                "image2": ("IMAGE",),
-                "blend_factor": ("FLOAT", {
-                    "default": 0.5,
-                    "min": 0.0,
-                    "max": 1.0,
-                    "step": 0.01
-                }),
-                "blend_mode": (["normal", "multiply", "screen", "overlay", "soft_light", "difference"],),
-            },
-        }
-
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "blend_images"
-
-    CATEGORY = "image/postprocessing"
-
-    def blend_images(self, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str):
+    def execute(cls, image1: torch.Tensor, image2: torch.Tensor, blend_factor: float, blend_mode: str) -> io.NodeOutput:
         image1, image2 = node_helpers.image_alpha_fix(image1, image2)
         image2 = image2.to(image1.device)
         if image1.shape != image2.shape:
@@ -41,12 +36,13 @@ class Blend:
         image2 = comfy.utils.common_upscale(image2, image1.shape[2], image1.shape[1], upscale_method='bicubic', crop='center')
         image2 = image2.permute(0, 2, 3, 1)

-        blended_image = self.blend_mode(image1, image2, blend_mode)
+        blended_image = cls.blend_mode(image1, image2, blend_mode)
         blended_image = image1 * (1 - blend_factor) + blended_image * blend_factor
         blended_image = torch.clamp(blended_image, 0, 1)
-        return (blended_image,)
+        return io.NodeOutput(blended_image)

-    def blend_mode(self, img1, img2, mode):
+    @classmethod
+    def blend_mode(cls, img1, img2, mode):
         if mode == "normal":
             return img2
         elif mode == "multiply":
@@ -56,13 +52,13 @@ class Blend:
         elif mode == "overlay":
             return torch.where(img1 <= 0.5, 2 * img1 * img2, 1 - 2 * (1 - img1) * (1 - img2))
         elif mode == "soft_light":
-            return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (self.g(img1) - img1))
+            return torch.where(img2 <= 0.5, img1 - (1 - 2 * img2) * img1 * (1 - img1), img1 + (2 * img2 - 1) * (cls.g(img1) - img1))
         elif mode == "difference":
             return img1 - img2
-        else:
-            raise ValueError(f"Unsupported blend mode: {mode}")
+        raise ValueError(f"Unsupported blend mode: {mode}")

-    def g(self, x):
+    @classmethod
+    def g(cls, x):
         return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x))

 def gaussian_kernel(kernel_size: int, sigma: float, device=None):
@@ -71,38 +67,26 @@ def gaussian_kernel(kernel_size: int, sigma: float, device=None):
     g = torch.exp(-(d * d) / (2.0 * sigma * sigma))
     return g / g.sum()

-class Blur:
-    def __init__(self):
-        pass
+class Blur(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageBlur",
+            category="image/postprocessing",
+            inputs=[
+                io.Image.Input("image"),
+                io.Int.Input("blur_radius", default=1, min=1, max=31, step=1),
+                io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.1),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": ("IMAGE",),
-                "blur_radius": ("INT", {
-                    "default": 1,
-                    "min": 1,
-                    "max": 31,
-                    "step": 1
-                }),
-                "sigma": ("FLOAT", {
-                    "default": 1.0,
-                    "min": 0.1,
-                    "max": 10.0,
-                    "step": 0.1
-                }),
-            },
-        }
-
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "blur"
-
-    CATEGORY = "image/postprocessing"
-
-    def blur(self, image: torch.Tensor, blur_radius: int, sigma: float):
+    def execute(cls, image: torch.Tensor, blur_radius: int, sigma: float) -> io.NodeOutput:
         if blur_radius == 0:
-            return (image,)
+            return io.NodeOutput(image)

         image = image.to(comfy.model_management.get_torch_device())
         batch_size, height, width, channels = image.shape
@@ -115,31 +99,24 @@ class Blur:
         blurred = F.conv2d(padded_image, kernel, padding=kernel_size // 2, groups=channels)[:,:,blur_radius:-blur_radius, blur_radius:-blur_radius]
         blurred = blurred.permute(0, 2, 3, 1)

-        return (blurred.to(comfy.model_management.intermediate_device()),)
+        return io.NodeOutput(blurred.to(comfy.model_management.intermediate_device()))

-class Quantize:
-    def __init__(self):
-        pass

+class Quantize(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": ("IMAGE",),
-                "colors": ("INT", {
-                    "default": 256,
-                    "min": 1,
-                    "max": 256,
-                    "step": 1
-                }),
-                "dither": (["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"],),
-            },
-        }
-
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "quantize"
-
-    CATEGORY = "image/postprocessing"
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageQuantize",
+            category="image/postprocessing",
+            inputs=[
+                io.Image.Input("image"),
+                io.Int.Input("colors", default=256, min=1, max=256, step=1),
+                io.Combo.Input("dither", options=["none", "floyd-steinberg", "bayer-2", "bayer-4", "bayer-8", "bayer-16"]),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )

     @staticmethod
     def bayer(im, pal_im, order):
@@ -167,7 +144,8 @@ class Quantize:
         im = im.quantize(palette=pal_im, dither=Image.Dither.NONE)
         return im

-    def quantize(self, image: torch.Tensor, colors: int, dither: str):
+    @classmethod
+    def execute(cls, image: torch.Tensor, colors: int, dither: str) -> io.NodeOutput:
         batch_size, height, width, _ = image.shape
         result = torch.zeros_like(image)

@@ -187,46 +165,29 @@ class Quantize:
             quantized_array = torch.tensor(np.array(quantized_image.convert("RGB"))).float() / 255
             result[b] = quantized_array

-        return (result,)
+        return io.NodeOutput(result)

-class Sharpen:
-    def __init__(self):
-        pass
+class Sharpen(io.ComfyNode):
+    @classmethod
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageSharpen",
+            category="image/postprocessing",
+            inputs=[
+                io.Image.Input("image"),
+                io.Int.Input("sharpen_radius", default=1, min=1, max=31, step=1),
+                io.Float.Input("sigma", default=1.0, min=0.1, max=10.0, step=0.01),
+                io.Float.Input("alpha", default=1.0, min=0.0, max=5.0, step=0.01),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )

     @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "image": ("IMAGE",),
-                "sharpen_radius": ("INT", {
-                    "default": 1,
-                    "min": 1,
-                    "max": 31,
-                    "step": 1
-                }),
-                "sigma": ("FLOAT", {
-                    "default": 1.0,
-                    "min": 0.1,
-                    "max": 10.0,
-                    "step": 0.01
-                }),
-                "alpha": ("FLOAT", {
-                    "default": 1.0,
-                    "min": 0.0,
-                    "max": 5.0,
-                    "step": 0.01
-                }),
-            },
-        }
-
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "sharpen"
-
-    CATEGORY = "image/postprocessing"
-
-    def sharpen(self, image: torch.Tensor, sharpen_radius: int, sigma:float, alpha: float):
+    def execute(cls, image: torch.Tensor, sharpen_radius: int, sigma:float, alpha: float) -> io.NodeOutput:
         if sharpen_radius == 0:
-            return (image,)
+            return io.NodeOutput(image)

         batch_size, height, width, channels = image.shape
         image = image.to(comfy.model_management.get_torch_device())
@@ -245,23 +206,29 @@ class Sharpen:

         result = torch.clamp(sharpened, 0, 1)

-        return (result.to(comfy.model_management.intermediate_device()),)
+        return io.NodeOutput(result.to(comfy.model_management.intermediate_device()))

-class ImageScaleToTotalPixels:
+class ImageScaleToTotalPixels(io.ComfyNode):
     upscale_methods = ["nearest-exact", "bilinear", "area", "bicubic", "lanczos"]
     crop_methods = ["disabled", "center"]

     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "image": ("IMAGE",), "upscale_method": (s.upscale_methods,),
-                              "megapixels": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 16.0, "step": 0.01}),
-                            }}
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "upscale"
+    def define_schema(cls):
+        return io.Schema(
+            node_id="ImageScaleToTotalPixels",
+            category="image/upscaling",
+            inputs=[
+                io.Image.Input("image"),
+                io.Combo.Input("upscale_method", options=cls.upscale_methods),
+                io.Float.Input("megapixels", default=1.0, min=0.01, max=16.0, step=0.01),
+            ],
+            outputs=[
+                io.Image.Output(),
+            ],
+        )

-    CATEGORY = "image/upscaling"
-
-    def upscale(self, image, upscale_method, megapixels):
+    @classmethod
+    def execute(cls, image, upscale_method, megapixels) -> io.NodeOutput:
         samples = image.movedim(-1,1)
         total = int(megapixels * 1024 * 1024)

@@ -271,12 +238,18 @@ class ImageScaleToTotalPixels:

         s = comfy.utils.common_upscale(samples, width, height, upscale_method, "disabled")
         s = s.movedim(1,-1)
-        return (s,)
+        return io.NodeOutput(s)

-NODE_CLASS_MAPPINGS = {
-    "ImageBlend": Blend,
-    "ImageBlur": Blur,
-    "ImageQuantize": Quantize,
-    "ImageSharpen": Sharpen,
-    "ImageScaleToTotalPixels": ImageScaleToTotalPixels,
-}
+
+class PostProcessingExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            Blend,
+            Blur,
+            Quantize,
+            Sharpen,
+            ImageScaleToTotalPixels,
+        ]
+
+
+async def comfy_entrypoint() -> PostProcessingExtension:
+    return PostProcessingExtension()
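The blend math itself is unchanged by the conversion. For instance, the soft_light branch and its helper g(x), both visible above, amount to this standalone computation (images in this file are BHWC tensors in [0, 1], per the shape unpacking in the diff):

import torch

def g(x):
    # Piecewise helper used by the soft_light branch above.
    return torch.where(x <= 0.25, ((16 * x - 12) * x + 4) * x, torch.sqrt(x))

def soft_light(img1, img2):
    return torch.where(
        img2 <= 0.5,
        img1 - (1 - 2 * img2) * img1 * (1 - img1),
        img1 + (2 * img2 - 1) * (g(img1) - img1),
    )

a = torch.rand(1, 8, 8, 3)
b = torch.rand(1, 8, 8, 3)
out = torch.clamp(soft_light(a, b), 0, 1)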
@@ -1,18 +1,25 @@
+from typing_extensions import override
 import torch

-class LatentRebatch:
+from comfy_api.latest import ComfyExtension, io
+
+
+class LatentRebatch(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "latents": ("LATENT",),
-                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                              }}
-    RETURN_TYPES = ("LATENT",)
-    INPUT_IS_LIST = True
-    OUTPUT_IS_LIST = (True, )
-
-    FUNCTION = "rebatch"
-
-    CATEGORY = "latent/batch"
+    def define_schema(cls):
+        return io.Schema(
+            node_id="RebatchLatents",
+            display_name="Rebatch Latents",
+            category="latent/batch",
+            is_input_list=True,
+            inputs=[
+                io.Latent.Input("latents"),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+            ],
+            outputs=[
+                io.Latent.Output(is_output_list=True),
+            ],
+        )

     @staticmethod
     def get_batch(latents, list_ind, offset):
@@ -53,7 +60,8 @@ class LatentRebatch:
         result = [torch.cat((b1, b2)) if torch.is_tensor(b1) else b1 + b2 for b1, b2 in zip(batch1, batch2)]
         return result

-    def rebatch(self, latents, batch_size):
+    @classmethod
+    def execute(cls, latents, batch_size):
         batch_size = batch_size[0]

         output_list = []
@@ -63,24 +71,24 @@ class LatentRebatch:
         for i in range(len(latents)):
             # fetch new entry of list
             #samples, masks, indices = self.get_batch(latents, i)
-            next_batch = self.get_batch(latents, i, processed)
+            next_batch = cls.get_batch(latents, i, processed)
             processed += len(next_batch[2])
             # set to current if current is None
             if current_batch[0] is None:
                 current_batch = next_batch
             # add previous to list if dimensions do not match
             elif next_batch[0].shape[-1] != current_batch[0].shape[-1] or next_batch[0].shape[-2] != current_batch[0].shape[-2]:
-                sliced, _ = self.slice_batch(current_batch, 1, batch_size)
+                sliced, _ = cls.slice_batch(current_batch, 1, batch_size)
                 output_list.append({'samples': sliced[0][0], 'noise_mask': sliced[1][0], 'batch_index': sliced[2][0]})
                 current_batch = next_batch
             # cat if everything checks out
             else:
-                current_batch = self.cat_batch(current_batch, next_batch)
+                current_batch = cls.cat_batch(current_batch, next_batch)

             # add to list if dimensions gone above target batch size
             if current_batch[0].shape[0] > batch_size:
                 num = current_batch[0].shape[0] // batch_size
-                sliced, remainder = self.slice_batch(current_batch, num, batch_size)
+                sliced, remainder = cls.slice_batch(current_batch, num, batch_size)

                 for i in range(num):
                     output_list.append({'samples': sliced[0][i], 'noise_mask': sliced[1][i], 'batch_index': sliced[2][i]})
@@ -89,7 +97,7 @@ class LatentRebatch:

         #add remainder
         if current_batch[0] is not None:
-            sliced, _ = self.slice_batch(current_batch, 1, batch_size)
+            sliced, _ = cls.slice_batch(current_batch, 1, batch_size)
             output_list.append({'samples': sliced[0][0], 'noise_mask': sliced[1][0], 'batch_index': sliced[2][0]})

         #get rid of empty masks
@@ -97,23 +105,27 @@ class LatentRebatch:
             if s['noise_mask'].mean() == 1.0:
                 del s['noise_mask']

-        return (output_list,)
+        return io.NodeOutput(output_list)

-class ImageRebatch:
+class ImageRebatch(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "images": ("IMAGE",),
-                              "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}),
-                              }}
-    RETURN_TYPES = ("IMAGE",)
-    INPUT_IS_LIST = True
-    OUTPUT_IS_LIST = (True, )
+    def define_schema(cls):
+        return io.Schema(
+            node_id="RebatchImages",
+            display_name="Rebatch Images",
+            category="image/batch",
+            is_input_list=True,
+            inputs=[
+                io.Image.Input("images"),
+                io.Int.Input("batch_size", default=1, min=1, max=4096),
+            ],
+            outputs=[
+                io.Image.Output(is_output_list=True),
+            ],
+        )

-    FUNCTION = "rebatch"
-
-    CATEGORY = "image/batch"
-
-    def rebatch(self, images, batch_size):
+    @classmethod
+    def execute(cls, images, batch_size):
         batch_size = batch_size[0]

         output_list = []
@@ -125,14 +137,17 @@ class ImageRebatch:
         for i in range(0, len(all_images), batch_size):
             output_list.append(torch.cat(all_images[i:i+batch_size], dim=0))

-        return (output_list,)
+        return io.NodeOutput(output_list)

-NODE_CLASS_MAPPINGS = {
-    "RebatchLatents": LatentRebatch,
-    "RebatchImages": ImageRebatch,
-}

-NODE_DISPLAY_NAME_MAPPINGS = {
-    "RebatchLatents": "Rebatch Latents",
-    "RebatchImages": "Rebatch Images",
-}
+class RebatchExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            LatentRebatch,
+            ImageRebatch,
+        ]
+
+
+async def comfy_entrypoint() -> RebatchExtension:
+    return RebatchExtension()
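Stripped of the noise-mask and batch-index bookkeeping, the rebatch loop above is plain re-chunking: concatenate compatible batches, then emit slices of at most batch_size. A toy sketch of that control flow (same-shaped batches only; the real node also splits on mismatched spatial dimensions):

import torch

def rebatch_sketch(batches, batch_size):
    # Concatenate along the batch axis, then slice into fixed-size chunks.
    merged = torch.cat(batches, dim=0)
    return [merged[i:i + batch_size] for i in range(0, merged.shape[0], batch_size)]

chunks = rebatch_sketch([torch.zeros(3, 4, 8, 8), torch.zeros(5, 4, 8, 8)], batch_size=4)
print([c.shape[0] for c in chunks])  # [4, 4]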
@@ -2,10 +2,13 @@ import torch
 from torch import einsum
 import torch.nn.functional as F
 import math
+from typing_extensions import override

 from einops import rearrange, repeat
 from comfy.ldm.modules.attention import optimized_attention
 import comfy.samplers
+from comfy_api.latest import ComfyExtension, io
+

 # from comfy/ldm/modules/attention.py
 # but modified to return attention scores as well as output
@@ -104,19 +107,26 @@ def gaussian_blur_2d(img, kernel_size, sigma):
     img = F.conv2d(img, kernel2d, groups=img.shape[-3])
     return img

-class SelfAttentionGuidance:
+class SelfAttentionGuidance(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "model": ("MODEL",),
-                              "scale": ("FLOAT", {"default": 0.5, "min": -2.0, "max": 5.0, "step": 0.01}),
-                              "blur_sigma": ("FLOAT", {"default": 2.0, "min": 0.0, "max": 10.0, "step": 0.1}),
-                              }}
-    RETURN_TYPES = ("MODEL",)
-    FUNCTION = "patch"
+    def define_schema(cls):
+        return io.Schema(
+            node_id="SelfAttentionGuidance",
+            display_name="Self-Attention Guidance",
+            category="_for_testing",
+            inputs=[
+                io.Model.Input("model"),
+                io.Float.Input("scale", default=0.5, min=-2.0, max=5.0, step=0.01),
+                io.Float.Input("blur_sigma", default=2.0, min=0.0, max=10.0, step=0.1),
+            ],
+            outputs=[
+                io.Model.Output(),
+            ],
+            is_experimental=True,
+        )

-    CATEGORY = "_for_testing"
-
-    def patch(self, model, scale, blur_sigma):
+    @classmethod
+    def execute(cls, model, scale, blur_sigma):
         m = model.clone()

         attn_scores = None
@@ -170,12 +180,16 @@ class SelfAttentionGuidance:
         # unet.mid_block.attentions[0].transformer_blocks[0].attn1.patch
         m.set_model_attn1_replace(attn_and_record, "middle", 0, 0)

-        return (m, )
+        return io.NodeOutput(m)

-NODE_CLASS_MAPPINGS = {
-    "SelfAttentionGuidance": SelfAttentionGuidance,
-}

-NODE_DISPLAY_NAME_MAPPINGS = {
-    "SelfAttentionGuidance": "Self-Attention Guidance",
-}
+class SagExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            SelfAttentionGuidance,
+        ]
+
+
+async def comfy_entrypoint() -> SagExtension:
+    return SagExtension()
@@ -1,23 +1,31 @@
+from typing_extensions import override
+
 import torch
 import comfy.utils
+from comfy_api.latest import ComfyExtension, io

-class SD_4XUpscale_Conditioning:
+class SD_4XUpscale_Conditioning(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(s):
-        return {"required": { "images": ("IMAGE",),
-                              "positive": ("CONDITIONING",),
-                              "negative": ("CONDITIONING",),
-                              "scale_ratio": ("FLOAT", {"default": 4.0, "min": 0.0, "max": 10.0, "step": 0.01}),
-                              "noise_augmentation": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
-                              }}
-    RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT")
-    RETURN_NAMES = ("positive", "negative", "latent")
+    def define_schema(cls):
+        return io.Schema(
+            node_id="SD_4XUpscale_Conditioning",
+            category="conditioning/upscale_diffusion",
+            inputs=[
+                io.Image.Input("images"),
+                io.Conditioning.Input("positive"),
+                io.Conditioning.Input("negative"),
+                io.Float.Input("scale_ratio", default=4.0, min=0.0, max=10.0, step=0.01),
+                io.Float.Input("noise_augmentation", default=0.0, min=0.0, max=1.0, step=0.001),
+            ],
+            outputs=[
+                io.Conditioning.Output(display_name="positive"),
+                io.Conditioning.Output(display_name="negative"),
+                io.Latent.Output(display_name="latent"),
+            ],
+        )

-    FUNCTION = "encode"
-
-    CATEGORY = "conditioning/upscale_diffusion"
-
-    def encode(self, images, positive, negative, scale_ratio, noise_augmentation):
+    @classmethod
+    def execute(cls, images, positive, negative, scale_ratio, noise_augmentation):
         width = max(1, round(images.shape[-2] * scale_ratio))
         height = max(1, round(images.shape[-3] * scale_ratio))

@@ -39,8 +47,16 @@ class SD_4XUpscale_Conditioning:
             out_cn.append(n)

         latent = torch.zeros([images.shape[0], 4, height // 4, width // 4])
-        return (out_cp, out_cn, {"samples":latent})
+        return io.NodeOutput(out_cp, out_cn, {"samples":latent})

-NODE_CLASS_MAPPINGS = {
-    "SD_4XUpscale_Conditioning": SD_4XUpscale_Conditioning,
-}
+
+class SdUpscaleExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            SD_4XUpscale_Conditioning,
+        ]
+
+
+async def comfy_entrypoint() -> SdUpscaleExtension:
+    return SdUpscaleExtension()
@@ -1,8 +1,9 @@
 # TCFG: Tangential Damping Classifier-free Guidance - (arXiv: https://arxiv.org/abs/2503.18137)

+from typing_extensions import override
 import torch

-from comfy.comfy_types import IO, ComfyNodeABC, InputTypeDict
+from comfy_api.latest import ComfyExtension, io


 def score_tangential_damping(cond_score: torch.Tensor, uncond_score: torch.Tensor) -> torch.Tensor:
@@ -26,23 +27,24 @@ def score_tangential_damping(cond_score: torch.Tensor, uncond_score: torch.Tensor) -> torch.Tensor:
     return uncond_score_td.reshape_as(uncond_score).to(uncond_score.dtype)


-class TCFG(ComfyNodeABC):
+class TCFG(io.ComfyNode):
     @classmethod
-    def INPUT_TYPES(cls) -> InputTypeDict:
-        return {
-            "required": {
-                "model": (IO.MODEL, {}),
-            }
-        }
+    def define_schema(cls):
+        return io.Schema(
+            node_id="TCFG",
+            display_name="Tangential Damping CFG",
+            category="advanced/guidance",
+            description="TCFG – Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality.",
+            inputs=[
+                io.Model.Input("model"),
+            ],
+            outputs=[
+                io.Model.Output(display_name="patched_model"),
+            ],
+        )

-    RETURN_TYPES = (IO.MODEL,)
-    RETURN_NAMES = ("patched_model",)
-    FUNCTION = "patch"
-
-    CATEGORY = "advanced/guidance"
-    DESCRIPTION = "TCFG – Tangential Damping CFG (2503.18137)\n\nRefine the uncond (negative) to align with the cond (positive) for improving quality."
-
-    def patch(self, model):
+    @classmethod
+    def execute(cls, model):
         m = model.clone()

         def tangential_damping_cfg(args):
@@ -59,13 +61,16 @@ class TCFG(ComfyNodeABC):
             return [cond_pred, uncond_pred_td] + conds_out[2:]

         m.set_model_sampler_pre_cfg_function(tangential_damping_cfg)
-        return (m,)
+        return io.NodeOutput(m)


-NODE_CLASS_MAPPINGS = {
-    "TCFG": TCFG,
-}
+class TcfgExtension(ComfyExtension):
+    @override
+    async def get_node_list(self) -> list[type[io.ComfyNode]]:
+        return [
+            TCFG,
+        ]

-NODE_DISPLAY_NAME_MAPPINGS = {
-    "TCFG": "Tangential Damping CFG",
-}
+
+async def comfy_entrypoint() -> TcfgExtension:
+    return TcfgExtension()
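As with FreSca above, TCFG hooks in through set_model_sampler_pre_cfg_function: the hook receives the per-condition predictions before the CFG mix and returns a (possibly modified) list. A minimal sketch of that hook shape, following the two uses visible in this commit; the damping math itself lives in score_tangential_damping and is not reproduced here:

def pre_cfg_sketch(args):
    # conds_out[0] is the cond prediction, conds_out[1] the uncond prediction;
    # later entries (if any) are passed through untouched.
    conds_out = args["conds_out"]
    if len(conds_out) <= 1 or None in args["conds"][:2]:
        return conds_out  # nothing to refine
    cond_pred, uncond_pred = conds_out[0], conds_out[1]
    # A real implementation would adjust uncond_pred here, e.g.
    # uncond_pred = score_tangential_damping(cond_pred, uncond_pred)
    return [cond_pred, uncond_pred] + conds_out[2:]

# Installed on a cloned model patcher, as in the diffs above:
# m = model.clone(); m.set_model_sampler_pre_cfg_function(pre_cfg_sketch)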