Mirror of https://git.datalinker.icu/kijai/ComfyUI-KJNodes.git

Add LeapfusionHunyuanI2VPatcher

parent 188793f2ba
commit 6c4aa273c8
@@ -47,6 +47,7 @@ NODE_CONFIG = {
     "CrossFadeImagesMulti": {"class": CrossFadeImagesMulti, "name": "Cross Fade Images Multi"},
     "GetImagesFromBatchIndexed": {"class": GetImagesFromBatchIndexed, "name": "Get Images From Batch Indexed"},
     "GetImageRangeFromBatch": {"class": GetImageRangeFromBatch, "name": "Get Image or Mask Range From Batch"},
+    "GetLatentRangeFromBatch": {"class": GetLatentRangeFromBatch, "name": "Get Latent Range From Batch"},
     "GetImageSizeAndCount": {"class": GetImageSizeAndCount, "name": "Get Image Size & Count"},
     "FastPreview": {"class": FastPreview, "name": "Fast Preview"},
     "ImageAndMaskPreview": {"class": ImageAndMaskPreview},
@@ -165,6 +166,7 @@ NODE_CONFIG = {
     "TorchCompileLTXModel": {"class": TorchCompileLTXModel, "name": "TorchCompileLTXModel"},
     "TorchCompileCosmosModel": {"class": TorchCompileCosmosModel, "name": "TorchCompileCosmosModel"},
     "PathchSageAttentionKJ": {"class": PathchSageAttentionKJ, "name": "Pathch Sage Attention KJ"},
+    "LeapfusionHunyuanI2VPatcher": {"class": LeapfusionHunyuanI2V, "name": "Leapfusion Hunyuan I2V Patcher"},
 
     #instance diffusion
     "CreateInstanceDiffusionTracking": {"class": CreateInstanceDiffusionTracking},
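For context on what these entries feed into: node packs like this typically expand NODE_CONFIG into the NODE_CLASS_MAPPINGS and NODE_DISPLAY_NAME_MAPPINGS dicts that ComfyUI actually loads at import time. A minimal sketch of that pattern, assuming a helper of this shape (the name generate_node_mappings is illustrative and may not match the repo exactly):

# Sketch of how a NODE_CONFIG dict like the one above expands into the
# mappings ComfyUI loads; the helper name is an assumption for illustration.
class GetLatentRangeFromBatch: ...   # stand-ins for the real node classes
class ImageAndMaskPreview: ...

NODE_CONFIG = {
    "GetLatentRangeFromBatch": {"class": GetLatentRangeFromBatch, "name": "Get Latent Range From Batch"},
    "ImageAndMaskPreview": {"class": ImageAndMaskPreview},  # no explicit display name
}

def generate_node_mappings(node_config):
    class_mappings = {key: info["class"] for key, info in node_config.items()}
    # Fall back to the registry key when no "name" is provided,
    # e.g. "ImageAndMaskPreview" above.
    display_names = {key: info.get("name", key) for key, info in node_config.items()}
    return class_mappings, display_names

NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = generate_node_mappings(NODE_CONFIG)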
@@ -1568,7 +1568,7 @@ class GetImageRangeFromBatch:
     FUNCTION = "imagesfrombatch"
     CATEGORY = "KJNodes/image"
     DESCRIPTION = """
-Randomizes image order within a batch.
+Returns a range of images from a batch.
 """
 
     @classmethod
@@ -1594,9 +1594,7 @@ Randomizes image order within a batch.
             start_index = max(0, len(images) - num_frames)
         if start_index < 0 or start_index >= len(images):
             raise ValueError("Start index is out of range")
-        end_index = start_index + num_frames
-        if end_index > len(images):
-            raise ValueError("End index is out of range")
+        end_index = min(start_index + num_frames, len(images))
         chosen_images = images[start_index:end_index]
 
         # Process masks if provided
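The three replaced lines change how an over-long request is handled: previously a start_index + num_frames that ran past the end of the batch raised a ValueError, whereas min() now clamps the range to the frames that actually exist. A quick standalone illustration:

import torch

images = torch.zeros(10, 64, 64, 3)  # batch of 10 images in ComfyUI's BHWC layout
start_index, num_frames = 8, 5

# Old behavior: end_index = 8 + 5 = 13 > 10 -> ValueError
# New behavior: the end of the range is clamped to the batch size
end_index = min(start_index + num_frames, len(images))
print(images[start_index:end_index].shape)  # torch.Size([2, 64, 64, 3])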
@@ -1605,12 +1603,53 @@ Randomizes image order within a batch.
                 start_index = max(0, len(masks) - num_frames)
             if start_index < 0 or start_index >= len(masks):
                 raise ValueError("Start index is out of range for masks")
-            end_index = start_index + num_frames
-            if end_index > len(masks):
-                raise ValueError("End index is out of range for masks")
+            end_index = min(start_index + num_frames, len(masks))
             chosen_masks = masks[start_index:end_index]
 
         return (chosen_images, chosen_masks,)
 
+class GetLatentRangeFromBatch:
+
+    RETURN_TYPES = ("LATENT", )
+    FUNCTION = "latentsfrombatch"
+    CATEGORY = "KJNodes/latents"
+    DESCRIPTION = """
+Returns a range of latents from a batch.
+"""
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "latents": ("LATENT",),
+                "start_index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}),
+                "num_frames": ("INT", {"default": 1,"min": -1, "max": 4096, "step": 1}),
+            },
+        }
+
+    def latentsfrombatch(self, latents, start_index, num_frames):
+        chosen_latents = None
+        samples = latents["samples"]
+        if len(samples.shape) == 4:
+            B, C, H, W = samples.shape
+            num_latents = B
+        elif len(samples.shape) == 5:
+            B, C, T, H, W = samples.shape
+            num_latents = T
+
+        if start_index == -1:
+            start_index = max(0, num_latents - num_frames)
+        if start_index < 0 or start_index >= num_latents:
+            raise ValueError("Start index is out of range")
+
+        end_index = num_latents if num_frames == -1 else min(start_index + num_frames, num_latents)
+
+        if len(samples.shape) == 4:
+            chosen_latents = samples[start_index:end_index]
+        elif len(samples.shape) == 5:
+            chosen_latents = samples[:, :, start_index:end_index]
+
+        return ({"samples": chosen_latents,},)
+
 class GetImagesFromBatchIndexed:
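The new GetLatentRangeFromBatch node dispatches on tensor rank: 4D image latents are sliced along the batch axis (B), 5D video latents along the time axis (T), and start_index == -1 selects the last num_frames. With the class above in scope, a standalone check of both paths (dummy shapes chosen arbitrarily):

import torch

node = GetLatentRangeFromBatch()

# 4D image latents: the range is taken over the batch axis
img_latents = {"samples": torch.zeros(8, 4, 64, 64)}       # B, C, H, W
(out,) = node.latentsfrombatch(img_latents, start_index=2, num_frames=3)
print(out["samples"].shape)  # torch.Size([3, 4, 64, 64])

# 5D video latents: the range is taken over the time axis,
# and start_index=-1 grabs the final num_frames frames
vid_latents = {"samples": torch.zeros(1, 16, 12, 64, 64)}  # B, C, T, H, W
(out,) = node.latentsfrombatch(vid_latents, start_index=-1, num_frames=4)
print(out["samples"].shape)  # torch.Size([1, 16, 4, 64, 64])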
@@ -1110,15 +1110,12 @@ class GenerateNoise:
                 "normalize": ("BOOLEAN", {"default": False}),
             },
             "optional": {
-                "model": ("MODEL", ),
-                "sigmas": ("SIGMAS", ),
-                "latent_channels": (
-                    [ '4',
-                      '16',
-                    ],
-                ),
-            }
+                "model": ("MODEL", ),
+                "sigmas": ("SIGMAS", ),
+                "latent_channels": (['4', '16', ],),
+                "shape": (["BCHW", "BCTHW"],),
+            }
             }
 
     RETURN_TYPES = ("LATENT",)
     FUNCTION = "generatenoise"
@@ -1127,10 +1124,14 @@ class GenerateNoise:
 Generates noise for injection or to be used as empty latents on samplers with add_noise off.
 """
 
-    def generatenoise(self, batch_size, width, height, seed, multiplier, constant_batch_noise, normalize, sigmas=None, model=None, latent_channels=4):
+    def generatenoise(self, batch_size, width, height, seed, multiplier, constant_batch_noise, normalize, sigmas=None, model=None, latent_channels=4, shape="BCHW"):
 
         generator = torch.manual_seed(seed)
-        noise = torch.randn([batch_size, int(latent_channels), height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu")
+        if shape == "BCHW":
+            noise = torch.randn([batch_size, int(latent_channels), height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu")
+        elif shape == "BCTHW":
+            noise = torch.randn([1, int(latent_channels), batch_size, height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu")
+        print(noise.shape)
         if sigmas is not None:
             sigma = sigmas[0] - sigmas[-1]
             sigma /= model.model.latent_format.scale_factor
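The new shape input keeps the default BCHW batch-of-images noise and adds a BCTHW option in which batch_size is reinterpreted as the frame count T of a single 5D video latent, matching the layout GetLatentRangeFromBatch above slices. A sketch of the two resulting shapes, using the same seeded-generator call as the node (dimensions chosen arbitrarily):

import torch

batch_size, latent_channels, height, width = 16, 16, 512, 512
generator = torch.manual_seed(123)

# shape == "BCHW": one latent per batch entry
bchw = torch.randn([batch_size, latent_channels, height // 8, width // 8],
                   dtype=torch.float32, generator=generator, device="cpu")
print(bchw.shape)   # torch.Size([16, 16, 64, 64])

# shape == "BCTHW": a single video latent, batch_size reused as frame count T
bcthw = torch.randn([1, latent_channels, batch_size, height // 8, width // 8],
                    dtype=torch.float32, generator=generator, device="cpu")
print(bcthw.shape)  # torch.Size([1, 16, 16, 64, 64])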
@@ -2223,4 +2224,34 @@ Concatenates the audio1 to audio2 in the specified direction.
         elif direction == 'left':
             concatenated_audio= torch.cat((waveform_2, waveform_1), dim=2) # Concatenate along width
         return ({"waveform": concatenated_audio, "sample_rate": sample_rate_1},)
 
+
+class LeapfusionHunyuanI2V:
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "model": ("MODEL",),
+                "latent": ("LATENT",),
+            }
+        }
+
+    RETURN_TYPES = ("MODEL",)
+    FUNCTION = "patch"
+
+    CATEGORY = "KJNodes/experimental"
+
+    def patch(self, model, latent):
+
+        def outer_wrapper(samples):
+            def unet_wrapper(apply_model, args):
+                inp, timestep, c = args["input"], args["timestep"], args["c"]
+                if samples is not None:
+                    inp[:, :, [0], :, :] = samples[:, :, [0], :, :].to(inp)
+                return apply_model(inp, timestep, **c)
+            return unet_wrapper
+
+        samples = latent["samples"] * 0.476986
+        m = model.clone()
+        m.set_model_unet_function_wrapper(outer_wrapper(samples))
+
+        return (m,)
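The patcher itself is a small model wrapper: it clones the model, scales the supplied image latent by 0.476986 (evidently the HunyuanVideo latent scale factor), and installs a UNet function wrapper that overwrites latent frame 0 of the sampler input with that latent on every denoising step, which is what turns the text-to-video model into an image-to-video one. The [0] index (rather than plain 0) keeps the temporal axis, so both sides of the assignment stay 5D. A minimal sketch of just that frame replacement on dummy BCTHW tensors:

import torch

inp = torch.randn(1, 16, 8, 64, 64)      # sampler input at some denoising step
samples = torch.randn(1, 16, 1, 64, 64)  # encoded first frame, pre-scaled

# Using [0] instead of 0 preserves the T axis, so both sides are (B, C, 1, H, W);
# .to(inp) matches the dtype/device of the sampler input before assignment
inp[:, :, [0], :, :] = samples[:, :, [0], :, :].to(inp)
print(inp.shape)  # torch.Size([1, 16, 8, 64, 64]), frame 0 now holds the image latent

Because the wrapper is installed on a clone via set_model_unet_function_wrapper, the original model is untouched, and the conditioning frame is re-imposed at every step rather than only once at initialization.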