Mirror of https://git.datalinker.icu/kijai/ComfyUI-CogVideoXWrapper.git
Synced 2025-12-16 00:14:25 +08:00

expose chunk_size for encode node

commit 097faeeff0 · parent fe1dded986
Changed files: nodes.py (7)
@@ -169,6 +169,9 @@ class CogVideoImageEncode:
             "pipeline": ("COGVIDEOPIPE",),
             "image": ("IMAGE", ),
             },
+            "optional": {
+                "chunk_size": ("INT", {"default": 16, "min": 1}),
+            },
         }
 
     RETURN_TYPES = ("LATENT",)
@@ -176,7 +179,7 @@ class CogVideoImageEncode:
     FUNCTION = "encode"
     CATEGORY = "CogVideoWrapper"
 
-    def encode(self, pipeline, image):
+    def encode(self, pipeline, image, chunk_size=16):
        device = mm.get_torch_device()
        offload_device = mm.unet_offload_device()
        generator = torch.Generator(device=device).manual_seed(0)
@@ -187,7 +190,7 @@ class CogVideoImageEncode:
        input_image = input_image.to(vae.dtype).to(device)
        input_image = input_image.unsqueeze(0).permute(0, 4, 1, 2, 3) # B, C, T, H, W
        B, C, T, H, W = input_image.shape
-        chunk_size = 16
+
        latents_list = []
        # Loop through the temporal dimension in chunks of 16
        for i in range(0, T, chunk_size):
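For context, the loop the last hunk is truncated at encodes the video through the VAE one temporal chunk at a time, so peak VRAM stays bounded regardless of frame count; with this commit, the chunk length comes from the new optional chunk_size input (default 16, min 1) instead of the previously hard-coded chunk_size = 16. Below is a minimal sketch of such a chunked encode, assuming a diffusers-style VAE whose encode() returns an object with a samplable latent_dist; the helper name encode_in_chunks and the exact loop body are illustrative assumptions, not the wrapper's verbatim code.

    import torch

    # Sketch only: assumes a diffusers-style VAE; the actual body in nodes.py may differ.
    def encode_in_chunks(vae, input_image, generator, chunk_size=16):
        # input_image: (B, C, T, H, W), already cast to vae.dtype and moved to its device
        B, C, T, H, W = input_image.shape
        latents_list = []
        # Loop through the temporal dimension in chunks of chunk_size frames
        for i in range(0, T, chunk_size):
            chunk = input_image[:, :, i:i + chunk_size]        # slice frames for this chunk
            posterior = vae.encode(chunk).latent_dist          # assumed diffusers-style API
            latents_list.append(posterior.sample(generator))   # sample latents for the chunk
        # Stitch the per-chunk latents back together along the temporal axis
        return torch.cat(latents_list, dim=2)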