Mirror of https://git.datalinker.icu/kijai/ComfyUI-CogVideoXWrapper.git (synced 2025-12-09 21:04:23 +08:00)

Commit a125f61aad: fun text2vid
Parent commit: 09f53ce0c0
@@ -674,7 +674,7 @@ class CogVideoX_Fun_Pipeline_Inpaint(DiffusionPipeline):
             height,
             width,
             video_length,
-            prompt_embeds.dtype,
+            self.vae.dtype,
             device,
             generator,
             latents,
@@ -721,7 +721,7 @@ class CogVideoX_Fun_Pipeline_Inpaint(DiffusionPipeline):
             batch_size,
             height,
             width,
-            prompt_embeds.dtype,
+            self.vae.dtype,
             device,
             generator,
             do_classifier_free_guidance,
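Both pipeline hunks swap prompt_embeds.dtype for self.vae.dtype in the argument list of a latent-preparation call, so those tensors are allocated in the VAE's dtype rather than in the text-embedding dtype. A minimal sketch of the pattern, assuming a prepare_latents-style helper that takes an explicit dtype argument (names and shapes below are illustrative, not the repo's exact API):

import torch

def prepare_latents(batch, frames, channels, height, width, dtype, device, generator=None):
    # Allocate initial noise in the requested dtype; spatial dims are 1/8 of the pixel
    # size, matching a typical VAE downscale factor.
    shape = (batch, frames, channels, height // 8, width // 8)
    return torch.randn(shape, generator=generator, device=device, dtype=dtype)

vae_dtype = torch.bfloat16     # stands in for self.vae.dtype
prompt_dtype = torch.float16   # stands in for prompt_embeds.dtype

# After the change, latents follow the VAE dtype even when the prompt embeddings differ.
latents = prepare_latents(1, 13, 16, 480, 720, dtype=vae_dtype, device="cpu")
assert latents.dtype == vae_dtype and latents.dtype != prompt_dtype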
nodes.py (21 changed lines)
@@ -525,6 +525,7 @@ class CogVideoXFunSampler:
             "optional":{
                 "start_img": ("IMAGE",),
                 "end_img": ("IMAGE",),
+                "opt_empty_latent": ("LATENT",),
             },
         }
@@ -533,7 +534,8 @@ class CogVideoXFunSampler:
     FUNCTION = "process"
     CATEGORY = "CogVideoWrapper"
 
-    def process(self, pipeline, positive, negative, video_length, base_resolution, seed, steps, cfg, scheduler, start_img=None, end_img=None):
+    def process(self, pipeline, positive, negative, video_length, base_resolution, seed, steps, cfg, scheduler,
+                start_img=None, end_img=None, opt_empty_latent=None):
         device = mm.get_torch_device()
         offload_device = mm.unet_offload_device()
         pipe = pipeline["pipe"]
@@ -543,14 +545,21 @@ class CogVideoXFunSampler:
 
         mm.soft_empty_cache()
 
-        start_img = [to_pil(_start_img) for _start_img in start_img] if start_img is not None else None
-        end_img = [to_pil(_end_img) for _end_img in end_img] if end_img is not None else None
-        # Count most suitable height and width
-        aspect_ratio_sample_size = {key : [x / 512 * base_resolution for x in ASPECT_RATIO_512[key]] for key in ASPECT_RATIO_512.keys()}
-        original_width, original_height = start_img[0].size if type(start_img) is list else Image.open(start_img).size
+        aspect_ratio_sample_size = {key : [x / 512 * base_resolution for x in ASPECT_RATIO_512[key]] for key in ASPECT_RATIO_512.keys()}
+
+        if start_img is not None:
+            start_img = [to_pil(_start_img) for _start_img in start_img] if start_img is not None else None
+            end_img = [to_pil(_end_img) for _end_img in end_img] if end_img is not None else None
+            # Count most suitable height and width
+            original_width, original_height = start_img[0].size if type(start_img) is list else Image.open(start_img).size
+        else:
+            original_width = opt_empty_latent["samples"][0].shape[-1] * 8
+            original_height = opt_empty_latent["samples"][0].shape[-2] * 8
         closest_size, closest_ratio = get_closest_ratio(original_height, original_width, ratios=aspect_ratio_sample_size)
         height, width = [int(x / 16) * 16 for x in closest_size]
+        print(f"Closest size: {width}:{height}")
+
 
         base_path = pipeline["base_path"]
 
         # Load Sampler
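The new opt_empty_latent branch lets CogVideoXFunSampler run text-to-video without a start image: when no image is supplied, the target resolution is derived from the empty latent's spatial dimensions (scaled back up by the VAE factor of 8) and then snapped to the nearest aspect-ratio bucket. A minimal sketch of that sizing logic, assuming a standard ComfyUI latent dict with a "samples" tensor of shape [B, C, H/8, W/8] and simplified stand-ins for the repo's ASPECT_RATIO_512 table and get_closest_ratio helper:

import torch

# Illustrative stand-in for ASPECT_RATIO_512: a few (height, width) buckets at base 512.
ASPECT_RATIO_512 = {"0.75": [384.0, 512.0], "1.0": [512.0, 512.0], "1.33": [512.0, 384.0]}

def get_closest_ratio(height, width, ratios):
    # Pick the bucket whose aspect ratio (h / w) is closest to the source's.
    target = height / width
    key = min(ratios, key=lambda k: abs(ratios[k][0] / ratios[k][1] - target))
    return ratios[key], key

def size_from_empty_latent(opt_empty_latent, base_resolution=512):
    samples = opt_empty_latent["samples"]
    original_width = samples[0].shape[-1] * 8   # latent width  -> pixel width
    original_height = samples[0].shape[-2] * 8  # latent height -> pixel height
    # Rescale the base-512 buckets to the requested base resolution, as the node does.
    buckets = {k: [x / 512 * base_resolution for x in v] for k, v in ASPECT_RATIO_512.items()}
    closest_size, _ = get_closest_ratio(original_height, original_width, ratios=buckets)
    # Snap to multiples of 16, matching the sampler.
    height, width = [int(x / 16) * 16 for x in closest_size]
    return width, height

latent = {"samples": torch.zeros(1, 16, 480 // 8, 720 // 8)}  # a 720x480 empty latent
print(size_from_empty_latent(latent))  # -> (512, 384) with the illustrative buckets above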