Compare commits

...

5 Commits

Author SHA1 Message Date
Tom
23193a4f5a
Merge fb5c22292e6302678f140f67e25f05d83dc33902 into dbc63f622dd095391335612d0c7d7bbff8745cc8 2025-01-30 12:11:23 +08:00
kijai
dbc63f622d some tweaks to test I2V with context windows, add context window preview 2025-01-28 22:40:58 +02:00
kijai
fed499e971 Update pyproject.toml 2025-01-28 16:39:21 +02:00
Jukka Seppänen
f3dda43cdf
Update readme.md 2025-01-23 11:18:57 +02:00
Tom
fb5c22292e
added batch files for comfyUI portable users
install-portable.bat - installs the requirements.txt for ComfyUI portable users (embedded python)

setup-onediff.bat - installs the recommended extras for ComfyUI Portable users (embedded python)

It can be easy to install to system Python by accident when running the Portable version of ComfyUI, so I use these to help make life easier.
2024-09-08 19:35:02 +01:00
5 changed files with 48 additions and 5 deletions

16
install-portable.bat Normal file
View File

@ -0,0 +1,16 @@
@echo off
REM Install this node's Python requirements, preferring the ComfyUI
REM portable (embedded) interpreter when it exists three levels up
REM (custom_nodes\<node>\ -> ComfyUI portable root).
REM NOTE: %~dp0 already ends with a backslash, so no extra separator
REM is added before requirements.txt (the original produced a "\\").
set "requirements_txt=%~dp0requirements.txt"
set "python_exec=..\..\..\python_embeded\python.exe"
echo Installing node...
if exist "%python_exec%" (
    echo Installing with ComfyUI Portable
    REM -s keeps the embedded interpreter from picking up user site-packages.
    "%python_exec%" -s -m pip install -r "%requirements_txt%"
) else (
    echo Installing with system Python
    pip install -r "%requirements_txt%"
)
pause

View File

@ -658,10 +658,9 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin):
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
counter = torch.zeros_like(latent_model_input)
noise_pred = torch.zeros_like(latent_model_input)
if image_cond_latents is not None:
latent_image_input = torch.cat([image_cond_latents] * 2) if do_classifier_free_guidance else image_cond_latents
latent_model_input = torch.cat([latent_model_input, latent_image_input], dim=2)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timestep = t.expand(latent_model_input.shape[0])
@ -724,7 +723,14 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin):
noise_pred = noise_pred.float()
else:
for c in context_queue:
print("c:", c)
partial_latent_model_input = latent_model_input[:, c, :, :, :]
if image_cond_latents is not None:
partial_latent_image_input = latent_image_input[:, :len(c), :, :, :]
partial_latent_model_input = torch.cat([partial_latent_model_input,partial_latent_image_input], dim=2)
print(partial_latent_model_input.shape)
if (tora is not None and tora["start_percent"] <= current_step_percentage <= tora["end_percent"]):
if do_classifier_free_guidance:
partial_video_flow_features = tora["video_flow_features"][:, c, :, :, :].repeat(1, 2, 1, 1, 1).contiguous()
@ -768,7 +774,13 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin):
if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
progress_bar.update()
comfy_pbar.update(1)
if callback is not None:
alpha_prod_t = self.scheduler.alphas_cumprod[t]
beta_prod_t = 1 - alpha_prod_t
callback_tensor = (alpha_prod_t**0.5) * latent_model_input[0][:, :16, :, :] - (beta_prod_t**0.5) * noise_pred.detach()[0]
callback(i, callback_tensor * 5, None, num_inference_steps)
else:
comfy_pbar.update(1)
# region sampling
else:

View File

@ -1,6 +1,6 @@
[project]
name = "comfyui-cogvideoxwrapper"
description = "Diffusers wrapper for CogVideoX -models: [a/https://github.com/THUDM/CogVideo](https://github.com/THUDM/CogVideo)"
description = "Diffusers wrapper for CogVideoX -models: https://github.com/THUDM/CogVideo"
version = "1.5.1"
license = {file = "LICENSE"}
dependencies = ["huggingface_hub", "diffusers>=0.31.0", "accelerate>=0.33.0"]

View File

@ -5,7 +5,7 @@ Spreadsheet (WIP) of supported models and their supported features: https://docs
## Update 9
Added preliminary support for [Go-with-the-Flow](https://github.com/VGenAI-Netflix-Eyeline-Research/Go-with-the-Flow)
This uses LoRA weights available here: https://huggingface.co/VGenAI-Netflix-Eyeline-Research/Go-with-the-Flow/tree/main
This uses LoRA weights available here: https://huggingface.co/Eyeline-Research/Go-with-the-Flow/tree/main
To create the input videos for the NoiseWarp process, I've added a node to KJNodes that works alongside my SplineEditor, and either [comfyui-inpaint-nodes](https://github.com/Acly/comfyui-inpaint-nodes) or just cv2 inpainting to create the cut and drag input videos.

15
setup-onediff.bat Normal file
View File

@ -0,0 +1,15 @@
@echo off
REM Install the recommended onediff/onediffx/nexfort extras, preferring
REM the ComfyUI portable (embedded) interpreter when it exists three
REM levels up (custom_nodes\<node>\ -> ComfyUI portable root).
set "python_exec=..\..\..\python_embeded\python.exe"
echo Installing node...
if exist "%python_exec%" (
    echo Installing with ComfyUI Portable
    REM Use the embedded interpreter for BOTH installs so nothing leaks
    REM into system Python (the original chained a bare "pip" call and
    REM carried a stray trailing quote that broke the command line).
    "%python_exec%" -s -m pip install --pre onediff onediffx && "%python_exec%" -s -m pip install nexfort
) else (
    echo Installing with system Python
    pip install --pre onediff onediffx && pip install nexfort
)
pause