Mirror of https://git.datalinker.icu/kijai/ComfyUI-CogVideoXWrapper.git, synced 2025-12-08 20:34:23 +08:00
tweaks

This commit is contained in:
parent 44a8305fcb
commit 1e356fa905
nodes.py
@@ -59,7 +59,7 @@ class DownloadAndLoadCogVideoModel:
             snapshot_download(
                 repo_id=model,
-                ignore_patterns=["*text_encoder*"],
+                ignore_patterns=["*text_encoder*", "*tokenizer*"],
                 local_dir=base_path,
                 local_dir_use_symlinks=False,
             )
 
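For context, snapshot_download is huggingface_hub's bulk-download helper; ignore_patterns takes glob patterns for files to skip, so after this change the tokenizer files stay remote alongside the text encoder, presumably because the wrapper loads those pieces itself. A minimal standalone sketch of the call, with the repo id and local path as stand-ins rather than the node's actual values:

import os
from huggingface_hub import snapshot_download

base_path = os.path.join("models", "CogVideo", "CogVideoX-2b")  # hypothetical layout
snapshot_download(
    repo_id="THUDM/CogVideoX-2b",  # stand-in repo id
    # Glob patterns; any file matching one of these is skipped, so the
    # text encoder and (after this commit) tokenizer are never downloaded.
    ignore_patterns=["*text_encoder*", "*tokenizer*"],
    local_dir=base_path,
    local_dir_use_symlinks=False,  # copy real files instead of symlinking into the HF cache
)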
@@ -286,10 +286,10 @@ class CogVideoXPipeline(DiffusionPipeline):
             use_real=True,
         )
 
-        if start_frame is not None:
-            freqs_cos = freqs_cos.view(num_frames, grid_height * grid_width, -1)
-            freqs_sin = freqs_sin.view(num_frames, grid_height * grid_width, -1)
+        freqs_cos = freqs_cos.view(num_frames, grid_height * grid_width, -1)
+        freqs_sin = freqs_sin.view(num_frames, grid_height * grid_width, -1)
 
         if start_frame is not None:
             freqs_cos = freqs_cos[start_frame:end_frame]
             freqs_sin = freqs_sin[start_frame:end_frame]
 
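Viewing the packed frequencies as (num_frames, grid_height * grid_width, dim) exposes the frame axis, so a temporal window can then be sliced out when only a sub-range of frames is being denoised. A standalone sketch of the pattern, with made-up shapes and a made-up window:

import torch

# Made-up sizes: 13 latent frames on a 30x45 token grid, 64-dim freqs.
num_frames, grid_height, grid_width, dim = 13, 30, 45, 64
freqs_cos = torch.randn(num_frames * grid_height * grid_width, dim)
freqs_sin = torch.randn(num_frames * grid_height * grid_width, dim)

# Expose the frame axis so frames can be indexed directly.
freqs_cos = freqs_cos.view(num_frames, grid_height * grid_width, -1)
freqs_sin = freqs_sin.view(num_frames, grid_height * grid_width, -1)

start_frame, end_frame = 4, 9  # hypothetical window
freqs_cos = freqs_cos[start_frame:end_frame]
freqs_sin = freqs_sin[start_frame:end_frame]
print(freqs_cos.shape)  # torch.Size([5, 1350, 64])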
@@ -444,12 +444,12 @@ class CogVideoXPipeline(DiffusionPipeline):
         print("latents.device", latents.device)
 
 
-        # # 6.5. Create rotary embeds if required
-        # image_rotary_emb = (
-        #     self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
-        #     if self.transformer.config.use_rotary_positional_embeddings
-        #     else None
-        # )
+        # 6.5. Create rotary embeds if required
+        image_rotary_emb = (
+            self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
+            if self.transformer.config.use_rotary_positional_embeddings
+            else None
+        )
 
         # 7. Denoising loop
         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
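Uncommenting this block hoists the rotary-embed creation out of the per-step code path: the embeds depend only on the latent geometry, not on the timestep, so one computation before the denoising loop can be reused at every step. A toy sketch of that hoist, with every name a stand-in for the pipeline's real attributes:

import torch

def prepare_rotary_embeds(height, width, num_frames, device):
    # Stand-in for self._prepare_rotary_positional_embeddings(...).
    n = num_frames * (height // 16) * (width // 16)
    return torch.ones(n, 64, device=device), torch.zeros(n, 64, device=device)

use_rope = True  # stand-in for transformer.config.use_rotary_positional_embeddings
image_rotary_emb = prepare_rotary_embeds(480, 720, 13, "cpu") if use_rope else None

for t in [999, 981, 963]:  # hypothetical timesteps
    freqs_cos, freqs_sin = image_rotary_emb  # reused every step, never rebuilt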
@@ -541,11 +541,7 @@ class CogVideoXPipeline(DiffusionPipeline):
                     comfy_pbar.update(1)
             # ==========================================
         else:
-            image_rotary_emb = (
-                self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
-                if self.transformer.config.use_rotary_positional_embeddings
-                else None
-            )
-
             latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
             latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
 
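The two latent_model_input lines are the standard classifier-free-guidance batching from diffusers-style pipelines: the latents are doubled so one transformer call evaluates both the unconditional and the text-conditioned branch (scheduler.scale_model_input, skipped below, only applies scheduler-specific input scaling). A standalone sketch with made-up shapes and guidance scale, including the later chunk-and-combine step:

import torch

do_classifier_free_guidance = True
guidance_scale = 6.0  # made-up value
latents = torch.randn(1, 13, 16, 60, 90)  # (batch, frames, channels, h, w), hypothetical

# Double the batch so one forward pass covers both guidance branches.
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

# Stand-in for the transformer's noise prediction on latent_model_input.
noise_pred = torch.randn_like(latent_model_input)
if do_classifier_free_guidance:
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)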