From dd60660feed8c322bc52498c7cb43a73e0bd5bd5 Mon Sep 17 00:00:00 2001
From: kijai <40791699+kijai@users.noreply.github.com>
Date: Wed, 18 Sep 2024 17:00:03 +0300
Subject: [PATCH] initial CogVideoX-Fun support
---
cogvideox_fun/autoencoder_magvit.py | 1296 +++++++++++++++++++
cogvideox_fun/pipeline_cogvideox_inpaint.py | 862 ++++++++++++
cogvideox_fun/transformer_3d.py | 605 +++++++++
cogvideox_fun/utils.py | 246 ++++
nodes.py | 162 ++-
pipeline_cogvideox.py | 19 +
6 files changed, 3177 insertions(+), 13 deletions(-)
create mode 100644 cogvideox_fun/autoencoder_magvit.py
create mode 100644 cogvideox_fun/pipeline_cogvideox_inpaint.py
create mode 100644 cogvideox_fun/transformer_3d.py
create mode 100644 cogvideox_fun/utils.py
diff --git a/cogvideox_fun/autoencoder_magvit.py b/cogvideox_fun/autoencoder_magvit.py
new file mode 100644
index 0000000..9c2b906
--- /dev/null
+++ b/cogvideox_fun/autoencoder_magvit.py
@@ -0,0 +1,1296 @@
+# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.loaders.single_file_model import FromOriginalModelMixin
+from diffusers.utils import logging
+from diffusers.utils.accelerate_utils import apply_forward_hook
+from diffusers.models.activations import get_activation
+from diffusers.models.downsampling import CogVideoXDownsample3D
+from diffusers.models.modeling_outputs import AutoencoderKLOutput
+from diffusers.models.modeling_utils import ModelMixin
+from diffusers.models.upsampling import CogVideoXUpsample3D
+from diffusers.models.autoencoders.vae import DecoderOutput, DiagonalGaussianDistribution
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+class CogVideoXSafeConv3d(nn.Conv3d):
+ r"""
+ A 3D convolution layer that splits the input tensor into smaller parts to avoid OOM in CogVideoX Model.
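+
+    Example (an illustrative sketch; shapes are deliberately small):
+
+    ```python
+    >>> import torch
+    >>> conv = CogVideoXSafeConv3d(3, 8, kernel_size=3, padding=1)
+    >>> x = torch.randn(1, 3, 9, 64, 64)  # (batch, channels, frames, height, width)
+    >>> conv(x).shape  # the input is only chunked along the frame dim above ~2 GB
+    torch.Size([1, 8, 9, 64, 64])
+    ```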
+ """
+
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
+ memory_count = torch.prod(torch.tensor(input.shape)).item() * 2 / 1024**3
+
+ # Set to 2GB, suitable for CuDNN
+ if memory_count > 2:
+ kernel_size = self.kernel_size[0]
+ part_num = int(memory_count / 2) + 1
+ input_chunks = torch.chunk(input, part_num, dim=2)
+
+ if kernel_size > 1:
+ input_chunks = [input_chunks[0]] + [
+ torch.cat((input_chunks[i - 1][:, :, -kernel_size + 1 :], input_chunks[i]), dim=2)
+ for i in range(1, len(input_chunks))
+ ]
+
+ output_chunks = []
+ for input_chunk in input_chunks:
+ output_chunks.append(super().forward(input_chunk))
+ output = torch.cat(output_chunks, dim=2)
+ return output
+ else:
+ return super().forward(input)
+
+
+class CogVideoXCausalConv3d(nn.Module):
+ r"""A 3D causal convolution layer that pads the input tensor to ensure causality in CogVideoX Model.
+
+ Args:
+ in_channels (`int`): Number of channels in the input tensor.
+ out_channels (`int`): Number of output channels produced by the convolution.
+ kernel_size (`int` or `Tuple[int, int, int]`): Kernel size of the convolutional kernel.
+ stride (`int`, defaults to `1`): Stride of the convolution.
+ dilation (`int`, defaults to `1`): Dilation rate of the convolution.
+ pad_mode (`str`, defaults to `"constant"`): Padding mode.
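+
+    Example (an illustrative sketch; the first frame is replicated as temporal padding, so no
+    future frames leak into the past):
+
+    ```python
+    >>> import torch
+    >>> conv = CogVideoXCausalConv3d(in_channels=3, out_channels=8, kernel_size=3)
+    >>> x = torch.randn(1, 3, 9, 64, 64)
+    >>> conv(x).shape  # time, height and width are preserved for stride 1
+    torch.Size([1, 8, 9, 64, 64])
+    ```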
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ kernel_size: Union[int, Tuple[int, int, int]],
+ stride: int = 1,
+ dilation: int = 1,
+ pad_mode: str = "constant",
+ ):
+ super().__init__()
+
+ if isinstance(kernel_size, int):
+ kernel_size = (kernel_size,) * 3
+
+ time_kernel_size, height_kernel_size, width_kernel_size = kernel_size
+
+ self.pad_mode = pad_mode
+ time_pad = dilation * (time_kernel_size - 1) + (1 - stride)
+ height_pad = height_kernel_size // 2
+ width_pad = width_kernel_size // 2
+
+ self.height_pad = height_pad
+ self.width_pad = width_pad
+ self.time_pad = time_pad
+ self.time_causal_padding = (width_pad, width_pad, height_pad, height_pad, time_pad, 0)
+
+ self.temporal_dim = 2
+ self.time_kernel_size = time_kernel_size
+
+ stride = (stride, 1, 1)
+ dilation = (dilation, 1, 1)
+ self.conv = CogVideoXSafeConv3d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ stride=stride,
+ dilation=dilation,
+ )
+
+ self.conv_cache = None
+
+ def fake_context_parallel_forward(self, inputs: torch.Tensor) -> torch.Tensor:
+ kernel_size = self.time_kernel_size
+ if kernel_size > 1:
+ cached_inputs = (
+ [self.conv_cache] if self.conv_cache is not None else [inputs[:, :, :1]] * (kernel_size - 1)
+ )
+ inputs = torch.cat(cached_inputs + [inputs], dim=2)
+ return inputs
+
+ def _clear_fake_context_parallel_cache(self):
+ del self.conv_cache
+ self.conv_cache = None
+
+ def forward(self, inputs: torch.Tensor) -> torch.Tensor:
+ inputs = self.fake_context_parallel_forward(inputs)
+
+ self._clear_fake_context_parallel_cache()
+        # Note: we could move these to the cpu for a lower maximum memory usage but it's only a few
+        # hundred megabytes, so let's not do it for now
+ self.conv_cache = inputs[:, :, -self.time_kernel_size + 1 :].clone()
+
+ padding_2d = (self.width_pad, self.width_pad, self.height_pad, self.height_pad)
+ inputs = F.pad(inputs, padding_2d, mode="constant", value=0)
+
+ output = self.conv(inputs)
+ return output
+
+
+class CogVideoXSpatialNorm3D(nn.Module):
+ r"""
+    Spatially conditioned normalization as defined in https://arxiv.org/abs/2209.09002. This implementation is
+    specific to 3D video-like data.
+
+ CogVideoXSafeConv3d is used instead of nn.Conv3d to avoid OOM in CogVideoX Model.
+
+ Args:
+ f_channels (`int`):
+ The number of channels for input to group normalization layer, and output of the spatial norm layer.
+ zq_channels (`int`):
+ The number of channels for the quantized vector as described in the paper.
+ groups (`int`):
+ Number of groups to separate the channels into for group normalization.
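+
+    Example (an illustrative sketch; `zq` is upsampled to match `f` before modulation):
+
+    ```python
+    >>> import torch
+    >>> norm = CogVideoXSpatialNorm3D(f_channels=64, zq_channels=16, groups=32)
+    >>> f = torch.randn(1, 64, 5, 32, 32)
+    >>> zq = torch.randn(1, 16, 5, 8, 8)
+    >>> norm(f, zq).shape
+    torch.Size([1, 64, 5, 32, 32])
+    ```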
+ """
+
+ def __init__(
+ self,
+ f_channels: int,
+ zq_channels: int,
+ groups: int = 32,
+ ):
+ super().__init__()
+ self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=groups, eps=1e-6, affine=True)
+ self.conv_y = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1)
+ self.conv_b = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1)
+
+ def forward(self, f: torch.Tensor, zq: torch.Tensor) -> torch.Tensor:
+ if f.shape[2] > 1 and f.shape[2] % 2 == 1:
+ f_first, f_rest = f[:, :, :1], f[:, :, 1:]
+ f_first_size, f_rest_size = f_first.shape[-3:], f_rest.shape[-3:]
+ z_first, z_rest = zq[:, :, :1], zq[:, :, 1:]
+ z_first = F.interpolate(z_first, size=f_first_size)
+ z_rest = F.interpolate(z_rest, size=f_rest_size)
+ zq = torch.cat([z_first, z_rest], dim=2)
+ else:
+ zq = F.interpolate(zq, size=f.shape[-3:])
+
+ norm_f = self.norm_layer(f)
+ new_f = norm_f * self.conv_y(zq) + self.conv_b(zq)
+ return new_f
+
+
+class CogVideoXResnetBlock3D(nn.Module):
+ r"""
+ A 3D ResNet block used in the CogVideoX model.
+
+ Args:
+ in_channels (`int`):
+ Number of input channels.
+ out_channels (`int`, *optional*):
+ Number of output channels. If None, defaults to `in_channels`.
+ dropout (`float`, defaults to `0.0`):
+ Dropout rate.
+ temb_channels (`int`, defaults to `512`):
+ Number of time embedding channels.
+ groups (`int`, defaults to `32`):
+ Number of groups to separate the channels into for group normalization.
+ eps (`float`, defaults to `1e-6`):
+ Epsilon value for normalization layers.
+ non_linearity (`str`, defaults to `"swish"`):
+ Activation function to use.
+        conv_shortcut (`bool`, defaults to `False`):
+            Whether or not to use a convolution shortcut.
+        spatial_norm_dim (`int`, *optional*):
+            The dimension to use for spatial norm if it is to be used instead of group norm.
+        pad_mode (`str`, defaults to `"first"`):
+            Padding mode.
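+
+    Example (an illustrative sketch; the 1x1 shortcut conv aligns channels for the residual add):
+
+    ```python
+    >>> import torch
+    >>> block = CogVideoXResnetBlock3D(in_channels=64, out_channels=128, temb_channels=0)
+    >>> x = torch.randn(1, 64, 5, 32, 32)
+    >>> block(x).shape
+    torch.Size([1, 128, 5, 32, 32])
+    ```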
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: Optional[int] = None,
+ dropout: float = 0.0,
+ temb_channels: int = 512,
+ groups: int = 32,
+ eps: float = 1e-6,
+ non_linearity: str = "swish",
+ conv_shortcut: bool = False,
+ spatial_norm_dim: Optional[int] = None,
+ pad_mode: str = "first",
+ ):
+ super().__init__()
+
+ out_channels = out_channels or in_channels
+
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.nonlinearity = get_activation(non_linearity)
+ self.use_conv_shortcut = conv_shortcut
+
+ if spatial_norm_dim is None:
+ self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=groups, eps=eps)
+ self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=groups, eps=eps)
+ else:
+ self.norm1 = CogVideoXSpatialNorm3D(
+ f_channels=in_channels,
+ zq_channels=spatial_norm_dim,
+ groups=groups,
+ )
+ self.norm2 = CogVideoXSpatialNorm3D(
+ f_channels=out_channels,
+ zq_channels=spatial_norm_dim,
+ groups=groups,
+ )
+
+ self.conv1 = CogVideoXCausalConv3d(
+ in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
+ )
+
+ if temb_channels > 0:
+ self.temb_proj = nn.Linear(in_features=temb_channels, out_features=out_channels)
+
+ self.dropout = nn.Dropout(dropout)
+ self.conv2 = CogVideoXCausalConv3d(
+ in_channels=out_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
+ )
+
+ if self.in_channels != self.out_channels:
+ if self.use_conv_shortcut:
+ self.conv_shortcut = CogVideoXCausalConv3d(
+ in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode
+ )
+ else:
+ self.conv_shortcut = CogVideoXSafeConv3d(
+ in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0
+ )
+
+ def forward(
+ self,
+ inputs: torch.Tensor,
+ temb: Optional[torch.Tensor] = None,
+ zq: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ hidden_states = inputs
+
+ if zq is not None:
+ hidden_states = self.norm1(hidden_states, zq)
+ else:
+ hidden_states = self.norm1(hidden_states)
+
+ hidden_states = self.nonlinearity(hidden_states)
+ hidden_states = self.conv1(hidden_states)
+
+ if temb is not None:
+ hidden_states = hidden_states + self.temb_proj(self.nonlinearity(temb))[:, :, None, None, None]
+
+ if zq is not None:
+ hidden_states = self.norm2(hidden_states, zq)
+ else:
+ hidden_states = self.norm2(hidden_states)
+
+ hidden_states = self.nonlinearity(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.conv2(hidden_states)
+
+ if self.in_channels != self.out_channels:
+ inputs = self.conv_shortcut(inputs)
+
+ hidden_states = hidden_states + inputs
+ return hidden_states
+
+
+class CogVideoXDownBlock3D(nn.Module):
+ r"""
+ A downsampling block used in the CogVideoX model.
+
+ Args:
+ in_channels (`int`):
+ Number of input channels.
+ out_channels (`int`, *optional*):
+ Number of output channels. If None, defaults to `in_channels`.
+ temb_channels (`int`, defaults to `512`):
+ Number of time embedding channels.
+ num_layers (`int`, defaults to `1`):
+ Number of resnet layers.
+ dropout (`float`, defaults to `0.0`):
+ Dropout rate.
+ resnet_eps (`float`, defaults to `1e-6`):
+ Epsilon value for normalization layers.
+ resnet_act_fn (`str`, defaults to `"swish"`):
+ Activation function to use.
+ resnet_groups (`int`, defaults to `32`):
+ Number of groups to separate the channels into for group normalization.
+        add_downsample (`bool`, defaults to `True`):
+            Whether or not to use a downsampling layer. If not used, the output dimension is the same as the input
+            dimension.
+        compress_time (`bool`, defaults to `False`):
+            Whether or not to downsample across the temporal dimension.
+        pad_mode (`str`, defaults to `"first"`):
+            Padding mode.
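+
+    Example (an illustrative sketch; `compress_time=True` additionally halves the frame count):
+
+    ```python
+    >>> import torch
+    >>> down = CogVideoXDownBlock3D(
+    ...     in_channels=64, out_channels=128, temb_channels=0, num_layers=2, compress_time=True
+    ... )
+    >>> x = torch.randn(1, 64, 8, 32, 32)
+    >>> down(x).shape  # 2x spatial downsample, 2x temporal downsample
+    torch.Size([1, 128, 4, 16, 16])
+    ```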
+ """
+
+ _supports_gradient_checkpointing = True
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ add_downsample: bool = True,
+ downsample_padding: int = 0,
+ compress_time: bool = False,
+ pad_mode: str = "first",
+ ):
+ super().__init__()
+
+ resnets = []
+ for i in range(num_layers):
+ in_channel = in_channels if i == 0 else out_channels
+ resnets.append(
+ CogVideoXResnetBlock3D(
+ in_channels=in_channel,
+ out_channels=out_channels,
+ dropout=dropout,
+ temb_channels=temb_channels,
+ groups=resnet_groups,
+ eps=resnet_eps,
+ non_linearity=resnet_act_fn,
+ pad_mode=pad_mode,
+ )
+ )
+
+ self.resnets = nn.ModuleList(resnets)
+ self.downsamplers = None
+
+ if add_downsample:
+ self.downsamplers = nn.ModuleList(
+ [
+ CogVideoXDownsample3D(
+ out_channels, out_channels, padding=downsample_padding, compress_time=compress_time
+ )
+ ]
+ )
+
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ temb: Optional[torch.Tensor] = None,
+ zq: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ for resnet in self.resnets:
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module):
+ def create_forward(*inputs):
+ return module(*inputs)
+
+ return create_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(resnet), hidden_states, temb, zq
+ )
+ else:
+ hidden_states = resnet(hidden_states, temb, zq)
+
+ if self.downsamplers is not None:
+ for downsampler in self.downsamplers:
+ hidden_states = downsampler(hidden_states)
+
+ return hidden_states
+
+
+class CogVideoXMidBlock3D(nn.Module):
+ r"""
+ A middle block used in the CogVideoX model.
+
+ Args:
+ in_channels (`int`):
+ Number of input channels.
+ temb_channels (`int`, defaults to `512`):
+ Number of time embedding channels.
+ dropout (`float`, defaults to `0.0`):
+ Dropout rate.
+ num_layers (`int`, defaults to `1`):
+ Number of resnet layers.
+ resnet_eps (`float`, defaults to `1e-6`):
+ Epsilon value for normalization layers.
+ resnet_act_fn (`str`, defaults to `"swish"`):
+ Activation function to use.
+ resnet_groups (`int`, defaults to `32`):
+ Number of groups to separate the channels into for group normalization.
+ spatial_norm_dim (`int`, *optional*):
+ The dimension to use for spatial norm if it is to be used instead of group norm.
+        pad_mode (`str`, defaults to `"first"`):
+            Padding mode.
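+
+    Example (an illustrative sketch; the mid block is shape-preserving):
+
+    ```python
+    >>> import torch
+    >>> mid = CogVideoXMidBlock3D(in_channels=128, temb_channels=0, num_layers=2)
+    >>> x = torch.randn(1, 128, 3, 8, 8)
+    >>> mid(x).shape
+    torch.Size([1, 128, 3, 8, 8])
+    ```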
+ """
+
+ _supports_gradient_checkpointing = True
+
+ def __init__(
+ self,
+ in_channels: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ spatial_norm_dim: Optional[int] = None,
+ pad_mode: str = "first",
+ ):
+ super().__init__()
+
+ resnets = []
+ for _ in range(num_layers):
+ resnets.append(
+ CogVideoXResnetBlock3D(
+ in_channels=in_channels,
+ out_channels=in_channels,
+ dropout=dropout,
+ temb_channels=temb_channels,
+ groups=resnet_groups,
+ eps=resnet_eps,
+ spatial_norm_dim=spatial_norm_dim,
+ non_linearity=resnet_act_fn,
+ pad_mode=pad_mode,
+ )
+ )
+ self.resnets = nn.ModuleList(resnets)
+
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ temb: Optional[torch.Tensor] = None,
+ zq: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ for resnet in self.resnets:
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module):
+ def create_forward(*inputs):
+ return module(*inputs)
+
+ return create_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(resnet), hidden_states, temb, zq
+ )
+ else:
+ hidden_states = resnet(hidden_states, temb, zq)
+
+ return hidden_states
+
+
+class CogVideoXUpBlock3D(nn.Module):
+ r"""
+ An upsampling block used in the CogVideoX model.
+
+ Args:
+ in_channels (`int`):
+ Number of input channels.
+ out_channels (`int`, *optional*):
+ Number of output channels. If None, defaults to `in_channels`.
+ temb_channels (`int`, defaults to `512`):
+ Number of time embedding channels.
+ dropout (`float`, defaults to `0.0`):
+ Dropout rate.
+ num_layers (`int`, defaults to `1`):
+ Number of resnet layers.
+ resnet_eps (`float`, defaults to `1e-6`):
+ Epsilon value for normalization layers.
+ resnet_act_fn (`str`, defaults to `"swish"`):
+ Activation function to use.
+ resnet_groups (`int`, defaults to `32`):
+ Number of groups to separate the channels into for group normalization.
+ spatial_norm_dim (`int`, defaults to `16`):
+ The dimension to use for spatial norm if it is to be used instead of group norm.
+        add_upsample (`bool`, defaults to `True`):
+            Whether or not to use an upsampling layer. If not used, the output dimension is the same as the input
+            dimension.
+        compress_time (`bool`, defaults to `False`):
+            Whether or not to upsample across the temporal dimension.
+        pad_mode (`str`, defaults to `"first"`):
+            Padding mode.
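+
+    Example (an illustrative sketch; `zq` is the latent sample that conditions the spatial norm):
+
+    ```python
+    >>> import torch
+    >>> up = CogVideoXUpBlock3D(in_channels=256, out_channels=128, temb_channels=0, compress_time=True)
+    >>> x = torch.randn(1, 256, 3, 16, 16)
+    >>> zq = torch.randn(1, 16, 3, 8, 8)  # spatial_norm_dim defaults to 16
+    >>> up(x, temb=None, zq=zq).shape  # 2x spatial upsample; 3 frames -> 5 with compress_time=True
+    torch.Size([1, 128, 5, 32, 32])
+    ```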
+ """
+
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ temb_channels: int,
+ dropout: float = 0.0,
+ num_layers: int = 1,
+ resnet_eps: float = 1e-6,
+ resnet_act_fn: str = "swish",
+ resnet_groups: int = 32,
+ spatial_norm_dim: int = 16,
+ add_upsample: bool = True,
+ upsample_padding: int = 1,
+ compress_time: bool = False,
+ pad_mode: str = "first",
+ ):
+ super().__init__()
+
+ resnets = []
+ for i in range(num_layers):
+ in_channel = in_channels if i == 0 else out_channels
+ resnets.append(
+ CogVideoXResnetBlock3D(
+ in_channels=in_channel,
+ out_channels=out_channels,
+ dropout=dropout,
+ temb_channels=temb_channels,
+ groups=resnet_groups,
+ eps=resnet_eps,
+ non_linearity=resnet_act_fn,
+ spatial_norm_dim=spatial_norm_dim,
+ pad_mode=pad_mode,
+ )
+ )
+
+ self.resnets = nn.ModuleList(resnets)
+ self.upsamplers = None
+
+ if add_upsample:
+ self.upsamplers = nn.ModuleList(
+ [
+ CogVideoXUpsample3D(
+ out_channels, out_channels, padding=upsample_padding, compress_time=compress_time
+ )
+ ]
+ )
+
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ temb: Optional[torch.Tensor] = None,
+ zq: Optional[torch.Tensor] = None,
+ ) -> torch.Tensor:
+ r"""Forward method of the `CogVideoXUpBlock3D` class."""
+ for resnet in self.resnets:
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module):
+ def create_forward(*inputs):
+ return module(*inputs)
+
+ return create_forward
+
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(resnet), hidden_states, temb, zq
+ )
+ else:
+ hidden_states = resnet(hidden_states, temb, zq)
+
+ if self.upsamplers is not None:
+ for upsampler in self.upsamplers:
+ hidden_states = upsampler(hidden_states)
+
+ return hidden_states
+
+
+class CogVideoXEncoder3D(nn.Module):
+ r"""
+ The `CogVideoXEncoder3D` layer of a variational autoencoder that encodes its input into a latent representation.
+
+ Args:
+        in_channels (`int`, *optional*, defaults to 3):
+            The number of input channels.
+        out_channels (`int`, *optional*, defaults to 16):
+            The number of output channels.
+        down_block_types (`Tuple[str, ...]`, *optional*, defaults to `("CogVideoXDownBlock3D",) * 4`):
+            The types of down blocks to use. Only `"CogVideoXDownBlock3D"` is currently supported.
+        block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(128, 256, 256, 512)`):
+            The number of output channels for each block.
+        act_fn (`str`, *optional*, defaults to `"silu"`):
+            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
+        layers_per_block (`int`, *optional*, defaults to 3):
+            The number of layers per block.
+        norm_num_groups (`int`, *optional*, defaults to 32):
+            The number of groups for normalization.
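+
+    Example (an illustrative sketch with small shapes; the default config compresses 4x in time and
+    8x in space and outputs `2 * out_channels` for the Gaussian mean/logvar):
+
+    ```python
+    >>> import torch
+    >>> enc = CogVideoXEncoder3D()
+    >>> x = torch.randn(1, 3, 9, 64, 64)
+    >>> enc(x).shape
+    torch.Size([1, 32, 3, 8, 8])
+    ```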
+ """
+
+ _supports_gradient_checkpointing = True
+
+ def __init__(
+ self,
+ in_channels: int = 3,
+ out_channels: int = 16,
+ down_block_types: Tuple[str, ...] = (
+ "CogVideoXDownBlock3D",
+ "CogVideoXDownBlock3D",
+ "CogVideoXDownBlock3D",
+ "CogVideoXDownBlock3D",
+ ),
+ block_out_channels: Tuple[int, ...] = (128, 256, 256, 512),
+ layers_per_block: int = 3,
+ act_fn: str = "silu",
+ norm_eps: float = 1e-6,
+ norm_num_groups: int = 32,
+ dropout: float = 0.0,
+ pad_mode: str = "first",
+ temporal_compression_ratio: float = 4,
+ ):
+ super().__init__()
+
+ # log2 of temporal_compress_times
+ temporal_compress_level = int(np.log2(temporal_compression_ratio))
+
+ self.conv_in = CogVideoXCausalConv3d(in_channels, block_out_channels[0], kernel_size=3, pad_mode=pad_mode)
+ self.down_blocks = nn.ModuleList([])
+
+ # down blocks
+ output_channel = block_out_channels[0]
+ for i, down_block_type in enumerate(down_block_types):
+ input_channel = output_channel
+ output_channel = block_out_channels[i]
+ is_final_block = i == len(block_out_channels) - 1
+ compress_time = i < temporal_compress_level
+
+ if down_block_type == "CogVideoXDownBlock3D":
+ down_block = CogVideoXDownBlock3D(
+ in_channels=input_channel,
+ out_channels=output_channel,
+ temb_channels=0,
+ dropout=dropout,
+ num_layers=layers_per_block,
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ resnet_groups=norm_num_groups,
+ add_downsample=not is_final_block,
+ compress_time=compress_time,
+ )
+ else:
+ raise ValueError("Invalid `down_block_type` encountered. Must be `CogVideoXDownBlock3D`")
+
+ self.down_blocks.append(down_block)
+
+ # mid block
+ self.mid_block = CogVideoXMidBlock3D(
+ in_channels=block_out_channels[-1],
+ temb_channels=0,
+ dropout=dropout,
+ num_layers=2,
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ resnet_groups=norm_num_groups,
+ pad_mode=pad_mode,
+ )
+
+ self.norm_out = nn.GroupNorm(norm_num_groups, block_out_channels[-1], eps=1e-6)
+ self.conv_act = nn.SiLU()
+ self.conv_out = CogVideoXCausalConv3d(
+ block_out_channels[-1], 2 * out_channels, kernel_size=3, pad_mode=pad_mode
+ )
+
+ self.gradient_checkpointing = False
+
+ def forward(self, sample: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor:
+ r"""The forward method of the `CogVideoXEncoder3D` class."""
+ hidden_states = self.conv_in(sample)
+
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+
+ return custom_forward
+
+ # 1. Down
+ for down_block in self.down_blocks:
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(down_block), hidden_states, temb, None
+ )
+
+ # 2. Mid
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(self.mid_block), hidden_states, temb, None
+ )
+ else:
+ # 1. Down
+ for down_block in self.down_blocks:
+ hidden_states = down_block(hidden_states, temb, None)
+
+ # 2. Mid
+ hidden_states = self.mid_block(hidden_states, temb, None)
+
+ # 3. Post-process
+ hidden_states = self.norm_out(hidden_states)
+ hidden_states = self.conv_act(hidden_states)
+ hidden_states = self.conv_out(hidden_states)
+ return hidden_states
+
+
+class CogVideoXDecoder3D(nn.Module):
+ r"""
+ The `CogVideoXDecoder3D` layer of a variational autoencoder that decodes its latent representation into an output
+ sample.
+
+ Args:
+        in_channels (`int`, *optional*, defaults to 16):
+            The number of input channels.
+        out_channels (`int`, *optional*, defaults to 3):
+            The number of output channels.
+        up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("CogVideoXUpBlock3D",) * 4`):
+            The types of up blocks to use. Only `"CogVideoXUpBlock3D"` is currently supported.
+        block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(128, 256, 256, 512)`):
+            The number of output channels for each block.
+        act_fn (`str`, *optional*, defaults to `"silu"`):
+            The activation function to use. See `~diffusers.models.activations.get_activation` for available options.
+        layers_per_block (`int`, *optional*, defaults to 3):
+            The number of layers per block.
+        norm_num_groups (`int`, *optional*, defaults to 32):
+            The number of groups for normalization.
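+
+    Example (an illustrative sketch with small shapes; mirrors the encoder, so 3 latent frames
+    decode to 9 pixel frames):
+
+    ```python
+    >>> import torch
+    >>> dec = CogVideoXDecoder3D()
+    >>> z = torch.randn(1, 16, 3, 8, 8)
+    >>> dec(z).shape
+    torch.Size([1, 3, 9, 64, 64])
+    ```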
+ """
+
+ _supports_gradient_checkpointing = True
+
+ def __init__(
+ self,
+ in_channels: int = 16,
+ out_channels: int = 3,
+ up_block_types: Tuple[str, ...] = (
+ "CogVideoXUpBlock3D",
+ "CogVideoXUpBlock3D",
+ "CogVideoXUpBlock3D",
+ "CogVideoXUpBlock3D",
+ ),
+ block_out_channels: Tuple[int, ...] = (128, 256, 256, 512),
+ layers_per_block: int = 3,
+ act_fn: str = "silu",
+ norm_eps: float = 1e-6,
+ norm_num_groups: int = 32,
+ dropout: float = 0.0,
+ pad_mode: str = "first",
+ temporal_compression_ratio: float = 4,
+ ):
+ super().__init__()
+
+ reversed_block_out_channels = list(reversed(block_out_channels))
+
+ self.conv_in = CogVideoXCausalConv3d(
+ in_channels, reversed_block_out_channels[0], kernel_size=3, pad_mode=pad_mode
+ )
+
+ # mid block
+ self.mid_block = CogVideoXMidBlock3D(
+ in_channels=reversed_block_out_channels[0],
+ temb_channels=0,
+ num_layers=2,
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ resnet_groups=norm_num_groups,
+ spatial_norm_dim=in_channels,
+ pad_mode=pad_mode,
+ )
+
+ # up blocks
+ self.up_blocks = nn.ModuleList([])
+
+ output_channel = reversed_block_out_channels[0]
+ temporal_compress_level = int(np.log2(temporal_compression_ratio))
+
+ for i, up_block_type in enumerate(up_block_types):
+ prev_output_channel = output_channel
+ output_channel = reversed_block_out_channels[i]
+ is_final_block = i == len(block_out_channels) - 1
+ compress_time = i < temporal_compress_level
+
+ if up_block_type == "CogVideoXUpBlock3D":
+ up_block = CogVideoXUpBlock3D(
+ in_channels=prev_output_channel,
+ out_channels=output_channel,
+ temb_channels=0,
+ dropout=dropout,
+ num_layers=layers_per_block + 1,
+ resnet_eps=norm_eps,
+ resnet_act_fn=act_fn,
+ resnet_groups=norm_num_groups,
+ spatial_norm_dim=in_channels,
+ add_upsample=not is_final_block,
+ compress_time=compress_time,
+ pad_mode=pad_mode,
+ )
+ prev_output_channel = output_channel
+ else:
+ raise ValueError("Invalid `up_block_type` encountered. Must be `CogVideoXUpBlock3D`")
+
+ self.up_blocks.append(up_block)
+
+ self.norm_out = CogVideoXSpatialNorm3D(reversed_block_out_channels[-1], in_channels, groups=norm_num_groups)
+ self.conv_act = nn.SiLU()
+ self.conv_out = CogVideoXCausalConv3d(
+ reversed_block_out_channels[-1], out_channels, kernel_size=3, pad_mode=pad_mode
+ )
+
+ self.gradient_checkpointing = False
+
+ def forward(self, sample: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor:
+ r"""The forward method of the `CogVideoXDecoder3D` class."""
+ hidden_states = self.conv_in(sample)
+
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+
+ return custom_forward
+
+ # 1. Mid
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(self.mid_block), hidden_states, temb, sample
+ )
+
+ # 2. Up
+ for up_block in self.up_blocks:
+ hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(up_block), hidden_states, temb, sample
+ )
+ else:
+ # 1. Mid
+ hidden_states = self.mid_block(hidden_states, temb, sample)
+
+ # 2. Up
+ for up_block in self.up_blocks:
+ hidden_states = up_block(hidden_states, temb, sample)
+
+ # 3. Post-process
+ hidden_states = self.norm_out(hidden_states, sample)
+ hidden_states = self.conv_act(hidden_states)
+ hidden_states = self.conv_out(hidden_states)
+ return hidden_states
+
+
+class AutoencoderKLCogVideoX(ModelMixin, ConfigMixin, FromOriginalModelMixin):
+ r"""
+ A VAE model with KL loss for encoding images into latents and decoding latent representations into images. Used in
+ [CogVideoX](https://github.com/THUDM/CogVideo).
+
+    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
+ for all models (such as downloading or saving).
+
+ Parameters:
+        in_channels (int, *optional*, defaults to 3): Number of channels in the input image.
+        out_channels (int, *optional*, defaults to 3): Number of channels in the output.
+        down_block_types (`Tuple[str]`, *optional*, defaults to `("CogVideoXDownBlock3D",) * 4`):
+            Tuple of downsample block types.
+        up_block_types (`Tuple[str]`, *optional*, defaults to `("CogVideoXUpBlock3D",) * 4`):
+            Tuple of upsample block types.
+        block_out_channels (`Tuple[int]`, *optional*, defaults to `(128, 256, 256, 512)`):
+            Tuple of block output channels.
+        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
+        sample_height (`int`, *optional*, defaults to `480`): Sample input height.
+        sample_width (`int`, *optional*, defaults to `720`): Sample input width.
+ scaling_factor (`float`, *optional*, defaults to `1.15258426`):
+ The component-wise standard deviation of the trained latent space computed using the first batch of the
+ training set. This is used to scale the latent space to have unit variance when training the diffusion
+ model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
+ diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
+ / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
+ Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
+        force_upcast (`bool`, *optional*, defaults to `True`):
+            If enabled, it forces the VAE to run in float32 for high image resolution pipelines, such as SD-XL. The
+            VAE can be fine-tuned / trained to a lower range without losing too much precision, in which case
+            `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
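+
+    Example (an illustrative sketch with deliberately small shapes; in practice the model is loaded
+    from pretrained weights and expects 480x720 video):
+
+    ```python
+    >>> import torch
+    >>> vae = AutoencoderKLCogVideoX()
+    >>> video = torch.randn(1, 3, 9, 64, 64)  # (batch, channels, frames, height, width)
+    >>> latents = vae.encode(video).latent_dist.sample()
+    >>> latents.shape  # 4x temporal and 8x spatial compression, 16 latent channels
+    torch.Size([1, 16, 3, 8, 8])
+    >>> vae.decode(latents).sample.shape
+    torch.Size([1, 3, 9, 64, 64])
+    ```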
+ """
+
+ _supports_gradient_checkpointing = True
+ _no_split_modules = ["CogVideoXResnetBlock3D"]
+
+ @register_to_config
+ def __init__(
+ self,
+ in_channels: int = 3,
+ out_channels: int = 3,
+ down_block_types: Tuple[str] = (
+ "CogVideoXDownBlock3D",
+ "CogVideoXDownBlock3D",
+ "CogVideoXDownBlock3D",
+ "CogVideoXDownBlock3D",
+ ),
+ up_block_types: Tuple[str] = (
+ "CogVideoXUpBlock3D",
+ "CogVideoXUpBlock3D",
+ "CogVideoXUpBlock3D",
+ "CogVideoXUpBlock3D",
+ ),
+ block_out_channels: Tuple[int] = (128, 256, 256, 512),
+ latent_channels: int = 16,
+ layers_per_block: int = 3,
+ act_fn: str = "silu",
+ norm_eps: float = 1e-6,
+ norm_num_groups: int = 32,
+ temporal_compression_ratio: float = 4,
+ sample_height: int = 480,
+ sample_width: int = 720,
+ scaling_factor: float = 1.15258426,
+ shift_factor: Optional[float] = None,
+ latents_mean: Optional[Tuple[float]] = None,
+ latents_std: Optional[Tuple[float]] = None,
+        force_upcast: bool = True,
+ use_quant_conv: bool = False,
+ use_post_quant_conv: bool = False,
+ ):
+ super().__init__()
+
+ self.encoder = CogVideoXEncoder3D(
+ in_channels=in_channels,
+ out_channels=latent_channels,
+ down_block_types=down_block_types,
+ block_out_channels=block_out_channels,
+ layers_per_block=layers_per_block,
+ act_fn=act_fn,
+ norm_eps=norm_eps,
+ norm_num_groups=norm_num_groups,
+ temporal_compression_ratio=temporal_compression_ratio,
+ )
+ self.decoder = CogVideoXDecoder3D(
+ in_channels=latent_channels,
+ out_channels=out_channels,
+ up_block_types=up_block_types,
+ block_out_channels=block_out_channels,
+ layers_per_block=layers_per_block,
+ act_fn=act_fn,
+ norm_eps=norm_eps,
+ norm_num_groups=norm_num_groups,
+ temporal_compression_ratio=temporal_compression_ratio,
+ )
+ self.quant_conv = CogVideoXSafeConv3d(2 * out_channels, 2 * out_channels, 1) if use_quant_conv else None
+ self.post_quant_conv = CogVideoXSafeConv3d(out_channels, out_channels, 1) if use_post_quant_conv else None
+
+ self.use_slicing = False
+ self.use_tiling = False
+
+        # Can be increased to decode more latent frames at once, but this comes at a memory cost and is not
+        # recommended because the temporal parts of the VAE, here, are tricky to understand.
+ # If you decode X latent frames together, the number of output frames is:
+ # (X + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale)) => X + 6 frames
+ #
+ # Example with num_latent_frames_batch_size = 2:
+ # - 12 latent frames: (0, 1), (2, 3), (4, 5), (6, 7), (8, 9), (10, 11) are processed together
+ # => (12 // 2 frame slices) * ((2 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale))
+ # => 6 * 8 = 48 frames
+ # - 13 latent frames: (0, 1, 2) (special case), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12) are processed together
+ # => (1 frame slice) * ((3 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale)) +
+ # ((13 - 3) // 2) * ((2 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale))
+ # => 1 * 9 + 5 * 8 = 49 frames
+ # It has been implemented this way so as to not have "magic values" in the code base that would be hard to explain. Note that
+ # setting it to anything other than 2 would give poor results because the VAE hasn't been trained to be adaptive with different
+ # number of temporal frames.
+ self.num_latent_frames_batch_size = 2
+
+        # We make the minimum tile height and width half of the generally supported sample resolution
+ self.tile_sample_min_height = sample_height // 2
+ self.tile_sample_min_width = sample_width // 2
+ self.tile_latent_min_height = int(
+ self.tile_sample_min_height / (2 ** (len(self.config.block_out_channels) - 1))
+ )
+ self.tile_latent_min_width = int(self.tile_sample_min_width / (2 ** (len(self.config.block_out_channels) - 1)))
+
+ # These are experimental overlap factors that were chosen based on experimentation and seem to work best for
+ # 720x480 (WxH) resolution. The above resolution is the strongly recommended generation resolution in CogVideoX
+ # and so the tiling implementation has only been tested on those specific resolutions.
+ self.tile_overlap_factor_height = 1 / 6
+ self.tile_overlap_factor_width = 1 / 5
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ if isinstance(module, (CogVideoXEncoder3D, CogVideoXDecoder3D)):
+ module.gradient_checkpointing = value
+
+ def _clear_fake_context_parallel_cache(self):
+ for name, module in self.named_modules():
+ if isinstance(module, CogVideoXCausalConv3d):
+ logger.debug(f"Clearing fake Context Parallel cache for layer: {name}")
+ module._clear_fake_context_parallel_cache()
+
+ def enable_tiling(
+ self,
+ tile_sample_min_height: Optional[int] = None,
+ tile_sample_min_width: Optional[int] = None,
+ tile_overlap_factor_height: Optional[float] = None,
+ tile_overlap_factor_width: Optional[float] = None,
+ ) -> None:
+ r"""
+        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
+        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and
+        allows processing larger images.
+
+ Args:
+ tile_sample_min_height (`int`, *optional*):
+ The minimum height required for a sample to be separated into tiles across the height dimension.
+ tile_sample_min_width (`int`, *optional*):
+ The minimum width required for a sample to be separated into tiles across the width dimension.
+            tile_overlap_factor_height (`float`, *optional*):
+                The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are
+                no tiling artifacts produced across the height dimension. Must be between 0 and 1. Setting a higher
+                value might cause more tiles to be processed, leading to a slower decoding process.
+            tile_overlap_factor_width (`float`, *optional*):
+                The minimum amount of overlap between two consecutive horizontal tiles. This is to ensure that there
+                are no tiling artifacts produced across the width dimension. Must be between 0 and 1. Setting a higher
+                value might cause more tiles to be processed, leading to a slower decoding process.
+ """
+ self.use_tiling = True
+ self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height
+ self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width
+ self.tile_latent_min_height = int(
+ self.tile_sample_min_height / (2 ** (len(self.config.block_out_channels) - 1))
+ )
+ self.tile_latent_min_width = int(self.tile_sample_min_width / (2 ** (len(self.config.block_out_channels) - 1)))
+ self.tile_overlap_factor_height = tile_overlap_factor_height or self.tile_overlap_factor_height
+ self.tile_overlap_factor_width = tile_overlap_factor_width or self.tile_overlap_factor_width
+
+ def disable_tiling(self) -> None:
+ r"""
+ Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
+ decoding in one step.
+ """
+ self.use_tiling = False
+
+ def enable_slicing(self) -> None:
+ r"""
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
+ """
+ self.use_slicing = True
+
+ def disable_slicing(self) -> None:
+ r"""
+ Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
+ decoding in one step.
+ """
+ self.use_slicing = False
+
+ @apply_forward_hook
+ def encode(
+ self, x: torch.Tensor, return_dict: bool = True
+ ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
+ """
+ Encode a batch of images into latents.
+
+ Args:
+ x (`torch.Tensor`): Input batch of images.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.
+
+ Returns:
+ The latent representations of the encoded images. If `return_dict` is True, a
+ [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
+ """
+ batch_size, num_channels, num_frames, height, width = x.shape
+ if num_frames == 1:
+ h = self.encoder(x)
+ if self.quant_conv is not None:
+ h = self.quant_conv(h)
+ posterior = DiagonalGaussianDistribution(h)
+ else:
+ frame_batch_size = 4
+ h = []
+ for i in range(num_frames // frame_batch_size):
+ remaining_frames = num_frames % frame_batch_size
+ start_frame = frame_batch_size * i + (0 if i == 0 else remaining_frames)
+ end_frame = frame_batch_size * (i + 1) + remaining_frames
+ z_intermediate = x[:, :, start_frame:end_frame]
+ z_intermediate = self.encoder(z_intermediate)
+ if self.quant_conv is not None:
+ z_intermediate = self.quant_conv(z_intermediate)
+ h.append(z_intermediate)
+ self._clear_fake_context_parallel_cache()
+ h = torch.cat(h, dim=2)
+ posterior = DiagonalGaussianDistribution(h)
+ if not return_dict:
+ return (posterior,)
+ return AutoencoderKLOutput(latent_dist=posterior)
+
+ def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
+ batch_size, num_channels, num_frames, height, width = z.shape
+
+ if self.use_tiling and (width > self.tile_latent_min_width or height > self.tile_latent_min_height):
+ return self.tiled_decode(z, return_dict=return_dict)
+
+ if num_frames == 1:
+ dec = []
+ z_intermediate = z
+ if self.post_quant_conv is not None:
+ z_intermediate = self.post_quant_conv(z_intermediate)
+ z_intermediate = self.decoder(z_intermediate)
+ dec.append(z_intermediate)
+ else:
+ frame_batch_size = self.num_latent_frames_batch_size
+ dec = []
+ for i in range(num_frames // frame_batch_size):
+ remaining_frames = num_frames % frame_batch_size
+ start_frame = frame_batch_size * i + (0 if i == 0 else remaining_frames)
+ end_frame = frame_batch_size * (i + 1) + remaining_frames
+ z_intermediate = z[:, :, start_frame:end_frame]
+ if self.post_quant_conv is not None:
+ z_intermediate = self.post_quant_conv(z_intermediate)
+ z_intermediate = self.decoder(z_intermediate)
+ dec.append(z_intermediate)
+
+ self._clear_fake_context_parallel_cache()
+ dec = torch.cat(dec, dim=2)
+
+ if not return_dict:
+ return (dec,)
+
+ return DecoderOutput(sample=dec)
+
+ @apply_forward_hook
+ def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
+ """
+ Decode a batch of images.
+
+ Args:
+ z (`torch.Tensor`): Input batch of latent vectors.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
+
+ Returns:
+ [`~models.vae.DecoderOutput`] or `tuple`:
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
+ returned.
+ """
+ if self.use_slicing and z.shape[0] > 1:
+ decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
+ decoded = torch.cat(decoded_slices)
+ else:
+ decoded = self._decode(z).sample
+
+ if not return_dict:
+ return (decoded,)
+ return DecoderOutput(sample=decoded)
+
+ def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
+ blend_extent = min(a.shape[3], b.shape[3], blend_extent)
+ for y in range(blend_extent):
+ b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
+ y / blend_extent
+ )
+ return b
+
+ def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
+ blend_extent = min(a.shape[4], b.shape[4], blend_extent)
+ for x in range(blend_extent):
+ b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
+ x / blend_extent
+ )
+ return b
+
+ def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
+ r"""
+ Decode a batch of images using a tiled decoder.
+
+ Args:
+ z (`torch.Tensor`): Input batch of latent vectors.
+ return_dict (`bool`, *optional*, defaults to `True`):
+ Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.
+
+ Returns:
+ [`~models.vae.DecoderOutput`] or `tuple`:
+ If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
+ returned.
+ """
+ # Rough memory assessment:
+ # - In CogVideoX-2B, there are a total of 24 CausalConv3d layers.
+ # - The biggest intermediate dimensions are: [1, 128, 9, 480, 720].
+ # - Assume fp16 (2 bytes per value).
+ # Memory required: 1 * 128 * 9 * 480 * 720 * 24 * 2 / 1024**3 = 17.8 GB
+ #
+ # Memory assessment when using tiling:
+ # - Assume everything as above but now HxW is 240x360 by tiling in half
+ # Memory required: 1 * 128 * 9 * 240 * 360 * 24 * 2 / 1024**3 = 4.5 GB
+
+ batch_size, num_channels, num_frames, height, width = z.shape
+
+ overlap_height = int(self.tile_latent_min_height * (1 - self.tile_overlap_factor_height))
+ overlap_width = int(self.tile_latent_min_width * (1 - self.tile_overlap_factor_width))
+ blend_extent_height = int(self.tile_sample_min_height * self.tile_overlap_factor_height)
+ blend_extent_width = int(self.tile_sample_min_width * self.tile_overlap_factor_width)
+ row_limit_height = self.tile_sample_min_height - blend_extent_height
+ row_limit_width = self.tile_sample_min_width - blend_extent_width
+ frame_batch_size = self.num_latent_frames_batch_size
+
+ # Split z into overlapping tiles and decode them separately.
+ # The tiles have an overlap to avoid seams between tiles.
+ rows = []
+ for i in range(0, height, overlap_height):
+ row = []
+ for j in range(0, width, overlap_width):
+ time = []
+ for k in range(num_frames // frame_batch_size):
+ remaining_frames = num_frames % frame_batch_size
+ start_frame = frame_batch_size * k + (0 if k == 0 else remaining_frames)
+ end_frame = frame_batch_size * (k + 1) + remaining_frames
+ tile = z[
+ :,
+ :,
+ start_frame:end_frame,
+ i : i + self.tile_latent_min_height,
+ j : j + self.tile_latent_min_width,
+ ]
+ if self.post_quant_conv is not None:
+ tile = self.post_quant_conv(tile)
+ tile = self.decoder(tile)
+ time.append(tile)
+ self._clear_fake_context_parallel_cache()
+ row.append(torch.cat(time, dim=2))
+ rows.append(row)
+
+ result_rows = []
+ for i, row in enumerate(rows):
+ result_row = []
+ for j, tile in enumerate(row):
+ # blend the above tile and the left tile
+ # to the current tile and add the current tile to the result row
+ if i > 0:
+ tile = self.blend_v(rows[i - 1][j], tile, blend_extent_height)
+ if j > 0:
+ tile = self.blend_h(row[j - 1], tile, blend_extent_width)
+ result_row.append(tile[:, :, :, :row_limit_height, :row_limit_width])
+ result_rows.append(torch.cat(result_row, dim=4))
+
+ dec = torch.cat(result_rows, dim=3)
+
+ if not return_dict:
+ return (dec,)
+
+ return DecoderOutput(sample=dec)
+
+ def forward(
+ self,
+ sample: torch.Tensor,
+ sample_posterior: bool = False,
+ return_dict: bool = True,
+ generator: Optional[torch.Generator] = None,
+    ) -> Union[DecoderOutput, Tuple]:
+ x = sample
+ posterior = self.encode(x).latent_dist
+ if sample_posterior:
+ z = posterior.sample(generator=generator)
+ else:
+ z = posterior.mode()
+ dec = self.decode(z)
+ if not return_dict:
+ return (dec,)
+ return dec
diff --git a/cogvideox_fun/pipeline_cogvideox_inpaint.py b/cogvideox_fun/pipeline_cogvideox_inpaint.py
new file mode 100644
index 0000000..01fc8ac
--- /dev/null
+++ b/cogvideox_fun/pipeline_cogvideox_inpaint.py
@@ -0,0 +1,862 @@
+# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import inspect
+import math
+from dataclasses import dataclass
+from typing import Callable, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+from einops import rearrange
+from transformers import T5EncoderModel, T5Tokenizer
+
+from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
+from diffusers.models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel
+from diffusers.models.embeddings import get_3d_rotary_pos_embed
+from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
+from diffusers.utils import BaseOutput, logging, replace_example_docstring
+from diffusers.utils.torch_utils import randn_tensor
+from diffusers.video_processor import VideoProcessor
+from diffusers.image_processor import VaeImageProcessor
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+
+EXAMPLE_DOC_STRING = """
+ Examples:
+ ```python
+ >>> import torch
+        >>> from cogvideox_fun.pipeline_cogvideox_inpaint import CogVideoX_Fun_Pipeline_Inpaint
+        >>> from diffusers.utils import export_to_video
+
+        >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b"
+        >>> pipe = CogVideoX_Fun_Pipeline_Inpaint.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16).to("cuda")
+ >>> prompt = (
+ ... "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. "
+ ... "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
+ ... "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
+ ... "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
+ ... "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
+ ... "atmosphere of this unique musical performance."
+ ... )
+ >>> video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0]
+ >>> export_to_video(video, "output.mp4", fps=8)
+ ```
+"""
+
+
+# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid
+def get_resize_crop_region_for_grid(src, tgt_width, tgt_height):
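+    # Illustrative example: src=(480, 720) with tgt_width=720 and tgt_height=480 (matching aspect
+    # ratios) resizes to the full grid and returns ((0, 0), (480, 720)).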
+ tw = tgt_width
+ th = tgt_height
+ h, w = src
+ r = h / w
+ if r > (th / tw):
+ resize_height = th
+ resize_width = int(round(th / h * w))
+ else:
+ resize_width = tw
+ resize_height = int(round(tw / w * h))
+
+ crop_top = int(round((th - resize_height) / 2.0))
+ crop_left = int(round((tw - resize_width) / 2.0))
+
+ return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)
+
+
+# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+def retrieve_timesteps(
+ scheduler,
+ num_inference_steps: Optional[int] = None,
+ device: Optional[Union[str, torch.device]] = None,
+ timesteps: Optional[List[int]] = None,
+ sigmas: Optional[List[float]] = None,
+ **kwargs,
+):
+ """
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+
+ Args:
+ scheduler (`SchedulerMixin`):
+ The scheduler to get timesteps from.
+ num_inference_steps (`int`):
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+ must be `None`.
+ device (`str` or `torch.device`, *optional*):
+            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
+ timesteps (`List[int]`, *optional*):
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+ `num_inference_steps` and `sigmas` must be `None`.
+ sigmas (`List[float]`, *optional*):
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+ `num_inference_steps` and `timesteps` must be `None`.
+
+ Returns:
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+ second element is the number of inference steps.
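+
+    Example (a minimal sketch, assuming `scheduler` is any diffusers scheduler instance):
+
+    ```python
+    >>> timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=50, device="cuda")
+    >>> # or, for schedulers that support it, a custom schedule:
+    >>> timesteps, num_inference_steps = retrieve_timesteps(scheduler, timesteps=[999, 749, 499, 249])
+    ```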
+ """
+ if timesteps is not None and sigmas is not None:
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+ if timesteps is not None:
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+ if not accepts_timesteps:
+ raise ValueError(
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+ f" timestep schedules. Please check whether you are using the correct scheduler."
+ )
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ num_inference_steps = len(timesteps)
+ elif sigmas is not None:
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+ if not accept_sigmas:
+ raise ValueError(
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
+ )
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ num_inference_steps = len(timesteps)
+ else:
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+ timesteps = scheduler.timesteps
+ return timesteps, num_inference_steps
+
+
+def resize_mask(mask, latent, process_first_frame_only=True):
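+    """Resize a pixel-space mask to the latent grid of `latent`.
+
+    When `process_first_frame_only` is True, the first mask frame is resized to the first
+    latent frame on its own, mirroring how the causal VAE maps the first pixel frame to a
+    dedicated latent frame, and the remaining frames are resized to the remaining latent
+    frames together.
+    """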
+ latent_size = latent.size()
+ batch_size, channels, num_frames, height, width = mask.shape
+
+ if process_first_frame_only:
+ target_size = list(latent_size[2:])
+ target_size[0] = 1
+ first_frame_resized = F.interpolate(
+ mask[:, :, 0:1, :, :],
+ size=target_size,
+ mode='trilinear',
+ align_corners=False
+ )
+
+ target_size = list(latent_size[2:])
+ target_size[0] = target_size[0] - 1
+ if target_size[0] != 0:
+ remaining_frames_resized = F.interpolate(
+ mask[:, :, 1:, :, :],
+ size=target_size,
+ mode='trilinear',
+ align_corners=False
+ )
+ resized_mask = torch.cat([first_frame_resized, remaining_frames_resized], dim=2)
+ else:
+ resized_mask = first_frame_resized
+ else:
+ target_size = list(latent_size[2:])
+ resized_mask = F.interpolate(
+ mask,
+ size=target_size,
+ mode='trilinear',
+ align_corners=False
+ )
+ return resized_mask
+
+
+@dataclass
+class CogVideoX_Fun_PipelineOutput(BaseOutput):
+ r"""
+ Output class for CogVideo pipelines.
+
+ Args:
+        videos (`torch.Tensor`, `np.ndarray`, or `List[List[PIL.Image.Image]]`):
+            List of video outputs - It can be a nested list of length `batch_size`, with each sub-list containing
+            denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of
+            shape `(batch_size, num_frames, channels, height, width)`.
+ """
+
+ videos: torch.Tensor
+
+
+class CogVideoX_Fun_Pipeline_Inpaint(DiffusionPipeline):
+ r"""
+    Pipeline for text-conditioned video generation and inpainting using CogVideoX-Fun.
+
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
+
+ Args:
+        vae ([`AutoencoderKLCogVideoX`]):
+            Variational Auto-Encoder (VAE) model to encode and decode videos to and from latent representations.
+ transformer ([`CogVideoXTransformer3DModel`]):
+ A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents.
+ scheduler ([`SchedulerMixin`]):
+ A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
+ """
+
+ _optional_components = []
+    model_cpu_offload_seq = "text_encoder->transformer->vae"
+
+ _callback_tensor_inputs = [
+ "latents",
+ "prompt_embeds",
+ "negative_prompt_embeds",
+ ]
+
+ def __init__(
+ self,
+ vae: AutoencoderKLCogVideoX,
+ transformer: CogVideoXTransformer3DModel,
+ scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler],
+ ):
+ super().__init__()
+
+ self.register_modules(
+ vae=vae, transformer=transformer, scheduler=scheduler
+ )
+ self.vae_scale_factor_spatial = (
+ 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
+ )
+ self.vae_scale_factor_temporal = (
+ self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4
+ )
+
+ self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial)
+
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
+ self.mask_processor = VaeImageProcessor(
+ vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True
+ )
+
+ def prepare_latents(
+ self,
+ batch_size,
+ num_channels_latents,
+ height,
+ width,
+ video_length,
+ dtype,
+ device,
+ generator,
+ latents=None,
+ video=None,
+ timestep=None,
+ is_strength_max=True,
+ return_noise=False,
+ return_video_latents=False,
+ ):
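+        # When denoising starts from an input video (strength < 1) or the caller asks for the
+        # clean video latents, the video is first encoded through the VAE; otherwise the
+        # latents are initialized from pure noise below.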
+ shape = (
+ batch_size,
+ (video_length - 1) // self.vae_scale_factor_temporal + 1,
+ num_channels_latents,
+ height // self.vae_scale_factor_spatial,
+ width // self.vae_scale_factor_spatial,
+ )
+ if isinstance(generator, list) and len(generator) != batch_size:
+ raise ValueError(
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+ )
+
+ if return_video_latents or (latents is None and not is_strength_max):
+ video = video.to(device=device, dtype=self.vae.dtype)
+
+ bs = 1
+ new_video = []
+ for i in range(0, video.shape[0], bs):
+ video_bs = video[i : i + bs]
+ video_bs = self.vae.encode(video_bs)[0]
+ video_bs = video_bs.sample()
+ new_video.append(video_bs)
+                video = torch.cat(new_video, dim=0)
+ video = video * self.vae.config.scaling_factor
+
+ video_latents = video.repeat(batch_size // video.shape[0], 1, 1, 1, 1)
+ video_latents = video_latents.to(device=device, dtype=dtype)
+ video_latents = rearrange(video_latents, "b c f h w -> b f c h w")
+
+ if latents is None:
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+            # if strength is 1, initialize the latents to pure noise, else to video latents + noise
+            latents = noise if is_strength_max else self.scheduler.add_noise(video_latents, noise, timestep)
+            # if pure noise, scale the initial latents by the scheduler's init sigma
+            latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
+ else:
+ noise = latents.to(device)
+ latents = noise * self.scheduler.init_noise_sigma
+
+ # scale the initial noise by the standard deviation required by the scheduler
+ outputs = (latents,)
+
+ if return_noise:
+ outputs += (noise,)
+
+ if return_video_latents:
+ outputs += (video_latents,)
+
+ return outputs
+
+ def prepare_mask_latents(
+ self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
+ ):
+ # resize the mask to latents shape as we concatenate the mask to the latents
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
+ # and half precision
+
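+        # Both the mask and the masked video are VAE-encoded in micro-batches of 1;
+        # `.mode()` is used instead of `.sample()` so the conditioning latents stay deterministic.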
+ if mask is not None:
+ mask = mask.to(device=device, dtype=self.vae.dtype)
+ bs = 1
+ new_mask = []
+ for i in range(0, mask.shape[0], bs):
+ mask_bs = mask[i : i + bs]
+ mask_bs = self.vae.encode(mask_bs)[0]
+ mask_bs = mask_bs.mode()
+ new_mask.append(mask_bs)
+ mask = torch.cat(new_mask, dim = 0)
+ mask = mask * self.vae.config.scaling_factor
+
+ if masked_image is not None:
+ masked_image = masked_image.to(device=device, dtype=self.vae.dtype)
+ bs = 1
+ new_mask_pixel_values = []
+ for i in range(0, masked_image.shape[0], bs):
+ mask_pixel_values_bs = masked_image[i : i + bs]
+ mask_pixel_values_bs = self.vae.encode(mask_pixel_values_bs)[0]
+ mask_pixel_values_bs = mask_pixel_values_bs.mode()
+ new_mask_pixel_values.append(mask_pixel_values_bs)
+ masked_image_latents = torch.cat(new_mask_pixel_values, dim = 0)
+ masked_image_latents = masked_image_latents * self.vae.config.scaling_factor
+ else:
+ masked_image_latents = None
+
+ return mask, masked_image_latents
+
+ def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
+ latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width]
+ latents = 1 / self.vae.config.scaling_factor * latents
+
+ frames = self.vae.decode(latents).sample
+ frames = (frames / 2 + 0.5).clamp(0, 1)
+        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+ frames = frames.cpu().float().numpy()
+ return frames
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+ def prepare_extra_step_kwargs(self, generator, eta):
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
+ # and should be between [0, 1]
+
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+ extra_step_kwargs = {}
+ if accepts_eta:
+ extra_step_kwargs["eta"] = eta
+
+ # check if the scheduler accepts generator
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+ if accepts_generator:
+ extra_step_kwargs["generator"] = generator
+ return extra_step_kwargs
+
+ # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs
+ def check_inputs(
+ self,
+ prompt,
+ height,
+ width,
+ negative_prompt,
+ callback_on_step_end_tensor_inputs,
+ prompt_embeds=None,
+ negative_prompt_embeds=None,
+ ):
+ if height % 8 != 0 or width % 8 != 0:
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
+
+ if callback_on_step_end_tensor_inputs is not None and not all(
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+ ):
+ raise ValueError(
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+ )
+ if prompt is not None and prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+ " only forward one of the two."
+ )
+ elif prompt is None and prompt_embeds is None:
+ raise ValueError(
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+ )
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+ if prompt is not None and negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+
+ if negative_prompt is not None and negative_prompt_embeds is not None:
+ raise ValueError(
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+ )
+
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
+ raise ValueError(
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+ f" {negative_prompt_embeds.shape}."
+ )
+
+ def fuse_qkv_projections(self) -> None:
+ r"""Enables fused QKV projections."""
+ self.fusing_transformer = True
+ self.transformer.fuse_qkv_projections()
+
+ def unfuse_qkv_projections(self) -> None:
+ r"""Disable QKV projection fusion if enabled."""
+ if not self.fusing_transformer:
+ logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.")
+ else:
+ self.transformer.unfuse_qkv_projections()
+ self.fusing_transformer = False
+
+ def _prepare_rotary_positional_embeddings(
+ self,
+ height: int,
+ width: int,
+ num_frames: int,
+ device: torch.device,
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
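+        # Rotary embeddings are rebuilt on every call from the current latent grid,
+        # so they adapt to whatever height/width/num_frames is requested.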
+ grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
+ grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
+ base_size_width = 720 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
+ base_size_height = 480 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size)
+
+ grid_crops_coords = get_resize_crop_region_for_grid(
+ (grid_height, grid_width), base_size_width, base_size_height
+ )
+ freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
+ embed_dim=self.transformer.config.attention_head_dim,
+ crops_coords=grid_crops_coords,
+ grid_size=(grid_height, grid_width),
+ temporal_size=num_frames,
+ use_real=True,
+ )
+
+ freqs_cos = freqs_cos.to(device=device)
+ freqs_sin = freqs_sin.to(device=device)
+ return freqs_cos, freqs_sin
+
+ @property
+ def guidance_scale(self):
+ return self._guidance_scale
+
+ @property
+ def num_timesteps(self):
+ return self._num_timesteps
+
+ @property
+ def interrupt(self):
+ return self._interrupt
+
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
+ def get_timesteps(self, num_inference_steps, strength, device):
+ # get the original timestep using init_timestep
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
+
+ t_start = max(num_inference_steps - init_timestep, 0)
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
+
+ return timesteps, num_inference_steps - t_start
+
+ @torch.no_grad()
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
+ def __call__(
+ self,
+ prompt: Optional[Union[str, List[str]]] = None,
+ negative_prompt: Optional[Union[str, List[str]]] = None,
+ height: int = 480,
+ width: int = 720,
+ video: Union[torch.FloatTensor] = None,
+ mask_video: Union[torch.FloatTensor] = None,
+ masked_video_latents: Union[torch.FloatTensor] = None,
+ num_frames: int = 49,
+ num_inference_steps: int = 50,
+ timesteps: Optional[List[int]] = None,
+ guidance_scale: float = 6,
+ use_dynamic_cfg: bool = False,
+ num_videos_per_prompt: int = 1,
+ eta: float = 0.0,
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+ latents: Optional[torch.FloatTensor] = None,
+ prompt_embeds: Optional[torch.FloatTensor] = None,
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
+ output_type: str = "numpy",
+ return_dict: bool = False,
+ callback_on_step_end: Optional[
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+ ] = None,
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+ max_sequence_length: int = 226,
+ strength: float = 1,
+ comfyui_progressbar: bool = False,
+ ) -> Union[CogVideoX_Fun_PipelineOutput, Tuple]:
+ """
+ Function invoked when calling the pipeline for generation.
+
+ Args:
+ prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`
+                instead.
+            negative_prompt (`str` or `List[str]`, *optional*):
+                The prompt or prompts not to guide the video generation. If not defined, one has to pass
+                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
+                less than `1`).
+            height (`int`, *optional*, defaults to `480`):
+                The height in pixels of the generated video.
+            width (`int`, *optional*, defaults to `720`):
+                The width in pixels of the generated video.
+            num_frames (`int`, defaults to `49`):
+                Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. Generated video will
+                contain 1 extra frame because CogVideoX_Fun is conditioned with (num_seconds * fps + 1) frames where
+                num_seconds is 6 and fps is 8. However, since videos can be saved at any fps, the only condition that
+                needs to be satisfied is that of divisibility mentioned above.
+ num_inference_steps (`int`, *optional*, defaults to 50):
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+ expense of slower inference.
+ timesteps (`List[int]`, *optional*):
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+ passed will be used. Must be in descending order.
+            guidance_scale (`float`, *optional*, defaults to `6`):
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
+ usually at the expense of lower image quality.
+ num_videos_per_prompt (`int`, *optional*, defaults to 1):
+ The number of videos to generate per prompt.
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
+ to make generation deterministic.
+ latents (`torch.FloatTensor`, *optional*):
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                tensor will be generated by sampling using the supplied random `generator`.
+ prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
+ provided, text embeddings will be generated from `prompt` input argument.
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
+ argument.
+            output_type (`str`, *optional*, defaults to `"numpy"`):
+                The output format of the generated video. Choose between
+                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
+            return_dict (`bool`, *optional*, defaults to `False`):
+                Whether or not to return a [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`]
+                instead of a plain tuple.
+ callback_on_step_end (`Callable`, *optional*):
+                A function that is called at the end of each denoising step during inference. The function is called
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
+ `callback_on_step_end_tensor_inputs`.
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+ `._callback_tensor_inputs` attribute of your pipeline class.
+ max_sequence_length (`int`, defaults to `226`):
+                Maximum sequence length of the encoded prompt. Must be consistent with
+                `self.transformer.config.max_text_seq_length`; otherwise the results may be poor.
+
+ Examples:
+
+ Returns:
+ [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] or `tuple`:
+ [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] if `return_dict` is True, otherwise a
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
+ """
+
+ if num_frames > 49:
+ raise ValueError(
+ "The number of frames must be less than 49 for now due to static positional embeddings. This will be updated in the future to remove this limitation."
+ )
+
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+ height = height or self.transformer.config.sample_size * self.vae_scale_factor_spatial
+ width = width or self.transformer.config.sample_size * self.vae_scale_factor_spatial
+ num_videos_per_prompt = 1
+
+ # 1. Check inputs. Raise error if not correct
+ self.check_inputs(
+ prompt,
+ height,
+ width,
+ negative_prompt,
+ callback_on_step_end_tensor_inputs,
+ prompt_embeds,
+ negative_prompt_embeds,
+ )
+ self._guidance_scale = guidance_scale
+ self._interrupt = False
+
+ # 2. Default call parameters
+ if prompt is not None and isinstance(prompt, str):
+ batch_size = 1
+ elif prompt is not None and isinstance(prompt, list):
+ batch_size = len(prompt)
+ else:
+ batch_size = prompt_embeds.shape[0]
+
+ device = self._execution_device
+
+ self.vae.to(device)
+
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+ # corresponds to doing no classifier free guidance.
+ do_classifier_free_guidance = guidance_scale > 1.0
+
+ if do_classifier_free_guidance:
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
+
+ # 4. set timesteps
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
+ timesteps, num_inference_steps = self.get_timesteps(
+ num_inference_steps=num_inference_steps, strength=strength, device=device
+ )
+ self._num_timesteps = len(timesteps)
+ if comfyui_progressbar:
+ from comfy.utils import ProgressBar
+ pbar = ProgressBar(num_inference_steps + 2)
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
+ latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt)
+ # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
+ is_strength_max = strength == 1.0
+
+ # 5. Prepare latents.
+ if video is not None:
+ video_length = video.shape[2]
+ init_video = self.image_processor.preprocess(rearrange(video, "b c f h w -> (b f) c h w"), height=height, width=width)
+ init_video = init_video.to(dtype=torch.float32)
+ init_video = rearrange(init_video, "(b f) c h w -> b c f h w", f=video_length)
+ else:
+ init_video = None
+
+ num_channels_latents = self.vae.config.latent_channels
+ num_channels_transformer = self.transformer.config.in_channels
+ return_image_latents = num_channels_transformer == num_channels_latents
+
+ latents_outputs = self.prepare_latents(
+ batch_size * num_videos_per_prompt,
+ num_channels_latents,
+ height,
+ width,
+ video_length,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ latents,
+ video=init_video,
+ timestep=latent_timestep,
+ is_strength_max=is_strength_max,
+ return_noise=True,
+ return_video_latents=return_image_latents,
+ )
+ if return_image_latents:
+ latents, noise, image_latents = latents_outputs
+ else:
+ latents, noise = latents_outputs
+ if comfyui_progressbar:
+ pbar.update(1)
+
+ if mask_video is not None:
+ if (mask_video == 255).all():
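+                # A fully white mask means every frame is regenerated, so the inpaint
+                # conditioning collapses to all-zero mask and masked-video latents.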
+ mask_latents = torch.zeros_like(latents)[:, :, :1].to(latents.device, latents.dtype)
+ masked_video_latents = torch.zeros_like(latents).to(latents.device, latents.dtype)
+
+ mask_input = torch.cat([mask_latents] * 2) if do_classifier_free_guidance else mask_latents
+ masked_video_latents_input = (
+ torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
+ )
+ inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=2).to(latents.dtype)
+ else:
+ # Prepare mask latent variables
+ video_length = video.shape[2]
+ mask_condition = self.mask_processor.preprocess(rearrange(mask_video, "b c f h w -> (b f) c h w"), height=height, width=width)
+ mask_condition = mask_condition.to(dtype=torch.float32)
+ mask_condition = rearrange(mask_condition, "(b f) c h w -> b c f h w", f=video_length)
+
+ if num_channels_transformer != num_channels_latents:
+ mask_condition_tile = torch.tile(mask_condition, [1, 3, 1, 1, 1])
+ if masked_video_latents is None:
+ masked_video = init_video * (mask_condition_tile < 0.5) + torch.ones_like(init_video) * (mask_condition_tile > 0.5) * -1
+ else:
+ masked_video = masked_video_latents
+
+ _, masked_video_latents = self.prepare_mask_latents(
+ None,
+ masked_video,
+ batch_size,
+ height,
+ width,
+ prompt_embeds.dtype,
+ device,
+ generator,
+ do_classifier_free_guidance,
+ )
+ mask_latents = resize_mask(1 - mask_condition, masked_video_latents)
+ mask_latents = mask_latents.to(masked_video_latents.device) * self.vae.config.scaling_factor
+
+ mask = torch.tile(mask_condition, [1, num_channels_latents, 1, 1, 1])
+ mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)
+
+ mask_input = torch.cat([mask_latents] * 2) if do_classifier_free_guidance else mask_latents
+ masked_video_latents_input = (
+ torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
+ )
+
+ mask = rearrange(mask, "b c f h w -> b f c h w")
+ mask_input = rearrange(mask_input, "b c f h w -> b f c h w")
+ masked_video_latents_input = rearrange(masked_video_latents_input, "b c f h w -> b f c h w")
+
+ inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=2).to(latents.dtype)
+ else:
+ mask = torch.tile(mask_condition, [1, num_channels_latents, 1, 1, 1])
+ mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)
+ mask = rearrange(mask, "b c f h w -> b f c h w")
+
+ inpaint_latents = None
+ else:
+ if num_channels_transformer != num_channels_latents:
+ mask = torch.zeros_like(latents).to(latents.device, latents.dtype)
+ masked_video_latents = torch.zeros_like(latents).to(latents.device, latents.dtype)
+
+ mask_input = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
+ masked_video_latents_input = (
+ torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
+ )
+ inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=1).to(latents.dtype)
+ else:
+ mask = torch.zeros_like(init_video[:, :1])
+ mask = torch.tile(mask, [1, num_channels_latents, 1, 1, 1])
+ mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)
+ mask = rearrange(mask, "b c f h w -> b f c h w")
+
+ inpaint_latents = None
+ if comfyui_progressbar:
+ pbar.update(1)
+
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+ # 7. Create rotary embeds if required
+ image_rotary_emb = (
+ self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
+ if self.transformer.config.use_rotary_positional_embeddings
+ else None
+ )
+
+ # 8. Denoising loop
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
+ # for DPM-solver++
+ old_pred_original_sample = None
+ for i, t in enumerate(timesteps):
+ if self.interrupt:
+ continue
+
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
+ timestep = t.expand(latent_model_input.shape[0])
+
+ # predict noise model_output
+ noise_pred = self.transformer(
+ hidden_states=latent_model_input,
+ encoder_hidden_states=prompt_embeds,
+ timestep=timestep,
+ image_rotary_emb=image_rotary_emb,
+ return_dict=False,
+ inpaint_latents=inpaint_latents,
+ )[0]
+ noise_pred = noise_pred.float()
+
+ # perform guidance
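+                # Dynamic CFG ramps the guidance scale over the denoising trajectory
+                # with a cosine schedule instead of keeping it constant.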
+ if use_dynamic_cfg:
+ self._guidance_scale = 1 + guidance_scale * (
+ (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2
+ )
+ if do_classifier_free_guidance:
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+ # compute the previous noisy sample x_t -> x_t-1
+ if not isinstance(self.scheduler, CogVideoXDPMScheduler):
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+ else:
+ latents, old_pred_original_sample = self.scheduler.step(
+ noise_pred,
+ old_pred_original_sample,
+ t,
+ timesteps[i - 1] if i > 0 else None,
+ latents,
+ **extra_step_kwargs,
+ return_dict=False,
+ )
+ latents = latents.to(prompt_embeds.dtype)
+
+ # call the callback, if provided
+ if callback_on_step_end is not None:
+ callback_kwargs = {}
+ for k in callback_on_step_end_tensor_inputs:
+ callback_kwargs[k] = locals()[k]
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+ latents = callback_outputs.pop("latents", latents)
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+ progress_bar.update()
+ if comfyui_progressbar:
+ pbar.update(1)
+
+ # if output_type == "numpy":
+ # video = self.decode_latents(latents)
+ # elif not output_type == "latent":
+ # video = self.decode_latents(latents)
+ # video = self.video_processor.postprocess_video(video=video, output_type=output_type)
+ # else:
+ # video = latents
+
+ # Offload all models
+ self.maybe_free_model_hooks()
+
+ # if not return_dict:
+ # video = torch.from_numpy(video)
+
+ return latents
\ No newline at end of file
diff --git a/cogvideox_fun/transformer_3d.py b/cogvideox_fun/transformer_3d.py
new file mode 100644
index 0000000..b80af91
--- /dev/null
+++ b/cogvideox_fun/transformer_3d.py
@@ -0,0 +1,605 @@
+# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
+# All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict, Optional, Tuple, Union
+
+import os
+import json
+import torch
+import glob
+import torch.nn.functional as F
+from torch import nn
+
+from diffusers.configuration_utils import ConfigMixin, register_to_config
+from diffusers.utils import is_torch_version, logging
+from diffusers.utils.torch_utils import maybe_allow_in_graph
+from diffusers.models.attention import Attention, FeedForward
+from diffusers.models.attention_processor import AttentionProcessor, CogVideoXAttnProcessor2_0, FusedCogVideoXAttnProcessor2_0
+from diffusers.models.embeddings import TimestepEmbedding, Timesteps, get_3d_sincos_pos_embed
+from diffusers.models.modeling_outputs import Transformer2DModelOutput
+from diffusers.models.modeling_utils import ModelMixin
+from diffusers.models.normalization import AdaLayerNorm, CogVideoXLayerNormZero
+
+
+logger = logging.get_logger(__name__) # pylint: disable=invalid-name
+
+class CogVideoXPatchEmbed(nn.Module):
+ def __init__(
+ self,
+ patch_size: int = 2,
+ in_channels: int = 16,
+ embed_dim: int = 1920,
+ text_embed_dim: int = 4096,
+ bias: bool = True,
+ ) -> None:
+ super().__init__()
+ self.patch_size = patch_size
+
+ self.proj = nn.Conv2d(
+ in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias
+ )
+ self.text_proj = nn.Linear(text_embed_dim, embed_dim)
+
+ def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor):
+ r"""
+ Args:
+ text_embeds (`torch.Tensor`):
+ Input text embeddings. Expected shape: (batch_size, seq_length, embedding_dim).
+ image_embeds (`torch.Tensor`):
+ Input image embeddings. Expected shape: (batch_size, num_frames, channels, height, width).
+ """
+ text_embeds = self.text_proj(text_embeds)
+
+ batch, num_frames, channels, height, width = image_embeds.shape
+ image_embeds = image_embeds.reshape(-1, channels, height, width)
+ image_embeds = self.proj(image_embeds)
+ image_embeds = image_embeds.view(batch, num_frames, *image_embeds.shape[1:])
+ image_embeds = image_embeds.flatten(3).transpose(2, 3) # [batch, num_frames, height x width, channels]
+ image_embeds = image_embeds.flatten(1, 2) # [batch, num_frames x height x width, channels]
+
+ embeds = torch.cat(
+ [text_embeds, image_embeds], dim=1
+ ).contiguous() # [batch, seq_length + num_frames x height x width, channels]
+ return embeds
+
+@maybe_allow_in_graph
+class CogVideoXBlock(nn.Module):
+ r"""
+ Transformer block used in [CogVideoX](https://github.com/THUDM/CogVideo) model.
+
+ Parameters:
+ dim (`int`):
+ The number of channels in the input and output.
+ num_attention_heads (`int`):
+ The number of heads to use for multi-head attention.
+ attention_head_dim (`int`):
+ The number of channels in each head.
+ time_embed_dim (`int`):
+ The number of channels in timestep embedding.
+ dropout (`float`, defaults to `0.0`):
+ The dropout probability to use.
+ activation_fn (`str`, defaults to `"gelu-approximate"`):
+ Activation function to be used in feed-forward.
+ attention_bias (`bool`, defaults to `False`):
+ Whether or not to use bias in attention projection layers.
+ qk_norm (`bool`, defaults to `True`):
+ Whether or not to use normalization after query and key projections in Attention.
+ norm_elementwise_affine (`bool`, defaults to `True`):
+ Whether to use learnable elementwise affine parameters for normalization.
+ norm_eps (`float`, defaults to `1e-5`):
+ Epsilon value for normalization layers.
+ final_dropout (`bool` defaults to `False`):
+ Whether to apply a final dropout after the last feed-forward layer.
+ ff_inner_dim (`int`, *optional*, defaults to `None`):
+ Custom hidden dimension of Feed-forward layer. If not provided, `4 * dim` is used.
+ ff_bias (`bool`, defaults to `True`):
+ Whether or not to use bias in Feed-forward layer.
+ attention_out_bias (`bool`, defaults to `True`):
+ Whether or not to use bias in Attention output projection layer.
+ """
+
+ def __init__(
+ self,
+ dim: int,
+ num_attention_heads: int,
+ attention_head_dim: int,
+ time_embed_dim: int,
+ dropout: float = 0.0,
+ activation_fn: str = "gelu-approximate",
+ attention_bias: bool = False,
+ qk_norm: bool = True,
+ norm_elementwise_affine: bool = True,
+ norm_eps: float = 1e-5,
+ final_dropout: bool = True,
+ ff_inner_dim: Optional[int] = None,
+ ff_bias: bool = True,
+ attention_out_bias: bool = True,
+ ):
+ super().__init__()
+
+ # 1. Self Attention
+ self.norm1 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True)
+
+ self.attn1 = Attention(
+ query_dim=dim,
+ dim_head=attention_head_dim,
+ heads=num_attention_heads,
+ qk_norm="layer_norm" if qk_norm else None,
+ eps=1e-6,
+ bias=attention_bias,
+ out_bias=attention_out_bias,
+ processor=CogVideoXAttnProcessor2_0(),
+ )
+
+ # 2. Feed Forward
+ self.norm2 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True)
+
+ self.ff = FeedForward(
+ dim,
+ dropout=dropout,
+ activation_fn=activation_fn,
+ final_dropout=final_dropout,
+ inner_dim=ff_inner_dim,
+ bias=ff_bias,
+ )
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ encoder_hidden_states: torch.Tensor,
+ temb: torch.Tensor,
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+ ) -> torch.Tensor:
+ text_seq_length = encoder_hidden_states.size(1)
+
+ # norm & modulate
+ norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1(
+ hidden_states, encoder_hidden_states, temb
+ )
+
+ # attention
+ attn_hidden_states, attn_encoder_hidden_states = self.attn1(
+ hidden_states=norm_hidden_states,
+ encoder_hidden_states=norm_encoder_hidden_states,
+ image_rotary_emb=image_rotary_emb,
+ )
+
+ hidden_states = hidden_states + gate_msa * attn_hidden_states
+ encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states
+
+ # norm & modulate
+ norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2(
+ hidden_states, encoder_hidden_states, temb
+ )
+
+ # feed-forward
+ norm_hidden_states = torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1)
+ ff_output = self.ff(norm_hidden_states)
+
+ hidden_states = hidden_states + gate_ff * ff_output[:, text_seq_length:]
+ encoder_hidden_states = encoder_hidden_states + enc_gate_ff * ff_output[:, :text_seq_length]
+
+ return hidden_states, encoder_hidden_states
+
+
+class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin):
+ """
+ A Transformer model for video-like data in [CogVideoX](https://github.com/THUDM/CogVideo).
+
+ Parameters:
+ num_attention_heads (`int`, defaults to `30`):
+ The number of heads to use for multi-head attention.
+ attention_head_dim (`int`, defaults to `64`):
+ The number of channels in each head.
+ in_channels (`int`, defaults to `16`):
+ The number of channels in the input.
+ out_channels (`int`, *optional*, defaults to `16`):
+ The number of channels in the output.
+ flip_sin_to_cos (`bool`, defaults to `True`):
+ Whether to flip the sin to cos in the time embedding.
+ time_embed_dim (`int`, defaults to `512`):
+ Output dimension of timestep embeddings.
+ text_embed_dim (`int`, defaults to `4096`):
+ Input dimension of text embeddings from the text encoder.
+ num_layers (`int`, defaults to `30`):
+ The number of layers of Transformer blocks to use.
+ dropout (`float`, defaults to `0.0`):
+ The dropout probability to use.
+ attention_bias (`bool`, defaults to `True`):
+ Whether or not to use bias in the attention projection layers.
+ sample_width (`int`, defaults to `90`):
+ The width of the input latents.
+ sample_height (`int`, defaults to `60`):
+ The height of the input latents.
+ sample_frames (`int`, defaults to `49`):
+ The number of frames in the input latents. Note that this parameter was incorrectly initialized to 49
+ instead of 13 because CogVideoX processed 13 latent frames at once in its default and recommended settings,
+ but cannot be changed to the correct value to ensure backwards compatibility. To create a transformer with
+ K latent frames, the correct value to pass here would be: ((K - 1) * temporal_compression_ratio + 1).
+ patch_size (`int`, defaults to `2`):
+ The size of the patches to use in the patch embedding layer.
+ temporal_compression_ratio (`int`, defaults to `4`):
+ The compression ratio across the temporal dimension. See documentation for `sample_frames`.
+ max_text_seq_length (`int`, defaults to `226`):
+ The maximum sequence length of the input text embeddings.
+ activation_fn (`str`, defaults to `"gelu-approximate"`):
+ Activation function to use in feed-forward.
+ timestep_activation_fn (`str`, defaults to `"silu"`):
+ Activation function to use when generating the timestep embeddings.
+ norm_elementwise_affine (`bool`, defaults to `True`):
+ Whether or not to use elementwise affine in normalization layers.
+ norm_eps (`float`, defaults to `1e-5`):
+ The epsilon value to use in normalization layers.
+ spatial_interpolation_scale (`float`, defaults to `1.875`):
+ Scaling factor to apply in 3D positional embeddings across spatial dimensions.
+ temporal_interpolation_scale (`float`, defaults to `1.0`):
+ Scaling factor to apply in 3D positional embeddings across temporal dimensions.
+ """
+
+ _supports_gradient_checkpointing = True
+
+ @register_to_config
+ def __init__(
+ self,
+ num_attention_heads: int = 30,
+ attention_head_dim: int = 64,
+ in_channels: int = 16,
+ out_channels: Optional[int] = 16,
+ flip_sin_to_cos: bool = True,
+ freq_shift: int = 0,
+ time_embed_dim: int = 512,
+ text_embed_dim: int = 4096,
+ num_layers: int = 30,
+ dropout: float = 0.0,
+ attention_bias: bool = True,
+ sample_width: int = 90,
+ sample_height: int = 60,
+ sample_frames: int = 49,
+ patch_size: int = 2,
+ temporal_compression_ratio: int = 4,
+ max_text_seq_length: int = 226,
+ activation_fn: str = "gelu-approximate",
+ timestep_activation_fn: str = "silu",
+ norm_elementwise_affine: bool = True,
+ norm_eps: float = 1e-5,
+ spatial_interpolation_scale: float = 1.875,
+ temporal_interpolation_scale: float = 1.0,
+ use_rotary_positional_embeddings: bool = False,
+ ):
+ super().__init__()
+ inner_dim = num_attention_heads * attention_head_dim
+
+ post_patch_height = sample_height // patch_size
+ post_patch_width = sample_width // patch_size
+ post_time_compression_frames = (sample_frames - 1) // temporal_compression_ratio + 1
+ self.num_patches = post_patch_height * post_patch_width * post_time_compression_frames
+ self.post_patch_height = post_patch_height
+ self.post_patch_width = post_patch_width
+ self.post_time_compression_frames = post_time_compression_frames
+ self.patch_size = patch_size
+
+ # 1. Patch embedding
+ self.patch_embed = CogVideoXPatchEmbed(patch_size, in_channels, inner_dim, text_embed_dim, bias=True)
+ self.embedding_dropout = nn.Dropout(dropout)
+
+ # 2. 3D positional embeddings
+ spatial_pos_embedding = get_3d_sincos_pos_embed(
+ inner_dim,
+ (post_patch_width, post_patch_height),
+ post_time_compression_frames,
+ spatial_interpolation_scale,
+ temporal_interpolation_scale,
+ )
+ spatial_pos_embedding = torch.from_numpy(spatial_pos_embedding).flatten(0, 1)
+ pos_embedding = torch.zeros(1, max_text_seq_length + self.num_patches, inner_dim, requires_grad=False)
+ pos_embedding.data[:, max_text_seq_length:].copy_(spatial_pos_embedding)
+ self.register_buffer("pos_embedding", pos_embedding, persistent=False)
+
+ # 3. Time embeddings
+ self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift)
+ self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn)
+
+ # 4. Define spatio-temporal transformers blocks
+ self.transformer_blocks = nn.ModuleList(
+ [
+ CogVideoXBlock(
+ dim=inner_dim,
+ num_attention_heads=num_attention_heads,
+ attention_head_dim=attention_head_dim,
+ time_embed_dim=time_embed_dim,
+ dropout=dropout,
+ activation_fn=activation_fn,
+ attention_bias=attention_bias,
+ norm_elementwise_affine=norm_elementwise_affine,
+ norm_eps=norm_eps,
+ )
+ for _ in range(num_layers)
+ ]
+ )
+ self.norm_final = nn.LayerNorm(inner_dim, norm_eps, norm_elementwise_affine)
+
+ # 5. Output blocks
+ self.norm_out = AdaLayerNorm(
+ embedding_dim=time_embed_dim,
+ output_dim=2 * inner_dim,
+ norm_elementwise_affine=norm_elementwise_affine,
+ norm_eps=norm_eps,
+ chunk_dim=1,
+ )
+ self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels)
+
+ self.gradient_checkpointing = False
+
+ def _set_gradient_checkpointing(self, module, value=False):
+ self.gradient_checkpointing = value
+
+ @property
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
+ r"""
+ Returns:
+            `dict` of attention processors: A dictionary containing all attention processors used in the model,
+            indexed by their weight names.
+ """
+ # set recursively
+ processors = {}
+
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
+ if hasattr(module, "get_processor"):
+ processors[f"{name}.processor"] = module.get_processor()
+
+ for sub_name, child in module.named_children():
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
+
+ return processors
+
+ for name, module in self.named_children():
+ fn_recursive_add_processors(name, module, processors)
+
+ return processors
+
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+ r"""
+ Sets the attention processor to use to compute attention.
+
+ Parameters:
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
+ for **all** `Attention` layers.
+
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
+ processor. This is strongly recommended when setting trainable attention processors.
+
+ """
+ count = len(self.attn_processors.keys())
+
+ if isinstance(processor, dict) and len(processor) != count:
+ raise ValueError(
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
+ )
+
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
+ if hasattr(module, "set_processor"):
+ if not isinstance(processor, dict):
+ module.set_processor(processor)
+ else:
+ module.set_processor(processor.pop(f"{name}.processor"))
+
+ for sub_name, child in module.named_children():
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
+
+ for name, module in self.named_children():
+ fn_recursive_attn_processor(name, module, processor)
+
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedCogVideoXAttnProcessor2_0
+ def fuse_qkv_projections(self):
+ """
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
+ are fused. For cross-attention modules, key and value projection matrices are fused.
+
+        This API is 🧪 experimental.
+ """
+ self.original_attn_processors = None
+
+ for _, attn_processor in self.attn_processors.items():
+ if "Added" in str(attn_processor.__class__.__name__):
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
+
+ self.original_attn_processors = self.attn_processors
+
+ for module in self.modules():
+ if isinstance(module, Attention):
+ module.fuse_projections(fuse=True)
+
+ self.set_attn_processor(FusedCogVideoXAttnProcessor2_0())
+
+ # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
+ def unfuse_qkv_projections(self):
+ """Disables the fused QKV projection if enabled.
+
+        This API is 🧪 experimental.
+
+ """
+ if self.original_attn_processors is not None:
+ self.set_attn_processor(self.original_attn_processors)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ encoder_hidden_states: torch.Tensor,
+ timestep: Union[int, float, torch.LongTensor],
+ timestep_cond: Optional[torch.Tensor] = None,
+ inpaint_latents: Optional[torch.Tensor] = None,
+ image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+ return_dict: bool = True,
+ ):
+ batch_size, num_frames, channels, height, width = hidden_states.shape
+
+ # 1. Time embedding
+ timesteps = timestep
+ t_emb = self.time_proj(timesteps)
+
+ # timesteps does not contain any weights and will always return f32 tensors
+ # but time_embedding might actually be running in fp16. so we need to cast here.
+ # there might be better ways to encapsulate this.
+ t_emb = t_emb.to(dtype=hidden_states.dtype)
+ emb = self.time_embedding(t_emb, timestep_cond)
+
+ # 2. Patch embedding
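+        # Inpaint conditioning (mask + masked-video latents) is concatenated to the noisy
+        # latents along the channel dimension before patchification.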
+ if inpaint_latents is not None:
+ hidden_states = torch.concat([hidden_states, inpaint_latents], 2)
+ hidden_states = self.patch_embed(encoder_hidden_states, hidden_states)
+
+ # 3. Position embedding
+ text_seq_length = encoder_hidden_states.shape[1]
+ if not self.config.use_rotary_positional_embeddings:
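+            # The precomputed sincos positional table is stored at the training resolution;
+            # interpolate its spatial part so other latent heights/widths are supported.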
+ seq_length = height * width * num_frames // (self.config.patch_size**2)
+ # pos_embeds = self.pos_embedding[:, : text_seq_length + seq_length]
+ pos_embeds = self.pos_embedding
+ emb_size = hidden_states.size()[-1]
+            pos_embeds_without_text = pos_embeds[:, text_seq_length:].view(1, self.post_time_compression_frames, self.post_patch_height, self.post_patch_width, emb_size)
+            pos_embeds_without_text = pos_embeds_without_text.permute([0, 4, 1, 2, 3])
+            pos_embeds_without_text = F.interpolate(pos_embeds_without_text, size=[self.post_time_compression_frames, height // self.config.patch_size, width // self.config.patch_size], mode='trilinear', align_corners=False)
+            pos_embeds_without_text = pos_embeds_without_text.permute([0, 2, 3, 4, 1]).view(1, -1, emb_size)
+            pos_embeds = torch.cat([pos_embeds[:, :text_seq_length], pos_embeds_without_text], dim=1)
+ pos_embeds = pos_embeds[:, : text_seq_length + seq_length]
+ hidden_states = hidden_states + pos_embeds
+ hidden_states = self.embedding_dropout(hidden_states)
+
+ encoder_hidden_states = hidden_states[:, :text_seq_length]
+ hidden_states = hidden_states[:, text_seq_length:]
+
+ # 4. Transformer blocks
+ for i, block in enumerate(self.transformer_blocks):
+ if self.training and self.gradient_checkpointing:
+
+ def create_custom_forward(module):
+ def custom_forward(*inputs):
+ return module(*inputs)
+
+ return custom_forward
+
+ ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
+ hidden_states, encoder_hidden_states = torch.utils.checkpoint.checkpoint(
+ create_custom_forward(block),
+ hidden_states,
+ encoder_hidden_states,
+ emb,
+ image_rotary_emb,
+ **ckpt_kwargs,
+ )
+ else:
+ hidden_states, encoder_hidden_states = block(
+ hidden_states=hidden_states,
+ encoder_hidden_states=encoder_hidden_states,
+ temb=emb,
+ image_rotary_emb=image_rotary_emb,
+ )
+
+ if not self.config.use_rotary_positional_embeddings:
+ # CogVideoX-2B
+ hidden_states = self.norm_final(hidden_states)
+ else:
+ # CogVideoX-5B
+ hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
+ hidden_states = self.norm_final(hidden_states)
+ hidden_states = hidden_states[:, text_seq_length:]
+
+ # 5. Final block
+ hidden_states = self.norm_out(hidden_states, temb=emb)
+ hidden_states = self.proj_out(hidden_states)
+
+ # 6. Unpatchify
+ p = self.config.patch_size
+ output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, channels, p, p)
+ output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4)
+
+ if not return_dict:
+ return (output,)
+ return Transformer2DModelOutput(sample=output)
+
+ @classmethod
+ def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, transformer_additional_kwargs={}):
+ if subfolder is not None:
+ pretrained_model_path = os.path.join(pretrained_model_path, subfolder)
+ print(f"loaded 3D transformer's pretrained weights from {pretrained_model_path} ...")
+
+ config_file = os.path.join(pretrained_model_path, 'config.json')
+ if not os.path.isfile(config_file):
+ raise RuntimeError(f"{config_file} does not exist")
+ with open(config_file, "r") as f:
+ config = json.load(f)
+
+ from diffusers.utils import WEIGHTS_NAME
+ model = cls.from_config(config, **transformer_additional_kwargs)
+ model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)
+ model_file_safetensors = model_file.replace(".bin", ".safetensors")
+ if os.path.exists(model_file):
+ state_dict = torch.load(model_file, map_location="cpu")
+ elif os.path.exists(model_file_safetensors):
+ from safetensors.torch import load_file, safe_open
+ state_dict = load_file(model_file_safetensors)
+ else:
+ from safetensors.torch import load_file, safe_open
+ model_files_safetensors = glob.glob(os.path.join(pretrained_model_path, "*.safetensors"))
+ state_dict = {}
+ for model_file_safetensors in model_files_safetensors:
+ _state_dict = load_file(model_file_safetensors)
+ for key in _state_dict:
+ state_dict[key] = _state_dict[key]
+
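+        # If the checkpoint's patch-embedding weight has a different input-channel count
+        # (e.g. extra inpaint channels), zero-pad or truncate it to fit the model.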
+ if model.state_dict()['patch_embed.proj.weight'].size() != state_dict['patch_embed.proj.weight'].size():
+ new_shape = model.state_dict()['patch_embed.proj.weight'].size()
+ if len(new_shape) == 5:
+ state_dict['patch_embed.proj.weight'] = state_dict['patch_embed.proj.weight'].unsqueeze(2).expand(new_shape).clone()
+ state_dict['patch_embed.proj.weight'][:, :, :-1] = 0
+ else:
+ if model.state_dict()['patch_embed.proj.weight'].size()[1] > state_dict['patch_embed.proj.weight'].size()[1]:
+ model.state_dict()['patch_embed.proj.weight'][:, :state_dict['patch_embed.proj.weight'].size()[1], :, :] = state_dict['patch_embed.proj.weight']
+ model.state_dict()['patch_embed.proj.weight'][:, state_dict['patch_embed.proj.weight'].size()[1]:, :, :] = 0
+ state_dict['patch_embed.proj.weight'] = model.state_dict()['patch_embed.proj.weight']
+ else:
+ model.state_dict()['patch_embed.proj.weight'][:, :, :, :] = state_dict['patch_embed.proj.weight'][:, :model.state_dict()['patch_embed.proj.weight'].size()[1], :, :]
+ state_dict['patch_embed.proj.weight'] = model.state_dict()['patch_embed.proj.weight']
+
+ tmp_state_dict = {}
+ for key in state_dict:
+ if key in model.state_dict().keys() and model.state_dict()[key].size() == state_dict[key].size():
+ tmp_state_dict[key] = state_dict[key]
+ else:
+                print(key, "size mismatch, skipping")
+ state_dict = tmp_state_dict
+
+ m, u = model.load_state_dict(state_dict, strict=False)
+ print(f"### missing keys: {len(m)}; \n### unexpected keys: {len(u)};")
+ print(m)
+
+ params = [p.numel() if "mamba" in n else 0 for n, p in model.named_parameters()]
+ print(f"### Mamba Parameters: {sum(params) / 1e6} M")
+
+ params = [p.numel() if "attn1." in n else 0 for n, p in model.named_parameters()]
+ print(f"### attn1 Parameters: {sum(params) / 1e6} M")
+
+ return model
\ No newline at end of file
diff --git a/cogvideox_fun/utils.py b/cogvideox_fun/utils.py
new file mode 100644
index 0000000..e9c5cc7
--- /dev/null
+++ b/cogvideox_fun/utils.py
@@ -0,0 +1,246 @@
+import os
+import gc
+import imageio
+import numpy as np
+import torch
+import torchvision
+import cv2
+from einops import rearrange
+from PIL import Image
+
+# Copyright (c) OpenMMLab. All rights reserved.
+import os
+import cv2
+import numpy as np
+import torch
+from PIL import Image
+
+
+def tensor2pil(image):
+ return Image.fromarray(np.clip(255. * image.cpu().numpy(), 0, 255).astype(np.uint8))
+
+def numpy2pil(image):
+ return Image.fromarray(np.clip(255. * image, 0, 255).astype(np.uint8))
+
+def to_pil(image):
+ if isinstance(image, Image.Image):
+ return image
+ if isinstance(image, torch.Tensor):
+ return tensor2pil(image)
+ if isinstance(image, np.ndarray):
+ return numpy2pil(image)
+ raise ValueError(f"Cannot convert {type(image)} to PIL.Image")
+
+ASPECT_RATIO_512 = {
+ '0.25': [256.0, 1024.0], '0.26': [256.0, 992.0], '0.27': [256.0, 960.0], '0.28': [256.0, 928.0],
+ '0.32': [288.0, 896.0], '0.33': [288.0, 864.0], '0.35': [288.0, 832.0], '0.4': [320.0, 800.0],
+ '0.42': [320.0, 768.0], '0.48': [352.0, 736.0], '0.5': [352.0, 704.0], '0.52': [352.0, 672.0],
+ '0.57': [384.0, 672.0], '0.6': [384.0, 640.0], '0.68': [416.0, 608.0], '0.72': [416.0, 576.0],
+ '0.78': [448.0, 576.0], '0.82': [448.0, 544.0], '0.88': [480.0, 544.0], '0.94': [480.0, 512.0],
+ '1.0': [512.0, 512.0], '1.07': [512.0, 480.0], '1.13': [544.0, 480.0], '1.21': [544.0, 448.0],
+ '1.29': [576.0, 448.0], '1.38': [576.0, 416.0], '1.46': [608.0, 416.0], '1.67': [640.0, 384.0],
+ '1.75': [672.0, 384.0], '2.0': [704.0, 352.0], '2.09': [736.0, 352.0], '2.4': [768.0, 320.0],
+ '2.5': [800.0, 320.0], '2.89': [832.0, 288.0], '3.0': [864.0, 288.0], '3.11': [896.0, 288.0],
+ '3.62': [928.0, 256.0], '3.75': [960.0, 256.0], '3.88': [992.0, 256.0], '4.0': [1024.0, 256.0]
+}
+ASPECT_RATIO_RANDOM_CROP_512 = {
+ '0.42': [320.0, 768.0], '0.5': [352.0, 704.0],
+ '0.57': [384.0, 672.0], '0.68': [416.0, 608.0], '0.78': [448.0, 576.0], '0.88': [480.0, 544.0],
+ '0.94': [480.0, 512.0], '1.0': [512.0, 512.0], '1.07': [512.0, 480.0],
+ '1.13': [544.0, 480.0], '1.29': [576.0, 448.0], '1.46': [608.0, 416.0], '1.75': [672.0, 384.0],
+ '2.0': [704.0, 352.0], '2.4': [768.0, 320.0]
+}
+ASPECT_RATIO_RANDOM_CROP_PROB = [
+ 1, 2,
+ 4, 4, 4, 4,
+ 8, 8, 8,
+ 4, 4, 4, 4,
+ 2, 1
+]
+ASPECT_RATIO_RANDOM_CROP_PROB = np.array(ASPECT_RATIO_RANDOM_CROP_PROB) / sum(ASPECT_RATIO_RANDOM_CROP_PROB)
+
+def get_closest_ratio(height: float, width: float, ratios: dict = ASPECT_RATIO_512):
+ aspect_ratio = height / width
+ closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - aspect_ratio))
+ return ratios[closest_ratio], float(closest_ratio)
+
+
+def get_width_and_height_from_image_and_base_resolution(image, base_resolution):
+ target_pixels = int(base_resolution) * int(base_resolution)
+ original_width, original_height = Image.open(image).size
+ ratio = (target_pixels / (original_width * original_height)) ** 0.5
+ width_slider = round(original_width * ratio)
+ height_slider = round(original_height * ratio)
+ return height_slider, width_slider
+
+def color_transfer(sc, dc):
+ """
+    Transfer the color distribution of the reference image `dc` onto the source image `sc`.
+
+    Args:
+        sc (numpy.ndarray): source image to be recolored.
+        dc (numpy.ndarray): reference image.
+
+    Returns:
+        numpy.ndarray: `sc` recolored to match the color statistics of `dc`.
+ """
+
+ def get_mean_and_std(img):
+ x_mean, x_std = cv2.meanStdDev(img)
+ x_mean = np.hstack(np.around(x_mean, 2))
+ x_std = np.hstack(np.around(x_std, 2))
+ return x_mean, x_std
+
+ sc = cv2.cvtColor(sc, cv2.COLOR_RGB2LAB)
+ s_mean, s_std = get_mean_and_std(sc)
+ dc = cv2.cvtColor(dc, cv2.COLOR_RGB2LAB)
+ t_mean, t_std = get_mean_and_std(dc)
+ img_n = ((sc - s_mean) * (t_std / s_std)) + t_mean
+ np.putmask(img_n, img_n > 255, 255)
+ np.putmask(img_n, img_n < 0, 0)
+ dst = cv2.cvtColor(cv2.convertScaleAbs(img_n), cv2.COLOR_LAB2RGB)
+ return dst
+
+def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=12, imageio_backend=True, color_transfer_post_process=False):
+ videos = rearrange(videos, "b c t h w -> t b c h w")
+ outputs = []
+ for x in videos:
+ x = torchvision.utils.make_grid(x, nrow=n_rows)
+ x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)
+ if rescale:
+ x = (x + 1.0) / 2.0 # -1,1 -> 0,1
+ x = (x * 255).numpy().astype(np.uint8)
+ outputs.append(Image.fromarray(x))
+
+ if color_transfer_post_process:
+ for i in range(1, len(outputs)):
+ outputs[i] = Image.fromarray(color_transfer(np.uint8(outputs[i]), np.uint8(outputs[0])))
+
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+ if imageio_backend:
+ if path.endswith("mp4"):
+ imageio.mimsave(path, outputs, fps=fps)
+ else:
+ imageio.mimsave(path, outputs, duration=(1000 * 1/fps))
+ else:
+ if path.endswith("mp4"):
+ path = path.replace('.mp4', '.gif')
+ outputs[0].save(path, format='GIF', append_images=outputs, save_all=True, duration=100, loop=0)
+
+def get_image_to_video_latent(validation_image_start, validation_image_end, video_length, sample_size):
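+    # Builds a pixel-space conditioning video plus a mask in which 0 marks frames to keep
+    # (the provided start/end images) and 255 marks frames that should be generated.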
+ if validation_image_start is not None and validation_image_end is not None:
+ if type(validation_image_start) is str and os.path.isfile(validation_image_start):
+ image_start = clip_image = Image.open(validation_image_start).convert("RGB")
+ image_start = image_start.resize([sample_size[1], sample_size[0]])
+ clip_image = clip_image.resize([sample_size[1], sample_size[0]])
+ else:
+ image_start = clip_image = validation_image_start
+ image_start = [_image_start.resize([sample_size[1], sample_size[0]]) for _image_start in image_start]
+ clip_image = [_clip_image.resize([sample_size[1], sample_size[0]]) for _clip_image in clip_image]
+
+ if type(validation_image_end) is str and os.path.isfile(validation_image_end):
+ image_end = Image.open(validation_image_end).convert("RGB")
+ image_end = image_end.resize([sample_size[1], sample_size[0]])
+ else:
+ image_end = validation_image_end
+ image_end = [_image_end.resize([sample_size[1], sample_size[0]]) for _image_end in image_end]
+
+ if type(image_start) is list:
+ clip_image = clip_image[0]
+ start_video = torch.cat(
+ [torch.from_numpy(np.array(_image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0) for _image_start in image_start],
+ dim=2
+ )
+ input_video = torch.tile(start_video[:, :, :1], [1, 1, video_length, 1, 1])
+ input_video[:, :, :len(image_start)] = start_video
+
+ input_video_mask = torch.zeros_like(input_video[:, :1])
+ input_video_mask[:, :, len(image_start):] = 255
+ else:
+ input_video = torch.tile(
+ torch.from_numpy(np.array(image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0),
+ [1, 1, video_length, 1, 1]
+ )
+ input_video_mask = torch.zeros_like(input_video[:, :1])
+ input_video_mask[:, :, 1:] = 255
+
+ if type(image_end) is list:
+ image_end = [_image_end.resize(image_start[0].size if type(image_start) is list else image_start.size) for _image_end in image_end]
+ end_video = torch.cat(
+ [torch.from_numpy(np.array(_image_end)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0) for _image_end in image_end],
+ dim=2
+ )
+ input_video[:, :, -len(end_video):] = end_video
+
+ input_video_mask[:, :, -len(image_end):] = 0
+ else:
+ image_end = image_end.resize(image_start[0].size if type(image_start) is list else image_start.size)
+ input_video[:, :, -1:] = torch.from_numpy(np.array(image_end)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0)
+ input_video_mask[:, :, -1:] = 0
+
+ input_video = input_video / 255
+
+ elif validation_image_start is not None:
+ if type(validation_image_start) is str and os.path.isfile(validation_image_start):
+ image_start = clip_image = Image.open(validation_image_start).convert("RGB")
+ image_start = image_start.resize([sample_size[1], sample_size[0]])
+ clip_image = clip_image.resize([sample_size[1], sample_size[0]])
+ else:
+ image_start = clip_image = validation_image_start
+ image_start = [_image_start.resize([sample_size[1], sample_size[0]]) for _image_start in image_start]
+ clip_image = [_clip_image.resize([sample_size[1], sample_size[0]]) for _clip_image in clip_image]
+ image_end = None
+
+ if type(image_start) is list:
+ clip_image = clip_image[0]
+ start_video = torch.cat(
+ [torch.from_numpy(np.array(_image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0) for _image_start in image_start],
+ dim=2
+ )
+ input_video = torch.tile(start_video[:, :, :1], [1, 1, video_length, 1, 1])
+ input_video[:, :, :len(image_start)] = start_video
+ input_video = input_video / 255
+
+ input_video_mask = torch.zeros_like(input_video[:, :1])
+ input_video_mask[:, :, len(image_start):] = 255
+ else:
+ input_video = torch.tile(
+ torch.from_numpy(np.array(image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0),
+ [1, 1, video_length, 1, 1]
+ ) / 255
+ input_video_mask = torch.zeros_like(input_video[:, :1])
+            input_video_mask[:, :, 1:] = 255
+ else:
+ image_start = None
+ image_end = None
+ input_video = torch.zeros([1, 3, video_length, sample_size[0], sample_size[1]])
+ input_video_mask = torch.ones([1, 1, video_length, sample_size[0], sample_size[1]]) * 255
+ clip_image = None
+
+ del image_start
+ del image_end
+ gc.collect()
+
+ return input_video, input_video_mask, clip_image
+
+def get_video_to_video_latent(input_video_path, video_length, sample_size):
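+ # Read frames with OpenCV (or accept a pre-decoded frame list), resize to sample_size (H, W),
+ # truncate to video_length, and return a [1, C, T, H, W] tensor in [0, 1] plus an all-255 mask.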
+ if type(input_video_path) is str:
+ cap = cv2.VideoCapture(input_video_path)
+ input_video = []
+ while True:
+ ret, frame = cap.read()
+ if not ret:
+ break
+ frame = cv2.resize(frame, (sample_size[1], sample_size[0]))
+ input_video.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+ cap.release()
+ else:
+ input_video = input_video_path
+
+ input_video = torch.from_numpy(np.array(input_video))[:video_length]
+ input_video = input_video.permute([3, 0, 1, 2]).unsqueeze(0) / 255
+
+ input_video_mask = torch.zeros_like(input_video[:, :1])
+ input_video_mask[:, :, :] = 255
+
+ return input_video, input_video_mask, None
\ No newline at end of file
diff --git a/nodes.py b/nodes.py
index 0c6ff19..849eb8b 100644
--- a/nodes.py
+++ b/nodes.py
@@ -3,11 +3,17 @@ import torch
import folder_paths
import comfy.model_management as mm
from comfy.utils import ProgressBar
-from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
+from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler, DDIMScheduler, PNDMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler
+
from diffusers.models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel
from .pipeline_cogvideox import CogVideoXPipeline
from contextlib import nullcontext
+from .cogvideox_fun.transformer_3d import CogVideoXTransformer3DModel as CogVideoXTransformer3DModelFun
+from .cogvideox_fun.autoencoder_magvit import AutoencoderKLCogVideoX as AutoencoderKLCogVideoXFun
+from .cogvideox_fun.utils import get_image_to_video_latent, ASPECT_RATIO_512, get_closest_ratio, to_pil
+from .cogvideox_fun.pipeline_cogvideox_inpaint import CogVideoX_Fun_Pipeline_Inpaint
+from PIL import Image
import logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -24,6 +30,7 @@ class DownloadAndLoadCogVideoModel:
"THUDM/CogVideoX-2b",
"THUDM/CogVideoX-5b",
"bertjiazheng/KoolCogVideoX-5b",
+ "kijai/CogVideoX-Fun-pruned"
],
),
@@ -50,10 +57,16 @@ class DownloadAndLoadCogVideoModel:
dtype = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[precision]
- if "2b" in model:
+ if "Fun" in model:
+ base_path = os.path.join(folder_paths.models_dir, "CogVideoX_Fun", "CogVideoX-Fun-5b-InP")
+ if not os.path.exists(base_path):
+ base_path = os.path.join(folder_paths.models_dir, "CogVideo", "CogVideoX-Fun-5b-InP")
+
+ elif "2b" in model:
base_path = os.path.join(folder_paths.models_dir, "CogVideo", "CogVideo2B")
elif "5b" in model:
base_path = os.path.join(folder_paths.models_dir, "CogVideo", (model.split("/")[-1]))
+
if not os.path.exists(base_path):
log.info(f"Downloading model to: {base_path}")
@@ -65,25 +78,36 @@ class DownloadAndLoadCogVideoModel:
local_dir=base_path,
local_dir_use_symlinks=False,
)
+
+ if "Fun" in model:
+ transformer = CogVideoXTransformer3DModelFun.from_pretrained(base_path, subfolder="transformer")
+ else:
+ transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder="transformer")
+
+ transformer = transformer.to(dtype).to(offload_device)
+
if fp8_transformer == "enabled" or fp8_transformer == "fastmode":
- transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder="transformer").to(offload_device)
if "2b" in model:
for name, param in transformer.named_parameters():
if name != "pos_embedding":
param.data = param.data.to(torch.float8_e4m3fn)
else:
transformer.to(torch.float8_e4m3fn)
-
+
if fp8_transformer == "fastmode":
from .fp8_optimization import convert_fp8_linear
convert_fp8_linear(transformer, dtype)
- else:
- transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder="transformer").to(dtype).to(offload_device)
- vae = AutoencoderKLCogVideoX.from_pretrained(base_path, subfolder="vae").to(dtype).to(offload_device)
+ if "Fun" in model:
+ vae = AutoencoderKLCogVideoXFun.from_pretrained(base_path, subfolder="vae").to(dtype).to(offload_device)
+ else:
+ vae = AutoencoderKLCogVideoX.from_pretrained(base_path, subfolder="vae").to(dtype).to(offload_device)
scheduler = CogVideoXDDIMScheduler.from_pretrained(base_path, subfolder="scheduler")
- pipe = CogVideoXPipeline(vae, transformer, scheduler)
+ if "Fun" in model:
+ pipe = CogVideoX_Fun_Pipeline_Inpaint(vae, transformer, scheduler)
+ else:
+ pipe = CogVideoXPipeline(vae, transformer, scheduler)
if enable_sequential_cpu_offload:
pipe.enable_sequential_cpu_offload()
@@ -92,7 +116,7 @@ class DownloadAndLoadCogVideoModel:
pipe.transformer.to(memory_format=torch.channels_last)
pipe.transformer = torch.compile(pipe.transformer, mode="max-autotune", fullgraph=True)
elif compile == "onediff":
- from onediffx import compile_pipe, quantize_pipe
+ from onediffx import compile_pipe
os.environ['NEXFORT_FX_FORCE_TRITON_SDPA'] = '1'
pipe = compile_pipe(
@@ -280,6 +304,7 @@ class CogVideoSampler:
"optional": {
"samples": ("LATENT", ),
"denoise_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
+ "image_cond_latents": ("LATENT", ),
}
}
@@ -288,7 +313,8 @@ class CogVideoSampler:
FUNCTION = "process"
CATEGORY = "CogVideoWrapper"
- def process(self, pipeline, positive, negative, steps, cfg, seed, height, width, num_frames, scheduler, t_tile_length, t_tile_overlap, samples=None, denoise_strength=1.0):
+ def process(self, pipeline, positive, negative, steps, cfg, seed, height, width, num_frames, scheduler, t_tile_length, t_tile_overlap, samples=None,
+ denoise_strength=1.0, image_cond_latents=None):
mm.soft_empty_cache()
assert t_tile_length > t_tile_overlap, "t_tile_length must be greater than t_tile_overlap"
@@ -328,6 +354,7 @@ class CogVideoSampler:
t_tile_overlap = t_tile_overlap,
guidance_scale=cfg,
latents=samples["samples"] if samples is not None else None,
+ image_cond_latents=image_cond_latents["samples"] if image_cond_latents is not None else None,
denoise_strength=denoise_strength,
prompt_embeds=positive.to(dtype).to(device),
negative_prompt_embeds=negative.to(dtype).to(device),
@@ -387,7 +414,7 @@ class CogVideoDecode:
latents = latents.to(vae.dtype)
latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width]
latents = 1 / vae.config.scaling_factor * latents
-
+
frames = vae.decode(latents).sample
if not pipeline["cpu_offloading"]:
vae.to(offload_device)
@@ -399,18 +426,127 @@ class CogVideoDecode:
return (video,)
+class CogVideoXFunSampler:
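+ # Image-to-video sampler node for the CogVideoX-Fun inpaint pipeline, conditioned on optional start/end frames.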
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ "pipeline": ("COGVIDEOPIPE",),
+ "positive": ("CONDITIONING", ),
+ "negative": ("CONDITIONING", ),
+ "video_length": ("INT", {"default": 49, "min": 5, "max": 49, "step": 4}),
+ "base_resolution": (
+ [
+ 512,
+ 768,
+ 960,
+ 1024,
+ ], {"default": 768}
+ ),
+ "seed": ("INT", {"default": 43, "min": 0, "max": 0xffffffffffffffff}),
+ "steps": ("INT", {"default": 50, "min": 1, "max": 200, "step": 1}),
+ "cfg": ("FLOAT", {"default": 6.0, "min": 1.0, "max": 20.0, "step": 0.01}),
+ "scheduler": (
+ [
+ "Euler",
+ "Euler A",
+ "DPM++",
+ "PNDM",
+ "DDIM",
+ ],
+ {
+ "default": 'DDIM'
+ }
+ )
+ },
+ "optional":{
+ "start_img": ("IMAGE",),
+ "end_img": ("IMAGE",),
+ },
+ }
+
+ RETURN_TYPES = ("COGVIDEOPIPE", "LATENT",)
+ RETURN_NAMES = ("cogvideo_pipe", "samples",)
+ FUNCTION = "process"
+ CATEGORY = "CogVideoWrapper"
+
+ def process(self, pipeline, positive, negative, video_length, base_resolution, seed, steps, cfg, scheduler, start_img=None, end_img=None):
+ device = mm.get_torch_device()
+ offload_device = mm.unet_offload_device()
+ pipe = pipeline["pipe"]
+ dtype = pipeline["dtype"]
+
+ pipe.enable_model_cpu_offload()
+
+ mm.soft_empty_cache()
+
+ start_img = [to_pil(_start_img) for _start_img in start_img] if start_img is not None else None
+ end_img = [to_pil(_end_img) for _end_img in end_img] if end_img is not None else None
+ # Compute the closest supported aspect ratio and derive a target height/width rounded to multiples of 16
+ aspect_ratio_sample_size = {key: [x / 512 * base_resolution for x in ASPECT_RATIO_512[key]] for key in ASPECT_RATIO_512}
+ original_width, original_height = start_img[0].size if type(start_img) is list else Image.open(start_img).size
+ closest_size, closest_ratio = get_closest_ratio(original_height, original_width, ratios=aspect_ratio_sample_size)
+ height, width = [int(x / 16) * 16 for x in closest_size]
+
+ base_path = pipeline["base_path"]
+
+ # Load the selected noise scheduler from the model's scheduler config
+ if scheduler == "DPM++":
+ noise_scheduler = DPMSolverMultistepScheduler.from_pretrained(base_path, subfolder='scheduler')
+ elif scheduler == "Euler":
+ noise_scheduler = EulerDiscreteScheduler.from_pretrained(base_path, subfolder='scheduler')
+ elif scheduler == "Euler A":
+ noise_scheduler = EulerAncestralDiscreteScheduler.from_pretrained(base_path, subfolder='scheduler')
+ elif scheduler == "PNDM":
+ noise_scheduler = PNDMScheduler.from_pretrained(base_path, subfolder='scheduler')
+ elif scheduler == "DDIM":
+ noise_scheduler = DDIMScheduler.from_pretrained(base_path, subfolder='scheduler')
+ pipe.scheduler = noise_scheduler
+
+ #if not pipeline["cpu_offloading"]:
+ # pipe.transformer.to(device)
+ generator= torch.Generator(device=device).manual_seed(seed)
+
+ autocastcondition = not pipeline["onediff"]
+ autocast_context = torch.autocast(mm.get_autocast_device(device)) if autocastcondition else nullcontext()
+ with autocast_context:
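+ # Snap video_length to the VAE's temporal compression grid (ratio * k + 1 frames)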
+ video_length = int((video_length - 1) // pipe.vae.config.temporal_compression_ratio * pipe.vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1
+ input_video, input_video_mask, clip_image = get_image_to_video_latent(start_img, end_img, video_length=video_length, sample_size=(height, width))
+
+ latents = pipe(
+ prompt_embeds=positive.to(dtype).to(device),
+ negative_prompt_embeds=negative.to(dtype).to(device),
+ num_frames = video_length,
+ height = height,
+ width = width,
+ generator = generator,
+ guidance_scale = cfg,
+ num_inference_steps = steps,
+
+ video = input_video,
+ mask_video = input_video_mask,
+ comfyui_progressbar = True,
+ )
+ #if not pipeline["cpu_offloading"]:
+ # pipe.transformer.to(offload_device)
+ mm.soft_empty_cache()
+ log.info(f"CogVideoXFunSampler latents shape: {latents.shape}")
+
+ return (pipeline, {"samples": latents})
NODE_CLASS_MAPPINGS = {
"DownloadAndLoadCogVideoModel": DownloadAndLoadCogVideoModel,
"CogVideoSampler": CogVideoSampler,
"CogVideoDecode": CogVideoDecode,
"CogVideoTextEncode": CogVideoTextEncode,
- "CogVideoImageEncode": CogVideoImageEncode
+ "CogVideoImageEncode": CogVideoImageEncode,
+ "CogVideoXFunSampler": CogVideoXFunSampler
}
NODE_DISPLAY_NAME_MAPPINGS = {
"DownloadAndLoadCogVideoModel": "(Down)load CogVideo Model",
"CogVideoSampler": "CogVideo Sampler",
"CogVideoDecode": "CogVideo Decode",
"CogVideoTextEncode": "CogVideo TextEncode",
- "CogVideoImageEncode": "CogVideo ImageEncode"
+ "CogVideoImageEncode": "CogVideo ImageEncode",
+ "CogVideoXFunSampler": "CogVideoXFun Sampler"
}
\ No newline at end of file
diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py
index 09eeeec..2bae2f8 100644
--- a/pipeline_cogvideox.py
+++ b/pipeline_cogvideox.py
@@ -333,6 +333,7 @@ class CogVideoXPipeline(DiffusionPipeline):
eta: float = 0.0,
generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
latents: Optional[torch.Tensor] = None,
+ image_cond_latents: Optional[torch.Tensor] = None,
prompt_embeds: Optional[torch.Tensor] = None,
negative_prompt_embeds: Optional[torch.Tensor] = None,
device = torch.device("cuda"),
@@ -442,6 +443,20 @@ class CogVideoXPipeline(DiffusionPipeline):
latents
)
latents = latents.to(self.transformer.dtype)
+
+ # 5.5. Prepare image conditioning latents: zero-pad them along the frame dimension so they cover all num_frames
+ if image_cond_latents is not None:
+ image_cond_latents = torch.cat(image_cond_latents, dim=0).to(self.transformer.dtype)#.permute(0, 2, 1, 3, 4) # [B, F, C, H, W]
+
+ padding_shape = (
+ batch_size,
+ num_frames - 1,
+ latent_channels,
+ height // self.vae_scale_factor_spatial,
+ width // self.vae_scale_factor_spatial,
+ )
+ latent_padding = torch.zeros(padding_shape, device=device, dtype=self.transformer.dtype)
+ image_cond_latents = torch.cat([image_cond_latents, latent_padding], dim=1)
# 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
@@ -582,6 +597,10 @@ class CogVideoXPipeline(DiffusionPipeline):
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
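+ # When image conditioning latents are provided, concatenate them with the noisy latents along the channel dim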
+ if image_cond_latents is not None:
+ latent_image_input = torch.cat([image_cond_latents] * 2) if do_classifier_free_guidance else image_cond_latents
+ latent_model_input = torch.cat([latent_model_input, latent_image_input], dim=2)
+
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timestep = t.expand(latent_model_input.shape[0])