From 2074ba578ec4170cb67b4f2255fca8fcc78aaf1f Mon Sep 17 00:00:00 2001
From: kijai <40791699+kijai@users.noreply.github.com>
Date: Fri, 8 Nov 2024 21:24:20 +0200
Subject: [PATCH 01/49] doesn't work yet

---
 convert_weight_sat2hf.py           | 303 +++++++++++++++++++++++++
 custom_cogvideox_transformer_3d.py |  74 +++++-
 embeddings.py                      | 353 +++++++++++++++++++++++++++++
 model_loading.py                   |   1 +
 pipeline_cogvideox.py              |   3 +-
 5 files changed, 725 insertions(+), 9 deletions(-)
 create mode 100644 convert_weight_sat2hf.py
 create mode 100644 embeddings.py

diff --git a/convert_weight_sat2hf.py b/convert_weight_sat2hf.py
new file mode 100644
index 0000000..545925b
--- /dev/null
+++ b/convert_weight_sat2hf.py
@@ -0,0 +1,303 @@
+"""
+
+The script demonstrates how to convert the weights of the CogVideoX model from SAT to Hugging Face format.
+This script supports the conversion of the following models:
+- CogVideoX-2B
+- CogVideoX-5B, CogVideoX-5B-I2V
+- CogVideoX1.1-5B, CogVideoX1.1-5B-I2V
+
+Original Script:
+https://github.com/huggingface/diffusers/blob/main/scripts/convert_cogvideox_to_diffusers.py
+
+"""
+import argparse
+from typing import Any, Dict
+
+import torch
+from transformers import T5EncoderModel, T5Tokenizer
+
+from diffusers import (
+    AutoencoderKLCogVideoX,
+    CogVideoXDDIMScheduler,
+    CogVideoXImageToVideoPipeline,
+    CogVideoXPipeline,
+    #CogVideoXTransformer3DModel,
+)
+from custom_cogvideox_transformer_3d import CogVideoXTransformer3DModel
+
+
+def reassign_query_key_value_inplace(key: str, state_dict: Dict[str, Any]):
+    to_q_key = key.replace("query_key_value", "to_q")
+    to_k_key = key.replace("query_key_value", "to_k")
+    to_v_key = key.replace("query_key_value", "to_v")
+    to_q, to_k, to_v = torch.chunk(state_dict[key], chunks=3, dim=0)
+    state_dict[to_q_key] = to_q
+    state_dict[to_k_key] = to_k
+    state_dict[to_v_key] = to_v
+    state_dict.pop(key)
+
+
+def reassign_query_key_layernorm_inplace(key: str, state_dict: Dict[str, Any]):
+    layer_id, weight_or_bias = key.split(".")[-2:]
+
+    if "query" in key:
+        new_key = f"transformer_blocks.{layer_id}.attn1.norm_q.{weight_or_bias}"
+    elif "key" in key:
+        new_key = f"transformer_blocks.{layer_id}.attn1.norm_k.{weight_or_bias}"
+
+    state_dict[new_key] = state_dict.pop(key)
+
+
+def reassign_adaln_norm_inplace(key: str, state_dict: Dict[str, Any]):
+    layer_id, _, weight_or_bias = key.split(".")[-3:]
+
+    weights_or_biases = state_dict[key].chunk(12, dim=0)
+    norm1_weights_or_biases = torch.cat(weights_or_biases[0:3] + weights_or_biases[6:9])
+    norm2_weights_or_biases = torch.cat(weights_or_biases[3:6] + weights_or_biases[9:12])
+
+    norm1_key = f"transformer_blocks.{layer_id}.norm1.linear.{weight_or_bias}"
+    state_dict[norm1_key] = norm1_weights_or_biases
+
+    norm2_key = f"transformer_blocks.{layer_id}.norm2.linear.{weight_or_bias}"
+    state_dict[norm2_key] = norm2_weights_or_biases
+
+    state_dict.pop(key)
+
+
+def remove_keys_inplace(key: str, state_dict: Dict[str, Any]):
+    state_dict.pop(key)
+
+
+def replace_up_keys_inplace(key: str, state_dict: Dict[str, Any]):
+    key_split = key.split(".")
+    layer_index = int(key_split[2])
+    replace_layer_index = 4 - 1 - layer_index
+
+    key_split[1] = "up_blocks"
+    key_split[2] = str(replace_layer_index)
+    new_key = ".".join(key_split)
+
+    state_dict[new_key] = state_dict.pop(key)
+
+
+TRANSFORMER_KEYS_RENAME_DICT = {
+    "transformer.final_layernorm": "norm_final",
+    "transformer": "transformer_blocks",
+    "attention": "attn1",
+    "mlp": "ff.net",
+    "dense_h_to_4h": "0.proj",
+    "dense_4h_to_h": "2",
+    ".layers": "",
"dense": "to_out.0", + "input_layernorm": "norm1.norm", + "post_attn1_layernorm": "norm2.norm", + "time_embed.0": "time_embedding.linear_1", + "time_embed.2": "time_embedding.linear_2", + "mixins.patch_embed": "patch_embed", + "mixins.final_layer.norm_final": "norm_out.norm", + "mixins.final_layer.linear": "proj_out", + "mixins.final_layer.adaLN_modulation.1": "norm_out.linear", + "mixins.pos_embed.pos_embedding": "patch_embed.pos_embedding", # Specific to CogVideoX-5b-I2V +} + +TRANSFORMER_SPECIAL_KEYS_REMAP = { + "query_key_value": reassign_query_key_value_inplace, + "query_layernorm_list": reassign_query_key_layernorm_inplace, + "key_layernorm_list": reassign_query_key_layernorm_inplace, + "adaln_layer.adaLN_modulations": reassign_adaln_norm_inplace, + "embed_tokens": remove_keys_inplace, + "freqs_sin": remove_keys_inplace, + "freqs_cos": remove_keys_inplace, + "position_embedding": remove_keys_inplace, +} + +VAE_KEYS_RENAME_DICT = { + "block.": "resnets.", + "down.": "down_blocks.", + "downsample": "downsamplers.0", + "upsample": "upsamplers.0", + "nin_shortcut": "conv_shortcut", + "encoder.mid.block_1": "encoder.mid_block.resnets.0", + "encoder.mid.block_2": "encoder.mid_block.resnets.1", + "decoder.mid.block_1": "decoder.mid_block.resnets.0", + "decoder.mid.block_2": "decoder.mid_block.resnets.1", +} + +VAE_SPECIAL_KEYS_REMAP = { + "loss": remove_keys_inplace, + "up.": replace_up_keys_inplace, +} + +TOKENIZER_MAX_LENGTH = 226 + + +def get_state_dict(saved_dict: Dict[str, Any]) -> Dict[str, Any]: + state_dict = saved_dict + if "model" in saved_dict.keys(): + state_dict = state_dict["model"] + if "module" in saved_dict.keys(): + state_dict = state_dict["module"] + if "state_dict" in saved_dict.keys(): + state_dict = state_dict["state_dict"] + return state_dict + + +def update_state_dict_inplace(state_dict: Dict[str, Any], old_key: str, new_key: str) -> Dict[str, Any]: + state_dict[new_key] = state_dict.pop(old_key) + + +def convert_transformer( + ckpt_path: str, + num_layers: int, + num_attention_heads: int, + use_rotary_positional_embeddings: bool, + i2v: bool, + dtype: torch.dtype, +): + PREFIX_KEY = "model.diffusion_model." 
+
+    original_state_dict = get_state_dict(torch.load(ckpt_path, map_location="cpu", mmap=True))
+    transformer = CogVideoXTransformer3DModel(
+        in_channels=32 if i2v else 16,
+        num_layers=num_layers,
+        num_attention_heads=num_attention_heads,
+        use_rotary_positional_embeddings=use_rotary_positional_embeddings,
+        use_learned_positional_embeddings=i2v,
+    ).to(dtype=dtype)
+
+    for key in list(original_state_dict.keys()):
+        new_key = key[len(PREFIX_KEY):]
+        for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items():
+            new_key = new_key.replace(replace_key, rename_key)
+        update_state_dict_inplace(original_state_dict, key, new_key)
+
+    for key in list(original_state_dict.keys()):
+        for special_key, handler_fn_inplace in TRANSFORMER_SPECIAL_KEYS_REMAP.items():
+            if special_key not in key:
+                continue
+            handler_fn_inplace(key, original_state_dict)
+    transformer.load_state_dict(original_state_dict, strict=True)
+    return transformer
+
+
+def convert_vae(ckpt_path: str, scaling_factor: float, dtype: torch.dtype):
+    original_state_dict = get_state_dict(torch.load(ckpt_path, map_location="cpu", mmap=True))
+    vae = AutoencoderKLCogVideoX(scaling_factor=scaling_factor).to(dtype=dtype)
+
+    for key in list(original_state_dict.keys()):
+        new_key = key[:]
+        for replace_key, rename_key in VAE_KEYS_RENAME_DICT.items():
+            new_key = new_key.replace(replace_key, rename_key)
+        update_state_dict_inplace(original_state_dict, key, new_key)
+
+    for key in list(original_state_dict.keys()):
+        for special_key, handler_fn_inplace in VAE_SPECIAL_KEYS_REMAP.items():
+            if special_key not in key:
+                continue
+            handler_fn_inplace(key, original_state_dict)
+
+    vae.load_state_dict(original_state_dict, strict=True)
+    return vae
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--transformer_ckpt_path", type=str, default=None, help="Path to original transformer checkpoint"
+    )
+    parser.add_argument("--vae_ckpt_path", type=str, default=None, help="Path to original vae checkpoint")
+    parser.add_argument("--output_path", type=str, required=True, help="Path where converted model should be saved")
+    parser.add_argument("--fp16", action="store_true", default=False, help="Whether to save the model weights in fp16")
+    parser.add_argument("--bf16", action="store_true", default=False, help="Whether to save the model weights in bf16")
+    parser.add_argument(
+        "--push_to_hub", action="store_true", default=False, help="Whether to push to HF Hub after saving"
+    )
+    parser.add_argument(
+        "--text_encoder_cache_dir", type=str, default=None, help="Path to text encoder cache directory"
+    )
+    # For CogVideoX-2B, num_layers is 30. For 5B, it is 42
+    parser.add_argument("--num_layers", type=int, default=30, help="Number of transformer blocks")
+    # For CogVideoX-2B, num_attention_heads is 30. For 5B, it is 48
+    parser.add_argument("--num_attention_heads", type=int, default=30, help="Number of attention heads")
+    # For CogVideoX-2B, use_rotary_positional_embeddings is False. For 5B, it is True
+    parser.add_argument(
+        "--use_rotary_positional_embeddings", action="store_true", default=False, help="Whether to use RoPE or not"
+    )
+    # For CogVideoX-2B, scaling_factor is 1.15258426. For 5B, it is 0.7
+    parser.add_argument("--scaling_factor", type=float, default=1.15258426, help="Scaling factor in the VAE")
+    # For CogVideoX-2B, snr_shift_scale is 3.0. For 5B, it is 1.0
+    parser.add_argument("--snr_shift_scale", type=float, default=3.0, help="SNR shift scale for the scheduler")
+    parser.add_argument("--i2v", action="store_true", default=False, help="Whether the checkpoint is an image-to-video (I2V) model")
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = get_args()
+
+    transformer = None
+    vae = None
+
+    if args.fp16 and args.bf16:
+        raise ValueError("You cannot pass both --fp16 and --bf16 at the same time.")
+
+    dtype = torch.float16 if args.fp16 else torch.bfloat16 if args.bf16 else torch.float32
+
+    if args.transformer_ckpt_path is not None:
+        transformer = convert_transformer(
+            args.transformer_ckpt_path,
+            args.num_layers,
+            args.num_attention_heads,
+            args.use_rotary_positional_embeddings,
+            args.i2v,
+            dtype,
+        )
+    if args.vae_ckpt_path is not None:
+        vae = convert_vae(args.vae_ckpt_path, args.scaling_factor, dtype)
+
+    #text_encoder_id = "/share/official_pretrains/hf_home/t5-v1_1-xxl"
+    #tokenizer = T5Tokenizer.from_pretrained(text_encoder_id, model_max_length=TOKENIZER_MAX_LENGTH)
+    #text_encoder = T5EncoderModel.from_pretrained(text_encoder_id, cache_dir=args.text_encoder_cache_dir)
+
+    # Apparently, the conversion does not work anymore without this :shrug:
+    #for param in text_encoder.parameters():
+    #    param.data = param.data.contiguous()
+
+    scheduler = CogVideoXDDIMScheduler.from_config(
+        {
+            "snr_shift_scale": args.snr_shift_scale,
+            "beta_end": 0.012,
+            "beta_schedule": "scaled_linear",
+            "beta_start": 0.00085,
+            "clip_sample": False,
+            "num_train_timesteps": 1000,
+            "prediction_type": "v_prediction",
+            "rescale_betas_zero_snr": True,
+            "set_alpha_to_one": True,
+            "timestep_spacing": "trailing",
+        }
+    )
+    if args.i2v:
+        pipeline_cls = CogVideoXImageToVideoPipeline
+    else:
+        pipeline_cls = CogVideoXPipeline
+
+    pipe = pipeline_cls(
+        tokenizer=None,
+        text_encoder=None,
+        vae=vae,
+        transformer=transformer,
+        scheduler=scheduler,
+    )
+
+    if args.fp16:
+        pipe = pipe.to(dtype=torch.float16)
+    if args.bf16:
+        pipe = pipe.to(dtype=torch.bfloat16)
+
+    # We don't use variant here because the model must be run in fp16 (2B) or bf16 (5B). It would be weird
+    # for users to specify variant when the default is not fp32 and they want to run with the correct default (which
+    # is either fp16/bf16 here).
+
+    # This is necessary for users with insufficient memory, such as those using Colab and notebooks,
+    # as it can save some memory used for model loading.
+    pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB", push_to_hub=args.push_to_hub)

diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py
index 09e6771..0e36cba 100644
--- a/custom_cogvideox_transformer_3d.py
+++ b/custom_cogvideox_transformer_3d.py
@@ -21,6 +21,8 @@ import torch.nn.functional as F
 import numpy as np
 from einops import rearrange
+from functools import reduce
+from operator import mul
 
 from diffusers.configuration_utils import ConfigMixin, register_to_config
 from diffusers.utils import logging
@@ -32,6 +34,7 @@ from diffusers.models.modeling_outputs import Transformer2DModelOutput
 from diffusers.models.modeling_utils import ModelMixin
 from diffusers.models.normalization import AdaLayerNorm, CogVideoXLayerNormZero
 from diffusers.loaders import PeftAdapterMixin
+from .embeddings import CogVideoX1_1PatchEmbed
 
 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
 
@@ -61,6 +64,14 @@ def fft(tensor):
     return low_freq_fft, high_freq_fft
 
 
+def rotate_half(x):
+    x = rearrange(x, "...
(d r) -> ... d r", r=2) + x1, x2 = x.unbind(dim=-1) + x = torch.stack((-x2, x1), dim=-1) + return rearrange(x, "... d r -> ... (d r)") + + + class CogVideoXAttnProcessor2_0: r""" Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on @@ -70,6 +81,16 @@ class CogVideoXAttnProcessor2_0: def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") + def rotary(self, t, rope_args): + def reshape_freq(freqs): + freqs = freqs[: rope_args["T"], : rope_args["H"], : rope_args["W"]].contiguous() + freqs = rearrange(freqs, "t h w d -> (t h w) d") + freqs = freqs.unsqueeze(0).unsqueeze(0) + return freqs + freqs_cos = reshape_freq(self.freqs_cos).to(t.dtype) + freqs_sin = reshape_freq(self.freqs_sin).to(t.dtype) + + return t * freqs_cos + rotate_half(t) * freqs_sin @torch.compiler.disable() def __call__( self, @@ -78,6 +99,7 @@ class CogVideoXAttnProcessor2_0: encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, + rope_args: Optional[dict] = None ) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) @@ -107,13 +129,33 @@ class CogVideoXAttnProcessor2_0: if attn.norm_k is not None: key = attn.norm_k(key) + + # Apply RoPE if needed if image_rotary_emb is not None: + self.freqs_cos = image_rotary_emb[0] + self.freqs_sin = image_rotary_emb[1] + print("rope args", rope_args) #{'T': 6, 'H': 30, 'W': 45, 'seq_length': 8775} + print("freqs_cos", self.freqs_cos.shape) #torch.Size([13, 30, 45, 64]) + print("freqs_sin", self.freqs_sin.shape) + + from diffusers.models.embeddings import apply_rotary_emb - query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) + #query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) + query = torch.cat( + (query[:, :, : text_seq_length], + self.rotary(query[:, :, text_seq_length:], + rope_args)), + dim=2) + if not attn.is_cross_attention: - key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) + #key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) + key = torch.cat( + (key[ :, :, : text_seq_length], + self.rotary(key[:, :, text_seq_length:], + rope_args)), + dim=2) if SAGEATTN_IS_AVAILABLE: hidden_states = sageattn(query, key, value, is_causal=False) @@ -303,6 +345,7 @@ class CogVideoXBlock(nn.Module): fastercache_counter=0, fastercache_start_step=15, fastercache_device="cuda:0", + rope_args=None ) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) # norm & modulate @@ -335,7 +378,7 @@ class CogVideoXBlock(nn.Module): attn_hidden_states, attn_encoder_hidden_states = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, - image_rotary_emb=image_rotary_emb, + image_rotary_emb=image_rotary_emb, rope_args=rope_args ) if fastercache_counter == fastercache_start_step: self.cached_hidden_states = [attn_hidden_states.to(fastercache_device), attn_hidden_states.to(fastercache_device)] @@ -458,12 +501,12 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): ) # 1. 
Patch embedding - self.patch_embed = CogVideoXPatchEmbed( + self.patch_embed = CogVideoX1_1PatchEmbed( patch_size=patch_size, in_channels=in_channels, embed_dim=inner_dim, text_embed_dim=text_embed_dim, - bias=True, + #bias=True, sample_width=sample_width, sample_height=sample_height, sample_frames=sample_frames, @@ -507,7 +550,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): norm_eps=norm_eps, chunk_dim=1, ) - self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels) + self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * patch_size * out_channels) self.gradient_checkpointing = False @@ -635,6 +678,8 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): return_dict: bool = True, ): batch_size, num_frames, channels, height, width = hidden_states.shape + p = self.config.patch_size + print("p", p) # 1. Time embedding timesteps = timestep @@ -646,6 +691,18 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): t_emb = t_emb.to(dtype=hidden_states.dtype) emb = self.time_embedding(t_emb, timestep_cond) + # RoPE + seq_length = num_frames * height * width // reduce(mul, [p, p, p]) + rope_T = num_frames // p + rope_H = height // p + rope_W = width // p + rope_args = { + "T": rope_T, + "H": rope_H, + "W": rope_W, + "seq_length": seq_length, + } + # 2. Patch embedding hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) hidden_states = self.embedding_dropout(hidden_states) @@ -696,7 +753,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): # Note: we use `-1` instead of `channels`: # - It is okay to `channels` use for CogVideoX-2b and CogVideoX-5b (number of input channels is equal to output channels) # - However, for CogVideoX-5b-I2V also takes concatenated input image latents (number of input channels is twice the output channels) - p = self.config.patch_size + output = hidden_states.reshape(1, num_frames, height // p, width // p, -1, p, p) output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) @@ -728,7 +785,8 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): video_flow_feature=video_flow_features[i] if video_flow_features is not None else None, fuser = self.fuser_list[i] if self.fuser_list is not None else None, fastercache_counter = self.fastercache_counter, - fastercache_device = self.fastercache_device + fastercache_device = self.fastercache_device, + rope_args=rope_args ) if (controlnet_states is not None) and (i < len(controlnet_states)): diff --git a/embeddings.py b/embeddings.py new file mode 100644 index 0000000..9747e91 --- /dev/null +++ b/embeddings.py @@ -0,0 +1,353 @@ +import torch +import torch.nn as nn +import numpy as np +from typing import Tuple, Union + +def get_1d_rotary_pos_embed( + dim: int, + pos: Union[np.ndarray, int], + theta: float = 10000.0, + use_real=False, + linear_factor=1.0, + ntk_factor=1.0, + repeat_interleave_real=True, + freqs_dtype=torch.float32, # torch.float32, torch.float64 (flux) +): + """ + Precompute the frequency tensor for complex exponentials (cis) with given dimensions. + + This function calculates a frequency tensor with complex exponentials using the given dimension 'dim' and the end + index 'end'. The 'theta' parameter scales the frequencies. The returned tensor contains complex values in complex64 + data type. + + Args: + dim (`int`): Dimension of the frequency tensor. 
+        pos (`np.ndarray` or `int`): Position indices for the frequency tensor. [S] or scalar
+        theta (`float`, *optional*, defaults to 10000.0):
+            Scaling factor for frequency computation. Defaults to 10000.0.
+        use_real (`bool`, *optional*):
+            If True, return real part and imaginary part separately. Otherwise, return complex numbers.
+        linear_factor (`float`, *optional*, defaults to 1.0):
+            Scaling factor for the context extrapolation. Defaults to 1.0.
+        ntk_factor (`float`, *optional*, defaults to 1.0):
+            Scaling factor for the NTK-Aware RoPE. Defaults to 1.0.
+        repeat_interleave_real (`bool`, *optional*, defaults to `True`):
+            If `True` and `use_real`, real part and imaginary part are each interleaved with themselves to reach `dim`.
+            Otherwise, they are concatenated with themselves.
+        freqs_dtype (`torch.float32` or `torch.float64`, *optional*, defaults to `torch.float32`):
+            the dtype of the frequency tensor.
+    Returns:
+        `torch.Tensor`: Precomputed frequency tensor with complex exponentials. [S, D/2]
+    """
+    assert dim % 2 == 0
+
+    if isinstance(pos, int):
+        pos = torch.arange(pos)
+    if isinstance(pos, np.ndarray):
+        pos = torch.from_numpy(pos)  # type: ignore  # [S]
+
+    theta = theta * ntk_factor
+    freqs = (
+        1.0
+        / (theta ** (torch.arange(0, dim, 2, dtype=freqs_dtype, device=pos.device)[: (dim // 2)] / dim))
+        / linear_factor
+    )  # [D/2]
+    freqs = torch.outer(pos, freqs)  # type: ignore  # [S, D/2]
+    if use_real and repeat_interleave_real:
+        # flux, hunyuan-dit, cogvideox
+        freqs_cos = freqs.cos().repeat_interleave(2, dim=1).float()  # [S, D]
+        freqs_sin = freqs.sin().repeat_interleave(2, dim=1).float()  # [S, D]
+        return freqs_cos, freqs_sin
+    elif use_real:
+        # stable audio
+        freqs_cos = torch.cat([freqs.cos(), freqs.cos()], dim=-1).float()  # [S, D]
+        freqs_sin = torch.cat([freqs.sin(), freqs.sin()], dim=-1).float()  # [S, D]
+        return freqs_cos, freqs_sin
+    else:
+        # lumina
+        freqs_cis = torch.polar(torch.ones_like(freqs), freqs)  # complex64  # [S, D/2]
+        return freqs_cis
+
+def get_3d_rotary_pos_embed(
+    embed_dim, crops_coords, grid_size, temporal_size, theta: int = 10000, use_real: bool = True
+) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
+    """
+    RoPE for video tokens with 3D structure.
+
+    Args:
+        embed_dim: (`int`):
+            The embedding dimension size, corresponding to hidden_size_head.
+        crops_coords (`Tuple[int]`):
+            The top-left and bottom-right coordinates of the crop.
+        grid_size (`Tuple[int]`):
+            The grid size of the spatial positional embedding (height, width).
+        temporal_size (`int`):
+            The size of the temporal dimension.
+        theta (`float`):
+            Scaling factor for frequency computation.
+
+    Returns:
+        `torch.Tensor`: positional embedding with shape `(temporal_size * grid_size[0] * grid_size[1], embed_dim/2)`.
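+
+    Example (illustrative addition, not in the original file): with `embed_dim=64` (the attention
+    head dim), `crops_coords=((0, 0), (30, 45))`, `grid_size=(30, 45)` and `temporal_size=13`,
+    the call returns a `(cos, sin)` pair of frequency tensors covering a 13x30x45 grid of video
+    tokens, split as dim_t=16 temporal and dim_h=dim_w=24 spatial channels.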
+ """ + if use_real is not True: + raise ValueError(" `use_real = False` is not currently supported for get_3d_rotary_pos_embed") + start, stop = crops_coords + grid_size_h, grid_size_w = grid_size + grid_h = np.linspace(start[0], stop[0], grid_size_h, endpoint=False, dtype=np.float32) + grid_w = np.linspace(start[1], stop[1], grid_size_w, endpoint=False, dtype=np.float32) + grid_t = np.linspace(0, temporal_size, temporal_size, endpoint=False, dtype=np.float32) + + # Compute dimensions for each axis + dim_t = embed_dim // 4 + dim_h = embed_dim // 8 * 3 + dim_w = embed_dim // 8 * 3 + + # Temporal frequencies + freqs_t = get_1d_rotary_pos_embed(dim_t, grid_t, use_real=True) + # Spatial frequencies for height and width + freqs_h = get_1d_rotary_pos_embed(dim_h, grid_h, use_real=True) + freqs_w = get_1d_rotary_pos_embed(dim_w, grid_w, use_real=True) + + # BroadCast and concatenate temporal and spaial frequencie (height and width) into a 3d tensor + def combine_time_height_width(freqs_t, freqs_h, freqs_w): + freqs_t = freqs_t[:, None, None, :].expand( + -1, grid_size_h, grid_size_w, -1 + ) # temporal_size, grid_size_h, grid_size_w, dim_t + freqs_h = freqs_h[None, :, None, :].expand( + temporal_size, -1, grid_size_w, -1 + ) # temporal_size, grid_size_h, grid_size_2, dim_h + freqs_w = freqs_w[None, None, :, :].expand( + temporal_size, grid_size_h, -1, -1 + ) # temporal_size, grid_size_h, grid_size_2, dim_w + + freqs = torch.cat( + [freqs_t, freqs_h, freqs_w], dim=-1 + ) # temporal_size, grid_size_h, grid_size_w, (dim_t + dim_h + dim_w) + #freqs = freqs.view( + # temporal_size * grid_size_h * grid_size_w, -1 + #) # (temporal_size * grid_size_h * grid_size_w), (dim_t + dim_h + dim_w) + return freqs + + t_cos, t_sin = freqs_t # both t_cos and t_sin has shape: temporal_size, dim_t + h_cos, h_sin = freqs_h # both h_cos and h_sin has shape: grid_size_h, dim_h + w_cos, w_sin = freqs_w # both w_cos and w_sin has shape: grid_size_w, dim_w + cos = combine_time_height_width(t_cos, h_cos, w_cos) + sin = combine_time_height_width(t_sin, h_sin, w_sin) + return cos, sin + +def get_3d_sincos_pos_embed( + embed_dim: int, + spatial_size: Union[int, Tuple[int, int]], + temporal_size: int, + spatial_interpolation_scale: float = 1.0, + temporal_interpolation_scale: float = 1.0, +) -> np.ndarray: + r""" + Args: + embed_dim (`int`): + spatial_size (`int` or `Tuple[int, int]`): + temporal_size (`int`): + spatial_interpolation_scale (`float`, defaults to 1.0): + temporal_interpolation_scale (`float`, defaults to 1.0): + """ + if embed_dim % 4 != 0: + raise ValueError("`embed_dim` must be divisible by 4") + if isinstance(spatial_size, int): + spatial_size = (spatial_size, spatial_size) + + embed_dim_spatial = 3 * embed_dim // 4 + embed_dim_temporal = embed_dim // 4 + + # 1. Spatial + grid_h = np.arange(spatial_size[1], dtype=np.float32) / spatial_interpolation_scale + grid_w = np.arange(spatial_size[0], dtype=np.float32) / spatial_interpolation_scale + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, spatial_size[1], spatial_size[0]]) + pos_embed_spatial = get_2d_sincos_pos_embed_from_grid(embed_dim_spatial, grid) + + # 2. Temporal + grid_t = np.arange(temporal_size, dtype=np.float32) / temporal_interpolation_scale + pos_embed_temporal = get_1d_sincos_pos_embed_from_grid(embed_dim_temporal, grid_t) + + # 3. 
Concat + pos_embed_spatial = pos_embed_spatial[np.newaxis, :, :] + pos_embed_spatial = np.repeat(pos_embed_spatial, temporal_size, axis=0) # [T, H*W, D // 4 * 3] + + pos_embed_temporal = pos_embed_temporal[:, np.newaxis, :] + pos_embed_temporal = np.repeat(pos_embed_temporal, spatial_size[0] * spatial_size[1], axis=1) # [T, H*W, D // 4] + + pos_embed = np.concatenate([pos_embed_temporal, pos_embed_spatial], axis=-1) # [T, H*W, D] + return pos_embed + + +def get_2d_sincos_pos_embed( + embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=16 +): + """ + grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or + [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + if isinstance(grid_size, int): + grid_size = (grid_size, grid_size) + + grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0] / base_size) / interpolation_scale + grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1] / base_size) / interpolation_scale + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_size[1], grid_size[0]]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if cls_token and extra_tokens > 0: + pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0) + return pos_embed + + +def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + if embed_dim % 2 != 0: + raise ValueError("embed_dim must be divisible by 2") + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D) + """ + if embed_dim % 2 != 0: + raise ValueError("embed_dim must be divisible by 2") + + omega = np.arange(embed_dim // 2, dtype=np.float64) + omega /= embed_dim / 2.0 + omega = 1.0 / 10000**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb + +class CogVideoX1_1PatchEmbed(nn.Module): + def __init__( + self, + patch_size: int = 2, + in_channels: int = 16, + embed_dim: int = 1920, + text_embed_dim: int = 4096, + sample_width: int = 90, + sample_height: int = 60, + sample_frames: int = 81, + temporal_compression_ratio: int = 4, + max_text_seq_length: int = 226, + spatial_interpolation_scale: float = 1.875, + temporal_interpolation_scale: float = 1.0, + use_positional_embeddings: bool = True, + use_learned_positional_embeddings: bool = True, + ) -> None: + super().__init__() + + # Adjust patch_size to handle three dimensions + self.patch_size = (patch_size, patch_size, patch_size) # (depth, height, width) + self.embed_dim = embed_dim + self.sample_height = sample_height + self.sample_width = sample_width + self.sample_frames = sample_frames + self.temporal_compression_ratio = temporal_compression_ratio + self.max_text_seq_length = max_text_seq_length + self.spatial_interpolation_scale = spatial_interpolation_scale + self.temporal_interpolation_scale = temporal_interpolation_scale + self.use_positional_embeddings = 
use_positional_embeddings
+        self.use_learned_positional_embeddings = use_learned_positional_embeddings
+
+        # Use Linear layer for projection
+        self.proj = nn.Linear(in_channels * (patch_size ** 3), embed_dim)
+        self.text_proj = nn.Linear(text_embed_dim, embed_dim)
+
+        if use_positional_embeddings or use_learned_positional_embeddings:
+            persistent = use_learned_positional_embeddings
+            pos_embedding = self._get_positional_embeddings(sample_height, sample_width, sample_frames)
+            self.register_buffer("pos_embedding", pos_embedding, persistent=persistent)
+
+    def _get_positional_embeddings(self, sample_height: int, sample_width: int, sample_frames: int) -> torch.Tensor:
+        post_patch_height = sample_height // self.patch_size[1]
+        post_patch_width = sample_width // self.patch_size[2]
+        post_time_compression_frames = (sample_frames - 1) // self.temporal_compression_ratio + 1
+        num_patches = post_patch_height * post_patch_width * post_time_compression_frames
+
+        pos_embedding = get_3d_sincos_pos_embed(
+            self.embed_dim,
+            (post_patch_width, post_patch_height),
+            post_time_compression_frames,
+            self.spatial_interpolation_scale,
+            self.temporal_interpolation_scale,
+        )
+        pos_embedding = torch.from_numpy(pos_embedding).flatten(0, 1)
+        joint_pos_embedding = torch.zeros(1, self.max_text_seq_length + num_patches, self.embed_dim, requires_grad=False)
+        joint_pos_embedding.data[:, self.max_text_seq_length:].copy_(pos_embedding)
+
+        return joint_pos_embedding
+
+    def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor):
+        """
+        Args:
+            text_embeds (torch.Tensor): Input text embeddings of shape (batch_size, seq_length, embedding_dim).
+            image_embeds (torch.Tensor): Input image embeddings of shape (batch_size, num_frames, channels, height, width).
+        """
+        text_embeds = self.text_proj(text_embeds)
+        first_frame = image_embeds[:, 0:1, :, :, :]
+        duplicated_first_frame = first_frame.repeat(1, 2, 1, 1, 1)  # (batch, 2, channels, height, width)
+        # Copy the first frames, for t_patch
+        image_embeds = torch.cat([duplicated_first_frame, image_embeds[:, 1:, :, :, :]], dim=1)
+        batch, num_frames, channels, height, width = image_embeds.shape
+        image_embeds = image_embeds.permute(0, 2, 1, 3, 4).contiguous()
+        image_embeds = image_embeds.view(batch, channels, -1).permute(0, 2, 1)
+
+        rope_patch_t = num_frames // self.patch_size[0]
+        rope_patch_h = height // self.patch_size[1]
+        rope_patch_w = width // self.patch_size[2]
+
+        image_embeds = image_embeds.view(
+            batch,
+            rope_patch_t, self.patch_size[0],
+            rope_patch_h, self.patch_size[1],
+            rope_patch_w, self.patch_size[2],
+            channels
+        )
+        image_embeds = image_embeds.permute(0, 1, 3, 5, 7, 2, 4, 6).contiguous()
+        image_embeds = image_embeds.view(batch, rope_patch_t * rope_patch_h * rope_patch_w, -1)
+        image_embeds = self.proj(image_embeds)
+        # Concatenate text and image embeddings
+        embeds = torch.cat([text_embeds, image_embeds], dim=1).contiguous()
+
+        # Add positional embeddings if applicable
+        if self.use_positional_embeddings or self.use_learned_positional_embeddings:
+            if self.use_learned_positional_embeddings and (self.sample_width != width or self.sample_height != height):
+                raise ValueError(
+                    "It is currently not possible to generate videos at a different resolution than the defaults. This should only be the case with 'THUDM/CogVideoX-5b-I2V'."
+                    "If you think this is incorrect, please open an issue at https://github.com/huggingface/diffusers/issues."
+ ) + + pre_time_compression_frames = (num_frames - 1) * self.temporal_compression_ratio + 1 + + if ( + self.sample_height != height + or self.sample_width != width + or self.sample_frames != pre_time_compression_frames + ): + pos_embedding = self._get_positional_embeddings(height, width, pre_time_compression_frames) + pos_embedding = pos_embedding.to(embeds.device, dtype=embeds.dtype) + else: + pos_embedding = self.pos_embedding + + embeds = embeds + pos_embedding + + return embeds \ No newline at end of file diff --git a/model_loading.py b/model_loading.py index c3591c0..91c6ed3 100644 --- a/model_loading.py +++ b/model_loading.py @@ -71,6 +71,7 @@ class DownloadAndLoadCogVideoModel: "THUDM/CogVideoX-2b", "THUDM/CogVideoX-5b", "THUDM/CogVideoX-5b-I2V", + "kijai/CogVideoX-5b-1.5-T2V", "bertjiazheng/KoolCogVideoX-5b", "kijai/CogVideoX-Fun-2b", "kijai/CogVideoX-Fun-5b", diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 9e38ea8..571498a 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -25,8 +25,9 @@ from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor from diffusers.video_processor import VideoProcessor -from diffusers.models.embeddings import get_3d_rotary_pos_embed +#from diffusers.models.embeddings import get_3d_rotary_pos_embed from diffusers.loaders import CogVideoXLoraLoaderMixin +from .embeddings import get_3d_rotary_pos_embed from .custom_cogvideox_transformer_3d import CogVideoXTransformer3DModel From e783951dadc9f672f67cd7c7234ca336e7eda7d0 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 02:24:18 +0200 Subject: [PATCH 02/49] maybe --- custom_cogvideox_transformer_3d.py | 369 +++++++++++++++-------------- embeddings.py | 100 ++++---- pipeline_cogvideox.py | 35 +-- 3 files changed, 262 insertions(+), 242 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index 0e36cba..ebad39e 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -21,20 +21,18 @@ import torch.nn.functional as F import numpy as np from einops import rearrange -from functools import reduce -from operator import mul from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.utils import logging from diffusers.utils.torch_utils import maybe_allow_in_graph from diffusers.models.attention import Attention, FeedForward from diffusers.models.attention_processor import AttentionProcessor -from diffusers.models.embeddings import CogVideoXPatchEmbed, TimestepEmbedding, Timesteps +from diffusers.models.embeddings import TimestepEmbedding, Timesteps from diffusers.models.modeling_outputs import Transformer2DModelOutput from diffusers.models.modeling_utils import ModelMixin from diffusers.models.normalization import AdaLayerNorm, CogVideoXLayerNormZero from diffusers.loaders import PeftAdapterMixin -from .embeddings import CogVideoX1_1PatchEmbed +from .embeddings import CogVideoXPatchEmbed logger = logging.get_logger(__name__) # pylint: disable=invalid-name @@ -64,14 +62,6 @@ def fft(tensor): return low_freq_fft, high_freq_fft -def rotate_half(x): - x = rearrange(x, "... (d r) -> ... d r", r=2) - x1, x2 = x.unbind(dim=-1) - x = torch.stack((-x2, x1), dim=-1) - return rearrange(x, "... d r -> ... 
(d r)") - - - class CogVideoXAttnProcessor2_0: r""" Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on @@ -81,16 +71,7 @@ class CogVideoXAttnProcessor2_0: def __init__(self): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") - def rotary(self, t, rope_args): - def reshape_freq(freqs): - freqs = freqs[: rope_args["T"], : rope_args["H"], : rope_args["W"]].contiguous() - freqs = rearrange(freqs, "t h w d -> (t h w) d") - freqs = freqs.unsqueeze(0).unsqueeze(0) - return freqs - freqs_cos = reshape_freq(self.freqs_cos).to(t.dtype) - freqs_sin = reshape_freq(self.freqs_sin).to(t.dtype) - - return t * freqs_cos + rotate_half(t) * freqs_sin + @torch.compiler.disable() def __call__( self, @@ -99,7 +80,6 @@ class CogVideoXAttnProcessor2_0: encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, - rope_args: Optional[dict] = None ) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) @@ -129,127 +109,118 @@ class CogVideoXAttnProcessor2_0: if attn.norm_k is not None: key = attn.norm_k(key) - - - # Apply RoPE if needed - if image_rotary_emb is not None: - self.freqs_cos = image_rotary_emb[0] - self.freqs_sin = image_rotary_emb[1] - print("rope args", rope_args) #{'T': 6, 'H': 30, 'W': 45, 'seq_length': 8775} - print("freqs_cos", self.freqs_cos.shape) #torch.Size([13, 30, 45, 64]) - print("freqs_sin", self.freqs_sin.shape) - - - from diffusers.models.embeddings import apply_rotary_emb - - #query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) - query = torch.cat( - (query[:, :, : text_seq_length], - self.rotary(query[:, :, text_seq_length:], - rope_args)), - dim=2) - - if not attn.is_cross_attention: - #key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) - key = torch.cat( - (key[ :, :, : text_seq_length], - self.rotary(key[:, :, text_seq_length:], - rope_args)), - dim=2) - - if SAGEATTN_IS_AVAILABLE: - hidden_states = sageattn(query, key, value, is_causal=False) - else: - hidden_states = F.scaled_dot_product_attention( - query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False - ) - - hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - encoder_hidden_states, hidden_states = hidden_states.split( - [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 - ) - return hidden_states, encoder_hidden_states - - -class FusedCogVideoXAttnProcessor2_0: - r""" - Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on - query and key vectors, but does not include spatial normalization. 
- """ - - def __init__(self): - if not hasattr(F, "scaled_dot_product_attention"): - raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") - @torch.compiler.disable() - def __call__( - self, - attn: Attention, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - image_rotary_emb: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - text_seq_length = encoder_hidden_states.size(1) - - hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - - batch_size, sequence_length, _ = ( - hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape - ) - - if attention_mask is not None: - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) - attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) - - qkv = attn.to_qkv(hidden_states) - split_size = qkv.shape[-1] // 3 - query, key, value = torch.split(qkv, split_size, dim=-1) - - inner_dim = key.shape[-1] - head_dim = inner_dim // attn.heads - - query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - - if attn.norm_q is not None: - query = attn.norm_q(query) - if attn.norm_k is not None: - key = attn.norm_k(key) - - # Apply RoPE if needed if image_rotary_emb is not None: from diffusers.models.embeddings import apply_rotary_emb + has_nan = torch.isnan(query).any() + if has_nan: + raise ValueError(f"query before rope has nan: {has_nan}") - query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) + query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) if not attn.is_cross_attention: key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) - if SAGEATTN_IS_AVAILABLE: - hidden_states = sageattn(query, key, value, is_causal=False) - else: - hidden_states = F.scaled_dot_product_attention( - query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False - ) + #if SAGEATTN_IS_AVAILABLE: + # hidden_states = sageattn(query, key, value, is_causal=False) + #else: + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + has_nan = torch.isnan(hidden_states).any() + if has_nan: + raise ValueError(f"hs after scaled_dot_product_attention has nan: {has_nan}") + has_inf = torch.isinf(hidden_states).any() + if has_inf: + raise ValueError(f"hs after scaled_dot_product_attention has inf: {has_inf}") hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) # linear proj hidden_states = attn.to_out[0](hidden_states) + has_nan = torch.isnan(hidden_states).any() + # dropout hidden_states = attn.to_out[1](hidden_states) - encoder_hidden_states, hidden_states = hidden_states.split( [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 ) return hidden_states, encoder_hidden_states - + + +# class FusedCogVideoXAttnProcessor2_0: +# r""" +# Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on +# query and key vectors, but does not include spatial normalization. 
+# """ + +# def __init__(self): +# if not hasattr(F, "scaled_dot_product_attention"): +# raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") +# @torch.compiler.disable() +# def __call__( +# self, +# attn: Attention, +# hidden_states: torch.Tensor, +# encoder_hidden_states: torch.Tensor, +# attention_mask: Optional[torch.Tensor] = None, +# image_rotary_emb: Optional[torch.Tensor] = None, +# ) -> torch.Tensor: +# print("FusedCogVideoXAttnProcessor2_0") +# text_seq_length = encoder_hidden_states.size(1) + +# hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) + +# batch_size, sequence_length, _ = ( +# hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape +# ) + +# if attention_mask is not None: +# attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) +# attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) + +# qkv = attn.to_qkv(hidden_states) +# split_size = qkv.shape[-1] // 3 +# query, key, value = torch.split(qkv, split_size, dim=-1) + +# inner_dim = key.shape[-1] +# head_dim = inner_dim // attn.heads + +# query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) +# key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) +# value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + +# if attn.norm_q is not None: +# query = attn.norm_q(query) +# if attn.norm_k is not None: +# key = attn.norm_k(key) + +# # Apply RoPE if needed +# if image_rotary_emb is not None: +# from diffusers.models.embeddings import apply_rotary_emb + +# query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) +# if not attn.is_cross_attention: +# key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) + +# if SAGEATTN_IS_AVAILABLE: +# hidden_states = sageattn(query, key, value, is_causal=False) +# else: +# hidden_states = F.scaled_dot_product_attention( +# query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False +# ) + +# hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) + +# # linear proj +# hidden_states = attn.to_out[0](hidden_states) +# # dropout +# hidden_states = attn.to_out[1](hidden_states) + +# encoder_hidden_states, hidden_states = hidden_states.split( +# [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 +# ) +# return hidden_states, encoder_hidden_states + +#region Blocks @maybe_allow_in_graph class CogVideoXBlock(nn.Module): @@ -344,14 +315,14 @@ class CogVideoXBlock(nn.Module): fuser=None, fastercache_counter=0, fastercache_start_step=15, - fastercache_device="cuda:0", - rope_args=None + fastercache_device="cuda:0" ) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) # norm & modulate norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1( hidden_states, encoder_hidden_states, temb ) + # Tora Motion-guidance Fuser if video_flow_feature is not None: H, W = video_flow_feature.shape[-2:] @@ -378,7 +349,7 @@ class CogVideoXBlock(nn.Module): attn_hidden_states, attn_encoder_hidden_states = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, - image_rotary_emb=image_rotary_emb, rope_args=rope_args + image_rotary_emb=image_rotary_emb ) if fastercache_counter == fastercache_start_step: self.cached_hidden_states = 
[attn_hidden_states.to(fastercache_device), attn_hidden_states.to(fastercache_device)] @@ -386,10 +357,18 @@ class CogVideoXBlock(nn.Module): elif fastercache_counter > fastercache_start_step: self.cached_hidden_states[-1].copy_(attn_hidden_states.to(fastercache_device)) self.cached_encoder_hidden_states[-1].copy_(attn_encoder_hidden_states.to(fastercache_device)) - + + hidden_states = hidden_states + gate_msa * attn_hidden_states encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states + # has_nan = torch.isnan(hidden_states).any() + # if has_nan: + # raise ValueError(f"hs before norm2 has nan: {has_nan}") + # has_inf = torch.isinf(hidden_states).any() + # if has_inf: + # raise ValueError(f"hs before norm2 has inf: {has_inf}") + # norm & modulate norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2( hidden_states, encoder_hidden_states, temb @@ -404,7 +383,7 @@ class CogVideoXBlock(nn.Module): return hidden_states, encoder_hidden_states - +#region Transformer class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): """ A Transformer model for video-like data in [CogVideoX](https://github.com/THUDM/CogVideo). @@ -479,6 +458,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): sample_height: int = 60, sample_frames: int = 49, patch_size: int = 2, + patch_size_t: int = 2, temporal_compression_ratio: int = 4, max_text_seq_length: int = 226, activation_fn: str = "gelu-approximate", @@ -489,6 +469,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): temporal_interpolation_scale: float = 1.0, use_rotary_positional_embeddings: bool = False, use_learned_positional_embeddings: bool = False, + patch_bias: bool = True, ): super().__init__() inner_dim = num_attention_heads * attention_head_dim @@ -501,12 +482,13 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): ) # 1. Patch embedding - self.patch_embed = CogVideoX1_1PatchEmbed( + self.patch_embed = CogVideoXPatchEmbed( patch_size=patch_size, + patch_size_t=patch_size_t, in_channels=in_channels, embed_dim=inner_dim, text_embed_dim=text_embed_dim, - #bias=True, + bias=patch_bias, sample_width=sample_width, sample_height=sample_height, sample_frames=sample_frames, @@ -550,7 +532,14 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): norm_eps=norm_eps, chunk_dim=1, ) - self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * patch_size * out_channels) + if patch_size_t is None: + # For CogVideox 1.0 + output_dim = patch_size * patch_size * out_channels + else: + # For CogVideoX 1.5 + output_dim = patch_size * patch_size * patch_size_t * out_channels + + self.proj_out = nn.Linear(inner_dim, output_dim) self.gradient_checkpointing = False @@ -626,44 +615,44 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): fn_recursive_attn_processor(name, module, processor) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedCogVideoXAttnProcessor2_0 - def fuse_qkv_projections(self): - """ - Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) - are fused. For cross-attention modules, key and value projection matrices are fused. + # def fuse_qkv_projections(self): + # """ + # Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) + # are fused. 
For cross-attention modules, key and value projection matrices are fused. - + # - This API is 🧪 experimental. + # This API is 🧪 experimental. - - """ - self.original_attn_processors = None + # + # """ + # self.original_attn_processors = None - for _, attn_processor in self.attn_processors.items(): - if "Added" in str(attn_processor.__class__.__name__): - raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") + # for _, attn_processor in self.attn_processors.items(): + # if "Added" in str(attn_processor.__class__.__name__): + # raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") - self.original_attn_processors = self.attn_processors + # self.original_attn_processors = self.attn_processors - for module in self.modules(): - if isinstance(module, Attention): - module.fuse_projections(fuse=True) + # for module in self.modules(): + # if isinstance(module, Attention): + # module.fuse_projections(fuse=True) - self.set_attn_processor(FusedCogVideoXAttnProcessor2_0()) + # self.set_attn_processor(FusedCogVideoXAttnProcessor2_0()) # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections - def unfuse_qkv_projections(self): - """Disables the fused QKV projection if enabled. + # def unfuse_qkv_projections(self): + # """Disables the fused QKV projection if enabled. - + # - This API is 🧪 experimental. + # This API is 🧪 experimental. - + # - """ - if self.original_attn_processors is not None: - self.set_attn_processor(self.original_attn_processors) + # """ + # if self.original_attn_processors is not None: + # self.set_attn_processor(self.original_attn_processors) def forward( self, @@ -678,9 +667,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): return_dict: bool = True, ): batch_size, num_frames, channels, height, width = hidden_states.shape - p = self.config.patch_size - print("p", p) - + # 1. Time embedding timesteps = timestep t_emb = self.time_proj(timesteps) @@ -691,25 +678,24 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): t_emb = t_emb.to(dtype=hidden_states.dtype) emb = self.time_embedding(t_emb, timestep_cond) - # RoPE - seq_length = num_frames * height * width // reduce(mul, [p, p, p]) - rope_T = num_frames // p - rope_H = height // p - rope_W = width // p - rope_args = { - "T": rope_T, - "H": rope_H, - "W": rope_W, - "seq_length": seq_length, - } - # 2. Patch embedding + p = self.config.patch_size + p_t = self.config.patch_size_t + + # We know that the hidden states height and width will always be divisible by patch_size. + # But, the number of frames may not be divisible by patch_size_t. So, we pad with the beginning frames. 
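+        # Illustrative note (added comment, not in the original patch): with num_frames=13 and
+        # p_t=2, remaining_frames = 2 - 13 % 2 = 1, so the first latent frame is repeated
+        # 1 + 1 = 2 times and the padded sequence has 14 frames, which is divisible by p_t.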
+ if p_t is not None: + remaining_frames = p_t - num_frames % p_t + first_frame = hidden_states[:, :1].repeat(1, 1 + remaining_frames, 1, 1, 1) + hidden_states = torch.cat([first_frame, hidden_states[:, 1:]], dim=1) + hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) hidden_states = self.embedding_dropout(hidden_states) - + text_seq_length = encoder_hidden_states.shape[1] encoder_hidden_states = hidden_states[:, :text_seq_length] hidden_states = hidden_states[:, text_seq_length:] + if self.use_fastercache: self.fastercache_counter+=1 if self.fastercache_counter >= self.fastercache_start_step + 3 and self.fastercache_counter % 5 !=0: @@ -754,8 +740,15 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): # - It is okay to `channels` use for CogVideoX-2b and CogVideoX-5b (number of input channels is equal to output channels) # - However, for CogVideoX-5b-I2V also takes concatenated input image latents (number of input channels is twice the output channels) - output = hidden_states.reshape(1, num_frames, height // p, width // p, -1, p, p) - output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) + if p_t is None: + output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, -1, p, p) + output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) + else: + output = hidden_states.reshape( + batch_size, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p + ) + output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2) + output = output[:, remaining_frames:] (bb, tt, cc, hh, ww) = output.shape cond = rearrange(output, "B T C H W -> (B T) C H W", B=bb, C=cc, T=tt, H=hh, W=ww) @@ -777,6 +770,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): output = torch.cat([output, recovered_uncond]) else: for i, block in enumerate(self.transformer_blocks): + print("block", i) hidden_states, encoder_hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, @@ -785,9 +779,11 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): video_flow_feature=video_flow_features[i] if video_flow_features is not None else None, fuser = self.fuser_list[i] if self.fuser_list is not None else None, fastercache_counter = self.fastercache_counter, - fastercache_device = self.fastercache_device, - rope_args=rope_args + fastercache_device = self.fastercache_device ) + has_nan = torch.isnan(hidden_states).any() + if has_nan: + raise ValueError(f"block output hidden_states has nan: {has_nan}") if (controlnet_states is not None) and (i < len(controlnet_states)): controlnet_states_block = controlnet_states[i] @@ -816,9 +812,16 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): # Note: we use `-1` instead of `channels`: # - It is okay to `channels` use for CogVideoX-2b and CogVideoX-5b (number of input channels is equal to output channels) # - However, for CogVideoX-5b-I2V also takes concatenated input image latents (number of input channels is twice the output channels) - p = self.config.patch_size - output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, -1, p, p) - output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) + + if p_t is None: + output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, -1, p, p) + output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) + else: + output = 
hidden_states.reshape( + batch_size, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p + ) + output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2) + output = output[:, remaining_frames:] if self.fastercache_counter >= self.fastercache_start_step + 1: (bb, tt, cc, hh, ww) = output.shape diff --git a/embeddings.py b/embeddings.py index 9747e91..bc3bf7f 100644 --- a/embeddings.py +++ b/embeddings.py @@ -1,7 +1,7 @@ import torch import torch.nn as nn import numpy as np -from typing import Tuple, Union +from typing import Tuple, Union, Optional def get_1d_rotary_pos_embed( dim: int, @@ -123,9 +123,9 @@ def get_3d_rotary_pos_embed( freqs = torch.cat( [freqs_t, freqs_h, freqs_w], dim=-1 ) # temporal_size, grid_size_h, grid_size_w, (dim_t + dim_h + dim_w) - #freqs = freqs.view( - # temporal_size * grid_size_h * grid_size_w, -1 - #) # (temporal_size * grid_size_h * grid_size_w), (dim_t + dim_h + dim_w) + freqs = freqs.view( + temporal_size * grid_size_h * grid_size_w, -1 + ) # (temporal_size * grid_size_h * grid_size_w), (dim_t + dim_h + dim_w) return freqs t_cos, t_sin = freqs_t # both t_cos and t_sin has shape: temporal_size, dim_t @@ -236,16 +236,18 @@ def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) return emb -class CogVideoX1_1PatchEmbed(nn.Module): +class CogVideoXPatchEmbed(nn.Module): def __init__( self, patch_size: int = 2, + patch_size_t: Optional[int] = None, in_channels: int = 16, embed_dim: int = 1920, text_embed_dim: int = 4096, + bias: bool = True, sample_width: int = 90, sample_height: int = 60, - sample_frames: int = 81, + sample_frames: int = 49, temporal_compression_ratio: int = 4, max_text_seq_length: int = 226, spatial_interpolation_scale: float = 1.875, @@ -255,8 +257,8 @@ class CogVideoX1_1PatchEmbed(nn.Module): ) -> None: super().__init__() - # Adjust patch_size to handle three dimensions - self.patch_size = (patch_size, patch_size, patch_size) # (depth, height, width) + self.patch_size = patch_size + self.patch_size_t = patch_size_t self.embed_dim = embed_dim self.sample_height = sample_height self.sample_width = sample_width @@ -268,8 +270,15 @@ class CogVideoX1_1PatchEmbed(nn.Module): self.use_positional_embeddings = use_positional_embeddings self.use_learned_positional_embeddings = use_learned_positional_embeddings - # Use Linear layer for projection - self.proj = nn.Linear(in_channels * (patch_size ** 3), embed_dim) + if patch_size_t is None: + # CogVideoX 1.0 checkpoints + self.proj = nn.Conv2d( + in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias + ) + else: + # CogVideoX 1.5 checkpoints + self.proj = nn.Linear(in_channels * patch_size * patch_size * patch_size_t, embed_dim) + self.text_proj = nn.Linear(text_embed_dim, embed_dim) if use_positional_embeddings or use_learned_positional_embeddings: @@ -278,8 +287,8 @@ class CogVideoX1_1PatchEmbed(nn.Module): self.register_buffer("pos_embedding", pos_embedding, persistent=persistent) def _get_positional_embeddings(self, sample_height: int, sample_width: int, sample_frames: int) -> torch.Tensor: - post_patch_height = sample_height // self.patch_size[1] - post_patch_width = sample_width // self.patch_size[2] + post_patch_height = sample_height // self.patch_size + post_patch_width = sample_width // self.patch_size post_time_compression_frames = (sample_frames - 1) // self.temporal_compression_ratio + 1 num_patches = post_patch_height * post_patch_width * 
post_time_compression_frames @@ -291,44 +300,46 @@ class CogVideoX1_1PatchEmbed(nn.Module): self.temporal_interpolation_scale, ) pos_embedding = torch.from_numpy(pos_embedding).flatten(0, 1) - joint_pos_embedding = torch.zeros(1, self.max_text_seq_length + num_patches, self.embed_dim, requires_grad=False) - joint_pos_embedding.data[:, self.max_text_seq_length:].copy_(pos_embedding) + joint_pos_embedding = torch.zeros( + 1, self.max_text_seq_length + num_patches, self.embed_dim, requires_grad=False + ) + joint_pos_embedding.data[:, self.max_text_seq_length :].copy_(pos_embedding) return joint_pos_embedding def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor): - """ + r""" Args: - text_embeds (torch.Tensor): Input text embeddings of shape (batch_size, seq_length, embedding_dim). - image_embeds (torch.Tensor): Input image embeddings of shape (batch_size, num_frames, channels, height, width). + text_embeds (`torch.Tensor`): + Input text embeddings. Expected shape: (batch_size, seq_length, embedding_dim). + image_embeds (`torch.Tensor`): + Input image embeddings. Expected shape: (batch_size, num_frames, channels, height, width). """ text_embeds = self.text_proj(text_embeds) - first_frame = image_embeds[:, 0:1, :, :, :] - duplicated_first_frame = first_frame.repeat(1, 2, 1, 1, 1) # (batch, 2, channels, height, width) - # Copy the first frames, for t_patch - image_embeds = torch.cat([duplicated_first_frame, image_embeds[:, 1:, :, :, :]], dim=1) - batch, num_frames, channels, height, width = image_embeds.shape - image_embeds = image_embeds.permute(0, 2, 1, 3, 4).contiguous() - image_embeds = image_embeds.view(batch, channels, -1).permute(0, 2, 1) - rope_patch_t = num_frames // self.patch_size[0] - rope_patch_h = height // self.patch_size[1] - rope_patch_w = width // self.patch_size[2] + batch_size, num_frames, channels, height, width = image_embeds.shape - image_embeds = image_embeds.view( - batch, - rope_patch_t, self.patch_size[0], - rope_patch_h, self.patch_size[1], - rope_patch_w, self.patch_size[2], - channels - ) - image_embeds = image_embeds.permute(0, 1, 3, 5, 7, 2, 4, 6).contiguous() - image_embeds = image_embeds.view(batch, rope_patch_t * rope_patch_h * rope_patch_w, -1) - image_embeds = self.proj(image_embeds) - # Concatenate text and image embeddings - embeds = torch.cat([text_embeds, image_embeds], dim=1).contiguous() + if self.patch_size_t is None: + image_embeds = image_embeds.reshape(-1, channels, height, width) + image_embeds = self.proj(image_embeds) + image_embeds = image_embeds.view(batch_size, num_frames, *image_embeds.shape[1:]) + image_embeds = image_embeds.flatten(3).transpose(2, 3) # [batch, num_frames, height x width, channels] + image_embeds = image_embeds.flatten(1, 2) # [batch, num_frames x height x width, channels] + else: + p = self.patch_size + p_t = self.patch_size_t + + image_embeds = image_embeds.permute(0, 1, 3, 4, 2) + image_embeds = image_embeds.reshape( + batch_size, num_frames // p_t, p_t, height // p, p, width // p, p, channels + ) + image_embeds = image_embeds.permute(0, 1, 3, 5, 7, 2, 4, 6).flatten(4, 7).flatten(1, 3) + image_embeds = self.proj(image_embeds) + + embeds = torch.cat( + [text_embeds, image_embeds], dim=1 + ).contiguous() # [batch, seq_length + num_frames x height x width, channels] - # Add positional embeddings if applicable if self.use_positional_embeddings or self.use_learned_positional_embeddings: if self.use_learned_positional_embeddings and (self.sample_width != width or self.sample_height != height): raise 
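For the 1.0 branch (patch_size_t is None), the reshapes above fold every frame into spatial tokens and then merge time into the sequence. A shape trace under the same assumed sizes:

import torch
import torch.nn as nn

B, T, C, H, W, D, p = 1, 13, 16, 60, 90, 3072, 2
proj = nn.Conv2d(C, D, kernel_size=(p, p), stride=p)

x = torch.randn(B, T, C, H, W)
x = x.reshape(-1, C, H, W)            # (B*T, C, H, W): fold time into the batch
x = proj(x)                           # (B*T, D, H//p, W//p)
x = x.view(B, T, *x.shape[1:])        # (B, T, D, 30, 45)
x = x.flatten(3).transpose(2, 3)      # (B, T, H//p * W//p, D)
x = x.flatten(1, 2)                   # (B, T * H//p * W//p, D)
assert x.shape == (B, T * (H // p) * (W // p), D)     # (1, 17550, 3072)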
ValueError( @@ -339,9 +350,9 @@ class CogVideoX1_1PatchEmbed(nn.Module): pre_time_compression_frames = (num_frames - 1) * self.temporal_compression_ratio + 1 if ( - self.sample_height != height - or self.sample_width != width - or self.sample_frames != pre_time_compression_frames + self.sample_height != height + or self.sample_width != width + or self.sample_frames != pre_time_compression_frames ): pos_embedding = self._get_positional_embeddings(height, width, pre_time_compression_frames) pos_embedding = pos_embedding.to(embeds.device, dtype=embeds.dtype) @@ -350,4 +361,5 @@ class CogVideoX1_1PatchEmbed(nn.Module): embeds = embeds + pos_embedding - return embeds \ No newline at end of file + return embeds + \ No newline at end of file diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 571498a..a563b73 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -21,6 +21,7 @@ import torch.nn.functional as F import math from diffusers.models import AutoencoderKLCogVideoX#, CogVideoXTransformer3DModel +from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor @@ -115,7 +116,7 @@ def retrieve_timesteps( timesteps = scheduler.timesteps return timesteps, num_inference_steps -class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): +class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): r""" Pipeline for text-to-video generation using CogVideoX. @@ -298,18 +299,18 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): weights = weights.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(4).repeat(1, t_batch_size,1, 1, 1) return weights - def fuse_qkv_projections(self) -> None: - r"""Enables fused QKV projections.""" - self.fusing_transformer = True - self.transformer.fuse_qkv_projections() + # def fuse_qkv_projections(self) -> None: + # r"""Enables fused QKV projections.""" + # self.fusing_transformer = True + # self.transformer.fuse_qkv_projections() - def unfuse_qkv_projections(self) -> None: - r"""Disable QKV projection fusion if enabled.""" - if not self.fusing_transformer: - logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.") - else: - self.transformer.unfuse_qkv_projections() - self.fusing_transformer = False + # def unfuse_qkv_projections(self) -> None: + # r"""Disable QKV projection fusion if enabled.""" + # if not self.fusing_transformer: + # logger.warning("The Transformer was not initially fused for QKV projections. 
Doing nothing.") + # else: + # self.transformer.unfuse_qkv_projections() + # self.fusing_transformer = False def _prepare_rotary_positional_embeddings( self, @@ -322,8 +323,12 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): ) -> Tuple[torch.Tensor, torch.Tensor]: grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) - base_size_width = 720 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) - base_size_height = 480 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) + p = self.transformer.config.patch_size + p_t = self.transformer.config.patch_size_t or 1 + + base_size_width = self.transformer.config.sample_width // p + base_size_height = self.transformer.config.sample_height // p + base_num_frames = (num_frames + p_t - 1) // p_t grid_crops_coords = get_resize_crop_region_for_grid( (grid_height, grid_width), base_size_width, base_size_height @@ -332,7 +337,7 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): embed_dim=self.transformer.config.attention_head_dim, crops_coords=grid_crops_coords, grid_size=(grid_height, grid_width), - temporal_size=num_frames, + temporal_size=base_num_frames, use_real=True, ) From 9aab678a9eba983f15189cf05b7a09a8ea5e2f13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 03:15:21 +0200 Subject: [PATCH 03/49] test --- custom_cogvideox_transformer_3d.py | 38 ++--- embeddings.py | 233 +---------------------------- pipeline_cogvideox.py | 3 +- 3 files changed, 16 insertions(+), 258 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index ebad39e..c751d13 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -109,37 +109,28 @@ class CogVideoXAttnProcessor2_0: if attn.norm_k is not None: key = attn.norm_k(key) + # Apply RoPE if needed if image_rotary_emb is not None: from diffusers.models.embeddings import apply_rotary_emb - has_nan = torch.isnan(query).any() - if has_nan: - raise ValueError(f"query before rope has nan: {has_nan}") - query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) + query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) if not attn.is_cross_attention: key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) - #if SAGEATTN_IS_AVAILABLE: - # hidden_states = sageattn(query, key, value, is_causal=False) - #else: - hidden_states = F.scaled_dot_product_attention( - query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False - ) - has_nan = torch.isnan(hidden_states).any() - if has_nan: - raise ValueError(f"hs after scaled_dot_product_attention has nan: {has_nan}") - has_inf = torch.isinf(hidden_states).any() - if has_inf: - raise ValueError(f"hs after scaled_dot_product_attention has inf: {has_inf}") + if SAGEATTN_IS_AVAILABLE: + hidden_states = sageattn(query, key, value, is_causal=False) + else: + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) # linear proj hidden_states = attn.to_out[0](hidden_states) - has_nan = torch.isnan(hidden_states).any() - # dropout 
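The grid math in _prepare_rotary_positional_embeddings above is easiest to see with numbers. Assuming a 1360x768 render at 81 frames with the usual VAE factors (spatial 8, temporal 4), p = 2, and p_t = 2: the RoPE grid is sized per spatial patch, and the temporal axis is measured in p_t-sized patches via ceiling division.

vae_scale_factor_spatial, p, p_t = 8, 2, 2
height, width = 768, 1360
num_latent_frames = (81 - 1) // 4 + 1                     # 21

grid_height = height // (vae_scale_factor_spatial * p)    # 48
grid_width = width // (vae_scale_factor_spatial * p)      # 85
base_num_frames = (num_latent_frames + p_t - 1) // p_t    # 11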
hidden_states = attn.to_out[1](hidden_states) + encoder_hidden_states, hidden_states = hidden_states.split( [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 ) @@ -322,7 +313,6 @@ class CogVideoXBlock(nn.Module): norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1( hidden_states, encoder_hidden_states, temb ) - # Tora Motion-guidance Fuser if video_flow_feature is not None: H, W = video_flow_feature.shape[-2:] @@ -747,8 +737,8 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): output = hidden_states.reshape( batch_size, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p ) - output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2) - output = output[:, remaining_frames:] + output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2) + output = output[:, remaining_frames:] (bb, tt, cc, hh, ww) = output.shape cond = rearrange(output, "B T C H W -> (B T) C H W", B=bb, C=cc, T=tt, H=hh, W=ww) @@ -770,7 +760,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): output = torch.cat([output, recovered_uncond]) else: for i, block in enumerate(self.transformer_blocks): - print("block", i) + #print("block", i) hidden_states, encoder_hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, @@ -820,8 +810,8 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): output = hidden_states.reshape( batch_size, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p ) - output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2) - output = output[:, remaining_frames:] + output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2) + output = output[:, remaining_frames:] if self.fastercache_counter >= self.fastercache_start_step + 1: (bb, tt, cc, hh, ww) = output.shape diff --git a/embeddings.py b/embeddings.py index bc3bf7f..908c67f 100644 --- a/embeddings.py +++ b/embeddings.py @@ -2,239 +2,8 @@ import torch import torch.nn as nn import numpy as np from typing import Tuple, Union, Optional +from diffusers.models.embeddings import get_3d_sincos_pos_embed -def get_1d_rotary_pos_embed( - dim: int, - pos: Union[np.ndarray, int], - theta: float = 10000.0, - use_real=False, - linear_factor=1.0, - ntk_factor=1.0, - repeat_interleave_real=True, - freqs_dtype=torch.float32, # torch.float32, torch.float64 (flux) -): - """ - Precompute the frequency tensor for complex exponentials (cis) with given dimensions. - - This function calculates a frequency tensor with complex exponentials using the given dimension 'dim' and the end - index 'end'. The 'theta' parameter scales the frequencies. The returned tensor contains complex values in complex64 - data type. - - Args: - dim (`int`): Dimension of the frequency tensor. - pos (`np.ndarray` or `int`): Position indices for the frequency tensor. [S] or scalar - theta (`float`, *optional*, defaults to 10000.0): - Scaling factor for frequency computation. Defaults to 10000.0. - use_real (`bool`, *optional*): - If True, return real part and imaginary part separately. Otherwise, return complex numbers. - linear_factor (`float`, *optional*, defaults to 1.0): - Scaling factor for the context extrapolation. Defaults to 1.0. - ntk_factor (`float`, *optional*, defaults to 1.0): - Scaling factor for the NTK-Aware RoPE. Defaults to 1.0. 
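The joint sequence handled by the blocks is text tokens followed by video tokens, so the split above simply undoes the earlier concatenation. A toy version with assumed lengths (226 text tokens, matching the tokenizer max length):

import torch

text_seq_length, video_seq_length, dim = 226, 17550, 3072
joint = torch.randn(2, text_seq_length + video_seq_length, dim)

encoder_hidden_states, hidden_states = joint.split(
    [text_seq_length, joint.size(1) - text_seq_length], dim=1
)
assert encoder_hidden_states.shape[1] == text_seq_length
assert hidden_states.shape[1] == video_seq_length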
- repeat_interleave_real (`bool`, *optional*, defaults to `True`): - If `True` and `use_real`, real part and imaginary part are each interleaved with themselves to reach `dim`. - Otherwise, they are concateanted with themselves. - freqs_dtype (`torch.float32` or `torch.float64`, *optional*, defaults to `torch.float32`): - the dtype of the frequency tensor. - Returns: - `torch.Tensor`: Precomputed frequency tensor with complex exponentials. [S, D/2] - """ - assert dim % 2 == 0 - - if isinstance(pos, int): - pos = torch.arange(pos) - if isinstance(pos, np.ndarray): - pos = torch.from_numpy(pos) # type: ignore # [S] - - theta = theta * ntk_factor - freqs = ( - 1.0 - / (theta ** (torch.arange(0, dim, 2, dtype=freqs_dtype, device=pos.device)[: (dim // 2)] / dim)) - / linear_factor - ) # [D/2] - freqs = torch.outer(pos, freqs) # type: ignore # [S, D/2] - if use_real and repeat_interleave_real: - # flux, hunyuan-dit, cogvideox - freqs_cos = freqs.cos().repeat_interleave(2, dim=1).float() # [S, D] - freqs_sin = freqs.sin().repeat_interleave(2, dim=1).float() # [S, D] - return freqs_cos, freqs_sin - elif use_real: - # stable audio - freqs_cos = torch.cat([freqs.cos(), freqs.cos()], dim=-1).float() # [S, D] - freqs_sin = torch.cat([freqs.sin(), freqs.sin()], dim=-1).float() # [S, D] - return freqs_cos, freqs_sin - else: - # lumina - freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64 # [S, D/2] - return freqs_cis - -def get_3d_rotary_pos_embed( - embed_dim, crops_coords, grid_size, temporal_size, theta: int = 10000, use_real: bool = True -) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: - """ - RoPE for video tokens with 3D structure. - - Args: - embed_dim: (`int`): - The embedding dimension size, corresponding to hidden_size_head. - crops_coords (`Tuple[int]`): - The top-left and bottom-right coordinates of the crop. - grid_size (`Tuple[int]`): - The grid size of the spatial positional embedding (height, width). - temporal_size (`int`): - The size of the temporal dimension. - theta (`float`): - Scaling factor for frequency computation. - - Returns: - `torch.Tensor`: positional embedding with shape `(temporal_size * grid_size[0] * grid_size[1], embed_dim/2)`. 
- """ - if use_real is not True: - raise ValueError(" `use_real = False` is not currently supported for get_3d_rotary_pos_embed") - start, stop = crops_coords - grid_size_h, grid_size_w = grid_size - grid_h = np.linspace(start[0], stop[0], grid_size_h, endpoint=False, dtype=np.float32) - grid_w = np.linspace(start[1], stop[1], grid_size_w, endpoint=False, dtype=np.float32) - grid_t = np.linspace(0, temporal_size, temporal_size, endpoint=False, dtype=np.float32) - - # Compute dimensions for each axis - dim_t = embed_dim // 4 - dim_h = embed_dim // 8 * 3 - dim_w = embed_dim // 8 * 3 - - # Temporal frequencies - freqs_t = get_1d_rotary_pos_embed(dim_t, grid_t, use_real=True) - # Spatial frequencies for height and width - freqs_h = get_1d_rotary_pos_embed(dim_h, grid_h, use_real=True) - freqs_w = get_1d_rotary_pos_embed(dim_w, grid_w, use_real=True) - - # BroadCast and concatenate temporal and spaial frequencie (height and width) into a 3d tensor - def combine_time_height_width(freqs_t, freqs_h, freqs_w): - freqs_t = freqs_t[:, None, None, :].expand( - -1, grid_size_h, grid_size_w, -1 - ) # temporal_size, grid_size_h, grid_size_w, dim_t - freqs_h = freqs_h[None, :, None, :].expand( - temporal_size, -1, grid_size_w, -1 - ) # temporal_size, grid_size_h, grid_size_2, dim_h - freqs_w = freqs_w[None, None, :, :].expand( - temporal_size, grid_size_h, -1, -1 - ) # temporal_size, grid_size_h, grid_size_2, dim_w - - freqs = torch.cat( - [freqs_t, freqs_h, freqs_w], dim=-1 - ) # temporal_size, grid_size_h, grid_size_w, (dim_t + dim_h + dim_w) - freqs = freqs.view( - temporal_size * grid_size_h * grid_size_w, -1 - ) # (temporal_size * grid_size_h * grid_size_w), (dim_t + dim_h + dim_w) - return freqs - - t_cos, t_sin = freqs_t # both t_cos and t_sin has shape: temporal_size, dim_t - h_cos, h_sin = freqs_h # both h_cos and h_sin has shape: grid_size_h, dim_h - w_cos, w_sin = freqs_w # both w_cos and w_sin has shape: grid_size_w, dim_w - cos = combine_time_height_width(t_cos, h_cos, w_cos) - sin = combine_time_height_width(t_sin, h_sin, w_sin) - return cos, sin - -def get_3d_sincos_pos_embed( - embed_dim: int, - spatial_size: Union[int, Tuple[int, int]], - temporal_size: int, - spatial_interpolation_scale: float = 1.0, - temporal_interpolation_scale: float = 1.0, -) -> np.ndarray: - r""" - Args: - embed_dim (`int`): - spatial_size (`int` or `Tuple[int, int]`): - temporal_size (`int`): - spatial_interpolation_scale (`float`, defaults to 1.0): - temporal_interpolation_scale (`float`, defaults to 1.0): - """ - if embed_dim % 4 != 0: - raise ValueError("`embed_dim` must be divisible by 4") - if isinstance(spatial_size, int): - spatial_size = (spatial_size, spatial_size) - - embed_dim_spatial = 3 * embed_dim // 4 - embed_dim_temporal = embed_dim // 4 - - # 1. Spatial - grid_h = np.arange(spatial_size[1], dtype=np.float32) / spatial_interpolation_scale - grid_w = np.arange(spatial_size[0], dtype=np.float32) / spatial_interpolation_scale - grid = np.meshgrid(grid_w, grid_h) # here w goes first - grid = np.stack(grid, axis=0) - - grid = grid.reshape([2, 1, spatial_size[1], spatial_size[0]]) - pos_embed_spatial = get_2d_sincos_pos_embed_from_grid(embed_dim_spatial, grid) - - # 2. Temporal - grid_t = np.arange(temporal_size, dtype=np.float32) / temporal_interpolation_scale - pos_embed_temporal = get_1d_sincos_pos_embed_from_grid(embed_dim_temporal, grid_t) - - # 3. 
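The deleted helper split the rotary head dimension unevenly across the three axes, and the diffusers version this series switches to uses the same split. A quick check with an assumed attention_head_dim of 64:

embed_dim = 64
dim_t = embed_dim // 4         # 16 dims rotate with time
dim_h = embed_dim // 8 * 3     # 24 dims rotate with height
dim_w = embed_dim // 8 * 3     # 24 dims rotate with width
assert dim_t + dim_h + dim_w == embed_dim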
Concat - pos_embed_spatial = pos_embed_spatial[np.newaxis, :, :] - pos_embed_spatial = np.repeat(pos_embed_spatial, temporal_size, axis=0) # [T, H*W, D // 4 * 3] - - pos_embed_temporal = pos_embed_temporal[:, np.newaxis, :] - pos_embed_temporal = np.repeat(pos_embed_temporal, spatial_size[0] * spatial_size[1], axis=1) # [T, H*W, D // 4] - - pos_embed = np.concatenate([pos_embed_temporal, pos_embed_spatial], axis=-1) # [T, H*W, D] - return pos_embed - - -def get_2d_sincos_pos_embed( - embed_dim, grid_size, cls_token=False, extra_tokens=0, interpolation_scale=1.0, base_size=16 -): - """ - grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or - [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) - """ - if isinstance(grid_size, int): - grid_size = (grid_size, grid_size) - - grid_h = np.arange(grid_size[0], dtype=np.float32) / (grid_size[0] / base_size) / interpolation_scale - grid_w = np.arange(grid_size[1], dtype=np.float32) / (grid_size[1] / base_size) / interpolation_scale - grid = np.meshgrid(grid_w, grid_h) # here w goes first - grid = np.stack(grid, axis=0) - - grid = grid.reshape([2, 1, grid_size[1], grid_size[0]]) - pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) - if cls_token and extra_tokens > 0: - pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0) - return pos_embed - - -def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): - if embed_dim % 2 != 0: - raise ValueError("embed_dim must be divisible by 2") - - # use half of dimensions to encode grid_h - emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) - emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) - - emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) - return emb - - -def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): - """ - embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D) - """ - if embed_dim % 2 != 0: - raise ValueError("embed_dim must be divisible by 2") - - omega = np.arange(embed_dim // 2, dtype=np.float64) - omega /= embed_dim / 2.0 - omega = 1.0 / 10000**omega # (D/2,) - - pos = pos.reshape(-1) # (M,) - out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product - - emb_sin = np.sin(out) # (M, D/2) - emb_cos = np.cos(out) # (M, D/2) - - emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) - return emb class CogVideoXPatchEmbed(nn.Module): def __init__( diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index a563b73..f2fb927 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -26,9 +26,8 @@ from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor from diffusers.video_processor import VideoProcessor -#from diffusers.models.embeddings import get_3d_rotary_pos_embed +from diffusers.models.embeddings import get_3d_rotary_pos_embed from diffusers.loaders import CogVideoXLoraLoaderMixin -from .embeddings import get_3d_rotary_pos_embed from .custom_cogvideox_transformer_3d import CogVideoXTransformer3DModel From b563994afcb6eec0760d757fc082016f9d4a9861 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 04:02:36 +0200 Subject: [PATCH 04/49] finally works --- custom_cogvideox_transformer_3d.py | 147 ++++------------------------- model_loading.py | 6 +- nodes.py | 1 + 3 files changed, 22 
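The removed sin-cos helpers are likewise available from diffusers.models.embeddings. For reference, the 1D helper reduces to a few lines, with sin terms in the first half of the feature dimension and cos terms in the second:

import numpy as np

def sincos_1d(embed_dim: int, pos: np.ndarray) -> np.ndarray:
    omega = 1.0 / 10000 ** (np.arange(embed_dim // 2, dtype=np.float64) / (embed_dim / 2.0))
    out = np.outer(pos.reshape(-1), omega)                        # (M, D/2)
    return np.concatenate([np.sin(out), np.cos(out)], axis=1)     # (M, D)

emb = sincos_1d(16, np.arange(8, dtype=np.float64))
assert emb.shape == (8, 16)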
insertions(+), 132 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index c751d13..9bbd87b 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -73,6 +73,7 @@ class CogVideoXAttnProcessor2_0: raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") @torch.compiler.disable() + def __call__( self, attn: Attention, @@ -115,14 +116,16 @@ class CogVideoXAttnProcessor2_0: query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) if not attn.is_cross_attention: - key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) - - if SAGEATTN_IS_AVAILABLE: - hidden_states = sageattn(query, key, value, is_causal=False) - else: - hidden_states = F.scaled_dot_product_attention( + key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) + + #if SAGEATTN_IS_AVAILABLE: + # hidden_states = sageattn(query, key, value, is_causal=False) + #else: + hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) + if torch.isinf(hidden_states).any(): + raise ValueError(f"hidden_states after dot product has inf") hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) @@ -136,81 +139,6 @@ class CogVideoXAttnProcessor2_0: ) return hidden_states, encoder_hidden_states - -# class FusedCogVideoXAttnProcessor2_0: -# r""" -# Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on -# query and key vectors, but does not include spatial normalization. -# """ - -# def __init__(self): -# if not hasattr(F, "scaled_dot_product_attention"): -# raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") -# @torch.compiler.disable() -# def __call__( -# self, -# attn: Attention, -# hidden_states: torch.Tensor, -# encoder_hidden_states: torch.Tensor, -# attention_mask: Optional[torch.Tensor] = None, -# image_rotary_emb: Optional[torch.Tensor] = None, -# ) -> torch.Tensor: -# print("FusedCogVideoXAttnProcessor2_0") -# text_seq_length = encoder_hidden_states.size(1) - -# hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - -# batch_size, sequence_length, _ = ( -# hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape -# ) - -# if attention_mask is not None: -# attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) -# attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) - -# qkv = attn.to_qkv(hidden_states) -# split_size = qkv.shape[-1] // 3 -# query, key, value = torch.split(qkv, split_size, dim=-1) - -# inner_dim = key.shape[-1] -# head_dim = inner_dim // attn.heads - -# query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) -# key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) -# value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - -# if attn.norm_q is not None: -# query = attn.norm_q(query) -# if attn.norm_k is not None: -# key = attn.norm_k(key) - -# # Apply RoPE if needed -# if image_rotary_emb is not None: -# from diffusers.models.embeddings import apply_rotary_emb - -# query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) -# if not attn.is_cross_attention: 
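One detail of the processor above that is easy to miss: rotary embeddings are applied only to the video part of the sequence, and text tokens pass through unrotated. A toy sketch with an identity rotation standing in for the real frequencies (the rotate-half layout mirrors diffusers' apply_rotary_emb with interleaved real/imaginary parts):

import torch
import torch.nn.functional as F

B, heads, text_len, video_len, head_dim = 1, 2, 4, 8, 16
q = torch.randn(B, heads, text_len + video_len, head_dim)
k = torch.randn_like(q)
v = torch.randn_like(q)

cos = torch.ones(video_len, head_dim)    # stand-in frequencies: identity rotation
sin = torch.zeros(video_len, head_dim)

def rotate(x: torch.Tensor) -> torch.Tensor:
    x1, x2 = x[..., 0::2], x[..., 1::2]
    x_rot = torch.stack([-x2, x1], dim=-1).flatten(-2)
    return x * cos + x_rot * sin

q[:, :, text_len:] = rotate(q[:, :, text_len:])   # text tokens stay untouched
k[:, :, text_len:] = rotate(k[:, :, text_len:])
out = F.scaled_dot_product_attention(q, k, v, dropout_p=0.0, is_causal=False)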
-# key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) - -# if SAGEATTN_IS_AVAILABLE: -# hidden_states = sageattn(query, key, value, is_causal=False) -# else: -# hidden_states = F.scaled_dot_product_attention( -# query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False -# ) - -# hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) - -# # linear proj -# hidden_states = attn.to_out[0](hidden_states) -# # dropout -# hidden_states = attn.to_out[1](hidden_states) - -# encoder_hidden_states, hidden_states = hidden_states.split( -# [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 -# ) -# return hidden_states, encoder_hidden_states - #region Blocks @maybe_allow_in_graph class CogVideoXBlock(nn.Module): @@ -270,6 +198,7 @@ class CogVideoXBlock(nn.Module): # 1. Self Attention self.norm1 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True) + self.attn1 = Attention( query_dim=dim, @@ -308,11 +237,14 @@ class CogVideoXBlock(nn.Module): fastercache_start_step=15, fastercache_device="cuda:0" ) -> torch.Tensor: + text_seq_length = encoder_hidden_states.size(1) + # norm & modulate norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1( hidden_states, encoder_hidden_states, temb ) + # Tora Motion-guidance Fuser if video_flow_feature is not None: H, W = video_flow_feature.shape[-2:] @@ -347,19 +279,12 @@ class CogVideoXBlock(nn.Module): elif fastercache_counter > fastercache_start_step: self.cached_hidden_states[-1].copy_(attn_hidden_states.to(fastercache_device)) self.cached_encoder_hidden_states[-1].copy_(attn_encoder_hidden_states.to(fastercache_device)) - - + hidden_states = hidden_states + gate_msa * attn_hidden_states encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states - # has_nan = torch.isnan(hidden_states).any() - # if has_nan: - # raise ValueError(f"hs before norm2 has nan: {has_nan}") - # has_inf = torch.isinf(hidden_states).any() - # if has_inf: - # raise ValueError(f"hs before norm2 has inf: {has_inf}") - # norm & modulate + norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2( hidden_states, encoder_hidden_states, temb ) @@ -604,45 +529,6 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) - # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedCogVideoXAttnProcessor2_0 - # def fuse_qkv_projections(self): - # """ - # Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) - # are fused. For cross-attention modules, key and value projection matrices are fused. - - # - - # This API is 🧪 experimental. 
- - # - # """ - # self.original_attn_processors = None - - # for _, attn_processor in self.attn_processors.items(): - # if "Added" in str(attn_processor.__class__.__name__): - # raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") - - # self.original_attn_processors = self.attn_processors - - # for module in self.modules(): - # if isinstance(module, Attention): - # module.fuse_projections(fuse=True) - - # self.set_attn_processor(FusedCogVideoXAttnProcessor2_0()) - - # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections - # def unfuse_qkv_projections(self): - # """Disables the fused QKV projection if enabled. - - # - - # This API is 🧪 experimental. - - # - - # """ - # if self.original_attn_processors is not None: - # self.set_attn_processor(self.original_attn_processors) def forward( self, @@ -679,8 +565,10 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): first_frame = hidden_states[:, :1].repeat(1, 1 + remaining_frames, 1, 1, 1) hidden_states = torch.cat([first_frame, hidden_states[:, 1:]], dim=1) + hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) hidden_states = self.embedding_dropout(hidden_states) + text_seq_length = encoder_hidden_states.shape[1] encoder_hidden_states = hidden_states[:, :text_seq_length] @@ -760,7 +648,6 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): output = torch.cat([output, recovered_uncond]) else: for i, block in enumerate(self.transformer_blocks): - #print("block", i) hidden_states, encoder_hidden_states = block( hidden_states=hidden_states, encoder_hidden_states=encoder_hidden_states, diff --git a/model_loading.py b/model_loading.py index 91c6ed3..00dfcee 100644 --- a/model_loading.py +++ b/model_loading.py @@ -261,7 +261,7 @@ class DownloadAndLoadCogVideoModel: if "CogVideoXBlock" in str(block): pipe.transformer.transformer_blocks[i] = torch.compile(block, fullgraph=False, dynamic=False, backend="inductor") - + elif compile == "onediff": from onediffx import compile_pipe @@ -274,7 +274,7 @@ class DownloadAndLoadCogVideoModel: ignores=["vae"], fuse_qkv_projections=True if pab_config is None else False, ) - + pipeline = { "pipe": pipe, "dtype": dtype, @@ -453,6 +453,8 @@ class DownloadAndLoadCogVideoGGUFModel: if enable_sequential_cpu_offload: pipe.enable_sequential_cpu_offload() + + pipeline = { "pipe": pipe, "dtype": vae_dtype, diff --git a/nodes.py b/nodes.py index 4e4ce6f..fe4d367 100644 --- a/nodes.py +++ b/nodes.py @@ -861,6 +861,7 @@ class CogVideoSampler: pipe.transformer.fastercache_counter = 0 autocastcondition = not pipeline["onediff"] or not dtype == torch.float32 + autocastcondition = False ##todo autocast_context = torch.autocast(mm.get_autocast_device(device)) if autocastcondition else nullcontext() with autocast_context: latents = pipeline["pipe"]( From 9a64e1ae5e799fe8e8d74d07fdc4d585f3f830cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 04:16:46 +0200 Subject: [PATCH 05/49] Update model_loading.py --- model_loading.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/model_loading.py b/model_loading.py index 00dfcee..6df4c55 100644 --- a/model_loading.py +++ b/model_loading.py @@ -144,6 +144,11 @@ class DownloadAndLoadCogVideoModel: base_path = os.path.join(download_path, "CogVideo2B") download_path = base_path repo_id = model + elif "1.5-T2V" in model: + base_path = 
os.path.join(download_path, "CogVideoX-5b-1.5") + download_path = base_path + transformer_path = os.path.join(base_path, "transformer_T2V") + repo_id = "kijai/CogVideoX-5b-1.5" else: base_path = os.path.join(download_path, (model.split("/")[-1])) download_path = base_path @@ -172,6 +177,8 @@ class DownloadAndLoadCogVideoModel: transformer = CogVideoXTransformer3DModelFunPAB.from_pretrained(base_path, subfolder="transformer") else: transformer = CogVideoXTransformer3DModelFun.from_pretrained(base_path, subfolder="transformer") + elif "1.5-T2V" in model: + transformer = CogVideoXTransformer3DModel.from_pretrained(transformer_path) else: if pab_config is not None: transformer = CogVideoXTransformer3DModelPAB.from_pretrained(base_path, subfolder="transformer") From 2eb9b81d277c07738a7bf147176381f8cffcb5e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jukka=20Sepp=C3=A4nen?= <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 04:41:07 +0200 Subject: [PATCH 06/49] fp8 --- fp8_optimization.py | 14 +++++++++----- model_loading.py | 7 ++++--- nodes.py | 3 +-- 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/fp8_optimization.py b/fp8_optimization.py index b01ac91..05b0146 100644 --- a/fp8_optimization.py +++ b/fp8_optimization.py @@ -36,10 +36,14 @@ def fp8_linear_forward(cls, original_dtype, input): else: return cls.original_forward(input) -def convert_fp8_linear(module, original_dtype): +def convert_fp8_linear(module, original_dtype, params_to_keep={}): setattr(module, "fp8_matmul_enabled", True) + + for name, module in module.named_modules(): - if isinstance(module, nn.Linear): - original_forward = module.forward - setattr(module, "original_forward", original_forward) - setattr(module, "forward", lambda input, m=module: fp8_linear_forward(m, original_dtype, input)) + if not any(keyword in name for keyword in params_to_keep): + if isinstance(module, nn.Linear): + print(name) + original_forward = module.forward + setattr(module, "original_forward", original_forward) + setattr(module, "forward", lambda input, m=module: fp8_linear_forward(m, original_dtype, input)) diff --git a/model_loading.py b/model_loading.py index 6df4c55..7adc9d6 100644 --- a/model_loading.py +++ b/model_loading.py @@ -153,7 +153,6 @@ class DownloadAndLoadCogVideoModel: base_path = os.path.join(download_path, (model.split("/")[-1])) download_path = base_path repo_id = model - if "2b" in model: scheduler_path = os.path.join(script_directory, 'configs', 'scheduler_config_2b.json') @@ -193,13 +192,15 @@ class DownloadAndLoadCogVideoModel: #fp8 if fp8_transformer == "enabled" or fp8_transformer == "fastmode": for name, param in transformer.named_parameters(): - params_to_keep = {"patch_embed", "lora", "pos_embedding"} + params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding"} if not any(keyword in name for keyword in params_to_keep): param.data = param.data.to(torch.float8_e4m3fn) if fp8_transformer == "fastmode": from .fp8_optimization import convert_fp8_linear - convert_fp8_linear(transformer, dtype) + if "1.5" in model: + params_to_keep = {"norm","ff"} + convert_fp8_linear(transformer, dtype, params_to_keep=params_to_keep) with open(scheduler_path) as f: scheduler_config = json.load(f) diff --git a/nodes.py b/nodes.py index fe4d367..fa5e3ad 100644 --- a/nodes.py +++ b/nodes.py @@ -861,8 +861,7 @@ class CogVideoSampler: pipe.transformer.fastercache_counter = 0 autocastcondition = not pipeline["onediff"] or not dtype == torch.float32 - autocastcondition = False ##todo - autocast_context = 
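convert_fp8_linear above patches the forward of every nn.Linear except those whose qualified name hits a keyword in params_to_keep. The filtering can be isolated as below (module names assumed); a tuple default also sidesteps the shared mutable {} default:

import torch.nn as nn

def linears_to_wrap(model: nn.Module, params_to_keep=("norm", "ff")):
    selected = []
    for name, module in model.named_modules():
        if any(keyword in name for keyword in params_to_keep):
            continue                      # keep these in the original dtype
        if isinstance(module, nn.Linear):
            selected.append(name)         # the real code swaps module.forward here
    return selected

model = nn.ModuleDict({"ff": nn.Linear(4, 4), "to_q": nn.Linear(4, 4)})
assert linears_to_wrap(model) == ["to_q"]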
torch.autocast(mm.get_autocast_device(device)) if autocastcondition else nullcontext() + autocast_context = torch.autocast(mm.get_autocast_device(device), dtype=dtype) if autocastcondition else nullcontext() with autocast_context: latents = pipeline["pipe"]( num_inference_steps=steps, From 643bbc18c1cc7c37a0a8270fd09235222462b428 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 12:13:52 +0200 Subject: [PATCH 07/49] i2v --- custom_cogvideox_transformer_3d.py | 13 +++++++++++-- model_loading.py | 31 +++++++++++++++++++----------- nodes.py | 6 +++--- pipeline_cogvideox.py | 8 ++------ 4 files changed, 36 insertions(+), 22 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index 9bbd87b..1003aa7 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -254,7 +254,7 @@ class CogVideoXBlock(nn.Module): norm_hidden_states = rearrange(h, "(B T) C H W -> B (T H W) C", T=T) del h, fuser - #fastercache + #region fastercache B = norm_hidden_states.shape[0] if fastercache_counter >= fastercache_start_step + 3 and fastercache_counter%3!=0 and self.cached_hidden_states[-1].shape[0] >= B: attn_hidden_states = ( @@ -365,6 +365,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): flip_sin_to_cos: bool = True, freq_shift: int = 0, time_embed_dim: int = 512, + ofs_embed_dim: Optional[int] = None, text_embed_dim: int = 4096, num_layers: int = 30, dropout: float = 0.0, @@ -373,7 +374,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): sample_height: int = 60, sample_frames: int = 49, patch_size: int = 2, - patch_size_t: int = 2, + patch_size_t: int = None, temporal_compression_ratio: int = 4, max_text_seq_length: int = 226, activation_fn: str = "gelu-approximate", @@ -420,6 +421,11 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift) self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn) + self.ofs_embedding = None + + if ofs_embed_dim: + self.ofs_embedding = TimestepEmbedding(ofs_embed_dim, ofs_embed_dim, timestep_activation_fn) # same as time embeddings, for ofs + # 3. Define spatio-temporal transformers blocks self.transformer_blocks = nn.ModuleList( [ @@ -553,6 +559,9 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=hidden_states.dtype) emb = self.time_embedding(t_emb, timestep_cond) + if self.ofs_embedding is not None: #1.5 I2V + emb_ofs = self.ofs_embedding(emb, timestep_cond) + emb = emb + emb_ofs # 2. 
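The ofs path above gives the 1.5 I2V model a second learned embedding that is simply added onto the time embedding; as wired in this work-in-progress patch, it consumes the time embedding itself. A rough sketch with a stand-in module (TinyTimestepEmbedding is hypothetical, approximating diffusers' TimestepEmbedding interface):

import torch
import torch.nn as nn

class TinyTimestepEmbedding(nn.Module):          # hypothetical stand-in
    def __init__(self, in_dim: int, out_dim: int):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(in_dim, out_dim), nn.SiLU(), nn.Linear(out_dim, out_dim))

    def forward(self, sample: torch.Tensor, condition=None) -> torch.Tensor:
        return self.net(sample)

ofs_embed_dim = 512
ofs_embedding = TinyTimestepEmbedding(ofs_embed_dim, ofs_embed_dim)

emb = torch.randn(2, ofs_embed_dim)              # time embedding
emb = emb + ofs_embedding(emb)                   # mirrors `emb = emb + emb_ofs`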
Patch embedding p = self.config.patch_size diff --git a/model_loading.py b/model_loading.py index 7adc9d6..bb3f774 100644 --- a/model_loading.py +++ b/model_loading.py @@ -72,6 +72,7 @@ class DownloadAndLoadCogVideoModel: "THUDM/CogVideoX-5b", "THUDM/CogVideoX-5b-I2V", "kijai/CogVideoX-5b-1.5-T2V", + "kijai/CogVideoX-5b-1.5-I2V", "bertjiazheng/KoolCogVideoX-5b", "kijai/CogVideoX-Fun-2b", "kijai/CogVideoX-Fun-5b", @@ -97,6 +98,7 @@ class DownloadAndLoadCogVideoModel: "block_edit": ("TRANSFORMERBLOCKS", {"default": None}), "lora": ("COGLORA", {"default": None}), "compile_args":("COMPILEARGS", ), + "load_device": (["main_device", "offload_device"], {"default": "main_device"}), } } @@ -106,12 +108,13 @@ class DownloadAndLoadCogVideoModel: CATEGORY = "CogVideoWrapper" DESCRIPTION = "Downloads and loads the selected CogVideo model from Huggingface to 'ComfyUI/models/CogVideo'" - def loadmodel(self, model, precision, fp8_transformer="disabled", compile="disabled", enable_sequential_cpu_offload=False, pab_config=None, block_edit=None, lora=None, compile_args=None): + def loadmodel(self, model, precision, fp8_transformer="disabled", compile="disabled", enable_sequential_cpu_offload=False, pab_config=None, block_edit=None, lora=None, compile_args=None, load_device="main_device"): check_diffusers_version() device = mm.get_torch_device() offload_device = mm.unet_offload_device() + transformer_load_device = device if load_device == "main_device" else offload_device mm.soft_empty_cache() dtype = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[precision] @@ -134,6 +137,8 @@ class DownloadAndLoadCogVideoModel: if not os.path.exists(base_path): base_path = os.path.join(download_path, (model.split("/")[-1])) download_path = base_path + subfolder = "transformer" + allow_patterns = ["*transformer*", "*scheduler*", "*vae*"] elif "2b" in model: if 'img2vid' in model: @@ -144,27 +149,33 @@ class DownloadAndLoadCogVideoModel: base_path = os.path.join(download_path, "CogVideo2B") download_path = base_path repo_id = model - elif "1.5-T2V" in model: + subfolder = "transformer" + allow_patterns = ["*transformer*", "*scheduler*", "*vae*"] + elif "1.5-T2V" in model or "1.5-I2V" in model: base_path = os.path.join(download_path, "CogVideoX-5b-1.5") download_path = base_path - transformer_path = os.path.join(base_path, "transformer_T2V") + subfolder = "transformer_T2V" if "1.5-T2V" in model else "transformer_I2V" + allow_patterns = [f"*{subfolder}*"] repo_id = "kijai/CogVideoX-5b-1.5" else: base_path = os.path.join(download_path, (model.split("/")[-1])) download_path = base_path repo_id = model + subfolder = "transformer" + allow_patterns = ["*transformer*", "*scheduler*", "*vae*"] if "2b" in model: scheduler_path = os.path.join(script_directory, 'configs', 'scheduler_config_2b.json') else: scheduler_path = os.path.join(script_directory, 'configs', 'scheduler_config_5b.json') - if not os.path.exists(base_path) or not os.path.exists(os.path.join(base_path, "transformer")): + if not os.path.exists(base_path) or not os.path.exists(os.path.join(base_path, subfolder)): log.info(f"Downloading model to: {base_path}") from huggingface_hub import snapshot_download snapshot_download( repo_id=repo_id, + allow_patterns=allow_patterns, ignore_patterns=["*text_encoder*", "*tokenizer*"], local_dir=download_path, local_dir_use_symlinks=False, @@ -173,18 +184,16 @@ class DownloadAndLoadCogVideoModel: # transformer if "Fun" in model: if pab_config is not None: - transformer = 
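The allow_patterns plumbing above keeps the download to one transformer variant plus the shared VAE and scheduler files. Standalone, the call looks like this (the patterns shown are illustrative, matching the T2V subfolder case):

from huggingface_hub import snapshot_download

snapshot_download(
    repo_id="kijai/CogVideoX-5b-1.5",
    allow_patterns=["*transformer_T2V*", "*vae*", "*scheduler*"],
    ignore_patterns=["*text_encoder*", "*tokenizer*"],
    local_dir="models/CogVideo/CogVideoX-5b-1.5",
    local_dir_use_symlinks=False,
)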
CogVideoXTransformer3DModelFunPAB.from_pretrained(base_path, subfolder="transformer") + transformer = CogVideoXTransformer3DModelFunPAB.from_pretrained(base_path, subfolder=subfolder) else: - transformer = CogVideoXTransformer3DModelFun.from_pretrained(base_path, subfolder="transformer") - elif "1.5-T2V" in model: - transformer = CogVideoXTransformer3DModel.from_pretrained(transformer_path) + transformer = CogVideoXTransformer3DModelFun.from_pretrained(base_path, subfolder=subfolder) else: if pab_config is not None: - transformer = CogVideoXTransformer3DModelPAB.from_pretrained(base_path, subfolder="transformer") + transformer = CogVideoXTransformer3DModelPAB.from_pretrained(base_path, subfolder=subfolder) else: - transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder="transformer") + transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder=subfolder) - transformer = transformer.to(dtype).to(offload_device) + transformer = transformer.to(dtype).to(transformer_load_device) if block_edit is not None: transformer = remove_specific_blocks(transformer, block_edit) diff --git a/nodes.py b/nodes.py index fa5e3ad..d8a7cbc 100644 --- a/nodes.py +++ b/nodes.py @@ -782,9 +782,9 @@ class CogVideoSampler: "pipeline": ("COGVIDEOPIPE",), "positive": ("CONDITIONING", ), "negative": ("CONDITIONING", ), - "height": ("INT", {"default": 480, "min": 128, "max": 2048, "step": 8}), - "width": ("INT", {"default": 720, "min": 128, "max": 2048, "step": 8}), - "num_frames": ("INT", {"default": 49, "min": 16, "max": 1024, "step": 1}), + "height": ("INT", {"default": 480, "min": 128, "max": 2048, "step": 16}), + "width": ("INT", {"default": 720, "min": 128, "max": 2048, "step": 16}), + "num_frames": ("INT", {"default": 48, "min": 16, "max": 1024, "step": 1}), "steps": ("INT", {"default": 50, "min": 1}), "cfg": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 30.0, "step": 0.01}), "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index f2fb927..29ebfb3 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -442,10 +442,6 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): argument. """ - #assert ( - # num_frames <= 48 and num_frames % fps == 0 and fps == 8 - #), f"The number of frames must be divisible by {fps=} and less than 48 frames (for now). Other values are not supported in CogVideoX." - height = height or self.transformer.config.sample_size * self.vae_scale_factor_spatial width = width or self.transformer.config.sample_size * self.vae_scale_factor_spatial num_videos_per_prompt = 1 @@ -480,8 +476,8 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): # 5. Prepare latents. 
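The widget step change above (8 to 16) lines up with the model's geometry: with a VAE spatial factor of 8 and patch_size 2, pixel dimensions must be divisible by 16 to patchify cleanly. This rationale is an assumption, but the check is cheap to state:

def check_dims(height: int, width: int, vae_scale: int = 8, p: int = 2) -> None:
    mult = vae_scale * p
    if height % mult or width % mult:
        raise ValueError(f"height and width must be multiples of {mult}, got {height}x{width}")

check_dims(768, 1360)   # passes; 770x1360 would raise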
latent_channels = self.vae.config.latent_channels - if latents is None and num_frames == t_tile_length: - num_frames += 1 + #if latents is None and num_frames == t_tile_length: + # num_frames += 1 if self.original_mask is not None: image_latents = latents From 1c3aff900021f73eef742a8aec59e1500a22832d Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 15:15:10 +0200 Subject: [PATCH 08/49] some cleanup --- model_loading.py | 13 ++++--------- nodes.py | 7 ++++++- pipeline_cogvideox.py | 28 ++++------------------------ 3 files changed, 14 insertions(+), 34 deletions(-) diff --git a/model_loading.py b/model_loading.py index bb3f774..15166c6 100644 --- a/model_loading.py +++ b/model_loading.py @@ -60,7 +60,7 @@ class CogVideoLoraSelect: cog_loras_list.append(cog_lora) print(cog_loras_list) return (cog_loras_list,) - +#region DownloadAndLoadCogVideoModel class DownloadAndLoadCogVideoModel: @classmethod def INPUT_TYPES(s): @@ -259,12 +259,9 @@ class DownloadAndLoadCogVideoModel: if fuse: pipe.fuse_lora(lora_scale=1 / lora_rank, components=["transformer"]) - - if enable_sequential_cpu_offload: pipe.enable_sequential_cpu_offload() - # compilation if compile == "torch": pipe.transformer.to(memory_format=torch.channels_last) @@ -277,8 +274,6 @@ class DownloadAndLoadCogVideoModel: for i, block in enumerate(pipe.transformer.transformer_blocks): if "CogVideoXBlock" in str(block): pipe.transformer.transformer_blocks[i] = torch.compile(block, fullgraph=False, dynamic=False, backend="inductor") - - elif compile == "onediff": from onediffx import compile_pipe @@ -303,7 +298,7 @@ class DownloadAndLoadCogVideoModel: } return (pipeline,) - +#region GGUF class DownloadAndLoadCogVideoGGUFModel: @classmethod def INPUT_TYPES(s): @@ -483,7 +478,7 @@ class DownloadAndLoadCogVideoGGUFModel: } return (pipeline,) - +#region Tora class DownloadAndLoadToraModel: @classmethod def INPUT_TYPES(s): @@ -591,7 +586,7 @@ class DownloadAndLoadToraModel: } return (toramodel,) - +#region controlnet class DownloadAndLoadCogVideoControlNet: @classmethod def INPUT_TYPES(s): diff --git a/nodes.py b/nodes.py index d8a7cbc..44e1d7b 100644 --- a/nodes.py +++ b/nodes.py @@ -816,7 +816,12 @@ class CogVideoSampler: base_path = pipeline["base_path"] assert "fun" not in base_path.lower(), "'Fun' models not supported in 'CogVideoSampler', use the 'CogVideoXFunSampler'" - assert ("I2V" not in pipeline.get("model_name","") or num_frames == 49 or context_options is not None), "I2V model can only do 49 frames" + assert ( + "I2V" not in pipeline.get("model_name", "") or + "1.5" in pipeline.get("model_name", "") or + num_frames == 49 or + context_options is not None + ), "1.0 I2V model can only do 49 frames" device = mm.get_torch_device() offload_device = mm.unet_offload_device() diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 29ebfb3..4704255 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -317,18 +317,17 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): width: int, num_frames: int, device: torch.device, - start_frame: int = None, - end_frame: int = None, ) -> Tuple[torch.Tensor, torch.Tensor]: grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) + p = self.transformer.config.patch_size p_t = self.transformer.config.patch_size_t or 1 base_size_width = self.transformer.config.sample_width // p base_size_height = 
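The chained assert added in nodes.py above reads more easily as a function. The intent: only 1.0 I2V checkpoints are pinned to 49 frames, while 1.5 models and context-windowed runs are exempt (model-name strings assumed):

def validate_num_frames(model_name: str, num_frames: int, has_context_options: bool) -> None:
    is_i2v_1_0 = "I2V" in model_name and "1.5" not in model_name
    if is_i2v_1_0 and num_frames != 49 and not has_context_options:
        raise ValueError("1.0 I2V model can only do 49 frames")

validate_num_frames("CogVideoX-5b-I2V", 49, False)       # ok
validate_num_frames("CogVideoX-5b-1.5-I2V", 81, False)   # ok, 1.5 is exempt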
self.transformer.config.sample_height // p base_num_frames = (num_frames + p_t - 1) // p_t - + grid_crops_coords = get_resize_crop_region_for_grid( (grid_height, grid_width), base_size_width, base_size_height ) @@ -336,19 +335,8 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): embed_dim=self.transformer.config.attention_head_dim, crops_coords=grid_crops_coords, grid_size=(grid_height, grid_width), - temporal_size=base_num_frames, - use_real=True, + temporal_size=base_num_frames ) - - if start_frame is not None: - freqs_cos = freqs_cos.view(num_frames, grid_height * grid_width, -1) - freqs_sin = freqs_sin.view(num_frames, grid_height * grid_width, -1) - - freqs_cos = freqs_cos[start_frame:end_frame] - freqs_sin = freqs_sin[start_frame:end_frame] - - freqs_cos = freqs_cos.view(-1, freqs_cos.shape[-1]) - freqs_sin = freqs_sin.view(-1, freqs_sin.shape[-1]) freqs_cos = freqs_cos.to(device=device) freqs_sin = freqs_sin.to(device=device) @@ -535,13 +523,6 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - # 6.5. Create rotary embeds if required - image_rotary_emb = ( - self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device) - if self.transformer.config.use_rotary_positional_embeddings - else None - ) - # masks if self.original_mask is not None: mask = self.original_mask.to(device) @@ -579,7 +560,7 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): use_temporal_tiling = False use_context_schedule = False logger.info("Temporal tiling and context schedule disabled") - # 7. Create rotary embeds if required + # 8.5. 
Create rotary embeds if required image_rotary_emb = ( self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device) if self.transformer.config.use_rotary_positional_embeddings @@ -882,7 +863,6 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): controlnet_states=controlnet_states, controlnet_weights=control_weights, video_flow_features=video_flow_features if (tora is not None and tora["start_percent"] <= current_step_percentage <= tora["end_percent"]) else None, - )[0] noise_pred = noise_pred.float() From 9a797229f251607e77951c940c9826ea29dad656 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 15:41:57 +0200 Subject: [PATCH 09/49] check --- nodes.py | 4 +++- requirements.txt | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index 44e1d7b..4603799 100644 --- a/nodes.py +++ b/nodes.py @@ -5,6 +5,8 @@ import comfy.model_management as mm from einops import rearrange from contextlib import nullcontext +from .utils import log, check_diffusers_version +check_diffusers_version() from diffusers.schedulers import ( CogVideoXDDIMScheduler, CogVideoXDPMScheduler, @@ -42,7 +44,7 @@ from PIL import Image import numpy as np import json -from .utils import log, check_diffusers_version + script_directory = os.path.dirname(os.path.abspath(__file__)) diff --git a/requirements.txt b/requirements.txt index 8dddf7a..2b24b6a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ huggingface_hub -diffusers>=0.30.3 +diffusers>=0.31.0 accelerate>=0.33.0 einops peft \ No newline at end of file From 634c22db505716a8a828846b3086ed3267746f5a Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 17:05:55 +0200 Subject: [PATCH 10/49] sageattn --- custom_cogvideox_transformer_3d.py | 45 +++++++++++++++++------------- fp8_optimization.py | 2 -- model_loading.py | 10 +++++-- 3 files changed, 32 insertions(+), 25 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index 1003aa7..79e2ebb 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -32,6 +32,7 @@ from diffusers.models.modeling_outputs import Transformer2DModelOutput from diffusers.models.modeling_utils import ModelMixin from diffusers.models.normalization import AdaLayerNorm, CogVideoXLayerNormZero from diffusers.loaders import PeftAdapterMixin +from diffusers.models.embeddings import apply_rotary_emb from .embeddings import CogVideoXPatchEmbed @@ -40,9 +41,7 @@ logger = logging.get_logger(__name__) # pylint: disable=invalid-name try: from sageattention import sageattn SAGEATTN_IS_AVAILABLE = True - logger.info("Using sageattn") except: - logger.info("sageattn not found, using sdpa") SAGEATTN_IS_AVAILABLE = False def fft(tensor): @@ -73,7 +72,6 @@ class CogVideoXAttnProcessor2_0: raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") @torch.compiler.disable() - def __call__( self, attn: Attention, @@ -81,6 +79,7 @@ class CogVideoXAttnProcessor2_0: encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, + attention_mode: Optional[str] = None, ) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) @@ -112,20 +111,21 @@ class CogVideoXAttnProcessor2_0: # Apply RoPE if needed if image_rotary_emb is not None: - from diffusers.models.embeddings import 
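The sageattention availability probe above uses a bare except, which also swallows unrelated failures raised inside the package. A narrower sketch of the same pattern:

try:
    from sageattention import sageattn
    SAGEATTN_IS_AVAILABLE = True
except ImportError:
    sageattn = None
    SAGEATTN_IS_AVAILABLE = False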
apply_rotary_emb - query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) if not attn.is_cross_attention: - key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) - - #if SAGEATTN_IS_AVAILABLE: - # hidden_states = sageattn(query, key, value, is_causal=False) - #else: - hidden_states = F.scaled_dot_product_attention( - query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False - ) - if torch.isinf(hidden_states).any(): - raise ValueError(f"hidden_states after dot product has inf") + key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) + + if attention_mode == "sageattn": + if SAGEATTN_IS_AVAILABLE: + hidden_states = sageattn(query, key, value, attn_mask=attention_mask, dropout_p=0.0,is_causal=False) + else: + raise ImportError("sageattn not found") + else: + hidden_states = F.scaled_dot_product_attention( + query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False + ) + #if torch.isinf(hidden_states).any(): + # raise ValueError(f"hidden_states after dot product has inf") hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) @@ -193,6 +193,7 @@ class CogVideoXBlock(nn.Module): ff_inner_dim: Optional[int] = None, ff_bias: bool = True, attention_out_bias: bool = True, + attention_mode: Optional[str] = None, ): super().__init__() @@ -224,6 +225,7 @@ class CogVideoXBlock(nn.Module): ) self.cached_hidden_states = [] self.cached_encoder_hidden_states = [] + self.attention_mode = attention_mode def forward( self, @@ -235,7 +237,7 @@ class CogVideoXBlock(nn.Module): fuser=None, fastercache_counter=0, fastercache_start_step=15, - fastercache_device="cuda:0" + fastercache_device="cuda:0", ) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) @@ -271,7 +273,8 @@ class CogVideoXBlock(nn.Module): attn_hidden_states, attn_encoder_hidden_states = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, - image_rotary_emb=image_rotary_emb + image_rotary_emb=image_rotary_emb, + attention_mode=self.attention_mode, ) if fastercache_counter == fastercache_start_step: self.cached_hidden_states = [attn_hidden_states.to(fastercache_device), attn_hidden_states.to(fastercache_device)] @@ -386,6 +389,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): use_rotary_positional_embeddings: bool = False, use_learned_positional_embeddings: bool = False, patch_bias: bool = True, + attention_mode: Optional[str] = None, ): super().__init__() inner_dim = num_attention_heads * attention_head_dim @@ -471,6 +475,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): self.fastercache_lf_step = 40 self.fastercache_hf_step = 30 self.fastercache_device = "cuda" + self.attention_mode = attention_mode def _set_gradient_checkpointing(self, module, value=False): self.gradient_checkpointing = value @@ -667,9 +672,9 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): fastercache_counter = self.fastercache_counter, fastercache_device = self.fastercache_device ) - has_nan = torch.isnan(hidden_states).any() - if has_nan: - raise ValueError(f"block output hidden_states has nan: {has_nan}") + #has_nan = torch.isnan(hidden_states).any() + #if has_nan: + # raise ValueError(f"block output hidden_states has nan: {has_nan}") if (controlnet_states is not None) and (i < len(controlnet_states)): controlnet_states_block = 
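The attention_mode threading above ends in a two-way dispatch. Reduced to a function for clarity (the sageattn keyword arguments are taken from this patch and not verified against the upstream package):

import torch.nn.functional as F

def dispatch_attention(query, key, value, attention_mask=None, attention_mode=None):
    if attention_mode == "sageattn":
        if not SAGEATTN_IS_AVAILABLE:
            raise ImportError("sageattn not found")
        return sageattn(query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False)
    return F.scaled_dot_product_attention(
        query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
    )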
controlnet_states[i] diff --git a/fp8_optimization.py b/fp8_optimization.py index 05b0146..09f026d 100644 --- a/fp8_optimization.py +++ b/fp8_optimization.py @@ -39,11 +39,9 @@ def fp8_linear_forward(cls, original_dtype, input): def convert_fp8_linear(module, original_dtype, params_to_keep={}): setattr(module, "fp8_matmul_enabled", True) - for name, module in module.named_modules(): if not any(keyword in name for keyword in params_to_keep): if isinstance(module, nn.Linear): - print(name) original_forward = module.forward setattr(module, "original_forward", original_forward) setattr(module, "forward", lambda input, m=module: fp8_linear_forward(m, original_dtype, input)) diff --git a/model_loading.py b/model_loading.py index 15166c6..dbc5804 100644 --- a/model_loading.py +++ b/model_loading.py @@ -60,6 +60,7 @@ class CogVideoLoraSelect: cog_loras_list.append(cog_lora) print(cog_loras_list) return (cog_loras_list,) + #region DownloadAndLoadCogVideoModel class DownloadAndLoadCogVideoModel: @classmethod @@ -98,6 +99,7 @@ class DownloadAndLoadCogVideoModel: "block_edit": ("TRANSFORMERBLOCKS", {"default": None}), "lora": ("COGLORA", {"default": None}), "compile_args":("COMPILEARGS", ), + "attention_mode": (["sdpa", "sageattn"], {"default": "sdpa"}), "load_device": (["main_device", "offload_device"], {"default": "main_device"}), } } @@ -108,9 +110,9 @@ class DownloadAndLoadCogVideoModel: CATEGORY = "CogVideoWrapper" DESCRIPTION = "Downloads and loads the selected CogVideo model from Huggingface to 'ComfyUI/models/CogVideo'" - def loadmodel(self, model, precision, fp8_transformer="disabled", compile="disabled", enable_sequential_cpu_offload=False, pab_config=None, block_edit=None, lora=None, compile_args=None, load_device="main_device"): - - check_diffusers_version() + def loadmodel(self, model, precision, fp8_transformer="disabled", compile="disabled", + enable_sequential_cpu_offload=False, pab_config=None, block_edit=None, lora=None, compile_args=None, + attention_mode="sdpa", load_device="main_device"): device = mm.get_torch_device() offload_device = mm.unet_offload_device() @@ -195,6 +197,8 @@ class DownloadAndLoadCogVideoModel: transformer = transformer.to(dtype).to(transformer_load_device) + transformer.attention_mode = attention_mode + if block_edit is not None: transformer = remove_specific_blocks(transformer, block_edit) From 75aa19b4e1958faa7ea53c03ff476fb573d1cf33 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 18:12:03 +0200 Subject: [PATCH 11/49] Update model_loading.py --- model_loading.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model_loading.py b/model_loading.py index dbc5804..7af2bf3 100644 --- a/model_loading.py +++ b/model_loading.py @@ -32,7 +32,7 @@ class CogVideoLoraSelect: "required": { "lora": (folder_paths.get_filename_list("cogvideox_loras"), {"tooltip": "LORA models are expected to be in ComfyUI/models/CogVideo/loras with .safetensors extension"}), - "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01, "tooltip": "LORA strength, set to 0.0 to unmerge the LORA"}), + "strength": ("FLOAT", {"default": 1.0, "min": -10.0, "max": 10.0, "step": 0.0001, "tooltip": "LORA strength, set to 0.0 to unmerge the LORA"}), }, "optional": { "prev_lora":("COGLORA", {"default": None, "tooltip": "For loading multiple LoRAs"}), From 7162d1040da7624101488a55ec2f3e0f3d496b7f Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 18:14:48 +0200 
Subject: [PATCH 12/49] Update model_loading.py --- model_loading.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/model_loading.py b/model_loading.py index 7af2bf3..896c8a4 100644 --- a/model_loading.py +++ b/model_loading.py @@ -243,8 +243,7 @@ class DownloadAndLoadCogVideoModel: adapter_list = [] adapter_weights = [] for l in lora: - if l["fuse_lora"]: - fuse = True + fuse = True if l["fuse_lora"] else False lora_sd = load_torch_file(l["path"]) for key, val in lora_sd.items(): if "lora_B" in key: From a630bb33140a7a90f55c818f9c7d9638be90e188 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 22:17:10 +0200 Subject: [PATCH 13/49] update --- model_loading.py | 89 +++++++++++++++++++++++++++++++++---------- nodes.py | 1 + pipeline_cogvideox.py | 4 +- 3 files changed, 71 insertions(+), 23 deletions(-) diff --git a/model_loading.py b/model_loading.py index 896c8a4..ad0dfc4 100644 --- a/model_loading.py +++ b/model_loading.py @@ -157,7 +157,7 @@ class DownloadAndLoadCogVideoModel: base_path = os.path.join(download_path, "CogVideoX-5b-1.5") download_path = base_path subfolder = "transformer_T2V" if "1.5-T2V" in model else "transformer_I2V" - allow_patterns = [f"*{subfolder}*"] + allow_patterns = [f"*{subfolder}*", "*vae*", "*scheduler*"] repo_id = "kijai/CogVideoX-5b-1.5" else: base_path = os.path.join(download_path, (model.split("/")[-1])) @@ -204,15 +204,17 @@ class DownloadAndLoadCogVideoModel: #fp8 if fp8_transformer == "enabled" or fp8_transformer == "fastmode": + params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding"} + if "1.5" in model: + params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding", "norm","ofs_embedding", "norm_final", "norm_out", "proj_out"} for name, param in transformer.named_parameters(): - params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding"} if not any(keyword in name for keyword in params_to_keep): param.data = param.data.to(torch.float8_e4m3fn) if fp8_transformer == "fastmode": from .fp8_optimization import convert_fp8_linear if "1.5" in model: - params_to_keep = {"norm","ff"} + params_to_keep.update({"ff"}) convert_fp8_linear(transformer, dtype, params_to_keep=params_to_keep) with open(scheduler_path) as f: @@ -311,12 +313,12 @@ class DownloadAndLoadCogVideoGGUFModel: [ "CogVideoX_5b_GGUF_Q4_0.safetensors", "CogVideoX_5b_I2V_GGUF_Q4_0.safetensors", + "CogVideoX_5b_1_5_I2V_GGUF_Q4_0.safetensors", "CogVideoX_5b_fun_GGUF_Q4_0.safetensors", "CogVideoX_5b_fun_1_1_GGUF_Q4_0.safetensors", "CogVideoX_5b_fun_1_1_Pose_GGUF_Q4_0.safetensors", "CogVideoX_5b_Interpolation_GGUF_Q4_0.safetensors", "CogVideoX_5b_Tora_GGUF_Q4_0.safetensors", - ], ), "vae_precision": (["fp16", "fp32", "bf16"], {"default": "bf16", "tooltip": "VAE dtype"}), @@ -327,8 +329,9 @@ class DownloadAndLoadCogVideoGGUFModel: "optional": { "pab_config": ("PAB_CONFIG", {"default": None}), "block_edit": ("TRANSFORMERBLOCKS", {"default": None}), + #"lora": ("COGLORA", {"default": None}), "compile": (["disabled","torch"], {"tooltip": "compile the model for faster inference, these are advanced options only available on Linux, see readme for more info"}), - + "attention_mode": (["sdpa", "sageattn"], {"default": "sdpa"}), } } @@ -337,7 +340,8 @@ class DownloadAndLoadCogVideoGGUFModel: FUNCTION = "loadmodel" CATEGORY = "CogVideoWrapper" - def loadmodel(self, model, vae_precision, fp8_fastmode, load_device, enable_sequential_cpu_offload, pab_config=None, block_edit=None, 
compile="disabled"): + def loadmodel(self, model, vae_precision, fp8_fastmode, load_device, enable_sequential_cpu_offload, + pab_config=None, block_edit=None, compile="disabled", attention_mode="sdpa"): check_diffusers_version() @@ -375,7 +379,7 @@ class DownloadAndLoadCogVideoGGUFModel: with open(transformer_path) as f: transformer_config = json.load(f) - sd = load_torch_file(gguf_path) + from . import mz_gguf_loader import importlib @@ -393,6 +397,13 @@ class DownloadAndLoadCogVideoGGUFModel: transformer = CogVideoXTransformer3DModelFun.from_config(transformer_config) elif "I2V" in model or "Interpolation" in model: transformer_config["in_channels"] = 32 + if "1_5" in model: + transformer_config["ofs_embed_dim"] = 512 + transformer_config["use_learned_positional_embeddings"] = False + transformer_config["patch_size_t"] = 2 + transformer_config["patch_bias"] = False + transformer_config["sample_height"] = 96 + transformer_config["sample_width"] = 170 if pab_config is not None: transformer = CogVideoXTransformer3DModelPAB.from_config(transformer_config) else: @@ -405,23 +416,23 @@ class DownloadAndLoadCogVideoGGUFModel: transformer = CogVideoXTransformer3DModel.from_config(transformer_config) if "2b" in model: - for name, param in transformer.named_parameters(): - if name != "pos_embedding": - param.data = param.data.to(torch.float8_e4m3fn) - else: - param.data = param.data.to(torch.float16) - else: - transformer.to(torch.float8_e4m3fn) - + params_to_keep = {"patch_embed", "pos_embedding", "time_embedding"} + cast_dtype = torch.float16 + elif "1_5" in model: + params_to_keep = {"patch_embed", "time_embedding", "ofs_embedding", "norm_final", "norm_out", "proj_out", "norm"} + cast_dtype = torch.bfloat16 + for name, param in transformer.named_parameters(): + if not any(keyword in name for keyword in params_to_keep): + param.data = param.data.to(torch.bfloat16) + else: + param.data = param.data.to(cast_dtype) + #for name, param in transformer.named_parameters(): + # print(name, param.data.dtype) + if block_edit is not None: transformer = remove_specific_blocks(transformer, block_edit) - transformer = mz_gguf_loader.quantize_load_state_dict(transformer, sd, device="cpu") - if load_device == "offload_device": - transformer.to(offload_device) - else: - transformer.to(device) - + transformer.attention_mode = attention_mode if fp8_fastmode: from .fp8_optimization import convert_fp8_linear @@ -468,6 +479,42 @@ class DownloadAndLoadCogVideoGGUFModel: if enable_sequential_cpu_offload: pipe.enable_sequential_cpu_offload() + sd = load_torch_file(gguf_path) + + # #LoRAs + # if lora is not None: + # if "fun" in model.lower(): + # raise NotImplementedError("LoRA with GGUF is not supported for Fun models") + # from .lora_utils import merge_lora#, load_lora_into_transformer + # #for l in lora: + # # log.info(f"Merging LoRA weights from {l['path']} with strength {l['strength']}") + # # pipe.transformer = merge_lora(pipe.transformer, l["path"], l["strength"]) + # else: + # adapter_list = [] + # adapter_weights = [] + # for l in lora: + # lora_sd = load_torch_file(l["path"]) + # for key, val in lora_sd.items(): + # if "lora_B" in key: + # lora_rank = val.shape[1] + # break + # log.info(f"Loading rank {lora_rank} LoRA weights from {l['path']} with strength {l['strength']}") + # adapter_name = l['path'].split("/")[-1].split(".")[0] + # adapter_weight = l['strength'] + # pipe.load_lora_weights(l['path'], weight_name=l['path'].split("/")[-1], lora_rank=lora_rank, adapter_name=adapter_name) + + # #transformer = 
load_lora_into_transformer(lora, transformer) + # adapter_list.append(adapter_name) + # adapter_weights.append(adapter_weight) + # for l in lora: + # pipe.set_adapters(adapter_list, adapter_weights=adapter_weights) + # #pipe.fuse_lora(lora_scale=1 / lora_rank, components=["transformer"]) + + pipe.transformer = mz_gguf_loader.quantize_load_state_dict(pipe.transformer, sd, device="cpu") + if load_device == "offload_device": + pipe.transformer.to(offload_device) + else: + pipe.transformer.to(device) pipeline = { diff --git a/nodes.py b/nodes.py index 4603799..7fa9606 100644 --- a/nodes.py +++ b/nodes.py @@ -821,6 +821,7 @@ class CogVideoSampler: assert ( "I2V" not in pipeline.get("model_name", "") or "1.5" in pipeline.get("model_name", "") or + "1_5" in pipeline.get("model_name", "") or num_frames == 49 or context_options is not None ), "1.0 I2V model can only do 49 frames" diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 4704255..22bb8e7 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -21,7 +21,7 @@ import torch.nn.functional as F import math from diffusers.models import AutoencoderKLCogVideoX#, CogVideoXTransformer3DModel -from diffusers.pipelines.pipeline_utils import DiffusionPipeline +#from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor @@ -115,7 +115,7 @@ def retrieve_timesteps( timesteps = scheduler.timesteps return timesteps, num_inference_steps -class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): +class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): r""" Pipeline for text-to-video generation using CogVideoX. 
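
Note on the precision handling this patch settles into: rather than casting the whole transformer to a single dtype, parameters whose names match a keep-list stay at a higher precision while the bulk of the weights are cast down. A minimal sketch of that selective-cast pattern, with an illustrative keep-list (the real lists vary per model, as the hunks above show):

    import torch
    import torch.nn as nn

    def selective_cast(model: nn.Module, low_dtype: torch.dtype,
                       keep_dtype: torch.dtype, params_to_keep: set):
        # precision-sensitive parameters (patch/time embeddings, norms) keep
        # keep_dtype; everything else drops to low_dtype, e.g. float8_e4m3fn
        for name, param in model.named_parameters():
            if any(keyword in name for keyword in params_to_keep):
                param.data = param.data.to(keep_dtype)
            else:
                param.data = param.data.to(low_dtype)

    # e.g. selective_cast(transformer, torch.float8_e4m3fn, torch.bfloat16,
    #                     {"patch_embed", "time_embedding", "norm"})
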
From fb246f95ef30a16c96c1244ed9eeb2a4c19585f5 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sat, 9 Nov 2024 22:56:50 +0200 Subject: [PATCH 14/49] attention compile works with higher cache_size_limit --- custom_cogvideox_transformer_3d.py | 2 +- model_loading.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index 79e2ebb..ed955a4 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -71,7 +71,7 @@ class CogVideoXAttnProcessor2_0: if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") - @torch.compiler.disable() + #@torch.compiler.disable() def __call__( self, attn: Attention, diff --git a/model_loading.py b/model_loading.py index ad0dfc4..d89a268 100644 --- a/model_loading.py +++ b/model_loading.py @@ -262,7 +262,10 @@ class DownloadAndLoadCogVideoModel: for l in lora: pipe.set_adapters(adapter_list, adapter_weights=adapter_weights) if fuse: - pipe.fuse_lora(lora_scale=1 / lora_rank, components=["transformer"]) + lora_scale = 1 + if "dimensionx" in lora[-1]["path"].lower(): + lora_scale = lora_scale / lora_rank + pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"]) if enable_sequential_cpu_offload: pipe.enable_sequential_cpu_offload() From ea5ee0b017c7fbfade7a7eb36e05523fc16e14de Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sun, 10 Nov 2024 18:13:44 +0200 Subject: [PATCH 15/49] GGUF Q4 works --- model_loading.py | 13 +++++++----- mz_gguf_loader.py | 51 ++++++++++++++++++++++++++++++++++++++--------- 2 files changed, 50 insertions(+), 14 deletions(-) diff --git a/model_loading.py b/model_loading.py index d89a268..08218ca 100644 --- a/model_loading.py +++ b/model_loading.py @@ -206,7 +206,7 @@ class DownloadAndLoadCogVideoModel: if fp8_transformer == "enabled" or fp8_transformer == "fastmode": params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding"} if "1.5" in model: - params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding", "norm","ofs_embedding", "norm_final", "norm_out", "proj_out"} + params_to_keep.update({"norm1.linear.weight", "norm_k", "norm_q","ofs_embedding", "norm_final", "norm_out", "proj_out"}) for name, param in transformer.named_parameters(): if not any(keyword in name for keyword in params_to_keep): param.data = param.data.to(torch.float8_e4m3fn) @@ -214,7 +214,7 @@ class DownloadAndLoadCogVideoModel: if fp8_transformer == "fastmode": from .fp8_optimization import convert_fp8_linear if "1.5" in model: - params_to_keep.update({"ff"}) + params_to_keep.update({"ff"}) #otherwise NaNs convert_fp8_linear(transformer, dtype, params_to_keep=params_to_keep) with open(scheduler_path) as f: @@ -422,11 +422,11 @@ class DownloadAndLoadCogVideoGGUFModel: params_to_keep = {"patch_embed", "pos_embedding", "time_embedding"} cast_dtype = torch.float16 elif "1_5" in model: - params_to_keep = {"patch_embed", "time_embedding", "ofs_embedding", "norm_final", "norm_out", "proj_out", "norm"} + params_to_keep = {"norm1.linear.weight", "patch_embed", "time_embedding", "ofs_embedding", "norm_final", "norm_out", "proj_out"} cast_dtype = torch.bfloat16 for name, param in transformer.named_parameters(): if not any(keyword in name for keyword in params_to_keep): - param.data = param.data.to(torch.bfloat16) + param.data = 
param.data.to(torch.float8_e4m3fn) else: param.data = param.data.to(cast_dtype) #for name, param in transformer.named_parameters(): @@ -438,8 +438,11 @@ class DownloadAndLoadCogVideoGGUFModel: transformer.attention_mode = attention_mode if fp8_fastmode: + params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding"} + if "1.5" in model: + params_to_keep.update({"ff","norm1.linear.weight", "norm_k", "norm_q","ofs_embedding", "norm_final", "norm_out", "proj_out"}) from .fp8_optimization import convert_fp8_linear - convert_fp8_linear(transformer, vae_dtype) + convert_fp8_linear(transformer, vae_dtype, params_to_keep=params_to_keep) if compile == "torch": # compilation diff --git a/mz_gguf_loader.py b/mz_gguf_loader.py index f5a6059..fd8c640 100644 --- a/mz_gguf_loader.py +++ b/mz_gguf_loader.py @@ -19,17 +19,21 @@ class quantize_lazy_load(): def quantize_load_state_dict(model, state_dict, device="cpu"): - Q4_0_qkey = [] + quant_keys = [] for key in state_dict.keys(): if key.endswith(".Q4_0_qweight"): - Q4_0_qkey.append(key.replace(".Q4_0_qweight", "")) + quant_keys.append(key.replace(".Q4_0_qweight", "")) + qtype = "Q4_0" + elif key.endswith(".Q8_0_qweight"): + quant_keys.append(key.replace(".Q8_0_qweight", "")) + qtype = "Q8_0" for name, module in model.named_modules(): - if name in Q4_0_qkey: + if name in quant_keys: q_linear = WQLinear_GGUF.from_linear( linear=module, device=device, - qtype="Q4_0", + qtype=qtype, ) set_op_by_name(model, name, q_linear) @@ -117,14 +121,14 @@ class WQLinear_GGUF(nn.Module): @torch.no_grad() def forward(self, x): - # x = torch.matmul(x, dequantize_blocks_Q4_0(self.qweight)) if self.qtype == "Q4_0": - x = F.linear(x, dequantize_blocks_Q4_0( - self.Q4_0_qweight, x.dtype), self.bias.to(x.dtype) if self.bias is not None else None) + dequant = dequantize_blocks_Q4_0(self.Q4_0_qweight, x.dtype) + elif self.qtype == "Q8_0": + dequant = dequantize_blocks_Q8_0(self.Q8_0_qweight, x.dtype) else: raise ValueError(f"Unknown qtype: {self.qtype}") - - return x + + return F.linear(x, dequant, bias=self.bias.to(x.dtype) if self.bias is not None else None) def split_block_dims(blocks, *args): @@ -153,6 +157,7 @@ def quant_shape_from_byte_shape(shape, qtype) -> tuple[int, ...]: GGML_QUANT_SIZES = { "Q4_0": (32, 2 + 16), + "Q8_0": (32, 2 + 32), } @@ -186,3 +191,31 @@ def dequantize_blocks_Q4_0(data, dtype=torch.float16): )).to(dtype) return out +def dequantize_blocks_Q8_0(data, dtype=torch.float16): + block_size, type_size = GGML_QUANT_SIZES["Q8_0"] + + data = data.to(torch.uint8) + shape = data.shape + + rows = data.reshape( + (-1, data.shape[-1]) + ).view(torch.uint8) + + n_blocks = rows.numel() // type_size + blocks = data.reshape((n_blocks, type_size)) + + n_blocks = blocks.shape[0] + + d, qs = split_block_dims(blocks, 2) + d = d.view(torch.float16).to(torch.float32) + + qs = qs.view(torch.int8).to(torch.float32) + + out = (d * qs) + + out = out.reshape(quant_shape_from_byte_shape( + shape, + qtype="Q8_0", + )).to(dtype) + return out + From 87ed4a56cf48cf8d71e07f3d15088d2f2d196538 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sun, 10 Nov 2024 18:19:35 +0200 Subject: [PATCH 16/49] Update model_loading.py --- model_loading.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model_loading.py b/model_loading.py index 08218ca..acf8c9e 100644 --- a/model_loading.py +++ b/model_loading.py @@ -418,8 +418,8 @@ class DownloadAndLoadCogVideoGGUFModel: else: transformer = 
CogVideoXTransformer3DModel.from_config(transformer_config) + params_to_keep = {"patch_embed", "pos_embedding", "time_embedding"} if "2b" in model: - params_to_keep = {"patch_embed", "pos_embedding", "time_embedding"} cast_dtype = torch.float16 elif "1_5" in model: params_to_keep = {"norm1.linear.weight", "patch_embed", "time_embedding", "ofs_embedding", "norm_final", "norm_out", "proj_out"} From ed167ecfffaf13415e885745506fbcb9844e3c96 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sun, 10 Nov 2024 19:27:40 +0200 Subject: [PATCH 17/49] allow scheduling cfg --- nodes.py | 12 ++++++++++-- pipeline_cogvideox.py | 12 ++++++------ 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/nodes.py b/nodes.py index 7fa9606..2ac987a 100644 --- a/nodes.py +++ b/nodes.py @@ -349,6 +349,7 @@ class CogVideoImageEncode: "chunk_size": ("INT", {"default": 16, "min": 4}), "enable_tiling": ("BOOLEAN", {"default": False, "tooltip": "Enable tiling for the VAE to reduce memory usage"}), "mask": ("MASK", ), + "noise_aug_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001, "tooltip": "Augment image with noise"}), }, } @@ -357,7 +358,7 @@ class CogVideoImageEncode: FUNCTION = "encode" CATEGORY = "CogVideoWrapper" - def encode(self, pipeline, image, chunk_size=8, enable_tiling=False, mask=None): + def encode(self, pipeline, image, chunk_size=8, enable_tiling=False, mask=None, noise_aug_strength=0.0): device = mm.get_torch_device() offload_device = mm.unet_offload_device() generator = torch.Generator(device=device).manual_seed(0) @@ -395,6 +396,8 @@ class CogVideoImageEncode: input_image = input_image.to(vae.dtype).to(device) input_image = input_image.unsqueeze(0).permute(0, 4, 1, 2, 3) # B, C, T, H, W B, C, T, H, W = input_image.shape + if noise_aug_strength > 0: + input_image = add_noise_to_reference_video(input_image, ratio=noise_aug_strength) latents_list = [] # Loop through the temporal dimension in chunks of 16 @@ -786,7 +789,7 @@ class CogVideoSampler: "negative": ("CONDITIONING", ), "height": ("INT", {"default": 480, "min": 128, "max": 2048, "step": 16}), "width": ("INT", {"default": 720, "min": 128, "max": 2048, "step": 16}), - "num_frames": ("INT", {"default": 48, "min": 16, "max": 1024, "step": 1}), + "num_frames": ("INT", {"default": 49, "min": 17, "max": 1024, "step": 4}), "steps": ("INT", {"default": 50, "min": 1}), "cfg": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 30.0, "step": 0.01}), "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), @@ -868,6 +871,11 @@ class CogVideoSampler: pipe.transformer.use_fastercache = False pipe.transformer.fastercache_counter = 0 + if not isinstance(cfg, list): + cfg = [cfg for _ in range(steps)] + else: + assert len(cfg) == steps, "Length of cfg list must match number of steps" + autocastcondition = not pipeline["onediff"] or not dtype == torch.float32 autocast_context = torch.autocast(mm.get_autocast_device(device), dtype=dtype) if autocastcondition else nullcontext() with autocast_context: diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 22bb8e7..a9353b2 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -451,7 +451,7 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
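
The sampler hunk above normalizes `cfg` into a per-step list, and the pipeline lines that follow index it with the step counter. A sketch of the per-step classifier-free-guidance combine this enables (standard CFG, loop fragment only, names as in the diff):

    # inside the denoising loop, after the transformer forward pass;
    # guidance_scale is now a list with one value per inference step
    for i, t in enumerate(timesteps):
        if do_classifier_free_guidance:
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale[i] * (noise_pred_text - noise_pred_uncond)
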
- do_classifier_free_guidance = guidance_scale > 1.0 + do_classifier_free_guidance = guidance_scale[0] > 1.0 if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) @@ -660,7 +660,7 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): if self.do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + self._guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = noise_pred_uncond + self._guidance_scale[i] * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 latents_tile = self.scheduler.step(noise_pred, t, latents_tile.to(self.vae.dtype), **extra_step_kwargs, return_dict=False)[0] @@ -801,7 +801,7 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): noise_pred /= counter if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + self._guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = noise_pred_uncond + self._guidance_scale[i] * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 if not isinstance(self.scheduler, CogVideoXDPMScheduler): @@ -865,15 +865,15 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): video_flow_features=video_flow_features if (tora is not None and tora["start_percent"] <= current_step_percentage <= tora["end_percent"]) else None, )[0] noise_pred = noise_pred.float() - + print(self._guidance_scale[i]) if isinstance(self.scheduler, CogVideoXDPMScheduler): - self._guidance_scale = 1 + guidance_scale * ( + self._guidance_scale[i] = 1 + guidance_scale[i] * ( (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2 ) if do_classifier_free_guidance: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + self._guidance_scale * (noise_pred_text - noise_pred_uncond) + noise_pred = noise_pred_uncond + self._guidance_scale[i] * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 if not isinstance(self.scheduler, CogVideoXDPMScheduler): From 184097e78eb11d6204fbfcf1b5f2d7b6d6e934e0 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sun, 10 Nov 2024 19:31:37 +0200 Subject: [PATCH 18/49] Update pipeline_cogvideox.py --- pipeline_cogvideox.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index a9353b2..fefd0bc 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -865,7 +865,6 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): video_flow_features=video_flow_features if (tora is not None and tora["start_percent"] <= current_step_percentage <= tora["end_percent"]) else None, )[0] noise_pred = noise_pred.float() - print(self._guidance_scale[i]) if isinstance(self.scheduler, CogVideoXDPMScheduler): self._guidance_scale[i] = 1 + guidance_scale[i] * ( (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2 From ca63f5dadea550a353ca99c36f8f85c33b876985 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Mon, 11 Nov 2024 01:19:11 +0200 Subject: [PATCH 19/49] update --- custom_cogvideox_transformer_3d.py | 3 +-- model_loading.py | 3 ++- nodes.py | 4 ++++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py 
b/custom_cogvideox_transformer_3d.py index ed955a4..de80c2d 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -571,11 +571,10 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): # 2. Patch embedding p = self.config.patch_size p_t = self.config.patch_size_t - # We know that the hidden states height and width will always be divisible by patch_size. # But, the number of frames may not be divisible by patch_size_t. So, we pad with the beginning frames. if p_t is not None: - remaining_frames = p_t - num_frames % p_t + remaining_frames = 0 if num_frames % 2 == 0 else 1 first_frame = hidden_states[:, :1].repeat(1, 1 + remaining_frames, 1, 1, 1) hidden_states = torch.cat([first_frame, hidden_states[:, 1:]], dim=1) diff --git a/model_loading.py b/model_loading.py index acf8c9e..89c1516 100644 --- a/model_loading.py +++ b/model_loading.py @@ -263,7 +263,8 @@ class DownloadAndLoadCogVideoModel: pipe.set_adapters(adapter_list, adapter_weights=adapter_weights) if fuse: lora_scale = 1 - if "dimensionx" in lora[-1]["path"].lower(): + dimension_loras = ["orbit_left_lora", "dimensionx"] # for now dimensionx loras need scaling + if any(item in lora[-1]["path"].lower() for item in dimension_loras): lora_scale = lora_scale / lora_rank pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"]) diff --git a/nodes.py b/nodes.py index 2ac987a..248306a 100644 --- a/nodes.py +++ b/nodes.py @@ -828,6 +828,10 @@ class CogVideoSampler: num_frames == 49 or context_options is not None ), "1.0 I2V model can only do 49 frames" + if image_cond_latents is not None: + assert "I2V" in pipeline.get("model_name", ""), "Image condition latents only supported for I2V models" + else: + assert "I2V" not in pipeline.get("model_name", ""), "Image condition latents required for I2V models" device = mm.get_torch_device() offload_device = mm.unet_offload_device() From 43bc7fb4fc74ebe3a92e08d35f195a0c0ff62f76 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Mon, 11 Nov 2024 01:31:13 +0200 Subject: [PATCH 20/49] fix progress bars for vid2vid --- pipeline_cogvideox.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index fefd0bc..ae1aa6d 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -536,11 +536,10 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): logger.info(f"latents: {latents.shape}") logger.info(f"mask: {mask.shape}") - # 7. Denoising loop + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) - comfy_pbar = ProgressBar(num_inference_steps) - # 8. context schedule and temporal tiling + # 7. context schedule and temporal tiling if context_schedule is not None and context_schedule == "temporal_tiling": t_tile_length = context_frames t_tile_overlap = context_overlap @@ -560,7 +559,7 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): use_temporal_tiling = False use_context_schedule = False logger.info("Temporal tiling and context schedule disabled") - # 8.5. Create rotary embeds if required + # 7.5. 
Create rotary embeds if required image_rotary_emb = ( self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device) if self.transformer.config.use_rotary_positional_embeddings @@ -569,7 +568,7 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): if tora is not None and do_classifier_free_guidance: video_flow_features = tora["video_flow_features"].repeat(1, 2, 1, 1, 1).contiguous() - # 9. Controlnet + #8. Controlnet if controlnet is not None: self.controlnet = controlnet["control_model"].to(device) if self.transformer.dtype == torch.float8_e4m3fn: @@ -604,8 +603,9 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): for param in module.parameters(): param.data = param.data.to(device) - # 10. Denoising loop - with self.progress_bar(total=num_inference_steps) as progress_bar: + # 9. Denoising loop + comfy_pbar = ProgressBar(len(timesteps)) + with self.progress_bar(total=len(timesteps)) as progress_bar: old_pred_original_sample = None # for DPM-solver++ for i, t in enumerate(timesteps): if self.interrupt: From 5f1a917b93f9a363b90b1d153fa33c035597ef28 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Mon, 11 Nov 2024 17:29:57 +0200 Subject: [PATCH 21/49] padding fix --- custom_cogvideox_transformer_3d.py | 10 +++++----- nodes.py | 2 +- pipeline_cogvideox.py | 20 +++++++++++++++++--- 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index de80c2d..2fa191a 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -573,10 +573,10 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): p_t = self.config.patch_size_t # We know that the hidden states height and width will always be divisible by patch_size. # But, the number of frames may not be divisible by patch_size_t. So, we pad with the beginning frames. 
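
The progress-bar change just above matters for vid2vid: with `denoise_strength < 1.0` only the tail of the schedule actually runs, so `len(timesteps)` is smaller than `num_inference_steps` and a bar sized by the latter would never fill. A sketch of the usual strength-based truncation (assumed for illustration; the exact spot where the wrapper does this is not shown in these hunks):

    # common diffusers img2img pattern
    init_timestep = min(int(num_inference_steps * denoise_strength), num_inference_steps)
    t_start = max(num_inference_steps - init_timestep, 0)
    timesteps = scheduler.timesteps[t_start * scheduler.order:]
    # hence both bars are now sized with len(timesteps)
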
- if p_t is not None: - remaining_frames = 0 if num_frames % 2 == 0 else 1 - first_frame = hidden_states[:, :1].repeat(1, 1 + remaining_frames, 1, 1, 1) - hidden_states = torch.cat([first_frame, hidden_states[:, 1:]], dim=1) + # if p_t is not None: + # remaining_frames = 0 if num_frames % 2 == 0 else 1 + # first_frame = hidden_states[:, :1].repeat(1, 1 + remaining_frames, 1, 1, 1) + # hidden_states = torch.cat([first_frame, hidden_states[:, 1:]], dim=1) hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) @@ -711,7 +711,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): batch_size, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p ) output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2) - output = output[:, remaining_frames:] + #output = output[:, remaining_frames:] if self.fastercache_counter >= self.fastercache_start_step + 1: (bb, tt, cc, hh, ww) = output.shape diff --git a/nodes.py b/nodes.py index 248306a..42af0ad 100644 --- a/nodes.py +++ b/nodes.py @@ -1004,7 +1004,7 @@ class CogVideoDecode: vae._clear_fake_context_parallel_cache() except: pass - frames = vae.decode(latents).sample + frames = vae.decode(latents[:, :, pipeline["pipe"].additional_frames:]).sample vae.disable_tiling() if not pipeline["cpu_offloading"]: vae.to(offload_device) diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index ae1aa6d..007987e 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -434,6 +434,8 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): width = width or self.transformer.config.sample_size * self.vae_scale_factor_spatial num_videos_per_prompt = 1 + self.num_frames = num_frames + # 1. Check inputs. Raise error if not correct self.check_inputs( height, @@ -463,6 +465,14 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): # 5. Prepare latents. latent_channels = self.vae.config.latent_channels + latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 + # For CogVideoX 1.5, the latent frames should be padded to make it divisible by patch_size_t + patch_size_t = self.transformer.config.patch_size_t + self.additional_frames = 0 + if patch_size_t is not None and latent_frames % patch_size_t != 0: + self.additional_frames = patch_size_t - latent_frames % patch_size_t + num_frames += self.additional_frames * self.vae_scale_factor_temporal + #if latents is None and num_frames == t_tile_length: # num_frames += 1 @@ -503,8 +513,12 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): width // self.vae_scale_factor_spatial, ) latent_padding = torch.zeros(padding_shape, device=device, dtype=self.vae.dtype) - image_cond_latents = torch.cat([image_cond_latents[:, 0, :, :, :].unsqueeze(1), latent_padding, image_cond_latents[:, -1, :, :, :].unsqueeze(1)], dim=1) + # Select the first frame along the second dimension + if self.transformer.config.patch_size_t is not None: + first_frame = image_cond_latents[:, : image_latents.size(1) % self.transformer.config.patch_size_t, ...] 
+ image_cond_latents = torch.cat([first_frame, image_latents], dim=1) + logger.info(f"image cond latents shape: {image_cond_latents.shape}") else: logger.info("Only one image conditioning frame received, img2vid") @@ -597,8 +611,8 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): if tora is not None: trajectory_length = tora["video_flow_features"].shape[1] logger.info(f"Tora trajectory length: {trajectory_length}") - if trajectory_length != latents.shape[1]: - raise ValueError(f"Tora trajectory length {trajectory_length} does not match inpaint_latents count {latents.shape[2]}") + #if trajectory_length != latents.shape[1]: + # raise ValueError(f"Tora trajectory length {trajectory_length} does not match inpaint_latents count {latents.shape[2]}") for module in self.transformer.fuser_list: for param in module.parameters(): param.data = param.data.to(device) From 693157691657cd49776f45648ed326ca522eaef4 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Mon, 11 Nov 2024 18:53:12 +0200 Subject: [PATCH 22/49] update from upstream, ofs embeds --- custom_cogvideox_transformer_3d.py | 20 ++++++++------------ nodes.py | 7 ++++++- pipeline_cogvideox.py | 15 ++++++++++++--- 3 files changed, 26 insertions(+), 16 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index 2fa191a..10b9e4f 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -425,9 +425,11 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift) self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn) + self.ofs_proj = None self.ofs_embedding = None if ofs_embed_dim: + self.ofs_proj = Timesteps(ofs_embed_dim, flip_sin_to_cos, freq_shift) self.ofs_embedding = TimestepEmbedding(ofs_embed_dim, ofs_embed_dim, timestep_activation_fn) # same as time embeddings, for ofs # 3. Define spatio-temporal transformers blocks @@ -547,6 +549,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): encoder_hidden_states: torch.Tensor, timestep: Union[int, float, torch.LongTensor], timestep_cond: Optional[torch.Tensor] = None, + ofs: Optional[Union[int, float, torch.LongTensor]] = None, image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, controlnet_states: torch.Tensor = None, controlnet_weights: Optional[Union[float, int, list, np.ndarray, torch.FloatTensor]] = 1.0, @@ -563,26 +566,21 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): # but time_embedding might actually be running in fp16. so we need to cast here. # there might be better ways to encapsulate this. t_emb = t_emb.to(dtype=hidden_states.dtype) + emb = self.time_embedding(t_emb, timestep_cond) if self.ofs_embedding is not None: #1.5 I2V - emb_ofs = self.ofs_embedding(emb, timestep_cond) - emb = emb + emb_ofs + ofs_emb = self.ofs_proj(ofs) + ofs_emb = ofs_emb.to(dtype=hidden_states.dtype) + ofs_emb = self.ofs_embedding(ofs_emb) + emb = emb + ofs_emb # 2. Patch embedding p = self.config.patch_size p_t = self.config.patch_size_t - # We know that the hidden states height and width will always be divisible by patch_size. - # But, the number of frames may not be divisible by patch_size_t. So, we pad with the beginning frames. 
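
The comment block being removed here described the old in-transformer padding; after the padding fix, the pipeline pads the latent frame count to a multiple of `patch_size_t` up front and the decode node trims it back off. Worked numbers, assuming CogVideoX 1.5 defaults (temporal compression 4, patch_size_t 2):

    vae_scale_factor_temporal = 4
    patch_size_t = 2
    num_frames = 49
    latent_frames = (num_frames - 1) // vae_scale_factor_temporal + 1    # 13
    additional_frames = 0
    if latent_frames % patch_size_t != 0:
        additional_frames = patch_size_t - latent_frames % patch_size_t  # 1
    num_frames += additional_frames * vae_scale_factor_temporal          # 49 -> 53, i.e. 14 latent frames
    # the decode node later drops the pad: vae.decode(latents[:, :, additional_frames:])
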
- # if p_t is not None: - # remaining_frames = 0 if num_frames % 2 == 0 else 1 - # first_frame = hidden_states[:, :1].repeat(1, 1 + remaining_frames, 1, 1, 1) - # hidden_states = torch.cat([first_frame, hidden_states[:, 1:]], dim=1) - hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) hidden_states = self.embedding_dropout(hidden_states) - text_seq_length = encoder_hidden_states.shape[1] encoder_hidden_states = hidden_states[:, :text_seq_length] hidden_states = hidden_states[:, text_seq_length:] @@ -639,7 +637,6 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): batch_size, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p ) output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2) - output = output[:, remaining_frames:] (bb, tt, cc, hh, ww) = output.shape cond = rearrange(output, "B T C H W -> (B T) C H W", B=bb, C=cc, T=tt, H=hh, W=ww) @@ -711,7 +708,6 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): batch_size, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p ) output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2) - #output = output[:, remaining_frames:] if self.fastercache_counter >= self.fastercache_start_step + 1: (bb, tt, cc, hh, ww) = output.shape diff --git a/nodes.py b/nodes.py index 42af0ad..df73ae6 100644 --- a/nodes.py +++ b/nodes.py @@ -346,7 +346,7 @@ class CogVideoImageEncode: "image": ("IMAGE", ), }, "optional": { - "chunk_size": ("INT", {"default": 16, "min": 4}), + "chunk_size": ("INT", {"default": 16, "min": 4, "tooltip": "How many images to encode at once, lower values use less memory"}), "enable_tiling": ("BOOLEAN", {"default": False, "tooltip": "Enable tiling for the VAE to reduce memory usage"}), "mask": ("MASK", ), "noise_aug_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001, "tooltip": "Augment image with noise"}), @@ -806,6 +806,7 @@ class CogVideoSampler: "controlnet": ("COGVIDECONTROLNET",), "tora_trajectory": ("TORAFEATURES", ), "fastercache": ("FASTERCACHEARGS", ), + #"sigmas": ("SIGMAS", ), } } @@ -879,6 +880,9 @@ class CogVideoSampler: cfg = [cfg for _ in range(steps)] else: assert len(cfg) == steps, "Length of cfg list must match number of steps" + + # if sigmas is not None: + # sigma_list = sigmas.tolist() autocastcondition = not pipeline["onediff"] or not dtype == torch.float32 autocast_context = torch.autocast(mm.get_autocast_device(device), dtype=dtype) if autocastcondition else nullcontext() @@ -889,6 +893,7 @@ class CogVideoSampler: width = width, num_frames = num_frames, guidance_scale=cfg, + #sigmas=sigma_list if sigmas is not None else None, latents=samples["samples"] if samples is not None else None, image_cond_latents=image_cond_latents["samples"] if image_cond_latents is not None else None, denoise_strength=denoise_strength, diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 007987e..466eecb 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -369,6 +369,7 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): timesteps: Optional[List[int]] = None, guidance_scale: float = 6, denoise_strength: float = 1.0, + sigmas: Optional[List[float]] = None, num_videos_per_prompt: int = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, @@ -429,7 +430,7 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. """ - + height = height or self.transformer.config.sample_size * self.vae_scale_factor_spatial width = width or self.transformer.config.sample_size * self.vae_scale_factor_spatial num_videos_per_prompt = 1 @@ -460,7 +461,10 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): prompt_embeds = prompt_embeds.to(self.vae.dtype) # 4. Prepare timesteps - timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + if sigmas is None: + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) + else: + timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, sigmas=sigmas, device=device) self._num_timesteps = len(timesteps) # 5. Prepare latents. @@ -499,7 +503,6 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): freenoise=freenoise, ) latents = latents.to(self.vae.dtype) - #print("latents", latents.shape) # 5.5. if image_cond_latents is not None: @@ -579,6 +582,9 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): if self.transformer.config.use_rotary_positional_embeddings else None ) + # 7.6. Create ofs embeds if required + ofs_emb = None if self.transformer.config.ofs_embed_dim is None else latents.new_full((1,), fill_value=2.0) + if tora is not None and do_classifier_free_guidance: video_flow_features = tora["video_flow_features"].repeat(1, 2, 1, 1, 1).contiguous() @@ -617,6 +623,8 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): for param in module.parameters(): param.data = param.data.to(device) + logger.info(f"Sampling {num_frames} frames in {latent_frames} latent frames at {width}x{height} with {num_inference_steps} inference steps") + # 9. 
Denoising loop comfy_pbar = ProgressBar(len(timesteps)) with self.progress_bar(total=len(timesteps)) as progress_bar: @@ -873,6 +881,7 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): encoder_hidden_states=prompt_embeds, timestep=timestep, image_rotary_emb=image_rotary_emb, + ofs=ofs_emb, return_dict=False, controlnet_states=controlnet_states, controlnet_weights=control_weights, From 6d4c99e77d54ab8a767cf319dc34ecd30b3b1a83 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Mon, 11 Nov 2024 19:58:17 +0200 Subject: [PATCH 23/49] Update model_loading.py --- model_loading.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model_loading.py b/model_loading.py index 89c1516..72af2a6 100644 --- a/model_loading.py +++ b/model_loading.py @@ -263,7 +263,7 @@ class DownloadAndLoadCogVideoModel: pipe.set_adapters(adapter_list, adapter_weights=adapter_weights) if fuse: lora_scale = 1 - dimension_loras = ["orbit_left_lora", "dimensionx"] # for now dimensionx loras need scaling + dimension_loras = ["orbit", "dimensionx"] # for now dimensionx loras need scaling if any(item in lora[-1]["path"].lower() for item in dimension_loras): lora_scale = lora_scale / lora_rank pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"]) From ea0273c8ecc01b2d5216810cd2773854507b251f Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Mon, 11 Nov 2024 22:34:43 +0200 Subject: [PATCH 24/49] VAE fix, allow using fp32 VAE --- model_loading.py | 40 ++++++++++++++++++++++++++++++++++++++++ nodes.py | 25 +++++++++++++++++++------ pipeline_cogvideox.py | 12 ++++++++++-- 3 files changed, 69 insertions(+), 8 deletions(-) diff --git a/model_loading.py b/model_loading.py index 72af2a6..7c79d71 100644 --- a/model_loading.py +++ b/model_loading.py @@ -535,6 +535,44 @@ class DownloadAndLoadCogVideoGGUFModel: } return (pipeline,) + +#revion VAE + +class CogVideoXVAELoader: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model_name": (folder_paths.get_filename_list("vae"), {"tooltip": "The name of the checkpoint (vae) to load."}), + }, + "optional": { + "precision": (["fp16", "fp32", "bf16"], + {"default": "bf16"} + ), + } + } + + RETURN_TYPES = ("VAE",) + RETURN_NAMES = ("vae", ) + FUNCTION = "loadmodel" + CATEGORY = "CogVideoWrapper" + DESCRIPTION = "Loads CogVideoX VAE model from 'ComfyUI/models/vae'" + + def loadmodel(self, model_name, precision): + device = mm.get_torch_device() + offload_device = mm.unet_offload_device() + + dtype = {"bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[precision] + with open(os.path.join(script_directory, 'configs', 'vae_config.json')) as f: + vae_config = json.load(f) + model_path = folder_paths.get_full_path("vae", model_name) + vae_sd = load_torch_file(model_path) + + vae = AutoencoderKLCogVideoX.from_config(vae_config).to(dtype).to(offload_device) + vae.load_state_dict(vae_sd) + + return (vae,) + #region Tora class DownloadAndLoadToraModel: @classmethod @@ -698,6 +736,7 @@ NODE_CLASS_MAPPINGS = { "DownloadAndLoadCogVideoControlNet": DownloadAndLoadCogVideoControlNet, "DownloadAndLoadToraModel": DownloadAndLoadToraModel, "CogVideoLoraSelect": CogVideoLoraSelect, + "CogVideoXVAELoader": CogVideoXVAELoader, } NODE_DISPLAY_NAME_MAPPINGS = { "DownloadAndLoadCogVideoModel": "(Down)load CogVideo Model", @@ -705,4 +744,5 @@ NODE_DISPLAY_NAME_MAPPINGS = { "DownloadAndLoadCogVideoControlNet": "(Down)load CogVideo ControlNet", "DownloadAndLoadToraModel": 
"(Down)load Tora Model", "CogVideoLoraSelect": "CogVideo LoraSelect", + "CogVideoXVAELoader": "CogVideoX VAE Loader", } \ No newline at end of file diff --git a/nodes.py b/nodes.py index df73ae6..3d83bc9 100644 --- a/nodes.py +++ b/nodes.py @@ -350,6 +350,7 @@ class CogVideoImageEncode: "enable_tiling": ("BOOLEAN", {"default": False, "tooltip": "Enable tiling for the VAE to reduce memory usage"}), "mask": ("MASK", ), "noise_aug_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001, "tooltip": "Augment image with noise"}), + "vae_override" : ("VAE", {"default": None, "tooltip": "Override the VAE model in the pipeline"}), }, } @@ -358,15 +359,21 @@ class CogVideoImageEncode: FUNCTION = "encode" CATEGORY = "CogVideoWrapper" - def encode(self, pipeline, image, chunk_size=8, enable_tiling=False, mask=None, noise_aug_strength=0.0): + def encode(self, pipeline, image, chunk_size=8, enable_tiling=False, mask=None, noise_aug_strength=0.0, vae_override=None): device = mm.get_torch_device() offload_device = mm.unet_offload_device() generator = torch.Generator(device=device).manual_seed(0) B, H, W, C = image.shape - vae = pipeline["pipe"].vae + vae = pipeline["pipe"].vae if vae_override is None else vae_override vae.enable_slicing() + model_name = pipeline.get("model_name", "") + + if "1.5" in model_name or "1_5" in model_name: + vae_scaling_factor = 1 / vae.config.scaling_factor + else: + vae_scaling_factor = vae.config.scaling_factor if enable_tiling: from .mz_enable_vae_encode_tiling import enable_vae_encode_tiling @@ -391,10 +398,14 @@ class CogVideoImageEncode: # input_image = input_image * (1 -mask) else: pipeline["pipe"].original_mask = None - + #input_image = input_image.permute(0, 3, 1, 2) # B, C, H, W + #input_image = pipeline["pipe"].video_processor.preprocess(input_image).to(device, dtype=vae.dtype) + #input_image = input_image.unsqueeze(2) + input_image = input_image * 2.0 - 1.0 input_image = input_image.to(vae.dtype).to(device) input_image = input_image.unsqueeze(0).permute(0, 4, 1, 2, 3) # B, C, T, H, W + B, C, T, H, W = input_image.shape if noise_aug_strength > 0: input_image = add_noise_to_reference_video(input_image, ratio=noise_aug_strength) @@ -417,7 +428,7 @@ class CogVideoImageEncode: elif hasattr(latents, "latents"): latents = latents.latents - latents = vae.config.scaling_factor * latents + latents = vae_scaling_factor * latents latents = latents.permute(0, 2, 1, 3, 4) # B, T_chunk, C, H, W latents_list.append(latents) @@ -972,6 +983,7 @@ class CogVideoDecode: "tile_overlap_factor_height": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}), "tile_overlap_factor_width": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}), "auto_tile_size": ("BOOLEAN", {"default": True, "tooltip": "Auto size based on height and width, default is half the size"}), + "vae_override": ("VAE", {"default": None}), } } @@ -980,11 +992,12 @@ class CogVideoDecode: FUNCTION = "decode" CATEGORY = "CogVideoWrapper" - def decode(self, pipeline, samples, enable_vae_tiling, tile_sample_min_height, tile_sample_min_width, tile_overlap_factor_height, tile_overlap_factor_width, auto_tile_size=True): + def decode(self, pipeline, samples, enable_vae_tiling, tile_sample_min_height, tile_sample_min_width, tile_overlap_factor_height, tile_overlap_factor_width, + auto_tile_size=True, vae_override=None): device = mm.get_torch_device() offload_device = mm.unet_offload_device() latents = samples["samples"] - vae = pipeline["pipe"].vae + vae = pipeline["pipe"].vae if 
vae_override is None else vae_override vae.enable_slicing() diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 466eecb..694a85e 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -159,15 +159,17 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): ) self.vae_scale_factor_temporal = ( self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4 - ) + ) self.original_mask = original_mask self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) + self.video_processor.config.do_resize = False if pab_config is not None: set_pab_manager(pab_config) self.input_with_padding = True + def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, timesteps, denoise_strength, num_inference_steps, latents=None, freenoise=True, context_size=None, context_overlap=None @@ -625,6 +627,9 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): logger.info(f"Sampling {num_frames} frames in {latent_frames} latent frames at {width}x{height} with {num_inference_steps} inference steps") + from .latent_preview import prepare_callback + callback = prepare_callback(self.transformer, num_inference_steps) + # 9. Denoising loop comfy_pbar = ProgressBar(len(timesteps)) with self.progress_bar(total=len(timesteps)) as progress_bar: @@ -926,7 +931,10 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() - comfy_pbar.update(1) + if callback is not None: + callback(i, latents.detach()[-1], None, num_inference_steps) + else: + comfy_pbar.update(1) # Offload all models From 00fde5ebce18c64dfe77ae9e7e492fd4dd31a9eb Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Mon, 11 Nov 2024 23:36:48 +0200 Subject: [PATCH 25/49] allow fused loras with fp8 --- model_loading.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/model_loading.py b/model_loading.py index 7c79d71..1fcfbb6 100644 --- a/model_loading.py +++ b/model_loading.py @@ -201,21 +201,6 @@ class DownloadAndLoadCogVideoModel: if block_edit is not None: transformer = remove_specific_blocks(transformer, block_edit) - - #fp8 - if fp8_transformer == "enabled" or fp8_transformer == "fastmode": - params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding"} - if "1.5" in model: - params_to_keep.update({"norm1.linear.weight", "norm_k", "norm_q","ofs_embedding", "norm_final", "norm_out", "proj_out"}) - for name, param in transformer.named_parameters(): - if not any(keyword in name for keyword in params_to_keep): - param.data = param.data.to(torch.float8_e4m3fn) - - if fp8_transformer == "fastmode": - from .fp8_optimization import convert_fp8_linear - if "1.5" in model: - params_to_keep.update({"ff"}) #otherwise NaNs - convert_fp8_linear(transformer, dtype, params_to_keep=params_to_keep) with open(scheduler_path) as f: scheduler_config = json.load(f) @@ -267,6 +252,21 @@ class DownloadAndLoadCogVideoModel: if any(item in lora[-1]["path"].lower() for item in dimension_loras): lora_scale = lora_scale / lora_rank pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"]) + + #fp8 + if fp8_transformer == "enabled" or fp8_transformer == "fastmode": + params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding"} + if "1.5" in model: + 
params_to_keep.update({"norm1.linear.weight", "norm_k", "norm_q","ofs_embedding", "norm_final", "norm_out", "proj_out"}) + for name, param in pipe.transformer.named_parameters(): + if not any(keyword in name for keyword in params_to_keep): + param.data = param.data.to(torch.float8_e4m3fn) + + if fp8_transformer == "fastmode": + from .fp8_optimization import convert_fp8_linear + if "1.5" in model: + params_to_keep.update({"ff"}) #otherwise NaNs + convert_fp8_linear(pipe.transformer, dtype, params_to_keep=params_to_keep) if enable_sequential_cpu_offload: pipe.enable_sequential_cpu_offload() From db697fea11c59491cb6f0b9f2eb66c3e2c9d4ab1 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 12 Nov 2024 00:22:26 +0200 Subject: [PATCH 26/49] Update model_loading.py --- model_loading.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/model_loading.py b/model_loading.py index 1fcfbb6..08ddcee 100644 --- a/model_loading.py +++ b/model_loading.py @@ -113,6 +113,9 @@ class DownloadAndLoadCogVideoModel: def loadmodel(self, model, precision, fp8_transformer="disabled", compile="disabled", enable_sequential_cpu_offload=False, pab_config=None, block_edit=None, lora=None, compile_args=None, attention_mode="sdpa", load_device="main_device"): + + if precision == "fp16" and "1.5" in model: + raise ValueError("1.5 models do not work in fp16") device = mm.get_torch_device() offload_device = mm.unet_offload_device() From c8772c3aa07028692d1fb99c1b8204c23953e8c5 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 12 Nov 2024 00:53:55 +0200 Subject: [PATCH 27/49] Update nodes.py --- nodes.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nodes.py b/nodes.py index 3d83bc9..5a5345a 100644 --- a/nodes.py +++ b/nodes.py @@ -370,7 +370,7 @@ class CogVideoImageEncode: vae.enable_slicing() model_name = pipeline.get("model_name", "") - if "1.5" in model_name or "1_5" in model_name: + if ("1.5" in model_name or "1_5" in model_name) and image.shape[0] == 1: vae_scaling_factor = 1 / vae.config.scaling_factor else: vae_scaling_factor = vae.config.scaling_factor @@ -428,12 +428,13 @@ class CogVideoImageEncode: elif hasattr(latents, "latents"): latents = latents.latents - latents = vae_scaling_factor * latents latents = latents.permute(0, 2, 1, 3, 4) # B, T_chunk, C, H, W latents_list.append(latents) # Concatenate all the chunks along the temporal dimension final_latents = torch.cat(latents_list, dim=1) + final_latents = final_latents * vae_scaling_factor + log.info(f"Encoded latents shape: {final_latents.shape}") if not pipeline["cpu_offloading"]: vae.to(offload_device) @@ -810,9 +811,9 @@ class CogVideoSampler: }), }, "optional": { - "samples": ("LATENT", ), + "samples": ("LATENT", {"tooltip": "init Latents to use for video2video process"} ), "denoise_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - "image_cond_latents": ("LATENT", ), + "image_cond_latents": ("LATENT",{"tooltip": "Latent to use for image2video conditioning"} ), "context_options": ("COGCONTEXT", ), "controlnet": ("COGVIDECONTROLNET",), "tora_trajectory": ("TORAFEATURES", ), @@ -841,6 +842,7 @@ class CogVideoSampler: context_options is not None ), "1.0 I2V model can only do 49 frames" if image_cond_latents is not None: + assert image_cond_latents.shape[0] == 1, "Image condition latents must be a single latent" assert "I2V" in pipeline.get("model_name", ""), "Image condition latents only supported for I2V models" else: 
assert "I2V" not in pipeline.get("model_name", ""), "Image condition latents required for I2V models" From 9681c83c1d1bcb27140d032b878ee3a65eee7239 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 12 Nov 2024 00:58:00 +0200 Subject: [PATCH 28/49] Update nodes.py --- nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index 5a5345a..1f1a80b 100644 --- a/nodes.py +++ b/nodes.py @@ -842,7 +842,7 @@ class CogVideoSampler: context_options is not None ), "1.0 I2V model can only do 49 frames" if image_cond_latents is not None: - assert image_cond_latents.shape[0] == 1, "Image condition latents must be a single latent" + assert image_cond_latents["samples"].shape[0] == 1, "Image condition latents must be a single latent" assert "I2V" in pipeline.get("model_name", ""), "Image condition latents only supported for I2V models" else: assert "I2V" not in pipeline.get("model_name", ""), "Image condition latents required for I2V models" From 0a121dba53d088b6d018577d3ce77292445ada3c Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 12 Nov 2024 07:39:00 +0200 Subject: [PATCH 29/49] fix FasterCache --- custom_cogvideox_transformer_3d.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index 10b9e4f..47b9488 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -630,11 +630,11 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): # - However, for CogVideoX-5b-I2V also takes concatenated input image latents (number of input channels is twice the output channels) if p_t is None: - output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, -1, p, p) + output = hidden_states.reshape(1, num_frames, height // p, width // p, -1, p, p) output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) else: output = hidden_states.reshape( - batch_size, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p + 1, (num_frames + p_t - 1) // p_t, height // p, width // p, -1, p_t, p, p ) output = output.permute(0, 1, 5, 4, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(1, 2) From dac6a2a3ace50f8f157a8e5107eb34ffce756b6e Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 12 Nov 2024 08:42:01 +0200 Subject: [PATCH 30/49] allow limiting blocks to cache --- custom_cogvideox_transformer_3d.py | 52 +++++++++++++++++++----------- nodes.py | 47 +++++++++++++++++++-------- 2 files changed, 67 insertions(+), 32 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index 47b9488..12633b1 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -235,6 +235,7 @@ class CogVideoXBlock(nn.Module): image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, video_flow_feature: Optional[torch.Tensor] = None, fuser=None, + block_use_fastercache=False, fastercache_counter=0, fastercache_start_step=15, fastercache_device="cuda:0", @@ -257,18 +258,32 @@ class CogVideoXBlock(nn.Module): del h, fuser #region fastercache - B = norm_hidden_states.shape[0] - if fastercache_counter >= fastercache_start_step + 3 and fastercache_counter%3!=0 and self.cached_hidden_states[-1].shape[0] >= B: - attn_hidden_states = ( - self.cached_hidden_states[1][:B] + - (self.cached_hidden_states[1][:B] - 
self.cached_hidden_states[0][:B]) - * 0.3 - ).to(norm_hidden_states.device, non_blocking=True) - attn_encoder_hidden_states = ( - self.cached_encoder_hidden_states[1][:B] + - (self.cached_encoder_hidden_states[1][:B] - self.cached_encoder_hidden_states[0][:B]) - * 0.3 - ).to(norm_hidden_states.device, non_blocking=True) + if block_use_fastercache: + B = norm_hidden_states.shape[0] + if fastercache_counter >= fastercache_start_step + 3 and fastercache_counter%3!=0 and self.cached_hidden_states[-1].shape[0] >= B: + attn_hidden_states = ( + self.cached_hidden_states[1][:B] + + (self.cached_hidden_states[1][:B] - self.cached_hidden_states[0][:B]) + * 0.3 + ).to(norm_hidden_states.device, non_blocking=True) + attn_encoder_hidden_states = ( + self.cached_encoder_hidden_states[1][:B] + + (self.cached_encoder_hidden_states[1][:B] - self.cached_encoder_hidden_states[0][:B]) + * 0.3 + ).to(norm_hidden_states.device, non_blocking=True) + else: + attn_hidden_states, attn_encoder_hidden_states = self.attn1( + hidden_states=norm_hidden_states, + encoder_hidden_states=norm_encoder_hidden_states, + image_rotary_emb=image_rotary_emb, + attention_mode=self.attention_mode, + ) + if fastercache_counter == fastercache_start_step: + self.cached_hidden_states = [attn_hidden_states.to(fastercache_device), attn_hidden_states.to(fastercache_device)] + self.cached_encoder_hidden_states = [attn_encoder_hidden_states.to(fastercache_device), attn_encoder_hidden_states.to(fastercache_device)] + elif fastercache_counter > fastercache_start_step: + self.cached_hidden_states[-1].copy_(attn_hidden_states.to(fastercache_device)) + self.cached_encoder_hidden_states[-1].copy_(attn_encoder_hidden_states.to(fastercache_device)) else: attn_hidden_states, attn_encoder_hidden_states = self.attn1( hidden_states=norm_hidden_states, @@ -276,12 +291,6 @@ class CogVideoXBlock(nn.Module): image_rotary_emb=image_rotary_emb, attention_mode=self.attention_mode, ) - if fastercache_counter == fastercache_start_step: - self.cached_hidden_states = [attn_hidden_states.to(fastercache_device), attn_hidden_states.to(fastercache_device)] - self.cached_encoder_hidden_states = [attn_encoder_hidden_states.to(fastercache_device), attn_encoder_hidden_states.to(fastercache_device)] - elif fastercache_counter > fastercache_start_step: - self.cached_hidden_states[-1].copy_(attn_hidden_states.to(fastercache_device)) - self.cached_encoder_hidden_states[-1].copy_(attn_encoder_hidden_states.to(fastercache_device)) hidden_states = hidden_states + gate_msa * attn_hidden_states encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states @@ -477,6 +486,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): self.fastercache_lf_step = 40 self.fastercache_hf_step = 30 self.fastercache_device = "cuda" + self.fastercache_num_blocks_to_cache = len(self.transformer_blocks) self.attention_mode = attention_mode def _set_gradient_checkpointing(self, module, value=False): @@ -577,7 +587,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): # 2. 
Patch embedding p = self.config.patch_size p_t = self.config.patch_size_t - + hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) hidden_states = self.embedding_dropout(hidden_states) @@ -597,7 +607,9 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): image_rotary_emb=image_rotary_emb, video_flow_feature=video_flow_features[i][:1] if video_flow_features is not None else None, fuser = self.fuser_list[i] if self.fuser_list is not None else None, + block_use_fastercache = i <= self.fastercache_num_blocks_to_cache, fastercache_counter = self.fastercache_counter, + fastercache_start_step = self.fastercache_start_step, fastercache_device = self.fastercache_device ) @@ -665,7 +677,9 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): image_rotary_emb=image_rotary_emb, video_flow_feature=video_flow_features[i] if video_flow_features is not None else None, fuser = self.fuser_list[i] if self.fuser_list is not None else None, + block_use_fastercache = i <= self.fastercache_num_blocks_to_cache, fastercache_counter = self.fastercache_counter, + fastercache_start_step = self.fastercache_start_step, fastercache_device = self.fastercache_device ) #has_nan = torch.isnan(hidden_states).any() diff --git a/nodes.py b/nodes.py index 1f1a80b..29ffe2c 100644 --- a/nodes.py +++ b/nodes.py @@ -452,6 +452,8 @@ class CogVideoImageInterpolationEncode: "optional": { "enable_tiling": ("BOOLEAN", {"default": False, "tooltip": "Enable tiling for the VAE to reduce memory usage"}), "mask": ("MASK", ), + "vae_override" : ("VAE", {"default": None, "tooltip": "Override the VAE model in the pipeline"}), + }, } @@ -460,14 +462,21 @@ class CogVideoImageInterpolationEncode: FUNCTION = "encode" CATEGORY = "CogVideoWrapper" - def encode(self, pipeline, start_image, end_image, chunk_size=8, enable_tiling=False, mask=None): + def encode(self, pipeline, start_image, end_image, enable_tiling=False, mask=None, vae_override=None): device = mm.get_torch_device() offload_device = mm.unet_offload_device() generator = torch.Generator(device=device).manual_seed(0) B, H, W, C = start_image.shape - vae = pipeline["pipe"].vae + vae = pipeline["pipe"].vae if vae_override is None else vae_override + vae.enable_slicing() + model_name = pipeline.get("model_name", "") + + if ("1.5" in model_name or "1_5" in model_name): + vae_scaling_factor = 1 / vae.config.scaling_factor + else: + vae_scaling_factor = vae.config.scaling_factor vae.enable_slicing() if enable_tiling: @@ -500,8 +509,8 @@ class CogVideoImageInterpolationEncode: latents_list = [] # Encode the chunk of images - start_latents = vae.encode(start_image).latent_dist.sample(generator) * vae.config.scaling_factor - end_latents = vae.encode(end_image).latent_dist.sample(generator) * vae.config.scaling_factor + start_latents = vae.encode(start_image).latent_dist.sample(generator) * vae_scaling_factor + end_latents = vae.encode(end_image).latent_dist.sample(generator) * vae_scaling_factor start_latents = start_latents.permute(0, 2, 1, 3, 4) # B, T, C, H, W end_latents = end_latents.permute(0, 2, 1, 3, 4) # B, T, C, H, W @@ -769,6 +778,7 @@ class CogVideoXFasterCache: "hf_step": ("INT", {"default": 30, "min": 0, "max": 1024, "step": 1}), "lf_step": ("INT", {"default": 40, "min": 0, "max": 1024, "step": 1}), "cache_device": (["main_device", "offload_device", "cuda:1"], {"default": "main_device", "tooltip": "The device to use for the cache, main_device is on GPU and uses a lot of VRAM"}), + "num_blocks_to_cache": ("INT", 
{"default": 42, "min": 0, "max": 1024, "step": 1, "tooltip": "Number of transformer blocks to cache, 5b model has 42 blocks, tradeoff between speed and memory"}), }, } @@ -777,7 +787,7 @@ class CogVideoXFasterCache: FUNCTION = "args" CATEGORY = "CogVideoWrapper" - def args(self, start_step, hf_step, lf_step, cache_device): + def args(self, start_step, hf_step, lf_step, cache_device, num_blocks_to_cache): device = mm.get_torch_device() offload_device = mm.unet_offload_device() if cache_device == "cuda:1": @@ -786,7 +796,8 @@ class CogVideoXFasterCache: "start_step" : start_step, "hf_step" : hf_step, "lf_step" : lf_step, - "cache_device" : device if cache_device != "offload_device" else offload_device + "cache_device" : device if cache_device != "offload_device" else offload_device, + "num_blocks_to_cache" : num_blocks_to_cache, } return (fastercache,) @@ -832,20 +843,25 @@ class CogVideoSampler: mm.soft_empty_cache() base_path = pipeline["base_path"] + model_name = pipeline.get("model_name", "") + supports_image_conds = True if "I2V" in model_name or "interpolation" in model_name.lower() else False assert "fun" not in base_path.lower(), "'Fun' models not supported in 'CogVideoSampler', use the 'CogVideoXFunSampler'" assert ( - "I2V" not in pipeline.get("model_name", "") or - "1.5" in pipeline.get("model_name", "") or - "1_5" in pipeline.get("model_name", "") or + "I2V" not in model_name or + "1.5" in model_name or + "1_5" in model_name or num_frames == 49 or context_options is not None ), "1.0 I2V model can only do 49 frames" if image_cond_latents is not None: - assert image_cond_latents["samples"].shape[0] == 1, "Image condition latents must be a single latent" - assert "I2V" in pipeline.get("model_name", ""), "Image condition latents only supported for I2V models" + assert supports_image_conds, "Image condition latents only supported for I2V and Interpolation models" + if "I2V" in model_name: + assert image_cond_latents["samples"].shape[1] == 1, "I2V model only supports single image condition latent" + elif "interpolation" in model_name.lower(): + assert image_cond_latents["samples"].shape[1] == 2, "Interpolation model needs two image condition latents" else: - assert "I2V" not in pipeline.get("model_name", ""), "Image condition latents required for I2V models" + assert not supports_image_conds, "Image condition latents required for I2V models" device = mm.get_torch_device() offload_device = mm.unet_offload_device() @@ -885,6 +901,8 @@ class CogVideoSampler: pipe.transformer.fastercache_lf_step = fastercache["lf_step"] pipe.transformer.fastercache_hf_step = fastercache["hf_step"] pipe.transformer.fastercache_device = fastercache["cache_device"] + pipe.transformer.fastercache_num_blocks_to_cache = fastercache["num_blocks_to_cache"] + log.info(f"FasterCache enabled for {pipe.transformer.fastercache_num_blocks_to_cache} blocks out of {len(pipe.transformer.transformer_blocks)}") else: pipe.transformer.use_fastercache = False pipe.transformer.fastercache_counter = 0 @@ -1001,6 +1019,8 @@ class CogVideoDecode: latents = samples["samples"] vae = pipeline["pipe"].vae if vae_override is None else vae_override + additional_frames = getattr(pipeline["pipe"], "additional_frames", 0) + vae.enable_slicing() if not pipeline["cpu_offloading"]: @@ -1024,7 +1044,8 @@ class CogVideoDecode: vae._clear_fake_context_parallel_cache() except: pass - frames = vae.decode(latents[:, :, pipeline["pipe"].additional_frames:]).sample + + frames = vae.decode(latents[:, :, additional_frames:]).sample vae.disable_tiling() 
if not pipeline["cpu_offloading"]: vae.to(offload_device) From ba2dbfbeb496e3a917b6965e02a0998c0a778428 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 12 Nov 2024 11:29:50 +0200 Subject: [PATCH 31/49] fixes --- model_loading.py | 4 ++-- nodes.py | 16 ++++++++-------- pipeline_cogvideox.py | 9 ++++++--- 3 files changed, 16 insertions(+), 13 deletions(-) diff --git a/model_loading.py b/model_loading.py index 08ddcee..959c8ff 100644 --- a/model_loading.py +++ b/model_loading.py @@ -258,9 +258,9 @@ class DownloadAndLoadCogVideoModel: #fp8 if fp8_transformer == "enabled" or fp8_transformer == "fastmode": - params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding"} + params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding", "norm_k", "norm_q", "to_k.bias", "to_q.bias", "to_v.bias"} if "1.5" in model: - params_to_keep.update({"norm1.linear.weight", "norm_k", "norm_q","ofs_embedding", "norm_final", "norm_out", "proj_out"}) + params_to_keep.update({"norm1.linear.weight", "ofs_embedding", "norm_final", "norm_out", "proj_out"}) for name, param in pipe.transformer.named_parameters(): if not any(keyword in name for keyword in params_to_keep): param.data = param.data.to(torch.float8_e4m3fn) diff --git a/nodes.py b/nodes.py index 29ffe2c..aa8f6bf 100644 --- a/nodes.py +++ b/nodes.py @@ -854,14 +854,14 @@ class CogVideoSampler: num_frames == 49 or context_options is not None ), "1.0 I2V model can only do 49 frames" - if image_cond_latents is not None: - assert supports_image_conds, "Image condition latents only supported for I2V and Interpolation models" - if "I2V" in model_name: - assert image_cond_latents["samples"].shape[1] == 1, "I2V model only supports single image condition latent" - elif "interpolation" in model_name.lower(): - assert image_cond_latents["samples"].shape[1] == 2, "Interpolation model needs two image condition latents" - else: - assert not supports_image_conds, "Image condition latents required for I2V models" + # if image_cond_latents is not None: + # assert supports_image_conds, "Image condition latents only supported for I2V and Interpolation models" + # if "I2V" in model_name: + # assert image_cond_latents["samples"].shape[1] == 1, "I2V model only supports single image condition latent" + # elif "interpolation" in model_name.lower(): + # assert image_cond_latents["samples"].shape[1] == 2, "Interpolation model needs two image condition latents" + # else: + # assert not supports_image_conds, "Image condition latents required for I2V models" device = mm.get_torch_device() offload_device = mm.unet_offload_device() diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 694a85e..13c960e 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -519,10 +519,9 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): ) latent_padding = torch.zeros(padding_shape, device=device, dtype=self.vae.dtype) image_cond_latents = torch.cat([image_cond_latents[:, 0, :, :, :].unsqueeze(1), latent_padding, image_cond_latents[:, -1, :, :, :].unsqueeze(1)], dim=1) - # Select the first frame along the second dimension if self.transformer.config.patch_size_t is not None: - first_frame = image_cond_latents[:, : image_latents.size(1) % self.transformer.config.patch_size_t, ...] - image_cond_latents = torch.cat([first_frame, image_latents], dim=1) + first_frame = image_cond_latents[:, : image_cond_latents.size(1) % self.transformer.config.patch_size_t, ...] 
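# (The fixed line above indexes image_cond_latents rather than image_latents;
#  prepending size(1) % patch_size_t leading frames brings the conditioning
#  latents' temporal length in line with patch_size_t before concatenation.)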
+ image_cond_latents = torch.cat([first_frame, image_cond_latents], dim=1) logger.info(f"image cond latents shape: {image_cond_latents.shape}") else: @@ -537,6 +536,10 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): ) latent_padding = torch.zeros(padding_shape, device=device, dtype=self.vae.dtype) image_cond_latents = torch.cat([image_cond_latents, latent_padding], dim=1) + # Select the first frame along the second dimension + if self.transformer.config.patch_size_t is not None: + first_frame = image_cond_latents[:, : image_cond_latents.size(1) % self.transformer.config.patch_size_t, ...] + image_cond_latents = torch.cat([first_frame, image_cond_latents], dim=1) else: image_cond_latents = image_cond_latents.repeat(1, latents.shape[1], 1, 1, 1) # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline From 7ac2224ec252d7a534eccd609aca2437fcfa9f14 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 12 Nov 2024 11:30:03 +0200 Subject: [PATCH 32/49] Update nodes.py --- nodes.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/nodes.py b/nodes.py index aa8f6bf..817683f 100644 --- a/nodes.py +++ b/nodes.py @@ -854,14 +854,14 @@ class CogVideoSampler: num_frames == 49 or context_options is not None ), "1.0 I2V model can only do 49 frames" - # if image_cond_latents is not None: - # assert supports_image_conds, "Image condition latents only supported for I2V and Interpolation models" - # if "I2V" in model_name: - # assert image_cond_latents["samples"].shape[1] == 1, "I2V model only supports single image condition latent" - # elif "interpolation" in model_name.lower(): - # assert image_cond_latents["samples"].shape[1] == 2, "Interpolation model needs two image condition latents" - # else: - # assert not supports_image_conds, "Image condition latents required for I2V models" + if image_cond_latents is not None: + assert supports_image_conds, "Image condition latents only supported for I2V and Interpolation models" + # if "I2V" in model_name: + # assert image_cond_latents["samples"].shape[1] == 1, "I2V model only supports single image condition latent" + # elif "interpolation" in model_name.lower(): + # assert image_cond_latents["samples"].shape[1] == 2, "Interpolation model needs two image condition latents" + else: + assert not supports_image_conds, "Image condition latents required for I2V models" device = mm.get_torch_device() offload_device = mm.unet_offload_device() From 34b650c7854e291dcba6e017f4f8dfa17a699c1a Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 12 Nov 2024 19:29:55 +0200 Subject: [PATCH 33/49] fun fixes --- cogvideox_fun/transformer_3d.py | 156 +++++++++++--------------------- nodes.py | 4 +- pipeline_cogvideox.py | 9 +- 3 files changed, 64 insertions(+), 105 deletions(-) diff --git a/cogvideox_fun/transformer_3d.py b/cogvideox_fun/transformer_3d.py index 83614e2..5b6fef9 100644 --- a/cogvideox_fun/transformer_3d.py +++ b/cogvideox_fun/transformer_3d.py @@ -37,11 +37,9 @@ logger = logging.get_logger(__name__) # pylint: disable=invalid-name from einops import rearrange try: from sageattention import sageattn - SAGEATTN_IS_AVAVILABLE = True - logger.info("Using sageattn") + SAGEATTN_IS_AVAILABLE = True except: - logger.info("sageattn not found, using sdpa") - SAGEATTN_IS_AVAVILABLE = False + SAGEATTN_IS_AVAILABLE = False def fft(tensor): tensor_fft = torch.fft.fft2(tensor) @@ -77,6 +75,7 @@ class 
CogVideoXAttnProcessor2_0: encoder_hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_rotary_emb: Optional[torch.Tensor] = None, + attention_mode: Optional[str] = None, ) -> torch.Tensor: text_seq_length = encoder_hidden_states.size(1) @@ -113,83 +112,12 @@ class CogVideoXAttnProcessor2_0: query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) if not attn.is_cross_attention: key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) - - if SAGEATTN_IS_AVAVILABLE: - hidden_states = sageattn(query, key, value, is_causal=False) - else: - hidden_states = F.scaled_dot_product_attention( - query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False - ) - - hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - encoder_hidden_states, hidden_states = hidden_states.split( - [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 - ) - return hidden_states, encoder_hidden_states - - -class FusedCogVideoXAttnProcessor2_0: - r""" - Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on - query and key vectors, but does not include spatial normalization. - """ - - def __init__(self): - if not hasattr(F, "scaled_dot_product_attention"): - raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") - - def __call__( - self, - attn: Attention, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - image_rotary_emb: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - text_seq_length = encoder_hidden_states.size(1) - - hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - - batch_size, sequence_length, _ = ( - hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape - ) - - if attention_mask is not None: - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) - attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) - - qkv = attn.to_qkv(hidden_states) - split_size = qkv.shape[-1] // 3 - query, key, value = torch.split(qkv, split_size, dim=-1) - - inner_dim = key.shape[-1] - head_dim = inner_dim // attn.heads - - query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - - if attn.norm_q is not None: - query = attn.norm_q(query) - if attn.norm_k is not None: - key = attn.norm_k(key) - - # Apply RoPE if needed - if image_rotary_emb is not None: - from diffusers.models.embeddings import apply_rotary_emb - - query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) - if not attn.is_cross_attention: - key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) - - if SAGEATTN_IS_AVAVILABLE: - hidden_states = sageattn(query, key, value, is_causal=False) + + if attention_mode == "sageattn": + if SAGEATTN_IS_AVAILABLE: + hidden_states = sageattn(query, key, value, attn_mask=attention_mask, dropout_p=0.0,is_causal=False) + else: + raise ImportError("sageattn not found") else: hidden_states = 
F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False @@ -298,6 +226,7 @@ class CogVideoXBlock(nn.Module): ff_inner_dim: Optional[int] = None, ff_bias: bool = True, attention_out_bias: bool = True, + attention_mode: Optional[str] = None, ): super().__init__() @@ -326,7 +255,10 @@ class CogVideoXBlock(nn.Module): inner_dim=ff_inner_dim, bias=ff_bias, ) - + self.cached_hidden_states = [] + self.cached_encoder_hidden_states = [] + self.attention_mode = attention_mode + def forward( self, hidden_states: torch.Tensor, @@ -335,6 +267,7 @@ class CogVideoXBlock(nn.Module): image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, video_flow_feature: Optional[torch.Tensor] = None, fuser=None, + block_use_fastercache=False, fastercache_counter=0, fastercache_start_step=15, fastercache_device="cuda:0", @@ -352,33 +285,43 @@ class CogVideoXBlock(nn.Module): h = rearrange(norm_hidden_states, "B (T H W) C -> (B T) C H W", H=H, W=W) h = fuser(h, video_flow_feature.to(h), T=T) norm_hidden_states = rearrange(h, "(B T) C H W -> B (T H W) C", T=T) - del h, fuser - #fastercache - B = norm_hidden_states.shape[0] - if fastercache_counter >= fastercache_start_step + 3 and fastercache_counter%3!=0 and self.cached_hidden_states[-1].shape[0] >= B: - attn_hidden_states = ( - self.cached_hidden_states[1][:B] + - (self.cached_hidden_states[1][:B] - self.cached_hidden_states[0][:B]) - * 0.3 - ).to(norm_hidden_states.device, non_blocking=True) - attn_encoder_hidden_states = ( - self.cached_encoder_hidden_states[1][:B] + - (self.cached_encoder_hidden_states[1][:B] - self.cached_encoder_hidden_states[0][:B]) - * 0.3 - ).to(norm_hidden_states.device, non_blocking=True) + del h, fuser + + #region fastercache + if block_use_fastercache: + B = norm_hidden_states.shape[0] + if fastercache_counter >= fastercache_start_step + 3 and fastercache_counter%3!=0 and self.cached_hidden_states[-1].shape[0] >= B: + attn_hidden_states = ( + self.cached_hidden_states[1][:B] + + (self.cached_hidden_states[1][:B] - self.cached_hidden_states[0][:B]) + * 0.3 + ).to(norm_hidden_states.device, non_blocking=True) + attn_encoder_hidden_states = ( + self.cached_encoder_hidden_states[1][:B] + + (self.cached_encoder_hidden_states[1][:B] - self.cached_encoder_hidden_states[0][:B]) + * 0.3 + ).to(norm_hidden_states.device, non_blocking=True) + else: + attn_hidden_states, attn_encoder_hidden_states = self.attn1( + hidden_states=norm_hidden_states, + encoder_hidden_states=norm_encoder_hidden_states, + image_rotary_emb=image_rotary_emb, + attention_mode=self.attention_mode, + ) + if fastercache_counter == fastercache_start_step: + self.cached_hidden_states = [attn_hidden_states.to(fastercache_device), attn_hidden_states.to(fastercache_device)] + self.cached_encoder_hidden_states = [attn_encoder_hidden_states.to(fastercache_device), attn_encoder_hidden_states.to(fastercache_device)] + elif fastercache_counter > fastercache_start_step: + self.cached_hidden_states[-1].copy_(attn_hidden_states.to(fastercache_device)) + self.cached_encoder_hidden_states[-1].copy_(attn_encoder_hidden_states.to(fastercache_device)) else: attn_hidden_states, attn_encoder_hidden_states = self.attn1( hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, + attention_mode=self.attention_mode, ) - if fastercache_counter == fastercache_start_step: - self.cached_hidden_states = [attn_hidden_states.to(fastercache_device), 
attn_hidden_states.to(fastercache_device)] - self.cached_encoder_hidden_states = [attn_encoder_hidden_states.to(fastercache_device), attn_encoder_hidden_states.to(fastercache_device)] - elif fastercache_counter > fastercache_start_step: - self.cached_hidden_states[-1].copy_(attn_hidden_states.to(fastercache_device)) - self.cached_encoder_hidden_states[-1].copy_(attn_encoder_hidden_states.to(fastercache_device)) - + hidden_states = hidden_states + gate_msa * attn_hidden_states encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states @@ -481,6 +424,7 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin): temporal_interpolation_scale: float = 1.0, use_rotary_positional_embeddings: bool = False, add_noise_in_inpaint_model: bool = False, + attention_mode: Optional[str] = None, ): super().__init__() inner_dim = num_attention_heads * attention_head_dim @@ -554,6 +498,8 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin): self.fastercache_lf_step = 40 self.fastercache_hf_step = 30 self.fastercache_device = "cuda" + self.fastercache_num_blocks_to_cache = len(self.transformer_blocks) + self.attention_mode = attention_mode def _set_gradient_checkpointing(self, module, value=False): self.gradient_checkpointing = value @@ -720,6 +666,8 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin): image_rotary_emb=image_rotary_emb, video_flow_feature=video_flow_features[i][:1] if video_flow_features is not None else None, fuser = self.fuser_list[i] if self.fuser_list is not None else None, + block_use_fastercache = i <= self.fastercache_num_blocks_to_cache, + fastercache_start_step = self.fastercache_start_step, fastercache_counter = self.fastercache_counter, fastercache_device = self.fastercache_device ) @@ -770,7 +718,9 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin): image_rotary_emb=image_rotary_emb, video_flow_feature=video_flow_features[i] if video_flow_features is not None else None, fuser = self.fuser_list[i] if self.fuser_list is not None else None, + block_use_fastercache = i <= self.fastercache_num_blocks_to_cache, fastercache_counter = self.fastercache_counter, + fastercache_start_step = self.fastercache_start_step, fastercache_device = self.fastercache_device ) diff --git a/nodes.py b/nodes.py index 817683f..f2874cd 100644 --- a/nodes.py +++ b/nodes.py @@ -1180,6 +1180,8 @@ class CogVideoXFunSampler: pipe.transformer.fastercache_lf_step = fastercache["lf_step"] pipe.transformer.fastercache_hf_step = fastercache["hf_step"] pipe.transformer.fastercache_device = fastercache["cache_device"] + pipe.transformer.fastercache_num_blocks_to_cache = fastercache["num_blocks_to_cache"] + log.info(f"FasterCache enabled for {pipe.transformer.fastercache_num_blocks_to_cache} blocks out of {len(pipe.transformer.transformer_blocks)}") else: pipe.transformer.use_fastercache = False pipe.transformer.fastercache_counter = 0 @@ -1187,7 +1189,7 @@ class CogVideoXFunSampler: generator = torch.Generator(device=torch.device("cpu")).manual_seed(seed) autocastcondition = not pipeline["onediff"] or not dtype == torch.float32 - autocast_context = torch.autocast(mm.get_autocast_device(device)) if autocastcondition else nullcontext() + autocast_context = torch.autocast(mm.get_autocast_device(device), dtype=dtype) if autocastcondition else nullcontext() with autocast_context: video_length = int((video_length - 1) // pipe.vae.config.temporal_compression_ratio * pipe.vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 if vid2vid_images is 
not None: diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 13c960e..87d19e9 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -472,8 +472,15 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): # 5. Prepare latents. latent_channels = self.vae.config.latent_channels latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 + # For CogVideoX 1.5, the latent frames should be padded to make it divisible by patch_size_t - patch_size_t = self.transformer.config.patch_size_t + patch_size_t = getattr(self.transformer.config, "patch_size_t", None) + if patch_size_t is None: + self.transformer.config.patch_size_t = None + ofs_embed_dim = getattr(self.transformer.config, "ofs_embed_dim", None) + if ofs_embed_dim is None: + self.transformer.config.ofs_embed_dim = None + self.additional_frames = 0 if patch_size_t is not None and latent_frames % patch_size_t != 0: self.additional_frames = patch_size_t - latent_frames % patch_size_t From e8a289112f0dfbaec327368f9bd5e01a969fe797 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Wed, 13 Nov 2024 15:37:45 +0200 Subject: [PATCH 34/49] fix VAE scaling (again) --- model_loading.py | 5 ----- nodes.py | 10 ++++++---- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/model_loading.py b/model_loading.py index 959c8ff..fe9245b 100644 --- a/model_loading.py +++ b/model_loading.py @@ -350,8 +350,6 @@ class DownloadAndLoadCogVideoGGUFModel: def loadmodel(self, model, vae_precision, fp8_fastmode, load_device, enable_sequential_cpu_offload, pab_config=None, block_edit=None, compile="disabled", attention_mode="sdpa"): - check_diffusers_version() - device = mm.get_torch_device() offload_device = mm.unet_offload_device() mm.soft_empty_cache() @@ -597,9 +595,6 @@ class DownloadAndLoadToraModel: DESCRIPTION = "Downloads and loads the the Tora model from Huggingface to 'ComfyUI/models/CogVideo/CogVideoX-5b-Tora'" def loadmodel(self, model): - - check_diffusers_version() - device = mm.get_torch_device() offload_device = mm.unet_offload_device() mm.soft_empty_cache() diff --git a/nodes.py b/nodes.py index f2874cd..ecea9db 100644 --- a/nodes.py +++ b/nodes.py @@ -298,7 +298,7 @@ class CogVideoTextEncode: embeds = clip.encode_from_tokens(tokens, return_pooled=False, return_dict=False) - if embeds.shape[1] > 226: + if embeds.shape[1] > max_tokens: raise ValueError(f"Prompt is too long, max tokens supported is {max_tokens} or less, got {embeds.shape[1]}") embeds *= strength if force_offload: @@ -371,7 +371,7 @@ class CogVideoImageEncode: model_name = pipeline.get("model_name", "") if ("1.5" in model_name or "1_5" in model_name) and image.shape[0] == 1: - vae_scaling_factor = 1 / vae.config.scaling_factor + vae_scaling_factor = 1 #/ vae.config.scaling_factor else: vae_scaling_factor = vae.config.scaling_factor @@ -599,16 +599,18 @@ class ToraEncodeTrajectory: vae.to(device) video_flow = vae.encode(video_flow).latent_dist.sample(generator) * vae.config.scaling_factor + log.info(f"video_flow shape after encoding: {video_flow.shape}") #torch.Size([1, 16, 4, 80, 80]) if not pipeline["cpu_offloading"]: vae.to(offload_device) - + #print("video_flow shape before traj_extractor: ", video_flow.shape) #torch.Size([1, 16, 4, 80, 80]) video_flow_features = tora_model["traj_extractor"](video_flow.to(torch.float32)) video_flow_features = torch.stack(video_flow_features) + #print("video_flow_features after traj_extractor: ", video_flow_features.shape) #torch.Size([42, 4, 128, 40, 40]) 
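# The leading dimension (42) matches the 5B model's transformer block count
# (cf. the per-block fuser_list indexing), so the `strength` multiply below
# scales the flow guidance injected at every block uniformly.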
         video_flow_features = video_flow_features * strength
-        log.info(f"video_flow shape: {video_flow.shape}")
+
         tora = {
             "video_flow_features" : video_flow_features,

From 0bd3da569ead6b36982dbeb55500a1c4963d137d Mon Sep 17 00:00:00 2001
From: kijai <40791699+kijai@users.noreply.github.com>
Date: Thu, 14 Nov 2024 19:54:52 +0200
Subject: [PATCH 35/49] code cleanup

codebase getting too bloated:
drop PAB support in favor of FasterCache
drop temporal tiling in favor of FreeNoise
---
 cogvideox_fun/fun_pab_transformer_3d.py     | 741 --------------------
 cogvideox_fun/pipeline_cogvideox_control.py | 116 +--
 cogvideox_fun/pipeline_cogvideox_inpaint.py | 126 +---
 model_loading.py                            |  50 +-
 nodes.py                                    |  64 +-
 pipeline_cogvideox.py                       | 154 +---
 videosys/cogvideox_transformer_3d.py        | 621 ----------------
 videosys/core/__init__.py                   |   0
 videosys/core/pab_mgr.py                    | 232 ------
 videosys/core/pipeline.py                   |  44 --
 videosys/modules/__init__.py                |   0
 videosys/modules/activations.py             |   3 -
 videosys/modules/downsampling.py            |  71 --
 videosys/modules/embeddings.py              | 308 --------
 videosys/modules/normalization.py           |  85 ---
 videosys/modules/upsampling.py              |  67 --
 videosys/pab.py                             |  64 --
 17 files changed, 35 insertions(+), 2711 deletions(-)
 delete mode 100644 cogvideox_fun/fun_pab_transformer_3d.py
 delete mode 100644 videosys/cogvideox_transformer_3d.py
 delete mode 100644 videosys/core/__init__.py
 delete mode 100644 videosys/core/pab_mgr.py
 delete mode 100644 videosys/core/pipeline.py
 delete mode 100644 videosys/modules/__init__.py
 delete mode 100644 videosys/modules/activations.py
 delete mode 100644 videosys/modules/downsampling.py
 delete mode 100644 videosys/modules/embeddings.py
 delete mode 100644 videosys/modules/normalization.py
 delete mode 100644 videosys/modules/upsampling.py
 delete mode 100644 videosys/pab.py

diff --git a/cogvideox_fun/fun_pab_transformer_3d.py b/cogvideox_fun/fun_pab_transformer_3d.py
deleted file mode 100644
index 25a3934..0000000
--- a/cogvideox_fun/fun_pab_transformer_3d.py
+++ /dev/null
@@ -1,741 +0,0 @@
-# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team.
-# All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
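# (Context for the removal below: this PAB path cached each block's most
#  recent attention output and re-served it across nearby timesteps via
#  if_broadcast_spatial(); FasterCache, which this series keeps, covers the
#  same ground by extrapolating cached attention,
#  cached[1] + 0.3 * (cached[1] - cached[0]), on two of every three steps
#  once fastercache_start_step has passed.)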
- -from typing import Any, Dict, Optional, Tuple, Union - -import os -import json -import torch -import glob -import torch.nn.functional as F -from torch import nn - -from diffusers.configuration_utils import ConfigMixin, register_to_config -from diffusers.utils import is_torch_version, logging -from diffusers.utils.torch_utils import maybe_allow_in_graph -from diffusers.models.attention import Attention, FeedForward -#from diffusers.models.attention_processor import AttentionProcessor -from diffusers.models.embeddings import TimestepEmbedding, Timesteps, get_3d_sincos_pos_embed -from diffusers.models.modeling_outputs import Transformer2DModelOutput -from diffusers.models.modeling_utils import ModelMixin -#from diffusers.models.normalization import AdaLayerNorm, CogVideoXLayerNormZero - -from ..videosys.modules.normalization import AdaLayerNorm, CogVideoXLayerNormZero -from ..videosys.modules.embeddings import apply_rotary_emb -from ..videosys.core.pab_mgr import enable_pab, if_broadcast_spatial -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - -try: - from sageattention import sageattn - SAGEATTN_IS_AVAVILABLE = True - logger.info("Using sageattn") -except: - logger.info("sageattn not found, using sdpa") - SAGEATTN_IS_AVAVILABLE = False - -class CogVideoXAttnProcessor2_0: - r""" - Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on - query and key vectors, but does not include spatial normalization. - """ - - def __init__(self): - if not hasattr(F, "scaled_dot_product_attention"): - raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") - - def __call__( - self, - attn: Attention, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - image_rotary_emb: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - text_seq_length = encoder_hidden_states.size(1) - - hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - - batch_size, sequence_length, _ = ( - hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape - ) - - if attention_mask is not None: - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) - attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) - - query = attn.to_q(hidden_states) - key = attn.to_k(hidden_states) - value = attn.to_v(hidden_states) - - # if attn.parallel_manager.sp_size > 1: - # assert ( - # attn.heads % attn.parallel_manager.sp_size == 0 - # ), f"Number of heads {attn.heads} must be divisible by sequence parallel size {attn.parallel_manager.sp_size}" - # attn_heads = attn.heads // attn.parallel_manager.sp_size - # query, key, value = map( - # lambda x: all_to_all_comm(x, attn.parallel_manager.sp_group, scatter_dim=2, gather_dim=1), - # [query, key, value], - # ) - - attn_heads = attn.heads - - inner_dim = key.shape[-1] - head_dim = inner_dim // attn_heads - - query = query.view(batch_size, -1, attn_heads, head_dim).transpose(1, 2) - key = key.view(batch_size, -1, attn_heads, head_dim).transpose(1, 2) - value = value.view(batch_size, -1, attn_heads, head_dim).transpose(1, 2) - - if attn.norm_q is not None: - query = attn.norm_q(query) - if attn.norm_k is not None: - key = attn.norm_k(key) - - # Apply RoPE if needed - if image_rotary_emb is not None: - emb_len = image_rotary_emb[0].shape[0] - query[:, :, text_seq_length : emb_len + 
text_seq_length] = apply_rotary_emb( - query[:, :, text_seq_length : emb_len + text_seq_length], image_rotary_emb - ) - if not attn.is_cross_attention: - key[:, :, text_seq_length : emb_len + text_seq_length] = apply_rotary_emb( - key[:, :, text_seq_length : emb_len + text_seq_length], image_rotary_emb - ) - - if SAGEATTN_IS_AVAVILABLE: - hidden_states = sageattn(query, key, value, is_causal=False) - else: - hidden_states = F.scaled_dot_product_attention( - query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False - ) - - hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn_heads * head_dim) - - #if attn.parallel_manager.sp_size > 1: - # hidden_states = all_to_all_comm(hidden_states, attn.parallel_manager.sp_group, scatter_dim=1, gather_dim=2) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - encoder_hidden_states, hidden_states = hidden_states.split( - [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 - ) - return hidden_states, encoder_hidden_states - - -class FusedCogVideoXAttnProcessor2_0: - r""" - Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on - query and key vectors, but does not include spatial normalization. - """ - - def __init__(self): - if not hasattr(F, "scaled_dot_product_attention"): - raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") - - def __call__( - self, - attn: Attention, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - image_rotary_emb: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - text_seq_length = encoder_hidden_states.size(1) - - hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - - batch_size, sequence_length, _ = ( - hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape - ) - - if attention_mask is not None: - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) - attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) - - qkv = attn.to_qkv(hidden_states) - split_size = qkv.shape[-1] // 3 - query, key, value = torch.split(qkv, split_size, dim=-1) - - inner_dim = key.shape[-1] - head_dim = inner_dim // attn.heads - - query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - - if attn.norm_q is not None: - query = attn.norm_q(query) - if attn.norm_k is not None: - key = attn.norm_k(key) - - # Apply RoPE if needed - if image_rotary_emb is not None: - query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) - if not attn.is_cross_attention: - key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) - - if SAGEATTN_IS_AVAVILABLE: - hidden_states = sageattn(query, key, value, is_causal=False) - else: - hidden_states = F.scaled_dot_product_attention( - query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False - ) - - hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - 
encoder_hidden_states, hidden_states = hidden_states.split( - [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 - ) - return hidden_states, encoder_hidden_states - -class CogVideoXPatchEmbed(nn.Module): - def __init__( - self, - patch_size: int = 2, - in_channels: int = 16, - embed_dim: int = 1920, - text_embed_dim: int = 4096, - bias: bool = True, - ) -> None: - super().__init__() - self.patch_size = patch_size - - self.proj = nn.Conv2d( - in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias - ) - self.text_proj = nn.Linear(text_embed_dim, embed_dim) - - def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor): - r""" - Args: - text_embeds (`torch.Tensor`): - Input text embeddings. Expected shape: (batch_size, seq_length, embedding_dim). - image_embeds (`torch.Tensor`): - Input image embeddings. Expected shape: (batch_size, num_frames, channels, height, width). - """ - text_embeds = self.text_proj(text_embeds) - - batch, num_frames, channels, height, width = image_embeds.shape - image_embeds = image_embeds.reshape(-1, channels, height, width) - image_embeds = self.proj(image_embeds) - image_embeds = image_embeds.view(batch, num_frames, *image_embeds.shape[1:]) - image_embeds = image_embeds.flatten(3).transpose(2, 3) # [batch, num_frames, height x width, channels] - image_embeds = image_embeds.flatten(1, 2) # [batch, num_frames x height x width, channels] - - embeds = torch.cat( - [text_embeds, image_embeds], dim=1 - ).contiguous() # [batch, seq_length + num_frames x height x width, channels] - return embeds - -@maybe_allow_in_graph -class CogVideoXBlock(nn.Module): - r""" - Transformer block used in [CogVideoX](https://github.com/THUDM/CogVideo) model. - - Parameters: - dim (`int`): - The number of channels in the input and output. - num_attention_heads (`int`): - The number of heads to use for multi-head attention. - attention_head_dim (`int`): - The number of channels in each head. - time_embed_dim (`int`): - The number of channels in timestep embedding. - dropout (`float`, defaults to `0.0`): - The dropout probability to use. - activation_fn (`str`, defaults to `"gelu-approximate"`): - Activation function to be used in feed-forward. - attention_bias (`bool`, defaults to `False`): - Whether or not to use bias in attention projection layers. - qk_norm (`bool`, defaults to `True`): - Whether or not to use normalization after query and key projections in Attention. - norm_elementwise_affine (`bool`, defaults to `True`): - Whether to use learnable elementwise affine parameters for normalization. - norm_eps (`float`, defaults to `1e-5`): - Epsilon value for normalization layers. - final_dropout (`bool` defaults to `False`): - Whether to apply a final dropout after the last feed-forward layer. - ff_inner_dim (`int`, *optional*, defaults to `None`): - Custom hidden dimension of Feed-forward layer. If not provided, `4 * dim` is used. - ff_bias (`bool`, defaults to `True`): - Whether or not to use bias in Feed-forward layer. - attention_out_bias (`bool`, defaults to `True`): - Whether or not to use bias in Attention output projection layer. 
- """ - - def __init__( - self, - dim: int, - num_attention_heads: int, - attention_head_dim: int, - time_embed_dim: int, - dropout: float = 0.0, - activation_fn: str = "gelu-approximate", - attention_bias: bool = False, - qk_norm: bool = True, - norm_elementwise_affine: bool = True, - norm_eps: float = 1e-5, - final_dropout: bool = True, - ff_inner_dim: Optional[int] = None, - ff_bias: bool = True, - attention_out_bias: bool = True, - block_idx: int = 0, - ): - super().__init__() - - # 1. Self Attention - self.norm1 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True) - - self.attn1 = Attention( - query_dim=dim, - dim_head=attention_head_dim, - heads=num_attention_heads, - qk_norm="layer_norm" if qk_norm else None, - eps=1e-6, - bias=attention_bias, - out_bias=attention_out_bias, - processor=CogVideoXAttnProcessor2_0(), - ) - - # parallel - #self.attn1.parallel_manager = None - - # 2. Feed Forward - self.norm2 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True) - - self.ff = FeedForward( - dim, - dropout=dropout, - activation_fn=activation_fn, - final_dropout=final_dropout, - inner_dim=ff_inner_dim, - bias=ff_bias, - ) - - # pab - self.attn_count = 0 - self.last_attn = None - self.block_idx = block_idx - - def forward( - self, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - temb: torch.Tensor, - image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - timestep=None, - ) -> torch.Tensor: - text_seq_length = encoder_hidden_states.size(1) - - # norm & modulate - norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1( - hidden_states, encoder_hidden_states, temb - ) - - # attention - if enable_pab(): - broadcast_attn, self.attn_count = if_broadcast_spatial(int(timestep[0]), self.attn_count, self.block_idx) - if enable_pab() and broadcast_attn: - attn_hidden_states, attn_encoder_hidden_states = self.last_attn - else: - attn_hidden_states, attn_encoder_hidden_states = self.attn1( - hidden_states=norm_hidden_states, - encoder_hidden_states=norm_encoder_hidden_states, - image_rotary_emb=image_rotary_emb, - ) - if enable_pab(): - self.last_attn = (attn_hidden_states, attn_encoder_hidden_states) - - hidden_states = hidden_states + gate_msa * attn_hidden_states - encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states - - # norm & modulate - norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2( - hidden_states, encoder_hidden_states, temb - ) - - # feed-forward - norm_hidden_states = torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1) - ff_output = self.ff(norm_hidden_states) - - hidden_states = hidden_states + gate_ff * ff_output[:, text_seq_length:] - encoder_hidden_states = encoder_hidden_states + enc_gate_ff * ff_output[:, :text_seq_length] - - return hidden_states, encoder_hidden_states - - -class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin): - """ - A Transformer model for video-like data in [CogVideoX](https://github.com/THUDM/CogVideo). - - Parameters: - num_attention_heads (`int`, defaults to `30`): - The number of heads to use for multi-head attention. - attention_head_dim (`int`, defaults to `64`): - The number of channels in each head. - in_channels (`int`, defaults to `16`): - The number of channels in the input. - out_channels (`int`, *optional*, defaults to `16`): - The number of channels in the output. 
- flip_sin_to_cos (`bool`, defaults to `True`): - Whether to flip the sin to cos in the time embedding. - time_embed_dim (`int`, defaults to `512`): - Output dimension of timestep embeddings. - text_embed_dim (`int`, defaults to `4096`): - Input dimension of text embeddings from the text encoder. - num_layers (`int`, defaults to `30`): - The number of layers of Transformer blocks to use. - dropout (`float`, defaults to `0.0`): - The dropout probability to use. - attention_bias (`bool`, defaults to `True`): - Whether or not to use bias in the attention projection layers. - sample_width (`int`, defaults to `90`): - The width of the input latents. - sample_height (`int`, defaults to `60`): - The height of the input latents. - sample_frames (`int`, defaults to `49`): - The number of frames in the input latents. Note that this parameter was incorrectly initialized to 49 - instead of 13 because CogVideoX processed 13 latent frames at once in its default and recommended settings, - but cannot be changed to the correct value to ensure backwards compatibility. To create a transformer with - K latent frames, the correct value to pass here would be: ((K - 1) * temporal_compression_ratio + 1). - patch_size (`int`, defaults to `2`): - The size of the patches to use in the patch embedding layer. - temporal_compression_ratio (`int`, defaults to `4`): - The compression ratio across the temporal dimension. See documentation for `sample_frames`. - max_text_seq_length (`int`, defaults to `226`): - The maximum sequence length of the input text embeddings. - activation_fn (`str`, defaults to `"gelu-approximate"`): - Activation function to use in feed-forward. - timestep_activation_fn (`str`, defaults to `"silu"`): - Activation function to use when generating the timestep embeddings. - norm_elementwise_affine (`bool`, defaults to `True`): - Whether or not to use elementwise affine in normalization layers. - norm_eps (`float`, defaults to `1e-5`): - The epsilon value to use in normalization layers. - spatial_interpolation_scale (`float`, defaults to `1.875`): - Scaling factor to apply in 3D positional embeddings across spatial dimensions. - temporal_interpolation_scale (`float`, defaults to `1.0`): - Scaling factor to apply in 3D positional embeddings across temporal dimensions. 
- """ - - _supports_gradient_checkpointing = True - - @register_to_config - def __init__( - self, - num_attention_heads: int = 30, - attention_head_dim: int = 64, - in_channels: int = 16, - out_channels: Optional[int] = 16, - flip_sin_to_cos: bool = True, - freq_shift: int = 0, - time_embed_dim: int = 512, - text_embed_dim: int = 4096, - num_layers: int = 30, - dropout: float = 0.0, - attention_bias: bool = True, - sample_width: int = 90, - sample_height: int = 60, - sample_frames: int = 49, - patch_size: int = 2, - temporal_compression_ratio: int = 4, - max_text_seq_length: int = 226, - activation_fn: str = "gelu-approximate", - timestep_activation_fn: str = "silu", - norm_elementwise_affine: bool = True, - norm_eps: float = 1e-5, - spatial_interpolation_scale: float = 1.875, - temporal_interpolation_scale: float = 1.0, - use_rotary_positional_embeddings: bool = False, - add_noise_in_inpaint_model: bool = False, - ): - super().__init__() - inner_dim = num_attention_heads * attention_head_dim - - post_patch_height = sample_height // patch_size - post_patch_width = sample_width // patch_size - post_time_compression_frames = (sample_frames - 1) // temporal_compression_ratio + 1 - self.num_patches = post_patch_height * post_patch_width * post_time_compression_frames - self.post_patch_height = post_patch_height - self.post_patch_width = post_patch_width - self.post_time_compression_frames = post_time_compression_frames - self.patch_size = patch_size - - # 1. Patch embedding - self.patch_embed = CogVideoXPatchEmbed(patch_size, in_channels, inner_dim, text_embed_dim, bias=True) - self.embedding_dropout = nn.Dropout(dropout) - - # 2. 3D positional embeddings - spatial_pos_embedding = get_3d_sincos_pos_embed( - inner_dim, - (post_patch_width, post_patch_height), - post_time_compression_frames, - spatial_interpolation_scale, - temporal_interpolation_scale, - ) - spatial_pos_embedding = torch.from_numpy(spatial_pos_embedding).flatten(0, 1) - pos_embedding = torch.zeros(1, max_text_seq_length + self.num_patches, inner_dim, requires_grad=False) - pos_embedding.data[:, max_text_seq_length:].copy_(spatial_pos_embedding) - self.register_buffer("pos_embedding", pos_embedding, persistent=False) - - # 3. Time embeddings - self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift) - self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn) - - # 4. Define spatio-temporal transformers blocks - self.transformer_blocks = nn.ModuleList( - [ - CogVideoXBlock( - dim=inner_dim, - num_attention_heads=num_attention_heads, - attention_head_dim=attention_head_dim, - time_embed_dim=time_embed_dim, - dropout=dropout, - activation_fn=activation_fn, - attention_bias=attention_bias, - norm_elementwise_affine=norm_elementwise_affine, - norm_eps=norm_eps, - ) - for _ in range(num_layers) - ] - ) - self.norm_final = nn.LayerNorm(inner_dim, norm_eps, norm_elementwise_affine) - - # 5. 
Output blocks - self.norm_out = AdaLayerNorm( - embedding_dim=time_embed_dim, - output_dim=2 * inner_dim, - norm_elementwise_affine=norm_elementwise_affine, - norm_eps=norm_eps, - chunk_dim=1, - ) - self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels) - - self.gradient_checkpointing = False - - def _set_gradient_checkpointing(self, module, value=False): - self.gradient_checkpointing = value - - # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedCogVideoXAttnProcessor2_0 - def fuse_qkv_projections(self): - """ - Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) - are fused. For cross-attention modules, key and value projection matrices are fused. - - - - This API is 🧪 experimental. - - - """ - self.original_attn_processors = None - - for _, attn_processor in self.attn_processors.items(): - if "Added" in str(attn_processor.__class__.__name__): - raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") - - self.original_attn_processors = self.attn_processors - - for module in self.modules(): - if isinstance(module, Attention): - module.fuse_projections(fuse=True) - - self.set_attn_processor(FusedCogVideoXAttnProcessor2_0()) - - # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections - def unfuse_qkv_projections(self): - """Disables the fused QKV projection if enabled. - - - - This API is 🧪 experimental. - - - - """ - if self.original_attn_processors is not None: - self.set_attn_processor(self.original_attn_processors) - - def forward( - self, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - timestep: Union[int, float, torch.LongTensor], - timestep_cond: Optional[torch.Tensor] = None, - inpaint_latents: Optional[torch.Tensor] = None, - control_latents: Optional[torch.Tensor] = None, - image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - return_dict: bool = True, - ): - batch_size, num_frames, channels, height, width = hidden_states.shape - - # 1. Time embedding - timesteps = timestep - t_emb = self.time_proj(timesteps) - - # timesteps does not contain any weights and will always return f32 tensors - # but time_embedding might actually be running in fp16. so we need to cast here. - # there might be better ways to encapsulate this. - t_emb = t_emb.to(dtype=hidden_states.dtype) - emb = self.time_embedding(t_emb, timestep_cond) - - # 2. Patch embedding - if inpaint_latents is not None: - hidden_states = torch.concat([hidden_states, inpaint_latents], 2) - if control_latents is not None: - hidden_states = torch.concat([hidden_states, control_latents], 2) - hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) - - # 3. 
Position embedding - text_seq_length = encoder_hidden_states.shape[1] - if not self.config.use_rotary_positional_embeddings: - seq_length = height * width * num_frames // (self.config.patch_size**2) - # pos_embeds = self.pos_embedding[:, : text_seq_length + seq_length] - pos_embeds = self.pos_embedding - emb_size = hidden_states.size()[-1] - pos_embeds_without_text = pos_embeds[:, text_seq_length: ].view(1, self.post_time_compression_frames, self.post_patch_height, self.post_patch_width, emb_size) - pos_embeds_without_text = pos_embeds_without_text.permute([0, 4, 1, 2, 3]) - pos_embeds_without_text = F.interpolate(pos_embeds_without_text,size=[self.post_time_compression_frames, height // self.config.patch_size, width // self.config.patch_size],mode='trilinear',align_corners=False) - pos_embeds_without_text = pos_embeds_without_text.permute([0, 2, 3, 4, 1]).view(1, -1, emb_size) - pos_embeds = torch.cat([pos_embeds[:, :text_seq_length], pos_embeds_without_text], dim = 1) - pos_embeds = pos_embeds[:, : text_seq_length + seq_length] - hidden_states = hidden_states + pos_embeds - hidden_states = self.embedding_dropout(hidden_states) - - encoder_hidden_states = hidden_states[:, :text_seq_length] - hidden_states = hidden_states[:, text_seq_length:] - - # 4. Transformer blocks - - for i, block in enumerate(self.transformer_blocks): - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} - hidden_states, encoder_hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(block), - hidden_states, - encoder_hidden_states, - emb, - image_rotary_emb, - **ckpt_kwargs, - ) - else: - hidden_states, encoder_hidden_states = block( - hidden_states=hidden_states, - encoder_hidden_states=encoder_hidden_states, - temb=emb, - image_rotary_emb=image_rotary_emb, - timestep=timestep, - ) - - if not self.config.use_rotary_positional_embeddings: - # CogVideoX-2B - hidden_states = self.norm_final(hidden_states) - else: - # CogVideoX-5B - hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - hidden_states = self.norm_final(hidden_states) - hidden_states = hidden_states[:, text_seq_length:] - - # 5. Final block - hidden_states = self.norm_out(hidden_states, temb=emb) - hidden_states = self.proj_out(hidden_states) - - # 6. 
Unpatchify - p = self.config.patch_size - output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, channels, p, p) - output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) - - if not return_dict: - return (output,) - return Transformer2DModelOutput(sample=output) - - @classmethod - def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, transformer_additional_kwargs={}): - if subfolder is not None: - pretrained_model_path = os.path.join(pretrained_model_path, subfolder) - print(f"loaded 3D transformer's pretrained weights from {pretrained_model_path} ...") - - config_file = os.path.join(pretrained_model_path, 'config.json') - if not os.path.isfile(config_file): - raise RuntimeError(f"{config_file} does not exist") - with open(config_file, "r") as f: - config = json.load(f) - - from diffusers.utils import WEIGHTS_NAME - model = cls.from_config(config, **transformer_additional_kwargs) - model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME) - model_file_safetensors = model_file.replace(".bin", ".safetensors") - if os.path.exists(model_file): - state_dict = torch.load(model_file, map_location="cpu") - elif os.path.exists(model_file_safetensors): - from safetensors.torch import load_file, safe_open - state_dict = load_file(model_file_safetensors) - else: - from safetensors.torch import load_file, safe_open - model_files_safetensors = glob.glob(os.path.join(pretrained_model_path, "*.safetensors")) - state_dict = {} - for model_file_safetensors in model_files_safetensors: - _state_dict = load_file(model_file_safetensors) - for key in _state_dict: - state_dict[key] = _state_dict[key] - - if model.state_dict()['patch_embed.proj.weight'].size() != state_dict['patch_embed.proj.weight'].size(): - new_shape = model.state_dict()['patch_embed.proj.weight'].size() - if len(new_shape) == 5: - state_dict['patch_embed.proj.weight'] = state_dict['patch_embed.proj.weight'].unsqueeze(2).expand(new_shape).clone() - state_dict['patch_embed.proj.weight'][:, :, :-1] = 0 - else: - if model.state_dict()['patch_embed.proj.weight'].size()[1] > state_dict['patch_embed.proj.weight'].size()[1]: - model.state_dict()['patch_embed.proj.weight'][:, :state_dict['patch_embed.proj.weight'].size()[1], :, :] = state_dict['patch_embed.proj.weight'] - model.state_dict()['patch_embed.proj.weight'][:, state_dict['patch_embed.proj.weight'].size()[1]:, :, :] = 0 - state_dict['patch_embed.proj.weight'] = model.state_dict()['patch_embed.proj.weight'] - else: - model.state_dict()['patch_embed.proj.weight'][:, :, :, :] = state_dict['patch_embed.proj.weight'][:, :model.state_dict()['patch_embed.proj.weight'].size()[1], :, :] - state_dict['patch_embed.proj.weight'] = model.state_dict()['patch_embed.proj.weight'] - - tmp_state_dict = {} - for key in state_dict: - if key in model.state_dict().keys() and model.state_dict()[key].size() == state_dict[key].size(): - tmp_state_dict[key] = state_dict[key] - else: - print(key, "Size don't match, skip") - state_dict = tmp_state_dict - - m, u = model.load_state_dict(state_dict, strict=False) - print(f"### missing keys: {len(m)}; \n### unexpected keys: {len(u)};") - print(m) - - params = [p.numel() if "mamba" in n else 0 for n, p in model.named_parameters()] - print(f"### Mamba Parameters: {sum(params) / 1e6} M") - - params = [p.numel() if "attn1." 
in n else 0 for n, p in model.named_parameters()] - print(f"### attn1 Parameters: {sum(params) / 1e6} M") - - return model \ No newline at end of file diff --git a/cogvideox_fun/pipeline_cogvideox_control.py b/cogvideox_fun/pipeline_cogvideox_control.py index 85687fe..f598147 100644 --- a/cogvideox_fun/pipeline_cogvideox_control.py +++ b/cogvideox_fun/pipeline_cogvideox_control.py @@ -33,10 +33,6 @@ from diffusers.video_processor import VideoProcessor from diffusers.image_processor import VaeImageProcessor from einops import rearrange -from ..videosys.core.pipeline import VideoSysPipeline -from ..videosys.cogvideox_transformer_3d import CogVideoXTransformer3DModel as CogVideoXTransformer3DModelPAB -from ..videosys.core.pab_mgr import set_pab_manager - logger = logging.get_logger(__name__) # pylint: disable=invalid-name @@ -158,7 +154,7 @@ class CogVideoX_Fun_PipelineOutput(BaseOutput): videos: torch.Tensor -class CogVideoX_Fun_Pipeline_Control(VideoSysPipeline): +class CogVideoX_Fun_Pipeline_Control(DiffusionPipeline): r""" Pipeline for text-to-video generation using CogVideoX. @@ -188,7 +184,6 @@ class CogVideoX_Fun_Pipeline_Control(VideoSysPipeline): vae: AutoencoderKLCogVideoX, transformer: CogVideoXTransformer3DModel, scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler], - pab_config = None ): super().__init__() @@ -210,9 +205,6 @@ class CogVideoX_Fun_Pipeline_Control(VideoSysPipeline): vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) - if pab_config is not None: - set_pab_manager(pab_config) - def prepare_latents( self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, timesteps, denoise_strength, num_inference_steps, latents=None, freenoise=True, context_size=None, context_overlap=None @@ -348,16 +340,6 @@ class CogVideoX_Fun_Pipeline_Control(VideoSysPipeline): if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs - - def _gaussian_weights(self, t_tile_length, t_batch_size): - from numpy import pi, exp, sqrt - - var = 0.01 - midpoint = (t_tile_length - 1) / 2 # -1 because index goes from 0 to latent_width - 1 - t_probs = [exp(-(t-midpoint)*(t-midpoint)/(t_tile_length*t_tile_length)/(2*var)) / sqrt(2*pi*var) for t in range(t_tile_length)] - weights = torch.tensor(t_probs) - weights = weights.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(4).repeat(1, t_batch_size,1, 1, 1) - return weights # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs def check_inputs( @@ -697,24 +679,15 @@ class CogVideoX_Fun_Pipeline_Control(VideoSysPipeline): # 8. Denoising loop num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) - # 8.5. 
Temporal tiling prep
-        if context_schedule is not None and context_schedule == "temporal_tiling":
-            t_tile_length = context_frames
-            t_tile_overlap = context_overlap
-            t_tile_weights = self._gaussian_weights(t_tile_length=t_tile_length, t_batch_size=1).to(latents.device).to(self.vae.dtype)
-            use_temporal_tiling = True
-            print("Temporal tiling enabled")
-        elif context_schedule is not None:
+        if context_schedule is not None:
             print(f"Context schedule enabled: {context_frames} frames, {context_stride} stride, {context_overlap} overlap")
-            use_temporal_tiling = False
             use_context_schedule = True
             from .context import get_context_scheduler
             context = get_context_scheduler(context_schedule)
         else:
-            use_temporal_tiling = False
             use_context_schedule = False
-            print("Temporal tiling and context schedule disabled")
+            print("Context schedule disabled")
         # 7. Create rotary embeds if required
         image_rotary_emb = (
             self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
             if self.transformer.config.use_rotary_positional_embeddings
             else None
         )
@@ -735,88 +708,7 @@
         for i, t in enumerate(timesteps):
             if self.interrupt:
                 continue
-
-            if use_temporal_tiling and isinstance(self.scheduler, CogVideoXDDIMScheduler):
-                #temporal tiling code based on https://github.com/mayuelala/FollowYourEmoji/blob/main/models/video_pipeline.py
-                # =====================================================
-                grid_ts = 0
-                cur_t = 0
-                while cur_t < latents.shape[1]:
-                    cur_t = max(grid_ts * t_tile_length - t_tile_overlap * grid_ts, 0) + t_tile_length
-                    grid_ts += 1
-
-                all_t = latents.shape[1]
-                latents_all_list = []
-                # =====================================================
-
-                image_rotary_emb = (
-                    self._prepare_rotary_positional_embeddings(height, width, context_frames, device)
-                    if self.transformer.config.use_rotary_positional_embeddings
-                    else None
-                )
-
-                for t_i in range(grid_ts):
-                    if t_i < grid_ts - 1:
-                        ofs_t = max(t_i * t_tile_length - t_tile_overlap * t_i, 0)
-                    if t_i == grid_ts - 1:
-                        ofs_t = all_t - t_tile_length
-
-                    input_start_t = ofs_t
-                    input_end_t = ofs_t + t_tile_length
-
-                    latents_tile = latents[:, input_start_t:input_end_t,:, :, :]
-                    control_latents_tile = control_latents[:, input_start_t:input_end_t, :, :, :]
-
-                    latent_model_input_tile = torch.cat([latents_tile] * 2) if do_classifier_free_guidance else latents_tile
-                    latent_model_input_tile = self.scheduler.scale_model_input(latent_model_input_tile, t)
-
-                    #t_input = t[None].to(device)
-                    t_input = t.expand(latent_model_input_tile.shape[0]) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
-
-                    # predict noise model_output
-                    noise_pred = self.transformer(
-                        hidden_states=latent_model_input_tile,
-                        encoder_hidden_states=prompt_embeds,
-                        timestep=t_input,
-                        image_rotary_emb=image_rotary_emb,
-                        return_dict=False,
-                        control_latents=control_latents_tile,
-                    )[0]
-                    noise_pred = noise_pred.float()
-
-                    if do_classifier_free_guidance:
-                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                        noise_pred = noise_pred_uncond + self._guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-                    # compute the previous noisy sample x_t -> x_t-1
-                    latents_tile = self.scheduler.step(noise_pred, t, latents_tile.to(self.vae.dtype), **extra_step_kwargs, return_dict=False)[0]
-                    latents_all_list.append(latents_tile)
-
-                # ==========================================
-                latents_all = torch.zeros(latents.shape, device=latents.device, dtype=self.vae.dtype)
-                contributors = torch.zeros(latents.shape, device=latents.device, dtype=self.vae.dtype)
-                # Add each tile
contribution to overall latents - for t_i in range(grid_ts): - if t_i < grid_ts - 1: - ofs_t = max(t_i * t_tile_length - t_tile_overlap * t_i, 0) - if t_i == grid_ts - 1: - ofs_t = all_t - t_tile_length - - input_start_t = ofs_t - input_end_t = ofs_t + t_tile_length - - latents_all[:, input_start_t:input_end_t,:, :, :] += latents_all_list[t_i] * t_tile_weights - contributors[:, input_start_t:input_end_t,:, :, :] += t_tile_weights - - latents_all /= contributors - - latents = latents_all - - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - pbar.update(1) - # ========================================== - elif use_context_schedule: + if use_context_schedule: latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) diff --git a/cogvideox_fun/pipeline_cogvideox_inpaint.py b/cogvideox_fun/pipeline_cogvideox_inpaint.py index 4b3b4f3..7b9d8e7 100644 --- a/cogvideox_fun/pipeline_cogvideox_inpaint.py +++ b/cogvideox_fun/pipeline_cogvideox_inpaint.py @@ -33,11 +33,6 @@ from diffusers.video_processor import VideoProcessor from diffusers.image_processor import VaeImageProcessor from einops import rearrange -from ..videosys.core.pipeline import VideoSysPipeline -from ..videosys.cogvideox_transformer_3d import CogVideoXTransformer3DModel as CogVideoXTransformer3DModelPAB -from ..videosys.core.pab_mgr import set_pab_manager - - logger = logging.get_logger(__name__) # pylint: disable=invalid-name @@ -206,7 +201,7 @@ class CogVideoX_Fun_PipelineOutput(BaseOutput): videos: torch.Tensor -class CogVideoX_Fun_Pipeline_Inpaint(VideoSysPipeline): +class CogVideoX_Fun_Pipeline_Inpaint(DiffusionPipeline): r""" Pipeline for text-to-video generation using CogVideoX. @@ -236,7 +231,6 @@ class CogVideoX_Fun_Pipeline_Inpaint(VideoSysPipeline): vae: AutoencoderKLCogVideoX, transformer: CogVideoXTransformer3DModel, scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler], - pab_config = None ): super().__init__() @@ -258,9 +252,6 @@ class CogVideoX_Fun_Pipeline_Inpaint(VideoSysPipeline): vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True ) - if pab_config is not None: - set_pab_manager(pab_config) - def prepare_latents( self, batch_size, @@ -433,16 +424,6 @@ class CogVideoX_Fun_Pipeline_Inpaint(VideoSysPipeline): extra_step_kwargs["generator"] = generator return extra_step_kwargs - def _gaussian_weights(self, t_tile_length, t_batch_size): - from numpy import pi, exp, sqrt - - var = 0.01 - midpoint = (t_tile_length - 1) / 2 # -1 because index goes from 0 to latent_width - 1 - t_probs = [exp(-(t-midpoint)*(t-midpoint)/(t_tile_length*t_tile_length)/(2*var)) / sqrt(2*pi*var) for t in range(t_tile_length)] - weights = torch.tensor(t_probs) - weights = weights.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(4).repeat(1, t_batch_size,1, 1, 1) - return weights - # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs def check_inputs( self, @@ -866,22 +847,14 @@ class CogVideoX_Fun_Pipeline_Inpaint(VideoSysPipeline): extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) # 7. 
Create rotary embeds if required
-        if context_schedule is not None and context_schedule == "temporal_tiling":
-            t_tile_length = context_frames
-            t_tile_overlap = context_overlap
-            t_tile_weights = self._gaussian_weights(t_tile_length=t_tile_length, t_batch_size=1).to(latents.device).to(self.vae.dtype)
-            use_temporal_tiling = True
-            print("Temporal tiling enabled")
-        elif context_schedule is not None:
+        if context_schedule is not None:
             print(f"Context schedule enabled: {context_frames} frames, {context_stride} stride, {context_overlap} overlap")
-            use_temporal_tiling = False
             use_context_schedule = True
             from .context import get_context_scheduler
             context = get_context_scheduler(context_schedule)
         else:
-            use_temporal_tiling = False
             use_context_schedule = False
-            print("Temporal tiling and context schedule disabled")
+            print("Context schedule disabled")
         # 7. Create rotary embeds if required
         image_rotary_emb = (
             self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
@@ -915,87 +888,7 @@
             if self.interrupt:
                 continue
-            if use_temporal_tiling and isinstance(self.scheduler, CogVideoXDDIMScheduler):
-                #temporal tiling code based on https://github.com/mayuelala/FollowYourEmoji/blob/main/models/video_pipeline.py
-                # =====================================================
-                grid_ts = 0
-                cur_t = 0
-                while cur_t < latents.shape[1]:
-                    cur_t = max(grid_ts * t_tile_length - t_tile_overlap * grid_ts, 0) + t_tile_length
-                    grid_ts += 1
-
-                all_t = latents.shape[1]
-                latents_all_list = []
-                # =====================================================
-
-                image_rotary_emb = (
-                    self._prepare_rotary_positional_embeddings(height, width, t_tile_length, device)
-                    if self.transformer.config.use_rotary_positional_embeddings
-                    else None
-                )
-
-                for t_i in range(grid_ts):
-                    if t_i < grid_ts - 1:
-                        ofs_t = max(t_i * t_tile_length - t_tile_overlap * t_i, 0)
-                    if t_i == grid_ts - 1:
-                        ofs_t = all_t - t_tile_length
-
-                    input_start_t = ofs_t
-                    input_end_t = ofs_t + t_tile_length
-
-                    latents_tile = latents[:, input_start_t:input_end_t,:, :, :]
-                    inpaint_latents_tile = inpaint_latents[:, input_start_t:input_end_t, :, :, :]
-
-                    latent_model_input_tile = torch.cat([latents_tile] * 2) if do_classifier_free_guidance else latents_tile
-                    latent_model_input_tile = self.scheduler.scale_model_input(latent_model_input_tile, t)
-
-                    #t_input = t[None].to(device)
-                    t_input = t.expand(latent_model_input_tile.shape[0]) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
-
-                    # predict noise model_output
-                    noise_pred = self.transformer(
-                        hidden_states=latent_model_input_tile,
-                        encoder_hidden_states=prompt_embeds,
-                        timestep=t_input,
-                        image_rotary_emb=image_rotary_emb,
-                        return_dict=False,
-                        inpaint_latents=inpaint_latents_tile,
-                    )[0]
-                    noise_pred = noise_pred.float()
-
-                    if do_classifier_free_guidance:
-                        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
-                        noise_pred = noise_pred_uncond + self._guidance_scale * (noise_pred_text - noise_pred_uncond)
-
-                    # compute the previous noisy sample x_t -> x_t-1
-                    latents_tile = self.scheduler.step(noise_pred, t, latents_tile.to(self.vae.dtype), **extra_step_kwargs, return_dict=False)[0]
-                    latents_all_list.append(latents_tile)
-
-                # ==========================================
-                latents_all = torch.zeros(latents.shape, device=latents.device, dtype=self.vae.dtype)
-                contributors = torch.zeros(latents.shape, device=latents.device, dtype=self.vae.dtype)
-                # Add each tile contribution to overall latents
- for t_i in range(grid_ts): - if t_i < grid_ts - 1: - ofs_t = max(t_i * t_tile_length - t_tile_overlap * t_i, 0) - if t_i == grid_ts - 1: - ofs_t = all_t - t_tile_length - - input_start_t = ofs_t - input_end_t = ofs_t + t_tile_length - - latents_all[:, input_start_t:input_end_t,:, :, :] += latents_all_list[t_i] * t_tile_weights - contributors[:, input_start_t:input_end_t,:, :, :] += t_tile_weights - - latents_all /= contributors - - latents = latents_all - - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - pbar.update(1) - # ========================================== - elif use_context_schedule: + if use_context_schedule: latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) @@ -1133,18 +1026,7 @@ class CogVideoX_Fun_Pipeline_Inpaint(VideoSysPipeline): else: pbar.update(1) - # if output_type == "numpy": - # video = self.decode_latents(latents) - # elif not output_type == "latent": - # video = self.decode_latents(latents) - # video = self.video_processor.postprocess_video(video=video, output_type=output_type) - # else: - # video = latents - # Offload all models self.maybe_free_model_hooks() - # if not return_dict: - # video = torch.from_numpy(video) - return latents \ No newline at end of file diff --git a/model_loading.py b/model_loading.py index fe9245b..532d6aa 100644 --- a/model_loading.py +++ b/model_loading.py @@ -12,15 +12,12 @@ from .pipeline_cogvideox import CogVideoXPipeline from contextlib import nullcontext from .cogvideox_fun.transformer_3d import CogVideoXTransformer3DModel as CogVideoXTransformer3DModelFun -from .cogvideox_fun.fun_pab_transformer_3d import CogVideoXTransformer3DModel as CogVideoXTransformer3DModelFunPAB from .cogvideox_fun.autoencoder_magvit import AutoencoderKLCogVideoX as AutoencoderKLCogVideoXFun from .cogvideox_fun.pipeline_cogvideox_inpaint import CogVideoX_Fun_Pipeline_Inpaint from .cogvideox_fun.pipeline_cogvideox_control import CogVideoX_Fun_Pipeline_Control -from .videosys.cogvideox_transformer_3d import CogVideoXTransformer3DModel as CogVideoXTransformer3DModelPAB - -from .utils import check_diffusers_version, remove_specific_blocks, log +from .utils import remove_specific_blocks, log from comfy.utils import load_torch_file script_directory = os.path.dirname(os.path.abspath(__file__)) @@ -95,7 +92,6 @@ class DownloadAndLoadCogVideoModel: "fp8_transformer": (['disabled', 'enabled', 'fastmode'], {"default": 'disabled', "tooltip": "enabled casts the transformer to torch.float8_e4m3fn, fastmode is only for latest nvidia GPUs and requires torch 2.4.0 and cu124 minimum"}), "compile": (["disabled","onediff","torch"], {"tooltip": "compile the model for faster inference, these are advanced options only available on Linux, see readme for more info"}), "enable_sequential_cpu_offload": ("BOOLEAN", {"default": False, "tooltip": "significantly reducing memory usage and slows down the inference"}), - "pab_config": ("PAB_CONFIG", {"default": None}), "block_edit": ("TRANSFORMERBLOCKS", {"default": None}), "lora": ("COGLORA", {"default": None}), "compile_args":("COMPILEARGS", ), @@ -111,7 +107,7 @@ class DownloadAndLoadCogVideoModel: DESCRIPTION = "Downloads and loads the selected CogVideo model from Huggingface to 'ComfyUI/models/CogVideo'" def loadmodel(self, model, precision, fp8_transformer="disabled", compile="disabled", - enable_sequential_cpu_offload=False, 
pab_config=None, block_edit=None, lora=None, compile_args=None, + enable_sequential_cpu_offload=False, block_edit=None, lora=None, compile_args=None, attention_mode="sdpa", load_device="main_device"): if precision == "fp16" and "1.5" in model: @@ -188,15 +184,9 @@ class DownloadAndLoadCogVideoModel: # transformer if "Fun" in model: - if pab_config is not None: - transformer = CogVideoXTransformer3DModelFunPAB.from_pretrained(base_path, subfolder=subfolder) - else: - transformer = CogVideoXTransformer3DModelFun.from_pretrained(base_path, subfolder=subfolder) + transformer = CogVideoXTransformer3DModelFun.from_pretrained(base_path, subfolder=subfolder) else: - if pab_config is not None: - transformer = CogVideoXTransformer3DModelPAB.from_pretrained(base_path, subfolder=subfolder) - else: - transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder=subfolder) + transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder=subfolder) transformer = transformer.to(dtype).to(transformer_load_device) @@ -213,12 +203,12 @@ class DownloadAndLoadCogVideoModel: if "Fun" in model: vae = AutoencoderKLCogVideoXFun.from_pretrained(base_path, subfolder="vae").to(dtype).to(offload_device) if "Pose" in model: - pipe = CogVideoX_Fun_Pipeline_Control(vae, transformer, scheduler, pab_config=pab_config) + pipe = CogVideoX_Fun_Pipeline_Control(vae, transformer, scheduler) else: - pipe = CogVideoX_Fun_Pipeline_Inpaint(vae, transformer, scheduler, pab_config=pab_config) + pipe = CogVideoX_Fun_Pipeline_Inpaint(vae, transformer, scheduler) else: vae = AutoencoderKLCogVideoX.from_pretrained(base_path, subfolder="vae").to(dtype).to(offload_device) - pipe = CogVideoXPipeline(vae, transformer, scheduler, pab_config=pab_config) + pipe = CogVideoXPipeline(vae, transformer, scheduler) if "cogvideox-2b-img2vid" in model: pipe.input_with_padding = False @@ -296,7 +286,7 @@ class DownloadAndLoadCogVideoModel: backend="nexfort", options= {"mode": "max-optimize:max-autotune:max-autotune", "memory_format": "channels_last", "options": {"inductor.optimize_linear_epilogue": False, "triton.fuse_attention_allow_fp16_reduction": False}}, ignores=["vae"], - fuse_qkv_projections=True if pab_config is None else False, + fuse_qkv_projections= False, ) pipeline = { @@ -334,7 +324,6 @@ class DownloadAndLoadCogVideoGGUFModel: "enable_sequential_cpu_offload": ("BOOLEAN", {"default": False, "tooltip": "significantly reducing memory usage and slows down the inference"}), }, "optional": { - "pab_config": ("PAB_CONFIG", {"default": None}), "block_edit": ("TRANSFORMERBLOCKS", {"default": None}), #"lora": ("COGLORA", {"default": None}), "compile": (["disabled","torch"], {"tooltip": "compile the model for faster inference, these are advanced options only available on Linux, see readme for more info"}), @@ -348,7 +337,7 @@ class DownloadAndLoadCogVideoGGUFModel: CATEGORY = "CogVideoWrapper" def loadmodel(self, model, vae_precision, fp8_fastmode, load_device, enable_sequential_cpu_offload, - pab_config=None, block_edit=None, compile="disabled", attention_mode="sdpa"): + block_edit=None, compile="disabled", attention_mode="sdpa"): device = mm.get_torch_device() offload_device = mm.unet_offload_device() @@ -396,10 +385,7 @@ class DownloadAndLoadCogVideoGGUFModel: transformer_config["in_channels"] = 32 else: transformer_config["in_channels"] = 33 - if pab_config is not None: - transformer = CogVideoXTransformer3DModelFunPAB.from_config(transformer_config) - else: - transformer = 
CogVideoXTransformer3DModelFun.from_config(transformer_config) + transformer = CogVideoXTransformer3DModelFun.from_config(transformer_config) elif "I2V" in model or "Interpolation" in model: transformer_config["in_channels"] = 32 if "1_5" in model: @@ -409,16 +395,10 @@ class DownloadAndLoadCogVideoGGUFModel: transformer_config["patch_bias"] = False transformer_config["sample_height"] = 96 transformer_config["sample_width"] = 170 - if pab_config is not None: - transformer = CogVideoXTransformer3DModelPAB.from_config(transformer_config) - else: - transformer = CogVideoXTransformer3DModel.from_config(transformer_config) + transformer = CogVideoXTransformer3DModel.from_config(transformer_config) else: transformer_config["in_channels"] = 16 - if pab_config is not None: - transformer = CogVideoXTransformer3DModelPAB.from_config(transformer_config) - else: - transformer = CogVideoXTransformer3DModel.from_config(transformer_config) + transformer = CogVideoXTransformer3DModel.from_config(transformer_config) params_to_keep = {"patch_embed", "pos_embedding", "time_embedding"} if "2b" in model: @@ -476,13 +456,13 @@ class DownloadAndLoadCogVideoGGUFModel: vae = AutoencoderKLCogVideoXFun.from_config(vae_config).to(vae_dtype).to(offload_device) vae.load_state_dict(vae_sd) if "Pose" in model: - pipe = CogVideoX_Fun_Pipeline_Control(vae, transformer, scheduler, pab_config=pab_config) + pipe = CogVideoX_Fun_Pipeline_Control(vae, transformer, scheduler) else: - pipe = CogVideoX_Fun_Pipeline_Inpaint(vae, transformer, scheduler, pab_config=pab_config) + pipe = CogVideoX_Fun_Pipeline_Inpaint(vae, transformer, scheduler) else: vae = AutoencoderKLCogVideoX.from_config(vae_config).to(vae_dtype).to(offload_device) vae.load_state_dict(vae_sd) - pipe = CogVideoXPipeline(vae, transformer, scheduler, pab_config=pab_config) + pipe = CogVideoXPipeline(vae, transformer, scheduler) if enable_sequential_cpu_offload: pipe.enable_sequential_cpu_offload() diff --git a/nodes.py b/nodes.py index ecea9db..cd90f33 100644 --- a/nodes.py +++ b/nodes.py @@ -44,8 +44,6 @@ from PIL import Image import numpy as np import json - - script_directory = os.path.dirname(os.path.abspath(__file__)) if not "CogVideo" in folder_paths.folder_names_and_paths: @@ -53,61 +51,11 @@ if not "CogVideo" in folder_paths.folder_names_and_paths: if not "cogvideox_loras" in folder_paths.folder_names_and_paths: folder_paths.add_model_folder_path("cogvideox_loras", os.path.join(folder_paths.models_dir, "CogVideo", "loras")) -#PAB -from .videosys.pab import CogVideoXPABConfig - -class CogVideoPABConfig: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "spatial_broadcast": ("BOOLEAN", {"default": True, "tooltip": "Enable Spatial PAB, highest impact"}), - "spatial_threshold_start": ("INT", {"default": 850, "min": 0, "max": 1000, "tooltip": "PAB Start Timestep"} ), - "spatial_threshold_end": ("INT", {"default": 100, "min": 0, "max": 1000, "tooltip": "PAB End Timestep"} ), - "spatial_range": ("INT", {"default": 2, "min": 0, "max": 10, "tooltip": "Broadcast timesteps range, higher values are faster but quality may suffer"} ), - "temporal_broadcast": ("BOOLEAN", {"default": False, "tooltip": "Enable Temporal PAB, medium impact"}), - "temporal_threshold_start": ("INT", {"default": 850, "min": 0, "max": 1000, "tooltip": "PAB Start Timestep"} ), - "temporal_threshold_end": ("INT", {"default": 100, "min": 0, "max": 1000, "tooltip": "PAB End Timestep"} ), - "temporal_range": ("INT", {"default": 4, "min": 0, "max": 10, "tooltip": "Broadcast timesteps range, 
higher values are faster but quality may suffer"} ), - "cross_broadcast": ("BOOLEAN", {"default": False, "tooltip": "Enable Cross Attention PAB, low impact"}), - "cross_threshold_start": ("INT", {"default": 850, "min": 0, "max": 1000, "tooltip": "PAB Start Timestep"} ), - "cross_threshold_end": ("INT", {"default": 100, "min": 0, "max": 1000, "tooltip": "PAB End Timestep"} ), - "cross_range": ("INT", {"default": 6, "min": 0, "max": 10, "tooltip": "Broadcast timesteps range, higher values are faster but quality may suffer"} ), - - "steps": ("INT", {"default": 50, "min": 0, "max": 1000, "tooltip": "Should match the sampling steps"} ), - } - } - - RETURN_TYPES = ("PAB_CONFIG",) - RETURN_NAMES = ("pab_config", ) - FUNCTION = "config" - CATEGORY = "CogVideoWrapper" - DESCRIPTION = "EXPERIMENTAL:Pyramid Attention Broadcast (PAB) speeds up inference by mitigating redundant attention computation. Increases memory use" - - def config(self, spatial_broadcast, spatial_threshold_start, spatial_threshold_end, spatial_range, - temporal_broadcast, temporal_threshold_start, temporal_threshold_end, temporal_range, - cross_broadcast, cross_threshold_start, cross_threshold_end, cross_range, steps): - - os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" - pab_config = CogVideoXPABConfig( - steps=steps, - spatial_broadcast=spatial_broadcast, - spatial_threshold=[spatial_threshold_end, spatial_threshold_start], - spatial_range=spatial_range, - temporal_broadcast=temporal_broadcast, - temporal_threshold=[temporal_threshold_end, temporal_threshold_start], - temporal_range=temporal_range, - cross_broadcast=cross_broadcast, - cross_threshold=[cross_threshold_end, cross_threshold_start], - cross_range=cross_range - ) - - return (pab_config, ) - class CogVideoContextOptions: @classmethod def INPUT_TYPES(s): return {"required": { - "context_schedule": (["uniform_standard", "uniform_looped", "static_standard", "temporal_tiling"],), + "context_schedule": (["uniform_standard", "uniform_looped", "static_standard"],), "context_frames": ("INT", {"default": 48, "min": 2, "max": 100, "step": 1, "tooltip": "Number of pixel frames in the context, NOTE: the latent space has 4 frames in 1"} ), "context_stride": ("INT", {"default": 4, "min": 4, "max": 100, "step": 1, "tooltip": "Context stride as pixel frames, NOTE: the latent space has 4 frames in 1"} ), "context_overlap": ("INT", {"default": 4, "min": 4, "max": 100, "step": 1, "tooltip": "Context overlap as pixel frames, NOTE: the latent space has 4 frames in 1"} ), @@ -1152,9 +1100,6 @@ class CogVideoXFunSampler: end_img = [to_pil(_end_img) for _end_img in end_img] if end_img is not None else None # Load Sampler - if context_options is not None and context_options["context_schedule"] == "temporal_tiling": - log.info("Temporal tiling enabled, changing scheduler to CogVideoXDDIM") - scheduler="CogVideoXDDIM" scheduler_config = pipeline["scheduler_config"] if scheduler in scheduler_mapping: noise_scheduler = scheduler_mapping[scheduler].from_config(scheduler_config) @@ -1282,7 +1227,7 @@ class CogVideoXFunControlSampler: CATEGORY = "CogVideoWrapper" def process(self, pipeline, positive, negative, seed, steps, cfg, scheduler, control_latents, - control_strength=1.0, control_start_percent=0.0, control_end_percent=1.0, t_tile_length=16, t_tile_overlap=8, + control_strength=1.0, control_start_percent=0.0, control_end_percent=1.0, samples=None, denoise_strength=1.0, context_options=None): device = mm.get_torch_device() offload_device = mm.unet_offload_device() @@ -1306,9 
+1251,6 @@ class CogVideoXFunControlSampler: # Load Sampler scheduler_config = pipeline["scheduler_config"] - if context_options is not None and context_options["context_schedule"] == "temporal_tiling": - log.info("Temporal tiling enabled, changing scheduler to CogVideoXDDIM") - scheduler="CogVideoXDDIM" if scheduler in scheduler_mapping: noise_scheduler = scheduler_mapping[scheduler].from_config(scheduler_config) pipe.scheduler = noise_scheduler @@ -1427,7 +1369,6 @@ NODE_CLASS_MAPPINGS = { "CogVideoXFunVid2VidSampler": CogVideoXFunVid2VidSampler, "CogVideoXFunControlSampler": CogVideoXFunControlSampler, "CogVideoTextEncodeCombine": CogVideoTextEncodeCombine, - "CogVideoPABConfig": CogVideoPABConfig, "CogVideoTransformerEdit": CogVideoTransformerEdit, "CogVideoControlImageEncode": CogVideoControlImageEncode, "CogVideoContextOptions": CogVideoContextOptions, @@ -1450,7 +1391,6 @@ NODE_DISPLAY_NAME_MAPPINGS = { "CogVideoXFunVid2VidSampler": "CogVideoXFun Vid2Vid Sampler", "CogVideoXFunControlSampler": "CogVideoXFun Control Sampler", "CogVideoTextEncodeCombine": "CogVideo TextEncode Combine", - "CogVideoPABConfig": "CogVideo PABConfig", "CogVideoTransformerEdit": "CogVideo TransformerEdit", "CogVideoControlImageEncode": "CogVideo Control ImageEncode", "CogVideoContextOptions": "CogVideo Context Options", diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 87d19e9..09e9103 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -20,8 +20,8 @@ import torch import torch.nn.functional as F import math -from diffusers.models import AutoencoderKLCogVideoX#, CogVideoXTransformer3DModel -#from diffusers.pipelines.pipeline_utils import DiffusionPipeline +from diffusers.models import AutoencoderKLCogVideoX +from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor @@ -35,10 +35,6 @@ from comfy.utils import ProgressBar logger = logging.get_logger(__name__) # pylint: disable=invalid-name -from .videosys.core.pipeline import VideoSysPipeline -from .videosys.cogvideox_transformer_3d import CogVideoXTransformer3DModel as CogVideoXTransformer3DModelPAB -from .videosys.core.pab_mgr import set_pab_manager - def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): tw = tgt_width th = tgt_height @@ -115,7 +111,7 @@ def retrieve_timesteps( timesteps = scheduler.timesteps return timesteps, num_inference_steps -class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): +class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): r""" Pipeline for text-to-video generation using CogVideoX. 
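Note: with the VideoSys base class and pab_config gone, the pipeline is assembled directly from stock diffusers components plus this repo's custom transformer, the same way model_loading.py builds it after this change. A minimal sketch, not part of the patch; the checkpoint path and the "scheduler" subfolder are assumptions:

    from diffusers import AutoencoderKLCogVideoX, CogVideoXDDIMScheduler
    from custom_cogvideox_transformer_3d import CogVideoXTransformer3DModel
    from pipeline_cogvideox import CogVideoXPipeline

    base_path = "models/CogVideo/CogVideoX-5b"  # assumed local HF-format checkpoint directory

    # each component is loaded on its own and handed to the pipeline;
    # the constructor no longer accepts pab_config
    vae = AutoencoderKLCogVideoX.from_pretrained(base_path, subfolder="vae")
    transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder="transformer")
    scheduler = CogVideoXDDIMScheduler.from_pretrained(base_path, subfolder="scheduler")
    pipe = CogVideoXPipeline(vae, transformer, scheduler)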
@@ -144,10 +140,9 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): def __init__( self, vae: AutoencoderKLCogVideoX, - transformer: Union[CogVideoXTransformer3DModel, CogVideoXTransformer3DModelPAB], + transformer: CogVideoXTransformer3DModel, scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler], original_mask = None, - pab_config = None ): super().__init__() @@ -164,9 +159,6 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) self.video_processor.config.do_resize = False - if pab_config is not None: - set_pab_manager(pab_config) - self.input_with_padding = True @@ -289,29 +281,6 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): self.scheduler.set_begin_index(t_start * self.scheduler.order) return timesteps.to(device), num_inference_steps - t_start - - def _gaussian_weights(self, t_tile_length, t_batch_size): - from numpy import pi, exp, sqrt - - var = 0.01 - midpoint = (t_tile_length - 1) / 2 # -1 because index goes from 0 to latent_width - 1 - t_probs = [exp(-(t-midpoint)*(t-midpoint)/(t_tile_length*t_tile_length)/(2*var)) / sqrt(2*pi*var) for t in range(t_tile_length)] - weights = torch.tensor(t_probs) - weights = weights.unsqueeze(0).unsqueeze(2).unsqueeze(3).unsqueeze(4).repeat(1, t_batch_size,1, 1, 1) - return weights - - # def fuse_qkv_projections(self) -> None: - # r"""Enables fused QKV projections.""" - # self.fusing_transformer = True - # self.transformer.fuse_qkv_projections() - - # def unfuse_qkv_projections(self) -> None: - # r"""Disable QKV projection fusion if enabled.""" - # if not self.fusing_transformer: - # logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.") - # else: - # self.transformer.unfuse_qkv_projections() - # self.fusing_transformer = False def _prepare_rotary_positional_embeddings( self, @@ -365,8 +334,6 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): height: int = 480, width: int = 720, num_frames: int = 48, - t_tile_length: int = 12, - t_tile_overlap: int = 4, num_inference_steps: int = 50, timesteps: Optional[List[int]] = None, guidance_scale: float = 6, @@ -487,9 +454,6 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): num_frames += self.additional_frames * self.vae_scale_factor_temporal - #if latents is None and num_frames == t_tile_length: - # num_frames += 1 - if self.original_mask is not None: image_latents = latents original_image_latents = image_latents @@ -569,23 +533,16 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) # 7. 
context schedule and temporal tiling - if context_schedule is not None and context_schedule == "temporal_tiling": - t_tile_length = context_frames - t_tile_overlap = context_overlap - t_tile_weights = self._gaussian_weights(t_tile_length=t_tile_length, t_batch_size=1).to(latents.device).to(self.vae.dtype) - use_temporal_tiling = True - logger.info("Temporal tiling enabled") - elif context_schedule is not None: + if context_schedule is not None: if image_cond_latents is not None: raise NotImplementedError("Context schedule not currently supported with image conditioning") logger.info(f"Context schedule enabled: {context_frames} frames, {context_stride} stride, {context_overlap} overlap") - use_temporal_tiling = False use_context_schedule = True from .cogvideox_fun.context import get_context_scheduler context = get_context_scheduler(context_schedule) + #todo ofs embeds? else: - use_temporal_tiling = False use_context_schedule = False logger.info("Temporal tiling and context schedule disabled") # 7.5. Create rotary embeds if required @@ -647,100 +604,8 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): for i, t in enumerate(timesteps): if self.interrupt: continue - if use_temporal_tiling and isinstance(self.scheduler, CogVideoXDDIMScheduler): - #temporal tiling code based on https://github.com/mayuelala/FollowYourEmoji/blob/main/models/video_pipeline.py - # ===================================================== - grid_ts = 0 - cur_t = 0 - while cur_t < latents.shape[1]: - cur_t = max(grid_ts * t_tile_length - t_tile_overlap * grid_ts, 0) + t_tile_length - grid_ts += 1 - - all_t = latents.shape[1] - latents_all_list = [] - # ===================================================== - - for t_i in range(grid_ts): - if t_i < grid_ts - 1: - ofs_t = max(t_i * t_tile_length - t_tile_overlap * t_i, 0) - if t_i == grid_ts - 1: - ofs_t = all_t - t_tile_length - - input_start_t = ofs_t - input_end_t = ofs_t + t_tile_length - - #latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents - #latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - image_rotary_emb = ( - self._prepare_rotary_positional_embeddings(height, width, t_tile_length, device) - if self.transformer.config.use_rotary_positional_embeddings - else None - ) - - latents_tile = latents[:, input_start_t:input_end_t,:, :, :] - latent_model_input_tile = torch.cat([latents_tile] * 2) if do_classifier_free_guidance else latents_tile - latent_model_input_tile = self.scheduler.scale_model_input(latent_model_input_tile, t) - - #t_input = t[None].to(device) - t_input = t.expand(latent_model_input_tile.shape[0]) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML - - # predict noise model_output - noise_pred = self.transformer( - hidden_states=latent_model_input_tile, - encoder_hidden_states=prompt_embeds, - timestep=t_input, - image_rotary_emb=image_rotary_emb, - return_dict=False, - )[0] - noise_pred = noise_pred.float() - - if self.do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + self._guidance_scale[i] * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents_tile = self.scheduler.step(noise_pred, t, latents_tile.to(self.vae.dtype), **extra_step_kwargs, return_dict=False)[0] - latents_all_list.append(latents_tile) - - # ========================================== - latents_all = torch.zeros(latents.shape, device=latents.device, 
dtype=self.vae.dtype) - contributors = torch.zeros(latents.shape, device=latents.device, dtype=self.vae.dtype) - # Add each tile contribution to overall latents - for t_i in range(grid_ts): - if t_i < grid_ts - 1: - ofs_t = max(t_i * t_tile_length - t_tile_overlap * t_i, 0) - if t_i == grid_ts - 1: - ofs_t = all_t - t_tile_length - - input_start_t = ofs_t - input_end_t = ofs_t + t_tile_length - - latents_all[:, input_start_t:input_end_t,:, :, :] += latents_all_list[t_i] * t_tile_weights - contributors[:, input_start_t:input_end_t,:, :, :] += t_tile_weights - - latents_all /= contributors - - latents = latents_all - #print("latents",latents.shape) - # start diff diff - if i < len(timesteps) - 1 and self.original_mask is not None: - noise_timestep = timesteps[i + 1] - image_latent = self.scheduler.add_noise(original_image_latents, noise, torch.tensor([noise_timestep]) - ) - mask = mask.to(latents) - ts_from = timesteps[0] - ts_to = timesteps[-1] - threshold = (t - ts_to) / (ts_from - ts_to) - mask = torch.where(mask >= threshold, mask, torch.zeros_like(mask)) - latents = image_latent * mask + latents * (1 - mask) - # end diff diff - - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - comfy_pbar.update(1) - # ========================================== - elif use_context_schedule: + # region context schedule sampling + if use_context_schedule: latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) counter = torch.zeros_like(latent_model_input) @@ -858,7 +723,8 @@ class CogVideoXPipeline(VideoSysPipeline, CogVideoXLoraLoaderMixin): if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() comfy_pbar.update(1) - + + # region sampling else: latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) diff --git a/videosys/cogvideox_transformer_3d.py b/videosys/cogvideox_transformer_3d.py deleted file mode 100644 index 26550a2..0000000 --- a/videosys/cogvideox_transformer_3d.py +++ /dev/null @@ -1,621 +0,0 @@ -# Adapted from CogVideo - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
-# -------------------------------------------------------- -# References: -# CogVideo: https://github.com/THUDM/CogVideo -# diffusers: https://github.com/huggingface/diffusers -# -------------------------------------------------------- - -from typing import Any, Dict, Optional, Tuple, Union -from einops import rearrange -import torch -import torch.nn.functional as F -from diffusers.configuration_utils import ConfigMixin, register_to_config -from diffusers.models.attention import Attention, FeedForward -from diffusers.models.embeddings import TimestepEmbedding, Timesteps, get_3d_sincos_pos_embed, CogVideoXPatchEmbed -from diffusers.models.modeling_outputs import Transformer2DModelOutput -from diffusers.models.modeling_utils import ModelMixin -from diffusers.utils import is_torch_version -from diffusers.utils.torch_utils import maybe_allow_in_graph -from torch import nn - -from .core.pab_mgr import enable_pab, if_broadcast_spatial -from .modules.embeddings import apply_rotary_emb - -#from .modules.embeddings import CogVideoXPatchEmbed - -from .modules.normalization import AdaLayerNorm, CogVideoXLayerNormZero -try: - from sageattention import sageattn - SAGEATTN_IS_AVAVILABLE = True -except: - SAGEATTN_IS_AVAVILABLE = False - -class CogVideoXAttnProcessor2_0: - r""" - Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on - query and key vectors, but does not include spatial normalization. - """ - - def __init__(self): - if not hasattr(F, "scaled_dot_product_attention"): - raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") - @torch.compiler.disable() - def __call__( - self, - attn: Attention, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - image_rotary_emb: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - text_seq_length = encoder_hidden_states.size(1) - - hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - - batch_size, sequence_length, _ = ( - hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape - ) - - if attention_mask is not None: - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) - attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) - - query = attn.to_q(hidden_states) - key = attn.to_k(hidden_states) - value = attn.to_v(hidden_states) - - attn_heads = attn.heads - - inner_dim = key.shape[-1] - head_dim = inner_dim // attn_heads - - query = query.view(batch_size, -1, attn_heads, head_dim).transpose(1, 2) - key = key.view(batch_size, -1, attn_heads, head_dim).transpose(1, 2) - value = value.view(batch_size, -1, attn_heads, head_dim).transpose(1, 2) - - if attn.norm_q is not None: - query = attn.norm_q(query) - if attn.norm_k is not None: - key = attn.norm_k(key) - - # Apply RoPE if needed - if image_rotary_emb is not None: - emb_len = image_rotary_emb[0].shape[0] - query[:, :, text_seq_length : emb_len + text_seq_length] = apply_rotary_emb( - query[:, :, text_seq_length : emb_len + text_seq_length], image_rotary_emb - ) - if not attn.is_cross_attention: - key[:, :, text_seq_length : emb_len + text_seq_length] = apply_rotary_emb( - key[:, :, text_seq_length : emb_len + text_seq_length], image_rotary_emb - ) - - if SAGEATTN_IS_AVAVILABLE: - hidden_states = sageattn(query, key, value, is_causal=False) - else: - hidden_states = F.scaled_dot_product_attention( - 
query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False - ) - - hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn_heads * head_dim) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - encoder_hidden_states, hidden_states = hidden_states.split( - [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 - ) - return hidden_states, encoder_hidden_states - - -class FusedCogVideoXAttnProcessor2_0: - r""" - Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on - query and key vectors, but does not include spatial normalization. - """ - - def __init__(self): - if not hasattr(F, "scaled_dot_product_attention"): - raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") - @torch.compiler.disable() - def __call__( - self, - attn: Attention, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - image_rotary_emb: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - text_seq_length = encoder_hidden_states.size(1) - - hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - - batch_size, sequence_length, _ = ( - hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape - ) - - if attention_mask is not None: - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) - attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) - - qkv = attn.to_qkv(hidden_states) - split_size = qkv.shape[-1] // 3 - query, key, value = torch.split(qkv, split_size, dim=-1) - - inner_dim = key.shape[-1] - head_dim = inner_dim // attn.heads - - query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - - if attn.norm_q is not None: - query = attn.norm_q(query) - if attn.norm_k is not None: - key = attn.norm_k(key) - - # Apply RoPE if needed - if image_rotary_emb is not None: - query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) - if not attn.is_cross_attention: - key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) - - if SAGEATTN_IS_AVAVILABLE: - hidden_states = sageattn(query, key, value, is_causal=False) - else: - hidden_states = F.scaled_dot_product_attention( - query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False - ) - - hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - encoder_hidden_states, hidden_states = hidden_states.split( - [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 - ) - return hidden_states, encoder_hidden_states - - -@maybe_allow_in_graph -class CogVideoXBlock(nn.Module): - r""" - Transformer block used in [CogVideoX](https://github.com/THUDM/CogVideo) model. - - Parameters: - dim (`int`): - The number of channels in the input and output. - num_attention_heads (`int`): - The number of heads to use for multi-head attention. - attention_head_dim (`int`): - The number of channels in each head. 
- time_embed_dim (`int`): - The number of channels in timestep embedding. - dropout (`float`, defaults to `0.0`): - The dropout probability to use. - activation_fn (`str`, defaults to `"gelu-approximate"`): - Activation function to be used in feed-forward. - attention_bias (`bool`, defaults to `False`): - Whether or not to use bias in attention projection layers. - qk_norm (`bool`, defaults to `True`): - Whether or not to use normalization after query and key projections in Attention. - norm_elementwise_affine (`bool`, defaults to `True`): - Whether to use learnable elementwise affine parameters for normalization. - norm_eps (`float`, defaults to `1e-5`): - Epsilon value for normalization layers. - final_dropout (`bool` defaults to `False`): - Whether to apply a final dropout after the last feed-forward layer. - ff_inner_dim (`int`, *optional*, defaults to `None`): - Custom hidden dimension of Feed-forward layer. If not provided, `4 * dim` is used. - ff_bias (`bool`, defaults to `True`): - Whether or not to use bias in Feed-forward layer. - attention_out_bias (`bool`, defaults to `True`): - Whether or not to use bias in Attention output projection layer. - """ - - def __init__( - self, - dim: int, - num_attention_heads: int, - attention_head_dim: int, - time_embed_dim: int, - dropout: float = 0.0, - activation_fn: str = "gelu-approximate", - attention_bias: bool = False, - qk_norm: bool = True, - norm_elementwise_affine: bool = True, - norm_eps: float = 1e-5, - final_dropout: bool = True, - ff_inner_dim: Optional[int] = None, - ff_bias: bool = True, - attention_out_bias: bool = True, - block_idx: int = 0, - ): - super().__init__() - - # 1. Self Attention - self.norm1 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True) - - self.attn1 = Attention( - query_dim=dim, - dim_head=attention_head_dim, - heads=num_attention_heads, - qk_norm="layer_norm" if qk_norm else None, - eps=1e-6, - bias=attention_bias, - out_bias=attention_out_bias, - processor=CogVideoXAttnProcessor2_0(), - ) - - # parallel - #self.attn1.parallel_manager = None - - # 2. 
Feed Forward - self.norm2 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True) - - self.ff = FeedForward( - dim, - dropout=dropout, - activation_fn=activation_fn, - final_dropout=final_dropout, - inner_dim=ff_inner_dim, - bias=ff_bias, - ) - - # pab - self.attn_count = 0 - self.last_attn = None - self.block_idx = block_idx - #@torch.compiler.disable() - def forward( - self, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - temb: torch.Tensor, - image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - timestep=None, - video_flow_feature: Optional[torch.Tensor] = None, - fuser=None, - ) -> torch.Tensor: - text_seq_length = encoder_hidden_states.size(1) - - # norm & modulate - norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1( - hidden_states, encoder_hidden_states, temb - ) - # Tora Motion-guidance Fuser - if video_flow_feature is not None: - H, W = video_flow_feature.shape[-2:] - T = norm_hidden_states.shape[1] // H // W - h = rearrange(norm_hidden_states, "B (T H W) C -> (B T) C H W", H=H, W=W) - h = fuser(h, video_flow_feature.to(h), T=T) - norm_hidden_states = rearrange(h, "(B T) C H W -> B (T H W) C", T=T) - del h, fuser - # attention - if enable_pab(): - broadcast_attn, self.attn_count = if_broadcast_spatial(int(timestep[0]), self.attn_count, self.block_idx) - if enable_pab() and broadcast_attn: - attn_hidden_states, attn_encoder_hidden_states = self.last_attn - else: - attn_hidden_states, attn_encoder_hidden_states = self.attn1( - hidden_states=norm_hidden_states, - encoder_hidden_states=norm_encoder_hidden_states, - image_rotary_emb=image_rotary_emb, - ) - if enable_pab(): - self.last_attn = (attn_hidden_states, attn_encoder_hidden_states) - - hidden_states = hidden_states + gate_msa * attn_hidden_states - encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states - - # norm & modulate - norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2( - hidden_states, encoder_hidden_states, temb - ) - - # feed-forward - norm_hidden_states = torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1) - ff_output = self.ff(norm_hidden_states) - - hidden_states = hidden_states + gate_ff * ff_output[:, text_seq_length:] - encoder_hidden_states = encoder_hidden_states + enc_gate_ff * ff_output[:, :text_seq_length] - - return hidden_states, encoder_hidden_states - - -class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin): - """ - A Transformer model for video-like data in [CogVideoX](https://github.com/THUDM/CogVideo). - - Parameters: - num_attention_heads (`int`, defaults to `30`): - The number of heads to use for multi-head attention. - attention_head_dim (`int`, defaults to `64`): - The number of channels in each head. - in_channels (`int`, defaults to `16`): - The number of channels in the input. - out_channels (`int`, *optional*, defaults to `16`): - The number of channels in the output. - flip_sin_to_cos (`bool`, defaults to `True`): - Whether to flip the sin to cos in the time embedding. - time_embed_dim (`int`, defaults to `512`): - Output dimension of timestep embeddings. - text_embed_dim (`int`, defaults to `4096`): - Input dimension of text embeddings from the text encoder. - num_layers (`int`, defaults to `30`): - The number of layers of Transformer blocks to use. - dropout (`float`, defaults to `0.0`): - The dropout probability to use. 
- attention_bias (`bool`, defaults to `True`): - Whether or not to use bias in the attention projection layers. - sample_width (`int`, defaults to `90`): - The width of the input latents. - sample_height (`int`, defaults to `60`): - The height of the input latents. - sample_frames (`int`, defaults to `49`): - The number of frames in the input latents. Note that this parameter was incorrectly initialized to 49 - instead of 13 because CogVideoX processed 13 latent frames at once in its default and recommended settings, - but cannot be changed to the correct value to ensure backwards compatibility. To create a transformer with - K latent frames, the correct value to pass here would be: ((K - 1) * temporal_compression_ratio + 1). - patch_size (`int`, defaults to `2`): - The size of the patches to use in the patch embedding layer. - temporal_compression_ratio (`int`, defaults to `4`): - The compression ratio across the temporal dimension. See documentation for `sample_frames`. - max_text_seq_length (`int`, defaults to `226`): - The maximum sequence length of the input text embeddings. - activation_fn (`str`, defaults to `"gelu-approximate"`): - Activation function to use in feed-forward. - timestep_activation_fn (`str`, defaults to `"silu"`): - Activation function to use when generating the timestep embeddings. - norm_elementwise_affine (`bool`, defaults to `True`): - Whether or not to use elementwise affine in normalization layers. - norm_eps (`float`, defaults to `1e-5`): - The epsilon value to use in normalization layers. - spatial_interpolation_scale (`float`, defaults to `1.875`): - Scaling factor to apply in 3D positional embeddings across spatial dimensions. - temporal_interpolation_scale (`float`, defaults to `1.0`): - Scaling factor to apply in 3D positional embeddings across temporal dimensions. - """ - - _supports_gradient_checkpointing = True - - @register_to_config - def __init__( - self, - num_attention_heads: int = 30, - attention_head_dim: int = 64, - in_channels: int = 16, - out_channels: Optional[int] = 16, - flip_sin_to_cos: bool = True, - freq_shift: int = 0, - time_embed_dim: int = 512, - text_embed_dim: int = 4096, - num_layers: int = 30, - dropout: float = 0.0, - attention_bias: bool = True, - sample_width: int = 90, - sample_height: int = 60, - sample_frames: int = 49, - patch_size: int = 2, - temporal_compression_ratio: int = 4, - max_text_seq_length: int = 226, - activation_fn: str = "gelu-approximate", - timestep_activation_fn: str = "silu", - norm_elementwise_affine: bool = True, - norm_eps: float = 1e-5, - spatial_interpolation_scale: float = 1.875, - temporal_interpolation_scale: float = 1.0, - use_rotary_positional_embeddings: bool = False, - use_learned_positional_embeddings: bool = False, - ): - super().__init__() - inner_dim = num_attention_heads * attention_head_dim - - post_patch_height = sample_height // patch_size - post_patch_width = sample_width // patch_size - post_time_compression_frames = (sample_frames - 1) // temporal_compression_ratio + 1 - self.num_patches = post_patch_height * post_patch_width * post_time_compression_frames - - # 1. 
Patch embedding - self.patch_embed = CogVideoXPatchEmbed( - patch_size=patch_size, - in_channels=in_channels, - embed_dim=inner_dim, - text_embed_dim=text_embed_dim, - bias=True, - sample_width=sample_width, - sample_height=sample_height, - sample_frames=sample_frames, - temporal_compression_ratio=temporal_compression_ratio, - max_text_seq_length=max_text_seq_length, - spatial_interpolation_scale=spatial_interpolation_scale, - temporal_interpolation_scale=temporal_interpolation_scale, - use_positional_embeddings=not use_rotary_positional_embeddings, - use_learned_positional_embeddings=use_learned_positional_embeddings, - ) - self.embedding_dropout = nn.Dropout(dropout) - - # 2. 3D positional embeddings - spatial_pos_embedding = get_3d_sincos_pos_embed( - inner_dim, - (post_patch_width, post_patch_height), - post_time_compression_frames, - spatial_interpolation_scale, - temporal_interpolation_scale, - ) - spatial_pos_embedding = torch.from_numpy(spatial_pos_embedding).flatten(0, 1) - pos_embedding = torch.zeros(1, max_text_seq_length + self.num_patches, inner_dim, requires_grad=False) - pos_embedding.data[:, max_text_seq_length:].copy_(spatial_pos_embedding) - self.register_buffer("pos_embedding", pos_embedding, persistent=False) - - # 3. Time embeddings - self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift) - self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn) - - # 4. Define spatio-temporal transformers blocks - self.transformer_blocks = nn.ModuleList( - [ - CogVideoXBlock( - dim=inner_dim, - num_attention_heads=num_attention_heads, - attention_head_dim=attention_head_dim, - time_embed_dim=time_embed_dim, - dropout=dropout, - activation_fn=activation_fn, - attention_bias=attention_bias, - norm_elementwise_affine=norm_elementwise_affine, - norm_eps=norm_eps, - ) - for _ in range(num_layers) - ] - ) - self.norm_final = nn.LayerNorm(inner_dim, norm_eps, norm_elementwise_affine) - - # 5. 
Output blocks - self.norm_out = AdaLayerNorm( - embedding_dim=time_embed_dim, - output_dim=2 * inner_dim, - norm_elementwise_affine=norm_elementwise_affine, - norm_eps=norm_eps, - chunk_dim=1, - ) - self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels) - - self.gradient_checkpointing = False - - self.fuser_list = None - - # parallel - #self.parallel_manager = None - - # def enable_parallel(self, dp_size, sp_size, enable_cp): - # # update cfg parallel - # if enable_cp and sp_size % 2 == 0: - # sp_size = sp_size // 2 - # cp_size = 2 - # else: - # cp_size = 1 - - # self.parallel_manager: ParallelManager = ParallelManager(dp_size, cp_size, sp_size) - - # for _, module in self.named_modules(): - # if hasattr(module, "parallel_manager"): - # module.parallel_manager = self.parallel_manager - - def _set_gradient_checkpointing(self, module, value=False): - self.gradient_checkpointing = value - - def forward( - self, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - timestep: Union[int, float, torch.LongTensor], - timestep_cond: Optional[torch.Tensor] = None, - image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - return_dict: bool = True, - controlnet_states: torch.Tensor = None, - controlnet_weights: Optional[Union[float, int, list, torch.FloatTensor]] = 1.0, - video_flow_features: Optional[torch.Tensor] = None, - ): - # if self.parallel_manager.cp_size > 1: - # ( - # hidden_states, - # encoder_hidden_states, - # timestep, - # timestep_cond, - # image_rotary_emb, - # ) = batch_func( - # partial(split_sequence, process_group=self.parallel_manager.cp_group, dim=0), - # hidden_states, - # encoder_hidden_states, - # timestep, - # timestep_cond, - # image_rotary_emb, - # ) - - batch_size, num_frames, channels, height, width = hidden_states.shape - - # 1. Time embedding - timesteps = timestep - t_emb = self.time_proj(timesteps) - - # timesteps does not contain any weights and will always return f32 tensors - # but time_embedding might actually be running in fp16. so we need to cast here. - # there might be better ways to encapsulate this. - t_emb = t_emb.to(dtype=hidden_states.dtype) - emb = self.time_embedding(t_emb, timestep_cond) - - # 2. Patch embedding - hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) - - # 3. Position embedding - text_seq_length = encoder_hidden_states.shape[1] - if not self.config.use_rotary_positional_embeddings: - seq_length = height * width * num_frames // (self.config.patch_size**2) - - pos_embeds = self.pos_embedding[:, : text_seq_length + seq_length] - hidden_states = hidden_states + pos_embeds - hidden_states = self.embedding_dropout(hidden_states) - - encoder_hidden_states = hidden_states[:, :text_seq_length] - hidden_states = hidden_states[:, text_seq_length:] - - # if self.parallel_manager.sp_size > 1: - # set_pad("pad", hidden_states.shape[1], self.parallel_manager.sp_group) - # hidden_states = split_sequence(hidden_states, self.parallel_manager.sp_group, dim=1, pad=get_pad("pad")) - - # 4. 
Transformer blocks - for i, block in enumerate(self.transformer_blocks): - hidden_states, encoder_hidden_states = block( - hidden_states=hidden_states, - encoder_hidden_states=encoder_hidden_states, - temb=emb, - image_rotary_emb=image_rotary_emb, - timestep=timesteps if enable_pab() else None, - video_flow_feature=video_flow_features[i] if video_flow_features is not None else None, - fuser = self.fuser_list[i] if self.fuser_list is not None else None, - ) - if (controlnet_states is not None) and (i < len(controlnet_states)): - controlnet_states_block = controlnet_states[i] - controlnet_block_weight = 1.0 - if isinstance(controlnet_weights, (list)) or torch.is_tensor(controlnet_weights): - controlnet_block_weight = controlnet_weights[i] - elif isinstance(controlnet_weights, (float, int)): - controlnet_block_weight = controlnet_weights - - hidden_states = hidden_states + controlnet_states_block * controlnet_block_weight - - #if self.parallel_manager.sp_size > 1: - # hidden_states = gather_sequence(hidden_states, self.parallel_manager.sp_group, dim=1, pad=get_pad("pad")) - - if not self.config.use_rotary_positional_embeddings: - # CogVideoX-2B - hidden_states = self.norm_final(hidden_states) - else: - # CogVideoX-5B - hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - hidden_states = self.norm_final(hidden_states) - hidden_states = hidden_states[:, text_seq_length:] - - # 5. Final block - hidden_states = self.norm_out(hidden_states, temb=emb) - hidden_states = self.proj_out(hidden_states) - - # 6. Unpatchify - p = self.config.patch_size - output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, -1, p, p) - output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) - - #if self.parallel_manager.cp_size > 1: - # output = gather_sequence(output, self.parallel_manager.cp_group, dim=0) - - if not return_dict: - return (output,) - return Transformer2DModelOutput(sample=output) diff --git a/videosys/core/__init__.py b/videosys/core/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/videosys/core/pab_mgr.py b/videosys/core/pab_mgr.py deleted file mode 100644 index 6f19a50..0000000 --- a/videosys/core/pab_mgr.py +++ /dev/null @@ -1,232 +0,0 @@ - -PAB_MANAGER = None - - -class PABConfig: - def __init__( - self, - steps: int, - cross_broadcast: bool = False, - cross_threshold: list = None, - cross_range: int = None, - spatial_broadcast: bool = False, - spatial_threshold: list = None, - spatial_range: int = None, - temporal_broadcast: bool = False, - temporal_threshold: list = None, - temporal_range: int = None, - mlp_broadcast: bool = False, - mlp_spatial_broadcast_config: dict = None, - mlp_temporal_broadcast_config: dict = None, - ): - self.steps = steps - - self.cross_broadcast = cross_broadcast - self.cross_threshold = cross_threshold - self.cross_range = cross_range - - self.spatial_broadcast = spatial_broadcast - self.spatial_threshold = spatial_threshold - self.spatial_range = spatial_range - - self.temporal_broadcast = temporal_broadcast - self.temporal_threshold = temporal_threshold - self.temporal_range = temporal_range - - self.mlp_broadcast = mlp_broadcast - self.mlp_spatial_broadcast_config = mlp_spatial_broadcast_config - self.mlp_temporal_broadcast_config = mlp_temporal_broadcast_config - self.mlp_temporal_outputs = {} - self.mlp_spatial_outputs = {} - - -class PABManager: - def __init__(self, config: PABConfig): - self.config: PABConfig = config - - init_prompt = f"Init Pyramid Attention Broadcast. 
steps: {config.steps}." - init_prompt += f" spatial broadcast: {config.spatial_broadcast}, spatial range: {config.spatial_range}, spatial threshold: {config.spatial_threshold}." - init_prompt += f" temporal broadcast: {config.temporal_broadcast}, temporal range: {config.temporal_range}, temporal_threshold: {config.temporal_threshold}." - init_prompt += f" cross broadcast: {config.cross_broadcast}, cross range: {config.cross_range}, cross threshold: {config.cross_threshold}." - init_prompt += f" mlp broadcast: {config.mlp_broadcast}." - print(init_prompt) - - def if_broadcast_cross(self, timestep: int, count: int): - if ( - self.config.cross_broadcast - and (timestep is not None) - and (count % self.config.cross_range != 0) - and (self.config.cross_threshold[0] < timestep < self.config.cross_threshold[1]) - ): - flag = True - else: - flag = False - count = (count + 1) % self.config.steps - return flag, count - - def if_broadcast_temporal(self, timestep: int, count: int): - if ( - self.config.temporal_broadcast - and (timestep is not None) - and (count % self.config.temporal_range != 0) - and (self.config.temporal_threshold[0] < timestep < self.config.temporal_threshold[1]) - ): - flag = True - else: - flag = False - count = (count + 1) % self.config.steps - return flag, count - - def if_broadcast_spatial(self, timestep: int, count: int, block_idx: int): - if ( - self.config.spatial_broadcast - and (timestep is not None) - and (count % self.config.spatial_range != 0) - and (self.config.spatial_threshold[0] < timestep < self.config.spatial_threshold[1]) - ): - flag = True - else: - flag = False - count = (count + 1) % self.config.steps - return flag, count - - @staticmethod - def _is_t_in_skip_config(all_timesteps, timestep, config): - is_t_in_skip_config = False - skip_range = None - for key in config: - if key not in all_timesteps: - continue - index = all_timesteps.index(key) - skip_range = all_timesteps[index : index + 1 + int(config[key]["skip_count"])] - if timestep in skip_range: - is_t_in_skip_config = True - skip_range = [all_timesteps[index], all_timesteps[index + int(config[key]["skip_count"])]] - break - return is_t_in_skip_config, skip_range - - def if_skip_mlp(self, timestep: int, count: int, block_idx: int, all_timesteps, is_temporal=False): - if not self.config.mlp_broadcast: - return False, None, False, None - - if is_temporal: - cur_config = self.config.mlp_temporal_broadcast_config - else: - cur_config = self.config.mlp_spatial_broadcast_config - - is_t_in_skip_config, skip_range = self._is_t_in_skip_config(all_timesteps, timestep, cur_config) - next_flag = False - if ( - self.config.mlp_broadcast - and (timestep is not None) - and (timestep in cur_config) - and (block_idx in cur_config[timestep]["block"]) - ): - flag = False - next_flag = True - count = count + 1 - elif ( - self.config.mlp_broadcast - and (timestep is not None) - and (is_t_in_skip_config) - and (block_idx in cur_config[skip_range[0]]["block"]) - ): - flag = True - count = 0 - else: - flag = False - - return flag, count, next_flag, skip_range - - def save_skip_output(self, timestep, block_idx, ff_output, is_temporal=False): - if is_temporal: - self.config.mlp_temporal_outputs[(timestep, block_idx)] = ff_output - else: - self.config.mlp_spatial_outputs[(timestep, block_idx)] = ff_output - - def get_mlp_output(self, skip_range, timestep, block_idx, is_temporal=False): - skip_start_t = skip_range[0] - if is_temporal: - skip_output = ( - self.config.mlp_temporal_outputs.get((skip_start_t, block_idx), None) - if 
self.config.mlp_temporal_outputs is not None
-                else None
-            )
-        else:
-            skip_output = (
-                self.config.mlp_spatial_outputs.get((skip_start_t, block_idx), None)
-                if self.config.mlp_spatial_outputs is not None
-                else None
-            )
-
-        if skip_output is not None:
-            if timestep == skip_range[-1]:
-                # TODO: save memory
-                if is_temporal:
-                    del self.config.mlp_temporal_outputs[(skip_start_t, block_idx)]
-                else:
-                    del self.config.mlp_spatial_outputs[(skip_start_t, block_idx)]
-        else:
-            raise ValueError(
-                f"No stored MLP output found | t {timestep} | [{skip_range[0]}, {skip_range[-1]}] | block {block_idx}"
-            )
-
-        return skip_output
-
-    def get_spatial_mlp_outputs(self):
-        return self.config.mlp_spatial_outputs
-
-    def get_temporal_mlp_outputs(self):
-        return self.config.mlp_temporal_outputs
-
-
-def set_pab_manager(config: PABConfig):
-    global PAB_MANAGER
-    PAB_MANAGER = PABManager(config)
-
-
-def enable_pab():
-    if PAB_MANAGER is None:
-        return False
-    return (
-        PAB_MANAGER.config.cross_broadcast
-        or PAB_MANAGER.config.spatial_broadcast
-        or PAB_MANAGER.config.temporal_broadcast
-    )
-
-
-def update_steps(steps: int):
-    if PAB_MANAGER is not None:
-        PAB_MANAGER.config.steps = steps
-
-
-def if_broadcast_cross(timestep: int, count: int):
-    if not enable_pab():
-        return False, count
-    return PAB_MANAGER.if_broadcast_cross(timestep, count)
-
-
-def if_broadcast_temporal(timestep: int, count: int):
-    if not enable_pab():
-        return False, count
-    return PAB_MANAGER.if_broadcast_temporal(timestep, count)
-
-
-def if_broadcast_spatial(timestep: int, count: int, block_idx: int):
-    if not enable_pab():
-        return False, count
-    return PAB_MANAGER.if_broadcast_spatial(timestep, count, block_idx)
-
-
-def if_broadcast_mlp(timestep: int, count: int, block_idx: int, all_timesteps, is_temporal=False):
-    if not enable_pab():
-        return False, count
-    return PAB_MANAGER.if_skip_mlp(timestep, count, block_idx, all_timesteps, is_temporal)
-
-
-def save_mlp_output(timestep: int, block_idx: int, ff_output, is_temporal=False):
-    return PAB_MANAGER.save_skip_output(timestep, block_idx, ff_output, is_temporal)
-
-
-def get_mlp_output(skip_range, timestep, block_idx: int, is_temporal=False):
-    return PAB_MANAGER.get_mlp_output(skip_range, timestep, block_idx, is_temporal)
diff --git a/videosys/core/pipeline.py b/videosys/core/pipeline.py
deleted file mode 100644
index 3244749..0000000
--- a/videosys/core/pipeline.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import inspect
-from abc import abstractmethod
-
-import torch
-from diffusers.pipelines.pipeline_utils import DiffusionPipeline
-
-class VideoSysPipeline(DiffusionPipeline):
-    def __init__(self):
-        super().__init__()
-
-    @staticmethod
-    def set_eval_and_device(device: torch.device, *modules):
-        for module in modules:
-            module.eval()
-            module.to(device)
-
-    @abstractmethod
-    def generate(self, *args, **kwargs):
-        pass
-
-    def __call__(self, *args, **kwargs):
-        """
-        In diffusers, it is a convention to call the pipeline object directly.
-        In VideoSys, the generate method is preferred as a clearer entry point;
-        this wrapper keeps the diffusers-style call working.
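        Example (illustrative, assuming a concrete pipeline subclass):

            pipe.generate(prompt="a cat")  # preferred VideoSys entry point
            pipe(prompt="a cat")           # equivalent; forwarded to generate()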
- """ - return self.generate(*args, **kwargs) - - @classmethod - def _get_signature_keys(cls, obj): - parameters = inspect.signature(obj.__init__).parameters - required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty} - optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty}) - expected_modules = set(required_parameters.keys()) - {"self"} - # modify: remove the config module from the expected modules - expected_modules = expected_modules - {"config"} - - optional_names = list(optional_parameters) - for name in optional_names: - if name in cls._optional_components: - expected_modules.add(name) - optional_parameters.remove(name) - - return expected_modules, optional_parameters diff --git a/videosys/modules/__init__.py b/videosys/modules/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/videosys/modules/activations.py b/videosys/modules/activations.py deleted file mode 100644 index cf24149..0000000 --- a/videosys/modules/activations.py +++ /dev/null @@ -1,3 +0,0 @@ -import torch.nn as nn - -approx_gelu = lambda: nn.GELU(approximate="tanh") diff --git a/videosys/modules/downsampling.py b/videosys/modules/downsampling.py deleted file mode 100644 index 9455a32..0000000 --- a/videosys/modules/downsampling.py +++ /dev/null @@ -1,71 +0,0 @@ -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class CogVideoXDownsample3D(nn.Module): - # Todo: Wait for paper relase. - r""" - A 3D Downsampling layer using in [CogVideoX]() by Tsinghua University & ZhipuAI - - Args: - in_channels (`int`): - Number of channels in the input image. - out_channels (`int`): - Number of channels produced by the convolution. - kernel_size (`int`, defaults to `3`): - Size of the convolving kernel. - stride (`int`, defaults to `2`): - Stride of the convolution. - padding (`int`, defaults to `0`): - Padding added to all four sides of the input. - compress_time (`bool`, defaults to `False`): - Whether or not to compress the time dimension. 
- """ - - def __init__( - self, - in_channels: int, - out_channels: int, - kernel_size: int = 3, - stride: int = 2, - padding: int = 0, - compress_time: bool = False, - ): - super().__init__() - - self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) - self.compress_time = compress_time - - def forward(self, x: torch.Tensor) -> torch.Tensor: - if self.compress_time: - batch_size, channels, frames, height, width = x.shape - - # (batch_size, channels, frames, height, width) -> (batch_size, height, width, channels, frames) -> (batch_size * height * width, channels, frames) - x = x.permute(0, 3, 4, 1, 2).reshape(batch_size * height * width, channels, frames) - - if x.shape[-1] % 2 == 1: - x_first, x_rest = x[..., 0], x[..., 1:] - if x_rest.shape[-1] > 0: - # (batch_size * height * width, channels, frames - 1) -> (batch_size * height * width, channels, (frames - 1) // 2) - x_rest = F.avg_pool1d(x_rest, kernel_size=2, stride=2) - - x = torch.cat([x_first[..., None], x_rest], dim=-1) - # (batch_size * height * width, channels, (frames // 2) + 1) -> (batch_size, height, width, channels, (frames // 2) + 1) -> (batch_size, channels, (frames // 2) + 1, height, width) - x = x.reshape(batch_size, height, width, channels, x.shape[-1]).permute(0, 3, 4, 1, 2) - else: - # (batch_size * height * width, channels, frames) -> (batch_size * height * width, channels, frames // 2) - x = F.avg_pool1d(x, kernel_size=2, stride=2) - # (batch_size * height * width, channels, frames // 2) -> (batch_size, height, width, channels, frames // 2) -> (batch_size, channels, frames // 2, height, width) - x = x.reshape(batch_size, height, width, channels, x.shape[-1]).permute(0, 3, 4, 1, 2) - - # Pad the tensor - pad = (0, 1, 0, 1) - x = F.pad(x, pad, mode="constant", value=0) - batch_size, channels, frames, height, width = x.shape - # (batch_size, channels, frames, height, width) -> (batch_size, frames, channels, height, width) -> (batch_size * frames, channels, height, width) - x = x.permute(0, 2, 1, 3, 4).reshape(batch_size * frames, channels, height, width) - x = self.conv(x) - # (batch_size * frames, channels, height, width) -> (batch_size, frames, channels, height, width) -> (batch_size, channels, frames, height, width) - x = x.reshape(batch_size, frames, x.shape[1], x.shape[2], x.shape[3]).permute(0, 2, 1, 3, 4) - return x diff --git a/videosys/modules/embeddings.py b/videosys/modules/embeddings.py deleted file mode 100644 index 04eba82..0000000 --- a/videosys/modules/embeddings.py +++ /dev/null @@ -1,308 +0,0 @@ -import math -from typing import Optional, Tuple, Union - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint -from einops import rearrange - -class CogVideoXPatchEmbed(nn.Module): - def __init__( - self, - patch_size: int = 2, - in_channels: int = 16, - embed_dim: int = 1920, - text_embed_dim: int = 4096, - bias: bool = True, - ) -> None: - super().__init__() - self.patch_size = patch_size - - self.proj = nn.Conv2d( - in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias - ) - self.text_proj = nn.Linear(text_embed_dim, embed_dim) - - def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor): - r""" - Args: - text_embeds (`torch.Tensor`): - Input text embeddings. Expected shape: (batch_size, seq_length, embedding_dim). - image_embeds (`torch.Tensor`): - Input image embeddings. Expected shape: (batch_size, num_frames, channels, height, width). 
- """ - text_embeds = self.text_proj(text_embeds) - - batch, num_frames, channels, height, width = image_embeds.shape - image_embeds = image_embeds.reshape(-1, channels, height, width) - image_embeds = self.proj(image_embeds) - image_embeds = image_embeds.view(batch, num_frames, *image_embeds.shape[1:]) - image_embeds = image_embeds.flatten(3).transpose(2, 3) # [batch, num_frames, height x width, channels] - image_embeds = image_embeds.flatten(1, 2) # [batch, num_frames x height x width, channels] - - embeds = torch.cat( - [text_embeds, image_embeds], dim=1 - ).contiguous() # [batch, seq_length + num_frames x height x width, channels] - return embeds - - -class OpenSoraPatchEmbed3D(nn.Module): - """Video to Patch Embedding. - - Args: - patch_size (int): Patch token size. Default: (2,4,4). - in_chans (int): Number of input video channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. Default: None - """ - - def __init__( - self, - patch_size=(2, 4, 4), - in_chans=3, - embed_dim=96, - norm_layer=None, - flatten=True, - ): - super().__init__() - self.patch_size = patch_size - self.flatten = flatten - - self.in_chans = in_chans - self.embed_dim = embed_dim - - self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - """Forward function.""" - # padding - _, _, D, H, W = x.size() - if W % self.patch_size[2] != 0: - x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2])) - if H % self.patch_size[1] != 0: - x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1])) - if D % self.patch_size[0] != 0: - x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self.patch_size[0])) - - x = self.proj(x) # (B C T H W) - if self.norm is not None: - D, Wh, Ww = x.size(2), x.size(3), x.size(4) - x = x.flatten(2).transpose(1, 2) - x = self.norm(x) - x = x.transpose(1, 2).view(-1, self.embed_dim, D, Wh, Ww) - if self.flatten: - x = x.flatten(2).transpose(1, 2) # BCTHW -> BNC - return x - - -class TimestepEmbedder(nn.Module): - """ - Embeds scalar timesteps into vector representations. - """ - - def __init__(self, hidden_size, frequency_embedding_size=256): - super().__init__() - self.mlp = nn.Sequential( - nn.Linear(frequency_embedding_size, hidden_size, bias=True), - nn.SiLU(), - nn.Linear(hidden_size, hidden_size, bias=True), - ) - self.frequency_embedding_size = frequency_embedding_size - - @staticmethod - def timestep_embedding(t, dim, max_period=10000): - """ - Create sinusoidal timestep embeddings. - :param t: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param dim: the dimension of the output. - :param max_period: controls the minimum frequency of the embeddings. - :return: an (N, D) Tensor of positional embeddings. 
- """ - # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py - half = dim // 2 - freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half) - freqs = freqs.to(device=t.device) - args = t[:, None].float() * freqs[None] - embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) - if dim % 2: - embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) - return embedding - - def forward(self, t, dtype): - t_freq = self.timestep_embedding(t, self.frequency_embedding_size) - if t_freq.dtype != dtype: - t_freq = t_freq.to(dtype) - t_emb = self.mlp(t_freq) - return t_emb - - -class SizeEmbedder(TimestepEmbedder): - """ - Embeds scalar timesteps into vector representations. - """ - - def __init__(self, hidden_size, frequency_embedding_size=256): - super().__init__(hidden_size=hidden_size, frequency_embedding_size=frequency_embedding_size) - self.mlp = nn.Sequential( - nn.Linear(frequency_embedding_size, hidden_size, bias=True), - nn.SiLU(), - nn.Linear(hidden_size, hidden_size, bias=True), - ) - self.frequency_embedding_size = frequency_embedding_size - self.outdim = hidden_size - - def forward(self, s, bs): - if s.ndim == 1: - s = s[:, None] - assert s.ndim == 2 - if s.shape[0] != bs: - s = s.repeat(bs // s.shape[0], 1) - assert s.shape[0] == bs - b, dims = s.shape[0], s.shape[1] - s = rearrange(s, "b d -> (b d)") - s_freq = self.timestep_embedding(s, self.frequency_embedding_size).to(self.dtype) - s_emb = self.mlp(s_freq) - s_emb = rearrange(s_emb, "(b d) d2 -> b (d d2)", b=b, d=dims, d2=self.outdim) - return s_emb - - @property - def dtype(self): - return next(self.parameters()).dtype - -def get_3d_rotary_pos_embed( - embed_dim, crops_coords, grid_size, temporal_size, theta: int = 10000, use_real: bool = True -) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: - """ - RoPE for video tokens with 3D structure. - - Args: - embed_dim: (`int`): - The embedding dimension size, corresponding to hidden_size_head. - crops_coords (`Tuple[int]`): - The top-left and bottom-right coordinates of the crop. - grid_size (`Tuple[int]`): - The grid size of the spatial positional embedding (height, width). - temporal_size (`int`): - The size of the temporal dimension. - theta (`float`): - Scaling factor for frequency computation. - use_real (`bool`): - If True, return real part and imaginary part separately. Otherwise, return complex numbers. - - Returns: - `torch.Tensor`: positional embedding with shape `(temporal_size * grid_size[0] * grid_size[1], embed_dim/2)`. 
- """ - start, stop = crops_coords - grid_h = np.linspace(start[0], stop[0], grid_size[0], endpoint=False, dtype=np.float32) - grid_w = np.linspace(start[1], stop[1], grid_size[1], endpoint=False, dtype=np.float32) - grid_t = np.linspace(0, temporal_size, temporal_size, endpoint=False, dtype=np.float32) - - # Compute dimensions for each axis - dim_t = embed_dim // 4 - dim_h = embed_dim // 8 * 3 - dim_w = embed_dim // 8 * 3 - - # Temporal frequencies - freqs_t = 1.0 / (theta ** (torch.arange(0, dim_t, 2).float() / dim_t)) - grid_t = torch.from_numpy(grid_t).float() - freqs_t = torch.einsum("n , f -> n f", grid_t, freqs_t) - freqs_t = freqs_t.repeat_interleave(2, dim=-1) - - # Spatial frequencies for height and width - freqs_h = 1.0 / (theta ** (torch.arange(0, dim_h, 2).float() / dim_h)) - freqs_w = 1.0 / (theta ** (torch.arange(0, dim_w, 2).float() / dim_w)) - grid_h = torch.from_numpy(grid_h).float() - grid_w = torch.from_numpy(grid_w).float() - freqs_h = torch.einsum("n , f -> n f", grid_h, freqs_h) - freqs_w = torch.einsum("n , f -> n f", grid_w, freqs_w) - freqs_h = freqs_h.repeat_interleave(2, dim=-1) - freqs_w = freqs_w.repeat_interleave(2, dim=-1) - - # Broadcast and concatenate tensors along specified dimension - def broadcast(tensors, dim=-1): - num_tensors = len(tensors) - shape_lens = {len(t.shape) for t in tensors} - assert len(shape_lens) == 1, "tensors must all have the same number of dimensions" - shape_len = list(shape_lens)[0] - dim = (dim + shape_len) if dim < 0 else dim - dims = list(zip(*(list(t.shape) for t in tensors))) - expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim] - assert all( - [*(len(set(t[1])) <= 2 for t in expandable_dims)] - ), "invalid dimensions for broadcastable concatenation" - max_dims = [(t[0], max(t[1])) for t in expandable_dims] - expanded_dims = [(t[0], (t[1],) * num_tensors) for t in max_dims] - expanded_dims.insert(dim, (dim, dims[dim])) - expandable_shapes = list(zip(*(t[1] for t in expanded_dims))) - tensors = [t[0].expand(*t[1]) for t in zip(tensors, expandable_shapes)] - return torch.cat(tensors, dim=dim) - - freqs = broadcast((freqs_t[:, None, None, :], freqs_h[None, :, None, :], freqs_w[None, None, :, :]), dim=-1) - - t, h, w, d = freqs.shape - freqs = freqs.view(t * h * w, d) - - # Generate sine and cosine components - sin = freqs.sin() - cos = freqs.cos() - - if use_real: - return cos, sin - else: - freqs_cis = torch.polar(torch.ones_like(freqs), freqs) - return freqs_cis - - -def apply_rotary_emb( - x: torch.Tensor, - freqs_cis: Union[torch.Tensor, Tuple[torch.Tensor]], - use_real: bool = True, - use_real_unbind_dim: int = -1, -) -> Tuple[torch.Tensor, torch.Tensor]: - """ - Apply rotary embeddings to input tensors using the given frequency tensor. This function applies rotary embeddings - to the given query or key 'x' tensors using the provided frequency tensor 'freqs_cis'. The input tensors are - reshaped as complex numbers, and the frequency tensor is reshaped for broadcasting compatibility. The resulting - tensors contain rotary embeddings and are returned as real tensors. - - Args: - x (`torch.Tensor`): - Query or key tensor to apply rotary embeddings. [B, H, S, D] xk (torch.Tensor): Key tensor to apply - freqs_cis (`Tuple[torch.Tensor]`): Precomputed frequency tensor for complex exponentials. ([S, D], [S, D],) - - Returns: - Tuple[torch.Tensor, torch.Tensor]: Tuple of modified query tensor and key tensor with rotary embeddings. 
- """ - if use_real: - cos, sin = freqs_cis # [S, D] - cos = cos[None, None] - sin = sin[None, None] - cos, sin = cos.to(x.device), sin.to(x.device) - - if use_real_unbind_dim == -1: - # Use for example in Lumina - x_real, x_imag = x.reshape(*x.shape[:-1], -1, 2).unbind(-1) # [B, S, H, D//2] - x_rotated = torch.stack([-x_imag, x_real], dim=-1).flatten(3) - elif use_real_unbind_dim == -2: - # Use for example in Stable Audio - x_real, x_imag = x.reshape(*x.shape[:-1], 2, -1).unbind(-2) # [B, S, H, D//2] - x_rotated = torch.cat([-x_imag, x_real], dim=-1) - else: - raise ValueError(f"`use_real_unbind_dim={use_real_unbind_dim}` but should be -1 or -2.") - - out = (x.float() * cos + x_rotated.float() * sin).to(x.dtype) - - return out - else: - x_rotated = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2)) - freqs_cis = freqs_cis.unsqueeze(2) - x_out = torch.view_as_real(x_rotated * freqs_cis).flatten(3) - - return x_out.type_as(x) diff --git a/videosys/modules/normalization.py b/videosys/modules/normalization.py deleted file mode 100644 index 216d0cc..0000000 --- a/videosys/modules/normalization.py +++ /dev/null @@ -1,85 +0,0 @@ -from typing import Optional, Tuple - -import torch -import torch.nn as nn - - -class CogVideoXLayerNormZero(nn.Module): - def __init__( - self, - conditioning_dim: int, - embedding_dim: int, - elementwise_affine: bool = True, - eps: float = 1e-5, - bias: bool = True, - ) -> None: - super().__init__() - - self.silu = nn.SiLU() - self.linear = nn.Linear(conditioning_dim, 6 * embedding_dim, bias=bias) - self.norm = nn.LayerNorm(embedding_dim, eps=eps, elementwise_affine=elementwise_affine) - - def forward( - self, hidden_states: torch.Tensor, encoder_hidden_states: torch.Tensor, temb: torch.Tensor - ) -> Tuple[torch.Tensor, torch.Tensor]: - shift, scale, gate, enc_shift, enc_scale, enc_gate = self.linear(self.silu(temb)).chunk(6, dim=1) - hidden_states = self.norm(hidden_states) * (1 + scale)[:, None, :] + shift[:, None, :] - encoder_hidden_states = self.norm(encoder_hidden_states) * (1 + enc_scale)[:, None, :] + enc_shift[:, None, :] - return hidden_states, encoder_hidden_states, gate[:, None, :], enc_gate[:, None, :] - - -class AdaLayerNorm(nn.Module): - r""" - Norm layer modified to incorporate timestep embeddings. - - Parameters: - embedding_dim (`int`): The size of each embedding vector. - num_embeddings (`int`, *optional*): The size of the embeddings dictionary. 
-        output_dim (`int`, *optional*):
-            Output dimension of the modulation projection; defaults to `2 * embedding_dim` when not set.
-        norm_elementwise_affine (`bool`, defaults to `False`):
-            Whether to use learnable elementwise affine parameters in the norm.
-        norm_eps (`float`, defaults to `1e-5`):
-            Epsilon value for the norm.
-        chunk_dim (`int`, defaults to `0`):
-            Dimension along which the modulation tensor is chunked into scale and shift.
-    """
-
-    def __init__(
-        self,
-        embedding_dim: int,
-        num_embeddings: Optional[int] = None,
-        output_dim: Optional[int] = None,
-        norm_elementwise_affine: bool = False,
-        norm_eps: float = 1e-5,
-        chunk_dim: int = 0,
-    ):
-        super().__init__()
-
-        self.chunk_dim = chunk_dim
-        output_dim = output_dim or embedding_dim * 2
-
-        if num_embeddings is not None:
-            self.emb = nn.Embedding(num_embeddings, embedding_dim)
-        else:
-            self.emb = None
-
-        self.silu = nn.SiLU()
-        self.linear = nn.Linear(embedding_dim, output_dim)
-        self.norm = nn.LayerNorm(output_dim // 2, norm_eps, norm_elementwise_affine)
-
-    def forward(
-        self, x: torch.Tensor, timestep: Optional[torch.Tensor] = None, temb: Optional[torch.Tensor] = None
-    ) -> torch.Tensor:
-        if self.emb is not None:
-            temb = self.emb(timestep)
-
-        temb = self.linear(self.silu(temb))
-
-        if self.chunk_dim == 1:
-            # It is a bit odd that the order is "shift, scale" here and "scale, shift" in the
-            # other branch; this branch is specific to CogVideoX for now.
-            shift, scale = temb.chunk(2, dim=1)
-            shift = shift[:, None, :]
-            scale = scale[:, None, :]
-        else:
-            scale, shift = temb.chunk(2, dim=0)
-
-        x = self.norm(x) * (1 + scale) + shift
-        return x
diff --git a/videosys/modules/upsampling.py b/videosys/modules/upsampling.py
deleted file mode 100644
index f9a61b7..0000000
--- a/videosys/modules/upsampling.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-
-class CogVideoXUpsample3D(nn.Module):
-    r"""
-    A 3D upsampling layer used in CogVideoX by Tsinghua University & ZhipuAI  # TODO: wait for paper release.
-
-    Args:
-        in_channels (`int`):
-            Number of channels in the input image.
-        out_channels (`int`):
-            Number of channels produced by the convolution.
-        kernel_size (`int`, defaults to `3`):
-            Size of the convolving kernel.
-        stride (`int`, defaults to `1`):
-            Stride of the convolution.
-        padding (`int`, defaults to `1`):
-            Padding added to all four sides of the input.
-        compress_time (`bool`, defaults to `False`):
-            Whether or not to compress the time dimension.
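    Example (an illustrative shape walk): with compress_time=True, an input of shape
    (B, C, 5, H, W) becomes (B, C, 9, 2H, 2W): frame 0 is upsampled spatially only,
    the remaining 4 frames are interpolated in time and space (4 -> 8), and the two
    parts are concatenated (1 + 8 = 9 frames).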
- """ - - def __init__( - self, - in_channels: int, - out_channels: int, - kernel_size: int = 3, - stride: int = 1, - padding: int = 1, - compress_time: bool = False, - ) -> None: - super().__init__() - - self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding) - self.compress_time = compress_time - - def forward(self, inputs: torch.Tensor) -> torch.Tensor: - if self.compress_time: - if inputs.shape[2] > 1 and inputs.shape[2] % 2 == 1: - # split first frame - x_first, x_rest = inputs[:, :, 0], inputs[:, :, 1:] - - x_first = F.interpolate(x_first, scale_factor=2.0) - x_rest = F.interpolate(x_rest, scale_factor=2.0) - x_first = x_first[:, :, None, :, :] - inputs = torch.cat([x_first, x_rest], dim=2) - elif inputs.shape[2] > 1: - inputs = F.interpolate(inputs, scale_factor=2.0) - else: - inputs = inputs.squeeze(2) - inputs = F.interpolate(inputs, scale_factor=2.0) - inputs = inputs[:, :, None, :, :] - else: - # only interpolate 2D - b, c, t, h, w = inputs.shape - inputs = inputs.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w) - inputs = F.interpolate(inputs, scale_factor=2.0) - inputs = inputs.reshape(b, t, c, *inputs.shape[2:]).permute(0, 2, 1, 3, 4) - - b, c, t, h, w = inputs.shape - inputs = inputs.permute(0, 2, 1, 3, 4).reshape(b * t, c, h, w) - inputs = self.conv(inputs) - inputs = inputs.reshape(b, t, *inputs.shape[1:]).permute(0, 2, 1, 3, 4) - - return inputs diff --git a/videosys/pab.py b/videosys/pab.py deleted file mode 100644 index 007e1b3..0000000 --- a/videosys/pab.py +++ /dev/null @@ -1,64 +0,0 @@ -class PABConfig: - def __init__( - self, - steps: int, - cross_broadcast: bool = False, - cross_threshold: list = None, - cross_range: int = None, - spatial_broadcast: bool = False, - spatial_threshold: list = None, - spatial_range: int = None, - temporal_broadcast: bool = False, - temporal_threshold: list = None, - temporal_range: int = None, - mlp_broadcast: bool = False, - mlp_spatial_broadcast_config: dict = None, - mlp_temporal_broadcast_config: dict = None, - ): - self.steps = steps - - self.cross_broadcast = cross_broadcast - self.cross_threshold = cross_threshold - self.cross_range = cross_range - - self.spatial_broadcast = spatial_broadcast - self.spatial_threshold = spatial_threshold - self.spatial_range = spatial_range - - self.temporal_broadcast = temporal_broadcast - self.temporal_threshold = temporal_threshold - self.temporal_range = temporal_range - - self.mlp_broadcast = mlp_broadcast - self.mlp_spatial_broadcast_config = mlp_spatial_broadcast_config - self.mlp_temporal_broadcast_config = mlp_temporal_broadcast_config - self.mlp_temporal_outputs = {} - self.mlp_spatial_outputs = {} - -class CogVideoXPABConfig(PABConfig): - def __init__( - self, - steps: int = 50, - spatial_broadcast: bool = True, - spatial_threshold: list = [100, 850], - spatial_range: int = 2, - temporal_broadcast: bool = False, - temporal_threshold: list = [100, 850], - temporal_range: int = 4, - cross_broadcast: bool = False, - cross_threshold: list = [100, 850], - cross_range: int = 6, - ): - super().__init__( - steps=steps, - spatial_broadcast=spatial_broadcast, - spatial_threshold=spatial_threshold, - spatial_range=spatial_range, - temporal_broadcast=temporal_broadcast, - temporal_threshold=temporal_threshold, - temporal_range=temporal_range, - cross_broadcast=cross_broadcast, - cross_threshold=cross_threshold, - cross_range=cross_range - - ) \ No newline at end of file From 832dad94bcad6cb3fd6c29a3add6c26af118d5ea Mon Sep 17 00:00:00 2001 From: 
kijai <40791699+kijai@users.noreply.github.com> Date: Thu, 14 Nov 2024 19:57:38 +0200 Subject: [PATCH 36/49] flip width/height inputs to the sampler node (finally) --- nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodes.py b/nodes.py index cd90f33..8feba51 100644 --- a/nodes.py +++ b/nodes.py @@ -760,8 +760,8 @@ class CogVideoSampler: "pipeline": ("COGVIDEOPIPE",), "positive": ("CONDITIONING", ), "negative": ("CONDITIONING", ), - "height": ("INT", {"default": 480, "min": 128, "max": 2048, "step": 16}), "width": ("INT", {"default": 720, "min": 128, "max": 2048, "step": 16}), + "height": ("INT", {"default": 480, "min": 128, "max": 2048, "step": 16}), "num_frames": ("INT", {"default": 49, "min": 17, "max": 1024, "step": 4}), "steps": ("INT", {"default": 50, "min": 1}), "cfg": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 30.0, "step": 0.01}), From 75e98906a3e9bffed2ad84baa2315175102a87f3 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Fri, 15 Nov 2024 02:37:44 +0200 Subject: [PATCH 37/49] rotary embed fix https://github.com/huggingface/diffusers/pull/9877/commits/25a9e1c567f86bf6de538891d9f07c7f155e70af --- embeddings.py | 96 ++++++++++++++++++++++++++++++++++++++++++- model_loading.py | 13 +++--- pipeline_cogvideox.py | 50 ++++++++++++++-------- requirements.txt | 3 +- 4 files changed, 137 insertions(+), 25 deletions(-) diff --git a/embeddings.py b/embeddings.py index 908c67f..111ba04 100644 --- a/embeddings.py +++ b/embeddings.py @@ -2,7 +2,7 @@ import torch import torch.nn as nn import numpy as np from typing import Tuple, Union, Optional -from diffusers.models.embeddings import get_3d_sincos_pos_embed +from diffusers.models.embeddings import get_3d_sincos_pos_embed, get_1d_rotary_pos_embed class CogVideoXPatchEmbed(nn.Module): @@ -131,4 +131,96 @@ class CogVideoXPatchEmbed(nn.Module): embeds = embeds + pos_embedding return embeds - \ No newline at end of file + +def get_3d_rotary_pos_embed( + embed_dim, + crops_coords, + grid_size, + temporal_size, + theta: int = 10000, + use_real: bool = True, + grid_type: str = "linspace", + max_size: Optional[Tuple[int, int]] = None, +) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: + """ + RoPE for video tokens with 3D structure. + + Args: + embed_dim: (`int`): + The embedding dimension size, corresponding to hidden_size_head. + crops_coords (`Tuple[int]`): + The top-left and bottom-right coordinates of the crop. + grid_size (`Tuple[int]`): + The grid size of the spatial positional embedding (height, width). + temporal_size (`int`): + The size of the temporal dimension. + theta (`float`): + Scaling factor for frequency computation. + grid_type (`str`): + Whether to use "linspace" or "slice" to compute grids. + + Returns: + `torch.Tensor`: positional embedding with shape `(temporal_size * grid_size[0] * grid_size[1], embed_dim/2)`. 
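    Example (illustrative, mirroring how the pipeline below calls this): CogVideoX 1.0
    style uses grid_type="linspace" with explicit crops_coords, while 1.5 style uses
    grid_type="slice" with crops_coords=None and max_size=(base_height, base_width);
    "slice" builds frequencies for the full max_size grid and keeps only the first
    grid_size entries per axis.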
+ """ + if use_real is not True: + raise ValueError(" `use_real = False` is not currently supported for get_3d_rotary_pos_embed") + + if grid_type == "linspace": + start, stop = crops_coords + grid_size_h, grid_size_w = grid_size + grid_h = np.linspace(start[0], stop[0], grid_size_h, endpoint=False, dtype=np.float32) + grid_w = np.linspace(start[1], stop[1], grid_size_w, endpoint=False, dtype=np.float32) + grid_t = np.arange(temporal_size, dtype=np.float32) + grid_t = np.linspace(0, temporal_size, temporal_size, endpoint=False, dtype=np.float32) + elif grid_type == "slice": + max_h, max_w = max_size + grid_size_h, grid_size_w = grid_size + grid_h = np.arange(max_h, dtype=np.float32) + grid_w = np.arange(max_w, dtype=np.float32) + grid_t = np.arange(temporal_size, dtype=np.float32) + else: + raise ValueError("Invalid value passed for `grid_type`.") + + # Compute dimensions for each axis + dim_t = embed_dim // 4 + dim_h = embed_dim // 8 * 3 + dim_w = embed_dim // 8 * 3 + + # Temporal frequencies + freqs_t = get_1d_rotary_pos_embed(dim_t, grid_t, use_real=True) + # Spatial frequencies for height and width + freqs_h = get_1d_rotary_pos_embed(dim_h, grid_h, use_real=True) + freqs_w = get_1d_rotary_pos_embed(dim_w, grid_w, use_real=True) + + # BroadCast and concatenate temporal and spaial frequencie (height and width) into a 3d tensor + def combine_time_height_width(freqs_t, freqs_h, freqs_w): + freqs_t = freqs_t[:, None, None, :].expand( + -1, grid_size_h, grid_size_w, -1 + ) # temporal_size, grid_size_h, grid_size_w, dim_t + freqs_h = freqs_h[None, :, None, :].expand( + temporal_size, -1, grid_size_w, -1 + ) # temporal_size, grid_size_h, grid_size_2, dim_h + freqs_w = freqs_w[None, None, :, :].expand( + temporal_size, grid_size_h, -1, -1 + ) # temporal_size, grid_size_h, grid_size_2, dim_w + + freqs = torch.cat( + [freqs_t, freqs_h, freqs_w], dim=-1 + ) # temporal_size, grid_size_h, grid_size_w, (dim_t + dim_h + dim_w) + freqs = freqs.view( + temporal_size * grid_size_h * grid_size_w, -1 + ) # (temporal_size * grid_size_h * grid_size_w), (dim_t + dim_h + dim_w) + return freqs + + t_cos, t_sin = freqs_t # both t_cos and t_sin has shape: temporal_size, dim_t + h_cos, h_sin = freqs_h # both h_cos and h_sin has shape: grid_size_h, dim_h + w_cos, w_sin = freqs_w # both w_cos and w_sin has shape: grid_size_w, dim_w + + if grid_type == "slice": + t_cos, t_sin = t_cos[:temporal_size], t_sin[:temporal_size] + h_cos, h_sin = h_cos[:grid_size_h], h_sin[:grid_size_h] + w_cos, w_sin = w_cos[:grid_size_w], w_sin[:grid_size_w] + + cos = combine_time_height_width(t_cos, h_cos, w_cos) + sin = combine_time_height_width(t_sin, h_sin, w_sin) + return cos, sin \ No newline at end of file diff --git a/model_loading.py b/model_loading.py index 532d6aa..45b6c1b 100644 --- a/model_loading.py +++ b/model_loading.py @@ -182,16 +182,19 @@ class DownloadAndLoadCogVideoModel: local_dir_use_symlinks=False, ) - # transformer + #transformer if "Fun" in model: transformer = CogVideoXTransformer3DModelFun.from_pretrained(base_path, subfolder=subfolder) else: transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder=subfolder) transformer = transformer.to(dtype).to(transformer_load_device) - transformer.attention_mode = attention_mode + if "1.5" in model: + transformer.config.sample_height = 300 + transformer.config.sample_width = 300 + if block_edit is not None: transformer = remove_specific_blocks(transformer, block_edit) @@ -199,7 +202,7 @@ class DownloadAndLoadCogVideoModel: scheduler_config = 
json.load(f) scheduler = CogVideoXDDIMScheduler.from_config(scheduler_config) - # VAE + #VAE if "Fun" in model: vae = AutoencoderKLCogVideoXFun.from_pretrained(base_path, subfolder="vae").to(dtype).to(offload_device) if "Pose" in model: @@ -393,8 +396,8 @@ class DownloadAndLoadCogVideoGGUFModel: transformer_config["use_learned_positional_embeddings"] = False transformer_config["patch_size_t"] = 2 transformer_config["patch_bias"] = False - transformer_config["sample_height"] = 96 - transformer_config["sample_width"] = 170 + transformer_config["sample_height"] = 300 + transformer_config["sample_width"] = 300 transformer = CogVideoXTransformer3DModel.from_config(transformer_config) else: transformer_config["in_channels"] = 16 diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 09e9103..472a308 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -26,9 +26,10 @@ from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor from diffusers.video_processor import VideoProcessor -from diffusers.models.embeddings import get_3d_rotary_pos_embed +#from diffusers.models.embeddings import get_3d_rotary_pos_embed from diffusers.loaders import CogVideoXLoraLoaderMixin +from .embeddings import get_3d_rotary_pos_embed from .custom_cogvideox_transformer_3d import CogVideoXTransformer3DModel from comfy.utils import ProgressBar @@ -293,21 +294,36 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) p = self.transformer.config.patch_size - p_t = self.transformer.config.patch_size_t or 1 + p_t = self.transformer.config.patch_size_t - base_size_width = self.transformer.config.sample_width // p - base_size_height = self.transformer.config.sample_height // p - base_num_frames = (num_frames + p_t - 1) // p_t - - grid_crops_coords = get_resize_crop_region_for_grid( - (grid_height, grid_width), base_size_width, base_size_height - ) - freqs_cos, freqs_sin = get_3d_rotary_pos_embed( - embed_dim=self.transformer.config.attention_head_dim, - crops_coords=grid_crops_coords, - grid_size=(grid_height, grid_width), - temporal_size=base_num_frames - ) + if p_t is None: + # CogVideoX 1.0 I2V + base_size_width = self.transformer.config.sample_width // p + base_size_height = self.transformer.config.sample_height // p + + grid_crops_coords = get_resize_crop_region_for_grid( + (grid_height, grid_width), base_size_width, base_size_height + ) + freqs_cos, freqs_sin = get_3d_rotary_pos_embed( + embed_dim=self.transformer.config.attention_head_dim, + crops_coords=grid_crops_coords, + grid_size=(grid_height, grid_width), + temporal_size=num_frames, + ) + else: + # CogVideoX 1.5 I2V + base_size_width = self.transformer.config.sample_width // p + base_size_height = self.transformer.config.sample_height // p + base_num_frames = (num_frames + p_t - 1) // p_t + + freqs_cos, freqs_sin = get_3d_rotary_pos_embed( + embed_dim=self.transformer.config.attention_head_dim, + crops_coords=None, + grid_size=(grid_height, grid_width), + temporal_size=base_num_frames, + grid_type="slice", + max_size=(base_size_height, base_size_width), + ) freqs_cos = freqs_cos.to(device=device) freqs_sin = freqs_sin.to(device=device) @@ -532,7 +548,7 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) - # 7. 
context schedule and temporal tiling
+        # 7. context schedule
         if context_schedule is not None:
             if image_cond_latents is not None:
                 raise NotImplementedError("Context schedule not currently supported with image conditioning")
@@ -544,7 +560,7 @@
         else:
             use_context_schedule = False
-            logger.info("Temporal tiling and context schedule disabled")
+            logger.info("Context schedule disabled")
         # 7.5. Create rotary embeds if required
         image_rotary_emb = (
             self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
diff --git a/requirements.txt b/requirements.txt
index 2b24b6a..8ab8109 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,4 +2,5 @@ huggingface_hub
 diffusers>=0.31.0
 accelerate>=0.33.0
 einops
-peft
\ No newline at end of file
+peft
+opencv-python
\ No newline at end of file

From bececf01897be874ec41078a80d76662545b2f3e Mon Sep 17 00:00:00 2001
From: kijai <40791699+kijai@users.noreply.github.com>
Date: Sat, 16 Nov 2024 17:32:31 +0200
Subject: [PATCH 38/49] some experimental optimizations

---
 custom_cogvideox_transformer_3d.py | 18 ++++--
 model_loading.py                   | 91 +++++++++++++++++++++++++++---
 nodes.py                           |  4 +-
 3 files changed, 98 insertions(+), 15 deletions(-)

diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py
index 12633b1..20615be 100644
--- a/custom_cogvideox_transformer_3d.py
+++ b/custom_cogvideox_transformer_3d.py
@@ -93,9 +93,14 @@ class CogVideoXAttnProcessor2_0:
             attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
             attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
 
-        query = attn.to_q(hidden_states)
-        key = attn.to_k(hidden_states)
-        value = attn.to_v(hidden_states)
+        if attention_mode not in ("fused_sdpa", "fused_sageattn"):
+            query = attn.to_q(hidden_states)
+            key = attn.to_k(hidden_states)
+            value = attn.to_v(hidden_states)
+        else:
+            qkv = attn.to_qkv(hidden_states)
+            split_size = qkv.shape[-1] // 3
+            query, key, value = torch.split(qkv, split_size, dim=-1)
 
         inner_dim = key.shape[-1]
         head_dim = inner_dim // attn.heads
@@ -240,13 +245,14 @@ class CogVideoXBlock(nn.Module):
         fastercache_start_step=15,
         fastercache_device="cuda:0",
     ) -> torch.Tensor:
-
+        #print("hidden_states in block: ", hidden_states.shape) #1.5: torch.Size([2, 3200, 3072]) 1.0: torch.Size([2, 6400, 3072])
         text_seq_length = encoder_hidden_states.size(1)
 
         # norm & modulate
         norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1(
             hidden_states, encoder_hidden_states, temb
         )
+        #print("norm_hidden_states in block: ", norm_hidden_states.shape) #torch.Size([2, 3200, 3072])
 
         # Tora Motion-guidance Fuser
         if video_flow_feature is not None:
@@ -587,13 +593,17 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin):
         # 2.
Patch embedding p = self.config.patch_size p_t = self.config.patch_size_t + + #print("hidden_states before patch_embedding", hidden_states.shape) #torch.Size([2, 4, 16, 60, 90]) hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) + #print("hidden_states after patch_embedding", hidden_states.shape) #1.5: torch.Size([2, 2926, 3072]) #1.0: torch.Size([2, 5626, 3072]) hidden_states = self.embedding_dropout(hidden_states) text_seq_length = encoder_hidden_states.shape[1] encoder_hidden_states = hidden_states[:, :text_seq_length] hidden_states = hidden_states[:, text_seq_length:] + #print("hidden_states after split", hidden_states.shape) #1.5: torch.Size([2, 2700, 3072]) #1.0: torch.Size([2, 5400, 3072]) if self.use_fastercache: self.fastercache_counter+=1 diff --git a/model_loading.py b/model_loading.py index 45b6c1b..387de3f 100644 --- a/model_loading.py +++ b/model_loading.py @@ -89,13 +89,13 @@ class DownloadAndLoadCogVideoModel: "precision": (["fp16", "fp32", "bf16"], {"default": "bf16", "tooltip": "official recommendation is that 2b model should be fp16, 5b model should be bf16"} ), - "fp8_transformer": (['disabled', 'enabled', 'fastmode'], {"default": 'disabled', "tooltip": "enabled casts the transformer to torch.float8_e4m3fn, fastmode is only for latest nvidia GPUs and requires torch 2.4.0 and cu124 minimum"}), + "fp8_transformer": (['disabled', 'enabled', 'fastmode', 'torchao_fp8dq', "torchao_fp6"], {"default": 'disabled', "tooltip": "enabled casts the transformer to torch.float8_e4m3fn, fastmode is only for latest nvidia GPUs and requires torch 2.4.0 and cu124 minimum"}), "compile": (["disabled","onediff","torch"], {"tooltip": "compile the model for faster inference, these are advanced options only available on Linux, see readme for more info"}), "enable_sequential_cpu_offload": ("BOOLEAN", {"default": False, "tooltip": "significantly reducing memory usage and slows down the inference"}), "block_edit": ("TRANSFORMERBLOCKS", {"default": None}), "lora": ("COGLORA", {"default": None}), "compile_args":("COMPILEARGS", ), - "attention_mode": (["sdpa", "sageattn"], {"default": "sdpa"}), + "attention_mode": (["sdpa", "sageattn", "fused_sdpa", "fused_sageattn"], {"default": "sdpa"}), "load_device": (["main_device", "offload_device"], {"default": "main_device"}), } } @@ -111,10 +111,11 @@ class DownloadAndLoadCogVideoModel: attention_mode="sdpa", load_device="main_device"): if precision == "fp16" and "1.5" in model: - raise ValueError("1.5 models do not work in fp16") + raise ValueError("1.5 models do not currently work in fp16") device = mm.get_torch_device() offload_device = mm.unet_offload_device() + manual_offloading = True transformer_load_device = device if load_device == "main_device" else offload_device mm.soft_empty_cache() @@ -189,7 +190,6 @@ class DownloadAndLoadCogVideoModel: transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder=subfolder) transformer = transformer.to(dtype).to(transformer_load_device) - transformer.attention_mode = attention_mode if "1.5" in model: transformer.config.sample_height = 300 @@ -202,7 +202,7 @@ class DownloadAndLoadCogVideoModel: scheduler_config = json.load(f) scheduler = CogVideoXDDIMScheduler.from_config(scheduler_config) - #VAE + # VAE if "Fun" in model: vae = AutoencoderKLCogVideoXFun.from_pretrained(base_path, subfolder="vae").to(dtype).to(offload_device) if "Pose" in model: @@ -263,13 +263,75 @@ class DownloadAndLoadCogVideoModel: if "1.5" in model: params_to_keep.update({"ff"}) #otherwise NaNs 
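            # Assumed behaviour of the helper called below: convert_fp8_linear casts Linear
            # weights to torch.float8_e4m3fn, skipping any module whose name matches
            # params_to_keep; "ff" is kept in the working dtype for 1.5 models because fp8
            # feed-forward weights were observed to produce NaNs.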
convert_fp8_linear(pipe.transformer, dtype, params_to_keep=params_to_keep) + + elif "torchao" in fp8_transformer: + try: + from torchao.quantization import ( + quantize_, + fpx_weight_only, + float8_dynamic_activation_float8_weight + ) + except: + raise ImportError("torchao is not installed, please install torchao to use fp8dq") + + def filter_fn(module: nn.Module, fqn: str) -> bool: + target_submodules = {'attn1', 'ff'} # avoid norm layers, 1.5 at least won't work with quantized norm1 #todo: test other models + if any(sub in fqn for sub in target_submodules): + return isinstance(module, nn.Linear) + return False + + if "fp6" in fp8_transformer: #slower for some reason on 4090 + quant_func = fpx_weight_only(3, 2) + elif "fp8dq" in fp8_transformer: #very fast on 4090 when compiled + quant_func = float8_dynamic_activation_float8_weight() + + for i, block in enumerate(pipe.transformer.transformer_blocks): + if "CogVideoXBlock" in str(block): + quantize_(block, quant_func, filter_fn=filter_fn) + + manual_offloading = False # to disable manual .to(device) calls if enable_sequential_cpu_offload: pipe.enable_sequential_cpu_offload() + manual_offloading = False + + # CogVideoXBlock( + # (norm1): CogVideoXLayerNormZero( + # (silu): SiLU() + # (linear): Linear(in_features=512, out_features=18432, bias=True) + # (norm): LayerNorm((3072,), eps=1e-05, elementwise_affine=True) + # ) + # (attn1): Attention( + # (norm_q): LayerNorm((64,), eps=1e-06, elementwise_affine=True) + # (norm_k): LayerNorm((64,), eps=1e-06, elementwise_affine=True) + # (to_q): Linear(in_features=3072, out_features=3072, bias=True) + # (to_k): Linear(in_features=3072, out_features=3072, bias=True) + # (to_v): Linear(in_features=3072, out_features=3072, bias=True) + # (to_out): ModuleList( + # (0): Linear(in_features=3072, out_features=3072, bias=True) + # (1): Dropout(p=0.0, inplace=False) + # ) + # ) + # (norm2): CogVideoXLayerNormZero( + # (silu): SiLU() + # (linear): Linear(in_features=512, out_features=18432, bias=True) + # (norm): LayerNorm((3072,), eps=1e-05, elementwise_affine=True) + # ) + # (ff): FeedForward( + # (net): ModuleList( + # (0): GELU( + # (proj): Linear(in_features=3072, out_features=12288, bias=True) + # ) + # (1): Dropout(p=0.0, inplace=False) + # (2): Linear(in_features=12288, out_features=3072, bias=True) + # (3): Dropout(p=0.0, inplace=False) + # ) + # ) + # ) # compilation if compile == "torch": - pipe.transformer.to(memory_format=torch.channels_last) + #pipe.transformer.to(memory_format=torch.channels_last) if compile_args is not None: torch._dynamo.config.cache_size_limit = compile_args["dynamo_cache_size_limit"] for i, block in enumerate(pipe.transformer.transformer_blocks): @@ -279,7 +341,16 @@ class DownloadAndLoadCogVideoModel: for i, block in enumerate(pipe.transformer.transformer_blocks): if "CogVideoXBlock" in str(block): pipe.transformer.transformer_blocks[i] = torch.compile(block, fullgraph=False, dynamic=False, backend="inductor") - + + transformer.attention_mode = attention_mode + + if "fused" in attention_mode: + from diffusers.models.attention import Attention + transformer.fuse_qkv_projections = True + for module in transformer.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + elif compile == "onediff": from onediffx import compile_pipe os.environ['NEXFORT_FX_FORCE_TRITON_SDPA'] = '1' @@ -298,8 +369,9 @@ class DownloadAndLoadCogVideoModel: "base_path": base_path, "onediff": True if compile == "onediff" else False, "cpu_offloading": 
enable_sequential_cpu_offload, + "manual_offloading": manual_offloading, "scheduler_config": scheduler_config, - "model_name": model + "model_name": model, } return (pipeline,) @@ -515,7 +587,8 @@ class DownloadAndLoadCogVideoGGUFModel: "onediff": False, "cpu_offloading": enable_sequential_cpu_offload, "scheduler_config": scheduler_config, - "model_name": model + "model_name": model, + "manual_offloading": True, } return (pipeline,) diff --git a/nodes.py b/nodes.py index 8feba51..8d7257e 100644 --- a/nodes.py +++ b/nodes.py @@ -819,7 +819,7 @@ class CogVideoSampler: dtype = pipeline["dtype"] scheduler_config = pipeline["scheduler_config"] - if not pipeline["cpu_offloading"]: + if not pipeline["cpu_offloading"] and pipeline["manual_offloading"]: pipe.transformer.to(device) generator = torch.Generator(device=torch.device("cpu")).manual_seed(seed) @@ -890,7 +890,7 @@ class CogVideoSampler: controlnet=controlnet, tora=tora_trajectory if tora_trajectory is not None else None, ) - if not pipeline["cpu_offloading"]: + if not pipeline["cpu_offloading"] and pipeline["manual_offloading"]: pipe.transformer.to(offload_device) if fastercache is not None: From f21432bea118697cd3755ea73bdda19a38857a34 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sat, 16 Nov 2024 19:36:13 +0200 Subject: [PATCH 39/49] tile decode by default --- nodes.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/nodes.py b/nodes.py index 8d7257e..6350e98 100644 --- a/nodes.py +++ b/nodes.py @@ -945,7 +945,7 @@ class CogVideoDecode: return {"required": { "pipeline": ("COGVIDEOPIPE",), "samples": ("LATENT", ), - "enable_vae_tiling": ("BOOLEAN", {"default": False, "tooltip": "Drastically reduces memory use but may introduce seams"}), + "enable_vae_tiling": ("BOOLEAN", {"default": True, "tooltip": "Drastically reduces memory use but may introduce seams"}), }, "optional": { "tile_sample_min_height": ("INT", {"default": 240, "min": 16, "max": 2048, "step": 8, "tooltip": "Minimum tile height, default is half the height"}), @@ -995,7 +995,14 @@ class CogVideoDecode: except: pass - frames = vae.decode(latents[:, :, additional_frames:]).sample + try: + frames = vae.decode(latents[:, :, additional_frames:]).sample + except: + mm.soft_empty_cache() + log.warning("Failed to decode, retrying with tiling") + vae.enable_tiling() + frames = vae.decode(latents[:, :, additional_frames:]).sample + vae.disable_tiling() if not pipeline["cpu_offloading"]: vae.to(offload_device) From 43742731381348c2b84fee029b69422597180784 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sat, 16 Nov 2024 22:18:12 +0200 Subject: [PATCH 40/49] possible compile fixes --- model_loading.py | 36 ++++++++++++++++++++++++++++++++++-- nodes.py | 13 +++++++++++-- utils.py | 12 ++++++++++-- 3 files changed, 55 insertions(+), 6 deletions(-) diff --git a/model_loading.py b/model_loading.py index 387de3f..d1482d3 100644 --- a/model_loading.py +++ b/model_loading.py @@ -1,9 +1,41 @@ import os -import torch -import torch.nn as nn import json import folder_paths import comfy.model_management as mm +from typing import Union + +def patched_write_atomic( + path_: str, + content: Union[str, bytes], + make_dirs: bool = False, + encode_utf_8: bool = False, +) -> None: + # Write into temporary file first to avoid conflicts between threads + # Avoid using a named temporary file, as those have restricted permissions + from pathlib import Path + import os + import shutil + import threading + 
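    # NOTE: the upstream torch._inductor write_atomic renames the temp file into
    # place, which fails on Windows because os.rename will not overwrite an
    # existing file; copying with shutil.copy2 and then removing the temp file
    # lets existing compile-cache files be overwritten instead.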
assert isinstance(
+        content, (str, bytes)
+    ), "Only strings and byte arrays can be saved in the cache"
+    path = Path(path_)
+    if make_dirs:
+        path.parent.mkdir(parents=True, exist_ok=True)
+    tmp_path = path.parent / f".{os.getpid()}.{threading.get_ident()}.tmp"
+    write_mode = "w" if isinstance(content, str) else "wb"
+    with tmp_path.open(write_mode, encoding="utf-8" if encode_utf_8 else None) as f:
+        f.write(content)
+    shutil.copy2(src=tmp_path, dst=path) #changed to allow overwriting cache files
+    os.remove(tmp_path)
+try:
+    import torch._inductor.codecache
+    torch._inductor.codecache.write_atomic = patched_write_atomic
+except:
+    pass
+
+import torch
+import torch.nn as nn

 from diffusers.models import AutoencoderKLCogVideoX
 from diffusers.schedulers import CogVideoXDDIMScheduler
diff --git a/nodes.py b/nodes.py
index 6350e98..dd9589a 100644
--- a/nodes.py
+++ b/nodes.py
@@ -5,7 +5,7 @@ import comfy.model_management as mm
 from einops import rearrange
 from contextlib import nullcontext

-from .utils import log, check_diffusers_version
+from .utils import log, check_diffusers_version, print_memory
 check_diffusers_version()
 from diffusers.schedulers import (
     CogVideoXDDIMScheduler,
@@ -864,6 +864,10 @@ class CogVideoSampler:
         # if sigmas is not None:
         #     sigma_list = sigmas.tolist()

+        try:
+            torch.cuda.reset_peak_memory_stats(device)
+        except:
+            pass

         autocastcondition = not pipeline["onediff"] or not dtype == torch.float32
         autocast_context = torch.autocast(mm.get_autocast_device(device), dtype=dtype) if autocastcondition else nullcontext()
@@ -898,8 +902,13 @@ class CogVideoSampler:
             if hasattr(block, "cached_hidden_states") and block.cached_hidden_states is not None:
                 block.cached_hidden_states = None
                 block.cached_encoder_hidden_states = None
-
+
+        print_memory(device)
         mm.soft_empty_cache()
+        try:
+            torch.cuda.reset_peak_memory_stats(device)
+        except:
+            pass

         return (pipeline, {"samples": latents})

diff --git a/utils.py b/utils.py
index e3c6fd4..d667097 100644
--- a/utils.py
+++ b/utils.py
@@ -1,5 +1,5 @@
 import importlib.metadata
-
+import torch
 import logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 log = logging.getLogger(__name__)
@@ -19,4 +19,12 @@ def remove_specific_blocks(model, block_indices_to_remove):
     new_blocks = [block for i, block in enumerate(transformer_blocks) if i not in block_indices_to_remove]
     model.transformer_blocks = nn.ModuleList(new_blocks)

-    return model
\ No newline at end of file
+    return model
+
+def print_memory(device):
+    memory = torch.cuda.memory_allocated(device) / 1024**3
+    max_memory = torch.cuda.max_memory_allocated(device) / 1024**3
+    max_reserved = torch.cuda.max_memory_reserved(device) / 1024**3
+    log.info(f"Allocated memory: {memory=:.3f} GB")
+    log.info(f"Max allocated memory: {max_memory=:.3f} GB")
+    log.info(f"Max reserved memory: {max_reserved=:.3f} GB")
\ No newline at end of file

From 15aa68c95ddecd304b56677687302040b2258ec7 Mon Sep 17 00:00:00 2001
From: kijai <40791699+kijai@users.noreply.github.com>
Date: Sun, 17 Nov 2024 00:48:01 +0200
Subject: [PATCH 41/49] update

---
 cogvideox_fun/pipeline_cogvideox_inpaint.py |  5 +++++
 model_loading.py                            | 12 +++++++++---
 nodes.py                                    | 12 ++++--------
 3 files changed, 18 insertions(+), 11 deletions(-)

diff --git a/cogvideox_fun/pipeline_cogvideox_inpaint.py b/cogvideox_fun/pipeline_cogvideox_inpaint.py
index 7b9d8e7..a6f0e9e 100644
--- a/cogvideox_fun/pipeline_cogvideox_inpaint.py
+++ b/cogvideox_fun/pipeline_cogvideox_inpaint.py
@@ -739,6 +739,8 @@ class
CogVideoX_Fun_Pipeline_Inpaint(DiffusionPipeline):
         num_channels_transformer = self.transformer.config.in_channels
         return_image_latents = num_channels_transformer == num_channels_latents

+        self.vae.to(device)
+
         latents_outputs = self.prepare_latents(
             batch_size * num_videos_per_prompt,
             num_channels_latents,
@@ -840,6 +842,9 @@ class CogVideoX_Fun_Pipeline_Inpaint(DiffusionPipeline):
                 mask = rearrange(mask, "b c f h w -> b f c h w")

             inpaint_latents = None
+
+        self.vae.to(torch.device("cpu"))
+
         if comfyui_progressbar:
             pbar.update(1)

diff --git a/model_loading.py b/model_loading.py
index d1482d3..e627351 100644
--- a/model_loading.py
+++ b/model_loading.py
@@ -121,7 +121,7 @@ class DownloadAndLoadCogVideoModel:
             "precision": (["fp16", "fp32", "bf16"],
                     {"default": "bf16", "tooltip": "official recommendation is that 2b model should be fp16, 5b model should be bf16"}
                 ),
-                "fp8_transformer": (['disabled', 'enabled', 'fastmode', 'torchao_fp8dq', "torchao_fp6"], {"default": 'disabled', "tooltip": "enabled casts the transformer to torch.float8_e4m3fn, fastmode is only for latest nvidia GPUs and requires torch 2.4.0 and cu124 minimum"}),
+                "fp8_transformer": (['disabled', 'enabled', 'fastmode', 'torchao_fp8dq', "torchao_fp8dqrow", "torchao_int8dq", "torchao_fp6"], {"default": 'disabled', "tooltip": "enabled casts the transformer to torch.float8_e4m3fn, fastmode is only for latest nvidia GPUs and requires torch 2.4.0 and cu124 minimum"}),
                 "compile": (["disabled","onediff","torch"], {"tooltip": "compile the model for faster inference, these are advanced options only available on Linux, see readme for more info"}),
                 "enable_sequential_cpu_offload": ("BOOLEAN", {"default": False, "tooltip": "significantly reducing memory usage and slows down the inference"}),
                 "block_edit": ("TRANSFORMERBLOCKS", {"default": None}),
@@ -301,7 +301,8 @@ class DownloadAndLoadCogVideoModel:
                 from torchao.quantization import (
                     quantize_,
                     fpx_weight_only,
-                    float8_dynamic_activation_float8_weight
+                    float8_dynamic_activation_float8_weight,
+                    int8_dynamic_activation_int8_weight
                 )
             except:
                 raise ImportError("torchao is not installed, please install torchao to use fp8dq")
@@ -316,11 +317,16 @@ class DownloadAndLoadCogVideoModel:
                 quant_func = fpx_weight_only(3, 2)
+            elif 'fp8dqrow' in fp8_transformer: #checked before "fp8dq", which is a substring of this option name
+                from torchao.quantization.quant_api import PerRow
+                quant_func = float8_dynamic_activation_float8_weight(granularity=PerRow())
             elif "fp8dq" in fp8_transformer: #very fast on 4090 when compiled
                 quant_func = float8_dynamic_activation_float8_weight()
+            elif 'int8dq' in fp8_transformer:
+                quant_func = int8_dynamic_activation_int8_weight()

             for i, block in enumerate(pipe.transformer.transformer_blocks):
                 if "CogVideoXBlock" in str(block):
                     quantize_(block, quant_func, filter_fn=filter_fn)
-
+
             manual_offloading = False # to disable manual .to(device) calls

         if enable_sequential_cpu_offload:
diff --git a/nodes.py b/nodes.py
index dd9589a..b18a978 100644
--- a/nodes.py
+++ b/nodes.py
@@ -1100,10 +1100,6 @@ class CogVideoXFunSampler:
         base_path = pipeline["base_path"]
         assert "fun" in base_path.lower(), "'Unfun' models not supported in 'CogVideoXFunSampler', use the 'CogVideoSampler'"
         assert "pose" not in base_path.lower(), "'Pose' models not supported in 'CogVideoXFunSampler', use the 'CogVideoXFunControlSampler'"
-
-
-        if not pipeline["cpu_offloading"]:
-            pipe.enable_model_cpu_offload(device=device)

         mm.soft_empty_cache()

@@ -1123,8 +1119,8 @@ class CogVideoXFunSampler:
         else:
             raise ValueError(f"Unknown scheduler: {scheduler}")

-        #if not
pipeline["cpu_offloading"]: - # pipe.transformer.to(device) + if not pipeline["cpu_offloading"] and pipeline["manual_offloading"]: + pipe.transformer.to(device) if context_options is not None: context_frames = context_options["context_frames"] // 4 @@ -1184,8 +1180,8 @@ class CogVideoXFunSampler: noise_aug_strength = noise_aug_strength, strength = vid2vid_denoise, ) - #if not pipeline["cpu_offloading"]: - # pipe.transformer.to(offload_device) + if not pipeline["cpu_offloading"] and pipeline["manual_offloading"]: + pipe.transformer.to(offload_device) #clear FasterCache if fastercache is not None: for block in pipe.transformer.transformer_blocks: From eebdc412f910cef06bcd43d84fe4a164d37e4e79 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sun, 17 Nov 2024 21:43:53 +0200 Subject: [PATCH 42/49] fix sageattention --- custom_cogvideox_transformer_3d.py | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index 20615be..ba9f037 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -121,10 +121,7 @@ class CogVideoXAttnProcessor2_0: key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) if attention_mode == "sageattn": - if SAGEATTN_IS_AVAILABLE: - hidden_states = sageattn(query, key, value, attn_mask=attention_mask, dropout_p=0.0,is_causal=False) - else: - raise ImportError("sageattn not found") + hidden_states = sageattn(query, key, value, attn_mask=attention_mask, dropout_p=0.0,is_causal=False) else: hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False @@ -198,7 +195,6 @@ class CogVideoXBlock(nn.Module): ff_inner_dim: Optional[int] = None, ff_bias: bool = True, attention_out_bias: bool = True, - attention_mode: Optional[str] = None, ): super().__init__() @@ -230,7 +226,6 @@ class CogVideoXBlock(nn.Module): ) self.cached_hidden_states = [] self.cached_encoder_hidden_states = [] - self.attention_mode = attention_mode def forward( self, @@ -244,6 +239,7 @@ class CogVideoXBlock(nn.Module): fastercache_counter=0, fastercache_start_step=15, fastercache_device="cuda:0", + attention_mode="sdpa", ) -> torch.Tensor: #print("hidden_states in block: ", hidden_states.shape) #1.5: torch.Size([2, 3200, 3072]) 10.: torch.Size([2, 6400, 3072]) text_seq_length = encoder_hidden_states.size(1) @@ -282,7 +278,7 @@ class CogVideoXBlock(nn.Module): hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, - attention_mode=self.attention_mode, + attention_mode=attention_mode, ) if fastercache_counter == fastercache_start_step: self.cached_hidden_states = [attn_hidden_states.to(fastercache_device), attn_hidden_states.to(fastercache_device)] @@ -295,7 +291,7 @@ class CogVideoXBlock(nn.Module): hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states, image_rotary_emb=image_rotary_emb, - attention_mode=self.attention_mode, + attention_mode=attention_mode, ) hidden_states = hidden_states + gate_msa * attn_hidden_states @@ -404,7 +400,6 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): use_rotary_positional_embeddings: bool = False, use_learned_positional_embeddings: bool = False, patch_bias: bool = True, - attention_mode: Optional[str] = None, ): super().__init__() inner_dim = num_attention_heads * attention_head_dim 
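The hunks above and below move attention_mode out of per-block constructor state and instead thread it through each block's forward call, so a single attribute on the transformer drives every block. A minimal, self-contained sketch of that pattern follows; the class names and shapes are illustrative, not the wrapper's actual modules:

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class Block(nn.Module):
        def __init__(self, dim: int):
            super().__init__()
            self.qkv = nn.Linear(dim, dim * 3)

        def forward(self, x: torch.Tensor, attention_mode: str = "sdpa") -> torch.Tensor:
            # the backend choice arrives per call instead of living on the module,
            # keeping the block's signature explicit about what affects its output
            q, k, v = self.qkv(x).chunk(3, dim=-1)
            if attention_mode == "sdpa":
                return F.scaled_dot_product_attention(
                    q.unsqueeze(1), k.unsqueeze(1), v.unsqueeze(1)
                ).squeeze(1)
            raise ValueError(f"unknown attention_mode: {attention_mode}")

    class Model(nn.Module):
        def __init__(self, dim: int = 64, depth: int = 2):
            super().__init__()
            self.blocks = nn.ModuleList(Block(dim) for _ in range(depth))
            self.attention_mode = "sdpa"  # set once by the loader

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            for block in self.blocks:
                x = block(x, attention_mode=self.attention_mode)
            return x

    print(Model()(torch.randn(2, 16, 64)).shape)  # torch.Size([2, 16, 64])
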
@@ -493,7 +488,8 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): self.fastercache_hf_step = 30 self.fastercache_device = "cuda" self.fastercache_num_blocks_to_cache = len(self.transformer_blocks) - self.attention_mode = attention_mode + self.attention_mode = "sdpa" + def _set_gradient_checkpointing(self, module, value=False): self.gradient_checkpointing = value @@ -620,7 +616,8 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): block_use_fastercache = i <= self.fastercache_num_blocks_to_cache, fastercache_counter = self.fastercache_counter, fastercache_start_step = self.fastercache_start_step, - fastercache_device = self.fastercache_device + fastercache_device = self.fastercache_device, + attention_mode = self.attention_mode ) if (controlnet_states is not None) and (i < len(controlnet_states)): @@ -690,7 +687,8 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): block_use_fastercache = i <= self.fastercache_num_blocks_to_cache, fastercache_counter = self.fastercache_counter, fastercache_start_step = self.fastercache_start_step, - fastercache_device = self.fastercache_device + fastercache_device = self.fastercache_device, + attention_mode = self.attention_mode ) #has_nan = torch.isnan(hidden_states).any() #if has_nan: From e70da23ac2b4724624537e503b0cdaf93d24a74e Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sun, 17 Nov 2024 22:11:32 +0200 Subject: [PATCH 43/49] exclude sageattn from compile --- custom_cogvideox_transformer_3d.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index ba9f037..a228219 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -40,10 +40,15 @@ logger = logging.get_logger(__name__) # pylint: disable=invalid-name try: from sageattention import sageattn + SAGEATTN_IS_AVAILABLE = True except: SAGEATTN_IS_AVAILABLE = False +@torch.compiler.disable() +def sageattn_func(query, key, value, attn_mask=None, dropout_p=0.0,is_causal=False): + return sageattn(query, key, value, attn_mask=attn_mask, dropout_p=dropout_p,is_causal=is_causal) + def fft(tensor): tensor_fft = torch.fft.fft2(tensor) tensor_fft_shifted = torch.fft.fftshift(tensor_fft) @@ -121,7 +126,7 @@ class CogVideoXAttnProcessor2_0: key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) if attention_mode == "sageattn": - hidden_states = sageattn(query, key, value, attn_mask=attention_mask, dropout_p=0.0,is_causal=False) + hidden_states = sageattn_func(query, key, value, attn_mask=attention_mask, dropout_p=0.0,is_causal=False) else: hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False From 6f9e4ff6477d51ef29e2f7eea9ff2bbd6986b007 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Sun, 17 Nov 2024 22:23:40 +0200 Subject: [PATCH 44/49] Update custom_cogvideox_transformer_3d.py --- custom_cogvideox_transformer_3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index a228219..50b0f25 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -125,7 +125,7 @@ class CogVideoXAttnProcessor2_0: if not attn.is_cross_attention: key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, 
text_seq_length:], image_rotary_emb) - if attention_mode == "sageattn": + if attention_mode == "sageattn" or attention_mode == "fused_sageattn": hidden_states = sageattn_func(query, key, value, attn_mask=attention_mask, dropout_p=0.0,is_causal=False) else: hidden_states = F.scaled_dot_product_attention( From a7646c0d6ff3a147299082a5fc6050af326bde02 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 19 Nov 2024 03:04:22 +0200 Subject: [PATCH 45/49] refactor - unify all pipelines into one - unify transformer model into one - separate VAE - add single file model loading --- .gitignore | 3 +- cogvideox_fun/autoencoder_magvit.py | 1296 ------------------- cogvideox_fun/pipeline_cogvideox_control.py | 866 ------------- cogvideox_fun/pipeline_cogvideox_inpaint.py | 1037 --------------- cogvideox_fun/transformer_3d.py | 823 ------------ cogvideox_fun/utils.py | 138 +- cogvideox_fun/context.py => context.py | 0 convert_weight_sat2hf.py | 303 ----- custom_cogvideox_transformer_3d.py | 1 - model_loading.py | 432 +++++-- nodes.py | 879 +++---------- pipeline_cogvideox.py | 157 +-- pyproject.toml | 4 +- 13 files changed, 594 insertions(+), 5345 deletions(-) delete mode 100644 cogvideox_fun/autoencoder_magvit.py delete mode 100644 cogvideox_fun/pipeline_cogvideox_control.py delete mode 100644 cogvideox_fun/pipeline_cogvideox_inpaint.py delete mode 100644 cogvideox_fun/transformer_3d.py rename cogvideox_fun/context.py => context.py (100%) delete mode 100644 convert_weight_sat2hf.py diff --git a/.gitignore b/.gitignore index da24bdf..d75870d 100644 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,5 @@ master_ip logs/ *.DS_Store .idea -*.pt \ No newline at end of file +*.pt +tools/ \ No newline at end of file diff --git a/cogvideox_fun/autoencoder_magvit.py b/cogvideox_fun/autoencoder_magvit.py deleted file mode 100644 index 9c2b906..0000000 --- a/cogvideox_fun/autoencoder_magvit.py +++ /dev/null @@ -1,1296 +0,0 @@ -# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import Optional, Tuple, Union - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F - -from diffusers.configuration_utils import ConfigMixin, register_to_config -from diffusers.loaders.single_file_model import FromOriginalModelMixin -from diffusers.utils import logging -from diffusers.utils.accelerate_utils import apply_forward_hook -from diffusers.models.activations import get_activation -from diffusers.models.downsampling import CogVideoXDownsample3D -from diffusers.models.modeling_outputs import AutoencoderKLOutput -from diffusers.models.modeling_utils import ModelMixin -from diffusers.models.upsampling import CogVideoXUpsample3D -from diffusers.models.autoencoders.vae import DecoderOutput, DiagonalGaussianDistribution - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -class CogVideoXSafeConv3d(nn.Conv3d): - r""" - A 3D convolution layer that splits the input tensor into smaller parts to avoid OOM in CogVideoX Model. - """ - - def forward(self, input: torch.Tensor) -> torch.Tensor: - memory_count = torch.prod(torch.tensor(input.shape)).item() * 2 / 1024**3 - - # Set to 2GB, suitable for CuDNN - if memory_count > 2: - kernel_size = self.kernel_size[0] - part_num = int(memory_count / 2) + 1 - input_chunks = torch.chunk(input, part_num, dim=2) - - if kernel_size > 1: - input_chunks = [input_chunks[0]] + [ - torch.cat((input_chunks[i - 1][:, :, -kernel_size + 1 :], input_chunks[i]), dim=2) - for i in range(1, len(input_chunks)) - ] - - output_chunks = [] - for input_chunk in input_chunks: - output_chunks.append(super().forward(input_chunk)) - output = torch.cat(output_chunks, dim=2) - return output - else: - return super().forward(input) - - -class CogVideoXCausalConv3d(nn.Module): - r"""A 3D causal convolution layer that pads the input tensor to ensure causality in CogVideoX Model. - - Args: - in_channels (`int`): Number of channels in the input tensor. - out_channels (`int`): Number of output channels produced by the convolution. - kernel_size (`int` or `Tuple[int, int, int]`): Kernel size of the convolutional kernel. - stride (`int`, defaults to `1`): Stride of the convolution. - dilation (`int`, defaults to `1`): Dilation rate of the convolution. - pad_mode (`str`, defaults to `"constant"`): Padding mode. 
- """ - - def __init__( - self, - in_channels: int, - out_channels: int, - kernel_size: Union[int, Tuple[int, int, int]], - stride: int = 1, - dilation: int = 1, - pad_mode: str = "constant", - ): - super().__init__() - - if isinstance(kernel_size, int): - kernel_size = (kernel_size,) * 3 - - time_kernel_size, height_kernel_size, width_kernel_size = kernel_size - - self.pad_mode = pad_mode - time_pad = dilation * (time_kernel_size - 1) + (1 - stride) - height_pad = height_kernel_size // 2 - width_pad = width_kernel_size // 2 - - self.height_pad = height_pad - self.width_pad = width_pad - self.time_pad = time_pad - self.time_causal_padding = (width_pad, width_pad, height_pad, height_pad, time_pad, 0) - - self.temporal_dim = 2 - self.time_kernel_size = time_kernel_size - - stride = (stride, 1, 1) - dilation = (dilation, 1, 1) - self.conv = CogVideoXSafeConv3d( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - dilation=dilation, - ) - - self.conv_cache = None - - def fake_context_parallel_forward(self, inputs: torch.Tensor) -> torch.Tensor: - kernel_size = self.time_kernel_size - if kernel_size > 1: - cached_inputs = ( - [self.conv_cache] if self.conv_cache is not None else [inputs[:, :, :1]] * (kernel_size - 1) - ) - inputs = torch.cat(cached_inputs + [inputs], dim=2) - return inputs - - def _clear_fake_context_parallel_cache(self): - del self.conv_cache - self.conv_cache = None - - def forward(self, inputs: torch.Tensor) -> torch.Tensor: - inputs = self.fake_context_parallel_forward(inputs) - - self._clear_fake_context_parallel_cache() - # Note: we could move these to the cpu for a lower maximum memory usage but its only a few - # hundred megabytes and so let's not do it for now - self.conv_cache = inputs[:, :, -self.time_kernel_size + 1 :].clone() - - padding_2d = (self.width_pad, self.width_pad, self.height_pad, self.height_pad) - inputs = F.pad(inputs, padding_2d, mode="constant", value=0) - - output = self.conv(inputs) - return output - - -class CogVideoXSpatialNorm3D(nn.Module): - r""" - Spatially conditioned normalization as defined in https://arxiv.org/abs/2209.09002. This implementation is specific - to 3D-video like data. - - CogVideoXSafeConv3d is used instead of nn.Conv3d to avoid OOM in CogVideoX Model. - - Args: - f_channels (`int`): - The number of channels for input to group normalization layer, and output of the spatial norm layer. - zq_channels (`int`): - The number of channels for the quantized vector as described in the paper. - groups (`int`): - Number of groups to separate the channels into for group normalization. 
- """ - - def __init__( - self, - f_channels: int, - zq_channels: int, - groups: int = 32, - ): - super().__init__() - self.norm_layer = nn.GroupNorm(num_channels=f_channels, num_groups=groups, eps=1e-6, affine=True) - self.conv_y = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1) - self.conv_b = CogVideoXCausalConv3d(zq_channels, f_channels, kernel_size=1, stride=1) - - def forward(self, f: torch.Tensor, zq: torch.Tensor) -> torch.Tensor: - if f.shape[2] > 1 and f.shape[2] % 2 == 1: - f_first, f_rest = f[:, :, :1], f[:, :, 1:] - f_first_size, f_rest_size = f_first.shape[-3:], f_rest.shape[-3:] - z_first, z_rest = zq[:, :, :1], zq[:, :, 1:] - z_first = F.interpolate(z_first, size=f_first_size) - z_rest = F.interpolate(z_rest, size=f_rest_size) - zq = torch.cat([z_first, z_rest], dim=2) - else: - zq = F.interpolate(zq, size=f.shape[-3:]) - - norm_f = self.norm_layer(f) - new_f = norm_f * self.conv_y(zq) + self.conv_b(zq) - return new_f - - -class CogVideoXResnetBlock3D(nn.Module): - r""" - A 3D ResNet block used in the CogVideoX model. - - Args: - in_channels (`int`): - Number of input channels. - out_channels (`int`, *optional*): - Number of output channels. If None, defaults to `in_channels`. - dropout (`float`, defaults to `0.0`): - Dropout rate. - temb_channels (`int`, defaults to `512`): - Number of time embedding channels. - groups (`int`, defaults to `32`): - Number of groups to separate the channels into for group normalization. - eps (`float`, defaults to `1e-6`): - Epsilon value for normalization layers. - non_linearity (`str`, defaults to `"swish"`): - Activation function to use. - conv_shortcut (bool, defaults to `False`): - Whether or not to use a convolution shortcut. - spatial_norm_dim (`int`, *optional*): - The dimension to use for spatial norm if it is to be used instead of group norm. - pad_mode (str, defaults to `"first"`): - Padding mode. 
- """ - - def __init__( - self, - in_channels: int, - out_channels: Optional[int] = None, - dropout: float = 0.0, - temb_channels: int = 512, - groups: int = 32, - eps: float = 1e-6, - non_linearity: str = "swish", - conv_shortcut: bool = False, - spatial_norm_dim: Optional[int] = None, - pad_mode: str = "first", - ): - super().__init__() - - out_channels = out_channels or in_channels - - self.in_channels = in_channels - self.out_channels = out_channels - self.nonlinearity = get_activation(non_linearity) - self.use_conv_shortcut = conv_shortcut - - if spatial_norm_dim is None: - self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=groups, eps=eps) - self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=groups, eps=eps) - else: - self.norm1 = CogVideoXSpatialNorm3D( - f_channels=in_channels, - zq_channels=spatial_norm_dim, - groups=groups, - ) - self.norm2 = CogVideoXSpatialNorm3D( - f_channels=out_channels, - zq_channels=spatial_norm_dim, - groups=groups, - ) - - self.conv1 = CogVideoXCausalConv3d( - in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode - ) - - if temb_channels > 0: - self.temb_proj = nn.Linear(in_features=temb_channels, out_features=out_channels) - - self.dropout = nn.Dropout(dropout) - self.conv2 = CogVideoXCausalConv3d( - in_channels=out_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode - ) - - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - self.conv_shortcut = CogVideoXCausalConv3d( - in_channels=in_channels, out_channels=out_channels, kernel_size=3, pad_mode=pad_mode - ) - else: - self.conv_shortcut = CogVideoXSafeConv3d( - in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0 - ) - - def forward( - self, - inputs: torch.Tensor, - temb: Optional[torch.Tensor] = None, - zq: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - hidden_states = inputs - - if zq is not None: - hidden_states = self.norm1(hidden_states, zq) - else: - hidden_states = self.norm1(hidden_states) - - hidden_states = self.nonlinearity(hidden_states) - hidden_states = self.conv1(hidden_states) - - if temb is not None: - hidden_states = hidden_states + self.temb_proj(self.nonlinearity(temb))[:, :, None, None, None] - - if zq is not None: - hidden_states = self.norm2(hidden_states, zq) - else: - hidden_states = self.norm2(hidden_states) - - hidden_states = self.nonlinearity(hidden_states) - hidden_states = self.dropout(hidden_states) - hidden_states = self.conv2(hidden_states) - - if self.in_channels != self.out_channels: - inputs = self.conv_shortcut(inputs) - - hidden_states = hidden_states + inputs - return hidden_states - - -class CogVideoXDownBlock3D(nn.Module): - r""" - A downsampling block used in the CogVideoX model. - - Args: - in_channels (`int`): - Number of input channels. - out_channels (`int`, *optional*): - Number of output channels. If None, defaults to `in_channels`. - temb_channels (`int`, defaults to `512`): - Number of time embedding channels. - num_layers (`int`, defaults to `1`): - Number of resnet layers. - dropout (`float`, defaults to `0.0`): - Dropout rate. - resnet_eps (`float`, defaults to `1e-6`): - Epsilon value for normalization layers. - resnet_act_fn (`str`, defaults to `"swish"`): - Activation function to use. - resnet_groups (`int`, defaults to `32`): - Number of groups to separate the channels into for group normalization. - add_downsample (`bool`, defaults to `True`): - Whether or not to use a downsampling layer. 
If not used, output dimension would be same as input dimension. - compress_time (`bool`, defaults to `False`): - Whether or not to downsample across temporal dimension. - pad_mode (str, defaults to `"first"`): - Padding mode. - """ - - _supports_gradient_checkpointing = True - - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - add_downsample: bool = True, - downsample_padding: int = 0, - compress_time: bool = False, - pad_mode: str = "first", - ): - super().__init__() - - resnets = [] - for i in range(num_layers): - in_channel = in_channels if i == 0 else out_channels - resnets.append( - CogVideoXResnetBlock3D( - in_channels=in_channel, - out_channels=out_channels, - dropout=dropout, - temb_channels=temb_channels, - groups=resnet_groups, - eps=resnet_eps, - non_linearity=resnet_act_fn, - pad_mode=pad_mode, - ) - ) - - self.resnets = nn.ModuleList(resnets) - self.downsamplers = None - - if add_downsample: - self.downsamplers = nn.ModuleList( - [ - CogVideoXDownsample3D( - out_channels, out_channels, padding=downsample_padding, compress_time=compress_time - ) - ] - ) - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.Tensor, - temb: Optional[torch.Tensor] = None, - zq: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - for resnet in self.resnets: - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def create_forward(*inputs): - return module(*inputs) - - return create_forward - - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb, zq - ) - else: - hidden_states = resnet(hidden_states, temb, zq) - - if self.downsamplers is not None: - for downsampler in self.downsamplers: - hidden_states = downsampler(hidden_states) - - return hidden_states - - -class CogVideoXMidBlock3D(nn.Module): - r""" - A middle block used in the CogVideoX model. - - Args: - in_channels (`int`): - Number of input channels. - temb_channels (`int`, defaults to `512`): - Number of time embedding channels. - dropout (`float`, defaults to `0.0`): - Dropout rate. - num_layers (`int`, defaults to `1`): - Number of resnet layers. - resnet_eps (`float`, defaults to `1e-6`): - Epsilon value for normalization layers. - resnet_act_fn (`str`, defaults to `"swish"`): - Activation function to use. - resnet_groups (`int`, defaults to `32`): - Number of groups to separate the channels into for group normalization. - spatial_norm_dim (`int`, *optional*): - The dimension to use for spatial norm if it is to be used instead of group norm. - pad_mode (str, defaults to `"first"`): - Padding mode. 
- """ - - _supports_gradient_checkpointing = True - - def __init__( - self, - in_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - spatial_norm_dim: Optional[int] = None, - pad_mode: str = "first", - ): - super().__init__() - - resnets = [] - for _ in range(num_layers): - resnets.append( - CogVideoXResnetBlock3D( - in_channels=in_channels, - out_channels=in_channels, - dropout=dropout, - temb_channels=temb_channels, - groups=resnet_groups, - eps=resnet_eps, - spatial_norm_dim=spatial_norm_dim, - non_linearity=resnet_act_fn, - pad_mode=pad_mode, - ) - ) - self.resnets = nn.ModuleList(resnets) - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.Tensor, - temb: Optional[torch.Tensor] = None, - zq: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - for resnet in self.resnets: - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def create_forward(*inputs): - return module(*inputs) - - return create_forward - - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb, zq - ) - else: - hidden_states = resnet(hidden_states, temb, zq) - - return hidden_states - - -class CogVideoXUpBlock3D(nn.Module): - r""" - An upsampling block used in the CogVideoX model. - - Args: - in_channels (`int`): - Number of input channels. - out_channels (`int`, *optional*): - Number of output channels. If None, defaults to `in_channels`. - temb_channels (`int`, defaults to `512`): - Number of time embedding channels. - dropout (`float`, defaults to `0.0`): - Dropout rate. - num_layers (`int`, defaults to `1`): - Number of resnet layers. - resnet_eps (`float`, defaults to `1e-6`): - Epsilon value for normalization layers. - resnet_act_fn (`str`, defaults to `"swish"`): - Activation function to use. - resnet_groups (`int`, defaults to `32`): - Number of groups to separate the channels into for group normalization. - spatial_norm_dim (`int`, defaults to `16`): - The dimension to use for spatial norm if it is to be used instead of group norm. - add_upsample (`bool`, defaults to `True`): - Whether or not to use a upsampling layer. If not used, output dimension would be same as input dimension. - compress_time (`bool`, defaults to `False`): - Whether or not to downsample across temporal dimension. - pad_mode (str, defaults to `"first"`): - Padding mode. 
- """ - - def __init__( - self, - in_channels: int, - out_channels: int, - temb_channels: int, - dropout: float = 0.0, - num_layers: int = 1, - resnet_eps: float = 1e-6, - resnet_act_fn: str = "swish", - resnet_groups: int = 32, - spatial_norm_dim: int = 16, - add_upsample: bool = True, - upsample_padding: int = 1, - compress_time: bool = False, - pad_mode: str = "first", - ): - super().__init__() - - resnets = [] - for i in range(num_layers): - in_channel = in_channels if i == 0 else out_channels - resnets.append( - CogVideoXResnetBlock3D( - in_channels=in_channel, - out_channels=out_channels, - dropout=dropout, - temb_channels=temb_channels, - groups=resnet_groups, - eps=resnet_eps, - non_linearity=resnet_act_fn, - spatial_norm_dim=spatial_norm_dim, - pad_mode=pad_mode, - ) - ) - - self.resnets = nn.ModuleList(resnets) - self.upsamplers = None - - if add_upsample: - self.upsamplers = nn.ModuleList( - [ - CogVideoXUpsample3D( - out_channels, out_channels, padding=upsample_padding, compress_time=compress_time - ) - ] - ) - - self.gradient_checkpointing = False - - def forward( - self, - hidden_states: torch.Tensor, - temb: Optional[torch.Tensor] = None, - zq: Optional[torch.Tensor] = None, - ) -> torch.Tensor: - r"""Forward method of the `CogVideoXUpBlock3D` class.""" - for resnet in self.resnets: - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def create_forward(*inputs): - return module(*inputs) - - return create_forward - - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(resnet), hidden_states, temb, zq - ) - else: - hidden_states = resnet(hidden_states, temb, zq) - - if self.upsamplers is not None: - for upsampler in self.upsamplers: - hidden_states = upsampler(hidden_states) - - return hidden_states - - -class CogVideoXEncoder3D(nn.Module): - r""" - The `CogVideoXEncoder3D` layer of a variational autoencoder that encodes its input into a latent representation. - - Args: - in_channels (`int`, *optional*, defaults to 3): - The number of input channels. - out_channels (`int`, *optional*, defaults to 3): - The number of output channels. - down_block_types (`Tuple[str, ...]`, *optional*, defaults to `("DownEncoderBlock2D",)`): - The types of down blocks to use. See `~diffusers.models.unet_2d_blocks.get_down_block` for available - options. - block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`): - The number of output channels for each block. - act_fn (`str`, *optional*, defaults to `"silu"`): - The activation function to use. See `~diffusers.models.activations.get_activation` for available options. - layers_per_block (`int`, *optional*, defaults to 2): - The number of layers per block. - norm_num_groups (`int`, *optional*, defaults to 32): - The number of groups for normalization. - """ - - _supports_gradient_checkpointing = True - - def __init__( - self, - in_channels: int = 3, - out_channels: int = 16, - down_block_types: Tuple[str, ...] = ( - "CogVideoXDownBlock3D", - "CogVideoXDownBlock3D", - "CogVideoXDownBlock3D", - "CogVideoXDownBlock3D", - ), - block_out_channels: Tuple[int, ...] 
= (128, 256, 256, 512), - layers_per_block: int = 3, - act_fn: str = "silu", - norm_eps: float = 1e-6, - norm_num_groups: int = 32, - dropout: float = 0.0, - pad_mode: str = "first", - temporal_compression_ratio: float = 4, - ): - super().__init__() - - # log2 of temporal_compress_times - temporal_compress_level = int(np.log2(temporal_compression_ratio)) - - self.conv_in = CogVideoXCausalConv3d(in_channels, block_out_channels[0], kernel_size=3, pad_mode=pad_mode) - self.down_blocks = nn.ModuleList([]) - - # down blocks - output_channel = block_out_channels[0] - for i, down_block_type in enumerate(down_block_types): - input_channel = output_channel - output_channel = block_out_channels[i] - is_final_block = i == len(block_out_channels) - 1 - compress_time = i < temporal_compress_level - - if down_block_type == "CogVideoXDownBlock3D": - down_block = CogVideoXDownBlock3D( - in_channels=input_channel, - out_channels=output_channel, - temb_channels=0, - dropout=dropout, - num_layers=layers_per_block, - resnet_eps=norm_eps, - resnet_act_fn=act_fn, - resnet_groups=norm_num_groups, - add_downsample=not is_final_block, - compress_time=compress_time, - ) - else: - raise ValueError("Invalid `down_block_type` encountered. Must be `CogVideoXDownBlock3D`") - - self.down_blocks.append(down_block) - - # mid block - self.mid_block = CogVideoXMidBlock3D( - in_channels=block_out_channels[-1], - temb_channels=0, - dropout=dropout, - num_layers=2, - resnet_eps=norm_eps, - resnet_act_fn=act_fn, - resnet_groups=norm_num_groups, - pad_mode=pad_mode, - ) - - self.norm_out = nn.GroupNorm(norm_num_groups, block_out_channels[-1], eps=1e-6) - self.conv_act = nn.SiLU() - self.conv_out = CogVideoXCausalConv3d( - block_out_channels[-1], 2 * out_channels, kernel_size=3, pad_mode=pad_mode - ) - - self.gradient_checkpointing = False - - def forward(self, sample: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: - r"""The forward method of the `CogVideoXEncoder3D` class.""" - hidden_states = self.conv_in(sample) - - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - # 1. Down - for down_block in self.down_blocks: - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(down_block), hidden_states, temb, None - ) - - # 2. Mid - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(self.mid_block), hidden_states, temb, None - ) - else: - # 1. Down - for down_block in self.down_blocks: - hidden_states = down_block(hidden_states, temb, None) - - # 2. Mid - hidden_states = self.mid_block(hidden_states, temb, None) - - # 3. Post-process - hidden_states = self.norm_out(hidden_states) - hidden_states = self.conv_act(hidden_states) - hidden_states = self.conv_out(hidden_states) - return hidden_states - - -class CogVideoXDecoder3D(nn.Module): - r""" - The `CogVideoXDecoder3D` layer of a variational autoencoder that decodes its latent representation into an output - sample. - - Args: - in_channels (`int`, *optional*, defaults to 3): - The number of input channels. - out_channels (`int`, *optional*, defaults to 3): - The number of output channels. - up_block_types (`Tuple[str, ...]`, *optional*, defaults to `("UpDecoderBlock2D",)`): - The types of up blocks to use. See `~diffusers.models.unet_2d_blocks.get_up_block` for available options. 
- block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`): - The number of output channels for each block. - act_fn (`str`, *optional*, defaults to `"silu"`): - The activation function to use. See `~diffusers.models.activations.get_activation` for available options. - layers_per_block (`int`, *optional*, defaults to 2): - The number of layers per block. - norm_num_groups (`int`, *optional*, defaults to 32): - The number of groups for normalization. - """ - - _supports_gradient_checkpointing = True - - def __init__( - self, - in_channels: int = 16, - out_channels: int = 3, - up_block_types: Tuple[str, ...] = ( - "CogVideoXUpBlock3D", - "CogVideoXUpBlock3D", - "CogVideoXUpBlock3D", - "CogVideoXUpBlock3D", - ), - block_out_channels: Tuple[int, ...] = (128, 256, 256, 512), - layers_per_block: int = 3, - act_fn: str = "silu", - norm_eps: float = 1e-6, - norm_num_groups: int = 32, - dropout: float = 0.0, - pad_mode: str = "first", - temporal_compression_ratio: float = 4, - ): - super().__init__() - - reversed_block_out_channels = list(reversed(block_out_channels)) - - self.conv_in = CogVideoXCausalConv3d( - in_channels, reversed_block_out_channels[0], kernel_size=3, pad_mode=pad_mode - ) - - # mid block - self.mid_block = CogVideoXMidBlock3D( - in_channels=reversed_block_out_channels[0], - temb_channels=0, - num_layers=2, - resnet_eps=norm_eps, - resnet_act_fn=act_fn, - resnet_groups=norm_num_groups, - spatial_norm_dim=in_channels, - pad_mode=pad_mode, - ) - - # up blocks - self.up_blocks = nn.ModuleList([]) - - output_channel = reversed_block_out_channels[0] - temporal_compress_level = int(np.log2(temporal_compression_ratio)) - - for i, up_block_type in enumerate(up_block_types): - prev_output_channel = output_channel - output_channel = reversed_block_out_channels[i] - is_final_block = i == len(block_out_channels) - 1 - compress_time = i < temporal_compress_level - - if up_block_type == "CogVideoXUpBlock3D": - up_block = CogVideoXUpBlock3D( - in_channels=prev_output_channel, - out_channels=output_channel, - temb_channels=0, - dropout=dropout, - num_layers=layers_per_block + 1, - resnet_eps=norm_eps, - resnet_act_fn=act_fn, - resnet_groups=norm_num_groups, - spatial_norm_dim=in_channels, - add_upsample=not is_final_block, - compress_time=compress_time, - pad_mode=pad_mode, - ) - prev_output_channel = output_channel - else: - raise ValueError("Invalid `up_block_type` encountered. Must be `CogVideoXUpBlock3D`") - - self.up_blocks.append(up_block) - - self.norm_out = CogVideoXSpatialNorm3D(reversed_block_out_channels[-1], in_channels, groups=norm_num_groups) - self.conv_act = nn.SiLU() - self.conv_out = CogVideoXCausalConv3d( - reversed_block_out_channels[-1], out_channels, kernel_size=3, pad_mode=pad_mode - ) - - self.gradient_checkpointing = False - - def forward(self, sample: torch.Tensor, temb: Optional[torch.Tensor] = None) -> torch.Tensor: - r"""The forward method of the `CogVideoXDecoder3D` class.""" - hidden_states = self.conv_in(sample) - - if self.training and self.gradient_checkpointing: - - def create_custom_forward(module): - def custom_forward(*inputs): - return module(*inputs) - - return custom_forward - - # 1. Mid - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(self.mid_block), hidden_states, temb, sample - ) - - # 2. Up - for up_block in self.up_blocks: - hidden_states = torch.utils.checkpoint.checkpoint( - create_custom_forward(up_block), hidden_states, temb, sample - ) - else: - # 1. 
Mid - hidden_states = self.mid_block(hidden_states, temb, sample) - - # 2. Up - for up_block in self.up_blocks: - hidden_states = up_block(hidden_states, temb, sample) - - # 3. Post-process - hidden_states = self.norm_out(hidden_states, sample) - hidden_states = self.conv_act(hidden_states) - hidden_states = self.conv_out(hidden_states) - return hidden_states - - -class AutoencoderKLCogVideoX(ModelMixin, ConfigMixin, FromOriginalModelMixin): - r""" - A VAE model with KL loss for encoding images into latents and decoding latent representations into images. Used in - [CogVideoX](https://github.com/THUDM/CogVideo). - - This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented - for all models (such as downloading or saving). - - Parameters: - in_channels (int, *optional*, defaults to 3): Number of channels in the input image. - out_channels (int, *optional*, defaults to 3): Number of channels in the output. - down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`): - Tuple of downsample block types. - up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`): - Tuple of upsample block types. - block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`): - Tuple of block output channels. - act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. - sample_size (`int`, *optional*, defaults to `32`): Sample input size. - scaling_factor (`float`, *optional*, defaults to `1.15258426`): - The component-wise standard deviation of the trained latent space computed using the first batch of the - training set. This is used to scale the latent space to have unit variance when training the diffusion - model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the - diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 - / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image - Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. - force_upcast (`bool`, *optional*, default to `True`): - If enabled it will force the VAE to run in float32 for high image resolution pipelines, such as SD-XL. 
VAE - can be fine-tuned / trained to a lower range without loosing too much precision in which case - `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix - """ - - _supports_gradient_checkpointing = True - _no_split_modules = ["CogVideoXResnetBlock3D"] - - @register_to_config - def __init__( - self, - in_channels: int = 3, - out_channels: int = 3, - down_block_types: Tuple[str] = ( - "CogVideoXDownBlock3D", - "CogVideoXDownBlock3D", - "CogVideoXDownBlock3D", - "CogVideoXDownBlock3D", - ), - up_block_types: Tuple[str] = ( - "CogVideoXUpBlock3D", - "CogVideoXUpBlock3D", - "CogVideoXUpBlock3D", - "CogVideoXUpBlock3D", - ), - block_out_channels: Tuple[int] = (128, 256, 256, 512), - latent_channels: int = 16, - layers_per_block: int = 3, - act_fn: str = "silu", - norm_eps: float = 1e-6, - norm_num_groups: int = 32, - temporal_compression_ratio: float = 4, - sample_height: int = 480, - sample_width: int = 720, - scaling_factor: float = 1.15258426, - shift_factor: Optional[float] = None, - latents_mean: Optional[Tuple[float]] = None, - latents_std: Optional[Tuple[float]] = None, - force_upcast: float = True, - use_quant_conv: bool = False, - use_post_quant_conv: bool = False, - ): - super().__init__() - - self.encoder = CogVideoXEncoder3D( - in_channels=in_channels, - out_channels=latent_channels, - down_block_types=down_block_types, - block_out_channels=block_out_channels, - layers_per_block=layers_per_block, - act_fn=act_fn, - norm_eps=norm_eps, - norm_num_groups=norm_num_groups, - temporal_compression_ratio=temporal_compression_ratio, - ) - self.decoder = CogVideoXDecoder3D( - in_channels=latent_channels, - out_channels=out_channels, - up_block_types=up_block_types, - block_out_channels=block_out_channels, - layers_per_block=layers_per_block, - act_fn=act_fn, - norm_eps=norm_eps, - norm_num_groups=norm_num_groups, - temporal_compression_ratio=temporal_compression_ratio, - ) - self.quant_conv = CogVideoXSafeConv3d(2 * out_channels, 2 * out_channels, 1) if use_quant_conv else None - self.post_quant_conv = CogVideoXSafeConv3d(out_channels, out_channels, 1) if use_post_quant_conv else None - - self.use_slicing = False - self.use_tiling = False - - # Can be increased to decode more latent frames at once, but comes at a reasonable memory cost and it is not - # recommended because the temporal parts of the VAE, here, are tricky to understand. - # If you decode X latent frames together, the number of output frames is: - # (X + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale)) => X + 6 frames - # - # Example with num_latent_frames_batch_size = 2: - # - 12 latent frames: (0, 1), (2, 3), (4, 5), (6, 7), (8, 9), (10, 11) are processed together - # => (12 // 2 frame slices) * ((2 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale)) - # => 6 * 8 = 48 frames - # - 13 latent frames: (0, 1, 2) (special case), (3, 4), (5, 6), (7, 8), (9, 10), (11, 12) are processed together - # => (1 frame slice) * ((3 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale)) + - # ((13 - 3) // 2) * ((2 num_latent_frames_batch_size) + (2 conv cache) + (2 time upscale_1) + (4 time upscale_2) - (2 causal conv downscale)) - # => 1 * 9 + 5 * 8 = 49 frames - # It has been implemented this way so as to not have "magic values" in the code base that would be hard to explain. 
Note that - # setting it to anything other than 2 would give poor results because the VAE hasn't been trained to be adaptive with different - # number of temporal frames. - self.num_latent_frames_batch_size = 2 - - # We make the minimum height and width of sample for tiling half that of the generally supported - self.tile_sample_min_height = sample_height // 2 - self.tile_sample_min_width = sample_width // 2 - self.tile_latent_min_height = int( - self.tile_sample_min_height / (2 ** (len(self.config.block_out_channels) - 1)) - ) - self.tile_latent_min_width = int(self.tile_sample_min_width / (2 ** (len(self.config.block_out_channels) - 1))) - - # These are experimental overlap factors that were chosen based on experimentation and seem to work best for - # 720x480 (WxH) resolution. The above resolution is the strongly recommended generation resolution in CogVideoX - # and so the tiling implementation has only been tested on those specific resolutions. - self.tile_overlap_factor_height = 1 / 6 - self.tile_overlap_factor_width = 1 / 5 - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, (CogVideoXEncoder3D, CogVideoXDecoder3D)): - module.gradient_checkpointing = value - - def _clear_fake_context_parallel_cache(self): - for name, module in self.named_modules(): - if isinstance(module, CogVideoXCausalConv3d): - logger.debug(f"Clearing fake Context Parallel cache for layer: {name}") - module._clear_fake_context_parallel_cache() - - def enable_tiling( - self, - tile_sample_min_height: Optional[int] = None, - tile_sample_min_width: Optional[int] = None, - tile_overlap_factor_height: Optional[float] = None, - tile_overlap_factor_width: Optional[float] = None, - ) -> None: - r""" - Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to - compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow - processing larger images. - - Args: - tile_sample_min_height (`int`, *optional*): - The minimum height required for a sample to be separated into tiles across the height dimension. - tile_sample_min_width (`int`, *optional*): - The minimum width required for a sample to be separated into tiles across the width dimension. - tile_overlap_factor_height (`int`, *optional*): - The minimum amount of overlap between two consecutive vertical tiles. This is to ensure that there are - no tiling artifacts produced across the height dimension. Must be between 0 and 1. Setting a higher - value might cause more tiles to be processed leading to slow down of the decoding process. - tile_overlap_factor_width (`int`, *optional*): - The minimum amount of overlap between two consecutive horizontal tiles. This is to ensure that there - are no tiling artifacts produced across the width dimension. Must be between 0 and 1. Setting a higher - value might cause more tiles to be processed leading to slow down of the decoding process. 
- """ - self.use_tiling = True - self.tile_sample_min_height = tile_sample_min_height or self.tile_sample_min_height - self.tile_sample_min_width = tile_sample_min_width or self.tile_sample_min_width - self.tile_latent_min_height = int( - self.tile_sample_min_height / (2 ** (len(self.config.block_out_channels) - 1)) - ) - self.tile_latent_min_width = int(self.tile_sample_min_width / (2 ** (len(self.config.block_out_channels) - 1))) - self.tile_overlap_factor_height = tile_overlap_factor_height or self.tile_overlap_factor_height - self.tile_overlap_factor_width = tile_overlap_factor_width or self.tile_overlap_factor_width - - def disable_tiling(self) -> None: - r""" - Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing - decoding in one step. - """ - self.use_tiling = False - - def enable_slicing(self) -> None: - r""" - Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to - compute decoding in several steps. This is useful to save some memory and allow larger batch sizes. - """ - self.use_slicing = True - - def disable_slicing(self) -> None: - r""" - Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing - decoding in one step. - """ - self.use_slicing = False - - @apply_forward_hook - def encode( - self, x: torch.Tensor, return_dict: bool = True - ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]: - """ - Encode a batch of images into latents. - - Args: - x (`torch.Tensor`): Input batch of images. - return_dict (`bool`, *optional*, defaults to `True`): - Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple. - - Returns: - The latent representations of the encoded images. If `return_dict` is True, a - [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned. 
- """ - batch_size, num_channels, num_frames, height, width = x.shape - if num_frames == 1: - h = self.encoder(x) - if self.quant_conv is not None: - h = self.quant_conv(h) - posterior = DiagonalGaussianDistribution(h) - else: - frame_batch_size = 4 - h = [] - for i in range(num_frames // frame_batch_size): - remaining_frames = num_frames % frame_batch_size - start_frame = frame_batch_size * i + (0 if i == 0 else remaining_frames) - end_frame = frame_batch_size * (i + 1) + remaining_frames - z_intermediate = x[:, :, start_frame:end_frame] - z_intermediate = self.encoder(z_intermediate) - if self.quant_conv is not None: - z_intermediate = self.quant_conv(z_intermediate) - h.append(z_intermediate) - self._clear_fake_context_parallel_cache() - h = torch.cat(h, dim=2) - posterior = DiagonalGaussianDistribution(h) - if not return_dict: - return (posterior,) - return AutoencoderKLOutput(latent_dist=posterior) - - def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: - batch_size, num_channels, num_frames, height, width = z.shape - - if self.use_tiling and (width > self.tile_latent_min_width or height > self.tile_latent_min_height): - return self.tiled_decode(z, return_dict=return_dict) - - if num_frames == 1: - dec = [] - z_intermediate = z - if self.post_quant_conv is not None: - z_intermediate = self.post_quant_conv(z_intermediate) - z_intermediate = self.decoder(z_intermediate) - dec.append(z_intermediate) - else: - frame_batch_size = self.num_latent_frames_batch_size - dec = [] - for i in range(num_frames // frame_batch_size): - remaining_frames = num_frames % frame_batch_size - start_frame = frame_batch_size * i + (0 if i == 0 else remaining_frames) - end_frame = frame_batch_size * (i + 1) + remaining_frames - z_intermediate = z[:, :, start_frame:end_frame] - if self.post_quant_conv is not None: - z_intermediate = self.post_quant_conv(z_intermediate) - z_intermediate = self.decoder(z_intermediate) - dec.append(z_intermediate) - - self._clear_fake_context_parallel_cache() - dec = torch.cat(dec, dim=2) - - if not return_dict: - return (dec,) - - return DecoderOutput(sample=dec) - - @apply_forward_hook - def decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: - """ - Decode a batch of images. - - Args: - z (`torch.Tensor`): Input batch of latent vectors. - return_dict (`bool`, *optional*, defaults to `True`): - Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. - - Returns: - [`~models.vae.DecoderOutput`] or `tuple`: - If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is - returned. 
- """ - if self.use_slicing and z.shape[0] > 1: - decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)] - decoded = torch.cat(decoded_slices) - else: - decoded = self._decode(z).sample - - if not return_dict: - return (decoded,) - return DecoderOutput(sample=decoded) - - def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: - blend_extent = min(a.shape[3], b.shape[3], blend_extent) - for y in range(blend_extent): - b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * ( - y / blend_extent - ) - return b - - def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor: - blend_extent = min(a.shape[4], b.shape[4], blend_extent) - for x in range(blend_extent): - b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * ( - x / blend_extent - ) - return b - - def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]: - r""" - Decode a batch of images using a tiled decoder. - - Args: - z (`torch.Tensor`): Input batch of latent vectors. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple. - - Returns: - [`~models.vae.DecoderOutput`] or `tuple`: - If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is - returned. - """ - # Rough memory assessment: - # - In CogVideoX-2B, there are a total of 24 CausalConv3d layers. - # - The biggest intermediate dimensions are: [1, 128, 9, 480, 720]. - # - Assume fp16 (2 bytes per value). - # Memory required: 1 * 128 * 9 * 480 * 720 * 24 * 2 / 1024**3 = 17.8 GB - # - # Memory assessment when using tiling: - # - Assume everything as above but now HxW is 240x360 by tiling in half - # Memory required: 1 * 128 * 9 * 240 * 360 * 24 * 2 / 1024**3 = 4.5 GB - - batch_size, num_channels, num_frames, height, width = z.shape - - overlap_height = int(self.tile_latent_min_height * (1 - self.tile_overlap_factor_height)) - overlap_width = int(self.tile_latent_min_width * (1 - self.tile_overlap_factor_width)) - blend_extent_height = int(self.tile_sample_min_height * self.tile_overlap_factor_height) - blend_extent_width = int(self.tile_sample_min_width * self.tile_overlap_factor_width) - row_limit_height = self.tile_sample_min_height - blend_extent_height - row_limit_width = self.tile_sample_min_width - blend_extent_width - frame_batch_size = self.num_latent_frames_batch_size - - # Split z into overlapping tiles and decode them separately. - # The tiles have an overlap to avoid seams between tiles. 
- rows = [] - for i in range(0, height, overlap_height): - row = [] - for j in range(0, width, overlap_width): - time = [] - for k in range(num_frames // frame_batch_size): - remaining_frames = num_frames % frame_batch_size - start_frame = frame_batch_size * k + (0 if k == 0 else remaining_frames) - end_frame = frame_batch_size * (k + 1) + remaining_frames - tile = z[ - :, - :, - start_frame:end_frame, - i : i + self.tile_latent_min_height, - j : j + self.tile_latent_min_width, - ] - if self.post_quant_conv is not None: - tile = self.post_quant_conv(tile) - tile = self.decoder(tile) - time.append(tile) - self._clear_fake_context_parallel_cache() - row.append(torch.cat(time, dim=2)) - rows.append(row) - - result_rows = [] - for i, row in enumerate(rows): - result_row = [] - for j, tile in enumerate(row): - # blend the above tile and the left tile - # to the current tile and add the current tile to the result row - if i > 0: - tile = self.blend_v(rows[i - 1][j], tile, blend_extent_height) - if j > 0: - tile = self.blend_h(row[j - 1], tile, blend_extent_width) - result_row.append(tile[:, :, :, :row_limit_height, :row_limit_width]) - result_rows.append(torch.cat(result_row, dim=4)) - - dec = torch.cat(result_rows, dim=3) - - if not return_dict: - return (dec,) - - return DecoderOutput(sample=dec) - - def forward( - self, - sample: torch.Tensor, - sample_posterior: bool = False, - return_dict: bool = True, - generator: Optional[torch.Generator] = None, - ) -> Union[torch.Tensor, torch.Tensor]: - x = sample - posterior = self.encode(x).latent_dist - if sample_posterior: - z = posterior.sample(generator=generator) - else: - z = posterior.mode() - dec = self.decode(z) - if not return_dict: - return (dec,) - return dec diff --git a/cogvideox_fun/pipeline_cogvideox_control.py b/cogvideox_fun/pipeline_cogvideox_control.py deleted file mode 100644 index f598147..0000000 --- a/cogvideox_fun/pipeline_cogvideox_control.py +++ /dev/null @@ -1,866 +0,0 @@ -# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
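Between the autoencoder above and the pipelines that follow, a minimal usage sketch may help orient the reader. This is a hedged example, not part of the patch: it assumes the standard diffusers checkpoint layout for CogVideoX ("THUDM/CogVideoX-2b", subfolder "vae"), and the shapes are illustrative.

```python
import torch
from diffusers import AutoencoderKLCogVideoX

# Load the VAE half of a CogVideoX checkpoint and round-trip a short clip.
vae = AutoencoderKLCogVideoX.from_pretrained(
    "THUDM/CogVideoX-2b", subfolder="vae", torch_dtype=torch.float16
).to("cuda")
vae.enable_tiling()  # cuts peak decode memory roughly 4x per the estimate above

video = torch.randn(1, 3, 9, 480, 720, dtype=torch.float16, device="cuda")
with torch.no_grad():
    latents = vae.encode(video).latent_dist.sample() * vae.config.scaling_factor
    recon = vae.decode(latents / vae.config.scaling_factor).sample
```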
- -import inspect -import math -from dataclasses import dataclass -from typing import Callable, Dict, List, Optional, Tuple, Union - -import torch -import torch.nn.functional as F -from einops import rearrange - -from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback -from diffusers.models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel -from diffusers.models.embeddings import get_3d_rotary_pos_embed -from diffusers.pipelines.pipeline_utils import DiffusionPipeline -from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler -from diffusers.utils import BaseOutput, logging, replace_example_docstring -from diffusers.utils.torch_utils import randn_tensor -from diffusers.video_processor import VideoProcessor -from diffusers.image_processor import VaeImageProcessor - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -EXAMPLE_DOC_STRING = """ - Examples: - ```python - >>> import torch - >>> from diffusers import CogVideoX_Fun_Pipeline - >>> from diffusers.utils import export_to_video - - >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b" - >>> pipe = CogVideoX_Fun_Pipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16).to("cuda") - >>> prompt = ( - ... "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. " - ... "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other " - ... "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, " - ... "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. " - ... "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical " - ... "atmosphere of this unique musical performance." - ... ) - >>> video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0] - >>> export_to_video(video, "output.mp4", fps=8) - ``` -""" - - -# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid -def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): - tw = tgt_width - th = tgt_height - h, w = src - r = h / w - if r > (th / tw): - resize_height = th - resize_width = int(round(th / h * w)) - else: - resize_width = tw - resize_height = int(round(tw / w * h)) - - crop_top = int(round((th - resize_height) / 2.0)) - crop_left = int(round((tw - resize_width) / 2.0)) - - return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width)
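A worked example of get_resize_crop_region_for_grid, with values computed by hand for illustration: fitting a 34x45 latent grid (e.g. from a 544-pixel-tall input) into the 30x45 base grid pins the height and centers the width crop.

```python
# r = 34/45 ≈ 0.76 > 30/45 ≈ 0.67, so resize_height = 30 and
# resize_width = round(30 / 34 * 45) = 40, centered with crop_left = 2.
top_left, bottom_right = get_resize_crop_region_for_grid((34, 45), 45, 30)
assert top_left == (0, 2) and bottom_right == (30, 42)
```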
- - -# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps -def retrieve_timesteps( - scheduler, - num_inference_steps: Optional[int] = None, - device: Optional[Union[str, torch.device]] = None, - timesteps: Optional[List[int]] = None, - sigmas: Optional[List[float]] = None, - **kwargs, -): - """ - Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles - custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. - - Args: - scheduler (`SchedulerMixin`): - The scheduler to get timesteps from. - num_inference_steps (`int`): - The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` - must be `None`. - device (`str` or `torch.device`, *optional*): - The device to which the timesteps should be moved. If `None`, the timesteps are not moved. - timesteps (`List[int]`, *optional*): - Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, - `num_inference_steps` and `sigmas` must be `None`. - sigmas (`List[float]`, *optional*): - Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, - `num_inference_steps` and `timesteps` must be `None`. - - Returns: - `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the - second element is the number of inference steps. - """ - if timesteps is not None and sigmas is not None: - raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") - if timesteps is not None: - accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) - if not accepts_timesteps: - raise ValueError( - f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" - f" timestep schedules. Please check whether you are using the correct scheduler." - ) - scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) - timesteps = scheduler.timesteps - num_inference_steps = len(timesteps) - elif sigmas is not None: - accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) - if not accept_sigmas: - raise ValueError( - f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" - f" sigmas schedules. Please check whether you are using the correct scheduler." - ) - scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) - timesteps = scheduler.timesteps - num_inference_steps = len(timesteps) - else: - scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) - timesteps = scheduler.timesteps - return timesteps, num_inference_steps - - -@dataclass -class CogVideoX_Fun_PipelineOutput(BaseOutput): - r""" - Output class for CogVideo pipelines. - - Args: - videos (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): - List of video outputs - It can be a nested list of length `batch_size`, with each sub-list containing - denoised PIL image sequences of length `num_frames`. It can also be a NumPy array or Torch tensor of shape - `(batch_size, num_frames, channels, height, width)`. - """ - - videos: torch.Tensor - - -class CogVideoX_Fun_Pipeline_Control(DiffusionPipeline): - r""" - Pipeline for text-to-video generation using CogVideoX. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. - transformer ([`CogVideoXTransformer3DModel`]): - A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `transformer` to denoise the encoded video latents.
- """ - - _optional_components = [] - model_cpu_offload_seq = "vae->transformer->vae" - - _callback_tensor_inputs = [ - "latents", - "prompt_embeds", - "negative_prompt_embeds", - ] - - def __init__( - self, - vae: AutoencoderKLCogVideoX, - transformer: CogVideoXTransformer3DModel, - scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler], - ): - super().__init__() - - self.register_modules( - vae=vae, transformer=transformer, scheduler=scheduler - ) - self.vae_scale_factor_spatial = ( - 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 - ) - self.vae_scale_factor_temporal = ( - self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4 - ) - - self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) - - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) - self.mask_processor = VaeImageProcessor( - vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True - ) - - def prepare_latents( - self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, timesteps, denoise_strength, num_inference_steps, - latents=None, freenoise=True, context_size=None, context_overlap=None - ): - shape = ( - batch_size, - (num_frames - 1) // self.vae_scale_factor_temporal + 1, - num_channels_latents, - height // self.vae_scale_factor_spatial, - width // self.vae_scale_factor_spatial, - ) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - noise = randn_tensor(shape, generator=generator, device=torch.device("cpu"), dtype=self.vae.dtype) - if freenoise: - print("Applying FreeNoise") - # code and comments from AnimateDiff-Evolved by Kosinkadink (https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved) - video_length = num_frames // 4 - delta = context_size - context_overlap - for start_idx in range(0, video_length-context_size, delta): - # start_idx corresponds to the beginning of a context window - # goal: place shuffled noise in the delta region right after the end of the context window - # if space after context window is not enough to place the noise, adjust and finish - place_idx = start_idx + context_size - # if place_idx is outside the valid indexes, we are already finished - if place_idx >= video_length: - break - end_idx = place_idx - 1 - #print("video_length:", video_length, "start_idx:", start_idx, "end_idx:", end_idx, "place_idx:", place_idx, "delta:", delta) - - # if there is not enough room to copy delta amount of indexes, copy limited amount and finish - if end_idx + delta >= video_length: - final_delta = video_length - place_idx - # generate list of indexes in final delta region - list_idx = torch.tensor(list(range(start_idx,start_idx+final_delta)), device=torch.device("cpu"), dtype=torch.long) - # shuffle list - list_idx = list_idx[torch.randperm(final_delta, generator=generator)] - # apply shuffled indexes - noise[:, place_idx:place_idx + final_delta, :, :, :] = noise[:, list_idx, :, :, :] - break - # otherwise, do normal behavior - # generate list of indexes in delta region - list_idx = torch.tensor(list(range(start_idx,start_idx+delta)), device=torch.device("cpu"), dtype=torch.long) - # shuffle list - list_idx = list_idx[torch.randperm(delta, generator=generator)] - # apply shuffled indexes - #print("place_idx:", place_idx, "delta:", delta, "list_idx:", list_idx) - noise[:, place_idx:place_idx + delta, :, :, :] = noise[:, list_idx, :, :, :] - if latents is None: - latents = noise.to(device) - else: - latents = latents.to(device) - timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, denoise_strength, device) - latent_timestep = timesteps[:1] - - noise = randn_tensor(shape, generator=generator, device=device, dtype=self.vae.dtype) - frames_needed = noise.shape[1] - current_frames = latents.shape[1] - - if frames_needed > current_frames: - repeat_factor = frames_needed // current_frames - additional_frame = torch.randn((latents.size(0), repeat_factor, latents.size(2), latents.size(3), latents.size(4)), dtype=latents.dtype, device=latents.device) - latents = torch.cat((latents, additional_frame), dim=1) - elif frames_needed < current_frames: - latents = latents[:, :frames_needed, :, :, :] - - latents = self.scheduler.add_noise(latents, noise, latent_timestep) - latents = latents * self.scheduler.init_noise_sigma # scale the initial noise by the standard deviation required by the scheduler - return latents, timesteps, noise
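Since the FreeNoise branch above is dense, here is a stripped-down sketch of the same shuffling idea as a standalone function. The function name and the minimal 5D-tensor interface are assumptions made for illustration; the pipeline code above applies the same window arithmetic in place on `noise` along dim 1.

```python
import torch

def freenoise_shuffle(noise, context_size, context_overlap, generator=None):
    # noise: [batch, frames, channels, height, width]
    video_length = noise.shape[1]
    delta = context_size - context_overlap
    for start_idx in range(0, video_length - context_size, delta):
        place_idx = start_idx + context_size  # region right after the window
        if place_idx >= video_length:
            break
        span = min(delta, video_length - place_idx)
        # reuse shuffled frames from the start of the window as "fresh" noise
        idx = start_idx + torch.randperm(span, generator=generator)
        noise[:, place_idx:place_idx + span] = noise[:, idx]
        if span < delta:
            break
    return noise

noise = freenoise_shuffle(torch.randn(1, 13, 16, 60, 90), context_size=8, context_overlap=4)
```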
- - def prepare_control_latents( - self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance - ): - # resize the mask to latents shape as we concatenate the mask to the latents - # we do that before converting to dtype to avoid breaking in case we're using cpu_offload - # and half precision - - if mask is not None: - mask = mask.to(device=device, dtype=self.vae.dtype) - bs = 1 - new_mask = [] - for i in range(0, mask.shape[0], bs): - mask_bs = mask[i : i + bs] - mask_bs = self.vae.encode(mask_bs)[0] - mask_bs = mask_bs.mode() - new_mask.append(mask_bs) - mask = torch.cat(new_mask, dim=0) - mask = mask * self.vae.config.scaling_factor - - if masked_image is not None: - masked_image = masked_image.to(device=device, dtype=self.vae.dtype) - bs = 1 - new_mask_pixel_values = [] - for i in range(0, masked_image.shape[0], bs): - mask_pixel_values_bs = masked_image[i : i + bs] - mask_pixel_values_bs = self.vae.encode(mask_pixel_values_bs)[0] - mask_pixel_values_bs = mask_pixel_values_bs.mode() - new_mask_pixel_values.append(mask_pixel_values_bs) - masked_image_latents = torch.cat(new_mask_pixel_values, dim=0) - masked_image_latents = masked_image_latents * self.vae.config.scaling_factor - else: - masked_image_latents = None - - return mask, masked_image_latents - - def decode_latents(self, latents: torch.Tensor) -> torch.Tensor: - latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width] - latents = 1 / self.vae.config.scaling_factor * latents - - frames = self.vae.decode(latents).sample - frames = (frames / 2 + 0.5).clamp(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 - frames = frames.cpu().float().numpy() - return frames - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. - # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs - def check_inputs( - self, - prompt, - height, - width, - negative_prompt, - callback_on_step_end_tensor_inputs, - prompt_embeds=None, - negative_prompt_embeds=None, - ): - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if callback_on_step_end_tensor_inputs is not None and not all( - k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs - ): - raise ValueError( - f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" - ) - if prompt is not None and prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" - " only forward one of the two." - ) - elif prompt is None and prompt_embeds is None: - raise ValueError( - "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
- ) - elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): - raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") - - if prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if negative_prompt is not None and negative_prompt_embeds is not None: - raise ValueError( - f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" - f" {negative_prompt_embeds}. Please make sure to only forward one of the two." - ) - - if prompt_embeds is not None and negative_prompt_embeds is not None: - if prompt_embeds.shape != negative_prompt_embeds.shape: - raise ValueError( - "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" - f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" - f" {negative_prompt_embeds.shape}." - ) - - def fuse_qkv_projections(self) -> None: - r"""Enables fused QKV projections.""" - self.fusing_transformer = True - self.transformer.fuse_qkv_projections() - - def unfuse_qkv_projections(self) -> None: - r"""Disable QKV projection fusion if enabled.""" - if not self.fusing_transformer: - logger.warning("The Transformer was not initially fused for QKV projections. Doing nothing.") - else: - self.transformer.unfuse_qkv_projections() - self.fusing_transformer = False - - def _prepare_rotary_positional_embeddings( - self, - height: int, - width: int, - num_frames: int, - device: torch.device, - start_frame: Optional[int] = None, - end_frame: Optional[int] = None, - context_frames: Optional[int] = None, - ) -> Tuple[torch.Tensor, torch.Tensor]: - grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) - grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) - base_size_width = 720 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) - base_size_height = 480 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) - - grid_crops_coords = get_resize_crop_region_for_grid( - (grid_height, grid_width), base_size_width, base_size_height - ) - freqs_cos, freqs_sin = get_3d_rotary_pos_embed( - embed_dim=self.transformer.config.attention_head_dim, - crops_coords=grid_crops_coords, - grid_size=(grid_height, grid_width), - temporal_size=num_frames, - use_real=True, - ) - - if start_frame is not None or context_frames is not None: - freqs_cos = freqs_cos.view(num_frames, grid_height * grid_width, -1) - freqs_sin = freqs_sin.view(num_frames, grid_height * grid_width, -1) - if context_frames is not None: - freqs_cos = freqs_cos[context_frames] - freqs_sin = freqs_sin[context_frames] - else: - freqs_cos = freqs_cos[start_frame:end_frame] - freqs_sin = freqs_sin[start_frame:end_frame] - - freqs_cos = freqs_cos.view(-1, freqs_cos.shape[-1]) - freqs_sin = freqs_sin.view(-1, freqs_sin.shape[-1]) - - freqs_cos = freqs_cos.to(device=device) - freqs_sin = freqs_sin.to(device=device) - return freqs_cos, freqs_sin - - @property - def guidance_scale(self): - return self._guidance_scale - - @property - def num_timesteps(self): - return self._num_timesteps - - @property - def interrupt(self): - return self._interrupt - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps - def 
get_timesteps(self, num_inference_steps, strength, device): - # get the original timestep using init_timestep - init_timestep = min(int(num_inference_steps * strength), num_inference_steps) - - t_start = max(num_inference_steps - init_timestep, 0) - timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] - - return timesteps, num_inference_steps - t_start - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt: Optional[Union[str, List[str]]] = None, - negative_prompt: Optional[Union[str, List[str]]] = None, - height: int = 480, - width: int = 720, - video: Optional[torch.FloatTensor] = None, - control_video: Optional[torch.FloatTensor] = None, - num_frames: int = 49, - num_inference_steps: int = 50, - timesteps: Optional[List[int]] = None, - guidance_scale: float = 6, - use_dynamic_cfg: bool = False, - denoise_strength: float = 1.0, - num_videos_per_prompt: int = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: str = "numpy", - return_dict: bool = False, - callback_on_step_end: Optional[ - Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] - ] = None, - callback_on_step_end_tensor_inputs: List[str] = ["latents"], - max_sequence_length: int = 226, - comfyui_progressbar: bool = False, - control_strength: float = 1.0, - control_start_percent: float = 0.0, - control_end_percent: float = 1.0, - scheduler_name: str = "DPM", - context_schedule: Optional[str] = None, - context_frames: Optional[int] = None, - context_stride: Optional[int] = None, - context_overlap: Optional[int] = None, - freenoise: Optional[bool] = True, - tora: Optional[dict] = None, - ) -> Union[CogVideoX_Fun_PipelineOutput, Tuple]: - """ - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds` - instead. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the video generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - height (`int`, *optional*, defaults to `480`): - The height in pixels of the generated video. - width (`int`, *optional*, defaults to `720`): - The width in pixels of the generated video. - num_frames (`int`, defaults to `49`): - Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. Generated video will - contain 1 extra frame because CogVideoX_Fun is conditioned with (num_seconds * fps + 1) frames where - num_seconds is 6 and fps is 4. However, since videos can be saved at any fps, the only condition that - needs to be satisfied is that of divisibility mentioned above. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality video at the - expense of slower inference.
- timesteps (`List[int]`, *optional*): - Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument - in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is - passed will be used. Must be in descending order. - guidance_scale (`float`, *optional*, defaults to `6`): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages generating videos that are closely linked to the text `prompt`, - usually at the expense of lower video quality. - num_videos_per_prompt (`int`, *optional*, defaults to 1): - The number of videos to generate per prompt. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will be generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not - provided, text embeddings will be generated from `prompt` input argument. - negative_prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt - weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input - argument. - output_type (`str`, *optional*, defaults to `"numpy"`): - The output format of the generated video. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `False`): - Whether or not to return a [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] instead - of a plain tuple. - callback_on_step_end (`Callable`, *optional*): - A function that is called at the end of each denoising step during inference. The function is called - with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, - callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by - `callback_on_step_end_tensor_inputs`. - callback_on_step_end_tensor_inputs (`List`, *optional*): - The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list - will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the - `._callback_tensor_inputs` attribute of your pipeline class. - max_sequence_length (`int`, defaults to `226`): - Maximum sequence length in encoded prompt. Must be consistent with - `self.transformer.config.max_text_seq_length` otherwise may lead to poor results. - - Examples: - - Returns: - [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] or `tuple`: - [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] if `return_dict` is True, otherwise a - `tuple`. When returning a tuple, the first element is a list with the generated videos.
- """ - - # if num_frames > 49: - # raise ValueError( - # "The number of frames must be less than 49 for now due to static positional embeddings. This will be updated in the future to remove this limitation." - # ) - - if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)): - callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs - - height = height or self.transformer.config.sample_size * self.vae_scale_factor_spatial - width = width or self.transformer.config.sample_size * self.vae_scale_factor_spatial - num_videos_per_prompt = 1 - - # 1. Check inputs. Raise error if not correct - self.check_inputs( - prompt, - height, - width, - negative_prompt, - callback_on_step_end_tensor_inputs, - prompt_embeds, - negative_prompt_embeds, - ) - self._guidance_scale = guidance_scale - self._interrupt = False - - # 2. Default call parameters - if prompt is not None and isinstance(prompt, str): - batch_size = 1 - elif prompt is not None and isinstance(prompt, list): - batch_size = len(prompt) - else: - batch_size = prompt_embeds.shape[0] - - device = self._execution_device - - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - if do_classifier_free_guidance: - prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) - - # 4. Prepare timesteps - timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps) - self._num_timesteps = len(timesteps) - if comfyui_progressbar: - from comfy.utils import ProgressBar - pbar = ProgressBar(num_inference_steps + 2) - - # 5. Prepare latents. - latent_channels = self.vae.config.latent_channels - latents, timesteps, noise = self.prepare_latents( - batch_size * num_videos_per_prompt, - latent_channels, - num_frames, - height, - width, - self.vae.dtype, - device, - generator, - timesteps, - denoise_strength, - num_inference_steps, - latents, - context_size=context_frames, - context_overlap=context_overlap, - freenoise=freenoise, - ) - if comfyui_progressbar: - pbar.update(1) - - - control_video_latents_input = ( - torch.cat([control_video] * 2) if do_classifier_free_guidance else control_video - ) - control_latents = rearrange(control_video_latents_input, "b c f h w -> b f c h w") - - control_latents = control_latents * control_strength - - if comfyui_progressbar: - pbar.update(1) - - # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - - - # 8. Denoising loop - num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) - - if context_schedule is not None: - print(f"Context schedule enabled: {context_frames} frames, {context_stride} stride, {context_overlap} overlap") - use_context_schedule = True - from .context import get_context_scheduler - context = get_context_scheduler(context_schedule) - - else: - use_context_schedule = False - print(" context schedule disabled") - # 7. 
Create rotary embeds if required - image_rotary_emb = ( - self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device) - if self.transformer.config.use_rotary_positional_embeddings - else None - ) - if tora is not None and do_classifier_free_guidance: - video_flow_features = tora["video_flow_features"].repeat(1, 2, 1, 1, 1).contiguous() - - if tora is not None: - for module in self.transformer.fuser_list: - for param in module.parameters(): - param.data = param.data.to(device) - - with self.progress_bar(total=num_inference_steps) as progress_bar: - # for DPM-solver++ - old_pred_original_sample = None - for i, t in enumerate(timesteps): - if self.interrupt: - continue - if use_context_schedule: - - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # Calculate the current step percentage - current_step_percentage = i / num_inference_steps - - # Determine if control_latents should be applied - apply_control = control_start_percent <= current_step_percentage <= control_end_percent - current_control_latents = control_latents if apply_control else torch.zeros_like(control_latents) - - # broadcast to batch dimension in a way that's compatible with ONNX/Core ML - timestep = t.expand(latent_model_input.shape[0]) - - context_queue = list(context( - i, num_inference_steps, latents.shape[1], context_frames, context_stride, context_overlap, - )) - counter = torch.zeros_like(latent_model_input) - noise_pred = torch.zeros_like(latent_model_input) - - image_rotary_emb = ( - self._prepare_rotary_positional_embeddings(height, width, context_frames, device) - if self.transformer.config.use_rotary_positional_embeddings - else None - ) - - for c in context_queue: - partial_latent_model_input = latent_model_input[:, c, :, :, :] - partial_control_latents = current_control_latents[:, c, :, :, :] - - # predict noise model_output - noise_pred[:, c, :, :, :] += self.transformer( - hidden_states=partial_latent_model_input, - encoder_hidden_states=prompt_embeds, - timestep=timestep, - image_rotary_emb=image_rotary_emb, - return_dict=False, - control_latents=partial_control_latents, - )[0] - - counter[:, c, :, :, :] += 1 - noise_pred = noise_pred.float() - - noise_pred /= counter - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - if not isinstance(self.scheduler, CogVideoXDPMScheduler): - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - else: - latents, old_pred_original_sample = self.scheduler.step( - noise_pred, - old_pred_original_sample, - t, - timesteps[i - 1] if i > 0 else None, - latents, - **extra_step_kwargs, - return_dict=False, - ) - latents = latents.to(prompt_embeds.dtype) - - # call the callback, if provided - if callback_on_step_end is not None: - callback_kwargs = {} - for k in callback_on_step_end_tensor_inputs: - callback_kwargs[k] = locals()[k] - callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) - - latents = callback_outputs.pop("latents", latents) - prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) - negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) - - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % 
self.scheduler.order == 0): - progress_bar.update() - if comfyui_progressbar: - pbar.update(1) - else: - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # Calculate the current step percentage - current_step_percentage = i / num_inference_steps - - # Determine if control_latents should be applied - apply_control = control_start_percent <= current_step_percentage <= control_end_percent - current_control_latents = control_latents if apply_control else torch.zeros_like(control_latents) - - # broadcast to batch dimension in a way that's compatible with ONNX/Core ML - timestep = t.expand(latent_model_input.shape[0]) - - # predict noise model_output - noise_pred = self.transformer( - hidden_states=latent_model_input, - encoder_hidden_states=prompt_embeds, - timestep=timestep, - image_rotary_emb=image_rotary_emb, - return_dict=False, - control_latents=current_control_latents, - video_flow_features=video_flow_features if (tora is not None and tora["start_percent"] <= current_step_percentage <= tora["end_percent"]) else None, - - )[0] - noise_pred = noise_pred.float() - - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - if not isinstance(self.scheduler, CogVideoXDPMScheduler): - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - else: - latents, old_pred_original_sample = self.scheduler.step( - noise_pred, - old_pred_original_sample, - t, - timesteps[i - 1] if i > 0 else None, - latents, - **extra_step_kwargs, - return_dict=False, - ) - latents = latents.to(prompt_embeds.dtype) - - # call the callback, if provided - if callback_on_step_end is not None: - callback_kwargs = {} - for k in callback_on_step_end_tensor_inputs: - callback_kwargs[k] = locals()[k] - callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) - - latents = callback_outputs.pop("latents", latents) - prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) - negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) - - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if comfyui_progressbar: - pbar.update(1) - - # if output_type == "numpy": - # video = self.decode_latents(latents) - # elif not output_type == "latent": - # video = self.decode_latents(latents) - # video = self.video_processor.postprocess_video(video=video, output_type=output_type) - # else: - # video = latents - - # Offload all models - self.maybe_free_model_hooks() - - # if not return_dict: - # video = torch.from_numpy(video) - - return latents \ No newline at end of file diff --git a/cogvideox_fun/pipeline_cogvideox_inpaint.py b/cogvideox_fun/pipeline_cogvideox_inpaint.py deleted file mode 100644 index a6f0e9e..0000000 --- a/cogvideox_fun/pipeline_cogvideox_inpaint.py +++ /dev/null @@ -1,1037 +0,0 @@ -# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -import math -from dataclasses import dataclass -from typing import Callable, Dict, List, Optional, Tuple, Union - -import torch -import torch.nn.functional as F -from einops import rearrange - -from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback -from diffusers.models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel -from diffusers.models.embeddings import get_3d_rotary_pos_embed -from diffusers.pipelines.pipeline_utils import DiffusionPipeline -from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler -from diffusers.utils import BaseOutput, logging, replace_example_docstring -from diffusers.utils.torch_utils import randn_tensor -from diffusers.video_processor import VideoProcessor -from diffusers.image_processor import VaeImageProcessor - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -EXAMPLE_DOC_STRING = """ - Examples: - ```python - >>> import torch - >>> from diffusers import CogVideoX_Fun_Pipeline - >>> from diffusers.utils import export_to_video - - >>> # Models: "THUDM/CogVideoX-2b" or "THUDM/CogVideoX-5b" - >>> pipe = CogVideoX_Fun_Pipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16).to("cuda") - >>> prompt = ( - ... "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. " - ... "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other " - ... "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, " - ... "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. " - ... "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical " - ... "atmosphere of this unique musical performance." - ... ) - >>> video = pipe(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0] - >>> export_to_video(video, "output.mp4", fps=8) - ``` -""" - - -# Similar to diffusers.pipelines.hunyuandit.pipeline_hunyuandit.get_resize_crop_region_for_grid -def get_resize_crop_region_for_grid(src, tgt_width, tgt_height): - tw = tgt_width - th = tgt_height - h, w = src - r = h / w - if r > (th / tw): - resize_height = th - resize_width = int(round(th / h * w)) - else: - resize_width = tw - resize_height = int(round(tw / w * h)) - - crop_top = int(round((th - resize_height) / 2.0)) - crop_left = int(round((tw - resize_width) / 2.0)) - - return (crop_top, crop_left), (crop_top + resize_height, crop_left + resize_width) - - -# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps -def retrieve_timesteps( - scheduler, - num_inference_steps: Optional[int] = None, - device: Optional[Union[str, torch.device]] = None, - timesteps: Optional[List[int]] = None, - sigmas: Optional[List[float]] = None, - **kwargs, -): - """ - Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles - custom timesteps.
Any kwargs will be supplied to `scheduler.set_timesteps`. - - Args: - scheduler (`SchedulerMixin`): - The scheduler to get timesteps from. - num_inference_steps (`int`): - The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps` - must be `None`. - device (`str` or `torch.device`, *optional*): - The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. - timesteps (`List[int]`, *optional*): - Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed, - `num_inference_steps` and `sigmas` must be `None`. - sigmas (`List[float]`, *optional*): - Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed, - `num_inference_steps` and `timesteps` must be `None`. - - Returns: - `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the - second element is the number of inference steps. - """ - if timesteps is not None and sigmas is not None: - raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values") - if timesteps is not None: - accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) - if not accepts_timesteps: - raise ValueError( - f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" - f" timestep schedules. Please check whether you are using the correct scheduler." - ) - scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) - timesteps = scheduler.timesteps - num_inference_steps = len(timesteps) - elif sigmas is not None: - accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys()) - if not accept_sigmas: - raise ValueError( - f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" - f" sigmas schedules. Please check whether you are using the correct scheduler." 
- ) - scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs) - timesteps = scheduler.timesteps - num_inference_steps = len(timesteps) - else: - scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) - timesteps = scheduler.timesteps - return timesteps, num_inference_steps - - -def resize_mask(mask, latent, process_first_frame_only=True): - latent_size = latent.size() - batch_size, channels, num_frames, height, width = mask.shape - - if process_first_frame_only: - target_size = list(latent_size[2:]) - target_size[0] = 1 - first_frame_resized = F.interpolate( - mask[:, :, 0:1, :, :], - size=target_size, - mode='trilinear', - align_corners=False - ) - - target_size = list(latent_size[2:]) - target_size[0] = target_size[0] - 1 - if target_size[0] != 0: - remaining_frames_resized = F.interpolate( - mask[:, :, 1:, :, :], - size=target_size, - mode='trilinear', - align_corners=False - ) - resized_mask = torch.cat([first_frame_resized, remaining_frames_resized], dim=2) - else: - resized_mask = first_frame_resized - else: - target_size = list(latent_size[2:]) - resized_mask = F.interpolate( - mask, - size=target_size, - mode='trilinear', - align_corners=False - ) - return resized_mask - -def add_noise_to_reference_video(image, ratio=None): - if ratio is None: - sigma = torch.normal(mean=-3.0, std=0.5, size=(image.shape[0],)).to(image.device) - sigma = torch.exp(sigma).to(image.dtype) - else: - sigma = torch.ones((image.shape[0],)).to(image.device, image.dtype) * ratio - - image_noise = torch.randn_like(image) * sigma[:, None, None, None, None] - image_noise = torch.where(image==-1, torch.zeros_like(image), image_noise) - image = image + image_noise - return image - -@dataclass -class CogVideoX_Fun_PipelineOutput(BaseOutput): - r""" - Output class for CogVideo pipelines. - - Args: - video (`torch.Tensor`, `np.ndarray`, or List[List[PIL.Image.Image]]): - List of video outputs - It can be a nested list of length `batch_size,` with each sub-list containing - denoised PIL image sequences of length `num_frames.` It can also be a NumPy array or Torch tensor of shape - `(batch_size, num_frames, channels, height, width)`. - """ - - videos: torch.Tensor - - -class CogVideoX_Fun_Pipeline_Inpaint(DiffusionPipeline): - r""" - Pipeline for text-to-video generation using CogVideoX. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. - transformer ([`CogVideoXTransformer3DModel`]): - A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `transformer` to denoise the encoded video latents. 
- """ - - _optional_components = [] - model_cpu_offload_seq = "vae->transformer->vae" - - _callback_tensor_inputs = [ - "latents", - "prompt_embeds", - "negative_prompt_embeds", - ] - - def __init__( - self, - vae: AutoencoderKLCogVideoX, - transformer: CogVideoXTransformer3DModel, - scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler], - ): - super().__init__() - - self.register_modules( - vae=vae, transformer=transformer, scheduler=scheduler - ) - self.vae_scale_factor_spatial = ( - 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 - ) - self.vae_scale_factor_temporal = ( - self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4 - ) - - self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) - - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) - self.mask_processor = VaeImageProcessor( - vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_binarize=True, do_convert_grayscale=True - ) - - def prepare_latents( - self, - batch_size, - num_channels_latents, - height, - width, - video_length, - dtype, - device, - generator, - latents=None, - video=None, - timestep=None, - is_strength_max=True, - return_noise=False, - return_video_latents=False, - context_size=None, - context_overlap=None, - freenoise=False, - ): - shape = ( - batch_size, - (video_length - 1) // self.vae_scale_factor_temporal + 1, - num_channels_latents, - height // self.vae_scale_factor_spatial, - width // self.vae_scale_factor_spatial, - ) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if return_video_latents or (latents is None and not is_strength_max): - video = video.to(device=device, dtype=self.vae.dtype) - - bs = 1 - new_video = [] - for i in range(0, video.shape[0], bs): - video_bs = video[i : i + bs] - video_bs = self.vae.encode(video_bs)[0] - video_bs = video_bs.sample() - new_video.append(video_bs) - video = torch.cat(new_video, dim = 0) - video = video * self.vae.config.scaling_factor - - video_latents = video.repeat(batch_size // video.shape[0], 1, 1, 1, 1) - video_latents = video_latents.to(device=device, dtype=dtype) - video_latents = rearrange(video_latents, "b c f h w -> b f c h w") - - if latents is None: - noise = randn_tensor(shape, generator=generator, device=torch.device("cpu"), dtype=dtype) - if freenoise: - print("Applying FreeNoise") - # code and comments from AnimateDiff-Evolved by Kosinkadink (https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved) - video_length = video_length // 4 - delta = context_size - context_overlap - for start_idx in range(0, video_length-context_size, delta): - # start_idx corresponds to the beginning of a context window - # goal: place shuffled in the delta region right after the end of the context window - # if space after context window is not enough to place the noise, adjust and finish - place_idx = start_idx + context_size - # if place_idx is outside the valid indexes, we are already finished - if place_idx >= video_length: - break - end_idx = place_idx - 1 - #print("video_length:", video_length, "start_idx:", start_idx, "end_idx:", end_idx, "place_idx:", place_idx, "delta:", delta) - - # if there is not enough room to copy delta amount of indexes, copy limited amount and finish - if end_idx + delta >= video_length: - final_delta = video_length - place_idx - # generate list of indexes in final delta region - list_idx = torch.tensor(list(range(start_idx,start_idx+final_delta)), device=torch.device("cpu"), dtype=torch.long) - # shuffle list - list_idx = list_idx[torch.randperm(final_delta, generator=generator)] - # apply shuffled indexes - noise[:, place_idx:place_idx + final_delta, :, :, :] = noise[:, list_idx, :, :, :] - break - # otherwise, do normal behavior - # generate list of indexes in delta region - list_idx = torch.tensor(list(range(start_idx,start_idx+delta)), device=torch.device("cpu"), dtype=torch.long) - # shuffle list - list_idx = list_idx[torch.randperm(delta, generator=generator)] - # apply shuffled indexes - #print("place_idx:", place_idx, "delta:", delta, "list_idx:", list_idx) - noise[:, place_idx:place_idx + delta, :, :, :] = noise[:, list_idx, :, :, :] - - # if strength is 1. 
-            # if strength is 1, initialize the latents to pure noise; otherwise initialize to image + noise
-            latents = noise if is_strength_max else self.scheduler.add_noise(video_latents.to(noise), noise, timestep)
-            # if pure noise then scale the initial latents by the scheduler's init sigma
-            latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
-            latents = latents.to(device)
-        else:
-            noise = latents.to(device)
-            # scale the initial noise by the standard deviation required by the scheduler
-            latents = noise * self.scheduler.init_noise_sigma
-
-        outputs = (latents,)
-
-        if return_noise:
-            outputs += (noise,)
-
-        if return_video_latents:
-            outputs += (video_latents,)
-
-        return outputs
-
-    def prepare_mask_latents(
-        self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance, noise_aug_strength
-    ):
-        # resize the mask to the latents shape as we concatenate the mask to the latents
-        # we do that before converting the dtype to avoid breaking in case we're using cpu_offload
-        # and half precision
-
-        if mask is not None:
-            mask = mask.to(device=device, dtype=self.vae.dtype)
-            bs = 1
-            new_mask = []
-            for i in range(0, mask.shape[0], bs):
-                mask_bs = mask[i : i + bs]
-                mask_bs = self.vae.encode(mask_bs)[0]
-                mask_bs = mask_bs.mode()
-                new_mask.append(mask_bs)
-            mask = torch.cat(new_mask, dim=0)
-            mask = mask * self.vae.config.scaling_factor
-
-        if masked_image is not None:
-            if self.transformer.config.add_noise_in_inpaint_model:
-                masked_image = add_noise_to_reference_video(masked_image, ratio=noise_aug_strength)
-            masked_image = masked_image.to(device=device, dtype=self.vae.dtype)
-            bs = 1
-            new_mask_pixel_values = []
-            for i in range(0, masked_image.shape[0], bs):
-                mask_pixel_values_bs = masked_image[i : i + bs]
-                mask_pixel_values_bs = self.vae.encode(mask_pixel_values_bs)[0]
-                mask_pixel_values_bs = mask_pixel_values_bs.mode()
-                new_mask_pixel_values.append(mask_pixel_values_bs)
-            masked_image_latents = torch.cat(new_mask_pixel_values, dim=0)
-            masked_image_latents = masked_image_latents * self.vae.config.scaling_factor
-        else:
-            masked_image_latents = None
-
-        return mask, masked_image_latents
-
-    def decode_latents(self, latents: torch.Tensor) -> torch.Tensor:
-        latents = latents.permute(0, 2, 1, 3, 4)  # [batch_size, num_channels, num_frames, height, width]
-        latents = 1 / self.vae.config.scaling_factor * latents
-
-        frames = self.vae.decode(latents).sample
-        frames = (frames / 2 + 0.5).clamp(0, 1)
-        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
-        frames = frames.cpu().float().numpy()
-        return frames
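A condensed sketch of the strength-based initialization performed in prepare_latents above; a generic DDIMScheduler and random tensors stand in for the pipeline's own scheduler and VAE-encoded video, so the values are illustrative only.

import torch
from diffusers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(50)

video_latents = torch.randn(1, 13, 16, 60, 90)  # stand-in for the encoded input video
noise = torch.randn_like(video_latents)
strength = 0.6
is_strength_max = strength == 1.0

# first timestep actually denoised once the schedule has been trimmed by strength
t0 = scheduler.timesteps[int(50 * (1 - strength)):][:1]
latents = noise if is_strength_max else scheduler.add_noise(video_latents, noise, t0)
if is_strength_max:
    latents = latents * scheduler.init_noise_sigma  # pure-noise case only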
-    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
-    def prepare_extra_step_kwargs(self, generator, eta):
-        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
-        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
-        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
-        # and should be between [0, 1]
-
-        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
-        extra_step_kwargs = {}
-        if accepts_eta:
-            extra_step_kwargs["eta"] = eta
-
-        # check if the scheduler accepts generator
-        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
-        if accepts_generator:
-            extra_step_kwargs["generator"] = generator
-        return extra_step_kwargs
-
-    # Copied from diffusers.pipelines.latte.pipeline_latte.LattePipeline.check_inputs
-    def check_inputs(
-        self,
-        prompt,
-        height,
-        width,
-        negative_prompt,
-        callback_on_step_end_tensor_inputs,
-        prompt_embeds=None,
-        negative_prompt_embeds=None,
-    ):
-        if height % 8 != 0 or width % 8 != 0:
-            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
-
-        if callback_on_step_end_tensor_inputs is not None and not all(
-            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
-        ):
-            raise ValueError(
-                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
-            )
-        if prompt is not None and prompt_embeds is not None:
-            raise ValueError(
-                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
-                " only forward one of the two."
-            )
-        elif prompt is None and prompt_embeds is None:
-            raise ValueError(
-                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
-            )
-        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
-            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
-
-        if prompt is not None and negative_prompt_embeds is not None:
-            raise ValueError(
-                f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
-                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
-            )
-
-        if negative_prompt is not None and negative_prompt_embeds is not None:
-            raise ValueError(
-                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
-                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
-            )
-
-        if prompt_embeds is not None and negative_prompt_embeds is not None:
-            if prompt_embeds.shape != negative_prompt_embeds.shape:
-                raise ValueError(
-                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
-                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
-                    f" {negative_prompt_embeds.shape}."
-                )
-
-    def fuse_qkv_projections(self) -> None:
-        r"""Enables fused QKV projections."""
-        self.fusing_transformer = True
-        self.transformer.fuse_qkv_projections()
-
-    def unfuse_qkv_projections(self) -> None:
-        r"""Disable QKV projection fusion if enabled."""
-        if not self.fusing_transformer:
-            logger.warning("The Transformer was not initially fused for QKV projections. 
Doing nothing.") - else: - self.transformer.unfuse_qkv_projections() - self.fusing_transformer = False - - def _prepare_rotary_positional_embeddings( - self, - height: int, - width: int, - num_frames: int, - device: torch.device, - start_frame: Optional[int] = None, - end_frame: Optional[int] = None, - context_frames: Optional[int] = None, - ) -> Tuple[torch.Tensor, torch.Tensor]: - grid_height = height // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) - grid_width = width // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) - base_size_width = 720 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) - base_size_height = 480 // (self.vae_scale_factor_spatial * self.transformer.config.patch_size) - - grid_crops_coords = get_resize_crop_region_for_grid( - (grid_height, grid_width), base_size_width, base_size_height - ) - freqs_cos, freqs_sin = get_3d_rotary_pos_embed( - embed_dim=self.transformer.config.attention_head_dim, - crops_coords=grid_crops_coords, - grid_size=(grid_height, grid_width), - temporal_size=num_frames, - use_real=True, - ) - - if start_frame is not None or context_frames is not None: - freqs_cos = freqs_cos.view(num_frames, grid_height * grid_width, -1) - freqs_sin = freqs_sin.view(num_frames, grid_height * grid_width, -1) - if context_frames is not None: - freqs_cos = freqs_cos[context_frames] - freqs_sin = freqs_sin[context_frames] - else: - freqs_cos = freqs_cos[start_frame:end_frame] - freqs_sin = freqs_sin[start_frame:end_frame] - - freqs_cos = freqs_cos.view(-1, freqs_cos.shape[-1]) - freqs_sin = freqs_sin.view(-1, freqs_sin.shape[-1]) - - freqs_cos = freqs_cos.to(device=device) - freqs_sin = freqs_sin.to(device=device) - return freqs_cos, freqs_sin - - @property - def guidance_scale(self): - return self._guidance_scale - - @property - def num_timesteps(self): - return self._num_timesteps - - @property - def interrupt(self): - return self._interrupt - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps - def get_timesteps(self, num_inference_steps, strength, device): - # get the original timestep using init_timestep - init_timestep = min(int(num_inference_steps * strength), num_inference_steps) - - t_start = max(num_inference_steps - init_timestep, 0) - timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :] - - return timesteps, num_inference_steps - t_start - - @torch.no_grad() - @replace_example_docstring(EXAMPLE_DOC_STRING) - def __call__( - self, - prompt: Optional[Union[str, List[str]]] = None, - negative_prompt: Optional[Union[str, List[str]]] = None, - height: int = 480, - width: int = 720, - video: Union[torch.FloatTensor] = None, - mask_video: Union[torch.FloatTensor] = None, - masked_video_latents: Union[torch.FloatTensor] = None, - num_frames: int = 49, - num_inference_steps: int = 50, - timesteps: Optional[List[int]] = None, - guidance_scale: float = 6, - use_dynamic_cfg: bool = False, - num_videos_per_prompt: int = 1, - eta: float = 0.0, - generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, - latents: Optional[torch.FloatTensor] = None, - prompt_embeds: Optional[torch.FloatTensor] = None, - negative_prompt_embeds: Optional[torch.FloatTensor] = None, - output_type: str = "numpy", - return_dict: bool = False, - callback_on_step_end: Optional[ - Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks] - ] = None, - callback_on_step_end_tensor_inputs: 
List[str] = ["latents"], - max_sequence_length: int = 226, - strength: float = 1, - noise_aug_strength: float = 0.0563, - comfyui_progressbar: bool = False, - context_schedule: Optional[str] = None, - context_frames: Optional[int] = None, - context_stride: Optional[int] = None, - context_overlap: Optional[int] = None, - freenoise: Optional[bool] = True, - tora: Optional[dict] = None, - ) -> Union[CogVideoX_Fun_PipelineOutput, Tuple]: - """ - Function invoked when calling the pipeline for generation. - - Args: - prompt (`str` or `List[str]`, *optional*): - The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. - instead. - negative_prompt (`str` or `List[str]`, *optional*): - The prompt or prompts not to guide the image generation. If not defined, one has to pass - `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is - less than `1`). - height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The height in pixels of the generated image. This is set to 1024 by default for the best results. - width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The width in pixels of the generated image. This is set to 1024 by default for the best results. - num_frames (`int`, defaults to `48`): - Number of frames to generate. Must be divisible by self.vae_scale_factor_temporal. Generated video will - contain 1 extra frame because CogVideoX_Fun is conditioned with (num_seconds * fps + 1) frames where - num_seconds is 6 and fps is 4. However, since videos can be saved at any fps, the only condition that - needs to be satisfied is that of divisibility mentioned above. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - timesteps (`List[int]`, *optional*): - Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument - in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is - passed will be used. Must be in descending order. - guidance_scale (`float`, *optional*, defaults to 7.0): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - num_videos_per_prompt (`int`, *optional*, defaults to 1): - The number of videos to generate per prompt. - generator (`torch.Generator` or `List[torch.Generator]`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - latents (`torch.FloatTensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - prompt_embeds (`torch.FloatTensor`, *optional*): - Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
-            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
-                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
-                weighting. If not provided, negative_prompt_embeds will be generated from the `negative_prompt` input
-                argument.
-            output_type (`str`, *optional*, defaults to `"numpy"`):
-                The output format of the generated video. Choose between
-                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
-            return_dict (`bool`, *optional*, defaults to `False`):
-                Whether or not to return a [`CogVideoX_Fun_PipelineOutput`] instead of a plain tuple.
-            callback_on_step_end (`Callable`, *optional*):
-                A function that is called at the end of each denoising step during inference. The function is called
-                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
-                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
-                `callback_on_step_end_tensor_inputs`.
-            callback_on_step_end_tensor_inputs (`List`, *optional*):
-                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
-                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
-                `._callback_tensor_inputs` attribute of your pipeline class.
-            max_sequence_length (`int`, defaults to `226`):
-                Maximum sequence length in the encoded prompt. Must be consistent with
-                `self.transformer.config.max_text_seq_length`, otherwise results may be poor.
-
-        Examples:
-
-        Returns:
-            [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] or `tuple`:
-            [`~pipelines.cogvideo.pipeline_cogvideox.CogVideoX_Fun_PipelineOutput`] if `return_dict` is True, otherwise a
-            `tuple`. When returning a tuple, the first element is a list with the generated video frames.
-        """
-
-        # if num_frames > 49:
-        #     raise ValueError(
-        #         "The number of frames must be less than 49 for now due to static positional embeddings. This will be updated in the future to remove this limitation."
-        #     )
-
-        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
-            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
-
-        height = height or self.transformer.config.sample_size * self.vae_scale_factor_spatial
-        width = width or self.transformer.config.sample_size * self.vae_scale_factor_spatial
-        num_videos_per_prompt = 1
-
-        # 1. Check inputs. Raise error if not correct
-        self.check_inputs(
-            prompt,
-            height,
-            width,
-            negative_prompt,
-            callback_on_step_end_tensor_inputs,
-            prompt_embeds,
-            negative_prompt_embeds,
-        )
-        self._guidance_scale = guidance_scale
-        self._interrupt = False
-
-        # 2. Default call parameters
-        if prompt is not None and isinstance(prompt, str):
-            batch_size = 1
-        elif prompt is not None and isinstance(prompt, list):
-            batch_size = len(prompt)
-        else:
-            batch_size = prompt_embeds.shape[0]
-
-        device = self._execution_device
-
-        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
-        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
-        # corresponds to doing no classifier-free guidance.
-        do_classifier_free_guidance = guidance_scale > 1.0
-
-        if do_classifier_free_guidance:
-            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
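A worked example of the schedule trimming that get_timesteps (defined earlier in this file) applies in the next step: with 50 steps and strength 0.6, the first 20 timesteps are skipped, so denoising starts from a 60%-noised version of the input video rather than from pure noise.

num_inference_steps, strength = 50, 0.6
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
# timesteps = self.scheduler.timesteps[t_start * self.scheduler.order:]
print(init_timestep, t_start)  # 30 20 -> 30 denoising steps remain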
-        # 4. Set timesteps
-        self.scheduler.set_timesteps(num_inference_steps, device=device)
-        timesteps, num_inference_steps = self.get_timesteps(
-            num_inference_steps=num_inference_steps, strength=strength, device=device
-        )
-        self._num_timesteps = len(timesteps)
-        if comfyui_progressbar:
-            from comfy.utils import ProgressBar
-            pbar = ProgressBar(num_inference_steps + 2)
-        # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
-        latent_timestep = timesteps[:1].repeat(batch_size * num_videos_per_prompt)
-        # if strength is set to 1, the latents are initialized with pure noise
-        is_strength_max = strength == 1.0
-
-        # 5. Prepare latents.
-        if video is not None:
-            video_length = video.shape[2]
-            init_video = self.image_processor.preprocess(rearrange(video, "b c f h w -> (b f) c h w"), height=height, width=width)
-            init_video = init_video.to(dtype=torch.float32)
-            init_video = rearrange(init_video, "(b f) c h w -> b c f h w", f=video_length)
-        else:
-            init_video = None
-
-        num_channels_latents = self.vae.config.latent_channels
-        num_channels_transformer = self.transformer.config.in_channels
-        return_image_latents = num_channels_transformer == num_channels_latents
-
-        self.vae.to(device)
-
-        latents_outputs = self.prepare_latents(
-            batch_size * num_videos_per_prompt,
-            num_channels_latents,
-            height,
-            width,
-            video_length,
-            self.vae.dtype,
-            device,
-            generator,
-            latents,
-            video=init_video,
-            timestep=latent_timestep,
-            is_strength_max=is_strength_max,
-            return_noise=True,
-            return_video_latents=return_image_latents,
-            context_size=context_frames,
-            context_overlap=context_overlap,
-            freenoise=freenoise,
-        )
-        if return_image_latents:
-            latents, noise, image_latents = latents_outputs
-        else:
-            latents, noise = latents_outputs
-        if comfyui_progressbar:
-            pbar.update(1)
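A minimal sketch of bringing a pixel-space video mask onto the latent grid, mirroring the tile + trilinear interpolation used in the inpainting branch that follows; all shapes are illustrative (49 frames at 480x720 mapping to 13x60x90 latents with 16 channels).

import torch
import torch.nn.functional as F

mask_condition = torch.rand(1, 1, 49, 480, 720)      # [B, 1, F, H, W], values in [0, 1]
mask = torch.tile(mask_condition, [1, 16, 1, 1, 1])  # broadcast to the latent channel count
mask = F.interpolate(mask, size=(13, 60, 90), mode="trilinear", align_corners=True)
mask = mask.permute(0, 2, 1, 3, 4)                   # [B, F, C, h, w], matching the latents
print(mask.shape)  # torch.Size([1, 13, 16, 60, 90])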
-        if mask_video is not None:
-            if (mask_video == 255).all():
-                mask_latents = torch.zeros_like(latents)[:, :, :1].to(latents.device, latents.dtype)
-                masked_video_latents = torch.zeros_like(latents).to(latents.device, latents.dtype)
-
-                mask_input = torch.cat([mask_latents] * 2) if do_classifier_free_guidance else mask_latents
-                masked_video_latents_input = (
-                    torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
-                )
-                inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=2).to(latents.dtype)
-            else:
-                # Prepare mask latent variables
-                video_length = video.shape[2]
-                mask_condition = self.mask_processor.preprocess(rearrange(mask_video, "b c f h w -> (b f) c h w"), height=height, width=width)
-                mask_condition = mask_condition.to(dtype=torch.float32)
-                mask_condition = rearrange(mask_condition, "(b f) c h w -> b c f h w", f=video_length)
-
-                if num_channels_transformer != num_channels_latents:
-                    mask_condition_tile = torch.tile(mask_condition, [1, 3, 1, 1, 1])
-                    if masked_video_latents is None:
-                        masked_video = init_video * (mask_condition_tile < 0.5) + torch.ones_like(init_video) * (mask_condition_tile > 0.5) * -1
-                    else:
-                        masked_video = masked_video_latents
-
-                    _, masked_video_latents = self.prepare_mask_latents(
-                        None,
-                        masked_video,
-                        batch_size,
-                        height,
-                        width,
-                        self.vae.dtype,
-                        device,
-                        generator,
-                        do_classifier_free_guidance,
-                        noise_aug_strength=noise_aug_strength,
-                    )
-                    mask_latents = resize_mask(1 - mask_condition, masked_video_latents)
-                    mask_latents = mask_latents.to(masked_video_latents.device) * self.vae.config.scaling_factor
-
-                    mask = torch.tile(mask_condition, [1, num_channels_latents, 1, 1, 1])
-                    mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)
-
-                    mask_input = torch.cat([mask_latents] * 2) if do_classifier_free_guidance else mask_latents
-                    masked_video_latents_input = (
-                        torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
-                    )
-
-                    mask = rearrange(mask, "b c f h w -> b f c h w")
-                    mask_input = rearrange(mask_input, "b c f h w -> b f c h w")
-                    masked_video_latents_input = rearrange(masked_video_latents_input, "b c f h w -> b f c h w")
-
-                    inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=2).to(latents.dtype)
-                else:
-                    mask = torch.tile(mask_condition, [1, num_channels_latents, 1, 1, 1])
-                    mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)
-                    mask = rearrange(mask, "b c f h w -> b f c h w")
-
-                    inpaint_latents = None
-        else:
-            if num_channels_transformer != num_channels_latents:
-                mask = torch.zeros_like(latents).to(latents.device, latents.dtype)
-                masked_video_latents = torch.zeros_like(latents).to(latents.device, latents.dtype)
-
-                mask_input = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
-                masked_video_latents_input = (
-                    torch.cat([masked_video_latents] * 2) if do_classifier_free_guidance else masked_video_latents
-                )
-                inpaint_latents = torch.cat([mask_input, masked_video_latents_input], dim=1).to(latents.dtype)
-            else:
-                mask = torch.zeros_like(init_video[:, :1])
-                mask = torch.tile(mask, [1, num_channels_latents, 1, 1, 1])
-                mask = F.interpolate(mask, size=latents.size()[-3:], mode='trilinear', align_corners=True).to(latents.device, latents.dtype)
-                mask = rearrange(mask, "b c f h w -> b f c h w")
-
-                inpaint_latents = None
-
-        self.vae.to(torch.device("cpu"))
-
-        if comfyui_progressbar:
-            pbar.update(1)
-
-        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
-        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
-
-        # 7. Create the context schedule and rotary embeds if required
-        if context_schedule is not None:
-            print(f"Context schedule enabled: {context_frames} frames, {context_stride} stride, {context_overlap} overlap")
-            use_context_schedule = True
-            from .context import get_context_scheduler
-            context = get_context_scheduler(context_schedule)
-        else:
-            use_context_schedule = False
-            print("Context schedule disabled")
-
-        image_rotary_emb = (
-            self._prepare_rotary_positional_embeddings(height, width, latents.size(1), device)
-            if self.transformer.config.use_rotary_positional_embeddings
-            else None
-        )
-
-        if tora is not None:
-            if do_classifier_free_guidance:
-                video_flow_features = tora["video_flow_features"].repeat(1, 2, 1, 1, 1).contiguous()
-            else:
-                video_flow_features = tora["video_flow_features"]
-            trajectory_length = tora["video_flow_features"].shape[1]
-            logger.info(f"Tora trajectory length: {trajectory_length}")
-            logger.info(f"Tora trajectory shape: {tora['video_flow_features'].shape}")
-            logger.info(f"latents shape: {latents.shape}")
-            if trajectory_length != latents.shape[1]:
-                raise ValueError(f"Tora trajectory length {trajectory_length} does not match latent count {latents.shape[1]}")
-            for module in self.transformer.fuser_list:
-                for param in module.parameters():
-                    param.data = param.data.to(device)
-
-        # 8.
Denoising loop - num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) - - from ..latent_preview import prepare_callback - callback = prepare_callback(self.transformer, num_inference_steps) - - with self.progress_bar(total=num_inference_steps) as progress_bar: - # for DPM-solver++ - old_pred_original_sample = None - for i, t in enumerate(timesteps): - if self.interrupt: - continue - - if use_context_schedule: - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # broadcast to batch dimension in a way that's compatible with ONNX/Core ML - timestep = t.expand(latent_model_input.shape[0]) - - context_queue = list(context( - i, num_inference_steps, latents.shape[1], context_frames, context_stride, context_overlap, - )) - counter = torch.zeros_like(latent_model_input) - noise_pred = torch.zeros_like(latent_model_input) - - current_step_percentage = i / num_inference_steps - - image_rotary_emb = ( - self._prepare_rotary_positional_embeddings(height, width, context_frames, device) - if self.transformer.config.use_rotary_positional_embeddings - else None - ) - - for c in context_queue: - partial_latent_model_input = latent_model_input[:, c, :, :, :] - partial_inpaint_latents = inpaint_latents[:, c, :, :, :] - partial_inpaint_latents[:, 0, :, :, :] = inpaint_latents[:, 0, :, :, :] - if (tora is not None and tora["start_percent"] <= current_step_percentage <= tora["end_percent"]): - if do_classifier_free_guidance: - partial_video_flow_features = tora["video_flow_features"][:, c, :, :, :].repeat(1, 2, 1, 1, 1).contiguous() - else: - partial_video_flow_features = tora["video_flow_features"][:, c, :, :, :] - else: - partial_video_flow_features = None - - # predict noise model_output - noise_pred[:, c, :, :, :] += self.transformer( - hidden_states=partial_latent_model_input, - encoder_hidden_states=prompt_embeds, - timestep=timestep, - image_rotary_emb=image_rotary_emb, - return_dict=False, - inpaint_latents=partial_inpaint_latents, - video_flow_features=partial_video_flow_features - )[0] - - counter[:, c, :, :, :] += 1 - - noise_pred = noise_pred.float() - - noise_pred /= counter - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - if not isinstance(self.scheduler, CogVideoXDPMScheduler): - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - else: - latents, old_pred_original_sample = self.scheduler.step( - noise_pred, - old_pred_original_sample, - t, - timesteps[i - 1] if i > 0 else None, - latents, - **extra_step_kwargs, - return_dict=False, - ) - latents = latents.to(prompt_embeds.dtype) - - # call the callback, if provided - if callback_on_step_end is not None: - callback_kwargs = {} - for k in callback_on_step_end_tensor_inputs: - callback_kwargs[k] = locals()[k] - callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) - - latents = callback_outputs.pop("latents", latents) - prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) - negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds) - - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if comfyui_progressbar: - 
pbar.update(1) - - else: - latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - - # broadcast to batch dimension in a way that's compatible with ONNX/Core ML - timestep = t.expand(latent_model_input.shape[0]) - - current_step_percentage = i / num_inference_steps - - # predict noise model_output - noise_pred = self.transformer( - hidden_states=latent_model_input, - encoder_hidden_states=prompt_embeds, - timestep=timestep, - image_rotary_emb=image_rotary_emb, - return_dict=False, - inpaint_latents=inpaint_latents, - video_flow_features=video_flow_features if (tora is not None and tora["start_percent"] <= current_step_percentage <= tora["end_percent"]) else None, - - )[0] - noise_pred = noise_pred.float() - - # perform guidance - if use_dynamic_cfg: - self._guidance_scale = 1 + guidance_scale * ( - (1 - math.cos(math.pi * ((num_inference_steps - t.item()) / num_inference_steps) ** 5.0)) / 2 - ) - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - if not isinstance(self.scheduler, CogVideoXDPMScheduler): - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0] - else: - latents, old_pred_original_sample = self.scheduler.step( - noise_pred, - old_pred_original_sample, - t, - timesteps[i - 1] if i > 0 else None, - latents, - **extra_step_kwargs, - return_dict=False, - ) - latents = latents.to(prompt_embeds.dtype) - - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if comfyui_progressbar: - if callback is not None: - callback(i, latents.detach()[-1], None, num_inference_steps) - else: - pbar.update(1) - - # Offload all models - self.maybe_free_model_hooks() - - return latents \ No newline at end of file diff --git a/cogvideox_fun/transformer_3d.py b/cogvideox_fun/transformer_3d.py deleted file mode 100644 index 5b6fef9..0000000 --- a/cogvideox_fun/transformer_3d.py +++ /dev/null @@ -1,823 +0,0 @@ -# Copyright 2024 The CogVideoX team, Tsinghua University & ZhipuAI and The HuggingFace Team. -# All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
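Before the deleted transformer implementation below, a small sketch of the dynamic-CFG ramp used in the denoising loop above, rewritten against a normalized progress value p in [0, 1] for clarity (the pipeline plugs raw timestep values into the same cosine expression); guidance_scale = 6 is assumed.

import math

def dynamic_cfg(p: float, guidance_scale: float = 6.0) -> float:
    # effective guidance grows from 1 toward 1 + guidance_scale along a cosine in p**5
    return 1 + guidance_scale * (1 - math.cos(math.pi * p ** 5.0)) / 2

for p in (0.0, 0.25, 0.5, 0.75, 1.0):
    print(f"progress {p:.2f} -> guidance {dynamic_cfg(p):.2f}")
# progress 0.00 -> guidance 1.00 ... progress 1.00 -> guidance 7.00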
-
-from typing import Any, Dict, Optional, Tuple, Union
-
-import os
-import json
-import torch
-import glob
-import torch.nn.functional as F
-from torch import nn
-
-from diffusers.configuration_utils import ConfigMixin, register_to_config
-from diffusers.utils import is_torch_version, logging
-from diffusers.utils.torch_utils import maybe_allow_in_graph
-from diffusers.models.attention import Attention, FeedForward
-from diffusers.models.attention_processor import AttentionProcessor  #, CogVideoXAttnProcessor2_0, FusedCogVideoXAttnProcessor2_0
-from diffusers.models.embeddings import TimestepEmbedding, Timesteps, get_3d_sincos_pos_embed
-from diffusers.models.modeling_outputs import Transformer2DModelOutput
-from diffusers.models.modeling_utils import ModelMixin
-from diffusers.models.normalization import AdaLayerNorm, CogVideoXLayerNormZero
-
-logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
-
-from einops import rearrange
-
-try:
-    from sageattention import sageattn
-    SAGEATTN_IS_AVAILABLE = True
-except Exception:
-    SAGEATTN_IS_AVAILABLE = False
-
-def fft(tensor):
-    # split a (B, C, H, W) tensor into low- and high-frequency components in the
-    # shifted 2D FFT domain, using a circular low-pass mask of radius min(H, W) // 5
-    tensor_fft = torch.fft.fft2(tensor)
-    tensor_fft_shifted = torch.fft.fftshift(tensor_fft)
-    B, C, H, W = tensor.size()
-    radius = min(H, W) // 5
-
-    Y, X = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
-    center_x, center_y = W // 2, H // 2
-    mask = (X - center_x) ** 2 + (Y - center_y) ** 2 <= radius ** 2
-    low_freq_mask = mask.unsqueeze(0).unsqueeze(0).to(tensor.device)
-    high_freq_mask = ~low_freq_mask
-
-    low_freq_fft = tensor_fft_shifted * low_freq_mask
-    high_freq_fft = tensor_fft_shifted * high_freq_mask
-
-    return low_freq_fft, high_freq_fft
-
-class CogVideoXAttnProcessor2_0:
-    r"""
-    Processor for implementing scaled dot-product attention for the CogVideoX model. It applies a rotary embedding on
-    query and key vectors, but does not include spatial normalization.
- """ - - def __init__(self): - if not hasattr(F, "scaled_dot_product_attention"): - raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") - - def __call__( - self, - attn: Attention, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - attention_mask: Optional[torch.Tensor] = None, - image_rotary_emb: Optional[torch.Tensor] = None, - attention_mode: Optional[str] = None, - ) -> torch.Tensor: - text_seq_length = encoder_hidden_states.size(1) - - hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - - batch_size, sequence_length, _ = ( - hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape - ) - - if attention_mask is not None: - attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) - attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) - - query = attn.to_q(hidden_states) - key = attn.to_k(hidden_states) - value = attn.to_v(hidden_states) - - inner_dim = key.shape[-1] - head_dim = inner_dim // attn.heads - - query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) - - if attn.norm_q is not None: - query = attn.norm_q(query) - if attn.norm_k is not None: - key = attn.norm_k(key) - - # Apply RoPE if needed - if image_rotary_emb is not None: - from diffusers.models.embeddings import apply_rotary_emb - - query[:, :, text_seq_length:] = apply_rotary_emb(query[:, :, text_seq_length:], image_rotary_emb) - if not attn.is_cross_attention: - key[:, :, text_seq_length:] = apply_rotary_emb(key[:, :, text_seq_length:], image_rotary_emb) - - if attention_mode == "sageattn": - if SAGEATTN_IS_AVAILABLE: - hidden_states = sageattn(query, key, value, attn_mask=attention_mask, dropout_p=0.0,is_causal=False) - else: - raise ImportError("sageattn not found") - else: - hidden_states = F.scaled_dot_product_attention( - query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False - ) - - hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) - - # linear proj - hidden_states = attn.to_out[0](hidden_states) - # dropout - hidden_states = attn.to_out[1](hidden_states) - - encoder_hidden_states, hidden_states = hidden_states.split( - [text_seq_length, hidden_states.size(1) - text_seq_length], dim=1 - ) - return hidden_states, encoder_hidden_states - -class CogVideoXPatchEmbed(nn.Module): - def __init__( - self, - patch_size: int = 2, - in_channels: int = 16, - embed_dim: int = 1920, - text_embed_dim: int = 4096, - bias: bool = True, - ) -> None: - super().__init__() - self.patch_size = patch_size - - self.proj = nn.Conv2d( - in_channels, embed_dim, kernel_size=(patch_size, patch_size), stride=patch_size, bias=bias - ) - self.text_proj = nn.Linear(text_embed_dim, embed_dim) - - def forward(self, text_embeds: torch.Tensor, image_embeds: torch.Tensor): - r""" - Args: - text_embeds (`torch.Tensor`): - Input text embeddings. Expected shape: (batch_size, seq_length, embedding_dim). - image_embeds (`torch.Tensor`): - Input image embeddings. Expected shape: (batch_size, num_frames, channels, height, width). 
- """ - text_embeds = self.text_proj(text_embeds) - - batch, num_frames, channels, height, width = image_embeds.shape - image_embeds = image_embeds.reshape(-1, channels, height, width) - image_embeds = self.proj(image_embeds) - image_embeds = image_embeds.view(batch, num_frames, *image_embeds.shape[1:]) - image_embeds = image_embeds.flatten(3).transpose(2, 3) # [batch, num_frames, height x width, channels] - image_embeds = image_embeds.flatten(1, 2) # [batch, num_frames x height x width, channels] - - embeds = torch.cat( - [text_embeds, image_embeds], dim=1 - ).contiguous() # [batch, seq_length + num_frames x height x width, channels] - return embeds - -@maybe_allow_in_graph -class CogVideoXBlock(nn.Module): - r""" - Transformer block used in [CogVideoX](https://github.com/THUDM/CogVideo) model. - - Parameters: - dim (`int`): - The number of channels in the input and output. - num_attention_heads (`int`): - The number of heads to use for multi-head attention. - attention_head_dim (`int`): - The number of channels in each head. - time_embed_dim (`int`): - The number of channels in timestep embedding. - dropout (`float`, defaults to `0.0`): - The dropout probability to use. - activation_fn (`str`, defaults to `"gelu-approximate"`): - Activation function to be used in feed-forward. - attention_bias (`bool`, defaults to `False`): - Whether or not to use bias in attention projection layers. - qk_norm (`bool`, defaults to `True`): - Whether or not to use normalization after query and key projections in Attention. - norm_elementwise_affine (`bool`, defaults to `True`): - Whether to use learnable elementwise affine parameters for normalization. - norm_eps (`float`, defaults to `1e-5`): - Epsilon value for normalization layers. - final_dropout (`bool` defaults to `False`): - Whether to apply a final dropout after the last feed-forward layer. - ff_inner_dim (`int`, *optional*, defaults to `None`): - Custom hidden dimension of Feed-forward layer. If not provided, `4 * dim` is used. - ff_bias (`bool`, defaults to `True`): - Whether or not to use bias in Feed-forward layer. - attention_out_bias (`bool`, defaults to `True`): - Whether or not to use bias in Attention output projection layer. - """ - - def __init__( - self, - dim: int, - num_attention_heads: int, - attention_head_dim: int, - time_embed_dim: int, - dropout: float = 0.0, - activation_fn: str = "gelu-approximate", - attention_bias: bool = False, - qk_norm: bool = True, - norm_elementwise_affine: bool = True, - norm_eps: float = 1e-5, - final_dropout: bool = True, - ff_inner_dim: Optional[int] = None, - ff_bias: bool = True, - attention_out_bias: bool = True, - attention_mode: Optional[str] = None, - ): - super().__init__() - - # 1. Self Attention - self.norm1 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True) - - self.attn1 = Attention( - query_dim=dim, - dim_head=attention_head_dim, - heads=num_attention_heads, - qk_norm="layer_norm" if qk_norm else None, - eps=1e-6, - bias=attention_bias, - out_bias=attention_out_bias, - processor=CogVideoXAttnProcessor2_0(), - ) - - # 2. 
Feed Forward - self.norm2 = CogVideoXLayerNormZero(time_embed_dim, dim, norm_elementwise_affine, norm_eps, bias=True) - - self.ff = FeedForward( - dim, - dropout=dropout, - activation_fn=activation_fn, - final_dropout=final_dropout, - inner_dim=ff_inner_dim, - bias=ff_bias, - ) - self.cached_hidden_states = [] - self.cached_encoder_hidden_states = [] - self.attention_mode = attention_mode - - def forward( - self, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - temb: torch.Tensor, - image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - video_flow_feature: Optional[torch.Tensor] = None, - fuser=None, - block_use_fastercache=False, - fastercache_counter=0, - fastercache_start_step=15, - fastercache_device="cuda:0", - ) -> torch.Tensor: - text_seq_length = encoder_hidden_states.size(1) - - # norm & modulate - norm_hidden_states, norm_encoder_hidden_states, gate_msa, enc_gate_msa = self.norm1( - hidden_states, encoder_hidden_states, temb - ) - # Tora Motion-guidance Fuser - if video_flow_feature is not None: - H, W = video_flow_feature.shape[-2:] - T = norm_hidden_states.shape[1] // H // W - h = rearrange(norm_hidden_states, "B (T H W) C -> (B T) C H W", H=H, W=W) - h = fuser(h, video_flow_feature.to(h), T=T) - norm_hidden_states = rearrange(h, "(B T) C H W -> B (T H W) C", T=T) - del h, fuser - - #region fastercache - if block_use_fastercache: - B = norm_hidden_states.shape[0] - if fastercache_counter >= fastercache_start_step + 3 and fastercache_counter%3!=0 and self.cached_hidden_states[-1].shape[0] >= B: - attn_hidden_states = ( - self.cached_hidden_states[1][:B] + - (self.cached_hidden_states[1][:B] - self.cached_hidden_states[0][:B]) - * 0.3 - ).to(norm_hidden_states.device, non_blocking=True) - attn_encoder_hidden_states = ( - self.cached_encoder_hidden_states[1][:B] + - (self.cached_encoder_hidden_states[1][:B] - self.cached_encoder_hidden_states[0][:B]) - * 0.3 - ).to(norm_hidden_states.device, non_blocking=True) - else: - attn_hidden_states, attn_encoder_hidden_states = self.attn1( - hidden_states=norm_hidden_states, - encoder_hidden_states=norm_encoder_hidden_states, - image_rotary_emb=image_rotary_emb, - attention_mode=self.attention_mode, - ) - if fastercache_counter == fastercache_start_step: - self.cached_hidden_states = [attn_hidden_states.to(fastercache_device), attn_hidden_states.to(fastercache_device)] - self.cached_encoder_hidden_states = [attn_encoder_hidden_states.to(fastercache_device), attn_encoder_hidden_states.to(fastercache_device)] - elif fastercache_counter > fastercache_start_step: - self.cached_hidden_states[-1].copy_(attn_hidden_states.to(fastercache_device)) - self.cached_encoder_hidden_states[-1].copy_(attn_encoder_hidden_states.to(fastercache_device)) - else: - attn_hidden_states, attn_encoder_hidden_states = self.attn1( - hidden_states=norm_hidden_states, - encoder_hidden_states=norm_encoder_hidden_states, - image_rotary_emb=image_rotary_emb, - attention_mode=self.attention_mode, - ) - - hidden_states = hidden_states + gate_msa * attn_hidden_states - encoder_hidden_states = encoder_hidden_states + enc_gate_msa * attn_encoder_hidden_states - - # norm & modulate - norm_hidden_states, norm_encoder_hidden_states, gate_ff, enc_gate_ff = self.norm2( - hidden_states, encoder_hidden_states, temb - ) - - # feed-forward - norm_hidden_states = torch.cat([norm_encoder_hidden_states, norm_hidden_states], dim=1) - ff_output = self.ff(norm_hidden_states) - - hidden_states = hidden_states + gate_ff * ff_output[:, text_seq_length:] - 
encoder_hidden_states = encoder_hidden_states + enc_gate_ff * ff_output[:, :text_seq_length] - - return hidden_states, encoder_hidden_states - - -class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin): - """ - A Transformer model for video-like data in [CogVideoX](https://github.com/THUDM/CogVideo). - - Parameters: - num_attention_heads (`int`, defaults to `30`): - The number of heads to use for multi-head attention. - attention_head_dim (`int`, defaults to `64`): - The number of channels in each head. - in_channels (`int`, defaults to `16`): - The number of channels in the input. - out_channels (`int`, *optional*, defaults to `16`): - The number of channels in the output. - flip_sin_to_cos (`bool`, defaults to `True`): - Whether to flip the sin to cos in the time embedding. - time_embed_dim (`int`, defaults to `512`): - Output dimension of timestep embeddings. - text_embed_dim (`int`, defaults to `4096`): - Input dimension of text embeddings from the text encoder. - num_layers (`int`, defaults to `30`): - The number of layers of Transformer blocks to use. - dropout (`float`, defaults to `0.0`): - The dropout probability to use. - attention_bias (`bool`, defaults to `True`): - Whether or not to use bias in the attention projection layers. - sample_width (`int`, defaults to `90`): - The width of the input latents. - sample_height (`int`, defaults to `60`): - The height of the input latents. - sample_frames (`int`, defaults to `49`): - The number of frames in the input latents. Note that this parameter was incorrectly initialized to 49 - instead of 13 because CogVideoX processed 13 latent frames at once in its default and recommended settings, - but cannot be changed to the correct value to ensure backwards compatibility. To create a transformer with - K latent frames, the correct value to pass here would be: ((K - 1) * temporal_compression_ratio + 1). - patch_size (`int`, defaults to `2`): - The size of the patches to use in the patch embedding layer. - temporal_compression_ratio (`int`, defaults to `4`): - The compression ratio across the temporal dimension. See documentation for `sample_frames`. - max_text_seq_length (`int`, defaults to `226`): - The maximum sequence length of the input text embeddings. - activation_fn (`str`, defaults to `"gelu-approximate"`): - Activation function to use in feed-forward. - timestep_activation_fn (`str`, defaults to `"silu"`): - Activation function to use when generating the timestep embeddings. - norm_elementwise_affine (`bool`, defaults to `True`): - Whether or not to use elementwise affine in normalization layers. - norm_eps (`float`, defaults to `1e-5`): - The epsilon value to use in normalization layers. - spatial_interpolation_scale (`float`, defaults to `1.875`): - Scaling factor to apply in 3D positional embeddings across spatial dimensions. - temporal_interpolation_scale (`float`, defaults to `1.0`): - Scaling factor to apply in 3D positional embeddings across temporal dimensions. 
- """ - - _supports_gradient_checkpointing = True - - @register_to_config - def __init__( - self, - num_attention_heads: int = 30, - attention_head_dim: int = 64, - in_channels: int = 16, - out_channels: Optional[int] = 16, - flip_sin_to_cos: bool = True, - freq_shift: int = 0, - time_embed_dim: int = 512, - text_embed_dim: int = 4096, - num_layers: int = 30, - dropout: float = 0.0, - attention_bias: bool = True, - sample_width: int = 90, - sample_height: int = 60, - sample_frames: int = 49, - patch_size: int = 2, - temporal_compression_ratio: int = 4, - max_text_seq_length: int = 226, - activation_fn: str = "gelu-approximate", - timestep_activation_fn: str = "silu", - norm_elementwise_affine: bool = True, - norm_eps: float = 1e-5, - spatial_interpolation_scale: float = 1.875, - temporal_interpolation_scale: float = 1.0, - use_rotary_positional_embeddings: bool = False, - add_noise_in_inpaint_model: bool = False, - attention_mode: Optional[str] = None, - ): - super().__init__() - inner_dim = num_attention_heads * attention_head_dim - - post_patch_height = sample_height // patch_size - post_patch_width = sample_width // patch_size - post_time_compression_frames = (sample_frames - 1) // temporal_compression_ratio + 1 - self.num_patches = post_patch_height * post_patch_width * post_time_compression_frames - self.post_patch_height = post_patch_height - self.post_patch_width = post_patch_width - self.post_time_compression_frames = post_time_compression_frames - self.patch_size = patch_size - - # 1. Patch embedding - self.patch_embed = CogVideoXPatchEmbed(patch_size, in_channels, inner_dim, text_embed_dim, bias=True) - self.embedding_dropout = nn.Dropout(dropout) - - # 2. 3D positional embeddings - spatial_pos_embedding = get_3d_sincos_pos_embed( - inner_dim, - (post_patch_width, post_patch_height), - post_time_compression_frames, - spatial_interpolation_scale, - temporal_interpolation_scale, - ) - spatial_pos_embedding = torch.from_numpy(spatial_pos_embedding).flatten(0, 1) - pos_embedding = torch.zeros(1, max_text_seq_length + self.num_patches, inner_dim, requires_grad=False) - pos_embedding.data[:, max_text_seq_length:].copy_(spatial_pos_embedding) - self.register_buffer("pos_embedding", pos_embedding, persistent=False) - - # 3. Time embeddings - self.time_proj = Timesteps(inner_dim, flip_sin_to_cos, freq_shift) - self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, timestep_activation_fn) - - # 4. Define spatio-temporal transformers blocks - self.transformer_blocks = nn.ModuleList( - [ - CogVideoXBlock( - dim=inner_dim, - num_attention_heads=num_attention_heads, - attention_head_dim=attention_head_dim, - time_embed_dim=time_embed_dim, - dropout=dropout, - activation_fn=activation_fn, - attention_bias=attention_bias, - norm_elementwise_affine=norm_elementwise_affine, - norm_eps=norm_eps, - ) - for _ in range(num_layers) - ] - ) - self.norm_final = nn.LayerNorm(inner_dim, norm_eps, norm_elementwise_affine) - - # 5. 
Output blocks - self.norm_out = AdaLayerNorm( - embedding_dim=time_embed_dim, - output_dim=2 * inner_dim, - norm_elementwise_affine=norm_elementwise_affine, - norm_eps=norm_eps, - chunk_dim=1, - ) - self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * out_channels) - - self.gradient_checkpointing = False - - self.fuser_list = None - - self.use_fastercache = False - self.fastercache_counter = 0 - self.fastercache_start_step = 15 - self.fastercache_lf_step = 40 - self.fastercache_hf_step = 30 - self.fastercache_device = "cuda" - self.fastercache_num_blocks_to_cache = len(self.transformer_blocks) - self.attention_mode = attention_mode - - def _set_gradient_checkpointing(self, module, value=False): - self.gradient_checkpointing = value - - @property - # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors - def attn_processors(self) -> Dict[str, AttentionProcessor]: - r""" - Returns: - `dict` of attention processors: A dictionary containing all attention processors used in the model with - indexed by its weight name. - """ - # set recursively - processors = {} - - def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): - if hasattr(module, "get_processor"): - processors[f"{name}.processor"] = module.get_processor() - - for sub_name, child in module.named_children(): - fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) - - return processors - - for name, module in self.named_children(): - fn_recursive_add_processors(name, module, processors) - - return processors - - # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor - def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): - r""" - Sets the attention processor to use to compute attention. - - Parameters: - processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): - The instantiated processor class or a dictionary of processor classes that will be set as the processor - for **all** `Attention` layers. - - If `processor` is a dict, the key needs to define the path to the corresponding cross attention - processor. This is strongly recommended when setting trainable attention processors. - - """ - count = len(self.attn_processors.keys()) - - if isinstance(processor, dict) and len(processor) != count: - raise ValueError( - f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" - f" number of attention layers: {count}. Please make sure to pass {count} processor classes." - ) - - def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): - if hasattr(module, "set_processor"): - if not isinstance(processor, dict): - module.set_processor(processor) - else: - module.set_processor(processor.pop(f"{name}.processor")) - - for sub_name, child in module.named_children(): - fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) - - for name, module in self.named_children(): - fn_recursive_attn_processor(name, module, processor) - - # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedCogVideoXAttnProcessor2_0 - def fuse_qkv_projections(self): - """ - Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value) - are fused. For cross-attention modules, key and value projection matrices are fused. - - - - This API is 🧪 experimental. 
- - - """ - self.original_attn_processors = None - - for _, attn_processor in self.attn_processors.items(): - if "Added" in str(attn_processor.__class__.__name__): - raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.") - - self.original_attn_processors = self.attn_processors - - for module in self.modules(): - if isinstance(module, Attention): - module.fuse_projections(fuse=True) - - self.set_attn_processor(FusedCogVideoXAttnProcessor2_0()) - - # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections - def unfuse_qkv_projections(self): - """Disables the fused QKV projection if enabled. - - - - This API is 🧪 experimental. - - - - """ - if self.original_attn_processors is not None: - self.set_attn_processor(self.original_attn_processors) - - def forward( - self, - hidden_states: torch.Tensor, - encoder_hidden_states: torch.Tensor, - timestep: Union[int, float, torch.LongTensor], - timestep_cond: Optional[torch.Tensor] = None, - inpaint_latents: Optional[torch.Tensor] = None, - control_latents: Optional[torch.Tensor] = None, - image_rotary_emb: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, - video_flow_features: Optional[torch.Tensor] = None, - return_dict: bool = True, - ): - batch_size, num_frames, channels, height, width = hidden_states.shape - - # 1. Time embedding - timesteps = timestep - t_emb = self.time_proj(timesteps) - - # timesteps does not contain any weights and will always return f32 tensors - # but time_embedding might actually be running in fp16. so we need to cast here. - # there might be better ways to encapsulate this. - t_emb = t_emb.to(dtype=hidden_states.dtype) - emb = self.time_embedding(t_emb, timestep_cond) - - # 2. Patch embedding - if inpaint_latents is not None: - hidden_states = torch.concat([hidden_states, inpaint_latents], 2) - if control_latents is not None: - hidden_states = torch.concat([hidden_states, control_latents], 2) - hidden_states = self.patch_embed(encoder_hidden_states, hidden_states) - - # 3. Position embedding - text_seq_length = encoder_hidden_states.shape[1] - if not self.config.use_rotary_positional_embeddings: - seq_length = height * width * num_frames // (self.config.patch_size**2) - # pos_embeds = self.pos_embedding[:, : text_seq_length + seq_length] - pos_embeds = self.pos_embedding - emb_size = hidden_states.size()[-1] - pos_embeds_without_text = pos_embeds[:, text_seq_length: ].view(1, self.post_time_compression_frames, self.post_patch_height, self.post_patch_width, emb_size) - pos_embeds_without_text = pos_embeds_without_text.permute([0, 4, 1, 2, 3]) - pos_embeds_without_text = F.interpolate(pos_embeds_without_text,size=[self.post_time_compression_frames, height // self.config.patch_size, width // self.config.patch_size],mode='trilinear',align_corners=False) - pos_embeds_without_text = pos_embeds_without_text.permute([0, 2, 3, 4, 1]).view(1, -1, emb_size) - pos_embeds = torch.cat([pos_embeds[:, :text_seq_length], pos_embeds_without_text], dim = 1) - pos_embeds = pos_embeds[:, : text_seq_length + seq_length] - hidden_states = hidden_states + pos_embeds - hidden_states = self.embedding_dropout(hidden_states) - - encoder_hidden_states = hidden_states[:, :text_seq_length] - hidden_states = hidden_states[:, text_seq_length:] - - if self.use_fastercache: - self.fastercache_counter+=1 - if self.fastercache_counter >= self.fastercache_start_step + 3 and self.fastercache_counter % 5 !=0: - # 4. 
Transformer blocks - for i, block in enumerate(self.transformer_blocks): - hidden_states, encoder_hidden_states = block( - hidden_states=hidden_states[:1], - encoder_hidden_states=encoder_hidden_states[:1], - temb=emb[:1], - image_rotary_emb=image_rotary_emb, - video_flow_feature=video_flow_features[i][:1] if video_flow_features is not None else None, - fuser = self.fuser_list[i] if self.fuser_list is not None else None, - block_use_fastercache = i <= self.fastercache_num_blocks_to_cache, - fastercache_start_step = self.fastercache_start_step, - fastercache_counter = self.fastercache_counter, - fastercache_device = self.fastercache_device - ) - - if not self.config.use_rotary_positional_embeddings: - # CogVideoX-2B - hidden_states = self.norm_final(hidden_states) - else: - # CogVideoX-5B - hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - hidden_states = self.norm_final(hidden_states) - hidden_states = hidden_states[:, text_seq_length:] - - # 5. Final block - hidden_states = self.norm_out(hidden_states, temb=emb[:1]) - hidden_states = self.proj_out(hidden_states) - - # 6. Unpatchify - p = self.config.patch_size - output = hidden_states.reshape(1, num_frames, height // p, width // p, channels, p, p) - output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) - - (bb, tt, cc, hh, ww) = output.shape - cond = rearrange(output, "B T C H W -> (B T) C H W", B=bb, C=cc, T=tt, H=hh, W=ww) - lf_c, hf_c = fft(cond.float()) - #lf_step = 40 - #hf_step = 30 - if self.fastercache_counter <= self.fastercache_lf_step: - self.delta_lf = self.delta_lf * 1.1 - if self.fastercache_counter >= self.fastercache_hf_step: - self.delta_hf = self.delta_hf * 1.1 - - new_hf_uc = self.delta_hf + hf_c - new_lf_uc = self.delta_lf + lf_c - - combine_uc = new_lf_uc + new_hf_uc - combined_fft = torch.fft.ifftshift(combine_uc) - recovered_uncond = torch.fft.ifft2(combined_fft).real - recovered_uncond = rearrange(recovered_uncond.to(output.dtype), "(B T) C H W -> B T C H W", B=bb, C=cc, T=tt, H=hh, W=ww) - output = torch.cat([output, recovered_uncond]) - else: - # 4. Transformer blocks - for i, block in enumerate(self.transformer_blocks): - hidden_states, encoder_hidden_states = block( - hidden_states=hidden_states, - encoder_hidden_states=encoder_hidden_states, - temb=emb, - image_rotary_emb=image_rotary_emb, - video_flow_feature=video_flow_features[i] if video_flow_features is not None else None, - fuser = self.fuser_list[i] if self.fuser_list is not None else None, - block_use_fastercache = i <= self.fastercache_num_blocks_to_cache, - fastercache_counter = self.fastercache_counter, - fastercache_start_step = self.fastercache_start_step, - fastercache_device = self.fastercache_device - ) - - if not self.config.use_rotary_positional_embeddings: - # CogVideoX-2B - hidden_states = self.norm_final(hidden_states) - else: - # CogVideoX-5B - hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) - hidden_states = self.norm_final(hidden_states) - hidden_states = hidden_states[:, text_seq_length:] - - # 5. Final block - hidden_states = self.norm_out(hidden_states, temb=emb) - hidden_states = self.proj_out(hidden_states) - - # 6. 
Unpatchify - p = self.config.patch_size - output = hidden_states.reshape(batch_size, num_frames, height // p, width // p, channels, p, p) - output = output.permute(0, 1, 4, 2, 5, 3, 6).flatten(5, 6).flatten(3, 4) - - if self.fastercache_counter >= self.fastercache_start_step + 1: - (bb, tt, cc, hh, ww) = output.shape - cond = rearrange(output[0:1].float(), "B T C H W -> (B T) C H W", B=bb//2, C=cc, T=tt, H=hh, W=ww) - uncond = rearrange(output[1:2].float(), "B T C H W -> (B T) C H W", B=bb//2, C=cc, T=tt, H=hh, W=ww) - - lf_c, hf_c = fft(cond) - lf_uc, hf_uc = fft(uncond) - - self.delta_lf = lf_uc - lf_c - self.delta_hf = hf_uc - hf_c - - - if not return_dict: - return (output,) - return Transformer2DModelOutput(sample=output) - - @classmethod - def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, transformer_additional_kwargs={}): - if subfolder is not None: - pretrained_model_path = os.path.join(pretrained_model_path, subfolder) - print(f"loaded 3D transformer's pretrained weights from {pretrained_model_path} ...") - - config_file = os.path.join(pretrained_model_path, 'config.json') - if not os.path.isfile(config_file): - raise RuntimeError(f"{config_file} does not exist") - with open(config_file, "r") as f: - config = json.load(f) - - from diffusers.utils import WEIGHTS_NAME - model = cls.from_config(config, **transformer_additional_kwargs) - model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME) - model_file_safetensors = model_file.replace(".bin", ".safetensors") - if os.path.exists(model_file): - state_dict = torch.load(model_file, map_location="cpu") - elif os.path.exists(model_file_safetensors): - from safetensors.torch import load_file, safe_open - state_dict = load_file(model_file_safetensors) - else: - from safetensors.torch import load_file, safe_open - model_files_safetensors = glob.glob(os.path.join(pretrained_model_path, "*.safetensors")) - state_dict = {} - for model_file_safetensors in model_files_safetensors: - _state_dict = load_file(model_file_safetensors) - for key in _state_dict: - state_dict[key] = _state_dict[key] - - if model.state_dict()['patch_embed.proj.weight'].size() != state_dict['patch_embed.proj.weight'].size(): - new_shape = model.state_dict()['patch_embed.proj.weight'].size() - if len(new_shape) == 5: - state_dict['patch_embed.proj.weight'] = state_dict['patch_embed.proj.weight'].unsqueeze(2).expand(new_shape).clone() - state_dict['patch_embed.proj.weight'][:, :, :-1] = 0 - else: - if model.state_dict()['patch_embed.proj.weight'].size()[1] > state_dict['patch_embed.proj.weight'].size()[1]: - model.state_dict()['patch_embed.proj.weight'][:, :state_dict['patch_embed.proj.weight'].size()[1], :, :] = state_dict['patch_embed.proj.weight'] - model.state_dict()['patch_embed.proj.weight'][:, state_dict['patch_embed.proj.weight'].size()[1]:, :, :] = 0 - state_dict['patch_embed.proj.weight'] = model.state_dict()['patch_embed.proj.weight'] - else: - model.state_dict()['patch_embed.proj.weight'][:, :, :, :] = state_dict['patch_embed.proj.weight'][:, :model.state_dict()['patch_embed.proj.weight'].size()[1], :, :] - state_dict['patch_embed.proj.weight'] = model.state_dict()['patch_embed.proj.weight'] - - tmp_state_dict = {} - for key in state_dict: - if key in model.state_dict().keys() and model.state_dict()[key].size() == state_dict[key].size(): - tmp_state_dict[key] = state_dict[key] - else: - print(key, "Size don't match, skip") - state_dict = tmp_state_dict - - m, u = model.load_state_dict(state_dict, strict=False) - print(f"### missing keys: 
{len(m)}; \n### unexpected keys: {len(u)};") - print(m) - - params = [p.numel() if "mamba" in n else 0 for n, p in model.named_parameters()] - print(f"### Mamba Parameters: {sum(params) / 1e6} M") - - params = [p.numel() if "attn1." in n else 0 for n, p in model.named_parameters()] - print(f"### attn1 Parameters: {sum(params) / 1e6} M") - - return model \ No newline at end of file diff --git a/cogvideox_fun/utils.py b/cogvideox_fun/utils.py index f790161..9f670ec 100644 --- a/cogvideox_fun/utils.py +++ b/cogvideox_fun/utils.py @@ -1,26 +1,6 @@ -import os -import gc import numpy as np -import torch from PIL import Image -# Copyright (c) OpenMMLab. All rights reserved. - -def tensor2pil(image): - return Image.fromarray(np.clip(255. * image.cpu().numpy(), 0, 255).astype(np.uint8)) - -def numpy2pil(image): - return Image.fromarray(np.clip(255. * image, 0, 255).astype(np.uint8)) - -def to_pil(image): - if isinstance(image, Image.Image): - return image - if isinstance(image, torch.Tensor): - return tensor2pil(image) - if isinstance(image, np.ndarray): - return numpy2pil(image) - raise ValueError(f"Cannot convert {type(image)} to PIL.Image") - ASPECT_RATIO_512 = { '0.25': [256.0, 1024.0], '0.26': [256.0, 992.0], '0.27': [256.0, 960.0], '0.28': [256.0, 928.0], '0.32': [288.0, 896.0], '0.33': [288.0, 864.0], '0.35': [288.0, 832.0], '0.4': [320.0, 800.0], @@ -54,126 +34,10 @@ def get_closest_ratio(height: float, width: float, ratios: dict = ASPECT_RATIO_5 closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - aspect_ratio)) return ratios[closest_ratio], float(closest_ratio) - def get_width_and_height_from_image_and_base_resolution(image, base_resolution): target_pixels = int(base_resolution) * int(base_resolution) original_width, original_height = Image.open(image).size ratio = (target_pixels / (original_width * original_height)) ** 0.5 width_slider = round(original_width * ratio) height_slider = round(original_height * ratio) - return height_slider, width_slider - -def get_image_to_video_latent(validation_image_start, validation_image_end, video_length, sample_size): - if validation_image_start is not None and validation_image_end is not None: - if type(validation_image_start) is str and os.path.isfile(validation_image_start): - image_start = clip_image = Image.open(validation_image_start).convert("RGB") - image_start = image_start.resize([sample_size[1], sample_size[0]]) - clip_image = clip_image.resize([sample_size[1], sample_size[0]]) - else: - image_start = clip_image = validation_image_start - image_start = [_image_start.resize([sample_size[1], sample_size[0]]) for _image_start in image_start] - clip_image = [_clip_image.resize([sample_size[1], sample_size[0]]) for _clip_image in clip_image] - - if type(validation_image_end) is str and os.path.isfile(validation_image_end): - image_end = Image.open(validation_image_end).convert("RGB") - image_end = image_end.resize([sample_size[1], sample_size[0]]) - else: - image_end = validation_image_end - image_end = [_image_end.resize([sample_size[1], sample_size[0]]) for _image_end in image_end] - - if type(image_start) is list: - clip_image = clip_image[0] - start_video = torch.cat( - [torch.from_numpy(np.array(_image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0) for _image_start in image_start], - dim=2 - ) - input_video = torch.tile(start_video[:, :, :1], [1, 1, video_length, 1, 1]) - input_video[:, :, :len(image_start)] = start_video - - input_video_mask = torch.zeros_like(input_video[:, :1]) - input_video_mask[:, :, 
len(image_start):] = 255 - else: - input_video = torch.tile( - torch.from_numpy(np.array(image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0), - [1, 1, video_length, 1, 1] - ) - input_video_mask = torch.zeros_like(input_video[:, :1]) - input_video_mask[:, :, 1:] = 255 - - if type(image_end) is list: - image_end = [_image_end.resize(image_start[0].size if type(image_start) is list else image_start.size) for _image_end in image_end] - end_video = torch.cat( - [torch.from_numpy(np.array(_image_end)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0) for _image_end in image_end], - dim=2 - ) - input_video[:, :, -len(end_video):] = end_video - - input_video_mask[:, :, -len(image_end):] = 0 - else: - image_end = image_end.resize(image_start[0].size if type(image_start) is list else image_start.size) - input_video[:, :, -1:] = torch.from_numpy(np.array(image_end)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0) - input_video_mask[:, :, -1:] = 0 - - input_video = input_video / 255 - - elif validation_image_start is not None: - if type(validation_image_start) is str and os.path.isfile(validation_image_start): - image_start = clip_image = Image.open(validation_image_start).convert("RGB") - image_start = image_start.resize([sample_size[1], sample_size[0]]) - clip_image = clip_image.resize([sample_size[1], sample_size[0]]) - else: - image_start = clip_image = validation_image_start - image_start = [_image_start.resize([sample_size[1], sample_size[0]]) for _image_start in image_start] - clip_image = [_clip_image.resize([sample_size[1], sample_size[0]]) for _clip_image in clip_image] - image_end = None - - if type(image_start) is list: - clip_image = clip_image[0] - start_video = torch.cat( - [torch.from_numpy(np.array(_image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0) for _image_start in image_start], - dim=2 - ) - input_video = torch.tile(start_video[:, :, :1], [1, 1, video_length, 1, 1]) - input_video[:, :, :len(image_start)] = start_video - input_video = input_video / 255 - - input_video_mask = torch.zeros_like(input_video[:, :1]) - input_video_mask[:, :, len(image_start):] = 255 - else: - input_video = torch.tile( - torch.from_numpy(np.array(image_start)).permute(2, 0, 1).unsqueeze(1).unsqueeze(0), - [1, 1, video_length, 1, 1] - ) / 255 - input_video_mask = torch.zeros_like(input_video[:, :1]) - input_video_mask[:, :, 1:, ] = 255 - else: - image_start = None - image_end = None - input_video = torch.zeros([1, 3, video_length, sample_size[0], sample_size[1]]) - input_video_mask = torch.ones([1, 1, video_length, sample_size[0], sample_size[1]]) * 255 - clip_image = None - - del image_start - del image_end - gc.collect() - - return input_video, input_video_mask, clip_image - -def get_video_to_video_latent(input_video_path, video_length, sample_size, validation_video_mask=None): - input_video = input_video_path - - input_video = torch.from_numpy(np.array(input_video))[:video_length] - input_video = input_video.permute([3, 0, 1, 2]).unsqueeze(0) / 255 - - if validation_video_mask is not None: - validation_video_mask = Image.open(validation_video_mask).convert('L').resize((sample_size[1], sample_size[0])) - input_video_mask = np.where(np.array(validation_video_mask) < 240, 0, 255) - - input_video_mask = torch.from_numpy(np.array(input_video_mask)).unsqueeze(0).unsqueeze(-1).permute([3, 0, 1, 2]).unsqueeze(0) - input_video_mask = torch.tile(input_video_mask, [1, 1, input_video.size()[2], 1, 1]) - input_video_mask = input_video_mask.to(input_video.device, input_video.dtype) - else: - input_video_mask = 
torch.zeros_like(input_video[:, :1]) - input_video_mask[:, :, :] = 255 - - return input_video, input_video_mask, None \ No newline at end of file + return height_slider, width_slider \ No newline at end of file diff --git a/cogvideox_fun/context.py b/context.py similarity index 100% rename from cogvideox_fun/context.py rename to context.py diff --git a/convert_weight_sat2hf.py b/convert_weight_sat2hf.py deleted file mode 100644 index 545925b..0000000 --- a/convert_weight_sat2hf.py +++ /dev/null @@ -1,303 +0,0 @@ -""" - -The script demonstrates how to convert the weights of the CogVideoX model from SAT to Hugging Face format. -This script supports the conversion of the following models: -- CogVideoX-2B -- CogVideoX-5B, CogVideoX-5B-I2V -- CogVideoX1.1-5B, CogVideoX1.1-5B-I2V - -Original Script: -https://github.com/huggingface/diffusers/blob/main/scripts/convert_cogvideox_to_diffusers.py - -""" -import argparse -from typing import Any, Dict - -import torch -from transformers import T5EncoderModel, T5Tokenizer - -from diffusers import ( - AutoencoderKLCogVideoX, - CogVideoXDDIMScheduler, - CogVideoXImageToVideoPipeline, - CogVideoXPipeline, - #CogVideoXTransformer3DModel, -) -from custom_cogvideox_transformer_3d import CogVideoXTransformer3DModel - - -def reassign_query_key_value_inplace(key: str, state_dict: Dict[str, Any]): - to_q_key = key.replace("query_key_value", "to_q") - to_k_key = key.replace("query_key_value", "to_k") - to_v_key = key.replace("query_key_value", "to_v") - to_q, to_k, to_v = torch.chunk(state_dict[key], chunks=3, dim=0) - state_dict[to_q_key] = to_q - state_dict[to_k_key] = to_k - state_dict[to_v_key] = to_v - state_dict.pop(key) - - -def reassign_query_key_layernorm_inplace(key: str, state_dict: Dict[str, Any]): - layer_id, weight_or_bias = key.split(".")[-2:] - - if "query" in key: - new_key = f"transformer_blocks.{layer_id}.attn1.norm_q.{weight_or_bias}" - elif "key" in key: - new_key = f"transformer_blocks.{layer_id}.attn1.norm_k.{weight_or_bias}" - - state_dict[new_key] = state_dict.pop(key) - - -def reassign_adaln_norm_inplace(key: str, state_dict: Dict[str, Any]): - layer_id, _, weight_or_bias = key.split(".")[-3:] - - weights_or_biases = state_dict[key].chunk(12, dim=0) - norm1_weights_or_biases = torch.cat(weights_or_biases[0:3] + weights_or_biases[6:9]) - norm2_weights_or_biases = torch.cat(weights_or_biases[3:6] + weights_or_biases[9:12]) - - norm1_key = f"transformer_blocks.{layer_id}.norm1.linear.{weight_or_bias}" - state_dict[norm1_key] = norm1_weights_or_biases - - norm2_key = f"transformer_blocks.{layer_id}.norm2.linear.{weight_or_bias}" - state_dict[norm2_key] = norm2_weights_or_biases - - state_dict.pop(key) - - -def remove_keys_inplace(key: str, state_dict: Dict[str, Any]): - state_dict.pop(key) - - -def replace_up_keys_inplace(key: str, state_dict: Dict[str, Any]): - key_split = key.split(".") - layer_index = int(key_split[2]) - replace_layer_index = 4 - 1 - layer_index - - key_split[1] = "up_blocks" - key_split[2] = str(replace_layer_index) - new_key = ".".join(key_split) - - state_dict[new_key] = state_dict.pop(key) - - -TRANSFORMER_KEYS_RENAME_DICT = { - "transformer.final_layernorm": "norm_final", - "transformer": "transformer_blocks", - "attention": "attn1", - "mlp": "ff.net", - "dense_h_to_4h": "0.proj", - "dense_4h_to_h": "2", - ".layers": "", - "dense": "to_out.0", - "input_layernorm": "norm1.norm", - "post_attn1_layernorm": "norm2.norm", - "time_embed.0": "time_embedding.linear_1", - "time_embed.2": "time_embedding.linear_2", - 
"mixins.patch_embed": "patch_embed", - "mixins.final_layer.norm_final": "norm_out.norm", - "mixins.final_layer.linear": "proj_out", - "mixins.final_layer.adaLN_modulation.1": "norm_out.linear", - "mixins.pos_embed.pos_embedding": "patch_embed.pos_embedding", # Specific to CogVideoX-5b-I2V -} - -TRANSFORMER_SPECIAL_KEYS_REMAP = { - "query_key_value": reassign_query_key_value_inplace, - "query_layernorm_list": reassign_query_key_layernorm_inplace, - "key_layernorm_list": reassign_query_key_layernorm_inplace, - "adaln_layer.adaLN_modulations": reassign_adaln_norm_inplace, - "embed_tokens": remove_keys_inplace, - "freqs_sin": remove_keys_inplace, - "freqs_cos": remove_keys_inplace, - "position_embedding": remove_keys_inplace, -} - -VAE_KEYS_RENAME_DICT = { - "block.": "resnets.", - "down.": "down_blocks.", - "downsample": "downsamplers.0", - "upsample": "upsamplers.0", - "nin_shortcut": "conv_shortcut", - "encoder.mid.block_1": "encoder.mid_block.resnets.0", - "encoder.mid.block_2": "encoder.mid_block.resnets.1", - "decoder.mid.block_1": "decoder.mid_block.resnets.0", - "decoder.mid.block_2": "decoder.mid_block.resnets.1", -} - -VAE_SPECIAL_KEYS_REMAP = { - "loss": remove_keys_inplace, - "up.": replace_up_keys_inplace, -} - -TOKENIZER_MAX_LENGTH = 226 - - -def get_state_dict(saved_dict: Dict[str, Any]) -> Dict[str, Any]: - state_dict = saved_dict - if "model" in saved_dict.keys(): - state_dict = state_dict["model"] - if "module" in saved_dict.keys(): - state_dict = state_dict["module"] - if "state_dict" in saved_dict.keys(): - state_dict = state_dict["state_dict"] - return state_dict - - -def update_state_dict_inplace(state_dict: Dict[str, Any], old_key: str, new_key: str) -> Dict[str, Any]: - state_dict[new_key] = state_dict.pop(old_key) - - -def convert_transformer( - ckpt_path: str, - num_layers: int, - num_attention_heads: int, - use_rotary_positional_embeddings: bool, - i2v: bool, - dtype: torch.dtype, -): - PREFIX_KEY = "model.diffusion_model." 
- - original_state_dict = get_state_dict(torch.load(ckpt_path, map_location="cpu", mmap=True)) - transformer = CogVideoXTransformer3DModel( - in_channels=32 if i2v else 16, - num_layers=num_layers, - num_attention_heads=num_attention_heads, - use_rotary_positional_embeddings=use_rotary_positional_embeddings, - use_learned_positional_embeddings=i2v, - ).to(dtype=dtype) - - for key in list(original_state_dict.keys()): - new_key = key[len(PREFIX_KEY):] - for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items(): - new_key = new_key.replace(replace_key, rename_key) - update_state_dict_inplace(original_state_dict, key, new_key) - - for key in list(original_state_dict.keys()): - for special_key, handler_fn_inplace in TRANSFORMER_SPECIAL_KEYS_REMAP.items(): - if special_key not in key: - continue - handler_fn_inplace(key, original_state_dict) - transformer.load_state_dict(original_state_dict, strict=True) - return transformer - - -def convert_vae(ckpt_path: str, scaling_factor: float, dtype: torch.dtype): - original_state_dict = get_state_dict(torch.load(ckpt_path, map_location="cpu", mmap=True)) - vae = AutoencoderKLCogVideoX(scaling_factor=scaling_factor).to(dtype=dtype) - - for key in list(original_state_dict.keys()): - new_key = key[:] - for replace_key, rename_key in VAE_KEYS_RENAME_DICT.items(): - new_key = new_key.replace(replace_key, rename_key) - update_state_dict_inplace(original_state_dict, key, new_key) - - for key in list(original_state_dict.keys()): - for special_key, handler_fn_inplace in VAE_SPECIAL_KEYS_REMAP.items(): - if special_key not in key: - continue - handler_fn_inplace(key, original_state_dict) - - vae.load_state_dict(original_state_dict, strict=True) - return vae - - -def get_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--transformer_ckpt_path", type=str, default=None, help="Path to original transformer checkpoint" - ) - parser.add_argument("--vae_ckpt_path", type=str, default=None, help="Path to original vae checkpoint") - parser.add_argument("--output_path", type=str, required=True, help="Path where converted model should be saved") - parser.add_argument("--fp16", action="store_true", default=False, help="Whether to save the model weights in fp16") - parser.add_argument("--bf16", action="store_true", default=False, help="Whether to save the model weights in bf16") - parser.add_argument( - "--push_to_hub", action="store_true", default=False, help="Whether to push to HF Hub after saving" - ) - parser.add_argument( - "--text_encoder_cache_dir", type=str, default=None, help="Path to text encoder cache directory" - ) - # For CogVideoX-2B, num_layers is 30. For 5B, it is 42 - parser.add_argument("--num_layers", type=int, default=30, help="Number of transformer blocks") - # For CogVideoX-2B, num_attention_heads is 30. For 5B, it is 48 - parser.add_argument("--num_attention_heads", type=int, default=30, help="Number of attention heads") - # For CogVideoX-2B, use_rotary_positional_embeddings is False. For 5B, it is True - parser.add_argument( - "--use_rotary_positional_embeddings", action="store_true", default=False, help="Whether to use RoPE or not" - ) - # For CogVideoX-2B, scaling_factor is 1.15258426. For 5B, it is 0.7 - parser.add_argument("--scaling_factor", type=float, default=1.15258426, help="Scaling factor in the VAE") - # For CogVideoX-2B, snr_shift_scale is 3.0. 
For 5B, it is 1.0
-    parser.add_argument("--snr_shift_scale", type=float, default=3.0, help="SNR shift scale for the scheduler")
-    parser.add_argument("--i2v", action="store_true", default=False, help="Whether the checkpoint is an image-to-video (I2V) model")
-    return parser.parse_args()
-
-
-if __name__ == "__main__":
-    args = get_args()
-
-    transformer = None
-    vae = None
-
-    if args.fp16 and args.bf16:
-        raise ValueError("You cannot pass both --fp16 and --bf16 at the same time.")
-
-    dtype = torch.float16 if args.fp16 else torch.bfloat16 if args.bf16 else torch.float32
-
-    if args.transformer_ckpt_path is not None:
-        transformer = convert_transformer(
-            args.transformer_ckpt_path,
-            args.num_layers,
-            args.num_attention_heads,
-            args.use_rotary_positional_embeddings,
-            args.i2v,
-            dtype,
-        )
-    if args.vae_ckpt_path is not None:
-        vae = convert_vae(args.vae_ckpt_path, args.scaling_factor, dtype)
-
-    #text_encoder_id = "/share/official_pretrains/hf_home/t5-v1_1-xxl"
-    #tokenizer = T5Tokenizer.from_pretrained(text_encoder_id, model_max_length=TOKENIZER_MAX_LENGTH)
-    #text_encoder = T5EncoderModel.from_pretrained(text_encoder_id, cache_dir=args.text_encoder_cache_dir)
-
-    # Apparently, the conversion does not work anymore without this :shrug:
-    #for param in text_encoder.parameters():
-    #    param.data = param.data.contiguous()
-
-    scheduler = CogVideoXDDIMScheduler.from_config(
-        {
-            "snr_shift_scale": args.snr_shift_scale,
-            "beta_end": 0.012,
-            "beta_schedule": "scaled_linear",
-            "beta_start": 0.00085,
-            "clip_sample": False,
-            "num_train_timesteps": 1000,
-            "prediction_type": "v_prediction",
-            "rescale_betas_zero_snr": True,
-            "set_alpha_to_one": True,
-            "timestep_spacing": "trailing",
-        }
-    )
-    if args.i2v:
-        pipeline_cls = CogVideoXImageToVideoPipeline
-    else:
-        pipeline_cls = CogVideoXPipeline
-
-    pipe = pipeline_cls(
-        tokenizer=None,
-        text_encoder=None,
-        vae=vae,
-        transformer=transformer,
-        scheduler=scheduler,
-    )
-
-    if args.fp16:
-        pipe = pipe.to(dtype=torch.float16)
-    if args.bf16:
-        pipe = pipe.to(dtype=torch.bfloat16)
-
-    # We don't use variant here because the model must be run in fp16 (2B) or bf16 (5B). It would be weird
-    # for users to specify variant when the default is not fp32 and they want to run with the correct default (which
-    # is either fp16/bf16 here).
-
-    # This is necessary for users with insufficient memory,
-    # such as those using Colab and notebooks, as it can save some memory used for model loading.
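For quick reference, the per-variant values stated in the get_args() comments above can be collected in one place; this is a convenience summary of those stated values only, not repo code:

COGVIDEOX_PRESETS = {
    # values as stated in the get_args() comments above
    "CogVideoX-2B": dict(num_layers=30, num_attention_heads=30,
                         use_rotary_positional_embeddings=False,
                         vae_scaling_factor=1.15258426, snr_shift_scale=3.0),
    "CogVideoX-5B": dict(num_layers=42, num_attention_heads=48,
                         use_rotary_positional_embeddings=True,
                         vae_scaling_factor=0.7, snr_shift_scale=1.0),
}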
- pipe.save_pretrained(args.output_path, safe_serialization=True, max_shard_size="5GB", push_to_hub=args.push_to_hub) diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index 50b0f25..89c72aa 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -76,7 +76,6 @@ class CogVideoXAttnProcessor2_0: if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("CogVideoXAttnProcessor requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") - #@torch.compiler.disable() def __call__( self, attn: Attention, diff --git a/model_loading.py b/model_loading.py index e627351..c77e3c5 100644 --- a/model_loading.py +++ b/model_loading.py @@ -43,11 +43,8 @@ from .custom_cogvideox_transformer_3d import CogVideoXTransformer3DModel from .pipeline_cogvideox import CogVideoXPipeline from contextlib import nullcontext -from .cogvideox_fun.transformer_3d import CogVideoXTransformer3DModel as CogVideoXTransformer3DModelFun -from .cogvideox_fun.autoencoder_magvit import AutoencoderKLCogVideoX as AutoencoderKLCogVideoXFun - -from .cogvideox_fun.pipeline_cogvideox_inpaint import CogVideoX_Fun_Pipeline_Inpaint -from .cogvideox_fun.pipeline_cogvideox_control import CogVideoX_Fun_Pipeline_Control +from accelerate import init_empty_weights +from accelerate.utils import set_module_tensor_to_device from .utils import remove_specific_blocks, log from comfy.utils import load_torch_file @@ -121,8 +118,7 @@ class DownloadAndLoadCogVideoModel: "precision": (["fp16", "fp32", "bf16"], {"default": "bf16", "tooltip": "official recommendation is that 2b model should be fp16, 5b model should be bf16"} ), - "fp8_transformer": (['disabled', 'enabled', 'fastmode', 'torchao_fp8dq', "torchao_fp8dqrow", "torchao_int8dq", "torchao_fp6"], {"default": 'disabled', "tooltip": "enabled casts the transformer to torch.float8_e4m3fn, fastmode is only for latest nvidia GPUs and requires torch 2.4.0 and cu124 minimum"}), - "compile": (["disabled","onediff","torch"], {"tooltip": "compile the model for faster inference, these are advanced options only available on Linux, see readme for more info"}), + "quantization": (['disabled', 'fp8_e4m3fn', 'fp8_e4m3fn_fastmode', 'torchao_fp8dq', "torchao_fp8dqrow", "torchao_int8dq", "torchao_fp6"], {"default": 'disabled', "tooltip": "enabled casts the transformer to torch.float8_e4m3fn, fastmode is only for latest nvidia GPUs and requires torch 2.4.0 and cu124 minimum"}), "enable_sequential_cpu_offload": ("BOOLEAN", {"default": False, "tooltip": "significantly reducing memory usage and slows down the inference"}), "block_edit": ("TRANSFORMERBLOCKS", {"default": None}), "lora": ("COGLORA", {"default": None}), @@ -132,13 +128,13 @@ class DownloadAndLoadCogVideoModel: } } - RETURN_TYPES = ("COGVIDEOPIPE",) - RETURN_NAMES = ("cogvideo_pipe", ) + RETURN_TYPES = ("COGVIDEOMODEL", "VAE",) + RETURN_NAMES = ("model", "vae", ) FUNCTION = "loadmodel" CATEGORY = "CogVideoWrapper" DESCRIPTION = "Downloads and loads the selected CogVideo model from Huggingface to 'ComfyUI/models/CogVideo'" - def loadmodel(self, model, precision, fp8_transformer="disabled", compile="disabled", + def loadmodel(self, model, precision, quantization="disabled", compile="disabled", enable_sequential_cpu_offload=False, block_edit=None, lora=None, compile_args=None, attention_mode="sdpa", load_device="main_device"): @@ -215,12 +211,7 @@ class DownloadAndLoadCogVideoModel: local_dir_use_symlinks=False, ) - #transformer - if "Fun" in model: - transformer = 
CogVideoXTransformer3DModelFun.from_pretrained(base_path, subfolder=subfolder) - else: - transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder=subfolder) - + transformer = CogVideoXTransformer3DModel.from_pretrained(base_path, subfolder=subfolder) transformer = transformer.to(dtype).to(transformer_load_device) if "1.5" in model: @@ -235,17 +226,17 @@ class DownloadAndLoadCogVideoModel: scheduler = CogVideoXDDIMScheduler.from_config(scheduler_config) # VAE - if "Fun" in model: - vae = AutoencoderKLCogVideoXFun.from_pretrained(base_path, subfolder="vae").to(dtype).to(offload_device) - if "Pose" in model: - pipe = CogVideoX_Fun_Pipeline_Control(vae, transformer, scheduler) - else: - pipe = CogVideoX_Fun_Pipeline_Inpaint(vae, transformer, scheduler) - else: - vae = AutoencoderKLCogVideoX.from_pretrained(base_path, subfolder="vae").to(dtype).to(offload_device) - pipe = CogVideoXPipeline(vae, transformer, scheduler) - if "cogvideox-2b-img2vid" in model: - pipe.input_with_padding = False + vae = AutoencoderKLCogVideoX.from_pretrained(base_path, subfolder="vae").to(dtype).to(offload_device) + + #pipeline + pipe = CogVideoXPipeline( + transformer, + scheduler, + dtype=dtype, + is_fun_inpaint=True if "fun" in model.lower() and "pose" not in model.lower() else False + ) + if "cogvideox-2b-img2vid" in model: + pipe.input_with_padding = False #LoRAs if lora is not None: @@ -281,8 +272,19 @@ class DownloadAndLoadCogVideoModel: lora_scale = lora_scale / lora_rank pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"]) + if "fused" in attention_mode: + from diffusers.models.attention import Attention + transformer.fuse_qkv_projections = True + for module in transformer.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + transformer.attention_mode = attention_mode + + if compile_args is not None: + pipe.transformer.to(memory_format=torch.channels_last) + #fp8 - if fp8_transformer == "enabled" or fp8_transformer == "fastmode": + if quantization == "fp8_e4m3fn" or quantization == "fp8_e4m3fn_fastmode": params_to_keep = {"patch_embed", "lora", "pos_embedding", "time_embedding", "norm_k", "norm_q", "to_k.bias", "to_q.bias", "to_v.bias"} if "1.5" in model: params_to_keep.update({"norm1.linear.weight", "ofs_embedding", "norm_final", "norm_out", "proj_out"}) @@ -290,13 +292,20 @@ class DownloadAndLoadCogVideoModel: if not any(keyword in name for keyword in params_to_keep): param.data = param.data.to(torch.float8_e4m3fn) - if fp8_transformer == "fastmode": + if quantization == "fp8_e4m3fn_fastmode": from .fp8_optimization import convert_fp8_linear if "1.5" in model: params_to_keep.update({"ff"}) #otherwise NaNs convert_fp8_linear(pipe.transformer, dtype, params_to_keep=params_to_keep) + + # compilation + if compile_args is not None: + torch._dynamo.config.cache_size_limit = compile_args["dynamo_cache_size_limit"] + for i, block in enumerate(pipe.transformer.transformer_blocks): + if "CogVideoXBlock" in str(block): + pipe.transformer.transformer_blocks[i] = torch.compile(block, fullgraph=compile_args["fullgraph"], dynamic=compile_args["dynamic"], backend=compile_args["backend"], mode=compile_args["mode"]) - elif "torchao" in fp8_transformer: + if "torchao" in quantization: try: from torchao.quantization import ( quantize_, @@ -313,14 +322,14 @@ class DownloadAndLoadCogVideoModel: return isinstance(module, nn.Linear) return False - if "fp6" in fp8_transformer: #slower for some reason on 4090 + if "fp6" in quantization: #slower for some reason on 
4090 quant_func = fpx_weight_only(3, 2) - elif "fp8dq" in fp8_transformer: #very fast on 4090 when compiled + elif "fp8dq" in quantization: #very fast on 4090 when compiled quant_func = float8_dynamic_activation_float8_weight() - elif 'fp8dqrow' in fp8_transformer: + elif 'fp8dqrow' in quantization: from torchao.quantization.quant_api import PerRow quant_func = float8_dynamic_activation_float8_weight(granularity=PerRow()) - elif 'int8dq' in fp8_transformer: + elif 'int8dq' in quantization: quant_func = int8_dynamic_activation_int8_weight() for i, block in enumerate(pipe.transformer.transformer_blocks): @@ -365,41 +374,19 @@ class DownloadAndLoadCogVideoModel: # (3): Dropout(p=0.0, inplace=False) # ) # ) - # ) + # ) + + # if compile == "onediff": + # from onediffx import compile_pipe + # os.environ['NEXFORT_FX_FORCE_TRITON_SDPA'] = '1' - # compilation - if compile == "torch": - #pipe.transformer.to(memory_format=torch.channels_last) - if compile_args is not None: - torch._dynamo.config.cache_size_limit = compile_args["dynamo_cache_size_limit"] - for i, block in enumerate(pipe.transformer.transformer_blocks): - if "CogVideoXBlock" in str(block): - pipe.transformer.transformer_blocks[i] = torch.compile(block, fullgraph=compile_args["fullgraph"], dynamic=compile_args["dynamic"], backend=compile_args["backend"], mode=compile_args["mode"]) - else: - for i, block in enumerate(pipe.transformer.transformer_blocks): - if "CogVideoXBlock" in str(block): - pipe.transformer.transformer_blocks[i] = torch.compile(block, fullgraph=False, dynamic=False, backend="inductor") - - transformer.attention_mode = attention_mode - - if "fused" in attention_mode: - from diffusers.models.attention import Attention - transformer.fuse_qkv_projections = True - for module in transformer.modules(): - if isinstance(module, Attention): - module.fuse_projections(fuse=True) - - elif compile == "onediff": - from onediffx import compile_pipe - os.environ['NEXFORT_FX_FORCE_TRITON_SDPA'] = '1' - - pipe = compile_pipe( - pipe, - backend="nexfort", - options= {"mode": "max-optimize:max-autotune:max-autotune", "memory_format": "channels_last", "options": {"inductor.optimize_linear_epilogue": False, "triton.fuse_attention_allow_fp16_reduction": False}}, - ignores=["vae"], - fuse_qkv_projections= False, - ) + # pipe = compile_pipe( + # pipe, + # backend="nexfort", + # options= {"mode": "max-optimize:max-autotune:max-autotune", "memory_format": "channels_last", "options": {"inductor.optimize_linear_epilogue": False, "triton.fuse_attention_allow_fp16_reduction": False}}, + # ignores=["vae"], + # fuse_qkv_projections= False, + # ) pipeline = { "pipe": pipe, @@ -412,7 +399,7 @@ class DownloadAndLoadCogVideoModel: "model_name": model, } - return (pipeline,) + return (pipeline, vae) #region GGUF class DownloadAndLoadCogVideoGGUFModel: @classmethod @@ -444,8 +431,8 @@ class DownloadAndLoadCogVideoGGUFModel: } } - RETURN_TYPES = ("COGVIDEOPIPE",) - RETURN_NAMES = ("cogvideo_pipe", ) + RETURN_TYPES = ("COGVIDEOMODEL", "VAE",) + RETURN_NAMES = ("model", "vae",) FUNCTION = "loadmodel" CATEGORY = "CogVideoWrapper" @@ -486,7 +473,6 @@ class DownloadAndLoadCogVideoGGUFModel: with open(transformer_path) as f: transformer_config = json.load(f) - from . 
import mz_gguf_loader import importlib @@ -498,7 +484,6 @@ class DownloadAndLoadCogVideoGGUFModel: transformer_config["in_channels"] = 32 else: transformer_config["in_channels"] = 33 - transformer = CogVideoXTransformer3DModelFun.from_config(transformer_config) elif "I2V" in model or "Interpolation" in model: transformer_config["in_channels"] = 32 if "1_5" in model: @@ -508,10 +493,10 @@ class DownloadAndLoadCogVideoGGUFModel: transformer_config["patch_bias"] = False transformer_config["sample_height"] = 300 transformer_config["sample_width"] = 300 - transformer = CogVideoXTransformer3DModel.from_config(transformer_config) else: transformer_config["in_channels"] = 16 - transformer = CogVideoXTransformer3DModel.from_config(transformer_config) + + transformer = CogVideoXTransformer3DModel.from_config(transformer_config) params_to_keep = {"patch_embed", "pos_embedding", "time_embedding"} if "2b" in model: @@ -564,60 +549,25 @@ class DownloadAndLoadCogVideoGGUFModel: with open(os.path.join(script_directory, 'configs', 'vae_config.json')) as f: vae_config = json.load(f) + #VAE vae_sd = load_torch_file(vae_path) - if "fun" in model: - vae = AutoencoderKLCogVideoXFun.from_config(vae_config).to(vae_dtype).to(offload_device) - vae.load_state_dict(vae_sd) - if "Pose" in model: - pipe = CogVideoX_Fun_Pipeline_Control(vae, transformer, scheduler) - else: - pipe = CogVideoX_Fun_Pipeline_Inpaint(vae, transformer, scheduler) - else: - vae = AutoencoderKLCogVideoX.from_config(vae_config).to(vae_dtype).to(offload_device) - vae.load_state_dict(vae_sd) - pipe = CogVideoXPipeline(vae, transformer, scheduler) + vae = AutoencoderKLCogVideoX.from_config(vae_config).to(vae_dtype).to(offload_device) + vae.load_state_dict(vae_sd) + del vae_sd + pipe = CogVideoXPipeline(transformer, scheduler, dtype=vae_dtype) if enable_sequential_cpu_offload: pipe.enable_sequential_cpu_offload() sd = load_torch_file(gguf_path) - - # #LoRAs - # if lora is not None: - # if "fun" in model.lower(): - # raise NotImplementedError("LoRA with GGUF is not supported for Fun models") - # from .lora_utils import merge_lora#, load_lora_into_transformer - # #for l in lora: - # # log.info(f"Merging LoRA weights from {l['path']} with strength {l['strength']}") - # # pipe.transformer = merge_lora(pipe.transformer, l["path"], l["strength"]) - # else: - # adapter_list = [] - # adapter_weights = [] - # for l in lora: - # lora_sd = load_torch_file(l["path"]) - # for key, val in lora_sd.items(): - # if "lora_B" in key: - # lora_rank = val.shape[1] - # break - # log.info(f"Loading rank {lora_rank} LoRA weights from {l['path']} with strength {l['strength']}") - # adapter_name = l['path'].split("/")[-1].split(".")[0] - # adapter_weight = l['strength'] - # pipe.load_lora_weights(l['path'], weight_name=l['path'].split("/")[-1], lora_rank=lora_rank, adapter_name=adapter_name) - - # #transformer = load_lora_into_transformer(lora, transformer) - # adapter_list.append(adapter_name) - # adapter_weights.append(adapter_weight) - # for l in lora: - # pipe.set_adapters(adapter_list, adapter_weights=adapter_weights) - # #pipe.fuse_lora(lora_scale=1 / lora_rank, components=["transformer"]) - pipe.transformer = mz_gguf_loader.quantize_load_state_dict(pipe.transformer, sd, device="cpu") + del sd + if load_device == "offload_device": pipe.transformer.to(offload_device) else: pipe.transformer.to(device) - pipeline = { "pipe": pipe, "dtype": vae_dtype, @@ -629,9 +579,253 @@ class DownloadAndLoadCogVideoGGUFModel: "manual_offloading": True, } + return (pipeline, vae) + 
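The torchao paths above (and their copy in the loader that follows) always pair quantize_ with a filter so that only Linear layers inside attn1/ff submodules are converted, since quantized norm layers break at least the 1.5 models. A reduced sketch of that call, assuming torchao is installed:

import torch.nn as nn
from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight

def quantize_block_linears(block: nn.Module) -> None:
    # Restrict quantization to Linear layers under attention/feed-forward
    # submodules; norm layers are deliberately left untouched.
    def filter_fn(module: nn.Module, fqn: str) -> bool:
        target_submodules = {"attn1", "ff"}
        return any(sub in fqn for sub in target_submodules) and isinstance(module, nn.Linear)

    quantize_(block, float8_dynamic_activation_float8_weight(), filter_fn=filter_fn)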
+#region ModelLoader +class CogVideoXModelLoader: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": (folder_paths.get_filename_list("diffusion_models"), {"tooltip": "The name of the checkpoint (model) to load.",}), + + "base_precision": (["fp16", "fp32", "bf16"], {"default": "bf16"}), + "quantization": (['disabled', 'fp8_e4m3fn', 'fp8_e4m3fn_fast', 'torchao_fp8dq', "torchao_fp8dqrow", "torchao_int8dq", "torchao_fp6"], {"default": 'disabled', "tooltip": "optional quantization method"}), + "load_device": (["main_device", "offload_device"], {"default": "main_device"}), + "enable_sequential_cpu_offload": ("BOOLEAN", {"default": False, "tooltip": "significantly reducing memory usage and slows down the inference"}), + }, + "optional": { + "block_edit": ("TRANSFORMERBLOCKS", {"default": None}), + "lora": ("COGLORA", {"default": None}), + "compile_args":("COMPILEARGS", ), + "attention_mode": (["sdpa", "sageattn", "fused_sdpa", "fused_sageattn"], {"default": "sdpa"}), + } + } + + RETURN_TYPES = ("COGVIDEOMODEL",) + RETURN_NAMES = ("model", ) + FUNCTION = "loadmodel" + CATEGORY = "CogVideoWrapper" + + def loadmodel(self, model, base_precision, load_device, enable_sequential_cpu_offload, + block_edit=None, compile_args=None, lora=None, attention_mode="sdpa", quantization="disabled"): + + device = mm.get_torch_device() + offload_device = mm.unet_offload_device() + manual_offloading = True + transformer_load_device = device if load_device == "main_device" else offload_device + mm.soft_empty_cache() + + base_dtype = {"fp8_e4m3fn": torch.float8_e4m3fn, "fp8_e4m3fn_fast": torch.float8_e4m3fn, "bf16": torch.bfloat16, "fp16": torch.float16, "fp32": torch.float32}[base_precision] + + model_path = folder_paths.get_full_path_or_raise("diffusion_models", model) + sd = load_torch_file(model_path, device=transformer_load_device) + + model_type = "" + if sd["patch_embed.proj.weight"].shape == (3072, 33, 2, 2): + model_type = "fun_5b" + elif sd["patch_embed.proj.weight"].shape == (3072, 16, 2, 2): + model_type = "5b" + elif sd["patch_embed.proj.weight"].shape == (3072, 128): + model_type = "5b_1_5" + elif sd["patch_embed.proj.weight"].shape == (3072, 256): + model_type = "5b_I2V_1_5" + elif sd["patch_embed.proj.weight"].shape == (1920, 33, 2, 2): + model_type = "fun_2b" + elif sd["patch_embed.proj.weight"].shape == (1920, 16, 2, 2): + model_type = "2b" + elif sd["patch_embed.proj.weight"].shape == (3072, 32, 2, 2): + if "pos_embedding" in sd: + model_type = "fun_5b_pose" + else: + model_type = "I2V_5b" + else: + raise Exception("Selected model is not recognized") + log.info(f"Detected CogVideoX model type: {model_type}") + + if "5b" in model_type: + scheduler_config_path = os.path.join(script_directory, 'configs', 'scheduler_config_5b.json') + transformer_config_path = os.path.join(script_directory, 'configs', 'transformer_config_5b.json') + elif "2b" in model_type: + scheduler_config_path = os.path.join(script_directory, 'configs', 'scheduler_config_2b.json') + transformer_config_path = os.path.join(script_directory, 'configs', 'transformer_config_2b.json') + + with open(transformer_config_path) as f: + transformer_config = json.load(f) + + with init_empty_weights(): + if model_type in ["I2V", "I2V_5b", "fun_5b_pose", "5b_I2V_1_5"]: + transformer_config["in_channels"] = 32 + if "1_5" in model_type: + transformer_config["ofs_embed_dim"] = 512 + transformer_config["use_learned_positional_embeddings"] = False + transformer_config["patch_size_t"] = 2 + transformer_config["patch_bias"] = False 
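The shape dispatch at the top of loadmodel above can be summarized as a lookup table; the shapes and type strings below are copied from that code, while the helper itself is only an illustrative condensation:

SHAPE_TO_MODEL_TYPE = {
    (3072, 33, 2, 2): "fun_5b",
    (3072, 16, 2, 2): "5b",
    (3072, 128): "5b_1_5",
    (3072, 256): "5b_I2V_1_5",
    (1920, 33, 2, 2): "fun_2b",
    (1920, 16, 2, 2): "2b",
}

def detect_model_type(sd) -> str:
    shape = tuple(sd["patch_embed.proj.weight"].shape)
    if shape == (3072, 32, 2, 2):
        # Fun-Pose and I2V 5B share this shape, so a key check disambiguates.
        return "fun_5b_pose" if "pos_embedding" in sd else "I2V_5b"
    if shape not in SHAPE_TO_MODEL_TYPE:
        raise Exception("Selected model is not recognized")
    return SHAPE_TO_MODEL_TYPE[shape]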
+ transformer_config["sample_height"] = 300 + transformer_config["sample_width"] = 300 + elif "fun" in model_type: + transformer_config["in_channels"] = 33 + else: + if "1_5" in model_type: + transformer_config["use_learned_positional_embeddings"] = False + transformer_config["patch_size_t"] = 2 + transformer_config["patch_bias"] = False + #transformer_config["sample_height"] = 300 todo: check if this is needed + #transformer_config["sample_width"] = 300 + transformer_config["in_channels"] = 16 + + transformer = CogVideoXTransformer3DModel.from_config(transformer_config) + + #load weights + #params_to_keep = {} + log.info("Using accelerate to load and assign model weights to device...") + + for name, param in transformer.named_parameters(): + #dtype_to_use = base_dtype if any(keyword in name for keyword in params_to_keep) else dtype + set_module_tensor_to_device(transformer, name, device=transformer_load_device, dtype=base_dtype, value=sd[name]) + del sd + + + #scheduler + with open(scheduler_config_path) as f: + scheduler_config = json.load(f) + scheduler = CogVideoXDDIMScheduler.from_config(scheduler_config, subfolder="scheduler") + + if block_edit is not None: + transformer = remove_specific_blocks(transformer, block_edit) + + if "fused" in attention_mode: + from diffusers.models.attention import Attention + transformer.fuse_qkv_projections = True + for module in transformer.modules(): + if isinstance(module, Attention): + module.fuse_projections(fuse=True) + transformer.attention_mode = attention_mode + + if "fun" in model_type: + if not "pose" in model_type: + raise NotImplementedError("Fun models besides pose are not supported with this loader yet") + pipe = CogVideoX_Fun_Pipeline_Inpaint(vae, transformer, scheduler) + else: + pipe = CogVideoXPipeline(transformer, scheduler, dtype=base_dtype) + else: + pipe = CogVideoXPipeline(transformer, scheduler, dtype=base_dtype) + + if enable_sequential_cpu_offload: + pipe.enable_sequential_cpu_offload() + + #LoRAs + if lora is not None: + from .lora_utils import merge_lora#, load_lora_into_transformer + if "fun" in model.lower(): + for l in lora: + log.info(f"Merging LoRA weights from {l['path']} with strength {l['strength']}") + transformer = merge_lora(transformer, l["path"], l["strength"]) + else: + adapter_list = [] + adapter_weights = [] + for l in lora: + fuse = True if l["fuse_lora"] else False + lora_sd = load_torch_file(l["path"]) + for key, val in lora_sd.items(): + if "lora_B" in key: + lora_rank = val.shape[1] + break + log.info(f"Merging rank {lora_rank} LoRA weights from {l['path']} with strength {l['strength']}") + adapter_name = l['path'].split("/")[-1].split(".")[0] + adapter_weight = l['strength'] + pipe.load_lora_weights(l['path'], weight_name=l['path'].split("/")[-1], lora_rank=lora_rank, adapter_name=adapter_name) + + #transformer = load_lora_into_transformer(lora, transformer) + adapter_list.append(adapter_name) + adapter_weights.append(adapter_weight) + for l in lora: + pipe.set_adapters(adapter_list, adapter_weights=adapter_weights) + if fuse: + lora_scale = 1 + dimension_loras = ["orbit", "dimensionx"] # for now dimensionx loras need scaling + if any(item in lora[-1]["path"].lower() for item in dimension_loras): + lora_scale = lora_scale / lora_rank + pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"]) + + if compile_args is not None: + pipe.transformer.to(memory_format=torch.channels_last) + + #quantization + if quantization == "fp8_e4m3fn" or quantization == "fp8_e4m3fn_fast": + params_to_keep = 
{"patch_embed", "lora", "pos_embedding", "time_embedding", "norm_k", "norm_q", "to_k.bias", "to_q.bias", "to_v.bias"} + if "1.5" in model: + params_to_keep.update({"norm1.linear.weight", "ofs_embedding", "norm_final", "norm_out", "proj_out"}) + for name, param in pipe.transformer.named_parameters(): + if not any(keyword in name for keyword in params_to_keep): + param.data = param.data.to(torch.float8_e4m3fn) + + if quantization == "fp8_e4m3fn_fast": + from .fp8_optimization import convert_fp8_linear + if "1.5" in model: + params_to_keep.update({"ff"}) #otherwise NaNs + convert_fp8_linear(pipe.transformer, base_dtype, params_to_keep=params_to_keep) + + #compile + if compile_args is not None: + torch._dynamo.config.cache_size_limit = compile_args["dynamo_cache_size_limit"] + for i, block in enumerate(pipe.transformer.transformer_blocks): + if "CogVideoXBlock" in str(block): + pipe.transformer.transformer_blocks[i] = torch.compile(block, fullgraph=compile_args["fullgraph"], dynamic=compile_args["dynamic"], backend=compile_args["backend"], mode=compile_args["mode"]) + + if "torchao" in quantization: + try: + from torchao.quantization import ( + quantize_, + fpx_weight_only, + float8_dynamic_activation_float8_weight, + int8_dynamic_activation_int8_weight + ) + except: + raise ImportError("torchao is not installed, please install torchao to use fp8dq") + + def filter_fn(module: nn.Module, fqn: str) -> bool: + target_submodules = {'attn1', 'ff'} # avoid norm layers, 1.5 at least won't work with quantized norm1 #todo: test other models + if any(sub in fqn for sub in target_submodules): + return isinstance(module, nn.Linear) + return False + + if "fp6" in quantization: #slower for some reason on 4090 + quant_func = fpx_weight_only(3, 2) + elif "fp8dq" in quantization: #very fast on 4090 when compiled + quant_func = float8_dynamic_activation_float8_weight() + elif 'fp8dqrow' in quantization: + from torchao.quantization.quant_api import PerRow + quant_func = float8_dynamic_activation_float8_weight(granularity=PerRow()) + elif 'int8dq' in quantization: + quant_func = int8_dynamic_activation_int8_weight() + + for i, block in enumerate(pipe.transformer.transformer_blocks): + if "CogVideoXBlock" in str(block): + quantize_(block, quant_func, filter_fn=filter_fn) + + manual_offloading = False # to disable manual .to(device) calls + log.info(f"Quantized transformer blocks to {quantization}") + + # if load_device == "offload_device": + # pipe.transformer.to(offload_device) + # else: + # pipe.transformer.to(device) + + pipeline = { + "pipe": pipe, + "dtype": base_dtype, + "base_path": model, + "onediff": False, + "cpu_offloading": enable_sequential_cpu_offload, + "scheduler_config": scheduler_config, + "model_name": model, + "manual_offloading": manual_offloading, + } + return (pipeline,) -#revion VAE +#region VAE class CogVideoXVAELoader: @classmethod @@ -829,6 +1023,7 @@ NODE_CLASS_MAPPINGS = { "DownloadAndLoadToraModel": DownloadAndLoadToraModel, "CogVideoLoraSelect": CogVideoLoraSelect, "CogVideoXVAELoader": CogVideoXVAELoader, + "CogVideoXModelLoader": CogVideoXModelLoader, } NODE_DISPLAY_NAME_MAPPINGS = { "DownloadAndLoadCogVideoModel": "(Down)load CogVideo Model", @@ -837,4 +1032,5 @@ NODE_DISPLAY_NAME_MAPPINGS = { "DownloadAndLoadToraModel": "(Down)load Tora Model", "CogVideoLoraSelect": "CogVideo LoraSelect", "CogVideoXVAELoader": "CogVideoX VAE Loader", + "CogVideoXModelLoader": "CogVideoX Model Loader", } \ No newline at end of file diff --git a/nodes.py b/nodes.py index b18a978..bd89609 100644 --- 
a/nodes.py +++ b/nodes.py @@ -1,7 +1,6 @@ import os import torch -import folder_paths -import comfy.model_management as mm +import json from einops import rearrange from contextlib import nullcontext @@ -38,11 +37,10 @@ scheduler_mapping = { } available_schedulers = list(scheduler_mapping.keys()) -from .cogvideox_fun.utils import get_image_to_video_latent, get_video_to_video_latent, ASPECT_RATIO_512, get_closest_ratio, to_pil +from diffusers.video_processor import VideoProcessor -from PIL import Image -import numpy as np -import json +import folder_paths +import comfy.model_management as mm script_directory = os.path.dirname(os.path.abspath(__file__)) @@ -129,94 +127,6 @@ class CogVideoXTorchCompileSettings: return (compile_args, ) #region TextEncode -class CogVideoEncodePrompt: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "pipeline": ("COGVIDEOPIPE",), - "prompt": ("STRING", {"default": "", "multiline": True} ), - "negative_prompt": ("STRING", {"default": "", "multiline": True} ), - } - } - - RETURN_TYPES = ("CONDITIONING", "CONDITIONING") - RETURN_NAMES = ("positive", "negative") - FUNCTION = "process" - CATEGORY = "CogVideoWrapper" - - def process(self, pipeline, prompt, negative_prompt): - device = mm.get_torch_device() - offload_device = mm.unet_offload_device() - pipe = pipeline["pipe"] - dtype = pipeline["dtype"] - - pipe.text_encoder.to(device) - pipe.transformer.to(offload_device) - - positive, negative = pipe.encode_prompt( - prompt=prompt, - negative_prompt=negative_prompt, - do_classifier_free_guidance=True, - num_videos_per_prompt=1, - max_sequence_length=226, - device=device, - dtype=dtype, - ) - pipe.text_encoder.to(offload_device) - - return (positive, negative) - -# Inject clip_l and t5xxl w/ individual strength adjustments for ComfyUI's DualCLIPLoader node for CogVideoX. Use CLIPSave node from any SDXL model then load in a custom clip_l model. -# For some reason seems to give a lot more movement and consistency on new CogVideoXFun img2vid? set 'type' to flux / DualClipLoader. -class CogVideoDualTextEncode_311: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "clip": ("CLIP",), - "clip_l": ("STRING", {"default": "", "multiline": True}), - "t5xxl": ("STRING", {"default": "", "multiline": True}), - "clip_l_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), # excessive max for testing, have found intesting results up to 20 max? - "t5xxl_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), # setting this to 0.0001 or level as high as 18 seems to work. 
- } - } - - RETURN_TYPES = ("CONDITIONING",) - RETURN_NAMES = ("conditioning",) - FUNCTION = "process" - CATEGORY = "CogVideoWrapper" - - def process(self, clip, clip_l, t5xxl, clip_l_strength, t5xxl_strength): - load_device = mm.text_encoder_device() - offload_device = mm.text_encoder_offload_device() - - # setup tokenizer for clip_l and t5xxl - clip.tokenizer.t5xxl.pad_to_max_length = True - clip.tokenizer.t5xxl.max_length = 226 - clip.cond_stage_model.to(load_device) - - # tokenize clip_l and t5xxl - tokens_l = clip.tokenize(clip_l, return_word_ids=True) - tokens_t5 = clip.tokenize(t5xxl, return_word_ids=True) - - # encode the tokens separately - embeds_l = clip.encode_from_tokens(tokens_l, return_pooled=False, return_dict=False) - embeds_t5 = clip.encode_from_tokens(tokens_t5, return_pooled=False, return_dict=False) - - # apply strength adjustments to each embedding - if embeds_l.dim() == 3: - embeds_l *= clip_l_strength - if embeds_t5.dim() == 3: - embeds_t5 *= t5xxl_strength - - # combine the embeddings by summing them - combined_embeds = embeds_l + embeds_t5 - - # offload the model to save memory - clip.cond_stage_model.to(offload_device) - - return (combined_embeds,) - class CogVideoTextEncode: @classmethod def INPUT_TYPES(s): @@ -285,20 +195,31 @@ class CogVideoTextEncodeCombine: return (embeds, ) -#region ImageEncode +#region ImageEncode + +def add_noise_to_reference_video(image, ratio=None): + if ratio is None: + sigma = torch.normal(mean=-3.0, std=0.5, size=(image.shape[0],)).to(image.device) + sigma = torch.exp(sigma).to(image.dtype) + else: + sigma = torch.ones((image.shape[0],)).to(image.device, image.dtype) * ratio + + image_noise = torch.randn_like(image) * sigma[:, None, None, None, None] + image_noise = torch.where(image==-1, torch.zeros_like(image), image_noise) + image = image + image_noise + return image + class CogVideoImageEncode: @classmethod def INPUT_TYPES(s): return {"required": { - "pipeline": ("COGVIDEOPIPE",), - "image": ("IMAGE", ), + "vae": ("VAE",), + "start_image": ("IMAGE", ), }, "optional": { - "chunk_size": ("INT", {"default": 16, "min": 4, "tooltip": "How many images to encode at once, lower values use less memory"}), + "end_image": ("IMAGE", ), "enable_tiling": ("BOOLEAN", {"default": False, "tooltip": "Enable tiling for the VAE to reduce memory usage"}), - "mask": ("MASK", ), "noise_aug_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001, "tooltip": "Augment image with noise"}), - "vae_override" : ("VAE", {"default": None, "tooltip": "Override the VAE model in the pipeline"}), }, } @@ -307,49 +228,111 @@ class CogVideoImageEncode: FUNCTION = "encode" CATEGORY = "CogVideoWrapper" - def encode(self, pipeline, image, chunk_size=8, enable_tiling=False, mask=None, noise_aug_strength=0.0, vae_override=None): + def encode(self, vae, start_image, end_image=None, enable_tiling=False, noise_aug_strength=0.0): device = mm.get_torch_device() offload_device = mm.unet_offload_device() generator = torch.Generator(device=device).manual_seed(0) - - B, H, W, C = image.shape - - vae = pipeline["pipe"].vae if vae_override is None else vae_override - vae.enable_slicing() - model_name = pipeline.get("model_name", "") - - if ("1.5" in model_name or "1_5" in model_name) and image.shape[0] == 1: - vae_scaling_factor = 1 #/ vae.config.scaling_factor - else: - vae_scaling_factor = vae.config.scaling_factor + + try: + vae.enable_slicing() + except: + pass + + vae_scaling_factor = vae.config.scaling_factor if enable_tiling: from 
.mz_enable_vae_encode_tiling import enable_vae_encode_tiling enable_vae_encode_tiling(vae) - if not pipeline["cpu_offloading"]: - vae.to(device) + vae.to(device) - check_diffusers_version() try: vae._clear_fake_context_parallel_cache() except: pass - input_image = image.clone() - if mask is not None: - pipeline["pipe"].original_mask = mask - # print(mask.shape) - # mask = mask.repeat(B, 1, 1) # Shape: [B, H, W] - # mask = mask.unsqueeze(-1).repeat(1, 1, 1, C) - # print(mask.shape) - # input_image = input_image * (1 -mask) - else: - pipeline["pipe"].original_mask = None - #input_image = input_image.permute(0, 3, 1, 2) # B, C, H, W - #input_image = pipeline["pipe"].video_processor.preprocess(input_image).to(device, dtype=vae.dtype) - #input_image = input_image.unsqueeze(2) + if noise_aug_strength > 0: + start_image = add_noise_to_reference_video(start_image, ratio=noise_aug_strength) + if end_image is not None: + end_image = add_noise_to_reference_video(end_image, ratio=noise_aug_strength) + + latents_list = [] + start_image = (start_image * 2.0 - 1.0).to(vae.dtype).to(device).unsqueeze(0).permute(0, 4, 1, 2, 3) # B, C, T, H, W + start_latents = vae.encode(start_image).latent_dist.sample(generator) + start_latents = start_latents.permute(0, 2, 1, 3, 4) # B, T, C, H, W + + if end_image is not None: + end_image = (end_image * 2.0 - 1.0).to(vae.dtype).to(device).unsqueeze(0).permute(0, 4, 1, 2, 3) + end_latents = vae.encode(end_image).latent_dist.sample(generator) + end_latents = end_latents.permute(0, 2, 1, 3, 4) # B, T, C, H, W + latents_list = [start_latents, end_latents] + final_latents = torch.cat(latents_list, dim=1) + else: + final_latents = start_latents + + final_latents = final_latents * vae_scaling_factor + + log.info(f"Encoded latents shape: {final_latents.shape}") + vae.to(offload_device) + + return ({"samples": final_latents}, ) + +class CogVideoImageEncodeFunInP: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "vae": ("VAE",), + "start_image": ("IMAGE", ), + "num_frames": ("INT", {"default": 49, "min": 2, "max": 1024, "step": 1}), + }, + "optional": { + "end_image": ("IMAGE", ), + "enable_tiling": ("BOOLEAN", {"default": False, "tooltip": "Enable tiling for the VAE to reduce memory usage"}), + "noise_aug_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001, "tooltip": "Augment image with noise"}), + }, + } + + RETURN_TYPES = ("LATENT",) + RETURN_NAMES = ("image_cond_latents",) + FUNCTION = "encode" + CATEGORY = "CogVideoWrapper" + + def encode(self, vae, start_image, num_frames, end_image=None, enable_tiling=False, noise_aug_strength=0.0): + + device = mm.get_torch_device() + offload_device = mm.unet_offload_device() + generator = torch.Generator(device=device).manual_seed(0) + + try: + vae.enable_slicing() + except: + pass + + vae_scaling_factor = vae.config.scaling_factor + + if enable_tiling: + from .mz_enable_vae_encode_tiling import enable_vae_encode_tiling + enable_vae_encode_tiling(vae) + + vae.to(device) + + try: + vae._clear_fake_context_parallel_cache() + except: + pass + + if end_image is not None: + # Create a tensor of zeros for padding + padding = torch.zeros((num_frames - 2, start_image.shape[1], start_image.shape[2], 3), device=end_image.device, dtype=end_image.dtype) * -1 + # Concatenate start_image, padding, and end_image + input_image = torch.cat([start_image, padding, end_image], dim=0) + else: + # Create a tensor of zeros for padding + padding = torch.zeros((num_frames - 1, start_image.shape[1], start_image.shape[2], 3), 
device=start_image.device, dtype=start_image.dtype) * -1 + # Concatenate start_image and padding + input_image = torch.cat([start_image, padding], dim=0) + input_image = input_image * 2.0 - 1.0 input_image = input_image.to(vae.dtype).to(device) input_image = input_image.unsqueeze(0).permute(0, 4, 1, 2, 3) # B, C, T, H, W @@ -357,120 +340,34 @@ class CogVideoImageEncode: B, C, T, H, W = input_image.shape if noise_aug_strength > 0: input_image = add_noise_to_reference_video(input_image, ratio=noise_aug_strength) + + bs = 1 + new_mask_pixel_values = [] + print("input_image shape: ",input_image.shape) + for i in range(0, input_image.shape[0], bs): + mask_pixel_values_bs = input_image[i : i + bs] + mask_pixel_values_bs = vae.encode(mask_pixel_values_bs)[0] + print("mask_pixel_values_bs: ",mask_pixel_values_bs.parameters.shape) + mask_pixel_values_bs = mask_pixel_values_bs.mode() + print("mask_pixel_values_bs: ",mask_pixel_values_bs.shape, mask_pixel_values_bs.min(), mask_pixel_values_bs.max()) + new_mask_pixel_values.append(mask_pixel_values_bs) + masked_image_latents = torch.cat(new_mask_pixel_values, dim = 0) + masked_image_latents = masked_image_latents.permute(0, 2, 1, 3, 4) # B, T, C, H, W - latents_list = [] - # Loop through the temporal dimension in chunks of 16 - for i in range(0, T, chunk_size): - # Get the chunk of 16 frames (or remaining frames if less than 16 are left) - end_index = min(i + chunk_size, T) - image_chunk = input_image[:, :, i:end_index, :, :] # Shape: [B, C, chunk_size, H, W] + mask = torch.zeros_like(masked_image_latents[:, :, :1, :, :]) + if end_image is not None: + mask[:, -1, :, :, :] = vae_scaling_factor + mask[:, 0, :, :, :] = vae_scaling_factor - # Encode the chunk of images - latents = vae.encode(image_chunk) - - sample_mode = "sample" - if hasattr(latents, "latent_dist") and sample_mode == "sample": - latents = latents.latent_dist.sample(generator) - elif hasattr(latents, "latent_dist") and sample_mode == "argmax": - latents = latents.latent_dist.mode() - elif hasattr(latents, "latents"): - latents = latents.latents - - latents = latents.permute(0, 2, 1, 3, 4) # B, T_chunk, C, H, W - latents_list.append(latents) - - # Concatenate all the chunks along the temporal dimension - final_latents = torch.cat(latents_list, dim=1) - final_latents = final_latents * vae_scaling_factor + final_latents = masked_image_latents * vae_scaling_factor log.info(f"Encoded latents shape: {final_latents.shape}") - if not pipeline["cpu_offloading"]: - vae.to(offload_device) + vae.to(offload_device) - return ({"samples": final_latents}, ) - -class CogVideoImageInterpolationEncode: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "pipeline": ("COGVIDEOPIPE",), - "start_image": ("IMAGE", ), - "end_image": ("IMAGE", ), - }, - "optional": { - "enable_tiling": ("BOOLEAN", {"default": False, "tooltip": "Enable tiling for the VAE to reduce memory usage"}), - "mask": ("MASK", ), - "vae_override" : ("VAE", {"default": None, "tooltip": "Override the VAE model in the pipeline"}), - - }, - } - - RETURN_TYPES = ("LATENT",) - RETURN_NAMES = ("samples",) - FUNCTION = "encode" - CATEGORY = "CogVideoWrapper" - - def encode(self, pipeline, start_image, end_image, enable_tiling=False, mask=None, vae_override=None): - device = mm.get_torch_device() - offload_device = mm.unet_offload_device() - generator = torch.Generator(device=device).manual_seed(0) - - B, H, W, C = start_image.shape - - vae = pipeline["pipe"].vae if vae_override is None else vae_override - vae.enable_slicing() - 
model_name = pipeline.get("model_name", "") - - if ("1.5" in model_name or "1_5" in model_name): - vae_scaling_factor = 1 / vae.config.scaling_factor - else: - vae_scaling_factor = vae.config.scaling_factor - vae.enable_slicing() - - if enable_tiling: - from .mz_enable_vae_encode_tiling import enable_vae_encode_tiling - enable_vae_encode_tiling(vae) - - if not pipeline["cpu_offloading"]: - vae.to(device) - - check_diffusers_version() - try: - vae._clear_fake_context_parallel_cache() - except: - pass - - if mask is not None: - pipeline["pipe"].original_mask = mask - # print(mask.shape) - # mask = mask.repeat(B, 1, 1) # Shape: [B, H, W] - # mask = mask.unsqueeze(-1).repeat(1, 1, 1, C) - # print(mask.shape) - # input_image = input_image * (1 -mask) - else: - pipeline["pipe"].original_mask = None - - start_image = (start_image * 2.0 - 1.0).to(vae.dtype).to(device).unsqueeze(0).permute(0, 4, 1, 2, 3) # B, C, T, H, W - end_image = (end_image * 2.0 - 1.0).to(vae.dtype).to(device).unsqueeze(0).permute(0, 4, 1, 2, 3) - B, T, C, H, W = start_image.shape - - latents_list = [] - - # Encode the chunk of images - start_latents = vae.encode(start_image).latent_dist.sample(generator) * vae_scaling_factor - end_latents = vae.encode(end_image).latent_dist.sample(generator) * vae_scaling_factor - - start_latents = start_latents.permute(0, 2, 1, 3, 4) # B, T, C, H, W - end_latents = end_latents.permute(0, 2, 1, 3, 4) # B, T, C, H, W - latents_list = [start_latents, end_latents] - - # Concatenate all the chunks along the temporal dimension - final_latents = torch.cat(latents_list, dim=1) - log.info(f"Encoded latents shape: {final_latents.shape}") - if not pipeline["cpu_offloading"]: - vae.to(offload_device) - - return ({"samples": final_latents}, ) + return ({ + "samples": final_latents, + "mask": mask + },) #region Tora from .tora.traj_utils import process_traj, scale_traj_list_to_256 @@ -480,8 +377,8 @@ class ToraEncodeTrajectory: @classmethod def INPUT_TYPES(s): return {"required": { - "pipeline": ("COGVIDEOPIPE",), "tora_model": ("TORAMODEL",), + "vae": ("VAE",), "coordinates": ("STRING", {"forceInput": True}), "width": ("INT", {"default": 720, "min": 128, "max": 2048, "step": 8}), "height": ("INT", {"default": 480, "min": 128, "max": 2048, "step": 8}), @@ -491,7 +388,7 @@ class ToraEncodeTrajectory: "end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), }, "optional": { - "enable_tiling": ("BOOLEAN", {"default": False}), + "enable_tiling": ("BOOLEAN", {"default": True}), } } @@ -500,14 +397,16 @@ class ToraEncodeTrajectory: FUNCTION = "encode" CATEGORY = "CogVideoWrapper" - def encode(self, pipeline, width, height, num_frames, coordinates, strength, start_percent, end_percent, tora_model, enable_tiling=False): + def encode(self, vae, width, height, num_frames, coordinates, strength, start_percent, end_percent, tora_model, enable_tiling=False): check_diffusers_version() device = mm.get_torch_device() offload_device = mm.unet_offload_device() generator = torch.Generator(device=device).manual_seed(0) - vae = pipeline["pipe"].vae - vae.enable_slicing() + try: + vae.enable_slicing() + except: + pass try: vae._clear_fake_context_parallel_cache() except: @@ -533,33 +432,26 @@ class ToraEncodeTrajectory: video_flow, points = process_traj(coords_list, num_frames, (height,width), device=device) video_flow = rearrange(video_flow, "T H W C -> T C H W") video_flow = flow_to_image(video_flow).unsqueeze_(0).to(device) # [1 T C H W] - - - video_flow = ( - rearrange(video_flow / 255.0 * 2 - 1, 
"B T C H W -> B C T H W").contiguous().to(vae.dtype) - ) + video_flow = (rearrange(video_flow / 255.0 * 2 - 1, "B T C H W -> B C T H W").contiguous().to(vae.dtype)) video_flow_image = rearrange(video_flow, "B C T H W -> (B T) H W C") - print(video_flow_image.shape) + #print(video_flow_image.shape) mm.soft_empty_cache() # VAE encode - if not pipeline["cpu_offloading"]: - vae.to(device) - + vae.to(device) video_flow = vae.encode(video_flow).latent_dist.sample(generator) * vae.config.scaling_factor log.info(f"video_flow shape after encoding: {video_flow.shape}") #torch.Size([1, 16, 4, 80, 80]) + vae.to(offload_device) - if not pipeline["cpu_offloading"]: - vae.to(offload_device) + tora_model["traj_extractor"].to(device) #print("video_flow shape before traj_extractor: ", video_flow.shape) #torch.Size([1, 16, 4, 80, 80]) video_flow_features = tora_model["traj_extractor"](video_flow.to(torch.float32)) + tora_model["traj_extractor"].to(offload_device) video_flow_features = torch.stack(video_flow_features) #print("video_flow_features after traj_extractor: ", video_flow_features.shape) #torch.Size([42, 4, 128, 40, 40]) video_flow_features = video_flow_features * strength - - tora = { "video_flow_features" : video_flow_features, "start_percent" : start_percent, @@ -574,7 +466,7 @@ class ToraEncodeOpticalFlow: @classmethod def INPUT_TYPES(s): return {"required": { - "pipeline": ("COGVIDEOPIPE",), + "vae": ("VAE",), "tora_model": ("TORAMODEL",), "optical_flow": ("IMAGE", ), "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), @@ -589,15 +481,18 @@ class ToraEncodeOpticalFlow: FUNCTION = "encode" CATEGORY = "CogVideoWrapper" - def encode(self, pipeline, optical_flow, strength, tora_model, start_percent, end_percent): + def encode(self, vae, optical_flow, strength, tora_model, start_percent, end_percent): check_diffusers_version() B, H, W, C = optical_flow.shape device = mm.get_torch_device() offload_device = mm.unet_offload_device() generator = torch.Generator(device=device).manual_seed(0) - vae = pipeline["pipe"].vae - vae.enable_slicing() + try: + vae.enable_slicing() + except: + pass + try: vae._clear_fake_context_parallel_cache() except: @@ -609,15 +504,14 @@ class ToraEncodeOpticalFlow: mm.soft_empty_cache() # VAE encode - if not pipeline["cpu_offloading"]: - vae.to(device) + + vae.to(device) video_flow = video_flow.to(vae.dtype).to(vae.device) video_flow = vae.encode(video_flow).latent_dist.sample(generator) * vae.config.scaling_factor vae.to(offload_device) video_flow_features = tora_model["traj_extractor"](video_flow.to(torch.float32)) video_flow_features = torch.stack(video_flow_features) - video_flow_features = video_flow_features * strength log.info(f"video_flow shape: {video_flow.shape}") @@ -632,91 +526,7 @@ class ToraEncodeOpticalFlow: return (tora, ) -def add_noise_to_reference_video(image, ratio=None): - if ratio is None: - sigma = torch.normal(mean=-3.0, std=0.5, size=(image.shape[0],)).to(image.device) - sigma = torch.exp(sigma).to(image.dtype) - else: - sigma = torch.ones((image.shape[0],)).to(image.device, image.dtype) * ratio - - image_noise = torch.randn_like(image) * sigma[:, None, None, None, None] - image_noise = torch.where(image==-1, torch.zeros_like(image), image_noise) - image = image + image_noise - return image -class CogVideoControlImageEncode: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "pipeline": ("COGVIDEOPIPE",), - "control_video": ("IMAGE", ), - "base_resolution": ("INT", {"min": 64, "max": 1280, "step": 64, "default": 
512, "tooltip": "Base resolution, closest training data bucket resolution is chosen based on the selection."}), - "enable_tiling": ("BOOLEAN", {"default": False, "tooltip": "Enable tiling for the VAE to reduce memory usage"}), - "noise_aug_strength": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}), - }, - } - - RETURN_TYPES = ("COGCONTROL_LATENTS", "INT", "INT",) - RETURN_NAMES = ("control_latents", "width", "height") - FUNCTION = "encode" - CATEGORY = "CogVideoWrapper" - - def encode(self, pipeline, control_video, base_resolution, enable_tiling, noise_aug_strength=0.0563): - device = mm.get_torch_device() - offload_device = mm.unet_offload_device() - - B, H, W, C = control_video.shape - - vae = pipeline["pipe"].vae - vae.enable_slicing() - - if enable_tiling: - from .mz_enable_vae_encode_tiling import enable_vae_encode_tiling - enable_vae_encode_tiling(vae) - - if not pipeline["cpu_offloading"]: - vae.to(device) - - # Count most suitable height and width - aspect_ratio_sample_size = {key : [x / 512 * base_resolution for x in ASPECT_RATIO_512[key]] for key in ASPECT_RATIO_512.keys()} - - control_video = np.array(control_video.cpu().numpy() * 255, np.uint8) - original_width, original_height = Image.fromarray(control_video[0]).size - - closest_size, closest_ratio = get_closest_ratio(original_height, original_width, ratios=aspect_ratio_sample_size) - height, width = [int(x / 16) * 16 for x in closest_size] - log.info(f"Closest bucket size: {width}x{height}") - - video_length = int((B - 1) // vae.config.temporal_compression_ratio * vae.config.temporal_compression_ratio) + 1 if B != 1 else 1 - input_video, input_video_mask, clip_image = get_video_to_video_latent(control_video, video_length=video_length, sample_size=(height, width)) - - control_video = pipeline["pipe"].image_processor.preprocess(rearrange(input_video, "b c f h w -> (b f) c h w"), height=height, width=width) - control_video = control_video.to(dtype=torch.float32) - control_video = rearrange(control_video, "(b f) c h w -> b c f h w", f=video_length) - - masked_image = control_video.to(device=device, dtype=vae.dtype) - if noise_aug_strength > 0: - masked_image = add_noise_to_reference_video(masked_image, ratio=noise_aug_strength) - bs = 1 - new_mask_pixel_values = [] - for i in range(0, masked_image.shape[0], bs): - mask_pixel_values_bs = masked_image[i : i + bs] - mask_pixel_values_bs = vae.encode(mask_pixel_values_bs)[0] - mask_pixel_values_bs = mask_pixel_values_bs.mode() - new_mask_pixel_values.append(mask_pixel_values_bs) - masked_image_latents = torch.cat(new_mask_pixel_values, dim = 0) - masked_image_latents = masked_image_latents * vae.config.scaling_factor - - vae.to(offload_device) - - control_latents = { - "latents": masked_image_latents, - "num_frames" : B, - "height" : height, - "width" : width, - } - - return (control_latents, width, height) #region FasterCache class CogVideoXFasterCache: @@ -757,12 +567,10 @@ class CogVideoSampler: def INPUT_TYPES(s): return { "required": { - "pipeline": ("COGVIDEOPIPE",), + "model": ("COGVIDEOMODEL",), "positive": ("CONDITIONING", ), "negative": ("CONDITIONING", ), - "width": ("INT", {"default": 720, "min": 128, "max": 2048, "step": 16}), - "height": ("INT", {"default": 480, "min": 128, "max": 2048, "step": 16}), - "num_frames": ("INT", {"default": 49, "min": 17, "max": 1024, "step": 4}), + "num_frames": ("INT", {"default": 49, "min": 1, "max": 1024, "step": 1}), "steps": ("INT", {"default": 50, "min": 1}), "cfg": ("FLOAT", {"default": 6.0, "min": 0.0, "max": 
30.0, "step": 0.01}), "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), @@ -773,45 +581,54 @@ class CogVideoSampler: }, "optional": { "samples": ("LATENT", {"tooltip": "init Latents to use for video2video process"} ), - "denoise_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), "image_cond_latents": ("LATENT",{"tooltip": "Latent to use for image2video conditioning"} ), + "denoise_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), "context_options": ("COGCONTEXT", ), "controlnet": ("COGVIDECONTROLNET",), "tora_trajectory": ("TORAFEATURES", ), "fastercache": ("FASTERCACHEARGS", ), - #"sigmas": ("SIGMAS", ), } } - RETURN_TYPES = ("COGVIDEOPIPE", "LATENT",) - RETURN_NAMES = ("cogvideo_pipe", "samples",) + RETURN_TYPES = ("LATENT",) + RETURN_NAMES = ("samples",) FUNCTION = "process" CATEGORY = "CogVideoWrapper" - def process(self, pipeline, positive, negative, steps, cfg, seed, height, width, num_frames, scheduler, samples=None, + def process(self, pipeline, positive, negative, steps, cfg, seed, scheduler, num_frames, samples=None, denoise_strength=1.0, image_cond_latents=None, context_options=None, controlnet=None, tora_trajectory=None, fastercache=None): mm.soft_empty_cache() - base_path = pipeline["base_path"] model_name = pipeline.get("model_name", "") - supports_image_conds = True if "I2V" in model_name or "interpolation" in model_name.lower() else False + supports_image_conds = True if "I2V" in model_name or "interpolation" in model_name.lower() or "fun" in model_name.lower() else False - assert "fun" not in base_path.lower(), "'Fun' models not supported in 'CogVideoSampler', use the 'CogVideoXFunSampler'" - assert ( - "I2V" not in model_name or - "1.5" in model_name or - "1_5" in model_name or - num_frames == 49 or - context_options is not None - ), "1.0 I2V model can only do 49 frames" + if "fun" in model_name.lower() and image_cond_latents is not None: + assert image_cond_latents["mask"] is not None, "For fun inpaint models use CogVideoImageEncodeFunInP" + fun_mask = image_cond_latents["mask"] + else: + fun_mask = None + if image_cond_latents is not None: assert supports_image_conds, "Image condition latents only supported for I2V and Interpolation models" - # if "I2V" in model_name: - # assert image_cond_latents["samples"].shape[1] == 1, "I2V model only supports single image condition latent" - # elif "interpolation" in model_name.lower(): - # assert image_cond_latents["samples"].shape[1] == 2, "Interpolation model needs two image condition latents" + image_conds = image_cond_latents["samples"] + if "1.5" in model_name or "1_5" in model_name: + image_conds = image_conds / 0.7 # needed for 1.5 models else: - assert not supports_image_conds, "Image condition latents required for I2V models" + if not "fun" in model_name.lower(): + assert not supports_image_conds, "Image condition latents required for I2V models" + image_conds = None + + if samples is not None: + if len(samples["samples"].shape) == 5: + B, T, C, H, W = samples["samples"].shape + latents = samples["samples"] + if len(samples["samples"].shape) == 4: + B, C, H, W = samples["samples"].shape + latents = None + if image_cond_latents is not None: + B, T, C, H, W = image_cond_latents["samples"].shape + height = H * 8 + width = W * 8 device = mm.get_torch_device() offload_device = mm.unet_offload_device() @@ -861,9 +678,6 @@ class CogVideoSampler: cfg = [cfg for _ in range(steps)] else: assert len(cfg) == steps, "Length of cfg list must match number of 
steps" - - # if sigmas is not None: - # sigma_list = sigmas.tolist() try: torch.cuda.reset_peak_memory_stats(device) except: @@ -878,9 +692,9 @@ class CogVideoSampler: width = width, num_frames = num_frames, guidance_scale=cfg, - #sigmas=sigma_list if sigmas is not None else None, - latents=samples["samples"] if samples is not None else None, - image_cond_latents=image_cond_latents["samples"] if image_cond_latents is not None else None, + latents=latents if samples is not None else None, + fun_mask = fun_mask, + image_cond_latents=image_conds, denoise_strength=denoise_strength, prompt_embeds=positive.to(dtype).to(device), negative_prompt_embeds=negative.to(dtype).to(device), @@ -910,7 +724,11 @@ class CogVideoSampler: except: pass - return (pipeline, {"samples": latents}) + additional_frames = getattr(pipe, "additional_frames", 0) + return ({ + "samples": latents, + "additional_frames": additional_frames, + },) class CogVideoControlNet: @classmethod @@ -930,13 +748,7 @@ class CogVideoControlNet: CATEGORY = "CogVideoWrapper" def encode(self, controlnet, images, control_strength, control_start_percent, control_end_percent): - device = mm.get_torch_device() - offload_device = mm.unet_offload_device() - - B, H, W, C = images.shape - control_frames = images.permute(0, 3, 1, 2).unsqueeze(0) * 2 - 1 - controlnet = { "control_model": controlnet, "control_frames": control_frames, @@ -944,7 +756,6 @@ class CogVideoControlNet: "control_start": control_start_percent, "control_end": control_end_percent, } - return (controlnet,) #region VideoDecode @@ -952,8 +763,8 @@ class CogVideoDecode: @classmethod def INPUT_TYPES(s): return {"required": { - "pipeline": ("COGVIDEOPIPE",), "samples": ("LATENT", ), + "vae": ("VAE", {"default": None}), "enable_vae_tiling": ("BOOLEAN", {"default": True, "tooltip": "Drastically reduces memory use but may introduce seams"}), }, "optional": { @@ -962,7 +773,6 @@ class CogVideoDecode: "tile_overlap_factor_height": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}), "tile_overlap_factor_width": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}), "auto_tile_size": ("BOOLEAN", {"default": True, "tooltip": "Auto size based on height and width, default is half the size"}), - "vae_override": ("VAE", {"default": None}), } } @@ -971,19 +781,20 @@ class CogVideoDecode: FUNCTION = "decode" CATEGORY = "CogVideoWrapper" - def decode(self, pipeline, samples, enable_vae_tiling, tile_sample_min_height, tile_sample_min_width, tile_overlap_factor_height, tile_overlap_factor_width, - auto_tile_size=True, vae_override=None): + def decode(self, vae, samples, enable_vae_tiling, tile_sample_min_height, tile_sample_min_width, tile_overlap_factor_height, tile_overlap_factor_width, + auto_tile_size=True, pipeline=None): device = mm.get_torch_device() offload_device = mm.unet_offload_device() latents = samples["samples"] - vae = pipeline["pipe"].vae if vae_override is None else vae_override + + additional_frames = samples.get("additional_frames", 0) - additional_frames = getattr(pipeline["pipe"], "additional_frames", 0) + try: + vae.enable_slicing() + except: + pass - vae.enable_slicing() - - if not pipeline["cpu_offloading"]: - vae.to(device) + vae.to(device) if enable_vae_tiling: if auto_tile_size: vae.enable_tiling() @@ -999,11 +810,11 @@ class CogVideoDecode: latents = latents.to(vae.dtype).to(device) latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width] latents = 1 / vae.config.scaling_factor * latents + try: 
vae._clear_fake_context_parallel_cache() except: pass - try: frames = vae.decode(latents[:, :, additional_frames:]).sample except: @@ -1013,11 +824,13 @@ class CogVideoDecode: frames = vae.decode(latents[:, :, additional_frames:]).sample vae.disable_tiling() - if not pipeline["cpu_offloading"]: - vae.to(offload_device) + vae.to(offload_device) mm.soft_empty_cache() - video = pipeline["pipe"].video_processor.postprocess_video(video=frames, output_type="pt") + video_processor = VideoProcessor(vae_scale_factor=8) + video_processor.config.do_resize = False + + video = video_processor.postprocess_video(video=frames, output_type="pt") video = video[0].permute(0, 2, 3, 1).cpu().float() return (video,) @@ -1041,6 +854,7 @@ class CogVideoXFunResizeToClosestBucket: def resize(self, images, base_resolution, upscale_method, crop): from comfy.utils import common_upscale + from .cogvideox_fun.utils import ASPECT_RATIO_512, get_closest_ratio B, H, W, C = images.shape # Count most suitable height and width @@ -1056,256 +870,6 @@ class CogVideoXFunResizeToClosestBucket: return (resized_images, width, height) -#region FunSamplers -class CogVideoXFunSampler: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "pipeline": ("COGVIDEOPIPE",), - "positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "video_length": ("INT", {"default": 49, "min": 5, "max": 2048, "step": 4}), - "width": ("INT", {"default": 720, "min": 128, "max": 2048, "step": 8}), - "height": ("INT", {"default": 480, "min": 128, "max": 2048, "step": 8}), - "seed": ("INT", {"default": 43, "min": 0, "max": 0xffffffffffffffff}), - "steps": ("INT", {"default": 50, "min": 1, "max": 200, "step": 1}), - "cfg": ("FLOAT", {"default": 6.0, "min": 1.0, "max": 20.0, "step": 0.01}), - "scheduler": (available_schedulers, {"default": 'DDIM'}) - }, - "optional":{ - "start_img": ("IMAGE",), - "end_img": ("IMAGE",), - "noise_aug_strength": ("FLOAT", {"default": 0.0563, "min": 0.0, "max": 1.0, "step": 0.001}), - "context_options": ("COGCONTEXT", ), - "tora_trajectory": ("TORAFEATURES", ), - "fastercache": ("FASTERCACHEARGS",), - "vid2vid_images": ("IMAGE",), - "vid2vid_denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}), - }, - } - - RETURN_TYPES = ("COGVIDEOPIPE", "LATENT",) - RETURN_NAMES = ("cogvideo_pipe", "samples",) - FUNCTION = "process" - CATEGORY = "CogVideoWrapper" - - def process(self, pipeline, positive, negative, video_length, width, height, seed, steps, cfg, scheduler, - start_img=None, end_img=None, noise_aug_strength=0.0563, context_options=None, fastercache=None, - tora_trajectory=None, vid2vid_images=None, vid2vid_denoise=1.0): - device = mm.get_torch_device() - offload_device = mm.unet_offload_device() - pipe = pipeline["pipe"] - dtype = pipeline["dtype"] - base_path = pipeline["base_path"] - assert "fun" in base_path.lower(), "'Unfun' models not supported in 'CogVideoXFunSampler', use the 'CogVideoSampler'" - assert "pose" not in base_path.lower(), "'Pose' models not supported in 'CogVideoXFunSampler', use the 'CogVideoXFunControlSampler'" - - mm.soft_empty_cache() - - #vid2vid - if vid2vid_images is not None: - validation_video = np.array(vid2vid_images.cpu().numpy() * 255, np.uint8) - #img2vid - elif start_img is not None: - start_img = [to_pil(_start_img) for _start_img in start_img] if start_img is not None else None - end_img = [to_pil(_end_img) for _end_img in end_img] if end_img is not None else None - - # Load Sampler - scheduler_config = pipeline["scheduler_config"] - if scheduler in 
scheduler_mapping: - noise_scheduler = scheduler_mapping[scheduler].from_config(scheduler_config) - pipe.scheduler = noise_scheduler - else: - raise ValueError(f"Unknown scheduler: {scheduler}") - - if not pipeline["cpu_offloading"] and pipeline["manual_offloading"]: - pipe.transformer.to(device) - - if context_options is not None: - context_frames = context_options["context_frames"] // 4 - context_stride = context_options["context_stride"] // 4 - context_overlap = context_options["context_overlap"] // 4 - else: - context_frames, context_stride, context_overlap = None, None, None - - if tora_trajectory is not None: - pipe.transformer.fuser_list = tora_trajectory["fuser_list"] - - if fastercache is not None: - pipe.transformer.use_fastercache = True - pipe.transformer.fastercache_counter = 0 - pipe.transformer.fastercache_start_step = fastercache["start_step"] - pipe.transformer.fastercache_lf_step = fastercache["lf_step"] - pipe.transformer.fastercache_hf_step = fastercache["hf_step"] - pipe.transformer.fastercache_device = fastercache["cache_device"] - pipe.transformer.fastercache_num_blocks_to_cache = fastercache["num_blocks_to_cache"] - log.info(f"FasterCache enabled for {pipe.transformer.fastercache_num_blocks_to_cache} blocks out of {len(pipe.transformer.transformer_blocks)}") - else: - pipe.transformer.use_fastercache = False - pipe.transformer.fastercache_counter = 0 - - generator = torch.Generator(device=torch.device("cpu")).manual_seed(seed) - - autocastcondition = not pipeline["onediff"] or not dtype == torch.float32 - autocast_context = torch.autocast(mm.get_autocast_device(device), dtype=dtype) if autocastcondition else nullcontext() - with autocast_context: - video_length = int((video_length - 1) // pipe.vae.config.temporal_compression_ratio * pipe.vae.config.temporal_compression_ratio) + 1 if video_length != 1 else 1 - if vid2vid_images is not None: - input_video, input_video_mask, clip_image = get_video_to_video_latent(validation_video, video_length=video_length, sample_size=(height, width)) - else: - input_video, input_video_mask, clip_image = get_image_to_video_latent(start_img, end_img, video_length=video_length, sample_size=(height, width)) - - common_params = { - "prompt_embeds": positive.to(dtype).to(device), - "negative_prompt_embeds": negative.to(dtype).to(device), - "num_frames": video_length, - "height": height, - "width": width, - "generator": generator, - "guidance_scale": cfg, - "num_inference_steps": steps, - "comfyui_progressbar": True, - "context_schedule":context_options["context_schedule"] if context_options is not None else None, - "context_frames":context_frames, - "context_stride": context_stride, - "context_overlap": context_overlap, - "freenoise":context_options["freenoise"] if context_options is not None else None, - "tora":tora_trajectory if tora_trajectory is not None else None, - } - latents = pipe( - **common_params, - video = input_video, - mask_video = input_video_mask, - noise_aug_strength = noise_aug_strength, - strength = vid2vid_denoise, - ) - if not pipeline["cpu_offloading"] and pipeline["manual_offloading"]: - pipe.transformer.to(offload_device) - #clear FasterCache - if fastercache is not None: - for block in pipe.transformer.transformer_blocks: - if (hasattr, block, "cached_hidden_states") and block.cached_hidden_states is not None: - block.cached_hidden_states = None - block.cached_encoder_hidden_states = None - - mm.soft_empty_cache() - - return (pipeline, {"samples": latents}) - -class CogVideoXFunVid2VidSampler: - @classmethod - def 
INPUT_TYPES(s): - return { - "required": { - "note": ("STRING", {"default": "This node is deprecated, functionality moved to 'CogVideoXFunSampler' node instead.", "multiline": True}), - }, - } - - RETURN_TYPES = () - FUNCTION = "process" - CATEGORY = "CogVideoWrapper" - DEPRECATED = True - def process(self): - return () - -class CogVideoXFunControlSampler: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "pipeline": ("COGVIDEOPIPE",), - "positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - "control_latents": ("COGCONTROL_LATENTS",), - "seed": ("INT", {"default": 42, "min": 0, "max": 0xffffffffffffffff}), - "steps": ("INT", {"default": 25, "min": 1, "max": 200, "step": 1}), - "cfg": ("FLOAT", {"default": 6.0, "min": 1.0, "max": 20.0, "step": 0.01}), - "scheduler": (available_schedulers, {"default": 'DDIM'}), - "control_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "control_start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.01}), - "control_end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - }, - "optional": { - "samples": ("LATENT", ), - "denoise_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - "context_options": ("COGCONTEXT", ), - }, - } - - RETURN_TYPES = ("COGVIDEOPIPE", "LATENT",) - RETURN_NAMES = ("cogvideo_pipe", "samples",) - FUNCTION = "process" - CATEGORY = "CogVideoWrapper" - - def process(self, pipeline, positive, negative, seed, steps, cfg, scheduler, control_latents, - control_strength=1.0, control_start_percent=0.0, control_end_percent=1.0, - samples=None, denoise_strength=1.0, context_options=None): - device = mm.get_torch_device() - offload_device = mm.unet_offload_device() - pipe = pipeline["pipe"] - dtype = pipeline["dtype"] - base_path = pipeline["base_path"] - - assert "fun" in base_path.lower(), "'Unfun' models not supported in 'CogVideoXFunSampler', use the 'CogVideoSampler'" - - if not pipeline["cpu_offloading"]: - pipe.enable_model_cpu_offload(device=device) - - mm.soft_empty_cache() - - if context_options is not None: - context_frames = context_options["context_frames"] // 4 - context_stride = context_options["context_stride"] // 4 - context_overlap = context_options["context_overlap"] // 4 - else: - context_frames, context_stride, context_overlap = None, None, None - - # Load Sampler - scheduler_config = pipeline["scheduler_config"] - if scheduler in scheduler_mapping: - noise_scheduler = scheduler_mapping[scheduler].from_config(scheduler_config) - pipe.scheduler = noise_scheduler - else: - raise ValueError(f"Unknown scheduler: {scheduler}") - - generator = torch.Generator(device=torch.device("cpu")).manual_seed(seed) - - autocastcondition = not pipeline["onediff"] or not dtype == torch.float32 - autocast_context = torch.autocast(mm.get_autocast_device(device)) if autocastcondition else nullcontext() - with autocast_context: - - common_params = { - "prompt_embeds": positive.to(dtype).to(device), - "negative_prompt_embeds": negative.to(dtype).to(device), - "num_frames": control_latents["num_frames"], - "height": control_latents["height"], - "width": control_latents["width"], - "generator": generator, - "guidance_scale": cfg, - "num_inference_steps": steps, - "comfyui_progressbar": True, - } - - latents = pipe( - **common_params, - control_video=control_latents["latents"], - control_strength=control_strength, - control_start_percent=control_start_percent, - control_end_percent=control_end_percent, - 
scheduler_name=scheduler, - latents=samples["samples"] if samples is not None else None, - denoise_strength=denoise_strength, - context_schedule=context_options["context_schedule"] if context_options is not None else None, - context_frames=context_frames, - context_stride= context_stride, - context_overlap= context_overlap, - freenoise=context_options["freenoise"] if context_options is not None else None - - ) - - return (pipeline, {"samples": latents}) - class CogVideoLatentPreview: @classmethod def INPUT_TYPES(s): @@ -1332,9 +896,6 @@ class CogVideoLatentPreview: latents = samples["samples"].clone() print("in sample", latents.shape) latents = latents.permute(0, 2, 1, 3, 4) # [batch_size, num_channels, num_frames, height, width] - - device = mm.get_torch_device() - offload_device = mm.unet_offload_device() #[[0.0658900170023352, 0.04687556512203313, -0.056971557475649186], [-0.01265770449940036, -0.02814809569100843, -0.0768912512529372], [0.061456544746314665, 0.0005511617552452358, -0.0652574975291287], [-0.09020669168815276, -0.004755440180558637, -0.023763970904494294], [0.031766964513999865, -0.030959599938418375, 0.08654669098083616], [-0.005981764690055846, -0.08809119252349802, -0.06439852368217663], [-0.0212114426433989, 0.08894281999597677, 0.05155629477559985], [-0.013947446911030725, -0.08987475069900677, -0.08923124751217484], [-0.08235967967978511, 0.07268025379974379, 0.08830486164536037], [-0.08052049179735378, -0.050116143175332195, 0.02023752569687405], [-0.07607527759162447, 0.06827156419895981, 0.08678111754261035], [-0.04689089232553825, 0.017294986041038893, -0.10280492336438908], [-0.06105783150270304, 0.07311850680875913, 0.019995735372550075], [-0.09232589996527711, -0.012869815059053047, -0.04355587834255975], [-0.06679931010802251, 0.018399815879067458, 0.06802404982033876], [-0.013062632927118165, -0.04292991477896661, 0.07476243356192845]] latent_rgb_factors =[[0.11945946736445662, 0.09919175788574555, -0.004832707433877734], [-0.0011977028264356232, 0.05496505130267682, 0.021321622433638193], [-0.014088548986590666, -0.008701477861945644, -0.020991313281459367], [0.03063921972519621, 0.12186477097625073, 0.0139593690235148], [0.0927403067854673, 0.030293187650929136, 0.05083134241694003], [0.0379112441305742, 0.04935199882777209, 0.058562766246777774], [0.017749911959153715, 0.008839453404921545, 0.036005638019226294], [0.10610119248526109, 0.02339855688237826, 0.057154257614084596], [0.1273639464837117, -0.010959856130713416, 0.043268631260428896], [-0.01873510946881321, 0.08220930648486932, 0.10613256772247093], [0.008429116376722327, 0.07623856561000408, 0.09295712117576727], [0.12938137079617007, 0.12360403483892413, 0.04478930933220116], [0.04565908794779364, 0.041064156741596365, -0.017695041535528512], [0.00019003240570281826, -0.013965147883381978, 0.05329669529635849], [0.08082391586738358, 0.11548306825496074, -0.021464170006615893], [-0.01517932393230994, -0.0057985555313003236, 0.07216646476618871]] @@ -1374,15 +935,9 @@ NODE_CLASS_MAPPINGS = { "CogVideoSampler": CogVideoSampler, "CogVideoDecode": CogVideoDecode, "CogVideoTextEncode": CogVideoTextEncode, - "CogVideoDualTextEncode_311": CogVideoDualTextEncode_311, "CogVideoImageEncode": CogVideoImageEncode, - "CogVideoImageInterpolationEncode": CogVideoImageInterpolationEncode, - "CogVideoXFunSampler": CogVideoXFunSampler, - "CogVideoXFunVid2VidSampler": CogVideoXFunVid2VidSampler, - "CogVideoXFunControlSampler": CogVideoXFunControlSampler, "CogVideoTextEncodeCombine": CogVideoTextEncodeCombine, 
"CogVideoTransformerEdit": CogVideoTransformerEdit, - "CogVideoControlImageEncode": CogVideoControlImageEncode, "CogVideoContextOptions": CogVideoContextOptions, "CogVideoControlNet": CogVideoControlNet, "ToraEncodeTrajectory": ToraEncodeTrajectory, @@ -1390,21 +945,16 @@ NODE_CLASS_MAPPINGS = { "CogVideoXFasterCache": CogVideoXFasterCache, "CogVideoXFunResizeToClosestBucket": CogVideoXFunResizeToClosestBucket, "CogVideoLatentPreview": CogVideoLatentPreview, - "CogVideoXTorchCompileSettings": CogVideoXTorchCompileSettings + "CogVideoXTorchCompileSettings": CogVideoXTorchCompileSettings, + "CogVideoImageEncodeFunInP": CogVideoImageEncodeFunInP, } NODE_DISPLAY_NAME_MAPPINGS = { "CogVideoSampler": "CogVideo Sampler", "CogVideoDecode": "CogVideo Decode", "CogVideoTextEncode": "CogVideo TextEncode", - "CogVideoDualTextEncode_311": "CogVideo DualTextEncode", "CogVideoImageEncode": "CogVideo ImageEncode", - "CogVideoImageInterpolationEncode": "CogVideo ImageInterpolation Encode", - "CogVideoXFunSampler": "CogVideoXFun Sampler", - "CogVideoXFunVid2VidSampler": "CogVideoXFun Vid2Vid Sampler", - "CogVideoXFunControlSampler": "CogVideoXFun Control Sampler", "CogVideoTextEncodeCombine": "CogVideo TextEncode Combine", "CogVideoTransformerEdit": "CogVideo TransformerEdit", - "CogVideoControlImageEncode": "CogVideo Control ImageEncode", "CogVideoContextOptions": "CogVideo Context Options", "ToraEncodeTrajectory": "Tora Encode Trajectory", "ToraEncodeOpticalFlow": "Tora Encode OpticalFlow", @@ -1412,4 +962,5 @@ NODE_DISPLAY_NAME_MAPPINGS = { "CogVideoXFunResizeToClosestBucket": "CogVideoXFun ResizeToClosestBucket", "CogVideoLatentPreview": "CogVideo LatentPreview", "CogVideoXTorchCompileSettings": "CogVideo TorchCompileSettings", + "CogVideoImageEncodeFunInP": "CogVideo ImageEncode FunInP", } \ No newline at end of file diff --git a/pipeline_cogvideox.py b/pipeline_cogvideox.py index 472a308..f869ce4 100644 --- a/pipeline_cogvideox.py +++ b/pipeline_cogvideox.py @@ -17,15 +17,13 @@ import inspect from typing import Callable, Dict, List, Optional, Tuple, Union import torch -import torch.nn.functional as F import math -from diffusers.models import AutoencoderKLCogVideoX from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor -from diffusers.video_processor import VideoProcessor + #from diffusers.models.embeddings import get_3d_rotary_pos_embed from diffusers.loaders import CogVideoXLoraLoaderMixin @@ -120,15 +118,6 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode videos to and from latent representations. - text_encoder ([`T5EncoderModel`]): - Frozen text-encoder. CogVideoX uses - [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel); specifically the - [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant. - tokenizer (`T5Tokenizer`): - Tokenizer of class - [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). transformer ([`CogVideoXTransformer3DModel`]): A text conditioned `CogVideoXTransformer3DModel` to denoise the encoded video latents. 
scheduler ([`SchedulerMixin`]): @@ -140,31 +129,25 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): def __init__( self, - vae: AutoencoderKLCogVideoX, transformer: CogVideoXTransformer3DModel, scheduler: Union[CogVideoXDDIMScheduler, CogVideoXDPMScheduler], - original_mask = None, + dtype: torch.dtype = torch.bfloat16, + is_fun_inpaint: bool = False, ): super().__init__() - self.register_modules( - vae=vae, transformer=transformer, scheduler=scheduler - ) - self.vae_scale_factor_spatial = ( - 2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8 - ) - self.vae_scale_factor_temporal = ( - self.vae.config.temporal_compression_ratio if hasattr(self, "vae") and self.vae is not None else 4 - ) - self.original_mask = original_mask - self.video_processor = VideoProcessor(vae_scale_factor=self.vae_scale_factor_spatial) - self.video_processor.config.do_resize = False + self.register_modules(transformer=transformer, scheduler=scheduler) + self.vae_scale_factor_spatial = 8 + self.vae_scale_factor_temporal = 4 + self.vae_latent_channels = 16 + self.vae_dtype = dtype + self.is_fun_inpaint = is_fun_inpaint self.input_with_padding = True def prepare_latents( - self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, timesteps, denoise_strength, + self, batch_size, num_channels_latents, num_frames, height, width, device, generator, timesteps, denoise_strength, num_inference_steps, latents=None, freenoise=True, context_size=None, context_overlap=None ): shape = ( @@ -174,14 +157,10 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial, ) - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - noise = randn_tensor(shape, generator=generator, device=torch.device("cpu"), dtype=self.vae.dtype) + + noise = randn_tensor(shape, generator=generator, device=torch.device("cpu"), dtype=self.vae_dtype) if freenoise: - print("Applying FreeNoise") + logger.info("Applying FreeNoise") # code and comments from AnimateDiff-Evolved by Kosinkadink (https://github.com/Kosinkadink/ComfyUI-AnimateDiff-Evolved) video_length = num_frames // 4 delta = context_size - context_overlap @@ -221,20 +200,20 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, denoise_strength, device) latent_timestep = timesteps[:1] - noise = randn_tensor(shape, generator=generator, device=device, dtype=self.vae.dtype) frames_needed = noise.shape[1] current_frames = latents.shape[1] if frames_needed > current_frames: - repeat_factor = frames_needed // current_frames + repeat_factor = frames_needed - current_frames additional_frame = torch.randn((latents.size(0), repeat_factor, latents.size(2), latents.size(3), latents.size(4)), dtype=latents.dtype, device=latents.device) - latents = torch.cat((latents, additional_frame), dim=1) + latents = torch.cat((additional_frame, latents), dim=1) + self.additional_frames = repeat_factor elif frames_needed < current_frames: latents = latents[:, :frames_needed, :, :, :] - latents = self.scheduler.add_noise(latents, noise, latent_timestep) + latents = self.scheduler.add_noise(latents, noise.to(device), latent_timestep) latents = latents * self.scheduler.init_noise_sigma # scale the initial noise by the standard deviation required by the scheduler - return latents, timesteps, noise + return latents, timesteps # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): @@ -355,10 +334,10 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): guidance_scale: float = 6, denoise_strength: float = 1.0, sigmas: Optional[List[float]] = None, - num_videos_per_prompt: int = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.Tensor] = None, + fun_mask: Optional[torch.Tensor] = None, image_cond_latents: Optional[torch.Tensor] = None, prompt_embeds: Optional[torch.Tensor] = None, negative_prompt_embeds: Optional[torch.Tensor] = None, @@ -398,8 +377,6 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. - num_videos_per_prompt (`int`, *optional*, defaults to 1): - The number of videos to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. @@ -443,7 +420,7 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): if do_classifier_free_guidance: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0) - prompt_embeds = prompt_embeds.to(self.vae.dtype) + prompt_embeds = prompt_embeds.to(self.vae_dtype) # 4. 
Prepare timesteps if sigmas is None: @@ -453,7 +430,7 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): self._num_timesteps = len(timesteps) # 5. Prepare latents. - latent_channels = self.vae.config.latent_channels + latent_channels = self.vae_latent_channels latent_frames = (num_frames - 1) // self.vae_scale_factor_temporal + 1 # For CogVideoX 1.5, the latent frames should be padded to make it divisible by patch_size_t @@ -469,18 +446,12 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): self.additional_frames = patch_size_t - latent_frames % patch_size_t num_frames += self.additional_frames * self.vae_scale_factor_temporal - - if self.original_mask is not None: - image_latents = latents - original_image_latents = image_latents - - latents, timesteps, noise = self.prepare_latents( - batch_size * num_videos_per_prompt, + latents, timesteps = self.prepare_latents( + batch_size, latent_channels, num_frames, height, width, - self.vae.dtype, device, generator, timesteps, @@ -491,37 +462,41 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): context_overlap=context_overlap, freenoise=freenoise, ) - latents = latents.to(self.vae.dtype) + latents = latents.to(self.vae_dtype) + + if self.is_fun_inpaint and fun_mask is None: # For FUN inpaint vid2vid, we need to mask all the latents + fun_mask = torch.zeros_like(latents[:, :, :1, :, :], device=latents.device, dtype=latents.dtype) + fun_masked_video_latents = torch.zeros_like(latents, device=latents.device, dtype=latents.dtype) # 5.5. if image_cond_latents is not None: - if image_cond_latents.shape[1] > 1: + if image_cond_latents.shape[1] == 2: logger.info("More than one image conditioning frame received, interpolating") padding_shape = ( - batch_size, - (latents.shape[1] - 2), - self.vae.config.latent_channels, - height // self.vae_scale_factor_spatial, - width // self.vae_scale_factor_spatial, + batch_size, + (latents.shape[1] - 2), + self.vae_latent_channels, + height // self.vae_scale_factor_spatial, + width // self.vae_scale_factor_spatial, ) - latent_padding = torch.zeros(padding_shape, device=device, dtype=self.vae.dtype) + latent_padding = torch.zeros(padding_shape, device=device, dtype=self.vae_dtype) image_cond_latents = torch.cat([image_cond_latents[:, 0, :, :, :].unsqueeze(1), latent_padding, image_cond_latents[:, -1, :, :, :].unsqueeze(1)], dim=1) if self.transformer.config.patch_size_t is not None: - first_frame = image_cond_latents[:, : image_cond_latents.size(1) % self.transformer.config.patch_size_t, ...] - image_cond_latents = torch.cat([first_frame, image_cond_latents], dim=1) + first_frame = image_cond_latents[:, : image_cond_latents.size(1) % self.transformer.config.patch_size_t, ...] 
+ image_cond_latents = torch.cat([first_frame, image_cond_latents], dim=1) logger.info(f"image cond latents shape: {image_cond_latents.shape}") - else: + elif image_cond_latents.shape[1] == 1: logger.info("Only one image conditioning frame received, img2vid") if self.input_with_padding: padding_shape = ( batch_size, (latents.shape[1] - 1), - self.vae.config.latent_channels, + self.vae_latent_channels, height // self.vae_scale_factor_spatial, width // self.vae_scale_factor_spatial, ) - latent_padding = torch.zeros(padding_shape, device=device, dtype=self.vae.dtype) + latent_padding = torch.zeros(padding_shape, device=device, dtype=self.vae_dtype) image_cond_latents = torch.cat([image_cond_latents, latent_padding], dim=1) # Select the first frame along the second dimension if self.transformer.config.patch_size_t is not None: @@ -529,22 +504,11 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): image_cond_latents = torch.cat([first_frame, image_cond_latents], dim=1) else: image_cond_latents = image_cond_latents.repeat(1, latents.shape[1], 1, 1, 1) + else: + logger.info(f"Received {image_cond_latents.shape[1]} image conditioning frames") + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # masks - if self.original_mask is not None: - mask = self.original_mask.to(device) - logger.info(f"self.original_mask: {self.original_mask.shape}") - - mask = F.interpolate(self.original_mask.unsqueeze(1), size=(latents.shape[-2], latents.shape[-1]), mode='bilinear', align_corners=False) - if mask.shape[0] != latents.shape[1]: - mask = mask.unsqueeze(1).repeat(1, latents.shape[1], 16, 1, 1) - else: - mask = mask.unsqueeze(0).repeat(1, 1, 16, 1, 1) - logger.info(f"latents: {latents.shape}") - logger.info(f"mask: {mask.shape}") - num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) @@ -554,7 +518,7 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): raise NotImplementedError("Context schedule not currently supported with image conditioning") logger.info(f"Context schedule enabled: {context_frames} frames, {context_stride} stride, {context_overlap} overlap") use_context_schedule = True - from .cogvideox_fun.context import get_context_scheduler + from .context import get_context_scheduler context = get_context_scheduler(context_schedule) #todo ofs embeds? 
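
The hunks above and below change how conditioning latents are assembled: single- or dual-frame image latents are zero-padded along the temporal axis, optionally prefixed with leading frames for `patch_size_t` divisibility, and then joined with the noisy latents along the channel axis. A minimal, self-contained sketch of that flow — the helper name `pad_image_cond_latents` is hypothetical, and shapes assume the [B, T, C, H, W] layout and 16 latent channels used in this pipeline — not the pipeline's actual implementation:

import torch
from typing import Optional

def pad_image_cond_latents(cond: torch.Tensor,
                           num_latent_frames: int,
                           patch_size_t: Optional[int] = None) -> torch.Tensor:
    # cond: [B, T, C, H, W] with T == 1 (img2vid) or T == 2 (interpolation)
    B, T, C, H, W = cond.shape
    if T == 2:
        # interpolation: keep first and last frame, zero-pad in between
        pad = torch.zeros(B, num_latent_frames - 2, C, H, W, dtype=cond.dtype)
        cond = torch.cat([cond[:, :1], pad, cond[:, -1:]], dim=1)
    else:
        # img2vid: single conditioning frame followed by zero padding
        pad = torch.zeros(B, num_latent_frames - 1, C, H, W, dtype=cond.dtype)
        cond = torch.cat([cond[:, :1], pad], dim=1)
    if patch_size_t is not None and cond.shape[1] % patch_size_t != 0:
        # prepend T % patch_size_t leading frames, as the hunk above does;
        # with patch_size_t == 2 this makes the frame count even
        first = cond[:, : cond.shape[1] % patch_size_t]
        cond = torch.cat([first, cond], dim=1)
    return cond

# The conditioning latents are then concatenated with the noisy latents
# along the channel dimension before each transformer call, as in the
# next hunk:
latents = torch.randn(1, 13, 16, 60, 90)                 # [B, T, C, H, W]
cond = pad_image_cond_latents(torch.randn(1, 1, 16, 60, 90), 13)
latent_model_input = torch.cat([latents, cond], dim=2)   # -> 32 channels

With a Fun inpaint model the same channel-concat slot instead receives the mask plus masked video latents, which is why the next hunk branches on `fun_mask` before building `latent_model_input`.
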
@@ -747,7 +711,18 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): if image_cond_latents is not None: latent_image_input = torch.cat([image_cond_latents] * 2) if do_classifier_free_guidance else image_cond_latents - latent_model_input = torch.cat([latent_model_input, latent_image_input], dim=2) + if fun_mask is not None: #for fun img2vid and interpolation + fun_inpaint_mask = torch.cat([fun_mask] * 2) if do_classifier_free_guidance else fun_mask + masks_input = torch.cat([fun_inpaint_mask, latent_image_input], dim=2) + latent_model_input = torch.cat([latent_model_input, masks_input], dim=2) + else: + latent_model_input = torch.cat([latent_model_input, latent_image_input], dim=2) + else: # for Fun inpaint vid2vid + if fun_mask is not None: + fun_inpaint_mask = torch.cat([fun_mask] * 2) if do_classifier_free_guidance else fun_mask + fun_inpaint_masked_video_latents = torch.cat([fun_masked_video_latents] * 2) if do_classifier_free_guidance else fun_masked_video_latents + fun_inpaint_latents = torch.cat([fun_inpaint_mask, fun_inpaint_masked_video_latents], dim=2).to(latents.dtype) + latent_model_input = torch.cat([latent_model_input, fun_inpaint_latents], dim=2) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML timestep = t.expand(latent_model_input.shape[0]) @@ -767,9 +742,9 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): return_dict=False, )[0] if isinstance(controlnet_states, (tuple, list)): - controlnet_states = [x.to(dtype=self.vae.dtype) for x in controlnet_states] + controlnet_states = [x.to(dtype=self.vae_dtype) for x in controlnet_states] else: - controlnet_states = controlnet_states.to(dtype=self.vae.dtype) + controlnet_states = controlnet_states.to(dtype=self.vae_dtype) # predict noise model_output @@ -796,30 +771,18 @@ class CogVideoXPipeline(DiffusionPipeline, CogVideoXLoraLoaderMixin): # compute the previous noisy sample x_t -> x_t-1 if not isinstance(self.scheduler, CogVideoXDPMScheduler): - latents = self.scheduler.step(noise_pred, t, latents.to(self.vae.dtype), **extra_step_kwargs, return_dict=False)[0] + latents = self.scheduler.step(noise_pred, t, latents.to(self.vae_dtype), **extra_step_kwargs, return_dict=False)[0] else: latents, old_pred_original_sample = self.scheduler.step( noise_pred, old_pred_original_sample, t, timesteps[i - 1] if i > 0 else None, - latents.to(self.vae.dtype), + latents.to(self.vae_dtype), **extra_step_kwargs, return_dict=False, ) latents = latents.to(prompt_embeds.dtype) - # start diff diff - if i < len(timesteps) - 1 and self.original_mask is not None: - noise_timestep = timesteps[i + 1] - image_latent = self.scheduler.add_noise(original_image_latents, noise, torch.tensor([noise_timestep]) - ) - mask = mask.to(latents) - ts_from = timesteps[0] - ts_to = timesteps[-1] - threshold = (t - ts_to) / (ts_from - ts_to) - mask = torch.where(mask >= threshold, mask, torch.zeros_like(mask)) - latents = image_latent * mask + latents * (1 - mask) - # end diff diff if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): progress_bar.update() diff --git a/pyproject.toml b/pyproject.toml index 78b9ed8..1ca05f7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,9 +1,9 @@ [project] name = "comfyui-cogvideoxwrapper" description = "Diffusers wrapper for CogVideoX -models: [a/https://github.com/THUDM/CogVideo](https://github.com/THUDM/CogVideo)" -version = "1.1.0" +version = "1.5.0" license = {file = "LICENSE"} -dependencies = ["huggingface_hub", 
"diffusers>=0.30.1", "accelerate>=0.33.0"] +dependencies = ["huggingface_hub", "diffusers>=0.31.0", "accelerate>=0.33.0"] [project.urls] Repository = "https://github.com/kijai/ComfyUI-CogVideoXWrapper" From 128f89c4d24a409f3bb20bccdcdac068c19ab1e1 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 19 Nov 2024 15:23:38 +0200 Subject: [PATCH 46/49] Update workflows, fix controlnet --- custom_cogvideox_transformer_3d.py | 63 +- .../cogvideo_2b_context_schedule_test_01.json | 561 ------ ...on => cogvideox_1_0_2b_controlnet_02.json} | 915 +++++---- ...e_01.json => cogvideox_1_0_5b_I2V_02.json} | 690 +++---- examples/cogvideox_1_0_5b_I2V_Tora_02.json | 1710 ++++++++++++++++ ...e_01.json => cogvideox_1_0_5b_T2V_02.json} | 592 +++--- ...=> cogvideox_1_0_5b_interpolation_02.json} | 931 ++++----- ....json => cogvideox_1_0_5b_vid2vid_02.json} | 1111 ++++++----- .../cogvideox_5b_Tora_I2V_testing_01.json | 1337 ------------- ...gvideox_5b_tora_trajectory_example_01.json | 1119 ----------- ...mple_02.json => cogvideox_Fun_I2V_02.json} | 904 +++++---- examples/cogvideox_Fun_I2V_Tora.json | 1711 +++++++++++++++++ ...ple_01.json => cogvideox_Fun_pose_02.json} | 836 ++++---- examples/cogvideox_fun_img2vid_tora_01.json | 1315 ------------- ...idex_fun_5b_GGUF_10GB_VRAM_example_02.json | 622 ------ nodes.py | 42 +- 16 files changed, 6627 insertions(+), 7832 deletions(-) delete mode 100644 examples/cogvideo_2b_context_schedule_test_01.json rename examples/{cogvideox_2b_controlnet_example_01.json => cogvideox_1_0_2b_controlnet_02.json} (80%) rename examples/{cogvideox_I2V_example_01.json => cogvideox_1_0_5b_I2V_02.json} (75%) create mode 100644 examples/cogvideox_1_0_5b_I2V_Tora_02.json rename examples/{cogvideox_5b_example_01.json => cogvideox_1_0_5b_T2V_02.json} (68%) rename examples/{cogvideox_interpolation_example_01.json => cogvideox_1_0_5b_interpolation_02.json} (78%) rename examples/{cogvideo_5b_vid2vid_example_01.json => cogvideox_1_0_5b_vid2vid_02.json} (84%) delete mode 100644 examples/cogvideox_5b_Tora_I2V_testing_01.json delete mode 100644 examples/cogvideox_5b_tora_trajectory_example_01.json rename examples/{cogvidex_fun_i2v_example_02.json => cogvideox_Fun_I2V_02.json} (75%) create mode 100644 examples/cogvideox_Fun_I2V_Tora.json rename examples/{cogvideox_fun_pose_example_01.json => cogvideox_Fun_pose_02.json} (78%) delete mode 100644 examples/cogvideox_fun_img2vid_tora_01.json delete mode 100644 examples/cogvidex_fun_5b_GGUF_10GB_VRAM_example_02.json diff --git a/custom_cogvideox_transformer_3d.py b/custom_cogvideox_transformer_3d.py index 89c72aa..f3d0d94 100644 --- a/custom_cogvideox_transformer_3d.py +++ b/custom_cogvideox_transformer_3d.py @@ -610,29 +610,29 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): if self.fastercache_counter >= self.fastercache_start_step + 3 and self.fastercache_counter % 5 !=0: # 3. 
Transformer blocks for i, block in enumerate(self.transformer_blocks): - hidden_states, encoder_hidden_states = block( - hidden_states=hidden_states[:1], - encoder_hidden_states=encoder_hidden_states[:1], - temb=emb[:1], - image_rotary_emb=image_rotary_emb, - video_flow_feature=video_flow_features[i][:1] if video_flow_features is not None else None, - fuser = self.fuser_list[i] if self.fuser_list is not None else None, - block_use_fastercache = i <= self.fastercache_num_blocks_to_cache, - fastercache_counter = self.fastercache_counter, - fastercache_start_step = self.fastercache_start_step, - fastercache_device = self.fastercache_device, - attention_mode = self.attention_mode - ) + hidden_states, encoder_hidden_states = block( + hidden_states=hidden_states[:1], + encoder_hidden_states=encoder_hidden_states[:1], + temb=emb[:1], + image_rotary_emb=image_rotary_emb, + video_flow_feature=video_flow_features[i][:1] if video_flow_features is not None else None, + fuser = self.fuser_list[i] if self.fuser_list is not None else None, + block_use_fastercache = i <= self.fastercache_num_blocks_to_cache, + fastercache_counter = self.fastercache_counter, + fastercache_start_step = self.fastercache_start_step, + fastercache_device = self.fastercache_device, + attention_mode = self.attention_mode + ) - if (controlnet_states is not None) and (i < len(controlnet_states)): - controlnet_states_block = controlnet_states[i] - controlnet_block_weight = 1.0 - if isinstance(controlnet_weights, (list, np.ndarray)) or torch.is_tensor(controlnet_weights): - controlnet_block_weight = controlnet_weights[i] - elif isinstance(controlnet_weights, (float, int)): - controlnet_block_weight = controlnet_weights - - hidden_states = hidden_states + controlnet_states_block * controlnet_block_weight + if (controlnet_states is not None) and (i < len(controlnet_states)): + controlnet_states_block = controlnet_states[i] + controlnet_block_weight = 1.0 + if isinstance(controlnet_weights, (list, np.ndarray)) or torch.is_tensor(controlnet_weights): + controlnet_block_weight = controlnet_weights[i] + elif isinstance(controlnet_weights, (float, int)): + controlnet_block_weight = controlnet_weights + + hidden_states = hidden_states + controlnet_states_block * controlnet_block_weight if not self.config.use_rotary_positional_embeddings: # CogVideoX-2B @@ -698,15 +698,16 @@ class CogVideoXTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin): #if has_nan: # raise ValueError(f"block output hidden_states has nan: {has_nan}") - if (controlnet_states is not None) and (i < len(controlnet_states)): - controlnet_states_block = controlnet_states[i] - controlnet_block_weight = 1.0 - if isinstance(controlnet_weights, (list, np.ndarray)) or torch.is_tensor(controlnet_weights): - controlnet_block_weight = controlnet_weights[i] - elif isinstance(controlnet_weights, (float, int)): - controlnet_block_weight = controlnet_weights - - hidden_states = hidden_states + controlnet_states_block * controlnet_block_weight + #controlnet + if (controlnet_states is not None) and (i < len(controlnet_states)): + controlnet_states_block = controlnet_states[i] + controlnet_block_weight = 1.0 + if isinstance(controlnet_weights, (list, np.ndarray)) or torch.is_tensor(controlnet_weights): + controlnet_block_weight = controlnet_weights[i] + print(controlnet_block_weight) + elif isinstance(controlnet_weights, (float, int)): + controlnet_block_weight = controlnet_weights + hidden_states = hidden_states + controlnet_states_block * controlnet_block_weight if not 
self.config.use_rotary_positional_embeddings: # CogVideoX-2B diff --git a/examples/cogvideo_2b_context_schedule_test_01.json b/examples/cogvideo_2b_context_schedule_test_01.json deleted file mode 100644 index ed7b6fa..0000000 --- a/examples/cogvideo_2b_context_schedule_test_01.json +++ /dev/null @@ -1,561 +0,0 @@ -{ - "last_node_id": 34, - "last_link_id": 61, - "nodes": [ - { - "id": 33, - "type": "GetImageSizeAndCount", - "pos": { - "0": 1176, - "1": 122 - }, - "size": { - "0": 210, - "1": 86 - }, - "flags": {}, - "order": 7, - "mode": 0, - "inputs": [ - { - "name": "image", - "type": "IMAGE", - "link": 59 - } - ], - "outputs": [ - { - "name": "image", - "type": "IMAGE", - "links": [ - 60 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "720 width", - "type": "INT", - "links": null, - "shape": 3 - }, - { - "name": "480 height", - "type": "INT", - "links": null, - "shape": 3 - }, - { - "name": "104 count", - "type": "INT", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "GetImageSizeAndCount" - }, - "widgets_values": [] - }, - { - "id": 30, - "type": "CogVideoTextEncode", - "pos": { - "0": 500, - "1": 308 - }, - "size": [ - 474.8035864085422, - 211.10369504535595 - ], - "flags": {}, - "order": 3, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 54 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 55 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. The panda's fluffy paws strum a miniature\nacoustic guitar, producing soft, melodic tunes. Nearby, a few other pandas gather, watching curiously and some clapping in rhythm. Sunlight filters\nthrough the tall bamboo, casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. 
The\nbackground includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical atmosphere of this unique musical\nperformance.", - 1, - true - ] - }, - { - "id": 31, - "type": "CogVideoTextEncode", - "pos": { - "0": 508, - "1": 576 - }, - "size": { - "0": 463.01251220703125, - "1": 124 - }, - "flags": {}, - "order": 4, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 56 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 57 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "", - 1, - true - ] - }, - { - "id": 20, - "type": "CLIPLoader", - "pos": { - "0": -37, - "1": 443 - }, - "size": { - "0": 451.30548095703125, - "1": 82 - }, - "flags": {}, - "order": 0, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "CLIP", - "type": "CLIP", - "links": [ - 54, - 56 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CLIPLoader" - }, - "widgets_values": [ - "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", - "sd3" - ] - }, - { - "id": 11, - "type": "CogVideoDecode", - "pos": { - "0": 1045, - "1": 776 - }, - "size": { - "0": 295.70111083984375, - "1": 198 - }, - "flags": {}, - "order": 6, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 37 - }, - { - "name": "samples", - "type": "LATENT", - "link": 38 - } - ], - "outputs": [ - { - "name": "images", - "type": "IMAGE", - "links": [ - 59 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoDecode" - }, - "widgets_values": [ - true, - 96, - 96, - 0.083, - 0.083, - true - ] - }, - { - "id": 1, - "type": "DownloadAndLoadCogVideoModel", - "pos": { - "0": 652, - "1": 43 - }, - "size": { - "0": 315, - "1": 194 - }, - "flags": {}, - "order": 1, - "mode": 0, - "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null - }, - { - "name": "block_edit", - "type": "TRANSFORMERBLOCKS", - "link": null - }, - { - "name": "lora", - "type": "COGLORA", - "link": null - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 36 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadCogVideoModel" - }, - "widgets_values": [ - "THUDM/CogVideoX-2b", - "fp16", - "enabled", - "disabled", - false - ] - }, - { - "id": 32, - "type": "VHS_VideoCombine", - "pos": { - "0": 1439, - "1": 122 - }, - "size": [ - 563.3333740234375, - 686.2222493489583 - ], - "flags": {}, - "order": 8, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 60, - "slot_index": 0 - }, - { - "name": "audio", - "type": "VHS_AUDIO", - "link": null - }, - { - "name": "meta_batch", - "type": "VHS_BatchManager", - "link": null - }, - { - "name": "vae", - "type": "VAE", - "link": null - } - ], - "outputs": [ - { - "name": "Filenames", - "type": "VHS_FILENAMES", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "VHS_VideoCombine" - }, - "widgets_values": { - "frame_rate": 8, - "loop_count": 0, - "filename_prefix": "CogVideo2B_long", - "format": "video/h264-mp4", - "pix_fmt": "yuv420p", - "crf": 19, - "save_metadata": true, - "pingpong": false, - "save_output": false, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "CogVideo2B_long_00005.mp4", - "subfolder": "", - "type": "temp", - "format": 
"video/h264-mp4", - "frame_rate": 8 - } - } - } - }, - { - "id": 34, - "type": "CogVideoContextOptions", - "pos": { - "0": 1053, - "1": -84 - }, - "size": { - "0": 315, - "1": 154 - }, - "flags": {}, - "order": 2, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "context_options", - "type": "COGCONTEXT", - "links": [ - 61 - ], - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoContextOptions" - }, - "widgets_values": [ - "uniform_standard", - 52, - 4, - 8, - true - ] - }, - { - "id": 22, - "type": "CogVideoSampler", - "pos": { - "0": 1041, - "1": 342 - }, - "size": { - "0": 315, - "1": 382 - }, - "flags": {}, - "order": 5, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 36 - }, - { - "name": "positive", - "type": "CONDITIONING", - "link": 55, - "slot_index": 1 - }, - { - "name": "negative", - "type": "CONDITIONING", - "link": 57 - }, - { - "name": "samples", - "type": "LATENT", - "link": null - }, - { - "name": "image_cond_latents", - "type": "LATENT", - "link": null - }, - { - "name": "context_options", - "type": "COGCONTEXT", - "link": 61 - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 37 - ], - "shape": 3 - }, - { - "name": "samples", - "type": "LATENT", - "links": [ - 38 - ], - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoSampler" - }, - "widgets_values": [ - 480, - 720, - 104, - 32, - 6, - 42, - "fixed", - "CogVideoXDDIM", - 1 - ] - } - ], - "links": [ - [ - 36, - 1, - 0, - 22, - 0, - "COGVIDEOPIPE" - ], - [ - 37, - 22, - 0, - 11, - 0, - "COGVIDEOPIPE" - ], - [ - 38, - 22, - 1, - 11, - 1, - "LATENT" - ], - [ - 54, - 20, - 0, - 30, - 0, - "CLIP" - ], - [ - 55, - 30, - 0, - 22, - 1, - "CONDITIONING" - ], - [ - 56, - 20, - 0, - 31, - 0, - "CLIP" - ], - [ - 57, - 31, - 0, - 22, - 2, - "CONDITIONING" - ], - [ - 59, - 11, - 0, - 33, - 0, - "IMAGE" - ], - [ - 60, - 33, - 0, - 32, - 0, - "IMAGE" - ], - [ - 61, - 34, - 0, - 22, - 5, - "COGCONTEXT" - ] - ], - "groups": [], - "config": {}, - "extra": { - "ds": { - "scale": 0.8390545288825444, - "offset": [ - -14.198557467892236, - 144.90015432747748 - ] - } - }, - "version": 0.4 -} \ No newline at end of file diff --git a/examples/cogvideox_2b_controlnet_example_01.json b/examples/cogvideox_1_0_2b_controlnet_02.json similarity index 80% rename from examples/cogvideox_2b_controlnet_example_01.json rename to examples/cogvideox_1_0_2b_controlnet_02.json index 6e826ab..739bf17 100644 --- a/examples/cogvideox_2b_controlnet_example_01.json +++ b/examples/cogvideox_1_0_2b_controlnet_02.json @@ -1,56 +1,7 @@ { - "last_node_id": 43, - "last_link_id": 77, + "last_node_id": 48, + "last_link_id": 90, "nodes": [ - { - "id": 11, - "type": "CogVideoDecode", - "pos": { - "0": 740, - "1": 580 - }, - "size": { - "0": 300.396484375, - "1": 198 - }, - "flags": {}, - "order": 11, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 63 - }, - { - "name": "samples", - "type": "LATENT", - "link": 64 - } - ], - "outputs": [ - { - "name": "images", - "type": "IMAGE", - "links": [ - 76 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoDecode" - }, - "widgets_values": [ - false, - 240, - 360, - 0.2, - 0.2, - true - ] - }, { "id": 41, "type": "HEDPreprocessor", @@ -63,7 +14,7 @@ "1": 82 }, "flags": {}, - "order": 6, + "order": 4, "mode": 0, "inputs": [ { @@ -90,82 +41,6 @@ 768 ] }, - { - "id": 31, - "type": "CogVideoTextEncode", - "pos": { - "0": 140, 
- "1": 660 - }, - "size": { - "0": 463.01251220703125, - "1": 124 - }, - "flags": {}, - "order": 5, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 56 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 62 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "", - 1, - true - ] - }, - { - "id": 20, - "type": "CLIPLoader", - "pos": { - "0": -390, - "1": 480 - }, - "size": { - "0": 451.30548095703125, - "1": 82 - }, - "flags": {}, - "order": 0, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "CLIP", - "type": "CLIP", - "links": [ - 54, - 56 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CLIPLoader" - }, - "widgets_values": [ - "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", - "sd3" - ] - }, { "id": 38, "type": "VHS_LoadVideo", @@ -178,7 +53,7 @@ 427.63671875 ], "flags": {}, - "order": 1, + "order": 0, "mode": 0, "inputs": [ { @@ -260,7 +135,7 @@ "1": 266 }, "flags": {}, - "order": 7, + "order": 6, "mode": 0, "inputs": [ { @@ -327,12 +202,280 @@ "disabled" ] }, + { + "id": 30, + "type": "CogVideoTextEncode", + "pos": { + "0": 130, + "1": 350 + }, + "size": { + "0": 475.7875061035156, + "1": 231.29896545410156 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 84 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 78 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "car is moving among mountains", + 1, + false + ] + }, + { + "id": 31, + "type": "CogVideoTextEncode", + "pos": { + "0": 139, + "1": 643 + }, + "size": { + "0": 463.01251220703125, + "1": 144 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 78 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 85 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "", + 1, + true + ] + }, + { + "id": 44, + "type": "DownloadAndLoadCogVideoModel", + "pos": { + "0": 326, + "1": -319 + }, + "size": { + "0": 315, + "1": 218 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "block_edit", + "type": "TRANSFORMERBLOCKS", + "link": null, + "shape": 7 + }, + { + "name": "lora", + "type": "COGLORA", + "link": null, + "shape": 7 + }, + { + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "links": [ + 83 + ] + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 82 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadCogVideoModel" + }, + "widgets_values": [ + "THUDM/CogVideoX-2b", + "bf16", + "disabled", + false, + "sdpa", + "main_device" + ] + }, + { + "id": 20, + "type": "CLIPLoader", + "pos": { + "0": -175, + "1": -317 + }, + "size": { + "0": 452.912353515625, + "1": 82 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54 + ], + "slot_index": 0, + "shape": 3 + } + ], + 
"properties": { + "Node name for S&R": "CLIPLoader" + }, + "widgets_values": [ + "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", + "sd3" + ] + }, + { + "id": 35, + "type": "DownloadAndLoadCogVideoControlNet", + "pos": { + "0": -105, + "1": -182 + }, + "size": { + "0": 378, + "1": 58 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "cogvideo_controlnet", + "type": "COGVIDECONTROLNETMODEL", + "links": [ + 67 + ] + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadCogVideoControlNet" + }, + "widgets_values": [ + "TheDenk/cogvideox-2b-controlnet-hed-v1" + ] + }, + { + "id": 37, + "type": "CogVideoControlNet", + "pos": { + "0": 220, + "1": 155 + }, + "size": { + "0": 367.79998779296875, + "1": 126 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "controlnet", + "type": "COGVIDECONTROLNETMODEL", + "link": 67 + }, + { + "name": "images", + "type": "IMAGE", + "link": 72 + } + ], + "outputs": [ + { + "name": "cogvideo_controlnet", + "type": "COGVIDECONTROLNET", + "links": [ + 86 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoControlNet" + }, + "widgets_values": [ + 1, + 0, + 1 + ] + }, { "id": 40, "type": "GetImageSizeAndCount", "pos": { - "0": -190, - "1": -68 + "0": -123, + "1": -34 }, "size": { "0": 277.20001220703125, @@ -361,12 +504,17 @@ { "name": "720 width", "type": "INT", - "links": null + "links": [ + 89 + ] }, { "name": "480 height", "type": "INT", - "links": null + "links": [ + 90 + ], + "slot_index": 2 }, { "name": "49 count", @@ -380,212 +528,91 @@ "widgets_values": [] }, { - "id": 37, - "type": "CogVideoControlNet", + "id": 47, + "type": "EmptyLatentImage", "pos": { - "0": 133, - "1": 131 - }, - "size": { - "0": 367.79998779296875, - "1": 126 - }, - "flags": {}, - "order": 9, - "mode": 0, - "inputs": [ - { - "name": "controlnet", - "type": "COGVIDECONTROLNETMODEL", - "link": 67 - }, - { - "name": "images", - "type": "IMAGE", - "link": 72 - } - ], - "outputs": [ - { - "name": "cogvideo_controlnet", - "type": "COGVIDECONTROLNET", - "links": [ - 68 - ], - "slot_index": 0 - } - ], - "properties": { - "Node name for S&R": "CogVideoControlNet" - }, - "widgets_values": [ - 1, - 0, - 1 - ] - }, - { - "id": 35, - "type": "DownloadAndLoadCogVideoControlNet", - "pos": { - "0": -187, - "1": -207 - }, - "size": { - "0": 378, - "1": 58 - }, - "flags": {}, - "order": 2, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "cogvideo_controlnet", - "type": "COGVIDECONTROLNETMODEL", - "links": [ - 67 - ] - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadCogVideoControlNet" - }, - "widgets_values": [ - "TheDenk/cogvideox-2b-controlnet-hed-v1" - ] - }, - { - "id": 1, - "type": "DownloadAndLoadCogVideoModel", - "pos": { - "0": -157, - "1": -473 + "0": 409, + "1": 77 }, "size": { "0": 315, - "1": 194 + "1": 106 }, - "flags": {}, - "order": 3, - "mode": 0, - "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null, - "shape": 7 - }, - { - "name": "block_edit", - "type": "TRANSFORMERBLOCKS", - "link": null, - "shape": 7 - }, - { - "name": "lora", - "type": "COGLORA", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 60 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadCogVideoModel" + "flags": { + "collapsed": true }, - "widgets_values": [ - "THUDM/CogVideoX-2b", - "fp16", - "disabled", - "disabled", - false - 
] - }, - { - "id": 30, - "type": "CogVideoTextEncode", - "pos": { - "0": 130, - "1": 350 - }, - "size": [ - 475.7874994452536, - 231.2989729014987 - ], - "flags": {}, - "order": 4, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 54 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 61 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "car is moving among mountains", - 1, - true - ] - }, - { - "id": 34, - "type": "CogVideoSampler", - "pos": { - "0": 730, - "1": 170 - }, - "size": { - "0": 315.8404846191406, - "1": 370 - }, - "flags": {}, "order": 10, "mode": 0, "inputs": [ { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 60 + "name": "width", + "type": "INT", + "link": 89, + "widget": { + "name": "width" + } + }, + { + "name": "height", + "type": "INT", + "link": 90, + "widget": { + "name": "height" + } + } + ], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 88 + ] + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 720, + 480, + 1 + ] + }, + { + "id": 46, + "type": "CogVideoSampler", + "pos": { + "0": 743, + "1": 49 + }, + "size": { + "0": 330, + "1": 574 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 83 }, { "name": "positive", "type": "CONDITIONING", - "link": 61 + "link": 84 }, { "name": "negative", "type": "CONDITIONING", - "link": 62 + "link": 85 }, { "name": "samples", "type": "LATENT", - "link": null, + "link": 88, "shape": 7 }, { @@ -603,56 +630,104 @@ { "name": "controlnet", "type": "COGVIDECONTROLNET", - "link": 68, + "link": 86, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": null, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, "shape": 7 } ], "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 63 - ], - "shape": 3 - }, { "name": "samples", "type": "LATENT", "links": [ - 64 - ], - "shape": 3 + 87 + ] } ], "properties": { "Node name for S&R": "CogVideoSampler" }, "widgets_values": [ - 480, - 720, 49, - 32, + 40, 6, - 806286757407563, + 0, "fixed", "CogVideoXDDIM", 1 ] }, + { + "id": 45, + "type": "CogVideoDecode", + "pos": { + "0": 758, + "1": 685 + }, + "size": { + "0": 315, + "1": 198 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 82 + }, + { + "name": "samples", + "type": "LATENT", + "link": 87 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 81 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoDecode" + }, + "widgets_values": [ + true, + 240, + 360, + 0.2, + 0.2, + true + ] + }, { "id": 42, "type": "ImageConcatMulti", "pos": { - "0": 1139, - "1": -19 + "0": 1145, + "1": -24 }, "size": { "0": 210, "1": 150 }, "flags": {}, - "order": 12, + "order": 13, "mode": 0, "inputs": [ { @@ -663,7 +738,7 @@ { "name": "image_2", "type": "IMAGE", - "link": 76 + "link": 81 } ], "outputs": [ @@ -696,7 +771,7 @@ 576.9007568359375 ], "flags": {}, - "order": 13, + "order": 14, "mode": 0, "inputs": [ { @@ -737,7 +812,7 @@ "widgets_values": { "frame_rate": 8, "loop_count": 0, - "filename_prefix": "CogVideoX2B_controlnet", + "filename_prefix": "CogVideoX_2b_controlnet", "format": "video/h264-mp4", "pix_fmt": "yuv420p", "crf": 19, @@ -748,7 +823,7 @@ "hidden": 
false, "paused": false, "params": { - "filename": "CogVideoX2B_00007.mp4", + "filename": "CogVideoX2B_controlnet_00003.mp4", "subfolder": "", "type": "temp", "format": "video/h264-mp4", @@ -768,54 +843,6 @@ 0, "CLIP" ], - [ - 56, - 20, - 0, - 31, - 0, - "CLIP" - ], - [ - 60, - 1, - 0, - 34, - 0, - "COGVIDEOPIPE" - ], - [ - 61, - 30, - 0, - 34, - 1, - "CONDITIONING" - ], - [ - 62, - 31, - 0, - 34, - 2, - "CONDITIONING" - ], - [ - 63, - 34, - 0, - 11, - 0, - "COGVIDEOPIPE" - ], - [ - 64, - 34, - 1, - 11, - 1, - "LATENT" - ], [ 67, 35, @@ -824,14 +851,6 @@ 0, "COGVIDECONTROLNETMODEL" ], - [ - 68, - 37, - 0, - 34, - 6, - "COGVIDECONTROLNET" - ], [ 71, 39, @@ -872,14 +891,6 @@ 0, "IMAGE" ], - [ - 76, - 11, - 0, - 42, - 1, - "IMAGE" - ], [ 77, 42, @@ -887,16 +898,104 @@ 43, 0, "IMAGE" + ], + [ + 78, + 30, + 1, + 31, + 0, + "CLIP" + ], + [ + 81, + 45, + 0, + 42, + 1, + "IMAGE" + ], + [ + 82, + 44, + 1, + 45, + 0, + "VAE" + ], + [ + 83, + 44, + 0, + 46, + 0, + "COGVIDEOMODEL" + ], + [ + 84, + 30, + 0, + 46, + 1, + "CONDITIONING" + ], + [ + 85, + 31, + 0, + 46, + 2, + "CONDITIONING" + ], + [ + 86, + 37, + 0, + 46, + 6, + "COGVIDECONTROLNET" + ], + [ + 87, + 46, + 0, + 45, + 1, + "LATENT" + ], + [ + 88, + 47, + 0, + 46, + 3, + "LATENT" + ], + [ + 89, + 40, + 1, + 47, + 0, + "INT" + ], + [ + 90, + 40, + 2, + 47, + 1, + "INT" ] ], "groups": [], "config": {}, "extra": { "ds": { - "scale": 0.6303940863129801, + "scale": 0.7627768444387069, "offset": [ - 1194.8126582413695, - 661.2034019206458 + 1075.4957551311677, + 398.4420252790512 ] } }, diff --git a/examples/cogvideox_I2V_example_01.json b/examples/cogvideox_1_0_5b_I2V_02.json similarity index 75% rename from examples/cogvideox_I2V_example_01.json rename to examples/cogvideox_1_0_5b_I2V_02.json index b6cefa3..f1265d6 100644 --- a/examples/cogvideox_I2V_example_01.json +++ b/examples/cogvideox_1_0_5b_I2V_02.json @@ -1,42 +1,7 @@ { - "last_node_id": 58, - "last_link_id": 129, + "last_node_id": 63, + "last_link_id": 149, "nodes": [ - { - "id": 20, - "type": "CLIPLoader", - "pos": { - "0": -26, - "1": 400 - }, - "size": { - "0": 451.30548095703125, - "1": 82 - }, - "flags": {}, - "order": 0, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "CLIP", - "type": "CLIP", - "links": [ - 54, - 56 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CLIPLoader" - }, - "widgets_values": [ - "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", - "sd3" - ] - }, { "id": 31, "type": "CogVideoTextEncode", @@ -46,16 +11,16 @@ }, "size": { "0": 463.01251220703125, - "1": 124 + "1": 144 }, "flags": {}, - "order": 4, + "order": 6, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", - "link": 56 + "link": 149 } ], "outputs": [ @@ -63,10 +28,15 @@ "name": "conditioning", "type": "CONDITIONING", "links": [ - 123 + 146 ], "slot_index": 0, "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null } ], "properties": { @@ -78,6 +48,208 @@ true ] }, + { + "id": 63, + "type": "CogVideoSampler", + "pos": { + "0": 1142, + "1": 74 + }, + "size": [ + 330, + 574 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 144 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 145 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 146 + }, + { + "name": "samples", + "type": "LATENT", + "link": null, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": 147, + "shape": 7 + }, + { + "name": 
"context_options", + "type": "COGCONTEXT", + "link": null, + "shape": 7 + }, + { + "name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": null, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 148 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoSampler" + }, + "widgets_values": [ + 49, + 25, + 6, + 0, + "fixed", + "CogVideoXDDIM", + 1 + ] + }, + { + "id": 62, + "type": "CogVideoImageEncode", + "pos": { + "0": 1149, + "1": 711 + }, + "size": { + "0": 315, + "1": 122 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 141 + }, + { + "name": "start_image", + "type": "IMAGE", + "link": 142 + }, + { + "name": "end_image", + "type": "IMAGE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 147 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoImageEncode" + }, + "widgets_values": [ + false, + 0 + ] + }, + { + "id": 59, + "type": "DownloadAndLoadCogVideoModel", + "pos": { + "0": 622, + "1": -25 + }, + "size": { + "0": 315, + "1": 218 + }, + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "name": "block_edit", + "type": "TRANSFORMERBLOCKS", + "link": null, + "shape": 7 + }, + { + "name": "lora", + "type": "COGLORA", + "link": null, + "shape": 7 + }, + { + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "links": [ + 144 + ] + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 132, + 141 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadCogVideoModel" + }, + "widgets_values": [ + "THUDM/CogVideoX-5b-I2V", + "bf16", + "disabled", + false, + "sdpa", + "main_device" + ] + }, { "id": 30, "type": "CogVideoTextEncode", @@ -90,7 +262,7 @@ "1": 168.08047485351562 }, "flags": {}, - "order": 3, + "order": 4, "mode": 0, "inputs": [ { @@ -104,10 +276,18 @@ "name": "conditioning", "type": "CONDITIONING", "links": [ - 122 + 145 ], "slot_index": 0, "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 149 + ], + "slot_index": 1 } ], "properties": { @@ -116,22 +296,22 @@ "widgets_values": [ "a majestic stag is grazing in an enhanced forest, basking in the setting sun filtered by the trees", 1, - true + false ] }, { "id": 37, "type": "ImageResizeKJ", "pos": { - "0": 809, - "1": 684 + "0": 784, + "1": 731 }, "size": { "0": 315, "1": 266 }, "flags": {}, - "order": 5, + "order": 3, "mode": 0, "inputs": [ { @@ -142,7 +322,8 @@ { "name": "get_image_size", "type": "IMAGE", - "link": null + "link": null, + "shape": 7 }, { "name": "width_input", @@ -166,7 +347,7 @@ "name": "IMAGE", "type": "IMAGE", "links": [ - 125 + 142 ], "slot_index": 0, "shape": 3 @@ -199,64 +380,88 @@ ] }, { - "id": 58, - "type": "CogVideoImageEncode", + "id": 36, + "type": "LoadImage", "pos": { - "0": 1156, - "1": 650 + "0": 335, + "1": 731 }, "size": { - "0": 315, - "1": 122 + "0": 402.06353759765625, + "1": 396.6225891113281 }, "flags": {}, - "order": 6, + "order": 1, "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 124 - }, - { - "name": "image", - "type": "IMAGE", - "link": 125 - }, - { - "name": "mask", - "type": "MASK", - "link": null - } - ], + 
"inputs": [], "outputs": [ { - "name": "samples", - "type": "LATENT", + "name": "IMAGE", + "type": "IMAGE", "links": [ - 129 + 71 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "sd3stag.png", + "image" + ] + }, + { + "id": 20, + "type": "CLIPLoader", + "pos": { + "0": -2, + "1": 304 + }, + "size": { + "0": 451.30548095703125, + "1": 82 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54 ], "slot_index": 0, "shape": 3 } ], "properties": { - "Node name for S&R": "CogVideoImageEncode" + "Node name for S&R": "CLIPLoader" }, "widgets_values": [ - 16, - true + "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", + "sd3" ] }, { - "id": 56, + "id": 60, "type": "CogVideoDecode", "pos": { - "0": 1581, - "1": 148 + "0": 1523, + "1": -6 }, "size": { - "0": 300.396484375, + "0": 315, "1": 198 }, "flags": {}, @@ -264,14 +469,14 @@ "mode": 0, "inputs": [ { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 128 + "name": "vae", + "type": "VAE", + "link": 132 }, { "name": "samples", "type": "LATENT", - "link": 127 + "link": 148 } ], "outputs": [ @@ -279,17 +484,15 @@ "name": "images", "type": "IMAGE", "links": [ - 118 - ], - "slot_index": 0, - "shape": 3 + 134 + ] } ], "properties": { "Node name for S&R": "CogVideoDecode" }, "widgets_values": [ - false, + true, 240, 360, 0.2, @@ -301,8 +504,8 @@ "id": 44, "type": "VHS_VideoCombine", "pos": { - "0": 1927, - "1": 146 + "0": 1884, + "1": -6 }, "size": [ 605.3909912109375, @@ -315,22 +518,25 @@ { "name": "images", "type": "IMAGE", - "link": 118 + "link": 134 }, { "name": "audio", "type": "AUDIO", - "link": null + "link": null, + "shape": 7 }, { "name": "meta_batch", "type": "VHS_BatchManager", - "link": null + "link": null, + "shape": 7 }, { "name": "vae", "type": "VAE", - "link": null + "link": null, + "shape": 7 } ], "outputs": [ @@ -367,180 +573,6 @@ "muted": false } } - }, - { - "id": 36, - "type": "LoadImage", - "pos": { - "0": 365, - "1": 685 - }, - "size": { - "0": 402.06353759765625, - "1": 396.6225891113281 - }, - "flags": {}, - "order": 1, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 71 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "MASK", - "type": "MASK", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "LoadImage" - }, - "widgets_values": [ - "sd3stag.png", - "image" - ] - }, - { - "id": 57, - "type": "CogVideoSampler", - "pos": { - "0": 1138, - "1": 150 - }, - "size": [ - 399.878095897654, - 350 - ], - "flags": {}, - "order": 7, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 121 - }, - { - "name": "positive", - "type": "CONDITIONING", - "link": 122 - }, - { - "name": "negative", - "type": "CONDITIONING", - "link": 123 - }, - { - "name": "samples", - "type": "LATENT", - "link": null - }, - { - "name": "image_cond_latents", - "type": "LATENT", - "link": 129 - }, - { - "name": "context_options", - "type": "COGCONTEXT", - "link": null - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 128 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "samples", - "type": "LATENT", - "links": [ - 127 - ], - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoSampler" - }, - "widgets_values": [ - 480, - 720, - 
49, - 20, - 6, - 65334758276105, - "fixed", - "CogVideoXDPMScheduler", - 1 - ] - }, - { - "id": 1, - "type": "DownloadAndLoadCogVideoModel", - "pos": { - "0": 633, - "1": 44 - }, - "size": { - "0": 337.8885192871094, - "1": 194 - }, - "flags": {}, - "order": 2, - "mode": 0, - "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null - }, - { - "name": "block_edit", - "type": "TRANSFORMERBLOCKS", - "link": null - }, - { - "name": "lora", - "type": "COGLORA", - "link": null - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 121, - 124 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadCogVideoModel" - }, - "widgets_values": [ - "THUDM/CogVideoX-5b-I2V", - "bf16", - "disabled", - "disabled", - false - ] } ], "links": [ @@ -552,14 +584,6 @@ 0, "CLIP" ], - [ - 56, - 20, - 0, - 31, - 0, - "CLIP" - ], [ 71, 36, @@ -569,86 +593,94 @@ "IMAGE" ], [ - 118, - 56, + 132, + 59, + 1, + 60, + 0, + "VAE" + ], + [ + 134, + 60, 0, 44, 0, "IMAGE" ], [ - 121, + 141, + 59, 1, + 62, 0, - 57, - 0, - "COGVIDEOPIPE" + "VAE" ], [ - 122, - 30, - 0, - 57, - 1, - "CONDITIONING" - ], - [ - 123, - 31, - 0, - 57, - 2, - "CONDITIONING" - ], - [ - 124, - 1, - 0, - 58, - 0, - "COGVIDEOPIPE" - ], - [ - 125, + 142, 37, 0, - 58, + 62, 1, "IMAGE" ], [ - 127, - 57, - 1, - 56, - 1, - "LATENT" + 144, + 59, + 0, + 63, + 0, + "COGVIDEOMODEL" ], [ - 128, - 57, + 145, + 30, 0, - 56, - 0, - "COGVIDEOPIPE" + 63, + 1, + "CONDITIONING" ], [ - 129, - 58, + 146, + 31, 0, - 57, + 63, + 2, + "CONDITIONING" + ], + [ + 147, + 62, + 0, + 63, 4, "LATENT" + ], + [ + 148, + 63, + 0, + 60, + 1, + "LATENT" + ], + [ + 149, + 30, + 1, + 31, + 0, + "CLIP" ] ], "groups": [], "config": {}, "extra": { "ds": { - "scale": 0.6934334949442514, + "scale": 0.7627768444387059, "offset": [ - -24.154349208343916, - 155.20539218330134 + 648.7113591814891, + 185.9907078691075 ] } }, diff --git a/examples/cogvideox_1_0_5b_I2V_Tora_02.json b/examples/cogvideox_1_0_5b_I2V_Tora_02.json new file mode 100644 index 0000000..a9e46a6 --- /dev/null +++ b/examples/cogvideox_1_0_5b_I2V_Tora_02.json @@ -0,0 +1,1710 @@ +{ + "last_node_id": 92, + "last_link_id": 223, + "nodes": [ + { + "id": 31, + "type": "CogVideoTextEncode", + "pos": { + "0": 497, + "1": 520 + }, + "size": { + "0": 463.01251220703125, + "1": 144 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 209 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 198 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "The video is not of a high quality, it has a low resolution. Watermark present in each frame. Strange motion trajectory. 
", + 1, + true + ] + }, + { + "id": 78, + "type": "ToraEncodeTrajectory", + "pos": { + "0": 1053, + "1": 640 + }, + "size": [ + 355.20001220703125, + 246 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "tora_model", + "type": "TORAMODEL", + "link": 193 + }, + { + "name": "vae", + "type": "VAE", + "link": 205 + }, + { + "name": "coordinates", + "type": "STRING", + "link": 220, + "widget": { + "name": "coordinates" + } + }, + { + "name": "num_frames", + "type": "INT", + "link": 189, + "widget": { + "name": "num_frames" + } + }, + { + "name": "width", + "type": "INT", + "link": 190, + "widget": { + "name": "width" + } + }, + { + "name": "height", + "type": "INT", + "link": 191, + "widget": { + "name": "height" + } + } + ], + "outputs": [ + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "links": [ + 200 + ] + }, + { + "name": "video_flow_images", + "type": "IMAGE", + "links": [ + 203 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "ToraEncodeTrajectory" + }, + "widgets_values": [ + "", + 720, + 480, + 49, + 1, + 0, + 1, + true + ] + }, + { + "id": 73, + "type": "ImageResizeKJ", + "pos": { + "0": -436, + "1": 527 + }, + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 166 + }, + { + "name": "get_image_size", + "type": "IMAGE", + "link": null, + "shape": 7 + }, + { + "name": "width_input", + "type": "INT", + "link": null, + "widget": { + "name": "width_input" + }, + "shape": 7 + }, + { + "name": "height_input", + "type": "INT", + "link": null, + "widget": { + "name": "height_input" + }, + "shape": 7 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 167, + 187, + 210, + 216 + ], + "slot_index": 0 + }, + { + "name": "width", + "type": "INT", + "links": null + }, + { + "name": "height", + "type": "INT", + "links": null + } + ], + "properties": { + "Node name for S&R": "ImageResizeKJ" + }, + "widgets_values": [ + 720, + 480, + "nearest-exact", + false, + 2, + 0, + 0, + "center" + ] + }, + { + "id": 72, + "type": "LoadImage", + "pos": { + "0": -820, + "1": 531 + }, + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 166 + ], + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "pasted/image (473).png", + "image" + ] + }, + { + "id": 20, + "type": "CLIPLoader", + "pos": { + "0": -21, + "1": 288 + }, + "size": { + "0": 451.30548095703125, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CLIPLoader" + }, + "widgets_values": [ + "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", + "sd3" + ] + }, + { + "id": 71, + "type": "CogVideoImageEncode", + "pos": { + "0": 651, + "1": 96 + }, + "size": { + "0": 315, + "1": 122 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 208 + }, + { + "name": "start_image", + "type": "IMAGE", + "link": 167 + }, + { + "name": "end_image", + "type": "IMAGE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 199 + ], + "slot_index": 0 + } + ], + 
"properties": { + "Node name for S&R": "CogVideoImageEncode" + }, + "widgets_values": [ + 16, + false + ] + }, + { + "id": 67, + "type": "GetMaskSizeAndCount", + "pos": { + "0": 750, + "1": 775 + }, + "size": { + "0": 264.5999755859375, + "1": 86 + }, + "flags": { + "collapsed": true + }, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 146 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": null + }, + { + "name": "720 width", + "type": "INT", + "links": [ + 149, + 190 + ], + "slot_index": 1 + }, + { + "name": "480 height", + "type": "INT", + "links": [ + 150, + 191 + ], + "slot_index": 2 + }, + { + "name": "49 count", + "type": "INT", + "links": [ + 189, + 201 + ], + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": "GetMaskSizeAndCount" + }, + "widgets_values": [] + }, + { + "id": 56, + "type": "CogVideoDecode", + "pos": { + "0": 1582, + "1": -66 + }, + "size": { + "0": 300.396484375, + "1": 198 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 206 + }, + { + "name": "samples", + "type": "LATENT", + "link": 202 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 155 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CogVideoDecode" + }, + "widgets_values": [ + true, + 240, + 360, + 0.2, + 0.2, + true + ] + }, + { + "id": 30, + "type": "CogVideoTextEncode", + "pos": { + "0": 498, + "1": 293 + }, + "size": { + "0": 471.90142822265625, + "1": 168.08047485351562 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 197 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 209 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "flying car lifts off in the air in front of a house", + 1, + false + ] + }, + { + "id": 44, + "type": "VHS_VideoCombine", + "pos": { + "0": 2229, + "1": -113 + }, + "size": [ + 1388.8330963815574, + 1236.555397587705 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 156 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "CogVideoX-Tora", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "CogVideoX-Tora_00009.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 16 + }, + "muted": false + } + } + }, + { + "id": 60, + "type": "SplineEditor", + "pos": { + "0": -1367, + "1": 1222 + }, + "size": [ + 765, + 910 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "bg_image", + "type": "IMAGE", + "link": 187, + "shape": 7 + } + ], + "outputs": [ + { + "name": "mask", + 
"type": "MASK", + "links": [ + 146 + ], + "slot_index": 0 + }, + { + "name": "coord_str", + "type": "STRING", + "links": [ + 212 + ], + "slot_index": 1 + }, + { + "name": "float", + "type": "FLOAT", + "links": null + }, + { + "name": "count", + "type": "INT", + "links": null + }, + { + "name": "normalized_str", + "type": "STRING", + "links": null + } + ], + "properties": { + "Node name for S&R": "SplineEditor", + "points": "SplineEditor", + "imgData": { + "name": "bg_image", + "base64": [ + "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAHgAtADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDWAoFLigfSu0+YExRinYoxQAgFAFKBSgUAJilxxS4oxQAmKMUtGKBiYNFLiigBMUGloxQhhijFLilAoEAFFOxRigBMUAUuKWgBuKMe1OxRigBuKUClxRigBMUtGKKADFJTsUYoAbTgKMc0YoAMUmKXFLigBtFOIpMUgDFFLijFACYoxTsUYoAbiinYpMUwEoxS0tACYpMU7FIRSATFLikxTsUCExRilxRigBKMUuKKYxtGKdigCkIbilpcUEUAJSYpcUuKAG4oxS0UAJiilpCKYDTRilxxRikISjFLRigBMUY46UuKMUANxRilIoxQAmKKUijFADaMc07FGKAGYoIxT8UhFMQ3FJTsUY5pCG4oApcUY9qYDSKTFPx7UmDQAzFGKeR7UYoEMxS4p22kxQAmKDS4ooAZR+FOIpCKAG4paXFJigB4FGKdiig0EpMU6jFADcUtLRigBKWjFL2oASlx7UAUuKBjaMU/HNJtouAmPaj8KdRSGNFOAopaYgo70tLigBKKXFGKQCUtGKWmAlJTqMcUgG0U4UYoASjFOxRjNADcUU7HtRj2oATFFLQKAEoxTsUUANoxTsUAUXASjFLiloAbijilooAbilxS8UtADaPwp1JigBPwpcUfhS0ANxRTqSkIKTFOxQBQAlJTiKMUAJSYp2KCKAG4oxTsUUwG0YpcUtADaTFOIpMUCGkH0ox7U7bRigBuKMe1OxRQIbgelJjmnYoxQMbiilxRigBtFOxSEc0AJ+FFLilxQA3FGKU0UCYmKbg0+kxQIbiinYoxTENxRinYpMUAJikxTse1H4UANx7UmKdj2oxSAYRRinEUYoAbikxTsUYpgNoxzTiKQ9TQIdRilApcUGo3FGKdijFACbaMU4CigBMUYpaXFFwsIBS4opaQxuKMU6jFIY3FGKXFLRcBMUuKXFLigLCYpcUtFFxWExRinYoouOw3FGKdij8qBWExRjil70YouOw3FLg0uKXFFxWG4pcUuPalxRcY3FGKdijFADSKBTsUmKQhMUYp2KUCmFhuKMe1OxRigLDcUYp2KKAG4pMU+kxQAzFOxS4oxQAmKMU7HtRigBmKdijFLQAm2kAp9JjmlcBMUYpxFJQAmKMUtFO4hMUY9qWjFFwEA9qMUuKKAGkZoxTqMUANIpMU/FJigQ3FJin4oxQAzFHenYowfQUANxRTse1GKAG4pNvtTsUEc0ANxQRS0YoEJijFLijFADSKTFPoxTAbikwKcR9KTFIQ2ilopgJijFLRQA3FGKdikxQIQUYpcc0YoAaRRinYpMUCG4pcD0paMUAMIoxTiKTFMY7FID6V3K6Fpq/8uwP1Ymnro+nIci0iz7jNcDx0OzPV/s6fdHC9aK7/APs6xHSzg/79ipY7eCP7kES/RAKl49di/wCzn1kedgE9Bn6VKltO/wB2GRvopNehYX0FKMVP159i1ly6yOBGn3hGRaT/APfs/wCFKNOvT/y6Tf8AfBrvajj+5/wI/wAzS+uy7Ff2dDucSuj6g3S0k/EYp39i6j/z6v8AmK7jnGM0c+tL67Psh/2fT7s4kaHqJ/5dj+LCn/2BqP8AzwH/AH2K7Ln1o59aX12p5D+oU/M43/hH9R/54L/32P8AGl/4R7Uf+eK/99iuxpfxpfXKnkP6hS8zjx4d1A9UQfV6evhy+PeIfVv/AK1dbRS+uVPIf1Cl5nKf8I1ff34f++j/AIUf8I1e/wB+D/vo/wCFdWOtL+NH1uoP6jS8zk/+Ebvf+ekH/fR/wpf+Ebvf+ekH/fR/wrq8UYo+t1A+o0vM5T/hG73/AJ6Qf99H/Ck/4Ru+/vw/99H/AArq6X8aPrlQPqNLzOS/4Ry+B6w/99f/AFqT/hHr8D7sZ/4HXXfjR+NH1yoH1Gl5nI/8I/qH/PND9HFH9g6h/wA8V/77FdaG+dlzyAD/ADpc+9H1yp5C+oUvM5A6DqAH+pH/AH2KYdGvwf8Aj2J+hFdn+NB+tP65PyE8BT8zi/7Hv/8An2b8xTG0y+Xrayfgua7bNLn3p/XZ9kL+z6fdnDGwvB1tZ/8Av2aja2nT70Mi/VTXe8+tHXrT+uy7C/s+Pc8+II4IOaXBrvtinqq/lUbWlq/37aFv95AapY1dUQ8u7M4TFLiu2bTLFutrEPouP5VE2jae3/LAD6MatY2HVGby6fRo47FGK61tBsD0R1+jmoW8OWpA2zSg++DVLGU2S8vq+RzFGK6FvD
Qx8tzn6p/9eom8OTgfLNE31BFUsTT7mbwVZdDC70uK1G0G9X+BD9GFRNpN8nW2c/TBrRVoPZmTw1VfZZRxSYxVl7S4iHzwyL/vKRUJUjqKtSTIdOS3QzFGKdiincnlG44o606ii4rCEUU6jFAWGEc0mKkxRimIZijFP20hFADaMU4CjHtSAbRilxS4p3EMxRTsUUXAaRSYp+KTFAhtLS0Y5oAaaMU4ikpgNNJj2p+KMUAMxS4paWgBmKXFLiloAZijFPxzSYoAZj6UYp2KTFAhhFGKfijFAhmKMU/ApMCgBmKMU/H0oxQAzHNFOIoxQAwiinYoxTENxSYp+KSgBp/CinYpMCi4M9Hooor54+sDFFFApAFGKWkp2HcWo0+4fqf5mpKaowp+p/nQFxaKXFGKAEpKUijFACd6XvSgc0uOaQxMUYp2PpRigY3/ABopaMUwCijFGKAAjikFOxSYpAIKWl2k0bTTAiX/AI+JP91f60+gIRMx9VH8zS4NABSU7FAFAhtLS4oI9KBiUUYpMcUALRSYpaQB9aM0hpQKYBmk/KlxSfiKQBRRiloATNLmkOc0UDFz7UxVVl+ZVPzN1HuafTI/un/eb/0I0xNET2Vq/wB63iPvsFQNotgw/wBRj3DH/Gr1FUpyWzIdOD3Rjv4dtjnZLKv1wagbw4c/JcD8VrfFLVrEVF1MnhKL6HLyeH7xASpjf0Abn9arvpV8nW3Y/Tn+VdfS5rRYya3MngKT20OGkgli/wBbG6f7y4pgFd4cHggH61A9rbSD54ImPqVFarG90Yyy7tI4vFIRXWvo9i//ACy2/wC6xqtL4ft2/wBXK6n35FaxxlN7mMsvqLbU5vFGK2n8PTc+XMh9Mgiqr6NfR/8ALHd/usDWqr031MJYWrHeJn4oxU0ttNCf3kTr9RURFaKSexg4SW4hFNxT6Sncmw3FJin4oxTuKwzFGKfikxQIbilxS4pcUANIpMU7FBFADMUYp+KQimA3FGKUClxSuIZijFPxRincCOjGafikxzRcQzFGKdijFAhuKMU7FGKAG4pCKcRRQIaBRinUmKAGYpcU6jFMBm2jFPxSEUDGYo20+jBoFc6WTxr4bibDatEf9xHf+QqrP8QvDcIJS7lmPpHAw/8AQsV4wKO9fPn2vsonrZ+J+hDpb35P/XNP/iqhPxS00H5LC7I9yo/rXleBTgKVx+yient8U7LHyaZcE+8ij+lQt8U1/h0cn63P/wBjXm+KXoaLsfs49j0J/ilKfuaQi/705P8A7KKrt8TdSIwllaKffcf6iuGzS8EYpXY/Zw7HaH4maz2ttPH/AGzf/wCLpr/EjXGGBHZL/uxH+rGuNGBT6LsOSPY6d/iB4gY/LcxJ9IV/qKZ/wnfiMn/kIAfSCP8A+Jrm80oNK7Hyx7HQt428Rtx/aTD6Qxj/ANlqM+Ldfbrqk/4YH9KxM0uaV2Oy7GwfE+uN11W7/CQikPiPWj/zFr3/AL/t/jWTmlzRdlWRp/2/rJ/5i19/3/b/ABpp13V++q3v/f8Ab/Gs/NLmi7CyL41vVf8AoKXv/f8Ab/Gl/trVD11K7P8A22b/ABrPBpQaV2FkX/7a1Uf8xK7/AO/zf40o1zVh01O8H/bZv8aoUZFF2OyNH+3tYHTVb0f9t2/xpw8Qa1/0Fb3/AL/t/jWbRnFF2Fkay+I9aHI1S7/GUmnr4p1xTldTn/E5/nWOGpQaV2PlXY3l8ZeIFGBqLH6xIf5rUi+N/ECnm9VvrCn+Fc7S5ouxcsex1KeP9aXhvsz/AO9F/gRUw+Ier97exP8A2zf/AOKrkKcKOZi5IdjsY/iJqAP72ytWH+xuX+pq2vxHOBv0oe+Lj/7GuEyKOKOZh7OHY9BX4jWxHz6bMD/syg/0qVPiHpzH57S6Ue20/wBRXnPFHFHOxexh2PTo/H2iucFbtPdox/QmraeMdAfH+n7T6NC4/pXlFHFPnYvYQPYE8R6K4yup230LY/nVmPVtOkxt1C1Oen75f8a8WwKMUe0YvYR6M9zRlkGY2Dj1U5p2Mda8MjkkiOUdlP8AsnFWY9Tv4T+6vblP92Vh/Wn7QX1fzPacUYryODxRrlvnZqU5z/z0w/8A6EDVyPxzrsagGeGT3eEZ/TFNTQnQken4pF4H4n+ZrzyP4gamMeZbWjDvhWB/nV6H4iLwJ9N+rRzf0I/rT5kS6Mztu9HauWh8e6VIwEkVzD7lQR+hrQj8XaDL0vwp/wBuNx/SjmRDpyXQ2aPxqjFrmkzECPUrUk9jKAT+dX1xIgZGDKe6nNMlpoaaKdijFAhDSU40lACfjRzS0YoABRRRQAhqJ7eCT78MbfVRUtHSmm1sJpPcz5NGs5eiMhPdT/jVOXw8Cf3VwQPRlzW4KK0VepHZmMsNSlvE5iXQ7tPuBZB7HH86qSWVzF9+CQD/AHTXYntRW0cZNbnPPAU3s7HD4oxXZvbQSA+ZCjH3UGqkmi2b52qyH2b/ABraOMi9zmll0ujOXorcl8PnrFOD7Mv9aoyaVeR5/clh6qc1vGvCWzOaeEqx3RQxmlxUjxPEcOjKfQjFNFbJpnO4tDce1GKdRii4rDMUmKfjFGKAsMIoxTiKTFNEjcUU6kIpgNwKTFPpMUhDaMU7FLTEMIpKeaTFADcUU7FFADTSYp9JikMbikp+KQincQwilpaMUEs8woFIKXvXgH3Y7NKDTKcKQhwpaQUtIAoopR0oGKD60ozTRS0gHd6Wmc+tLmgB4NLmmfjS596VgHg07NR5pc0WHcfmjNNDe9KGpWC48GnCq88siQO0Sh5AOFJxms9dYi8tR9tg88H5o2XA/PPFUoN7DubGaM1FHKJEDDHTnBp26psA/NLmo93vS7qLBcfml3e9R7qXd70rDH7qXNR7qN1FguSZo3YqPd707cKLASBqM1Hu96XdRYLkmaX8ai3e9LupWGS5pQai3UBqLAS5pc1Hupd1Fh3H5ozTN1G6iw7kmaKYGpc+9ILj88UAim596QdOvc/zoAk4oxTPxpQT60rDHUqSSQtuikaNvVDg03dRmgRfh1zVoMeXqN0MeshP860bfxrrsDAtcpMB/DLGP5jB/WufzRVJsTjF7o7CH4h3yt+/sbd19ELJ/PNaEPxDtHH76wmjP+w4cf0rz/8AGlwKOeRLpQfQ9Pg8b6JKcPNLCf8AppEf6ZrTh1vSrjAi1G2Ynt5gB/I145jmlxT5yXQie4IySLuRldfVTkUteK295d2mfs1zNDnr5blf5Vfg8Ta5btlNSnPtIQ4/8ezVc6M3h30Z6170YrzqLx9qiACWG3l99pU/oa07b4hW7H/StPlT3ikDfocU+ZEOjNHZUGsW38XaJcYH2zymIziVCuPxxj9a0oL6zuuILuCUnskgNO6IcWt0TelFKVxxRimSJRmlpOfWgVgoowaOaAsNZVcFWUEehFVpdMtJRzCFPqvFWjmiqjNx2JlCMt0Y82gDrFOR7OM/qKpTaPdxDIUOP9g5rpqK2jiqi3OaWCpS2VjjJIZYj86Mv1GKZXaOiyKVdQynqCM1Vl0uzlHMQX3TiuiOM
X2kcs8uf2WcrikxW9LoKnJimIPowzVGXSbyL/lnvHqhzW8cRCWzOSeDqx3Rn4oxUrxvGcOrKfQjFR1smc7g0NI4pMU7FGKdyXETFJin4oxTuTYZijFOxRii4WGYoxTse9G3mmFhmKXFKR70EUh2G0YpcUY96YhhBopxoxQKx5UDTs1CGHrS78d68Cx9yTDFOFQCUU7zR60rBYmHWlFQ+co7003kS9XGfY0WY7Fmioo51k6BqmJAUk8YHepCzEpcgUxmGOtM8z3phYmzSZqHzfc0eZmiwE+73o3e9Qb+epxS7x60WCxNuGKUPVfcT3pd9FgJ99LuqDdRvosFifd70yFRbRXcULNHHdqVnRTgSA8EEVHupd1VqthDoI47aIRxKQg4AyTUu+oN9HmUrDLAajfVYuTShjipsBY30u+q+6l3Uxk+/Bo3iod3FAb60WAn30u+oNx96Nx96LAWNwo3VBuNKGJpWGTBqcHqvk0uTRYCffS76g3GgPilYZY3Uu6oN/1o8z60coXJw1LuFQeZ9aPM+tKwFjeKXcKreZ9aXzKLAWS+Kgubs28G9V3HJzk4AHqabvpj7Jo2R1DIwwQehppdxjRqbrbCWQx7T0YKwQ+mG6fzq9HLvjViMEgEj0NZzWweyjsnmna0iYMkDTMUUjpgE1aVsCnNLoJXLO6nBqrb6cHqLDuT5pc1BvpQ1KwE2aUGog1AeiwE2aM1Hv8AelDUrBckzS5pgajNFgH5pabmjdQFx1NKgnkA0uaKQXLtvrGp2iqsF9cIo6L5hIH4dK17Lxtq1s3+kNHdJ6OgUj8RiucJparmYnFPc7q2+IMTuBc6e6LjlopA3P0IH861rfxhok7BTdNEx7SoQPzGRXl4ApQKamzN0ons8N1bXQBguYZQenlyBv5VNivE0Z4pA8bsjjkMpwRWlB4j1q3xs1Kc+0hDj/x7NVzmbodmeskUmK4C38f36DFxa282B1XKE/zH6VsWfjzTZyFuoZ7Zj3wHX8xz+lPmRm6UkdPRVS31jS7v/U39uxPRS4DfkeavbeM9jVENNDKDT8UmKBDKWlIpMUDGMiOu11DKexGapT6TaTAgIY27FeP0q/SE1UZyjszOVOMviRhS6FIOYpQ3+8MVSl0+4gHzxNj1HIrqcUoHFdEcVNb6nNPBU5baHGlfrSY+tdbJaW8xJkhQk98c1Rl0OJuYpGU+h5FdEcXF76HHPL5r4dTBIpMVfm0q5iPCeYPVKqNGyHDKQfQjFdMakZbM5J0ZR0kiKjFPxRiruZcozFJipMUhFAWGYoxT8Um2mKwzFIRUm2grQFjxbcaaXI71gP4lH8Fsfxf/AOtUD+I7hvuQRKP9rJrx1Skfb88TozIfU0hd/WuXbXL9j8rIvsEz/PNNGrak3SU/hGP8KpUWT7SJ0z727mo4YXE4JbIx0rmpLzUZOTLL+HH8qvaE10+qJ57yMu08MxNEqbSbuNTV9jtLSPgVPeKEtJWZgFCHJJwBS2qfKKTXF/4kt2B3iIri+0VzHNjVbaDj7XGR6Bs07+3rHk/aV/I1y32L1f8ASlFiP7/6V3+yiZuUux03/CQ6eOsz/ghpp8S6eP4pW+if/XrnRYp3Y04WcQ65/On7OAryN4+KLEdEnP8AwEf40xvFMAPyW8h+pArG+yw/3T+dL9nhCn5P1NHs4CvI1f8AhK17Wjf9/P8A61J/wlef+XM/9/f/AK1c0DU9qqvcoGGR6fhVeyh2FzM3v+ErYf8ALmP+/v8A9aj/AISuTtZp+LmqYji/55L+QpQkY6Iv5VPLDsVqTv4ouz923iH5n+tRHxJqR6eWv0SlG0dAB+FLuHpRaPYdn3Iz4h1Q/wAaf9+xR/bmrHpJ+UY/wqTcPSkzTSj2Cz7jDq2rn/lq4+iD/Ck/tbV/+e0v/fA/wqTPtRmnZdhW8xn9p6uf+W83/fP/ANak+36v/wA/E/51KGoBpadgt5kP27Vv+fmf/vql+36sP+Xqf/vqps0Zo07BbzIf7Q1b/n6n/wC+qP7Q1b/n6n/76qXNGaenYLFc6vqqPt+2TbvTNSDVtaHS4m/FR/hVWbH2z8RV8UNJdBLUjGsa0P8AlvKf+AD/AAp39t6yOsz/AIxj/CpM0bjS93sOw0eINYX/AJag/WIf4U4eI9VHJaP/AL9CjNGaLR7BZ9yVPFl8vDwwv+BH9amHi6X+KxXPtIf8KplUP8IP4UnlR/8APNP++RScYdh69y+PFx/isfyl/wDrU8eL4+9m4+kmf6VlmCE9Y1/Dimm1g/ufqaXs4dg1NxPFtiR88Nwp9lU/1qVfFOnE8+cv1T/A1z32SD+4f++jSGyhP94fQ0vZUx3kdQPE2lngTMPrGami1rTZMAXsI4/iO3+dcAyhZio7EimsOlDw8Re0Z6ZHcwTDMU8Tj/ZcGpQx+tebx2LPGrhhyKlVL63/ANTPKo/2JCKh0F0ZXO+x6JvpQ9cEmqaxCu0XEpH+0ob9SKeviLVoz8zq/s0YH8sUvYS6MXOd2GpweuMi8XXaj97aRN7qSv8AjVuPxhD/AMtLORfUq4P9BUOhPsPnR1Qel31gweKNMmOGeSH/AK6J/hmr0eq2Ev8Aq7yA+mXAP5Gs3Tkt0PmTNAPTw9VVkD/dIb6HNODGpsMtB6XfVcNijd70rBcsB6dvqtu96cHosBZD5pwb3qsr08NSsBPu96A2Rmod2aUNilYLk4NLuqIPShsmiwEoNBPNMBpc0gHUcd6SlHWmAYFWrPUb7Tyfsl3NCCckI5AP4dKrGlFAM6Sy8capbv8A6T5d0nowCn8wK3bTx7Yy5F1azQHsUIcf0P6V59S1Sk0ZunF9D1q017Sr4gQX0O48BHO1vyNaGM9ORXiu2r1lrOpadxa3kqL/AHSdy/keKfOQ6PZnrZWmkV5/aeOtTgwtxFDcr3JGxv04/St2x8babdOEuY5LUn+JvmX8x/hVKSZm6ckdHiiobe+s7xd1tdQyj/YcGp9pqiLCUlOpvekApqN4Y5VKugYHsafRTTsJq+5nTaRBJyhMZ9uRVGbSJ48lMOPbrW9iito4ipHqc88LTn0OUkheI4dGU+4pmK6x40kG10DD0IqnJpVs4JQFD7GuqGLT+JHHPANfCzn8UYrSm0maMZQiQe3Bqi8TxnDqyn3GK6I1Yy2ZyzoTh8SI8UhFPINJitLmLifMCxRj+EU8Kg6KPyqMNkUu73rh1PsNCXj0oyKi3UbqQyQmtHRBu1Jf901lbq1/DmG1UD0Q1M/hYrnd2cfyiovECbdFuT/sVoWcXyD6VV8TDboN1/1zNcNveQKR5nmlyfSoCxBpRIDXfYLk26kLVCT3o3U7Bcl3Uhb5T9Ki3UFuD9KdiSitWLU4ul/H+VRIKliG24X6/wBKt7ELc0N9G6ot1JurOxoTbqUNUG6l3UWAm30b/eod1G+iwE2+l31Buo3UWC5Nv96XfUG40bqLBcn8yl31X3H1p2/iiwE2+gPUO+jdRYCGU/6WD7ir26qL8zZ9xVjdTaJTJ91G6qrfMQdxGPSlAKHdubB9ehpWKuWd1LuqDfRuosFybdzT
t1QB+aN3vSsMn3Ck3Cot3vRuosBLuFLuqHdShqLAUJObhv8AeNNYdPrUm3dM3+8aJEwB9a0MzRt+IE+lP3VDCcQp9Kdmsmaku7igtUefejNIBxVG+8in8KY1vA38A/Cl3UuaYiJrGEj5dw/WojYej/0q2WpU+dgq9ScCndhZFT+zbuOISoSFJ4IfFTRX+sWv3bifHoTuH65rSvZAm22ThYwAfc1U3VKk3ugcUOi8ValCw81Y5FHUFcE/iK0Y/GNu2PNtZU/3WDf4VlkBxhgCPeo2tYG6p+RxScYPdCszpofE2mSgZmaMns6n+nFaFve21z/qLiKT2RwTXCPYRH7pYfXmoWsZByrAn64qXRg9mNNnpQbmnh/rXnMGoatZ/cnmCjjDfMP1zV6DxZfxECZIpV78bT+n+FZvDy6D5kdzvpfMrlIvGNu2BLayoe+xg3+Fa1rrVhdKvl3KKx/gc7SPzrN05R3Q00zXD04Pk1WByM9qerVFgLO+nBhVfdTg9JoCwDUFxqEdsVBUsSeTkAD6k0oeqs+mWd1dR3M0StNGMKTyPxB4P4iiKV9RNltdRhknWNFLq3R1IK1aBFZllp1rYSSPbxhXlOWPQfgBwPwFXg9ErX0AmpajDZpwNSA+lpuaXNABiijIpc0AIMg5HBFatn4j1ayAWO8dkH8Mp3j9elZYpe9MnRna2fjtPLVb2zbeOC8J4P4Hp+dbdr4l0e7IC3axsf4ZRs/U8V5gKXFVzMhwiz2NSrqGUhlPQg5BpcV5NZ6lfaeQbS5kiGc7QflP4Hit2z8b30LYu4Y7hMclfkbP8v0qudEOmzuxRWJYeLdMviVd2tnA6TYAP0PStpGSVA8bBlPQqcimjNprcMUYpSKQimISkZFcEMoI9CKcKKAKE+lW8mSgMbe3T8qoy6TOnK7XHt1rcpe9bQxE49TCphqc+h8U7qN1R5o3e9dVj0OYl3Ubqi3e9KGosFyXNb3hIb9ax/0zP8xXO7q6TwUN2tn/AK5H+YqJL3WKT0PULSL92KzPFa7dAvD/ANM/6ity1XEYrH8Xj/inbz/rn/UVx21RFOWp5CzZpm6kY88Uw13pGrJA9O3ZqDNKGosK5Juo3cGo80ZpiJIkzmnAbZ/8+lPt1+99aHGJj9f6U+hK3HE0maQ0VBoLmlzTcDNLigLhmlzRtpQhJwBmgLiZoqUW8zfdic/RSaeLK6PS2m/74NFmLmRXpasjTr09LWb/AL4NO/sq/P8Ay5z/APfBp2YuZdypRV0aRqBP/HpL+IxT10TUGP8Ax7MPqRRZhzx7lDNGa0x4f1AjPkqPq4pf+Ee1DH+rX/vsU+Vi9pHuZbKcZ9x/WjmrRtJRP9mIHm71TGe/NXB4evj/AAIP+BinysXPFbsyc4o71rf8I5ff3Y/++xR/wjt9/wBMv++6XKx+0j3MrNLnNah8O3+OPK/77ph0C/X/AJZo30cUuVh7SPczgSDS7jVw6Rfqf+PVz9OaadMvh/y6Tf8AfJo5WNTj3KuTS5NTmwvF62sw/wCAGm/ZLn/n3l/74NKzDmXci3UobkUphkXrGw+opoU5osVcZEuZj9TT7lNsa/Wlt1zKfxqS8H7tOP4v6VdjO42M/u1+lO3UxBhF+lFZtGtx+6jdTDRnilYCTNG6osmjNFguTbqvaaFBkuJPuRL+tZgPNXpj5FlHD0Z/3j/0qJLoUiKSQu7Mx5JyaQNxUW6jcc1SQmyYGlzUW7mnbqTQEmacKh3ZpwakMlzUM4VoXyASAacGzTZf9S/+6aEDMpaU8EUkYp7CtjJbFmE3tuA8LyJkZBRv8K07XxVfwECdUmUdcjDH8RTYP+PaL/cH8qHhSX76A1jKz3Roo9jct/Fmny4EvmQHvkbh+Y/wrWt7+1u8/Z7iOQjqFbn8q4V9PibOMiq7afKnMZzjpg4NZulB7B7x6ZmlVq8+t/EGrWXytJ5i4wFmXP69a1bTxmpAF5alT/eiOR+R/wAazdCS2C6OvDU7dWZaarZXqgw3KEn+AnDflV7cRWTi1uBOGpwaq4fmnhqmwyffTg1V91ODUWEWAacDUAenB6LAT5paiDU8NxSJHilFN3Zp1AC0UlLQFxRUsM01u2+CaSJvVGKn9KizTs+1AHQ2PjLULZgt0FuowMc/K35gfzFdDaeL9KuRiSR7dsdJF4/MV5517Uu3IpqTJcUz1yCeG5jEkEqSoejIwIp5FeS21xPZy+bbSvE+MFlOM1v2vjPUIdqzxwzqOpIKsfxHH6VSkQ6fY7rFFY1h4p029bY7NbvjpLgA/j0raUrIoZGDKehByKozaaPiGjNGB60nA716ZqLRSbhRvAHA5oFckHSup8CjdrrD/pif5iuULkEjA/A5rq/h9l/EEntbsf8Ax5aiWwpS0PXLZMRjisXxiMeG73/c/qK37df3Y+lYPjQ48MX3+5/UVy21RnTep4ue/IphZR3FNfrUZ6HPWuw2cidFLhmUZCjJIHSpUgLmIKykydMducc1UeVmjSPChUGOB15zk1dsruXR74yCNWlUbSG7Z/rVIhyfQZdwy2c7xSoQy9+gNOtpbU8To2cNyGwM4+X9abfalcX0sjyNhXP3R0A7Cqi/eH1o0C7e5q24HzDHerulWUd/rCW8udrbicHHRc/0qvbJzJ/vVo+H+PEcJH/TT/0E00hSdk2bv/CO6an3rct9Xb/GpU0nTk6WcX/Ahn+dbHyzDbjDenrVdk2Ng1aSOR1Jdyn/AGbY/wDPnb/9+x/hTlsLNelpAP8AtmP8KsUVVkRzy7jUjjThI1X2UYqeOUr8rZMZ6rn9R71FmjNFkHMxZVaPBDb0PRvX/Cmj5hxTkk2gqy7kPVc/r9aZInlEMp3I3Q/0PvSKbuL0pc01TmlxTJA0lKaSmAZpaTvS4pDOUcf8VD/29p/M11QzXMOMeIB/19p/M11JpI0qdBh5pMU40lVczExQBS/hQTkk4HXtQAdOlFFApAFA5opRQAoJ7Gn5zjPP1qMU8dRRYdziLhQNZuwOglf+ZplyPlj/AN7+lTXHOtXZHTzX/maZdrhY/wDf/pWR2LY6y1hilsLbfFG2Yk+8oPYUNYWZP/HrD/3wKfZf8g+2/wCuKfyFSmtLI5XJplF9JsZOtso/3SR/Kqz6BZNnb5i/Rv8AGtY00jNLlTGqkl1MJ/DJO4w3YPorp/XP9KrN4cvV6PC30Y/1Feg+HJ9HtZ531bawKhY0e3Min1PDDB/xro21LwaYyRb2ZbHANnKM/wDjxrGbUXblZtGpJ63PG4NCvVnUvCDGp3MQwPA56ZzVa7trx7h5Ht5BuPA2ngV27gBHmCbBMSETP3Vzn/634GoPeqVJPUHiZLQ4Mgg4I5o6V3UsUc67ZY1cf7QzVOXRrCUH9zsPqjEUOl2KWJXVHJZpc10L+G4SfkuJF/3gD/hVSXw9cKf3UqOPf5al02aKvB9TJDUuanm067gYh4HwP4lXI/MVXKlTyMVm423NVJPYfmhzmJx/smmUN/q2HtRYdynCuafKuAKdbrz+FSTrgL9a0sQti7EcQRj/AGB/Kn5qGNsRJz/CKcD
WDWpqSdQRntSr8oxkn6nNMDU8GlYYpAYYYAj0NQyWcL/w7fpU2aKLtCM2TTnXPlsCPTOKmh1XVrEjFxKVH8LncP1q5SEZ7Zp819xcvY0bLxhGQEvYWRu7pyPyrftNWsL3/UXSM3908H8jXDyWkTj7u0+oqpLZGNSyMCBzjpUOnB+Qao9QyaVWrzix8R6jZbVWXzIl/gkGRj69a37PxlbyHbdW7RH+8h3D/H+dZyoSQrpnVBqcGqpb3dvdxh7eZJFP908/iO1TZxWLVgLAb3qRXqtupwaiwiyGp4aqwfmnh6VgLAYU7IqANUgakIkpRTFNSCkAuBS0etOxQAgpcUUUgCp7e8urQk21xLFk5OxyM/WocUtMD56kieJtsiMpwDgjseRTM4BFOkleQAMeFzgemajr2DAKUCkpwoAUV2Pw4GfEU3/Xs3/oS1xwrtPhqM+Ibj/r1b/0JamWwS2PYIF/d1zvjcf8Uxff7g/mK6WEfu/wrmvHRK+Fb3H90f8AoQrne6MqT1PEnB64NRnpWlaaiLOG6TyEkaeIx7yeUBHOKoRRPPIsUYy7cAetdRuyOinyKF2jBDY+bPr/AJxTKYgpyfeH1ptOT74+tAG3bj5pPrWhoA/4qGL6Sf8AoJqlbj5pP96tHw+P+KiX/df+Rqo7kVPhZ17DNSBw42yDns3+NIRzTCK1aucN7Ehtz25pv2dj6imYpMVNmO6Jfs596T7M2e9R7RRgUWY7of8AZzTljKcEZU9VPQ1FSYoswuiTyhEQ33o+57j61Lsi/vR/mKgjcxk8BlYYZT0IqQRxh1kKs0JIDAHBHtn+tS7opNMGSMDO5SO+0jNRvFsPYg8gg9a6VbTwe0QLX+oq2MkeUpwfyrCmFtFdyQQytLbZ+SVk2n64qVK5TViuqKfT86eI19qf5Ef/AD2j/wC+qBbx5/1kf/fQqrhY4+TjxAB6XafzNdPXMuufEarkY+2rznj7xrsGtk/57Rf99ihOxU1exUIpMVb+zp/z2i/77FI9vGIyRNGSOQAw5p8xnylTFFKRSYqiQoFO3EIU4wSD0+v+NNoGFKKKKBB3p46iminDqKYzi251W6/66v8A+hGm3f3I/wDf/pT8f8TO5/66P/M0l4MJH/v/ANDWR2o62y/5B9t/1xT+QqTFMsh/xLrX/rin8hUhFaHHLcb+FFLijFAjUg1LT0hSOfRLaYqoBfzZFJ9+GqhIEubs/Z4VhR2wkYYkL+J5qPFWYcQwyTYBYjYmexPU/gP51Fraoq5DdsGnZVbdGnyIfUDgGoKcRSYq1oiW9QxRS4ooEJS0UCgAzTJIYphiWNH/AN5QakpKNx3sUZdFspuREYz6ocfpWbdeH3WN2hmVgAThxiuiFNm/1En+6f5VLhFmsas11OBthnt/DT7gfKn1othz/wABqS6H7tfr/SszrT0Bf9Wv0FLXR2+gWt1pltKGeOR4UYkcgkgHpVW48OXMXMLrMuP90/l/9es3TY414PS5jg04OQaJIZIXKSRsjDswxTKixtcmDg04NVfNKGNS0BZoqJZegqVSDUlCFajmX9zJ/umrGKjmH7iT/dP8qAMnT0V51DAEYPB+lXZrCNxlPkNVdOH+kD6GtbFaSbTIiroyPJubVw6FlKnIZD0rTsvFF/auBNJ56dxJ1/Pr/OnkVDLaxS8suD6ipbT+JD5Ox01n4o0+6AEjmBz2ccfmOK2I5UdA6OGU9CpyDXmkmmuuTGwb2PFNtr690yYtC7Rk8EY4P4dKh0U/hYtVuenh/eniT3rjLTxgSFW6twT/ABOhx+h/xrdttUt7tN0Mgb26EfhWMqclug3NkS8df1qRZR61jm6x3pPthHSs+ULG6sq+oqZZk7sPzrl5NS2nHU+1RG8nm4B2j260OI+U7JHVjgMCR2BqcDNYHh+Hif1+XJ/Ot0bl75HvWT0YrIo3+pR2eVVd8vYE7R+Z/pTbPUGuCvyxyA43GJidh7g5Azj1FaOULZaMbh0OM1IuwDCAAe1CkhWG7aXbT6Xbmi4rHzveQeTINuNrDIHpVatXUlPlRnJwGPH1/wD1VnKwR1YDOOcNzmvYhLmjcmtDkm0N2kYyCPSipZ5vPZTyMDAUnIUeg9qjAqjMceeQAAewrtfhqP8Aiop/+vRv/Q0rjGTY23IJHociu1+Ga58QXB9LVv8A0NKmWwp/Cz2CIfIPpXMeOxnwte/Qf+hCupiH7v8ACuW8eHHhW89wB/48Kwe6MaW54pIgVVbepLDOB2+tRhmXocfSnkZ54HNXLqw+wWqPO0LvOm6NY5QWj5HJA9Rmuk6CpcTRyiIJEI9qAHnO4+tQ9sYoABzlguBnnvUyBp0IATESFuTgkZ/XrTERMhU4OOPQ5oj++v1FSAIBl13ZyMA4xU1vatJGJVwx3Y2Lywxjkj0xmhBc1rcff+taPhznX8+ivVC2+6+fWtHw2M642OyP/OqW5FT4Wdg1MNSGmkVqcAzFGKfiqct9DFqMNif9bKCQew4P+FK40m9iwRSYqTFG2mIZjijFP20EUAMxT4yUbI/HI60KKo6veS2FmbiMD5CCcng+xqWXFNuw/UtQgsJLfP3ZXIIPGz8fyp0U8V0vmRSK6eoNchrus2+qx2zRKyOgYOrdBnHQ1L4WvCl81ux+WReB7j/JqFLWxu6XuX6nXbaUCn7aAK0MDlH/AOQ8P+vlf5muoxXLt/yH1/6+l/ma6k9aEaVOgzFGKcaMUGY3FJinYoxQAwikxUmKQigBtHpS4B5FLigBMU9aKUDFAHF4zqdz/wBdH/maS8+5H/v/ANDT1/5CFwf9t/8A0KmXmcRf7/8AQ1kdvQ66z/5B9t/1yT+QqQjmmWgxp9t/1yT+QqQ1qcb3GnrSUp5oANAkKBk4AyTU91iNlgGP3QwxHdj1/wAPwotfkdpj/wAshuH16D9SKgOaXUb2GmjFLijbTJEoxTiKMUANxRinAfzpdtAxoFNYqhXcwG44GT1NNN3BHceTI+184G7gHjPWsfVb5WleDeuwMCpHVTgZqJTSRrCk5M0b28FttKEPtyZEGMgY606O7iubNnDKpKnKlhmua81pNrONwGRu5z+dIsmwmRGVcrwuOvrWXtnc3+rqxTthz+FPuVyi/U0lqvT/AHf61LcjCqe2T/KqNDs9OkRdIsyzqP3CdT/sirakPGHUjaRmuat5QLaFD2jA/Sri3hWExADB79wKXtTF0OpYv7mPhGCSL0YMoIrGawt53ZR+6PY54/KrM6qT95XGB839KjVwGBPJ9azcrs2hHlWhQn0a7iXcieanUMnP6daoMjKcMCD6EYrrG1HMZCpg8fhUcj299KIp4lIA69CD7Gm1HoNTmviRy1KGI6Gti90WNMvbTgrnAVv8aypYJIX2uu0/zqGrGsZJ7CrcEdelPeVHgkAPJU8H6VX2ijZwaXKXch01czA+qmtcrWZpg/er9DWrSnuENhmKQgU8kCo2kVRyQKgoMVHIqMuHCke9RSXYHCjntnvURZ25JJqkmIhns4W/1Z2n9Kpss1udwJGOjA1o4qG6GIGq03sQ0SWWtXomiidxIj
+          "<remainder of base64-encoded JPEG preview omitted>"
+        ]
+      }
+    },
+    "widgets_values": [
+      "[{\"x\":568.1871482594877,\"y\":385.0405294042721},{\"x\":566.745048898423,\"y\":216.3149041597034}]",
+      "[{\"x\":568.1871337890625,\"y\":385.04052734375},{\"x\":568.1571044921875,\"y\":381.525390625},{\"x\":568.1270141601562,\"y\":378.01031494140625},{\"x\":568.0969848632812,\"y\":374.49517822265625},{\"x\":568.0669555664062,\"y\":370.9800720214844},{\"x\":568.0369262695312,\"y\":367.4649353027344},{\"x\":568.0068969726562,\"y\":363.9498291015625},{\"x\":567.976806640625,\"y\":360.4346923828125},{\"x\":567.94677734375,\"y\":356.9195861816406},{\"x\":567.916748046875,\"y\":353.40447998046875},{\"x\":567.88671875,\"y\":349.88934326171875},{\"x\":567.8566284179688,\"y\":346.374267578125},{\"x\":567.8265991210938,\"y\":342.859130859375},{\"x\":567.7965698242188,\"y\":339.343994140625},{\"x\":567.7665405273438,\"y\":335.8288879394531},{\"x\":567.7364501953125,\"y\":332.31378173828125},{\"x\":567.7064208984375,\"y\":328.79864501953125},{\"x\":567.6763916015625,\"y\":325.2835388183594},{\"x\":567.6463623046875,\"y\":321.7684326171875},{\"x\":567.6163330078125,\"y\":318.2532958984375},{\"x\":567.5862426757812,\"y\":314.7381896972656},{\"x\":567.5562133789062,\"y\":311.22308349609375},{\"x\":567.5261840820312,\"y\":307.70794677734375},{\"x\":567.4961547851562,\"y\":304.1928405761719},{\"x\":567.466064453125,\"y\":300.677734375},{\"x\":567.43603515625,\"y\":297.16259765625},{\"x\":567.406005859375,\"y\":293.6474914550781},{\"x\":567.3759765625,\"y\":290.1323547363281},{\"x\":567.345947265625,\"y\":286.61724853515625},{\"x\":567.3158569335938,\"y\":283.1021423339844},{\"x\":567.2858276367188,\"y\":279.5870056152344},{\"x\":567.2557983398438,\"y\":276.0718994140625},{\"x\":567.2257690429688,\"y\":272.5567932128906},{\"x\":567.1956787109375,\"y\":269.0416564941406},{\"x\":567.1656494140625,\"y\":265.52655029296875},{\"x\":567.1356201171875,\"y\":262.01141357421875},{\"x\":567.1055908203125,\"y\":258.4963073730469},{\"x\":567.0755004882812,\"y\":254.981201171875},{\"x\":567.0454711914062,\"y\":251.46607971191406},{\"x\":567.0154418945312,\"y\":247.95095825195312},{\"x\":566.9854125976562,\"y\":244.43585205078125},{\"x\":566.9553833007812,\"y\":240.9207305908203},{\"x\":566.92529296875,\"y\":237.40560913085938},{\"x\":566.895263671875,\"y\":233.8905029296875},{\"x\":566.865234375,\"y\":230.3753662109375},{\"x\":566.835205078125,\"y\":226.86026000976562},{\"x\":566.8051147460938,\"y\":223.3451385498047},{\"x\":566.7750854492188,\"y\":219.8300323486328},{\"x\":566.7450561523438,\"y\":216.31491088867188}]",
+      720,
+      480,
+      49,
+      "path",
+      "basis",
+      0.5,
+      1,
+      "list",
+      0,
+      1,
+      null,
+      null,
+      null
+    ]
+  },
+  {
+    "id": 82,
+    "type": "SplineEditor",
+    "pos": {
+      "0": -564,
+      "1": 1226
+    },
+    "size": [
+      765,
+      910
+    ],
+    "flags": {},
+    "order": 11,
+    "mode": 0,
+    "inputs": [
+      {
+        "name": "bg_image",
+        "type": "IMAGE",
+        "link": 210,
+        "shape": 7
+      }
+    ],
+    "outputs": [
+      {
+        "name": "mask",
+        "type": "MASK",
+        "links": [],
+        "slot_index": 0
+      },
+      {
+        "name": "coord_str",
+        "type": "STRING",
+        "links": [
+          211
+        ],
+        "slot_index": 1
+      },
+      {
+        "name": "float",
+        "type": "FLOAT",
+        "links": null
+      },
+      {
+        "name": "count",
+        "type": "INT",
+        "links": null
+      },
+      {
+        "name": "normalized_str",
+        "type": "STRING",
+        "links": null
+      }
+    ],
"properties": { + "Node name for S&R": "SplineEditor", + "points": "SplineEditor", + "imgData": { + "name": "bg_image", + "base64": [ + "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAHgAtADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDWAoFLigfSu0+YExRinYoxQAgFAFKBSgUAJilxxS4oxQAmKMUtGKBiYNFLiigBMUGloxQhhijFLilAoEAFFOxRigBMUAUuKWgBuKMe1OxRigBuKUClxRigBMUtGKKADFJTsUYoAbTgKMc0YoAMUmKXFLigBtFOIpMUgDFFLijFACYoxTsUYoAbiinYpMUwEoxS0tACYpMU7FIRSATFLikxTsUCExRilxRigBKMUuKKYxtGKdigCkIbilpcUEUAJSYpcUuKAG4oxS0UAJiilpCKYDTRilxxRikISjFLRigBMUY46UuKMUANxRilIoxQAmKKUijFADaMc07FGKAGYoIxT8UhFMQ3FJTsUY5pCG4oApcUY9qYDSKTFPx7UmDQAzFGKeR7UYoEMxS4p22kxQAmKDS4ooAZR+FOIpCKAG4paXFJigB4FGKdiig0EpMU6jFADcUtLRigBKWjFL2oASlx7UAUuKBjaMU/HNJtouAmPaj8KdRSGNFOAopaYgo70tLigBKKXFGKQCUtGKWmAlJTqMcUgG0U4UYoASjFOxRjNADcUU7HtRj2oATFFLQKAEoxTsUUANoxTsUAUXASjFLiloAbijilooAbilxS8UtADaPwp1JigBPwpcUfhS0ANxRTqSkIKTFOxQBQAlJTiKMUAJSYp2KCKAG4oxTsUUwG0YpcUtADaTFOIpMUCGkH0ox7U7bRigBuKMe1OxRQIbgelJjmnYoxQMbiilxRigBtFOxSEc0AJ+FFLilxQA3FGKU0UCYmKbg0+kxQIbiinYoxTENxRinYpMUAJikxTse1H4UANx7UmKdj2oxSAYRRinEUYoAbikxTsUYpgNoxzTiKQ9TQIdRilApcUGo3FGKdijFACbaMU4CigBMUYpaXFFwsIBS4opaQxuKMU6jFIY3FGKXFLRcBMUuKXFLigLCYpcUtFFxWExRinYoouOw3FGKdij8qBWExRjil70YouOw3FLg0uKXFFxWG4pcUuPalxRcY3FGKdijFADSKBTsUmKQhMUYp2KUCmFhuKMe1OxRigLDcUYp2KKAG4pMU+kxQAzFOxS4oxQAmKMU7HtRigBmKdijFLQAm2kAp9JjmlcBMUYpxFJQAmKMUtFO4hMUY9qWjFFwEA9qMUuKKAGkZoxTqMUANIpMU/FJigQ3FJin4oxQAzFHenYowfQUANxRTse1GKAG4pNvtTsUEc0ANxQRS0YoEJijFLijFADSKTFPoxTAbikwKcR9KTFIQ2ilopgJijFLRQA3FGKdikxQIQUYpcc0YoAaRRinYpMUCG4pcD0paMUAMIoxTiKTFMY7FID6V3K6Fpq/8uwP1Ymnro+nIci0iz7jNcDx0OzPV/s6fdHC9aK7/APs6xHSzg/79ipY7eCP7kES/RAKl49di/wCzn1kedgE9Bn6VKltO/wB2GRvopNehYX0FKMVP159i1ly6yOBGn3hGRaT/APfs/wCFKNOvT/y6Tf8AfBrvajj+5/wI/wAzS+uy7Ff2dDucSuj6g3S0k/EYp39i6j/z6v8AmK7jnGM0c+tL67Psh/2fT7s4kaHqJ/5dj+LCn/2BqP8AzwH/AH2K7Ln1o59aX12p5D+oU/M43/hH9R/54L/32P8AGl/4R7Uf+eK/99iuxpfxpfXKnkP6hS8zjx4d1A9UQfV6evhy+PeIfVv/AK1dbRS+uVPIf1Cl5nKf8I1ff34f++j/AIUf8I1e/wB+D/vo/wCFdWOtL+NH1uoP6jS8zk/+Ebvf+ekH/fR/wpf+Ebvf+ekH/fR/wrq8UYo+t1A+o0vM5T/hG73/AJ6Qf99H/Ck/4Ru+/vw/99H/AArq6X8aPrlQPqNLzOS/4Ry+B6w/99f/AFqT/hHr8D7sZ/4HXXfjR+NH1yoH1Gl5nI/8I/qH/PND9HFH9g6h/wA8V/77FdaG+dlzyAD/ADpc+9H1yp5C+oUvM5A6DqAH+pH/AH2KYdGvwf8Aj2J+hFdn+NB+tP65PyE8BT8zi/7Hv/8An2b8xTG0y+Xrayfgua7bNLn3p/XZ9kL+z6fdnDGwvB1tZ/8Av2aja2nT70Mi/VTXe8+tHXrT+uy7C/s+Pc8+II4IOaXBrvtinqq/lUbWlq/37aFv95AapY1dUQ8u7M4TFLiu2bTLFutrEPouP5VE2jae3/LAD6MatY2HVGby6fRo47FGK61tBsD0R1+jmoW8OWpA2zSg++DVLGU2S8vq+RzFGK6FvDQx8tzn6p/9eom8OTgfLNE31BFUsTT7mbwVZdDC70uK1G0G9X+BD9GFRNpN8nW2c/TBrRVoPZmTw1VfZZRxSYxVl7S4iHzwyL/vKRUJUjqKtSTIdOS3QzFGKdiincnlG44o606ii4rCEUU6jFAWGEc0mKkxRimIZijFP20hFADaMU4CjHtSAbRilxS4p3EMxRTsUUXAaRSYp+KTFAhtLS0Y5oAaaMU4ikpgNNJj2p+KMUAMxS4paWgBmKXFLiloAZijFPxzSYoAZj6UYp2KTFAhhFGKfijFAhmKMU/ApMCgBmKMU/H0oxQAzHNFOIoxQAwiinYoxTENxSYp+KSgBp/CinYpMCi4M9Hooor54+
sDFFFApAFGKWkp2HcWo0+4fqf5mpKaowp+p/nQFxaKXFGKAEpKUijFACd6XvSgc0uOaQxMUYp2PpRigY3/ABopaMUwCijFGKAAjikFOxSYpAIKWl2k0bTTAiX/AI+JP91f60+gIRMx9VH8zS4NABSU7FAFAhtLS4oI9KBiUUYpMcUALRSYpaQB9aM0hpQKYBmk/KlxSfiKQBRRiloATNLmkOc0UDFz7UxVVl+ZVPzN1HuafTI/un/eb/0I0xNET2Vq/wB63iPvsFQNotgw/wBRj3DH/Gr1FUpyWzIdOD3Rjv4dtjnZLKv1wagbw4c/JcD8VrfFLVrEVF1MnhKL6HLyeH7xASpjf0Abn9arvpV8nW3Y/Tn+VdfS5rRYya3MngKT20OGkgli/wBbG6f7y4pgFd4cHggH61A9rbSD54ImPqVFarG90Yyy7tI4vFIRXWvo9i//ACy2/wC6xqtL4ft2/wBXK6n35FaxxlN7mMsvqLbU5vFGK2n8PTc+XMh9Mgiqr6NfR/8ALHd/usDWqr031MJYWrHeJn4oxU0ttNCf3kTr9RURFaKSexg4SW4hFNxT6Sncmw3FJin4oxTuKwzFGKfikxQIbilxS4pcUANIpMU7FBFADMUYp+KQimA3FGKUClxSuIZijFPxRincCOjGafikxzRcQzFGKdijFAhuKMU7FGKAG4pCKcRRQIaBRinUmKAGYpcU6jFMBm2jFPxSEUDGYo20+jBoFc6WTxr4bibDatEf9xHf+QqrP8QvDcIJS7lmPpHAw/8AQsV4wKO9fPn2vsonrZ+J+hDpb35P/XNP/iqhPxS00H5LC7I9yo/rXleBTgKVx+yient8U7LHyaZcE+8ij+lQt8U1/h0cn63P/wBjXm+KXoaLsfs49j0J/ilKfuaQi/705P8A7KKrt8TdSIwllaKffcf6iuGzS8EYpXY/Zw7HaH4maz2ttPH/AGzf/wCLpr/EjXGGBHZL/uxH+rGuNGBT6LsOSPY6d/iB4gY/LcxJ9IV/qKZ/wnfiMn/kIAfSCP8A+Jrm80oNK7Hyx7HQt428Rtx/aTD6Qxj/ANlqM+Ldfbrqk/4YH9KxM0uaV2Oy7GwfE+uN11W7/CQikPiPWj/zFr3/AL/t/jWTmlzRdlWRp/2/rJ/5i19/3/b/ABpp13V++q3v/f8Ab/Gs/NLmi7CyL41vVf8AoKXv/f8Ab/Gl/trVD11K7P8A22b/ABrPBpQaV2FkX/7a1Uf8xK7/AO/zf40o1zVh01O8H/bZv8aoUZFF2OyNH+3tYHTVb0f9t2/xpw8Qa1/0Fb3/AL/t/jWbRnFF2Fkay+I9aHI1S7/GUmnr4p1xTldTn/E5/nWOGpQaV2PlXY3l8ZeIFGBqLH6xIf5rUi+N/ECnm9VvrCn+Fc7S5ouxcsex1KeP9aXhvsz/AO9F/gRUw+Ier97exP8A2zf/AOKrkKcKOZi5IdjsY/iJqAP72ytWH+xuX+pq2vxHOBv0oe+Lj/7GuEyKOKOZh7OHY9BX4jWxHz6bMD/syg/0qVPiHpzH57S6Ue20/wBRXnPFHFHOxexh2PTo/H2iucFbtPdox/QmraeMdAfH+n7T6NC4/pXlFHFPnYvYQPYE8R6K4yup230LY/nVmPVtOkxt1C1Oen75f8a8WwKMUe0YvYR6M9zRlkGY2Dj1U5p2Mda8MjkkiOUdlP8AsnFWY9Tv4T+6vblP92Vh/Wn7QX1fzPacUYryODxRrlvnZqU5z/z0w/8A6EDVyPxzrsagGeGT3eEZ/TFNTQnQken4pF4H4n+ZrzyP4gamMeZbWjDvhWB/nV6H4iLwJ9N+rRzf0I/rT5kS6Mztu9HauWh8e6VIwEkVzD7lQR+hrQj8XaDL0vwp/wBuNx/SjmRDpyXQ2aPxqjFrmkzECPUrUk9jKAT+dX1xIgZGDKe6nNMlpoaaKdijFAhDSU40lACfjRzS0YoABRRRQAhqJ7eCT78MbfVRUtHSmm1sJpPcz5NGs5eiMhPdT/jVOXw8Cf3VwQPRlzW4KK0VepHZmMsNSlvE5iXQ7tPuBZB7HH86qSWVzF9+CQD/AHTXYntRW0cZNbnPPAU3s7HD4oxXZvbQSA+ZCjH3UGqkmi2b52qyH2b/ABraOMi9zmll0ujOXorcl8PnrFOD7Mv9aoyaVeR5/clh6qc1vGvCWzOaeEqx3RQxmlxUjxPEcOjKfQjFNFbJpnO4tDce1GKdRii4rDMUmKfjFGKAsMIoxTiKTFNEjcUU6kIpgNwKTFPpMUhDaMU7FLTEMIpKeaTFADcUU7FFADTSYp9JikMbikp+KQincQwilpaMUEs8woFIKXvXgH3Y7NKDTKcKQhwpaQUtIAoopR0oGKD60ozTRS0gHd6Wmc+tLmgB4NLmmfjS596VgHg07NR5pc0WHcfmjNNDe9KGpWC48GnCq88siQO0Sh5AOFJxms9dYi8tR9tg88H5o2XA/PPFUoN7DubGaM1FHKJEDDHTnBp26psA/NLmo93vS7qLBcfml3e9R7qXd70rDH7qXNR7qN1FguSZo3YqPd707cKLASBqM1Hu96XdRYLkmaX8ai3e9LupWGS5pQai3UBqLAS5pc1Hupd1Fh3H5ozTN1G6iw7kmaKYGpc+9ILj88UAim596QdOvc/zoAk4oxTPxpQT60rDHUqSSQtuikaNvVDg03dRmgRfh1zVoMeXqN0MeshP860bfxrrsDAtcpMB/DLGP5jB/WufzRVJsTjF7o7CH4h3yt+/sbd19ELJ/PNaEPxDtHH76wmjP+w4cf0rz/8AGlwKOeRLpQfQ9Pg8b6JKcPNLCf8AppEf6ZrTh1vSrjAi1G2Ynt5gB/I145jmlxT5yXQie4IySLuRldfVTkUteK295d2mfs1zNDnr5blf5Vfg8Ta5btlNSnPtIQ4/8ezVc6M3h30Z6170YrzqLx9qiACWG3l99pU/oa07b4hW7H/StPlT3ikDfocU+ZEOjNHZUGsW38XaJcYH2zymIziVCuPxxj9a0oL6zuuILuCUnskgNO6IcWt0TelFKVxxRimSJRmlpOfWgVgoowaOaAsNZVcFWUEehFVpdMtJRzCFPqvFWjmiqjNx2JlCMt0Y82gDrFOR7OM/qKpTaPdxDIUOP9g5rpqK2jiqi3OaWCpS2VjjJIZYj86Mv1GKZXaOiyKVdQynqCM1Vl0uzlHMQX3TiuiOMX2kcs8uf2WcrikxW9LoKnJimIPowzVGXSbyL/lnvHqhzW8cRCWzOSeDqx3Rn4oxUrxvGcOrKfQjFR1smc7g0NI4pMU7FGKdyXETFJin4oxTuTYZijFOxRii4WGYoxTse9G3mmFhmKXFKR70EUh2G0YpcUY96YhhBopxoxQKx5UDTs1CGHrS78d68Cx9yTDFOFQCUU7zR60rBYmHWlFQ+co7003kS9XGfY0WY7Fmioo51k6BqmJAUk8YHepCzEpcgUxmGOtM8z3phYmzSZqHzfc0eZmiwE+73o3e9Qb+epxS7x60WCxNuGKUPVfcT3pd9FgJ99LuqDdRvosFifd70yFRbRXcULNHHdqVnRTgS
A8EEVHupd1VqthDoI47aIRxKQg4AyTUu+oN9HmUrDLAajfVYuTShjipsBY30u+q+6l3Uxk+/Bo3iod3FAb60WAn30u+oNx96Nx96LAWNwo3VBuNKGJpWGTBqcHqvk0uTRYCffS76g3GgPilYZY3Uu6oN/1o8z60coXJw1LuFQeZ9aPM+tKwFjeKXcKreZ9aXzKLAWS+Kgubs28G9V3HJzk4AHqabvpj7Jo2R1DIwwQehppdxjRqbrbCWQx7T0YKwQ+mG6fzq9HLvjViMEgEj0NZzWweyjsnmna0iYMkDTMUUjpgE1aVsCnNLoJXLO6nBqrb6cHqLDuT5pc1BvpQ1KwE2aUGog1AeiwE2aM1Hv8AelDUrBckzS5pgajNFgH5pabmjdQFx1NKgnkA0uaKQXLtvrGp2iqsF9cIo6L5hIH4dK17Lxtq1s3+kNHdJ6OgUj8RiucJparmYnFPc7q2+IMTuBc6e6LjlopA3P0IH861rfxhok7BTdNEx7SoQPzGRXl4ApQKamzN0ons8N1bXQBguYZQenlyBv5VNivE0Z4pA8bsjjkMpwRWlB4j1q3xs1Kc+0hDj/x7NVzmbodmeskUmK4C38f36DFxa282B1XKE/zH6VsWfjzTZyFuoZ7Zj3wHX8xz+lPmRm6UkdPRVS31jS7v/U39uxPRS4DfkeavbeM9jVENNDKDT8UmKBDKWlIpMUDGMiOu11DKexGapT6TaTAgIY27FeP0q/SE1UZyjszOVOMviRhS6FIOYpQ3+8MVSl0+4gHzxNj1HIrqcUoHFdEcVNb6nNPBU5baHGlfrSY+tdbJaW8xJkhQk98c1Rl0OJuYpGU+h5FdEcXF76HHPL5r4dTBIpMVfm0q5iPCeYPVKqNGyHDKQfQjFdMakZbM5J0ZR0kiKjFPxRiruZcozFJipMUhFAWGYoxT8Um2mKwzFIRUm2grQFjxbcaaXI71gP4lH8Fsfxf/AOtUD+I7hvuQRKP9rJrx1Skfb88TozIfU0hd/WuXbXL9j8rIvsEz/PNNGrak3SU/hGP8KpUWT7SJ0z727mo4YXE4JbIx0rmpLzUZOTLL+HH8qvaE10+qJ57yMu08MxNEqbSbuNTV9jtLSPgVPeKEtJWZgFCHJJwBS2qfKKTXF/4kt2B3iIri+0VzHNjVbaDj7XGR6Bs07+3rHk/aV/I1y32L1f8ASlFiP7/6V3+yiZuUux03/CQ6eOsz/ghpp8S6eP4pW+if/XrnRYp3Y04WcQ65/On7OAryN4+KLEdEnP8AwEf40xvFMAPyW8h+pArG+yw/3T+dL9nhCn5P1NHs4CvI1f8AhK17Wjf9/P8A61J/wlef+XM/9/f/AK1c0DU9qqvcoGGR6fhVeyh2FzM3v+ErYf8ALmP+/v8A9aj/AISuTtZp+LmqYji/55L+QpQkY6Iv5VPLDsVqTv4ouz923iH5n+tRHxJqR6eWv0SlG0dAB+FLuHpRaPYdn3Iz4h1Q/wAaf9+xR/bmrHpJ+UY/wqTcPSkzTSj2Cz7jDq2rn/lq4+iD/Ck/tbV/+e0v/fA/wqTPtRmnZdhW8xn9p6uf+W83/fP/ANak+36v/wA/E/51KGoBpadgt5kP27Vv+fmf/vql+36sP+Xqf/vqps0Zo07BbzIf7Q1b/n6n/wC+qP7Q1b/n6n/76qXNGaenYLFc6vqqPt+2TbvTNSDVtaHS4m/FR/hVWbH2z8RV8UNJdBLUjGsa0P8AlvKf+AD/AAp39t6yOsz/AIxj/CpM0bjS93sOw0eINYX/AJag/WIf4U4eI9VHJaP/AL9CjNGaLR7BZ9yVPFl8vDwwv+BH9amHi6X+KxXPtIf8KplUP8IP4UnlR/8APNP++RScYdh69y+PFx/isfyl/wDrU8eL4+9m4+kmf6VlmCE9Y1/Dimm1g/ufqaXs4dg1NxPFtiR88Nwp9lU/1qVfFOnE8+cv1T/A1z32SD+4f++jSGyhP94fQ0vZUx3kdQPE2lngTMPrGami1rTZMAXsI4/iO3+dcAyhZio7EimsOlDw8Re0Z6ZHcwTDMU8Tj/ZcGpQx+tebx2LPGrhhyKlVL63/ANTPKo/2JCKh0F0ZXO+x6JvpQ9cEmqaxCu0XEpH+0ob9SKeviLVoz8zq/s0YH8sUvYS6MXOd2GpweuMi8XXaj97aRN7qSv8AjVuPxhD/AMtLORfUq4P9BUOhPsPnR1Qel31gweKNMmOGeSH/AK6J/hmr0eq2Ev8Aq7yA+mXAP5Gs3Tkt0PmTNAPTw9VVkD/dIb6HNODGpsMtB6XfVcNijd70rBcsB6dvqtu96cHosBZD5pwb3qsr08NSsBPu96A2Rmod2aUNilYLk4NLuqIPShsmiwEoNBPNMBpc0gHUcd6SlHWmAYFWrPUb7Tyfsl3NCCckI5AP4dKrGlFAM6Sy8capbv8A6T5d0nowCn8wK3bTx7Yy5F1azQHsUIcf0P6V59S1Sk0ZunF9D1q017Sr4gQX0O48BHO1vyNaGM9ORXiu2r1lrOpadxa3kqL/AHSdy/keKfOQ6PZnrZWmkV5/aeOtTgwtxFDcr3JGxv04/St2x8babdOEuY5LUn+JvmX8x/hVKSZm6ckdHiiobe+s7xd1tdQyj/YcGp9pqiLCUlOpvekApqN4Y5VKugYHsafRTTsJq+5nTaRBJyhMZ9uRVGbSJ48lMOPbrW9iito4ipHqc88LTn0OUkheI4dGU+4pmK6x40kG10DD0IqnJpVs4JQFD7GuqGLT+JHHPANfCzn8UYrSm0maMZQiQe3Bqi8TxnDqyn3GK6I1Yy2ZyzoTh8SI8UhFPINJitLmLifMCxRj+EU8Kg6KPyqMNkUu73rh1PsNCXj0oyKi3UbqQyQmtHRBu1Jf901lbq1/DmG1UD0Q1M/hYrnd2cfyiovECbdFuT/sVoWcXyD6VV8TDboN1/1zNcNveQKR5nmlyfSoCxBpRIDXfYLk26kLVCT3o3U7Bcl3Uhb5T9Ki3UFuD9KdiSitWLU4ul/H+VRIKliG24X6/wBKt7ELc0N9G6ot1JurOxoTbqUNUG6l3UWAm30b/eod1G+iwE2+l31Buo3UWC5Nv96XfUG40bqLBcn8yl31X3H1p2/iiwE2+gPUO+jdRYCGU/6WD7ir26qL8zZ9xVjdTaJTJ91G6qrfMQdxGPSlAKHdubB9ehpWKuWd1LuqDfRuosFybdzTt1QB+aN3vSsMn3Ck3Cot3vRuosBLuFLuqHdShqLAUJObhv8AeNNYdPrUm3dM3+8aJEwB9a0MzRt+IE+lP3VDCcQp9Kdmsmaku7igtUefejNIBxVG+8in8KY1vA38A/Cl3UuaYiJrGEj5dw/WojYej/0q2WpU+dgq9ScCndhZFT+zbuOISoSFJ4IfFTRX+sWv3bifHoTuH65rSvZAm22ThYwAfc1U3VKk3ugcUOi8ValCw81Y5FHUFcE/iK0Y/GNu2PNtZU/3WDf4VlkBxhgCPeo2tYG6p+RxScYPdCszpofE2mSgZmaMns6n+nFaFve21z/qLiKT2RwTXCPYRH7pYfXmoWsZByrAn64qXRg9
mNNnpQbmnh/rXnMGoatZ/cnmCjjDfMP1zV6DxZfxECZIpV78bT+n+FZvDy6D5kdzvpfMrlIvGNu2BLayoe+xg3+Fa1rrVhdKvl3KKx/gc7SPzrN05R3Q00zXD04Pk1WByM9qerVFgLO+nBhVfdTg9JoCwDUFxqEdsVBUsSeTkAD6k0oeqs+mWd1dR3M0StNGMKTyPxB4P4iiKV9RNltdRhknWNFLq3R1IK1aBFZllp1rYSSPbxhXlOWPQfgBwPwFXg9ErX0AmpajDZpwNSA+lpuaXNABiijIpc0AIMg5HBFatn4j1ayAWO8dkH8Mp3j9elZYpe9MnRna2fjtPLVb2zbeOC8J4P4Hp+dbdr4l0e7IC3axsf4ZRs/U8V5gKXFVzMhwiz2NSrqGUhlPQg5BpcV5NZ6lfaeQbS5kiGc7QflP4Hit2z8b30LYu4Y7hMclfkbP8v0qudEOmzuxRWJYeLdMviVd2tnA6TYAP0PStpGSVA8bBlPQqcimjNprcMUYpSKQimISkZFcEMoI9CKcKKAKE+lW8mSgMbe3T8qoy6TOnK7XHt1rcpe9bQxE49TCphqc+h8U7qN1R5o3e9dVj0OYl3Ubqi3e9KGosFyXNb3hIb9ax/0zP8xXO7q6TwUN2tn/AK5H+YqJL3WKT0PULSL92KzPFa7dAvD/ANM/6ity1XEYrH8Xj/inbz/rn/UVx21RFOWp5CzZpm6kY88Uw13pGrJA9O3ZqDNKGosK5Juo3cGo80ZpiJIkzmnAbZ/8+lPt1+99aHGJj9f6U+hK3HE0maQ0VBoLmlzTcDNLigLhmlzRtpQhJwBmgLiZoqUW8zfdic/RSaeLK6PS2m/74NFmLmRXpasjTr09LWb/AL4NO/sq/P8Ay5z/APfBp2YuZdypRV0aRqBP/HpL+IxT10TUGP8Ax7MPqRRZhzx7lDNGa0x4f1AjPkqPq4pf+Ee1DH+rX/vsU+Vi9pHuZbKcZ9x/WjmrRtJRP9mIHm71TGe/NXB4evj/AAIP+BinysXPFbsyc4o71rf8I5ff3Y/++xR/wjt9/wBMv++6XKx+0j3MrNLnNah8O3+OPK/77ph0C/X/AJZo30cUuVh7SPczgSDS7jVw6Rfqf+PVz9OaadMvh/y6Tf8AfJo5WNTj3KuTS5NTmwvF62sw/wCAGm/ZLn/n3l/74NKzDmXci3UobkUphkXrGw+opoU5osVcZEuZj9TT7lNsa/Wlt1zKfxqS8H7tOP4v6VdjO42M/u1+lO3UxBhF+lFZtGtx+6jdTDRnilYCTNG6osmjNFguTbqvaaFBkuJPuRL+tZgPNXpj5FlHD0Z/3j/0qJLoUiKSQu7Mx5JyaQNxUW6jcc1SQmyYGlzUW7mnbqTQEmacKh3ZpwakMlzUM4VoXyASAacGzTZf9S/+6aEDMpaU8EUkYp7CtjJbFmE3tuA8LyJkZBRv8K07XxVfwECdUmUdcjDH8RTYP+PaL/cH8qHhSX76A1jKz3Roo9jct/Fmny4EvmQHvkbh+Y/wrWt7+1u8/Z7iOQjqFbn8q4V9PibOMiq7afKnMZzjpg4NZulB7B7x6ZmlVq8+t/EGrWXytJ5i4wFmXP69a1bTxmpAF5alT/eiOR+R/wAazdCS2C6OvDU7dWZaarZXqgw3KEn+AnDflV7cRWTi1uBOGpwaq4fmnhqmwyffTg1V91ODUWEWAacDUAenB6LAT5paiDU8NxSJHilFN3Zp1AC0UlLQFxRUsM01u2+CaSJvVGKn9KizTs+1AHQ2PjLULZgt0FuowMc/K35gfzFdDaeL9KuRiSR7dsdJF4/MV5517Uu3IpqTJcUz1yCeG5jEkEqSoejIwIp5FeS21xPZy+bbSvE+MFlOM1v2vjPUIdqzxwzqOpIKsfxHH6VSkQ6fY7rFFY1h4p029bY7NbvjpLgA/j0raUrIoZGDKehByKozaaPiGjNGB60nA716ZqLRSbhRvAHA5oFckHSup8CjdrrD/pif5iuULkEjA/A5rq/h9l/EEntbsf8Ax5aiWwpS0PXLZMRjisXxiMeG73/c/qK37df3Y+lYPjQ48MX3+5/UVy21RnTep4ue/IphZR3FNfrUZ6HPWuw2cidFLhmUZCjJIHSpUgLmIKykydMducc1UeVmjSPChUGOB15zk1dsruXR74yCNWlUbSG7Z/rVIhyfQZdwy2c7xSoQy9+gNOtpbU8To2cNyGwM4+X9abfalcX0sjyNhXP3R0A7Cqi/eH1o0C7e5q24HzDHerulWUd/rCW8udrbicHHRc/0qvbJzJ/vVo+H+PEcJH/TT/0E00hSdk2bv/CO6an3rct9Xb/GpU0nTk6WcX/Ahn+dbHyzDbjDenrVdk2Ng1aSOR1Jdyn/AGbY/wDPnb/9+x/hTlsLNelpAP8AtmP8KsUVVkRzy7jUjjThI1X2UYqeOUr8rZMZ6rn9R71FmjNFkHMxZVaPBDb0PRvX/Cmj5hxTkk2gqy7kPVc/r9aZInlEMp3I3Q/0PvSKbuL0pc01TmlxTJA0lKaSmAZpaTvS4pDOUcf8VD/29p/M11QzXMOMeIB/19p/M11JpI0qdBh5pMU40lVczExQBS/hQTkk4HXtQAdOlFFApAFA5opRQAoJ7Gn5zjPP1qMU8dRRYdziLhQNZuwOglf+ZplyPlj/AN7+lTXHOtXZHTzX/maZdrhY/wDf/pWR2LY6y1hilsLbfFG2Yk+8oPYUNYWZP/HrD/3wKfZf8g+2/wCuKfyFSmtLI5XJplF9JsZOtso/3SR/Kqz6BZNnb5i/Rv8AGtY00jNLlTGqkl1MJ/DJO4w3YPorp/XP9KrN4cvV6PC30Y/1Feg+HJ9HtZ531bawKhY0e3Min1PDDB/xro21LwaYyRb2ZbHANnKM/wDjxrGbUXblZtGpJ63PG4NCvVnUvCDGp3MQwPA56ZzVa7trx7h5Ht5BuPA2ngV27gBHmCbBMSETP3Vzn/634GoPeqVJPUHiZLQ4Mgg4I5o6V3UsUc67ZY1cf7QzVOXRrCUH9zsPqjEUOl2KWJXVHJZpc10L+G4SfkuJF/3gD/hVSXw9cKf3UqOPf5al02aKvB9TJDUuanm067gYh4HwP4lXI/MVXKlTyMVm423NVJPYfmhzmJx/smmUN/q2HtRYdynCuafKuAKdbrz+FSTrgL9a0sQti7EcQRj/AGB/Kn5qGNsRJz/CKcDWDWpqSdQRntSr8oxkn6nNMDU8GlYYpAYYYAj0NQyWcL/w7fpU2aKLtCM2TTnXPlsCPTOKmh1XVrEjFxKVH8LncP1q5SEZ7Zp819xcvY0bLxhGQEvYWRu7pyPyrftNWsL3/UXSM3908H8jXDyWkTj7u0+oqpLZGNSyMCBzjpUOnB+Qao9QyaVWrzix8R6jZbVWXzIl/gkGRj69a37PxlbyHbdW7RH+8h3D/H+dZyoSQrpnVBqcGqpb3dvdxh7eZJFP908/iO1TZxWLVgLAb3qRXqtupwaiwiyGp4aqwfmnh6VgLAYU7IqANUgakIkpRTFNSCkAuBS0etOxQAgpcUUUgCp7e8urQk21xLFk5Ox
… (remainder of base64-encoded JPEG background image data omitted) …"
+          ]
+        }
+      },
+      "widgets_values": [
"[{\"x\":174.49402268882744,\"y\":383.8868499154203},{\"x\":173.05192332776272,\"y\":200.4518111879918}]", + "[{\"x\":174.4940185546875,\"y\":383.8868408203125},{\"x\":174.46397399902344,\"y\":380.0652770996094},{\"x\":174.43392944335938,\"y\":376.24371337890625},{\"x\":174.4038848876953,\"y\":372.4221496582031},{\"x\":174.37384033203125,\"y\":368.6005859375},{\"x\":174.3437957763672,\"y\":364.77899169921875},{\"x\":174.31375122070312,\"y\":360.95745849609375},{\"x\":174.28370666503906,\"y\":357.1358947753906},{\"x\":174.253662109375,\"y\":353.3143310546875},{\"x\":174.22361755371094,\"y\":349.4927673339844},{\"x\":174.19357299804688,\"y\":345.67120361328125},{\"x\":174.16354370117188,\"y\":341.8496398925781},{\"x\":174.1334991455078,\"y\":338.028076171875},{\"x\":174.10345458984375,\"y\":334.2065124511719},{\"x\":174.0734100341797,\"y\":330.38494873046875},{\"x\":174.04336547851562,\"y\":326.56341552734375},{\"x\":174.01332092285156,\"y\":322.7418212890625},{\"x\":173.9832763671875,\"y\":318.9202880859375},{\"x\":173.95323181152344,\"y\":315.09869384765625},{\"x\":173.92318725585938,\"y\":311.2771301269531},{\"x\":173.8931427001953,\"y\":307.45556640625},{\"x\":173.86309814453125,\"y\":303.6340026855469},{\"x\":173.8330535888672,\"y\":299.81243896484375},{\"x\":173.80300903320312,\"y\":295.9908752441406},{\"x\":173.77296447753906,\"y\":292.1693115234375},{\"x\":173.742919921875,\"y\":288.3477783203125},{\"x\":173.712890625,\"y\":284.52618408203125},{\"x\":173.68284606933594,\"y\":280.70465087890625},{\"x\":173.65280151367188,\"y\":276.8830871582031},{\"x\":173.6227569580078,\"y\":273.0615234375},{\"x\":173.59271240234375,\"y\":269.2399597167969},{\"x\":173.5626678466797,\"y\":265.41839599609375},{\"x\":173.53262329101562,\"y\":261.5968322753906},{\"x\":173.50257873535156,\"y\":257.7752685546875},{\"x\":173.4725341796875,\"y\":253.95370483398438},{\"x\":173.44248962402344,\"y\":250.13214111328125},{\"x\":173.41244506835938,\"y\":246.31056213378906},{\"x\":173.3824005126953,\"y\":242.489013671875},{\"x\":173.35235595703125,\"y\":238.66744995117188},{\"x\":173.3223114013672,\"y\":234.84588623046875},{\"x\":173.29226684570312,\"y\":231.02430725097656},{\"x\":173.26223754882812,\"y\":227.2027587890625},{\"x\":173.23219299316406,\"y\":223.38119506835938},{\"x\":173.2021484375,\"y\":219.5596160888672},{\"x\":173.17210388183594,\"y\":215.73806762695312},{\"x\":173.14205932617188,\"y\":211.91650390625},{\"x\":173.1120147705078,\"y\":208.09494018554688},{\"x\":173.08197021484375,\"y\":204.27337646484375},{\"x\":173.0519256591797,\"y\":200.45181274414062}]", + 720, + 480, + 49, + "path", + "basis", + 0.5, + 1, + "list", + 0, + 1, + null, + null, + null + ] + }, + { + "id": 83, + "type": "AppendStringsToList", + "pos": { + "0": 334, + "1": 915 + }, + "size": [ + 315, + 82 + ], + "flags": { + "collapsed": false + }, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "string1", + "type": "STRING", + "link": 212, + "widget": { + "name": "string1" + } + }, + { + "name": "string2", + "type": "STRING", + "link": 211, + "widget": { + "name": "string2" + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 217 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "AppendStringsToList" + }, + "widgets_values": [ + "", + "" + ] + }, + { + "id": 86, + "type": "AppendStringsToList", + "pos": { + "0": 683, + "1": 916 + }, + "size": [ + 315, + 82 + ], + "flags": { + "collapsed": false + }, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "string1", + 
"type": "STRING", + "link": 217, + "widget": { + "name": "string1" + } + }, + { + "name": "string2", + "type": "STRING", + "link": 218, + "widget": { + "name": "string2" + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 219, + 220 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "AppendStringsToList" + }, + "widgets_values": [ + "", + "" + ] + }, + { + "id": 65, + "type": "CreateShapeImageOnPath", + "pos": { + "0": 1189.82080078125, + "1": 1284.833251953125 + }, + "size": { + "0": 313.4619445800781, + "1": 286 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "coordinates", + "type": "STRING", + "link": 219, + "widget": { + "name": "coordinates" + } + }, + { + "name": "size_multiplier", + "type": "FLOAT", + "link": null, + "widget": { + "name": "size_multiplier" + }, + "shape": 7 + }, + { + "name": "frame_width", + "type": "INT", + "link": 149, + "widget": { + "name": "frame_width" + } + }, + { + "name": "frame_height", + "type": "INT", + "link": 150, + "widget": { + "name": "frame_height" + } + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 153 + ], + "slot_index": 0 + }, + { + "name": "mask", + "type": "MASK", + "links": [ + 154 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CreateShapeImageOnPath" + }, + "widgets_values": [ + "circle", + "", + 512, + 512, + 12, + 12, + "red", + "black", + 0, + 1, + [ + 1 + ], + 1.3 + ] + }, + { + "id": 68, + "type": "ImageCompositeMasked", + "pos": { + "0": 1528.82080078125, + "1": 1280.833251953125 + }, + "size": { + "0": 315, + "1": 146 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "destination", + "type": "IMAGE", + "link": 155 + }, + { + "name": "source", + "type": "IMAGE", + "link": 153 + }, + { + "name": "mask", + "type": "MASK", + "link": 154, + "shape": 7 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 156 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageCompositeMasked" + }, + "widgets_values": [ + 0, + 0, + false + ] + }, + { + "id": 91, + "type": "Note", + "pos": { + "0": 1565.82080078125, + "1": 1475.833251953125 + }, + "size": [ + 251.63747656176258, + 73.90463053872986 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [], + "properties": {}, + "widgets_values": [ + "This is only for visualization" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 85, + "type": "SplineEditor", + "pos": { + "0": 232, + "1": 1226 + }, + "size": [ + 765, + 910 + ], + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "bg_image", + "type": "IMAGE", + "link": 216, + "shape": 7 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [], + "slot_index": 0 + }, + { + "name": "coord_str", + "type": "STRING", + "links": [ + 218 + ], + "slot_index": 1 + }, + { + "name": "float", + "type": "FLOAT", + "links": null + }, + { + "name": "count", + "type": "INT", + "links": null + }, + { + "name": "normalized_str", + "type": "STRING", + "links": null + } + ], + "properties": { + "Node name for S&R": "SplineEditor", + "points": "SplineEditor", + "imgData": { + "name": "bg_image", + "base64": [ + 
"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAHgAtADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDWAoFLigfSu0+YExRinYoxQAgFAFKBSgUAJilxxS4oxQAmKMUtGKBiYNFLiigBMUGloxQhhijFLilAoEAFFOxRigBMUAUuKWgBuKMe1OxRigBuKUClxRigBMUtGKKADFJTsUYoAbTgKMc0YoAMUmKXFLigBtFOIpMUgDFFLijFACYoxTsUYoAbiinYpMUwEoxS0tACYpMU7FIRSATFLikxTsUCExRilxRigBKMUuKKYxtGKdigCkIbilpcUEUAJSYpcUuKAG4oxS0UAJiilpCKYDTRilxxRikISjFLRigBMUY46UuKMUANxRilIoxQAmKKUijFADaMc07FGKAGYoIxT8UhFMQ3FJTsUY5pCG4oApcUY9qYDSKTFPx7UmDQAzFGKeR7UYoEMxS4p22kxQAmKDS4ooAZR+FOIpCKAG4paXFJigB4FGKdiig0EpMU6jFADcUtLRigBKWjFL2oASlx7UAUuKBjaMU/HNJtouAmPaj8KdRSGNFOAopaYgo70tLigBKKXFGKQCUtGKWmAlJTqMcUgG0U4UYoASjFOxRjNADcUU7HtRj2oATFFLQKAEoxTsUUANoxTsUAUXASjFLiloAbijilooAbilxS8UtADaPwp1JigBPwpcUfhS0ANxRTqSkIKTFOxQBQAlJTiKMUAJSYp2KCKAG4oxTsUUwG0YpcUtADaTFOIpMUCGkH0ox7U7bRigBuKMe1OxRQIbgelJjmnYoxQMbiilxRigBtFOxSEc0AJ+FFLilxQA3FGKU0UCYmKbg0+kxQIbiinYoxTENxRinYpMUAJikxTse1H4UANx7UmKdj2oxSAYRRinEUYoAbikxTsUYpgNoxzTiKQ9TQIdRilApcUGo3FGKdijFACbaMU4CigBMUYpaXFFwsIBS4opaQxuKMU6jFIY3FGKXFLRcBMUuKXFLigLCYpcUtFFxWExRinYoouOw3FGKdij8qBWExRjil70YouOw3FLg0uKXFFxWG4pcUuPalxRcY3FGKdijFADSKBTsUmKQhMUYp2KUCmFhuKMe1OxRigLDcUYp2KKAG4pMU+kxQAzFOxS4oxQAmKMU7HtRigBmKdijFLQAm2kAp9JjmlcBMUYpxFJQAmKMUtFO4hMUY9qWjFFwEA9qMUuKKAGkZoxTqMUANIpMU/FJigQ3FJin4oxQAzFHenYowfQUANxRTse1GKAG4pNvtTsUEc0ANxQRS0YoEJijFLijFADSKTFPoxTAbikwKcR9KTFIQ2ilopgJijFLRQA3FGKdikxQIQUYpcc0YoAaRRinYpMUCG4pcD0paMUAMIoxTiKTFMY7FID6V3K6Fpq/8uwP1Ymnro+nIci0iz7jNcDx0OzPV/s6fdHC9aK7/APs6xHSzg/79ipY7eCP7kES/RAKl49di/wCzn1kedgE9Bn6VKltO/wB2GRvopNehYX0FKMVP159i1ly6yOBGn3hGRaT/APfs/wCFKNOvT/y6Tf8AfBrvajj+5/wI/wAzS+uy7Ff2dDucSuj6g3S0k/EYp39i6j/z6v8AmK7jnGM0c+tL67Psh/2fT7s4kaHqJ/5dj+LCn/2BqP8AzwH/AH2K7Ln1o59aX12p5D+oU/M43/hH9R/54L/32P8AGl/4R7Uf+eK/99iuxpfxpfXKnkP6hS8zjx4d1A9UQfV6evhy+PeIfVv/AK1dbRS+uVPIf1Cl5nKf8I1ff34f++j/AIUf8I1e/wB+D/vo/wCFdWOtL+NH1uoP6jS8zk/+Ebvf+ekH/fR/wpf+Ebvf+ekH/fR/wrq8UYo+t1A+o0vM5T/hG73/AJ6Qf99H/Ck/4Ru+/vw/99H/AArq6X8aPrlQPqNLzOS/4Ry+B6w/99f/AFqT/hHr8D7sZ/4HXXfjR+NH1yoH1Gl5nI/8I/qH/PND9HFH9g6h/wA8V/77FdaG+dlzyAD/ADpc+9H1yp5C+oUvM5A6DqAH+pH/AH2KYdGvwf8Aj2J+hFdn+NB+tP65PyE8BT8zi/7Hv/8An2b8xTG0y+Xrayfgua7bNLn3p/XZ9kL+z6fdnDGwvB1tZ/8Av2aja2nT70Mi/VTXe8+tHXrT+uy7C/s+Pc8+II4IOaXBrvtinqq/lUbWlq/37aFv95AapY1dUQ8u7M4TFLiu2bTLFutrEPouP5VE2jae3/LAD6MatY2HVGby6fRo47FGK61tBsD0R1+jmoW8OWpA2zSg++DVLGU2S8vq+RzFGK6FvDQx8tzn6p/9eom8OTgfLNE31BFUsTT7mbwVZdDC70uK1G0G9X+BD9GFRNpN8nW2c/TBrRVoPZmTw1VfZZRxSYxVl7S4iHzwyL/vKRUJUjqKtSTIdOS3QzFGKdiincnlG44o606ii4rCEUU6jFAWGEc0mKkxRimIZijFP20hFADaMU4CjHtSAbRilxS4p3EMxRTsUUXAaRSYp+KTFAhtLS0Y5oAaaMU4ikpgNNJj2p+KMUAMxS4paWgBmKXFLiloAZijFPxzSYoAZj6UYp2KTFAhhFGKfijFAhmKMU/ApMCgBmKMU/H0oxQAzHNFOIoxQAwiinYoxTENxSYp+KSgBp/CinYpMCi4M9Hooor54+sDFFFApAFGKWkp2HcWo0+4fqf5mpKaowp+p/nQFxaKXFGKAEpKUijFACd6XvSgc0uOaQxMUYp2PpRigY3/ABopaMUwCijFGKAAjikFOxSYpAIKWl2k0bTTAiX/AI+JP91f60+gIR
Mx9VH8zS4NABSU7FAFAhtLS4oI9KBiUUYpMcUALRSYpaQB9aM0hpQKYBmk/KlxSfiKQBRRiloATNLmkOc0UDFz7UxVVl+ZVPzN1HuafTI/un/eb/0I0xNET2Vq/wB63iPvsFQNotgw/wBRj3DH/Gr1FUpyWzIdOD3Rjv4dtjnZLKv1wagbw4c/JcD8VrfFLVrEVF1MnhKL6HLyeH7xASpjf0Abn9arvpV8nW3Y/Tn+VdfS5rRYya3MngKT20OGkgli/wBbG6f7y4pgFd4cHggH61A9rbSD54ImPqVFarG90Yyy7tI4vFIRXWvo9i//ACy2/wC6xqtL4ft2/wBXK6n35FaxxlN7mMsvqLbU5vFGK2n8PTc+XMh9Mgiqr6NfR/8ALHd/usDWqr031MJYWrHeJn4oxU0ttNCf3kTr9RURFaKSexg4SW4hFNxT6Sncmw3FJin4oxTuKwzFGKfikxQIbilxS4pcUANIpMU7FBFADMUYp+KQimA3FGKUClxSuIZijFPxRincCOjGafikxzRcQzFGKdijFAhuKMU7FGKAG4pCKcRRQIaBRinUmKAGYpcU6jFMBm2jFPxSEUDGYo20+jBoFc6WTxr4bibDatEf9xHf+QqrP8QvDcIJS7lmPpHAw/8AQsV4wKO9fPn2vsonrZ+J+hDpb35P/XNP/iqhPxS00H5LC7I9yo/rXleBTgKVx+yient8U7LHyaZcE+8ij+lQt8U1/h0cn63P/wBjXm+KXoaLsfs49j0J/ilKfuaQi/705P8A7KKrt8TdSIwllaKffcf6iuGzS8EYpXY/Zw7HaH4maz2ttPH/AGzf/wCLpr/EjXGGBHZL/uxH+rGuNGBT6LsOSPY6d/iB4gY/LcxJ9IV/qKZ/wnfiMn/kIAfSCP8A+Jrm80oNK7Hyx7HQt428Rtx/aTD6Qxj/ANlqM+Ldfbrqk/4YH9KxM0uaV2Oy7GwfE+uN11W7/CQikPiPWj/zFr3/AL/t/jWTmlzRdlWRp/2/rJ/5i19/3/b/ABpp13V++q3v/f8Ab/Gs/NLmi7CyL41vVf8AoKXv/f8Ab/Gl/trVD11K7P8A22b/ABrPBpQaV2FkX/7a1Uf8xK7/AO/zf40o1zVh01O8H/bZv8aoUZFF2OyNH+3tYHTVb0f9t2/xpw8Qa1/0Fb3/AL/t/jWbRnFF2Fkay+I9aHI1S7/GUmnr4p1xTldTn/E5/nWOGpQaV2PlXY3l8ZeIFGBqLH6xIf5rUi+N/ECnm9VvrCn+Fc7S5ouxcsex1KeP9aXhvsz/AO9F/gRUw+Ier97exP8A2zf/AOKrkKcKOZi5IdjsY/iJqAP72ytWH+xuX+pq2vxHOBv0oe+Lj/7GuEyKOKOZh7OHY9BX4jWxHz6bMD/syg/0qVPiHpzH57S6Ue20/wBRXnPFHFHOxexh2PTo/H2iucFbtPdox/QmraeMdAfH+n7T6NC4/pXlFHFPnYvYQPYE8R6K4yup230LY/nVmPVtOkxt1C1Oen75f8a8WwKMUe0YvYR6M9zRlkGY2Dj1U5p2Mda8MjkkiOUdlP8AsnFWY9Tv4T+6vblP92Vh/Wn7QX1fzPacUYryODxRrlvnZqU5z/z0w/8A6EDVyPxzrsagGeGT3eEZ/TFNTQnQken4pF4H4n+ZrzyP4gamMeZbWjDvhWB/nV6H4iLwJ9N+rRzf0I/rT5kS6Mztu9HauWh8e6VIwEkVzD7lQR+hrQj8XaDL0vwp/wBuNx/SjmRDpyXQ2aPxqjFrmkzECPUrUk9jKAT+dX1xIgZGDKe6nNMlpoaaKdijFAhDSU40lACfjRzS0YoABRRRQAhqJ7eCT78MbfVRUtHSmm1sJpPcz5NGs5eiMhPdT/jVOXw8Cf3VwQPRlzW4KK0VepHZmMsNSlvE5iXQ7tPuBZB7HH86qSWVzF9+CQD/AHTXYntRW0cZNbnPPAU3s7HD4oxXZvbQSA+ZCjH3UGqkmi2b52qyH2b/ABraOMi9zmll0ujOXorcl8PnrFOD7Mv9aoyaVeR5/clh6qc1vGvCWzOaeEqx3RQxmlxUjxPEcOjKfQjFNFbJpnO4tDce1GKdRii4rDMUmKfjFGKAsMIoxTiKTFNEjcUU6kIpgNwKTFPpMUhDaMU7FLTEMIpKeaTFADcUU7FFADTSYp9JikMbikp+KQincQwilpaMUEs8woFIKXvXgH3Y7NKDTKcKQhwpaQUtIAoopR0oGKD60ozTRS0gHd6Wmc+tLmgB4NLmmfjS596VgHg07NR5pc0WHcfmjNNDe9KGpWC48GnCq88siQO0Sh5AOFJxms9dYi8tR9tg88H5o2XA/PPFUoN7DubGaM1FHKJEDDHTnBp26psA/NLmo93vS7qLBcfml3e9R7qXd70rDH7qXNR7qN1FguSZo3YqPd707cKLASBqM1Hu96XdRYLkmaX8ai3e9LupWGS5pQai3UBqLAS5pc1Hupd1Fh3H5ozTN1G6iw7kmaKYGpc+9ILj88UAim596QdOvc/zoAk4oxTPxpQT60rDHUqSSQtuikaNvVDg03dRmgRfh1zVoMeXqN0MeshP860bfxrrsDAtcpMB/DLGP5jB/WufzRVJsTjF7o7CH4h3yt+/sbd19ELJ/PNaEPxDtHH76wmjP+w4cf0rz/8AGlwKOeRLpQfQ9Pg8b6JKcPNLCf8AppEf6ZrTh1vSrjAi1G2Ynt5gB/I145jmlxT5yXQie4IySLuRldfVTkUteK295d2mfs1zNDnr5blf5Vfg8Ta5btlNSnPtIQ4/8ezVc6M3h30Z6170YrzqLx9qiACWG3l99pU/oa07b4hW7H/StPlT3ikDfocU+ZEOjNHZUGsW38XaJcYH2zymIziVCuPxxj9a0oL6zuuILuCUnskgNO6IcWt0TelFKVxxRimSJRmlpOfWgVgoowaOaAsNZVcFWUEehFVpdMtJRzCFPqvFWjmiqjNx2JlCMt0Y82gDrFOR7OM/qKpTaPdxDIUOP9g5rpqK2jiqi3OaWCpS2VjjJIZYj86Mv1GKZXaOiyKVdQynqCM1Vl0uzlHMQX3TiuiOMX2kcs8uf2WcrikxW9LoKnJimIPowzVGXSbyL/lnvHqhzW8cRCWzOSeDqx3Rn4oxUrxvGcOrKfQjFR1smc7g0NI4pMU7FGKdyXETFJin4oxTuTYZijFOxRii4WGYoxTse9G3mmFhmKXFKR70EUh2G0YpcUY96YhhBopxoxQKx5UDTs1CGHrS78d68Cx9yTDFOFQCUU7zR60rBYmHWlFQ+co7003kS9XGfY0WY7Fmioo51k6BqmJAUk8YHepCzEpcgUxmGOtM8z3phYmzSZqHzfc0eZmiwE+73o3e9Qb+epxS7x60WCxNuGKUPVfcT3pd9FgJ99LuqDdRvosFifd70yFRbRXcULNHHdqVnRTgSA8EEVHupd1VqthDoI47aIRxKQg4AyTUu+oN9HmUrDLAajfVYuTShjipsBY30u+q+6l3Uxk+/Bo3iod3FAb60WAn30u+oNx96Nx96LAWNwo3VBuNKGJpWGTBqcHqvk0uTRYCffS76
g3GgPilYZY3Uu6oN/1o8z60coXJw1LuFQeZ9aPM+tKwFjeKXcKreZ9aXzKLAWS+Kgubs28G9V3HJzk4AHqabvpj7Jo2R1DIwwQehppdxjRqbrbCWQx7T0YKwQ+mG6fzq9HLvjViMEgEj0NZzWweyjsnmna0iYMkDTMUUjpgE1aVsCnNLoJXLO6nBqrb6cHqLDuT5pc1BvpQ1KwE2aUGog1AeiwE2aM1Hv8AelDUrBckzS5pgajNFgH5pabmjdQFx1NKgnkA0uaKQXLtvrGp2iqsF9cIo6L5hIH4dK17Lxtq1s3+kNHdJ6OgUj8RiucJparmYnFPc7q2+IMTuBc6e6LjlopA3P0IH861rfxhok7BTdNEx7SoQPzGRXl4ApQKamzN0ons8N1bXQBguYZQenlyBv5VNivE0Z4pA8bsjjkMpwRWlB4j1q3xs1Kc+0hDj/x7NVzmbodmeskUmK4C38f36DFxa282B1XKE/zH6VsWfjzTZyFuoZ7Zj3wHX8xz+lPmRm6UkdPRVS31jS7v/U39uxPRS4DfkeavbeM9jVENNDKDT8UmKBDKWlIpMUDGMiOu11DKexGapT6TaTAgIY27FeP0q/SE1UZyjszOVOMviRhS6FIOYpQ3+8MVSl0+4gHzxNj1HIrqcUoHFdEcVNb6nNPBU5baHGlfrSY+tdbJaW8xJkhQk98c1Rl0OJuYpGU+h5FdEcXF76HHPL5r4dTBIpMVfm0q5iPCeYPVKqNGyHDKQfQjFdMakZbM5J0ZR0kiKjFPxRiruZcozFJipMUhFAWGYoxT8Um2mKwzFIRUm2grQFjxbcaaXI71gP4lH8Fsfxf/AOtUD+I7hvuQRKP9rJrx1Skfb88TozIfU0hd/WuXbXL9j8rIvsEz/PNNGrak3SU/hGP8KpUWT7SJ0z727mo4YXE4JbIx0rmpLzUZOTLL+HH8qvaE10+qJ57yMu08MxNEqbSbuNTV9jtLSPgVPeKEtJWZgFCHJJwBS2qfKKTXF/4kt2B3iIri+0VzHNjVbaDj7XGR6Bs07+3rHk/aV/I1y32L1f8ASlFiP7/6V3+yiZuUux03/CQ6eOsz/ghpp8S6eP4pW+if/XrnRYp3Y04WcQ65/On7OAryN4+KLEdEnP8AwEf40xvFMAPyW8h+pArG+yw/3T+dL9nhCn5P1NHs4CvI1f8AhK17Wjf9/P8A61J/wlef+XM/9/f/AK1c0DU9qqvcoGGR6fhVeyh2FzM3v+ErYf8ALmP+/v8A9aj/AISuTtZp+LmqYji/55L+QpQkY6Iv5VPLDsVqTv4ouz923iH5n+tRHxJqR6eWv0SlG0dAB+FLuHpRaPYdn3Iz4h1Q/wAaf9+xR/bmrHpJ+UY/wqTcPSkzTSj2Cz7jDq2rn/lq4+iD/Ck/tbV/+e0v/fA/wqTPtRmnZdhW8xn9p6uf+W83/fP/ANak+36v/wA/E/51KGoBpadgt5kP27Vv+fmf/vql+36sP+Xqf/vqps0Zo07BbzIf7Q1b/n6n/wC+qP7Q1b/n6n/76qXNGaenYLFc6vqqPt+2TbvTNSDVtaHS4m/FR/hVWbH2z8RV8UNJdBLUjGsa0P8AlvKf+AD/AAp39t6yOsz/AIxj/CpM0bjS93sOw0eINYX/AJag/WIf4U4eI9VHJaP/AL9CjNGaLR7BZ9yVPFl8vDwwv+BH9amHi6X+KxXPtIf8KplUP8IP4UnlR/8APNP++RScYdh69y+PFx/isfyl/wDrU8eL4+9m4+kmf6VlmCE9Y1/Dimm1g/ufqaXs4dg1NxPFtiR88Nwp9lU/1qVfFOnE8+cv1T/A1z32SD+4f++jSGyhP94fQ0vZUx3kdQPE2lngTMPrGami1rTZMAXsI4/iO3+dcAyhZio7EimsOlDw8Re0Z6ZHcwTDMU8Tj/ZcGpQx+tebx2LPGrhhyKlVL63/ANTPKo/2JCKh0F0ZXO+x6JvpQ9cEmqaxCu0XEpH+0ob9SKeviLVoz8zq/s0YH8sUvYS6MXOd2GpweuMi8XXaj97aRN7qSv8AjVuPxhD/AMtLORfUq4P9BUOhPsPnR1Qel31gweKNMmOGeSH/AK6J/hmr0eq2Ev8Aq7yA+mXAP5Gs3Tkt0PmTNAPTw9VVkD/dIb6HNODGpsMtB6XfVcNijd70rBcsB6dvqtu96cHosBZD5pwb3qsr08NSsBPu96A2Rmod2aUNilYLk4NLuqIPShsmiwEoNBPNMBpc0gHUcd6SlHWmAYFWrPUb7Tyfsl3NCCckI5AP4dKrGlFAM6Sy8capbv8A6T5d0nowCn8wK3bTx7Yy5F1azQHsUIcf0P6V59S1Sk0ZunF9D1q017Sr4gQX0O48BHO1vyNaGM9ORXiu2r1lrOpadxa3kqL/AHSdy/keKfOQ6PZnrZWmkV5/aeOtTgwtxFDcr3JGxv04/St2x8babdOEuY5LUn+JvmX8x/hVKSZm6ckdHiiobe+s7xd1tdQyj/YcGp9pqiLCUlOpvekApqN4Y5VKugYHsafRTTsJq+5nTaRBJyhMZ9uRVGbSJ48lMOPbrW9iito4ipHqc88LTn0OUkheI4dGU+4pmK6x40kG10DD0IqnJpVs4JQFD7GuqGLT+JHHPANfCzn8UYrSm0maMZQiQe3Bqi8TxnDqyn3GK6I1Yy2ZyzoTh8SI8UhFPINJitLmLifMCxRj+EU8Kg6KPyqMNkUu73rh1PsNCXj0oyKi3UbqQyQmtHRBu1Jf901lbq1/DmG1UD0Q1M/hYrnd2cfyiovECbdFuT/sVoWcXyD6VV8TDboN1/1zNcNveQKR5nmlyfSoCxBpRIDXfYLk26kLVCT3o3U7Bcl3Uhb5T9Ki3UFuD9KdiSitWLU4ul/H+VRIKliG24X6/wBKt7ELc0N9G6ot1JurOxoTbqUNUG6l3UWAm30b/eod1G+iwE2+l31Buo3UWC5Nv96XfUG40bqLBcn8yl31X3H1p2/iiwE2+gPUO+jdRYCGU/6WD7ir26qL8zZ9xVjdTaJTJ91G6qrfMQdxGPSlAKHdubB9ehpWKuWd1LuqDfRuosFybdzTt1QB+aN3vSsMn3Ck3Cot3vRuosBLuFLuqHdShqLAUJObhv8AeNNYdPrUm3dM3+8aJEwB9a0MzRt+IE+lP3VDCcQp9Kdmsmaku7igtUefejNIBxVG+8in8KY1vA38A/Cl3UuaYiJrGEj5dw/WojYej/0q2WpU+dgq9ScCndhZFT+zbuOISoSFJ4IfFTRX+sWv3bifHoTuH65rSvZAm22ThYwAfc1U3VKk3ugcUOi8ValCw81Y5FHUFcE/iK0Y/GNu2PNtZU/3WDf4VlkBxhgCPeo2tYG6p+RxScYPdCszpofE2mSgZmaMns6n+nFaFve21z/qLiKT2RwTXCPYRH7pYfXmoWsZByrAn64qXRg9mNNnpQbmnh/rXnMGoatZ/cnmCjjDfMP1zV6DxZfxECZIpV78bT+n+FZvDy6D5kdzvpfMrlIvGNu2BLayoe+xg3+Fa1rrVhdKvl3KKx/gc7SPzrN05R3Q00zXD04Pk1WByM9qerVF
gLO+nBhVfdTg9JoCwDUFxqEdsVBUsSeTkAD6k0oeqs+mWd1dR3M0StNGMKTyPxB4P4iiKV9RNltdRhknWNFLq3R1IK1aBFZllp1rYSSPbxhXlOWPQfgBwPwFXg9ErX0AmpajDZpwNSA+lpuaXNABiijIpc0AIMg5HBFatn4j1ayAWO8dkH8Mp3j9elZYpe9MnRna2fjtPLVb2zbeOC8J4P4Hp+dbdr4l0e7IC3axsf4ZRs/U8V5gKXFVzMhwiz2NSrqGUhlPQg5BpcV5NZ6lfaeQbS5kiGc7QflP4Hit2z8b30LYu4Y7hMclfkbP8v0qudEOmzuxRWJYeLdMviVd2tnA6TYAP0PStpGSVA8bBlPQqcimjNprcMUYpSKQimISkZFcEMoI9CKcKKAKE+lW8mSgMbe3T8qoy6TOnK7XHt1rcpe9bQxE49TCphqc+h8U7qN1R5o3e9dVj0OYl3Ubqi3e9KGosFyXNb3hIb9ax/0zP8xXO7q6TwUN2tn/AK5H+YqJL3WKT0PULSL92KzPFa7dAvD/ANM/6ity1XEYrH8Xj/inbz/rn/UVx21RFOWp5CzZpm6kY88Uw13pGrJA9O3ZqDNKGosK5Juo3cGo80ZpiJIkzmnAbZ/8+lPt1+99aHGJj9f6U+hK3HE0maQ0VBoLmlzTcDNLigLhmlzRtpQhJwBmgLiZoqUW8zfdic/RSaeLK6PS2m/74NFmLmRXpasjTr09LWb/AL4NO/sq/P8Ay5z/APfBp2YuZdypRV0aRqBP/HpL+IxT10TUGP8Ax7MPqRRZhzx7lDNGa0x4f1AjPkqPq4pf+Ee1DH+rX/vsU+Vi9pHuZbKcZ9x/WjmrRtJRP9mIHm71TGe/NXB4evj/AAIP+BinysXPFbsyc4o71rf8I5ff3Y/++xR/wjt9/wBMv++6XKx+0j3MrNLnNah8O3+OPK/77ph0C/X/AJZo30cUuVh7SPczgSDS7jVw6Rfqf+PVz9OaadMvh/y6Tf8AfJo5WNTj3KuTS5NTmwvF62sw/wCAGm/ZLn/n3l/74NKzDmXci3UobkUphkXrGw+opoU5osVcZEuZj9TT7lNsa/Wlt1zKfxqS8H7tOP4v6VdjO42M/u1+lO3UxBhF+lFZtGtx+6jdTDRnilYCTNG6osmjNFguTbqvaaFBkuJPuRL+tZgPNXpj5FlHD0Z/3j/0qJLoUiKSQu7Mx5JyaQNxUW6jcc1SQmyYGlzUW7mnbqTQEmacKh3ZpwakMlzUM4VoXyASAacGzTZf9S/+6aEDMpaU8EUkYp7CtjJbFmE3tuA8LyJkZBRv8K07XxVfwECdUmUdcjDH8RTYP+PaL/cH8qHhSX76A1jKz3Roo9jct/Fmny4EvmQHvkbh+Y/wrWt7+1u8/Z7iOQjqFbn8q4V9PibOMiq7afKnMZzjpg4NZulB7B7x6ZmlVq8+t/EGrWXytJ5i4wFmXP69a1bTxmpAF5alT/eiOR+R/wAazdCS2C6OvDU7dWZaarZXqgw3KEn+AnDflV7cRWTi1uBOGpwaq4fmnhqmwyffTg1V91ODUWEWAacDUAenB6LAT5paiDU8NxSJHilFN3Zp1AC0UlLQFxRUsM01u2+CaSJvVGKn9KizTs+1AHQ2PjLULZgt0FuowMc/K35gfzFdDaeL9KuRiSR7dsdJF4/MV5517Uu3IpqTJcUz1yCeG5jEkEqSoejIwIp5FeS21xPZy+bbSvE+MFlOM1v2vjPUIdqzxwzqOpIKsfxHH6VSkQ6fY7rFFY1h4p029bY7NbvjpLgA/j0raUrIoZGDKehByKozaaPiGjNGB60nA716ZqLRSbhRvAHA5oFckHSup8CjdrrD/pif5iuULkEjA/A5rq/h9l/EEntbsf8Ax5aiWwpS0PXLZMRjisXxiMeG73/c/qK37df3Y+lYPjQ48MX3+5/UVy21RnTep4ue/IphZR3FNfrUZ6HPWuw2cidFLhmUZCjJIHSpUgLmIKykydMducc1UeVmjSPChUGOB15zk1dsruXR74yCNWlUbSG7Z/rVIhyfQZdwy2c7xSoQy9+gNOtpbU8To2cNyGwM4+X9abfalcX0sjyNhXP3R0A7Cqi/eH1o0C7e5q24HzDHerulWUd/rCW8udrbicHHRc/0qvbJzJ/vVo+H+PEcJH/TT/0E00hSdk2bv/CO6an3rct9Xb/GpU0nTk6WcX/Ahn+dbHyzDbjDenrVdk2Ng1aSOR1Jdyn/AGbY/wDPnb/9+x/hTlsLNelpAP8AtmP8KsUVVkRzy7jUjjThI1X2UYqeOUr8rZMZ6rn9R71FmjNFkHMxZVaPBDb0PRvX/Cmj5hxTkk2gqy7kPVc/r9aZInlEMp3I3Q/0PvSKbuL0pc01TmlxTJA0lKaSmAZpaTvS4pDOUcf8VD/29p/M11QzXMOMeIB/19p/M11JpI0qdBh5pMU40lVczExQBS/hQTkk4HXtQAdOlFFApAFA5opRQAoJ7Gn5zjPP1qMU8dRRYdziLhQNZuwOglf+ZplyPlj/AN7+lTXHOtXZHTzX/maZdrhY/wDf/pWR2LY6y1hilsLbfFG2Yk+8oPYUNYWZP/HrD/3wKfZf8g+2/wCuKfyFSmtLI5XJplF9JsZOtso/3SR/Kqz6BZNnb5i/Rv8AGtY00jNLlTGqkl1MJ/DJO4w3YPorp/XP9KrN4cvV6PC30Y/1Feg+HJ9HtZ531bawKhY0e3Min1PDDB/xro21LwaYyRb2ZbHANnKM/wDjxrGbUXblZtGpJ63PG4NCvVnUvCDGp3MQwPA56ZzVa7trx7h5Ht5BuPA2ngV27gBHmCbBMSETP3Vzn/634GoPeqVJPUHiZLQ4Mgg4I5o6V3UsUc67ZY1cf7QzVOXRrCUH9zsPqjEUOl2KWJXVHJZpc10L+G4SfkuJF/3gD/hVSXw9cKf3UqOPf5al02aKvB9TJDUuanm067gYh4HwP4lXI/MVXKlTyMVm423NVJPYfmhzmJx/smmUN/q2HtRYdynCuafKuAKdbrz+FSTrgL9a0sQti7EcQRj/AGB/Kn5qGNsRJz/CKcDWDWpqSdQRntSr8oxkn6nNMDU8GlYYpAYYYAj0NQyWcL/w7fpU2aKLtCM2TTnXPlsCPTOKmh1XVrEjFxKVH8LncP1q5SEZ7Zp819xcvY0bLxhGQEvYWRu7pyPyrftNWsL3/UXSM3908H8jXDyWkTj7u0+oqpLZGNSyMCBzjpUOnB+Qao9QyaVWrzix8R6jZbVWXzIl/gkGRj69a37PxlbyHbdW7RH+8h3D/H+dZyoSQrpnVBqcGqpb3dvdxh7eZJFP908/iO1TZxWLVgLAb3qRXqtupwaiwiyGp4aqwfmnh6VgLAYU7IqANUgakIkpRTFNSCkAuBS0etOxQAgpcUUUgCp7e8urQk21xLFk5OxyM/WocUtMD56kieJtsiMpwDgjseRTM4BFOkleQAMeFzgemajr2DAKUCkpwoAUV2Pw4GfEU3/Xs3/oS1xwrtPhqM+Ibj/r1b/0JamWwS2PYIF/d1zvjcf8Uxff7g/mK6WEfu/wrmv
HRK+Fb3H90f8AoQrne6MqT1PEnB64NRnpWlaaiLOG6TyEkaeIx7yeUBHOKoRRPPIsUYy7cAetdRuyOinyKF2jBDY+bPr/AJxTKYgpyfeH1ptOT74+tAG3bj5pPrWhoA/4qGL6Sf8AoJqlbj5pP96tHw+P+KiX/df+Rqo7kVPhZ17DNSBw42yDns3+NIRzTCK1aucN7Ehtz25pv2dj6imYpMVNmO6Jfs596T7M2e9R7RRgUWY7of8AZzTljKcEZU9VPQ1FSYoswuiTyhEQ33o+57j61Lsi/vR/mKgjcxk8BlYYZT0IqQRxh1kKs0JIDAHBHtn+tS7opNMGSMDO5SO+0jNRvFsPYg8gg9a6VbTwe0QLX+oq2MkeUpwfyrCmFtFdyQQytLbZ+SVk2n64qVK5TViuqKfT86eI19qf5Ef/AD2j/wC+qBbx5/1kf/fQqrhY4+TjxAB6XafzNdPXMuufEarkY+2rznj7xrsGtk/57Rf99ihOxU1exUIpMVb+zp/z2i/77FI9vGIyRNGSOQAw5p8xnylTFFKRSYqiQoFO3EIU4wSD0+v+NNoGFKKKKBB3p46iminDqKYzi251W6/66v8A+hGm3f3I/wDf/pT8f8TO5/66P/M0l4MJH/v/ANDWR2o62y/5B9t/1xT+QqTFMsh/xLrX/rin8hUhFaHHLcb+FFLijFAjUg1LT0hSOfRLaYqoBfzZFJ9+GqhIEubs/Z4VhR2wkYYkL+J5qPFWYcQwyTYBYjYmexPU/gP51Fraoq5DdsGnZVbdGnyIfUDgGoKcRSYq1oiW9QxRS4ooEJS0UCgAzTJIYphiWNH/AN5QakpKNx3sUZdFspuREYz6ocfpWbdeH3WN2hmVgAThxiuiFNm/1En+6f5VLhFmsas11OBthnt/DT7gfKn1othz/wABqS6H7tfr/SszrT0Bf9Wv0FLXR2+gWt1pltKGeOR4UYkcgkgHpVW48OXMXMLrMuP90/l/9es3TY414PS5jg04OQaJIZIXKSRsjDswxTKixtcmDg04NVfNKGNS0BZoqJZegqVSDUlCFajmX9zJ/umrGKjmH7iT/dP8qAMnT0V51DAEYPB+lXZrCNxlPkNVdOH+kD6GtbFaSbTIiroyPJubVw6FlKnIZD0rTsvFF/auBNJ56dxJ1/Pr/OnkVDLaxS8suD6ipbT+JD5Ox01n4o0+6AEjmBz2ccfmOK2I5UdA6OGU9CpyDXmkmmuuTGwb2PFNtr690yYtC7Rk8EY4P4dKh0U/hYtVuenh/eniT3rjLTxgSFW6twT/ABOhx+h/xrdttUt7tN0Mgb26EfhWMqclug3NkS8df1qRZR61jm6x3pPthHSs+ULG6sq+oqZZk7sPzrl5NS2nHU+1RG8nm4B2j260OI+U7JHVjgMCR2BqcDNYHh+Hif1+XJ/Ot0bl75HvWT0YrIo3+pR2eVVd8vYE7R+Z/pTbPUGuCvyxyA43GJidh7g5Azj1FaOULZaMbh0OM1IuwDCAAe1CkhWG7aXbT6Xbmi4rHzveQeTINuNrDIHpVatXUlPlRnJwGPH1/wD1VnKwR1YDOOcNzmvYhLmjcmtDkm0N2kYyCPSipZ5vPZTyMDAUnIUeg9qjAqjMceeQAAewrtfhqP8Aiop/+vRv/Q0rjGTY23IJHociu1+Ga58QXB9LVv8A0NKmWwp/Cz2CIfIPpXMeOxnwte/Qf+hCupiH7v8ACuW8eHHhW89wB/48Kwe6MaW54pIgVVbepLDOB2+tRhmXocfSnkZ54HNXLqw+wWqPO0LvOm6NY5QWj5HJA9Rmuk6CpcTRyiIJEI9qAHnO4+tQ9sYoABzlguBnnvUyBp0IATESFuTgkZ/XrTERMhU4OOPQ5oj++v1FSAIBl13ZyMA4xU1vatJGJVwx3Y2Lywxjkj0xmhBc1rcff+taPhznX8+ivVC2+6+fWtHw2M642OyP/OqW5FT4Wdg1MNSGmkVqcAzFGKfiqct9DFqMNif9bKCQew4P+FK40m9iwRSYqTFG2mIZjijFP20EUAMxT4yUbI/HI60KKo6veS2FmbiMD5CCcng+xqWXFNuw/UtQgsJLfP3ZXIIPGz8fyp0U8V0vmRSK6eoNchrus2+qx2zRKyOgYOrdBnHQ1L4WvCl81ux+WReB7j/JqFLWxu6XuX6nXbaUCn7aAK0MDlH/AOQ8P+vlf5muoxXLt/yH1/6+l/ma6k9aEaVOgzFGKcaMUGY3FJinYoxQAwikxUmKQigBtHpS4B5FLigBMU9aKUDFAHF4zqdz/wBdH/maS8+5H/v/ANDT1/5CFwf9t/8A0KmXmcRf7/8AQ1kdvQ66z/5B9t/1yT+QqQjmmWgxp9t/1yT+QqQ1qcb3GnrSUp5oANAkKBk4AyTU91iNlgGP3QwxHdj1/wAPwotfkdpj/wAshuH16D9SKgOaXUb2GmjFLijbTJEoxTiKMUANxRinAfzpdtAxoFNYqhXcwG44GT1NNN3BHceTI+184G7gHjPWsfVb5WleDeuwMCpHVTgZqJTSRrCk5M0b28FttKEPtyZEGMgY606O7iubNnDKpKnKlhmua81pNrONwGRu5z+dIsmwmRGVcrwuOvrWXtnc3+rqxTthz+FPuVyi/U0lqvT/AHf61LcjCqe2T/KqNDs9OkRdIsyzqP3CdT/sirakPGHUjaRmuat5QLaFD2jA/Sri3hWExADB79wKXtTF0OpYv7mPhGCSL0YMoIrGawt53ZR+6PY54/KrM6qT95XGB839KjVwGBPJ9azcrs2hHlWhQn0a7iXcieanUMnP6daoMjKcMCD6EYrrG1HMZCpg8fhUcj299KIp4lIA69CD7Gm1HoNTmviRy1KGI6Gti90WNMvbTgrnAVv8aypYJIX2uu0/zqGrGsZJ7CrcEdelPeVHgkAPJU8H6VX2ijZwaXKXch01czA+qmtcrWZpg/er9DWrSnuENhmKQgU8kCo2kVRyQKgoMVHIqMuHCke9RSXYHCjntnvURZ25JJqkmIhns4W/1Z2n9Kpss1udwJGOjA1o4qG6GIGq03sQ0SWWtXomiidxIjMF+ccjn1reWV3+834CuRtP+PyD/rov8666JazqxSY4PQmiT2q5ElQxLVyNawsU2dD4cjBW44/u/wBa2jF7VmeGUyLn/gP9a3zHWU46mDlqZ5iphjrQaKmGKs3EamUsOO9KJCOoP4VZMWaY0WKmw+ZM8Gv1zanpgMKy5ImjfY6kNwcfUZravJ3sRDLER5ivkZAPT2NZMkzzMrSNkgY6ds5r16PwmuK/iBcW4gKASxyErk7Odvtmoe9dP9nsx4TllRP9ICpuYrjOX4xn6Gsi7t4YbCweMHzZY2eQ5/22A/QVs1Y5FK5FcQxJDbvEwYsn7z5s4bJ4x24xXZfDFc61dH/p3I/8eWuGr0D4at5ut3jhFT/RwMKMDqP8KiWwqnws9WT7n4Vy3j3aPDFzvzt3LnHXG4V1SfdrkviB/wAird/Vf/QhWL3RhS3PFXxuOOnamHnmpCON3HXHWmVujrsNxxSqSpyKcQoXBB359eMYppA4wfrxTJ
Zotd/2nqKz6hIsasu0ukQ4444FdDD4Tktbh5jcRS2qRlg8bfe+U9vTNYFhol9fxl7eHcoI5JA69/pXXaNok2mWF5LcMdzRuoUHjAzzj14q4q5jOVtmYUAwG+uK0fDA/wCJ3IfRH/mKzoM4f61qeFhnW5v+ub/+hChbjn8LOuI5pMU8jmkx3rQ4TO1W+FjaFtwDN8qk+tcPc3091f8A2iR8OTjI4wMVt+ItQjun+zIcNGcgk9DyCCK5jdiQeuawlK7PQo07R1PTYJkmVQrqW2gsoPTIqXFcjoGqR2STecSSVL8+2AB+OTWzoF5LeWsjSkHEhC/z/rWqlc5Z0nG7NWsnX3u4bAyWrABfvYB3fhitikYAqQwBBGCPWmyIuzucfo3id1nWG/fdEf8AloRyv19RV7xJMsMDNBqTbZBgxYDgAj8xXL6wunx3LLYrOhVirpIBgH2Oaqw3kkUckWFaKT76kdfx7Vk30O1U03zIgPHTpViyupLS5SaIgOvIzVZsbjjOO2aAccUkzW11Y9G1fUxp+nK24GZgBgH25/nU2lXYnsLVpZB5siZAJ5bnGcV55c3kl04aRs4AAHYYAH9KW2u5Le4imQgtGcru5AqufUwdBctjo2H/ABPkPrdD+ZrqiK5GzZptQsXc5Z5lY/XrXYd6tGFXdDMUlPIopmZGRRinkUgoATFGKd+FH4UAMPWlxTutFFwGjrTsc0YFLimgOKT/AI/Z/wDeb+dNvekX+/8A0pYebuY+pb+dF6OIv9/+lZnb0OutB/oFt/1yT+QqUim2o/0OAf8ATNf5VIQau5xvcfa3EtnOs8JUSLkAsgYc8dCCKlvdQnvlQTLANp48uBIz+O0DNV8cdaTbzU6XuCJXIjtkjxhn+Zj7dh/M/jVfFP20baaYMbigCn4o207isNxRinbaMUrjsNxWZq7wmII1wYpF5GMjP4itbHrWfqV3awlFnjLZBIZRnHalJqxdNPmOXeWTILN5h7YbJxS+Yybnl5GenekdjI5eEBFJwq4xjHemIXkcfNwx6LwQK5GegkSFiVHl4OB1PGPwpWw0WT5ZI6ELjimsy+X06nHy0ZbYVzuKgjp0FJDZDa/wf7v9amn+4v1P8qjtR8qH/ZP86mnHCfU/yrp6GQ+G6IAVgGCpwR1wKuQzxyABHBOM47iqLiKNVTGJGUZ5yDQxGG3FMlcHP8Nc7eptY0iT2bPtTS2OSBWc128GU8xWKjOCP60sepqVBkUrz25oQWLxc888YpA5HfnFQrPFNzG4z6U/nnnNMLEwlwOv1qQSJJGUfDc9DVTdjPP6UqvkdOKBWJBp9vKX+dYmLYUDmq0mnSxMQAJB6pzVjdgD0qaOUqvynnt7UXDVHP2ZEZB6cYq0bgAE5PX0pmnwrPchH3bST93rVufRZsZjkRv9npVOF9RKajoyhJckn5cVXd2IJJzT5reWBtsiFTUZHyN9KOWxV7j4eUzgZzUwXFMtVzCfrVkJWctzREW2obsf6M5q5sqter/okn0/rQnqDWhnWYze2/8A10X+ddjEtcnZL/plt/10X+ddzBZStyQFHqxxRWZnDYWNDgVZjXmpIreGP/WSZPooq5Fc20GSijPqeTXM5DaNnw0BGlwXIUNtxu4z1reMkX/PRP8AvoVxh1QetJ/ag9ah3Zm6d3c7LfH/AM9E/wC+hRmP++v51xn9qL60f2mvrSsxezOyIQ9x+dNKL6j864t9aRJFTZIxY9VXgfU1J/aq+v6UuVlch5Jqk4mutqHKR8D3PeqI4p2KbXqRXKrBOTlJyZMbmVofKMr+WP4cnFR7icAkkDpTe1KKZJIBXoPwuTOo3xHXylGPxrgApGM9xmvaP2f7JJtV1i6cZMEUSqD6sW5/8dqJuyJmrxZ2Rs7mOHzHglVMfeKECuL8fc+Fbv6r/wChCvd7nBiIPIxXiPxOiWDRL6NBhdyED0+YVgndmVONpHh7DFDRBYVfIyRnAOfz9KV+tMNdSOhkkkUflGWORcFsCMnLAep4xUcSgyLubaM8nGcVNJHHFBFJHOryODvUA/L7c+1N83MKxBEGDuL45P1pkbnpeiXVrLp8TRzRnIAxwpHsR61oah/yDbn/AK5N/KuP8H2CNNJPMjb48bQy8YPeui127MGmuAwDP8mD3zWqehxOHv2RyEH8f1/pWn4VH/E6l943/wDQhWZCRl+v3v6VCLp7SXzIyQ24j9ai51OPMmj0l9qKWYgKBkk9qp6jdRQ6XJNvG10+Qg9cjjFRSTQ6toBJl27xt3dAGH/1645tTmFodOmI2xudpHbrxn0pylZGFOjd+hmyszylt2SSST60xSBKCxxj2pCcMQfSkJyQSOeuaxO+xIzk4Y9TwK6Pwom68LGYfKpxGBn6nPauTMnzcH860rDVZtPiYW5IeQEEn+lVHRmdSN42R6WOtQ3ySG0cxT+Q6jcHbG3j14PFcff+JpXtYUgcpKo/eB1wc+tZ6+JdRTyNsv8Aqk2EHkOM9xWjmcsaEr3KWoo7TNMTA5PLPA2QeeuO1UM+tTyvHICwUq2eg6VBxWZ2RjZC0UCjvQULSg0hpB1oE0dVpv8Ax/ab670/lXaMmCSPyrjNPwL/AE3P/PSP+VdszpIDhgcHGRz+Fao4ay1IjSYp4p4XNFzEgNAGaseWfw+tKI29P1ouFivikq15bf3ahkXDZ9aLjI6XtRjNGKYhBwafjNM5z0p4oQHDwDE7++f50Xn/ACx/3/6GlhH75j9f50XIz5Xs39Kk7Tsrcf6LD/uL/KpMVJZW0k8UUca5OwfyrXi8OXhVXcKinoTnBrGviqVFXqSsZUsPUqv3FcxcUYrp4vDUZxvlYn2q5F4XtiM7JH/GvNeeYb7N38jtWV1vtWXzOMxk0u2u6Xw3bIM/ZXP1U1J/Y9sn/Lqv/fNZvO49Kci1ld95o4LbRt9q7v8As21H/Luv/fNKNMtz0tV/74qf7c/6dP8Ar5Ff2Uv+fi/r5nBbaiuY5zARbsqv6tXoZ02FRzaLj/rnR/ZluRk2KEf9c6Tz1dab/r5FLKrP41/XzPE5ri8MjKZ8sDyQeDVYmVnGJACT36GvbW0fSSTusEz9SP61Xl8M6DP9/Tl/Bm/xrP8AtinfWLR0LANLRniwcybVLAdQOMZpSHOSGXA444IxXr7+CfDjf8uci/SU1Wl8A6C5ypuoz/suD/MVazag+4fUZnkcshMmxhlccYHeiNPLQqWyx644/CvUW+GuksSyahco56EoCBVab4ZyFT9m1SCQnHEkZX+RNbwzHDy+0ZSwlRdDzm3HyRj/AGf61LLyF+p/lXYf8Kz1uLaIpLSUKMfLLjPPuKq3XgPxFHgiwDYz92VP8a7o4ujJaSRg8PNdDk5CpTAbBAGT1qt9oclk3Ag8ZxWpe6Nq9lE6z2FxGc8tsJGPqKyljKybmUhh0B4oUovVMpwkt0WHZVQgYywyT1FVHRW+593+IelWiCilsncVxkDiq2wo6nI9TxTTRNhEt5POzn5fXOOKuG6aFMo/AOPn5prxuwLueNvY0ySFVT5WII5ye34UXuFi3HfRuo3kpnjmp
w2eUYY9RWSQphIJGWbAzxinRxhFKLvZieccYoEaoLf3gf1pSzEEEqB64qi8nloIwxJP3sn+tRq0GCGL7+2KAJ9LkEVwrtnHPQZroTOhXIP4YrlbYlVGCR9K1EuY1iGS39a3TMJRvqaDyxsMFciqM1lbTpIyoUIGTg9eRT9wK7h0NOjYbJf93+orRx0M1o9CrDp5SM7XBGeh4pTA6dUNXInAQ5OOaUzr7muacdTqjPQo+Wx/hb8qqX6FbSXIxgf1rV85mIAGB+dUdRGba4/z3qLWaL5roxrZzHJDIOqsCPzrqpruWK1Fwb22cEj92kylxkenWuTUfulPpVY3DVc4czIjJROp/tiQ9G/Oj+1XP8Vcr9pNL9pNT7JD50dR/arf3qT+1G/vVy/2k0faTR7JBzo6j+1H/vUv9pt/erlvtJ9aX7UfWj2SFzo6f+0z/eNH9qN/eNcv9qPrS/aGo9kh86IB2x1pCpBIIII7GrAtZAc5XP1pzW0juWO3JOeK3MisFz065pQOas/ZX9B+dAtZM9BSAjQV7v8AAKR5rnxFLIQWZbckgAd5OwrxJLVx1H617f8AAJCj6/nutv8AzkrKpsOWx7Fc/wCrP0rxX4pn/iWXg/3P/QhXtVyfkP0rxT4n4eyu1/3P/QhWC3RjHc8RkHJqIirr2r5OAMfWojaSe3511o3KtOQ7WB61P9kk/wBn86kFm+3tn61RJt+HtSvmnEKSbkJGd7dB7UuuX73OqNEsrNCjjCnscYNV9Elk06Z3Yrg4O3aDk/XtVfyZXuN7YJLZJz1p3IUVzXLUX8f+9/SqN30/4Ef51fUBck8bm4/KqN5xt+p/nQ2Ulqa9nqBXw5cWqsRJG+4496wJJC7ZZuR7dauW06qs0ZPEqY6dD2qhIhD471F7lxjZsPMOzJPOfSgrhd2eewpqAE4Y80+U5xjPTpmkWVuuOTU8jtM4baFG0AKowAAKYiqWO7OMdvWrMMBYktnYgy59P89KolkE00lxJvldmYADJOSahNXr2XzHBW2WKMDCgD+vc1ToBCUmeaO9KAKADNLijFLSGGKTFSIMuo9TU0y8jFFwaNgxvILOOM7XZkVTnGD0612CyWel20cDyqnHAIOWPc1x12r/AGa3KjnIx9cVatdNVLoPqlzsAG/aDuLY7Z7VdzmnDmOyjIkjV1PysAQfapUz0P4Vjw+JLCWWdVykMSKUJXr1z/SoT4rtfsssgRvND7UQ/wAXoSaLnP7KXY6LB9KMf7IqnZ3UlxaxyFslhnp/9YVP5j+v6U7Gb0JlI/u0y4xheKarNnkn16USuWC5NCAhopcUYNMQmPelA5oqvcX0NuvXc3YChsaTexydpC0kpCgk4qzLFGMAqZJFOQA2F/PvWlb6dc3p2wxCG3/vngf4k12Wh+DYoVSe6LZPIyBvb8P4R+tc1bEU6MeabsjuhCU5csVdnK6Z4e1HX5le4j8uFByxU4A9BuyR+FegWljo/hPSJXM/kWmAzyTNjew7Knc/QE1D4j8Wad4WgMO0S3uP3doh4XPdj2+nU15XrOqah4ovzeagOnEdvFnZGvoB/Pua4lGtjVyzXLB9Or9eyOn3KDutZfgjoNW+JOqX7yW+gQiztweJyP3pHuTwv0HPvXO3Oo+JL4lrjXrls/wtdSEflUASdVCrbuAOwQ0uy5P/ACwk/wC+DXoUsNClHlgrIxlV5neTuVWsrx23PeKWPfk05bK5Uf8AH6R9Mj+tWPLuv+eMn/fJpDHcj/ljL/3ya05GLnRH5F4P+YhIPxP+NN8i9z/yEGP1ZqeVuM/6iX/vg0hEo6xsPqpo5GPnQJ/akZymouv+7Iwqymo6/GP3et3a/S6kFUy7DrSeaaPZD5zUTxD4qTga7d497lj/ADp//CU+LgQf7Vkkx/fKN/6EKyDMfSmmc0nRi90NTZ0MXj/xXAwErJOB2NvGR+aqKtn4p60G/eaVp+B2Ecq/+z1yfnmk88+prKWBoy3ivuLVecdmd1Y/FWMsRqOmke8Eo4/Bv8a14viP4fmxuN1Dn+/GDj/vkmvLTNkYPIPY0wrA/wB6JPwGP5VyzybCy+zY1jjqq6nttl4l0W/GbfVLZj/dd9h/JsVqrMwAKSHB6YPFeH+H9Ej1PVFgSUxKiSTMxOQAiliPxxj8a9xcgW9sgABWFQQBjHf+teDmuX08JBTpyZ34TFyrT5JIX7TKP4s/UVWubaxvR/pen2kxPUtEM/nT6SvFhiqsdpHounF7oybjwl4dueVsmtnxjdA5GPwPH6VhX/w1tZIiNO1FgxPK3IGPzArsjTa66ea14dbmcsLTl0PMNR8C+IrYF4rdblFGAbZwxP8AwHr+lctcQywO8VwHWUcEOMEfhXvKyMhBViCPSuU16ytv7Rme7QNHMd0bmMMAO49sGvfy7H/Wrxa1R5mMoKglLozypI2VwVcsPpUzzBIwuTgEDArrJPDVrcljZzlfYHI/I4NYl14b1GJm2wiUKM4Q4P5HBr1L9zhUk9jGkHnOVUtnOST3qQtGqHO7IOMU8WzBz5gcY6hh0/CmyhHLYJViemOtUgsNjYJGpOce1SJLhhk4BOcdQajU4iHAPtQ0bNjavOegP9K1urk20NCO4Mqk7h+FBZvU1FbqRCMjB6HNS4roi7o55KzJYf8AVn61KBTYB+6P1qZRxWMtzWOwgXLD61W1FP8ARrj6GroHzD61DqKj7LcfQ1jLdGkdjmgP3C1mmtYD9wPrWSa1IY2iiimSIelJQelJQIWkNGaKYgqUGoqkFJlRPdR4d0n/AKB1t/36FOHhvR886dbf9+xWmKcK4uZnPzMzP+Eb0f8A6B1v/wB8Cl/4RrR/+gdb/wDfArVFKKnnY7syv+Eb0cD/AJB8H/fArtPh3ptnYSakbW3SIuI920Yzjdj+dYOOK6nwRw999E/9mqeZ3LTOmufuH6V5br9pb3+sTW9zGJYioJVuhr1K6OEP0rzPUv8AkYp/9wU2yVuYbeF9G/6B8P5VXl8NaMP+YfD+VdCe9VpgMUcz7lJsxo/DWjE/8g+H8qtx+F9F/wCgdD+VWoqvR0cz7jbM9PCWhnrp0P61YXwhoI/5hsX61px1YFHO+5F2eUePNMs9L1S0SygWFGjBIXucnmuJveCn416B8Sz/AMTuxHbyf/ZjXn98OUrrpO8dTojsischcg/jSsVLBuc96iJKkihWxnPcVRqLgbyR0prMD1qRSuAew6g0m1Tg4zzQFiewihecGdmWNQWO0ZzjtSvMzL5ca7VOCQO5qeOzLWsUkb4MpIYlsAAdjV62tY7fTHumA81mKoDzkd/wpmdzJuI5Ys7jt3KDtJ6g+lUzVmbG87TkHoPT2qEigtLQixzTsU/bRii4WG44pQKdtzTttK47CIMSJ9asyDLCq6KfOT61cZfnFS2O2h23i+wtrTQLdoYwpFztyPTyxXCyzyy7d7khRtA9BXoPjY58NW59bz/2mK87Ip3M6auhuferMZS3YGVPMYEHy84/P/Cq2KcASc00y3E7TR9Ya6YI45J4AwqIPqetdCoBGR0riNAsmkuFle2MqZxu
Y4Vf8a7VGLIGVht7YFaJnnV4JS0EuB+5NPI4HsKikLlDk8Z9KQyleWbjPpRcy5dCXFRSzJCDuIyB09PrVd7p5spbjAH3mJxj8e386sabos+oEzHAhU4aaQYVfoO5/X6VMp22LVNWvJ2RRM1xeyLDAp+bgYHJ+gra0/w7HHIDd757hjxEDxn3Pf6Vs6ZpYy0Gmxc4/eXL9cfXt9K6FYrDQNOe6upljgQEyXMnVj/dUevsK48RiFTfLvJ7I2owlVfuaR7kGnaKluElmCvKq5CYASIDv6f0rkvE/wARUtpnsPDzJPOB+8vTyqn0T/4r8qwvFXja88UKbGyR7LSc5z/y0m/3sHp7dPrWn4J8GadfWy3d/LIqknyY0x26sx+tZ0MJKc/a13eX4L0OmdWNKHLDRfizlLPSJLmU3N3JIxbks5yzE9evStqO3hgKJEgVeSfU8VvarpEVnHJNDNvReV+YMCM+3SsCScRsDgnrXrKKijznUlUZMRSEA02KdJhkH8KkNO5nZrcbtBqvPMbeVWAyCCOaW5u1gyvIYjINZst4ZYwrdR0PcVnKpbQ6KVJvV7GtDI7rl+/IA61LsHesYah5SlQDu45q5FqkbEB1Kj1ojNEzpSvoi2yKRggEe9QtZ2zD5oIj9UFOju4pIw+duSQM0l3II42TcVkI49605kZcsr2KM9rpyk7oEznHyjHNY8tvb7uEI/HrVp53djuAJ6E460ht5FhW4dD5e8puxwTjOP1rNzZ1wjyrUz2tY8A7mBP6Uz7FuUFZO+PmFX5F3OMHANNK4wExnNNTZqZz2kqReYcFR1welQc1oXTFbULnl25+gqhitE9AR1ngW2M11dOQ+CiwKQONzuox/wB8hq9ZlYPKzAYBOQPSuI+HlkIbK3mlVh5sk1yAehEaeWp/77c/lXZmvkOJKl5Qh6s9bKo/FP5BTc80Zppr5c9pCk5ppoJpDTKQlRyxxzIUkQOp6hhkU880lXCUoPmi7MbipKzMW58NWkxLQO8D+3I/Ks2TT9ZtB8yrdxDsPm/Tr+VdXS17GHzvEU9KnvLz/wAzzq2VUJ6x91+RxjXFlc/ur62Knphl3Y/PkfnVKfwvpd8+60laNvRDu/8AHTz+tdzPawXClZokcf7S5rIuPDVux3W0jwn0PzCvYoZvhKnxXg/vR59XLMTT+B8y+44Wbwrf22WiCzqpzhD82P8AdPNYk6vatsIZXU8hlwa9N+x6xZMApW4jHY4b9DUU9xZ3B8vULBkJ4IaPePybkfga9KM1UV6clJeRxvmp6VItHmqXBYgEnGeQT+tWo97LuYYyePpXVTeEbG6lL6dchCckIOf/AB08/wA6ypPD2o2au0kRkAONyZOB7jqPyrSFWzs9BSUZLQrwD9yfrUyjimwphGHfPSpQtaNkpAq/MPrUWor/AKJcH2NWVHI+tQaiP9EuB/smspbouJzK824/Gsg81sIv+jD6msgjFbEtDKKdikIpkjOtGKU0lBNhKKdRigBtSgYqLvU1JjifRA6U8UxaetcRyodThyKSnAcVIxK6rwT9+++if+zVyxHFdT4J+/f/AET/ANmpItHSXf3D9K801D/kYJ/9z/CvS7v/AFZ+leaXxzr9z7J/hQ2CWo0iqs1WzVSbrUjSGR9aux9KpxirsYNHMkU02Wo6nFQRj0qyqn+6fyrJ1qa3kvvGqNR7RZ5j8ShnW7H/AK4/+zGuCvRllH+zXq3jTwzqmtatazWVr5kSRbWYuq4OT2Jz6Vgt8MtcudpzaxYGDvkP9Aa3hjcPGNnNfedVOhU00POzGc0nlGvSE+E+sHh7qzH0Zj/Spx8JL/8Ai1G2H0VjUPM8Mvto6Pq8ux5iI29KcIzjGK9Vt/hI+T9o1RQP+mcWf5mtKH4U6OgHn3d/Ie5Qoo/9BNQ81w6+0P6vLsePRF4iCvbseRV2C88q1aJwXznaD0GetevL8MvDidRfP/vTqP5LUyfDzw0ow2nzP7m6b+gqP7Yw/f8AAHhZPoeFvEu75M496aIj6V7wPh/4YHTSm/G6f/GpY/BHh6E/JpEB/wB+SRv/AGaoec0FtctYWR4KLcntTvspNfQaeG9Hj6aJph/3oSf5mpF0PTFPy6Npi/S2H+NZvO6PRMpYVnzwLfFO+zs3RSfwr6LGnWaLhdM08f8AbuKcLWJTlbKxB9oBWbzyn2KWFPnJLdhKnynr6Vb+zuz9K+hRGynIgtR9Iv8A69O3Tdo7cf8AAP8A69Q89h/L/X3D+qeZ5h4ttZp/DdskcTuwvCSFUkj5BXFDQ9Tf7mnXbfSFj/SvoMvc/wB2H/vk/wCNHm3fYQ/981Dz3tEVPAqKtc+fv+Ed1c9NKvf/AAHf/CoLjSb+xjD3VlcwITtDSxMoJ9MkV9DGS7P8UY/CsrWtIl1q3itrpw9uj7ygbGT0Bzj3P51dPPE5WktC3gl0Z4/b2l/BFDLO0/kPgrGrn5l/pW6byfVrf7FZWxg3AhnZ87gB0UhRz2xyea7RfCFmgjzEXEa7VDyEgDJOMemSa0YtOMKqqRIqr0C4AFa1M9gv4abJhlies5I4WG11e2s5Hv4J2fcWZ5EPAPTPHJ9hSpY3dwymSKbJ5EYjIJHuewr0FCqkqMkqcHapOD6cU8swAxA7jPsMfmayWfS6w/EyllUekjkLPS44wGuxu2/dgQ8D6kfyrptP0i51FElm/cWK8LgYBHoo71ejMjZ87bt/hQDge/vVoXMqqqh2AUYUA9BWk+IKcU1GGpyf2PNyvKV0Q6zrOk+FNIW4uwY4zn7Nax/fnYdefTpk+9eP6trV/wCL9US61JngtVOIrePOyJfXHf8Ar+levXUUN8ytdxR3BUYUzKHx9M9KqyaRpsv37G2/CJR/SufD5xh6bvKLbe70udUsDNq0Wl5Hks9ukcreRl4gflYjGR9K6rw1bRNolzeTsEW3mVXLjIZGHKr/ALXGa7ZvC2nPas7abAygcLGg3n6beayfE2jy/wDCP2VnpemSRxI7M+UK7XPRuDzxxk56178cRHEU/gaXmcHI6U/iTKevQaXbafvt3XzpVAjjVh93qSQO3FcfKNzDIyOc4ppFzZyNbyQRq8ZKuoHIPfpWtpj6PczrHepdQuejiVdpP4rxUU8RSw9Plcm/xNamDr1ZcyjYqRxrAu9jgY5z2quNUt9xVgw98V283hPTJkK+bdhT6SLz/wCO1TPgLSTnE15n/fX/AOJrnee4Xo/wIjlVV6zX4nD3t2J8qMFB0YVQZ8ZPPvXobeANP/gurgf7wB/wqJvAFsel/IP+2Q/xqP7YwsteY6I4GrFWSOBJ39zx6+lITtBBzjPFd4fAEXQX7fjF/wDXpv8AwgQU5W+z9Y//AK9XHNsL/ODwVbscUrsI+pApXnkdBuJO08Z7V2X/AAgkvA+2xfihpp8CXCvuS9gB9drf4Vqszwv/AD8Rk8HW/lOPUo6gYw2cnB61t6jZSQ+G9MlEbBJHkZ2PQscbePoP1rQHw+ui4P222A7nL5/9BrQ1Dw1q+oKiy3dsVTou5sZ6Z+76YraOZYS38RfeYzwdfmVos4Eggc9
PWmkEkAZOelda/gXUz0ltT/wNv8KI/AWobstcWq46ZLH+lNZjhP8An4vvK+rVv5WcHcn97t/ujFRou5uld9H8NZS5M+pxjPOY4y388VsWPgHR7Vlec3Fyw6hnCqfwAz+tRUznBwXx39DWGCrv7Jd8LweXpoyjL9mgjtUz05HnSfjucD8K2Cailt1WECBShjZpFAY8seTnPrU6hXUMpyrDII7ivkM0xcMVW9pDbY9nBUXQpcstyM0lTeWKXy1rzbnXzIrGkq15a0mwUcw+dFbmkwc1a2rSFVp3HzlXBpMVa2LSFV9Kdx85WpM1ZKKaaY0JppjU0V+tDKrjDKGHoRmp/KX1pPKHrVxm4u6Y24vczpNNsnOTbqD7EinLbKihVkk2joGbdj8+n4VdMPuKb5J7V2Qx+JjopswlhcPLeKMy40uK6j/fxQSydm2lCR7sMk1nN4SSQjypGjJHch1B+vB/Suk8l/7ppu2RTwCK7qebYhKzs/kYSy3DyemnzOLufDOp2gDeR5q9cxc4/DrWDqSMlvcqwIIB616vHcXEXTke4pl3b2GqQtFf6fGyuMMyjDfn1r0KWaqVudHNUy1r4GeDon+jD6msgrXtV78NtNuedNv3t1x/q5xuGfr1/nXEar8OPEOmb2+xm6iUZ8y2O8Y9cdf0r1aeKpVPhZwTw1SG6OJK00ir0trJGxDoykdQRjFQNHXQpI53Blc0mKlK00rVXJcSOinbaMUXJaGYqUUypMUMcUfQ4GKlWmCniuJnEhwHNOpAKcKhspIQ9K6nwT1vvon/ALNXMbSxwoyTW7ov2qxjm8sqhmxknkgDPT865q2Jp0dZnRSoyqbHV30iRxEu6qMdziuAl0+efV7idAvlMAFOeT0ro/LjZt80pdvUnJqZZrSLopNedPMJyfu2S8zsjhYR3uzFt9BeZgCWJJ7cVYk8MpG+HjJ+rGty31e3iIAhA96dd63G3+rUfUiqdSDpc0q2vkNQkp2jT0MWPRYox8sCD6jNTLpoH8I/Knyaq7dwPoKgbUWP8VebKpTe8mzsjCr2SJhZIvak+zxjsKrG+J70n2vPWseamWoVOpZMaDtTSgqNJC5471citZJDwCa0hDn+FCk+T4mV9lGz2q7JbrCMuwHtVKWdFJC80qkFD4hRlzbCEY7U0iomufaozOT2rmc4myhIlOKbkVCZs00yVHMjRQZPkUuRVXzaUTD1ppofIy9DC08gSNcsegpt/H/Z+43JVAi72JPAHrUEV68DB43KsO4qpfy/2jFNFcsXWZCj89QRg1rejyWd+a/yt/mQqdTn8jMfxv4bQkHU049I3P8ASnx+L9ClXcl5lfXy2/wrwi+ge0vp7dxh4pGQ/UHFdn4NtbfXL4RS7hHFFvMYOM4IGM+nNexiMroU6ftE3b+vIVKSlJxkej/8JTo//P3/AOQ2/wAKnh1yxuBmF5ZB6rC5/pSQafZ22PJtYUx0IQZ/Or8Yya8OTo/ZT+83aiiOO5SX7qyfjGw/mKlzUzsI4duOW/lVepcEiE7i0hoppNLlKQHpVSaRpZfs0LEMRmR1P3F/xPb86luJvIiL7S7cBVXGWJ4A5pLaIxQgMQZGO5yO7H/OB7CtYxsuZj8iSONIowkahVHQCloorCTuxhiigmkzUNDFzRmkzSZpWCwpAPUD8qY0UToUaNSp6jHFOzSZrSNWpHRSf3i5E+hRfRtOfO60jJJySc5P41UufC+m3DqwR4iv/PNuv55rZzSVSxFVO6kygiRYokjQfKoCjJ7U7PtTaKx1eoWHZpOKTNNzQOw/ijimZozQFh/FHHpTM0ZoCw/ijNMzRmgLD80vWo84qvf6lBp9uHdJmcngRxFxj8Oa2oUZVp8kXr5kVJci5mW8UYrlR490jJBuFUjs0cg/9lq9p3iax1Sbyra8ty2MtuWQAD3JXA/Gu95RiV2+8w+t013+42jxUds+5ZEIP7t9oJ7jAb+uPwpsc6zxCRPuknH4HH9KdDxFIcctMfy2r/8AXrz3Bx5oy3R0pqUU11Js00tTSaQmsbFJDi1ITTM0madirDiaQmkzSUJDsLuo3UYpNppgGaM0u00m007j0G5o3UFTTcUxodmkzSUmaY7Dg1O3kdGP51HmlzVJtA0SCZx3B+ozS+bnqiH/AIDUWaM1oqs11Fyol8xe8Y/AmpEnVOQ0i/Q5qtmjNWsTNCcEyW9tNM1aExahbQXKkYDSx/MPow5H4GuP1P4YaPdIzaddy2svZZCHQ/jww/WurozXXTzStA554SnI8c1b4e69phJ+x/aogM+ZbfvB+XUfiK5WSBo3KMpDDggjBFfRwYqcgkH2qG5tbW9B+120NxkYPmxhv516NLPrfxI/ccs8tT+FnzmYj6U0p7V7heeBfD14zN9jaBiOsEhXH4HI/SuevfhbEWLWWpsFPRJ4s4/4ED/SvQp5xhp7u3qcs8vqrZXPLdvNPxXX3/w61y0+aKKK7Qd4H5/I4Nc5d2E9lM0M8MkUi9VdSDXoU8RTq/BJM5JUZw+JWPomDS7uYxBYWBlGUDDBYYzxmtODwvevzIFj9iea4G8129vvGGn6ikc0k6yKDb79xUjBIUD159a7zwt4ing1YaFf2riaQySmVjtIyxPKknAxjofwrhdRt2OP2RpweE4/+WszH6Vd/sDTrSNpZR8qjJLnirun6lb3lm8yXCSLG7K7hSgGD71yeta62oTeXGcWyH5f9r3NcmJxcaMLvfodWFwcq8+VbLcmu76Fn8u1RY4AewwW9zVY3hxgGsvzSaXzK+Yq1KlWXPJn0lPCQpx5Yo0DdsepppuCe9UDMBSfaAPWs7M0VHyNDzz60hnPrVD7SPekNyvvRaQ/Y+ReMx9aTzjVL7UnvSfao/U0csh+xfYueec1LC7yuABVKNhKfl5rVgCWsRlkIAA6mjW9jOpaKtbU19Ojjhw8xH0q1ea3DCpHmRwJ79a878ReOLTSowryNGWHygLmRvcDsPc15pqvxAv7vzUtUWBWbIlOWkx9en6V7uCoYmdPlgrLueZVo01LmrPXse36jr1nFGJPOLjuzfIo/E1zNz440yEsGvrNMdhJvP6V4bdahdXkhe4nklY93Ymq5ya7lksJO9SX3f8ABuJYuEFaMfvPZ5viNpI6aiG9kgb+oqD/AIWRpROPts3/AH4rx7BpcGtVkmGXf+vkH9oy6RX3HsifEbSCfmv2H+9A39BV2DxzpE/3dStjns2U/mK8NxSc1Msiw72b/D/IFmUusUfRVrq1veLuhkjkX1icMP0qx5yt91hXzhDczWzh4ZXjcdGRiDXUaV4/1WxUR3Gy7jz1lzvA9AR/XNcVfIpx1pSv6nRTx9KTtJWPZ/NI60u7Ncxoniuw1sGOBys4GTDJ97Ht61trPtPXivDrYedOXLNWZ6EVGa5oO5xPjvwzNNdDU7C3aQMMTLGMnP8AexWJ4PkudM8T2geOSMSuI2VlIyDxz+h/CvXEZHHD4P1qUIM58zJ/CvQpZnKND2NSN1axx1MOufmTsycDAyWX8CKtRRknpkDk49KpgsP+Wn6CpBNKAR5xweDziv
Lio31HKMnsPkcu5NNzTOvej8avcaVhc0hNIT70ySQIjMeQoJwKaKSIOJ7373ywYOAf4iD1+g/nVvNVbOPyYPnIMjku5HqTn9On4VYzVVN7LoJIdmkJpuaM1jYdhaDTSaN1Kw7BmjNJmjNTYdgzRmjNJmiwC0ZpM0maLDFJopM0ZpWAM0GjNJRYYtGaTNGaVgFzSZozSZosFhc0ZpM0ZosFhc0lLmkNMY1o43Pzxo3+8Aah8uyLlIooiSPmKR8H8cYNT00r827JzjHJNdFOpBU5Kd79O3zM5RlzJxtbqNCrGgRFCqOgAwKkRybWEE8/MfwLsR+mKikOyNnx90E1JHH5UMUWd3loqZPfAAz+lZp+42+pbWqFzmkNLSGsikNooqe2t2ncBRVJXCTUVdkaoWOAKlW2buK3f7L+yW/mygBcZJPasW81a2gVmU7gvVjwo/E1rOhOLSktTmhXdV/uxwgAoMY9K4/UfiJpdszILpXYdoELfr0rn7n4oQjiG1uZf9+UJ/IGuinleJqbRG5wj8U0enFRTSFHcV5DN8S7pmylhGB6PKzfyxUX/Cyb8f8ALhafm/8A8VXSsjxPb8SfrNBfa/A9gIU9CKaUryH/AIWXfZ50+1x7F/8AGrUXxPIAEmmN7lLj+hFN5JilsvxGsXQ/m/A9QKVGwxXE2nxK0uQgSfaYf99AwH5c10djr9hqQzbXMM2BkiNuR9Qea5qmBr0tZxZ006kJ/DJM0c0oNRiRXGVNOU5rmcbGzQ6ilpagkbRS4FIetABmkzRSGqGG40u6m0UWHYdmjNNpKLBYfmopooriMxzxpLGeqyKGB/A06msaqLcXdByp6M5Pw5dS2cc+qQxW8V3GWaFjIFEsQyWGzsSOc8dOKs2dvfeItXlvBHviY7/KhkG446KMsPxOe9cRp/2MaHczXEdy90JVCsqfu4o8ZJJ7kttXHpmvUrPVrTW/Bq2NtDdNd29pvR7e1kI81cbRkDAP6V9tKifHxaRu317DHax6fpcItrNF+ZEGNx75rKKt6GsdNbnsBENSaS2uHTeVKlSOSOfy/WrsXiiN/uag3/fRr5bE068qjdRH1GGhCFJKjaxcEbnopP0FNk3R/eBX61YtNeJkXfqRSPuwbOPwzUF3qjXMjD7Z52DjIfNYulZXNoym52aX9fIgaTnrQMt90E1A8hY8sT+NV5L3UrWeM6dcSQllYSGM4J6Y/rVUaKnPlNqsnCHMi8dwppJ9aj8zxlJB5sdxeumcHjn8sU118aAKHe+G7pjH8x0rq+pLu/uOVYt/3fv/AOASk+9IoLMBWLe6jrK3Vpb6jdXTKblAUl7Ehv6A10VlFvkFYYmj7FLXc3o4jni2+ho2UKwwmV8ADnJrivG3jcWEb2lpIGuzwF6iIep9/atfxd4hi0TTySNxXCoo/ifsPoK8LuLiS4nkmlcvJIxZmPUk125Tl6rP21RadPM8vGYn2a0+J/gv8yS5vbi8maa4meWRurO2TUOSTT7W2mvLhIIELyOcACvSfDnhKx063Goagwdk5LsAQp9EHf6n/wCtX1SSWiPCnVe7OV0fwVr2tRmW2sWWHGfMmIjU/TPX8K6AeA9MsbcNqeuKs+OYoIt2D6ZJ/pWpq/jK6v7hbHSY2AJwAp5Pbk/lWJPHZWFzG2o3rTTLc/vYogGUxhQwYHPOSQOcd6UppEJTn5EL6ToKMEiN1M/puHP4AVUmtdKjYobdkYdQzkEVZstVuJ713sNHWaaSLy3WNCc/NnO1QAOwI6HFSXXhTxRqtwbs6NKNygAABcAAAcE+goU3fVFcqW7Mk2WmueGkH0Yf4UxtFt5FzFdc+jLTr3QdX0zJvNOuYQO7RnH5jiqsc7KQa0ugt2ZXm0m5iBYKrAd1YVSdGjOGUqfQjFdHFc7xhuabMsUqFJY1bd0buo9qGCbRz8NxNbzLLBK8cinKsjEEfjXpvg/xeNSRdP1GQC8HEch/5aj39/515vd6fLagSfehJwHA7+lV43ZHDKxDA5BHauPF4SniIcst+/Y7MNip0ZXWx7/uKtj8qduPqa5/w1ry6/pKyMNt1BhZRnqcfe/GttHBFfHVaMqc3CS1R9TTnGpBTj1Jdx9TSbm/vH86bupC1ZWNLD/Mb+8fzp80wt9Pe5ZizbtqRh8Fj37Hp9KrlhmqmoQm5hj8v5ZomLI4OCpOOh/AV04RUvar22xzYtVPZP2W5bjuzMu5d6A5wrn5sA46UTO7QOoPJUgZNVIBcNNHJO5JRCpLOWLk45Pp90cVYdgRRiI041X7LYrD+0dNe1Wo+KUyQRyK5IdQ2c+op4dv7x/OqdpIdksZ3ZSQjnuDyMe3OPwqfdWVSNpM2VrE3mN/eP50b2/vH86h3Uu6osOyJfMb+8fzo8xv7x/Ood1G6jlCyJvNf++350vmyf32/OoN1Luo5QsibzX/AL7fnR5r/wB9vzqHdRupWCyJ/Nk/vt+dJ5sn99vzqHdS7hRYLIl86Qf8tG/Ol8+X/no3/fRqLIoyKLBZEvnS/wDPRvzNHnS/89H/AO+jUW6l3UrByol86X/no350edL/AM9G/Oot1GaLByol86X/AJ6N+dHnS/8APRvzqLNLmiwuVdiTz5f+ejfnR58399vzqPNGaLByrsSfaJv+eho+0Tf89GqLcKQmjlQcsexN9pm/56Gj7TN/fNQk0m6jlQ+WPYdcXcohI3t85VOO2SBn9an+1Tf3zVNiDNFx0Jb9CP61LmrlFcqRPKr7E32qb++aQ3U3979KizSdTUcqK5I9i3byTTShQc/hXS2txDpsPmygFv4V/vGsWxRLeIyydhmuD8e+L3hZ9OtWIuHH7xwf9Wp/hHua1wlCdaso00ebjHBp82kV+Js+M/ii6F7W0lSeXoyqT5cf/wAUa8m1HXNR1Rybu7lkHULnCj6DpVBnLHrmk2k19jQwcKer1l3e54k67tyx0XZCE5pMVcW0YJl/lHv1rUttBmPzT7LaNWAdpWAZf+Ak5/yK63yx3MNWYAQnsaDGfSui+y6NEreffTyEOQFijC5XsQeR6+n41inrQpJ7Cd0VChHakK1c2g9Sad5MBGCGz65qtBGdzT4bme3lEkMrxuvRkYgj8RVx7OP+FyPrzVWW2kjG7G5fUdqGkJSaeh1/h/x5c2rpBqbNNDn/AF38a/X1H616Xa3yTxJLG6ujDIZTkMPavn0Eius8GeITp18LO4c/ZZzgEniN+x+nrXi5hlkZxdSkrPt3PXwOYNNU6ruu56y2ohWwFz+NJ/af/TP9azLh9lwAeN/8+9M3cda+d9jE9tqJqHVcf8s/1pP7VH/PL9ayi9ML0/Yx7BaJr/2sv/PI/nR/ayf88z+dYpemmSmqEewtDb/taP8A55n86P7Yh7o1YXmU1pBVLDxFdHQjV7Y9Q/5Uf2vag4xJn/drDsbuG3v4JZyBGjhmyMjANcxPdalNKXfU23HqRM/9K6qGX06iu3Ywq11B2sehNq9qOvmD/gFNOsWX99v++TXnO7VM5GoSvj/pvJz+YrSs5J1t2NzIWIyctnI/E9a0qZZTirpkwxKbtY2b3
R/EyrCljYIvmL5k8nyE7+4AGdoHQYGfetLTdBvDHjUr2+LnqoJCj9c0y3sI42WRri5ZhyrGY5H0I5FXDql9plpJM04u4Y1LlZx85x2Djp+INfQQraWaPjqkNfdZoQ+GdCLKZ1nkYdyCR+prpIND8HJApNpH5gHUqf5CvPIPGV/rC2r6RbWStITvtbnLSYH3mQhgHx3XAI9x0t6h4j1OxYH7NZvE33X8tx+B+eipOEdZIujGtJ8sH+J6XLZ+GDprReVZBWXB/dcn6jrXn+o+GdAeR/IuI4fTybWVf1FYr+M9QI/49rP/AL4f/wCKqBvFt+T/AMe1l/3w/wD8XWf1mjtb8DqWDxd7p/iMniudEm8x79prQuE2zRydCf4WZQMjrjNaZcMuQxwR2OK5fU7641SRGuCpCZKIowq59BWlpl0ZLfy2+9GADn07V42OjByVSkrH0OA9pGHJWdzVDPji4uVHos7j+tDB2XBurwj/AK+X/wAar+ZxR5prh56v8zO32dP+VfcSfZlllhaaa4m8lt8ayTMwU+oya6G2YQW7SkfdXNc/DJucZ9a1tQl8nTv94gf1/pXPWc6koqTuEoxUbRVjyrx/qbXOsrbCQssKZcdt55P6YrkURpJFRAWZjgAdyas6ndm91K5uW6ySFq1PDFk0txJeEgLAMLkZyx9PoMmvtcNSVKlGC6I+TxVXnqyl5nT+HdJi06zczeX5h5mcjJH+yD/n1pbm8udbvY9Ot5lhg3KrOxwkalgoJ/EgfU1Dql39nt1gA6jJI9abe2seg+HILue3uYtTu8NBIwBjaM87lI46EcHkHBrSTa0W7OaK6sgubgaLHb21hDJFq486C7J+cOC2F+U9CMfoD1re0XwZBBFDqPiCb5nbc0D9B357sfYfjR4W0UaXaf23qYbzpOUVh86g89ezMO/Ye54ztZ1e61LUpLSBMysdrsDwo/uj0/z3qYx6IUp9jfu/Gmk6LB5VhDbRNvI2RoOnvgYH61lL8Sbm5u1gh0+3mBOFL7gc/T/61MsfhLq+q3ETxSgW0qgiZ0xg9wRn9e9dhpvwu0Tw7qNrcXmvRvcRnJheMbScfXOK2UEtzCUooxrfxrOjAajZeXEx567cfgAf1NTz6J4e8UWwaDyLW7f7rRfLk/XofocGup1DwzA8Hn20lpcx87zCmAPwya8p8SXkGiaxtsAPMCgy4+4QedvvxjmplTW8WKnUu7GVqmj3+h3hgvIiuc7HA+Vx6ioVkLLg16Jp99B460JtPumAuVXfFL1YEcc+4yM+oNefXFpNZXc1rOu2WJyjD3FEZX0Z1RdyNSCTHIqsjcENWNdwfZ5ygJKHlCRjI7VrvwRVK9UyxgjGByAB3psoveENXOla7CXk228x8uUdsHofwOK9aJ8t8Z4PIrwUHmvYtB1E6p4YtblsebEPLf6rxn8Rg/jXg5xh/hqr0f6HuZTX3pP1Nnfz1oL+9VBLQZa8NwPa5iyW96bvquZaTzKOQXMWd/0pC4qv5lJ5lPkGpEiSBLsocDemRz3B/wDr/pU5es+aQIUlPRGBJ9B0P6GrBbBqpQ0TFfUn30m+oN9LvqOUdyffSb6h30b6OULk2+l3e9Qb6XzKOULlgNRuNQB6N9LlHcn3Uu6oQ9L5lLlBMm3Ubqi8wUb6XKO5Lupd1Q76N9HKFybdS7qg8ylD0uULk2+jdUO8Ub6OUVybdRuqHfRv5o5QuTbqTf71Fvppenyhcm3Uhaod9LvyaaiO45WUzu38SqFznp3P/stSB6rIy7Ayj75LE+vPH6AUvmVU462Ii+pZ3+9SW43yge9UfMzWlpowWkPQDNZTVolSlaNyLxRrC6PpEswxlF+VT/E56D+teGTzyXM7zSuXkc7mY9Sa7P4j6q019DYKTtQea/PVj0/Ifzrh0BdwqglicADvX1GUYVUqHO95fkfN5hVvPkWy/MsWttLdTrFDGzuxwAoroZNNt9MRU+W5u3wOOin0Hr9a0LW1i8P6QzMEa8mHzP8A3f8AZH+f5VFB5UGjXmpXsYaSeMLZ55IYPy3Xjp+NerKXKjzE3J+QsMa6aZ47iN21cMFgiSNZR/gDn6nj8a2bPwZe30X2zxJetapkFYmYb8erE9P1NaukWsXhjT5Nd1o+bqtzH543DJiQ9/ryB9SB71UgN545ka4RtkCNtCBs7T7+p/8A1VMYuTIqVbbbD5P+EQ03CRWdrOq8eY53Z9+Q2f0qBta8MSMETT9JI9oVB/MpWvb+C2ViwiWRkOGZIixz+ANcbr3w+1EXV3c2yOYy5fY0LLgE9uvStnSsjmjVUnqzpv7D8LaxEVEUVtMfutE+cfipwPxWuW8QeB73R4xc2zG7tTkl0XlPrj+Y/Ss/R9J1uCfcsnlwxNySc/gtd3omvPHOLedFViQJAeAw9SP6jke44rnalF3Rup22Z5Ypp2CD7V3vi7wojJJrOmR7YutxBjBjI6ke3+Oe9cPsyK1jJSRqndFG7tB5YmhA4HzqB096oqea2OUcH86ypU8qVlHQdPpVAz0nRNSk1PwxDMxzPZv5bc8sAAQT+Bx+BrZEgZAwPBGRXBeCr4RahPYsfku4yBzxuGSP6iustJiqtC3VDx9K+bxmH5KjS23+/wD4J9Hg6/tKSb32L7Se9RtJ71A0lRmSuRQOvmJzJ70wy+9QGSmF81agTzExlNNMlQ7jRk1fKTzD2fNRFVJ+6KXmnKpqkrCbuIqqB90UllZT+IdXTS7ZvLgX57q4AyEQdR9T0Hv+NQyi6vb2PS9OjMl5L1x0Re5PpxXpOi6NDoGlpYW53tndLLtwZXPfH6AV34Wh9uXyPJzHGqmvZw3PK/8AhIrPaAdMlwOxvZKjGu6djB0ptvcG8lP8zWeI7LjFxN16eSP/AIqrUVpDMAytcEL1Itxx+tehyQ/q543MyaDW9JgdXj0VY3XkMk75U5zleeD9K7PRvFFh4iD2N7GEkPQyMAZfcEYAYe3X69eZtdNWWVYmeRRggs1kP51oDwpC+HXUYw+OhhAIx9DWU3T2YrtO6LGraRJp85ABaFvuPjr/APXrLZTnkV2tgWitFtru4iuk2hTuGM//AF/eqd1o1rIS1tOoB6JIMH6Z6V59SOt4nsYbGwkuWo7M5M8VJa3Bt59w6Hg/Sto6DcMG2RGTHXZ82PyqlLpckfDIVPoRWEmtmenCXVEhux6ik+1j1qqbSQcYpn2d896y9lE19qzTguxvU571p+IbvOh7kPOx249lNcyIZAeKmu5JX0yWJicCNsflWboJzi+zKdRuLPLm5Ndzpy/YtFtIFAG9fNkOOrNyP021wx612l26phBjCRomR7KBX1h8jIksbS51rV9kNqboRfvZIQ4UugYAgEkcnIHrzTnaDxF46trSAyzafbqiRLJkHaiAYI92wDU3hGDVF1F9T0y6it2tpIo3MgLbvMYrjGORwT+FWvhsif8ACR3l07qMLtBYerbv/ZaybvJ+QS0gdD4zuhpsP2aKRSLckq3ZmbHJ9x1/EGq/w/0SK5uUubsDk+Y2/PIHPNc/8Q75JpnMY4dmJPr85H8lH5VZ
+Gt48djcmSV2DM67SxIAWMngfjWkPdRzzTcdDuvEviy7ubgabpj/AGeNOSo7DsTjqT1x0A61x9zPNIxJluJWzn5V3YFRR3Id55nA3Sykq38QJ5x716Bpt7d6fpyw6dbrgqGeZ0GXyAc5/lVPzM72MDT7qO1sBdQ6h9mk/iSRSNw9GAHI9wfwrL8RaZB4j0lb6EYkcE7VAJD9sexIwR6kEd6b4rkPnWs5CxrdFhtTnkHBPFXPDyy/2bdWzkEtGZQ2OQVOB+mKL2DzOO8DpqthqyyRwSrGcglkIUMAeCTxyMj8a6rxFs07xTpeuRWsdxFKql0kUFHYcYOf9kr/ADrfukdhDNMN6iReD7kD+tZPiuAHwbavnmOVMfipH9KxnqzWE22jmfHlsYtca6XS302K5QSJC0iPnsSNnA5HSuMnc+ScYFelfE6FXbSr5II0Se2zvhfdGx4JwMDaeeR715nL901VOV4nWUGG1j6V3fw+v/3V5p7sNpxKg/Q/0rhpxiT6itTwvcG38Q2pzgOTGfxGB+uKxxlL2tCUTfCVPZ14v+tT0oTYJGehxS+d71UKsGOe5pOa+b5EfS85c873o873qnzSZPrR7NC5y753vS+aPWqOTS5NLkHzluVlkiZG5Vhgj1FPhnZ4EZ8bsYYj1HBqluOKW3ZhJJH1z849ux/p+dPkvFopS1L3m+9Hm+9VsN3oAao5B8xZ80+tKJPcVV+alG70o5A5y15nuKPM96rfN6UDPpS5B85a8z3pwk96pndSjdR7MOcueb70ebVPJoBal7MOcu+ZS+bVLLe9GT70vZj5y55lL5lUsn3pQze9L2Ycxd8yjzKp7m96C7e9Hsw5i7vo8yqQc0nmH3o9mHMXvMpN/vVLzDR5ho9mHMXfMpPMql5hpPMNP2Ycxd8ymSzFEJUjd/D7ntVXzTSeaPMXeAVBJwe/Bx+uKqNPUHPQuM6oojVvlQbR9BTPN96pNKSc0wymj2dyVI0BL83Wtu1OLAnoW4rlYnJcc1v3NyLXRDKzYEcbyE/QVlVp3aiupalpc8f8R3v27xBez5yDKVX6Dgfyqx4atle7N04yIvu59awXYtIzHqTk11emJ9msoo2IyyiTgevP8sV9pTgoRUV0Pjq03OTl3L12smrapb6fE2N7hQfT1NbWn6dY6v41s7CDYdLsoFeQbiV7tzk8dRkfWsPw+Fl8TwPJ5xEYklBhBLBlRmU8c8EA1s+CI3mXxBMXHnyBE3j/AGid386znfmIbtCxl+PPEFxqGqGySUkHaXRfX+BP+AgjP+0T7V1vw70RtItrrUby9NtbBQs+4DDH0HsOmepORVPTvDGm6osupzowuUlYhkPUjPUHitXxJcwpo+naRC5iiD/vj64Ax/M10QtFHHKfNoS6x4w13WTL/ZxNrpsZCjy1AYj19fyrGWbU44pLmHWt6oQGEjMeTnjac+nJHSruiW8eq6lPFMxhsrYDcsY+Zh0AHufWr2u6PZR2zzaX5kUUR3TQM2S49R7ilcSSKmnaimqQNbzokd4BkEdJfcGsm+tTE4uIyVkQfKQ2AR/nv2NVoZ4bS+txFKHEkoO0jBU4yKyvH91cfbYzHII7aRm/dx8DcArEn2w44+tJ6lxjroejeH7lbyyRJUR1m/dTg8deAcfofTOO1eb69pDaTrdxaAHy1bMZx1U9P8PwrvPB2JdOUAgzT2gaPI6N6/mM/jVL4kQRtdWd0nB2mNvcEBh/M1nHRmkJWlY80uU21lXfO1seor0Hxf4bstK0fTL+zvjN9rT5o2wSpCqSRjtkkfhXn9wv7o+xrSMk1dGzF0y4NrqdtODjZKrfhnmvSbiPybxiOn868sTlwB3NeshTc28Mo/iX+XFeXmK1i/U9TLZe7JehVzmjBParQtmp/wBkfGT09cV5tj0+Yo7T6UoQmrRWFfvTxj/gQpjTWqZ/ehvYA07BqQbKeIiR0py3tuW2pHIzHoMAUy71EWQHnpHbkjIErYOPoapQk9kTKSjq3YesBPaodRmXTrCS4bqOEHq3aq0GtQXkzRDUIoCBlXlyiE+mf/rVJeaFY6nta88Y2Kbeka7cD835rro4STknPRHFiMfThFqDuzoPAttZ2NmLozx3etagCWjhYPIq5+7gdOgJ6ds9K9QtdP8As6q8uxpsevC/T1+teK2mnW2mxqll4+Nuq9Fhuwg564UPXqHg3Tr60tp57zWbzUVuAhjNyzHbjOSA3IzkflXpSjofOVJc0nI44aNpZYN9mhxg9V6086TpwXAtIgAORyDmp/OjEoVMZBKjI6D1pFuVbZiQE8qcDnp3rzuaXcLsWOzSABI4QoAO45NPaFFZMpGA33TtzxUK3LDkEY5znpSm5dNpaQKQpGQO3NKw7lgeX0GwLt4+XrSGVVG7bjavTHGTUCTsI0zIQGXAOMCgzgqAZduB/wB9GpsNMsDYWOWVjwR8veuC1O48barqDQJDPbrCWEaxt5a7c9SxPzdu9dqXIDbmKtjccj3oE+12ZnTpkew960pz5HtcpTa2ZzNhpnifyx9q1hI8DlDEsh/Mj/GtY6ddmIj7erMBnLQr178DFajMSjBZUYgY3YpN7MHZJI+eFXHT60p2l0X3GscXWjtJnMXln4ht03QLZ3XcquUOPxODWBqGt6xaxSR3Wm+RkFdzxsBz79K9HcuefNQDgnaO4/8A11I7ArKrPGy8URVNbxRusfXtbmPAsc9a62+lEsrOARuVXwfQqD6e9d7daLpV87LPZWxywO4RgE/iOa5HxLYx2d8Y4QUi8tdi88ADGMnnHFehTrKeiOdS5mange2hu9P1iGXU/wCz22xPHKHCkOC2OvbJAP1rH8HSFbu+iU/vNisMd/nCn/0OpPCet2uialcSX0cr289tJAxhVWdC2MMAxAJBAqrJqFiPG0+oW/mpZXEzkBlVSobOCVBxw2DgHtVJPmY57MXxLZktcRquCSSHlbjg7sL9Qw611ngnwzLp+kOLq5QtK4lCRLnaChVhu7kq3b061U8RafueK5h+T5mRdy5wR/C3t1q94N1lYQmnXGQCSsLHofVD9P5Vs0+U5nK6IZNLaO9kSYZy5IyMc9McdulbOn+I2spoLCePfKMIsg5GDwBjvWrf/YFAhnmQ5H7tSfnHsCeo9OahsdJtmvUluPKdQQM3Ecquo9MjANCaktTJ3M3xRomo/bLa6kiWQZKwoo5Vic9KsQwjTdKuLmR/mC+XyOSoUFz7/dwPUkV6Jqt5o1np8dzEkEkiqEG0Y49CTzjivDPiDr95PdNaCNlSdQQ+MAr/AHV9s4z6nFS10RcdXY1YvHMfiFVsobI29xHiV5FffHhTnjjPXAqXxTdKvhmztmYeZJIrAewQHP8A4+Kz/A3h6SyLRzvsuZiHlCjPloP4TnqSeoHp61uzWN74p8cn+zYYJItOwxWQ4R9hA29MZYjFKSSWhUfj0Mb4ix2ljp+iWFpcNIvkGaQeYXXJwoYZJwDt6DivMpDwT7103jG80+48QXj6VZtaWm4KsLYypAAboSBznpxXMN0A7mlTjyxsdlyrPzIPpSwSNBc
RzIcNGwYfUHNIWR5Gyec4FPEQPRs/Sra7iT1uj0pL15VSQ4ZWUEcY4NWk2yKGXkfyrjtK1hbe2S3ut/y8K+M8ehrYg1e0Vw6XKficZrwKuGnCTVj6WlWp1YJp6m35ftSGL2NQp4h0rA3SgN3AII/nTv8AhINJP/LY/kP8aw9lPsPmXcf5fsaXy/Y00a5pJP8Ar8fl/jTv7a0r/n5X8SP8al059hqce4eX7Ux4iGR/MaMggZHcEgY/l+VOOt6SP+XlD9CP8abJrGkyxtG04wwwen+NOEJp7MblF6XJcOP+Wjfp/hS4k/56P+AX/CmRa9pRiQvP8+Pm4A5796X+3tH/AOex/T/Gm41L7fgF13HbSessv4bf/iaXb/00n/NP/iaZ/b+kjpL/AC/xo/t/Sf8Anp+q/wCNLlqdvwD3e4/aP+etx+af/EUmwf8APW5/NP8A4im/29pH/PX9V/xpf7e0j/nr+q/40ctTt+Ae73HbfSa4/NP/AIik2d/OuPzT/wCJo/t3SP8Anr/6D/jS/wBu6Of+W/5lf8aVqvb8Cvc7/iJtP/PWf/xz/wCJo2HP+um/8c/+Jpf7c0j/AJ7j8x/jR/bWlH/lsP0/xo/e/wAv4f8AAC0O/wCIm1v+es3/AI5/8TRtb/nrL/45/wDE1INX0k/8t/5U7+1NKP8Ay3/8dNF6n8v4f8AfLH+mRbZP+esn/jv/AMTTdsv/AD2f/vlf8Ks/2jpn/Pc/98H/AApf7Q03HE//AI43+FLmn/L+C/yDlRV2T/8APdv++F/wppS4I/4+PzjFXRe6ex/16/ip/wAKd9psSf8Aj4j/ACP+FTzS7fgPlRm7Lrtcx/jD/wDZUeXd/wDPxD/35P8A8VWl59j/AM/Ef5H/AApfMsj/AMvEf60ueXb8A5EZgjvP+fmH/vwf/i6Xy7zvcQ/9+D/8XWmHsv8An4j/AFpc2Z6TpT9o+34BymV5d3/z8Q/9+D/8XR5d3/z8Qf8Afg//ABdapFt2mj/Om4g/56x/99Cl7V9vwDlMspd4/wCPiD/vwf8A4umxrP8AaP3skbgIeFjKYyR7n0rUZIT/AMtov++hTY4EcyMrKQDtz9P/AK5NUqmj0/ATVigQf7v5GmkHH+rJ/EVqfZV9RSG1A7ip5kMzUOxgSjAe2DVnxdcG28HzHIy8aoAf9o/4U+VYoULO6gDnGeTXG+NNae6t7ezJJAO8j0wMD+tbYai6teFtk7meIqqnQk/I5CNPMlRM43EDJrs7p9t6VBOAgAyMcDj0FcXFJsmjf0YGux1QGO+LOwZiATgYxX073Plrk/hdbuXxHDHYSpHdPHMI96bgx8tjtx79PxrY8IvPCNfhZPLulkXMeOhBbI/PFcvY6ncaLq9vqNoV8+3cOm4ZGfcd62vD3iBbvxjLNLFFZDUVMTCAbUWTqDjPGSP1rJxblcU/hOz8MHfpOoCWaK3CXDgtIwULk5HX2o8aWMZtLe+s1SaLem8qxIcc8gjtyBn2rz7xxpzpcC5jA8vaCRjBH/6jx+VbPgHxWj2jaFqCmQSDbEQB6Yxz37f55uztc5nHqjXtrtNGu1LOrRzjMixrjZjpg9TjvW8NStrqCV4bm3klYfKiEkntyOw+tYWp6JK7OLctNCihcIMsn+yQcfTkio/D2mXVjq8cs0Gy3zhi5AZR643c1Voy1M3ojSXwFLHLZ3zGNEmmjk29TgHP4cZrB8b+GrvUbCPULKJTFA8jyIWAIUhACM9eF5r1ee6W5hFnYh3Ljb5rDC474/lWdqcUAiXSIgsyxFZr3J4UZyqfVmA49B7ik3d6BGUtzi/DjSWA01UY74wsLDHQkAEfgTVDxVcySWttHKSW3ZBPoEUf1rvLHSo7YJNO2JcnaMdO5J+nJ/KuE1q2Oq+KLXSrf5Sg8t2c8IfvOT7KOv8Au0pKyNKUrzuReN7LToPDmk3Vu9u166qkvlSZJXykPzLng5J5715tN/qX6f5Nd98StTS91O1tnslt72zh8i5KfcbBJTae67SCM+tcdbaZe6mWhsraa4k4JSGMscepwOmcfnUU9InU3oUtNgSW+iErqkYOWLHHTtXZrr8VvCkS3SBVzjbz1NUbP4b+Lbv/AFWiTr7yssf/AKERW9afBnxLMitPLYW+eqPKWZf++VI/Ws69GFVpyZ0YfG+wi0ktTIl8TgcJNM49uKpv4ikkJAhdx/tS/wD1q9H0/wCCFusatqOsyu5HzJbwhQD9STn8hXa6J4C8P6B5clrp6yXKDH2if52J9cHgH6AVksPRXS455nVezt8jwqBdevQhs9JuHRxlWS3dgfxxita08HeO70Ax6XJCCcEzGOLHvhjmvoJmO371Rl/9oVSjBbRRyzx1aXU8p0f4Y6+rONS1G3COmADNK2w+oClcn8cVrQ/CSy+1l77Vpbq1PLQLbJEW9jIPmx+vvXemT/aH4im+YT3Wq5uxzSqzerZix+BvC8TRFdEtP3Qwu5M/nnr+OasS+H9DQADRbBnY4Vfsyc/p0rReXapPJ9MDrSQI6Au5QzN95gPyA9h/9fvRzMhyZFa6Nplngw2FoknXekCqc+2BxV0p/tGmeZIvp+VBmfPQH60mTds8im8P+Iw26WyuJGyT8ig4HbBqE6R4oSFCNKumkThdqAZHPXnrivXgx/2acHY8cVn7NGvtF2PI49F8SBY2Gk3eVzlCAOT/AEoOkeJS4X+y7oqM7iY8E/TmvXstjtS5fsRS9nEftV2PHptO8RxWnmvpd+zEj90sW45yPT2zTfsHiI/8we9KhAwBhPX0r2Vd3fFL83bFHs4h7RdjxZrbxJGQJdJvmEvZYWOwZ9cU0W3iJMu+m3CiRQoURHK9ueK9sAPc/rSgkd6XJEftF2PGTba2AYjZTruP9xuBn8v8/hUaxaztkL2sw2/KoMDZz9MV7Vz/AJNHP+TR7ND512PE2GsICwtXJ9RG3P4Yz3qRJdR2MWsrlWXB5hYZ5+le05b1pwLDrS9kg9oux4oLnWNzbbByM7gfLP8ALrWfr8N7e2E8txaSDycNG5BGQeDkfrXvjSiNd0jBF9WOBVW5vdMuraa2nv7MxyoUdTOnQjB71cKfLK6Gqi7HytISpIzyPSo8k9a2/FeiHQdZksxcRXMY+aOWJg29D0Jxnn1FYitjBFd8Wmi9zu/C+rDVrV9OvyZZEAwN3zyKO6/7Q9O4x6VNf6NJbyl0dsnDAqOvow9/cc+orh4zuIZThgcgg45rt9I8WIsAtdWj3DvMF3B/99fXH8Qwau9jCcGndHNazba2bw3kNzJc7xtKqOQPTb3HuOK1PCmu6zJqNvp9w00MB3DcqFADgkZxgcniu0tU0jVFY2l9GhLfcEynj6Ptb9D9auwaLMswbe23++I8nH/fOKLJkOelmi7pWkS6rKCDJMyZ3EnIX8TwP51X1rwtP9uj+1WzyQryrtgJG2OD6sf/ANWK7fQL3TdLscS3qB84PmMob8FFUvEuuSXkBg0qN/MdSv2hl+6Mc7VPOfcj86yk1EIq5wOszR6Bp8dvBzqdwpX5h8yqerbex6Yz9cDFQNnwj4
Ha+i1J7fVLhlKRIwwwO5ee5Iw+fQke1XbGLSvB5tta1WVLgXgdFYN5snQ5Yc5PIAJOOteW+JNfute1N7m4ldlHyxKxHyJk4HHGeeT3OTWWs5W6HTTikjGlfcx5qrLIUVmHpgcVI5ydufqaoTyb2AByqjH/ANeuhItsYOtdB4a0J9WvNzq4tYiDIyg8/wCzn3wau+EvBE/iC4Sa8vLfTtNB+e4nkUM3sik5J9+n8q+hNLv/AAjoOmQadZahp0UEK4A85cse7H1J9azqydrR3IvY8mk8I6LKSsaXUYA/glOT/wB9ZqI+B9JVyUuL3HUKzLnH/fNezN4l8Lt97UtOb/gamk/4SPwvuDfb9OyOMgrXHar/ADFe08jxaTwTp5LFJ7xSCRlowwGPwFRt4HtghYX0wHZvKBGfzr27/hI/DB66hp/5ikHiDwp/z/6Z+a/4Ufvf5vwGqnkeLP8AD+BU3jVX2nHP2fp/49VKTwLcCSQR38O2PktKhX+RNe7tr3hKQfPqGlN9Shp39v8AhPcW/tLSsngnenNNOquv4D9p5HgEngjUUGVmgf6ZHHr0pD4N1JAdpifGMYbr/n3r6BOu+FGHOoaUf+BpUg1fw24+W+005H99KfNV7/gL2vkfPK+D9UbHESgvsyzYGfyqNvCGqLtLCIZYr9//AOtX0X9r8Osf9dphPrmOnG58OtjL6WQOmTHRzVe/4B7XyPm+TwpqsZAMSEnphqYvhnVJFDCNNp6HdX0j9o8OBdofSguc4zHjNHmeHe7aX+cdHPU/pC9r5HzanhfVpArJbhg/TDAfzxUT6DqaY3Wj89MYPfH86+mRJ4dbjdpZHpmOgDw9nj+y/wAPLo56ge18j5pk8N6tGAzWpwe+9f8AGoX0XUoyAbKfJzwEJx9a+nPK8PNkbNMbPUfuzThDoWc+Xp5PrhKaqVB+18j5kk8P6tGqsbCYqwzlRuA+pHApg0HVD5hFhOfLOGwmfy9fwr6eW30MfdgsQPZE4pTa6LsGbay2qd2Si8e9HtJh7U+Xf7I1DdtFnPnGceWc4oGk6iWKrZXBYdQIicc4r6YSbwvPmNJtLfZ1XfHxSm38MnqNKOOR88f+NHtJdh+2PmZtN1Jd3+i3I2gMfkPAPelXTdWJ4tbrOccIf896+nE07w/cA7LewkH+ztP8qVtE0I8mxsz7bRT9pLsL2x8wCz1R1LCC7YDrhG4pf7O1UDd9lu8euxv89xX1B/ZWjAHbZ2wyMH5BTv7L0lzlrS2fH+wDR7SXYftj5dey1OMZe3ulHqUamSJfQ/6wXCc4+bcK+pv7J0ncG+xwbgcg+WOKH0vSZE2PY2rqTu2vApGfXkdafP3Qe3fmfKvnXA/5aS/maX7Rc/8APWb/AL6NfUh0PQyf+QXYf+Aqf4Un9h6F30rTz/26p/hT512D6zLufLn2i4/57yf99mlF3c9riT6bic19Rf2JofbSdP8A/AVP8KX+xdEPXSdO/wDAVP8AClzrsP6zNdT5eF7eZ+WeQdzzT0vtRK4hnnIzwFr6ujaONQke1VAwFUYAHpT959TReP8AKH1up3Z8oSXuqRY8yW4TPTdkZqxDD4hu5AkFtqcrMMgRwu2fyFfUrfvFwVJ/ClXzY1wAzgdFPUfQ1N4/yi+t1O7PlTVl1/QLmOLUYbm2lkjEiLOOqn/9XSucuriW5naWViznqa+svFvhWw8ZaLJYXOI7lPmgmKfNC/8Ageh9fwFfMfifwnrHhW/+zaraNCW+5IPmSQeqsOD/ADrai4PVKzCVec42kzCDV2Ly/btLgvE5GAsihOjAcnj6da4w1saHeKvm2cr7UlwUJGQrDvj6ZrdozTsXZfm59qq4KOCpKkHIPcVZIKllbscGo2HNCNHqeh2N1F4o0Uu+wXSfLPG3OTjG7Hoe/p+FZekaemgalLcwQeZNu2eW/wB9Af7nZs/nXL2N7Pp90lzbOUlQ8eh9j7V3dv4g0rWrYRzubK6IwVbGwn2P+f61VkzmknH0N2LV9PuWAm2JMOCsqlHHtg4NR3TXj3Df2bNpkMKRbpZb+ZgqZJAwoPPQ9c1nyaVcG2dIpmaFgCudwT2Pdf0qvYaRNazbmvLTB5IURA/otT7HqmZOaM7SL/xGPFy3dlqMtwFkEf2nZshdOmAp42+gr3vRND057ANEG8x38yWXdlpJD1Zj3/pXnVhYRo3ms7Pk581YyVH1Y/KPxIro7PxZDZKkVv8Av2j6RxOGB/33xgf7q59zSfKhKfMzT8SxRaZazyNKhwm2KNl+8euB/X2GO9ea6PayQx6t4ia7iSeyRpGjmXcLhWyHQ9/myBx611OpNLfkapqT+WkhYK+07IwoJ5IGFHHU/wA6848e+M011ILGzlme0jZZm8xQB5mxVwoAyFGD165zWLfM7I6acEjk9Z1K41fVbi+uWDTTuWbHQegHsBgfhXrfwb0gw6bd6rJGwMxEMRPQqOWI/HA/CvJNG0q71zV4LCziMk0rYAxwB3J9AK+oNOsItM0y2sYB+7gjCLwBnHf8etVPRWCrLSxazjsKAw9P1pCD6UmD2WsDAdv9v1o8z2pu0/3aPLPpSAGlyOVH60wlT1UfmaUo3pSFH/u/nQBGyoevX86aI1JOCp/SpCjE/dB/Gk8pj/CB+NAiMx/MDsY46YBPNKZQDg7gad5Lj+DOKCjkYIPuDTQCB165el3Kf4j+NJgjqin07fypCueMEfrQIn2D0pdg9D+VHngdf5Un2pR/CTWZQ7aB3I/CkJX+/wDpTftS+hppuU96Vxqw8lf79JuHHz1H56n+E0hkU87T+VTdjsibeo/i5qG6vfs6Bktri5JONkIUkD1O5gMUocHscVkeJLPWb+xWDRr2Gzcn9475DEezDp+X404u7KsN1LxbZaTGHv5prDd91JrMsx/J8VgS/FTSlBEV5Ox7E6fwf/Iormrz4ZeIriZpZb20nkbks0rkn8StUH+F/iUE7IYHHtMBn866oqC3Y9C/qfxX1aWd0sBbxQcbZDBhz74LMB+tYT/ELxPyBrNyM/3SF/kKs/8ACsvFP/PjH/4EJ/jTT8L/ABSf+XGP/wACE/xrRciKTic9fa/qOoHde3k9yf8AptIXx+ZqkdQkA4OB7V1p+Fnik/8ALin/AIEJ/jTf+FUeKT/y4xD63Cf40+aPcfMjjpb55F2uSy9wTVfcA+A25T37iu2Pwm8Vk8WUP/gQn+NNPwn8WLk/YYcDrm5QD+dPmj3KUkccjlDwa0bW+RcK4BHoazb23+wXT27zQyOpwxhkEij/AIEOD+BqJZB2aquDsztbR9KmUeaCv0rorJvDqIPNugB6Fa8rE2OjEfjSidv+eh/OhszdK/U95GpeG9I0BL9by3zNHIYYvMG5nUkAbAM4ODzmsDUPivBp2pRvotos0CQFGa4GC7tgk8c4GMe/NeSGQ9S1NMgPesnC71LjBJF6+1a7vhGs8zOIwVRSeFBJJA9OSTWa8nYcn1pHJPSo/KZh0IrRJbFNkEsmflU8dz60yJ2icOuNw6ZANWhaMR92lFkx7VVhXH/2tfMctOxJ7mnjVLnHM
hNN+xGpE06V/uoT+FKyGmA1O4/vmnDUrj++alTRbthlbeU/RDU6+HdRYgLZXBJ9Im/wpOw1Iqf2jP8A36X+0Z/79akfg3XZRlNHv2HqLZv8K0Ifhp4quIjLHo8oQdfMdEP/AHySCfypXiPmOb/tGf8AvUv9ozf3j+dXpPC+rRMVl066Rh1DQsD/ACpo8Paj/wA+U/8A37P+FF0HMU/7Sm/vfrQNSmH8X61dPhzUP+fKf/v2aQ+HdQ/58p/+/Zp6BzIqf2pP/e/WnDVph3P51Y/4R3Uf+fKf/v2aX/hHdR/58p/+/Zo0C5ANYm9f1o/tibuT+dSnw9qA/wCXOf8A79mmtoN8Bk2kw/7ZmloFyP8AtaU//rpP7Vl/yad/Yl7/AM+03/fBpP7Fu/8An3l/74NPQNBP7Vk/yaP7Vf8Auig6Pdj/AJd5P++DSf2Rdf8APCT/AL5NLQNBw1Vv7opRq0gPAAqL+y7gdY2/Kk/s2b+4fyo0DQsf2vL2P60f2vIevP41X/s+b+4fypDYSgfdpj0J11Mq25VAb1AqT+2pfU/nVL7FJ6Un2N6BOxfGuTDufzpRr06n5XZfoaz/ALG9H2N6BGl/wkN1/wA9pP8Avo0h1+5brM//AH0azvsb0n2V6LArGh/bk/8Az0b/AL6NL/bc/wDz0b/vqs/7K9H2V/SiwaGh/bc//PRv++qP7cnH/LRv++qzvsr0fZXoDQ0v+EguR0mcfRjS/wDCSXn/AD8S/wDfZrKNo/pTfsz+lAaGyPE98Ol1MPpIacPF2qpwuoXQHtM3+NYn2Z/Smm1k9KYtDeTxjqkcgkW+uRIOjiZgR+Oaj1HxfqOq2ptdRuri7hzuCTys4B9Rk8H3rE+ySelH2ST0pWQFKdULkxqVHoTmo13KQRkEcgir/wBjc9qabR89Komxet70Xcah+JUXGB3HsKeW59qzhbujBlyCOQRVlbgkYl6jv61JaZOCR1qZGB4JwKq7yehpwfj0+lNMGbdjqeoWChbW+uIU6hY5CB+XStqx1/Xby5jt47+4eWRgiKMZYngDpXGCRl/iqxbX89rOk0MhSWNg6OOqkHIIpt6EOCe56fLoPiOaUjWL1o412eYtxPu2qzBQSoJ4ycdK07680rwbrmn2N8Glt3jEsssZAKcsMbfqBXk1zr+pXXmebeysJAquA20MF+6CBwQO1Z0tw8jZdyT7mseWT3YciR12r+PdVu9Kk0eG4MenmaR9o4ZwzFtrHuBnpXGvIScnrTWf0NRnJq4xUSjf0LX7vQZHm0+6kt5pF2u0Zxkelby/EfxH0/te4/MVwg3DsaeA2eKuxLSZ3q/EjxGOmqy/jj/Cnf8ACx/EnfVJD/wFf8K4MB6eBJ70WQuVHdD4jeISedRY/VF/wp3/AAsXxF/0E3H0Vf8ACuEAf3pwD+9HKuwcqO6HxD8RHj+05PyX/Cl/4WD4j/6Ckv5L/hXDDeD3p4L+9Oy7C5UdqfiD4k7arJ/3yv8AhR/wn/iQ/wDMWk/75X/CuLG/3p6lsd6El2E4o7QePvEn/QVk/wC+V/wpR498Rnrqkn5L/hXGhj70/efenyx7E2O3tviL4gtn3PcRTr/cliXH5jB/Wte3+K1zuButMhde/kyFP57q8z3H3pQT70OnB9CbH0mCT2I/AUAdMseKT5/XFJhz/EPxrzrGdx3ydxn8KVdn900gU9z+VOANILigr2BpwAP/AOqmYPFKM9P6U7Idx/A7jP0/+vS9v/rVHgjr1oI9OKLBzEgHc/y/+vThgdGP5VEDjuPzo349KLD5ifp/F+lLv9x+VVvMPqBRu47UBzFoOPUUu8f5NUgx9qM+p/WgOYubyOgFJ5p9Kq7vcUb/AKfnQFytdaFod67Pc6Fp07scl5LVGJ/EiqD+CPCjnLeHdPB/2YwP5Vs78DJzVGTVDKfKsEMzlipkx+6THct357Lk/TrT5mPmZlzeB/BUAD3Gh2cYJwMlhk+gGeT7VWj+H/ha+IYeHIraPceCz73HbPzYX6cn6V0FpYiMrNczNc3QH+ukAGPZQOFH069ya0I1564p8zHdnPJ8NfCgA2+HoD9Wc/1p/wDwrfwt/wBC7b/+Pf411CySIOGNP8+RhkM350+Z9xXZzC/DzwmB/wAi9b5/4F/jT18A+FQMjQbUfgf8a6MyN6tn61E8rep/OjmfcLswx4L8MR/d0Oz+hhB/nTx4W8PJ93Q7Af8AbpF/hWp5h54agtnnkVLk+4XZmr4e0dDlNKsl+lug/kKsJZW0J/dW0Sf7qAVYJX3pCR/tUrjuxoG08KB9BThg9VH5Um7rwaNx6YNJsabHcAfd/KkyvdT+dG446Yoyx5pXGLlB2/WnBk/un86YT14o59KLjJg6Y+5/49/9anB0/ugfj/8AWqDn0H5Uiludygc9jmjmYFgSJn7n/j3/ANaneZH/AHP/AB7/AOtVbnjtScjH+FK4y2Jov+eZ/Ol82I8eXn8ap7j60mT7UXYXLu+L/nl+tJvi7RH86phwSRlTjqM07zD7fnRzMdyyzRn/AJZmmFYWHKfoKi8wHt+tG4e3Si4D/Kt+nlfoKaba1JyYQfqopu4A9aXzF7/zouAhsbJutuh+sYqJ9J05z81lbt9YVNWN6eppQUz1PT1pXYXKR0PSj1020P8A27rTf7C0j/oGWn/gOtX2Kg9aBg/xfrRcVzPOg6R/0DLT/wAB1pv9haR/0DLT/wAB1/wrTKjJ+cfnTdv+0KLgZ39haQf+YXZn/t3X/Ck/4R7RTydIsT9bZP8ACtLYO5FGweo/OncDN/4R3Qz10fT/AMbZP8KP+Ea8PnrommH62kf+FaYhB7r1/vCjyfTH5ii7AzP+Eb8Pf9AHS/8AwEj/AMKafDHhvPOg6Zn/AK9k/wAK1Rbk9MfnThat7fnT5mBjnwp4bPXQ9M/8Bk/wpp8IeGT/AMwLTvwt1/wra+ysO6/nR9mP94fnRzMDEHg3wz30Sx/CIUh8FeFj10O1/BcVuG3x3H50ogz/ABL+dHNIVznv+EI8Lf8AQEt/1/xpp8B+Fm/5gcP5n/GuhMDDoV/Ojyj6j86fOwuc4fAXhZeuiQ/99N/8VR/wg3hUD/kCW347j/Wuj8vdgH+dIYcdKOZiOabwH4UY5/sW3z9GIP4ZrJvvhd4UvXYC0uLEnoYJiV/8eBxXctCev9aTyTzwfzp88u4XPL5/ghpJY/Z9Wvoz/thGH8hVQ/A5c/L4kcfWyB/9qV6w0BVSIiYz7dPy6U3zJEYiRGA7MuSP8R/L3qlVl3DmZ5SPgax/5mU/+AP/ANspf+FFv/0MZ/8AAH/7ZXrKzpgEfMD3BpwnT+6fzp+1l3FzyPJf+FFSH/mY/wDyR/8As6T/AIUQxPPiHP8A25f/AGdevi6j7o3/AH1ThdR/3X/76p+0l3DnZ5CPgSR/zHx/4Bn/AOLp3/Cjgp51sH6Wn/2devC8Vf4H/Og6gOnlfmaftJdw52eSD4LRL11r/wAlP/s6X/hTUX/Qb/8AJP8A+zr1hruJvvREfSomkibkK4o9pLuHMzzBPg3bA/Pq8j/7tsB/7Mau
R/CHRl/1l7fMf9kov81Nd8WXHBYUzd/t/rS9pLuLmZyEXwr8OxDDLdy+7y/4AVaT4d+G4xtOmB/dpH/xrpg2f4/1pSf9r/x6lzy7hzM5r/hX/hr/AKBSD/gbf40h+H3hrP8AyDF/7+P/AI10+R/fH500nPf8c0ueXcV2c1/wr3wz/wBAtf8Av4/+NL/wr3w1/wBA1Pp5j/410ZwOmaTcR3NHPLuFznT8PfDY/wCYYP8Av4/+NJ/wr7w3/wBAsf8Afx/8a6TzcDqaT7Rzg5p+0l3Fc5o+AvDo/wCYYP8Av4/+NJ/wgfh3/oGj/v4/+NdP54OaduBo9pLuK5XDH1NLu6ZJqvv9F/I0AtngEVJKLO/p1o349ar5b0NJyR0/WgCx5nNHm47n86gA9jTgB/d/WgLkvm57t+dJvpuP9ninKqnqtAXDzO3OaC/t+Zp2xD2oCL60DG59v1oP0qTYuOn40BAenSgCI8Z4pMn2qbyuTTZnit4XmmkWNFBZixwABQMZk+1Vb7UIbCNTJueRziKGMZeVvRR3/p34qGS9udQDR6ZGyRkD/S5kIUZ/uKQN/Hf7vuelaFhpFrbs0uwPcP8Afnfl2/H09hge1CQFGK2u9SQHUP8AR4GUZtYny3uHcdfouB15ataGKOKNI40VY0G1VXgAegFWhapg5wPxzUiwRr0HNOwDIo0bkhqmEcXqfypQoHQD60vXtTGJsQcjOaDwOAcUHpx/OomzUgK2DTCikUmD70u0kHk0gGbOMU0qKftPoaaVPPynpQMZtpCPwpxXnpTduPWkAY96MD3NGDzwaNvpSGLgYpDgdqUA/hSEHsKAFyPSjd7H86Z83oaUg45oGO3e5pufY0cjt+tNOQfumkA/J9D+dIT7HNM5/umjHqDiiwxCR3pM/Wl2jPek8sHvSsAnHrRu6UeUKBGR2/WiwXELccZqpJd3aPhbCRl9RIo/rV3Z7frRg4osFyvb3EkyZlt5IT6OR/QmrGQTS7fbjHrTwgI6UWHcYGHoaXdjtUgRaTyx6UWAbuBpQc9jTvLHPFLsA6UWC4gPPSl3f7P5Gl2A5/xpdg7ZoAbn2pCfqKcV57n8aClAXGZ9/wBKAzDnI/Kl2nPQ0m09OfzoC4od+/8AKlD5puxv8mk8s96Yrku760m4d800Bh1pdp6UWAduHGDS7j70zaaXZQIkDAjo3SlPPrTAo75/Onbf85phcTgdjTww96TA7/zoxnP+NArik5HQ0hz2FOB5/wDr07/PWmFyHcfQ0E+oqQr3IppUHt+NAFc2ybmeMmNm5OOhPqR0P86ryu0G3zlyuPmkRTgH3HJA/P8ACtDGBTSPWgCkHVkV0Ksjchgcg/SlEntinyWURZniJjkbqVPBPqR0NQsksQAljz/txgkH8Oo/X60XFYkEqk+/1pdwIFQAhl3DkeoNGcetO4ifK+tHy1X3fjQGHfI/GgCxxjqable/FRZJAw1HzUCH7kpwK5xUJB7gUhGDQFycbT0NBUZ4quQQeM0Bj7/nTsK5OV/CjYO5P51Flvel3MB/9eiwXJNoHajA9DUe49hS7x6frRYLj9opdo9vzpm+l3nHXvTFczlj6HYOtP2AfwD8RUgt3OMAdfWni3fA6D8aCSBVXqVX8qeAvTaPyqYQuPT86cEIHXt60DIlUE8KBTwh9vzqRVAIzg/jT+B1x7e9AXGCNj6/nS+VLng/rUvmLwOKdnjgjpQMgMMm37x59WNN+yseN789xK3+NWc+hAP1pwYDnI96AKwsSDw8h+sz/wCNO+zFT1b8ZG/xp13qFrp9u01zPHFGoyWdgBXKSalq3iQgWAl0/TifmupExLIPRFPTP94/rTsUjU1bWYNMljtkSe6vJMFLa3YlgM9Tk4Vfcmo00+4vZI5tVmWTy33pbRZ8pSOhbPMhHXnAz271NYaZaacri3iCtI26R2JZnPqzHk1dyM5H86BNjzPJjOc8+lL9rkx/9YVAwLKQHYZ/iGMj8xS4x6596AJxdzAZB59MCj7fP/ex+FQdB/8AXpCcAcfhSGWvt0w/iH5Un26bAJIz9KqFschR1o3c5xQBbF/Ngcr+VH26X0WqgYDtT8jrt5pBcnN/IOy0fb5scBfyP+NQkFgBjB/OkC+1AXLH9oTDjYp/A/40f2hIRgqv5f8A16r4JHvSFWHpmkMs/bWP8A/z+NIbtz/yzH51Dg9MGjbzjmgZL9qfOCi/nTvtJH/LMfnUOOen60EfWkBObpgOEH/fX/1qPtR7oPz/APrVBg4PagjuKAJvtXP3B19aX7Rnt+tVtpx1zzThSGWvMJ/hH504OD1H61XBPbA5xmgE98UwLG4dSD+dGFPYn8ag3c/z5oB9T9KB3Jwqen60YXt/OoMnPBpAxzgmgLkxHsD+NKCTjC/rUIc98YoMpzwP1oAl59B+dJkg/dGPrUPmtjnr9aPMPQntQFyXceOP1pRI3ZePrUJOcfTrSkk+nT1oC5L5rf3DS+Yc52H8/wD69Q7iAcn8qPM4PP50rBcmM5/55n9P8aTzjnlG/DH+NRb8Z4H50hkx9O1AE3nHk+W36f40n2og/wCpf8x/jUZfrzSE9/60wuSm7P8Azxkx/wAB/wAaT7XnnyZR+X+NRbm/D60bj7UguTfae/lSfp/jTTc88wTfp/jTA5/ClLYGT0osK48XIH/LGX9P8actyP8AnlKPy/xqDdwCBnn1oZgew6+tMLljz1HRG/Kl+0AfwP8AkP8AGqwbkY6fWnA8Z/rQBN9qX/nm/wCQ/wAaPtS8fupPyH+NQdSMg/nSgD0P50Bcn+1oDyjnHoKQ3yqeYZceoXNQgD0pe3pSAm+2Rnosn/fJpftUQ/56f98H/Cq/PXA6YoHA6fWmIsfaoh2f/vg/4U4XkIP8f/fDf4VVyucU/cB6Y+tAFn7RF2Y/98n/AAo+0R5+8f8Avk/4VV3c5yKcHHrj8aALP2iI5w4/I0nmJ13j9ahGDSgDp/WgVyQzRj+IUn2iP+9Ue3OTj9aNv0FMZBNFG0pkjYo56lejfUdDVZ52gAE67gWI3RKxwPcY4/M1ocAUhUGhCKasrAEK4B5BPFH8I4b8BUzwg9DtPtVdvlI3AJnuW+Un69vxpiJVZeM7h9QaeGB7n8jUXp1/OlC8j1NNCZJnjO4YpmByc0oGRnP60uD/AHv1oFcZ8v8AepDs9T+dOwfc/jRjsDyKAG5QA4OfxpRICOmKQqeeCM+9JsOM5oAcGQnqKUlfVfxNRhev9DS459/c0wA4Azx+dISnTcP++hSH3/nSHOeCPzosK5//2Q==" + ] + } + }, + "widgets_values": [ + "[{\"x\":374.36899413239337,\"y\":315.67555013706055},{\"x\":377.5416127267357,\"y\":142.76783674540425,\"fix\":null}]", + 
"[{\"x\":374.3689880371094,\"y\":315.675537109375},{\"x\":374.4350891113281,\"y\":312.07330322265625},{\"x\":374.5011901855469,\"y\":308.4710388183594},{\"x\":374.5672912597656,\"y\":304.8688049316406},{\"x\":374.6333923339844,\"y\":301.26654052734375},{\"x\":374.6994934082031,\"y\":297.664306640625},{\"x\":374.76556396484375,\"y\":294.06207275390625},{\"x\":374.8316650390625,\"y\":290.4598083496094},{\"x\":374.89776611328125,\"y\":286.8575744628906},{\"x\":374.9638671875,\"y\":283.25531005859375},{\"x\":375.02996826171875,\"y\":279.653076171875},{\"x\":375.0960693359375,\"y\":276.05084228515625},{\"x\":375.1621398925781,\"y\":272.4486083984375},{\"x\":375.2282409667969,\"y\":268.84637451171875},{\"x\":375.2943420410156,\"y\":265.2441101074219},{\"x\":375.3604431152344,\"y\":261.6418762207031},{\"x\":375.4265441894531,\"y\":258.03961181640625},{\"x\":375.4926452636719,\"y\":254.4373779296875},{\"x\":375.5587463378906,\"y\":250.83514404296875},{\"x\":375.62481689453125,\"y\":247.23291015625},{\"x\":375.69091796875,\"y\":243.63064575195312},{\"x\":375.75701904296875,\"y\":240.02841186523438},{\"x\":375.8231201171875,\"y\":236.42617797851562},{\"x\":375.88922119140625,\"y\":232.8239288330078},{\"x\":375.955322265625,\"y\":229.2216796875},{\"x\":376.02142333984375,\"y\":225.61944580078125},{\"x\":376.0874938964844,\"y\":222.01718139648438},{\"x\":376.1535949707031,\"y\":218.41494750976562},{\"x\":376.2196960449219,\"y\":214.81271362304688},{\"x\":376.2857971191406,\"y\":211.21046447753906},{\"x\":376.3518981933594,\"y\":207.60821533203125},{\"x\":376.4179992675781,\"y\":204.0059814453125},{\"x\":376.48406982421875,\"y\":200.4037322998047},{\"x\":376.5501708984375,\"y\":196.80148315429688},{\"x\":376.61627197265625,\"y\":193.19924926757812},{\"x\":376.682373046875,\"y\":189.5970001220703},{\"x\":376.74847412109375,\"y\":185.9947509765625},{\"x\":376.8145751953125,\"y\":182.39251708984375},{\"x\":376.88067626953125,\"y\":178.790283203125},{\"x\":376.9467468261719,\"y\":175.18801879882812},{\"x\":377.0128479003906,\"y\":171.58578491210938},{\"x\":377.0789489746094,\"y\":167.98355102539062},{\"x\":377.1450500488281,\"y\":164.38128662109375},{\"x\":377.2111511230469,\"y\":160.779052734375},{\"x\":377.2772521972656,\"y\":157.17681884765625},{\"x\":377.34332275390625,\"y\":153.57456970214844},{\"x\":377.409423828125,\"y\":149.97232055664062},{\"x\":377.47552490234375,\"y\":146.3700714111328},{\"x\":377.5416259765625,\"y\":142.76783752441406}]", + 720, + 480, + 49, + "path", + "basis", + 0.5, + 1, + "list", + 0, + 1, + null, + null, + null + ] + }, + { + "id": 75, + "type": "DownloadAndLoadToraModel", + "pos": { + "0": 1074, + "1": 937 + }, + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "tora_model", + "type": "TORAMODEL", + "links": [ + 193 + ] + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadToraModel" + }, + "widgets_values": [ + "kijai/CogVideoX-5b-Tora" + ] + }, + { + "id": 66, + "type": "VHS_VideoCombine", + "pos": { + "0": 1485, + "1": 436 + }, + "size": [ + 605.3909912109375, + 714.2606608072917 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 203 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + 
"type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "CogVideoX-Tora-trajectory", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "CogVideoX-Tora-trajectory_00010.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + }, + "muted": false + } + } + }, + { + "id": 90, + "type": "Note", + "pos": { + "0": 339, + "1": 1066 + }, + "size": [ + 251.63747656176258, + 73.90463053872986 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [], + "properties": {}, + "widgets_values": [ + "Three sets of coordinates are created here and appened to a list" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 92, + "type": "Note", + "pos": { + "0": 1200, + "1": 1045 + }, + "size": [ + 251.63747656176258, + 73.90463053872986 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [], + "outputs": [], + "properties": {}, + "widgets_values": [ + "Coordinates are used to create optical flow video, which is then encoded for Tora" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 80, + "type": "DownloadAndLoadCogVideoModel", + "pos": { + "0": 106, + "1": -85 + }, + "size": { + "0": 315, + "1": 218 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "block_edit", + "type": "TRANSFORMERBLOCKS", + "link": null, + "shape": 7 + }, + { + "name": "lora", + "type": "COGLORA", + "link": null, + "shape": 7 + }, + { + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "links": [ + 204 + ] + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 205, + 206, + 208 + ] + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadCogVideoModel" + }, + "widgets_values": [ + "THUDM/CogVideoX-5b-I2V", + "bf16", + "disabled", + false, + "sdpa", + "main_device" + ] + }, + { + "id": 79, + "type": "CogVideoSampler", + "pos": { + "0": 1089, + "1": 17 + }, + "size": [ + 330, + 570 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 204 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 197 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 198 + }, + { + "name": "samples", + "type": "LATENT", + "link": null, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": 199, + "shape": 7 + }, + { + "name": "context_options", + "type": "COGCONTEXT", + "link": null, + "shape": 7 + }, + { + "name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": 200, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, + "shape": 7 + }, + { + "name": "num_frames", + "type": "INT", + "link": 201, + "widget": { + "name": "num_frames" + } + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 202 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoSampler" + }, + "widgets_values": [ + 49, + 40, + 6, + 3, + "fixed", + "CogVideoXDDIM", + 1 + ] + } + ], + "links": [ + [ + 54, + 20, + 0, + 30, + 0, + "CLIP" + 
], + [ + 146, + 60, + 0, + 67, + 0, + "MASK" + ], + [ + 149, + 67, + 1, + 65, + 2, + "INT" + ], + [ + 150, + 67, + 2, + 65, + 3, + "INT" + ], + [ + 153, + 65, + 0, + 68, + 1, + "IMAGE" + ], + [ + 154, + 65, + 1, + 68, + 2, + "MASK" + ], + [ + 155, + 56, + 0, + 68, + 0, + "IMAGE" + ], + [ + 156, + 68, + 0, + 44, + 0, + "IMAGE" + ], + [ + 166, + 72, + 0, + 73, + 0, + "IMAGE" + ], + [ + 167, + 73, + 0, + 71, + 1, + "IMAGE" + ], + [ + 187, + 73, + 0, + 60, + 0, + "IMAGE" + ], + [ + 189, + 67, + 3, + 78, + 3, + "INT" + ], + [ + 190, + 67, + 1, + 78, + 4, + "INT" + ], + [ + 191, + 67, + 2, + 78, + 5, + "INT" + ], + [ + 193, + 75, + 0, + 78, + 0, + "TORAMODEL" + ], + [ + 197, + 30, + 0, + 79, + 1, + "CONDITIONING" + ], + [ + 198, + 31, + 0, + 79, + 2, + "CONDITIONING" + ], + [ + 199, + 71, + 0, + 79, + 4, + "LATENT" + ], + [ + 200, + 78, + 0, + 79, + 7, + "TORAFEATURES" + ], + [ + 201, + 67, + 3, + 79, + 9, + "INT" + ], + [ + 202, + 79, + 0, + 56, + 1, + "LATENT" + ], + [ + 203, + 78, + 1, + 66, + 0, + "IMAGE" + ], + [ + 204, + 80, + 0, + 79, + 0, + "COGVIDEOMODEL" + ], + [ + 205, + 80, + 1, + 78, + 1, + "VAE" + ], + [ + 206, + 80, + 1, + 56, + 0, + "VAE" + ], + [ + 208, + 80, + 1, + 71, + 0, + "VAE" + ], + [ + 209, + 30, + 1, + 31, + 0, + "CLIP" + ], + [ + 210, + 73, + 0, + 82, + 0, + "IMAGE" + ], + [ + 211, + 82, + 1, + 83, + 1, + "STRING" + ], + [ + 212, + 60, + 1, + 83, + 0, + "STRING" + ], + [ + 216, + 73, + 0, + 85, + 0, + "IMAGE" + ], + [ + 217, + 83, + 0, + 86, + 0, + "STRING" + ], + [ + 218, + 85, + 1, + 86, + 1, + "STRING" + ], + [ + 219, + 86, + 0, + 65, + 0, + "STRING" + ], + [ + 220, + 86, + 0, + 78, + 2, + "STRING" + ] + ], + "groups": [ + { + "title": "VisualizeTrajectories", + "bounding": [ + 1124, + 1198, + 832, + 413 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "config": {}, + "extra": { + "ds": { + "scale": 0.39142513012212404, + "offset": [ + 2198.0900495441047, + 429.7964748076673 + ] + } + }, + "version": 0.4 +} \ No newline at end of file diff --git a/examples/cogvideox_5b_example_01.json b/examples/cogvideox_1_0_5b_T2V_02.json similarity index 68% rename from examples/cogvideox_5b_example_01.json rename to examples/cogvideox_1_0_5b_T2V_02.json index af7522a..01c3669 100644 --- a/examples/cogvideox_5b_example_01.json +++ b/examples/cogvideox_1_0_5b_T2V_02.json @@ -1,48 +1,7 @@ { - "last_node_id": 34, - "last_link_id": 64, + "last_node_id": 37, + "last_link_id": 72, "nodes": [ - { - "id": 31, - "type": "CogVideoTextEncode", - "pos": { - "0": 503, - "1": 521 - }, - "size": { - "0": 463.01251220703125, - "1": 124 - }, - "flags": {}, - "order": 3, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 56 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 62 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "", - 1, - true - ] - }, { "id": 30, "type": "CogVideoTextEncode", @@ -50,12 +9,12 @@ "0": 500, "1": 308 }, - "size": { - "0": 471.90142822265625, - "1": 168.08047485351562 - }, + "size": [ + 470.99399664051055, + 237.5088638951354 + ], "flags": {}, - "order": 2, + "order": 3, "mode": 0, "inputs": [ { @@ -69,10 +28,18 @@ "name": "conditioning", "type": "CONDITIONING", "links": [ - 61 + 67 ], "slot_index": 0, "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 65 + ], + "slot_index": 1 } ], "properties": { @@ -81,192 +48,79 @@ "widgets_values": [ "A golden retriever, sporting sleek 
black sunglasses, with its lengthy fur flowing in the breeze, sprints playfully across a rooftop terrace, recently refreshed by a light rain. The scene unfolds from a distance, the dog's energetic bounds growing larger as it approaches the camera, its tail wagging with unrestrained joy, while droplets of water glisten on the concrete behind it. The overcast sky provides a dramatic backdrop, emphasizing the vibrant golden coat of the canine as it dashes towards the viewer.\n\n", 1, - true + false ] }, { - "id": 33, - "type": "VHS_VideoCombine", + "id": 31, + "type": "CogVideoTextEncode", "pos": { - "0": 1441, - "1": 129 + "0": 503, + "1": 602 }, "size": [ - 778.7022705078125, - 310 + 464.4980515341475, + 169.87479027400514 ], "flags": {}, - "order": 6, + "order": 4, "mode": 0, "inputs": [ { - "name": "images", - "type": "IMAGE", - "link": 59 - }, - { - "name": "audio", - "type": "AUDIO", - "link": null - }, - { - "name": "meta_batch", - "type": "VHS_BatchManager", - "link": null - }, - { - "name": "vae", - "type": "VAE", - "link": null - } - ], - "outputs": [ - { - "name": "Filenames", - "type": "VHS_FILENAMES", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "VHS_VideoCombine" - }, - "widgets_values": { - "frame_rate": 8, - "loop_count": 0, - "filename_prefix": "CogVideoX5B", - "format": "video/h264-mp4", - "pix_fmt": "yuv420p", - "crf": 19, - "save_metadata": true, - "pingpong": false, - "save_output": false, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "CogVideoX5B_00009.mp4", - "subfolder": "", - "type": "temp", - "format": "video/h264-mp4", - "frame_rate": 8 - }, - "muted": false - } - } - }, - { - "id": 20, - "type": "CLIPLoader", - "pos": { - "0": -26, - "1": 400 - }, - "size": { - "0": 451.30548095703125, - "1": 82 - }, - "flags": {}, - "order": 0, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "CLIP", + "name": "clip", "type": "CLIP", - "links": [ - 54, - 56 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CLIPLoader" - }, - "widgets_values": [ - "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", - "sd3" - ] - }, - { - "id": 1, - "type": "DownloadAndLoadCogVideoModel", - "pos": { - "0": 642, - "1": 90 - }, - "size": { - "0": 315, - "1": 194 - }, - "flags": {}, - "order": 1, - "mode": 0, - "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null - }, - { - "name": "block_edit", - "type": "TRANSFORMERBLOCKS", - "link": null - }, - { - "name": "lora", - "type": "COGLORA", - "link": null + "link": 65 } ], "outputs": [ { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", + "name": "conditioning", + "type": "CONDITIONING", "links": [ - 60 + 68 ], "slot_index": 0, "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null } ], "properties": { - "Node name for S&R": "DownloadAndLoadCogVideoModel" + "Node name for S&R": "CogVideoTextEncode" }, "widgets_values": [ - "THUDM/CogVideoX-5b", - "bf16", - "disabled", - "disabled", - false + "", + 1, + true ] }, { "id": 11, "type": "CogVideoDecode", "pos": { - "0": 1051, - "1": 748 + "0": 1416, + "1": 40 }, "size": { "0": 300.396484375, "1": 198 }, "flags": {}, - "order": 5, + "order": 6, "mode": 0, "inputs": [ { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 63 + "name": "vae", + "type": "VAE", + "link": 71 }, { "name": "samples", "type": "LATENT", - "link": 64 + "link": 69 } ], "outputs": [ @@ -293,83 +147,297 @@ ] }, { - "id": 34, - "type": "CogVideoSampler", + "id": 
36, + "type": "DownloadAndLoadCogVideoModel", "pos": { - "0": 1041, - "1": 342 + "0": 645, + "1": 17 }, "size": { - "0": 315.8404846191406, - "1": 358 + "0": 315, + "1": 218 }, "flags": {}, - "order": 4, + "order": 0, "mode": 0, "inputs": [ { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 60 + "name": "block_edit", + "type": "TRANSFORMERBLOCKS", + "link": null, + "shape": 7 }, { - "name": "positive", - "type": "CONDITIONING", - "link": 61 + "name": "lora", + "type": "COGLORA", + "link": null, + "shape": 7 }, { - "name": "negative", - "type": "CONDITIONING", - "link": 62 - }, - { - "name": "samples", - "type": "LATENT", - "link": null - }, - { - "name": "image_cond_latents", - "type": "LATENT", - "link": null - }, - { - "name": "context_options", - "type": "COGCONTEXT", - "link": null + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 } ], "outputs": [ { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", + "name": "model", + "type": "COGVIDEOMODEL", "links": [ - 63 + 70 + ] + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 71 ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadCogVideoModel" + }, + "widgets_values": [ + "THUDM/CogVideoX-5b", + "bf16", + "disabled", + false, + "sdpa", + "main_device" + ] + }, + { + "id": 20, + "type": "CLIPLoader", + "pos": { + "0": 5, + "1": 308 + }, + "size": { + "0": 451.30548095703125, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54 + ], + "slot_index": 0, "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CLIPLoader" + }, + "widgets_values": [ + "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", + "sd3" + ] + }, + { + "id": 37, + "type": "EmptyLatentImage", + "pos": { + "0": 643, + "1": 827 + }, + "size": { + "0": 315, + "1": 106 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "LATENT", + "type": "LATENT", + "links": [ + 72 + ] + } + ], + "properties": { + "Node name for S&R": "EmptyLatentImage" + }, + "widgets_values": [ + 720, + 480, + 1 + ] + }, + { + "id": 35, + "type": "CogVideoSampler", + "pos": { + "0": 1042, + "1": 291 + }, + "size": [ + 330, + 574 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 70 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 67 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 68 }, + { + "name": "samples", + "type": "LATENT", + "link": 72, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": null, + "shape": 7 + }, + { + "name": "context_options", + "type": "COGCONTEXT", + "link": null, + "shape": 7 + }, + { + "name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": null, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ { "name": "samples", "type": "LATENT", "links": [ - 64 - ], - "shape": 3 + 69 + ] } ], "properties": { "Node name for S&R": "CogVideoSampler" }, "widgets_values": [ - 480, - 720, 49, 50, 6, - 806286757407563, + 0, "fixed", - "DPM++", + "CogVideoXDDIM", 1 ] + }, + { + "id": 33, + "type": "VHS_VideoCombine", + "pos": { + "0": 1767, + "1": 39 + }, + "size": [ + 778.7022705078125, + 829.801513671875 + ], + "flags": {}, + "order": 7, + "mode": 0, + 
"inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 59 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "CogVideoX5B-T2V", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "CogVideoX5B_00001.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + }, + "muted": false + } + } } ], "links": [ @@ -381,14 +449,6 @@ 0, "CLIP" ], - [ - 56, - 20, - 0, - 31, - 0, - "CLIP" - ], [ 59, 11, @@ -398,43 +458,59 @@ "IMAGE" ], [ - 60, + 65, + 30, 1, + 31, 0, - 34, - 0, - "COGVIDEOPIPE" + "CLIP" ], [ - 61, + 67, 30, 0, - 34, + 35, 1, "CONDITIONING" ], [ - 62, + 68, 31, 0, - 34, + 35, 2, "CONDITIONING" ], [ - 63, - 34, + 69, + 35, 0, 11, - 0, - "COGVIDEOPIPE" + 1, + "LATENT" ], [ - 64, - 34, + 70, + 36, + 0, + 35, + 0, + "COGVIDEOMODEL" + ], + [ + 71, + 36, 1, 11, - 1, + 0, + "VAE" + ], + [ + 72, + 37, + 0, + 35, + 3, "LATENT" ] ], @@ -442,10 +518,10 @@ "config": {}, "extra": { "ds": { - "scale": 0.6934334949442514, + "scale": 0.7627768444387061, "offset": [ - -24.154349208343916, - 155.20539218330134 + 734.1791945221892, + 237.29437844909364 ] } }, diff --git a/examples/cogvideox_interpolation_example_01.json b/examples/cogvideox_1_0_5b_interpolation_02.json similarity index 78% rename from examples/cogvideox_interpolation_example_01.json rename to examples/cogvideox_1_0_5b_interpolation_02.json index 8198543..6bea1e8 100644 --- a/examples/cogvideox_interpolation_example_01.json +++ b/examples/cogvideox_1_0_5b_interpolation_02.json @@ -1,42 +1,7 @@ { - "last_node_id": 67, - "last_link_id": 152, + "last_node_id": 68, + "last_link_id": 155, "nodes": [ - { - "id": 20, - "type": "CLIPLoader", - "pos": { - "0": -26, - "1": 400 - }, - "size": { - "0": 451.30548095703125, - "1": 82 - }, - "flags": {}, - "order": 0, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "CLIP", - "type": "CLIP", - "links": [ - 54, - 56 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CLIPLoader" - }, - "widgets_values": [ - "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", - "sd3" - ] - }, { "id": 31, "type": "CogVideoTextEncode", @@ -46,16 +11,16 @@ }, "size": { "0": 463.01251220703125, - "1": 124 + "1": 144 }, "flags": {}, - "order": 5, + "order": 6, "mode": 0, "inputs": [ { "name": "clip", "type": "CLIP", - "link": 56 + "link": 149 } ], "outputs": [ @@ -63,10 +28,15 @@ "name": "conditioning", "type": "CONDITIONING", "links": [ - 123 + 146 ], "slot_index": 0, "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null } ], "properties": { @@ -78,6 +48,95 @@ true ] }, + { + "id": 63, + "type": "CogVideoSampler", + "pos": { + "0": 1142, + "1": 74 + }, + "size": [ + 330, + 574 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 144 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 145 + }, + { + "name": "negative", + 
"type": "CONDITIONING", + "link": 146 + }, + { + "name": "samples", + "type": "LATENT", + "link": null, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": 147, + "shape": 7 + }, + { + "name": "context_options", + "type": "COGCONTEXT", + "link": null, + "shape": 7 + }, + { + "name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": null, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 148 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoSampler" + }, + "widgets_values": [ + 49, + 25, + 6, + 0, + "fixed", + "CogVideoXDDIM", + 1 + ] + }, { "id": 30, "type": "CogVideoTextEncode", @@ -104,10 +163,18 @@ "name": "conditioning", "type": "CONDITIONING", "links": [ - 122 + 145 ], "slot_index": 0, "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 149 + ], + "slot_index": 1 } ], "properties": { @@ -116,297 +183,135 @@ "widgets_values": [ "a majestic stag is grazing in an enhanced forest, basking in the setting sun filtered by the trees", 1, - true + false ] }, { - "id": 57, - "type": "CogVideoSampler", + "id": 20, + "type": "CLIPLoader", "pos": { - "0": 1138, - "1": 150 + "0": -2, + "1": 304 }, "size": { - "0": 399.8780822753906, - "1": 370 + "0": 451.30548095703125, + "1": 82 }, "flags": {}, - "order": 9, + "order": 0, "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 121 - }, - { - "name": "positive", - "type": "CONDITIONING", - "link": 122 - }, - { - "name": "negative", - "type": "CONDITIONING", - "link": 123 - }, - { - "name": "samples", - "type": "LATENT", - "link": null, - "shape": 7 - }, - { - "name": "image_cond_latents", - "type": "LATENT", - "link": 146, - "shape": 7 - }, - { - "name": "context_options", - "type": "COGCONTEXT", - "link": null, - "shape": 7 - }, - { - "name": "controlnet", - "type": "COGVIDECONTROLNET", - "link": null, - "shape": 7 - } - ], + "inputs": [], "outputs": [ { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", + "name": "CLIP", + "type": "CLIP", "links": [ - 128 + 54 ], "slot_index": 0, "shape": 3 - }, - { - "name": "samples", - "type": "LATENT", - "links": [ - 127 - ], - "shape": 3 } ], "properties": { - "Node name for S&R": "CogVideoSampler" + "Node name for S&R": "CLIPLoader" }, "widgets_values": [ - 480, - 720, - 49, - 20, - 6, - 65334758276105, - "fixed", - "CogVideoXDPMScheduler", - 1 + "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", + "sd3" ] }, { - "id": 1, - "type": "DownloadAndLoadCogVideoModel", + "id": 36, + "type": "LoadImage", "pos": { - "0": 633, - "1": 44 + "0": 105, + "1": 732 }, "size": { - "0": 337.8885192871094, - "1": 194 + "0": 402.06353759765625, + "1": 396.6225891113281 }, "flags": {}, "order": 1, "mode": 0, - "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null, - "shape": 7 - }, - { - "name": "block_edit", - "type": "TRANSFORMERBLOCKS", - "link": null, - "shape": 7 - }, - { - "name": "lora", - "type": "COGLORA", - "link": null, - "shape": 7 - } - ], + "inputs": [], "outputs": [ { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", + "name": "IMAGE", + "type": "IMAGE", "links": [ - 121, - 149 + 71 ], "slot_index": 0, "shape": 3 - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadCogVideoModel" - }, - "widgets_values": [ - 
"feizhengcong/CogvideoX-Interpolation", - "bf16", - "disabled", - "disabled", - false - ] - }, - { - "id": 65, - "type": "CogVideoImageInterpolationEncode", - "pos": { - "0": 1123, - "1": 647 - }, - "size": [ - 331.6177535935244, - 118 - ], - "flags": {}, - "order": 8, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 149 }, { - "name": "start_image", - "type": "IMAGE", - "link": 147 - }, - { - "name": "end_image", - "type": "IMAGE", - "link": 152 - }, - { - "name": "mask", + "name": "MASK", "type": "MASK", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "samples", - "type": "LATENT", - "links": [ - 146 - ], - "slot_index": 0 - } - ], - "properties": { - "Node name for S&R": "CogVideoImageInterpolationEncode" - }, - "widgets_values": [ - false - ] - }, - { - "id": 44, - "type": "VHS_VideoCombine", - "pos": { - "0": 1927, - "1": 146 - }, - "size": [ - 605.3909912109375, - 714.2606608072917 - ], - "flags": {}, - "order": 11, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 118 - }, - { - "name": "audio", - "type": "AUDIO", - "link": null, - "shape": 7 - }, - { - "name": "meta_batch", - "type": "VHS_BatchManager", - "link": null, - "shape": 7 - }, - { - "name": "vae", - "type": "VAE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "Filenames", - "type": "VHS_FILENAMES", "links": null, "shape": 3 } ], "properties": { - "Node name for S&R": "VHS_VideoCombine" + "Node name for S&R": "LoadImage" }, - "widgets_values": { - "frame_rate": 8, - "loop_count": 0, - "filename_prefix": "CogVideoX_interpolation", - "format": "video/h264-mp4", - "pix_fmt": "yuv420p", - "crf": 19, - "save_metadata": true, - "pingpong": false, - "save_output": false, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "CogVideoX-I2V_00001.mp4", - "subfolder": "", - "type": "temp", - "format": "video/h264-mp4", - "frame_rate": 8 - }, - "muted": false - } - } + "widgets_values": [ + "sd3stag.png", + "image" + ] }, { - "id": 67, + "id": 64, + "type": "LoadImage", + "pos": { + "0": 105, + "1": 1189 + }, + "size": { + "0": 402.06353759765625, + "1": 396.6225891113281 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 151 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "sd3stag.png", + "image" + ] + }, + { + "id": 65, "type": "ImageResizeKJ", "pos": { - "0": 569, - "1": 1173 + "0": 607, + "1": 1188 }, "size": [ 315, 266 ], - "flags": { - "collapsed": true - }, + "flags": {}, "order": 7, "mode": 0, "inputs": [ @@ -418,7 +323,7 @@ { "name": "get_image_size", "type": "IMAGE", - "link": 150, + "link": null, "shape": 7 }, { @@ -438,6 +343,22 @@ "name": "height_input" }, "shape": 7 + }, + { + "name": "width", + "type": "INT", + "link": 152, + "widget": { + "name": "width" + } + }, + { + "name": "height", + "type": "INT", + "link": 153, + "widget": { + "name": "height" + } } ], "outputs": [ @@ -445,7 +366,7 @@ "name": "IMAGE", "type": "IMAGE", "links": [ - 152 + 155 ], "slot_index": 0, "shape": 3 @@ -453,15 +374,13 @@ { "name": "width", "type": "INT", - "links": [], - "slot_index": 1, + "links": null, "shape": 3 }, { "name": "height", "type": "INT", - "links": [], - "slot_index": 2, + "links": null, "shape": 3 } ], @@ -483,15 +402,15 @@ "id": 37, "type": 
"ImageResizeKJ", "pos": { - "0": 537, - "1": 722 + "0": 593, + "1": 731 }, "size": { "0": 315, "1": 266 }, "flags": {}, - "order": 6, + "order": 5, "mode": 0, "inputs": [ { @@ -527,8 +446,7 @@ "name": "IMAGE", "type": "IMAGE", "links": [ - 147, - 150 + 142 ], "slot_index": 0, "shape": 3 @@ -536,16 +454,20 @@ { "name": "width", "type": "INT", - "links": [], - "slot_index": 1, - "shape": 3 + "links": [ + 152 + ], + "shape": 3, + "slot_index": 1 }, { "name": "height", "type": "INT", - "links": [], - "slot_index": 2, - "shape": 3 + "links": [ + 153 + ], + "shape": 3, + "slot_index": 2 } ], "properties": { @@ -563,96 +485,14 @@ ] }, { - "id": 36, - "type": "LoadImage", - "pos": { - "0": 20, - "1": 674 - }, - "size": { - "0": 402.06353759765625, - "1": 396.6225891113281 - }, - "flags": {}, - "order": 2, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 71 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "MASK", - "type": "MASK", - "links": null, - "shape": 3 - } - ], - "title": "Load Image: Start", - "properties": { - "Node name for S&R": "LoadImage" - }, - "widgets_values": [ - "sd3stag.png", - "image" - ] - }, - { - "id": 66, - "type": "LoadImage", - "pos": { - "0": 20, - "1": 1121 - }, - "size": { - "0": 402.06353759765625, - "1": 396.6225891113281 - }, - "flags": {}, - "order": 3, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 151 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "MASK", - "type": "MASK", - "links": null, - "shape": 3 - } - ], - "title": "Load Image: End", - "properties": { - "Node name for S&R": "LoadImage" - }, - "widgets_values": [ - "sd3stag.png", - "image" - ] - }, - { - "id": 56, + "id": 60, "type": "CogVideoDecode", "pos": { - "0": 1581, - "1": 148 + "0": 1526, + "1": -4 }, "size": { - "0": 300.396484375, + "0": 315, "1": 198 }, "flags": {}, @@ -660,14 +500,14 @@ "mode": 0, "inputs": [ { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 128 + "name": "vae", + "type": "VAE", + "link": 132 }, { "name": "samples", "type": "LATENT", - "link": 127 + "link": 148 } ], "outputs": [ @@ -675,10 +515,8 @@ "name": "images", "type": "IMAGE", "links": [ - 118 - ], - "slot_index": 0, - "shape": 3 + 134 + ] } ], "properties": { @@ -692,6 +530,193 @@ 0.2, true ] + }, + { + "id": 62, + "type": "CogVideoImageEncode", + "pos": { + "0": 1152, + "1": 706 + }, + "size": { + "0": 315, + "1": 122 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 141 + }, + { + "name": "start_image", + "type": "IMAGE", + "link": 142 + }, + { + "name": "end_image", + "type": "IMAGE", + "link": 155, + "shape": 7 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 147 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoImageEncode" + }, + "widgets_values": [ + false, + 0 + ] + }, + { + "id": 44, + "type": "VHS_VideoCombine", + "pos": { + "0": 1884, + "1": -3 + }, + "size": [ + 605.3909912109375, + 714.2606608072917 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 134 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + 
"properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "CogVideoX-Interpolation", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "CogVideoX-I2V_00003.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + }, + "muted": false + } + } + }, + { + "id": 59, + "type": "DownloadAndLoadCogVideoModel", + "pos": { + "0": 622, + "1": -25 + }, + "size": [ + 347.24594407027485, + 218 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "block_edit", + "type": "TRANSFORMERBLOCKS", + "link": null, + "shape": 7 + }, + { + "name": "lora", + "type": "COGLORA", + "link": null, + "shape": 7 + }, + { + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "links": [ + 144 + ] + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 132, + 141 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadCogVideoModel" + }, + "widgets_values": [ + "feizhengcong/CogvideoX-Interpolation", + "bf16", + "disabled", + false, + "sdpa", + "main_device" + ] } ], "links": [ @@ -703,14 +728,6 @@ 0, "CLIP" ], - [ - 56, - 20, - 0, - 31, - 0, - "CLIP" - ], [ 71, 36, @@ -720,98 +737,114 @@ "IMAGE" ], [ - 118, - 56, + 132, + 59, + 1, + 60, + 0, + "VAE" + ], + [ + 134, + 60, 0, 44, 0, "IMAGE" ], [ - 121, + 141, + 59, 1, + 62, 0, - 57, - 0, - "COGVIDEOPIPE" + "VAE" ], [ - 122, + 142, + 37, + 0, + 62, + 1, + "IMAGE" + ], + [ + 144, + 59, + 0, + 63, + 0, + "COGVIDEOMODEL" + ], + [ + 145, 30, 0, - 57, + 63, 1, "CONDITIONING" ], [ - 123, + 146, 31, 0, - 57, + 63, 2, "CONDITIONING" ], [ - 127, - 57, - 1, - 56, - 1, - "LATENT" - ], - [ - 128, - 57, + 147, + 62, 0, - 56, - 0, - "COGVIDEOPIPE" - ], - [ - 146, - 65, - 0, - 57, + 63, 4, "LATENT" ], [ - 147, - 37, + 148, + 63, 0, - 65, + 60, 1, - "IMAGE" + "LATENT" ], [ 149, + 30, 1, + 31, 0, - 65, - 0, - "COGVIDEOPIPE" - ], - [ - 150, - 37, - 0, - 67, - 1, - "IMAGE" + "CLIP" ], [ 151, - 66, + 64, 0, - 67, + 65, 0, "IMAGE" ], [ 152, - 67, - 0, + 37, + 1, 65, + 4, + "INT" + ], + [ + 153, + 37, + 2, + 65, + 5, + "INT" + ], + [ + 155, + 65, + 0, + 62, 2, "IMAGE" ] @@ -820,10 +853,10 @@ "config": {}, "extra": { "ds": { - "scale": 0.693433494944327, + "scale": 0.7627768444387061, "offset": [ - 225.6761629383604, - -15.041612364034256 + 630.1733472923837, + 148.14641794691272 ] } }, diff --git a/examples/cogvideo_5b_vid2vid_example_01.json b/examples/cogvideox_1_0_5b_vid2vid_02.json similarity index 84% rename from examples/cogvideo_5b_vid2vid_example_01.json rename to examples/cogvideox_1_0_5b_vid2vid_02.json index 5c545e6..a45ff0b 100644 --- a/examples/cogvideo_5b_vid2vid_example_01.json +++ b/examples/cogvideox_1_0_5b_vid2vid_02.json @@ -1,6 +1,6 @@ { - "last_node_id": 74, - "last_link_id": 200, + "last_node_id": 78, + "last_link_id": 218, "nodes": [ { "id": 20, @@ -22,8 +22,7 @@ "name": "CLIP", "type": "CLIP", "links": [ - 54, - 56 + 54 ], "slot_index": 0, "shape": 3 @@ -129,7 +128,7 @@ 365.7275390625 ], "flags": {}, - "order": 5, + "order": 4, "mode": 0, "inputs": [ { @@ -305,7 +304,459 @@ "bgcolor": "#29699c" }, { - "id": 1, + "id": 58, + "type": "ImageConcanate", + "pos": { + "0": 1594, + "1": 230 + }, + "size": { + "0": 315, + "1": 102 + }, + 
"flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "image1", + "type": "IMAGE", + "link": 191 + }, + { + "name": "image2", + "type": "IMAGE", + "link": 170 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 132 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageConcanate" + }, + "widgets_values": [ + "right", + false + ] + }, + { + "id": 55, + "type": "GetImageSizeAndCount", + "pos": { + "0": 1654, + "1": 77 + }, + "size": { + "0": 210, + "1": 86 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 208, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 170 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "720 width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "480 height", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "33 count", + "type": "INT", + "links": [], + "slot_index": 3, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "GetImageSizeAndCount" + }, + "widgets_values": [] + }, + { + "id": 77, + "type": "CogVideoImageEncode", + "pos": { + "0": 952, + "1": -118 + }, + "size": { + "0": 315, + "1": 122 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 209 + }, + { + "name": "start_image", + "type": "IMAGE", + "link": 210 + }, + { + "name": "end_image", + "type": "IMAGE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 215 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoImageEncode" + }, + "widgets_values": [ + false, + 0 + ] + }, + { + "id": 76, + "type": "CogVideoDecode", + "pos": { + "0": 1335, + "1": -123 + }, + "size": { + "0": 315, + "1": 198 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 206 + }, + { + "name": "samples", + "type": "LATENT", + "link": 216 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 208 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoDecode" + }, + "widgets_values": [ + true, + 240, + 360, + 0.2, + 0.2, + true + ] + }, + { + "id": 30, + "type": "CogVideoTextEncode", + "pos": { + "0": 491, + "1": 372 + }, + "size": [ + 478.6890949595422, + 215.66308749666905 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 213 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 217 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "A high-definition nature video showcasing a brown bear as it gracefully runs down a crystal-clear stream, surrounded by the serene ambiance of a dense, verdant forest. The sunlight filters through the canopy of tall trees, casting dappled light on the forest floor, while the gentle sound of flowing water and rustling leaves creates a peaceful atmosphere. 
The brown bear's fur glistens in the sunlight, highlighting its striking red and white markings as it navigates the stream with agility and playfulness.", + 1, + false + ] + }, + { + "id": 31, + "type": "CogVideoTextEncode", + "pos": { + "0": 504, + "1": 651 + }, + "size": { + "0": 463.01251220703125, + "1": 144 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 217 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 214 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "", + 1, + true + ] + }, + { + "id": 78, + "type": "CogVideoSampler", + "pos": { + "0": 1083, + "1": 255 + }, + "size": [ + 330, + 574 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 212 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 213 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 214 + }, + { + "name": "samples", + "type": "LATENT", + "link": 215, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": null, + "shape": 7 + }, + { + "name": "context_options", + "type": "COGCONTEXT", + "link": null, + "shape": 7 + }, + { + "name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": null, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, + "shape": 7 + }, + { + "name": "num_frames", + "type": "INT", + "link": 218, + "widget": { + "name": "num_frames" + } + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 216 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoSampler" + }, + "widgets_values": [ + 49, + 25, + 6, + 0, + "fixed", + "CogVideoXDDIM", + 0.8 + ] + }, + { + "id": 57, + "type": "GetImageSizeAndCount", + "pos": { + "0": 595, + "1": -79 + }, + "size": { + "0": 202.2143096923828, + "1": 99.23601531982422 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 126, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 191, + 210 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "720 width", + "type": "INT", + "links": [], + "slot_index": 1, + "shape": 3 + }, + { + "name": "480 height", + "type": "INT", + "links": [], + "slot_index": 2, + "shape": 3 + }, + { + "name": "33 count", + "type": "INT", + "links": [ + 218 + ], + "slot_index": 3, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "GetImageSizeAndCount" + }, + "widgets_values": [] + }, + { + "id": 75, "type": "DownloadAndLoadCogVideoModel", "pos": { "0": 606, @@ -313,18 +764,12 @@ }, "size": { "0": 315, - "1": 194 + "1": 218 }, "flags": {}, "order": 2, "mode": 0, "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null, - "shape": 7 - }, { "name": "block_edit", "type": "TRANSFORMERBLOCKS", @@ -336,18 +781,29 @@ "type": "COGLORA", "link": null, "shape": 7 + }, + { + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 } ], "outputs": [ { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", + "name": "model", + "type": "COGVIDEOMODEL", "links": [ - 83, - 192 - ], - "slot_index": 0, - "shape": 3 + 212 + ] + }, + { + "name": "vae", + 
"type": "VAE", + "links": [ + 206, + 209 + ] } ], "properties": { @@ -355,75 +811,23 @@ }, "widgets_values": [ "THUDM/CogVideoX-5b", - "fp16", + "bf16", "disabled", - "disabled", - false - ] - }, - { - "id": 37, - "type": "CogVideoImageEncode", - "pos": { - "0": 975, - "1": -73 - }, - "size": { - "0": 210, - "1": 122 - }, - "flags": {}, - "order": 9, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 83, - "slot_index": 0 - }, - { - "name": "image", - "type": "IMAGE", - "link": 129, - "slot_index": 1 - }, - { - "name": "mask", - "type": "MASK", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "samples", - "type": "LATENT", - "links": [ - 195 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoImageEncode" - }, - "widgets_values": [ - 8, - true + false, + "sdpa", + "main_device" ] }, { "id": 47, "type": "VHS_VideoCombine", "pos": { - "0": 1788, - "1": -364 + "0": 1946, + "1": -172 }, "size": [ 1110, - 310 + 687.3333333333333 ], "flags": {}, "order": 14, @@ -478,7 +882,7 @@ "hidden": false, "paused": false, "params": { - "filename": "CogVideoX_vid2vid_00001.mp4", + "filename": "CogVideoX_vid2vid_00003.mp4", "subfolder": "", "type": "temp", "format": "video/h264-mp4", @@ -486,421 +890,6 @@ } } } - }, - { - "id": 55, - "type": "GetImageSizeAndCount", - "pos": { - "0": 1205, - "1": 137 - }, - "size": { - "0": 210, - "1": 86 - }, - "flags": {}, - "order": 12, - "mode": 0, - "inputs": [ - { - "name": "image", - "type": "IMAGE", - "link": 118, - "slot_index": 0 - } - ], - "outputs": [ - { - "name": "image", - "type": "IMAGE", - "links": [ - 170 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "width", - "type": "INT", - "links": null, - "shape": 3 - }, - { - "name": "height", - "type": "INT", - "links": null, - "shape": 3 - }, - { - "name": "count", - "type": "INT", - "links": [], - "slot_index": 3, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "GetImageSizeAndCount" - }, - "widgets_values": [] - }, - { - "id": 58, - "type": "ImageConcanate", - "pos": { - "0": 1594, - "1": 230 - }, - "size": { - "0": 315, - "1": 102 - }, - "flags": {}, - "order": 13, - "mode": 0, - "inputs": [ - { - "name": "image1", - "type": "IMAGE", - "link": 191 - }, - { - "name": "image2", - "type": "IMAGE", - "link": 170 - } - ], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 132 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "ImageConcanate" - }, - "widgets_values": [ - "right", - false - ] - }, - { - "id": 11, - "type": "CogVideoDecode", - "pos": { - "0": 1116, - "1": 735 - }, - "size": { - "0": 301.1664123535156, - "1": 198 - }, - "flags": {}, - "order": 11, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 196 - }, - { - "name": "samples", - "type": "LATENT", - "link": 197 - } - ], - "outputs": [ - { - "name": "images", - "type": "IMAGE", - "links": [ - 118 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoDecode" - }, - "widgets_values": [ - false, - 96, - 96, - 0.083, - 0.083, - true - ] - }, - { - "id": 57, - "type": "GetImageSizeAndCount", - "pos": { - "0": 603, - "1": -65 - }, - "size": { - "0": 202.2143096923828, - "1": 99.23601531982422 - }, - "flags": {}, - "order": 8, - "mode": 0, - "inputs": [ - { - "name": "image", - "type": "IMAGE", - "link": 126, - "slot_index": 0 - } - ], - "outputs": [ - { - "name": "image", - "type": "IMAGE", - "links": 
[ - 129, - 191 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "720 width", - "type": "INT", - "links": [ - 199 - ], - "slot_index": 1, - "shape": 3 - }, - { - "name": "480 height", - "type": "INT", - "links": [ - 198 - ], - "slot_index": 2, - "shape": 3 - }, - { - "name": "33 count", - "type": "INT", - "links": [ - 200 - ], - "slot_index": 3, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "GetImageSizeAndCount" - }, - "widgets_values": [] - }, - { - "id": 74, - "type": "CogVideoSampler", - "pos": { - "0": 1084, - "1": 278 - }, - "size": [ - 326.6858603197775, - 380.14142158858795 - ], - "flags": {}, - "order": 10, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 192 - }, - { - "name": "positive", - "type": "CONDITIONING", - "link": 193 - }, - { - "name": "negative", - "type": "CONDITIONING", - "link": 194 - }, - { - "name": "samples", - "type": "LATENT", - "link": 195, - "shape": 7 - }, - { - "name": "image_cond_latents", - "type": "LATENT", - "link": null, - "shape": 7 - }, - { - "name": "context_options", - "type": "COGCONTEXT", - "link": null, - "shape": 7 - }, - { - "name": "controlnet", - "type": "COGVIDECONTROLNET", - "link": null, - "shape": 7 - }, - { - "name": "tora_trajectory", - "type": "TORAFEATURES", - "link": null, - "shape": 7 - }, - { - "name": "height", - "type": "INT", - "link": 198, - "widget": { - "name": "height" - } - }, - { - "name": "width", - "type": "INT", - "link": 199, - "widget": { - "name": "width" - } - }, - { - "name": "num_frames", - "type": "INT", - "link": 200, - "widget": { - "name": "num_frames" - } - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 196 - ] - }, - { - "name": "samples", - "type": "LATENT", - "links": [ - 197 - ] - } - ], - "properties": { - "Node name for S&R": "CogVideoSampler" - }, - "widgets_values": [ - 480, - 720, - 49, - 25, - 6, - 0, - "fixed", - "CogVideoXDDIM", - 1 - ] - }, - { - "id": 31, - "type": "CogVideoTextEncode", - "pos": { - "0": 505, - "1": 545 - }, - "size": { - "0": 463.01251220703125, - "1": 124 - }, - "flags": {}, - "order": 4, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 56 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 194 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "", - 1, - true - ] - }, - { - "id": 30, - "type": "CogVideoTextEncode", - "pos": { - "0": 503, - "1": 328 - }, - "size": { - "0": 474.8450012207031, - "1": 164.7423553466797 - }, - "flags": {}, - "order": 3, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 54 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 193 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "A high-definition nature video showcasing a brown bear as it gracefully runs down a crystal-clear stream, surrounded by the serene ambiance of a dense, verdant forest. The sunlight filters through the canopy of tall trees, casting dappled light on the forest floor, while the gentle sound of flowing water and rustling leaves creates a peaceful atmosphere. 
The brown bear's fur glistens in the sunlight, highlighting its striking red and white markings as it navigates the stream with agility and playfulness.", - 1, - true - ] } ], "links": [ @@ -912,30 +901,6 @@ 0, "CLIP" ], - [ - 56, - 20, - 0, - 31, - 0, - "CLIP" - ], - [ - 83, - 1, - 0, - 37, - 0, - "COGVIDEOPIPE" - ], - [ - 118, - 11, - 0, - 55, - 0, - "IMAGE" - ], [ 126, 41, @@ -944,14 +909,6 @@ 0, "IMAGE" ], - [ - 129, - 57, - 0, - 37, - 1, - "IMAGE" - ], [ 132, 58, @@ -1001,86 +958,102 @@ "IMAGE" ], [ - 192, + 206, + 75, 1, + 76, 0, - 74, - 0, - "COGVIDEOPIPE" + "VAE" ], [ - 193, + 208, + 76, + 0, + 55, + 0, + "IMAGE" + ], + [ + 209, + 75, + 1, + 77, + 0, + "VAE" + ], + [ + 210, + 57, + 0, + 77, + 1, + "IMAGE" + ], + [ + 212, + 75, + 0, + 78, + 0, + "COGVIDEOMODEL" + ], + [ + 213, 30, 0, - 74, + 78, 1, "CONDITIONING" ], [ - 194, + 214, 31, 0, - 74, + 78, 2, "CONDITIONING" ], [ - 195, - 37, + 215, + 77, 0, - 74, + 78, 3, "LATENT" ], [ - 196, - 74, + 216, + 78, 0, - 11, - 0, - "COGVIDEOPIPE" - ], - [ - 197, - 74, - 1, - 11, + 76, 1, "LATENT" ], [ - 198, - 57, - 2, - 74, - 8, - "INT" + 217, + 30, + 1, + 31, + 0, + "CLIP" ], [ - 199, + 218, 57, - 1, - 74, + 3, + 78, 9, "INT" - ], - [ - 200, - 57, - 3, - 74, - 10, - "INT" ] ], "groups": [], "config": {}, "extra": { "ds": { - "scale": 0.6934334949442321, + "scale": 0.8390545288825798, "offset": [ - 444.4636820418327, - 493.4387780923951 + -318.82552550589344, + 331.70430573737934 ] } }, diff --git a/examples/cogvideox_5b_Tora_I2V_testing_01.json b/examples/cogvideox_5b_Tora_I2V_testing_01.json deleted file mode 100644 index 604e0fe..0000000 --- a/examples/cogvideox_5b_Tora_I2V_testing_01.json +++ /dev/null @@ -1,1337 +0,0 @@ -{ - "last_node_id": 77, - "last_link_id": 186, - "nodes": [ - { - "id": 20, - "type": "CLIPLoader", - "pos": { - "0": -26, - "1": 400 - }, - "size": { - "0": 451.30548095703125, - "1": 82 - }, - "flags": {}, - "order": 0, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "CLIP", - "type": "CLIP", - "links": [ - 54, - 56 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CLIPLoader" - }, - "widgets_values": [ - "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", - "sd3" - ] - }, - { - "id": 31, - "type": "CogVideoTextEncode", - "pos": { - "0": 497, - "1": 520 - }, - "size": { - "0": 463.01251220703125, - "1": 124 - }, - "flags": {}, - "order": 6, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 56 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 179 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "The video is not of a high quality, it has a low resolution. Watermark present in each frame. Strange motion trajectory. 
", - 1, - true - ] - }, - { - "id": 65, - "type": "CreateShapeImageOnPath", - "pos": { - "0": 1052, - "1": 935 - }, - "size": { - "0": 313.4619445800781, - "1": 286 - }, - "flags": {}, - "order": 8, - "mode": 0, - "inputs": [ - { - "name": "coordinates", - "type": "STRING", - "link": 145, - "widget": { - "name": "coordinates" - } - }, - { - "name": "size_multiplier", - "type": "FLOAT", - "link": null, - "widget": { - "name": "size_multiplier" - }, - "shape": 7 - }, - { - "name": "frame_width", - "type": "INT", - "link": 149, - "widget": { - "name": "frame_width" - } - }, - { - "name": "frame_height", - "type": "INT", - "link": 150, - "widget": { - "name": "frame_height" - } - } - ], - "outputs": [ - { - "name": "image", - "type": "IMAGE", - "links": [ - 142, - 153 - ], - "slot_index": 0 - }, - { - "name": "mask", - "type": "MASK", - "links": [ - 154 - ], - "slot_index": 1 - } - ], - "properties": { - "Node name for S&R": "CreateShapeImageOnPath" - }, - "widgets_values": [ - "circle", - "", - 512, - 512, - 12, - 12, - "red", - "black", - 0, - 1, - [ - 1 - ], - 1 - ] - }, - { - "id": 66, - "type": "VHS_VideoCombine", - "pos": { - "0": 1405, - "1": 916 - }, - "size": [ - 605.3909912109375, - 714.2606608072917 - ], - "flags": {}, - "order": 11, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 142 - }, - { - "name": "audio", - "type": "AUDIO", - "link": null, - "shape": 7 - }, - { - "name": "meta_batch", - "type": "VHS_BatchManager", - "link": null, - "shape": 7 - }, - { - "name": "vae", - "type": "VAE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "Filenames", - "type": "VHS_FILENAMES", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "VHS_VideoCombine" - }, - "widgets_values": { - "frame_rate": 8, - "loop_count": 0, - "filename_prefix": "CogVideoX-Tora-trajectory", - "format": "video/h264-mp4", - "pix_fmt": "yuv420p", - "crf": 19, - "save_metadata": true, - "pingpong": false, - "save_output": false, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "CogVideoX-Tora-trajectory_00003.mp4", - "subfolder": "", - "type": "temp", - "format": "video/h264-mp4", - "frame_rate": 8 - }, - "muted": false - } - } - }, - { - "id": 56, - "type": "CogVideoDecode", - "pos": { - "0": 1596, - "1": 150 - }, - "size": { - "0": 300.396484375, - "1": 198 - }, - "flags": {}, - "order": 14, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 184 - }, - { - "name": "samples", - "type": "LATENT", - "link": 185 - } - ], - "outputs": [ - { - "name": "images", - "type": "IMAGE", - "links": [ - 155 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoDecode" - }, - "widgets_values": [ - false, - 240, - 360, - 0.2, - 0.2, - true - ] - }, - { - "id": 71, - "type": "CogVideoImageEncode", - "pos": { - "0": 68.59265899658203, - "1": 573.0311889648438 - }, - "size": { - "0": 315, - "1": 122 - }, - "flags": {}, - "order": 12, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 164 - }, - { - "name": "image", - "type": "IMAGE", - "link": 167 - }, - { - "name": "mask", - "type": "MASK", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "samples", - "type": "LATENT", - "links": [ - 180 - ], - "slot_index": 0 - } - ], - "properties": { - "Node name for S&R": "CogVideoImageEncode" - }, - "widgets_values": [ - 16, - false - ] - }, - { - "id": 72, - "type": "LoadImage", - "pos": { - "0": 
-820, - "1": 531 - }, - "size": { - "0": 315, - "1": 314 - }, - "flags": {}, - "order": 1, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 166 - ], - "slot_index": 0 - }, - { - "name": "MASK", - "type": "MASK", - "links": null - } - ], - "properties": { - "Node name for S&R": "LoadImage" - }, - "widgets_values": [ - "sd3stag.png", - "image" - ] - }, - { - "id": 73, - "type": "ImageResizeKJ", - "pos": { - "0": -436, - "1": 527 - }, - "size": { - "0": 315, - "1": 266 - }, - "flags": {}, - "order": 9, - "mode": 0, - "inputs": [ - { - "name": "image", - "type": "IMAGE", - "link": 166 - }, - { - "name": "get_image_size", - "type": "IMAGE", - "link": null, - "shape": 7 - }, - { - "name": "width_input", - "type": "INT", - "link": 168, - "widget": { - "name": "width_input" - }, - "shape": 7 - }, - { - "name": "height_input", - "type": "INT", - "link": 169, - "widget": { - "name": "height_input" - }, - "shape": 7 - } - ], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 167 - ], - "slot_index": 0 - }, - { - "name": "width", - "type": "INT", - "links": null - }, - { - "name": "height", - "type": "INT", - "links": null - } - ], - "properties": { - "Node name for S&R": "ImageResizeKJ" - }, - "widgets_values": [ - 512, - 512, - "nearest-exact", - false, - 2, - 0, - 0, - "disabled" - ] - }, - { - "id": 68, - "type": "ImageCompositeMasked", - "pos": { - "0": 1674, - "1": 641 - }, - "size": { - "0": 315, - "1": 146 - }, - "flags": {}, - "order": 15, - "mode": 0, - "inputs": [ - { - "name": "destination", - "type": "IMAGE", - "link": 155 - }, - { - "name": "source", - "type": "IMAGE", - "link": 153 - }, - { - "name": "mask", - "type": "MASK", - "link": 154, - "shape": 7 - } - ], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 156 - ], - "slot_index": 0 - } - ], - "properties": { - "Node name for S&R": "ImageCompositeMasked" - }, - "widgets_values": [ - 0, - 0, - false - ] - }, - { - "id": 60, - "type": "SplineEditor", - "pos": { - "0": -103, - "1": 770 - }, - "size": { - "0": 765, - "1": 910 - }, - "flags": {}, - "order": 2, - "mode": 0, - "inputs": [ - { - "name": "bg_image", - "type": "IMAGE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "mask", - "type": "MASK", - "links": [ - 146 - ], - "slot_index": 0 - }, - { - "name": "coord_str", - "type": "STRING", - "links": [ - 145, - 176 - ], - "slot_index": 1 - }, - { - "name": "float", - "type": "FLOAT", - "links": null - }, - { - "name": "count", - "type": "INT", - "links": null - }, - { - "name": "normalized_str", - "type": "STRING", - "links": null - } - ], - "properties": { - "Node name for S&R": "SplineEditor", - "points": "SplineEditor" - }, - "widgets_values": [ - "[{\"x\":366.43744764656,\"y\":171.3214040944956},{\"x\":466.3749333683491,\"y\":177.6666412831806},{\"x\":539.3451610382268,\"y\":195.1160435520644},{\"x\":276.01781770779843,\"y\":199.87497144357818}]", - 
"[{\"x\":366.43743896484375,\"y\":171.3214111328125},{\"x\":373.86798095703125,\"y\":171.79318237304688},{\"x\":381.29852294921875,\"y\":172.26495361328125},{\"x\":388.7288818359375,\"y\":172.73956298828125},{\"x\":396.1580810546875,\"y\":173.23184204101562},{\"x\":403.58544921875,\"y\":173.7510223388672},{\"x\":411.0102233886719,\"y\":174.30575561523438},{\"x\":418.4319763183594,\"y\":174.8998260498047},{\"x\":425.85003662109375,\"y\":175.53823852539062},{\"x\":433.26348876953125,\"y\":176.2280731201172},{\"x\":440.67156982421875,\"y\":176.9736328125},{\"x\":448.0726623535156,\"y\":177.78512573242188},{\"x\":455.4649658203125,\"y\":178.67330932617188},{\"x\":462.8458557128906,\"y\":179.65150451660156},{\"x\":470.2113952636719,\"y\":180.73902893066406},{\"x\":477.5547180175781,\"y\":181.96739196777344},{\"x\":484.8601379394531,\"y\":183.40267944335938},{\"x\":492.0770568847656,\"y\":185.22531127929688},{\"x\":498.24371337890625,\"y\":188.81117248535156},{\"x\":491.68231201171875,\"y\":191.73179626464844},{\"x\":484.3272705078125,\"y\":192.8770294189453},{\"x\":476.9224853515625,\"y\":193.65155029296875},{\"x\":469.50146484375,\"y\":194.25323486328125},{\"x\":462.07281494140625,\"y\":194.7535400390625},{\"x\":454.6398620605469,\"y\":195.1853790283203},{\"x\":447.2041931152344,\"y\":195.56698608398438},{\"x\":439.7665710449219,\"y\":195.90963745117188},{\"x\":432.32757568359375,\"y\":196.2206573486328},{\"x\":424.8875427246094,\"y\":196.50531005859375},{\"x\":417.4466552734375,\"y\":196.76824951171875},{\"x\":410.0051574707031,\"y\":197.01141357421875},{\"x\":402.5631103515625,\"y\":197.23898315429688},{\"x\":395.1206970214844,\"y\":197.45263671875},{\"x\":387.6778869628906,\"y\":197.6529541015625},{\"x\":380.23480224609375,\"y\":197.8413848876953},{\"x\":372.7914123535156,\"y\":198.0200653076172},{\"x\":365.3478698730469,\"y\":198.19000244140625},{\"x\":357.90411376953125,\"y\":198.350341796875},{\"x\":350.4601745605469,\"y\":198.50411987304688},{\"x\":343.01611328125,\"y\":198.65133666992188},{\"x\":335.5719909667969,\"y\":198.79347229003906},{\"x\":328.12774658203125,\"y\":198.93048095703125},{\"x\":320.68353271484375,\"y\":199.0675048828125},{\"x\":313.2392578125,\"y\":199.20228576660156},{\"x\":305.79498291015625,\"y\":199.33682250976562},{\"x\":298.35064697265625,\"y\":199.4713592529297},{\"x\":290.9063720703125,\"y\":199.60589599609375},{\"x\":283.46209716796875,\"y\":199.7404327392578},{\"x\":276.017822265625,\"y\":199.87496948242188}]", - 720, - 480, - 49, - "path", - "basis", - 0.5, - 1, - "list", - 0, - 1, - null, - null, - null - ] - }, - { - "id": 30, - "type": "CogVideoTextEncode", - "pos": { - "0": 493, - "1": 303 - }, - "size": { - "0": 471.90142822265625, - "1": 168.08047485351562 - }, - "flags": {}, - "order": 5, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 54 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 178 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "a stag is standing and looking around in a forest", - 1, - true - ] - }, - { - "id": 44, - "type": "VHS_VideoCombine", - "pos": { - "0": 2210, - "1": 151 - }, - "size": [ - 1131.619140625, - 310 - ], - "flags": {}, - "order": 16, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 156 - }, - { - "name": "audio", - "type": "AUDIO", - "link": null, - "shape": 7 - }, - { - "name": "meta_batch", - "type": "VHS_BatchManager", - 
"link": null, - "shape": 7 - }, - { - "name": "vae", - "type": "VAE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "Filenames", - "type": "VHS_FILENAMES", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "VHS_VideoCombine" - }, - "widgets_values": { - "frame_rate": 16, - "loop_count": 0, - "filename_prefix": "CogVideoX-Tora", - "format": "video/h264-mp4", - "pix_fmt": "yuv420p", - "crf": 19, - "save_metadata": true, - "pingpong": false, - "save_output": false, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "CogVideoX-Tora_00012.mp4", - "subfolder": "", - "type": "temp", - "format": "video/h264-mp4", - "frame_rate": 16 - }, - "muted": false - } - } - }, - { - "id": 75, - "type": "DownloadAndLoadToraModel", - "pos": { - "0": 253, - "1": 146 - }, - "size": { - "0": 315, - "1": 58 - }, - "flags": {}, - "order": 3, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "tora_model", - "type": "TORAMODEL", - "links": [ - 175 - ] - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadToraModel" - }, - "widgets_values": [ - "kijai/CogVideoX-5b-Tora" - ] - }, - { - "id": 74, - "type": "ToraEncodeTrajectory", - "pos": { - "0": 1060, - "1": 670 - }, - "size": { - "0": 335.1993408203125, - "1": 230 - }, - "flags": {}, - "order": 10, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 174 - }, - { - "name": "tora_model", - "type": "TORAMODEL", - "link": 175 - }, - { - "name": "coordinates", - "type": "STRING", - "link": 176, - "widget": { - "name": "coordinates" - } - }, - { - "name": "num_frames", - "type": "INT", - "link": 170, - "widget": { - "name": "num_frames" - } - }, - { - "name": "width", - "type": "INT", - "link": 171, - "widget": { - "name": "width" - } - }, - { - "name": "height", - "type": "INT", - "link": 172, - "widget": { - "name": "height" - } - } - ], - "outputs": [ - { - "name": "tora_trajectory", - "type": "TORAFEATURES", - "links": [ - 181 - ] - }, - { - "name": "video_flow_images", - "type": "IMAGE", - "links": null - } - ], - "properties": { - "Node name for S&R": "ToraEncodeTrajectory" - }, - "widgets_values": [ - "", - 720, - 480, - 49, - 1, - 0, - 0.1, - false - ] - }, - { - "id": 1, - "type": "DownloadAndLoadCogVideoModel", - "pos": { - "0": 633, - "1": 44 - }, - "size": { - "0": 337.8885192871094, - "1": 194 - }, - "flags": {}, - "order": 4, - "mode": 0, - "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null, - "shape": 7 - }, - { - "name": "block_edit", - "type": "TRANSFORMERBLOCKS", - "link": null, - "shape": 7 - }, - { - "name": "lora", - "type": "COGLORA", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 164, - 174, - 177 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadCogVideoModel" - }, - "widgets_values": [ - "THUDM/CogVideoX-5b-I2V", - "bf16", - "disabled", - "disabled", - false - ] - }, - { - "id": 77, - "type": "CogVideoSampler", - "pos": { - "0": 1138, - "1": 150 - }, - "size": [ - 405.5999755859375, - 410 - ], - "flags": {}, - "order": 13, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 177 - }, - { - "name": "positive", - "type": "CONDITIONING", - "link": 178 - }, - { - "name": "negative", - "type": "CONDITIONING", - "link": 179 - }, - { - "name": "samples", - "type": "LATENT", - "link": null, - "shape": 7 - }, - { - "name": 
"image_cond_latents", - "type": "LATENT", - "link": 180, - "shape": 7 - }, - { - "name": "context_options", - "type": "COGCONTEXT", - "link": null, - "shape": 7 - }, - { - "name": "controlnet", - "type": "COGVIDECONTROLNET", - "link": null, - "shape": 7 - }, - { - "name": "tora_trajectory", - "type": "TORAFEATURES", - "link": 181, - "shape": 7 - }, - { - "name": "fastercache", - "type": "FASTERCACHEARGS", - "link": null, - "shape": 7 - }, - { - "name": "num_frames", - "type": "INT", - "link": 186, - "widget": { - "name": "num_frames" - } - }, - { - "name": "height", - "type": "INT", - "link": 182, - "widget": { - "name": "height" - } - }, - { - "name": "width", - "type": "INT", - "link": 183, - "widget": { - "name": "width" - } - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 184 - ] - }, - { - "name": "samples", - "type": "LATENT", - "links": [ - 185 - ] - } - ], - "properties": { - "Node name for S&R": "CogVideoSampler" - }, - "widgets_values": [ - 480, - 720, - 49, - 32, - 6, - 65334758276105, - "fixed", - "CogVideoXDPMScheduler", - 1 - ] - }, - { - "id": 67, - "type": "GetMaskSizeAndCount", - "pos": { - "0": 763, - "1": 772 - }, - "size": { - "0": 264.5999755859375, - "1": 86 - }, - "flags": { - "collapsed": true - }, - "order": 7, - "mode": 0, - "inputs": [ - { - "name": "mask", - "type": "MASK", - "link": 146 - } - ], - "outputs": [ - { - "name": "mask", - "type": "MASK", - "links": null - }, - { - "name": "720 width", - "type": "INT", - "links": [ - 149, - 168, - 171, - 183 - ], - "slot_index": 1 - }, - { - "name": "480 height", - "type": "INT", - "links": [ - 150, - 169, - 172, - 182 - ], - "slot_index": 2 - }, - { - "name": "49 count", - "type": "INT", - "links": [ - 170, - 186 - ], - "slot_index": 3 - } - ], - "properties": { - "Node name for S&R": "GetMaskSizeAndCount" - }, - "widgets_values": [] - } - ], - "links": [ - [ - 54, - 20, - 0, - 30, - 0, - "CLIP" - ], - [ - 56, - 20, - 0, - 31, - 0, - "CLIP" - ], - [ - 142, - 65, - 0, - 66, - 0, - "IMAGE" - ], - [ - 145, - 60, - 1, - 65, - 0, - "STRING" - ], - [ - 146, - 60, - 0, - 67, - 0, - "MASK" - ], - [ - 149, - 67, - 1, - 65, - 2, - "INT" - ], - [ - 150, - 67, - 2, - 65, - 3, - "INT" - ], - [ - 153, - 65, - 0, - 68, - 1, - "IMAGE" - ], - [ - 154, - 65, - 1, - 68, - 2, - "MASK" - ], - [ - 155, - 56, - 0, - 68, - 0, - "IMAGE" - ], - [ - 156, - 68, - 0, - 44, - 0, - "IMAGE" - ], - [ - 164, - 1, - 0, - 71, - 0, - "COGVIDEOPIPE" - ], - [ - 166, - 72, - 0, - 73, - 0, - "IMAGE" - ], - [ - 167, - 73, - 0, - 71, - 1, - "IMAGE" - ], - [ - 168, - 67, - 1, - 73, - 2, - "INT" - ], - [ - 169, - 67, - 2, - 73, - 3, - "INT" - ], - [ - 170, - 67, - 3, - 74, - 3, - "INT" - ], - [ - 171, - 67, - 1, - 74, - 4, - "INT" - ], - [ - 172, - 67, - 2, - 74, - 5, - "INT" - ], - [ - 174, - 1, - 0, - 74, - 0, - "COGVIDEOPIPE" - ], - [ - 175, - 75, - 0, - 74, - 1, - "TORAMODEL" - ], - [ - 176, - 60, - 1, - 74, - 2, - "STRING" - ], - [ - 177, - 1, - 0, - 77, - 0, - "COGVIDEOPIPE" - ], - [ - 178, - 30, - 0, - 77, - 1, - "CONDITIONING" - ], - [ - 179, - 31, - 0, - 77, - 2, - "CONDITIONING" - ], - [ - 180, - 71, - 0, - 77, - 4, - "LATENT" - ], - [ - 181, - 74, - 0, - 77, - 7, - "TORAFEATURES" - ], - [ - 182, - 67, - 2, - 77, - 10, - "INT" - ], - [ - 183, - 67, - 1, - 77, - 11, - "INT" - ], - [ - 184, - 77, - 0, - 56, - 0, - "COGVIDEOPIPE" - ], - [ - 185, - 77, - 1, - 56, - 1, - "LATENT" - ], - [ - 186, - 67, - 3, - 77, - 9, - "INT" - ] - ], - "groups": [], - "config": {}, - "extra": { - "ds": { - "scale": 
0.6303940863129501, - "offset": [ - 136.37893073690276, - 136.88820468799213 - ] - } - }, - "version": 0.4 -} \ No newline at end of file diff --git a/examples/cogvideox_5b_tora_trajectory_example_01.json b/examples/cogvideox_5b_tora_trajectory_example_01.json deleted file mode 100644 index bad2570..0000000 --- a/examples/cogvideox_5b_tora_trajectory_example_01.json +++ /dev/null @@ -1,1119 +0,0 @@ -{ - "last_node_id": 72, - "last_link_id": 174, - "nodes": [ - { - "id": 20, - "type": "CLIPLoader", - "pos": { - "0": -26, - "1": 400 - }, - "size": { - "0": 451.30548095703125, - "1": 82 - }, - "flags": {}, - "order": 0, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "CLIP", - "type": "CLIP", - "links": [ - 54, - 56 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CLIPLoader" - }, - "widgets_values": [ - "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", - "sd3" - ] - }, - { - "id": 31, - "type": "CogVideoTextEncode", - "pos": { - "0": 497, - "1": 520 - }, - "size": { - "0": 463.01251220703125, - "1": 124 - }, - "flags": {}, - "order": 5, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 56 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 168 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "The video is not of a high quality, it has a low resolution. Watermark present in each frame. Strange motion trajectory. ", - 1, - true - ] - }, - { - "id": 1, - "type": "DownloadAndLoadCogVideoModel", - "pos": { - "0": 633, - "1": 44 - }, - "size": { - "0": 337.8885192871094, - "1": 194 - }, - "flags": {}, - "order": 1, - "mode": 0, - "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null, - "shape": 7 - }, - { - "name": "block_edit", - "type": "TRANSFORMERBLOCKS", - "link": null, - "shape": 7 - }, - { - "name": "lora", - "type": "COGLORA", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 159, - 166 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadCogVideoModel" - }, - "widgets_values": [ - "kijai/CogVideoX-5b-Tora", - "bf16", - "disabled", - "disabled", - false - ] - }, - { - "id": 65, - "type": "CreateShapeImageOnPath", - "pos": { - "0": 1052, - "1": 935 - }, - "size": { - "0": 313.4619445800781, - "1": 286 - }, - "flags": {}, - "order": 7, - "mode": 0, - "inputs": [ - { - "name": "coordinates", - "type": "STRING", - "link": 145, - "widget": { - "name": "coordinates" - } - }, - { - "name": "size_multiplier", - "type": "FLOAT", - "link": null, - "widget": { - "name": "size_multiplier" - }, - "shape": 7 - }, - { - "name": "frame_width", - "type": "INT", - "link": 149, - "widget": { - "name": "frame_width" - } - }, - { - "name": "frame_height", - "type": "INT", - "link": 150, - "widget": { - "name": "frame_height" - } - } - ], - "outputs": [ - { - "name": "image", - "type": "IMAGE", - "links": [ - 142, - 153 - ], - "slot_index": 0 - }, - { - "name": "mask", - "type": "MASK", - "links": [ - 154 - ], - "slot_index": 1 - } - ], - "properties": { - "Node name for S&R": "CreateShapeImageOnPath" - }, - "widgets_values": [ - "circle", - "", - 512, - 512, - 12, - 12, - "red", - "black", - 0, - 1, - [ - 1 - ], - 1 - ] - }, - { - "id": 56, - "type": "CogVideoDecode", - "pos": { - "0": 1581, - "1": 148 - }, - "size": { - "0": 300.396484375, - "1": 
198 - }, - "flags": {}, - "order": 11, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 172 - }, - { - "name": "samples", - "type": "LATENT", - "link": 173 - } - ], - "outputs": [ - { - "name": "images", - "type": "IMAGE", - "links": [ - 155 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoDecode" - }, - "widgets_values": [ - false, - 240, - 360, - 0.2, - 0.2, - true - ] - }, - { - "id": 68, - "type": "ImageCompositeMasked", - "pos": { - "0": 1623, - "1": 557 - }, - "size": { - "0": 315, - "1": 146 - }, - "flags": {}, - "order": 12, - "mode": 0, - "inputs": [ - { - "name": "destination", - "type": "IMAGE", - "link": 155 - }, - { - "name": "source", - "type": "IMAGE", - "link": 153 - }, - { - "name": "mask", - "type": "MASK", - "link": 154, - "shape": 7 - } - ], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 156 - ], - "slot_index": 0 - } - ], - "properties": { - "Node name for S&R": "ImageCompositeMasked" - }, - "widgets_values": [ - 0, - 0, - false - ] - }, - { - "id": 66, - "type": "VHS_VideoCombine", - "pos": { - "0": 1405, - "1": 916 - }, - "size": [ - 605.3909912109375, - 714.2606608072917 - ], - "flags": {}, - "order": 9, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 142 - }, - { - "name": "audio", - "type": "AUDIO", - "link": null, - "shape": 7 - }, - { - "name": "meta_batch", - "type": "VHS_BatchManager", - "link": null, - "shape": 7 - }, - { - "name": "vae", - "type": "VAE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "Filenames", - "type": "VHS_FILENAMES", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "VHS_VideoCombine" - }, - "widgets_values": { - "frame_rate": 8, - "loop_count": 0, - "filename_prefix": "CogVideoX-Tora-trajectory", - "format": "video/h264-mp4", - "pix_fmt": "yuv420p", - "crf": 19, - "save_metadata": true, - "pingpong": false, - "save_output": false, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "CogVideoX-Tora-trajectory_00001.mp4", - "subfolder": "", - "type": "temp", - "format": "video/h264-mp4", - "frame_rate": 8 - }, - "muted": false - } - } - }, - { - "id": 30, - "type": "CogVideoTextEncode", - "pos": { - "0": 493, - "1": 303 - }, - "size": { - "0": 471.90142822265625, - "1": 168.08047485351562 - }, - "flags": {}, - "order": 4, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 54 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 167 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "video of a brown bear in front of a waterfall", - 1, - true - ] - }, - { - "id": 60, - "type": "SplineEditor", - "pos": { - "0": -103, - "1": 770 - }, - "size": { - "0": 765, - "1": 910 - }, - "flags": {}, - "order": 2, - "mode": 0, - "inputs": [ - { - "name": "bg_image", - "type": "IMAGE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "mask", - "type": "MASK", - "links": [ - 146 - ], - "slot_index": 0 - }, - { - "name": "coord_str", - "type": "STRING", - "links": [ - 145, - 165 - ], - "slot_index": 1 - }, - { - "name": "float", - "type": "FLOAT", - "links": null - }, - { - "name": "count", - "type": "INT", - "links": null - }, - { - "name": "normalized_str", - "type": "STRING", - "links": null - } - ], - "properties": { - "Node name for S&R": "SplineEditor", - 
"points": "SplineEditor" - }, - "widgets_values": [ - "[{\"x\":133.249980962386,\"y\":260.15472473608696},{\"x\":263.32734333042947,\"y\":353.74697326919136},{\"x\":412.4404172645281,\"y\":258.5684154389157},{\"x\":234.77377598134674,\"y\":190.35711566055141},{\"x\":399.749942887158,\"y\":106.28272291047455},{\"x\":539.345161038229,\"y\":266.499961924772}]", - "[{\"x\":133.24998474121094,\"y\":260.15472412109375},{\"x\":146.90032958984375,\"y\":275.1250305175781},{\"x\":160.95431518554688,\"y\":289.7167053222656},{\"x\":175.49871826171875,\"y\":303.81890869140625},{\"x\":190.64990234375,\"y\":317.2657165527344},{\"x\":206.57586669921875,\"y\":329.7820129394531},{\"x\":223.51571655273438,\"y\":340.87823486328125},{\"x\":241.78599548339844,\"y\":349.5747375488281},{\"x\":261.53076171875,\"y\":353.722412109375},{\"x\":281.7359619140625,\"y\":352.4859313964844},{\"x\":301.6475830078125,\"y\":348.8008117675781},{\"x\":321.0851745605469,\"y\":343.1146545410156},{\"x\":339.9189147949219,\"y\":335.6663513183594},{\"x\":358.0020751953125,\"y\":326.5465393066406},{\"x\":375.1062316894531,\"y\":315.70538330078125},{\"x\":390.79693603515625,\"y\":302.913818359375},{\"x\":404.0909118652344,\"y\":287.676025390625},{\"x\":412.28533935546875,\"y\":269.30328369140625},{\"x\":408.2677307128906,\"y\":250.2057647705078},{\"x\":391.3509521484375,\"y\":239.37109375},{\"x\":372.0826721191406,\"y\":233.1624298095703},{\"x\":352.3110656738281,\"y\":228.7548065185547},{\"x\":332.3755187988281,\"y\":225.14697265625},{\"x\":312.3946228027344,\"y\":221.79623413085938},{\"x\":292.44403076171875,\"y\":218.2718048095703},{\"x\":272.63787841796875,\"y\":214.0209503173828},{\"x\":253.3236541748047,\"y\":207.96151733398438},{\"x\":236.94012451171875,\"y\":196.54066467285156},{\"x\":237.0126495361328,\"y\":177.01820373535156},{\"x\":247.45623779296875,\"y\":159.7530975341797},{\"x\":261.6012268066406,\"y\":145.28346252441406},{\"x\":277.7103576660156,\"y\":133.0191192626953},{\"x\":295.14239501953125,\"y\":122.7152328491211},{\"x\":313.58447265625,\"y\":114.35106658935547},{\"x\":332.8315734863281,\"y\":108.0604019165039},{\"x\":352.688720703125,\"y\":104.10931396484375},{\"x\":372.89337158203125,\"y\":102.88045501708984},{\"x\":393.0354309082031,\"y\":104.8262939453125},{\"x\":412.199951171875,\"y\":111.1242446899414},{\"x\":429.0757751464844,\"y\":122.27816009521484},{\"x\":444.1902160644531,\"y\":135.7550506591797},{\"x\":458.09735107421875,\"y\":150.48239135742188},{\"x\":471.13787841796875,\"y\":165.98487854003906},{\"x\":483.52349853515625,\"y\":182.0165557861328},{\"x\":495.3956604003906,\"y\":198.4325714111328},{\"x\":506.85150146484375,\"y\":215.1421356201172},{\"x\":517.9617919921875,\"y\":232.08358764648438},{\"x\":528.7791137695312,\"y\":249.2136688232422},{\"x\":539.3451538085938,\"y\":266.4999694824219}]", - 720, - 480, - 49, - "path", - "cardinal", - 0.5, - 1, - "list", - 0, - 1, - null, - null, - null - ] - }, - { - "id": 44, - "type": "VHS_VideoCombine", - "pos": { - "0": 2210, - "1": 151 - }, - "size": [ - 1131.619140625, - 310 - ], - "flags": {}, - "order": 13, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 156 - }, - { - "name": "audio", - "type": "AUDIO", - "link": null, - "shape": 7 - }, - { - "name": "meta_batch", - "type": "VHS_BatchManager", - "link": null, - "shape": 7 - }, - { - "name": "vae", - "type": "VAE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "Filenames", - "type": "VHS_FILENAMES", - "links": null, - "shape": 3 - } - ], - 
"properties": { - "Node name for S&R": "VHS_VideoCombine" - }, - "widgets_values": { - "frame_rate": 8, - "loop_count": 0, - "filename_prefix": "CogVideoX-Tora", - "format": "video/h264-mp4", - "pix_fmt": "yuv420p", - "crf": 19, - "save_metadata": true, - "pingpong": false, - "save_output": false, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "CogVideoX-Tora_00002.mp4", - "subfolder": "", - "type": "temp", - "format": "video/h264-mp4", - "frame_rate": 8 - }, - "muted": false - } - } - }, - { - "id": 70, - "type": "DownloadAndLoadToraModel", - "pos": { - "0": 270, - "1": 158 - }, - "size": { - "0": 315, - "1": 58 - }, - "flags": {}, - "order": 3, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "tora_model", - "type": "TORAMODEL", - "links": [ - 164 - ] - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadToraModel" - }, - "widgets_values": [ - "kijai/CogVideoX-5b-Tora" - ] - }, - { - "id": 69, - "type": "ToraEncodeTrajectory", - "pos": { - "0": 1112, - "1": 663 - }, - "size": { - "0": 355.20001220703125, - "1": 230 - }, - "flags": {}, - "order": 8, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 159 - }, - { - "name": "tora_model", - "type": "TORAMODEL", - "link": 164 - }, - { - "name": "coordinates", - "type": "STRING", - "link": 165, - "widget": { - "name": "coordinates" - } - }, - { - "name": "num_frames", - "type": "INT", - "link": 160, - "widget": { - "name": "num_frames" - } - }, - { - "name": "width", - "type": "INT", - "link": 161, - "widget": { - "name": "width" - } - }, - { - "name": "height", - "type": "INT", - "link": 162, - "widget": { - "name": "height" - } - } - ], - "outputs": [ - { - "name": "tora_trajectory", - "type": "TORAFEATURES", - "links": [ - 169 - ] - }, - { - "name": "video_flow_images", - "type": "IMAGE", - "links": null - } - ], - "properties": { - "Node name for S&R": "ToraEncodeTrajectory" - }, - "widgets_values": [ - "", - 720, - 480, - 49, - 1, - 0, - 1, - false - ] - }, - { - "id": 67, - "type": "GetMaskSizeAndCount", - "pos": { - "0": 763, - "1": 772 - }, - "size": { - "0": 264.5999755859375, - "1": 86 - }, - "flags": { - "collapsed": true - }, - "order": 6, - "mode": 0, - "inputs": [ - { - "name": "mask", - "type": "MASK", - "link": 146 - } - ], - "outputs": [ - { - "name": "mask", - "type": "MASK", - "links": null - }, - { - "name": "width", - "type": "INT", - "links": [ - 149, - 161, - 171 - ], - "slot_index": 1 - }, - { - "name": "height", - "type": "INT", - "links": [ - 150, - 162, - 170 - ], - "slot_index": 2 - }, - { - "name": "count", - "type": "INT", - "links": [ - 160, - 174 - ], - "slot_index": 3 - } - ], - "properties": { - "Node name for S&R": "GetMaskSizeAndCount" - }, - "widgets_values": [] - }, - { - "id": 72, - "type": "CogVideoSampler", - "pos": { - "0": 1138, - "1": 150 - }, - "size": { - "0": 405.5999755859375, - "1": 410 - }, - "flags": {}, - "order": 10, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 166 - }, - { - "name": "positive", - "type": "CONDITIONING", - "link": 167 - }, - { - "name": "negative", - "type": "CONDITIONING", - "link": 168 - }, - { - "name": "samples", - "type": "LATENT", - "link": null, - "shape": 7 - }, - { - "name": "image_cond_latents", - "type": "LATENT", - "link": null, - "shape": 7 - }, - { - "name": "context_options", - "type": "COGCONTEXT", - "link": null, - "shape": 7 - }, - { - "name": "controlnet", - "type": "COGVIDECONTROLNET", - "link": null, - "shape": 7 - 
}, - { - "name": "tora_trajectory", - "type": "TORAFEATURES", - "link": 169, - "shape": 7 - }, - { - "name": "fastercache", - "type": "FASTERCACHEARGS", - "link": null, - "shape": 7 - }, - { - "name": "num_frames", - "type": "INT", - "link": 174, - "widget": { - "name": "num_frames" - } - }, - { - "name": "height", - "type": "INT", - "link": 170, - "widget": { - "name": "height" - } - }, - { - "name": "width", - "type": "INT", - "link": 171, - "widget": { - "name": "width" - } - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 172 - ] - }, - { - "name": "samples", - "type": "LATENT", - "links": [ - 173 - ] - } - ], - "properties": { - "Node name for S&R": "CogVideoSampler" - }, - "widgets_values": [ - 480, - 720, - 49, - 32, - 6, - 65334758276105, - "fixed", - "CogVideoXDPMScheduler", - 1 - ] - } - ], - "links": [ - [ - 54, - 20, - 0, - 30, - 0, - "CLIP" - ], - [ - 56, - 20, - 0, - 31, - 0, - "CLIP" - ], - [ - 142, - 65, - 0, - 66, - 0, - "IMAGE" - ], - [ - 145, - 60, - 1, - 65, - 0, - "STRING" - ], - [ - 146, - 60, - 0, - 67, - 0, - "MASK" - ], - [ - 149, - 67, - 1, - 65, - 2, - "INT" - ], - [ - 150, - 67, - 2, - 65, - 3, - "INT" - ], - [ - 153, - 65, - 0, - 68, - 1, - "IMAGE" - ], - [ - 154, - 65, - 1, - 68, - 2, - "MASK" - ], - [ - 155, - 56, - 0, - 68, - 0, - "IMAGE" - ], - [ - 156, - 68, - 0, - 44, - 0, - "IMAGE" - ], - [ - 159, - 1, - 0, - 69, - 0, - "COGVIDEOPIPE" - ], - [ - 160, - 67, - 3, - 69, - 3, - "INT" - ], - [ - 161, - 67, - 1, - 69, - 4, - "INT" - ], - [ - 162, - 67, - 2, - 69, - 5, - "INT" - ], - [ - 164, - 70, - 0, - 69, - 1, - "TORAMODEL" - ], - [ - 165, - 60, - 1, - 69, - 2, - "STRING" - ], - [ - 166, - 1, - 0, - 72, - 0, - "COGVIDEOPIPE" - ], - [ - 167, - 30, - 0, - 72, - 1, - "CONDITIONING" - ], - [ - 168, - 31, - 0, - 72, - 2, - "CONDITIONING" - ], - [ - 169, - 69, - 0, - 72, - 7, - "TORAFEATURES" - ], - [ - 170, - 67, - 2, - 72, - 10, - "INT" - ], - [ - 171, - 67, - 1, - 72, - 11, - "INT" - ], - [ - 172, - 72, - 0, - 56, - 0, - "COGVIDEOPIPE" - ], - [ - 173, - 72, - 1, - 56, - 1, - "LATENT" - ], - [ - 174, - 67, - 3, - 72, - 9, - "INT" - ] - ], - "groups": [], - "config": {}, - "extra": { - "ds": { - "scale": 0.7627768444386698, - "offset": [ - -854.9458997028877, - 235.76201522394632 - ] - } - }, - "version": 0.4 -} \ No newline at end of file diff --git a/examples/cogvidex_fun_i2v_example_02.json b/examples/cogvideox_Fun_I2V_02.json similarity index 75% rename from examples/cogvidex_fun_i2v_example_02.json rename to examples/cogvideox_Fun_I2V_02.json index d7023d1..66fea99 100644 --- a/examples/cogvidex_fun_i2v_example_02.json +++ b/examples/cogvideox_Fun_I2V_02.json @@ -1,13 +1,198 @@ { - "last_node_id": 47, - "last_link_id": 110, + "last_node_id": 51, + "last_link_id": 123, "nodes": [ + { + "id": 48, + "type": "CogVideoSampler", + "pos": { + "0": 1200, + "1": 124 + }, + "size": [ + 330, + 574 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 114 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 116 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 117 + }, + { + "name": "samples", + "type": "LATENT", + "link": null, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": 120, + "shape": 7 + }, + { + "name": "context_options", + "type": "COGCONTEXT", + "link": null, + "shape": 7 + }, + { + "name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": 
"tora_trajectory", + "type": "TORAFEATURES", + "link": null, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 123 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoSampler" + }, + "widgets_values": [ + 49, + 50, + 6, + 458091243358272, + "randomize", + "CogVideoXDDIM", + 1 + ] + }, + { + "id": 30, + "type": "CogVideoTextEncode", + "pos": { + "0": 490, + "1": 146 + }, + "size": { + "0": 471.90142822265625, + "1": 168.08047485351562 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 116 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 110 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "fireworks display over night city. The video is of high quality, and the view is very clear. High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.", + 1, + false + ] + }, + { + "id": 31, + "type": "CogVideoTextEncode", + "pos": { + "0": 497, + "1": 365 + }, + "size": { + "0": 463.01251220703125, + "1": 144 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 110 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 117 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "The video is not of a high quality, it has a low resolution. Watermark present in each frame. Strange motion trajectory. 
", + 1, + true + ] + }, { "id": 20, "type": "CLIPLoader", "pos": { - "0": -26, - "1": 400 + "0": -7, + "1": -37 }, "size": { "0": 451.30548095703125, @@ -37,29 +222,200 @@ ] }, { - "id": 11, - "type": "CogVideoDecode", + "id": 50, + "type": "CogVideoImageEncodeFunInP", "pos": { - "0": 1448, - "1": 345 - }, - "size": { - "0": 300.396484375, - "1": 198 + "0": 865, + "1": 567 }, + "size": [ + 253.60000610351562, + 146 + ], "flags": {}, - "order": 7, + "order": 6, "mode": 0, "inputs": [ { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 108 + "name": "vae", + "type": "VAE", + "link": 119 + }, + { + "name": "start_image", + "type": "IMAGE", + "link": 118 + }, + { + "name": "end_image", + "type": "IMAGE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "image_cond_latents", + "type": "LATENT", + "links": [ + 120 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoImageEncodeFunInP" + }, + "widgets_values": [ + 49, + false, + 0 + ] + }, + { + "id": 37, + "type": "ImageResizeKJ", + "pos": { + "0": 499, + "1": 587 + }, + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 71 + }, + { + "name": "get_image_size", + "type": "IMAGE", + "link": null, + "shape": 7 + }, + { + "name": "width_input", + "type": "INT", + "link": null, + "widget": { + "name": "width_input" + } + }, + { + "name": "height_input", + "type": "INT", + "link": null, + "widget": { + "name": "height_input" + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 118 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "height", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageResizeKJ" + }, + "widgets_values": [ + 720, + 480, + "lanczos", + false, + 2, + 0, + 0, + "disabled" + ] + }, + { + "id": 36, + "type": "LoadImage", + "pos": { + "0": 43, + "1": 587 + }, + "size": [ + 405.2986131072541, + 477.48971409949377 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 71 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "6e1a7befce6daa63fc01cb66c1a22ed0.jpg", + "image" + ] + }, + { + "id": 51, + "type": "CogVideoDecode", + "pos": { + "0": 1219, + "1": -134 + }, + "size": { + "0": 315, + "1": 198 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 122 }, { "name": "samples", "type": "LATENT", - "link": 109 + "link": 123 } ], "outputs": [ @@ -67,17 +423,15 @@ "name": "images", "type": "IMAGE", "links": [ - 97 - ], - "slot_index": 0, - "shape": 3 + 121 + ] } ], "properties": { "Node name for S&R": "CogVideoDecode" }, "widgets_values": [ - false, + true, 240, 360, 0.2, @@ -85,71 +439,25 @@ true ] }, - { - "id": 31, - "type": "CogVideoTextEncode", - "pos": { - "0": 497, - "1": 520 - }, - "size": { - "0": 463.01251220703125, - "1": 144 - }, - "flags": {}, - "order": 5, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 110 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 106 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "clip", - "type": "CLIP", - "links": null - } - ], - 
"properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "The video is not of a high quality, it has a low resolution. Watermark present in each frame. Strange motion trajectory. ", - 1, - true - ] - }, { "id": 44, "type": "VHS_VideoCombine", "pos": { - "0": 1842, - "1": 345 + "0": 1602, + "1": -131 }, "size": [ - 605.3909912109375, - 714.2606608072917 + 767.7372279260157, + 822.491455078125 ], "flags": {}, - "order": 8, + "order": 9, "mode": 0, "inputs": [ { "name": "images", "type": "IMAGE", - "link": 97 + "link": 121 }, { "name": "audio", @@ -195,7 +503,7 @@ "hidden": false, "paused": false, "params": { - "filename": "CogVideoX_Fun_00001.mp4", + "filename": "CogVideoX_Fun_00002.mp4", "subfolder": "", "type": "temp", "format": "video/h264-mp4", @@ -206,66 +514,20 @@ } }, { - "id": 36, - "type": "LoadImage", - "pos": { - "0": 325, - "1": 715 - }, - "size": { - "0": 432.4361877441406, - "1": 361.0254211425781 - }, - "flags": {}, - "order": 1, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 71 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "MASK", - "type": "MASK", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "LoadImage" - }, - "widgets_values": [ - "6e1a7befce6daa63fc01cb66c1a22ed0.jpg", - "image" - ] - }, - { - "id": 1, + "id": 49, "type": "DownloadAndLoadCogVideoModel", "pos": { - "0": 602, - "1": 53 + "0": 491, + "1": -167 }, "size": { - "0": 337.8885192871094, - "1": 194 + "0": 362.1656799316406, + "1": 218 }, "flags": {}, "order": 2, "mode": 0, "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null, - "shape": 7 - }, { "name": "block_edit", "type": "TRANSFORMERBLOCKS", @@ -277,256 +539,42 @@ "type": "COGLORA", "link": null, "shape": 7 + }, + { + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 } ], "outputs": [ { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", + "name": "model", + "type": "COGVIDEOMODEL", "links": [ - 104 + 114 + ] + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 119, + 122 ], - "slot_index": 0, - "shape": 3 + "slot_index": 1 } ], "properties": { "Node name for S&R": "DownloadAndLoadCogVideoModel" }, "widgets_values": [ - "kijai/CogVideoX-Fun-5b", + "alibaba-pai/CogVideoX-Fun-V1.1-5b-InP", "bf16", "disabled", - "disabled", - false - ] - }, - { - "id": 37, - "type": "ImageResizeKJ", - "pos": { - "0": 824, - "1": 715 - }, - "size": { - "0": 315, - "1": 266 - }, - "flags": {}, - "order": 4, - "mode": 0, - "inputs": [ - { - "name": "image", - "type": "IMAGE", - "link": 71 - }, - { - "name": "get_image_size", - "type": "IMAGE", - "link": null, - "shape": 7 - }, - { - "name": "width_input", - "type": "INT", - "link": null, - "widget": { - "name": "width_input" - } - }, - { - "name": "height_input", - "type": "INT", - "link": null, - "widget": { - "name": "height_input" - } - } - ], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 107 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "width", - "type": "INT", - "links": null, - "shape": 3 - }, - { - "name": "height", - "type": "INT", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "ImageResizeKJ" - }, - "widgets_values": [ - 720, - 480, - "lanczos", false, - 2, - 0, - 0, - "disabled" - ] - }, - { - "id": 47, - "type": "CogVideoXFunSampler", - "pos": { - "0": 1068, - "1": 198 - }, - "size": { - "0": 367.79998779296875, - "1": 434 - }, - "flags": {}, - "order": 6, - "mode": 
0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 104 - }, - { - "name": "positive", - "type": "CONDITIONING", - "link": 105 - }, - { - "name": "negative", - "type": "CONDITIONING", - "link": 106 - }, - { - "name": "start_img", - "type": "IMAGE", - "link": 107, - "shape": 7 - }, - { - "name": "end_img", - "type": "IMAGE", - "link": null, - "shape": 7 - }, - { - "name": "context_options", - "type": "COGCONTEXT", - "link": null, - "shape": 7 - }, - { - "name": "tora_trajectory", - "type": "TORAFEATURES", - "link": null, - "shape": 7 - }, - { - "name": "fastercache", - "type": "FASTERCACHEARGS", - "link": null, - "shape": 7 - }, - { - "name": "vid2vid_images", - "type": "IMAGE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 108 - ] - }, - { - "name": "samples", - "type": "LATENT", - "links": [ - 109 - ] - } - ], - "properties": { - "Node name for S&R": "CogVideoXFunSampler" - }, - "widgets_values": [ - 49, - 720, - 480, - 43, - "fixed", - 50, - 6, - "DDIM", - 0.0563, - 1 - ] - }, - { - "id": 30, - "type": "CogVideoTextEncode", - "pos": { - "0": 493, - "1": 303 - }, - "size": { - "0": 471.90142822265625, - "1": 168.08047485351562 - }, - "flags": {}, - "order": 3, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 54 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 105 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "clip", - "type": "CLIP", - "links": [ - 110 - ], - "slot_index": 1 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "fireworks display over night city. The video is of high quality, and the view is very clear. 
High quality, masterpiece, best quality, highres, ultra-detailed, fantastic.", - 1, - false + "sdpa", + "main_device" ] } ], @@ -547,62 +595,6 @@ 0, "IMAGE" ], - [ - 97, - 11, - 0, - 44, - 0, - "IMAGE" - ], - [ - 104, - 1, - 0, - 47, - 0, - "COGVIDEOPIPE" - ], - [ - 105, - 30, - 0, - 47, - 1, - "CONDITIONING" - ], - [ - 106, - 31, - 0, - 47, - 2, - "CONDITIONING" - ], - [ - 107, - 37, - 0, - 47, - 3, - "IMAGE" - ], - [ - 108, - 47, - 0, - 11, - 0, - "COGVIDEOPIPE" - ], - [ - 109, - 47, - 1, - 11, - 1, - "LATENT" - ], [ 110, 30, @@ -610,16 +602,88 @@ 31, 0, "CLIP" + ], + [ + 114, + 49, + 0, + 48, + 0, + "COGVIDEOMODEL" + ], + [ + 116, + 30, + 0, + 48, + 1, + "CONDITIONING" + ], + [ + 117, + 31, + 0, + 48, + 2, + "CONDITIONING" + ], + [ + 118, + 37, + 0, + 50, + 1, + "IMAGE" + ], + [ + 119, + 49, + 1, + 50, + 0, + "VAE" + ], + [ + 120, + 50, + 0, + 48, + 4, + "LATENT" + ], + [ + 121, + 51, + 0, + 44, + 0, + "IMAGE" + ], + [ + 122, + 49, + 1, + 51, + 0, + "VAE" + ], + [ + 123, + 48, + 0, + 51, + 1, + "LATENT" ] ], "groups": [], "config": {}, "extra": { "ds": { - "scale": 0.8264462809917363, + "scale": 0.693433494944278, "offset": [ - 245.90746806300405, - 108.93624646284617 + 416.0091223165226, + 378.00843746369645 ] } }, diff --git a/examples/cogvideox_Fun_I2V_Tora.json b/examples/cogvideox_Fun_I2V_Tora.json new file mode 100644 index 0000000..9bd01df --- /dev/null +++ b/examples/cogvideox_Fun_I2V_Tora.json @@ -0,0 +1,1711 @@ +{ + "last_node_id": 93, + "last_link_id": 226, + "nodes": [ + { + "id": 31, + "type": "CogVideoTextEncode", + "pos": { + "0": 497, + "1": 520 + }, + "size": { + "0": 463.01251220703125, + "1": 144 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 209 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 198 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "The video is not of a high quality, it has a low resolution. Watermark present in each frame. Strange motion trajectory. 
", + 1, + true + ] + }, + { + "id": 78, + "type": "ToraEncodeTrajectory", + "pos": { + "0": 1053, + "1": 640 + }, + "size": [ + 355.20001220703125, + 246 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "tora_model", + "type": "TORAMODEL", + "link": 193 + }, + { + "name": "vae", + "type": "VAE", + "link": 205 + }, + { + "name": "coordinates", + "type": "STRING", + "link": 220, + "widget": { + "name": "coordinates" + } + }, + { + "name": "num_frames", + "type": "INT", + "link": 189, + "widget": { + "name": "num_frames" + } + }, + { + "name": "width", + "type": "INT", + "link": 190, + "widget": { + "name": "width" + } + }, + { + "name": "height", + "type": "INT", + "link": 191, + "widget": { + "name": "height" + } + } + ], + "outputs": [ + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "links": [ + 200 + ] + }, + { + "name": "video_flow_images", + "type": "IMAGE", + "links": [ + 203 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "ToraEncodeTrajectory" + }, + "widgets_values": [ + "", + 720, + 480, + 49, + 1, + 0, + 1, + true + ] + }, + { + "id": 73, + "type": "ImageResizeKJ", + "pos": { + "0": -436, + "1": 527 + }, + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 166 + }, + { + "name": "get_image_size", + "type": "IMAGE", + "link": null, + "shape": 7 + }, + { + "name": "width_input", + "type": "INT", + "link": null, + "widget": { + "name": "width_input" + }, + "shape": 7 + }, + { + "name": "height_input", + "type": "INT", + "link": null, + "widget": { + "name": "height_input" + }, + "shape": 7 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 187, + 210, + 216, + 225 + ], + "slot_index": 0 + }, + { + "name": "width", + "type": "INT", + "links": null + }, + { + "name": "height", + "type": "INT", + "links": null + } + ], + "properties": { + "Node name for S&R": "ImageResizeKJ" + }, + "widgets_values": [ + 720, + 480, + "nearest-exact", + false, + 2, + 0, + 0, + "center" + ] + }, + { + "id": 72, + "type": "LoadImage", + "pos": { + "0": -820, + "1": 531 + }, + "size": { + "0": 315, + "1": 314 + }, + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 166 + ], + "slot_index": 0 + }, + { + "name": "MASK", + "type": "MASK", + "links": null + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "pasted/image (473).png", + "image" + ] + }, + { + "id": 20, + "type": "CLIPLoader", + "pos": { + "0": -21, + "1": 288 + }, + "size": { + "0": 451.30548095703125, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CLIPLoader" + }, + "widgets_values": [ + "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", + "sd3" + ] + }, + { + "id": 67, + "type": "GetMaskSizeAndCount", + "pos": { + "0": 750, + "1": 775 + }, + "size": { + "0": 264.5999755859375, + "1": 86 + }, + "flags": { + "collapsed": true + }, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "mask", + "type": "MASK", + "link": 146 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": null + }, + { + "name": "720 width", + "type": "INT", + "links": [ + 149, + 190 + ], + "slot_index": 1 + }, + { + "name": "480 height", + "type": "INT", + 
"links": [ + 150, + 191 + ], + "slot_index": 2 + }, + { + "name": "49 count", + "type": "INT", + "links": [ + 189, + 201 + ], + "slot_index": 3 + } + ], + "properties": { + "Node name for S&R": "GetMaskSizeAndCount" + }, + "widgets_values": [] + }, + { + "id": 56, + "type": "CogVideoDecode", + "pos": { + "0": 1582, + "1": -66 + }, + "size": { + "0": 300.396484375, + "1": 198 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 206 + }, + { + "name": "samples", + "type": "LATENT", + "link": 202 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 155 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CogVideoDecode" + }, + "widgets_values": [ + true, + 240, + 360, + 0.2, + 0.2, + true + ] + }, + { + "id": 30, + "type": "CogVideoTextEncode", + "pos": { + "0": 498, + "1": 293 + }, + "size": { + "0": 471.90142822265625, + "1": 168.08047485351562 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 197 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 209 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "flying car lifts off in the air in front of a house", + 1, + false + ] + }, + { + "id": 44, + "type": "VHS_VideoCombine", + "pos": { + "0": 2229, + "1": -113 + }, + "size": [ + 1388.8330963815574, + 1236.555397587705 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 156 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "CogVideoX-Tora", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "CogVideoX-Tora_00011.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 16 + }, + "muted": false + } + } + }, + { + "id": 60, + "type": "SplineEditor", + "pos": { + "0": -1367, + "1": 1222 + }, + "size": [ + 765, + 910 + ], + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "bg_image", + "type": "IMAGE", + "link": 187, + "shape": 7 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [ + 146 + ], + "slot_index": 0 + }, + { + "name": "coord_str", + "type": "STRING", + "links": [ + 212 + ], + "slot_index": 1 + }, + { + "name": "float", + "type": "FLOAT", + "links": null + }, + { + "name": "count", + "type": "INT", + "links": null + }, + { + "name": "normalized_str", + "type": "STRING", + "links": null + } + ], + "properties": { + "Node name for S&R": "SplineEditor", + "points": "SplineEditor", + "imgData": { + "name": "bg_image", + "base64": [ + 
"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAHgAtADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDWAoFLigfSu0+YExRinYoxQAgFAFKBSgUAJilxxS4oxQAmKMUtGKBiYNFLiigBMUGloxQhhijFLilAoEAFFOxRigBMUAUuKWgBuKMe1OxRigBuKUClxRigBMUtGKKADFJTsUYoAbTgKMc0YoAMUmKXFLigBtFOIpMUgDFFLijFACYoxTsUYoAbiinYpMUwEoxS0tACYpMU7FIRSATFLikxTsUCExRilxRigBKMUuKKYxtGKdigCkIbilpcUEUAJSYpcUuKAG4oxS0UAJiilpCKYDTRilxxRikISjFLRigBMUY46UuKMUANxRilIoxQAmKKUijFADaMc07FGKAGYoIxT8UhFMQ3FJTsUY5pCG4oApcUY9qYDSKTFPx7UmDQAzFGKeR7UYoEMxS4p22kxQAmKDS4ooAZR+FOIpCKAG4paXFJigB4FGKdiig0EpMU6jFADcUtLRigBKWjFL2oASlx7UAUuKBjaMU/HNJtouAmPaj8KdRSGNFOAopaYgo70tLigBKKXFGKQCUtGKWmAlJTqMcUgG0U4UYoASjFOxRjNADcUU7HtRj2oATFFLQKAEoxTsUUANoxTsUAUXASjFLiloAbijilooAbilxS8UtADaPwp1JigBPwpcUfhS0ANxRTqSkIKTFOxQBQAlJTiKMUAJSYp2KCKAG4oxTsUUwG0YpcUtADaTFOIpMUCGkH0ox7U7bRigBuKMe1OxRQIbgelJjmnYoxQMbiilxRigBtFOxSEc0AJ+FFLilxQA3FGKU0UCYmKbg0+kxQIbiinYoxTENxRinYpMUAJikxTse1H4UANx7UmKdj2oxSAYRRinEUYoAbikxTsUYpgNoxzTiKQ9TQIdRilApcUGo3FGKdijFACbaMU4CigBMUYpaXFFwsIBS4opaQxuKMU6jFIY3FGKXFLRcBMUuKXFLigLCYpcUtFFxWExRinYoouOw3FGKdij8qBWExRjil70YouOw3FLg0uKXFFxWG4pcUuPalxRcY3FGKdijFADSKBTsUmKQhMUYp2KUCmFhuKMe1OxRigLDcUYp2KKAG4pMU+kxQAzFOxS4oxQAmKMU7HtRigBmKdijFLQAm2kAp9JjmlcBMUYpxFJQAmKMUtFO4hMUY9qWjFFwEA9qMUuKKAGkZoxTqMUANIpMU/FJigQ3FJin4oxQAzFHenYowfQUANxRTse1GKAG4pNvtTsUEc0ANxQRS0YoEJijFLijFADSKTFPoxTAbikwKcR9KTFIQ2ilopgJijFLRQA3FGKdikxQIQUYpcc0YoAaRRinYpMUCG4pcD0paMUAMIoxTiKTFMY7FID6V3K6Fpq/8uwP1Ymnro+nIci0iz7jNcDx0OzPV/s6fdHC9aK7/APs6xHSzg/79ipY7eCP7kES/RAKl49di/wCzn1kedgE9Bn6VKltO/wB2GRvopNehYX0FKMVP159i1ly6yOBGn3hGRaT/APfs/wCFKNOvT/y6Tf8AfBrvajj+5/wI/wAzS+uy7Ff2dDucSuj6g3S0k/EYp39i6j/z6v8AmK7jnGM0c+tL67Psh/2fT7s4kaHqJ/5dj+LCn/2BqP8AzwH/AH2K7Ln1o59aX12p5D+oU/M43/hH9R/54L/32P8AGl/4R7Uf+eK/99iuxpfxpfXKnkP6hS8zjx4d1A9UQfV6evhy+PeIfVv/AK1dbRS+uVPIf1Cl5nKf8I1ff34f++j/AIUf8I1e/wB+D/vo/wCFdWOtL+NH1uoP6jS8zk/+Ebvf+ekH/fR/wpf+Ebvf+ekH/fR/wrq8UYo+t1A+o0vM5T/hG73/AJ6Qf99H/Ck/4Ru+/vw/99H/AArq6X8aPrlQPqNLzOS/4Ry+B6w/99f/AFqT/hHr8D7sZ/4HXXfjR+NH1yoH1Gl5nI/8I/qH/PND9HFH9g6h/wA8V/77FdaG+dlzyAD/ADpc+9H1yp5C+oUvM5A6DqAH+pH/AH2KYdGvwf8Aj2J+hFdn+NB+tP65PyE8BT8zi/7Hv/8An2b8xTG0y+Xrayfgua7bNLn3p/XZ9kL+z6fdnDGwvB1tZ/8Av2aja2nT70Mi/VTXe8+tHXrT+uy7C/s+Pc8+II4IOaXBrvtinqq/lUbWlq/37aFv95AapY1dUQ8u7M4TFLiu2bTLFutrEPouP5VE2jae3/LAD6MatY2HVGby6fRo47FGK61tBsD0R1+jmoW8OWpA2zSg++DVLGU2S8vq+RzFGK6FvDQx8tzn6p/9eom8OTgfLNE31BFUsTT7mbwVZdDC70uK1G0G9X+BD9GFRNpN8nW2c/TBrRVoPZmTw1VfZZRxSYxVl7S4iHzwyL/vKRUJUjqKtSTIdOS3QzFGKdiincnlG44o606ii4rCEUU6jFAWGEc0mKkxRimIZijFP20hFADaMU4CjHtSAbRilxS4p3EMxRTsUUXAaRSYp+KTFAhtLS0Y5oAaaMU4ikpgNNJj2p+KMUAMxS4paWgBmKXFLiloAZijFPxzSYoAZj6UYp2KTFAhhFGKfijFAhmKMU/ApMCgBmKMU/H0oxQAzHNFOIoxQAwiinYoxTENxSYp+KSgBp/CinYpMCi4M9Hooor54+sDFFFApAFGKWkp2HcWo0+4fqf5mpKaowp+p/nQFxaKXFGKAEpKUijFACd6XvSgc0uOaQxMUYp2PpRigY3/ABopaMUwCijFGKAAjikFOxSYpAIKWl2k0bTTAiX/AI+JP91f60+gIR
Mx9VH8zS4NABSU7FAFAhtLS4oI9KBiUUYpMcUALRSYpaQB9aM0hpQKYBmk/KlxSfiKQBRRiloATNLmkOc0UDFz7UxVVl+ZVPzN1HuafTI/un/eb/0I0xNET2Vq/wB63iPvsFQNotgw/wBRj3DH/Gr1FUpyWzIdOD3Rjv4dtjnZLKv1wagbw4c/JcD8VrfFLVrEVF1MnhKL6HLyeH7xASpjf0Abn9arvpV8nW3Y/Tn+VdfS5rRYya3MngKT20OGkgli/wBbG6f7y4pgFd4cHggH61A9rbSD54ImPqVFarG90Yyy7tI4vFIRXWvo9i//ACy2/wC6xqtL4ft2/wBXK6n35FaxxlN7mMsvqLbU5vFGK2n8PTc+XMh9Mgiqr6NfR/8ALHd/usDWqr031MJYWrHeJn4oxU0ttNCf3kTr9RURFaKSexg4SW4hFNxT6Sncmw3FJin4oxTuKwzFGKfikxQIbilxS4pcUANIpMU7FBFADMUYp+KQimA3FGKUClxSuIZijFPxRincCOjGafikxzRcQzFGKdijFAhuKMU7FGKAG4pCKcRRQIaBRinUmKAGYpcU6jFMBm2jFPxSEUDGYo20+jBoFc6WTxr4bibDatEf9xHf+QqrP8QvDcIJS7lmPpHAw/8AQsV4wKO9fPn2vsonrZ+J+hDpb35P/XNP/iqhPxS00H5LC7I9yo/rXleBTgKVx+yient8U7LHyaZcE+8ij+lQt8U1/h0cn63P/wBjXm+KXoaLsfs49j0J/ilKfuaQi/705P8A7KKrt8TdSIwllaKffcf6iuGzS8EYpXY/Zw7HaH4maz2ttPH/AGzf/wCLpr/EjXGGBHZL/uxH+rGuNGBT6LsOSPY6d/iB4gY/LcxJ9IV/qKZ/wnfiMn/kIAfSCP8A+Jrm80oNK7Hyx7HQt428Rtx/aTD6Qxj/ANlqM+Ldfbrqk/4YH9KxM0uaV2Oy7GwfE+uN11W7/CQikPiPWj/zFr3/AL/t/jWTmlzRdlWRp/2/rJ/5i19/3/b/ABpp13V++q3v/f8Ab/Gs/NLmi7CyL41vVf8AoKXv/f8Ab/Gl/trVD11K7P8A22b/ABrPBpQaV2FkX/7a1Uf8xK7/AO/zf40o1zVh01O8H/bZv8aoUZFF2OyNH+3tYHTVb0f9t2/xpw8Qa1/0Fb3/AL/t/jWbRnFF2Fkay+I9aHI1S7/GUmnr4p1xTldTn/E5/nWOGpQaV2PlXY3l8ZeIFGBqLH6xIf5rUi+N/ECnm9VvrCn+Fc7S5ouxcsex1KeP9aXhvsz/AO9F/gRUw+Ier97exP8A2zf/AOKrkKcKOZi5IdjsY/iJqAP72ytWH+xuX+pq2vxHOBv0oe+Lj/7GuEyKOKOZh7OHY9BX4jWxHz6bMD/syg/0qVPiHpzH57S6Ue20/wBRXnPFHFHOxexh2PTo/H2iucFbtPdox/QmraeMdAfH+n7T6NC4/pXlFHFPnYvYQPYE8R6K4yup230LY/nVmPVtOkxt1C1Oen75f8a8WwKMUe0YvYR6M9zRlkGY2Dj1U5p2Mda8MjkkiOUdlP8AsnFWY9Tv4T+6vblP92Vh/Wn7QX1fzPacUYryODxRrlvnZqU5z/z0w/8A6EDVyPxzrsagGeGT3eEZ/TFNTQnQken4pF4H4n+ZrzyP4gamMeZbWjDvhWB/nV6H4iLwJ9N+rRzf0I/rT5kS6Mztu9HauWh8e6VIwEkVzD7lQR+hrQj8XaDL0vwp/wBuNx/SjmRDpyXQ2aPxqjFrmkzECPUrUk9jKAT+dX1xIgZGDKe6nNMlpoaaKdijFAhDSU40lACfjRzS0YoABRRRQAhqJ7eCT78MbfVRUtHSmm1sJpPcz5NGs5eiMhPdT/jVOXw8Cf3VwQPRlzW4KK0VepHZmMsNSlvE5iXQ7tPuBZB7HH86qSWVzF9+CQD/AHTXYntRW0cZNbnPPAU3s7HD4oxXZvbQSA+ZCjH3UGqkmi2b52qyH2b/ABraOMi9zmll0ujOXorcl8PnrFOD7Mv9aoyaVeR5/clh6qc1vGvCWzOaeEqx3RQxmlxUjxPEcOjKfQjFNFbJpnO4tDce1GKdRii4rDMUmKfjFGKAsMIoxTiKTFNEjcUU6kIpgNwKTFPpMUhDaMU7FLTEMIpKeaTFADcUU7FFADTSYp9JikMbikp+KQincQwilpaMUEs8woFIKXvXgH3Y7NKDTKcKQhwpaQUtIAoopR0oGKD60ozTRS0gHd6Wmc+tLmgB4NLmmfjS596VgHg07NR5pc0WHcfmjNNDe9KGpWC48GnCq88siQO0Sh5AOFJxms9dYi8tR9tg88H5o2XA/PPFUoN7DubGaM1FHKJEDDHTnBp26psA/NLmo93vS7qLBcfml3e9R7qXd70rDH7qXNR7qN1FguSZo3YqPd707cKLASBqM1Hu96XdRYLkmaX8ai3e9LupWGS5pQai3UBqLAS5pc1Hupd1Fh3H5ozTN1G6iw7kmaKYGpc+9ILj88UAim596QdOvc/zoAk4oxTPxpQT60rDHUqSSQtuikaNvVDg03dRmgRfh1zVoMeXqN0MeshP860bfxrrsDAtcpMB/DLGP5jB/WufzRVJsTjF7o7CH4h3yt+/sbd19ELJ/PNaEPxDtHH76wmjP+w4cf0rz/8AGlwKOeRLpQfQ9Pg8b6JKcPNLCf8AppEf6ZrTh1vSrjAi1G2Ynt5gB/I145jmlxT5yXQie4IySLuRldfVTkUteK295d2mfs1zNDnr5blf5Vfg8Ta5btlNSnPtIQ4/8ezVc6M3h30Z6170YrzqLx9qiACWG3l99pU/oa07b4hW7H/StPlT3ikDfocU+ZEOjNHZUGsW38XaJcYH2zymIziVCuPxxj9a0oL6zuuILuCUnskgNO6IcWt0TelFKVxxRimSJRmlpOfWgVgoowaOaAsNZVcFWUEehFVpdMtJRzCFPqvFWjmiqjNx2JlCMt0Y82gDrFOR7OM/qKpTaPdxDIUOP9g5rpqK2jiqi3OaWCpS2VjjJIZYj86Mv1GKZXaOiyKVdQynqCM1Vl0uzlHMQX3TiuiOMX2kcs8uf2WcrikxW9LoKnJimIPowzVGXSbyL/lnvHqhzW8cRCWzOSeDqx3Rn4oxUrxvGcOrKfQjFR1smc7g0NI4pMU7FGKdyXETFJin4oxTuTYZijFOxRii4WGYoxTse9G3mmFhmKXFKR70EUh2G0YpcUY96YhhBopxoxQKx5UDTs1CGHrS78d68Cx9yTDFOFQCUU7zR60rBYmHWlFQ+co7003kS9XGfY0WY7Fmioo51k6BqmJAUk8YHepCzEpcgUxmGOtM8z3phYmzSZqHzfc0eZmiwE+73o3e9Qb+epxS7x60WCxNuGKUPVfcT3pd9FgJ99LuqDdRvosFifd70yFRbRXcULNHHdqVnRTgSA8EEVHupd1VqthDoI47aIRxKQg4AyTUu+oN9HmUrDLAajfVYuTShjipsBY30u+q+6l3Uxk+/Bo3iod3FAb60WAn30u+oNx96Nx96LAWNwo3VBuNKGJpWGTBqcHqvk0uTRYCffS76
g3GgPilYZY3Uu6oN/1o8z60coXJw1LuFQeZ9aPM+tKwFjeKXcKreZ9aXzKLAWS+Kgubs28G9V3HJzk4AHqabvpj7Jo2R1DIwwQehppdxjRqbrbCWQx7T0YKwQ+mG6fzq9HLvjViMEgEj0NZzWweyjsnmna0iYMkDTMUUjpgE1aVsCnNLoJXLO6nBqrb6cHqLDuT5pc1BvpQ1KwE2aUGog1AeiwE2aM1Hv8AelDUrBckzS5pgajNFgH5pabmjdQFx1NKgnkA0uaKQXLtvrGp2iqsF9cIo6L5hIH4dK17Lxtq1s3+kNHdJ6OgUj8RiucJparmYnFPc7q2+IMTuBc6e6LjlopA3P0IH861rfxhok7BTdNEx7SoQPzGRXl4ApQKamzN0ons8N1bXQBguYZQenlyBv5VNivE0Z4pA8bsjjkMpwRWlB4j1q3xs1Kc+0hDj/x7NVzmbodmeskUmK4C38f36DFxa282B1XKE/zH6VsWfjzTZyFuoZ7Zj3wHX8xz+lPmRm6UkdPRVS31jS7v/U39uxPRS4DfkeavbeM9jVENNDKDT8UmKBDKWlIpMUDGMiOu11DKexGapT6TaTAgIY27FeP0q/SE1UZyjszOVOMviRhS6FIOYpQ3+8MVSl0+4gHzxNj1HIrqcUoHFdEcVNb6nNPBU5baHGlfrSY+tdbJaW8xJkhQk98c1Rl0OJuYpGU+h5FdEcXF76HHPL5r4dTBIpMVfm0q5iPCeYPVKqNGyHDKQfQjFdMakZbM5J0ZR0kiKjFPxRiruZcozFJipMUhFAWGYoxT8Um2mKwzFIRUm2grQFjxbcaaXI71gP4lH8Fsfxf/AOtUD+I7hvuQRKP9rJrx1Skfb88TozIfU0hd/WuXbXL9j8rIvsEz/PNNGrak3SU/hGP8KpUWT7SJ0z727mo4YXE4JbIx0rmpLzUZOTLL+HH8qvaE10+qJ57yMu08MxNEqbSbuNTV9jtLSPgVPeKEtJWZgFCHJJwBS2qfKKTXF/4kt2B3iIri+0VzHNjVbaDj7XGR6Bs07+3rHk/aV/I1y32L1f8ASlFiP7/6V3+yiZuUux03/CQ6eOsz/ghpp8S6eP4pW+if/XrnRYp3Y04WcQ65/On7OAryN4+KLEdEnP8AwEf40xvFMAPyW8h+pArG+yw/3T+dL9nhCn5P1NHs4CvI1f8AhK17Wjf9/P8A61J/wlef+XM/9/f/AK1c0DU9qqvcoGGR6fhVeyh2FzM3v+ErYf8ALmP+/v8A9aj/AISuTtZp+LmqYji/55L+QpQkY6Iv5VPLDsVqTv4ouz923iH5n+tRHxJqR6eWv0SlG0dAB+FLuHpRaPYdn3Iz4h1Q/wAaf9+xR/bmrHpJ+UY/wqTcPSkzTSj2Cz7jDq2rn/lq4+iD/Ck/tbV/+e0v/fA/wqTPtRmnZdhW8xn9p6uf+W83/fP/ANak+36v/wA/E/51KGoBpadgt5kP27Vv+fmf/vql+36sP+Xqf/vqps0Zo07BbzIf7Q1b/n6n/wC+qP7Q1b/n6n/76qXNGaenYLFc6vqqPt+2TbvTNSDVtaHS4m/FR/hVWbH2z8RV8UNJdBLUjGsa0P8AlvKf+AD/AAp39t6yOsz/AIxj/CpM0bjS93sOw0eINYX/AJag/WIf4U4eI9VHJaP/AL9CjNGaLR7BZ9yVPFl8vDwwv+BH9amHi6X+KxXPtIf8KplUP8IP4UnlR/8APNP++RScYdh69y+PFx/isfyl/wDrU8eL4+9m4+kmf6VlmCE9Y1/Dimm1g/ufqaXs4dg1NxPFtiR88Nwp9lU/1qVfFOnE8+cv1T/A1z32SD+4f++jSGyhP94fQ0vZUx3kdQPE2lngTMPrGami1rTZMAXsI4/iO3+dcAyhZio7EimsOlDw8Re0Z6ZHcwTDMU8Tj/ZcGpQx+tebx2LPGrhhyKlVL63/ANTPKo/2JCKh0F0ZXO+x6JvpQ9cEmqaxCu0XEpH+0ob9SKeviLVoz8zq/s0YH8sUvYS6MXOd2GpweuMi8XXaj97aRN7qSv8AjVuPxhD/AMtLORfUq4P9BUOhPsPnR1Qel31gweKNMmOGeSH/AK6J/hmr0eq2Ev8Aq7yA+mXAP5Gs3Tkt0PmTNAPTw9VVkD/dIb6HNODGpsMtB6XfVcNijd70rBcsB6dvqtu96cHosBZD5pwb3qsr08NSsBPu96A2Rmod2aUNilYLk4NLuqIPShsmiwEoNBPNMBpc0gHUcd6SlHWmAYFWrPUb7Tyfsl3NCCckI5AP4dKrGlFAM6Sy8capbv8A6T5d0nowCn8wK3bTx7Yy5F1azQHsUIcf0P6V59S1Sk0ZunF9D1q017Sr4gQX0O48BHO1vyNaGM9ORXiu2r1lrOpadxa3kqL/AHSdy/keKfOQ6PZnrZWmkV5/aeOtTgwtxFDcr3JGxv04/St2x8babdOEuY5LUn+JvmX8x/hVKSZm6ckdHiiobe+s7xd1tdQyj/YcGp9pqiLCUlOpvekApqN4Y5VKugYHsafRTTsJq+5nTaRBJyhMZ9uRVGbSJ48lMOPbrW9iito4ipHqc88LTn0OUkheI4dGU+4pmK6x40kG10DD0IqnJpVs4JQFD7GuqGLT+JHHPANfCzn8UYrSm0maMZQiQe3Bqi8TxnDqyn3GK6I1Yy2ZyzoTh8SI8UhFPINJitLmLifMCxRj+EU8Kg6KPyqMNkUu73rh1PsNCXj0oyKi3UbqQyQmtHRBu1Jf901lbq1/DmG1UD0Q1M/hYrnd2cfyiovECbdFuT/sVoWcXyD6VV8TDboN1/1zNcNveQKR5nmlyfSoCxBpRIDXfYLk26kLVCT3o3U7Bcl3Uhb5T9Ki3UFuD9KdiSitWLU4ul/H+VRIKliG24X6/wBKt7ELc0N9G6ot1JurOxoTbqUNUG6l3UWAm30b/eod1G+iwE2+l31Buo3UWC5Nv96XfUG40bqLBcn8yl31X3H1p2/iiwE2+gPUO+jdRYCGU/6WD7ir26qL8zZ9xVjdTaJTJ91G6qrfMQdxGPSlAKHdubB9ehpWKuWd1LuqDfRuosFybdzTt1QB+aN3vSsMn3Ck3Cot3vRuosBLuFLuqHdShqLAUJObhv8AeNNYdPrUm3dM3+8aJEwB9a0MzRt+IE+lP3VDCcQp9Kdmsmaku7igtUefejNIBxVG+8in8KY1vA38A/Cl3UuaYiJrGEj5dw/WojYej/0q2WpU+dgq9ScCndhZFT+zbuOISoSFJ4IfFTRX+sWv3bifHoTuH65rSvZAm22ThYwAfc1U3VKk3ugcUOi8ValCw81Y5FHUFcE/iK0Y/GNu2PNtZU/3WDf4VlkBxhgCPeo2tYG6p+RxScYPdCszpofE2mSgZmaMns6n+nFaFve21z/qLiKT2RwTXCPYRH7pYfXmoWsZByrAn64qXRg9mNNnpQbmnh/rXnMGoatZ/cnmCjjDfMP1zV6DxZfxECZIpV78bT+n+FZvDy6D5kdzvpfMrlIvGNu2BLayoe+xg3+Fa1rrVhdKvl3KKx/gc7SPzrN05R3Q00zXD04Pk1WByM9qerVF
gLO+nBhVfdTg9JoCwDUFxqEdsVBUsSeTkAD6k0oeqs+mWd1dR3M0StNGMKTyPxB4P4iiKV9RNltdRhknWNFLq3R1IK1aBFZllp1rYSSPbxhXlOWPQfgBwPwFXg9ErX0AmpajDZpwNSA+lpuaXNABiijIpc0AIMg5HBFatn4j1ayAWO8dkH8Mp3j9elZYpe9MnRna2fjtPLVb2zbeOC8J4P4Hp+dbdr4l0e7IC3axsf4ZRs/U8V5gKXFVzMhwiz2NSrqGUhlPQg5BpcV5NZ6lfaeQbS5kiGc7QflP4Hit2z8b30LYu4Y7hMclfkbP8v0qudEOmzuxRWJYeLdMviVd2tnA6TYAP0PStpGSVA8bBlPQqcimjNprcMUYpSKQimISkZFcEMoI9CKcKKAKE+lW8mSgMbe3T8qoy6TOnK7XHt1rcpe9bQxE49TCphqc+h8U7qN1R5o3e9dVj0OYl3Ubqi3e9KGosFyXNb3hIb9ax/0zP8xXO7q6TwUN2tn/AK5H+YqJL3WKT0PULSL92KzPFa7dAvD/ANM/6ity1XEYrH8Xj/inbz/rn/UVx21RFOWp5CzZpm6kY88Uw13pGrJA9O3ZqDNKGosK5Juo3cGo80ZpiJIkzmnAbZ/8+lPt1+99aHGJj9f6U+hK3HE0maQ0VBoLmlzTcDNLigLhmlzRtpQhJwBmgLiZoqUW8zfdic/RSaeLK6PS2m/74NFmLmRXpasjTr09LWb/AL4NO/sq/P8Ay5z/APfBp2YuZdypRV0aRqBP/HpL+IxT10TUGP8Ax7MPqRRZhzx7lDNGa0x4f1AjPkqPq4pf+Ee1DH+rX/vsU+Vi9pHuZbKcZ9x/WjmrRtJRP9mIHm71TGe/NXB4evj/AAIP+BinysXPFbsyc4o71rf8I5ff3Y/++xR/wjt9/wBMv++6XKx+0j3MrNLnNah8O3+OPK/77ph0C/X/AJZo30cUuVh7SPczgSDS7jVw6Rfqf+PVz9OaadMvh/y6Tf8AfJo5WNTj3KuTS5NTmwvF62sw/wCAGm/ZLn/n3l/74NKzDmXci3UobkUphkXrGw+opoU5osVcZEuZj9TT7lNsa/Wlt1zKfxqS8H7tOP4v6VdjO42M/u1+lO3UxBhF+lFZtGtx+6jdTDRnilYCTNG6osmjNFguTbqvaaFBkuJPuRL+tZgPNXpj5FlHD0Z/3j/0qJLoUiKSQu7Mx5JyaQNxUW6jcc1SQmyYGlzUW7mnbqTQEmacKh3ZpwakMlzUM4VoXyASAacGzTZf9S/+6aEDMpaU8EUkYp7CtjJbFmE3tuA8LyJkZBRv8K07XxVfwECdUmUdcjDH8RTYP+PaL/cH8qHhSX76A1jKz3Roo9jct/Fmny4EvmQHvkbh+Y/wrWt7+1u8/Z7iOQjqFbn8q4V9PibOMiq7afKnMZzjpg4NZulB7B7x6ZmlVq8+t/EGrWXytJ5i4wFmXP69a1bTxmpAF5alT/eiOR+R/wAazdCS2C6OvDU7dWZaarZXqgw3KEn+AnDflV7cRWTi1uBOGpwaq4fmnhqmwyffTg1V91ODUWEWAacDUAenB6LAT5paiDU8NxSJHilFN3Zp1AC0UlLQFxRUsM01u2+CaSJvVGKn9KizTs+1AHQ2PjLULZgt0FuowMc/K35gfzFdDaeL9KuRiSR7dsdJF4/MV5517Uu3IpqTJcUz1yCeG5jEkEqSoejIwIp5FeS21xPZy+bbSvE+MFlOM1v2vjPUIdqzxwzqOpIKsfxHH6VSkQ6fY7rFFY1h4p029bY7NbvjpLgA/j0raUrIoZGDKehByKozaaPiGjNGB60nA716ZqLRSbhRvAHA5oFckHSup8CjdrrD/pif5iuULkEjA/A5rq/h9l/EEntbsf8Ax5aiWwpS0PXLZMRjisXxiMeG73/c/qK37df3Y+lYPjQ48MX3+5/UVy21RnTep4ue/IphZR3FNfrUZ6HPWuw2cidFLhmUZCjJIHSpUgLmIKykydMducc1UeVmjSPChUGOB15zk1dsruXR74yCNWlUbSG7Z/rVIhyfQZdwy2c7xSoQy9+gNOtpbU8To2cNyGwM4+X9abfalcX0sjyNhXP3R0A7Cqi/eH1o0C7e5q24HzDHerulWUd/rCW8udrbicHHRc/0qvbJzJ/vVo+H+PEcJH/TT/0E00hSdk2bv/CO6an3rct9Xb/GpU0nTk6WcX/Ahn+dbHyzDbjDenrVdk2Ng1aSOR1Jdyn/AGbY/wDPnb/9+x/hTlsLNelpAP8AtmP8KsUVVkRzy7jUjjThI1X2UYqeOUr8rZMZ6rn9R71FmjNFkHMxZVaPBDb0PRvX/Cmj5hxTkk2gqy7kPVc/r9aZInlEMp3I3Q/0PvSKbuL0pc01TmlxTJA0lKaSmAZpaTvS4pDOUcf8VD/29p/M11QzXMOMeIB/19p/M11JpI0qdBh5pMU40lVczExQBS/hQTkk4HXtQAdOlFFApAFA5opRQAoJ7Gn5zjPP1qMU8dRRYdziLhQNZuwOglf+ZplyPlj/AN7+lTXHOtXZHTzX/maZdrhY/wDf/pWR2LY6y1hilsLbfFG2Yk+8oPYUNYWZP/HrD/3wKfZf8g+2/wCuKfyFSmtLI5XJplF9JsZOtso/3SR/Kqz6BZNnb5i/Rv8AGtY00jNLlTGqkl1MJ/DJO4w3YPorp/XP9KrN4cvV6PC30Y/1Feg+HJ9HtZ531bawKhY0e3Min1PDDB/xro21LwaYyRb2ZbHANnKM/wDjxrGbUXblZtGpJ63PG4NCvVnUvCDGp3MQwPA56ZzVa7trx7h5Ht5BuPA2ngV27gBHmCbBMSETP3Vzn/634GoPeqVJPUHiZLQ4Mgg4I5o6V3UsUc67ZY1cf7QzVOXRrCUH9zsPqjEUOl2KWJXVHJZpc10L+G4SfkuJF/3gD/hVSXw9cKf3UqOPf5al02aKvB9TJDUuanm067gYh4HwP4lXI/MVXKlTyMVm423NVJPYfmhzmJx/smmUN/q2HtRYdynCuafKuAKdbrz+FSTrgL9a0sQti7EcQRj/AGB/Kn5qGNsRJz/CKcDWDWpqSdQRntSr8oxkn6nNMDU8GlYYpAYYYAj0NQyWcL/w7fpU2aKLtCM2TTnXPlsCPTOKmh1XVrEjFxKVH8LncP1q5SEZ7Zp819xcvY0bLxhGQEvYWRu7pyPyrftNWsL3/UXSM3908H8jXDyWkTj7u0+oqpLZGNSyMCBzjpUOnB+Qao9QyaVWrzix8R6jZbVWXzIl/gkGRj69a37PxlbyHbdW7RH+8h3D/H+dZyoSQrpnVBqcGqpb3dvdxh7eZJFP908/iO1TZxWLVgLAb3qRXqtupwaiwiyGp4aqwfmnh6VgLAYU7IqANUgakIkpRTFNSCkAuBS0etOxQAgpcUUUgCp7e8urQk21xLFk5OxyM/WocUtMD56kieJtsiMpwDgjseRTM4BFOkleQAMeFzgemajr2DAKUCkpwoAUV2Pw4GfEU3/Xs3/oS1xwrtPhqM+Ibj/r1b/0JamWwS2PYIF/d1zvjcf8Uxff7g/mK6WEfu/wrmv
HRK+Fb3H90f8AoQrne6MqT1PEnB64NRnpWlaaiLOG6TyEkaeIx7yeUBHOKoRRPPIsUYy7cAetdRuyOinyKF2jBDY+bPr/AJxTKYgpyfeH1ptOT74+tAG3bj5pPrWhoA/4qGL6Sf8AoJqlbj5pP96tHw+P+KiX/df+Rqo7kVPhZ17DNSBw42yDns3+NIRzTCK1aucN7Ehtz25pv2dj6imYpMVNmO6Jfs596T7M2e9R7RRgUWY7of8AZzTljKcEZU9VPQ1FSYoswuiTyhEQ33o+57j61Lsi/vR/mKgjcxk8BlYYZT0IqQRxh1kKs0JIDAHBHtn+tS7opNMGSMDO5SO+0jNRvFsPYg8gg9a6VbTwe0QLX+oq2MkeUpwfyrCmFtFdyQQytLbZ+SVk2n64qVK5TViuqKfT86eI19qf5Ef/AD2j/wC+qBbx5/1kf/fQqrhY4+TjxAB6XafzNdPXMuufEarkY+2rznj7xrsGtk/57Rf99ihOxU1exUIpMVb+zp/z2i/77FI9vGIyRNGSOQAw5p8xnylTFFKRSYqiQoFO3EIU4wSD0+v+NNoGFKKKKBB3p46iminDqKYzi251W6/66v8A+hGm3f3I/wDf/pT8f8TO5/66P/M0l4MJH/v/ANDWR2o62y/5B9t/1xT+QqTFMsh/xLrX/rin8hUhFaHHLcb+FFLijFAjUg1LT0hSOfRLaYqoBfzZFJ9+GqhIEubs/Z4VhR2wkYYkL+J5qPFWYcQwyTYBYjYmexPU/gP51Fraoq5DdsGnZVbdGnyIfUDgGoKcRSYq1oiW9QxRS4ooEJS0UCgAzTJIYphiWNH/AN5QakpKNx3sUZdFspuREYz6ocfpWbdeH3WN2hmVgAThxiuiFNm/1En+6f5VLhFmsas11OBthnt/DT7gfKn1othz/wABqS6H7tfr/SszrT0Bf9Wv0FLXR2+gWt1pltKGeOR4UYkcgkgHpVW48OXMXMLrMuP90/l/9es3TY414PS5jg04OQaJIZIXKSRsjDswxTKixtcmDg04NVfNKGNS0BZoqJZegqVSDUlCFajmX9zJ/umrGKjmH7iT/dP8qAMnT0V51DAEYPB+lXZrCNxlPkNVdOH+kD6GtbFaSbTIiroyPJubVw6FlKnIZD0rTsvFF/auBNJ56dxJ1/Pr/OnkVDLaxS8suD6ipbT+JD5Ox01n4o0+6AEjmBz2ccfmOK2I5UdA6OGU9CpyDXmkmmuuTGwb2PFNtr690yYtC7Rk8EY4P4dKh0U/hYtVuenh/eniT3rjLTxgSFW6twT/ABOhx+h/xrdttUt7tN0Mgb26EfhWMqclug3NkS8df1qRZR61jm6x3pPthHSs+ULG6sq+oqZZk7sPzrl5NS2nHU+1RG8nm4B2j260OI+U7JHVjgMCR2BqcDNYHh+Hif1+XJ/Ot0bl75HvWT0YrIo3+pR2eVVd8vYE7R+Z/pTbPUGuCvyxyA43GJidh7g5Azj1FaOULZaMbh0OM1IuwDCAAe1CkhWG7aXbT6Xbmi4rHzveQeTINuNrDIHpVatXUlPlRnJwGPH1/wD1VnKwR1YDOOcNzmvYhLmjcmtDkm0N2kYyCPSipZ5vPZTyMDAUnIUeg9qjAqjMceeQAAewrtfhqP8Aiop/+vRv/Q0rjGTY23IJHociu1+Ga58QXB9LVv8A0NKmWwp/Cz2CIfIPpXMeOxnwte/Qf+hCupiH7v8ACuW8eHHhW89wB/48Kwe6MaW54pIgVVbepLDOB2+tRhmXocfSnkZ54HNXLqw+wWqPO0LvOm6NY5QWj5HJA9Rmuk6CpcTRyiIJEI9qAHnO4+tQ9sYoABzlguBnnvUyBp0IATESFuTgkZ/XrTERMhU4OOPQ5oj++v1FSAIBl13ZyMA4xU1vatJGJVwx3Y2Lywxjkj0xmhBc1rcff+taPhznX8+ivVC2+6+fWtHw2M642OyP/OqW5FT4Wdg1MNSGmkVqcAzFGKfiqct9DFqMNif9bKCQew4P+FK40m9iwRSYqTFG2mIZjijFP20EUAMxT4yUbI/HI60KKo6veS2FmbiMD5CCcng+xqWXFNuw/UtQgsJLfP3ZXIIPGz8fyp0U8V0vmRSK6eoNchrus2+qx2zRKyOgYOrdBnHQ1L4WvCl81ux+WReB7j/JqFLWxu6XuX6nXbaUCn7aAK0MDlH/AOQ8P+vlf5muoxXLt/yH1/6+l/ma6k9aEaVOgzFGKcaMUGY3FJinYoxQAwikxUmKQigBtHpS4B5FLigBMU9aKUDFAHF4zqdz/wBdH/maS8+5H/v/ANDT1/5CFwf9t/8A0KmXmcRf7/8AQ1kdvQ66z/5B9t/1yT+QqQjmmWgxp9t/1yT+QqQ1qcb3GnrSUp5oANAkKBk4AyTU91iNlgGP3QwxHdj1/wAPwotfkdpj/wAshuH16D9SKgOaXUb2GmjFLijbTJEoxTiKMUANxRinAfzpdtAxoFNYqhXcwG44GT1NNN3BHceTI+184G7gHjPWsfVb5WleDeuwMCpHVTgZqJTSRrCk5M0b28FttKEPtyZEGMgY606O7iubNnDKpKnKlhmua81pNrONwGRu5z+dIsmwmRGVcrwuOvrWXtnc3+rqxTthz+FPuVyi/U0lqvT/AHf61LcjCqe2T/KqNDs9OkRdIsyzqP3CdT/sirakPGHUjaRmuat5QLaFD2jA/Sri3hWExADB79wKXtTF0OpYv7mPhGCSL0YMoIrGawt53ZR+6PY54/KrM6qT95XGB839KjVwGBPJ9azcrs2hHlWhQn0a7iXcieanUMnP6daoMjKcMCD6EYrrG1HMZCpg8fhUcj299KIp4lIA69CD7Gm1HoNTmviRy1KGI6Gti90WNMvbTgrnAVv8aypYJIX2uu0/zqGrGsZJ7CrcEdelPeVHgkAPJU8H6VX2ijZwaXKXch01czA+qmtcrWZpg/er9DWrSnuENhmKQgU8kCo2kVRyQKgoMVHIqMuHCke9RSXYHCjntnvURZ25JJqkmIhns4W/1Z2n9Kpss1udwJGOjA1o4qG6GIGq03sQ0SWWtXomiidxIjMF+ccjn1reWV3+834CuRtP+PyD/rov8666JazqxSY4PQmiT2q5ElQxLVyNawsU2dD4cjBW44/u/wBa2jF7VmeGUyLn/gP9a3zHWU46mDlqZ5iphjrQaKmGKs3EamUsOO9KJCOoP4VZMWaY0WKmw+ZM8Gv1zanpgMKy5ImjfY6kNwcfUZravJ3sRDLER5ivkZAPT2NZMkzzMrSNkgY6ds5r16PwmuK/iBcW4gKASxyErk7Odvtmoe9dP9nsx4TllRP9ICpuYrjOX4xn6Gsi7t4YbCweMHzZY2eQ5/22A/QVs1Y5FK5FcQxJDbvEwYsn7z5s4bJ4x24xXZfDFc61dH/p3I/8eWuGr0D4at5ut3jhFT/RwMKMDqP8KiWwqnws9WT7n4Vy3j3aPDFzvzt3LnHXG4V1SfdrkviB/wAird/Vf/QhWL3RhS3PFXxuOOnamHnmpCON3HXHWmVujrsNxxSqSpyKcQoXBB359eMYppA4wfrxTJ
Zotd/2nqKz6hIsasu0ukQ4444FdDD4Tktbh5jcRS2qRlg8bfe+U9vTNYFhol9fxl7eHcoI5JA69/pXXaNok2mWF5LcMdzRuoUHjAzzj14q4q5jOVtmYUAwG+uK0fDA/wCJ3IfRH/mKzoM4f61qeFhnW5v+ub/+hChbjn8LOuI5pMU8jmkx3rQ4TO1W+FjaFtwDN8qk+tcPc3091f8A2iR8OTjI4wMVt+ItQjun+zIcNGcgk9DyCCK5jdiQeuawlK7PQo07R1PTYJkmVQrqW2gsoPTIqXFcjoGqR2STecSSVL8+2AB+OTWzoF5LeWsjSkHEhC/z/rWqlc5Z0nG7NWsnX3u4bAyWrABfvYB3fhitikYAqQwBBGCPWmyIuzucfo3id1nWG/fdEf8AloRyv19RV7xJMsMDNBqTbZBgxYDgAj8xXL6wunx3LLYrOhVirpIBgH2Oaqw3kkUckWFaKT76kdfx7Vk30O1U03zIgPHTpViyupLS5SaIgOvIzVZsbjjOO2aAccUkzW11Y9G1fUxp+nK24GZgBgH25/nU2lXYnsLVpZB5siZAJ5bnGcV55c3kl04aRs4AAHYYAH9KW2u5Le4imQgtGcru5AqufUwdBctjo2H/ABPkPrdD+ZrqiK5GzZptQsXc5Z5lY/XrXYd6tGFXdDMUlPIopmZGRRinkUgoATFGKd+FH4UAMPWlxTutFFwGjrTsc0YFLimgOKT/AI/Z/wDeb+dNvekX+/8A0pYebuY+pb+dF6OIv9/+lZnb0OutB/oFt/1yT+QqUim2o/0OAf8ATNf5VIQau5xvcfa3EtnOs8JUSLkAsgYc8dCCKlvdQnvlQTLANp48uBIz+O0DNV8cdaTbzU6XuCJXIjtkjxhn+Zj7dh/M/jVfFP20baaYMbigCn4o207isNxRinbaMUrjsNxWZq7wmII1wYpF5GMjP4itbHrWfqV3awlFnjLZBIZRnHalJqxdNPmOXeWTILN5h7YbJxS+Yybnl5GenekdjI5eEBFJwq4xjHemIXkcfNwx6LwQK5GegkSFiVHl4OB1PGPwpWw0WT5ZI6ELjimsy+X06nHy0ZbYVzuKgjp0FJDZDa/wf7v9amn+4v1P8qjtR8qH/ZP86mnHCfU/yrp6GQ+G6IAVgGCpwR1wKuQzxyABHBOM47iqLiKNVTGJGUZ5yDQxGG3FMlcHP8Nc7eptY0iT2bPtTS2OSBWc128GU8xWKjOCP60sepqVBkUrz25oQWLxc888YpA5HfnFQrPFNzG4z6U/nnnNMLEwlwOv1qQSJJGUfDc9DVTdjPP6UqvkdOKBWJBp9vKX+dYmLYUDmq0mnSxMQAJB6pzVjdgD0qaOUqvynnt7UXDVHP2ZEZB6cYq0bgAE5PX0pmnwrPchH3bST93rVufRZsZjkRv9npVOF9RKajoyhJckn5cVXd2IJJzT5reWBtsiFTUZHyN9KOWxV7j4eUzgZzUwXFMtVzCfrVkJWctzREW2obsf6M5q5sqter/okn0/rQnqDWhnWYze2/8A10X+ddjEtcnZL/plt/10X+ddzBZStyQFHqxxRWZnDYWNDgVZjXmpIreGP/WSZPooq5Fc20GSijPqeTXM5DaNnw0BGlwXIUNtxu4z1reMkX/PRP8AvoVxh1QetJ/ag9ah3Zm6d3c7LfH/AM9E/wC+hRmP++v51xn9qL60f2mvrSsxezOyIQ9x+dNKL6j864t9aRJFTZIxY9VXgfU1J/aq+v6UuVlch5Jqk4mutqHKR8D3PeqI4p2KbXqRXKrBOTlJyZMbmVofKMr+WP4cnFR7icAkkDpTe1KKZJIBXoPwuTOo3xHXylGPxrgApGM9xmvaP2f7JJtV1i6cZMEUSqD6sW5/8dqJuyJmrxZ2Rs7mOHzHglVMfeKECuL8fc+Fbv6r/wChCvd7nBiIPIxXiPxOiWDRL6NBhdyED0+YVgndmVONpHh7DFDRBYVfIyRnAOfz9KV+tMNdSOhkkkUflGWORcFsCMnLAep4xUcSgyLubaM8nGcVNJHHFBFJHOryODvUA/L7c+1N83MKxBEGDuL45P1pkbnpeiXVrLp8TRzRnIAxwpHsR61oah/yDbn/AK5N/KuP8H2CNNJPMjb48bQy8YPeui127MGmuAwDP8mD3zWqehxOHv2RyEH8f1/pWn4VH/E6l943/wDQhWZCRl+v3v6VCLp7SXzIyQ24j9ai51OPMmj0l9qKWYgKBkk9qp6jdRQ6XJNvG10+Qg9cjjFRSTQ6toBJl27xt3dAGH/1645tTmFodOmI2xudpHbrxn0pylZGFOjd+hmyszylt2SSST60xSBKCxxj2pCcMQfSkJyQSOeuaxO+xIzk4Y9TwK6Pwom68LGYfKpxGBn6nPauTMnzcH860rDVZtPiYW5IeQEEn+lVHRmdSN42R6WOtQ3ySG0cxT+Q6jcHbG3j14PFcff+JpXtYUgcpKo/eB1wc+tZ6+JdRTyNsv8Aqk2EHkOM9xWjmcsaEr3KWoo7TNMTA5PLPA2QeeuO1UM+tTyvHICwUq2eg6VBxWZ2RjZC0UCjvQULSg0hpB1oE0dVpv8Ax/ab670/lXaMmCSPyrjNPwL/AE3P/PSP+VdszpIDhgcHGRz+Fao4ay1IjSYp4p4XNFzEgNAGaseWfw+tKI29P1ouFivikq15bf3ahkXDZ9aLjI6XtRjNGKYhBwafjNM5z0p4oQHDwDE7++f50Xn/ACx/3/6GlhH75j9f50XIz5Xs39Kk7Tsrcf6LD/uL/KpMVJZW0k8UUca5OwfyrXi8OXhVXcKinoTnBrGviqVFXqSsZUsPUqv3FcxcUYrp4vDUZxvlYn2q5F4XtiM7JH/GvNeeYb7N38jtWV1vtWXzOMxk0u2u6Xw3bIM/ZXP1U1J/Y9sn/Lqv/fNZvO49Kci1ld95o4LbRt9q7v8As21H/Luv/fNKNMtz0tV/74qf7c/6dP8Ar5Ff2Uv+fi/r5nBbaiuY5zARbsqv6tXoZ02FRzaLj/rnR/ZluRk2KEf9c6Tz1dab/r5FLKrP41/XzPE5ri8MjKZ8sDyQeDVYmVnGJACT36GvbW0fSSTusEz9SP61Xl8M6DP9/Tl/Bm/xrP8AtinfWLR0LANLRniwcybVLAdQOMZpSHOSGXA444IxXr7+CfDjf8uci/SU1Wl8A6C5ypuoz/suD/MVazag+4fUZnkcshMmxhlccYHeiNPLQqWyx644/CvUW+GuksSyahco56EoCBVab4ZyFT9m1SCQnHEkZX+RNbwzHDy+0ZSwlRdDzm3HyRj/AGf61LLyF+p/lXYf8Kz1uLaIpLSUKMfLLjPPuKq3XgPxFHgiwDYz92VP8a7o4ujJaSRg8PNdDk5CpTAbBAGT1qt9oclk3Ag8ZxWpe6Nq9lE6z2FxGc8tsJGPqKyljKybmUhh0B4oUovVMpwkt0WHZVQgYywyT1FVHRW+593+IelWiCilsncVxkDiq2wo6nI9TxTTRNhEt5POzn5fXOOKuG6aFMo/AOPn5prxuwLueNvY0ySFVT5WII5ye34UXuFi3HfRuo3kpnjmp
w2eUYY9RWSQphIJGWbAzxinRxhFKLvZieccYoEaoLf3gf1pSzEEEqB64qi8nloIwxJP3sn+tRq0GCGL7+2KAJ9LkEVwrtnHPQZroTOhXIP4YrlbYlVGCR9K1EuY1iGS39a3TMJRvqaDyxsMFciqM1lbTpIyoUIGTg9eRT9wK7h0NOjYbJf93+orRx0M1o9CrDp5SM7XBGeh4pTA6dUNXInAQ5OOaUzr7muacdTqjPQo+Wx/hb8qqX6FbSXIxgf1rV85mIAGB+dUdRGba4/z3qLWaL5roxrZzHJDIOqsCPzrqpruWK1Fwb22cEj92kylxkenWuTUfulPpVY3DVc4czIjJROp/tiQ9G/Oj+1XP8Vcr9pNL9pNT7JD50dR/arf3qT+1G/vVy/2k0faTR7JBzo6j+1H/vUv9pt/erlvtJ9aX7UfWj2SFzo6f+0z/eNH9qN/eNcv9qPrS/aGo9kh86IB2x1pCpBIIII7GrAtZAc5XP1pzW0juWO3JOeK3MisFz065pQOas/ZX9B+dAtZM9BSAjQV7v8AAKR5rnxFLIQWZbckgAd5OwrxJLVx1H617f8AAJCj6/nutv8AzkrKpsOWx7Fc/wCrP0rxX4pn/iWXg/3P/QhXtVyfkP0rxT4n4eyu1/3P/QhWC3RjHc8RkHJqIirr2r5OAMfWojaSe3511o3KtOQ7WB61P9kk/wBn86kFm+3tn61RJt+HtSvmnEKSbkJGd7dB7UuuX73OqNEsrNCjjCnscYNV9Elk06Z3Yrg4O3aDk/XtVfyZXuN7YJLZJz1p3IUVzXLUX8f+9/SqN30/4Ef51fUBck8bm4/KqN5xt+p/nQ2Ulqa9nqBXw5cWqsRJG+4496wJJC7ZZuR7dauW06qs0ZPEqY6dD2qhIhD471F7lxjZsPMOzJPOfSgrhd2eewpqAE4Y80+U5xjPTpmkWVuuOTU8jtM4baFG0AKowAAKYiqWO7OMdvWrMMBYktnYgy59P89KolkE00lxJvldmYADJOSahNXr2XzHBW2WKMDCgD+vc1ToBCUmeaO9KAKADNLijFLSGGKTFSIMuo9TU0y8jFFwaNgxvILOOM7XZkVTnGD0612CyWel20cDyqnHAIOWPc1x12r/AGa3KjnIx9cVatdNVLoPqlzsAG/aDuLY7Z7VdzmnDmOyjIkjV1PysAQfapUz0P4Vjw+JLCWWdVykMSKUJXr1z/SoT4rtfsssgRvND7UQ/wAXoSaLnP7KXY6LB9KMf7IqnZ3UlxaxyFslhnp/9YVP5j+v6U7Gb0JlI/u0y4xheKarNnkn16USuWC5NCAhopcUYNMQmPelA5oqvcX0NuvXc3YChsaTexydpC0kpCgk4qzLFGMAqZJFOQA2F/PvWlb6dc3p2wxCG3/vngf4k12Wh+DYoVSe6LZPIyBvb8P4R+tc1bEU6MeabsjuhCU5csVdnK6Z4e1HX5le4j8uFByxU4A9BuyR+FegWljo/hPSJXM/kWmAzyTNjew7Knc/QE1D4j8Wad4WgMO0S3uP3doh4XPdj2+nU15XrOqah4ovzeagOnEdvFnZGvoB/Pua4lGtjVyzXLB9Or9eyOn3KDutZfgjoNW+JOqX7yW+gQiztweJyP3pHuTwv0HPvXO3Oo+JL4lrjXrls/wtdSEflUASdVCrbuAOwQ0uy5P/ACwk/wC+DXoUsNClHlgrIxlV5neTuVWsrx23PeKWPfk05bK5Uf8AH6R9Mj+tWPLuv+eMn/fJpDHcj/ljL/3ya05GLnRH5F4P+YhIPxP+NN8i9z/yEGP1ZqeVuM/6iX/vg0hEo6xsPqpo5GPnQJ/akZymouv+7Iwqymo6/GP3et3a/S6kFUy7DrSeaaPZD5zUTxD4qTga7d497lj/ADp//CU+LgQf7Vkkx/fKN/6EKyDMfSmmc0nRi90NTZ0MXj/xXAwErJOB2NvGR+aqKtn4p60G/eaVp+B2Ecq/+z1yfnmk88+prKWBoy3ivuLVecdmd1Y/FWMsRqOmke8Eo4/Bv8a14viP4fmxuN1Dn+/GDj/vkmvLTNkYPIPY0wrA/wB6JPwGP5VyzybCy+zY1jjqq6nttl4l0W/GbfVLZj/dd9h/JsVqrMwAKSHB6YPFeH+H9Ej1PVFgSUxKiSTMxOQAiliPxxj8a9xcgW9sgABWFQQBjHf+teDmuX08JBTpyZ34TFyrT5JIX7TKP4s/UVWubaxvR/pen2kxPUtEM/nT6SvFhiqsdpHounF7oybjwl4dueVsmtnxjdA5GPwPH6VhX/w1tZIiNO1FgxPK3IGPzArsjTa66ea14dbmcsLTl0PMNR8C+IrYF4rdblFGAbZwxP8AwHr+lctcQywO8VwHWUcEOMEfhXvKyMhBViCPSuU16ytv7Rme7QNHMd0bmMMAO49sGvfy7H/Wrxa1R5mMoKglLozypI2VwVcsPpUzzBIwuTgEDArrJPDVrcljZzlfYHI/I4NYl14b1GJm2wiUKM4Q4P5HBr1L9zhUk9jGkHnOVUtnOST3qQtGqHO7IOMU8WzBz5gcY6hh0/CmyhHLYJViemOtUgsNjYJGpOce1SJLhhk4BOcdQajU4iHAPtQ0bNjavOegP9K1urk20NCO4Mqk7h+FBZvU1FbqRCMjB6HNS4roi7o55KzJYf8AVn61KBTYB+6P1qZRxWMtzWOwgXLD61W1FP8ARrj6GroHzD61DqKj7LcfQ1jLdGkdjmgP3C1mmtYD9wPrWSa1IY2iiimSIelJQelJQIWkNGaKYgqUGoqkFJlRPdR4d0n/AKB1t/36FOHhvR886dbf9+xWmKcK4uZnPzMzP+Eb0f8A6B1v/wB8Cl/4RrR/+gdb/wDfArVFKKnnY7syv+Eb0cD/AJB8H/fArtPh3ptnYSakbW3SIuI920Yzjdj+dYOOK6nwRw999E/9mqeZ3LTOmufuH6V5br9pb3+sTW9zGJYioJVuhr1K6OEP0rzPUv8AkYp/9wU2yVuYbeF9G/6B8P5VXl8NaMP+YfD+VdCe9VpgMUcz7lJsxo/DWjE/8g+H8qtx+F9F/wCgdD+VWoqvR0cz7jbM9PCWhnrp0P61YXwhoI/5hsX61px1YFHO+5F2eUePNMs9L1S0SygWFGjBIXucnmuJveCn416B8Sz/AMTuxHbyf/ZjXn98OUrrpO8dTojsischcg/jSsVLBuc96iJKkihWxnPcVRqLgbyR0prMD1qRSuAew6g0m1Tg4zzQFiewihecGdmWNQWO0ZzjtSvMzL5ca7VOCQO5qeOzLWsUkb4MpIYlsAAdjV62tY7fTHumA81mKoDzkd/wpmdzJuI5Ys7jt3KDtJ6g+lUzVmbG87TkHoPT2qEigtLQixzTsU/bRii4WG44pQKdtzTttK47CIMSJ9asyDLCq6KfOT61cZfnFS2O2h23i+wtrTQLdoYwpFztyPTyxXCyzyy7d7khRtA9BXoPjY58NW59bz/2mK87Ip3M6auhuferMZS3YGVPMYEHy84/P/Cq2KcASc00y3E7TR9Ya6YI45J4AwqIPqetdCoBGR0riNAsmkuFle2MqZxu
(remainder of the base64-encoded JPEG bg_image payload elided)"
+          ]
+        }
+      },
+      "widgets_values": [
+        "[{\"x\":568.1871482594877,\"y\":385.0405294042721},{\"x\":566.745048898423,\"y\":216.3149041597034}]",
"[{\"x\":568.1871337890625,\"y\":385.04052734375},{\"x\":568.1571044921875,\"y\":381.525390625},{\"x\":568.1270141601562,\"y\":378.01031494140625},{\"x\":568.0969848632812,\"y\":374.49517822265625},{\"x\":568.0669555664062,\"y\":370.9800720214844},{\"x\":568.0369262695312,\"y\":367.4649353027344},{\"x\":568.0068969726562,\"y\":363.9498291015625},{\"x\":567.976806640625,\"y\":360.4346923828125},{\"x\":567.94677734375,\"y\":356.9195861816406},{\"x\":567.916748046875,\"y\":353.40447998046875},{\"x\":567.88671875,\"y\":349.88934326171875},{\"x\":567.8566284179688,\"y\":346.374267578125},{\"x\":567.8265991210938,\"y\":342.859130859375},{\"x\":567.7965698242188,\"y\":339.343994140625},{\"x\":567.7665405273438,\"y\":335.8288879394531},{\"x\":567.7364501953125,\"y\":332.31378173828125},{\"x\":567.7064208984375,\"y\":328.79864501953125},{\"x\":567.6763916015625,\"y\":325.2835388183594},{\"x\":567.6463623046875,\"y\":321.7684326171875},{\"x\":567.6163330078125,\"y\":318.2532958984375},{\"x\":567.5862426757812,\"y\":314.7381896972656},{\"x\":567.5562133789062,\"y\":311.22308349609375},{\"x\":567.5261840820312,\"y\":307.70794677734375},{\"x\":567.4961547851562,\"y\":304.1928405761719},{\"x\":567.466064453125,\"y\":300.677734375},{\"x\":567.43603515625,\"y\":297.16259765625},{\"x\":567.406005859375,\"y\":293.6474914550781},{\"x\":567.3759765625,\"y\":290.1323547363281},{\"x\":567.345947265625,\"y\":286.61724853515625},{\"x\":567.3158569335938,\"y\":283.1021423339844},{\"x\":567.2858276367188,\"y\":279.5870056152344},{\"x\":567.2557983398438,\"y\":276.0718994140625},{\"x\":567.2257690429688,\"y\":272.5567932128906},{\"x\":567.1956787109375,\"y\":269.0416564941406},{\"x\":567.1656494140625,\"y\":265.52655029296875},{\"x\":567.1356201171875,\"y\":262.01141357421875},{\"x\":567.1055908203125,\"y\":258.4963073730469},{\"x\":567.0755004882812,\"y\":254.981201171875},{\"x\":567.0454711914062,\"y\":251.46607971191406},{\"x\":567.0154418945312,\"y\":247.95095825195312},{\"x\":566.9854125976562,\"y\":244.43585205078125},{\"x\":566.9553833007812,\"y\":240.9207305908203},{\"x\":566.92529296875,\"y\":237.40560913085938},{\"x\":566.895263671875,\"y\":233.8905029296875},{\"x\":566.865234375,\"y\":230.3753662109375},{\"x\":566.835205078125,\"y\":226.86026000976562},{\"x\":566.8051147460938,\"y\":223.3451385498047},{\"x\":566.7750854492188,\"y\":219.8300323486328},{\"x\":566.7450561523438,\"y\":216.31491088867188}]", + 720, + 480, + 49, + "path", + "basis", + 0.5, + 1, + "list", + 0, + 1, + null, + null, + null + ] + }, + { + "id": 82, + "type": "SplineEditor", + "pos": { + "0": -564, + "1": 1226 + }, + "size": [ + 765, + 910 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "bg_image", + "type": "IMAGE", + "link": 210, + "shape": 7 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [], + "slot_index": 0 + }, + { + "name": "coord_str", + "type": "STRING", + "links": [ + 211 + ], + "slot_index": 1 + }, + { + "name": "float", + "type": "FLOAT", + "links": null + }, + { + "name": "count", + "type": "INT", + "links": null + }, + { + "name": "normalized_str", + "type": "STRING", + "links": null + } + ], + "properties": { + "Node name for S&R": "SplineEditor", + "points": "SplineEditor", + "imgData": { + "name": "bg_image", + "base64": [ + 
"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAHgAtADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDWAoFLigfSu0+YExRinYoxQAgFAFKBSgUAJilxxS4oxQAmKMUtGKBiYNFLiigBMUGloxQhhijFLilAoEAFFOxRigBMUAUuKWgBuKMe1OxRigBuKUClxRigBMUtGKKADFJTsUYoAbTgKMc0YoAMUmKXFLigBtFOIpMUgDFFLijFACYoxTsUYoAbiinYpMUwEoxS0tACYpMU7FIRSATFLikxTsUCExRilxRigBKMUuKKYxtGKdigCkIbilpcUEUAJSYpcUuKAG4oxS0UAJiilpCKYDTRilxxRikISjFLRigBMUY46UuKMUANxRilIoxQAmKKUijFADaMc07FGKAGYoIxT8UhFMQ3FJTsUY5pCG4oApcUY9qYDSKTFPx7UmDQAzFGKeR7UYoEMxS4p22kxQAmKDS4ooAZR+FOIpCKAG4paXFJigB4FGKdiig0EpMU6jFADcUtLRigBKWjFL2oASlx7UAUuKBjaMU/HNJtouAmPaj8KdRSGNFOAopaYgo70tLigBKKXFGKQCUtGKWmAlJTqMcUgG0U4UYoASjFOxRjNADcUU7HtRj2oATFFLQKAEoxTsUUANoxTsUAUXASjFLiloAbijilooAbilxS8UtADaPwp1JigBPwpcUfhS0ANxRTqSkIKTFOxQBQAlJTiKMUAJSYp2KCKAG4oxTsUUwG0YpcUtADaTFOIpMUCGkH0ox7U7bRigBuKMe1OxRQIbgelJjmnYoxQMbiilxRigBtFOxSEc0AJ+FFLilxQA3FGKU0UCYmKbg0+kxQIbiinYoxTENxRinYpMUAJikxTse1H4UANx7UmKdj2oxSAYRRinEUYoAbikxTsUYpgNoxzTiKQ9TQIdRilApcUGo3FGKdijFACbaMU4CigBMUYpaXFFwsIBS4opaQxuKMU6jFIY3FGKXFLRcBMUuKXFLigLCYpcUtFFxWExRinYoouOw3FGKdij8qBWExRjil70YouOw3FLg0uKXFFxWG4pcUuPalxRcY3FGKdijFADSKBTsUmKQhMUYp2KUCmFhuKMe1OxRigLDcUYp2KKAG4pMU+kxQAzFOxS4oxQAmKMU7HtRigBmKdijFLQAm2kAp9JjmlcBMUYpxFJQAmKMUtFO4hMUY9qWjFFwEA9qMUuKKAGkZoxTqMUANIpMU/FJigQ3FJin4oxQAzFHenYowfQUANxRTse1GKAG4pNvtTsUEc0ANxQRS0YoEJijFLijFADSKTFPoxTAbikwKcR9KTFIQ2ilopgJijFLRQA3FGKdikxQIQUYpcc0YoAaRRinYpMUCG4pcD0paMUAMIoxTiKTFMY7FID6V3K6Fpq/8uwP1Ymnro+nIci0iz7jNcDx0OzPV/s6fdHC9aK7/APs6xHSzg/79ipY7eCP7kES/RAKl49di/wCzn1kedgE9Bn6VKltO/wB2GRvopNehYX0FKMVP159i1ly6yOBGn3hGRaT/APfs/wCFKNOvT/y6Tf8AfBrvajj+5/wI/wAzS+uy7Ff2dDucSuj6g3S0k/EYp39i6j/z6v8AmK7jnGM0c+tL67Psh/2fT7s4kaHqJ/5dj+LCn/2BqP8AzwH/AH2K7Ln1o59aX12p5D+oU/M43/hH9R/54L/32P8AGl/4R7Uf+eK/99iuxpfxpfXKnkP6hS8zjx4d1A9UQfV6evhy+PeIfVv/AK1dbRS+uVPIf1Cl5nKf8I1ff34f++j/AIUf8I1e/wB+D/vo/wCFdWOtL+NH1uoP6jS8zk/+Ebvf+ekH/fR/wpf+Ebvf+ekH/fR/wrq8UYo+t1A+o0vM5T/hG73/AJ6Qf99H/Ck/4Ru+/vw/99H/AArq6X8aPrlQPqNLzOS/4Ry+B6w/99f/AFqT/hHr8D7sZ/4HXXfjR+NH1yoH1Gl5nI/8I/qH/PND9HFH9g6h/wA8V/77FdaG+dlzyAD/ADpc+9H1yp5C+oUvM5A6DqAH+pH/AH2KYdGvwf8Aj2J+hFdn+NB+tP65PyE8BT8zi/7Hv/8An2b8xTG0y+Xrayfgua7bNLn3p/XZ9kL+z6fdnDGwvB1tZ/8Av2aja2nT70Mi/VTXe8+tHXrT+uy7C/s+Pc8+II4IOaXBrvtinqq/lUbWlq/37aFv95AapY1dUQ8u7M4TFLiu2bTLFutrEPouP5VE2jae3/LAD6MatY2HVGby6fRo47FGK61tBsD0R1+jmoW8OWpA2zSg++DVLGU2S8vq+RzFGK6FvDQx8tzn6p/9eom8OTgfLNE31BFUsTT7mbwVZdDC70uK1G0G9X+BD9GFRNpN8nW2c/TBrRVoPZmTw1VfZZRxSYxVl7S4iHzwyL/vKRUJUjqKtSTIdOS3QzFGKdiincnlG44o606ii4rCEUU6jFAWGEc0mKkxRimIZijFP20hFADaMU4CjHtSAbRilxS4p3EMxRTsUUXAaRSYp+KTFAhtLS0Y5oAaaMU4ikpgNNJj2p+KMUAMxS4paWgBmKXFLiloAZijFPxzSYoAZj6UYp2KTFAhhFGKfijFAhmKMU/ApMCgBmKMU/H0oxQAzHNFOIoxQAwiinYoxTENxSYp+KSgBp/CinYpMCi4M9Hooor54+sDFFFApAFGKWkp2HcWo0+4fqf5mpKaowp+p/nQFxaKXFGKAEpKUijFACd6XvSgc0uOaQxMUYp2PpRigY3/ABopaMUwCijFGKAAjikFOxSYpAIKWl2k0bTTAiX/AI+JP91f60+gIR
Mx9VH8zS4NABSU7FAFAhtLS4oI9KBiUUYpMcUALRSYpaQB9aM0hpQKYBmk/KlxSfiKQBRRiloATNLmkOc0UDFz7UxVVl+ZVPzN1HuafTI/un/eb/0I0xNET2Vq/wB63iPvsFQNotgw/wBRj3DH/Gr1FUpyWzIdOD3Rjv4dtjnZLKv1wagbw4c/JcD8VrfFLVrEVF1MnhKL6HLyeH7xASpjf0Abn9arvpV8nW3Y/Tn+VdfS5rRYya3MngKT20OGkgli/wBbG6f7y4pgFd4cHggH61A9rbSD54ImPqVFarG90Yyy7tI4vFIRXWvo9i//ACy2/wC6xqtL4ft2/wBXK6n35FaxxlN7mMsvqLbU5vFGK2n8PTc+XMh9Mgiqr6NfR/8ALHd/usDWqr031MJYWrHeJn4oxU0ttNCf3kTr9RURFaKSexg4SW4hFNxT6Sncmw3FJin4oxTuKwzFGKfikxQIbilxS4pcUANIpMU7FBFADMUYp+KQimA3FGKUClxSuIZijFPxRincCOjGafikxzRcQzFGKdijFAhuKMU7FGKAG4pCKcRRQIaBRinUmKAGYpcU6jFMBm2jFPxSEUDGYo20+jBoFc6WTxr4bibDatEf9xHf+QqrP8QvDcIJS7lmPpHAw/8AQsV4wKO9fPn2vsonrZ+J+hDpb35P/XNP/iqhPxS00H5LC7I9yo/rXleBTgKVx+yient8U7LHyaZcE+8ij+lQt8U1/h0cn63P/wBjXm+KXoaLsfs49j0J/ilKfuaQi/705P8A7KKrt8TdSIwllaKffcf6iuGzS8EYpXY/Zw7HaH4maz2ttPH/AGzf/wCLpr/EjXGGBHZL/uxH+rGuNGBT6LsOSPY6d/iB4gY/LcxJ9IV/qKZ/wnfiMn/kIAfSCP8A+Jrm80oNK7Hyx7HQt428Rtx/aTD6Qxj/ANlqM+Ldfbrqk/4YH9KxM0uaV2Oy7GwfE+uN11W7/CQikPiPWj/zFr3/AL/t/jWTmlzRdlWRp/2/rJ/5i19/3/b/ABpp13V++q3v/f8Ab/Gs/NLmi7CyL41vVf8AoKXv/f8Ab/Gl/trVD11K7P8A22b/ABrPBpQaV2FkX/7a1Uf8xK7/AO/zf40o1zVh01O8H/bZv8aoUZFF2OyNH+3tYHTVb0f9t2/xpw8Qa1/0Fb3/AL/t/jWbRnFF2Fkay+I9aHI1S7/GUmnr4p1xTldTn/E5/nWOGpQaV2PlXY3l8ZeIFGBqLH6xIf5rUi+N/ECnm9VvrCn+Fc7S5ouxcsex1KeP9aXhvsz/AO9F/gRUw+Ier97exP8A2zf/AOKrkKcKOZi5IdjsY/iJqAP72ytWH+xuX+pq2vxHOBv0oe+Lj/7GuEyKOKOZh7OHY9BX4jWxHz6bMD/syg/0qVPiHpzH57S6Ue20/wBRXnPFHFHOxexh2PTo/H2iucFbtPdox/QmraeMdAfH+n7T6NC4/pXlFHFPnYvYQPYE8R6K4yup230LY/nVmPVtOkxt1C1Oen75f8a8WwKMUe0YvYR6M9zRlkGY2Dj1U5p2Mda8MjkkiOUdlP8AsnFWY9Tv4T+6vblP92Vh/Wn7QX1fzPacUYryODxRrlvnZqU5z/z0w/8A6EDVyPxzrsagGeGT3eEZ/TFNTQnQken4pF4H4n+ZrzyP4gamMeZbWjDvhWB/nV6H4iLwJ9N+rRzf0I/rT5kS6Mztu9HauWh8e6VIwEkVzD7lQR+hrQj8XaDL0vwp/wBuNx/SjmRDpyXQ2aPxqjFrmkzECPUrUk9jKAT+dX1xIgZGDKe6nNMlpoaaKdijFAhDSU40lACfjRzS0YoABRRRQAhqJ7eCT78MbfVRUtHSmm1sJpPcz5NGs5eiMhPdT/jVOXw8Cf3VwQPRlzW4KK0VepHZmMsNSlvE5iXQ7tPuBZB7HH86qSWVzF9+CQD/AHTXYntRW0cZNbnPPAU3s7HD4oxXZvbQSA+ZCjH3UGqkmi2b52qyH2b/ABraOMi9zmll0ujOXorcl8PnrFOD7Mv9aoyaVeR5/clh6qc1vGvCWzOaeEqx3RQxmlxUjxPEcOjKfQjFNFbJpnO4tDce1GKdRii4rDMUmKfjFGKAsMIoxTiKTFNEjcUU6kIpgNwKTFPpMUhDaMU7FLTEMIpKeaTFADcUU7FFADTSYp9JikMbikp+KQincQwilpaMUEs8woFIKXvXgH3Y7NKDTKcKQhwpaQUtIAoopR0oGKD60ozTRS0gHd6Wmc+tLmgB4NLmmfjS596VgHg07NR5pc0WHcfmjNNDe9KGpWC48GnCq88siQO0Sh5AOFJxms9dYi8tR9tg88H5o2XA/PPFUoN7DubGaM1FHKJEDDHTnBp26psA/NLmo93vS7qLBcfml3e9R7qXd70rDH7qXNR7qN1FguSZo3YqPd707cKLASBqM1Hu96XdRYLkmaX8ai3e9LupWGS5pQai3UBqLAS5pc1Hupd1Fh3H5ozTN1G6iw7kmaKYGpc+9ILj88UAim596QdOvc/zoAk4oxTPxpQT60rDHUqSSQtuikaNvVDg03dRmgRfh1zVoMeXqN0MeshP860bfxrrsDAtcpMB/DLGP5jB/WufzRVJsTjF7o7CH4h3yt+/sbd19ELJ/PNaEPxDtHH76wmjP+w4cf0rz/8AGlwKOeRLpQfQ9Pg8b6JKcPNLCf8AppEf6ZrTh1vSrjAi1G2Ynt5gB/I145jmlxT5yXQie4IySLuRldfVTkUteK295d2mfs1zNDnr5blf5Vfg8Ta5btlNSnPtIQ4/8ezVc6M3h30Z6170YrzqLx9qiACWG3l99pU/oa07b4hW7H/StPlT3ikDfocU+ZEOjNHZUGsW38XaJcYH2zymIziVCuPxxj9a0oL6zuuILuCUnskgNO6IcWt0TelFKVxxRimSJRmlpOfWgVgoowaOaAsNZVcFWUEehFVpdMtJRzCFPqvFWjmiqjNx2JlCMt0Y82gDrFOR7OM/qKpTaPdxDIUOP9g5rpqK2jiqi3OaWCpS2VjjJIZYj86Mv1GKZXaOiyKVdQynqCM1Vl0uzlHMQX3TiuiOMX2kcs8uf2WcrikxW9LoKnJimIPowzVGXSbyL/lnvHqhzW8cRCWzOSeDqx3Rn4oxUrxvGcOrKfQjFR1smc7g0NI4pMU7FGKdyXETFJin4oxTuTYZijFOxRii4WGYoxTse9G3mmFhmKXFKR70EUh2G0YpcUY96YhhBopxoxQKx5UDTs1CGHrS78d68Cx9yTDFOFQCUU7zR60rBYmHWlFQ+co7003kS9XGfY0WY7Fmioo51k6BqmJAUk8YHepCzEpcgUxmGOtM8z3phYmzSZqHzfc0eZmiwE+73o3e9Qb+epxS7x60WCxNuGKUPVfcT3pd9FgJ99LuqDdRvosFifd70yFRbRXcULNHHdqVnRTgSA8EEVHupd1VqthDoI47aIRxKQg4AyTUu+oN9HmUrDLAajfVYuTShjipsBY30u+q+6l3Uxk+/Bo3iod3FAb60WAn30u+oNx96Nx96LAWNwo3VBuNKGJpWGTBqcHqvk0uTRYCffS76
g3GgPilYZY3Uu6oN/1o8z60coXJw1LuFQeZ9aPM+tKwFjeKXcKreZ9aXzKLAWS+Kgubs28G9V3HJzk4AHqabvpj7Jo2R1DIwwQehppdxjRqbrbCWQx7T0YKwQ+mG6fzq9HLvjViMEgEj0NZzWweyjsnmna0iYMkDTMUUjpgE1aVsCnNLoJXLO6nBqrb6cHqLDuT5pc1BvpQ1KwE2aUGog1AeiwE2aM1Hv8AelDUrBckzS5pgajNFgH5pabmjdQFx1NKgnkA0uaKQXLtvrGp2iqsF9cIo6L5hIH4dK17Lxtq1s3+kNHdJ6OgUj8RiucJparmYnFPc7q2+IMTuBc6e6LjlopA3P0IH861rfxhok7BTdNEx7SoQPzGRXl4ApQKamzN0ons8N1bXQBguYZQenlyBv5VNivE0Z4pA8bsjjkMpwRWlB4j1q3xs1Kc+0hDj/x7NVzmbodmeskUmK4C38f36DFxa282B1XKE/zH6VsWfjzTZyFuoZ7Zj3wHX8xz+lPmRm6UkdPRVS31jS7v/U39uxPRS4DfkeavbeM9jVENNDKDT8UmKBDKWlIpMUDGMiOu11DKexGapT6TaTAgIY27FeP0q/SE1UZyjszOVOMviRhS6FIOYpQ3+8MVSl0+4gHzxNj1HIrqcUoHFdEcVNb6nNPBU5baHGlfrSY+tdbJaW8xJkhQk98c1Rl0OJuYpGU+h5FdEcXF76HHPL5r4dTBIpMVfm0q5iPCeYPVKqNGyHDKQfQjFdMakZbM5J0ZR0kiKjFPxRiruZcozFJipMUhFAWGYoxT8Um2mKwzFIRUm2grQFjxbcaaXI71gP4lH8Fsfxf/AOtUD+I7hvuQRKP9rJrx1Skfb88TozIfU0hd/WuXbXL9j8rIvsEz/PNNGrak3SU/hGP8KpUWT7SJ0z727mo4YXE4JbIx0rmpLzUZOTLL+HH8qvaE10+qJ57yMu08MxNEqbSbuNTV9jtLSPgVPeKEtJWZgFCHJJwBS2qfKKTXF/4kt2B3iIri+0VzHNjVbaDj7XGR6Bs07+3rHk/aV/I1y32L1f8ASlFiP7/6V3+yiZuUux03/CQ6eOsz/ghpp8S6eP4pW+if/XrnRYp3Y04WcQ65/On7OAryN4+KLEdEnP8AwEf40xvFMAPyW8h+pArG+yw/3T+dL9nhCn5P1NHs4CvI1f8AhK17Wjf9/P8A61J/wlef+XM/9/f/AK1c0DU9qqvcoGGR6fhVeyh2FzM3v+ErYf8ALmP+/v8A9aj/AISuTtZp+LmqYji/55L+QpQkY6Iv5VPLDsVqTv4ouz923iH5n+tRHxJqR6eWv0SlG0dAB+FLuHpRaPYdn3Iz4h1Q/wAaf9+xR/bmrHpJ+UY/wqTcPSkzTSj2Cz7jDq2rn/lq4+iD/Ck/tbV/+e0v/fA/wqTPtRmnZdhW8xn9p6uf+W83/fP/ANak+36v/wA/E/51KGoBpadgt5kP27Vv+fmf/vql+36sP+Xqf/vqps0Zo07BbzIf7Q1b/n6n/wC+qP7Q1b/n6n/76qXNGaenYLFc6vqqPt+2TbvTNSDVtaHS4m/FR/hVWbH2z8RV8UNJdBLUjGsa0P8AlvKf+AD/AAp39t6yOsz/AIxj/CpM0bjS93sOw0eINYX/AJag/WIf4U4eI9VHJaP/AL9CjNGaLR7BZ9yVPFl8vDwwv+BH9amHi6X+KxXPtIf8KplUP8IP4UnlR/8APNP++RScYdh69y+PFx/isfyl/wDrU8eL4+9m4+kmf6VlmCE9Y1/Dimm1g/ufqaXs4dg1NxPFtiR88Nwp9lU/1qVfFOnE8+cv1T/A1z32SD+4f++jSGyhP94fQ0vZUx3kdQPE2lngTMPrGami1rTZMAXsI4/iO3+dcAyhZio7EimsOlDw8Re0Z6ZHcwTDMU8Tj/ZcGpQx+tebx2LPGrhhyKlVL63/ANTPKo/2JCKh0F0ZXO+x6JvpQ9cEmqaxCu0XEpH+0ob9SKeviLVoz8zq/s0YH8sUvYS6MXOd2GpweuMi8XXaj97aRN7qSv8AjVuPxhD/AMtLORfUq4P9BUOhPsPnR1Qel31gweKNMmOGeSH/AK6J/hmr0eq2Ev8Aq7yA+mXAP5Gs3Tkt0PmTNAPTw9VVkD/dIb6HNODGpsMtB6XfVcNijd70rBcsB6dvqtu96cHosBZD5pwb3qsr08NSsBPu96A2Rmod2aUNilYLk4NLuqIPShsmiwEoNBPNMBpc0gHUcd6SlHWmAYFWrPUb7Tyfsl3NCCckI5AP4dKrGlFAM6Sy8capbv8A6T5d0nowCn8wK3bTx7Yy5F1azQHsUIcf0P6V59S1Sk0ZunF9D1q017Sr4gQX0O48BHO1vyNaGM9ORXiu2r1lrOpadxa3kqL/AHSdy/keKfOQ6PZnrZWmkV5/aeOtTgwtxFDcr3JGxv04/St2x8babdOEuY5LUn+JvmX8x/hVKSZm6ckdHiiobe+s7xd1tdQyj/YcGp9pqiLCUlOpvekApqN4Y5VKugYHsafRTTsJq+5nTaRBJyhMZ9uRVGbSJ48lMOPbrW9iito4ipHqc88LTn0OUkheI4dGU+4pmK6x40kG10DD0IqnJpVs4JQFD7GuqGLT+JHHPANfCzn8UYrSm0maMZQiQe3Bqi8TxnDqyn3GK6I1Yy2ZyzoTh8SI8UhFPINJitLmLifMCxRj+EU8Kg6KPyqMNkUu73rh1PsNCXj0oyKi3UbqQyQmtHRBu1Jf901lbq1/DmG1UD0Q1M/hYrnd2cfyiovECbdFuT/sVoWcXyD6VV8TDboN1/1zNcNveQKR5nmlyfSoCxBpRIDXfYLk26kLVCT3o3U7Bcl3Uhb5T9Ki3UFuD9KdiSitWLU4ul/H+VRIKliG24X6/wBKt7ELc0N9G6ot1JurOxoTbqUNUG6l3UWAm30b/eod1G+iwE2+l31Buo3UWC5Nv96XfUG40bqLBcn8yl31X3H1p2/iiwE2+gPUO+jdRYCGU/6WD7ir26qL8zZ9xVjdTaJTJ91G6qrfMQdxGPSlAKHdubB9ehpWKuWd1LuqDfRuosFybdzTt1QB+aN3vSsMn3Ck3Cot3vRuosBLuFLuqHdShqLAUJObhv8AeNNYdPrUm3dM3+8aJEwB9a0MzRt+IE+lP3VDCcQp9Kdmsmaku7igtUefejNIBxVG+8in8KY1vA38A/Cl3UuaYiJrGEj5dw/WojYej/0q2WpU+dgq9ScCndhZFT+zbuOISoSFJ4IfFTRX+sWv3bifHoTuH65rSvZAm22ThYwAfc1U3VKk3ugcUOi8ValCw81Y5FHUFcE/iK0Y/GNu2PNtZU/3WDf4VlkBxhgCPeo2tYG6p+RxScYPdCszpofE2mSgZmaMns6n+nFaFve21z/qLiKT2RwTXCPYRH7pYfXmoWsZByrAn64qXRg9mNNnpQbmnh/rXnMGoatZ/cnmCjjDfMP1zV6DxZfxECZIpV78bT+n+FZvDy6D5kdzvpfMrlIvGNu2BLayoe+xg3+Fa1rrVhdKvl3KKx/gc7SPzrN05R3Q00zXD04Pk1WByM9qerVF
gLO+nBhVfdTg9JoCwDUFxqEdsVBUsSeTkAD6k0oeqs+mWd1dR3M0StNGMKTyPxB4P4iiKV9RNltdRhknWNFLq3R1IK1aBFZllp1rYSSPbxhXlOWPQfgBwPwFXg9ErX0AmpajDZpwNSA+lpuaXNABiijIpc0AIMg5HBFatn4j1ayAWO8dkH8Mp3j9elZYpe9MnRna2fjtPLVb2zbeOC8J4P4Hp+dbdr4l0e7IC3axsf4ZRs/U8V5gKXFVzMhwiz2NSrqGUhlPQg5BpcV5NZ6lfaeQbS5kiGc7QflP4Hit2z8b30LYu4Y7hMclfkbP8v0qudEOmzuxRWJYeLdMviVd2tnA6TYAP0PStpGSVA8bBlPQqcimjNprcMUYpSKQimISkZFcEMoI9CKcKKAKE+lW8mSgMbe3T8qoy6TOnK7XHt1rcpe9bQxE49TCphqc+h8U7qN1R5o3e9dVj0OYl3Ubqi3e9KGosFyXNb3hIb9ax/0zP8xXO7q6TwUN2tn/AK5H+YqJL3WKT0PULSL92KzPFa7dAvD/ANM/6ity1XEYrH8Xj/inbz/rn/UVx21RFOWp5CzZpm6kY88Uw13pGrJA9O3ZqDNKGosK5Juo3cGo80ZpiJIkzmnAbZ/8+lPt1+99aHGJj9f6U+hK3HE0maQ0VBoLmlzTcDNLigLhmlzRtpQhJwBmgLiZoqUW8zfdic/RSaeLK6PS2m/74NFmLmRXpasjTr09LWb/AL4NO/sq/P8Ay5z/APfBp2YuZdypRV0aRqBP/HpL+IxT10TUGP8Ax7MPqRRZhzx7lDNGa0x4f1AjPkqPq4pf+Ee1DH+rX/vsU+Vi9pHuZbKcZ9x/WjmrRtJRP9mIHm71TGe/NXB4evj/AAIP+BinysXPFbsyc4o71rf8I5ff3Y/++xR/wjt9/wBMv++6XKx+0j3MrNLnNah8O3+OPK/77ph0C/X/AJZo30cUuVh7SPczgSDS7jVw6Rfqf+PVz9OaadMvh/y6Tf8AfJo5WNTj3KuTS5NTmwvF62sw/wCAGm/ZLn/n3l/74NKzDmXci3UobkUphkXrGw+opoU5osVcZEuZj9TT7lNsa/Wlt1zKfxqS8H7tOP4v6VdjO42M/u1+lO3UxBhF+lFZtGtx+6jdTDRnilYCTNG6osmjNFguTbqvaaFBkuJPuRL+tZgPNXpj5FlHD0Z/3j/0qJLoUiKSQu7Mx5JyaQNxUW6jcc1SQmyYGlzUW7mnbqTQEmacKh3ZpwakMlzUM4VoXyASAacGzTZf9S/+6aEDMpaU8EUkYp7CtjJbFmE3tuA8LyJkZBRv8K07XxVfwECdUmUdcjDH8RTYP+PaL/cH8qHhSX76A1jKz3Roo9jct/Fmny4EvmQHvkbh+Y/wrWt7+1u8/Z7iOQjqFbn8q4V9PibOMiq7afKnMZzjpg4NZulB7B7x6ZmlVq8+t/EGrWXytJ5i4wFmXP69a1bTxmpAF5alT/eiOR+R/wAazdCS2C6OvDU7dWZaarZXqgw3KEn+AnDflV7cRWTi1uBOGpwaq4fmnhqmwyffTg1V91ODUWEWAacDUAenB6LAT5paiDU8NxSJHilFN3Zp1AC0UlLQFxRUsM01u2+CaSJvVGKn9KizTs+1AHQ2PjLULZgt0FuowMc/K35gfzFdDaeL9KuRiSR7dsdJF4/MV5517Uu3IpqTJcUz1yCeG5jEkEqSoejIwIp5FeS21xPZy+bbSvE+MFlOM1v2vjPUIdqzxwzqOpIKsfxHH6VSkQ6fY7rFFY1h4p029bY7NbvjpLgA/j0raUrIoZGDKehByKozaaPiGjNGB60nA716ZqLRSbhRvAHA5oFckHSup8CjdrrD/pif5iuULkEjA/A5rq/h9l/EEntbsf8Ax5aiWwpS0PXLZMRjisXxiMeG73/c/qK37df3Y+lYPjQ48MX3+5/UVy21RnTep4ue/IphZR3FNfrUZ6HPWuw2cidFLhmUZCjJIHSpUgLmIKykydMducc1UeVmjSPChUGOB15zk1dsruXR74yCNWlUbSG7Z/rVIhyfQZdwy2c7xSoQy9+gNOtpbU8To2cNyGwM4+X9abfalcX0sjyNhXP3R0A7Cqi/eH1o0C7e5q24HzDHerulWUd/rCW8udrbicHHRc/0qvbJzJ/vVo+H+PEcJH/TT/0E00hSdk2bv/CO6an3rct9Xb/GpU0nTk6WcX/Ahn+dbHyzDbjDenrVdk2Ng1aSOR1Jdyn/AGbY/wDPnb/9+x/hTlsLNelpAP8AtmP8KsUVVkRzy7jUjjThI1X2UYqeOUr8rZMZ6rn9R71FmjNFkHMxZVaPBDb0PRvX/Cmj5hxTkk2gqy7kPVc/r9aZInlEMp3I3Q/0PvSKbuL0pc01TmlxTJA0lKaSmAZpaTvS4pDOUcf8VD/29p/M11QzXMOMeIB/19p/M11JpI0qdBh5pMU40lVczExQBS/hQTkk4HXtQAdOlFFApAFA5opRQAoJ7Gn5zjPP1qMU8dRRYdziLhQNZuwOglf+ZplyPlj/AN7+lTXHOtXZHTzX/maZdrhY/wDf/pWR2LY6y1hilsLbfFG2Yk+8oPYUNYWZP/HrD/3wKfZf8g+2/wCuKfyFSmtLI5XJplF9JsZOtso/3SR/Kqz6BZNnb5i/Rv8AGtY00jNLlTGqkl1MJ/DJO4w3YPorp/XP9KrN4cvV6PC30Y/1Feg+HJ9HtZ531bawKhY0e3Min1PDDB/xro21LwaYyRb2ZbHANnKM/wDjxrGbUXblZtGpJ63PG4NCvVnUvCDGp3MQwPA56ZzVa7trx7h5Ht5BuPA2ngV27gBHmCbBMSETP3Vzn/634GoPeqVJPUHiZLQ4Mgg4I5o6V3UsUc67ZY1cf7QzVOXRrCUH9zsPqjEUOl2KWJXVHJZpc10L+G4SfkuJF/3gD/hVSXw9cKf3UqOPf5al02aKvB9TJDUuanm067gYh4HwP4lXI/MVXKlTyMVm423NVJPYfmhzmJx/smmUN/q2HtRYdynCuafKuAKdbrz+FSTrgL9a0sQti7EcQRj/AGB/Kn5qGNsRJz/CKcDWDWpqSdQRntSr8oxkn6nNMDU8GlYYpAYYYAj0NQyWcL/w7fpU2aKLtCM2TTnXPlsCPTOKmh1XVrEjFxKVH8LncP1q5SEZ7Zp819xcvY0bLxhGQEvYWRu7pyPyrftNWsL3/UXSM3908H8jXDyWkTj7u0+oqpLZGNSyMCBzjpUOnB+Qao9QyaVWrzix8R6jZbVWXzIl/gkGRj69a37PxlbyHbdW7RH+8h3D/H+dZyoSQrpnVBqcGqpb3dvdxh7eZJFP908/iO1TZxWLVgLAb3qRXqtupwaiwiyGp4aqwfmnh6VgLAYU7IqANUgakIkpRTFNSCkAuBS0etOxQAgpcUUUgCp7e8urQk21xLFk5OxyM/WocUtMD56kieJtsiMpwDgjseRTM4BFOkleQAMeFzgemajr2DAKUCkpwoAUV2Pw4GfEU3/Xs3/oS1xwrtPhqM+Ibj/r1b/0JamWwS2PYIF/d1zvjcf8Uxff7g/mK6WEfu/wrmv
HRK+Fb3H90f8AoQrne6MqT1PEnB64NRnpWlaaiLOG6TyEkaeIx7yeUBHOKoRRPPIsUYy7cAetdRuyOinyKF2jBDY+bPr/AJxTKYgpyfeH1ptOT74+tAG3bj5pPrWhoA/4qGL6Sf8AoJqlbj5pP96tHw+P+KiX/df+Rqo7kVPhZ17DNSBw42yDns3+NIRzTCK1aucN7Ehtz25pv2dj6imYpMVNmO6Jfs596T7M2e9R7RRgUWY7of8AZzTljKcEZU9VPQ1FSYoswuiTyhEQ33o+57j61Lsi/vR/mKgjcxk8BlYYZT0IqQRxh1kKs0JIDAHBHtn+tS7opNMGSMDO5SO+0jNRvFsPYg8gg9a6VbTwe0QLX+oq2MkeUpwfyrCmFtFdyQQytLbZ+SVk2n64qVK5TViuqKfT86eI19qf5Ef/AD2j/wC+qBbx5/1kf/fQqrhY4+TjxAB6XafzNdPXMuufEarkY+2rznj7xrsGtk/57Rf99ihOxU1exUIpMVb+zp/z2i/77FI9vGIyRNGSOQAw5p8xnylTFFKRSYqiQoFO3EIU4wSD0+v+NNoGFKKKKBB3p46iminDqKYzi251W6/66v8A+hGm3f3I/wDf/pT8f8TO5/66P/M0l4MJH/v/ANDWR2o62y/5B9t/1xT+QqTFMsh/xLrX/rin8hUhFaHHLcb+FFLijFAjUg1LT0hSOfRLaYqoBfzZFJ9+GqhIEubs/Z4VhR2wkYYkL+J5qPFWYcQwyTYBYjYmexPU/gP51Fraoq5DdsGnZVbdGnyIfUDgGoKcRSYq1oiW9QxRS4ooEJS0UCgAzTJIYphiWNH/AN5QakpKNx3sUZdFspuREYz6ocfpWbdeH3WN2hmVgAThxiuiFNm/1En+6f5VLhFmsas11OBthnt/DT7gfKn1othz/wABqS6H7tfr/SszrT0Bf9Wv0FLXR2+gWt1pltKGeOR4UYkcgkgHpVW48OXMXMLrMuP90/l/9es3TY414PS5jg04OQaJIZIXKSRsjDswxTKixtcmDg04NVfNKGNS0BZoqJZegqVSDUlCFajmX9zJ/umrGKjmH7iT/dP8qAMnT0V51DAEYPB+lXZrCNxlPkNVdOH+kD6GtbFaSbTIiroyPJubVw6FlKnIZD0rTsvFF/auBNJ56dxJ1/Pr/OnkVDLaxS8suD6ipbT+JD5Ox01n4o0+6AEjmBz2ccfmOK2I5UdA6OGU9CpyDXmkmmuuTGwb2PFNtr690yYtC7Rk8EY4P4dKh0U/hYtVuenh/eniT3rjLTxgSFW6twT/ABOhx+h/xrdttUt7tN0Mgb26EfhWMqclug3NkS8df1qRZR61jm6x3pPthHSs+ULG6sq+oqZZk7sPzrl5NS2nHU+1RG8nm4B2j260OI+U7JHVjgMCR2BqcDNYHh+Hif1+XJ/Ot0bl75HvWT0YrIo3+pR2eVVd8vYE7R+Z/pTbPUGuCvyxyA43GJidh7g5Azj1FaOULZaMbh0OM1IuwDCAAe1CkhWG7aXbT6Xbmi4rHzveQeTINuNrDIHpVatXUlPlRnJwGPH1/wD1VnKwR1YDOOcNzmvYhLmjcmtDkm0N2kYyCPSipZ5vPZTyMDAUnIUeg9qjAqjMceeQAAewrtfhqP8Aiop/+vRv/Q0rjGTY23IJHociu1+Ga58QXB9LVv8A0NKmWwp/Cz2CIfIPpXMeOxnwte/Qf+hCupiH7v8ACuW8eHHhW89wB/48Kwe6MaW54pIgVVbepLDOB2+tRhmXocfSnkZ54HNXLqw+wWqPO0LvOm6NY5QWj5HJA9Rmuk6CpcTRyiIJEI9qAHnO4+tQ9sYoABzlguBnnvUyBp0IATESFuTgkZ/XrTERMhU4OOPQ5oj++v1FSAIBl13ZyMA4xU1vatJGJVwx3Y2Lywxjkj0xmhBc1rcff+taPhznX8+ivVC2+6+fWtHw2M642OyP/OqW5FT4Wdg1MNSGmkVqcAzFGKfiqct9DFqMNif9bKCQew4P+FK40m9iwRSYqTFG2mIZjijFP20EUAMxT4yUbI/HI60KKo6veS2FmbiMD5CCcng+xqWXFNuw/UtQgsJLfP3ZXIIPGz8fyp0U8V0vmRSK6eoNchrus2+qx2zRKyOgYOrdBnHQ1L4WvCl81ux+WReB7j/JqFLWxu6XuX6nXbaUCn7aAK0MDlH/AOQ8P+vlf5muoxXLt/yH1/6+l/ma6k9aEaVOgzFGKcaMUGY3FJinYoxQAwikxUmKQigBtHpS4B5FLigBMU9aKUDFAHF4zqdz/wBdH/maS8+5H/v/ANDT1/5CFwf9t/8A0KmXmcRf7/8AQ1kdvQ66z/5B9t/1yT+QqQjmmWgxp9t/1yT+QqQ1qcb3GnrSUp5oANAkKBk4AyTU91iNlgGP3QwxHdj1/wAPwotfkdpj/wAshuH16D9SKgOaXUb2GmjFLijbTJEoxTiKMUANxRinAfzpdtAxoFNYqhXcwG44GT1NNN3BHceTI+184G7gHjPWsfVb5WleDeuwMCpHVTgZqJTSRrCk5M0b28FttKEPtyZEGMgY606O7iubNnDKpKnKlhmua81pNrONwGRu5z+dIsmwmRGVcrwuOvrWXtnc3+rqxTthz+FPuVyi/U0lqvT/AHf61LcjCqe2T/KqNDs9OkRdIsyzqP3CdT/sirakPGHUjaRmuat5QLaFD2jA/Sri3hWExADB79wKXtTF0OpYv7mPhGCSL0YMoIrGawt53ZR+6PY54/KrM6qT95XGB839KjVwGBPJ9azcrs2hHlWhQn0a7iXcieanUMnP6daoMjKcMCD6EYrrG1HMZCpg8fhUcj299KIp4lIA69CD7Gm1HoNTmviRy1KGI6Gti90WNMvbTgrnAVv8aypYJIX2uu0/zqGrGsZJ7CrcEdelPeVHgkAPJU8H6VX2ijZwaXKXch01czA+qmtcrWZpg/er9DWrSnuENhmKQgU8kCo2kVRyQKgoMVHIqMuHCke9RSXYHCjntnvURZ25JJqkmIhns4W/1Z2n9Kpss1udwJGOjA1o4qG6GIGq03sQ0SWWtXomiidxIjMF+ccjn1reWV3+834CuRtP+PyD/rov8666JazqxSY4PQmiT2q5ElQxLVyNawsU2dD4cjBW44/u/wBa2jF7VmeGUyLn/gP9a3zHWU46mDlqZ5iphjrQaKmGKs3EamUsOO9KJCOoP4VZMWaY0WKmw+ZM8Gv1zanpgMKy5ImjfY6kNwcfUZravJ3sRDLER5ivkZAPT2NZMkzzMrSNkgY6ds5r16PwmuK/iBcW4gKASxyErk7Odvtmoe9dP9nsx4TllRP9ICpuYrjOX4xn6Gsi7t4YbCweMHzZY2eQ5/22A/QVs1Y5FK5FcQxJDbvEwYsn7z5s4bJ4x24xXZfDFc61dH/p3I/8eWuGr0D4at5ut3jhFT/RwMKMDqP8KiWwqnws9WT7n4Vy3j3aPDFzvzt3LnHXG4V1SfdrkviB/wAird/Vf/QhWL3RhS3PFXxuOOnamHnmpCON3HXHWmVujrsNxxSqSpyKcQoXBB359eMYppA4wfrxTJ
Zotd/2nqKz6hIsasu0ukQ4444FdDD4Tktbh5jcRS2qRlg8bfe+U9vTNYFhol9fxl7eHcoI5JA69/pXXaNok2mWF5LcMdzRuoUHjAzzj14q4q5jOVtmYUAwG+uK0fDA/wCJ3IfRH/mKzoM4f61qeFhnW5v+ub/+hChbjn8LOuI5pMU8jmkx3rQ4TO1W+FjaFtwDN8qk+tcPc3091f8A2iR8OTjI4wMVt+ItQjun+zIcNGcgk9DyCCK5jdiQeuawlK7PQo07R1PTYJkmVQrqW2gsoPTIqXFcjoGqR2STecSSVL8+2AB+OTWzoF5LeWsjSkHEhC/z/rWqlc5Z0nG7NWsnX3u4bAyWrABfvYB3fhitikYAqQwBBGCPWmyIuzucfo3id1nWG/fdEf8AloRyv19RV7xJMsMDNBqTbZBgxYDgAj8xXL6wunx3LLYrOhVirpIBgH2Oaqw3kkUckWFaKT76kdfx7Vk30O1U03zIgPHTpViyupLS5SaIgOvIzVZsbjjOO2aAccUkzW11Y9G1fUxp+nK24GZgBgH25/nU2lXYnsLVpZB5siZAJ5bnGcV55c3kl04aRs4AAHYYAH9KW2u5Le4imQgtGcru5AqufUwdBctjo2H/ABPkPrdD+ZrqiK5GzZptQsXc5Z5lY/XrXYd6tGFXdDMUlPIopmZGRRinkUgoATFGKd+FH4UAMPWlxTutFFwGjrTsc0YFLimgOKT/AI/Z/wDeb+dNvekX+/8A0pYebuY+pb+dF6OIv9/+lZnb0OutB/oFt/1yT+QqUim2o/0OAf8ATNf5VIQau5xvcfa3EtnOs8JUSLkAsgYc8dCCKlvdQnvlQTLANp48uBIz+O0DNV8cdaTbzU6XuCJXIjtkjxhn+Zj7dh/M/jVfFP20baaYMbigCn4o207isNxRinbaMUrjsNxWZq7wmII1wYpF5GMjP4itbHrWfqV3awlFnjLZBIZRnHalJqxdNPmOXeWTILN5h7YbJxS+Yybnl5GenekdjI5eEBFJwq4xjHemIXkcfNwx6LwQK5GegkSFiVHl4OB1PGPwpWw0WT5ZI6ELjimsy+X06nHy0ZbYVzuKgjp0FJDZDa/wf7v9amn+4v1P8qjtR8qH/ZP86mnHCfU/yrp6GQ+G6IAVgGCpwR1wKuQzxyABHBOM47iqLiKNVTGJGUZ5yDQxGG3FMlcHP8Nc7eptY0iT2bPtTS2OSBWc128GU8xWKjOCP60sepqVBkUrz25oQWLxc888YpA5HfnFQrPFNzG4z6U/nnnNMLEwlwOv1qQSJJGUfDc9DVTdjPP6UqvkdOKBWJBp9vKX+dYmLYUDmq0mnSxMQAJB6pzVjdgD0qaOUqvynnt7UXDVHP2ZEZB6cYq0bgAE5PX0pmnwrPchH3bST93rVufRZsZjkRv9npVOF9RKajoyhJckn5cVXd2IJJzT5reWBtsiFTUZHyN9KOWxV7j4eUzgZzUwXFMtVzCfrVkJWctzREW2obsf6M5q5sqter/okn0/rQnqDWhnWYze2/8A10X+ddjEtcnZL/plt/10X+ddzBZStyQFHqxxRWZnDYWNDgVZjXmpIreGP/WSZPooq5Fc20GSijPqeTXM5DaNnw0BGlwXIUNtxu4z1reMkX/PRP8AvoVxh1QetJ/ag9ah3Zm6d3c7LfH/AM9E/wC+hRmP++v51xn9qL60f2mvrSsxezOyIQ9x+dNKL6j864t9aRJFTZIxY9VXgfU1J/aq+v6UuVlch5Jqk4mutqHKR8D3PeqI4p2KbXqRXKrBOTlJyZMbmVofKMr+WP4cnFR7icAkkDpTe1KKZJIBXoPwuTOo3xHXylGPxrgApGM9xmvaP2f7JJtV1i6cZMEUSqD6sW5/8dqJuyJmrxZ2Rs7mOHzHglVMfeKECuL8fc+Fbv6r/wChCvd7nBiIPIxXiPxOiWDRL6NBhdyED0+YVgndmVONpHh7DFDRBYVfIyRnAOfz9KV+tMNdSOhkkkUflGWORcFsCMnLAep4xUcSgyLubaM8nGcVNJHHFBFJHOryODvUA/L7c+1N83MKxBEGDuL45P1pkbnpeiXVrLp8TRzRnIAxwpHsR61oah/yDbn/AK5N/KuP8H2CNNJPMjb48bQy8YPeui127MGmuAwDP8mD3zWqehxOHv2RyEH8f1/pWn4VH/E6l943/wDQhWZCRl+v3v6VCLp7SXzIyQ24j9ai51OPMmj0l9qKWYgKBkk9qp6jdRQ6XJNvG10+Qg9cjjFRSTQ6toBJl27xt3dAGH/1645tTmFodOmI2xudpHbrxn0pylZGFOjd+hmyszylt2SSST60xSBKCxxj2pCcMQfSkJyQSOeuaxO+xIzk4Y9TwK6Pwom68LGYfKpxGBn6nPauTMnzcH860rDVZtPiYW5IeQEEn+lVHRmdSN42R6WOtQ3ySG0cxT+Q6jcHbG3j14PFcff+JpXtYUgcpKo/eB1wc+tZ6+JdRTyNsv8Aqk2EHkOM9xWjmcsaEr3KWoo7TNMTA5PLPA2QeeuO1UM+tTyvHICwUq2eg6VBxWZ2RjZC0UCjvQULSg0hpB1oE0dVpv8Ax/ab670/lXaMmCSPyrjNPwL/AE3P/PSP+VdszpIDhgcHGRz+Fao4ay1IjSYp4p4XNFzEgNAGaseWfw+tKI29P1ouFivikq15bf3ahkXDZ9aLjI6XtRjNGKYhBwafjNM5z0p4oQHDwDE7++f50Xn/ACx/3/6GlhH75j9f50XIz5Xs39Kk7Tsrcf6LD/uL/KpMVJZW0k8UUca5OwfyrXi8OXhVXcKinoTnBrGviqVFXqSsZUsPUqv3FcxcUYrp4vDUZxvlYn2q5F4XtiM7JH/GvNeeYb7N38jtWV1vtWXzOMxk0u2u6Xw3bIM/ZXP1U1J/Y9sn/Lqv/fNZvO49Kci1ld95o4LbRt9q7v8As21H/Luv/fNKNMtz0tV/74qf7c/6dP8Ar5Ff2Uv+fi/r5nBbaiuY5zARbsqv6tXoZ02FRzaLj/rnR/ZluRk2KEf9c6Tz1dab/r5FLKrP41/XzPE5ri8MjKZ8sDyQeDVYmVnGJACT36GvbW0fSSTusEz9SP61Xl8M6DP9/Tl/Bm/xrP8AtinfWLR0LANLRniwcybVLAdQOMZpSHOSGXA444IxXr7+CfDjf8uci/SU1Wl8A6C5ypuoz/suD/MVazag+4fUZnkcshMmxhlccYHeiNPLQqWyx644/CvUW+GuksSyahco56EoCBVab4ZyFT9m1SCQnHEkZX+RNbwzHDy+0ZSwlRdDzm3HyRj/AGf61LLyF+p/lXYf8Kz1uLaIpLSUKMfLLjPPuKq3XgPxFHgiwDYz92VP8a7o4ujJaSRg8PNdDk5CpTAbBAGT1qt9oclk3Ag8ZxWpe6Nq9lE6z2FxGc8tsJGPqKyljKybmUhh0B4oUovVMpwkt0WHZVQgYywyT1FVHRW+593+IelWiCilsncVxkDiq2wo6nI9TxTTRNhEt5POzn5fXOOKuG6aFMo/AOPn5prxuwLueNvY0ySFVT5WII5ye34UXuFi3HfRuo3kpnjmp
w2eUYY9RWSQphIJGWbAzxinRxhFKLvZieccYoEaoLf3gf1pSzEEEqB64qi8nloIwxJP3sn+tRq0GCGL7+2KAJ9LkEVwrtnHPQZroTOhXIP4YrlbYlVGCR9K1EuY1iGS39a3TMJRvqaDyxsMFciqM1lbTpIyoUIGTg9eRT9wK7h0NOjYbJf93+orRx0M1o9CrDp5SM7XBGeh4pTA6dUNXInAQ5OOaUzr7muacdTqjPQo+Wx/hb8qqX6FbSXIxgf1rV85mIAGB+dUdRGba4/z3qLWaL5roxrZzHJDIOqsCPzrqpruWK1Fwb22cEj92kylxkenWuTUfulPpVY3DVc4czIjJROp/tiQ9G/Oj+1XP8Vcr9pNL9pNT7JD50dR/arf3qT+1G/vVy/2k0faTR7JBzo6j+1H/vUv9pt/erlvtJ9aX7UfWj2SFzo6f+0z/eNH9qN/eNcv9qPrS/aGo9kh86IB2x1pCpBIIII7GrAtZAc5XP1pzW0juWO3JOeK3MisFz065pQOas/ZX9B+dAtZM9BSAjQV7v8AAKR5rnxFLIQWZbckgAd5OwrxJLVx1H617f8AAJCj6/nutv8AzkrKpsOWx7Fc/wCrP0rxX4pn/iWXg/3P/QhXtVyfkP0rxT4n4eyu1/3P/QhWC3RjHc8RkHJqIirr2r5OAMfWojaSe3511o3KtOQ7WB61P9kk/wBn86kFm+3tn61RJt+HtSvmnEKSbkJGd7dB7UuuX73OqNEsrNCjjCnscYNV9Elk06Z3Yrg4O3aDk/XtVfyZXuN7YJLZJz1p3IUVzXLUX8f+9/SqN30/4Ef51fUBck8bm4/KqN5xt+p/nQ2Ulqa9nqBXw5cWqsRJG+4496wJJC7ZZuR7dauW06qs0ZPEqY6dD2qhIhD471F7lxjZsPMOzJPOfSgrhd2eewpqAE4Y80+U5xjPTpmkWVuuOTU8jtM4baFG0AKowAAKYiqWO7OMdvWrMMBYktnYgy59P89KolkE00lxJvldmYADJOSahNXr2XzHBW2WKMDCgD+vc1ToBCUmeaO9KAKADNLijFLSGGKTFSIMuo9TU0y8jFFwaNgxvILOOM7XZkVTnGD0612CyWel20cDyqnHAIOWPc1x12r/AGa3KjnIx9cVatdNVLoPqlzsAG/aDuLY7Z7VdzmnDmOyjIkjV1PysAQfapUz0P4Vjw+JLCWWdVykMSKUJXr1z/SoT4rtfsssgRvND7UQ/wAXoSaLnP7KXY6LB9KMf7IqnZ3UlxaxyFslhnp/9YVP5j+v6U7Gb0JlI/u0y4xheKarNnkn16USuWC5NCAhopcUYNMQmPelA5oqvcX0NuvXc3YChsaTexydpC0kpCgk4qzLFGMAqZJFOQA2F/PvWlb6dc3p2wxCG3/vngf4k12Wh+DYoVSe6LZPIyBvb8P4R+tc1bEU6MeabsjuhCU5csVdnK6Z4e1HX5le4j8uFByxU4A9BuyR+FegWljo/hPSJXM/kWmAzyTNjew7Knc/QE1D4j8Wad4WgMO0S3uP3doh4XPdj2+nU15XrOqah4ovzeagOnEdvFnZGvoB/Pua4lGtjVyzXLB9Or9eyOn3KDutZfgjoNW+JOqX7yW+gQiztweJyP3pHuTwv0HPvXO3Oo+JL4lrjXrls/wtdSEflUASdVCrbuAOwQ0uy5P/ACwk/wC+DXoUsNClHlgrIxlV5neTuVWsrx23PeKWPfk05bK5Uf8AH6R9Mj+tWPLuv+eMn/fJpDHcj/ljL/3ya05GLnRH5F4P+YhIPxP+NN8i9z/yEGP1ZqeVuM/6iX/vg0hEo6xsPqpo5GPnQJ/akZymouv+7Iwqymo6/GP3et3a/S6kFUy7DrSeaaPZD5zUTxD4qTga7d497lj/ADp//CU+LgQf7Vkkx/fKN/6EKyDMfSmmc0nRi90NTZ0MXj/xXAwErJOB2NvGR+aqKtn4p60G/eaVp+B2Ecq/+z1yfnmk88+prKWBoy3ivuLVecdmd1Y/FWMsRqOmke8Eo4/Bv8a14viP4fmxuN1Dn+/GDj/vkmvLTNkYPIPY0wrA/wB6JPwGP5VyzybCy+zY1jjqq6nttl4l0W/GbfVLZj/dd9h/JsVqrMwAKSHB6YPFeH+H9Ej1PVFgSUxKiSTMxOQAiliPxxj8a9xcgW9sgABWFQQBjHf+teDmuX08JBTpyZ34TFyrT5JIX7TKP4s/UVWubaxvR/pen2kxPUtEM/nT6SvFhiqsdpHounF7oybjwl4dueVsmtnxjdA5GPwPH6VhX/w1tZIiNO1FgxPK3IGPzArsjTa66ea14dbmcsLTl0PMNR8C+IrYF4rdblFGAbZwxP8AwHr+lctcQywO8VwHWUcEOMEfhXvKyMhBViCPSuU16ytv7Rme7QNHMd0bmMMAO49sGvfy7H/Wrxa1R5mMoKglLozypI2VwVcsPpUzzBIwuTgEDArrJPDVrcljZzlfYHI/I4NYl14b1GJm2wiUKM4Q4P5HBr1L9zhUk9jGkHnOVUtnOST3qQtGqHO7IOMU8WzBz5gcY6hh0/CmyhHLYJViemOtUgsNjYJGpOce1SJLhhk4BOcdQajU4iHAPtQ0bNjavOegP9K1urk20NCO4Mqk7h+FBZvU1FbqRCMjB6HNS4roi7o55KzJYf8AVn61KBTYB+6P1qZRxWMtzWOwgXLD61W1FP8ARrj6GroHzD61DqKj7LcfQ1jLdGkdjmgP3C1mmtYD9wPrWSa1IY2iiimSIelJQelJQIWkNGaKYgqUGoqkFJlRPdR4d0n/AKB1t/36FOHhvR886dbf9+xWmKcK4uZnPzMzP+Eb0f8A6B1v/wB8Cl/4RrR/+gdb/wDfArVFKKnnY7syv+Eb0cD/AJB8H/fArtPh3ptnYSakbW3SIuI920Yzjdj+dYOOK6nwRw999E/9mqeZ3LTOmufuH6V5br9pb3+sTW9zGJYioJVuhr1K6OEP0rzPUv8AkYp/9wU2yVuYbeF9G/6B8P5VXl8NaMP+YfD+VdCe9VpgMUcz7lJsxo/DWjE/8g+H8qtx+F9F/wCgdD+VWoqvR0cz7jbM9PCWhnrp0P61YXwhoI/5hsX61px1YFHO+5F2eUePNMs9L1S0SygWFGjBIXucnmuJveCn416B8Sz/AMTuxHbyf/ZjXn98OUrrpO8dTojsischcg/jSsVLBuc96iJKkihWxnPcVRqLgbyR0prMD1qRSuAew6g0m1Tg4zzQFiewihecGdmWNQWO0ZzjtSvMzL5ca7VOCQO5qeOzLWsUkb4MpIYlsAAdjV62tY7fTHumA81mKoDzkd/wpmdzJuI5Ys7jt3KDtJ6g+lUzVmbG87TkHoPT2qEigtLQixzTsU/bRii4WG44pQKdtzTttK47CIMSJ9asyDLCq6KfOT61cZfnFS2O2h23i+wtrTQLdoYwpFztyPTyxXCyzyy7d7khRtA9BXoPjY58NW59bz/2mK87Ip3M6auhuferMZS3YGVPMYEHy84/P/Cq2KcASc00y3E7TR9Ya6YI45J4AwqIPqetdCoBGR0riNAsmkuFle2MqZxu
Y4Vf8a7VGLIGVht7YFaJnnV4JS0EuB+5NPI4HsKikLlDk8Z9KQyleWbjPpRcy5dCXFRSzJCDuIyB09PrVd7p5spbjAH3mJxj8e386sabos+oEzHAhU4aaQYVfoO5/X6VMp22LVNWvJ2RRM1xeyLDAp+bgYHJ+gra0/w7HHIDd757hjxEDxn3Pf6Vs6ZpYy0Gmxc4/eXL9cfXt9K6FYrDQNOe6upljgQEyXMnVj/dUevsK48RiFTfLvJ7I2owlVfuaR7kGnaKluElmCvKq5CYASIDv6f0rkvE/wARUtpnsPDzJPOB+8vTyqn0T/4r8qwvFXja88UKbGyR7LSc5z/y0m/3sHp7dPrWn4J8GadfWy3d/LIqknyY0x26sx+tZ0MJKc/a13eX4L0OmdWNKHLDRfizlLPSJLmU3N3JIxbks5yzE9evStqO3hgKJEgVeSfU8VvarpEVnHJNDNvReV+YMCM+3SsCScRsDgnrXrKKijznUlUZMRSEA02KdJhkH8KkNO5nZrcbtBqvPMbeVWAyCCOaW5u1gyvIYjINZst4ZYwrdR0PcVnKpbQ6KVJvV7GtDI7rl+/IA61LsHesYah5SlQDu45q5FqkbEB1Kj1ojNEzpSvoi2yKRggEe9QtZ2zD5oIj9UFOju4pIw+duSQM0l3II42TcVkI49605kZcsr2KM9rpyk7oEznHyjHNY8tvb7uEI/HrVp53djuAJ6E460ht5FhW4dD5e8puxwTjOP1rNzZ1wjyrUz2tY8A7mBP6Uz7FuUFZO+PmFX5F3OMHANNK4wExnNNTZqZz2kqReYcFR1welQc1oXTFbULnl25+gqhitE9AR1ngW2M11dOQ+CiwKQONzuox/wB8hq9ZlYPKzAYBOQPSuI+HlkIbK3mlVh5sk1yAehEaeWp/77c/lXZmvkOJKl5Qh6s9bKo/FP5BTc80Zppr5c9pCk5ppoJpDTKQlRyxxzIUkQOp6hhkU880lXCUoPmi7MbipKzMW58NWkxLQO8D+3I/Ks2TT9ZtB8yrdxDsPm/Tr+VdXS17GHzvEU9KnvLz/wAzzq2VUJ6x91+RxjXFlc/ur62Knphl3Y/PkfnVKfwvpd8+60laNvRDu/8AHTz+tdzPawXClZokcf7S5rIuPDVux3W0jwn0PzCvYoZvhKnxXg/vR59XLMTT+B8y+44Wbwrf22WiCzqpzhD82P8AdPNYk6vatsIZXU8hlwa9N+x6xZMApW4jHY4b9DUU9xZ3B8vULBkJ4IaPePybkfga9KM1UV6clJeRxvmp6VItHmqXBYgEnGeQT+tWo97LuYYyePpXVTeEbG6lL6dchCckIOf/AB08/wA6ypPD2o2au0kRkAONyZOB7jqPyrSFWzs9BSUZLQrwD9yfrUyjimwphGHfPSpQtaNkpAq/MPrUWor/AKJcH2NWVHI+tQaiP9EuB/smspbouJzK824/Gsg81sIv+jD6msgjFbEtDKKdikIpkjOtGKU0lBNhKKdRigBtSgYqLvU1JjifRA6U8UxaetcRyodThyKSnAcVIxK6rwT9+++if+zVyxHFdT4J+/f/AET/ANmpItHSXf3D9K801D/kYJ/9z/CvS7v/AFZ+leaXxzr9z7J/hQ2CWo0iqs1WzVSbrUjSGR9aux9KpxirsYNHMkU02Wo6nFQRj0qyqn+6fyrJ1qa3kvvGqNR7RZ5j8ShnW7H/AK4/+zGuCvRllH+zXq3jTwzqmtatazWVr5kSRbWYuq4OT2Jz6Vgt8MtcudpzaxYGDvkP9Aa3hjcPGNnNfedVOhU00POzGc0nlGvSE+E+sHh7qzH0Zj/Spx8JL/8Ai1G2H0VjUPM8Mvto6Pq8ux5iI29KcIzjGK9Vt/hI+T9o1RQP+mcWf5mtKH4U6OgHn3d/Ie5Qoo/9BNQ81w6+0P6vLsePRF4iCvbseRV2C88q1aJwXznaD0GetevL8MvDidRfP/vTqP5LUyfDzw0ow2nzP7m6b+gqP7Yw/f8AAHhZPoeFvEu75M496aIj6V7wPh/4YHTSm/G6f/GpY/BHh6E/JpEB/wB+SRv/AGaoec0FtctYWR4KLcntTvspNfQaeG9Hj6aJph/3oSf5mpF0PTFPy6Npi/S2H+NZvO6PRMpYVnzwLfFO+zs3RSfwr6LGnWaLhdM08f8AbuKcLWJTlbKxB9oBWbzyn2KWFPnJLdhKnynr6Vb+zuz9K+hRGynIgtR9Iv8A69O3Tdo7cf8AAP8A69Q89h/L/X3D+qeZ5h4ttZp/DdskcTuwvCSFUkj5BXFDQ9Tf7mnXbfSFj/SvoMvc/wB2H/vk/wCNHm3fYQ/981Dz3tEVPAqKtc+fv+Ed1c9NKvf/AAHf/CoLjSb+xjD3VlcwITtDSxMoJ9MkV9DGS7P8UY/CsrWtIl1q3itrpw9uj7ygbGT0Bzj3P51dPPE5WktC3gl0Z4/b2l/BFDLO0/kPgrGrn5l/pW6byfVrf7FZWxg3AhnZ87gB0UhRz2xyea7RfCFmgjzEXEa7VDyEgDJOMemSa0YtOMKqqRIqr0C4AFa1M9gv4abJhlies5I4WG11e2s5Hv4J2fcWZ5EPAPTPHJ9hSpY3dwymSKbJ5EYjIJHuewr0FCqkqMkqcHapOD6cU8swAxA7jPsMfmayWfS6w/EyllUekjkLPS44wGuxu2/dgQ8D6kfyrptP0i51FElm/cWK8LgYBHoo71ejMjZ87bt/hQDge/vVoXMqqqh2AUYUA9BWk+IKcU1GGpyf2PNyvKV0Q6zrOk+FNIW4uwY4zn7Nax/fnYdefTpk+9eP6trV/wCL9US61JngtVOIrePOyJfXHf8Ar+levXUUN8ytdxR3BUYUzKHx9M9KqyaRpsv37G2/CJR/SufD5xh6bvKLbe70udUsDNq0Wl5Hks9ukcreRl4gflYjGR9K6rw1bRNolzeTsEW3mVXLjIZGHKr/ALXGa7ZvC2nPas7abAygcLGg3n6beayfE2jy/wDCP2VnpemSRxI7M+UK7XPRuDzxxk56178cRHEU/gaXmcHI6U/iTKevQaXbafvt3XzpVAjjVh93qSQO3FcfKNzDIyOc4ppFzZyNbyQRq8ZKuoHIPfpWtpj6PczrHepdQuejiVdpP4rxUU8RSw9Plcm/xNamDr1ZcyjYqRxrAu9jgY5z2quNUt9xVgw98V283hPTJkK+bdhT6SLz/wCO1TPgLSTnE15n/fX/AOJrnee4Xo/wIjlVV6zX4nD3t2J8qMFB0YVQZ8ZPPvXobeANP/gurgf7wB/wqJvAFsel/IP+2Q/xqP7YwsteY6I4GrFWSOBJ39zx6+lITtBBzjPFd4fAEXQX7fjF/wDXpv8AwgQU5W+z9Y//AK9XHNsL/ODwVbscUrsI+pApXnkdBuJO08Z7V2X/AAgkvA+2xfihpp8CXCvuS9gB9drf4Vqszwv/AD8Rk8HW/lOPUo6gYw2cnB61t6jZSQ+G9MlEbBJHkZ2PQscbePoP1rQHw+ui4P222A7nL5/9BrQ1Dw1q+oKiy3dsVTou5sZ6Z+76YraOZYS38RfeYzwdfmVos4Eggc9
PWmkEkAZOelda/gXUz0ltT/wNv8KI/AWobstcWq46ZLH+lNZjhP8An4vvK+rVv5WcHcn97t/ujFRou5uld9H8NZS5M+pxjPOY4y388VsWPgHR7Vlec3Fyw6hnCqfwAz+tRUznBwXx39DWGCrv7Jd8LweXpoyjL9mgjtUz05HnSfjucD8K2Cailt1WECBShjZpFAY8seTnPrU6hXUMpyrDII7ivkM0xcMVW9pDbY9nBUXQpcstyM0lTeWKXy1rzbnXzIrGkq15a0mwUcw+dFbmkwc1a2rSFVp3HzlXBpMVa2LSFV9Kdx85WpM1ZKKaaY0JppjU0V+tDKrjDKGHoRmp/KX1pPKHrVxm4u6Y24vczpNNsnOTbqD7EinLbKihVkk2joGbdj8+n4VdMPuKb5J7V2Qx+JjopswlhcPLeKMy40uK6j/fxQSydm2lCR7sMk1nN4SSQjypGjJHch1B+vB/Suk8l/7ppu2RTwCK7qebYhKzs/kYSy3DyemnzOLufDOp2gDeR5q9cxc4/DrWDqSMlvcqwIIB616vHcXEXTke4pl3b2GqQtFf6fGyuMMyjDfn1r0KWaqVudHNUy1r4GeDon+jD6msgrXtV78NtNuedNv3t1x/q5xuGfr1/nXEar8OPEOmb2+xm6iUZ8y2O8Y9cdf0r1aeKpVPhZwTw1SG6OJK00ir0trJGxDoykdQRjFQNHXQpI53Blc0mKlK00rVXJcSOinbaMUXJaGYqUUypMUMcUfQ4GKlWmCniuJnEhwHNOpAKcKhspIQ9K6nwT1vvon/ALNXMbSxwoyTW7ov2qxjm8sqhmxknkgDPT865q2Jp0dZnRSoyqbHV30iRxEu6qMdziuAl0+efV7idAvlMAFOeT0ro/LjZt80pdvUnJqZZrSLopNedPMJyfu2S8zsjhYR3uzFt9BeZgCWJJ7cVYk8MpG+HjJ+rGty31e3iIAhA96dd63G3+rUfUiqdSDpc0q2vkNQkp2jT0MWPRYox8sCD6jNTLpoH8I/Knyaq7dwPoKgbUWP8VebKpTe8mzsjCr2SJhZIvak+zxjsKrG+J70n2vPWseamWoVOpZMaDtTSgqNJC5471citZJDwCa0hDn+FCk+T4mV9lGz2q7JbrCMuwHtVKWdFJC80qkFD4hRlzbCEY7U0iomufaozOT2rmc4myhIlOKbkVCZs00yVHMjRQZPkUuRVXzaUTD1ppofIy9DC08gSNcsegpt/H/Z+43JVAi72JPAHrUEV68DB43KsO4qpfy/2jFNFcsXWZCj89QRg1rejyWd+a/yt/mQqdTn8jMfxv4bQkHU049I3P8ASnx+L9ClXcl5lfXy2/wrwi+ge0vp7dxh4pGQ/UHFdn4NtbfXL4RS7hHFFvMYOM4IGM+nNexiMroU6ftE3b+vIVKSlJxkej/8JTo//P3/AOQ2/wAKnh1yxuBmF5ZB6rC5/pSQafZ22PJtYUx0IQZ/Or8Yya8OTo/ZT+83aiiOO5SX7qyfjGw/mKlzUzsI4duOW/lVepcEiE7i0hoppNLlKQHpVSaRpZfs0LEMRmR1P3F/xPb86luJvIiL7S7cBVXGWJ4A5pLaIxQgMQZGO5yO7H/OB7CtYxsuZj8iSONIowkahVHQCloorCTuxhiigmkzUNDFzRmkzSZpWCwpAPUD8qY0UToUaNSp6jHFOzSZrSNWpHRSf3i5E+hRfRtOfO60jJJySc5P41UufC+m3DqwR4iv/PNuv55rZzSVSxFVO6kygiRYokjQfKoCjJ7U7PtTaKx1eoWHZpOKTNNzQOw/ijimZozQFh/FHHpTM0ZoCw/ijNMzRmgLD80vWo84qvf6lBp9uHdJmcngRxFxj8Oa2oUZVp8kXr5kVJci5mW8UYrlR490jJBuFUjs0cg/9lq9p3iax1Sbyra8ty2MtuWQAD3JXA/Gu95RiV2+8w+t013+42jxUds+5ZEIP7t9oJ7jAb+uPwpsc6zxCRPuknH4HH9KdDxFIcctMfy2r/8AXrz3Bx5oy3R0pqUU11Js00tTSaQmsbFJDi1ITTM0madirDiaQmkzSUJDsLuo3UYpNppgGaM0u00m007j0G5o3UFTTcUxodmkzSUmaY7Dg1O3kdGP51HmlzVJtA0SCZx3B+ozS+bnqiH/AIDUWaM1oqs11Fyol8xe8Y/AmpEnVOQ0i/Q5qtmjNWsTNCcEyW9tNM1aExahbQXKkYDSx/MPow5H4GuP1P4YaPdIzaddy2svZZCHQ/jww/WurozXXTzStA554SnI8c1b4e69phJ+x/aogM+ZbfvB+XUfiK5WSBo3KMpDDggjBFfRwYqcgkH2qG5tbW9B+120NxkYPmxhv516NLPrfxI/ccs8tT+FnzmYj6U0p7V7heeBfD14zN9jaBiOsEhXH4HI/SuevfhbEWLWWpsFPRJ4s4/4ED/SvQp5xhp7u3qcs8vqrZXPLdvNPxXX3/w61y0+aKKK7Qd4H5/I4Nc5d2E9lM0M8MkUi9VdSDXoU8RTq/BJM5JUZw+JWPomDS7uYxBYWBlGUDDBYYzxmtODwvevzIFj9iea4G8129vvGGn6ikc0k6yKDb79xUjBIUD159a7zwt4ing1YaFf2riaQySmVjtIyxPKknAxjofwrhdRt2OP2RpweE4/+WszH6Vd/sDTrSNpZR8qjJLnirun6lb3lm8yXCSLG7K7hSgGD71yeta62oTeXGcWyH5f9r3NcmJxcaMLvfodWFwcq8+VbLcmu76Fn8u1RY4AewwW9zVY3hxgGsvzSaXzK+Yq1KlWXPJn0lPCQpx5Yo0DdsepppuCe9UDMBSfaAPWs7M0VHyNDzz60hnPrVD7SPekNyvvRaQ/Y+ReMx9aTzjVL7UnvSfao/U0csh+xfYueec1LC7yuABVKNhKfl5rVgCWsRlkIAA6mjW9jOpaKtbU19Ojjhw8xH0q1ea3DCpHmRwJ79a878ReOLTSowryNGWHygLmRvcDsPc15pqvxAv7vzUtUWBWbIlOWkx9en6V7uCoYmdPlgrLueZVo01LmrPXse36jr1nFGJPOLjuzfIo/E1zNz440yEsGvrNMdhJvP6V4bdahdXkhe4nklY93Ymq5ya7lksJO9SX3f8ABuJYuEFaMfvPZ5viNpI6aiG9kgb+oqD/AIWRpROPts3/AH4rx7BpcGtVkmGXf+vkH9oy6RX3HsifEbSCfmv2H+9A39BV2DxzpE/3dStjns2U/mK8NxSc1Msiw72b/D/IFmUusUfRVrq1veLuhkjkX1icMP0qx5yt91hXzhDczWzh4ZXjcdGRiDXUaV4/1WxUR3Gy7jz1lzvA9AR/XNcVfIpx1pSv6nRTx9KTtJWPZ/NI60u7Ncxoniuw1sGOBys4GTDJ97Ht61trPtPXivDrYedOXLNWZ6EVGa5oO5xPjvwzNNdDU7C3aQMMTLGMnP8AexWJ4PkudM8T2geOSMSuI2VlIyDxz+h/CvXEZHHD4P1qUIM58zJ/CvQpZnKND2NSN1axx1MOufmTsycDAyWX8CKtRRknpkDk49KpgsP+Wn6CpBNKAR5xweDziv
Lio31HKMnsPkcu5NNzTOvej8avcaVhc0hNIT70ySQIjMeQoJwKaKSIOJ7373ywYOAf4iD1+g/nVvNVbOPyYPnIMjku5HqTn9On4VYzVVN7LoJIdmkJpuaM1jYdhaDTSaN1Kw7BmjNJmjNTYdgzRmjNJmiwC0ZpM0maLDFJopM0ZpWAM0GjNJRYYtGaTNGaVgFzSZozSZosFhc0ZpM0ZosFhc0lLmkNMY1o43Pzxo3+8Aah8uyLlIooiSPmKR8H8cYNT00r827JzjHJNdFOpBU5Kd79O3zM5RlzJxtbqNCrGgRFCqOgAwKkRybWEE8/MfwLsR+mKikOyNnx90E1JHH5UMUWd3loqZPfAAz+lZp+42+pbWqFzmkNLSGsikNooqe2t2ncBRVJXCTUVdkaoWOAKlW2buK3f7L+yW/mygBcZJPasW81a2gVmU7gvVjwo/E1rOhOLSktTmhXdV/uxwgAoMY9K4/UfiJpdszILpXYdoELfr0rn7n4oQjiG1uZf9+UJ/IGuinleJqbRG5wj8U0enFRTSFHcV5DN8S7pmylhGB6PKzfyxUX/Cyb8f8ALhafm/8A8VXSsjxPb8SfrNBfa/A9gIU9CKaUryH/AIWXfZ50+1x7F/8AGrUXxPIAEmmN7lLj+hFN5JilsvxGsXQ/m/A9QKVGwxXE2nxK0uQgSfaYf99AwH5c10djr9hqQzbXMM2BkiNuR9Qea5qmBr0tZxZ006kJ/DJM0c0oNRiRXGVNOU5rmcbGzQ6ilpagkbRS4FIetABmkzRSGqGG40u6m0UWHYdmjNNpKLBYfmopooriMxzxpLGeqyKGB/A06msaqLcXdByp6M5Pw5dS2cc+qQxW8V3GWaFjIFEsQyWGzsSOc8dOKs2dvfeItXlvBHviY7/KhkG446KMsPxOe9cRp/2MaHczXEdy90JVCsqfu4o8ZJJ7kttXHpmvUrPVrTW/Bq2NtDdNd29pvR7e1kI81cbRkDAP6V9tKifHxaRu317DHax6fpcItrNF+ZEGNx75rKKt6GsdNbnsBENSaS2uHTeVKlSOSOfy/WrsXiiN/uag3/fRr5bE068qjdRH1GGhCFJKjaxcEbnopP0FNk3R/eBX61YtNeJkXfqRSPuwbOPwzUF3qjXMjD7Z52DjIfNYulZXNoym52aX9fIgaTnrQMt90E1A8hY8sT+NV5L3UrWeM6dcSQllYSGM4J6Y/rVUaKnPlNqsnCHMi8dwppJ9aj8zxlJB5sdxeumcHjn8sU118aAKHe+G7pjH8x0rq+pLu/uOVYt/3fv/AOASk+9IoLMBWLe6jrK3Vpb6jdXTKblAUl7Ehv6A10VlFvkFYYmj7FLXc3o4jni2+ho2UKwwmV8ADnJrivG3jcWEb2lpIGuzwF6iIep9/atfxd4hi0TTySNxXCoo/ifsPoK8LuLiS4nkmlcvJIxZmPUk125Tl6rP21RadPM8vGYn2a0+J/gv8yS5vbi8maa4meWRurO2TUOSTT7W2mvLhIIELyOcACvSfDnhKx063Goagwdk5LsAQp9EHf6n/wCtX1SSWiPCnVe7OV0fwVr2tRmW2sWWHGfMmIjU/TPX8K6AeA9MsbcNqeuKs+OYoIt2D6ZJ/pWpq/jK6v7hbHSY2AJwAp5Pbk/lWJPHZWFzG2o3rTTLc/vYogGUxhQwYHPOSQOcd6UppEJTn5EL6ToKMEiN1M/puHP4AVUmtdKjYobdkYdQzkEVZstVuJ713sNHWaaSLy3WNCc/NnO1QAOwI6HFSXXhTxRqtwbs6NKNygAABcAAAcE+goU3fVFcqW7Mk2WmueGkH0Yf4UxtFt5FzFdc+jLTr3QdX0zJvNOuYQO7RnH5jiqsc7KQa0ugt2ZXm0m5iBYKrAd1YVSdGjOGUqfQjFdHFc7xhuabMsUqFJY1bd0buo9qGCbRz8NxNbzLLBK8cinKsjEEfjXpvg/xeNSRdP1GQC8HEch/5aj39/515vd6fLagSfehJwHA7+lV43ZHDKxDA5BHauPF4SniIcst+/Y7MNip0ZXWx7/uKtj8qduPqa5/w1ry6/pKyMNt1BhZRnqcfe/GttHBFfHVaMqc3CS1R9TTnGpBTj1Jdx9TSbm/vH86bupC1ZWNLD/Mb+8fzp80wt9Pe5ZizbtqRh8Fj37Hp9KrlhmqmoQm5hj8v5ZomLI4OCpOOh/AV04RUvar22xzYtVPZP2W5bjuzMu5d6A5wrn5sA46UTO7QOoPJUgZNVIBcNNHJO5JRCpLOWLk45Pp90cVYdgRRiI041X7LYrD+0dNe1Wo+KUyQRyK5IdQ2c+op4dv7x/OqdpIdksZ3ZSQjnuDyMe3OPwqfdWVSNpM2VrE3mN/eP50b2/vH86h3Uu6osOyJfMb+8fzo8xv7x/Ood1G6jlCyJvNf++350vmyf32/OoN1Luo5QsibzX/AL7fnR5r/wB9vzqHdRupWCyJ/Nk/vt+dJ5sn99vzqHdS7hRYLIl86Qf8tG/Ol8+X/no3/fRqLIoyKLBZEvnS/wDPRvzNHnS/89H/AO+jUW6l3UrByol86X/no350edL/AM9G/Oot1GaLByol86X/AJ6N+dHnS/8APRvzqLNLmiwuVdiTz5f+ejfnR58399vzqPNGaLByrsSfaJv+eho+0Tf89GqLcKQmjlQcsexN9pm/56Gj7TN/fNQk0m6jlQ+WPYdcXcohI3t85VOO2SBn9an+1Tf3zVNiDNFx0Jb9CP61LmrlFcqRPKr7E32qb++aQ3U3979KizSdTUcqK5I9i3byTTShQc/hXS2txDpsPmygFv4V/vGsWxRLeIyydhmuD8e+L3hZ9OtWIuHH7xwf9Wp/hHua1wlCdaso00ebjHBp82kV+Js+M/ii6F7W0lSeXoyqT5cf/wAUa8m1HXNR1Rybu7lkHULnCj6DpVBnLHrmk2k19jQwcKer1l3e54k67tyx0XZCE5pMVcW0YJl/lHv1rUttBmPzT7LaNWAdpWAZf+Ak5/yK63yx3MNWYAQnsaDGfSui+y6NEreffTyEOQFijC5XsQeR6+n41inrQpJ7Cd0VChHakK1c2g9Sad5MBGCGz65qtBGdzT4bme3lEkMrxuvRkYgj8RVx7OP+FyPrzVWW2kjG7G5fUdqGkJSaeh1/h/x5c2rpBqbNNDn/AF38a/X1H616Xa3yTxJLG6ujDIZTkMPavn0Eius8GeITp18LO4c/ZZzgEniN+x+nrXi5hlkZxdSkrPt3PXwOYNNU6ruu56y2ohWwFz+NJ/af/TP9azLh9lwAeN/8+9M3cda+d9jE9tqJqHVcf8s/1pP7VH/PL9ayi9ML0/Yx7BaJr/2sv/PI/nR/ayf88z+dYpemmSmqEewtDb/taP8A55n86P7Yh7o1YXmU1pBVLDxFdHQjV7Y9Q/5Uf2vag4xJn/drDsbuG3v4JZyBGjhmyMjANcxPdalNKXfU23HqRM/9K6qGX06iu3Ywq11B2sehNq9qOvmD/gFNOsWX99v++TXnO7VM5GoSvj/pvJz+YrSs5J1t2NzIWIyctnI/E9a0qZZTirpkwxKbtY2b3
R/EyrCljYIvmL5k8nyE7+4AGdoHQYGfetLTdBvDHjUr2+LnqoJCj9c0y3sI42WRri5ZhyrGY5H0I5FXDql9plpJM04u4Y1LlZx85x2Djp+INfQQraWaPjqkNfdZoQ+GdCLKZ1nkYdyCR+prpIND8HJApNpH5gHUqf5CvPIPGV/rC2r6RbWStITvtbnLSYH3mQhgHx3XAI9x0t6h4j1OxYH7NZvE33X8tx+B+eipOEdZIujGtJ8sH+J6XLZ+GDprReVZBWXB/dcn6jrXn+o+GdAeR/IuI4fTybWVf1FYr+M9QI/49rP/AL4f/wCKqBvFt+T/AMe1l/3w/wD8XWf1mjtb8DqWDxd7p/iMniudEm8x79prQuE2zRydCf4WZQMjrjNaZcMuQxwR2OK5fU7641SRGuCpCZKIowq59BWlpl0ZLfy2+9GADn07V42OjByVSkrH0OA9pGHJWdzVDPji4uVHos7j+tDB2XBurwj/AK+X/wAar+ZxR5prh56v8zO32dP+VfcSfZlllhaaa4m8lt8ayTMwU+oya6G2YQW7SkfdXNc/DJucZ9a1tQl8nTv94gf1/pXPWc6koqTuEoxUbRVjyrx/qbXOsrbCQssKZcdt55P6YrkURpJFRAWZjgAdyas6ndm91K5uW6ySFq1PDFk0txJeEgLAMLkZyx9PoMmvtcNSVKlGC6I+TxVXnqyl5nT+HdJi06zczeX5h5mcjJH+yD/n1pbm8udbvY9Ot5lhg3KrOxwkalgoJ/EgfU1Dql39nt1gA6jJI9abe2seg+HILue3uYtTu8NBIwBjaM87lI46EcHkHBrSTa0W7OaK6sgubgaLHb21hDJFq486C7J+cOC2F+U9CMfoD1re0XwZBBFDqPiCb5nbc0D9B357sfYfjR4W0UaXaf23qYbzpOUVh86g89ezMO/Ye54ztZ1e61LUpLSBMysdrsDwo/uj0/z3qYx6IUp9jfu/Gmk6LB5VhDbRNvI2RoOnvgYH61lL8Sbm5u1gh0+3mBOFL7gc/T/61MsfhLq+q3ETxSgW0qgiZ0xg9wRn9e9dhpvwu0Tw7qNrcXmvRvcRnJheMbScfXOK2UEtzCUooxrfxrOjAajZeXEx567cfgAf1NTz6J4e8UWwaDyLW7f7rRfLk/XofocGup1DwzA8Hn20lpcx87zCmAPwya8p8SXkGiaxtsAPMCgy4+4QedvvxjmplTW8WKnUu7GVqmj3+h3hgvIiuc7HA+Vx6ioVkLLg16Jp99B460JtPumAuVXfFL1YEcc+4yM+oNefXFpNZXc1rOu2WJyjD3FEZX0Z1RdyNSCTHIqsjcENWNdwfZ5ygJKHlCRjI7VrvwRVK9UyxgjGByAB3psoveENXOla7CXk228x8uUdsHofwOK9aJ8t8Z4PIrwUHmvYtB1E6p4YtblsebEPLf6rxn8Rg/jXg5xh/hqr0f6HuZTX3pP1Nnfz1oL+9VBLQZa8NwPa5iyW96bvquZaTzKOQXMWd/0pC4qv5lJ5lPkGpEiSBLsocDemRz3B/wDr/pU5es+aQIUlPRGBJ9B0P6GrBbBqpQ0TFfUn30m+oN9LvqOUdyffSb6h30b6OULk2+l3e9Qb6XzKOULlgNRuNQB6N9LlHcn3Uu6oQ9L5lLlBMm3Ubqi8wUb6XKO5Lupd1Q76N9HKFybdS7qg8ylD0uULk2+jdUO8Ub6OUVybdRuqHfRv5o5QuTbqTf71Fvppenyhcm3Uhaod9LvyaaiO45WUzu38SqFznp3P/stSB6rIy7Ayj75LE+vPH6AUvmVU462Ii+pZ3+9SW43yge9UfMzWlpowWkPQDNZTVolSlaNyLxRrC6PpEswxlF+VT/E56D+teGTzyXM7zSuXkc7mY9Sa7P4j6q019DYKTtQea/PVj0/Ifzrh0BdwqglicADvX1GUYVUqHO95fkfN5hVvPkWy/MsWttLdTrFDGzuxwAoroZNNt9MRU+W5u3wOOin0Hr9a0LW1i8P6QzMEa8mHzP8A3f8AZH+f5VFB5UGjXmpXsYaSeMLZ55IYPy3Xjp+NerKXKjzE3J+QsMa6aZ47iN21cMFgiSNZR/gDn6nj8a2bPwZe30X2zxJetapkFYmYb8erE9P1NaukWsXhjT5Nd1o+bqtzH543DJiQ9/ryB9SB71UgN545ka4RtkCNtCBs7T7+p/8A1VMYuTIqVbbbD5P+EQ03CRWdrOq8eY53Z9+Q2f0qBta8MSMETT9JI9oVB/MpWvb+C2ViwiWRkOGZIixz+ANcbr3w+1EXV3c2yOYy5fY0LLgE9uvStnSsjmjVUnqzpv7D8LaxEVEUVtMfutE+cfipwPxWuW8QeB73R4xc2zG7tTkl0XlPrj+Y/Ss/R9J1uCfcsnlwxNySc/gtd3omvPHOLedFViQJAeAw9SP6jke44rnalF3Rup22Z5Ypp2CD7V3vi7wojJJrOmR7YutxBjBjI6ke3+Oe9cPsyK1jJSRqndFG7tB5YmhA4HzqB096oqea2OUcH86ypU8qVlHQdPpVAz0nRNSk1PwxDMxzPZv5bc8sAAQT+Bx+BrZEgZAwPBGRXBeCr4RahPYsfku4yBzxuGSP6iustJiqtC3VDx9K+bxmH5KjS23+/wD4J9Hg6/tKSb32L7Se9RtJ71A0lRmSuRQOvmJzJ70wy+9QGSmF81agTzExlNNMlQ7jRk1fKTzD2fNRFVJ+6KXmnKpqkrCbuIqqB90UllZT+IdXTS7ZvLgX57q4AyEQdR9T0Hv+NQyi6vb2PS9OjMl5L1x0Re5PpxXpOi6NDoGlpYW53tndLLtwZXPfH6AV34Wh9uXyPJzHGqmvZw3PK/8AhIrPaAdMlwOxvZKjGu6djB0ptvcG8lP8zWeI7LjFxN16eSP/AIqrUVpDMAytcEL1Itxx+tehyQ/q543MyaDW9JgdXj0VY3XkMk75U5zleeD9K7PRvFFh4iD2N7GEkPQyMAZfcEYAYe3X69eZtdNWWVYmeRRggs1kP51oDwpC+HXUYw+OhhAIx9DWU3T2YrtO6LGraRJp85ABaFvuPjr/APXrLZTnkV2tgWitFtru4iuk2hTuGM//AF/eqd1o1rIS1tOoB6JIMH6Z6V59SOt4nsYbGwkuWo7M5M8VJa3Bt59w6Hg/Sto6DcMG2RGTHXZ82PyqlLpckfDIVPoRWEmtmenCXVEhux6ik+1j1qqbSQcYpn2d896y9lE19qzTguxvU571p+IbvOh7kPOx249lNcyIZAeKmu5JX0yWJicCNsflWboJzi+zKdRuLPLm5Ndzpy/YtFtIFAG9fNkOOrNyP021wx612l26phBjCRomR7KBX1h8jIksbS51rV9kNqboRfvZIQ4UugYAgEkcnIHrzTnaDxF46trSAyzafbqiRLJkHaiAYI92wDU3hGDVF1F9T0y6it2tpIo3MgLbvMYrjGORwT+FWvhsif8ACR3l07qMLtBYerbv/ZaybvJ+QS0gdD4zuhpsP2aKRSLckq3ZmbHJ9x1/EGq/w/0SK5uUubsDk+Y2/PIHPNc/8Q75JpnMY4dmJPr85H8lH5VZ
+Gt48djcmSV2DM67SxIAWMngfjWkPdRzzTcdDuvEviy7ubgabpj/AGeNOSo7DsTjqT1x0A61x9zPNIxJluJWzn5V3YFRR3Id55nA3Sykq38QJ5x716Bpt7d6fpyw6dbrgqGeZ0GXyAc5/lVPzM72MDT7qO1sBdQ6h9mk/iSRSNw9GAHI9wfwrL8RaZB4j0lb6EYkcE7VAJD9sexIwR6kEd6b4rkPnWs5CxrdFhtTnkHBPFXPDyy/2bdWzkEtGZQ2OQVOB+mKL2DzOO8DpqthqyyRwSrGcglkIUMAeCTxyMj8a6rxFs07xTpeuRWsdxFKql0kUFHYcYOf9kr/ADrfukdhDNMN6iReD7kD+tZPiuAHwbavnmOVMfipH9KxnqzWE22jmfHlsYtca6XS302K5QSJC0iPnsSNnA5HSuMnc+ScYFelfE6FXbSr5II0Se2zvhfdGx4JwMDaeeR715nL901VOV4nWUGG1j6V3fw+v/3V5p7sNpxKg/Q/0rhpxiT6itTwvcG38Q2pzgOTGfxGB+uKxxlL2tCUTfCVPZ14v+tT0oTYJGehxS+d71UKsGOe5pOa+b5EfS85c873o873qnzSZPrR7NC5y753vS+aPWqOTS5NLkHzluVlkiZG5Vhgj1FPhnZ4EZ8bsYYj1HBqluOKW3ZhJJH1z849ux/p+dPkvFopS1L3m+9Hm+9VsN3oAao5B8xZ80+tKJPcVV+alG70o5A5y15nuKPM96rfN6UDPpS5B85a8z3pwk96pndSjdR7MOcueb70ebVPJoBal7MOcu+ZS+bVLLe9GT70vZj5y55lL5lUsn3pQze9L2Ycxd8yjzKp7m96C7e9Hsw5i7vo8yqQc0nmH3o9mHMXvMpN/vVLzDR5ho9mHMXfMpPMql5hpPMNP2Ycxd8ymSzFEJUjd/D7ntVXzTSeaPMXeAVBJwe/Bx+uKqNPUHPQuM6oojVvlQbR9BTPN96pNKSc0wymj2dyVI0BL83Wtu1OLAnoW4rlYnJcc1v3NyLXRDKzYEcbyE/QVlVp3aiupalpc8f8R3v27xBez5yDKVX6Dgfyqx4atle7N04yIvu59awXYtIzHqTk11emJ9msoo2IyyiTgevP8sV9pTgoRUV0Pjq03OTl3L12smrapb6fE2N7hQfT1NbWn6dY6v41s7CDYdLsoFeQbiV7tzk8dRkfWsPw+Fl8TwPJ5xEYklBhBLBlRmU8c8EA1s+CI3mXxBMXHnyBE3j/AGid386znfmIbtCxl+PPEFxqGqGySUkHaXRfX+BP+AgjP+0T7V1vw70RtItrrUby9NtbBQs+4DDH0HsOmepORVPTvDGm6osupzowuUlYhkPUjPUHitXxJcwpo+naRC5iiD/vj64Ax/M10QtFHHKfNoS6x4w13WTL/ZxNrpsZCjy1AYj19fyrGWbU44pLmHWt6oQGEjMeTnjac+nJHSruiW8eq6lPFMxhsrYDcsY+Zh0AHufWr2u6PZR2zzaX5kUUR3TQM2S49R7ilcSSKmnaimqQNbzokd4BkEdJfcGsm+tTE4uIyVkQfKQ2AR/nv2NVoZ4bS+txFKHEkoO0jBU4yKyvH91cfbYzHII7aRm/dx8DcArEn2w44+tJ6lxjroejeH7lbyyRJUR1m/dTg8deAcfofTOO1eb69pDaTrdxaAHy1bMZx1U9P8PwrvPB2JdOUAgzT2gaPI6N6/mM/jVL4kQRtdWd0nB2mNvcEBh/M1nHRmkJWlY80uU21lXfO1seor0Hxf4bstK0fTL+zvjN9rT5o2wSpCqSRjtkkfhXn9wv7o+xrSMk1dGzF0y4NrqdtODjZKrfhnmvSbiPybxiOn868sTlwB3NeshTc28Mo/iX+XFeXmK1i/U9TLZe7JehVzmjBParQtmp/wBkfGT09cV5tj0+Yo7T6UoQmrRWFfvTxj/gQpjTWqZ/ehvYA07BqQbKeIiR0py3tuW2pHIzHoMAUy71EWQHnpHbkjIErYOPoapQk9kTKSjq3YesBPaodRmXTrCS4bqOEHq3aq0GtQXkzRDUIoCBlXlyiE+mf/rVJeaFY6nta88Y2Kbeka7cD835rro4STknPRHFiMfThFqDuzoPAttZ2NmLozx3etagCWjhYPIq5+7gdOgJ6ds9K9QtdP8As6q8uxpsevC/T1+teK2mnW2mxqll4+Nuq9Fhuwg564UPXqHg3Tr60tp57zWbzUVuAhjNyzHbjOSA3IzkflXpSjofOVJc0nI44aNpZYN9mhxg9V6086TpwXAtIgAORyDmp/OjEoVMZBKjI6D1pFuVbZiQE8qcDnp3rzuaXcLsWOzSABI4QoAO45NPaFFZMpGA33TtzxUK3LDkEY5znpSm5dNpaQKQpGQO3NKw7lgeX0GwLt4+XrSGVVG7bjavTHGTUCTsI0zIQGXAOMCgzgqAZduB/wB9GpsNMsDYWOWVjwR8veuC1O48barqDQJDPbrCWEaxt5a7c9SxPzdu9dqXIDbmKtjccj3oE+12ZnTpkew960pz5HtcpTa2ZzNhpnifyx9q1hI8DlDEsh/Mj/GtY6ddmIj7erMBnLQr178DFajMSjBZUYgY3YpN7MHZJI+eFXHT60p2l0X3GscXWjtJnMXln4ht03QLZ3XcquUOPxODWBqGt6xaxSR3Wm+RkFdzxsBz79K9HcuefNQDgnaO4/8A11I7ArKrPGy8URVNbxRusfXtbmPAsc9a62+lEsrOARuVXwfQqD6e9d7daLpV87LPZWxywO4RgE/iOa5HxLYx2d8Y4QUi8tdi88ADGMnnHFehTrKeiOdS5mange2hu9P1iGXU/wCz22xPHKHCkOC2OvbJAP1rH8HSFbu+iU/vNisMd/nCn/0OpPCet2uialcSX0cr289tJAxhVWdC2MMAxAJBAqrJqFiPG0+oW/mpZXEzkBlVSobOCVBxw2DgHtVJPmY57MXxLZktcRquCSSHlbjg7sL9Qw611ngnwzLp+kOLq5QtK4lCRLnaChVhu7kq3b061U8RafueK5h+T5mRdy5wR/C3t1q94N1lYQmnXGQCSsLHofVD9P5Vs0+U5nK6IZNLaO9kSYZy5IyMc9McdulbOn+I2spoLCePfKMIsg5GDwBjvWrf/YFAhnmQ5H7tSfnHsCeo9OahsdJtmvUluPKdQQM3Ecquo9MjANCaktTJ3M3xRomo/bLa6kiWQZKwoo5Vic9KsQwjTdKuLmR/mC+XyOSoUFz7/dwPUkV6Jqt5o1np8dzEkEkiqEG0Y49CTzjivDPiDr95PdNaCNlSdQQ+MAr/AHV9s4z6nFS10RcdXY1YvHMfiFVsobI29xHiV5FffHhTnjjPXAqXxTdKvhmztmYeZJIrAewQHP8A4+Kz/A3h6SyLRzvsuZiHlCjPloP4TnqSeoHp61uzWN74p8cn+zYYJItOwxWQ4R9hA29MZYjFKSSWhUfj0Mb4ix2ljp+iWFpcNIvkGaQeYXXJwoYZJwDt6DivMpDwT7103jG80+48QXj6VZtaWm4KsLYypAAboSBznpxXMN0A7mlTjyxsdlyrPzIPpSwSNBc
RzIcNGwYfUHNIWR5Gyec4FPEQPRs/Sra7iT1uj0pL15VSQ4ZWUEcY4NWk2yKGXkfyrjtK1hbe2S3ut/y8K+M8ehrYg1e0Vw6XKficZrwKuGnCTVj6WlWp1YJp6m35ftSGL2NQp4h0rA3SgN3AII/nTv8AhINJP/LY/kP8aw9lPsPmXcf5fsaXy/Y00a5pJP8Ar8fl/jTv7a0r/n5X8SP8al059hqce4eX7Ux4iGR/MaMggZHcEgY/l+VOOt6SP+XlD9CP8abJrGkyxtG04wwwen+NOEJp7MblF6XJcOP+Wjfp/hS4k/56P+AX/CmRa9pRiQvP8+Pm4A5796X+3tH/AOex/T/Gm41L7fgF13HbSessv4bf/iaXb/00n/NP/iaZ/b+kjpL/AC/xo/t/Sf8Anp+q/wCNLlqdvwD3e4/aP+etx+af/EUmwf8APW5/NP8A4im/29pH/PX9V/xpf7e0j/nr+q/40ctTt+Ae73HbfSa4/NP/AIik2d/OuPzT/wCJo/t3SP8Anr/6D/jS/wBu6Of+W/5lf8aVqvb8Cvc7/iJtP/PWf/xz/wCJo2HP+um/8c/+Jpf7c0j/AJ7j8x/jR/bWlH/lsP0/xo/e/wAv4f8AAC0O/wCIm1v+es3/AI5/8TRtb/nrL/45/wDE1INX0k/8t/5U7+1NKP8Ay3/8dNF6n8v4f8AfLH+mRbZP+esn/jv/AMTTdsv/AD2f/vlf8Ks/2jpn/Pc/98H/AApf7Q03HE//AI43+FLmn/L+C/yDlRV2T/8APdv++F/wppS4I/4+PzjFXRe6ex/16/ip/wAKd9psSf8Aj4j/ACP+FTzS7fgPlRm7Lrtcx/jD/wDZUeXd/wDPxD/35P8A8VWl59j/AM/Ef5H/AApfMsj/AMvEf60ueXb8A5EZgjvP+fmH/vwf/i6Xy7zvcQ/9+D/8XWmHsv8An4j/AFpc2Z6TpT9o+34BymV5d3/z8Q/9+D/8XR5d3/z8Qf8Afg//ABdapFt2mj/Om4g/56x/99Cl7V9vwDlMspd4/wCPiD/vwf8A4umxrP8AaP3skbgIeFjKYyR7n0rUZIT/AMtov++hTY4EcyMrKQDtz9P/AK5NUqmj0/ATVigQf7v5GmkHH+rJ/EVqfZV9RSG1A7ip5kMzUOxgSjAe2DVnxdcG28HzHIy8aoAf9o/4U+VYoULO6gDnGeTXG+NNae6t7ezJJAO8j0wMD+tbYai6teFtk7meIqqnQk/I5CNPMlRM43EDJrs7p9t6VBOAgAyMcDj0FcXFJsmjf0YGux1QGO+LOwZiATgYxX073Plrk/hdbuXxHDHYSpHdPHMI96bgx8tjtx79PxrY8IvPCNfhZPLulkXMeOhBbI/PFcvY6ncaLq9vqNoV8+3cOm4ZGfcd62vD3iBbvxjLNLFFZDUVMTCAbUWTqDjPGSP1rJxblcU/hOz8MHfpOoCWaK3CXDgtIwULk5HX2o8aWMZtLe+s1SaLem8qxIcc8gjtyBn2rz7xxpzpcC5jA8vaCRjBH/6jx+VbPgHxWj2jaFqCmQSDbEQB6Yxz37f55uztc5nHqjXtrtNGu1LOrRzjMixrjZjpg9TjvW8NStrqCV4bm3klYfKiEkntyOw+tYWp6JK7OLctNCihcIMsn+yQcfTkio/D2mXVjq8cs0Gy3zhi5AZR643c1Voy1M3ojSXwFLHLZ3zGNEmmjk29TgHP4cZrB8b+GrvUbCPULKJTFA8jyIWAIUhACM9eF5r1ee6W5hFnYh3Ljb5rDC474/lWdqcUAiXSIgsyxFZr3J4UZyqfVmA49B7ik3d6BGUtzi/DjSWA01UY74wsLDHQkAEfgTVDxVcySWttHKSW3ZBPoEUf1rvLHSo7YJNO2JcnaMdO5J+nJ/KuE1q2Oq+KLXSrf5Sg8t2c8IfvOT7KOv8Au0pKyNKUrzuReN7LToPDmk3Vu9u166qkvlSZJXykPzLng5J5715tN/qX6f5Nd98StTS91O1tnslt72zh8i5KfcbBJTae67SCM+tcdbaZe6mWhsraa4k4JSGMscepwOmcfnUU9InU3oUtNgSW+iErqkYOWLHHTtXZrr8VvCkS3SBVzjbz1NUbP4b+Lbv/AFWiTr7yssf/AKERW9afBnxLMitPLYW+eqPKWZf++VI/Ws69GFVpyZ0YfG+wi0ktTIl8TgcJNM49uKpv4ikkJAhdx/tS/wD1q9H0/wCCFusatqOsyu5HzJbwhQD9STn8hXa6J4C8P6B5clrp6yXKDH2if52J9cHgH6AVksPRXS455nVezt8jwqBdevQhs9JuHRxlWS3dgfxxita08HeO70Ax6XJCCcEzGOLHvhjmvoJmO371Rl/9oVSjBbRRyzx1aXU8p0f4Y6+rONS1G3COmADNK2w+oClcn8cVrQ/CSy+1l77Vpbq1PLQLbJEW9jIPmx+vvXemT/aH4im+YT3Wq5uxzSqzerZix+BvC8TRFdEtP3Qwu5M/nnr+OasS+H9DQADRbBnY4Vfsyc/p0rReXapPJ9MDrSQI6Au5QzN95gPyA9h/9fvRzMhyZFa6Nplngw2FoknXekCqc+2BxV0p/tGmeZIvp+VBmfPQH60mTds8im8P+Iw26WyuJGyT8ig4HbBqE6R4oSFCNKumkThdqAZHPXnrivXgx/2acHY8cVn7NGvtF2PI49F8SBY2Gk3eVzlCAOT/AEoOkeJS4X+y7oqM7iY8E/TmvXstjtS5fsRS9nEftV2PHptO8RxWnmvpd+zEj90sW45yPT2zTfsHiI/8we9KhAwBhPX0r2Vd3fFL83bFHs4h7RdjxZrbxJGQJdJvmEvZYWOwZ9cU0W3iJMu+m3CiRQoURHK9ueK9sAPc/rSgkd6XJEftF2PGTba2AYjZTruP9xuBn8v8/hUaxaztkL2sw2/KoMDZz9MV7Vz/AJNHP+TR7ND512PE2GsICwtXJ9RG3P4Yz3qRJdR2MWsrlWXB5hYZ5+le05b1pwLDrS9kg9oux4oLnWNzbbByM7gfLP8ALrWfr8N7e2E8txaSDycNG5BGQeDkfrXvjSiNd0jBF9WOBVW5vdMuraa2nv7MxyoUdTOnQjB71cKfLK6Gqi7HytISpIzyPSo8k9a2/FeiHQdZksxcRXMY+aOWJg29D0Jxnn1FYitjBFd8Wmi9zu/C+rDVrV9OvyZZEAwN3zyKO6/7Q9O4x6VNf6NJbyl0dsnDAqOvow9/cc+orh4zuIZThgcgg45rt9I8WIsAtdWj3DvMF3B/99fXH8Qwau9jCcGndHNazba2bw3kNzJc7xtKqOQPTb3HuOK1PCmu6zJqNvp9w00MB3DcqFADgkZxgcniu0tU0jVFY2l9GhLfcEynj6Ptb9D9auwaLMswbe23++I8nH/fOKLJkOelmi7pWkS6rKCDJMyZ3EnIX8TwP51X1rwtP9uj+1WzyQryrtgJG2OD6sf/ANWK7fQL3TdLscS3qB84PmMob8FFUvEuuSXkBg0qN/MdSv2hl+6Mc7VPOfcj86yk1EIq5wOszR6Bp8dvBzqdwpX5h8yqerbex6Yz9cDFQNnwj4
Ha+i1J7fVLhlKRIwwwO5ee5Iw+fQke1XbGLSvB5tta1WVLgXgdFYN5snQ5Yc5PIAJOOteW+JNfute1N7m4ldlHyxKxHyJk4HHGeeT3OTWWs5W6HTTikjGlfcx5qrLIUVmHpgcVI5ydufqaoTyb2AByqjH/ANeuhItsYOtdB4a0J9WvNzq4tYiDIyg8/wCzn3wau+EvBE/iC4Sa8vLfTtNB+e4nkUM3sik5J9+n8q+hNLv/AAjoOmQadZahp0UEK4A85cse7H1J9azqydrR3IvY8mk8I6LKSsaXUYA/glOT/wB9ZqI+B9JVyUuL3HUKzLnH/fNezN4l8Lt97UtOb/gamk/4SPwvuDfb9OyOMgrXHar/ADFe08jxaTwTp5LFJ7xSCRlowwGPwFRt4HtghYX0wHZvKBGfzr27/hI/DB66hp/5ikHiDwp/z/6Z+a/4Ufvf5vwGqnkeLP8AD+BU3jVX2nHP2fp/49VKTwLcCSQR38O2PktKhX+RNe7tr3hKQfPqGlN9Shp39v8AhPcW/tLSsngnenNNOquv4D9p5HgEngjUUGVmgf6ZHHr0pD4N1JAdpifGMYbr/n3r6BOu+FGHOoaUf+BpUg1fw24+W+005H99KfNV7/gL2vkfPK+D9UbHESgvsyzYGfyqNvCGqLtLCIZYr9//AOtX0X9r8Osf9dphPrmOnG58OtjL6WQOmTHRzVe/4B7XyPm+TwpqsZAMSEnphqYvhnVJFDCNNp6HdX0j9o8OBdofSguc4zHjNHmeHe7aX+cdHPU/pC9r5HzanhfVpArJbhg/TDAfzxUT6DqaY3Wj89MYPfH86+mRJ4dbjdpZHpmOgDw9nj+y/wAPLo56ge18j5pk8N6tGAzWpwe+9f8AGoX0XUoyAbKfJzwEJx9a+nPK8PNkbNMbPUfuzThDoWc+Xp5PrhKaqVB+18j5kk8P6tGqsbCYqwzlRuA+pHApg0HVD5hFhOfLOGwmfy9fwr6eW30MfdgsQPZE4pTa6LsGbay2qd2Si8e9HtJh7U+Xf7I1DdtFnPnGceWc4oGk6iWKrZXBYdQIicc4r6YSbwvPmNJtLfZ1XfHxSm38MnqNKOOR88f+NHtJdh+2PmZtN1Jd3+i3I2gMfkPAPelXTdWJ4tbrOccIf896+nE07w/cA7LewkH+ztP8qVtE0I8mxsz7bRT9pLsL2x8wCz1R1LCC7YDrhG4pf7O1UDd9lu8euxv89xX1B/ZWjAHbZ2wyMH5BTv7L0lzlrS2fH+wDR7SXYftj5dey1OMZe3ulHqUamSJfQ/6wXCc4+bcK+pv7J0ncG+xwbgcg+WOKH0vSZE2PY2rqTu2vApGfXkdafP3Qe3fmfKvnXA/5aS/maX7Rc/8APWb/AL6NfUh0PQyf+QXYf+Aqf4Un9h6F30rTz/26p/hT512D6zLufLn2i4/57yf99mlF3c9riT6bic19Rf2JofbSdP8A/AVP8KX+xdEPXSdO/wDAVP8AClzrsP6zNdT5eF7eZ+WeQdzzT0vtRK4hnnIzwFr6ujaONQke1VAwFUYAHpT959TReP8AKH1up3Z8oSXuqRY8yW4TPTdkZqxDD4hu5AkFtqcrMMgRwu2fyFfUrfvFwVJ/ClXzY1wAzgdFPUfQ1N4/yi+t1O7PlTVl1/QLmOLUYbm2lkjEiLOOqn/9XSucuriW5naWViznqa+svFvhWw8ZaLJYXOI7lPmgmKfNC/8Ageh9fwFfMfifwnrHhW/+zaraNCW+5IPmSQeqsOD/ADrai4PVKzCVec42kzCDV2Ly/btLgvE5GAsihOjAcnj6da4w1saHeKvm2cr7UlwUJGQrDvj6ZrdozTsXZfm59qq4KOCpKkHIPcVZIKllbscGo2HNCNHqeh2N1F4o0Uu+wXSfLPG3OTjG7Hoe/p+FZekaemgalLcwQeZNu2eW/wB9Af7nZs/nXL2N7Pp90lzbOUlQ8eh9j7V3dv4g0rWrYRzubK6IwVbGwn2P+f61VkzmknH0N2LV9PuWAm2JMOCsqlHHtg4NR3TXj3Df2bNpkMKRbpZb+ZgqZJAwoPPQ9c1nyaVcG2dIpmaFgCudwT2Pdf0qvYaRNazbmvLTB5IURA/otT7HqmZOaM7SL/xGPFy3dlqMtwFkEf2nZshdOmAp42+gr3vRND057ANEG8x38yWXdlpJD1Zj3/pXnVhYRo3ms7Pk581YyVH1Y/KPxIro7PxZDZKkVv8Av2j6RxOGB/33xgf7q59zSfKhKfMzT8SxRaZazyNKhwm2KNl+8euB/X2GO9ea6PayQx6t4ia7iSeyRpGjmXcLhWyHQ9/myBx611OpNLfkapqT+WkhYK+07IwoJ5IGFHHU/wA6848e+M011ILGzlme0jZZm8xQB5mxVwoAyFGD165zWLfM7I6acEjk9Z1K41fVbi+uWDTTuWbHQegHsBgfhXrfwb0gw6bd6rJGwMxEMRPQqOWI/HA/CvJNG0q71zV4LCziMk0rYAxwB3J9AK+oNOsItM0y2sYB+7gjCLwBnHf8etVPRWCrLSxazjsKAw9P1pCD6UmD2WsDAdv9v1o8z2pu0/3aPLPpSAGlyOVH60wlT1UfmaUo3pSFH/u/nQBGyoevX86aI1JOCp/SpCjE/dB/Gk8pj/CB+NAiMx/MDsY46YBPNKZQDg7gad5Lj+DOKCjkYIPuDTQCB165el3Kf4j+NJgjqin07fypCueMEfrQIn2D0pdg9D+VHngdf5Un2pR/CTWZQ7aB3I/CkJX+/wDpTftS+hppuU96Vxqw8lf79JuHHz1H56n+E0hkU87T+VTdjsibeo/i5qG6vfs6Bktri5JONkIUkD1O5gMUocHscVkeJLPWb+xWDRr2Gzcn9475DEezDp+X404u7KsN1LxbZaTGHv5prDd91JrMsx/J8VgS/FTSlBEV5Ox7E6fwf/Iormrz4ZeIriZpZb20nkbks0rkn8StUH+F/iUE7IYHHtMBn866oqC3Y9C/qfxX1aWd0sBbxQcbZDBhz74LMB+tYT/ELxPyBrNyM/3SF/kKs/8ACsvFP/PjH/4EJ/jTT8L/ABSf+XGP/wACE/xrRciKTic9fa/qOoHde3k9yf8AptIXx+ZqkdQkA4OB7V1p+Fnik/8ALin/AIEJ/jTf+FUeKT/y4xD63Cf40+aPcfMjjpb55F2uSy9wTVfcA+A25T37iu2Pwm8Vk8WUP/gQn+NNPwn8WLk/YYcDrm5QD+dPmj3KUkccjlDwa0bW+RcK4BHoazb23+wXT27zQyOpwxhkEij/AIEOD+BqJZB2aquDsztbR9KmUeaCv0rorJvDqIPNugB6Fa8rE2OjEfjSidv+eh/OhszdK/U95GpeG9I0BL9by3zNHIYYvMG5nUkAbAM4ODzmsDUPivBp2pRvotos0CQFGa4GC7tgk8c4GMe/NeSGQ9S1NMgPesnC71LjBJF6+1a7vhGs8zOIwVRSeFBJJA9OSTWa8nYcn1pHJPSo/KZh0IrRJbFNkEsmflU8dz60yJ2icOuNw6ZANWhaMR92lFkx7VVhXH/2tfMctOxJ7mnjVLnHM
hNN+xGpE06V/uoT+FKyGmA1O4/vmnDUrj++alTRbthlbeU/RDU6+HdRYgLZXBJ9Im/wpOw1Iqf2jP8A36X+0Z/79akfg3XZRlNHv2HqLZv8K0Ifhp4quIjLHo8oQdfMdEP/AHySCfypXiPmOb/tGf8AvUv9ozf3j+dXpPC+rRMVl066Rh1DQsD/ACpo8Paj/wA+U/8A37P+FF0HMU/7Sm/vfrQNSmH8X61dPhzUP+fKf/v2aQ+HdQ/58p/+/Zp6BzIqf2pP/e/WnDVph3P51Y/4R3Uf+fKf/v2aX/hHdR/58p/+/Zo0C5ANYm9f1o/tibuT+dSnw9qA/wCXOf8A79mmtoN8Bk2kw/7ZmloFyP8AtaU//rpP7Vl/yad/Yl7/AM+03/fBpP7Fu/8An3l/74NPQNBP7Vk/yaP7Vf8Auig6Pdj/AJd5P++DSf2Rdf8APCT/AL5NLQNBw1Vv7opRq0gPAAqL+y7gdY2/Kk/s2b+4fyo0DQsf2vL2P60f2vIevP41X/s+b+4fypDYSgfdpj0J11Mq25VAb1AqT+2pfU/nVL7FJ6Un2N6BOxfGuTDufzpRr06n5XZfoaz/ALG9H2N6BGl/wkN1/wA9pP8Avo0h1+5brM//AH0azvsb0n2V6LArGh/bk/8Az0b/AL6NL/bc/wDz0b/vqs/7K9H2V/SiwaGh/bc//PRv++qP7cnH/LRv++qzvsr0fZXoDQ0v+EguR0mcfRjS/wDCSXn/AD8S/wDfZrKNo/pTfsz+lAaGyPE98Ol1MPpIacPF2qpwuoXQHtM3+NYn2Z/Smm1k9KYtDeTxjqkcgkW+uRIOjiZgR+Oaj1HxfqOq2ptdRuri7hzuCTys4B9Rk8H3rE+ySelH2ST0pWQFKdULkxqVHoTmo13KQRkEcgir/wBjc9qabR89Komxet70Xcah+JUXGB3HsKeW59qzhbujBlyCOQRVlbgkYl6jv61JaZOCR1qZGB4JwKq7yehpwfj0+lNMGbdjqeoWChbW+uIU6hY5CB+XStqx1/Xby5jt47+4eWRgiKMZYngDpXGCRl/iqxbX89rOk0MhSWNg6OOqkHIIpt6EOCe56fLoPiOaUjWL1o412eYtxPu2qzBQSoJ4ycdK07680rwbrmn2N8Glt3jEsssZAKcsMbfqBXk1zr+pXXmebeysJAquA20MF+6CBwQO1Z0tw8jZdyT7mseWT3YciR12r+PdVu9Kk0eG4MenmaR9o4ZwzFtrHuBnpXGvIScnrTWf0NRnJq4xUSjf0LX7vQZHm0+6kt5pF2u0Zxkelby/EfxH0/te4/MVwg3DsaeA2eKuxLSZ3q/EjxGOmqy/jj/Cnf8ACx/EnfVJD/wFf8K4MB6eBJ70WQuVHdD4jeISedRY/VF/wp3/AAsXxF/0E3H0Vf8ACuEAf3pwD+9HKuwcqO6HxD8RHj+05PyX/Cl/4WD4j/6Ckv5L/hXDDeD3p4L+9Oy7C5UdqfiD4k7arJ/3yv8AhR/wn/iQ/wDMWk/75X/CuLG/3p6lsd6El2E4o7QePvEn/QVk/wC+V/wpR498Rnrqkn5L/hXGhj70/efenyx7E2O3tviL4gtn3PcRTr/cliXH5jB/Wte3+K1zuButMhde/kyFP57q8z3H3pQT70OnB9CbH0mCT2I/AUAdMseKT5/XFJhz/EPxrzrGdx3ydxn8KVdn900gU9z+VOANILigr2BpwAP/AOqmYPFKM9P6U7Idx/A7jP0/+vS9v/rVHgjr1oI9OKLBzEgHc/y/+vThgdGP5VEDjuPzo349KLD5ifp/F+lLv9x+VVvMPqBRu47UBzFoOPUUu8f5NUgx9qM+p/WgOYubyOgFJ5p9Kq7vcUb/AKfnQFytdaFod67Pc6Fp07scl5LVGJ/EiqD+CPCjnLeHdPB/2YwP5Vs78DJzVGTVDKfKsEMzlipkx+6THct357Lk/TrT5mPmZlzeB/BUAD3Gh2cYJwMlhk+gGeT7VWj+H/ha+IYeHIraPceCz73HbPzYX6cn6V0FpYiMrNczNc3QH+ukAGPZQOFH069ya0I1564p8zHdnPJ8NfCgA2+HoD9Wc/1p/wDwrfwt/wBC7b/+Pf411CySIOGNP8+RhkM350+Z9xXZzC/DzwmB/wAi9b5/4F/jT18A+FQMjQbUfgf8a6MyN6tn61E8rep/OjmfcLswx4L8MR/d0Oz+hhB/nTx4W8PJ93Q7Af8AbpF/hWp5h54agtnnkVLk+4XZmr4e0dDlNKsl+lug/kKsJZW0J/dW0Sf7qAVYJX3pCR/tUrjuxoG08KB9BThg9VH5Um7rwaNx6YNJsabHcAfd/KkyvdT+dG446Yoyx5pXGLlB2/WnBk/un86YT14o59KLjJg6Y+5/49/9anB0/ugfj/8AWqDn0H5Uiludygc9jmjmYFgSJn7n/j3/ANaneZH/AHP/AB7/AOtVbnjtScjH+FK4y2Jov+eZ/Ol82I8eXn8ap7j60mT7UXYXLu+L/nl+tJvi7RH86phwSRlTjqM07zD7fnRzMdyyzRn/AJZmmFYWHKfoKi8wHt+tG4e3Si4D/Kt+nlfoKaba1JyYQfqopu4A9aXzF7/zouAhsbJutuh+sYqJ9J05z81lbt9YVNWN6eppQUz1PT1pXYXKR0PSj1020P8A27rTf7C0j/oGWn/gOtX2Kg9aBg/xfrRcVzPOg6R/0DLT/wAB1pv9haR/0DLT/wAB1/wrTKjJ+cfnTdv+0KLgZ39haQf+YXZn/t3X/Ck/4R7RTydIsT9bZP8ACtLYO5FGweo/OncDN/4R3Qz10fT/AMbZP8KP+Ea8PnrommH62kf+FaYhB7r1/vCjyfTH5ii7AzP+Eb8Pf9AHS/8AwEj/AMKafDHhvPOg6Zn/AK9k/wAK1Rbk9MfnThat7fnT5mBjnwp4bPXQ9M/8Bk/wpp8IeGT/AMwLTvwt1/wra+ysO6/nR9mP94fnRzMDEHg3wz30Sx/CIUh8FeFj10O1/BcVuG3x3H50ogz/ABL+dHNIVznv+EI8Lf8AQEt/1/xpp8B+Fm/5gcP5n/GuhMDDoV/Ojyj6j86fOwuc4fAXhZeuiQ/99N/8VR/wg3hUD/kCW347j/Wuj8vdgH+dIYcdKOZiOabwH4UY5/sW3z9GIP4ZrJvvhd4UvXYC0uLEnoYJiV/8eBxXctCev9aTyTzwfzp88u4XPL5/ghpJY/Z9Wvoz/thGH8hVQ/A5c/L4kcfWyB/9qV6w0BVSIiYz7dPy6U3zJEYiRGA7MuSP8R/L3qlVl3DmZ5SPgax/5mU/+AP/ANspf+FFv/0MZ/8AAH/7ZXrKzpgEfMD3BpwnT+6fzp+1l3FzyPJf+FFSH/mY/wDyR/8As6T/AIUQxPPiHP8A25f/AGdevi6j7o3/AH1ThdR/3X/76p+0l3DnZ5CPgSR/zHx/4Bn/AOLp3/Cjgp51sH6Wn/2devC8Vf4H/Og6gOnlfmaftJdw52eSD4LRL11r/wAlP/s6X/hTUX/Qb/8AJP8A+zr1hruJvvREfSomkibkK4o9pLuHMzzBPg3bA/Pq8j/7tsB/7Mau
R/CHRl/1l7fMf9kov81Nd8WXHBYUzd/t/rS9pLuLmZyEXwr8OxDDLdy+7y/4AVaT4d+G4xtOmB/dpH/xrpg2f4/1pSf9r/x6lzy7hzM5r/hX/hr/AKBSD/gbf40h+H3hrP8AyDF/7+P/AI10+R/fH500nPf8c0ueXcV2c1/wr3wz/wBAtf8Av4/+NL/wr3w1/wBA1Pp5j/410ZwOmaTcR3NHPLuFznT8PfDY/wCYYP8Av4/+NJ/wr7w3/wBAsf8Afx/8a6TzcDqaT7Rzg5p+0l3Fc5o+AvDo/wCYYP8Av4/+NJ/wgfh3/oGj/v4/+NdP54OaduBo9pLuK5XDH1NLu6ZJqvv9F/I0AtngEVJKLO/p1o349ar5b0NJyR0/WgCx5nNHm47n86gA9jTgB/d/WgLkvm57t+dJvpuP9ninKqnqtAXDzO3OaC/t+Zp2xD2oCL60DG59v1oP0qTYuOn40BAenSgCI8Z4pMn2qbyuTTZnit4XmmkWNFBZixwABQMZk+1Vb7UIbCNTJueRziKGMZeVvRR3/p34qGS9udQDR6ZGyRkD/S5kIUZ/uKQN/Hf7vuelaFhpFrbs0uwPcP8Afnfl2/H09hge1CQFGK2u9SQHUP8AR4GUZtYny3uHcdfouB15ataGKOKNI40VY0G1VXgAegFWhapg5wPxzUiwRr0HNOwDIo0bkhqmEcXqfypQoHQD60vXtTGJsQcjOaDwOAcUHpx/OomzUgK2DTCikUmD70u0kHk0gGbOMU0qKftPoaaVPPynpQMZtpCPwpxXnpTduPWkAY96MD3NGDzwaNvpSGLgYpDgdqUA/hSEHsKAFyPSjd7H86Z83oaUg45oGO3e5pufY0cjt+tNOQfumkA/J9D+dIT7HNM5/umjHqDiiwxCR3pM/Wl2jPek8sHvSsAnHrRu6UeUKBGR2/WiwXELccZqpJd3aPhbCRl9RIo/rV3Z7frRg4osFyvb3EkyZlt5IT6OR/QmrGQTS7fbjHrTwgI6UWHcYGHoaXdjtUgRaTyx6UWAbuBpQc9jTvLHPFLsA6UWC4gPPSl3f7P5Gl2A5/xpdg7ZoAbn2pCfqKcV57n8aClAXGZ9/wBKAzDnI/Kl2nPQ0m09OfzoC4od+/8AKlD5puxv8mk8s96Yrku760m4d800Bh1pdp6UWAduHGDS7j70zaaXZQIkDAjo3SlPPrTAo75/Onbf85phcTgdjTww96TA7/zoxnP+NArik5HQ0hz2FOB5/wDr07/PWmFyHcfQ0E+oqQr3IppUHt+NAFc2ybmeMmNm5OOhPqR0P86ryu0G3zlyuPmkRTgH3HJA/P8ACtDGBTSPWgCkHVkV0Ksjchgcg/SlEntinyWURZniJjkbqVPBPqR0NQsksQAljz/txgkH8Oo/X60XFYkEqk+/1pdwIFQAhl3DkeoNGcetO4ifK+tHy1X3fjQGHfI/GgCxxjqable/FRZJAw1HzUCH7kpwK5xUJB7gUhGDQFycbT0NBUZ4quQQeM0Bj7/nTsK5OV/CjYO5P51Flvel3MB/9eiwXJNoHajA9DUe49hS7x6frRYLj9opdo9vzpm+l3nHXvTFczlj6HYOtP2AfwD8RUgt3OMAdfWni3fA6D8aCSBVXqVX8qeAvTaPyqYQuPT86cEIHXt60DIlUE8KBTwh9vzqRVAIzg/jT+B1x7e9AXGCNj6/nS+VLng/rUvmLwOKdnjgjpQMgMMm37x59WNN+yseN789xK3+NWc+hAP1pwYDnI96AKwsSDw8h+sz/wCNO+zFT1b8ZG/xp13qFrp9u01zPHFGoyWdgBXKSalq3iQgWAl0/TifmupExLIPRFPTP94/rTsUjU1bWYNMljtkSe6vJMFLa3YlgM9Tk4Vfcmo00+4vZI5tVmWTy33pbRZ8pSOhbPMhHXnAz271NYaZaacri3iCtI26R2JZnPqzHk1dyM5H86BNjzPJjOc8+lL9rkx/9YVAwLKQHYZ/iGMj8xS4x6596AJxdzAZB59MCj7fP/ex+FQdB/8AXpCcAcfhSGWvt0w/iH5Un26bAJIz9KqFschR1o3c5xQBbF/Ngcr+VH26X0WqgYDtT8jrt5pBcnN/IOy0fb5scBfyP+NQkFgBjB/OkC+1AXLH9oTDjYp/A/40f2hIRgqv5f8A16r4JHvSFWHpmkMs/bWP8A/z+NIbtz/yzH51Dg9MGjbzjmgZL9qfOCi/nTvtJH/LMfnUOOen60EfWkBObpgOEH/fX/1qPtR7oPz/APrVBg4PagjuKAJvtXP3B19aX7Rnt+tVtpx1zzThSGWvMJ/hH504OD1H61XBPbA5xmgE98UwLG4dSD+dGFPYn8ag3c/z5oB9T9KB3Jwqen60YXt/OoMnPBpAxzgmgLkxHsD+NKCTjC/rUIc98YoMpzwP1oAl59B+dJkg/dGPrUPmtjnr9aPMPQntQFyXceOP1pRI3ZePrUJOcfTrSkk+nT1oC5L5rf3DS+Yc52H8/wD69Q7iAcn8qPM4PP50rBcmM5/55n9P8aTzjnlG/DH+NRb8Z4H50hkx9O1AE3nHk+W36f40n2og/wCpf8x/jUZfrzSE9/60wuSm7P8Azxkx/wAB/wAaT7XnnyZR+X+NRbm/D60bj7UguTfae/lSfp/jTTc88wTfp/jTA5/ClLYGT0osK48XIH/LGX9P8actyP8AnlKPy/xqDdwCBnn1oZgew6+tMLljz1HRG/Kl+0AfwP8AkP8AGqwbkY6fWnA8Z/rQBN9qX/nm/wCQ/wAaPtS8fupPyH+NQdSMg/nSgD0P50Bcn+1oDyjnHoKQ3yqeYZceoXNQgD0pe3pSAm+2Rnosn/fJpftUQ/56f98H/Cq/PXA6YoHA6fWmIsfaoh2f/vg/4U4XkIP8f/fDf4VVyucU/cB6Y+tAFn7RF2Y/98n/AAo+0R5+8f8Avk/4VV3c5yKcHHrj8aALP2iI5w4/I0nmJ13j9ahGDSgDp/WgVyQzRj+IUn2iP+9Ue3OTj9aNv0FMZBNFG0pkjYo56lejfUdDVZ52gAE67gWI3RKxwPcY4/M1ocAUhUGhCKasrAEK4B5BPFH8I4b8BUzwg9DtPtVdvlI3AJnuW+Un69vxpiJVZeM7h9QaeGB7n8jUXp1/OlC8j1NNCZJnjO4YpmByc0oGRnP60uD/AHv1oFcZ8v8AepDs9T+dOwfc/jRjsDyKAG5QA4OfxpRICOmKQqeeCM+9JsOM5oAcGQnqKUlfVfxNRhev9DS459/c0wA4Azx+dISnTcP++hSH3/nSHOeCPzosK5//2Q==" + ] + } + }, + "widgets_values": [ + "[{\"x\":174.49402268882744,\"y\":383.8868499154203},{\"x\":173.05192332776272,\"y\":200.4518111879918}]", + 
"[{\"x\":174.4940185546875,\"y\":383.8868408203125},{\"x\":174.46397399902344,\"y\":380.0652770996094},{\"x\":174.43392944335938,\"y\":376.24371337890625},{\"x\":174.4038848876953,\"y\":372.4221496582031},{\"x\":174.37384033203125,\"y\":368.6005859375},{\"x\":174.3437957763672,\"y\":364.77899169921875},{\"x\":174.31375122070312,\"y\":360.95745849609375},{\"x\":174.28370666503906,\"y\":357.1358947753906},{\"x\":174.253662109375,\"y\":353.3143310546875},{\"x\":174.22361755371094,\"y\":349.4927673339844},{\"x\":174.19357299804688,\"y\":345.67120361328125},{\"x\":174.16354370117188,\"y\":341.8496398925781},{\"x\":174.1334991455078,\"y\":338.028076171875},{\"x\":174.10345458984375,\"y\":334.2065124511719},{\"x\":174.0734100341797,\"y\":330.38494873046875},{\"x\":174.04336547851562,\"y\":326.56341552734375},{\"x\":174.01332092285156,\"y\":322.7418212890625},{\"x\":173.9832763671875,\"y\":318.9202880859375},{\"x\":173.95323181152344,\"y\":315.09869384765625},{\"x\":173.92318725585938,\"y\":311.2771301269531},{\"x\":173.8931427001953,\"y\":307.45556640625},{\"x\":173.86309814453125,\"y\":303.6340026855469},{\"x\":173.8330535888672,\"y\":299.81243896484375},{\"x\":173.80300903320312,\"y\":295.9908752441406},{\"x\":173.77296447753906,\"y\":292.1693115234375},{\"x\":173.742919921875,\"y\":288.3477783203125},{\"x\":173.712890625,\"y\":284.52618408203125},{\"x\":173.68284606933594,\"y\":280.70465087890625},{\"x\":173.65280151367188,\"y\":276.8830871582031},{\"x\":173.6227569580078,\"y\":273.0615234375},{\"x\":173.59271240234375,\"y\":269.2399597167969},{\"x\":173.5626678466797,\"y\":265.41839599609375},{\"x\":173.53262329101562,\"y\":261.5968322753906},{\"x\":173.50257873535156,\"y\":257.7752685546875},{\"x\":173.4725341796875,\"y\":253.95370483398438},{\"x\":173.44248962402344,\"y\":250.13214111328125},{\"x\":173.41244506835938,\"y\":246.31056213378906},{\"x\":173.3824005126953,\"y\":242.489013671875},{\"x\":173.35235595703125,\"y\":238.66744995117188},{\"x\":173.3223114013672,\"y\":234.84588623046875},{\"x\":173.29226684570312,\"y\":231.02430725097656},{\"x\":173.26223754882812,\"y\":227.2027587890625},{\"x\":173.23219299316406,\"y\":223.38119506835938},{\"x\":173.2021484375,\"y\":219.5596160888672},{\"x\":173.17210388183594,\"y\":215.73806762695312},{\"x\":173.14205932617188,\"y\":211.91650390625},{\"x\":173.1120147705078,\"y\":208.09494018554688},{\"x\":173.08197021484375,\"y\":204.27337646484375},{\"x\":173.0519256591797,\"y\":200.45181274414062}]", + 720, + 480, + 49, + "path", + "basis", + 0.5, + 1, + "list", + 0, + 1, + null, + null, + null + ] + }, + { + "id": 83, + "type": "AppendStringsToList", + "pos": { + "0": 334, + "1": 915 + }, + "size": [ + 315, + 82 + ], + "flags": { + "collapsed": false + }, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "string1", + "type": "STRING", + "link": 212, + "widget": { + "name": "string1" + } + }, + { + "name": "string2", + "type": "STRING", + "link": 211, + "widget": { + "name": "string2" + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 217 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "AppendStringsToList" + }, + "widgets_values": [ + "", + "" + ] + }, + { + "id": 86, + "type": "AppendStringsToList", + "pos": { + "0": 683, + "1": 916 + }, + "size": [ + 315, + 82 + ], + "flags": { + "collapsed": false + }, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "string1", + "type": "STRING", + "link": 217, + "widget": { + "name": "string1" + } + }, + { + "name": "string2", + "type": 
"STRING", + "link": 218, + "widget": { + "name": "string2" + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 219, + 220 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "AppendStringsToList" + }, + "widgets_values": [ + "", + "" + ] + }, + { + "id": 65, + "type": "CreateShapeImageOnPath", + "pos": { + "0": 1189.82080078125, + "1": 1284.833251953125 + }, + "size": { + "0": 313.4619445800781, + "1": 286 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "coordinates", + "type": "STRING", + "link": 219, + "widget": { + "name": "coordinates" + } + }, + { + "name": "size_multiplier", + "type": "FLOAT", + "link": null, + "widget": { + "name": "size_multiplier" + }, + "shape": 7 + }, + { + "name": "frame_width", + "type": "INT", + "link": 149, + "widget": { + "name": "frame_width" + } + }, + { + "name": "frame_height", + "type": "INT", + "link": 150, + "widget": { + "name": "frame_height" + } + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 153 + ], + "slot_index": 0 + }, + { + "name": "mask", + "type": "MASK", + "links": [ + 154 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CreateShapeImageOnPath" + }, + "widgets_values": [ + "circle", + "", + 512, + 512, + 12, + 12, + "red", + "black", + 0, + 1, + [ + 1 + ], + 1.3 + ] + }, + { + "id": 68, + "type": "ImageCompositeMasked", + "pos": { + "0": 1528.82080078125, + "1": 1280.833251953125 + }, + "size": { + "0": 315, + "1": 146 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "destination", + "type": "IMAGE", + "link": 155 + }, + { + "name": "source", + "type": "IMAGE", + "link": 153 + }, + { + "name": "mask", + "type": "MASK", + "link": 154, + "shape": 7 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 156 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageCompositeMasked" + }, + "widgets_values": [ + 0, + 0, + false + ] + }, + { + "id": 91, + "type": "Note", + "pos": { + "0": 1565.82080078125, + "1": 1475.833251953125 + }, + "size": [ + 251.63747656176258, + 73.90463053872986 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [], + "properties": {}, + "widgets_values": [ + "This is only for visualization" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 85, + "type": "SplineEditor", + "pos": { + "0": 232, + "1": 1226 + }, + "size": [ + 765, + 910 + ], + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "bg_image", + "type": "IMAGE", + "link": 216, + "shape": 7 + } + ], + "outputs": [ + { + "name": "mask", + "type": "MASK", + "links": [], + "slot_index": 0 + }, + { + "name": "coord_str", + "type": "STRING", + "links": [ + 218 + ], + "slot_index": 1 + }, + { + "name": "float", + "type": "FLOAT", + "links": null + }, + { + "name": "count", + "type": "INT", + "links": null + }, + { + "name": "normalized_str", + "type": "STRING", + "links": null + } + ], + "properties": { + "Node name for S&R": "SplineEditor", + "points": "SplineEditor", + "imgData": { + "name": "bg_image", + "base64": [ + 
"/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAHgAtADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDWAoFLigfSu0+YExRinYoxQAgFAFKBSgUAJilxxS4oxQAmKMUtGKBiYNFLiigBMUGloxQhhijFLilAoEAFFOxRigBMUAUuKWgBuKMe1OxRigBuKUClxRigBMUtGKKADFJTsUYoAbTgKMc0YoAMUmKXFLigBtFOIpMUgDFFLijFACYoxTsUYoAbiinYpMUwEoxS0tACYpMU7FIRSATFLikxTsUCExRilxRigBKMUuKKYxtGKdigCkIbilpcUEUAJSYpcUuKAG4oxS0UAJiilpCKYDTRilxxRikISjFLRigBMUY46UuKMUANxRilIoxQAmKKUijFADaMc07FGKAGYoIxT8UhFMQ3FJTsUY5pCG4oApcUY9qYDSKTFPx7UmDQAzFGKeR7UYoEMxS4p22kxQAmKDS4ooAZR+FOIpCKAG4paXFJigB4FGKdiig0EpMU6jFADcUtLRigBKWjFL2oASlx7UAUuKBjaMU/HNJtouAmPaj8KdRSGNFOAopaYgo70tLigBKKXFGKQCUtGKWmAlJTqMcUgG0U4UYoASjFOxRjNADcUU7HtRj2oATFFLQKAEoxTsUUANoxTsUAUXASjFLiloAbijilooAbilxS8UtADaPwp1JigBPwpcUfhS0ANxRTqSkIKTFOxQBQAlJTiKMUAJSYp2KCKAG4oxTsUUwG0YpcUtADaTFOIpMUCGkH0ox7U7bRigBuKMe1OxRQIbgelJjmnYoxQMbiilxRigBtFOxSEc0AJ+FFLilxQA3FGKU0UCYmKbg0+kxQIbiinYoxTENxRinYpMUAJikxTse1H4UANx7UmKdj2oxSAYRRinEUYoAbikxTsUYpgNoxzTiKQ9TQIdRilApcUGo3FGKdijFACbaMU4CigBMUYpaXFFwsIBS4opaQxuKMU6jFIY3FGKXFLRcBMUuKXFLigLCYpcUtFFxWExRinYoouOw3FGKdij8qBWExRjil70YouOw3FLg0uKXFFxWG4pcUuPalxRcY3FGKdijFADSKBTsUmKQhMUYp2KUCmFhuKMe1OxRigLDcUYp2KKAG4pMU+kxQAzFOxS4oxQAmKMU7HtRigBmKdijFLQAm2kAp9JjmlcBMUYpxFJQAmKMUtFO4hMUY9qWjFFwEA9qMUuKKAGkZoxTqMUANIpMU/FJigQ3FJin4oxQAzFHenYowfQUANxRTse1GKAG4pNvtTsUEc0ANxQRS0YoEJijFLijFADSKTFPoxTAbikwKcR9KTFIQ2ilopgJijFLRQA3FGKdikxQIQUYpcc0YoAaRRinYpMUCG4pcD0paMUAMIoxTiKTFMY7FID6V3K6Fpq/8uwP1Ymnro+nIci0iz7jNcDx0OzPV/s6fdHC9aK7/APs6xHSzg/79ipY7eCP7kES/RAKl49di/wCzn1kedgE9Bn6VKltO/wB2GRvopNehYX0FKMVP159i1ly6yOBGn3hGRaT/APfs/wCFKNOvT/y6Tf8AfBrvajj+5/wI/wAzS+uy7Ff2dDucSuj6g3S0k/EYp39i6j/z6v8AmK7jnGM0c+tL67Psh/2fT7s4kaHqJ/5dj+LCn/2BqP8AzwH/AH2K7Ln1o59aX12p5D+oU/M43/hH9R/54L/32P8AGl/4R7Uf+eK/99iuxpfxpfXKnkP6hS8zjx4d1A9UQfV6evhy+PeIfVv/AK1dbRS+uVPIf1Cl5nKf8I1ff34f++j/AIUf8I1e/wB+D/vo/wCFdWOtL+NH1uoP6jS8zk/+Ebvf+ekH/fR/wpf+Ebvf+ekH/fR/wrq8UYo+t1A+o0vM5T/hG73/AJ6Qf99H/Ck/4Ru+/vw/99H/AArq6X8aPrlQPqNLzOS/4Ry+B6w/99f/AFqT/hHr8D7sZ/4HXXfjR+NH1yoH1Gl5nI/8I/qH/PND9HFH9g6h/wA8V/77FdaG+dlzyAD/ADpc+9H1yp5C+oUvM5A6DqAH+pH/AH2KYdGvwf8Aj2J+hFdn+NB+tP65PyE8BT8zi/7Hv/8An2b8xTG0y+Xrayfgua7bNLn3p/XZ9kL+z6fdnDGwvB1tZ/8Av2aja2nT70Mi/VTXe8+tHXrT+uy7C/s+Pc8+II4IOaXBrvtinqq/lUbWlq/37aFv95AapY1dUQ8u7M4TFLiu2bTLFutrEPouP5VE2jae3/LAD6MatY2HVGby6fRo47FGK61tBsD0R1+jmoW8OWpA2zSg++DVLGU2S8vq+RzFGK6FvDQx8tzn6p/9eom8OTgfLNE31BFUsTT7mbwVZdDC70uK1G0G9X+BD9GFRNpN8nW2c/TBrRVoPZmTw1VfZZRxSYxVl7S4iHzwyL/vKRUJUjqKtSTIdOS3QzFGKdiincnlG44o606ii4rCEUU6jFAWGEc0mKkxRimIZijFP20hFADaMU4CjHtSAbRilxS4p3EMxRTsUUXAaRSYp+KTFAhtLS0Y5oAaaMU4ikpgNNJj2p+KMUAMxS4paWgBmKXFLiloAZijFPxzSYoAZj6UYp2KTFAhhFGKfijFAhmKMU/ApMCgBmKMU/H0oxQAzHNFOIoxQAwiinYoxTENxSYp+KSgBp/CinYpMCi4M9Hooor54+sDFFFApAFGKWkp2HcWo0+4fqf5mpKaowp+p/nQFxaKXFGKAEpKUijFACd6XvSgc0uOaQxMUYp2PpRigY3/ABopaMUwCijFGKAAjikFOxSYpAIKWl2k0bTTAiX/AI+JP91f60+gIR
Mx9VH8zS4NABSU7FAFAhtLS4oI9KBiUUYpMcUALRSYpaQB9aM0hpQKYBmk/KlxSfiKQBRRiloATNLmkOc0UDFz7UxVVl+ZVPzN1HuafTI/un/eb/0I0xNET2Vq/wB63iPvsFQNotgw/wBRj3DH/Gr1FUpyWzIdOD3Rjv4dtjnZLKv1wagbw4c/JcD8VrfFLVrEVF1MnhKL6HLyeH7xASpjf0Abn9arvpV8nW3Y/Tn+VdfS5rRYya3MngKT20OGkgli/wBbG6f7y4pgFd4cHggH61A9rbSD54ImPqVFarG90Yyy7tI4vFIRXWvo9i//ACy2/wC6xqtL4ft2/wBXK6n35FaxxlN7mMsvqLbU5vFGK2n8PTc+XMh9Mgiqr6NfR/8ALHd/usDWqr031MJYWrHeJn4oxU0ttNCf3kTr9RURFaKSexg4SW4hFNxT6Sncmw3FJin4oxTuKwzFGKfikxQIbilxS4pcUANIpMU7FBFADMUYp+KQimA3FGKUClxSuIZijFPxRincCOjGafikxzRcQzFGKdijFAhuKMU7FGKAG4pCKcRRQIaBRinUmKAGYpcU6jFMBm2jFPxSEUDGYo20+jBoFc6WTxr4bibDatEf9xHf+QqrP8QvDcIJS7lmPpHAw/8AQsV4wKO9fPn2vsonrZ+J+hDpb35P/XNP/iqhPxS00H5LC7I9yo/rXleBTgKVx+yient8U7LHyaZcE+8ij+lQt8U1/h0cn63P/wBjXm+KXoaLsfs49j0J/ilKfuaQi/705P8A7KKrt8TdSIwllaKffcf6iuGzS8EYpXY/Zw7HaH4maz2ttPH/AGzf/wCLpr/EjXGGBHZL/uxH+rGuNGBT6LsOSPY6d/iB4gY/LcxJ9IV/qKZ/wnfiMn/kIAfSCP8A+Jrm80oNK7Hyx7HQt428Rtx/aTD6Qxj/ANlqM+Ldfbrqk/4YH9KxM0uaV2Oy7GwfE+uN11W7/CQikPiPWj/zFr3/AL/t/jWTmlzRdlWRp/2/rJ/5i19/3/b/ABpp13V++q3v/f8Ab/Gs/NLmi7CyL41vVf8AoKXv/f8Ab/Gl/trVD11K7P8A22b/ABrPBpQaV2FkX/7a1Uf8xK7/AO/zf40o1zVh01O8H/bZv8aoUZFF2OyNH+3tYHTVb0f9t2/xpw8Qa1/0Fb3/AL/t/jWbRnFF2Fkay+I9aHI1S7/GUmnr4p1xTldTn/E5/nWOGpQaV2PlXY3l8ZeIFGBqLH6xIf5rUi+N/ECnm9VvrCn+Fc7S5ouxcsex1KeP9aXhvsz/AO9F/gRUw+Ier97exP8A2zf/AOKrkKcKOZi5IdjsY/iJqAP72ytWH+xuX+pq2vxHOBv0oe+Lj/7GuEyKOKOZh7OHY9BX4jWxHz6bMD/syg/0qVPiHpzH57S6Ue20/wBRXnPFHFHOxexh2PTo/H2iucFbtPdox/QmraeMdAfH+n7T6NC4/pXlFHFPnYvYQPYE8R6K4yup230LY/nVmPVtOkxt1C1Oen75f8a8WwKMUe0YvYR6M9zRlkGY2Dj1U5p2Mda8MjkkiOUdlP8AsnFWY9Tv4T+6vblP92Vh/Wn7QX1fzPacUYryODxRrlvnZqU5z/z0w/8A6EDVyPxzrsagGeGT3eEZ/TFNTQnQken4pF4H4n+ZrzyP4gamMeZbWjDvhWB/nV6H4iLwJ9N+rRzf0I/rT5kS6Mztu9HauWh8e6VIwEkVzD7lQR+hrQj8XaDL0vwp/wBuNx/SjmRDpyXQ2aPxqjFrmkzECPUrUk9jKAT+dX1xIgZGDKe6nNMlpoaaKdijFAhDSU40lACfjRzS0YoABRRRQAhqJ7eCT78MbfVRUtHSmm1sJpPcz5NGs5eiMhPdT/jVOXw8Cf3VwQPRlzW4KK0VepHZmMsNSlvE5iXQ7tPuBZB7HH86qSWVzF9+CQD/AHTXYntRW0cZNbnPPAU3s7HD4oxXZvbQSA+ZCjH3UGqkmi2b52qyH2b/ABraOMi9zmll0ujOXorcl8PnrFOD7Mv9aoyaVeR5/clh6qc1vGvCWzOaeEqx3RQxmlxUjxPEcOjKfQjFNFbJpnO4tDce1GKdRii4rDMUmKfjFGKAsMIoxTiKTFNEjcUU6kIpgNwKTFPpMUhDaMU7FLTEMIpKeaTFADcUU7FFADTSYp9JikMbikp+KQincQwilpaMUEs8woFIKXvXgH3Y7NKDTKcKQhwpaQUtIAoopR0oGKD60ozTRS0gHd6Wmc+tLmgB4NLmmfjS596VgHg07NR5pc0WHcfmjNNDe9KGpWC48GnCq88siQO0Sh5AOFJxms9dYi8tR9tg88H5o2XA/PPFUoN7DubGaM1FHKJEDDHTnBp26psA/NLmo93vS7qLBcfml3e9R7qXd70rDH7qXNR7qN1FguSZo3YqPd707cKLASBqM1Hu96XdRYLkmaX8ai3e9LupWGS5pQai3UBqLAS5pc1Hupd1Fh3H5ozTN1G6iw7kmaKYGpc+9ILj88UAim596QdOvc/zoAk4oxTPxpQT60rDHUqSSQtuikaNvVDg03dRmgRfh1zVoMeXqN0MeshP860bfxrrsDAtcpMB/DLGP5jB/WufzRVJsTjF7o7CH4h3yt+/sbd19ELJ/PNaEPxDtHH76wmjP+w4cf0rz/8AGlwKOeRLpQfQ9Pg8b6JKcPNLCf8AppEf6ZrTh1vSrjAi1G2Ynt5gB/I145jmlxT5yXQie4IySLuRldfVTkUteK295d2mfs1zNDnr5blf5Vfg8Ta5btlNSnPtIQ4/8ezVc6M3h30Z6170YrzqLx9qiACWG3l99pU/oa07b4hW7H/StPlT3ikDfocU+ZEOjNHZUGsW38XaJcYH2zymIziVCuPxxj9a0oL6zuuILuCUnskgNO6IcWt0TelFKVxxRimSJRmlpOfWgVgoowaOaAsNZVcFWUEehFVpdMtJRzCFPqvFWjmiqjNx2JlCMt0Y82gDrFOR7OM/qKpTaPdxDIUOP9g5rpqK2jiqi3OaWCpS2VjjJIZYj86Mv1GKZXaOiyKVdQynqCM1Vl0uzlHMQX3TiuiOMX2kcs8uf2WcrikxW9LoKnJimIPowzVGXSbyL/lnvHqhzW8cRCWzOSeDqx3Rn4oxUrxvGcOrKfQjFR1smc7g0NI4pMU7FGKdyXETFJin4oxTuTYZijFOxRii4WGYoxTse9G3mmFhmKXFKR70EUh2G0YpcUY96YhhBopxoxQKx5UDTs1CGHrS78d68Cx9yTDFOFQCUU7zR60rBYmHWlFQ+co7003kS9XGfY0WY7Fmioo51k6BqmJAUk8YHepCzEpcgUxmGOtM8z3phYmzSZqHzfc0eZmiwE+73o3e9Qb+epxS7x60WCxNuGKUPVfcT3pd9FgJ99LuqDdRvosFifd70yFRbRXcULNHHdqVnRTgSA8EEVHupd1VqthDoI47aIRxKQg4AyTUu+oN9HmUrDLAajfVYuTShjipsBY30u+q+6l3Uxk+/Bo3iod3FAb60WAn30u+oNx96Nx96LAWNwo3VBuNKGJpWGTBqcHqvk0uTRYCffS76
g3GgPilYZY3Uu6oN/1o8z60coXJw1LuFQeZ9aPM+tKwFjeKXcKreZ9aXzKLAWS+Kgubs28G9V3HJzk4AHqabvpj7Jo2R1DIwwQehppdxjRqbrbCWQx7T0YKwQ+mG6fzq9HLvjViMEgEj0NZzWweyjsnmna0iYMkDTMUUjpgE1aVsCnNLoJXLO6nBqrb6cHqLDuT5pc1BvpQ1KwE2aUGog1AeiwE2aM1Hv8AelDUrBckzS5pgajNFgH5pabmjdQFx1NKgnkA0uaKQXLtvrGp2iqsF9cIo6L5hIH4dK17Lxtq1s3+kNHdJ6OgUj8RiucJparmYnFPc7q2+IMTuBc6e6LjlopA3P0IH861rfxhok7BTdNEx7SoQPzGRXl4ApQKamzN0ons8N1bXQBguYZQenlyBv5VNivE0Z4pA8bsjjkMpwRWlB4j1q3xs1Kc+0hDj/x7NVzmbodmeskUmK4C38f36DFxa282B1XKE/zH6VsWfjzTZyFuoZ7Zj3wHX8xz+lPmRm6UkdPRVS31jS7v/U39uxPRS4DfkeavbeM9jVENNDKDT8UmKBDKWlIpMUDGMiOu11DKexGapT6TaTAgIY27FeP0q/SE1UZyjszOVOMviRhS6FIOYpQ3+8MVSl0+4gHzxNj1HIrqcUoHFdEcVNb6nNPBU5baHGlfrSY+tdbJaW8xJkhQk98c1Rl0OJuYpGU+h5FdEcXF76HHPL5r4dTBIpMVfm0q5iPCeYPVKqNGyHDKQfQjFdMakZbM5J0ZR0kiKjFPxRiruZcozFJipMUhFAWGYoxT8Um2mKwzFIRUm2grQFjxbcaaXI71gP4lH8Fsfxf/AOtUD+I7hvuQRKP9rJrx1Skfb88TozIfU0hd/WuXbXL9j8rIvsEz/PNNGrak3SU/hGP8KpUWT7SJ0z727mo4YXE4JbIx0rmpLzUZOTLL+HH8qvaE10+qJ57yMu08MxNEqbSbuNTV9jtLSPgVPeKEtJWZgFCHJJwBS2qfKKTXF/4kt2B3iIri+0VzHNjVbaDj7XGR6Bs07+3rHk/aV/I1y32L1f8ASlFiP7/6V3+yiZuUux03/CQ6eOsz/ghpp8S6eP4pW+if/XrnRYp3Y04WcQ65/On7OAryN4+KLEdEnP8AwEf40xvFMAPyW8h+pArG+yw/3T+dL9nhCn5P1NHs4CvI1f8AhK17Wjf9/P8A61J/wlef+XM/9/f/AK1c0DU9qqvcoGGR6fhVeyh2FzM3v+ErYf8ALmP+/v8A9aj/AISuTtZp+LmqYji/55L+QpQkY6Iv5VPLDsVqTv4ouz923iH5n+tRHxJqR6eWv0SlG0dAB+FLuHpRaPYdn3Iz4h1Q/wAaf9+xR/bmrHpJ+UY/wqTcPSkzTSj2Cz7jDq2rn/lq4+iD/Ck/tbV/+e0v/fA/wqTPtRmnZdhW8xn9p6uf+W83/fP/ANak+36v/wA/E/51KGoBpadgt5kP27Vv+fmf/vql+36sP+Xqf/vqps0Zo07BbzIf7Q1b/n6n/wC+qP7Q1b/n6n/76qXNGaenYLFc6vqqPt+2TbvTNSDVtaHS4m/FR/hVWbH2z8RV8UNJdBLUjGsa0P8AlvKf+AD/AAp39t6yOsz/AIxj/CpM0bjS93sOw0eINYX/AJag/WIf4U4eI9VHJaP/AL9CjNGaLR7BZ9yVPFl8vDwwv+BH9amHi6X+KxXPtIf8KplUP8IP4UnlR/8APNP++RScYdh69y+PFx/isfyl/wDrU8eL4+9m4+kmf6VlmCE9Y1/Dimm1g/ufqaXs4dg1NxPFtiR88Nwp9lU/1qVfFOnE8+cv1T/A1z32SD+4f++jSGyhP94fQ0vZUx3kdQPE2lngTMPrGami1rTZMAXsI4/iO3+dcAyhZio7EimsOlDw8Re0Z6ZHcwTDMU8Tj/ZcGpQx+tebx2LPGrhhyKlVL63/ANTPKo/2JCKh0F0ZXO+x6JvpQ9cEmqaxCu0XEpH+0ob9SKeviLVoz8zq/s0YH8sUvYS6MXOd2GpweuMi8XXaj97aRN7qSv8AjVuPxhD/AMtLORfUq4P9BUOhPsPnR1Qel31gweKNMmOGeSH/AK6J/hmr0eq2Ev8Aq7yA+mXAP5Gs3Tkt0PmTNAPTw9VVkD/dIb6HNODGpsMtB6XfVcNijd70rBcsB6dvqtu96cHosBZD5pwb3qsr08NSsBPu96A2Rmod2aUNilYLk4NLuqIPShsmiwEoNBPNMBpc0gHUcd6SlHWmAYFWrPUb7Tyfsl3NCCckI5AP4dKrGlFAM6Sy8capbv8A6T5d0nowCn8wK3bTx7Yy5F1azQHsUIcf0P6V59S1Sk0ZunF9D1q017Sr4gQX0O48BHO1vyNaGM9ORXiu2r1lrOpadxa3kqL/AHSdy/keKfOQ6PZnrZWmkV5/aeOtTgwtxFDcr3JGxv04/St2x8babdOEuY5LUn+JvmX8x/hVKSZm6ckdHiiobe+s7xd1tdQyj/YcGp9pqiLCUlOpvekApqN4Y5VKugYHsafRTTsJq+5nTaRBJyhMZ9uRVGbSJ48lMOPbrW9iito4ipHqc88LTn0OUkheI4dGU+4pmK6x40kG10DD0IqnJpVs4JQFD7GuqGLT+JHHPANfCzn8UYrSm0maMZQiQe3Bqi8TxnDqyn3GK6I1Yy2ZyzoTh8SI8UhFPINJitLmLifMCxRj+EU8Kg6KPyqMNkUu73rh1PsNCXj0oyKi3UbqQyQmtHRBu1Jf901lbq1/DmG1UD0Q1M/hYrnd2cfyiovECbdFuT/sVoWcXyD6VV8TDboN1/1zNcNveQKR5nmlyfSoCxBpRIDXfYLk26kLVCT3o3U7Bcl3Uhb5T9Ki3UFuD9KdiSitWLU4ul/H+VRIKliG24X6/wBKt7ELc0N9G6ot1JurOxoTbqUNUG6l3UWAm30b/eod1G+iwE2+l31Buo3UWC5Nv96XfUG40bqLBcn8yl31X3H1p2/iiwE2+gPUO+jdRYCGU/6WD7ir26qL8zZ9xVjdTaJTJ91G6qrfMQdxGPSlAKHdubB9ehpWKuWd1LuqDfRuosFybdzTt1QB+aN3vSsMn3Ck3Cot3vRuosBLuFLuqHdShqLAUJObhv8AeNNYdPrUm3dM3+8aJEwB9a0MzRt+IE+lP3VDCcQp9Kdmsmaku7igtUefejNIBxVG+8in8KY1vA38A/Cl3UuaYiJrGEj5dw/WojYej/0q2WpU+dgq9ScCndhZFT+zbuOISoSFJ4IfFTRX+sWv3bifHoTuH65rSvZAm22ThYwAfc1U3VKk3ugcUOi8ValCw81Y5FHUFcE/iK0Y/GNu2PNtZU/3WDf4VlkBxhgCPeo2tYG6p+RxScYPdCszpofE2mSgZmaMns6n+nFaFve21z/qLiKT2RwTXCPYRH7pYfXmoWsZByrAn64qXRg9mNNnpQbmnh/rXnMGoatZ/cnmCjjDfMP1zV6DxZfxECZIpV78bT+n+FZvDy6D5kdzvpfMrlIvGNu2BLayoe+xg3+Fa1rrVhdKvl3KKx/gc7SPzrN05R3Q00zXD04Pk1WByM9qerVF
gLO+nBhVfdTg9JoCwDUFxqEdsVBUsSeTkAD6k0oeqs+mWd1dR3M0StNGMKTyPxB4P4iiKV9RNltdRhknWNFLq3R1IK1aBFZllp1rYSSPbxhXlOWPQfgBwPwFXg9ErX0AmpajDZpwNSA+lpuaXNABiijIpc0AIMg5HBFatn4j1ayAWO8dkH8Mp3j9elZYpe9MnRna2fjtPLVb2zbeOC8J4P4Hp+dbdr4l0e7IC3axsf4ZRs/U8V5gKXFVzMhwiz2NSrqGUhlPQg5BpcV5NZ6lfaeQbS5kiGc7QflP4Hit2z8b30LYu4Y7hMclfkbP8v0qudEOmzuxRWJYeLdMviVd2tnA6TYAP0PStpGSVA8bBlPQqcimjNprcMUYpSKQimISkZFcEMoI9CKcKKAKE+lW8mSgMbe3T8qoy6TOnK7XHt1rcpe9bQxE49TCphqc+h8U7qN1R5o3e9dVj0OYl3Ubqi3e9KGosFyXNb3hIb9ax/0zP8xXO7q6TwUN2tn/AK5H+YqJL3WKT0PULSL92KzPFa7dAvD/ANM/6ity1XEYrH8Xj/inbz/rn/UVx21RFOWp5CzZpm6kY88Uw13pGrJA9O3ZqDNKGosK5Juo3cGo80ZpiJIkzmnAbZ/8+lPt1+99aHGJj9f6U+hK3HE0maQ0VBoLmlzTcDNLigLhmlzRtpQhJwBmgLiZoqUW8zfdic/RSaeLK6PS2m/74NFmLmRXpasjTr09LWb/AL4NO/sq/P8Ay5z/APfBp2YuZdypRV0aRqBP/HpL+IxT10TUGP8Ax7MPqRRZhzx7lDNGa0x4f1AjPkqPq4pf+Ee1DH+rX/vsU+Vi9pHuZbKcZ9x/WjmrRtJRP9mIHm71TGe/NXB4evj/AAIP+BinysXPFbsyc4o71rf8I5ff3Y/++xR/wjt9/wBMv++6XKx+0j3MrNLnNah8O3+OPK/77ph0C/X/AJZo30cUuVh7SPczgSDS7jVw6Rfqf+PVz9OaadMvh/y6Tf8AfJo5WNTj3KuTS5NTmwvF62sw/wCAGm/ZLn/n3l/74NKzDmXci3UobkUphkXrGw+opoU5osVcZEuZj9TT7lNsa/Wlt1zKfxqS8H7tOP4v6VdjO42M/u1+lO3UxBhF+lFZtGtx+6jdTDRnilYCTNG6osmjNFguTbqvaaFBkuJPuRL+tZgPNXpj5FlHD0Z/3j/0qJLoUiKSQu7Mx5JyaQNxUW6jcc1SQmyYGlzUW7mnbqTQEmacKh3ZpwakMlzUM4VoXyASAacGzTZf9S/+6aEDMpaU8EUkYp7CtjJbFmE3tuA8LyJkZBRv8K07XxVfwECdUmUdcjDH8RTYP+PaL/cH8qHhSX76A1jKz3Roo9jct/Fmny4EvmQHvkbh+Y/wrWt7+1u8/Z7iOQjqFbn8q4V9PibOMiq7afKnMZzjpg4NZulB7B7x6ZmlVq8+t/EGrWXytJ5i4wFmXP69a1bTxmpAF5alT/eiOR+R/wAazdCS2C6OvDU7dWZaarZXqgw3KEn+AnDflV7cRWTi1uBOGpwaq4fmnhqmwyffTg1V91ODUWEWAacDUAenB6LAT5paiDU8NxSJHilFN3Zp1AC0UlLQFxRUsM01u2+CaSJvVGKn9KizTs+1AHQ2PjLULZgt0FuowMc/K35gfzFdDaeL9KuRiSR7dsdJF4/MV5517Uu3IpqTJcUz1yCeG5jEkEqSoejIwIp5FeS21xPZy+bbSvE+MFlOM1v2vjPUIdqzxwzqOpIKsfxHH6VSkQ6fY7rFFY1h4p029bY7NbvjpLgA/j0raUrIoZGDKehByKozaaPiGjNGB60nA716ZqLRSbhRvAHA5oFckHSup8CjdrrD/pif5iuULkEjA/A5rq/h9l/EEntbsf8Ax5aiWwpS0PXLZMRjisXxiMeG73/c/qK37df3Y+lYPjQ48MX3+5/UVy21RnTep4ue/IphZR3FNfrUZ6HPWuw2cidFLhmUZCjJIHSpUgLmIKykydMducc1UeVmjSPChUGOB15zk1dsruXR74yCNWlUbSG7Z/rVIhyfQZdwy2c7xSoQy9+gNOtpbU8To2cNyGwM4+X9abfalcX0sjyNhXP3R0A7Cqi/eH1o0C7e5q24HzDHerulWUd/rCW8udrbicHHRc/0qvbJzJ/vVo+H+PEcJH/TT/0E00hSdk2bv/CO6an3rct9Xb/GpU0nTk6WcX/Ahn+dbHyzDbjDenrVdk2Ng1aSOR1Jdyn/AGbY/wDPnb/9+x/hTlsLNelpAP8AtmP8KsUVVkRzy7jUjjThI1X2UYqeOUr8rZMZ6rn9R71FmjNFkHMxZVaPBDb0PRvX/Cmj5hxTkk2gqy7kPVc/r9aZInlEMp3I3Q/0PvSKbuL0pc01TmlxTJA0lKaSmAZpaTvS4pDOUcf8VD/29p/M11QzXMOMeIB/19p/M11JpI0qdBh5pMU40lVczExQBS/hQTkk4HXtQAdOlFFApAFA5opRQAoJ7Gn5zjPP1qMU8dRRYdziLhQNZuwOglf+ZplyPlj/AN7+lTXHOtXZHTzX/maZdrhY/wDf/pWR2LY6y1hilsLbfFG2Yk+8oPYUNYWZP/HrD/3wKfZf8g+2/wCuKfyFSmtLI5XJplF9JsZOtso/3SR/Kqz6BZNnb5i/Rv8AGtY00jNLlTGqkl1MJ/DJO4w3YPorp/XP9KrN4cvV6PC30Y/1Feg+HJ9HtZ531bawKhY0e3Min1PDDB/xro21LwaYyRb2ZbHANnKM/wDjxrGbUXblZtGpJ63PG4NCvVnUvCDGp3MQwPA56ZzVa7trx7h5Ht5BuPA2ngV27gBHmCbBMSETP3Vzn/634GoPeqVJPUHiZLQ4Mgg4I5o6V3UsUc67ZY1cf7QzVOXRrCUH9zsPqjEUOl2KWJXVHJZpc10L+G4SfkuJF/3gD/hVSXw9cKf3UqOPf5al02aKvB9TJDUuanm067gYh4HwP4lXI/MVXKlTyMVm423NVJPYfmhzmJx/smmUN/q2HtRYdynCuafKuAKdbrz+FSTrgL9a0sQti7EcQRj/AGB/Kn5qGNsRJz/CKcDWDWpqSdQRntSr8oxkn6nNMDU8GlYYpAYYYAj0NQyWcL/w7fpU2aKLtCM2TTnXPlsCPTOKmh1XVrEjFxKVH8LncP1q5SEZ7Zp819xcvY0bLxhGQEvYWRu7pyPyrftNWsL3/UXSM3908H8jXDyWkTj7u0+oqpLZGNSyMCBzjpUOnB+Qao9QyaVWrzix8R6jZbVWXzIl/gkGRj69a37PxlbyHbdW7RH+8h3D/H+dZyoSQrpnVBqcGqpb3dvdxh7eZJFP908/iO1TZxWLVgLAb3qRXqtupwaiwiyGp4aqwfmnh6VgLAYU7IqANUgakIkpRTFNSCkAuBS0etOxQAgpcUUUgCp7e8urQk21xLFk5OxyM/WocUtMD56kieJtsiMpwDgjseRTM4BFOkleQAMeFzgemajr2DAKUCkpwoAUV2Pw4GfEU3/Xs3/oS1xwrtPhqM+Ibj/r1b/0JamWwS2PYIF/d1zvjcf8Uxff7g/mK6WEfu/wrmv
HRK+Fb3H90f8AoQrne6MqT1PEnB64NRnpWlaaiLOG6TyEkaeIx7yeUBHOKoRRPPIsUYy7cAetdRuyOinyKF2jBDY+bPr/AJxTKYgpyfeH1ptOT74+tAG3bj5pPrWhoA/4qGL6Sf8AoJqlbj5pP96tHw+P+KiX/df+Rqo7kVPhZ17DNSBw42yDns3+NIRzTCK1aucN7Ehtz25pv2dj6imYpMVNmO6Jfs596T7M2e9R7RRgUWY7of8AZzTljKcEZU9VPQ1FSYoswuiTyhEQ33o+57j61Lsi/vR/mKgjcxk8BlYYZT0IqQRxh1kKs0JIDAHBHtn+tS7opNMGSMDO5SO+0jNRvFsPYg8gg9a6VbTwe0QLX+oq2MkeUpwfyrCmFtFdyQQytLbZ+SVk2n64qVK5TViuqKfT86eI19qf5Ef/AD2j/wC+qBbx5/1kf/fQqrhY4+TjxAB6XafzNdPXMuufEarkY+2rznj7xrsGtk/57Rf99ihOxU1exUIpMVb+zp/z2i/77FI9vGIyRNGSOQAw5p8xnylTFFKRSYqiQoFO3EIU4wSD0+v+NNoGFKKKKBB3p46iminDqKYzi251W6/66v8A+hGm3f3I/wDf/pT8f8TO5/66P/M0l4MJH/v/ANDWR2o62y/5B9t/1xT+QqTFMsh/xLrX/rin8hUhFaHHLcb+FFLijFAjUg1LT0hSOfRLaYqoBfzZFJ9+GqhIEubs/Z4VhR2wkYYkL+J5qPFWYcQwyTYBYjYmexPU/gP51Fraoq5DdsGnZVbdGnyIfUDgGoKcRSYq1oiW9QxRS4ooEJS0UCgAzTJIYphiWNH/AN5QakpKNx3sUZdFspuREYz6ocfpWbdeH3WN2hmVgAThxiuiFNm/1En+6f5VLhFmsas11OBthnt/DT7gfKn1othz/wABqS6H7tfr/SszrT0Bf9Wv0FLXR2+gWt1pltKGeOR4UYkcgkgHpVW48OXMXMLrMuP90/l/9es3TY414PS5jg04OQaJIZIXKSRsjDswxTKixtcmDg04NVfNKGNS0BZoqJZegqVSDUlCFajmX9zJ/umrGKjmH7iT/dP8qAMnT0V51DAEYPB+lXZrCNxlPkNVdOH+kD6GtbFaSbTIiroyPJubVw6FlKnIZD0rTsvFF/auBNJ56dxJ1/Pr/OnkVDLaxS8suD6ipbT+JD5Ox01n4o0+6AEjmBz2ccfmOK2I5UdA6OGU9CpyDXmkmmuuTGwb2PFNtr690yYtC7Rk8EY4P4dKh0U/hYtVuenh/eniT3rjLTxgSFW6twT/ABOhx+h/xrdttUt7tN0Mgb26EfhWMqclug3NkS8df1qRZR61jm6x3pPthHSs+ULG6sq+oqZZk7sPzrl5NS2nHU+1RG8nm4B2j260OI+U7JHVjgMCR2BqcDNYHh+Hif1+XJ/Ot0bl75HvWT0YrIo3+pR2eVVd8vYE7R+Z/pTbPUGuCvyxyA43GJidh7g5Azj1FaOULZaMbh0OM1IuwDCAAe1CkhWG7aXbT6Xbmi4rHzveQeTINuNrDIHpVatXUlPlRnJwGPH1/wD1VnKwR1YDOOcNzmvYhLmjcmtDkm0N2kYyCPSipZ5vPZTyMDAUnIUeg9qjAqjMceeQAAewrtfhqP8Aiop/+vRv/Q0rjGTY23IJHociu1+Ga58QXB9LVv8A0NKmWwp/Cz2CIfIPpXMeOxnwte/Qf+hCupiH7v8ACuW8eHHhW89wB/48Kwe6MaW54pIgVVbepLDOB2+tRhmXocfSnkZ54HNXLqw+wWqPO0LvOm6NY5QWj5HJA9Rmuk6CpcTRyiIJEI9qAHnO4+tQ9sYoABzlguBnnvUyBp0IATESFuTgkZ/XrTERMhU4OOPQ5oj++v1FSAIBl13ZyMA4xU1vatJGJVwx3Y2Lywxjkj0xmhBc1rcff+taPhznX8+ivVC2+6+fWtHw2M642OyP/OqW5FT4Wdg1MNSGmkVqcAzFGKfiqct9DFqMNif9bKCQew4P+FK40m9iwRSYqTFG2mIZjijFP20EUAMxT4yUbI/HI60KKo6veS2FmbiMD5CCcng+xqWXFNuw/UtQgsJLfP3ZXIIPGz8fyp0U8V0vmRSK6eoNchrus2+qx2zRKyOgYOrdBnHQ1L4WvCl81ux+WReB7j/JqFLWxu6XuX6nXbaUCn7aAK0MDlH/AOQ8P+vlf5muoxXLt/yH1/6+l/ma6k9aEaVOgzFGKcaMUGY3FJinYoxQAwikxUmKQigBtHpS4B5FLigBMU9aKUDFAHF4zqdz/wBdH/maS8+5H/v/ANDT1/5CFwf9t/8A0KmXmcRf7/8AQ1kdvQ66z/5B9t/1yT+QqQjmmWgxp9t/1yT+QqQ1qcb3GnrSUp5oANAkKBk4AyTU91iNlgGP3QwxHdj1/wAPwotfkdpj/wAshuH16D9SKgOaXUb2GmjFLijbTJEoxTiKMUANxRinAfzpdtAxoFNYqhXcwG44GT1NNN3BHceTI+184G7gHjPWsfVb5WleDeuwMCpHVTgZqJTSRrCk5M0b28FttKEPtyZEGMgY606O7iubNnDKpKnKlhmua81pNrONwGRu5z+dIsmwmRGVcrwuOvrWXtnc3+rqxTthz+FPuVyi/U0lqvT/AHf61LcjCqe2T/KqNDs9OkRdIsyzqP3CdT/sirakPGHUjaRmuat5QLaFD2jA/Sri3hWExADB79wKXtTF0OpYv7mPhGCSL0YMoIrGawt53ZR+6PY54/KrM6qT95XGB839KjVwGBPJ9azcrs2hHlWhQn0a7iXcieanUMnP6daoMjKcMCD6EYrrG1HMZCpg8fhUcj299KIp4lIA69CD7Gm1HoNTmviRy1KGI6Gti90WNMvbTgrnAVv8aypYJIX2uu0/zqGrGsZJ7CrcEdelPeVHgkAPJU8H6VX2ijZwaXKXch01czA+qmtcrWZpg/er9DWrSnuENhmKQgU8kCo2kVRyQKgoMVHIqMuHCke9RSXYHCjntnvURZ25JJqkmIhns4W/1Z2n9Kpss1udwJGOjA1o4qG6GIGq03sQ0SWWtXomiidxIjMF+ccjn1reWV3+834CuRtP+PyD/rov8666JazqxSY4PQmiT2q5ElQxLVyNawsU2dD4cjBW44/u/wBa2jF7VmeGUyLn/gP9a3zHWU46mDlqZ5iphjrQaKmGKs3EamUsOO9KJCOoP4VZMWaY0WKmw+ZM8Gv1zanpgMKy5ImjfY6kNwcfUZravJ3sRDLER5ivkZAPT2NZMkzzMrSNkgY6ds5r16PwmuK/iBcW4gKASxyErk7Odvtmoe9dP9nsx4TllRP9ICpuYrjOX4xn6Gsi7t4YbCweMHzZY2eQ5/22A/QVs1Y5FK5FcQxJDbvEwYsn7z5s4bJ4x24xXZfDFc61dH/p3I/8eWuGr0D4at5ut3jhFT/RwMKMDqP8KiWwqnws9WT7n4Vy3j3aPDFzvzt3LnHXG4V1SfdrkviB/wAird/Vf/QhWL3RhS3PFXxuOOnamHnmpCON3HXHWmVujrsNxxSqSpyKcQoXBB359eMYppA4wfrxTJ
Zotd/2nqKz6hIsasu0ukQ4444FdDD4Tktbh5jcRS2qRlg8bfe+U9vTNYFhol9fxl7eHcoI5JA69/pXXaNok2mWF5LcMdzRuoUHjAzzj14q4q5jOVtmYUAwG+uK0fDA/wCJ3IfRH/mKzoM4f61qeFhnW5v+ub/+hChbjn8LOuI5pMU8jmkx3rQ4TO1W+FjaFtwDN8qk+tcPc3091f8A2iR8OTjI4wMVt+ItQjun+zIcNGcgk9DyCCK5jdiQeuawlK7PQo07R1PTYJkmVQrqW2gsoPTIqXFcjoGqR2STecSSVL8+2AB+OTWzoF5LeWsjSkHEhC/z/rWqlc5Z0nG7NWsnX3u4bAyWrABfvYB3fhitikYAqQwBBGCPWmyIuzucfo3id1nWG/fdEf8AloRyv19RV7xJMsMDNBqTbZBgxYDgAj8xXL6wunx3LLYrOhVirpIBgH2Oaqw3kkUckWFaKT76kdfx7Vk30O1U03zIgPHTpViyupLS5SaIgOvIzVZsbjjOO2aAccUkzW11Y9G1fUxp+nK24GZgBgH25/nU2lXYnsLVpZB5siZAJ5bnGcV55c3kl04aRs4AAHYYAH9KW2u5Le4imQgtGcru5AqufUwdBctjo2H/ABPkPrdD+ZrqiK5GzZptQsXc5Z5lY/XrXYd6tGFXdDMUlPIopmZGRRinkUgoATFGKd+FH4UAMPWlxTutFFwGjrTsc0YFLimgOKT/AI/Z/wDeb+dNvekX+/8A0pYebuY+pb+dF6OIv9/+lZnb0OutB/oFt/1yT+QqUim2o/0OAf8ATNf5VIQau5xvcfa3EtnOs8JUSLkAsgYc8dCCKlvdQnvlQTLANp48uBIz+O0DNV8cdaTbzU6XuCJXIjtkjxhn+Zj7dh/M/jVfFP20baaYMbigCn4o207isNxRinbaMUrjsNxWZq7wmII1wYpF5GMjP4itbHrWfqV3awlFnjLZBIZRnHalJqxdNPmOXeWTILN5h7YbJxS+Yybnl5GenekdjI5eEBFJwq4xjHemIXkcfNwx6LwQK5GegkSFiVHl4OB1PGPwpWw0WT5ZI6ELjimsy+X06nHy0ZbYVzuKgjp0FJDZDa/wf7v9amn+4v1P8qjtR8qH/ZP86mnHCfU/yrp6GQ+G6IAVgGCpwR1wKuQzxyABHBOM47iqLiKNVTGJGUZ5yDQxGG3FMlcHP8Nc7eptY0iT2bPtTS2OSBWc128GU8xWKjOCP60sepqVBkUrz25oQWLxc888YpA5HfnFQrPFNzG4z6U/nnnNMLEwlwOv1qQSJJGUfDc9DVTdjPP6UqvkdOKBWJBp9vKX+dYmLYUDmq0mnSxMQAJB6pzVjdgD0qaOUqvynnt7UXDVHP2ZEZB6cYq0bgAE5PX0pmnwrPchH3bST93rVufRZsZjkRv9npVOF9RKajoyhJckn5cVXd2IJJzT5reWBtsiFTUZHyN9KOWxV7j4eUzgZzUwXFMtVzCfrVkJWctzREW2obsf6M5q5sqter/okn0/rQnqDWhnWYze2/8A10X+ddjEtcnZL/plt/10X+ddzBZStyQFHqxxRWZnDYWNDgVZjXmpIreGP/WSZPooq5Fc20GSijPqeTXM5DaNnw0BGlwXIUNtxu4z1reMkX/PRP8AvoVxh1QetJ/ag9ah3Zm6d3c7LfH/AM9E/wC+hRmP++v51xn9qL60f2mvrSsxezOyIQ9x+dNKL6j864t9aRJFTZIxY9VXgfU1J/aq+v6UuVlch5Jqk4mutqHKR8D3PeqI4p2KbXqRXKrBOTlJyZMbmVofKMr+WP4cnFR7icAkkDpTe1KKZJIBXoPwuTOo3xHXylGPxrgApGM9xmvaP2f7JJtV1i6cZMEUSqD6sW5/8dqJuyJmrxZ2Rs7mOHzHglVMfeKECuL8fc+Fbv6r/wChCvd7nBiIPIxXiPxOiWDRL6NBhdyED0+YVgndmVONpHh7DFDRBYVfIyRnAOfz9KV+tMNdSOhkkkUflGWORcFsCMnLAep4xUcSgyLubaM8nGcVNJHHFBFJHOryODvUA/L7c+1N83MKxBEGDuL45P1pkbnpeiXVrLp8TRzRnIAxwpHsR61oah/yDbn/AK5N/KuP8H2CNNJPMjb48bQy8YPeui127MGmuAwDP8mD3zWqehxOHv2RyEH8f1/pWn4VH/E6l943/wDQhWZCRl+v3v6VCLp7SXzIyQ24j9ai51OPMmj0l9qKWYgKBkk9qp6jdRQ6XJNvG10+Qg9cjjFRSTQ6toBJl27xt3dAGH/1645tTmFodOmI2xudpHbrxn0pylZGFOjd+hmyszylt2SSST60xSBKCxxj2pCcMQfSkJyQSOeuaxO+xIzk4Y9TwK6Pwom68LGYfKpxGBn6nPauTMnzcH860rDVZtPiYW5IeQEEn+lVHRmdSN42R6WOtQ3ySG0cxT+Q6jcHbG3j14PFcff+JpXtYUgcpKo/eB1wc+tZ6+JdRTyNsv8Aqk2EHkOM9xWjmcsaEr3KWoo7TNMTA5PLPA2QeeuO1UM+tTyvHICwUq2eg6VBxWZ2RjZC0UCjvQULSg0hpB1oE0dVpv8Ax/ab670/lXaMmCSPyrjNPwL/AE3P/PSP+VdszpIDhgcHGRz+Fao4ay1IjSYp4p4XNFzEgNAGaseWfw+tKI29P1ouFivikq15bf3ahkXDZ9aLjI6XtRjNGKYhBwafjNM5z0p4oQHDwDE7++f50Xn/ACx/3/6GlhH75j9f50XIz5Xs39Kk7Tsrcf6LD/uL/KpMVJZW0k8UUca5OwfyrXi8OXhVXcKinoTnBrGviqVFXqSsZUsPUqv3FcxcUYrp4vDUZxvlYn2q5F4XtiM7JH/GvNeeYb7N38jtWV1vtWXzOMxk0u2u6Xw3bIM/ZXP1U1J/Y9sn/Lqv/fNZvO49Kci1ld95o4LbRt9q7v8As21H/Luv/fNKNMtz0tV/74qf7c/6dP8Ar5Ff2Uv+fi/r5nBbaiuY5zARbsqv6tXoZ02FRzaLj/rnR/ZluRk2KEf9c6Tz1dab/r5FLKrP41/XzPE5ri8MjKZ8sDyQeDVYmVnGJACT36GvbW0fSSTusEz9SP61Xl8M6DP9/Tl/Bm/xrP8AtinfWLR0LANLRniwcybVLAdQOMZpSHOSGXA444IxXr7+CfDjf8uci/SU1Wl8A6C5ypuoz/suD/MVazag+4fUZnkcshMmxhlccYHeiNPLQqWyx644/CvUW+GuksSyahco56EoCBVab4ZyFT9m1SCQnHEkZX+RNbwzHDy+0ZSwlRdDzm3HyRj/AGf61LLyF+p/lXYf8Kz1uLaIpLSUKMfLLjPPuKq3XgPxFHgiwDYz92VP8a7o4ujJaSRg8PNdDk5CpTAbBAGT1qt9oclk3Ag8ZxWpe6Nq9lE6z2FxGc8tsJGPqKyljKybmUhh0B4oUovVMpwkt0WHZVQgYywyT1FVHRW+593+IelWiCilsncVxkDiq2wo6nI9TxTTRNhEt5POzn5fXOOKuG6aFMo/AOPn5prxuwLueNvY0ySFVT5WII5ye34UXuFi3HfRuo3kpnjmp
w2eUYY9RWSQphIJGWbAzxinRxhFKLvZieccYoEaoLf3gf1pSzEEEqB64qi8nloIwxJP3sn+tRq0GCGL7+2KAJ9LkEVwrtnHPQZroTOhXIP4YrlbYlVGCR9K1EuY1iGS39a3TMJRvqaDyxsMFciqM1lbTpIyoUIGTg9eRT9wK7h0NOjYbJf93+orRx0M1o9CrDp5SM7XBGeh4pTA6dUNXInAQ5OOaUzr7muacdTqjPQo+Wx/hb8qqX6FbSXIxgf1rV85mIAGB+dUdRGba4/z3qLWaL5roxrZzHJDIOqsCPzrqpruWK1Fwb22cEj92kylxkenWuTUfulPpVY3DVc4czIjJROp/tiQ9G/Oj+1XP8Vcr9pNL9pNT7JD50dR/arf3qT+1G/vVy/2k0faTR7JBzo6j+1H/vUv9pt/erlvtJ9aX7UfWj2SFzo6f+0z/eNH9qN/eNcv9qPrS/aGo9kh86IB2x1pCpBIIII7GrAtZAc5XP1pzW0juWO3JOeK3MisFz065pQOas/ZX9B+dAtZM9BSAjQV7v8AAKR5rnxFLIQWZbckgAd5OwrxJLVx1H617f8AAJCj6/nutv8AzkrKpsOWx7Fc/wCrP0rxX4pn/iWXg/3P/QhXtVyfkP0rxT4n4eyu1/3P/QhWC3RjHc8RkHJqIirr2r5OAMfWojaSe3511o3KtOQ7WB61P9kk/wBn86kFm+3tn61RJt+HtSvmnEKSbkJGd7dB7UuuX73OqNEsrNCjjCnscYNV9Elk06Z3Yrg4O3aDk/XtVfyZXuN7YJLZJz1p3IUVzXLUX8f+9/SqN30/4Ef51fUBck8bm4/KqN5xt+p/nQ2Ulqa9nqBXw5cWqsRJG+4496wJJC7ZZuR7dauW06qs0ZPEqY6dD2qhIhD471F7lxjZsPMOzJPOfSgrhd2eewpqAE4Y80+U5xjPTpmkWVuuOTU8jtM4baFG0AKowAAKYiqWO7OMdvWrMMBYktnYgy59P89KolkE00lxJvldmYADJOSahNXr2XzHBW2WKMDCgD+vc1ToBCUmeaO9KAKADNLijFLSGGKTFSIMuo9TU0y8jFFwaNgxvILOOM7XZkVTnGD0612CyWel20cDyqnHAIOWPc1x12r/AGa3KjnIx9cVatdNVLoPqlzsAG/aDuLY7Z7VdzmnDmOyjIkjV1PysAQfapUz0P4Vjw+JLCWWdVykMSKUJXr1z/SoT4rtfsssgRvND7UQ/wAXoSaLnP7KXY6LB9KMf7IqnZ3UlxaxyFslhnp/9YVP5j+v6U7Gb0JlI/u0y4xheKarNnkn16USuWC5NCAhopcUYNMQmPelA5oqvcX0NuvXc3YChsaTexydpC0kpCgk4qzLFGMAqZJFOQA2F/PvWlb6dc3p2wxCG3/vngf4k12Wh+DYoVSe6LZPIyBvb8P4R+tc1bEU6MeabsjuhCU5csVdnK6Z4e1HX5le4j8uFByxU4A9BuyR+FegWljo/hPSJXM/kWmAzyTNjew7Knc/QE1D4j8Wad4WgMO0S3uP3doh4XPdj2+nU15XrOqah4ovzeagOnEdvFnZGvoB/Pua4lGtjVyzXLB9Or9eyOn3KDutZfgjoNW+JOqX7yW+gQiztweJyP3pHuTwv0HPvXO3Oo+JL4lrjXrls/wtdSEflUASdVCrbuAOwQ0uy5P/ACwk/wC+DXoUsNClHlgrIxlV5neTuVWsrx23PeKWPfk05bK5Uf8AH6R9Mj+tWPLuv+eMn/fJpDHcj/ljL/3ya05GLnRH5F4P+YhIPxP+NN8i9z/yEGP1ZqeVuM/6iX/vg0hEo6xsPqpo5GPnQJ/akZymouv+7Iwqymo6/GP3et3a/S6kFUy7DrSeaaPZD5zUTxD4qTga7d497lj/ADp//CU+LgQf7Vkkx/fKN/6EKyDMfSmmc0nRi90NTZ0MXj/xXAwErJOB2NvGR+aqKtn4p60G/eaVp+B2Ecq/+z1yfnmk88+prKWBoy3ivuLVecdmd1Y/FWMsRqOmke8Eo4/Bv8a14viP4fmxuN1Dn+/GDj/vkmvLTNkYPIPY0wrA/wB6JPwGP5VyzybCy+zY1jjqq6nttl4l0W/GbfVLZj/dd9h/JsVqrMwAKSHB6YPFeH+H9Ej1PVFgSUxKiSTMxOQAiliPxxj8a9xcgW9sgABWFQQBjHf+teDmuX08JBTpyZ34TFyrT5JIX7TKP4s/UVWubaxvR/pen2kxPUtEM/nT6SvFhiqsdpHounF7oybjwl4dueVsmtnxjdA5GPwPH6VhX/w1tZIiNO1FgxPK3IGPzArsjTa66ea14dbmcsLTl0PMNR8C+IrYF4rdblFGAbZwxP8AwHr+lctcQywO8VwHWUcEOMEfhXvKyMhBViCPSuU16ytv7Rme7QNHMd0bmMMAO49sGvfy7H/Wrxa1R5mMoKglLozypI2VwVcsPpUzzBIwuTgEDArrJPDVrcljZzlfYHI/I4NYl14b1GJm2wiUKM4Q4P5HBr1L9zhUk9jGkHnOVUtnOST3qQtGqHO7IOMU8WzBz5gcY6hh0/CmyhHLYJViemOtUgsNjYJGpOce1SJLhhk4BOcdQajU4iHAPtQ0bNjavOegP9K1urk20NCO4Mqk7h+FBZvU1FbqRCMjB6HNS4roi7o55KzJYf8AVn61KBTYB+6P1qZRxWMtzWOwgXLD61W1FP8ARrj6GroHzD61DqKj7LcfQ1jLdGkdjmgP3C1mmtYD9wPrWSa1IY2iiimSIelJQelJQIWkNGaKYgqUGoqkFJlRPdR4d0n/AKB1t/36FOHhvR886dbf9+xWmKcK4uZnPzMzP+Eb0f8A6B1v/wB8Cl/4RrR/+gdb/wDfArVFKKnnY7syv+Eb0cD/AJB8H/fArtPh3ptnYSakbW3SIuI920Yzjdj+dYOOK6nwRw999E/9mqeZ3LTOmufuH6V5br9pb3+sTW9zGJYioJVuhr1K6OEP0rzPUv8AkYp/9wU2yVuYbeF9G/6B8P5VXl8NaMP+YfD+VdCe9VpgMUcz7lJsxo/DWjE/8g+H8qtx+F9F/wCgdD+VWoqvR0cz7jbM9PCWhnrp0P61YXwhoI/5hsX61px1YFHO+5F2eUePNMs9L1S0SygWFGjBIXucnmuJveCn416B8Sz/AMTuxHbyf/ZjXn98OUrrpO8dTojsischcg/jSsVLBuc96iJKkihWxnPcVRqLgbyR0prMD1qRSuAew6g0m1Tg4zzQFiewihecGdmWNQWO0ZzjtSvMzL5ca7VOCQO5qeOzLWsUkb4MpIYlsAAdjV62tY7fTHumA81mKoDzkd/wpmdzJuI5Ys7jt3KDtJ6g+lUzVmbG87TkHoPT2qEigtLQixzTsU/bRii4WG44pQKdtzTttK47CIMSJ9asyDLCq6KfOT61cZfnFS2O2h23i+wtrTQLdoYwpFztyPTyxXCyzyy7d7khRtA9BXoPjY58NW59bz/2mK87Ip3M6auhuferMZS3YGVPMYEHy84/P/Cq2KcASc00y3E7TR9Ya6YI45J4AwqIPqetdCoBGR0riNAsmkuFle2MqZxu
Y4Vf8a7VGLIGVht7YFaJnnV4JS0EuB+5NPI4HsKikLlDk8Z9KQyleWbjPpRcy5dCXFRSzJCDuIyB09PrVd7p5spbjAH3mJxj8e386sabos+oEzHAhU4aaQYVfoO5/X6VMp22LVNWvJ2RRM1xeyLDAp+bgYHJ+gra0/w7HHIDd757hjxEDxn3Pf6Vs6ZpYy0Gmxc4/eXL9cfXt9K6FYrDQNOe6upljgQEyXMnVj/dUevsK48RiFTfLvJ7I2owlVfuaR7kGnaKluElmCvKq5CYASIDv6f0rkvE/wARUtpnsPDzJPOB+8vTyqn0T/4r8qwvFXja88UKbGyR7LSc5z/y0m/3sHp7dPrWn4J8GadfWy3d/LIqknyY0x26sx+tZ0MJKc/a13eX4L0OmdWNKHLDRfizlLPSJLmU3N3JIxbks5yzE9evStqO3hgKJEgVeSfU8VvarpEVnHJNDNvReV+YMCM+3SsCScRsDgnrXrKKijznUlUZMRSEA02KdJhkH8KkNO5nZrcbtBqvPMbeVWAyCCOaW5u1gyvIYjINZst4ZYwrdR0PcVnKpbQ6KVJvV7GtDI7rl+/IA61LsHesYah5SlQDu45q5FqkbEB1Kj1ojNEzpSvoi2yKRggEe9QtZ2zD5oIj9UFOju4pIw+duSQM0l3II42TcVkI49605kZcsr2KM9rpyk7oEznHyjHNY8tvb7uEI/HrVp53djuAJ6E460ht5FhW4dD5e8puxwTjOP1rNzZ1wjyrUz2tY8A7mBP6Uz7FuUFZO+PmFX5F3OMHANNK4wExnNNTZqZz2kqReYcFR1welQc1oXTFbULnl25+gqhitE9AR1ngW2M11dOQ+CiwKQONzuox/wB8hq9ZlYPKzAYBOQPSuI+HlkIbK3mlVh5sk1yAehEaeWp/77c/lXZmvkOJKl5Qh6s9bKo/FP5BTc80Zppr5c9pCk5ppoJpDTKQlRyxxzIUkQOp6hhkU880lXCUoPmi7MbipKzMW58NWkxLQO8D+3I/Ks2TT9ZtB8yrdxDsPm/Tr+VdXS17GHzvEU9KnvLz/wAzzq2VUJ6x91+RxjXFlc/ur62Knphl3Y/PkfnVKfwvpd8+60laNvRDu/8AHTz+tdzPawXClZokcf7S5rIuPDVux3W0jwn0PzCvYoZvhKnxXg/vR59XLMTT+B8y+44Wbwrf22WiCzqpzhD82P8AdPNYk6vatsIZXU8hlwa9N+x6xZMApW4jHY4b9DUU9xZ3B8vULBkJ4IaPePybkfga9KM1UV6clJeRxvmp6VItHmqXBYgEnGeQT+tWo97LuYYyePpXVTeEbG6lL6dchCckIOf/AB08/wA6ypPD2o2au0kRkAONyZOB7jqPyrSFWzs9BSUZLQrwD9yfrUyjimwphGHfPSpQtaNkpAq/MPrUWor/AKJcH2NWVHI+tQaiP9EuB/smspbouJzK824/Gsg81sIv+jD6msgjFbEtDKKdikIpkjOtGKU0lBNhKKdRigBtSgYqLvU1JjifRA6U8UxaetcRyodThyKSnAcVIxK6rwT9+++if+zVyxHFdT4J+/f/AET/ANmpItHSXf3D9K801D/kYJ/9z/CvS7v/AFZ+leaXxzr9z7J/hQ2CWo0iqs1WzVSbrUjSGR9aux9KpxirsYNHMkU02Wo6nFQRj0qyqn+6fyrJ1qa3kvvGqNR7RZ5j8ShnW7H/AK4/+zGuCvRllH+zXq3jTwzqmtatazWVr5kSRbWYuq4OT2Jz6Vgt8MtcudpzaxYGDvkP9Aa3hjcPGNnNfedVOhU00POzGc0nlGvSE+E+sHh7qzH0Zj/Spx8JL/8Ai1G2H0VjUPM8Mvto6Pq8ux5iI29KcIzjGK9Vt/hI+T9o1RQP+mcWf5mtKH4U6OgHn3d/Ie5Qoo/9BNQ81w6+0P6vLsePRF4iCvbseRV2C88q1aJwXznaD0GetevL8MvDidRfP/vTqP5LUyfDzw0ow2nzP7m6b+gqP7Yw/f8AAHhZPoeFvEu75M496aIj6V7wPh/4YHTSm/G6f/GpY/BHh6E/JpEB/wB+SRv/AGaoec0FtctYWR4KLcntTvspNfQaeG9Hj6aJph/3oSf5mpF0PTFPy6Npi/S2H+NZvO6PRMpYVnzwLfFO+zs3RSfwr6LGnWaLhdM08f8AbuKcLWJTlbKxB9oBWbzyn2KWFPnJLdhKnynr6Vb+zuz9K+hRGynIgtR9Iv8A69O3Tdo7cf8AAP8A69Q89h/L/X3D+qeZ5h4ttZp/DdskcTuwvCSFUkj5BXFDQ9Tf7mnXbfSFj/SvoMvc/wB2H/vk/wCNHm3fYQ/981Dz3tEVPAqKtc+fv+Ed1c9NKvf/AAHf/CoLjSb+xjD3VlcwITtDSxMoJ9MkV9DGS7P8UY/CsrWtIl1q3itrpw9uj7ygbGT0Bzj3P51dPPE5WktC3gl0Z4/b2l/BFDLO0/kPgrGrn5l/pW6byfVrf7FZWxg3AhnZ87gB0UhRz2xyea7RfCFmgjzEXEa7VDyEgDJOMemSa0YtOMKqqRIqr0C4AFa1M9gv4abJhlies5I4WG11e2s5Hv4J2fcWZ5EPAPTPHJ9hSpY3dwymSKbJ5EYjIJHuewr0FCqkqMkqcHapOD6cU8swAxA7jPsMfmayWfS6w/EyllUekjkLPS44wGuxu2/dgQ8D6kfyrptP0i51FElm/cWK8LgYBHoo71ejMjZ87bt/hQDge/vVoXMqqqh2AUYUA9BWk+IKcU1GGpyf2PNyvKV0Q6zrOk+FNIW4uwY4zn7Nax/fnYdefTpk+9eP6trV/wCL9US61JngtVOIrePOyJfXHf8Ar+levXUUN8ytdxR3BUYUzKHx9M9KqyaRpsv37G2/CJR/SufD5xh6bvKLbe70udUsDNq0Wl5Hks9ukcreRl4gflYjGR9K6rw1bRNolzeTsEW3mVXLjIZGHKr/ALXGa7ZvC2nPas7abAygcLGg3n6beayfE2jy/wDCP2VnpemSRxI7M+UK7XPRuDzxxk56178cRHEU/gaXmcHI6U/iTKevQaXbafvt3XzpVAjjVh93qSQO3FcfKNzDIyOc4ppFzZyNbyQRq8ZKuoHIPfpWtpj6PczrHepdQuejiVdpP4rxUU8RSw9Plcm/xNamDr1ZcyjYqRxrAu9jgY5z2quNUt9xVgw98V283hPTJkK+bdhT6SLz/wCO1TPgLSTnE15n/fX/AOJrnee4Xo/wIjlVV6zX4nD3t2J8qMFB0YVQZ8ZPPvXobeANP/gurgf7wB/wqJvAFsel/IP+2Q/xqP7YwsteY6I4GrFWSOBJ39zx6+lITtBBzjPFd4fAEXQX7fjF/wDXpv8AwgQU5W+z9Y//AK9XHNsL/ODwVbscUrsI+pApXnkdBuJO08Z7V2X/AAgkvA+2xfihpp8CXCvuS9gB9drf4Vqszwv/AD8Rk8HW/lOPUo6gYw2cnB61t6jZSQ+G9MlEbBJHkZ2PQscbePoP1rQHw+ui4P222A7nL5/9BrQ1Dw1q+oKiy3dsVTou5sZ6Z+76YraOZYS38RfeYzwdfmVos4Eggc9
PWmkEkAZOelda/gXUz0ltT/wNv8KI/AWobstcWq46ZLH+lNZjhP8An4vvK+rVv5WcHcn97t/ujFRou5uld9H8NZS5M+pxjPOY4y388VsWPgHR7Vlec3Fyw6hnCqfwAz+tRUznBwXx39DWGCrv7Jd8LweXpoyjL9mgjtUz05HnSfjucD8K2Cailt1WECBShjZpFAY8seTnPrU6hXUMpyrDII7ivkM0xcMVW9pDbY9nBUXQpcstyM0lTeWKXy1rzbnXzIrGkq15a0mwUcw+dFbmkwc1a2rSFVp3HzlXBpMVa2LSFV9Kdx85WpM1ZKKaaY0JppjU0V+tDKrjDKGHoRmp/KX1pPKHrVxm4u6Y24vczpNNsnOTbqD7EinLbKihVkk2joGbdj8+n4VdMPuKb5J7V2Qx+JjopswlhcPLeKMy40uK6j/fxQSydm2lCR7sMk1nN4SSQjypGjJHch1B+vB/Suk8l/7ppu2RTwCK7qebYhKzs/kYSy3DyemnzOLufDOp2gDeR5q9cxc4/DrWDqSMlvcqwIIB616vHcXEXTke4pl3b2GqQtFf6fGyuMMyjDfn1r0KWaqVudHNUy1r4GeDon+jD6msgrXtV78NtNuedNv3t1x/q5xuGfr1/nXEar8OPEOmb2+xm6iUZ8y2O8Y9cdf0r1aeKpVPhZwTw1SG6OJK00ir0trJGxDoykdQRjFQNHXQpI53Blc0mKlK00rVXJcSOinbaMUXJaGYqUUypMUMcUfQ4GKlWmCniuJnEhwHNOpAKcKhspIQ9K6nwT1vvon/ALNXMbSxwoyTW7ov2qxjm8sqhmxknkgDPT865q2Jp0dZnRSoyqbHV30iRxEu6qMdziuAl0+efV7idAvlMAFOeT0ro/LjZt80pdvUnJqZZrSLopNedPMJyfu2S8zsjhYR3uzFt9BeZgCWJJ7cVYk8MpG+HjJ+rGty31e3iIAhA96dd63G3+rUfUiqdSDpc0q2vkNQkp2jT0MWPRYox8sCD6jNTLpoH8I/Knyaq7dwPoKgbUWP8VebKpTe8mzsjCr2SJhZIvak+zxjsKrG+J70n2vPWseamWoVOpZMaDtTSgqNJC5471citZJDwCa0hDn+FCk+T4mV9lGz2q7JbrCMuwHtVKWdFJC80qkFD4hRlzbCEY7U0iomufaozOT2rmc4myhIlOKbkVCZs00yVHMjRQZPkUuRVXzaUTD1ppofIy9DC08gSNcsegpt/H/Z+43JVAi72JPAHrUEV68DB43KsO4qpfy/2jFNFcsXWZCj89QRg1rejyWd+a/yt/mQqdTn8jMfxv4bQkHU049I3P8ASnx+L9ClXcl5lfXy2/wrwi+ge0vp7dxh4pGQ/UHFdn4NtbfXL4RS7hHFFvMYOM4IGM+nNexiMroU6ftE3b+vIVKSlJxkej/8JTo//P3/AOQ2/wAKnh1yxuBmF5ZB6rC5/pSQafZ22PJtYUx0IQZ/Or8Yya8OTo/ZT+83aiiOO5SX7qyfjGw/mKlzUzsI4duOW/lVepcEiE7i0hoppNLlKQHpVSaRpZfs0LEMRmR1P3F/xPb86luJvIiL7S7cBVXGWJ4A5pLaIxQgMQZGO5yO7H/OB7CtYxsuZj8iSONIowkahVHQCloorCTuxhiigmkzUNDFzRmkzSZpWCwpAPUD8qY0UToUaNSp6jHFOzSZrSNWpHRSf3i5E+hRfRtOfO60jJJySc5P41UufC+m3DqwR4iv/PNuv55rZzSVSxFVO6kygiRYokjQfKoCjJ7U7PtTaKx1eoWHZpOKTNNzQOw/ijimZozQFh/FHHpTM0ZoCw/ijNMzRmgLD80vWo84qvf6lBp9uHdJmcngRxFxj8Oa2oUZVp8kXr5kVJci5mW8UYrlR490jJBuFUjs0cg/9lq9p3iax1Sbyra8ty2MtuWQAD3JXA/Gu95RiV2+8w+t013+42jxUds+5ZEIP7t9oJ7jAb+uPwpsc6zxCRPuknH4HH9KdDxFIcctMfy2r/8AXrz3Bx5oy3R0pqUU11Js00tTSaQmsbFJDi1ITTM0madirDiaQmkzSUJDsLuo3UYpNppgGaM0u00m007j0G5o3UFTTcUxodmkzSUmaY7Dg1O3kdGP51HmlzVJtA0SCZx3B+ozS+bnqiH/AIDUWaM1oqs11Fyol8xe8Y/AmpEnVOQ0i/Q5qtmjNWsTNCcEyW9tNM1aExahbQXKkYDSx/MPow5H4GuP1P4YaPdIzaddy2svZZCHQ/jww/WurozXXTzStA554SnI8c1b4e69phJ+x/aogM+ZbfvB+XUfiK5WSBo3KMpDDggjBFfRwYqcgkH2qG5tbW9B+120NxkYPmxhv516NLPrfxI/ccs8tT+FnzmYj6U0p7V7heeBfD14zN9jaBiOsEhXH4HI/SuevfhbEWLWWpsFPRJ4s4/4ED/SvQp5xhp7u3qcs8vqrZXPLdvNPxXX3/w61y0+aKKK7Qd4H5/I4Nc5d2E9lM0M8MkUi9VdSDXoU8RTq/BJM5JUZw+JWPomDS7uYxBYWBlGUDDBYYzxmtODwvevzIFj9iea4G8129vvGGn6ikc0k6yKDb79xUjBIUD159a7zwt4ing1YaFf2riaQySmVjtIyxPKknAxjofwrhdRt2OP2RpweE4/+WszH6Vd/sDTrSNpZR8qjJLnirun6lb3lm8yXCSLG7K7hSgGD71yeta62oTeXGcWyH5f9r3NcmJxcaMLvfodWFwcq8+VbLcmu76Fn8u1RY4AewwW9zVY3hxgGsvzSaXzK+Yq1KlWXPJn0lPCQpx5Yo0DdsepppuCe9UDMBSfaAPWs7M0VHyNDzz60hnPrVD7SPekNyvvRaQ/Y+ReMx9aTzjVL7UnvSfao/U0csh+xfYueec1LC7yuABVKNhKfl5rVgCWsRlkIAA6mjW9jOpaKtbU19Ojjhw8xH0q1ea3DCpHmRwJ79a878ReOLTSowryNGWHygLmRvcDsPc15pqvxAv7vzUtUWBWbIlOWkx9en6V7uCoYmdPlgrLueZVo01LmrPXse36jr1nFGJPOLjuzfIo/E1zNz440yEsGvrNMdhJvP6V4bdahdXkhe4nklY93Ymq5ya7lksJO9SX3f8ABuJYuEFaMfvPZ5viNpI6aiG9kgb+oqD/AIWRpROPts3/AH4rx7BpcGtVkmGXf+vkH9oy6RX3HsifEbSCfmv2H+9A39BV2DxzpE/3dStjns2U/mK8NxSc1Msiw72b/D/IFmUusUfRVrq1veLuhkjkX1icMP0qx5yt91hXzhDczWzh4ZXjcdGRiDXUaV4/1WxUR3Gy7jz1lzvA9AR/XNcVfIpx1pSv6nRTx9KTtJWPZ/NI60u7Ncxoniuw1sGOBys4GTDJ97Ht61trPtPXivDrYedOXLNWZ6EVGa5oO5xPjvwzNNdDU7C3aQMMTLGMnP8AexWJ4PkudM8T2geOSMSuI2VlIyDxz+h/CvXEZHHD4P1qUIM58zJ/CvQpZnKND2NSN1axx1MOufmTsycDAyWX8CKtRRknpkDk49KpgsP+Wn6CpBNKAR5xweDziv
Lio31HKMnsPkcu5NNzTOvej8avcaVhc0hNIT70ySQIjMeQoJwKaKSIOJ7373ywYOAf4iD1+g/nVvNVbOPyYPnIMjku5HqTn9On4VYzVVN7LoJIdmkJpuaM1jYdhaDTSaN1Kw7BmjNJmjNTYdgzRmjNJmiwC0ZpM0maLDFJopM0ZpWAM0GjNJRYYtGaTNGaVgFzSZozSZosFhc0ZpM0ZosFhc0lLmkNMY1o43Pzxo3+8Aah8uyLlIooiSPmKR8H8cYNT00r827JzjHJNdFOpBU5Kd79O3zM5RlzJxtbqNCrGgRFCqOgAwKkRybWEE8/MfwLsR+mKikOyNnx90E1JHH5UMUWd3loqZPfAAz+lZp+42+pbWqFzmkNLSGsikNooqe2t2ncBRVJXCTUVdkaoWOAKlW2buK3f7L+yW/mygBcZJPasW81a2gVmU7gvVjwo/E1rOhOLSktTmhXdV/uxwgAoMY9K4/UfiJpdszILpXYdoELfr0rn7n4oQjiG1uZf9+UJ/IGuinleJqbRG5wj8U0enFRTSFHcV5DN8S7pmylhGB6PKzfyxUX/Cyb8f8ALhafm/8A8VXSsjxPb8SfrNBfa/A9gIU9CKaUryH/AIWXfZ50+1x7F/8AGrUXxPIAEmmN7lLj+hFN5JilsvxGsXQ/m/A9QKVGwxXE2nxK0uQgSfaYf99AwH5c10djr9hqQzbXMM2BkiNuR9Qea5qmBr0tZxZ006kJ/DJM0c0oNRiRXGVNOU5rmcbGzQ6ilpagkbRS4FIetABmkzRSGqGG40u6m0UWHYdmjNNpKLBYfmopooriMxzxpLGeqyKGB/A06msaqLcXdByp6M5Pw5dS2cc+qQxW8V3GWaFjIFEsQyWGzsSOc8dOKs2dvfeItXlvBHviY7/KhkG446KMsPxOe9cRp/2MaHczXEdy90JVCsqfu4o8ZJJ7kttXHpmvUrPVrTW/Bq2NtDdNd29pvR7e1kI81cbRkDAP6V9tKifHxaRu317DHax6fpcItrNF+ZEGNx75rKKt6GsdNbnsBENSaS2uHTeVKlSOSOfy/WrsXiiN/uag3/fRr5bE068qjdRH1GGhCFJKjaxcEbnopP0FNk3R/eBX61YtNeJkXfqRSPuwbOPwzUF3qjXMjD7Z52DjIfNYulZXNoym52aX9fIgaTnrQMt90E1A8hY8sT+NV5L3UrWeM6dcSQllYSGM4J6Y/rVUaKnPlNqsnCHMi8dwppJ9aj8zxlJB5sdxeumcHjn8sU118aAKHe+G7pjH8x0rq+pLu/uOVYt/3fv/AOASk+9IoLMBWLe6jrK3Vpb6jdXTKblAUl7Ehv6A10VlFvkFYYmj7FLXc3o4jni2+ho2UKwwmV8ADnJrivG3jcWEb2lpIGuzwF6iIep9/atfxd4hi0TTySNxXCoo/ifsPoK8LuLiS4nkmlcvJIxZmPUk125Tl6rP21RadPM8vGYn2a0+J/gv8yS5vbi8maa4meWRurO2TUOSTT7W2mvLhIIELyOcACvSfDnhKx063Goagwdk5LsAQp9EHf6n/wCtX1SSWiPCnVe7OV0fwVr2tRmW2sWWHGfMmIjU/TPX8K6AeA9MsbcNqeuKs+OYoIt2D6ZJ/pWpq/jK6v7hbHSY2AJwAp5Pbk/lWJPHZWFzG2o3rTTLc/vYogGUxhQwYHPOSQOcd6UppEJTn5EL6ToKMEiN1M/puHP4AVUmtdKjYobdkYdQzkEVZstVuJ713sNHWaaSLy3WNCc/NnO1QAOwI6HFSXXhTxRqtwbs6NKNygAABcAAAcE+goU3fVFcqW7Mk2WmueGkH0Yf4UxtFt5FzFdc+jLTr3QdX0zJvNOuYQO7RnH5jiqsc7KQa0ugt2ZXm0m5iBYKrAd1YVSdGjOGUqfQjFdHFc7xhuabMsUqFJY1bd0buo9qGCbRz8NxNbzLLBK8cinKsjEEfjXpvg/xeNSRdP1GQC8HEch/5aj39/515vd6fLagSfehJwHA7+lV43ZHDKxDA5BHauPF4SniIcst+/Y7MNip0ZXWx7/uKtj8qduPqa5/w1ry6/pKyMNt1BhZRnqcfe/GttHBFfHVaMqc3CS1R9TTnGpBTj1Jdx9TSbm/vH86bupC1ZWNLD/Mb+8fzp80wt9Pe5ZizbtqRh8Fj37Hp9KrlhmqmoQm5hj8v5ZomLI4OCpOOh/AV04RUvar22xzYtVPZP2W5bjuzMu5d6A5wrn5sA46UTO7QOoPJUgZNVIBcNNHJO5JRCpLOWLk45Pp90cVYdgRRiI041X7LYrD+0dNe1Wo+KUyQRyK5IdQ2c+op4dv7x/OqdpIdksZ3ZSQjnuDyMe3OPwqfdWVSNpM2VrE3mN/eP50b2/vH86h3Uu6osOyJfMb+8fzo8xv7x/Ood1G6jlCyJvNf++350vmyf32/OoN1Luo5QsibzX/AL7fnR5r/wB9vzqHdRupWCyJ/Nk/vt+dJ5sn99vzqHdS7hRYLIl86Qf8tG/Ol8+X/no3/fRqLIoyKLBZEvnS/wDPRvzNHnS/89H/AO+jUW6l3UrByol86X/no350edL/AM9G/Oot1GaLByol86X/AJ6N+dHnS/8APRvzqLNLmiwuVdiTz5f+ejfnR58399vzqPNGaLByrsSfaJv+eho+0Tf89GqLcKQmjlQcsexN9pm/56Gj7TN/fNQk0m6jlQ+WPYdcXcohI3t85VOO2SBn9an+1Tf3zVNiDNFx0Jb9CP61LmrlFcqRPKr7E32qb++aQ3U3979KizSdTUcqK5I9i3byTTShQc/hXS2txDpsPmygFv4V/vGsWxRLeIyydhmuD8e+L3hZ9OtWIuHH7xwf9Wp/hHua1wlCdaso00ebjHBp82kV+Js+M/ii6F7W0lSeXoyqT5cf/wAUa8m1HXNR1Rybu7lkHULnCj6DpVBnLHrmk2k19jQwcKer1l3e54k67tyx0XZCE5pMVcW0YJl/lHv1rUttBmPzT7LaNWAdpWAZf+Ak5/yK63yx3MNWYAQnsaDGfSui+y6NEreffTyEOQFijC5XsQeR6+n41inrQpJ7Cd0VChHakK1c2g9Sad5MBGCGz65qtBGdzT4bme3lEkMrxuvRkYgj8RVx7OP+FyPrzVWW2kjG7G5fUdqGkJSaeh1/h/x5c2rpBqbNNDn/AF38a/X1H616Xa3yTxJLG6ujDIZTkMPavn0Eius8GeITp18LO4c/ZZzgEniN+x+nrXi5hlkZxdSkrPt3PXwOYNNU6ruu56y2ohWwFz+NJ/af/TP9azLh9lwAeN/8+9M3cda+d9jE9tqJqHVcf8s/1pP7VH/PL9ayi9ML0/Yx7BaJr/2sv/PI/nR/ayf88z+dYpemmSmqEewtDb/taP8A55n86P7Yh7o1YXmU1pBVLDxFdHQjV7Y9Q/5Uf2vag4xJn/drDsbuG3v4JZyBGjhmyMjANcxPdalNKXfU23HqRM/9K6qGX06iu3Ywq11B2sehNq9qOvmD/gFNOsWX99v++TXnO7VM5GoSvj/pvJz+YrSs5J1t2NzIWIyctnI/E9a0qZZTirpkwxKbtY2b3
R/EyrCljYIvmL5k8nyE7+4AGdoHQYGfetLTdBvDHjUr2+LnqoJCj9c0y3sI42WRri5ZhyrGY5H0I5FXDql9plpJM04u4Y1LlZx85x2Djp+INfQQraWaPjqkNfdZoQ+GdCLKZ1nkYdyCR+prpIND8HJApNpH5gHUqf5CvPIPGV/rC2r6RbWStITvtbnLSYH3mQhgHx3XAI9x0t6h4j1OxYH7NZvE33X8tx+B+eipOEdZIujGtJ8sH+J6XLZ+GDprReVZBWXB/dcn6jrXn+o+GdAeR/IuI4fTybWVf1FYr+M9QI/49rP/AL4f/wCKqBvFt+T/AMe1l/3w/wD8XWf1mjtb8DqWDxd7p/iMniudEm8x79prQuE2zRydCf4WZQMjrjNaZcMuQxwR2OK5fU7641SRGuCpCZKIowq59BWlpl0ZLfy2+9GADn07V42OjByVSkrH0OA9pGHJWdzVDPji4uVHos7j+tDB2XBurwj/AK+X/wAar+ZxR5prh56v8zO32dP+VfcSfZlllhaaa4m8lt8ayTMwU+oya6G2YQW7SkfdXNc/DJucZ9a1tQl8nTv94gf1/pXPWc6koqTuEoxUbRVjyrx/qbXOsrbCQssKZcdt55P6YrkURpJFRAWZjgAdyas6ndm91K5uW6ySFq1PDFk0txJeEgLAMLkZyx9PoMmvtcNSVKlGC6I+TxVXnqyl5nT+HdJi06zczeX5h5mcjJH+yD/n1pbm8udbvY9Ot5lhg3KrOxwkalgoJ/EgfU1Dql39nt1gA6jJI9abe2seg+HILue3uYtTu8NBIwBjaM87lI46EcHkHBrSTa0W7OaK6sgubgaLHb21hDJFq486C7J+cOC2F+U9CMfoD1re0XwZBBFDqPiCb5nbc0D9B357sfYfjR4W0UaXaf23qYbzpOUVh86g89ezMO/Ye54ztZ1e61LUpLSBMysdrsDwo/uj0/z3qYx6IUp9jfu/Gmk6LB5VhDbRNvI2RoOnvgYH61lL8Sbm5u1gh0+3mBOFL7gc/T/61MsfhLq+q3ETxSgW0qgiZ0xg9wRn9e9dhpvwu0Tw7qNrcXmvRvcRnJheMbScfXOK2UEtzCUooxrfxrOjAajZeXEx567cfgAf1NTz6J4e8UWwaDyLW7f7rRfLk/XofocGup1DwzA8Hn20lpcx87zCmAPwya8p8SXkGiaxtsAPMCgy4+4QedvvxjmplTW8WKnUu7GVqmj3+h3hgvIiuc7HA+Vx6ioVkLLg16Jp99B460JtPumAuVXfFL1YEcc+4yM+oNefXFpNZXc1rOu2WJyjD3FEZX0Z1RdyNSCTHIqsjcENWNdwfZ5ygJKHlCRjI7VrvwRVK9UyxgjGByAB3psoveENXOla7CXk228x8uUdsHofwOK9aJ8t8Z4PIrwUHmvYtB1E6p4YtblsebEPLf6rxn8Rg/jXg5xh/hqr0f6HuZTX3pP1Nnfz1oL+9VBLQZa8NwPa5iyW96bvquZaTzKOQXMWd/0pC4qv5lJ5lPkGpEiSBLsocDemRz3B/wDr/pU5es+aQIUlPRGBJ9B0P6GrBbBqpQ0TFfUn30m+oN9LvqOUdyffSb6h30b6OULk2+l3e9Qb6XzKOULlgNRuNQB6N9LlHcn3Uu6oQ9L5lLlBMm3Ubqi8wUb6XKO5Lupd1Q76N9HKFybdS7qg8ylD0uULk2+jdUO8Ub6OUVybdRuqHfRv5o5QuTbqTf71Fvppenyhcm3Uhaod9LvyaaiO45WUzu38SqFznp3P/stSB6rIy7Ayj75LE+vPH6AUvmVU462Ii+pZ3+9SW43yge9UfMzWlpowWkPQDNZTVolSlaNyLxRrC6PpEswxlF+VT/E56D+teGTzyXM7zSuXkc7mY9Sa7P4j6q019DYKTtQea/PVj0/Ifzrh0BdwqglicADvX1GUYVUqHO95fkfN5hVvPkWy/MsWttLdTrFDGzuxwAoroZNNt9MRU+W5u3wOOin0Hr9a0LW1i8P6QzMEa8mHzP8A3f8AZH+f5VFB5UGjXmpXsYaSeMLZ55IYPy3Xjp+NerKXKjzE3J+QsMa6aZ47iN21cMFgiSNZR/gDn6nj8a2bPwZe30X2zxJetapkFYmYb8erE9P1NaukWsXhjT5Nd1o+bqtzH543DJiQ9/ryB9SB71UgN545ka4RtkCNtCBs7T7+p/8A1VMYuTIqVbbbD5P+EQ03CRWdrOq8eY53Z9+Q2f0qBta8MSMETT9JI9oVB/MpWvb+C2ViwiWRkOGZIixz+ANcbr3w+1EXV3c2yOYy5fY0LLgE9uvStnSsjmjVUnqzpv7D8LaxEVEUVtMfutE+cfipwPxWuW8QeB73R4xc2zG7tTkl0XlPrj+Y/Ss/R9J1uCfcsnlwxNySc/gtd3omvPHOLedFViQJAeAw9SP6jke44rnalF3Rup22Z5Ypp2CD7V3vi7wojJJrOmR7YutxBjBjI6ke3+Oe9cPsyK1jJSRqndFG7tB5YmhA4HzqB096oqea2OUcH86ypU8qVlHQdPpVAz0nRNSk1PwxDMxzPZv5bc8sAAQT+Bx+BrZEgZAwPBGRXBeCr4RahPYsfku4yBzxuGSP6iustJiqtC3VDx9K+bxmH5KjS23+/wD4J9Hg6/tKSb32L7Se9RtJ71A0lRmSuRQOvmJzJ70wy+9QGSmF81agTzExlNNMlQ7jRk1fKTzD2fNRFVJ+6KXmnKpqkrCbuIqqB90UllZT+IdXTS7ZvLgX57q4AyEQdR9T0Hv+NQyi6vb2PS9OjMl5L1x0Re5PpxXpOi6NDoGlpYW53tndLLtwZXPfH6AV34Wh9uXyPJzHGqmvZw3PK/8AhIrPaAdMlwOxvZKjGu6djB0ptvcG8lP8zWeI7LjFxN16eSP/AIqrUVpDMAytcEL1Itxx+tehyQ/q543MyaDW9JgdXj0VY3XkMk75U5zleeD9K7PRvFFh4iD2N7GEkPQyMAZfcEYAYe3X69eZtdNWWVYmeRRggs1kP51oDwpC+HXUYw+OhhAIx9DWU3T2YrtO6LGraRJp85ABaFvuPjr/APXrLZTnkV2tgWitFtru4iuk2hTuGM//AF/eqd1o1rIS1tOoB6JIMH6Z6V59SOt4nsYbGwkuWo7M5M8VJa3Bt59w6Hg/Sto6DcMG2RGTHXZ82PyqlLpckfDIVPoRWEmtmenCXVEhux6ik+1j1qqbSQcYpn2d896y9lE19qzTguxvU571p+IbvOh7kPOx249lNcyIZAeKmu5JX0yWJicCNsflWboJzi+zKdRuLPLm5Ndzpy/YtFtIFAG9fNkOOrNyP021wx612l26phBjCRomR7KBX1h8jIksbS51rV9kNqboRfvZIQ4UugYAgEkcnIHrzTnaDxF46trSAyzafbqiRLJkHaiAYI92wDU3hGDVF1F9T0y6it2tpIo3MgLbvMYrjGORwT+FWvhsif8ACR3l07qMLtBYerbv/ZaybvJ+QS0gdD4zuhpsP2aKRSLckq3ZmbHJ9x1/EGq/w/0SK5uUubsDk+Y2/PIHPNc/8Q75JpnMY4dmJPr85H8lH5VZ
+Gt48djcmSV2DM67SxIAWMngfjWkPdRzzTcdDuvEviy7ubgabpj/AGeNOSo7DsTjqT1x0A61x9zPNIxJluJWzn5V3YFRR3Id55nA3Sykq38QJ5x716Bpt7d6fpyw6dbrgqGeZ0GXyAc5/lVPzM72MDT7qO1sBdQ6h9mk/iSRSNw9GAHI9wfwrL8RaZB4j0lb6EYkcE7VAJD9sexIwR6kEd6b4rkPnWs5CxrdFhtTnkHBPFXPDyy/2bdWzkEtGZQ2OQVOB+mKL2DzOO8DpqthqyyRwSrGcglkIUMAeCTxyMj8a6rxFs07xTpeuRWsdxFKql0kUFHYcYOf9kr/ADrfukdhDNMN6iReD7kD+tZPiuAHwbavnmOVMfipH9KxnqzWE22jmfHlsYtca6XS302K5QSJC0iPnsSNnA5HSuMnc+ScYFelfE6FXbSr5II0Se2zvhfdGx4JwMDaeeR715nL901VOV4nWUGG1j6V3fw+v/3V5p7sNpxKg/Q/0rhpxiT6itTwvcG38Q2pzgOTGfxGB+uKxxlL2tCUTfCVPZ14v+tT0oTYJGehxS+d71UKsGOe5pOa+b5EfS85c873o873qnzSZPrR7NC5y753vS+aPWqOTS5NLkHzluVlkiZG5Vhgj1FPhnZ4EZ8bsYYj1HBqluOKW3ZhJJH1z849ux/p+dPkvFopS1L3m+9Hm+9VsN3oAao5B8xZ80+tKJPcVV+alG70o5A5y15nuKPM96rfN6UDPpS5B85a8z3pwk96pndSjdR7MOcueb70ebVPJoBal7MOcu+ZS+bVLLe9GT70vZj5y55lL5lUsn3pQze9L2Ycxd8yjzKp7m96C7e9Hsw5i7vo8yqQc0nmH3o9mHMXvMpN/vVLzDR5ho9mHMXfMpPMql5hpPMNP2Ycxd8ymSzFEJUjd/D7ntVXzTSeaPMXeAVBJwe/Bx+uKqNPUHPQuM6oojVvlQbR9BTPN96pNKSc0wymj2dyVI0BL83Wtu1OLAnoW4rlYnJcc1v3NyLXRDKzYEcbyE/QVlVp3aiupalpc8f8R3v27xBez5yDKVX6Dgfyqx4atle7N04yIvu59awXYtIzHqTk11emJ9msoo2IyyiTgevP8sV9pTgoRUV0Pjq03OTl3L12smrapb6fE2N7hQfT1NbWn6dY6v41s7CDYdLsoFeQbiV7tzk8dRkfWsPw+Fl8TwPJ5xEYklBhBLBlRmU8c8EA1s+CI3mXxBMXHnyBE3j/AGid386znfmIbtCxl+PPEFxqGqGySUkHaXRfX+BP+AgjP+0T7V1vw70RtItrrUby9NtbBQs+4DDH0HsOmepORVPTvDGm6osupzowuUlYhkPUjPUHitXxJcwpo+naRC5iiD/vj64Ax/M10QtFHHKfNoS6x4w13WTL/ZxNrpsZCjy1AYj19fyrGWbU44pLmHWt6oQGEjMeTnjac+nJHSruiW8eq6lPFMxhsrYDcsY+Zh0AHufWr2u6PZR2zzaX5kUUR3TQM2S49R7ilcSSKmnaimqQNbzokd4BkEdJfcGsm+tTE4uIyVkQfKQ2AR/nv2NVoZ4bS+txFKHEkoO0jBU4yKyvH91cfbYzHII7aRm/dx8DcArEn2w44+tJ6lxjroejeH7lbyyRJUR1m/dTg8deAcfofTOO1eb69pDaTrdxaAHy1bMZx1U9P8PwrvPB2JdOUAgzT2gaPI6N6/mM/jVL4kQRtdWd0nB2mNvcEBh/M1nHRmkJWlY80uU21lXfO1seor0Hxf4bstK0fTL+zvjN9rT5o2wSpCqSRjtkkfhXn9wv7o+xrSMk1dGzF0y4NrqdtODjZKrfhnmvSbiPybxiOn868sTlwB3NeshTc28Mo/iX+XFeXmK1i/U9TLZe7JehVzmjBParQtmp/wBkfGT09cV5tj0+Yo7T6UoQmrRWFfvTxj/gQpjTWqZ/ehvYA07BqQbKeIiR0py3tuW2pHIzHoMAUy71EWQHnpHbkjIErYOPoapQk9kTKSjq3YesBPaodRmXTrCS4bqOEHq3aq0GtQXkzRDUIoCBlXlyiE+mf/rVJeaFY6nta88Y2Kbeka7cD835rro4STknPRHFiMfThFqDuzoPAttZ2NmLozx3etagCWjhYPIq5+7gdOgJ6ds9K9QtdP8As6q8uxpsevC/T1+teK2mnW2mxqll4+Nuq9Fhuwg564UPXqHg3Tr60tp57zWbzUVuAhjNyzHbjOSA3IzkflXpSjofOVJc0nI44aNpZYN9mhxg9V6086TpwXAtIgAORyDmp/OjEoVMZBKjI6D1pFuVbZiQE8qcDnp3rzuaXcLsWOzSABI4QoAO45NPaFFZMpGA33TtzxUK3LDkEY5znpSm5dNpaQKQpGQO3NKw7lgeX0GwLt4+XrSGVVG7bjavTHGTUCTsI0zIQGXAOMCgzgqAZduB/wB9GpsNMsDYWOWVjwR8veuC1O48barqDQJDPbrCWEaxt5a7c9SxPzdu9dqXIDbmKtjccj3oE+12ZnTpkew960pz5HtcpTa2ZzNhpnifyx9q1hI8DlDEsh/Mj/GtY6ddmIj7erMBnLQr178DFajMSjBZUYgY3YpN7MHZJI+eFXHT60p2l0X3GscXWjtJnMXln4ht03QLZ3XcquUOPxODWBqGt6xaxSR3Wm+RkFdzxsBz79K9HcuefNQDgnaO4/8A11I7ArKrPGy8URVNbxRusfXtbmPAsc9a62+lEsrOARuVXwfQqD6e9d7daLpV87LPZWxywO4RgE/iOa5HxLYx2d8Y4QUi8tdi88ADGMnnHFehTrKeiOdS5mange2hu9P1iGXU/wCz22xPHKHCkOC2OvbJAP1rH8HSFbu+iU/vNisMd/nCn/0OpPCet2uialcSX0cr289tJAxhVWdC2MMAxAJBAqrJqFiPG0+oW/mpZXEzkBlVSobOCVBxw2DgHtVJPmY57MXxLZktcRquCSSHlbjg7sL9Qw611ngnwzLp+kOLq5QtK4lCRLnaChVhu7kq3b061U8RafueK5h+T5mRdy5wR/C3t1q94N1lYQmnXGQCSsLHofVD9P5Vs0+U5nK6IZNLaO9kSYZy5IyMc9McdulbOn+I2spoLCePfKMIsg5GDwBjvWrf/YFAhnmQ5H7tSfnHsCeo9OahsdJtmvUluPKdQQM3Ecquo9MjANCaktTJ3M3xRomo/bLa6kiWQZKwoo5Vic9KsQwjTdKuLmR/mC+XyOSoUFz7/dwPUkV6Jqt5o1np8dzEkEkiqEG0Y49CTzjivDPiDr95PdNaCNlSdQQ+MAr/AHV9s4z6nFS10RcdXY1YvHMfiFVsobI29xHiV5FffHhTnjjPXAqXxTdKvhmztmYeZJIrAewQHP8A4+Kz/A3h6SyLRzvsuZiHlCjPloP4TnqSeoHp61uzWN74p8cn+zYYJItOwxWQ4R9hA29MZYjFKSSWhUfj0Mb4ix2ljp+iWFpcNIvkGaQeYXXJwoYZJwDt6DivMpDwT7103jG80+48QXj6VZtaWm4KsLYypAAboSBznpxXMN0A7mlTjyxsdlyrPzIPpSwSNBc
RzIcNGwYfUHNIWR5Gyec4FPEQPRs/Sra7iT1uj0pL15VSQ4ZWUEcY4NWk2yKGXkfyrjtK1hbe2S3ut/y8K+M8ehrYg1e0Vw6XKficZrwKuGnCTVj6WlWp1YJp6m35ftSGL2NQp4h0rA3SgN3AII/nTv8AhINJP/LY/kP8aw9lPsPmXcf5fsaXy/Y00a5pJP8Ar8fl/jTv7a0r/n5X8SP8al059hqce4eX7Ux4iGR/MaMggZHcEgY/l+VOOt6SP+XlD9CP8abJrGkyxtG04wwwen+NOEJp7MblF6XJcOP+Wjfp/hS4k/56P+AX/CmRa9pRiQvP8+Pm4A5796X+3tH/AOex/T/Gm41L7fgF13HbSessv4bf/iaXb/00n/NP/iaZ/b+kjpL/AC/xo/t/Sf8Anp+q/wCNLlqdvwD3e4/aP+etx+af/EUmwf8APW5/NP8A4im/29pH/PX9V/xpf7e0j/nr+q/40ctTt+Ae73HbfSa4/NP/AIik2d/OuPzT/wCJo/t3SP8Anr/6D/jS/wBu6Of+W/5lf8aVqvb8Cvc7/iJtP/PWf/xz/wCJo2HP+um/8c/+Jpf7c0j/AJ7j8x/jR/bWlH/lsP0/xo/e/wAv4f8AAC0O/wCIm1v+es3/AI5/8TRtb/nrL/45/wDE1INX0k/8t/5U7+1NKP8Ay3/8dNF6n8v4f8AfLH+mRbZP+esn/jv/AMTTdsv/AD2f/vlf8Ks/2jpn/Pc/98H/AApf7Q03HE//AI43+FLmn/L+C/yDlRV2T/8APdv++F/wppS4I/4+PzjFXRe6ex/16/ip/wAKd9psSf8Aj4j/ACP+FTzS7fgPlRm7Lrtcx/jD/wDZUeXd/wDPxD/35P8A8VWl59j/AM/Ef5H/AApfMsj/AMvEf60ueXb8A5EZgjvP+fmH/vwf/i6Xy7zvcQ/9+D/8XWmHsv8An4j/AFpc2Z6TpT9o+34BymV5d3/z8Q/9+D/8XR5d3/z8Qf8Afg//ABdapFt2mj/Om4g/56x/99Cl7V9vwDlMspd4/wCPiD/vwf8A4umxrP8AaP3skbgIeFjKYyR7n0rUZIT/AMtov++hTY4EcyMrKQDtz9P/AK5NUqmj0/ATVigQf7v5GmkHH+rJ/EVqfZV9RSG1A7ip5kMzUOxgSjAe2DVnxdcG28HzHIy8aoAf9o/4U+VYoULO6gDnGeTXG+NNae6t7ezJJAO8j0wMD+tbYai6teFtk7meIqqnQk/I5CNPMlRM43EDJrs7p9t6VBOAgAyMcDj0FcXFJsmjf0YGux1QGO+LOwZiATgYxX073Plrk/hdbuXxHDHYSpHdPHMI96bgx8tjtx79PxrY8IvPCNfhZPLulkXMeOhBbI/PFcvY6ncaLq9vqNoV8+3cOm4ZGfcd62vD3iBbvxjLNLFFZDUVMTCAbUWTqDjPGSP1rJxblcU/hOz8MHfpOoCWaK3CXDgtIwULk5HX2o8aWMZtLe+s1SaLem8qxIcc8gjtyBn2rz7xxpzpcC5jA8vaCRjBH/6jx+VbPgHxWj2jaFqCmQSDbEQB6Yxz37f55uztc5nHqjXtrtNGu1LOrRzjMixrjZjpg9TjvW8NStrqCV4bm3klYfKiEkntyOw+tYWp6JK7OLctNCihcIMsn+yQcfTkio/D2mXVjq8cs0Gy3zhi5AZR643c1Voy1M3ojSXwFLHLZ3zGNEmmjk29TgHP4cZrB8b+GrvUbCPULKJTFA8jyIWAIUhACM9eF5r1ee6W5hFnYh3Ljb5rDC474/lWdqcUAiXSIgsyxFZr3J4UZyqfVmA49B7ik3d6BGUtzi/DjSWA01UY74wsLDHQkAEfgTVDxVcySWttHKSW3ZBPoEUf1rvLHSo7YJNO2JcnaMdO5J+nJ/KuE1q2Oq+KLXSrf5Sg8t2c8IfvOT7KOv8Au0pKyNKUrzuReN7LToPDmk3Vu9u166qkvlSZJXykPzLng5J5715tN/qX6f5Nd98StTS91O1tnslt72zh8i5KfcbBJTae67SCM+tcdbaZe6mWhsraa4k4JSGMscepwOmcfnUU9InU3oUtNgSW+iErqkYOWLHHTtXZrr8VvCkS3SBVzjbz1NUbP4b+Lbv/AFWiTr7yssf/AKERW9afBnxLMitPLYW+eqPKWZf++VI/Ws69GFVpyZ0YfG+wi0ktTIl8TgcJNM49uKpv4ikkJAhdx/tS/wD1q9H0/wCCFusatqOsyu5HzJbwhQD9STn8hXa6J4C8P6B5clrp6yXKDH2if52J9cHgH6AVksPRXS455nVezt8jwqBdevQhs9JuHRxlWS3dgfxxita08HeO70Ax6XJCCcEzGOLHvhjmvoJmO371Rl/9oVSjBbRRyzx1aXU8p0f4Y6+rONS1G3COmADNK2w+oClcn8cVrQ/CSy+1l77Vpbq1PLQLbJEW9jIPmx+vvXemT/aH4im+YT3Wq5uxzSqzerZix+BvC8TRFdEtP3Qwu5M/nnr+OasS+H9DQADRbBnY4Vfsyc/p0rReXapPJ9MDrSQI6Au5QzN95gPyA9h/9fvRzMhyZFa6Nplngw2FoknXekCqc+2BxV0p/tGmeZIvp+VBmfPQH60mTds8im8P+Iw26WyuJGyT8ig4HbBqE6R4oSFCNKumkThdqAZHPXnrivXgx/2acHY8cVn7NGvtF2PI49F8SBY2Gk3eVzlCAOT/AEoOkeJS4X+y7oqM7iY8E/TmvXstjtS5fsRS9nEftV2PHptO8RxWnmvpd+zEj90sW45yPT2zTfsHiI/8we9KhAwBhPX0r2Vd3fFL83bFHs4h7RdjxZrbxJGQJdJvmEvZYWOwZ9cU0W3iJMu+m3CiRQoURHK9ueK9sAPc/rSgkd6XJEftF2PGTba2AYjZTruP9xuBn8v8/hUaxaztkL2sw2/KoMDZz9MV7Vz/AJNHP+TR7ND512PE2GsICwtXJ9RG3P4Yz3qRJdR2MWsrlWXB5hYZ5+le05b1pwLDrS9kg9oux4oLnWNzbbByM7gfLP8ALrWfr8N7e2E8txaSDycNG5BGQeDkfrXvjSiNd0jBF9WOBVW5vdMuraa2nv7MxyoUdTOnQjB71cKfLK6Gqi7HytISpIzyPSo8k9a2/FeiHQdZksxcRXMY+aOWJg29D0Jxnn1FYitjBFd8Wmi9zu/C+rDVrV9OvyZZEAwN3zyKO6/7Q9O4x6VNf6NJbyl0dsnDAqOvow9/cc+orh4zuIZThgcgg45rt9I8WIsAtdWj3DvMF3B/99fXH8Qwau9jCcGndHNazba2bw3kNzJc7xtKqOQPTb3HuOK1PCmu6zJqNvp9w00MB3DcqFADgkZxgcniu0tU0jVFY2l9GhLfcEynj6Ptb9D9auwaLMswbe23++I8nH/fOKLJkOelmi7pWkS6rKCDJMyZ3EnIX8TwP51X1rwtP9uj+1WzyQryrtgJG2OD6sf/ANWK7fQL3TdLscS3qB84PmMob8FFUvEuuSXkBg0qN/MdSv2hl+6Mc7VPOfcj86yk1EIq5wOszR6Bp8dvBzqdwpX5h8yqerbex6Yz9cDFQNnwj4
Ha+i1J7fVLhlKRIwwwO5ee5Iw+fQke1XbGLSvB5tta1WVLgXgdFYN5snQ5Yc5PIAJOOteW+JNfute1N7m4ldlHyxKxHyJk4HHGeeT3OTWWs5W6HTTikjGlfcx5qrLIUVmHpgcVI5ydufqaoTyb2AByqjH/ANeuhItsYOtdB4a0J9WvNzq4tYiDIyg8/wCzn3wau+EvBE/iC4Sa8vLfTtNB+e4nkUM3sik5J9+n8q+hNLv/AAjoOmQadZahp0UEK4A85cse7H1J9azqydrR3IvY8mk8I6LKSsaXUYA/glOT/wB9ZqI+B9JVyUuL3HUKzLnH/fNezN4l8Lt97UtOb/gamk/4SPwvuDfb9OyOMgrXHar/ADFe08jxaTwTp5LFJ7xSCRlowwGPwFRt4HtghYX0wHZvKBGfzr27/hI/DB66hp/5ikHiDwp/z/6Z+a/4Ufvf5vwGqnkeLP8AD+BU3jVX2nHP2fp/49VKTwLcCSQR38O2PktKhX+RNe7tr3hKQfPqGlN9Shp39v8AhPcW/tLSsngnenNNOquv4D9p5HgEngjUUGVmgf6ZHHr0pD4N1JAdpifGMYbr/n3r6BOu+FGHOoaUf+BpUg1fw24+W+005H99KfNV7/gL2vkfPK+D9UbHESgvsyzYGfyqNvCGqLtLCIZYr9//AOtX0X9r8Osf9dphPrmOnG58OtjL6WQOmTHRzVe/4B7XyPm+TwpqsZAMSEnphqYvhnVJFDCNNp6HdX0j9o8OBdofSguc4zHjNHmeHe7aX+cdHPU/pC9r5HzanhfVpArJbhg/TDAfzxUT6DqaY3Wj89MYPfH86+mRJ4dbjdpZHpmOgDw9nj+y/wAPLo56ge18j5pk8N6tGAzWpwe+9f8AGoX0XUoyAbKfJzwEJx9a+nPK8PNkbNMbPUfuzThDoWc+Xp5PrhKaqVB+18j5kk8P6tGqsbCYqwzlRuA+pHApg0HVD5hFhOfLOGwmfy9fwr6eW30MfdgsQPZE4pTa6LsGbay2qd2Si8e9HtJh7U+Xf7I1DdtFnPnGceWc4oGk6iWKrZXBYdQIicc4r6YSbwvPmNJtLfZ1XfHxSm38MnqNKOOR88f+NHtJdh+2PmZtN1Jd3+i3I2gMfkPAPelXTdWJ4tbrOccIf896+nE07w/cA7LewkH+ztP8qVtE0I8mxsz7bRT9pLsL2x8wCz1R1LCC7YDrhG4pf7O1UDd9lu8euxv89xX1B/ZWjAHbZ2wyMH5BTv7L0lzlrS2fH+wDR7SXYftj5dey1OMZe3ulHqUamSJfQ/6wXCc4+bcK+pv7J0ncG+xwbgcg+WOKH0vSZE2PY2rqTu2vApGfXkdafP3Qe3fmfKvnXA/5aS/maX7Rc/8APWb/AL6NfUh0PQyf+QXYf+Aqf4Un9h6F30rTz/26p/hT512D6zLufLn2i4/57yf99mlF3c9riT6bic19Rf2JofbSdP8A/AVP8KX+xdEPXSdO/wDAVP8AClzrsP6zNdT5eF7eZ+WeQdzzT0vtRK4hnnIzwFr6ujaONQke1VAwFUYAHpT959TReP8AKH1up3Z8oSXuqRY8yW4TPTdkZqxDD4hu5AkFtqcrMMgRwu2fyFfUrfvFwVJ/ClXzY1wAzgdFPUfQ1N4/yi+t1O7PlTVl1/QLmOLUYbm2lkjEiLOOqn/9XSucuriW5naWViznqa+svFvhWw8ZaLJYXOI7lPmgmKfNC/8Ageh9fwFfMfifwnrHhW/+zaraNCW+5IPmSQeqsOD/ADrai4PVKzCVec42kzCDV2Ly/btLgvE5GAsihOjAcnj6da4w1saHeKvm2cr7UlwUJGQrDvj6ZrdozTsXZfm59qq4KOCpKkHIPcVZIKllbscGo2HNCNHqeh2N1F4o0Uu+wXSfLPG3OTjG7Hoe/p+FZekaemgalLcwQeZNu2eW/wB9Af7nZs/nXL2N7Pp90lzbOUlQ8eh9j7V3dv4g0rWrYRzubK6IwVbGwn2P+f61VkzmknH0N2LV9PuWAm2JMOCsqlHHtg4NR3TXj3Df2bNpkMKRbpZb+ZgqZJAwoPPQ9c1nyaVcG2dIpmaFgCudwT2Pdf0qvYaRNazbmvLTB5IURA/otT7HqmZOaM7SL/xGPFy3dlqMtwFkEf2nZshdOmAp42+gr3vRND057ANEG8x38yWXdlpJD1Zj3/pXnVhYRo3ms7Pk581YyVH1Y/KPxIro7PxZDZKkVv8Av2j6RxOGB/33xgf7q59zSfKhKfMzT8SxRaZazyNKhwm2KNl+8euB/X2GO9ea6PayQx6t4ia7iSeyRpGjmXcLhWyHQ9/myBx611OpNLfkapqT+WkhYK+07IwoJ5IGFHHU/wA6848e+M011ILGzlme0jZZm8xQB5mxVwoAyFGD165zWLfM7I6acEjk9Z1K41fVbi+uWDTTuWbHQegHsBgfhXrfwb0gw6bd6rJGwMxEMRPQqOWI/HA/CvJNG0q71zV4LCziMk0rYAxwB3J9AK+oNOsItM0y2sYB+7gjCLwBnHf8etVPRWCrLSxazjsKAw9P1pCD6UmD2WsDAdv9v1o8z2pu0/3aPLPpSAGlyOVH60wlT1UfmaUo3pSFH/u/nQBGyoevX86aI1JOCp/SpCjE/dB/Gk8pj/CB+NAiMx/MDsY46YBPNKZQDg7gad5Lj+DOKCjkYIPuDTQCB165el3Kf4j+NJgjqin07fypCueMEfrQIn2D0pdg9D+VHngdf5Un2pR/CTWZQ7aB3I/CkJX+/wDpTftS+hppuU96Vxqw8lf79JuHHz1H56n+E0hkU87T+VTdjsibeo/i5qG6vfs6Bktri5JONkIUkD1O5gMUocHscVkeJLPWb+xWDRr2Gzcn9475DEezDp+X404u7KsN1LxbZaTGHv5prDd91JrMsx/J8VgS/FTSlBEV5Ox7E6fwf/Iormrz4ZeIriZpZb20nkbks0rkn8StUH+F/iUE7IYHHtMBn866oqC3Y9C/qfxX1aWd0sBbxQcbZDBhz74LMB+tYT/ELxPyBrNyM/3SF/kKs/8ACsvFP/PjH/4EJ/jTT8L/ABSf+XGP/wACE/xrRciKTic9fa/qOoHde3k9yf8AptIXx+ZqkdQkA4OB7V1p+Fnik/8ALin/AIEJ/jTf+FUeKT/y4xD63Cf40+aPcfMjjpb55F2uSy9wTVfcA+A25T37iu2Pwm8Vk8WUP/gQn+NNPwn8WLk/YYcDrm5QD+dPmj3KUkccjlDwa0bW+RcK4BHoazb23+wXT27zQyOpwxhkEij/AIEOD+BqJZB2aquDsztbR9KmUeaCv0rorJvDqIPNugB6Fa8rE2OjEfjSidv+eh/OhszdK/U95GpeG9I0BL9by3zNHIYYvMG5nUkAbAM4ODzmsDUPivBp2pRvotos0CQFGa4GC7tgk8c4GMe/NeSGQ9S1NMgPesnC71LjBJF6+1a7vhGs8zOIwVRSeFBJJA9OSTWa8nYcn1pHJPSo/KZh0IrRJbFNkEsmflU8dz60yJ2icOuNw6ZANWhaMR92lFkx7VVhXH/2tfMctOxJ7mnjVLnHM
hNN+xGpE06V/uoT+FKyGmA1O4/vmnDUrj++alTRbthlbeU/RDU6+HdRYgLZXBJ9Im/wpOw1Iqf2jP8A36X+0Z/79akfg3XZRlNHv2HqLZv8K0Ifhp4quIjLHo8oQdfMdEP/AHySCfypXiPmOb/tGf8AvUv9ozf3j+dXpPC+rRMVl066Rh1DQsD/ACpo8Paj/wA+U/8A37P+FF0HMU/7Sm/vfrQNSmH8X61dPhzUP+fKf/v2aQ+HdQ/58p/+/Zp6BzIqf2pP/e/WnDVph3P51Y/4R3Uf+fKf/v2aX/hHdR/58p/+/Zo0C5ANYm9f1o/tibuT+dSnw9qA/wCXOf8A79mmtoN8Bk2kw/7ZmloFyP8AtaU//rpP7Vl/yad/Yl7/AM+03/fBpP7Fu/8An3l/74NPQNBP7Vk/yaP7Vf8Auig6Pdj/AJd5P++DSf2Rdf8APCT/AL5NLQNBw1Vv7opRq0gPAAqL+y7gdY2/Kk/s2b+4fyo0DQsf2vL2P60f2vIevP41X/s+b+4fypDYSgfdpj0J11Mq25VAb1AqT+2pfU/nVL7FJ6Un2N6BOxfGuTDufzpRr06n5XZfoaz/ALG9H2N6BGl/wkN1/wA9pP8Avo0h1+5brM//AH0azvsb0n2V6LArGh/bk/8Az0b/AL6NL/bc/wDz0b/vqs/7K9H2V/SiwaGh/bc//PRv++qP7cnH/LRv++qzvsr0fZXoDQ0v+EguR0mcfRjS/wDCSXn/AD8S/wDfZrKNo/pTfsz+lAaGyPE98Ol1MPpIacPF2qpwuoXQHtM3+NYn2Z/Smm1k9KYtDeTxjqkcgkW+uRIOjiZgR+Oaj1HxfqOq2ptdRuri7hzuCTys4B9Rk8H3rE+ySelH2ST0pWQFKdULkxqVHoTmo13KQRkEcgir/wBjc9qabR89Komxet70Xcah+JUXGB3HsKeW59qzhbujBlyCOQRVlbgkYl6jv61JaZOCR1qZGB4JwKq7yehpwfj0+lNMGbdjqeoWChbW+uIU6hY5CB+XStqx1/Xby5jt47+4eWRgiKMZYngDpXGCRl/iqxbX89rOk0MhSWNg6OOqkHIIpt6EOCe56fLoPiOaUjWL1o412eYtxPu2qzBQSoJ4ycdK07680rwbrmn2N8Glt3jEsssZAKcsMbfqBXk1zr+pXXmebeysJAquA20MF+6CBwQO1Z0tw8jZdyT7mseWT3YciR12r+PdVu9Kk0eG4MenmaR9o4ZwzFtrHuBnpXGvIScnrTWf0NRnJq4xUSjf0LX7vQZHm0+6kt5pF2u0Zxkelby/EfxH0/te4/MVwg3DsaeA2eKuxLSZ3q/EjxGOmqy/jj/Cnf8ACx/EnfVJD/wFf8K4MB6eBJ70WQuVHdD4jeISedRY/VF/wp3/AAsXxF/0E3H0Vf8ACuEAf3pwD+9HKuwcqO6HxD8RHj+05PyX/Cl/4WD4j/6Ckv5L/hXDDeD3p4L+9Oy7C5UdqfiD4k7arJ/3yv8AhR/wn/iQ/wDMWk/75X/CuLG/3p6lsd6El2E4o7QePvEn/QVk/wC+V/wpR498Rnrqkn5L/hXGhj70/efenyx7E2O3tviL4gtn3PcRTr/cliXH5jB/Wte3+K1zuButMhde/kyFP57q8z3H3pQT70OnB9CbH0mCT2I/AUAdMseKT5/XFJhz/EPxrzrGdx3ydxn8KVdn900gU9z+VOANILigr2BpwAP/AOqmYPFKM9P6U7Idx/A7jP0/+vS9v/rVHgjr1oI9OKLBzEgHc/y/+vThgdGP5VEDjuPzo349KLD5ifp/F+lLv9x+VVvMPqBRu47UBzFoOPUUu8f5NUgx9qM+p/WgOYubyOgFJ5p9Kq7vcUb/AKfnQFytdaFod67Pc6Fp07scl5LVGJ/EiqD+CPCjnLeHdPB/2YwP5Vs78DJzVGTVDKfKsEMzlipkx+6THct357Lk/TrT5mPmZlzeB/BUAD3Gh2cYJwMlhk+gGeT7VWj+H/ha+IYeHIraPceCz73HbPzYX6cn6V0FpYiMrNczNc3QH+ukAGPZQOFH069ya0I1564p8zHdnPJ8NfCgA2+HoD9Wc/1p/wDwrfwt/wBC7b/+Pf411CySIOGNP8+RhkM350+Z9xXZzC/DzwmB/wAi9b5/4F/jT18A+FQMjQbUfgf8a6MyN6tn61E8rep/OjmfcLswx4L8MR/d0Oz+hhB/nTx4W8PJ93Q7Af8AbpF/hWp5h54agtnnkVLk+4XZmr4e0dDlNKsl+lug/kKsJZW0J/dW0Sf7qAVYJX3pCR/tUrjuxoG08KB9BThg9VH5Um7rwaNx6YNJsabHcAfd/KkyvdT+dG446Yoyx5pXGLlB2/WnBk/un86YT14o59KLjJg6Y+5/49/9anB0/ugfj/8AWqDn0H5Uiludygc9jmjmYFgSJn7n/j3/ANaneZH/AHP/AB7/AOtVbnjtScjH+FK4y2Jov+eZ/Ol82I8eXn8ap7j60mT7UXYXLu+L/nl+tJvi7RH86phwSRlTjqM07zD7fnRzMdyyzRn/AJZmmFYWHKfoKi8wHt+tG4e3Si4D/Kt+nlfoKaba1JyYQfqopu4A9aXzF7/zouAhsbJutuh+sYqJ9J05z81lbt9YVNWN6eppQUz1PT1pXYXKR0PSj1020P8A27rTf7C0j/oGWn/gOtX2Kg9aBg/xfrRcVzPOg6R/0DLT/wAB1pv9haR/0DLT/wAB1/wrTKjJ+cfnTdv+0KLgZ39haQf+YXZn/t3X/Ck/4R7RTydIsT9bZP8ACtLYO5FGweo/OncDN/4R3Qz10fT/AMbZP8KP+Ea8PnrommH62kf+FaYhB7r1/vCjyfTH5ii7AzP+Eb8Pf9AHS/8AwEj/AMKafDHhvPOg6Zn/AK9k/wAK1Rbk9MfnThat7fnT5mBjnwp4bPXQ9M/8Bk/wpp8IeGT/AMwLTvwt1/wra+ysO6/nR9mP94fnRzMDEHg3wz30Sx/CIUh8FeFj10O1/BcVuG3x3H50ogz/ABL+dHNIVznv+EI8Lf8AQEt/1/xpp8B+Fm/5gcP5n/GuhMDDoV/Ojyj6j86fOwuc4fAXhZeuiQ/99N/8VR/wg3hUD/kCW347j/Wuj8vdgH+dIYcdKOZiOabwH4UY5/sW3z9GIP4ZrJvvhd4UvXYC0uLEnoYJiV/8eBxXctCev9aTyTzwfzp88u4XPL5/ghpJY/Z9Wvoz/thGH8hVQ/A5c/L4kcfWyB/9qV6w0BVSIiYz7dPy6U3zJEYiRGA7MuSP8R/L3qlVl3DmZ5SPgax/5mU/+AP/ANspf+FFv/0MZ/8AAH/7ZXrKzpgEfMD3BpwnT+6fzp+1l3FzyPJf+FFSH/mY/wDyR/8As6T/AIUQxPPiHP8A25f/AGdevi6j7o3/AH1ThdR/3X/76p+0l3DnZ5CPgSR/zHx/4Bn/AOLp3/Cjgp51sH6Wn/2devC8Vf4H/Og6gOnlfmaftJdw52eSD4LRL11r/wAlP/s6X/hTUX/Qb/8AJP8A+zr1hruJvvREfSomkibkK4o9pLuHMzzBPg3bA/Pq8j/7tsB/7Mau
R/CHRl/1l7fMf9kov81Nd8WXHBYUzd/t/rS9pLuLmZyEXwr8OxDDLdy+7y/4AVaT4d+G4xtOmB/dpH/xrpg2f4/1pSf9r/x6lzy7hzM5r/hX/hr/AKBSD/gbf40h+H3hrP8AyDF/7+P/AI10+R/fH500nPf8c0ueXcV2c1/wr3wz/wBAtf8Av4/+NL/wr3w1/wBA1Pp5j/410ZwOmaTcR3NHPLuFznT8PfDY/wCYYP8Av4/+NJ/wr7w3/wBAsf8Afx/8a6TzcDqaT7Rzg5p+0l3Fc5o+AvDo/wCYYP8Av4/+NJ/wgfh3/oGj/v4/+NdP54OaduBo9pLuK5XDH1NLu6ZJqvv9F/I0AtngEVJKLO/p1o349ar5b0NJyR0/WgCx5nNHm47n86gA9jTgB/d/WgLkvm57t+dJvpuP9ninKqnqtAXDzO3OaC/t+Zp2xD2oCL60DG59v1oP0qTYuOn40BAenSgCI8Z4pMn2qbyuTTZnit4XmmkWNFBZixwABQMZk+1Vb7UIbCNTJueRziKGMZeVvRR3/p34qGS9udQDR6ZGyRkD/S5kIUZ/uKQN/Hf7vuelaFhpFrbs0uwPcP8Afnfl2/H09hge1CQFGK2u9SQHUP8AR4GUZtYny3uHcdfouB15ataGKOKNI40VY0G1VXgAegFWhapg5wPxzUiwRr0HNOwDIo0bkhqmEcXqfypQoHQD60vXtTGJsQcjOaDwOAcUHpx/OomzUgK2DTCikUmD70u0kHk0gGbOMU0qKftPoaaVPPynpQMZtpCPwpxXnpTduPWkAY96MD3NGDzwaNvpSGLgYpDgdqUA/hSEHsKAFyPSjd7H86Z83oaUg45oGO3e5pufY0cjt+tNOQfumkA/J9D+dIT7HNM5/umjHqDiiwxCR3pM/Wl2jPek8sHvSsAnHrRu6UeUKBGR2/WiwXELccZqpJd3aPhbCRl9RIo/rV3Z7frRg4osFyvb3EkyZlt5IT6OR/QmrGQTS7fbjHrTwgI6UWHcYGHoaXdjtUgRaTyx6UWAbuBpQc9jTvLHPFLsA6UWC4gPPSl3f7P5Gl2A5/xpdg7ZoAbn2pCfqKcV57n8aClAXGZ9/wBKAzDnI/Kl2nPQ0m09OfzoC4od+/8AKlD5puxv8mk8s96Yrku760m4d800Bh1pdp6UWAduHGDS7j70zaaXZQIkDAjo3SlPPrTAo75/Onbf85phcTgdjTww96TA7/zoxnP+NArik5HQ0hz2FOB5/wDr07/PWmFyHcfQ0E+oqQr3IppUHt+NAFc2ybmeMmNm5OOhPqR0P86ryu0G3zlyuPmkRTgH3HJA/P8ACtDGBTSPWgCkHVkV0Ksjchgcg/SlEntinyWURZniJjkbqVPBPqR0NQsksQAljz/txgkH8Oo/X60XFYkEqk+/1pdwIFQAhl3DkeoNGcetO4ifK+tHy1X3fjQGHfI/GgCxxjqable/FRZJAw1HzUCH7kpwK5xUJB7gUhGDQFycbT0NBUZ4quQQeM0Bj7/nTsK5OV/CjYO5P51Flvel3MB/9eiwXJNoHajA9DUe49hS7x6frRYLj9opdo9vzpm+l3nHXvTFczlj6HYOtP2AfwD8RUgt3OMAdfWni3fA6D8aCSBVXqVX8qeAvTaPyqYQuPT86cEIHXt60DIlUE8KBTwh9vzqRVAIzg/jT+B1x7e9AXGCNj6/nS+VLng/rUvmLwOKdnjgjpQMgMMm37x59WNN+yseN789xK3+NWc+hAP1pwYDnI96AKwsSDw8h+sz/wCNO+zFT1b8ZG/xp13qFrp9u01zPHFGoyWdgBXKSalq3iQgWAl0/TifmupExLIPRFPTP94/rTsUjU1bWYNMljtkSe6vJMFLa3YlgM9Tk4Vfcmo00+4vZI5tVmWTy33pbRZ8pSOhbPMhHXnAz271NYaZaacri3iCtI26R2JZnPqzHk1dyM5H86BNjzPJjOc8+lL9rkx/9YVAwLKQHYZ/iGMj8xS4x6596AJxdzAZB59MCj7fP/ex+FQdB/8AXpCcAcfhSGWvt0w/iH5Un26bAJIz9KqFschR1o3c5xQBbF/Ngcr+VH26X0WqgYDtT8jrt5pBcnN/IOy0fb5scBfyP+NQkFgBjB/OkC+1AXLH9oTDjYp/A/40f2hIRgqv5f8A16r4JHvSFWHpmkMs/bWP8A/z+NIbtz/yzH51Dg9MGjbzjmgZL9qfOCi/nTvtJH/LMfnUOOen60EfWkBObpgOEH/fX/1qPtR7oPz/APrVBg4PagjuKAJvtXP3B19aX7Rnt+tVtpx1zzThSGWvMJ/hH504OD1H61XBPbA5xmgE98UwLG4dSD+dGFPYn8ag3c/z5oB9T9KB3Jwqen60YXt/OoMnPBpAxzgmgLkxHsD+NKCTjC/rUIc98YoMpzwP1oAl59B+dJkg/dGPrUPmtjnr9aPMPQntQFyXceOP1pRI3ZePrUJOcfTrSkk+nT1oC5L5rf3DS+Yc52H8/wD69Q7iAcn8qPM4PP50rBcmM5/55n9P8aTzjnlG/DH+NRb8Z4H50hkx9O1AE3nHk+W36f40n2og/wCpf8x/jUZfrzSE9/60wuSm7P8Azxkx/wAB/wAaT7XnnyZR+X+NRbm/D60bj7UguTfae/lSfp/jTTc88wTfp/jTA5/ClLYGT0osK48XIH/LGX9P8actyP8AnlKPy/xqDdwCBnn1oZgew6+tMLljz1HRG/Kl+0AfwP8AkP8AGqwbkY6fWnA8Z/rQBN9qX/nm/wCQ/wAaPtS8fupPyH+NQdSMg/nSgD0P50Bcn+1oDyjnHoKQ3yqeYZceoXNQgD0pe3pSAm+2Rnosn/fJpftUQ/56f98H/Cq/PXA6YoHA6fWmIsfaoh2f/vg/4U4XkIP8f/fDf4VVyucU/cB6Y+tAFn7RF2Y/98n/AAo+0R5+8f8Avk/4VV3c5yKcHHrj8aALP2iI5w4/I0nmJ13j9ahGDSgDp/WgVyQzRj+IUn2iP+9Ue3OTj9aNv0FMZBNFG0pkjYo56lejfUdDVZ52gAE67gWI3RKxwPcY4/M1ocAUhUGhCKasrAEK4B5BPFH8I4b8BUzwg9DtPtVdvlI3AJnuW+Un69vxpiJVZeM7h9QaeGB7n8jUXp1/OlC8j1NNCZJnjO4YpmByc0oGRnP60uD/AHv1oFcZ8v8AepDs9T+dOwfc/jRjsDyKAG5QA4OfxpRICOmKQqeeCM+9JsOM5oAcGQnqKUlfVfxNRhev9DS459/c0wA4Azx+dISnTcP++hSH3/nSHOeCPzosK5//2Q==" + ] + } + }, + "widgets_values": [ + "[{\"x\":374.36899413239337,\"y\":315.67555013706055},{\"x\":377.5416127267357,\"y\":142.76783674540425,\"fix\":null}]", + 
"[{\"x\":374.3689880371094,\"y\":315.675537109375},{\"x\":374.4350891113281,\"y\":312.07330322265625},{\"x\":374.5011901855469,\"y\":308.4710388183594},{\"x\":374.5672912597656,\"y\":304.8688049316406},{\"x\":374.6333923339844,\"y\":301.26654052734375},{\"x\":374.6994934082031,\"y\":297.664306640625},{\"x\":374.76556396484375,\"y\":294.06207275390625},{\"x\":374.8316650390625,\"y\":290.4598083496094},{\"x\":374.89776611328125,\"y\":286.8575744628906},{\"x\":374.9638671875,\"y\":283.25531005859375},{\"x\":375.02996826171875,\"y\":279.653076171875},{\"x\":375.0960693359375,\"y\":276.05084228515625},{\"x\":375.1621398925781,\"y\":272.4486083984375},{\"x\":375.2282409667969,\"y\":268.84637451171875},{\"x\":375.2943420410156,\"y\":265.2441101074219},{\"x\":375.3604431152344,\"y\":261.6418762207031},{\"x\":375.4265441894531,\"y\":258.03961181640625},{\"x\":375.4926452636719,\"y\":254.4373779296875},{\"x\":375.5587463378906,\"y\":250.83514404296875},{\"x\":375.62481689453125,\"y\":247.23291015625},{\"x\":375.69091796875,\"y\":243.63064575195312},{\"x\":375.75701904296875,\"y\":240.02841186523438},{\"x\":375.8231201171875,\"y\":236.42617797851562},{\"x\":375.88922119140625,\"y\":232.8239288330078},{\"x\":375.955322265625,\"y\":229.2216796875},{\"x\":376.02142333984375,\"y\":225.61944580078125},{\"x\":376.0874938964844,\"y\":222.01718139648438},{\"x\":376.1535949707031,\"y\":218.41494750976562},{\"x\":376.2196960449219,\"y\":214.81271362304688},{\"x\":376.2857971191406,\"y\":211.21046447753906},{\"x\":376.3518981933594,\"y\":207.60821533203125},{\"x\":376.4179992675781,\"y\":204.0059814453125},{\"x\":376.48406982421875,\"y\":200.4037322998047},{\"x\":376.5501708984375,\"y\":196.80148315429688},{\"x\":376.61627197265625,\"y\":193.19924926757812},{\"x\":376.682373046875,\"y\":189.5970001220703},{\"x\":376.74847412109375,\"y\":185.9947509765625},{\"x\":376.8145751953125,\"y\":182.39251708984375},{\"x\":376.88067626953125,\"y\":178.790283203125},{\"x\":376.9467468261719,\"y\":175.18801879882812},{\"x\":377.0128479003906,\"y\":171.58578491210938},{\"x\":377.0789489746094,\"y\":167.98355102539062},{\"x\":377.1450500488281,\"y\":164.38128662109375},{\"x\":377.2111511230469,\"y\":160.779052734375},{\"x\":377.2772521972656,\"y\":157.17681884765625},{\"x\":377.34332275390625,\"y\":153.57456970214844},{\"x\":377.409423828125,\"y\":149.97232055664062},{\"x\":377.47552490234375,\"y\":146.3700714111328},{\"x\":377.5416259765625,\"y\":142.76783752441406}]", + 720, + 480, + 49, + "path", + "basis", + 0.5, + 1, + "list", + 0, + 1, + null, + null, + null + ] + }, + { + "id": 75, + "type": "DownloadAndLoadToraModel", + "pos": { + "0": 1074, + "1": 937 + }, + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "tora_model", + "type": "TORAMODEL", + "links": [ + 193 + ] + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadToraModel" + }, + "widgets_values": [ + "kijai/CogVideoX-5b-Tora" + ] + }, + { + "id": 66, + "type": "VHS_VideoCombine", + "pos": { + "0": 1485, + "1": 436 + }, + "size": [ + 605.3909912109375, + 714.2606608072917 + ], + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 203 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + 
"type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "CogVideoX-Tora-trajectory", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "CogVideoX-Tora-trajectory_00011.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + }, + "muted": false + } + } + }, + { + "id": 90, + "type": "Note", + "pos": { + "0": 339, + "1": 1066 + }, + "size": [ + 251.63747656176258, + 73.90463053872986 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [], + "outputs": [], + "properties": {}, + "widgets_values": [ + "Three sets of coordinates are created here and appened to a list" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 92, + "type": "Note", + "pos": { + "0": 1200, + "1": 1045 + }, + "size": [ + 251.63747656176258, + 73.90463053872986 + ], + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [], + "outputs": [], + "properties": {}, + "widgets_values": [ + "Coordinates are used to create optical flow video, which is then encoded for Tora" + ], + "color": "#432", + "bgcolor": "#653" + }, + { + "id": 79, + "type": "CogVideoSampler", + "pos": { + "0": 1089, + "1": 17 + }, + "size": [ + 330, + 570 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 204 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 197 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 198 + }, + { + "name": "samples", + "type": "LATENT", + "link": null, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": 226, + "shape": 7 + }, + { + "name": "context_options", + "type": "COGCONTEXT", + "link": null, + "shape": 7 + }, + { + "name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": 200, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, + "shape": 7 + }, + { + "name": "num_frames", + "type": "INT", + "link": 201, + "widget": { + "name": "num_frames" + } + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 202 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoSampler" + }, + "widgets_values": [ + 49, + 40, + 6, + 3, + "fixed", + "CogVideoXDDIM", + 1 + ] + }, + { + "id": 80, + "type": "DownloadAndLoadCogVideoModel", + "pos": { + "0": 118, + "1": -85 + }, + "size": [ + 378.8459921214321, + 218 + ], + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "block_edit", + "type": "TRANSFORMERBLOCKS", + "link": null, + "shape": 7 + }, + { + "name": "lora", + "type": "COGLORA", + "link": null, + "shape": 7 + }, + { + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "links": [ + 204 + ] + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 205, + 206, + 224 + ] + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadCogVideoModel" + }, + "widgets_values": [ + "alibaba-pai/CogVideoX-Fun-V1.1-5b-InP", + "bf16", + "disabled", + false, + "sdpa", + "main_device" + ] + }, + { + "id": 93, + "type": 
"CogVideoImageEncodeFunInP", + "pos": { + "0": 623, + "1": 79 + }, + "size": { + "0": 380.4000244140625, + "1": 146 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 224 + }, + { + "name": "start_image", + "type": "IMAGE", + "link": 225 + }, + { + "name": "end_image", + "type": "IMAGE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "image_cond_latents", + "type": "LATENT", + "links": [ + 226 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoImageEncodeFunInP" + }, + "widgets_values": [ + 49, + false, + 0 + ] + } + ], + "links": [ + [ + 54, + 20, + 0, + 30, + 0, + "CLIP" + ], + [ + 146, + 60, + 0, + 67, + 0, + "MASK" + ], + [ + 149, + 67, + 1, + 65, + 2, + "INT" + ], + [ + 150, + 67, + 2, + 65, + 3, + "INT" + ], + [ + 153, + 65, + 0, + 68, + 1, + "IMAGE" + ], + [ + 154, + 65, + 1, + 68, + 2, + "MASK" + ], + [ + 155, + 56, + 0, + 68, + 0, + "IMAGE" + ], + [ + 156, + 68, + 0, + 44, + 0, + "IMAGE" + ], + [ + 166, + 72, + 0, + 73, + 0, + "IMAGE" + ], + [ + 187, + 73, + 0, + 60, + 0, + "IMAGE" + ], + [ + 189, + 67, + 3, + 78, + 3, + "INT" + ], + [ + 190, + 67, + 1, + 78, + 4, + "INT" + ], + [ + 191, + 67, + 2, + 78, + 5, + "INT" + ], + [ + 193, + 75, + 0, + 78, + 0, + "TORAMODEL" + ], + [ + 197, + 30, + 0, + 79, + 1, + "CONDITIONING" + ], + [ + 198, + 31, + 0, + 79, + 2, + "CONDITIONING" + ], + [ + 200, + 78, + 0, + 79, + 7, + "TORAFEATURES" + ], + [ + 201, + 67, + 3, + 79, + 9, + "INT" + ], + [ + 202, + 79, + 0, + 56, + 1, + "LATENT" + ], + [ + 203, + 78, + 1, + 66, + 0, + "IMAGE" + ], + [ + 204, + 80, + 0, + 79, + 0, + "COGVIDEOMODEL" + ], + [ + 205, + 80, + 1, + 78, + 1, + "VAE" + ], + [ + 206, + 80, + 1, + 56, + 0, + "VAE" + ], + [ + 209, + 30, + 1, + 31, + 0, + "CLIP" + ], + [ + 210, + 73, + 0, + 82, + 0, + "IMAGE" + ], + [ + 211, + 82, + 1, + 83, + 1, + "STRING" + ], + [ + 212, + 60, + 1, + 83, + 0, + "STRING" + ], + [ + 216, + 73, + 0, + 85, + 0, + "IMAGE" + ], + [ + 217, + 83, + 0, + 86, + 0, + "STRING" + ], + [ + 218, + 85, + 1, + 86, + 1, + "STRING" + ], + [ + 219, + 86, + 0, + 65, + 0, + "STRING" + ], + [ + 220, + 86, + 0, + 78, + 2, + "STRING" + ], + [ + 224, + 80, + 1, + 93, + 0, + "VAE" + ], + [ + 225, + 73, + 0, + 93, + 1, + "IMAGE" + ], + [ + 226, + 93, + 0, + 79, + 4, + "LATENT" + ] + ], + "groups": [ + { + "title": "VisualizeTrajectories", + "bounding": [ + 1124, + 1198, + 832, + 413 + ], + "color": "#3f789e", + "font_size": 24, + "flags": {} + } + ], + "config": {}, + "extra": { + "ds": { + "scale": 0.5209868481925474, + "offset": [ + 1223.1532630983777, + 259.0053418875374 + ] + } + }, + "version": 0.4 +} \ No newline at end of file diff --git a/examples/cogvideox_fun_pose_example_01.json b/examples/cogvideox_Fun_pose_02.json similarity index 78% rename from examples/cogvideox_fun_pose_example_01.json rename to examples/cogvideox_Fun_pose_02.json index da8750d..9d85910 100644 --- a/examples/cogvideox_fun_pose_example_01.json +++ b/examples/cogvideox_Fun_pose_02.json @@ -1,48 +1,7 @@ { - "last_node_id": 82, - "last_link_id": 182, + "last_node_id": 86, + "last_link_id": 195, "nodes": [ - { - "id": 31, - "type": "CogVideoTextEncode", - "pos": { - "0": 497, - "1": 520 - }, - "size": { - "0": 501.0985412597656, - "1": 138.65379333496094 - }, - "flags": {}, - "order": 4, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 56 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 179 - ], - "slot_index": 0, 
- "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "The video is not of a high quality, it has a low resolution. Watermark present in each frame. Strange motion trajectory. Character is speaking", - 1, - true - ] - }, { "id": 60, "type": "WidgetToString", @@ -55,13 +14,14 @@ "1": 130 }, "flags": {}, - "order": 6, + "order": 5, "mode": 0, "inputs": [ { "name": "any_input", "type": "*", - "link": 128 + "link": 128, + "shape": 7 } ], "outputs": [ @@ -150,14 +110,153 @@ ] }, { - "id": 11, + "id": 44, + "type": "VHS_VideoCombine", + "pos": { + "0": 1842, + "1": -5 + }, + "size": [ + 1186.0863037109375, + 1457.6950174967449 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 150 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "CogVideoX_Fun_Pose", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "CogVideoX_Fun_Pose_00001.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + }, + "muted": false + } + } + }, + { + "id": 84, + "type": "DownloadAndLoadCogVideoModel", + "pos": { + "0": 605, + "1": -12 + }, + "size": [ + 377.9334482359568, + 218 + ], + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [ + { + "name": "block_edit", + "type": "TRANSFORMERBLOCKS", + "link": null, + "shape": 7 + }, + { + "name": "lora", + "type": "COGLORA", + "link": null, + "shape": 7 + }, + { + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "links": [ + 186 + ], + "slot_index": 0 + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 188, + 191 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadCogVideoModel" + }, + "widgets_values": [ + "alibaba-pai/CogVideoX-Fun-V1.1-5b-Pose", + "bf16", + "disabled", + false, + "sdpa", + "main_device" + ] + }, + { + "id": 85, "type": "CogVideoDecode", "pos": { - "0": 1451, - "1": 363 + "0": 1461, + "1": 357 }, "size": { - "0": 282.7455749511719, + "0": 315, "1": 198 }, "flags": {}, @@ -165,14 +264,14 @@ "mode": 0, "inputs": [ { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 180 + "name": "vae", + "type": "VAE", + "link": 188 }, { "name": "samples", "type": "LATENT", - "link": 181 + "link": 189 } ], "outputs": [ @@ -180,10 +279,8 @@ "name": "images", "type": "IMAGE", "links": [ - 124 - ], - "slot_index": 0, - "shape": 3 + 190 + ] } ], "properties": { @@ -225,10 +322,18 @@ "type": "CONDITIONING", "links": [ 128, - 178 + 183 ], "slot_index": 0, "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 194 + ], + "slot_index": 1 } ], "properties": { @@ -237,33 +342,152 @@ "widgets_values": [ "a brown bear is dancing in a forest, in front of a waterfall", 1, + false + ] + }, + { + "id": 31, + "type": "CogVideoTextEncode", + "pos": { + "0": 
507, + "1": 517 + }, + "size": { + "0": 501.0985412597656, + "1": 144 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 194 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 184 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "The video is not of a high quality, it has a low resolution. Watermark present in each frame. Strange motion trajectory. Character is speaking", + 1, true ] }, + { + "id": 20, + "type": "CLIPLoader", + "pos": { + "0": 2, + "1": 412 + }, + "size": { + "0": 451.30548095703125, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CLIPLoader" + }, + "widgets_values": [ + "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", + "sd3" + ] + }, + { + "id": 72, + "type": "INTConstant", + "pos": { + "0": -498, + "1": 276 + }, + "size": { + "0": 210, + "1": 58 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "value", + "type": "INT", + "links": [ + 152, + 195 + ], + "slot_index": 0, + "shape": 3 + } + ], + "title": "Frames", + "properties": { + "Node name for S&R": "INTConstant" + }, + "widgets_values": [ + 49 + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, { "id": 65, "type": "VHS_LoadVideo", "pos": { - "0": -191, - "1": 564 + "0": -506, + "1": 477 }, "size": [ 390.1356201171875, 910.0188802083334 ], "flags": {}, - "order": 5, + "order": 4, "mode": 0, "inputs": [ { "name": "meta_batch", "type": "VHS_BatchManager", - "link": null + "link": null, + "shape": 7 }, { "name": "vae", "type": "VAE", - "link": null + "link": null, + "shape": 7 }, { "name": "frame_load_cap", @@ -333,47 +557,12 @@ } } }, - { - "id": 20, - "type": "CLIPLoader", - "pos": { - "0": 2, - "1": 412 - }, - "size": { - "0": 451.30548095703125, - "1": 82 - }, - "flags": {}, - "order": 0, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "CLIP", - "type": "CLIP", - "links": [ - 54, - 56 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CLIPLoader" - }, - "widgets_values": [ - "t5\\clip\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", - "sd3" - ] - }, { "id": 80, "type": "DWPreprocessor", "pos": { - "0": 260, - "1": 742 + "0": -66, + "1": 583 }, "size": { "0": 364.7358703613281, @@ -418,83 +607,12 @@ "dw-ll_ucoco_384_bs5.torchscript.pt" ] }, - { - "id": 44, - "type": "VHS_VideoCombine", - "pos": { - "0": 1842, - "1": -5 - }, - "size": [ - 1186.0863037109375, - 310 - ], - "flags": {}, - "order": 15, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 150 - }, - { - "name": "audio", - "type": "AUDIO", - "link": null - }, - { - "name": "meta_batch", - "type": "VHS_BatchManager", - "link": null - }, - { - "name": "vae", - "type": "VAE", - "link": null - } - ], - "outputs": [ - { - "name": "Filenames", - "type": "VHS_FILENAMES", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "VHS_VideoCombine" - }, - "widgets_values": { - "frame_rate": 8, - "loop_count": 0, - "filename_prefix": "CogVideoX_Fun_Pose", - "format": "video/h264-mp4", - "pix_fmt": "yuv420p", - "crf": 19, - "save_metadata": true, - "pingpong": false, - 
"save_output": false, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "CogVideoX_Fun_Pose_00004.mp4", - "subfolder": "", - "type": "temp", - "format": "video/h264-mp4", - "frame_rate": 8 - }, - "muted": false - } - } - }, { "id": 37, "type": "ImageResizeKJ", "pos": { - "0": 292, - "1": 1004 + "0": -4, + "1": 829 }, "size": { "0": 315, @@ -512,7 +630,8 @@ { "name": "get_image_size", "type": "IMAGE", - "link": null + "link": null, + "shape": 7 }, { "name": "width_input", @@ -572,8 +691,8 @@ "id": 61, "type": "GetImageSizeAndCount", "pos": { - "0": 645, - "1": 1000 + "0": 378, + "1": 828 }, "size": { "0": 277.20001220703125, @@ -595,7 +714,7 @@ "type": "IMAGE", "links": [ 135, - 175 + 192 ], "slot_index": 0, "shape": 3 @@ -625,188 +744,149 @@ "widgets_values": [] }, { - "id": 71, - "type": "DownloadAndLoadCogVideoGGUFModel", + "id": 86, + "type": "CogVideoImageEncode", "pos": { - "0": 515, - "1": 35 + "0": 717, + "1": 808 }, "size": { - "0": 466.3737487792969, - "1": 174 - }, - "flags": {}, - "order": 1, - "mode": 0, - "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null - }, - { - "name": "block_edit", - "type": "TRANSFORMERBLOCKS", - "link": null - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 176, - 177 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadCogVideoGGUFModel" - }, - "widgets_values": [ - "CogVideoX_5b_fun_1_1_Pose_GGUF_Q4_0.safetensors", - "bf16", - false, - "main_device", - false - ] - }, - { - "id": 81, - "type": "CogVideoControlImageEncode", - "pos": { - "0": 859, - "1": 769 - }, - "size": { - "0": 367.79998779296875, - "1": 146 + "0": 315, + "1": 122 }, "flags": {}, "order": 10, "mode": 0, "inputs": [ { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 176 + "name": "vae", + "type": "VAE", + "link": 191 }, { - "name": "control_video", + "name": "start_image", "type": "IMAGE", - "link": 175 + "link": 192 + }, + { + "name": "end_image", + "type": "IMAGE", + "link": null, + "shape": 7 } ], "outputs": [ { - "name": "control_latents", - "type": "COGCONTROL_LATENTS", + "name": "samples", + "type": "LATENT", "links": [ - 182 + 193 ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "width", - "type": "INT", - "links": null, - "shape": 3 - }, - { - "name": "height", - "type": "INT", - "links": null, - "shape": 3 + "slot_index": 0 } ], "properties": { - "Node name for S&R": "CogVideoControlImageEncode" + "Node name for S&R": "CogVideoImageEncode" }, "widgets_values": [ - 512, false, 0 ] }, { - "id": 82, - "type": "CogVideoXFunControlSampler", + "id": 83, + "type": "CogVideoSampler", "pos": { - "0": 1085, - "1": 312 - }, - "size": { - "0": 311.2205810546875, - "1": 350 + "0": 1089, + "1": 316 }, + "size": [ + 330, + 570 + ], "flags": {}, "order": 11, "mode": 0, "inputs": [ { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 177 + "name": "model", + "type": "COGVIDEOMODEL", + "link": 186 }, { "name": "positive", "type": "CONDITIONING", - "link": 178 + "link": 183 }, { "name": "negative", "type": "CONDITIONING", - "link": 179 - }, - { - "name": "control_latents", - "type": "COGCONTROL_LATENTS", - "link": 182 + "link": 184 }, { "name": "samples", "type": "LATENT", - "link": null + "link": null, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": 193, + "shape": 7 }, { "name": "context_options", "type": "COGCONTEXT", - "link": null + "link": null, + "shape": 7 + }, + { + 
"name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": null, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, + "shape": 7 + }, + { + "name": "num_frames", + "type": "INT", + "link": 195, + "widget": { + "name": "num_frames" + } } ], "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 180 - ], - "shape": 3 - }, { "name": "samples", "type": "LATENT", "links": [ - 181 - ], - "shape": 3 + 189 + ] } ], "properties": { - "Node name for S&R": "CogVideoXFunControlSampler" + "Node name for S&R": "CogVideoSampler" }, "widgets_values": [ - 42, - "fixed", - 25, + 49, + 50, 6, - "CogVideoXDPMScheduler", - 0.7000000000000001, 0, - 1, + "fixed", + "CogVideoXDDIM", 1 ] }, @@ -814,8 +894,8 @@ "id": 58, "type": "ImageConcatMulti", "pos": { - "0": 1472, - "1": 649 + "0": 1545, + "1": 679 }, "size": { "0": 210, @@ -833,7 +913,7 @@ { "name": "image_2", "type": "IMAGE", - "link": 124 + "link": 190 } ], "outputs": [ @@ -854,42 +934,6 @@ true, null ] - }, - { - "id": 72, - "type": "INTConstant", - "pos": { - "0": -198, - "1": 260 - }, - "size": { - "0": 210, - "1": 58 - }, - "flags": {}, - "order": 2, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "value", - "type": "INT", - "links": [ - 152 - ], - "slot_index": 0, - "shape": 3 - } - ], - "title": "Frames", - "properties": { - "Node name for S&R": "INTConstant" - }, - "widgets_values": [ - 49 - ], - "color": "#1b4669", - "bgcolor": "#29699c" } ], "links": [ @@ -901,22 +945,6 @@ 0, "CLIP" ], - [ - 56, - 20, - 0, - 31, - 0, - "CLIP" - ], - [ - 124, - 11, - 0, - 58, - 1, - "IMAGE" - ], [ 126, 58, @@ -990,78 +1018,102 @@ "IMAGE" ], [ - 175, - 61, - 0, - 81, - 1, - "IMAGE" - ], - [ - 176, - 71, - 0, - 81, - 0, - "COGVIDEOPIPE" - ], - [ - 177, - 71, - 0, - 82, - 0, - "COGVIDEOPIPE" - ], - [ - 178, + 183, 30, 0, - 82, + 83, 1, "CONDITIONING" ], [ - 179, + 184, 31, 0, - 82, + 83, 2, "CONDITIONING" ], [ - 180, - 82, + 186, + 84, 0, - 11, + 83, 0, - "COGVIDEOPIPE" + "COGVIDEOMODEL" ], [ - 181, - 82, + 188, + 84, 1, - 11, + 85, + 0, + "VAE" + ], + [ + 189, + 83, + 0, + 85, 1, "LATENT" ], [ - 182, - 81, + 190, + 85, 0, - 82, - 3, - "COGCONTROL_LATENTS" + 58, + 1, + "IMAGE" + ], + [ + 191, + 84, + 1, + 86, + 0, + "VAE" + ], + [ + 192, + 61, + 0, + 86, + 1, + "IMAGE" + ], + [ + 193, + 86, + 0, + 83, + 4, + "LATENT" + ], + [ + 194, + 30, + 1, + 31, + 0, + "CLIP" + ], + [ + 195, + 72, + 0, + 83, + 9, + "INT" ] ], "groups": [], "config": {}, "extra": { "ds": { - "scale": 0.6934334949442492, + "scale": 0.6303940863129809, "offset": [ - 39.55130702561554, - 104.54407751572876 + 814.9475817376318, + 180.21736528457424 ] } }, diff --git a/examples/cogvideox_fun_img2vid_tora_01.json b/examples/cogvideox_fun_img2vid_tora_01.json deleted file mode 100644 index 6df7f35..0000000 --- a/examples/cogvideox_fun_img2vid_tora_01.json +++ /dev/null @@ -1,1315 +0,0 @@ -{ - "last_node_id": 83, - "last_link_id": 209, - "nodes": [ - { - "id": 72, - "type": "LoadImage", - "pos": { - "0": -820, - "1": 531 - }, - "size": { - "0": 315, - "1": 314 - }, - "flags": {}, - "order": 0, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 166 - ], - "slot_index": 0 - }, - { - "name": "MASK", - "type": "MASK", - "links": null - } - ], - "properties": { - "Node name for S&R": "LoadImage" - }, - "widgets_values": [ - "6e1a7befce6daa63fc01cb66c1a22ed0.jpg", - "image" - ] - }, - { - 
"id": 60, - "type": "SplineEditor", - "pos": { - "0": -307, - "1": 868 - }, - "size": { - "0": 557, - "1": 942 - }, - "flags": {}, - "order": 7, - "mode": 0, - "inputs": [ - { - "name": "bg_image", - "type": "IMAGE", - "link": 188, - "shape": 7 - } - ], - "outputs": [ - { - "name": "mask", - "type": "MASK", - "links": [ - 146 - ], - "slot_index": 0 - }, - { - "name": "coord_str", - "type": "STRING", - "links": [ - 145, - 176 - ], - "slot_index": 1 - }, - { - "name": "float", - "type": "FLOAT", - "links": null - }, - { - "name": "count", - "type": "INT", - "links": null - }, - { - "name": "normalized_str", - "type": "STRING", - "links": null - } - ], - "properties": { - "Node name for S&R": "SplineEditor", - "points": "SplineEditor", - "imgData": { - "name": "bg_image", - "base64": [ - "/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAIAAgADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDP6ijpRQeK1OAQgUYFO25FNPFAAKU0gpQM0AJijNKRim96YDu1JxmlHPFBWkAlL29qbiloAXpRiijpQAooPNJRimAEYpVNL2pB1oAWjg0pFNoAQikBIpc0uAaAHDkUhGDR0pM0ALjikBpwI6U0igYuaRhxQKKAG0UpoFAgzRmg0hBoGLxSYpRS4oAbS5ooxQAUmaXFJTELSUtAoASgUYpaAEIzRtxTvejNAADSGjvSmgBuKOlLig8UAGKTFKOlAoATNLtpCMGnZ4oAQDFKwoPWjrQIYBigmlIpBzQAuKKVfSkxg0AKRmhRilzR3oAGFAozmigA78UuOKSjtigBufSl60mDS5qRhnFKDk0lJTAcRSdKXOKUc0AJuzS4GKQr6UgJBxQAdDS7qUCgrQAAAimlaOQaeORQAykJ5p5XmmlcUAKBxRmhc45pD1oAdR3pRyKTmgANApT7U3+dACYzSjigdaXFABjNLim0uaAGleeKCSBTt1LweaAGA0ZpwUUYoGJQKdkUu3PSgBE2eYvmEhMjcV6474rroNG0SWNWWGVlYZVvNPIrkCuK2NBvSk32R2OyTmP2b0/Gsqqdro78BKl7TkqK9zVuPClnKpNncSRP2WT5lP8AWuavLK4sLgwXKbXxkHqGHqD3rrxM6Ng1Ld20erac0D43j5o2PVX7H8ehrKnWd7M9HF5bFx5qaszgiKQdaccg4Iwe49DS9q6j5/YSkpw54pCvNMQ3HFBp2KSgAFFFHagAzTlGaaRSoaAEIwaXGaVuaQCgBOlIKcRg0YoATFJ3paMUAB5pBS45ooEBooFFABSYxS7TTttADe9B5pcUnNACDilNFFABRRRg0AFJS0UABwaQig8Gl3UhjTQKcADSEYoAOtKDikFBoAfmmkDNAooAdRnikxRikAEZ5oHFL2pO/FMBMn8KM+tP7UygB4pMUgoJ5oAXpSmm5paAEopcZpcYoAbigU6kNABjNIRxThyKa3SgBpFLg0A07tQMQZpetA61astOu79sW0DOO7nhR+NDdhxi5O0SoVNAJHWurtPCYADXdySe6RDA/M1qwaJptsPltI2P96T5z+tZuqkd1PL6st9DgFYMcA5+nNPBaN1kTKspBU9ORXo2EiXCIiD0VQKjeYnIOxgezYNZuuux0xyuW6kZRkW5s0uE6OoYf1qbT5T5gHvT5Yz5ZCQqoxwIxgflVeyjkSUbkYc+lcl/e0Pooq9L3tzk7tR9tuNo481sfnUXHSuwXw1ZklnM7Ekk7nx/KpD4Y08j7sq/SSu1VY2Plp5dWbb0OLxijNdRP4T6m2uj7LKv9RWNe6NfWQLSwEoP44/mFaKaZzVMLVp/EihSY5oByOKdiqOcQDNBFL0pCaYgAyKAKMkUvWgAPSm55pwNISKADqKAOKAaUigBh60opcUuMUCExSEU4HNBFADcUd6d3pGFACjpRmkzTe9AD6QikFOoAaRRiloFACd6cKQijPFABwaQ0DrTiMigBvWgrS0tIZHkg04c0EZoXj6UABXmmlTTieaN3FACL70HrTgAaQjmgBRS4pB0oDUDFIpnSnZyaVl4oENBzTmpgBpeTQA0HJxTytLtxS9qAGAc0p6Ud6fjIzigBitzT+opm3mlBxQAY5pG6U73pCOKAEQ0MKQDFSHoM96AIu9TQQS3EqwwxtJI3RVHNWtO0ufU7gpENqL/AKyQjhf8T7V2+n6bb6dD5UCcn7zn7zfU1nOoonbhsHKrq9EZGm+FoosSX5EsnURKfkH1/vfyroVUKoVFAUcAAYAp4WnAVg5N7ntUqMKStFDMGjZmpAlO21Jrch8selJ5Y9Kn20m0UrBzEBiX0FJ5YHarG2kK0WHzkIUdxS7KcVoAIpiuJt+tJhh2zUmKCKLCuYuoaBZXxLqv2e4P8aDAP1XofwrktQ0y602QLPH8h+
7IvKt+P9K9GKgjBGfaopIFkiaN0WSNhhkfkGtI1Gtzjr4OnV1WjPMqQda6LVvDbW4e4sAzwry8J5eP6eorn+MZFdEZJ6o8WrRnSlyyEIFGMUYpw6VRkMpdvFGOacelADO9OzmkAzRigQUoNFJ3oACMUA5obpSJyaAAnml9qRutKOlABgUYpcUlAARikB9adnkUjCgBcZpp4pV5NI3WgBeopMUo6UYoAMYooNHQ0AN5opaXFSMaKKcRTKYDhSGgGnEcUANHHeg0meaXbkZoAUEUh4pCOaXGRQAqDJpWoSlbrQAi0poHFGeaAAN2pe1IRzS/hQAlOFVbm9gtWVZC29gSFA6iqp1Yn/VwL/wKT/AUmy4wk9kaZGKTGayG1G8fgGBPopP86iM143W7YD0XAo5i/YyNzDeh/KkPHXiq/h7RDrWqJbzXUgBRnYlznCjoPU8/lV3xpY2XhzT4LG0djPdEs2WztjHU/iePzrmqYnlmqcVds2hhbx5pMy7vVEgbyYAJp2OAB90fWoNEt9W1HxBDaiYkStulY8qsY6nH6D3IrMsVVAZnIGQcf7K9zXqnhDRjYaebqdCt1dgMwPWOP+Ff6n3NaznZG+Fw0ZSt0N20tYbS3S3t02xp0B6n3J7n3q0BjigLtFSKlc+56+kVZCBaeFA60p2oOTUO8yPtFAbkowenSlxSgADApSMUyWxuKKCeaVR3oATFIakxTDQK5GRTaeaYcUFJhS0maM0DFxSUZxRnigQjKDggkMOjDqK5rXdB83fdWcYWUZaWFejj+8vv6iunpGAIxz6gjtTjJxd0Z1aUaseWR5cfaiug8VaYlnFNqka4jUbrhVHT/bHse/oa4q11yG4uzbyReVnhH37lJ+vv2NdMZJnhVcPOm2mjVFIT2pRxSN1qznFA4oFKtNbg0AKelN704Gk/ioEDD5aanWpG+7Ua9aAHkZqNjg1LTHXPSgBUbIpSKYowalxxQBH3pWHFJjmntjbQBGnWnMuaavWpKAIicVIrAimuvNIgx3oAfikNPxxUZoAQUoPNIDQakYpIoK8ZpgG45qQ8CmBHjFOzxQMGnEDFADNneng8UA4FHWgA4pDQ3DUdaAEGRTicijpQBmgBM0A5pxFIF70AOApOppaQdaAM3WrRp7Lzoh++gywHqv8AEP6/hXPwzrKMg4NdkWwa4/WLI6ff7ohiGXLpjt6j8P61MkdFGX2SXe475qSNpZJFjiQvK7BUUdyeAKpwXAK4atvQbG/vdVt3020kuZIJUkYIOFAPc9BxWc5csWzpSuz0fQ/Ddh4ctDfXkokuoI2klnYkJENvzbR9OMnrXk3iDWX1vWLnUpgVWQ4RD/Ag4Vfyr0H4o6kLGxg0eF8S3h82cA9IgeFP+836CvIrh2llWNQScjgdST0FceFjJ3qT3ZtO3wxOx8Eacdc1tTIm60tcTT56H+4n4kZ+gr2VBzk/Wuf8IaAvh7QIbRgPtUn765b1kI6fRRx+ddEozx6da3k7s76VPkhYcoycmn8KM0DgVWuZwgIzUvQtLmdhk8+W2jr6VZgi8tMn7x61TsozLIZW6DpWkOT0pR7lzaXuoAOKjdwKe7YFUpJeetU2ZxVyZPnfFWQKhtxtjyepqXdQhSeopqNjzQz0wmgSQE0xjQWphNBaQuaM0wmlzSKHZzSbsHBpueaH5X3FFwsS9aKhjfNTA0CaGTRJNE0ciK6MCrKwyGB4IPsRXjt9oKeHdVvNMdS9q3760duvlHgqT6qePxBr2YjNcz400v7dojXca/6RZZlT3XGGH4j+Qq4uzMK9PnjpueeWWomOQWs53YbakhP5A/41oyzRwOBOwiJOPn4rlZJklk4PysMfh2r1DSJNN1nwxDe3ioNsZS6boFZeCfbjB/GssTXqUXGUdY9TzoYanO6ejOfjljkX91Ij/wC6wNMJ+bFc3MkJfdGNoOcY4OO36U+K8uoOI7lsej/MP1rtUtDklQfRnRijpWRHrci8TQxuPVG2n8jVuDU7WeRUDlJGOArDqfrVXRm6cl0Lx6U3HNKfpR2pmYDFIaORRigBpBpwbtSjpTT1oAceuaU80lFACdDS0lFAAabg06lAGKAEDdqDkn2prcUqEGgBhGOlKORTsA0YqRiIOae1NA5pxpgNApc0qijbzQAmKUdaXHFJ0oACMmgLTqQ0ABHFIKUHtRjmgBMHtS44p3akNADc80H2pSM03kUAKOarajYrf2Tw8CQfNGT2b/6/SrQxS4oGm1qefYaNypUhgSCD2Ir2nwXeWtj4ZtOViiEImmbpk4yzH1rz++0uB9TiupI90MhxKucfN2P40/W71LDQE0u1yi3LHIznbGDkge2cCvJxrcqkaUT1sO06bqMxvEOtSa9rd5qsuQJm/dqf4Yxwo/L+davw30L+09dbUJ03W1iQ/I4aU/dH4dfyrlJDnhQWPQAdSegH517r4W0UaDoFtYkDzgPMnPrI3J/Lp+Fdj92Nka4aHNLmZtpwMnrUyDio15IFS9BUI7pCSvsU1kSO084Re5qzfT7Vxmo9Li3Fpz9BUt3djWC5Y8xpwxiKMKOwqQHApmaR2wtWc71ZFcS44qmhMsyoO/Wm3U2MjNO0sb98p6dBUXuzdR5Y3NQcDFIzUmaYzVZhYRmqNmpGbmomak2Woj91Gai3c0oNK4xxOBShqjc4WhW5oHYlJpAaO1MzzigBpYo5FTo+cVWl6g0sT9vSjqNq6uXlpjBSSjqGRhgqe47ilQ5om+7u/u81Rmt7Hz14jsX8PeKLmwfPlRS5jJ7xtyp/I/pXUeD9QksryeybmG4G4AjI3qP6ir/xe0cSQafrMa5wTazH2PzIf/QhWB4dJmis7gYLqwB/3l4P6Uq1P21FxW558v3dUf4q0xLC4jvbNPLtLlyjRDokgGePQEduxBrESKST6V3Pjiy1C4tbL7NYyy6faq08ssfzZdvUDnCqOvua5azlhaDeAD6VOBlJ0I87uzOslzu2xHDpoI3SsQB61oaXEpuT5A2xR8PIOrH+6P6+1U2klvrhLaDgtwT2A7k10UNtHawJDEPkQfiT3JrtSOSrPlVkSnpTV60oGaQ8GrOQVhgZpAc077y0wDaaAF6UYzS9aOgoAb0p2Mik60qntQA3ODSnrQynOaAeKACilAoI4oAawyKYAQakHIppHNAC9KUUuOKF6YpDDFL1FGKbkigA6GnA9qBzRjFAC4xTSO9P7UnBoAaDinYzSYpwFADNtGafnmkIoAQUfhSgYFHWgBBxRwaUCmtwaAEK4NOU04DctIVxQAjKGUgjg9a5DXt6ag6yHIRQqf7vWuw7VyHif/kIkf3kXP5VnOMb81tTooSlflWxf+H2k/2r4mS5lXdbWAE7A9Gk6ID+OW/CvaEGT/WuQ+HmlHTfDEcsibZ7xzO2Rzt6IPy/nXYD7pPtXPJ3Z9BQhyQRJHySfU0522qTTU4WoLqTahqb6FpXZl3spkl2L1JxWzboIYEjHYVhWY8/UlzyF+Y1vb8VMO5vWVkoj93zVHO4C0m/5qq3suEqm9DGMLszby4wD6nitiwTyrONe+Mmu
Ynk8y5SMdCa6iM7VA9BUR3OitG0UizmonakMmBTGbIq7nMojWaoWaldutQM1I0SH7qer1W3elOR+aLg0TzNhPqaYr1HcP8AIv1qMNQNLQ0I2yKZIdr/AFpkD9qdc8Ire+KZFtQl5jJ9KhR8NUoO+Ij2qmGPHtSZUV0NWF+RVggMMHoazoH5FaKnKA1S2MZKzMLxHpg1vwjqWn4zKYi0Xr5ifMv6jH41474UvhG88RIHSZAfXo39K93ibZdyD6Nivn3XbY+HvGd7AmRHDcsV9425H6H9K0ps5cXDaR7LoWqpcW4YEbojgHP8PUZ/WvIbuaI6jfG1AWCS4kaNV7KWOMV0ugXqw3FxC+GSaIjB6NjnH5E0v9nWp1T7ZFbxRRxoFjjjXALf3sfyrkoRcMTOC2epzVpr2SkM0rT/ALFBukH+kSDL/wCyP7v+NXzTutIRXqnkyld3YChhkUAcUHpQIbnFH3qXbkUKMUAJ0pw5GKRlyaAMUAKBTcYNPpD9KAE3Z4pMd6QrzTyPloAQGlI4poU07NAEQ4NP60MM0goAUHBoPXigrSjpSGKOlGKUUnOaAExg0tFIQaBDhzQV70inmnYoGN7UA0EUg4oEOIzSYNO60YoGNzRTsU00AKKRlpQCKcKAGKccVIMGmkCgAg8GgBxWqq+HbfWNXhaZnwMF1HRlHb2zVktgitvw9EHeaU9tqD+ZqKjtFnVg4c1aKOjjUKoUAAAYAFTnhR71Cp5qRj0+tcZ9KyTOBWbqMuIyM1eZsLWHqk2FPNKT0LoxvIn0RcpNMe7bR+FajNVHTF8rToQepG4/jUsknpQtEOes2WA3es7UJeOtWlfKg1k6hJyaUnoOnH3jNhk36rCv+1XWowIFeerfeTr1kp+68m1j6dh+td1G/wAuKim9zfEwtYkuZdkP1IH608tWbqU2yBTnjzF/nVsv1rS+pzuGiGySc4qFnyaZJJ8+PWoWfFK4+Um309W5qrvqRH5oE0PunxsHvSK/FVbuX9/GuakRuKLlKOhehb5hVm6/49CfQg1QjbkVcuDmwlP+zmqMpLUZbvlTVRmxI49DTrWTJ+tQzNi4f86T2KiveLsD8itWFspisKBula1u/SqiZ1EK/F+noyEV498WrEReJbe7AwLm2AJ9WU4/livX5z/pduf9oiuJ+KGmm90WC4QfvLVmf6rxkU07MyrQ5oW8jivDZ+1m3dudiHd9RxXTsMGub8HRFEut38BGPx//AFV1BGa7IrqfN1W3KwxaRjSgc4pSncVRiCg9aMUoOOKKBiUmKDmlFAgxSbaGPNIDQA5eBzRinA5FNNAwIzSYo60p6UCExTWBFLnml60ARgnOKcRgUu3FL1oAO1JSjmjbSGJ0p1M5zTloEFOGCaQ5oxjpQMa6HPFKCQOacG9aUqDQA3NKy5HFIBRkigBqhlpck08P60ECgBoNLxQRkUgBFADuKTFHNN5zQA4ikB5peaAO9AxetdRocPladGSMGQmQ/Q8D9BXMxxtLKkS/edgo/Gu2iRY0CqMKoCj6CsK8tLHq5VTvNz7EiH56kc4K1ArYkqWToK5j22tRsr4U1zuquWYJ3YgVuzN8tc7dMH1O3XP/AC0FTI2oLU6EEJEqDoABULvQ7VA7e9MSRPHJmPjsayNRf5iaW01Dfqs9qB8oQHdn+IdR+VV9TbBb6GpbujWEbSOaniaW63jqOh9Dmu+tZ/Ot45P7yg1xaxXaTbvIWSMnI2nnFdNp0wa3CgFWXqrDBFZ09GFSqpysN8QSldOYjruFWrC9S8sIZ0YEMo6eves7xESdLfHqKbpH+iwrbDhAo2/lzVOVpltL2S73NG4fBDehqN3ps53IRUCSboh696ojl0JQ/NSq9UvMxUivxxTJaKd7eKNXgh3fMVzitNG4rjr+Yp4jWcnA3BBXWRPlRUxd7mkorlVi7G/NXpnzp83/AFzNZCPhhV+Z8abOSf4DVpnNKOpVs5PmH0ptw/8ApR+lQWb4YfSiZs3NK+haXvF2BvetS2esiA1o27fMKpGVRFmc5ubb3kqprMKz2bIyhl+YEHuCpqeRt19aqPdqW6XfEw9/8apGclp8jgYokhQLGiov91RgUu4g07BwBTCD1rvR8k9yUcimkmkVuKN3NAgbmhSR1p+ARTccUAL1pMUAU7rQIZjmmkU4nFAGaAGgmpM5FNC96XHNABikIzTh0pDxzQAmOKOgoByaUrQAmeaCMCjHNDGgBqmn0x1I6UAkjmkMeFBzUZBVuOlP6Uu4HrQA3d60A08gHpSBfSgBODS8gcUhBFGT3oANwoz60YApcA8UAGBTSDSlfSm8g80CH8ijNIDTsUDEpB1p1Ko5oAaaB1pzcGm0DNbQ7fzLwzEfLEOP9410qfcrP0m3+zaehIw8nzt+PT9K0lGEH0rjqSvI+mwVL2dFLqyFzhxUrHclRSjjNCP8uKyO5rqRzN8tc3LJnW7cejZroJz8hrlLmTZrMbejYqJs3oLc6RnqtdXHlQsw69B9aaZhtyTxVPc11cYVC5Xoo7fWiUrIiTUVcfptmwlEh+8TnNN1dSu4Hg9CKdca1p+kEpeahFHIOsUa73H4Csu98VaLqGI4ruXzvWWLYpH17Uop2OeliEp+8zfFn5bqMdh/KtKO1Eke0YDY4NUrbUINRhFxZzQ3cIGGaE8r9V6iti02ugIIINSlZnNKo78yOa1lC+mzqRhgM49CKZbxloIZR/Egb9K39X0xrmB5IV3OVIdB/F7j3qpp9m/9j2jOhVxHhlIwRgmiSu7nXLERlSTW9ynKDjnvVHdslK9m5H1ree18xdpGPQ+lYt9bPC2GGPQjv9KtM0o1lPR7kDPhqeJMKaqs5PXr/Om+bhTTNrGdqGlJJMZyC0m7cCTnB9q3rGXzLVGPXGD9aWWEPGOM8VFagRs0YI9cZrOL1MKVS75WXA/z1avpdmjy8/ewv5ms3diUCjW7kpaWsA/5aOWb6Dp+taX0NOX3kPtH4BoZ91xVe2fEdPibdcCjoNL3jTgPNaMDd6y4q0bc+vTqapGMlqTxtv1IntGmPx/yasj5lNUbNiRLMernNX4x8gqkZTOCIwxHoTSMOKfMNs8i+jsP1pnau9HyM9G0MxSgZpwGaQ0yQBwacRxSdTSngUANzR0OaOKKBA3IoXiilPSgAxmjpSrwKQ0DEJpc8YpKKBDQOacDSijFAwpKXNIw4oAA3rQSMUnBoK8UgFzSYBoAo24oEHTpQCaQk0oNAx2aQ0Y4oxQAYzTdpFOFOIzQAwMc0/AIpu3Bp1ACFcULS4zSgYFAARgUitg80ZpcCgB2QansbT7Xexxfwk5b2UdarYxXQ6DbeXavcsPml4X/AHR/iaipLlideEo+1qpdDVP3T71MOlQtwAPerC9K4j6Z6EMi5BFVc7Tirriqkw5yKTNY6kMxyhridUcrqC7QSd/QV2cp+U1y1zHnU8+9Zz2N6cuW7NO2t5rpUB+XP+c1h61rNxLeDw/4dRmmZikk0Z+Zj3CnsB3atPWtTOkeHpJojiebEUR9M9SPoM1d+Hnh1bLSf7RnTFxdjcSeqxdV
X8fvH8K0pU+Z3Z4mLxEtkZmn/Dyys7RrjUkuNTnAyYbcHbn0AGC59yRWBca5oEcrW7eDYQiEoQ8uyQYPfA4P417Vui8jeHUR7d27oAuM5+mOa8e8R6XZ3fiC8ubnVraymvZfNht3QkhTwpkI+7uxn8a6ueEdHoeXJySu9QsdHs9SVtS8H3txZ6jbjL2dw/zc+jdwenOQenFdn4W1z+17ZzJD5F7A3l3cBG3Df3gO2fSvLrW5uvDHiBLiSMrPZyYmjzw6fxLnuCOQfpXp2sWJ0/xHpWv2JzBdstrdf7SsMxuffoKmrTi1dG1Cq767HWqOKR1z1p8fKg0MOK5bG9yo0Q9KqXECSKVdQynsa0HFV5BSZrGTTuc3d6GCSbeTb/sPyPzrJn066jJDKnAyT5gwB7ntXYSfnXB6mt14t1i40q0dotKtH23cycmaT+4PXH/16IxcnZHV9clTjd6mTfeL7G1VYYhLfzKMERvtiH49T+VZy+M7qCZJm0RUQHkqXBx35IrrdT0C90XR5n0G0s4BBGXkkk+eZwBk44xnvzXCweLdahcNJcrcp3SVBgj6jpXSsPE4Hjal77HfxXMN0sFzbuHhmUMjDuKfqQ86YKQQUUBc9/WqOlXEGs6LJcaUqW1wGIaNh8scpHcDseuR/jWX4Y1WaS5l0LVmcXUbN5Tv97I6qT3PceorGrScT0IY6FSy6m7GxEQ9as2Q3TE/3RUE0TRR5YYIODirWljKSt6Cs07npx1jzIvRdavE7bYgdW+UVSh5NW8hmUdhWiMHuWoRhEQVe6L9KqWy5bce3SrpHyVSMJvU4fUo/L1S5Tt5hI/Hn+tVn4FamvxbNSEg/wCWkYP4jisw8iu6GqR8tiI8tWS8xgpc5pcCgLVGAzBBzTgwIpaQqDQINuelNGe9LkrT+CKAEwKTFHOacOaAGgigrkcUFM0mSDQAD3p2KXgimj60AB4pM+tP6im4yaAF6im4xThxS0AR7fSjBpASKcDmkMaQetLuIpw5pCKAFHzCkKjtSjiloARRz1pSMCgDAoJ7UAAOaWkx6UgbnFAC5zS5pKCKAEJ9KXfQBijANACdaUUmCO+acpoAkgia4mSFertiuySNYo0iQYVQAPoKwtBgDXEk56RrgfU//Wrdzk/hXLWld2PoMso8tPnfURz8w+tWV6VTLZkFW06Vgj05IVhxVOXgmrx6VWmWhjgyhKODiucvMRXkkhBwo3fKMmuikOzOehrn9UYwzmVY3kIXIRMbm+me9RPY1fws43Ufsera9ZWunTmSK4kAdQ5MauzYOFP3TjqK9r1Bxpvh+9kiGBb2shXHspArxvT5Yrv4iaXOtpNbqZogyzR7GLAnnH5c17Nrds134e1K3QbnltpFA6ZO0100/hPn6/xnkmi69q+oSaToVxfO9gZI1dNoy6DnazdSvHSuc1G6e+1C6upiWeeZ3Y59ScD8sCtTwyQniDTZm4QSLuJ7ZBH9azb3Trq11OXT3hc3KyFFQDO/ngj1B45q5cqqL0PKrtu68y7rmZrbSbl8s89godvXaSoz74r17w6q6j4C0vzxnNtGfxU8H9K8o8URfZLi0syQfsVkkT4PAY5JH617H4btHsvCOlW0gxIltGGHoTz/AFpX/dr0OugaKLgUMKmVeKa68VzI6mynIKrvVuQVVcVLNIlC6MqwSGAAzBD5Ybpuxxn2zVfTrKy8P2Fjp0ZCmRtgLdZZCCzE+5wf0q64yQPeuC+Klw63GkwKxGPNl4OMHhQf51rQWpNd6K51PirVLTS9CuXui+JEMQSM4diwxgenfmvI5LfTNS0+5l0+1ks7i0QSNCZTIskfQkE9CKXUpZW8NaWZJHkeaaaZndtxY8KOvtUGhcTX7EfKLGbdx7f41eqvK+zPOlUvNRNPwBeG28UpbEjyb2NonU9CR8yn6/41pfECxfTdSsdYtgRIGALf7ScqfqRkVgeEEZ/GGkBevn5P0AOa9E+Ilt5nhKSUDmOeJvzOP61rPdo6aeyZajeK/so7lADHPErj6EZpthEIbSbBz8+KyfBy3LeHbWWa7MkWwpFCEChACRyerHitwLstB/tuWrhtaR9Fh5NwsOibFW4eTVGLk4q/FxhR17mrRpJGjb9gOgq4w+SqtqtXJPuVojkm9TnPEUeYIJQPusVJ+oz/AErnq6zWYvN0ycDqoDj8DXJn2rppP3TwcxhatfuMY05TSFaUcVseeKeKaWxTic01uaBCj5qacg8Uo4p3BoAaPendOnWkI9KVVPegBuTml6ilxzS4oAiwQaeOlOppGelACjk0uOaACBQD60ABFKvNL1oA9qAIiKQDBp9J1FIYUueKaTil4NAC0maWgjK0AJ1HFFNBp4oAROOtIwyaecCkBoAQClNGaTvQAYzSqlOQZNEh20ANYU0DPejOVOfSvSrWG2n0iy3W8UiLCGRWQEA461lUqchtRo+06nOaSnk6WhI5kJc/0q6G4Jpk+yImNFCKnAUdAPaqMt4EB56HFcMp3dz63D0uWnGK6FzeDKBV+M8VhW9wJLhRnOc1swtwKcXcurGxYqOVcing0jdKoyW5mXCZU1hXi/Omex4ro5l61j3sWTms5rQ6Iu6My60bWNQntby1S1VbCUTQjcTJLgjIz0XjtXpw5AYDg8iue0Vg0IFbDGZIm+zlPMx8okztz6nHOPpWlKdlqeLiKd5M8xvtBg0HxOYrwiHTp5C9rO3CZJz5ZbsQfXtXXMwVfObywqjiViuAP970rdmxcqLW6so7i3kQ+bvAKHpxtPXP6Vlw+EPDCzs48PQKUb5QxLIeAchS2MdunarnyztzHnTwjcuZNq5zdj4KsfEM1vqsepCe0a5Zp0CZ8wKfuhvqOT6dK9Ff5nA9Dk49aRflQJGixoBgBQBge3pT1XAolO6sjanT5FYKaw4qTFIRmosXcpyL1qq4q9ItVnSoZrFlBhhgfQg1wnxUs2YaXeAfKryQM3puAZf5GvQpIs1U1HTbfWdKm028B2SLjcOoI5Vh7g1dGVnYVWPMjyJdIm1jwxbJagNc2Mjgxscbkbng0xdHuNE0O9luwFurwCCONTkqmcsSR69K6qO2u/DMZtdStZWjBOy9toy8cg/2gOUb1BqGW6TWnhgsNPudRYSKzKYmSPAPO526CqtLn30vc8rlnzW5de/QxfhxpzXXiprkD93ZQlyf9p/lUfz/ACrs/iMwi8GzqcfvJ4k/8ez/AErpdP0ew00zGys4rbz3EkoiGAzYx/nHFct45s73VbzT7K2smurW1f7Tdxhwm/PAQE98ZNazl1PQpQ2RheDodHi0uGWGeCTUnjJlAl3Mp9AvbjFdPcnAjjHZaekMW2IR2q2wflk8tUZfYgVMlqZZWnl6E/KvrXGtW2e/QXLDUit42I6Y960IYwDSKuKsRrWiQ5yLduuMVPNwh+lMgGBRcHCH6Vp0OV6yK0qebG8Z6OhX8xXDEFTg9Rwa7pT8yn2rj7yLy76dPSQ1rRe6PNzSGkZFbpSdaeTTcV0niiAUpFLRmgQmBilAFLjigfWgAK4pA1ONAAPWgBvWnEUFQDi
lHAoAQCgjmlBoHNAxBQadjFJjNAB0FIDTgKXAFAiAEEUduKbjBp45pDBgCtMGRTzxRQAY4oz2o+lGO9ACbaXpRSZoAUijGKBmn5GKAGGk5pxHHSnKoIoAahwaVwWoIrZ8PadbalfPFcuwRE3BVOCxz60pNRV2VGLk7I1/BYtzZ3aNGhl8wbyyg5UjgfTrXQiNIo/KjUKiDCqOgFVNP0C10u6e4t5ZjvTZscggDOavEfMfU1wVXd6Hp0VyxSZymsHyhIwIGFLEt0UDqTXBvq1xdJLOsTCIt+7I6sB1OK6fxNfJfzppcchSK4fNxID0gXkgf739RWTc2X7jcvJ3HYBwFj/hUD29e9eRUxPLLfQ+gwtRpIo6VrOL+3Dtx5gU+2eK9CibHFeWG1SeCa+YlIIEDmQcEnPC49civRdOvBe2FvdAYE0avj0JHI/Ou3D1FK5vXnGdmjWDUpPFVlfFP8yuo5uUZN1rMulyDWjK1UJuQalmkCTRZtkpQnviunUgjiuIt5TBdq3Ymuvt5g8YIPWpg+hyYqnaV0WhT1GaiDA96kVqs4ZIlApwFMVhTtwqjBj8UEUzeKZJLgdaCRshB6GoSKiMvzHmlEgNQaAy1E0YPaptwpCRRYpSZWk84ROIZdjlSFZhuAPYkd6Y8l0fKCtHtB/eAr1GO3pzU7EVG3SndrqaKKfQiV7jzJTJKpjJBjCrgqMcgnvzyDTJMYNOdwM1SuZ9sZPfsKmTfU2p07vQgiXzLl5Dyq8CpzzTYU8uEA9ep+tOzRFWR6VrKw5RzU8YqAGrEdWjKTLcfAqG7fETGpAeKo38mEVe7MKpvQzirseG5X6VzOqj/iZz+7Z/SuhVsuBnoBXO6kQ2pTn/AGv6VrQ+I4M10pL1KhWkIwKcTS4yK6jwBgpcCnYpCM0AIBijvS4NLigQmcmlwKMc0FcUDEI5pcUZooEGBRxQaVRQMTPqKMinECk2g0CEBoak24PWn4yKAItuRSEEClzjigjjpSGN25pcGlQ44pzCgCOnDpSdKXHNAARQBkUpHFIOKADpQBzTiM0AYoAQihQc07g048CgBpHeuj0rwzdzRQ3hvVtSwDx7AWcD19B9K5sEjpV601e9tUEcU7BB0VuQKiopNe6aUnBP3j0kEBcZzx1rN1a8jtrOQu2FKncQcEIPvf0H41zEfii8TiRI3+mQa57xPrzam6WW/wAmOTiVs/cQck/j/hXl4pzpR1W56tBwqPRlOUzXswvWQA3Ks6kHASMdD9GOcewFJbXFzvWOMbyx2qvv2q0LuOckhVRSAqIvRVAwAPoKt6ebSxt9Q1huXgjEag8jd1H+fQH1r5/mU522PTjNxVzJ1OITi30SMgxwMbrUZM4DN6Z/StjwzfiSGWzZy0kJ3jJBG1j0GOw9T61TsYXtYJLmZSXZGnmaWMhUcDhCT97cDtK+/tUNrbWPhu71e6NuyQ27IA/dA4z5X1Hf0ANelh5crTNFVR2oajfVWKZJY1dHDowDKy9CD0IqXdXpm61JGbiqkpqRmqvKeKC0ijccHIrZ0fUQ8YRjyKxp+VrPhmnt7xyi5TAbOcfhUWfNoZ4hR9neWlj0RZAalV65zT9XSZQrHDeh4rXjuAw4NPmPLkuxfElKZPeqocHvSl+Kq5g1qUtU1tbFkjRDLO/3UzjjuSfSmpq9nJGDLLPG/cFePzFZOr2s66iboKXjZAhIGSuP6VRabArNVGnqehHC05QVjdm1azQEpcO5HbYTVqC5EsaurblYZBrkHmJOADmt/SoZYdPUSjBZywB6gUc92TUw8YQvc1xJxSmT3qpv96TzPequcaRYaUVC81QPL71A8lFzohG5LJNVVD59z/sR8/jVe5ugi7V5Y8Ae9XbWLyIAD97qx9TUr3nY76UOVXZMxxUZamSSY71AZOtamli2r5qeNqz0erUb00ZyRe38VmXUnmX0cfZRk/U1aMoVSScAck1zt3emOF5R/rrl/LiH17/gKUmFODctDZt5A5MnYnI+lYMx8yZ3z95ia05JBb6ewHXaEWsYg9q6cOtGzxM3qJyUF0FKkGkzShiOtHDV0njC57UmRTtoxTdvvQIKU9KaVNOHSgBtO6ikI5pQaBjTwaUGnMMmmbSKBCtQKXGRSYIPSgY7GetJtxRuoD5NABzTlowKYxINAhAp60EU8HHFIOtIYgSlxzinNjFMXO7pQAFTSbcGps7RUandQAh6UD6UYwelBoASjr1oAPpTjQA3oaXOaMZpe1AACO4pGAByKVBk80504oAaORWZPpCzTSymY7n4wy5GPQ1pLkU41nUpQqK0lcunUlTd4uxgSW9xaspdMRA4zCuQB7DtTp7kyR2mmjBjEnnTA9D7H9K3OhyKovpULSvMmVkfruG5fyrysRlf2qR6VHML6VCW31aRdTQzSwW9oTvjlhgXejAcjcc84+6RWN47vUtdNstFgeUvK7XNwZTltzdc/Rdo/Op5ra5iuFL2im2Vg8jxyblwOeh5H0rjNXuZNQ1q4uJG3PnGP1/rXPhcNUVVKorJHVVrQcLwZ2XgbXsouj3D/MuTbE9x3T6jqPau4DgivClZ0YMpZWU5DA4IPqD616T4Z8VLqqLa3bBb9R16CYeo/wBr1H4ivVqQ6o6MFik/3cjrC1RScim780jNxWJ6iKsvQ1TGPMKkgZ6fWrkp61h67AbnRr6FM7zCzKR1DD5h/KiL5ZJmWJpe1oyh3RekjLHqysOjDqDVuxvr2BT50iuAeM8cfWvLtM8dalaReVcKl4u35DKSGU/UdR7Gul0q+bW7BLtn3SZ2zIOAj+gHYEciuiryNczR85h1WS9neyO/h8R2YkEc06xufU5H5itiK6SVAyMrD1U5rz6GzBxgfpWla2rxEGJmjP8AsnFcfXRHdZW1Z2ZZWFV5Le3Y5aJCfpWP9umtsCadCT0D4H61biumuF/dvC59FfP8qNxJyjsyyEiiOY4kU+oFNebuTUD+fjoB+NVpIpm70noVdy3ZZa5APWoHvVB+8KpSQS55JNZepa1pmiZXUZwJSMi2iUNKfr2UfX8qIqUmNuMVc1ptWgiB3yqPqay7nxJF0hVpT7DArkm8S2GoTvNJF9i3E7VLF1OPfrn8MVr2dk9ztaMBlblWzwffPeq9m07HZRdJxUrm7oK3OoXTXtxhYojtjQdC3cn6f1ropXCLiobG3Szs44l6IuM+vvUFxNknmrSsjrtqNll96h83JqB5MmkVsmgbLkb5q1G+KoRmp2mWKPe34D1NNK+xjUkoq7egt9cZQQBsbhlz/dX/AOvXOWE51bWBcFSIIBiJD2Hr9T1q5dyube4kJJOxmPvxUGkRm009cf6yQA/QVUqUuZI5qWYUlTnU6LReZpXkvmOEB+VP1Peq3ail4rujHlVj5itVdWbm+o3rxSbcU4Cg1RiJyaMEUmeeadv7UAJmnCjaCM01jtoAc3AzTc570gYtxS7So60DFPrSc0Bu1O2igAFI3FNZiDQMv1oAXINJtFKQVpB1oAeqk0gxnBpelIpyaAF460oXHNJjApFf5sUhgetLtwM0Op60hbC4oE
IWzTkFMUZNTKAq5oAjPB5pMUjNubinYyKAEFBFLzQxxQAlKRTTmnK/agBF4NSdqawHWkB45oAaeO1OBoNJnBoACeacvSgDdRjFADehzVe80+01CLy7m3R16ggYIPqD61ZpSBSaT3GpNaowrzwlp1xGohD2zqMB0O4H6g9frXM33hrU9OPmonnxqciS3ySuO+Oor0POKUdiDg+tDimaRqyicronjUqVtdVy2B/x8qOR/vj+o5rsoriO4hWWKRZI25V0OQfxrH1TR7XVYSko8uTO4SxgBs9Of7w+tcydK1vw9I0+nymWHq3lDcpH+3Gf5iuedHqj18LmjXuzO7fkVRlAL4PRuDWRpvjC1usRXqC1m6bs5jJ+vVfx/OtW5YKVcEFSQQR0Nc8otbnt0a0KqvFnjPlFLgpjG3cMfQ4rT0LVLnStVSS3wyv8ssbfddfQ+/oabcRq15LIvRnf9SaTTYC9/wADopP9K2qPlg5HgQjzVOXzPZdINvqdolzbEhW6pJwyn096v3bfYLZ3RQ0iqT7Cqfgu1H9hs2MgzsB+AArU1l4LPSLuafCp5ZX3Zj0A9yawSvT5lvY0d/a8i11PLZfN1l2nuXZ3cGQbvmCruKjC9zkHOenAqexSTSruCW0lEUwmRPl4+9nBK9M8fiM5p9wumSRCSPURZs7Fmt7mIsu89SjKcjPfpmmwzWtpMivKZnBwrIgRI89TjqfcnmvOcpc1193/AAT0Vh6mseU9a0qePWdLhvI4/Ld1/eRH+Bu+Pb0oufIto2eZ1VVGWJ6AVJoMMdvpqIuGGMVU8UrHD4evXVcFlVc9erCvTl7tNye6R5MdanL3Z5x4x8cyxyGx0ZjECuXucYbnsvp9a83Cy3E+AWeSRuWJyST3PrWrrwI1WQeiL/KoLKPybS8vSP8AUxEJ/vNwP51dF80FJ9R1bRk0iBpF87K/cQbE+g7/AInmvQPh9LJcGSESs0MI3NEwyFJPBQ/nkH615unQD2r1b4c24g8PzXJHzTztz6hRgf1rSeiNcJBTqq/TU7C4mCLtBrKml96fdXGSeaz3lya5mz30iQvk1LHzVZTk1ZTinGLk9Dnr4inSV5uxYDKi5Y/SoZWMpyT9B6UxwWOSablhXbTpKOvU+axmOliHZaRHAEUhNKj7jg05k4rQ4bkJODTsZGaQrSbiKYh4I700vzxRgEUwDDc0ATFQwzUZSpf4ajPWgAXOMUpHPNKDxSE5NAChQtL1prAjmlU5FADSozSgEikP3qdnFACADPNLwOhpActQwweDQA49OaZTs/LSAUAB6UgOOopScikGc9KAHkVEw2tkVKSDxSbQTSAFlBGDTmj3DIpjqMcUI5AxQAm0qaUvlcVJwwqNsA4oAQDFOLAU3oaUjigAFDDnNC+9LnPFABkFaRcUpHGKUDAoGNfjpSIc0dadjZQIViAKZjcKCd1OAwvFADQSKdmk6nmnY4oAOozSDrSHili5NAA1IhycUsnBojA60AK3FInJz0NOcBqjIK9KAKWo6HYapkzQhZj0lj+Vvx9fxrm7vQ9b0iFxYXD3Frg5SMfMo9dh/mtdgrkNU+dwz39aTinua0604axZ48Ey+MYxwa1tBtFa5ndzwu0H1xnJ/lW540WMT2W2NBK+5ncLhmA4GT3qHwt5zXdxbxIjxmMzOvl7n+Xgbe46+9cOOT9hLlPSwU17SMmeieGL/TbHw7aRT3sMczbnaNidwZmJxjHWsPxfdXGqXyw20c0llCoZGjiYh2I5bp26VR8Tag+kWNvPo9zcW0082F81FEpjC/M3IyBu6HjNcyfE3iE9dZvM9PvD/CuWk69airJJbdeh2qrTw9fntdnWeE9Jsmne91YxwlTiK3uVKkY7kEd6yfE2n29pq832eWOS2lJkjMZ3AA9Rx0xWT/wk3iHGP7ZusA9G2n+YpF8T68pJOobvXdEp/pVyo13FRSX3s0pZlyVXVd3c9W8F6kLrQoGmmjVsY+dwpbHGcE55q54rMcuhmEOp8yVBhWByBknpXk9j4h1K7vre1lhsZxNKqfNDtPJ9RXXXRtbS1vbu1iiea0BYIW2s2PvH2H8658TXqwh7GUdX1uYQjCdR1Yvrc8319Q2v3uOiybR9ABUV7GYfBkrj/lpcoD9Bk0SkzTySngyOXI9MnNauqW4bwKyAfNv3/lXrU4csEvI4pyu2zkIGEig1614VnWDwZYAdw5OPXcc14zbTFGrXjnkMWxZZAhOSgcgflROPMjbC4hUZczVz0m+1u0t2IluEDdkU7m/IVnReI4Xu41a3mMBOGK8v9Qo6/TrXFRkL0GPpVuCdg6lWZXU5VlOCD2IPapVJI1q4+rPSOh6iphaFHgKNG6hldDkMD0INPRcVyGk63NC8jSRb7XHmTmJcbCeDIB7nqBxnniusSRJI1kjdXjcZVlOQR7V0RtY8Wqpc15O5KykcimAg8EU5XPSlO0mqMiMjDZFLvalkTuKaCVHSmA8fMKQgDrTFYhvap2G5c0ARBfSmkUuSvFAOaAFBPSk6mlPtSAnvQAoPGKTODTsjFNzzg0AO6ilyAM0n8NNzQAgOTkU4nikUUHP4UAA4pWoBoY+lACZ4pRSUoNACHrTl5ppzmnKO9ADc0m409cY96aACcUgHBsjmm4weOlOIwKaGyaAFDEGgnJp20YyabgHpQAowaCMU0HFOzkUAIBzQV5oHFOoGN6DNODbqQjcKYp2NzQIdyGzQ7ZGKfuV6Y0ZFADVXNTcKvNRKSDzSud1ADc5Y4p+CelIAB1p2cDigBGGTinqNo4pgJNKTigBHpFOKUc0BcUALz60hOaXODSEUwGkY5pwfFPA4ppGTxQBxfi2XzdbjTtHCo/Mk1QsluGvYFtHdLl5FSJo3KsGJwMEdOtO1ib7Rr124OQH2D6Din6abpdTtGsIzJeLKHgQLuy45HHfpWT1O6npFEV+k/wBvuFvJWmuY5DHJI0hckqcfePUVVIxU8kjTSSSucvIxdj6kkk/rURFJKysWRkU01IaYaYEZ9emKu3Gr6pc23l3F1M8EnykkY347Fsc44qk3SrE1+sukWWnhSGtZppWbdkHzMY47Yx+NZzhGTTauNSa2ZCgyQa3rkBvDxTtisKHrW6Pm0plrRCMtfBLap4Yjv9NjY36O4ePPyzKOm30Yfr9a5WGR4nKOCpBwQRgg17F4JcyeEpY1+9FcMP0Bqj4y8Ff2wralYKq6gEDOg4E47/8AAv50WIuedI24cU8FuOynqfX/AOtVV0uLG5a3u4JIZY2w8cilSD6EVa8zzTuJyaRZqWty8MiTQuUlToR/np7Vs6HqE9kkrACW0Vt00CffjB/5aIP7vYgf/Xrlo5Cp61ejlPDKxVh0IOCKNtUJpSVmemQyRywpLE6vG4yrr0Ip2QWrB8EyWr6fqCSTN5kbZWJcnaMcMR6E5HFbUbrIzAKwZTyCMfiPaphiacpunfVHPUw04Lm6EpP5UvBFJnjpSKcnFdBzke05qTOFobik60AIRQBgdKUj0pQPWgBvSg4NPwKMCgBgWlwKXFJigAY8YpAOxpxFAFADcYpc+
tOFGBmgBm3mlxgU4rjpSUAJgYpAvNOFGMGgBDS9uKMUY4oATFCr3pOcU4MOlIBpPOKQLg5pWGDmnbhsoAazZGKEHWmAZNTIvHNADDzSgcU1yN3FLk4oACcGlU5pAw707A25HWgBDyaQJk80pBHNND80AIw2NkVIJMjBFIcMKaOKAJCmRmo8AdKcHoxzkUANHvTgDzikHWnZwKAGqecGnMaaCc9KUUAJwDTwQVpjAGgHbQA07g1S5BFN+/RgrTAdk0yWZbaCSZ/uxoXP4Cl3c1leKLjyNDkVT807LEB7dT+gpMcVd2OHjJdy7feYlj+Naek6idI1SK+WIStEHCqTjllK5z7ZzWfEK1tIvILFdSaZXaSexkt4NqghXYjk+gwDzWb2PQMtRtUDPQdaQ07GBimmgBpFMP0qQ0wigCJquXlzayaJpNvCB9ogExuD5eCSzAr838XH5VTerV7HaJp+ktb7DPJbu90VbJ3+YQAR2O3FTLdAQRcVs2rbrZl9qxYu1a1o3yEVQG94BuDG2q2ucAbJgPzH+FdgJ+QvBHTp2rzvwzP9l8VeWThbiJ4vxxkfqK7tDkg1SMJtplTxj4TtvEGjm8ULHqEKgRyf89F/uN/Q9vpXi7xzWVw0EyMjodrKw5Br6Lucf2BMfYfzrzzxP4dXWITcW6gXyDgdPNH90/7XofwpWuHtOVpM8+VgwyKnjkwaogPDIUYEEHBBGCDXR+G9AfXJnkkmFvZQYNxcsMiMegH8Tnsv4nis5zjBc0tjeKctEXfC2kXGo6xHeRzPaxW5y8yNtJ/2c/zr0K9hgv8AUZR5TxwW43RspCu7kkM3PQ/dwPT61maRLb526fbvFaQOBApYbnwCSSfXjr6k+1aZO+4VgQvOWCNvVvXIIBH+8M/SvnMRiHUqOVrdj1KdNQjYiuI1tyu6WNg5wCD/ABehHY1CAc5Fa8+lxPZtcEGV4kJjyMbj7/T/AAHc1kRSLJxkbsZ4PDD1FevgcY6i5Km/5nlYzC8j54bdfIeW3CkIpduDSMCK9Q88O1HUUA5FKRxigBAaXHekCmk39jQApzRzSjnoaM44oENyc04kCk/GjBoAM0vWm4JNLytAC8460mDRuB70vSgBuSKVaXOaTHpQAEE0nIpc4ozmgBD0qPBR+alPA4phUsaQyTKsKY6nt0ppTbyDUiyZXkUARqSDUhb5OKUrkcVGRg9aAEA7nrTwcik5xSgZoANgJpCCDSlSp4NPGSKAGk54ppQZ4pxBWmk9xQAHilpM5HNOHSgAKgUA0v3him4KnGKAFxmkz2xTivFN245zQAqgg8ilI64oEmOooOG6GgBpGTRilPFJQAoBU0pNGcCm8n2oAXGK5HxfPvu7W2B4jQuR7t/9YV1oJ3Y9a8+1m4+1a3dSA5UPtX6DilLY1oq8rkMQ4rTg+xrouomXyzemSBbYH7wXJMhH4YBrOj4Fac9hFD4c0+/3P9ou7mePbkbRHHgA465LE81m+x2GY1R1I1R1QCGmGnk1GxpARv0qzqFitlFpzq7M13ZrcsCANpLEYHtxVVzxU95ZSWa2hkdX+02y3CbSTtUkgA56HjtxUvdARRVp2R+esxOK0bE/vBVCEkkNlq9tdD/lnIr/AJHmvTUxv45GeK831mLBU4HNd7o0/wBq0qznJ+Zolz9Rwf5U0ZVVrc6S4XPh+XPoP51zLoRXUXYx4ek/D+dc2xJGKpGNXc5XxN4cGoMl3axATHPnkH7wA4OPX1NQaP8AZJbL+zriS4ihDfu/KdUXcerNn6V2AXHIrD1ewEERuIlUxg/MO657e659en0NefmFGUo80eh24GvGL5JF2C3CkRW7DIARcZBb2Hb3zxxWtfSHTZY4ZpQ91IMCNOg9Ppx69ue4rl7DXZbQCUHzHUHYJBkgerH+6PQ1rWkYkiluZ5DPdzfflbJ98D2/U183NKCbe567vc1rC9a6slZWO1xuyVIz6cH8abNaJMS4wjAFmYnG0DktmqUNzJCAqoJHaQB97Ywv973x6VpoQ8qiYBkJGIv75Hr7A9u9TH3pJphboZ0MV4kSSXahRKN0a7cHH+cU8nIrpn0VNRHnNPIkhAAP3lx2GO1UbjwzfxEmMJOB/cbB/I19bhqlqaU5XZ4WJoS524xsvIxcY6UFs9qlkhlhcxyo8b/3XGDUX1rrTucbVtxQxFB2sc4opDx2oELgdjQU560mRSk8cGgA20hyKUZoO6gAzzS7s8NSd+aOtAAAtG3I60maBigBQlGCKOc9aDnFACYzxShOKQE9zQM5oAQg0m/FODDbimsnfNIYp+YU0cHFKBgUZoAcrjpSEZOaNvelHFADe9L0pcZpOBwaAEz3pQT2pMDpSjIoAXLd6DQGzQVIoAQilPTikBIPIp5oAhBIbNWMggHvUWMc0DOaAJODSBOM5zQpUkg0nzB8A8UANxk4NKRgU9hg0wg0AA6Uo54pKMYORQAHigfMaU80AYoAivJPstpNcH/lnGW/SvM0yzlj1Jrutcux5LWbMqpKg3NgkjnjH5Vj2uh6a8bSy6k8UY6uYyVH1OK4q2NpU5csvyPRw2FqOPN3M+ytJry5htreMyTTOERB3Jrf1jwm2maO2pQajBexwP5d0kX/ACxbvg9wD+fqa2vDmjabp+sWupReIrBhCSfLmG0nIIxnt1rd8S6OJ9CbSbfUdPtHldGYXE4BMSkkIuegySe9ck8Y5VF7N6HSqNlaW55KyZqFhiusl8HXqf8AL9pR+l2pqnJ4UuwxBvdMBHUfa1z+Vdf12gt5GfsKnY5o1GTXRv4UvP8An7sPwnBqE+FLzPF3Yk+03/1qX13Dv7aD2FTsYCo0jrGiMzsQqqoyWJ7AetbV9oOtm2jmuRFKLWERiOOUM8ca5OMAc4yemTWhoehXWna7aXks1m0cTnIEuTggjI46jNdbY2UcEjTLGpcnDMASduOg7Y/xrixWY8k4qk0zalhrp855SnOCKvWZxIKnPhbXIshtNl7n5SCKkh0bVYnBbT7gf8AzXorEUn9pfec7pz7E+qR77VWroPB0vmaKIyeYpWXHseazp9L1KazwlhdvgfNiA8VP4RD2NxcW10jxGUqYww6kdqqNam3ZSRnUpy5b2PQ77jQTz6fzrnM8muj1Q7dDUDqdv865wDNbo5Ku4u0MKY0YKkMAVPBBoxilBPSmZGHqenNby/bbQbccuijGPce1Ms7wqgY/6nozbh8p9MdefWuh4YYrBbRZ11RpYxGkW4lGJyE+i+teNj8v9o+amtz1sHjEo8lR7GzEUtzHJKnmXcozb23cA9Hf0Ht1PtXRabo0qIbi4O6R+STWDp8cVhciXa0rE5d3OWY+tdpZ6tazxZWVcj+E8H8qzp4H2OskdMcXGovcHQ3kUWEJAI9TVn7TAy8n8q4jUmj/ALUuTCxaIyEqc5+v61WMjNjLMfqTXoRw0mtzili4ptWN7W9TRxPYbBMVI2yn+Dvx79qwDSqMUhFdcIKCsjhq1HUlzMQ0c46UoHGaM8VZkN470uFpxAxmm7c0AGDnig5BpQcUpPGaBjc8cikH0pc5NLig
QhPqKPlpeCcUEc4FACYGPlpORwTSqMGnDB60DG/LRjjijbkUD5aBCEY7UmeKXce4owDSGN3ZpQKTbxSgYFADge1MIIppY7qnXDJz1oAaOVyKaVJ5qTjGKQKc5oAaStAG7oaa3uKfjI4oAaVKnNOzmjbkdaNhFACbS1HSpFwF96ZtA5NAADmgigAMOKUqVoAYQynPWpI2B+9SZzxik25oAcykngim5ZeopMMB1p6yZ4NACKVPXrSlfSl2rgimZIOKAHYoGM470A02ciON5ckbQTwKG0ldjSu7Iy7mA3lxIqqS0oCJx3yMGuksIo4JBaqkTWYKxSK3LOHyN57FScjH0NYtneI95E4gljVA235dxzgY4HYCtyGdXiUR29yWErMwEDcLxj+WR9K+Oqz5puT3ufTxi4wUUaGlwxRafE01uJDbuYQmwZkdSQOT0AAzVrVorfWNNkuTYQtfaeFuYtxWUOh64YdQQCPqBUdzcxTyW8RimW3YHzWaJhjcRu4xzwMfjVq1urSLXJAqBbSQSx5xgbGAIGPTO7866KcopWMZJ7kV1Y6JaJa3sOmpNLcYNrHGoLOSN2RngY65PT8ax9W0qzvonvTYRi7teWiIUk55+8v3gfWta98i51yKDy2+yJEsKbQQCg+ZwPrhR7gGqkLOl7PGInji2SxoNmBtyCv5EnFZzcdkVG+5jnR9HgtYb+KAymYYhi+8Gz7dSf8ACsvUbCKeNmlhSMI21mjwGjJ6fd6jityQFGggj3IlvCkEZIxlmO0sPoNx/GqSwGVJIkiVIh5kKrjHyEFlJPfBXr6muV73TNk31OetLCB5GLxMqxsFYBySSegHPt19K1DFayskU0WwYyDFuj2/RgefxzTLSISzRAlgG/eNlcEHAB/QZ/Grz2+UmePLOm18MenIG3HphsfhTu3rcGN/si2QIgkvFdv4kuXzgdSATiplWwmn+xrPNDIveOR1Jx6Mfve9SpBI8jMCRIIPJ5OShDYz+Gf0qeWxUQs2wp5RVAvJA28A/p26g1a1JuPt7GFdTvvMvNW/suyVYnhFyzNPM3TaPxGPzrmbywmtNTa6gvZ5rVlSa1eT733iCGB6EEYIrvoY45Le3JR99zIRKVzg7U2546Ha3Wq2oaUE861Eb7UnZBI47Efz4BruVlFSW5le7aY7VSP7MiHZtpx+tc2z/Px0rZ1GX/iXWakg/KASDkcD1rIwua+jg7xTPBrK07BuyKCuRQUxyKEfnmqMhmDnin5pT14puKYC5FJgUhIxim554zQA4rnpSHg80JkNTpFyKAAg4yDTc+tOTIHWkIJJ4oAOccUgBzzSgY60jHuDQAHg+1KTSKxz83ShwCwx0oAB1p1MORS89M0ALkYpR8w4o25WmoCGoADnPNKPY0rAk9KaFOaAE5B5pxHcUN0600Mc+1ADs8UnelfBTjrTeQKADcM0HNIUI5pwPy4pANzj6U/jHFNPPFAyBQAhAJ6UZx0pwNLgelAAME80p3K3B4puFzjvS84oAfs3Kc1BgqeBxUgJU8mlLZ6UAMDY608nIpOOuKTjqKAF6fWkZs9qBg0u0etACIMGpcfLUZbaQM04E0AJnPamsTninvnHSmgcZNADlG7ikZewFIMqcg04bjzmgBmCKcrLTsN3oHl59DQADjkVDNIIoJXYAgKeD0NTnIHy1navJ5doAeC7VlXnyU5S8jWjDnqKJkWtpJO4P2iRCT0DGul0/Sr4zSiO+uoVRtqt5x+cY+8MHj6GszSIme4AAyQe1dzpsauilCGyc5HIr45OU5M+ll7qK6aXqkUMki6vqLbELFY58scDOACOpqe2sdXmsopxrOqwtIiv5cyxsyZ/hYY6iuitoV2fMAABnPYD1q0Ywq5A4HIzXoRoaHLKpqcXqL6rp1lLcz+JG8uPG7fpobqcDheT1qtdjxDC+z+17WQ+1uV+ldlcRDIG3OeB9azLhOW3jkdqxqwlbQuMkcZNc6+kZ3Xlu685yh4FU01DWXgjuIri2aKRdyNjqPXkV0WoJiJl6Bv1Fc/cNkhcYA4A9PoK4JTcXZ7nRFXI21XWywWTyGx6hT/SmpqmpSu8axWLyLwydCM881Hnk8ir9tCrOJNq574HXjHNNVG3aw2rEP2rVj00+0z9W/xp6PqTzeWItP8ANwCVLSBtp4B69O2a2EGO3tQqIZvN8tfNKhC+OdoOcZ9M5Na3IHWWi67fRgLa6VIPR7mYf1qG80m7+wi5ew0Z4pRkSw3Esm7HXvg9P0rsfD0m115zn9KteILGFNPSGCJIolB2qi4AzzwB+NehGleg5Lc53UftOU84htVhso3UqPOJcpGMKuOOPf3pMce9OUsFMR6RscfjQQK97DNOjG3Y8XE39rK4LnHJo203pwaUHB4roMAJ2HBoOD3p52SDDcGomjKnrkUAKAKUDnpSACgZBxQApwDSEsfpTwBj1prGgBEBDZ7VI7YGRTFPrTyuV45oAhZjSKpfvUhGOooCjPFABtwOTSEe1OK+9NO4UAIcil7Uo5PtSkjHSgBu444pMEtS9uKAeaAJcjbmoWYmpQARxTCuDyKAIgNzYqUJgHmkwDzTtvvQA0jA4FJg0pDLQCc0AJ8zdaQKwPSgNUmcikAiDnmkYZajHvQWHpQAgXsOtOB28EZpg65qTI70ANKbuRSheOvNLgig+9AAQehppQjkUpBpc56UAJuyMGgJtOc0v3uMUvagBDtzzxTSncGnYzgGkPXA6UANCZPNO4XtS4IHWgk8UAITk07BUeopuMjmnc44oAYeTT1A65pwXjNN2k8gUAITzShFNLyO1CnJ5oAQx45DVm6hFeSygqpMSj5Sqhq1WApu3jg1lXoqtDkbsa0arpT5kjHtHktHDC4aJx3aE8V0FhrCwIoXV7WLAwA0RAH6VXw23hj+ddGljFcW8bOoYMoPIzXi1srjSs4yZ7FDGe2upK1iGbU11KwmsrjVdIuLWdCksbSeXuB7Z7Vei12eKFYxd6VhAFUC4XgDgAfhis+TRIv4Pl9sAj9RVKbQ8DgRH/ehX/Cs/q9RKykzf92zV1Ka51axltJ1tJIJQA/k3QViM54IPFNvL+9PDQ2w6AHzx0Hrz1rmrjRSOB5QHtGBWZJoihyzO59gcColQm1bmLUY9Df1Dz7y3eI+SgYY3pMMis+e2kKRKJIsxoE3NKCTjuT6moodOt1t03wIx7k5yak+wWX/AD7L+DN/jWiyepKN+ZanG8whCTjbYg+xTA/egbPYSCrUEdxFLK/LhjlU8wbUAHQY/PNWLfw/b7D5lojknO4Ow/rUh8OwHpCF/wC2jf41yrAzWzO32i6jRLcbQBa5AHXeKaTdGaOUROoUMNgcbWzjGe/GD+dKfDcIPLSfQOf8acvh+EnrL/39b/Gn9Sqdxe0ibGl6nd27D/QpGA7AE/yFXrq/v5Fumj0+8lFwwcqVbamFxhcgYBxn61jQ+GoGxlp8e07j+tKNEsxcPFLE8oVsfvJnP9a7KGErOPJzf19xz1atOLvYyGecXMgnt3hZznDY49qeOOtNMMMdxIYYURdxwFHQU7Ac8cG
vXwtKVGmoSd7HkYipGpUcoqw4qG6UwIQacAQMelJnHWugwEYkt0pVYYwaWkJx2oEIAAT3oJqRcH2ppU96AG85oK5pdpNBJpgG3sKMFKUGggk9aAANu4I5pNozincn2pCMHB5oAaVOeDS5oxzxQwxSAByMikwSMUAkCnAigBu3IpQuF4oz6GgNigA2FeaXec4YZFB570oyBQA0qBSFfQ08jHNNIz0oAQcDHegYJ96cV4poJzQAwgUYHY04EY5HNIUPUdKADGe9IVJ70/aPoaayntQAD5RzS5yaaQSMGlVdvegB+e1JzRnJxS9sd6AFUjHUUhTHQ0AZPSl20AAU9qQq2cEcUu3ng80ZYP8AMeKAG9OlO6DNPwMZxQcY+7QAzaD3o+XpinDaexpCFoATocUpHFJtGetSbenNAEQOR1wacuR3pCo3YzzTtvH3ulAC8tx3qLBU1MSMfSmYUnOeaAAcrSU7B4xzSc56UANxzXVaRIJNOi/2QU/KuZKccV0PhsLLb3EWcMjhgfYj/wCtWNaDlHQ6cNPlnqabLxVWZeK0GhcDgA/Sqk0T8/Ka5HB9jvUkZU6A5zWbNF1rZlhc/wAJ/GqxtSzDcRjPQVm6UpdDVVYx6mNs2kZPFJtDdKUjczfU8U0jaehr1IqyseJJ3bZ0mnAS2cTnk4wfqKuGIelZ3h5mlimiGDsYNg+//wCqtowyf3a4Z02pM9SnVUoJlFohTUjG6rbQyf3aasDZ5IFT7N9ivaLuSQKKzr5B9rmIPIPT8K2oIVHJy31rD1x/Iubl84Mjqi/lXRThy7mM5pmHdRCK4YD7p5H41BsPbGasSkP5e487AKi2YPDV0o8+W4gV9uMUmHH8OaUFs4LcUZYHhulAhOccrScVMrk8HmmsAelADBjHajBwQaMAHkYp4IPINADRuUf40jOD2qXAxk81ET83agQowR05oC45oYDHvSLyetMA4Y88U7AxwaQg544pQOcGgBmCD1px6ZpSoAyDzTcnGBSAaV96MYFSDle2aCPl5oAYAM0jL6U7A6ilagCMHGKeSG4o2c9Kcy4HBoAFAA600g560oyPel2g8k4NACDpzTSuaM4yO9OXkc4oGR8Ec9aUccGk2nqRSkn0oEIeRg0fc70dBmlxuGcUAIAc56075CDnijGeRQFGME0AIFGPlNKIyDmlXK9elOyOvNACbcd6UoeopCAz7hSF8etAx2B15BoKuw6ZpF3yMQDwKkTMffJoAizxjoRTg4AxyaJH5+6DScEY20CF+VhnOKUoM9aFUZ5FJIBuGw80DGs4zwOlEbsSc8CpPuD5lH1poZd3PANAhPLDZJfJ9qbjaw54NSlVQbhTGXIyetACDqecinAHPA+tN+XoetORhnAzmgBrSEPgcUokp5Ak4OMjvSMgjA70AKGX6E1s+HCY9TkQH5HhP5gg1iZU9cr71q6BmPWYMtkOGXr7f/WoexcH7yOuNVpatEcVXlFZHUZ0vWoVXLVPKOaYgwGJ6BSf0pDOY27hkHmkAIHOc0m9SF9hTiARkMa2OIv+H5vJ1ZFPCyqUP16iuwPSuCjkaGVJU6owbJ9q70EOodfusAR+NRLc3pPSxFJ0qAfeqy44qvj5qk2LcNcvr0rf2vdQFcgCNlPpkda6mHtWHrVusurEcB3iXk98ZoE9jnpVwI8/3M/rUWATnNWLttt00YHC4Xj2qEtgj5QwrU5JbjSMc4zTSwJ9KlOzb0xRtXHNAhiAdjzRtAPXBp3y+vH0oKK3O/AoAACTzTXUADnNPHyDhsmjlvlwKAEBbGO1NZaeyY7YpuT3JxQIaOTgjilOFORSjnJ3DFNxxnGaYwyTn0oBxS5yOlBTjjikIQY/GkK/NjpSqrfhQRgHsaAABozxgj1pdwz15phbPQ9KXaWAboKBigA96QjDc0KD06j1pccUCDdQMfjQU444oAb6igA6NgUnGeaP0pCO+aAAnjijaVORz7U3cAaA5IoGO+Yd6GduMinbcqOeaU5IwRmgQm0HnFLhQKavGR3pwYAfMfwoAaAufvEUFQe9PADE7cZo8tuKAIyp6bqeFbGDSNgDB601ZOcAcUAOKlOQeKUksB2agjeeDgCjJBAIyKBiKGVs96kZmHUZpAoJ+U8elIQV65P0oAQBWJ6g+lOXkEMuMUgOWzg5+lPIOKAI2zxt6UJtduFOfWnouwnBGD1FLlQePloARvl4Jz7U0Bc8jHFKAxGeppCSowymgBgZn4IGKeAF9aTCEcHFA+Y/ex70CADJzkUhyfTNOK5PcgdxQfl57UDGrkGjJB608KpBwDmmkLjPIoAXdxkkfSrelSqmrWjYwPNA/PiqOM+9PjkMcit3Ugj8DQxp2Z6MRxVeUcVZBDKGHRgD+dQSjrWZ1mZMOagnYR2Ny+QMRkD6nirNwOaz9VcR6YE7ySAY9hzSW4S2MHPA+UUp2FPlODSnaRyKYFQZ+Y9a1OSzBNzcZB+tdposvnaTBk8oDGfw/wAiuLO0+tdH4WnyLm3z0IlXPvwf6UpGlJNM3nFV2GGq2w4NV3HNZnQSRVm6q2L88A4iH9a04qydUI+3zMzgBUUcn2poDlpFJOd/PU+9LgEAK2DTNyk5I/Klyp5HFaHK4sXocbc0Ftvamrwc7uPSn7i3GB+dBNmN3qcgnrQUG0YFBwOqil7ZxyPQ0CGts6jJxQoOM9Kf8uOaAdy8P8vuKAEBYn73HvSEHtilBPopFICucMOfagBAecMo29yKAqsflJFKVUNgnmm9+DmgBwQMPvZprI2flzihQo5Jp24Lgrjd3GaAIsEd6cXB46Uu/J5XmkIRv8aAGAYPanjcp9QaXB2/KM5poYEbc4I7UAIpI9qlwHFRcinDjrQAMpB4JxTcH1p2QORSE5oAQsDxQ33KTA60obs3SmIi2805QVPNPKAglTSDNADjITgAAk1IgZWyeB6etRAMCDtwR6VOXGMlTSGJKVPJXn2qLCHnmncMwAY/jSrtDbT+dADQvPWnOuEyG6dqG+UHGT+FN4bAZsGgAXHV1P1pfk3DbUmGVf8AZ96j2rnigBxUHqPyNKV2jOT+NMJyQM5p209xx70gAJlc96CrL6496AgB4z+dBJxkH8KADJwe5+tB5HPWjLkZwKUDnoufrTAQDd91qOWHOD9aQEMfmABoaRVb+8PrSCzFAA5xj6Uv7zbkc+2aiMiHkAg/XijzuOFP50yuVj9yn7ykUoCuSAASKh3kjH9aRXZCSpwT3FFw5GTGMhvlBHfGaGJ2/MOKhLMerGjNA+QnVxjk4poZB3FRUtBXIiTcvvineYMcJ+NNi2b/AN4CR6VMxhGSi8+4pXHyI7fSpTPpVrITkmMZ/DippRwa8lvviNq+m3B0jTba0iEDENPKnmsw68LwB+tUrj4seIbV9r/2ZPx917Uof0NQzZI9WuBlwKxNfk/fW8IP3E3H2J/+sK5jRvinDqNzDDqemNbtI20TWzGRc+6n5vyzTIdaGu+KNau7dw1ivlQ27AY3hARu/Ek0LcJLQ0qOaTNANWZWFxmtbw+/laqjZ4ZSh/H/AOvVCNYSnzE7qe
v7s7o3YEEEbfakUkd+elV369Ky9Q8aeHNLwt7qsKSlQxjTLsMjPIHSufu/iloKORBb6hcY6MIQin8WNSaWO3iHNc7qp3ahd9Mb8H3wBWTZ/FXRJIp3mstQiaBdxURiQMew3L0ycDniq2j6rPq2kre3OPPlkkZwOg+Y4H4DA/ChBsNdME0zvU0nWoj1qzKwmaTNTRxI6nc5B7CnCCMA5LMT0weKAsVtx9aUOQakY244AYfnTlhSRsrkLQJxGeafQUKykgnj27Us8SRkbH3evHSoKCXFE7t8vXH0pFI6t+dQ7jS7yKZPITsy5HSkLE9Dz9Kh3c9aVXwTQJxZISzLnHHtTVODkEZoD45yaQMpblRQKwrEE/M/PtTsrjBIx61HiPdnH4U47c46LjigQudpyvT60MUkXByG9RQo2k8j60w7sggZoAewwcZz70cgU3PPNOznjFAhCMc5zSEZqQAAUwjnrQAuBjmkYj0o/nQDzzQAgUDkGnZyfpSnjpzQQSM9KAJUkErBQoJ70SqU6KQv1zU9kQzMdoGBSyOG3DGOanqbRguW7Ke5Tj1FP/Kg4DdP0ozkY5FUZCAMG3YyPSnFE3bh9400ZPTrRnPDKM0AKxZmwRwKavLY6UoUj7uaRsnvzQBLLFhdwOce1RLOSNvU0geRegJ/GpbeQtOgI6n0pDWo94nC8INw7A1WZpF/gK1pSS7JMbQeKYWRvalc6OSJm+Y/rTSx/vGr726N2H4VC1n3DEfWgXJYrdTVlLJnjD71wewqI27r0waQMy9QRTFYe0KqOW579qkjhtmwGYknrg9KhLbutKpweKALX9nxsN0cp2erYFUpYxG5VX3gd8VKWNQs2TQhuw3NGaQ0lAiRAXIC9TVh7SSNcsRn0FVVbFTiVyOpIoGgCMrYIYe+KtJb27SIDMQpIycjiqTYPVaaI0HOBmgZyceizap8QbywtbizWTzWMX2tmCzY/hBXuRVnW/hd4pu7kypb6Z7eVe4AHphgDUXiPSb/APtNdT01N5O0uqHDIw747j6VRl8feNLYeSL+9Ttte3DEfQlc1BqmVdS8H+ItEiiubuyS1gSRFE4ukYA5GMYOSe/H1rpvDAL2l1dN1mmJz647/rXLQ23iLxNqCXOpTXLKvHn3ZwEH+yv+ArurWCKztY7eEYjQYHqT3JpoUn0Lec1IInxnH51XBqZZXA4JxVGaHhSCAaspExYKrLk8VSf5vvAj8MUkcSKwYEnn1oGeeaJay3l5JD5E8rKzBjDA0p4J7AUreCfEmo6kzWnh/VGiZs/vIvLH/jxHFaFxqN/4N8RzzWdzcW8Fw3mDyZNodTzgjocGszVviBr+oSvHJrt4bZv+WQkCZ9iVANQam7otreeF7/V7W+Fos82mSo0UdwJWiPGAdvAJ6Y6/St3wwvk+HbZM/wB4/rXGeF9LvL2V5Gjkgs3UrJJjbvB7Lnrk45r0CGNLe3jgiXbHGu1R7U0TIe3Wk8t/7jU3cQamF04XHB9yKogQwsgyWQ467TmnNbuqByy4IzwaikmLDjj6VGGl6ea2PSgWhNLA8UgjbaWIBABznNBDoPmUinRzuOTIx9abLOzjBPy9hQGhCxphoYmmk0CCjBJwBmgDmrltP5SFcAg9jQJFTY/9xvypfLkx901fkuEdSNpGewPBqqSd3AGKB2RCVYHBUikyQa0LcoG3FefWm3colAUIox3A5/OgLFHPNO3H2/EUeWx6KfypfJf0x9aZPLcNy4xj9aXA8s+o9DSiBieoqVbbjqaBezIlXOKe23HBoYBDtFIMY6c0GTVnYBnsacQvc803HzcHijGDzQIb26UgBzTzzilAAOaBjTlT1pwPvQSW6ikBBNAi1p7bzIeDwOaR/vN9afp7EiQEYxgdMVGerfWp6m6+FDCKBtK5J/I01+mcH6ilVCRkUzFiDaW5IFKUyOCDQVHcc+9BHOD+GKAE2kcgmk2NntilwwBGSBTCSPXNMA+fd0we5zxUlqxN2gJPX0qAStnHX29as2jN9qjBBwc/ypMqO6J7j/W/hUJNTXH+u/CoDSRrLcdux3p3mnGDyKioFMLskwGOafsUiol61IuaCkyNoV9KiMZHQ/nVz6801kU+1IehRYN6VESe9X2j96aIweophYo9atWxgGfNj3eh9KVoI8/dx9KQW/dW/OgVh8whJ/dqR68YpqSvGcAZH1pDE47Z+lIB60BqWUuBIcTJ8o/u9abO8TLtjjIHq3Wo8UbaQFR42eTAND2knXe351dWL5t1SMhxQUYxgZGyWJq/ZQQMD55bPb0qT7OznhSfoKd5Zj68fWmIV7eEHKY468ZpEka3fcrEj02nFPWNmAwVOenNK0EqvtK/NjOAaA1HQ3QMboQ/zDAJHSpkhtVTMh3H0xyarbGQ/MpH1p4GVpDTMjUbaG8kMbwJLEDlUlUNiqMVhBavuhsbZD6rCuf5V0HkYfJFNeEelAFCOaRvvdhVqP5wM8Uq2/zdKljj2DFAFkWUBTIlycdz3+lV3iSIjcueOcnOacxOBTArN0Un6CmJixtbNIu5QVzyADUwtIWJJwq+zYqIQOOq4+vFP27gBvUfrQBXlREkIRiy+pFQk1o/YlK7mdiPYVEbeNT93P1NAWKBNABPQE1opAT92L/x2pvs7ey/VgKAsZgic/wGpBEwHOPzq6bdBy0qj6ZJp6W8J/jc/QAUBZFERA9W/IVMtuh5yT9Tirnkwr/AT9TRkA/Kij6CgNCssPA2LnPYAmrltEygmaMqPpipbd2LjmnaiWzjNAvMpSxhifnUD3NQ+VEOshP+6tIRzTcc0xczJcQA8K7fVqcpT+GNRUYHHvSr1pCuxl8qtNlFCjaOnHOKqBT6jHpVu6Qs5x6CqZUg/wBKZjLcUjC+lN3buQelK0gPBpAMc9qBDsd6Q5HWnYIOByKQsORigADds5xR0OSOtCsCcY5pXGFxQB//2Q==" - ] - } - }, - "widgets_values": [ - "[{\"x\":135.5199999999999,\"y\":241.9999999999998},{\"x\":128.2599999999999,\"y\":313.38999999999976},{\"x\":164.55999999999986,\"y\":373.8899999999997}]", - 
"[{\"x\":135.52000427246094,\"y\":242},{\"x\":135.22706604003906,\"y\":244.880615234375},{\"x\":134.93411254882812,\"y\":247.76124572753906},{\"x\":134.6411590576172,\"y\":250.64186096191406},{\"x\":134.3482208251953,\"y\":253.52247619628906},{\"x\":134.05743408203125,\"y\":256.4033203125},{\"x\":133.77931213378906,\"y\":259.285400390625},{\"x\":133.52249145507812,\"y\":262.1694641113281},{\"x\":133.2918701171875,\"y\":265.05572509765625},{\"x\":133.091064453125,\"y\":267.9442138671875},{\"x\":132.9228973388672,\"y\":270.83477783203125},{\"x\":132.79014587402344,\"y\":273.7272033691406},{\"x\":132.69479370117188,\"y\":276.6210632324219},{\"x\":132.63876342773438,\"y\":279.5159606933594},{\"x\":132.6254119873047,\"y\":282.411376953125},{\"x\":132.65663146972656,\"y\":285.3066711425781},{\"x\":132.73529052734375,\"y\":288.2010498046875},{\"x\":132.86378479003906,\"y\":291.0936279296875},{\"x\":133.04495239257812,\"y\":293.9833679199219},{\"x\":133.28228759765625,\"y\":296.8690490722656},{\"x\":133.5787811279297,\"y\":299.749267578125},{\"x\":133.93820190429688,\"y\":302.6222839355469},{\"x\":134.36431884765625,\"y\":305.48614501953125},{\"x\":134.8612518310547,\"y\":308.33856201171875},{\"x\":135.4333953857422,\"y\":311.1768493652344},{\"x\":136.08445739746094,\"y\":313.9980773925781},{\"x\":136.8114471435547,\"y\":316.80072021484375},{\"x\":137.6095733642578,\"y\":319.58392333984375},{\"x\":138.47413635253906,\"y\":322.3472595214844},{\"x\":139.4008331298828,\"y\":325.0903625488281},{\"x\":140.38595581054688,\"y\":327.81304931640625},{\"x\":141.42547607421875,\"y\":330.51544189453125},{\"x\":142.5164031982422,\"y\":333.1974792480469},{\"x\":143.65553283691406,\"y\":335.85943603515625},{\"x\":144.83985900878906,\"y\":338.5015869140625},{\"x\":146.0667266845703,\"y\":341.1242370605469},{\"x\":147.33367919921875,\"y\":343.7278137207031},{\"x\":148.63796997070312,\"y\":346.3128356933594},{\"x\":149.9779052734375,\"y\":348.87957763671875},{\"x\":151.3503875732422,\"y\":351.4290771484375},{\"x\":152.7532958984375,\"y\":353.96197509765625},{\"x\":154.18373107910156,\"y\":356.47943115234375},{\"x\":155.6393585205078,\"y\":358.982421875},{\"x\":157.114013671875,\"y\":361.4742126464844},{\"x\":158.6011505126953,\"y\":363.9586181640625},{\"x\":160.0908660888672,\"y\":366.44146728515625},{\"x\":161.58058166503906,\"y\":368.92431640625},{\"x\":163.07028198242188,\"y\":371.40716552734375},{\"x\":164.55999755859375,\"y\":373.8900146484375}]", - 512, - 512, - 49, - "path", - "basis", - 0.5, - 1, - "list", - 0, - 1, - null, - null, - null - ] - }, - { - "id": 56, - "type": "CogVideoDecode", - "pos": { - "0": 1585, - "1": 41 - }, - "size": { - "0": 300.396484375, - "1": 198 - }, - "flags": {}, - "order": 14, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 193 - }, - { - "name": "samples", - "type": "LATENT", - "link": 208 - } - ], - "outputs": [ - { - "name": "images", - "type": "IMAGE", - "links": [ - 155 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoDecode" - }, - "widgets_values": [ - false, - 240, - 360, - 0.2, - 0.2, - true - ] - }, - { - "id": 73, - "type": "ImageResizeKJ", - "pos": { - "0": -436, - "1": 527 - }, - "size": { - "0": 315, - "1": 266 - }, - "flags": {}, - "order": 5, - "mode": 0, - "inputs": [ - { - "name": "image", - "type": "IMAGE", - "link": 166 - }, - { - "name": "get_image_size", - "type": "IMAGE", - "link": null, - "shape": 7 - }, - { - "name": "width_input", - "type": "INT", - "link": null, - 
"widget": { - "name": "width_input" - }, - "shape": 7 - }, - { - "name": "height_input", - "type": "INT", - "link": null, - "widget": { - "name": "height_input" - }, - "shape": 7 - } - ], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 188, - 203 - ], - "slot_index": 0 - }, - { - "name": "width", - "type": "INT", - "links": null - }, - { - "name": "height", - "type": "INT", - "links": null - } - ], - "properties": { - "Node name for S&R": "ImageResizeKJ" - }, - "widgets_values": [ - 512, - 512, - "lanczos", - false, - 2, - 0, - 0, - "disabled" - ] - }, - { - "id": 31, - "type": "CogVideoTextEncode", - "pos": { - "0": 497, - "1": 520 - }, - "size": { - "0": 466.41448974609375, - "1": 167.15626525878906 - }, - "flags": {}, - "order": 8, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 209 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 202 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "clip", - "type": "CLIP", - "links": null - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "The video is not of a high quality, it has a low resolution. Watermark present in each frame. Strange motion trajectory. ", - 1, - true - ] - }, - { - "id": 20, - "type": "CLIPLoader", - "pos": { - "0": -13, - "1": 307 - }, - "size": { - "0": 451.30548095703125, - "1": 82 - }, - "flags": {}, - "order": 1, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "CLIP", - "type": "CLIP", - "links": [ - 54 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CLIPLoader" - }, - "widgets_values": [ - "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", - "sd3" - ] - }, - { - "id": 68, - "type": "ImageCompositeMasked", - "pos": { - "0": 1845, - "1": 1133 - }, - "size": { - "0": 315, - "1": 146 - }, - "flags": {}, - "order": 15, - "mode": 0, - "inputs": [ - { - "name": "destination", - "type": "IMAGE", - "link": 155 - }, - { - "name": "source", - "type": "IMAGE", - "link": 153 - }, - { - "name": "mask", - "type": "MASK", - "link": 154, - "shape": 7 - } - ], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 156 - ], - "slot_index": 0 - } - ], - "properties": { - "Node name for S&R": "ImageCompositeMasked" - }, - "widgets_values": [ - 0, - 0, - false - ] - }, - { - "id": 66, - "type": "VHS_VideoCombine", - "pos": { - "0": 1185, - "1": 1158 - }, - "size": [ - 605.3909912109375, - 909.3909912109375 - ], - "flags": {}, - "order": 12, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 142 - }, - { - "name": "audio", - "type": "AUDIO", - "link": null, - "shape": 7 - }, - { - "name": "meta_batch", - "type": "VHS_BatchManager", - "link": null, - "shape": 7 - }, - { - "name": "vae", - "type": "VAE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "Filenames", - "type": "VHS_FILENAMES", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "VHS_VideoCombine" - }, - "widgets_values": { - "frame_rate": 8, - "loop_count": 0, - "filename_prefix": "CogVideoX-Tora-trajectory", - "format": "video/h264-mp4", - "pix_fmt": "yuv420p", - "crf": 19, - "save_metadata": true, - "pingpong": false, - "save_output": false, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "CogVideoX-Tora-trajectory_00002.mp4", - "subfolder": "", - "type": "temp", - "format": "video/h264-mp4", - "frame_rate": 8 - }, - "muted": false - } - } - 
}, - { - "id": 65, - "type": "CreateShapeImageOnPath", - "pos": { - "0": 818, - "1": 1169 - }, - "size": { - "0": 313.4619445800781, - "1": 286 - }, - "flags": {}, - "order": 10, - "mode": 0, - "inputs": [ - { - "name": "coordinates", - "type": "STRING", - "link": 145, - "widget": { - "name": "coordinates" - } - }, - { - "name": "size_multiplier", - "type": "FLOAT", - "link": null, - "widget": { - "name": "size_multiplier" - }, - "shape": 7 - }, - { - "name": "frame_width", - "type": "INT", - "link": 149, - "widget": { - "name": "frame_width" - } - }, - { - "name": "frame_height", - "type": "INT", - "link": 150, - "widget": { - "name": "frame_height" - } - } - ], - "outputs": [ - { - "name": "image", - "type": "IMAGE", - "links": [ - 142, - 153 - ], - "slot_index": 0 - }, - { - "name": "mask", - "type": "MASK", - "links": [ - 154 - ], - "slot_index": 1 - } - ], - "properties": { - "Node name for S&R": "CreateShapeImageOnPath" - }, - "widgets_values": [ - "circle", - "", - 512, - 512, - 12, - 12, - "red", - "black", - 0, - 1, - [ - 1 - ], - 1 - ] - }, - { - "id": 83, - "type": "Note", - "pos": { - "0": 878, - "1": 1512 - }, - "size": [ - 232.98718755357777, - 92.3359134366683 - ], - "flags": {}, - "order": 2, - "mode": 0, - "inputs": [], - "outputs": [], - "properties": {}, - "widgets_values": [ - "This is just for visualization, not necessary otherwise" - ], - "color": "#432", - "bgcolor": "#653" - }, - { - "id": 1, - "type": "DownloadAndLoadCogVideoModel", - "pos": { - "0": 633, - "1": 44 - }, - "size": [ - 397.3594142178358, - 194 - ], - "flags": {}, - "order": 3, - "mode": 0, - "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null, - "shape": 7 - }, - { - "name": "block_edit", - "type": "TRANSFORMERBLOCKS", - "link": null, - "shape": 7 - }, - { - "name": "lora", - "type": "COGLORA", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 174, - 193, - 200 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadCogVideoModel" - }, - "widgets_values": [ - "alibaba-pai/CogVideoX-Fun-V1.1-5b-InP", - "bf16", - "disabled", - "disabled", - false - ] - }, - { - "id": 67, - "type": "GetMaskSizeAndCount", - "pos": { - "0": 364, - "1": 862 - }, - "size": { - "0": 264.5999755859375, - "1": 86 - }, - "flags": { - "collapsed": true - }, - "order": 9, - "mode": 0, - "inputs": [ - { - "name": "mask", - "type": "MASK", - "link": 146 - } - ], - "outputs": [ - { - "name": "mask", - "type": "MASK", - "links": null - }, - { - "name": "width", - "type": "INT", - "links": [ - 149, - 171, - 205 - ], - "slot_index": 1 - }, - { - "name": "height", - "type": "INT", - "links": [ - 150, - 172, - 206 - ], - "slot_index": 2 - }, - { - "name": "count", - "type": "INT", - "links": [ - 170, - 207 - ], - "slot_index": 3 - } - ], - "properties": { - "Node name for S&R": "GetMaskSizeAndCount" - }, - "widgets_values": [] - }, - { - "id": 75, - "type": "DownloadAndLoadToraModel", - "pos": { - "0": 259, - "1": 55 - }, - "size": { - "0": 315, - "1": 58 - }, - "flags": {}, - "order": 4, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "tora_model", - "type": "TORAMODEL", - "links": [ - 175 - ] - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadToraModel" - }, - "widgets_values": [ - "kijai/CogVideoX-5b-Tora" - ] - }, - { - "id": 30, - "type": "CogVideoTextEncode", - "pos": { - "0": 493, - "1": 303 - }, - "size": { - "0": 471.90142822265625, - "1": 168.08047485351562 
- }, - "flags": {}, - "order": 6, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 54 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 201 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "clip", - "type": "CLIP", - "links": [ - 209 - ], - "slot_index": 1 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "anime chibi toy moving her arm", - 1, - false - ] - }, - { - "id": 80, - "type": "CogVideoXFunSampler", - "pos": { - "0": 1131, - "1": 150 - }, - "size": { - "0": 367.79998779296875, - "1": 434 - }, - "flags": {}, - "order": 13, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 200 - }, - { - "name": "positive", - "type": "CONDITIONING", - "link": 201 - }, - { - "name": "negative", - "type": "CONDITIONING", - "link": 202 - }, - { - "name": "start_img", - "type": "IMAGE", - "link": 203, - "shape": 7 - }, - { - "name": "end_img", - "type": "IMAGE", - "link": null, - "shape": 7 - }, - { - "name": "context_options", - "type": "COGCONTEXT", - "link": null, - "shape": 7 - }, - { - "name": "tora_trajectory", - "type": "TORAFEATURES", - "link": 204, - "shape": 7 - }, - { - "name": "fastercache", - "type": "FASTERCACHEARGS", - "link": null, - "shape": 7 - }, - { - "name": "vid2vid_images", - "type": "IMAGE", - "link": null, - "shape": 7 - }, - { - "name": "width", - "type": "INT", - "link": 205, - "widget": { - "name": "width" - } - }, - { - "name": "height", - "type": "INT", - "link": 206, - "widget": { - "name": "height" - } - }, - { - "name": "video_length", - "type": "INT", - "link": 207, - "widget": { - "name": "video_length" - } - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": null - }, - { - "name": "samples", - "type": "LATENT", - "links": [ - 208 - ] - } - ], - "properties": { - "Node name for S&R": "CogVideoXFunSampler" - }, - "widgets_values": [ - 49, - 720, - 480, - 43, - "fixed", - 20, - 6, - "DDIM", - 0.056, - 1 - ] - }, - { - "id": 74, - "type": "ToraEncodeTrajectory", - "pos": { - "0": 1129, - "1": 675 - }, - "size": { - "0": 335.1993408203125, - "1": 230 - }, - "flags": {}, - "order": 11, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 174 - }, - { - "name": "tora_model", - "type": "TORAMODEL", - "link": 175 - }, - { - "name": "coordinates", - "type": "STRING", - "link": 176, - "widget": { - "name": "coordinates" - } - }, - { - "name": "num_frames", - "type": "INT", - "link": 170, - "widget": { - "name": "num_frames" - } - }, - { - "name": "width", - "type": "INT", - "link": 171, - "widget": { - "name": "width" - } - }, - { - "name": "height", - "type": "INT", - "link": 172, - "widget": { - "name": "height" - } - } - ], - "outputs": [ - { - "name": "tora_trajectory", - "type": "TORAFEATURES", - "links": [ - 204 - ] - }, - { - "name": "video_flow_images", - "type": "IMAGE", - "links": null - } - ], - "properties": { - "Node name for S&R": "ToraEncodeTrajectory" - }, - "widgets_values": [ - "", - 720, - 480, - 49, - 1, - 0, - 0.4, - false - ] - }, - { - "id": 44, - "type": "VHS_VideoCombine", - "pos": { - "0": 2477, - "1": 48 - }, - "size": [ - 1131.619140625, - 1435.619140625 - ], - "flags": {}, - "order": 16, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 156 - }, - { - "name": "audio", - "type": "AUDIO", - "link": null, - "shape": 7 - }, - { - "name": "meta_batch", - "type": "VHS_BatchManager", - 
"link": null, - "shape": 7 - }, - { - "name": "vae", - "type": "VAE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "Filenames", - "type": "VHS_FILENAMES", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "VHS_VideoCombine" - }, - "widgets_values": { - "frame_rate": 16, - "loop_count": 0, - "filename_prefix": "CogVideoX-Tora", - "format": "video/h264-mp4", - "pix_fmt": "yuv420p", - "crf": 19, - "save_metadata": true, - "pingpong": false, - "save_output": false, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "CogVideoX-Tora_00005.mp4", - "subfolder": "", - "type": "temp", - "format": "video/h264-mp4", - "frame_rate": 16 - }, - "muted": false - } - } - } - ], - "links": [ - [ - 54, - 20, - 0, - 30, - 0, - "CLIP" - ], - [ - 142, - 65, - 0, - 66, - 0, - "IMAGE" - ], - [ - 145, - 60, - 1, - 65, - 0, - "STRING" - ], - [ - 146, - 60, - 0, - 67, - 0, - "MASK" - ], - [ - 149, - 67, - 1, - 65, - 2, - "INT" - ], - [ - 150, - 67, - 2, - 65, - 3, - "INT" - ], - [ - 153, - 65, - 0, - 68, - 1, - "IMAGE" - ], - [ - 154, - 65, - 1, - 68, - 2, - "MASK" - ], - [ - 155, - 56, - 0, - 68, - 0, - "IMAGE" - ], - [ - 156, - 68, - 0, - 44, - 0, - "IMAGE" - ], - [ - 166, - 72, - 0, - 73, - 0, - "IMAGE" - ], - [ - 170, - 67, - 3, - 74, - 3, - "INT" - ], - [ - 171, - 67, - 1, - 74, - 4, - "INT" - ], - [ - 172, - 67, - 2, - 74, - 5, - "INT" - ], - [ - 174, - 1, - 0, - 74, - 0, - "COGVIDEOPIPE" - ], - [ - 175, - 75, - 0, - 74, - 1, - "TORAMODEL" - ], - [ - 176, - 60, - 1, - 74, - 2, - "STRING" - ], - [ - 188, - 73, - 0, - 60, - 0, - "IMAGE" - ], - [ - 193, - 1, - 0, - 56, - 0, - "COGVIDEOPIPE" - ], - [ - 200, - 1, - 0, - 80, - 0, - "COGVIDEOPIPE" - ], - [ - 201, - 30, - 0, - 80, - 1, - "CONDITIONING" - ], - [ - 202, - 31, - 0, - 80, - 2, - "CONDITIONING" - ], - [ - 203, - 73, - 0, - 80, - 3, - "IMAGE" - ], - [ - 204, - 74, - 0, - 80, - 6, - "TORAFEATURES" - ], - [ - 205, - 67, - 1, - 80, - 9, - "INT" - ], - [ - 206, - 67, - 2, - 80, - 10, - "INT" - ], - [ - 207, - 67, - 3, - 80, - 11, - "INT" - ], - [ - 208, - 80, - 1, - 56, - 1, - "LATENT" - ], - [ - 209, - 30, - 1, - 31, - 0, - "CLIP" - ] - ], - "groups": [ - { - "title": "TrajectoryViz", - "bounding": [ - 758, - 998, - 1508, - 1090 - ], - "color": "#3f789e", - "font_size": 24, - "flags": {} - } - ], - "config": {}, - "extra": { - "ds": { - "scale": 0.513158118230707, - "offset": [ - 1119.103710663005, - 88.72790106693894 - ] - } - }, - "version": 0.4 -} \ No newline at end of file diff --git a/examples/cogvidex_fun_5b_GGUF_10GB_VRAM_example_02.json b/examples/cogvidex_fun_5b_GGUF_10GB_VRAM_example_02.json deleted file mode 100644 index 40c777c..0000000 --- a/examples/cogvidex_fun_5b_GGUF_10GB_VRAM_example_02.json +++ /dev/null @@ -1,622 +0,0 @@ -{ - "last_node_id": 51, - "last_link_id": 114, - "nodes": [ - { - "id": 20, - "type": "CLIPLoader", - "pos": { - "0": -26, - "1": 400 - }, - "size": { - "0": 451.30548095703125, - "1": 82 - }, - "flags": {}, - "order": 0, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "CLIP", - "type": "CLIP", - "links": [ - 54 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "CLIPLoader" - }, - "widgets_values": [ - "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", - "sd3" - ] - }, - { - "id": 31, - "type": "CogVideoTextEncode", - "pos": { - "0": 497, - "1": 520 - }, - "size": { - "0": 463.01251220703125, - "1": 144 - }, - "flags": {}, - "order": 5, - "mode": 0, - "inputs": [ - { - "name": 
"clip", - "type": "CLIP", - "link": 108 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 111 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "clip", - "type": "CLIP", - "links": null - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "The video is not of a high quality, it has a low resolution. Watermark present in each frame. Strange motion trajectory. ", - 1, - true - ] - }, - { - "id": 44, - "type": "VHS_VideoCombine", - "pos": { - "0": 1842, - "1": 345 - }, - "size": [ - 855.81494140625, - 881.2099609375 - ], - "flags": {}, - "order": 8, - "mode": 0, - "inputs": [ - { - "name": "images", - "type": "IMAGE", - "link": 97 - }, - { - "name": "audio", - "type": "AUDIO", - "link": null, - "shape": 7 - }, - { - "name": "meta_batch", - "type": "VHS_BatchManager", - "link": null, - "shape": 7 - }, - { - "name": "vae", - "type": "VAE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "Filenames", - "type": "VHS_FILENAMES", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "VHS_VideoCombine" - }, - "widgets_values": { - "frame_rate": 16, - "loop_count": 0, - "filename_prefix": "CogVideoX_Fun", - "format": "video/h264-mp4", - "pix_fmt": "yuv420p", - "crf": 19, - "save_metadata": true, - "pingpong": false, - "save_output": false, - "videopreview": { - "hidden": false, - "paused": false, - "params": { - "filename": "CogVideoX_Fun_00003.mp4", - "subfolder": "", - "type": "temp", - "format": "video/h264-mp4", - "frame_rate": 16 - }, - "muted": false - } - } - }, - { - "id": 36, - "type": "LoadImage", - "pos": { - "0": 227, - "1": 700 - }, - "size": { - "0": 391.3421325683594, - "1": 456.8497009277344 - }, - "flags": {}, - "order": 1, - "mode": 0, - "inputs": [], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 71 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "MASK", - "type": "MASK", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "LoadImage" - }, - "widgets_values": [ - "sd3stag.png", - "image" - ] - }, - { - "id": 37, - "type": "ImageResizeKJ", - "pos": { - "0": 688, - "1": 708 - }, - "size": { - "0": 315, - "1": 266 - }, - "flags": {}, - "order": 4, - "mode": 0, - "inputs": [ - { - "name": "image", - "type": "IMAGE", - "link": 71 - }, - { - "name": "get_image_size", - "type": "IMAGE", - "link": null, - "shape": 7 - }, - { - "name": "width_input", - "type": "INT", - "link": null, - "widget": { - "name": "width_input" - } - }, - { - "name": "height_input", - "type": "INT", - "link": null, - "widget": { - "name": "height_input" - } - } - ], - "outputs": [ - { - "name": "IMAGE", - "type": "IMAGE", - "links": [ - 112 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "width", - "type": "INT", - "links": null, - "shape": 3 - }, - { - "name": "height", - "type": "INT", - "links": null, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "ImageResizeKJ" - }, - "widgets_values": [ - 720, - 480, - "lanczos", - true, - 16, - 0, - 0, - "disabled" - ] - }, - { - "id": 11, - "type": "CogVideoDecode", - "pos": { - "0": 1477, - "1": 344 - }, - "size": { - "0": 300.396484375, - "1": 198 - }, - "flags": {}, - "order": 7, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 113 - }, - { - "name": "samples", - "type": "LATENT", - "link": 114 - } - ], - "outputs": [ - { - "name": "images", - "type": "IMAGE", - "links": [ - 97 - ], - "slot_index": 0, - 
"shape": 3 - } - ], - "properties": { - "Node name for S&R": "CogVideoDecode" - }, - "widgets_values": [ - true, - 240, - 360, - 0.2, - 0.2, - true - ] - }, - { - "id": 30, - "type": "CogVideoTextEncode", - "pos": { - "0": 493, - "1": 303 - }, - "size": { - "0": 471.90142822265625, - "1": 168.08047485351562 - }, - "flags": {}, - "order": 3, - "mode": 0, - "inputs": [ - { - "name": "clip", - "type": "CLIP", - "link": 54 - } - ], - "outputs": [ - { - "name": "conditioning", - "type": "CONDITIONING", - "links": [ - 110 - ], - "slot_index": 0, - "shape": 3 - }, - { - "name": "clip", - "type": "CLIP", - "links": [ - 108 - ], - "slot_index": 1 - } - ], - "properties": { - "Node name for S&R": "CogVideoTextEncode" - }, - "widgets_values": [ - "majestic stag grazing in a forest and basking in the setting sun", - 1, - false - ] - }, - { - "id": 51, - "type": "CogVideoXFunSampler", - "pos": { - "0": 1058, - "1": 345 - }, - "size": { - "0": 367.79998779296875, - "1": 434 - }, - "flags": {}, - "order": 6, - "mode": 0, - "inputs": [ - { - "name": "pipeline", - "type": "COGVIDEOPIPE", - "link": 109 - }, - { - "name": "positive", - "type": "CONDITIONING", - "link": 110 - }, - { - "name": "negative", - "type": "CONDITIONING", - "link": 111 - }, - { - "name": "start_img", - "type": "IMAGE", - "link": 112, - "shape": 7 - }, - { - "name": "end_img", - "type": "IMAGE", - "link": null, - "shape": 7 - }, - { - "name": "context_options", - "type": "COGCONTEXT", - "link": null, - "shape": 7 - }, - { - "name": "tora_trajectory", - "type": "TORAFEATURES", - "link": null, - "shape": 7 - }, - { - "name": "fastercache", - "type": "FASTERCACHEARGS", - "link": null, - "shape": 7 - }, - { - "name": "vid2vid_images", - "type": "IMAGE", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 113 - ] - }, - { - "name": "samples", - "type": "LATENT", - "links": [ - 114 - ] - } - ], - "properties": { - "Node name for S&R": "CogVideoXFunSampler" - }, - "widgets_values": [ - 49, - 720, - 480, - 43, - "randomize", - 50, - 6, - "DDIM", - 0.0563, - 1 - ] - }, - { - "id": 48, - "type": "DownloadAndLoadCogVideoGGUFModel", - "pos": { - "0": 585, - "1": 34 - }, - "size": { - "0": 378, - "1": 198 - }, - "flags": {}, - "order": 2, - "mode": 0, - "inputs": [ - { - "name": "pab_config", - "type": "PAB_CONFIG", - "link": null, - "shape": 7 - }, - { - "name": "block_edit", - "type": "TRANSFORMERBLOCKS", - "link": null, - "shape": 7 - } - ], - "outputs": [ - { - "name": "cogvideo_pipe", - "type": "COGVIDEOPIPE", - "links": [ - 109 - ], - "slot_index": 0, - "shape": 3 - } - ], - "properties": { - "Node name for S&R": "DownloadAndLoadCogVideoGGUFModel" - }, - "widgets_values": [ - "CogVideoX_5b_fun_1_1_GGUF_Q4_0.safetensors", - "bf16", - false, - "offload_device", - false, - "disabled" - ] - } - ], - "links": [ - [ - 54, - 20, - 0, - 30, - 0, - "CLIP" - ], - [ - 71, - 36, - 0, - 37, - 0, - "IMAGE" - ], - [ - 97, - 11, - 0, - 44, - 0, - "IMAGE" - ], - [ - 108, - 30, - 1, - 31, - 0, - "CLIP" - ], - [ - 109, - 48, - 0, - 51, - 0, - "COGVIDEOPIPE" - ], - [ - 110, - 30, - 0, - 51, - 1, - "CONDITIONING" - ], - [ - 111, - 31, - 0, - 51, - 2, - "CONDITIONING" - ], - [ - 112, - 37, - 0, - 51, - 3, - "IMAGE" - ], - [ - 113, - 51, - 0, - 11, - 0, - "COGVIDEOPIPE" - ], - [ - 114, - 51, - 1, - 11, - 1, - "LATENT" - ] - ], - "groups": [], - "config": {}, - "extra": { - "ds": { - "scale": 0.7513148009015784, - "offset": [ - 724.7448506313632, - 128.336592104936 - ] - } - }, - "version": 0.4 
-}
\ No newline at end of file
diff --git a/nodes.py b/nodes.py
index bd89609..6af6b6c 100644
--- a/nodes.py
+++ b/nodes.py
@@ -595,14 +595,14 @@ class CogVideoSampler:
     FUNCTION = "process"
     CATEGORY = "CogVideoWrapper"
 
-    def process(self, pipeline, positive, negative, steps, cfg, seed, scheduler, num_frames, samples=None,
+    def process(self, model, positive, negative, steps, cfg, seed, scheduler, num_frames, samples=None,
                 denoise_strength=1.0, image_cond_latents=None, context_options=None, controlnet=None, tora_trajectory=None, fastercache=None):
         mm.soft_empty_cache()
 
-        model_name = pipeline.get("model_name", "")
+        model_name = model.get("model_name", "")
         supports_image_conds = True if "I2V" in model_name or "interpolation" in model_name.lower() or "fun" in model_name.lower() else False
-        if "fun" in model_name.lower() and image_cond_latents is not None:
+        if "fun" in model_name.lower() and "pose" not in model_name.lower() and image_cond_latents is not None:
             assert image_cond_latents["mask"] is not None, "For fun inpaint models use CogVideoImageEncodeFunInP"
             fun_mask = image_cond_latents["mask"]
         else:
@@ -632,11 +632,11 @@ class CogVideoSampler:
         device = mm.get_torch_device()
         offload_device = mm.unet_offload_device()
 
-        pipe = pipeline["pipe"]
-        dtype = pipeline["dtype"]
-        scheduler_config = pipeline["scheduler_config"]
+        pipe = model["pipe"]
+        dtype = model["dtype"]
+        scheduler_config = model["scheduler_config"]
 
-        if not pipeline["cpu_offloading"] and pipeline["manual_offloading"]:
+        if not model["cpu_offloading"] and model["manual_offloading"]:
             pipe.transformer.to(device)
 
         generator = torch.Generator(device=torch.device("cpu")).manual_seed(seed)
@@ -683,10 +683,10 @@ class CogVideoSampler:
             except:
                 pass
 
-        autocastcondition = not pipeline["onediff"] or not dtype == torch.float32
+        autocastcondition = not model["onediff"] or not dtype == torch.float32
         autocast_context = torch.autocast(mm.get_autocast_device(device), dtype=dtype) if autocastcondition else nullcontext()
         with autocast_context:
-            latents = pipeline["pipe"](
+            latents = model["pipe"](
                 num_inference_steps=steps,
                 height = height,
                 width = width,
@@ -708,7 +708,7 @@ class CogVideoSampler:
                 controlnet=controlnet,
                 tora=tora_trajectory if tora_trajectory is not None else None,
             )
-        if not pipeline["cpu_offloading"] and pipeline["manual_offloading"]:
+        if not model["cpu_offloading"] and model["manual_offloading"]:
             pipe.transformer.to(offload_device)
 
         if fastercache is not None:
@@ -763,18 +763,16 @@ class CogVideoDecode:
     @classmethod
     def INPUT_TYPES(s):
         return {"required": {
-            "samples": ("LATENT", ),
-            "vae": ("VAE", {"default": None}),
-            "enable_vae_tiling": ("BOOLEAN", {"default": True, "tooltip": "Drastically reduces memory use but may introduce seams"}),
-            },
-            "optional": {
-            "tile_sample_min_height": ("INT", {"default": 240, "min": 16, "max": 2048, "step": 8, "tooltip": "Minimum tile height, default is half the height"}),
-            "tile_sample_min_width": ("INT", {"default": 360, "min": 16, "max": 2048, "step": 8, "tooltip": "Minimum tile width, default is half the width"}),
-            "tile_overlap_factor_height": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}),
-            "tile_overlap_factor_width": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}),
-            "auto_tile_size": ("BOOLEAN", {"default": True, "tooltip": "Auto size based on height and width, default is half the size"}),
-            }
-        }
+            "vae": ("VAE",),
+            "samples": ("LATENT",),
+            "enable_vae_tiling": ("BOOLEAN", {"default": True, "tooltip": "Drastically reduces memory use but may introduce seams"}),
but may introduce seams"}), + "tile_sample_min_height": ("INT", {"default": 240, "min": 16, "max": 2048, "step": 8, "tooltip": "Minimum tile height, default is half the height"}), + "tile_sample_min_width": ("INT", {"default": 360, "min": 16, "max": 2048, "step": 8, "tooltip": "Minimum tile width, default is half the width"}), + "tile_overlap_factor_height": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}), + "tile_overlap_factor_width": ("FLOAT", {"default": 0.2, "min": 0.0, "max": 1.0, "step": 0.001}), + "auto_tile_size": ("BOOLEAN", {"default": True, "tooltip": "Auto size based on height and width, default is half the size"}), + }, + } RETURN_TYPES = ("IMAGE",) RETURN_NAMES = ("images",) From f606d745e9bb0be33c981b30360d71a33b435cd2 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 19 Nov 2024 15:34:14 +0200 Subject: [PATCH 47/49] more examples --- examples/cogvideox_1.0_5b_vid2vid_02.json | 1061 +++++++++++++++++++++ examples/cogvideox_1_5_5b_I2V_01.json | 736 ++++++++++++++ 2 files changed, 1797 insertions(+) create mode 100644 examples/cogvideox_1.0_5b_vid2vid_02.json create mode 100644 examples/cogvideox_1_5_5b_I2V_01.json diff --git a/examples/cogvideox_1.0_5b_vid2vid_02.json b/examples/cogvideox_1.0_5b_vid2vid_02.json new file mode 100644 index 0000000..a45ff0b --- /dev/null +++ b/examples/cogvideox_1.0_5b_vid2vid_02.json @@ -0,0 +1,1061 @@ +{ + "last_node_id": 78, + "last_link_id": 218, + "nodes": [ + { + "id": 20, + "type": "CLIPLoader", + "pos": { + "0": -29, + "1": 407 + }, + "size": { + "0": 451.30548095703125, + "1": 82 + }, + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CLIPLoader" + }, + "widgets_values": [ + "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", + "sd3" + ] + }, + { + "id": 41, + "type": "ImageResizeKJ", + "pos": { + "0": 206, + "1": -69 + }, + "size": { + "0": 315, + "1": 242 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 180 + }, + { + "name": "get_image_size", + "type": "IMAGE", + "link": null, + "shape": 7 + }, + { + "name": "width_input", + "type": "INT", + "link": null, + "widget": { + "name": "width_input" + } + }, + { + "name": "height_input", + "type": "INT", + "link": null, + "widget": { + "name": "height_input" + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 126 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "height", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageResizeKJ" + }, + "widgets_values": [ + 720, + 480, + "lanczos", + false, + 2, + 0, + 0, + "disabled" + ] + }, + { + "id": 45, + "type": "VHS_LoadVideo", + "pos": { + "0": -93, + "1": -153 + }, + "size": [ + 247.455078125, + 365.7275390625 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + }, + { + "name": "frame_load_cap", + "type": "INT", + "link": 177, + "widget": { + "name": "frame_load_cap" + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 179 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": 
"frame_count", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "audio", + "type": "AUDIO", + "links": null, + "shape": 3 + }, + { + "name": "video_info", + "type": "VHS_VIDEOINFO", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_LoadVideo" + }, + "widgets_values": { + "video": "jeep.mp4", + "force_rate": 0, + "force_size": "Disabled", + "custom_width": 512, + "custom_height": 512, + "frame_load_cap": 20, + "skip_first_frames": 0, + "select_every_nth": 1, + "choose video to upload": "image", + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "frame_load_cap": 20, + "skip_first_frames": 0, + "force_rate": 0, + "filename": "jeep.mp4", + "type": "input", + "format": "video/mp4", + "select_every_nth": 1 + } + } + } + }, + { + "id": 70, + "type": "GetImageSizeAndCount", + "pos": { + "0": 214, + "1": -234 + }, + "size": { + "0": 202.2143096923828, + "1": 99.23601531982422 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 179, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 180 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "512 width", + "type": "INT", + "links": [], + "slot_index": 1, + "shape": 3 + }, + { + "name": "256 height", + "type": "INT", + "links": [], + "slot_index": 2, + "shape": 3 + }, + { + "name": "33 count", + "type": "INT", + "links": [], + "slot_index": 3, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "GetImageSizeAndCount" + }, + "widgets_values": [] + }, + { + "id": 69, + "type": "INTConstant", + "pos": { + "0": -90, + "1": -305 + }, + "size": { + "0": 210, + "1": 58 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "value", + "type": "INT", + "links": [ + 177 + ], + "shape": 3 + } + ], + "title": "Frames to load", + "properties": { + "Node name for S&R": "INTConstant" + }, + "widgets_values": [ + 33 + ], + "color": "#1b4669", + "bgcolor": "#29699c" + }, + { + "id": 58, + "type": "ImageConcanate", + "pos": { + "0": 1594, + "1": 230 + }, + "size": { + "0": 315, + "1": 102 + }, + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "image1", + "type": "IMAGE", + "link": 191 + }, + { + "name": "image2", + "type": "IMAGE", + "link": 170 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 132 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageConcanate" + }, + "widgets_values": [ + "right", + false + ] + }, + { + "id": 55, + "type": "GetImageSizeAndCount", + "pos": { + "0": 1654, + "1": 77 + }, + "size": { + "0": 210, + "1": 86 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 208, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 170 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "720 width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "480 height", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "33 count", + "type": "INT", + "links": [], + "slot_index": 3, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "GetImageSizeAndCount" + }, + "widgets_values": [] + }, + { + "id": 77, + "type": "CogVideoImageEncode", + "pos": { + "0": 952, + "1": -118 + }, + "size": { + "0": 315, + "1": 122 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "vae", + 
"type": "VAE", + "link": 209 + }, + { + "name": "start_image", + "type": "IMAGE", + "link": 210 + }, + { + "name": "end_image", + "type": "IMAGE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 215 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoImageEncode" + }, + "widgets_values": [ + false, + 0 + ] + }, + { + "id": 76, + "type": "CogVideoDecode", + "pos": { + "0": 1335, + "1": -123 + }, + "size": { + "0": 315, + "1": 198 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 206 + }, + { + "name": "samples", + "type": "LATENT", + "link": 216 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 208 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoDecode" + }, + "widgets_values": [ + true, + 240, + 360, + 0.2, + 0.2, + true + ] + }, + { + "id": 30, + "type": "CogVideoTextEncode", + "pos": { + "0": 491, + "1": 372 + }, + "size": [ + 478.6890949595422, + 215.66308749666905 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 213 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 217 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "A high-definition nature video showcasing a brown bear as it gracefully runs down a crystal-clear stream, surrounded by the serene ambiance of a dense, verdant forest. The sunlight filters through the canopy of tall trees, casting dappled light on the forest floor, while the gentle sound of flowing water and rustling leaves creates a peaceful atmosphere. 
The brown bear's fur glistens in the sunlight, highlighting its striking red and white markings as it navigates the stream with agility and playfulness.", + 1, + false + ] + }, + { + "id": 31, + "type": "CogVideoTextEncode", + "pos": { + "0": 504, + "1": 651 + }, + "size": { + "0": 463.01251220703125, + "1": 144 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 217 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 214 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "", + 1, + true + ] + }, + { + "id": 78, + "type": "CogVideoSampler", + "pos": { + "0": 1083, + "1": 255 + }, + "size": [ + 330, + 574 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 212 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 213 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 214 + }, + { + "name": "samples", + "type": "LATENT", + "link": 215, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": null, + "shape": 7 + }, + { + "name": "context_options", + "type": "COGCONTEXT", + "link": null, + "shape": 7 + }, + { + "name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": null, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, + "shape": 7 + }, + { + "name": "num_frames", + "type": "INT", + "link": 218, + "widget": { + "name": "num_frames" + } + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 216 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoSampler" + }, + "widgets_values": [ + 49, + 25, + 6, + 0, + "fixed", + "CogVideoXDDIM", + 0.8 + ] + }, + { + "id": 57, + "type": "GetImageSizeAndCount", + "pos": { + "0": 595, + "1": -79 + }, + "size": { + "0": 202.2143096923828, + "1": 99.23601531982422 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 126, + "slot_index": 0 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": [ + 191, + 210 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "720 width", + "type": "INT", + "links": [], + "slot_index": 1, + "shape": 3 + }, + { + "name": "480 height", + "type": "INT", + "links": [], + "slot_index": 2, + "shape": 3 + }, + { + "name": "33 count", + "type": "INT", + "links": [ + 218 + ], + "slot_index": 3, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "GetImageSizeAndCount" + }, + "widgets_values": [] + }, + { + "id": 75, + "type": "DownloadAndLoadCogVideoModel", + "pos": { + "0": 606, + "1": 85 + }, + "size": { + "0": 315, + "1": 218 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "block_edit", + "type": "TRANSFORMERBLOCKS", + "link": null, + "shape": 7 + }, + { + "name": "lora", + "type": "COGLORA", + "link": null, + "shape": 7 + }, + { + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "links": [ + 212 + ] + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 206, + 209 + ] + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadCogVideoModel" + }, + 
"widgets_values": [ + "THUDM/CogVideoX-5b", + "bf16", + "disabled", + false, + "sdpa", + "main_device" + ] + }, + { + "id": 47, + "type": "VHS_VideoCombine", + "pos": { + "0": 1946, + "1": -172 + }, + "size": [ + 1110, + 687.3333333333333 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 132 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "CogVideoX_vid2vid", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "CogVideoX_vid2vid_00003.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + } + } + } + } + ], + "links": [ + [ + 54, + 20, + 0, + 30, + 0, + "CLIP" + ], + [ + 126, + 41, + 0, + 57, + 0, + "IMAGE" + ], + [ + 132, + 58, + 0, + 47, + 0, + "IMAGE" + ], + [ + 170, + 55, + 0, + 58, + 1, + "IMAGE" + ], + [ + 177, + 69, + 0, + 45, + 2, + "INT" + ], + [ + 179, + 45, + 0, + 70, + 0, + "IMAGE" + ], + [ + 180, + 70, + 0, + 41, + 0, + "IMAGE" + ], + [ + 191, + 57, + 0, + 58, + 0, + "IMAGE" + ], + [ + 206, + 75, + 1, + 76, + 0, + "VAE" + ], + [ + 208, + 76, + 0, + 55, + 0, + "IMAGE" + ], + [ + 209, + 75, + 1, + 77, + 0, + "VAE" + ], + [ + 210, + 57, + 0, + 77, + 1, + "IMAGE" + ], + [ + 212, + 75, + 0, + 78, + 0, + "COGVIDEOMODEL" + ], + [ + 213, + 30, + 0, + 78, + 1, + "CONDITIONING" + ], + [ + 214, + 31, + 0, + 78, + 2, + "CONDITIONING" + ], + [ + 215, + 77, + 0, + 78, + 3, + "LATENT" + ], + [ + 216, + 78, + 0, + 76, + 1, + "LATENT" + ], + [ + 217, + 30, + 1, + 31, + 0, + "CLIP" + ], + [ + 218, + 57, + 3, + 78, + 9, + "INT" + ] + ], + "groups": [], + "config": {}, + "extra": { + "ds": { + "scale": 0.8390545288825798, + "offset": [ + -318.82552550589344, + 331.70430573737934 + ] + } + }, + "version": 0.4 +} \ No newline at end of file diff --git a/examples/cogvideox_1_5_5b_I2V_01.json b/examples/cogvideox_1_5_5b_I2V_01.json new file mode 100644 index 0000000..6a6938c --- /dev/null +++ b/examples/cogvideox_1_5_5b_I2V_01.json @@ -0,0 +1,736 @@ +{ + "last_node_id": 64, + "last_link_id": 149, + "nodes": [ + { + "id": 63, + "type": "CogVideoSampler", + "pos": { + "0": 1142, + "1": 74 + }, + "size": { + "0": 330, + "1": 574 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 144 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 145 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 146 + }, + { + "name": "samples", + "type": "LATENT", + "link": null, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": 147, + "shape": 7 + }, + { + "name": "context_options", + "type": "COGCONTEXT", + "link": null, + "shape": 7 + }, + { + "name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": null, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", 
+ "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 148 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoSampler" + }, + "widgets_values": [ + 49, + 25, + 6, + 0, + "fixed", + "CogVideoXDDIM", + 1 + ] + }, + { + "id": 62, + "type": "CogVideoImageEncode", + "pos": { + "0": 1149, + "1": 711 + }, + "size": { + "0": 315, + "1": 122 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 141 + }, + { + "name": "start_image", + "type": "IMAGE", + "link": 142 + }, + { + "name": "end_image", + "type": "IMAGE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 147 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoImageEncode" + }, + "widgets_values": [ + false, + 0 + ] + }, + { + "id": 30, + "type": "CogVideoTextEncode", + "pos": { + "0": 493, + "1": 303 + }, + "size": { + "0": 471.90142822265625, + "1": 168.08047485351562 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 145 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 149 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "a majestic stag is grazing in an enhanced forest, basking in the setting sun filtered by the trees", + 1, + false + ] + }, + { + "id": 36, + "type": "LoadImage", + "pos": { + "0": 335, + "1": 731 + }, + "size": { + "0": 402.06353759765625, + "1": 396.6225891113281 + }, + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 71 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "sd3stag.png", + "image" + ] + }, + { + "id": 20, + "type": "CLIPLoader", + "pos": { + "0": -2, + "1": 304 + }, + "size": { + "0": 451.30548095703125, + "1": 82 + }, + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CLIPLoader" + }, + "widgets_values": [ + "t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", + "sd3" + ] + }, + { + "id": 60, + "type": "CogVideoDecode", + "pos": { + "0": 1523, + "1": -6 + }, + "size": { + "0": 315, + "1": 198 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 132 + }, + { + "name": "samples", + "type": "LATENT", + "link": 148 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 134 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoDecode" + }, + "widgets_values": [ + true, + 240, + 360, + 0.2, + 0.2, + true + ] + }, + { + "id": 44, + "type": "VHS_VideoCombine", + "pos": { + "0": 1884, + "1": -6 + }, + "size": [ + 605.3909912109375, + 654.5737362132353 + ], + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 134 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": 
"vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "CogVideoX-I2V", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "CogVideoX-I2V_00004.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + }, + "muted": false + } + } + }, + { + "id": 37, + "type": "ImageResizeKJ", + "pos": { + "0": 784, + "1": 731 + }, + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 71 + }, + { + "name": "get_image_size", + "type": "IMAGE", + "link": null, + "shape": 7 + }, + { + "name": "width_input", + "type": "INT", + "link": null, + "widget": { + "name": "width_input" + } + }, + { + "name": "height_input", + "type": "INT", + "link": null, + "widget": { + "name": "height_input" + } + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 142 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "height", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageResizeKJ" + }, + "widgets_values": [ + 1360, + 768, + "lanczos", + false, + 16, + 0, + 0, + "disabled" + ] + }, + { + "id": 31, + "type": "CogVideoTextEncode", + "pos": { + "0": 497, + "1": 520 + }, + "size": { + "0": 463.01251220703125, + "1": 144 + }, + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 149 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 146 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "", + 1, + true + ] + }, + { + "id": 59, + "type": "DownloadAndLoadCogVideoModel", + "pos": { + "0": 622, + "1": -25 + }, + "size": { + "0": 315, + "1": 218 + }, + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [ + { + "name": "block_edit", + "type": "TRANSFORMERBLOCKS", + "link": null, + "shape": 7 + }, + { + "name": "lora", + "type": "COGLORA", + "link": null, + "shape": 7 + }, + { + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "links": [ + 144 + ] + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 132, + 141 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadCogVideoModel" + }, + "widgets_values": [ + "kijai/CogVideoX-5b-1.5-I2V", + "bf16", + "disabled", + false, + "sdpa", + "main_device" + ] + }, + { + "id": 64, + "type": "CogVideoImageEncodeFunInP", + "pos": { + "0": 1861.032958984375, + "1": 752.6453247070312 + }, + "size": { + "0": 380.4000244140625, + "1": 146 + }, + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": null + }, + { + "name": "start_image", + "type": "IMAGE", + "link": null + }, + { + "name": "end_image", + "type": "IMAGE", + "link": null, + "shape": 7 + } + ], + "outputs": 
[ + { + "name": "image_cond_latents", + "type": "LATENT", + "links": null + } + ], + "properties": { + "Node name for S&R": "CogVideoImageEncodeFunInP" + }, + "widgets_values": [ + 49, + false, + 0 + ] + } + ], + "links": [ + [ + 54, + 20, + 0, + 30, + 0, + "CLIP" + ], + [ + 71, + 36, + 0, + 37, + 0, + "IMAGE" + ], + [ + 132, + 59, + 1, + 60, + 0, + "VAE" + ], + [ + 134, + 60, + 0, + 44, + 0, + "IMAGE" + ], + [ + 141, + 59, + 1, + 62, + 0, + "VAE" + ], + [ + 142, + 37, + 0, + 62, + 1, + "IMAGE" + ], + [ + 144, + 59, + 0, + 63, + 0, + "COGVIDEOMODEL" + ], + [ + 145, + 30, + 0, + 63, + 1, + "CONDITIONING" + ], + [ + 146, + 31, + 0, + 63, + 2, + "CONDITIONING" + ], + [ + 147, + 62, + 0, + 63, + 4, + "LATENT" + ], + [ + 148, + 63, + 0, + 60, + 1, + "LATENT" + ], + [ + 149, + 30, + 1, + 31, + 0, + "CLIP" + ] + ], + "groups": [], + "config": {}, + "extra": { + "ds": { + "scale": 0.8390545288825803, + "offset": [ + 351.5513339440394, + 161.02862760095286 + ] + } + }, + "version": 0.4 +} \ No newline at end of file From 6302e4b6685f41a0e1ff953d39811d2a099939c9 Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 19 Nov 2024 15:49:43 +0200 Subject: [PATCH 48/49] Allow orbit LoRAs with Fun models as well --- examples/cogvideox_Fun_I2V_02.json | 4 +- model_loading.py | 81 ++++++++++++++---------------- 2 files changed, 40 insertions(+), 45 deletions(-) diff --git a/examples/cogvideox_Fun_I2V_02.json b/examples/cogvideox_Fun_I2V_02.json index 66fea99..0a107f8 100644 --- a/examples/cogvideox_Fun_I2V_02.json +++ b/examples/cogvideox_Fun_I2V_02.json @@ -84,7 +84,7 @@ }, "widgets_values": [ 49, - 50, + 25, 6, 458091243358272, "randomize", @@ -268,7 +268,7 @@ }, "widgets_values": [ 49, - false, + true, 0 ] }, diff --git a/model_loading.py b/model_loading.py index c77e3c5..bc0cd75 100644 --- a/model_loading.py +++ b/model_loading.py @@ -240,37 +240,37 @@ class DownloadAndLoadCogVideoModel: #LoRAs if lora is not None: - from .lora_utils import merge_lora#, load_lora_into_transformer - if "fun" in model.lower(): - for l in lora: - log.info(f"Merging LoRA weights from {l['path']} with strength {l['strength']}") - transformer = merge_lora(transformer, l["path"], l["strength"]) - else: - adapter_list = [] - adapter_weights = [] - for l in lora: - fuse = True if l["fuse_lora"] else False - lora_sd = load_torch_file(l["path"]) - for key, val in lora_sd.items(): - if "lora_B" in key: - lora_rank = val.shape[1] - break - log.info(f"Merging rank {lora_rank} LoRA weights from {l['path']} with strength {l['strength']}") - adapter_name = l['path'].split("/")[-1].split(".")[0] - adapter_weight = l['strength'] - pipe.load_lora_weights(l['path'], weight_name=l['path'].split("/")[-1], lora_rank=lora_rank, adapter_name=adapter_name) - - #transformer = load_lora_into_transformer(lora, transformer) - adapter_list.append(adapter_name) - adapter_weights.append(adapter_weight) - for l in lora: - pipe.set_adapters(adapter_list, adapter_weights=adapter_weights) - if fuse: - lora_scale = 1 - dimension_loras = ["orbit", "dimensionx"] # for now dimensionx loras need scaling - if any(item in lora[-1]["path"].lower() for item in dimension_loras): - lora_scale = lora_scale / lora_rank - pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"]) + # from .lora_utils import merge_lora#, load_lora_into_transformer + # if "fun" in model.lower(): + # for l in lora: + # log.info(f"Merging LoRA weights from {l['path']} with strength {l['strength']}") + # transformer = merge_lora(transformer, 
l["path"], l["strength"]) + #else: + adapter_list = [] + adapter_weights = [] + for l in lora: + fuse = True if l["fuse_lora"] else False + lora_sd = load_torch_file(l["path"]) + for key, val in lora_sd.items(): + if "lora_B" in key: + lora_rank = val.shape[1] + break + log.info(f"Merging rank {lora_rank} LoRA weights from {l['path']} with strength {l['strength']}") + adapter_name = l['path'].split("/")[-1].split(".")[0] + adapter_weight = l['strength'] + pipe.load_lora_weights(l['path'], weight_name=l['path'].split("/")[-1], lora_rank=lora_rank, adapter_name=adapter_name) + + #transformer = load_lora_into_transformer(lora, transformer) + adapter_list.append(adapter_name) + adapter_weights.append(adapter_weight) + for l in lora: + pipe.set_adapters(adapter_list, adapter_weights=adapter_weights) + if fuse: + lora_scale = 1 + dimension_loras = ["orbit", "dimensionx"] # for now dimensionx loras need scaling + if any(item in lora[-1]["path"].lower() for item in dimension_loras): + lora_scale = lora_scale / lora_rank + pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"]) if "fused" in attention_mode: from diffusers.models.attention import Attention @@ -653,27 +653,22 @@ class CogVideoXModelLoader: with open(transformer_config_path) as f: transformer_config = json.load(f) - with init_empty_weights(): if model_type in ["I2V", "I2V_5b", "fun_5b_pose", "5b_I2V_1_5"]: transformer_config["in_channels"] = 32 if "1_5" in model_type: transformer_config["ofs_embed_dim"] = 512 + elif "fun" in model_type: + transformer_config["in_channels"] = 33 + else: + transformer_config["in_channels"] = 16 + if "1_5" in model_type: transformer_config["use_learned_positional_embeddings"] = False transformer_config["patch_size_t"] = 2 transformer_config["patch_bias"] = False transformer_config["sample_height"] = 300 transformer_config["sample_width"] = 300 - elif "fun" in model_type: - transformer_config["in_channels"] = 33 - else: - if "1_5" in model_type: - transformer_config["use_learned_positional_embeddings"] = False - transformer_config["patch_size_t"] = 2 - transformer_config["patch_bias"] = False - #transformer_config["sample_height"] = 300 todo: check if this is needed - #transformer_config["sample_width"] = 300 - transformer_config["in_channels"] = 16 - + + with init_empty_weights(): transformer = CogVideoXTransformer3DModel.from_config(transformer_config) #load weights From feeff366b546f270813dea7a77c7043c7dc22e1d Mon Sep 17 00:00:00 2001 From: kijai <40791699+kijai@users.noreply.github.com> Date: Tue, 19 Nov 2024 19:06:15 +0200 Subject: [PATCH 49/49] update --- examples/cogvideox_Fun_180_orbit_01.json | 1922 ++++++++++++++++++++++ model_loading.py | 62 +- readme.md | 27 + 3 files changed, 1980 insertions(+), 31 deletions(-) create mode 100644 examples/cogvideox_Fun_180_orbit_01.json diff --git a/examples/cogvideox_Fun_180_orbit_01.json b/examples/cogvideox_Fun_180_orbit_01.json new file mode 100644 index 0000000..2d482e8 --- /dev/null +++ b/examples/cogvideox_Fun_180_orbit_01.json @@ -0,0 +1,1922 @@ +{ + "last_node_id": 73, + "last_link_id": 165, + "nodes": [ + { + "id": 20, + "type": "CLIPLoader", + "pos": { + "0": -27, + "1": 42 + }, + "size": { + "0": 451.30548095703125, + "1": 82 + }, + "flags": {}, + "order": 0, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "CLIP", + "type": "CLIP", + "links": [ + 54 + ], + "slot_index": 0, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "CLIPLoader" + }, + "widgets_values": [ + 
"t5\\google_t5-v1_1-xxl_encoderonly-fp8_e4m3fn.safetensors", + "sd3" + ] + }, + { + "id": 52, + "type": "CogVideoLoraSelect", + "pos": { + "0": -3, + "1": -383 + }, + "size": [ + 438.44762263180314, + 106 + ], + "flags": {}, + "order": 1, + "mode": 0, + "inputs": [ + { + "name": "prev_lora", + "type": "COGLORA", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "lora", + "type": "COGLORA", + "links": [ + 124 + ] + } + ], + "properties": { + "Node name for S&R": "CogVideoLoraSelect" + }, + "widgets_values": [ + "DimensionX_orbit_left_lora_rank256_bf16.safetensors", + 1, + true + ] + }, + { + "id": 55, + "type": "ImageFlip+", + "pos": { + "0": 1247, + "1": 770 + }, + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 22, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 130 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 131, + 151 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageFlip+" + }, + "widgets_values": [ + "x" + ] + }, + { + "id": 54, + "type": "ImageFlip+", + "pos": { + "0": 847, + "1": 802 + }, + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 10, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 128 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 129 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ImageFlip+" + }, + "widgets_values": [ + "x" + ] + }, + { + "id": 50, + "type": "CogVideoImageEncodeFunInP", + "pos": { + "0": 865, + "1": 567 + }, + "size": [ + 253.60000610351562, + 146 + ], + "flags": {}, + "order": 13, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 119 + }, + { + "name": "start_image", + "type": "IMAGE", + "link": 129 + }, + { + "name": "end_image", + "type": "IMAGE", + "link": null, + "shape": 7 + }, + { + "name": "num_frames", + "type": "INT", + "link": 126, + "widget": { + "name": "num_frames" + } + } + ], + "outputs": [ + { + "name": "image_cond_latents", + "type": "LATENT", + "links": [ + 120 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoImageEncodeFunInP" + }, + "widgets_values": [ + 33, + true, + 0.03 + ] + }, + { + "id": 63, + "type": "CogVideoImageEncodeFunInP", + "pos": { + "0": 936.3893432617188, + "1": 1048.5242919921875 + }, + "size": { + "0": 253.60000610351562, + "1": 146 + }, + "flags": {}, + "order": 11, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 144 + }, + { + "name": "start_image", + "type": "IMAGE", + "link": 146 + }, + { + "name": "end_image", + "type": "IMAGE", + "link": null, + "shape": 7 + }, + { + "name": "num_frames", + "type": "INT", + "link": 145, + "widget": { + "name": "num_frames" + } + } + ], + "outputs": [ + { + "name": "image_cond_latents", + "type": "LATENT", + "links": [ + 147 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoImageEncodeFunInP" + }, + "widgets_values": [ + 33, + true, + 0.03 + ] + }, + { + "id": 51, + "type": "CogVideoDecode", + "pos": { + "0": 1219, + "1": -134 + }, + "size": { + "0": 315, + "1": 198 + }, + "flags": {}, + "order": 20, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 122 + }, + { + "name": "samples", + "type": "LATENT", + "link": 123 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 130 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoDecode" + }, + 
"widgets_values": [ + true, + 240, + 360, + 0.2, + 0.2, + true + ] + }, + { + "id": 53, + "type": "PrimitiveNode", + "pos": { + "0": 117, + "1": 399 + }, + "size": [ + 261.57286031534534, + 82 + ], + "flags": {}, + "order": 2, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 126, + 127, + 143, + 145 + ], + "widget": { + "name": "num_frames" + }, + "slot_index": 0 + } + ], + "title": "num_frames", + "properties": { + "Run widget replace on values": false + }, + "widgets_values": [ + 33, + "fixed" + ] + }, + { + "id": 48, + "type": "CogVideoSampler", + "pos": { + "0": 1200, + "1": 124 + }, + "size": [ + 330, + 574 + ], + "flags": {}, + "order": 18, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 114 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 116 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 117 + }, + { + "name": "samples", + "type": "LATENT", + "link": null, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": 120, + "shape": 7 + }, + { + "name": "context_options", + "type": "COGCONTEXT", + "link": null, + "shape": 7 + }, + { + "name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": null, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, + "shape": 7 + }, + { + "name": "num_frames", + "type": "INT", + "link": 127, + "widget": { + "name": "num_frames" + } + }, + { + "name": "seed", + "type": "INT", + "link": 156, + "widget": { + "name": "seed" + } + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 123 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoSampler" + }, + "widgets_values": [ + 33, + 40, + 6, + 458091243358278, + "fixed", + "CogVideoXDDIM", + 1 + ] + }, + { + "id": 68, + "type": "PrimitiveNode", + "pos": { + "0": 514, + "1": 985 + }, + "size": [ + 295.90419649751334, + 82 + ], + "flags": {}, + "order": 3, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "INT", + "type": "INT", + "links": [ + 156, + 157 + ], + "widget": { + "name": "seed" + }, + "slot_index": 0 + } + ], + "title": "seed", + "properties": { + "Run widget replace on values": false + }, + "widgets_values": [ + 458091243358278, + "fixed" + ] + }, + { + "id": 69, + "type": "DownloadAndLoadFlorence2Model", + "pos": { + "0": -1305, + "1": -13 + }, + "size": [ + 442.37554309913344, + 106 + ], + "flags": {}, + "order": 4, + "mode": 0, + "inputs": [ + { + "name": "lora", + "type": "PEFTLORA", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "florence2_model", + "type": "FL2MODEL", + "links": [ + 158 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadFlorence2Model" + }, + "widgets_values": [ + "MiaoshouAI/Florence-2-base-PromptGen-v2.0", + "fp16", + "sdpa" + ] + }, + { + "id": 37, + "type": "ImageResizeKJ", + "pos": { + "0": -202, + "1": 588 + }, + "size": { + "0": 315, + "1": 266 + }, + "flags": {}, + "order": 9, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 71 + }, + { + "name": "get_image_size", + "type": "IMAGE", + "link": null, + "shape": 7 + }, + { + "name": "width_input", + "type": "INT", + "link": null, + "widget": { + "name": "width_input" + } + }, + { + "name": "height_input", + "type": "INT", + "link": null, + "widget": { + "name": "height_input" + } + } + 
], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 128, + 146, + 159 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "width", + "type": "INT", + "links": null, + "shape": 3 + }, + { + "name": "height", + "type": "INT", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "ImageResizeKJ" + }, + "widgets_values": [ + 768, + 768, + "lanczos", + true, + 2, + 0, + 0, + "disabled" + ] + }, + { + "id": 71, + "type": "StringConstantMultiline", + "pos": { + "0": -709, + "1": 20 + }, + "size": { + "0": 400, + "1": 200 + }, + "flags": {}, + "order": 5, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 160 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "StringConstantMultiline" + }, + "widgets_values": [ + "camera orbit", + false + ] + }, + { + "id": 72, + "type": "JoinStrings", + "pos": { + "0": -232, + "1": 231 + }, + "size": [ + 315, + 106 + ], + "flags": {}, + "order": 15, + "mode": 0, + "inputs": [ + { + "name": "string1", + "type": "STRING", + "link": 160, + "widget": { + "name": "string1" + } + }, + { + "name": "string2", + "type": "STRING", + "link": 162, + "widget": { + "name": "string2" + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": [ + 163 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "JoinStrings" + }, + "widgets_values": [ + "", + "", + ", " + ] + }, + { + "id": 70, + "type": "Florence2Run", + "pos": { + "0": -1276, + "1": 170 + }, + "size": { + "0": 400, + "1": 352 + }, + "flags": {}, + "order": 12, + "mode": 0, + "inputs": [ + { + "name": "image", + "type": "IMAGE", + "link": 159 + }, + { + "name": "florence2_model", + "type": "FL2MODEL", + "link": 158 + } + ], + "outputs": [ + { + "name": "image", + "type": "IMAGE", + "links": null, + "slot_index": 0 + }, + { + "name": "mask", + "type": "MASK", + "links": null + }, + { + "name": "caption", + "type": "STRING", + "links": [ + 161, + 162 + ], + "slot_index": 2 + }, + { + "name": "data", + "type": "JSON", + "links": null + } + ], + "properties": { + "Node name for S&R": "Florence2Run" + }, + "widgets_values": [ + "", + "more_detailed_caption", + true, + false, + 226, + 3, + true, + "", + 586007018516875, + "fixed" + ] + }, + { + "id": 73, + "type": "ShowText|pysssss", + "pos": { + "0": -793, + "1": 321 + }, + "size": [ + 502.3168660879171, + 180.55015376950485 + ], + "flags": {}, + "order": 14, + "mode": 0, + "inputs": [ + { + "name": "text", + "type": "STRING", + "link": 161, + "widget": { + "name": "text" + } + } + ], + "outputs": [ + { + "name": "STRING", + "type": "STRING", + "links": null, + "shape": 6 + } + ], + "properties": { + "Node name for S&R": "ShowText|pysssss" + }, + "widgets_values": [ + "", + "A digital illustration shoot from a frontal camera angle about a dark knight in shining armor stands in a dimly lit forest, with a glowing fire in the background. the image also shows a mysterious and intense atmosphere. on the middle of the image, a male knight appears to be standing, facing the viewer, with his full body visible. he is wearing a full plate armor with a red cloth draped over his shoulders. the armor is shiny and detailed, with intricate designs and a chain attached to it. he has two curved horns on his head, and his eyes are glowing yellow. the background is dark and smoky, with tall trees and a warm, glowing fire." 
+ ] + }, + { + "id": 59, + "type": "GIMMVFI_interpolate", + "pos": { + "0": 2880, + "1": -200 + }, + "size": { + "0": 330, + "1": 150 + }, + "flags": {}, + "order": 27, + "mode": 0, + "inputs": [ + { + "name": "gimmvfi_model", + "type": "GIMMVIF_MODEL", + "link": 134 + }, + { + "name": "images", + "type": "IMAGE", + "link": 165 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 164 + ], + "slot_index": 0 + }, + { + "name": "flow_tensors", + "type": "IMAGE", + "links": null + } + ], + "properties": { + "Node name for S&R": "GIMMVFI_interpolate" + }, + "widgets_values": [ + 1, + 2, + 223874235763998, + "fixed" + ] + }, + { + "id": 67, + "type": "ImageBatchMulti", + "pos": { + "0": 2900, + "1": 20 + }, + "size": { + "0": 210, + "1": 102 + }, + "flags": {}, + "order": 26, + "mode": 0, + "inputs": [ + { + "name": "image_1", + "type": "IMAGE", + "link": 152 + }, + { + "name": "image_2", + "type": "IMAGE", + "link": 153 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 165 + ], + "slot_index": 0 + } + ], + "properties": {}, + "widgets_values": [ + 2, + null + ] + }, + { + "id": 66, + "type": "ReverseImageBatch", + "pos": { + "0": 2590, + "1": -20 + }, + "size": { + "0": 239.40000915527344, + "1": 26 + }, + "flags": {}, + "order": 25, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 151 + } + ], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 152 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "ReverseImageBatch" + } + }, + { + "id": 58, + "type": "DownloadAndLoadGIMMVFIModel", + "pos": { + "0": 2510, + "1": -210 + }, + "size": { + "0": 315, + "1": 58 + }, + "flags": {}, + "order": 6, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "gimmvfi_model", + "type": "GIMMVIF_MODEL", + "links": [ + 134 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadGIMMVFIModel" + }, + "widgets_values": [ + "gimmvfi_r_arb_lpips_fp32.safetensors" + ] + }, + { + "id": 36, + "type": "LoadImage", + "pos": { + "0": -808, + "1": 573 + }, + "size": [ + 556.7343073028583, + 502.50569947324857 + ], + "flags": {}, + "order": 7, + "mode": 0, + "inputs": [], + "outputs": [ + { + "name": "IMAGE", + "type": "IMAGE", + "links": [ + 71 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "MASK", + "type": "MASK", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "LoadImage" + }, + "widgets_values": [ + "ComfyUI_temp_lhgah_00059_.png", + "image" + ] + }, + { + "id": 60, + "type": "VHS_VideoCombine", + "pos": { + "0": 2520, + "1": 180 + }, + "size": [ + 860.5738525390625, + 1444.76513671875 + ], + "flags": {}, + "order": 28, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 164 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 16, + "loop_count": 0, + "filename_prefix": "CogVideoX_Fun_orbits", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, 
+ "params": { + "filename": "CogVideoX_Fun_orbits_00003.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 16 + }, + "muted": false + } + } + }, + { + "id": 30, + "type": "CogVideoTextEncode", + "pos": { + "0": 478, + "1": 90 + }, + "size": [ + 471.90142822265625, + 168.08047485351562 + ], + "flags": {}, + "order": 16, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 54 + }, + { + "name": "prompt", + "type": "STRING", + "link": 163, + "widget": { + "name": "prompt" + } + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 116, + 140 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": [ + 110 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "camera orbit around a mouse knight standing in a fantasy forest", + 1, + false + ] + }, + { + "id": 31, + "type": "CogVideoTextEncode", + "pos": { + "0": 493, + "1": 334 + }, + "size": { + "0": 463.01251220703125, + "1": 144 + }, + "flags": {}, + "order": 17, + "mode": 0, + "inputs": [ + { + "name": "clip", + "type": "CLIP", + "link": 110 + } + ], + "outputs": [ + { + "name": "conditioning", + "type": "CONDITIONING", + "links": [ + 117, + 141 + ], + "slot_index": 0, + "shape": 3 + }, + { + "name": "clip", + "type": "CLIP", + "links": null + } + ], + "properties": { + "Node name for S&R": "CogVideoTextEncode" + }, + "widgets_values": [ + "The video is not of a high quality, it has a low resolution. Watermark present in each frame. Strange motion trajectory. ", + 1, + true + ] + }, + { + "id": 62, + "type": "CogVideoSampler", + "pos": { + "0": 1258, + "1": 1151 + }, + "size": [ + 330, + 574 + ], + "flags": {}, + "order": 19, + "mode": 0, + "inputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "link": 139 + }, + { + "name": "positive", + "type": "CONDITIONING", + "link": 140 + }, + { + "name": "negative", + "type": "CONDITIONING", + "link": 141 + }, + { + "name": "samples", + "type": "LATENT", + "link": null, + "shape": 7 + }, + { + "name": "image_cond_latents", + "type": "LATENT", + "link": 147, + "shape": 7 + }, + { + "name": "context_options", + "type": "COGCONTEXT", + "link": null, + "shape": 7 + }, + { + "name": "controlnet", + "type": "COGVIDECONTROLNET", + "link": null, + "shape": 7 + }, + { + "name": "tora_trajectory", + "type": "TORAFEATURES", + "link": null, + "shape": 7 + }, + { + "name": "fastercache", + "type": "FASTERCACHEARGS", + "link": null, + "shape": 7 + }, + { + "name": "num_frames", + "type": "INT", + "link": 143, + "widget": { + "name": "num_frames" + } + }, + { + "name": "seed", + "type": "INT", + "link": 157, + "widget": { + "name": "seed" + } + } + ], + "outputs": [ + { + "name": "samples", + "type": "LATENT", + "links": [ + 148 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoSampler" + }, + "widgets_values": [ + 33, + 40, + 6, + 458091243358278, + "fixed", + "CogVideoXDDIM", + 1 + ] + }, + { + "id": 64, + "type": "CogVideoDecode", + "pos": { + "0": 1258, + "1": 889 + }, + "size": { + "0": 315, + "1": 198 + }, + "flags": {}, + "order": 21, + "mode": 0, + "inputs": [ + { + "name": "vae", + "type": "VAE", + "link": 149 + }, + { + "name": "samples", + "type": "LATENT", + "link": 148 + } + ], + "outputs": [ + { + "name": "images", + "type": "IMAGE", + "links": [ + 150, + 153 + ], + "slot_index": 0 + } + ], + "properties": { + "Node name for S&R": "CogVideoDecode" + }, + 
"widgets_values": [ + true, + 240, + 360, + 0.2, + 0.2, + true + ] + }, + { + "id": 44, + "type": "VHS_VideoCombine", + "pos": { + "0": 1652, + "1": -465 + }, + "size": [ + 592.7721081788095, + 1087.6961669921875 + ], + "flags": {}, + "order": 24, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 131 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "CogVideoX_Fun", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "CogVideoX_Fun_00027.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + }, + "muted": false + } + } + }, + { + "id": 65, + "type": "VHS_VideoCombine", + "pos": { + "0": 1674, + "1": 688 + }, + "size": [ + 620.0130829180325, + 1124.0174560546875 + ], + "flags": {}, + "order": 23, + "mode": 0, + "inputs": [ + { + "name": "images", + "type": "IMAGE", + "link": 150 + }, + { + "name": "audio", + "type": "AUDIO", + "link": null, + "shape": 7 + }, + { + "name": "meta_batch", + "type": "VHS_BatchManager", + "link": null, + "shape": 7 + }, + { + "name": "vae", + "type": "VAE", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "Filenames", + "type": "VHS_FILENAMES", + "links": null, + "shape": 3 + } + ], + "properties": { + "Node name for S&R": "VHS_VideoCombine" + }, + "widgets_values": { + "frame_rate": 8, + "loop_count": 0, + "filename_prefix": "CogVideoX_Fun", + "format": "video/h264-mp4", + "pix_fmt": "yuv420p", + "crf": 19, + "save_metadata": true, + "pingpong": false, + "save_output": false, + "videopreview": { + "hidden": false, + "paused": false, + "params": { + "filename": "CogVideoX_Fun_00026.mp4", + "subfolder": "", + "type": "temp", + "format": "video/h264-mp4", + "frame_rate": 8 + }, + "muted": false + } + } + }, + { + "id": 49, + "type": "DownloadAndLoadCogVideoModel", + "pos": { + "0": 450, + "1": -217 + }, + "size": { + "0": 362.1656799316406, + "1": 218 + }, + "flags": {}, + "order": 8, + "mode": 0, + "inputs": [ + { + "name": "block_edit", + "type": "TRANSFORMERBLOCKS", + "link": null, + "shape": 7 + }, + { + "name": "lora", + "type": "COGLORA", + "link": 124, + "shape": 7 + }, + { + "name": "compile_args", + "type": "COMPILEARGS", + "link": null, + "shape": 7 + } + ], + "outputs": [ + { + "name": "model", + "type": "COGVIDEOMODEL", + "links": [ + 114, + 139 + ] + }, + { + "name": "vae", + "type": "VAE", + "links": [ + 119, + 122, + 144, + 149 + ], + "slot_index": 1 + } + ], + "properties": { + "Node name for S&R": "DownloadAndLoadCogVideoModel" + }, + "widgets_values": [ + "alibaba-pai/CogVideoX-Fun-V1.1-5b-InP", + "bf16", + "disabled", + false, + "sdpa", + "main_device" + ] + } + ], + "links": [ + [ + 54, + 20, + 0, + 30, + 0, + "CLIP" + ], + [ + 71, + 36, + 0, + 37, + 0, + "IMAGE" + ], + [ + 110, + 30, + 1, + 31, + 0, + "CLIP" + ], + [ + 114, + 49, + 0, + 48, + 0, + "COGVIDEOMODEL" + ], + [ + 116, + 30, + 0, + 48, + 1, + "CONDITIONING" + ], + [ + 117, + 31, + 0, + 
48, + 2, + "CONDITIONING" + ], + [ + 119, + 49, + 1, + 50, + 0, + "VAE" + ], + [ + 120, + 50, + 0, + 48, + 4, + "LATENT" + ], + [ + 122, + 49, + 1, + 51, + 0, + "VAE" + ], + [ + 123, + 48, + 0, + 51, + 1, + "LATENT" + ], + [ + 124, + 52, + 0, + 49, + 1, + "COGLORA" + ], + [ + 126, + 53, + 0, + 50, + 3, + "INT" + ], + [ + 127, + 53, + 0, + 48, + 9, + "INT" + ], + [ + 128, + 37, + 0, + 54, + 0, + "IMAGE" + ], + [ + 129, + 54, + 0, + 50, + 1, + "IMAGE" + ], + [ + 130, + 51, + 0, + 55, + 0, + "IMAGE" + ], + [ + 131, + 55, + 0, + 44, + 0, + "IMAGE" + ], + [ + 134, + 58, + 0, + 59, + 0, + "GIMMVIF_MODEL" + ], + [ + 139, + 49, + 0, + 62, + 0, + "COGVIDEOMODEL" + ], + [ + 140, + 30, + 0, + 62, + 1, + "CONDITIONING" + ], + [ + 141, + 31, + 0, + 62, + 2, + "CONDITIONING" + ], + [ + 143, + 53, + 0, + 62, + 9, + "INT" + ], + [ + 144, + 49, + 1, + 63, + 0, + "VAE" + ], + [ + 145, + 53, + 0, + 63, + 3, + "INT" + ], + [ + 146, + 37, + 0, + 63, + 1, + "IMAGE" + ], + [ + 147, + 63, + 0, + 62, + 4, + "LATENT" + ], + [ + 148, + 62, + 0, + 64, + 1, + "LATENT" + ], + [ + 149, + 49, + 1, + 64, + 0, + "VAE" + ], + [ + 150, + 64, + 0, + 65, + 0, + "IMAGE" + ], + [ + 151, + 55, + 0, + 66, + 0, + "IMAGE" + ], + [ + 152, + 66, + 0, + 67, + 0, + "IMAGE" + ], + [ + 153, + 64, + 0, + 67, + 1, + "IMAGE" + ], + [ + 156, + 68, + 0, + 48, + 10, + "INT" + ], + [ + 157, + 68, + 0, + 62, + 10, + "INT" + ], + [ + 158, + 69, + 0, + 70, + 1, + "FL2MODEL" + ], + [ + 159, + 37, + 0, + 70, + 0, + "IMAGE" + ], + [ + 160, + 71, + 0, + 72, + 0, + "STRING" + ], + [ + 161, + 70, + 2, + 73, + 0, + "STRING" + ], + [ + 162, + 70, + 2, + 72, + 1, + "STRING" + ], + [ + 163, + 72, + 0, + 30, + 1, + "STRING" + ], + [ + 164, + 59, + 0, + 60, + 0, + "IMAGE" + ], + [ + 165, + 67, + 0, + 59, + 1, + "IMAGE" + ] + ], + "groups": [], + "config": {}, + "extra": { + "ds": { + "scale": 0.47362440744777223, + "offset": [ + 1633.9967545643788, + 525.3824652843582 + ] + } + }, + "version": 0.4 +} \ No newline at end of file diff --git a/model_loading.py b/model_loading.py index bc0cd75..695804a 100644 --- a/model_loading.py +++ b/model_loading.py @@ -240,37 +240,37 @@ class DownloadAndLoadCogVideoModel: #LoRAs if lora is not None: - # from .lora_utils import merge_lora#, load_lora_into_transformer - # if "fun" in model.lower(): - # for l in lora: - # log.info(f"Merging LoRA weights from {l['path']} with strength {l['strength']}") - # transformer = merge_lora(transformer, l["path"], l["strength"]) - #else: - adapter_list = [] - adapter_weights = [] - for l in lora: - fuse = True if l["fuse_lora"] else False - lora_sd = load_torch_file(l["path"]) - for key, val in lora_sd.items(): - if "lora_B" in key: - lora_rank = val.shape[1] - break - log.info(f"Merging rank {lora_rank} LoRA weights from {l['path']} with strength {l['strength']}") - adapter_name = l['path'].split("/")[-1].split(".")[0] - adapter_weight = l['strength'] - pipe.load_lora_weights(l['path'], weight_name=l['path'].split("/")[-1], lora_rank=lora_rank, adapter_name=adapter_name) - - #transformer = load_lora_into_transformer(lora, transformer) - adapter_list.append(adapter_name) - adapter_weights.append(adapter_weight) - for l in lora: - pipe.set_adapters(adapter_list, adapter_weights=adapter_weights) - if fuse: - lora_scale = 1 - dimension_loras = ["orbit", "dimensionx"] # for now dimensionx loras need scaling - if any(item in lora[-1]["path"].lower() for item in dimension_loras): - lora_scale = lora_scale / lora_rank - pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"]) + try: + 
adapter_list = [] + adapter_weights = [] + for l in lora: + fuse = True if l["fuse_lora"] else False + lora_sd = load_torch_file(l["path"]) + for key, val in lora_sd.items(): + if "lora_B" in key: + lora_rank = val.shape[1] + break + log.info(f"Merging rank {lora_rank} LoRA weights from {l['path']} with strength {l['strength']}") + adapter_name = l['path'].split("/")[-1].split(".")[0] + adapter_weight = l['strength'] + pipe.load_lora_weights(l['path'], weight_name=l['path'].split("/")[-1], lora_rank=lora_rank, adapter_name=adapter_name) + + #transformer = load_lora_into_transformer(lora, transformer) + adapter_list.append(adapter_name) + adapter_weights.append(adapter_weight) + for l in lora: + pipe.set_adapters(adapter_list, adapter_weights=adapter_weights) + if fuse: + lora_scale = 1 + dimension_loras = ["orbit", "dimensionx"] # for now dimensionx loras need scaling + if any(item in lora[-1]["path"].lower() for item in dimension_loras): + lora_scale = lora_scale / lora_rank + pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"]) + except Exception: # Fun trainer LoRAs are loaded differently + from .lora_utils import merge_lora + for l in lora: + log.info(f"Merging LoRA weights from {l['path']} with strength {l['strength']}") + transformer = merge_lora(transformer, l["path"], l["strength"]) if "fused" in attention_mode: from diffusers.models.attention import Attention diff --git a/readme.md b/readme.md index 3639341..97c72c1 100644 --- a/readme.md +++ b/readme.md @@ -1,5 +1,32 @@ # WORK IN PROGRESS +## BREAKING Update8 + +This is a big one, and unfortunately the necessary cleanup and refactoring will break every old workflow as it is. +I apologize for the inconvenience; if I don't do this now I'll keep making it worse until maintaining becomes too much of a chore, so from my pov there was no choice. + +*Please either use the new workflows or fix the nodes in your old ones before posting issue reports!* + +The old version will be kept in a legacy branch, but not maintained. + +- Support CogVideoX 1.5 models +- Major code cleanup (it was bad, still isn't great, wip) +- Merge Fun -model functionality into the main pipeline: + - All Fun-specific nodes, besides the image encode node for Fun -InP models, are gone + - Main CogVideo Sampler works with Fun models + - DimensionX LoRAs now work with Fun models as well + +- Remove width/height from the sampler widgets and detect them from the input instead; this means text2vid now requires using empty latents +- Separate the VAE from the model, allow using an fp32 VAE +- Add ability to load some of the non-GGUF models as single files (only a few available for now: https://huggingface.co/Kijai/CogVideoX-comfy) +- Add some torchao quantizations as options +- Add interpolation as an option for the main encode node; the old interpolation-specific node is gone +- torch.compile optimizations +- Remove PAB in favor of FasterCache and cleaner code +- Other smaller things I forgot about at this point + +For Fun -model based workflows it's a more drastic change; for others, migrating generally means re-setting many of the nodes. ## Update7 - Refactored the Fun version's sampler to accept any resolution, this should make it a lot simpler to use with Tora. **BREAKS OLD WORKFLOWS**, old FunSampler nodes need to be remade.
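For anyone porting the LoRA handling outside ComfyUI, the new load/fuse path in the model_loading.py hunk above reduces to roughly the following standalone sketch. It assumes a diffusers CogVideoX pipeline with PEFT-backed `load_lora_weights`/`set_adapters`/`fuse_lora`; `pipe`, the file path, and the helper names are placeholders, not the wrapper's actual implementation:

```python
# Minimal sketch of the LoRA load/fuse flow above (assumed diffusers API;
# `pipe` is a CogVideoX pipeline, paths and names are placeholders).
from safetensors.torch import load_file


def detect_lora_rank(lora_sd: dict) -> int:
    # The LoRA rank is the inner dimension of any lora_B ("up") matrix.
    for key, val in lora_sd.items():
        if "lora_B" in key:
            return val.shape[1]
    raise ValueError("no lora_B weights found in state dict")


def load_and_fuse_lora(pipe, lora_path: str, strength: float, fuse: bool = True):
    lora_rank = detect_lora_rank(load_file(lora_path))
    adapter_name = lora_path.split("/")[-1].split(".")[0]
    pipe.load_lora_weights(lora_path, adapter_name=adapter_name)
    pipe.set_adapters([adapter_name], adapter_weights=[strength])
    if fuse:
        lora_scale = 1.0
        # Orbit/DimensionX LoRAs currently need their fused scale divided by rank.
        if any(tag in lora_path.lower() for tag in ("orbit", "dimensionx")):
            lora_scale /= lora_rank
        pipe.fuse_lora(lora_scale=lora_scale, components=["transformer"])
```

The `1/rank` fused scale mirrors the `lora_scale = lora_scale / lora_rank` special case for orbit/DimensionX LoRAs in the diff; Fun-trainer LoRAs instead go through the `merge_lora` fallback in the `except` branch.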