From 0e8f8140e488de191099da5dd3aea3e7ca2a0e12 Mon Sep 17 00:00:00 2001
From: kijai <40791699+kijai@users.noreply.github.com>
Date: Thu, 31 Oct 2024 02:03:59 +0200
Subject: [PATCH] clear FasterCache after sampling is done

---
 cogvideox_fun/pipeline_cogvideox_inpaint.py |  3 +--
 nodes.py                                    | 14 ++++++++++++++
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/cogvideox_fun/pipeline_cogvideox_inpaint.py b/cogvideox_fun/pipeline_cogvideox_inpaint.py
index 45ff719..f6e1241 100644
--- a/cogvideox_fun/pipeline_cogvideox_inpaint.py
+++ b/cogvideox_fun/pipeline_cogvideox_inpaint.py
@@ -723,8 +723,6 @@ class CogVideoX_Fun_Pipeline_Inpaint(VideoSysPipeline):
 
         device = self._execution_device
 
-        #self.vae.to(device)
-
         # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
         # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
         # corresponds to doing no classifier free guidance.
@@ -911,6 +909,7 @@ class CogVideoX_Fun_Pipeline_Inpaint(VideoSysPipeline):
         for i, t in enumerate(timesteps):
             if self.interrupt:
                 continue
+
             if use_temporal_tiling and isinstance(self.scheduler, CogVideoXDDIMScheduler):
                 #temporal tiling code based on https://github.com/mayuelala/FollowYourEmoji/blob/main/models/video_pipeline.py
                 # =====================================================
diff --git a/nodes.py b/nodes.py
index f86ffe8..5eb5bdf 100644
--- a/nodes.py
+++ b/nodes.py
@@ -772,6 +772,13 @@ class CogVideoSampler:
             )
         if not pipeline["cpu_offloading"]:
             pipe.transformer.to(offload_device)
+
+        if fastercache is not None:
+            for block in pipe.transformer.transformer_blocks:
+                if hasattr(block, "cached_hidden_states") and block.cached_hidden_states is not None:
+                    block.cached_hidden_states = None
+                    block.cached_encoder_hidden_states = None
+
         mm.soft_empty_cache()
 
         return (pipeline, {"samples": latents})
@@ -1012,6 +1019,13 @@ class CogVideoXFunSampler:
             )
         #if not pipeline["cpu_offloading"]:
         #    pipe.transformer.to(offload_device)
+        #clear FasterCache
+        if fastercache is not None:
+            for block in pipe.transformer.transformer_blocks:
+                if hasattr(block, "cached_hidden_states") and block.cached_hidden_states is not None:
+                    block.cached_hidden_states = None
+                    block.cached_encoder_hidden_states = None
+
         mm.soft_empty_cache()
 
         return (pipeline, {"samples": latents})
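
Note: the cleanup block is duplicated verbatim in CogVideoSampler and CogVideoXFunSampler. A minimal sketch of a shared helper both call sites could use, assuming the transformer blocks carry the cached_hidden_states / cached_encoder_hidden_states attributes that FasterCache sets (the helper name clear_fastercache is hypothetical, not part of this patch):

    def clear_fastercache(transformer):
        # Drop FasterCache's cached activations so their VRAM can be reclaimed.
        for block in transformer.transformer_blocks:
            if getattr(block, "cached_hidden_states", None) is not None:
                block.cached_hidden_states = None
                block.cached_encoder_hidden_states = None

    # Usage at the end of each sampler, mirroring the patch:
    if fastercache is not None:
        clear_fastercache(pipe.transformer)
    mm.soft_empty_cache()

Using getattr with a None default folds the hasattr check and the None comparison into one lookup, so blocks that were never touched by FasterCache are skipped without raising AttributeError.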