From 41a0f3338182b32adab3c146833d2666f4b48d56 Mon Sep 17 00:00:00 2001
From: kijai <40791699+kijai@users.noreply.github.com>
Date: Tue, 19 Nov 2024 20:27:31 +0200
Subject: [PATCH] Update model_loading.py

---
 model_loading.py | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/model_loading.py b/model_loading.py
index c190b54..db9d814 100644
--- a/model_loading.py
+++ b/model_loading.py
@@ -425,7 +425,7 @@ class DownloadAndLoadCogVideoGGUFModel:
             },
             "optional": {
                 "block_edit": ("TRANSFORMERBLOCKS", {"default": None}),
-                "compile_args":("COMPILEARGS", ),
+                #"compile_args":("COMPILEARGS", ),
                 "attention_mode": (["sdpa", "sageattn"], {"default": "sdpa"}),
             }
         }
@@ -523,12 +523,6 @@ class DownloadAndLoadCogVideoGGUFModel:
             from .fp8_optimization import convert_fp8_linear
             convert_fp8_linear(transformer, vae_dtype, params_to_keep=params_to_keep)

-        if compile_args is not None:
-            torch._dynamo.config.cache_size_limit = compile_args["dynamo_cache_size_limit"]
-            for i, block in enumerate(transformer.transformer_blocks):
-                if "CogVideoXBlock" in str(block):
-                    transformer.transformer_blocks[i] = torch.compile(block, fullgraph=compile_args["fullgraph"], dynamic=compile_args["dynamic"], backend=compile_args["backend"], mode=compile_args["mode"])
-
         with open(scheduler_path) as f:
             scheduler_config = json.load(f)
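
Note: the second hunk removes the per-block torch.compile step for the GGUF loader. For reference, a minimal standalone sketch of that removed pattern is below; the transformer object and the example compile_args values are assumptions reconstructed from the removed hunk, not the node's actual loading path.

import torch
import torch.nn as nn

def compile_cogvideox_blocks(transformer: nn.Module, compile_args: dict) -> nn.Module:
    # Raise torch._dynamo's compile-cache limit so compiling many blocks
    # does not evict earlier graphs (value taken from compile_args).
    torch._dynamo.config.cache_size_limit = compile_args["dynamo_cache_size_limit"]
    # Compile each CogVideoXBlock individually rather than the whole model,
    # so graph breaks elsewhere in the model do not disable compilation.
    for i, block in enumerate(transformer.transformer_blocks):
        if "CogVideoXBlock" in str(block):
            transformer.transformer_blocks[i] = torch.compile(
                block,
                fullgraph=compile_args["fullgraph"],
                dynamic=compile_args["dynamic"],
                backend=compile_args["backend"],
                mode=compile_args["mode"],
            )
    return transformer

# Hypothetical usage; the dict mirrors the COMPILEARGS input the node accepted.
example_args = {
    "dynamo_cache_size_limit": 64,
    "fullgraph": False,
    "dynamic": False,
    "backend": "inductor",
    "mode": "default",
}
# transformer = compile_cogvideox_blocks(transformer, example_args)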