From c152e2a8a0f49edfc06d760f04ff617310384757 Mon Sep 17 00:00:00 2001
From: Yuxuan Zhang <2448370773@qq.com>
Date: Fri, 8 Aug 2025 10:37:23 +0800
Subject: [PATCH] not tie_word_embeddings for glm-4.5 and glm-4.5v (#22460)

Signed-off-by: zRzRzRzRzRzRzR <2448370773@qq.com>
---
 vllm/model_executor/models/glm4_moe.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/vllm/model_executor/models/glm4_moe.py b/vllm/model_executor/models/glm4_moe.py
index bd3e27662ee7c..0053e4e6ffec9 100644
--- a/vllm/model_executor/models/glm4_moe.py
+++ b/vllm/model_executor/models/glm4_moe.py
@@ -601,8 +601,6 @@ class Glm4MoeForCausalLM(nn.Module, SupportsPP, SupportsLoRA):
                                           quant_config=quant_config)
         else:
             self.lm_head = PPMissingLayer()
-        if self.config.tie_word_embeddings:
-            self.lm_head.weight = self.model.embed_tokens.weight
         self.logits_processor = LogitsProcessor(config.vocab_size)
         self.make_empty_intermediate_tensors = (
             self.model.make_empty_intermediate_tensors)
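
What the deleted branch did, as a minimal standalone sketch (plain PyTorch with a hypothetical TinyLM class, not vLLM code): tying makes lm_head and embed_tokens share one weight tensor, which is undesirable when, as the patch subject indicates for GLM-4.5 and GLM-4.5V, the word embeddings are not meant to be tied and the checkpoint provides a distinct lm_head weight.

# Illustration only; class name and sizes are made up for the sketch.
import torch.nn as nn

VOCAB_SIZE, HIDDEN_SIZE = 32, 8

class TinyLM(nn.Module):
    def __init__(self, tie_word_embeddings: bool):
        super().__init__()
        self.embed_tokens = nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
        self.lm_head = nn.Linear(HIDDEN_SIZE, VOCAB_SIZE, bias=False)
        if tie_word_embeddings:
            # Equivalent of the two deleted lines: both layers now point at
            # the same parameter tensor instead of holding separate weights.
            self.lm_head.weight = self.embed_tokens.weight

tied = TinyLM(tie_word_embeddings=True)
untied = TinyLM(tie_word_embeddings=False)
print(tied.lm_head.weight is tied.embed_tokens.weight)      # True
print(untied.lm_head.weight is untied.embed_tokens.weight)  # False

With the removal, Glm4MoeForCausalLM always keeps lm_head as its own layer (ParallelLMHead on the last pipeline rank, PPMissingLayer otherwise), matching the untied case above.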