From 2b04c209ee98174f29f1fc98f0dc3222d652a7bd Mon Sep 17 00:00:00 2001
From: Michael Goin
Date: Mon, 3 Mar 2025 16:20:24 -0500
Subject: [PATCH] [Bugfix] Allow shared_experts skip quantization for
 DeepSeekV2/V3 (#14100)

Signed-off-by: mgoin
---
 vllm/model_executor/models/deepseek_v2.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py
index 7ff61f9a1826f..cf244ff572c30 100644
--- a/vllm/model_executor/models/deepseek_v2.py
+++ b/vllm/model_executor/models/deepseek_v2.py
@@ -145,6 +145,7 @@ class DeepseekV2MoE(nn.Module):
                 hidden_act=config.hidden_act,
                 quant_config=quant_config,
                 reduce_results=False,
+                prefix=f"{prefix}.shared_experts",
             )

     def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
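
Context for the change (not part of the patch itself): quantization configs in vLLM generally decide per-module whether to quantize by matching the module's fully qualified name, which is assembled from the `prefix` passed down through the model, so without `prefix=f"{prefix}.shared_experts"` the shared-experts MLP cannot be targeted by a skip/ignore rule. The snippet below is a minimal sketch of that kind of prefix-based matching under assumed names; the `is_quant_skipped` helper and the `ignore` patterns are hypothetical illustrations, not vLLM's actual API.

```python
# Minimal sketch of prefix-based quantization skipping (illustrative only,
# not vLLM's implementation). A module is skipped when its fully qualified
# name matches one of the config's ignore patterns.
import fnmatch


def is_quant_skipped(module_name: str, ignore_patterns: list[str]) -> bool:
    """Return True if the module's fully qualified name matches an ignore pattern."""
    return any(fnmatch.fnmatch(module_name, pat) for pat in ignore_patterns)


# Hypothetical ignore list from a quantization config.
ignore = ["*.mlp.shared_experts.*", "lm_head"]

# With the fix, sub-layers of the shared experts carry the full path and match:
print(is_quant_skipped("model.layers.3.mlp.shared_experts.gate_up_proj", ignore))  # True

# A name that lacks the ".shared_experts" path segment cannot be matched:
print(is_quant_skipped("gate_up_proj", ignore))  # False
```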