From 0484b6424894d785fb70f3e39c47aaee489340e3 Mon Sep 17 00:00:00 2001
From: Wentao Ye <44945378+yewentao256@users.noreply.github.com>
Date: Tue, 28 Oct 2025 08:44:05 -0400
Subject: [PATCH] [Bug] Fix shape issue for eplb expert weights (#27589)

Signed-off-by: yewentao256
Co-authored-by: Cyrus Leung
---
 vllm/model_executor/layers/fused_moe/layer.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py
index 9b826f05fe307..294dddade6cc1 100644
--- a/vllm/model_executor/layers/fused_moe/layer.py
+++ b/vllm/model_executor/layers/fused_moe/layer.py
@@ -1959,6 +1959,8 @@ class FusedMoE(CustomOp):
             if name not in NON_EXPERT_WEIGHTS
             and weight.shape != torch.Size([])
             and not name.startswith("_shared_experts.")
+            # exclude parameters from non-expert submodules (e.g. gate/shared)
+            and not name.startswith("_gate.")
         ]
 
     def set_eplb_state(
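
For context, the hunk above tightens an expert-weight collection filter. The sketch below is a minimal illustration of the idea, not vLLM's actual `FusedMoE` implementation: the toy module, the contents of `NON_EXPERT_WEIGHTS`, and all shapes are assumptions for demonstration; only the filter predicates mirror the patched code. It shows why parameters of non-expert submodules such as a gate must be excluded, since their shapes lack the leading `num_experts` dimension that EPLB expects.

```python
# Minimal sketch (NOT the vLLM implementation) of collecting per-expert
# weights from a MoE layer while skipping non-expert parameters.
import torch
import torch.nn as nn

# Hypothetical set of known non-expert parameter names.
NON_EXPERT_WEIGHTS = {"router_logits"}


class ToyFusedMoE(nn.Module):
    def __init__(self, num_experts: int = 4, hidden: int = 8) -> None:
        super().__init__()
        # Stacked expert weights: shape (num_experts, hidden, hidden).
        self.w13_weight = nn.Parameter(torch.empty(num_experts, hidden, hidden))
        # Non-expert submodules: their parameters must not be treated as
        # expert weights, because they lack the leading num_experts dim.
        self._gate = nn.Linear(hidden, num_experts)
        self._shared_experts = nn.Linear(hidden, hidden)

    def get_expert_weights(self):
        return [
            (name, weight)
            for name, weight in self.named_parameters()
            if name not in NON_EXPERT_WEIGHTS
            and weight.shape != torch.Size([])
            and not name.startswith("_shared_experts.")
            # Without this check, `_gate.weight` / `_gate.bias` would leak
            # into the expert-weight list and break EPLB shape assumptions.
            and not name.startswith("_gate.")
        ]


moe = ToyFusedMoE()
for name, weight in moe.get_expert_weights():
    print(name, tuple(weight.shape))  # only w13_weight survives the filter
```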