From fdc135d768267b3a0ae8ed6fc3eca6a68d75f7a6 Mon Sep 17 00:00:00 2001
From: Tsukasa OI
Date: Sat, 13 Dec 2025 14:55:14 +0900
Subject: [PATCH] [Misc][Quantization] Clarify the intent of GGUF `FusedMoE`
 weight materialization (#30310)

Signed-off-by: Tsukasa OI
---
 vllm/model_executor/layers/fused_moe/layer.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py
index 7f803720d4770..eba6ab4cc35f7 100644
--- a/vllm/model_executor/layers/fused_moe/layer.py
+++ b/vllm/model_executor/layers/fused_moe/layer.py
@@ -1200,10 +1200,14 @@ class FusedMoE(CustomOp):
         if full_load:
             shard_dim += 1
 
-        # Materialize GGUF UninitializedParameter
+        # Materialize GGUF UninitializedParameter, handling merged weights
         if is_gguf_weight and isinstance(param, UninitializedParameter):
+            # Materializing the tensor requires its full shape, including
+            # the number of experts, so this path requires `full_load`.
+            assert full_load
             final_shape = list(loaded_weight.shape)
-            if shard_id in ["w1", "w3"]:
+            # w1 and w3 are merged per expert.
+            if shard_id in {"w1", "w3"}:
                 final_shape[1] *= 2
             final_shape[shard_dim] = final_shape[shard_dim] // self.tp_size
             param.materialize(final_shape, dtype=loaded_weight.dtype)
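
For illustration, here is a minimal standalone sketch of the materialization
step above, outside vLLM. The sizes (num_experts, intermediate, hidden,
tp_size) and the shard_dim/full_load values are hypothetical placeholders for
values vLLM derives from the checkpoint and parallel config; only PyTorch's
lazy-parameter API (UninitializedParameter.materialize) is assumed. It shows
why the full per-expert shape must be known before materialize() can allocate
the tensor, which is what the new `assert full_load` guards.

    import torch
    from torch.nn.parameter import UninitializedParameter

    # Hypothetical sizes, standing in for values derived from the checkpoint.
    num_experts = 4    # experts dimension, known only on a full load
    intermediate = 8   # per-expert intermediate size (one w1/w3 half)
    hidden = 16        # hidden size
    tp_size = 2        # tensor-parallel world size

    param = UninitializedParameter()

    # A loaded GGUF shard for one merged weight, e.g. w1:
    # shape (num_experts, intermediate, hidden).
    loaded_weight = torch.empty(num_experts, intermediate, hidden)
    shard_id = "w1"
    shard_dim = 1      # dimension split across tensor-parallel ranks
    full_load = True   # the experts dimension is present in loaded_weight

    # Mirrors the patched logic: without the full shape (including the
    # experts dimension), materialize() cannot allocate the tensor,
    # hence the assert on full_load.
    assert full_load
    final_shape = list(loaded_weight.shape)
    if shard_id in {"w1", "w3"}:  # w1 and w3 are merged per expert
        final_shape[1] *= 2
    final_shape[shard_dim] //= tp_size
    param.materialize(final_shape, dtype=loaded_weight.dtype)
    print(tuple(param.shape))  # (4, 8, 16): merged dim doubled, then TP-split

After materialize(), the lazy parameter becomes an ordinary torch.nn.Parameter
with a concrete shape, so the subsequent per-shard weight copies can proceed.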