[Misc][Quantization] Clarify the intent of GGUF FusedMoE weight materialization (#30310)

Signed-off-by: Tsukasa OI <floss_llm@irq.a4lg.com>
This commit is contained in:
Tsukasa OI 2025-12-13 14:55:14 +09:00 committed by GitHub
parent 4fa7ce46f3
commit fdc135d768
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -1200,10 +1200,14 @@ class FusedMoE(CustomOp):
if full_load:
shard_dim += 1
# Materialize GGUF UninitializedParameter
# Materialize GGUF UninitializedParameter, accounting for merged weights
if is_gguf_weight and isinstance(param, UninitializedParameter):
# To materialize a tensor, we must have the full shape including the
# number of experts, so this section requires `full_load`.
assert full_load
final_shape = list(loaded_weight.shape)
if shard_id in ["w1", "w3"]:
# w1 and w3 are merged per expert.
if shard_id in {"w1", "w3"}:
final_shape[1] *= 2
final_shape[shard_dim] = final_shape[shard_dim] // self.tp_size
param.materialize(final_shape, dtype=loaded_weight.dtype)