[BugFix] Fix glm4_moe_mtp load weights bug (#28805)

Signed-off-by: wuyaoxuehun <798143193@qq.com>
wuyaoxuehun 2025-11-17 16:13:11 +07:00 committed by GitHub
parent 577bb34fff
commit ab01cd14e5

@@ -256,13 +256,12 @@ class Glm4MoeMTP(nn.Module, SupportsPP, Glm4MixtureOfExperts):
         params_dict = dict(self.named_parameters())
         loaded_params: set[str] = set()
-        spec_layer = self.model.mtp_start_layer_idx
         for name, loaded_weight in weights:
             if name == "lm_head.weight":
-                name = f"model.layers.{spec_layer}.shard_head.head.weight"
+                spec_layer = self.model.mtp_start_layer_idx
+                name = f"model.layers.{spec_layer}.shared_head.head.weight"
             elif name == "model.embed_tokens.weight":
-                # This name is same with local model, rewriting is not needed.
-                pass
+                spec_layer = self.model.mtp_start_layer_idx
             else:
                 spec_layer = get_spec_layer_idx_from_weight_name(self.config, name)
                 if spec_layer is None:
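
For context, the hunk fixes two problems in `Glm4MoeMTP.load_weights`. First, `lm_head.weight` was remapped to `shard_head.head.weight`, a typo for `shared_head.head.weight`, so the renamed key never matched an entry in `params_dict`. Second, `spec_layer` was assigned only once before the loop and then clobbered by the `else` branch, so the remapping of `lm_head.weight` and `model.embed_tokens.weight` depended on the iteration order of the checkpoint. Below is a minimal, self-contained sketch of the corrected flow; the constant and the regex-based helper are hypothetical stand-ins for `self.model.mtp_start_layer_idx` and vLLM's real `get_spec_layer_idx_from_weight_name`:

```python
import re

MTP_START_LAYER_IDX = 46  # hypothetical; the real value comes from the model config


def get_spec_layer_idx_from_weight_name(name: str) -> int | None:
    # Stub: return the layer index for weights that live in an MTP layer,
    # None for ordinary base-model weights.
    m = re.match(r"model\.layers\.(\d+)\.", name)
    if m and int(m.group(1)) >= MTP_START_LAYER_IDX:
        return int(m.group(1))
    return None


def remap(name: str) -> tuple[str, int | None]:
    # Mirrors the fixed control flow: spec_layer is assigned on every
    # iteration, never carried over from a previous weight.
    if name == "lm_head.weight":
        spec_layer = MTP_START_LAYER_IDX
        name = f"model.layers.{spec_layer}.shared_head.head.weight"
    elif name == "model.embed_tokens.weight":
        # Name already matches the local model; only spec_layer is needed.
        spec_layer = MTP_START_LAYER_IDX
    else:
        spec_layer = get_spec_layer_idx_from_weight_name(name)
    return name, spec_layer


# With the old code, a base-model weight first set spec_layer (possibly to
# None), and a later "lm_head.weight" was renamed with that stale value and
# the "shard_head" typo, so the parameter lookup failed.
for name in ("model.layers.0.mlp.gate.weight", "lm_head.weight"):
    print(remap(name))
# ('model.layers.0.mlp.gate.weight', None)
# ('model.layers.46.shared_head.head.weight', 46)
```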