[Bugfix] fix Qwen3VLMoe load when pp > 1 (#25838)

Signed-off-by: liuye.hj <liuye.hj@alibaba-inc.com>
Co-authored-by: liuye.hj <liuye.hj@alibaba-inc.com>
Signed-off-by: simon-mo <simon.mo@hey.com>
This commit is contained in:
JJJYmmm 2025-09-29 01:56:12 +08:00 committed by simon-mo
parent 4c347044c9
commit 09c2cbc04a

View File

@@ -212,6 +212,8 @@ class Qwen3MoeLLMModel(Qwen3MoeModel):
# attempted to load as other weights later
is_expert_weight = True
name_mapped = name.replace(weight_name, param_name)
if is_pp_missing_parameter(name_mapped, self):
continue
if is_fused_expert:
loaded_weight = loaded_weight.transpose(-1,
-2) # no bias
@@ -230,8 +232,6 @@ class Qwen3MoeLLMModel(Qwen3MoeModel):
name_mapped, params_dict, loaded_weight,
shard_id, num_experts)
else:
if is_pp_missing_parameter(name_mapped, self):
continue
# Skip loading extra parameters for GPTQ/modelopt models
if name_mapped.endswith(
ignore_suffixes