[perf] Enable concurrent execution of "shared_experts" and "selected_experts" in qwen3-next (#27578)

Signed-off-by: zjy0516 <riverclouds.zhu@qq.com>
Jiangyun Zhu 2025-10-29 16:12:54 +08:00 committed by GitHub
parent 4fb8771cc0
commit 8df98c2161


@@ -159,6 +159,7 @@ class Qwen3NextSparseMoeBlock(nn.Module):
         self.experts = SharedFusedMoE(
             shared_experts=self.shared_expert,
+            gate=self.gate,
             num_experts=self.n_routed_experts,
             top_k=config.num_experts_per_tok,
             hidden_size=config.hidden_size,
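The hunk above hands the router (`self.gate`) to `SharedFusedMoE`, so the fused-MoE layer can own routing itself and overlap the shared expert with the routed experts. A minimal sketch of that idea, using a hypothetical wrapper class (everything outside the diff, including the class name and constructor shape, is an assumption rather than vLLM's actual implementation; only the `is_internal_router` flag is taken from the diff below):

```python
import torch.nn as nn


class SharedFusedMoESketch(nn.Module):
    """Hypothetical stand-in for SharedFusedMoE, for illustration only."""

    def __init__(self, shared_experts: nn.Module | None = None,
                 gate: nn.Module | None = None, **kwargs):
        super().__init__()
        self.shared_experts = shared_experts
        # When the gate is owned by the MoE layer, routing can be computed
        # inside forward(), freeing the caller from running the gate first.
        self.gate = gate

    @property
    def is_internal_router(self) -> bool:
        # Mirrors the flag the model's forward pass checks in the diff below.
        return self.gate is not None
```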
@@ -181,11 +182,17 @@ class Qwen3NextSparseMoeBlock(nn.Module):
         if self.is_sequence_parallel:
             hidden_states = sequence_parallel_chunk(hidden_states)
-        # router_logits: (num_tokens, n_experts)
-        router_logits, _ = self.gate(hidden_states)
-        final_hidden_states = self.experts(
-            hidden_states=hidden_states, router_logits=router_logits
-        )
+        if self.experts.is_internal_router:
+            # In this case, the gate/router runs inside the FusedMoE class.
+            final_hidden_states = self.experts(
+                hidden_states=hidden_states, router_logits=hidden_states
+            )
+        else:
+            # router_logits: (num_tokens, n_experts)
+            router_logits, _ = self.gate(hidden_states)
+            final_hidden_states = self.experts(
+                hidden_states=hidden_states, router_logits=router_logits
+            )
         if self.shared_expert is not None:
             final_hidden_states = final_hidden_states[0] + final_hidden_states[1]
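For context on why the internal-router path helps: once the fused layer owns both the gate and the shared expert, the dense shared expert and the sparse selected experts can be launched so that they overlap on the GPU. The sketch below illustrates the general idea with two CUDA streams; the callables, stream handling, and function name are assumptions for illustration, not how SharedFusedMoE is actually implemented.

```python
import torch


def moe_forward_with_overlap(hidden_states: torch.Tensor,
                             gate, shared_expert, fused_experts) -> torch.Tensor:
    """Illustrative only: overlap the shared expert with routing and the
    selected (routed) experts. `gate`, `shared_expert`, and `fused_experts`
    are assumed callables; a CUDA device is assumed."""
    shared_stream = torch.cuda.Stream()
    # Make the side stream wait until hidden_states is ready.
    shared_stream.wait_stream(torch.cuda.current_stream())

    # Launch the dense shared expert on the side stream ...
    with torch.cuda.stream(shared_stream):
        shared_out = shared_expert(hidden_states)

    # ... while the router and the routed experts run on the default stream.
    # With an internal router, the gate is applied here rather than by the caller.
    router_logits = gate(hidden_states)
    routed_out = fused_experts(hidden_states, router_logits)

    # Join the streams before combining, mirroring
    # `final_hidden_states[0] + final_hidden_states[1]` in the model code.
    torch.cuda.current_stream().wait_stream(shared_stream)
    return shared_out + routed_out
```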