From 4470ee2f90661d9eb632687750dfb1a5a2404032 Mon Sep 17 00:00:00 2001
From: Alexander Matveev <59768536+alexm-redhat@users.noreply.github.com>
Date: Thu, 4 Dec 2025 19:03:17 -0500
Subject: [PATCH] [Perf] Enable separate shared_experts stream only for CUDA
 (#30085)

Signed-off-by: Alexander Matveev
---
 vllm/model_executor/layers/fused_moe/layer.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py
index 902a77987d61a..6001b6d83c398 100644
--- a/vllm/model_executor/layers/fused_moe/layer.py
+++ b/vllm/model_executor/layers/fused_moe/layer.py
@@ -863,7 +863,8 @@ class FusedMoE(CustomOp):
         use_chunked_impl: bool,
     ) -> tuple[bool, torch.Tensor | None]:
         use_shared_experts_stream = (
-            has_separate_shared_experts
+            current_platform.is_cuda()
+            and has_separate_shared_experts
             and not use_chunked_impl
             and self.shared_experts_stream is not None
             and (