From 7dbe6d81d6f17abe93389d97d417e4886467546f Mon Sep 17 00:00:00 2001
From: Chaojun Zhang
Date: Tue, 11 Nov 2025 20:46:47 +0800
Subject: [PATCH] Fix Fused MoE LoRA Triton kernel bug (#28450)

Signed-off-by: chaojun-zhang
---
 vllm/lora/ops/triton_ops/fused_moe_lora_op.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/vllm/lora/ops/triton_ops/fused_moe_lora_op.py b/vllm/lora/ops/triton_ops/fused_moe_lora_op.py
index 6d6de2529de3d..893972144e99a 100644
--- a/vllm/lora/ops/triton_ops/fused_moe_lora_op.py
+++ b/vllm/lora/ops/triton_ops/fused_moe_lora_op.py
@@ -26,7 +26,7 @@ def _get_ptr(lora_weights: list[torch.Tensor], device: torch.device):
     tensor_ptrs = []
     for lora_weight in lora_weights:
         tensor_ptrs.append(lora_weight.data_ptr())
-    ptr_tensor = torch.tensor(tensor_ptrs, device=device)
+    ptr_tensor = torch.tensor(tensor_ptrs, device=device, dtype=torch.uint64)
     _LORA_PTR_DICT[key] = ptr_tensor
     return _LORA_PTR_DICT.get(key)
 
@@ -85,6 +85,7 @@ def _fused_moe_lora_kernel(
     GROUP_SIZE_M: tl.constexpr,
     SPLIT_K: tl.constexpr,
     USE_GDC: tl.constexpr,
+    launch_pdl: tl.constexpr,
     IS_PRIMARY: tl.constexpr,
 ):
     pid = tl.program_id(axis=0)
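
Note on the first hunk (a plausible reading; the commit message does not spell out the failure mode): torch.Tensor.data_ptr() returns a raw device address as a Python int, and torch.tensor(tensor_ptrs, device=device) with no explicit dtype infers torch.int64, so the kernel-side pointer table can end up holding addresses under a signed integer type. Pinning dtype=torch.uint64 makes the table unambiguously unsigned 64-bit. A minimal standalone sketch of the dtype difference (requires PyTorch 2.3+ for torch.uint64; the tensors are stand-ins, not vLLM's LoRA weights):

    import torch

    # Stand-ins for stacked LoRA weight tensors.
    weights = [torch.randn(4, 4) for _ in range(3)]
    # Raw addresses as Python ints, mirroring _get_ptr().
    ptrs = [w.data_ptr() for w in weights]

    inferred = torch.tensor(ptrs)                    # dtype inferred: torch.int64
    pinned = torch.tensor(ptrs, dtype=torch.uint64)  # dtype pinned, as in the fix

    print(inferred.dtype, pinned.dtype)  # torch.int64 torch.uint64

The second hunk adds launch_pdl to the kernel's constexpr parameter list next to USE_GDC; presumably this keeps the kernel signature in sync with a launch-side launch_pdl keyword used for programmatic dependent launch (PDL), though the patch itself does not say so.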