[XPU] Support Triton path for LoRA operations on XPU (#28511)

Signed-off-by: Fanli Lin <fanli.lin@intel.com>
Fanli Lin 2025-11-13 13:31:42 +08:00 committed by GitHub
parent 7dca0c90cb
commit dbbe0c756a
3 changed files with 7 additions and 1 deletion

@@ -48,6 +48,7 @@ def _lora_expand_kernel(
     SLICE_NUM: tl.constexpr,
     SAME_STRIDE: tl.constexpr,
     USE_GDC: tl.constexpr,
+    launch_pdl: tl.constexpr,
 ):
     cta_n_num = tl.cdiv(N, BLOCK_N)
     cta_m_num = tl.cdiv(M, BLOCK_M)

@@ -46,6 +46,7 @@ def _lora_shrink_kernel(
     GROUP_SIZE_M: tl.constexpr,
     SLICE_NUM: tl.constexpr,
     USE_GDC: tl.constexpr,
+    launch_pdl: tl.constexpr,
 ):
     cta_n_num = tl.cdiv(N, BLOCK_N)
     cta_m_num = tl.cdiv(M, BLOCK_M)
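Note on the two kernel-signature changes above: both _lora_expand_kernel and _lora_shrink_kernel gain a launch_pdl: tl.constexpr parameter next to the existing USE_GDC flag. A tl.constexpr argument is baked in at compile time, so such a flag specializes the generated kernel rather than branching at runtime. The sketch below is illustrative only (not vLLM code, all names invented) and shows how a compile-time flag of this kind is threaded from the Python launch site into a Triton kernel.

import torch
import triton
import triton.language as tl


@triton.jit
def _scale_kernel(
    x_ptr,
    out_ptr,
    n_elements,
    BLOCK_SIZE: tl.constexpr,
    APPLY_SCALE: tl.constexpr,  # compile-time flag, analogous in spirit to launch_pdl
):
    pid = tl.program_id(axis=0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    x = tl.load(x_ptr + offsets, mask=mask)
    if APPLY_SCALE:
        # Resolved at compile time: this branch is eliminated entirely
        # when APPLY_SCALE is False.
        x = x * 2.0
    tl.store(out_ptr + offsets, x, mask=mask)


def scale(x: torch.Tensor, apply_scale: bool) -> torch.Tensor:
    out = torch.empty_like(x)
    n = x.numel()
    grid = (triton.cdiv(n, 1024),)
    # constexpr arguments are passed like ordinary keyword arguments;
    # each distinct value triggers a separate compilation of the kernel.
    _scale_kernel[grid](x, out, n, BLOCK_SIZE=1024, APPLY_SCALE=apply_scale)
    return out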

@@ -101,7 +101,11 @@ class XPUPlatform(Platform):
     @classmethod
     def get_punica_wrapper(cls) -> str:
-        return "vllm.lora.punica_wrapper.punica_xpu.PunicaWrapperXPU"
+        xpu_use_triton_kernel = os.getenv("XPU_USE_TRITON_KERNEL", "0") == "1"
+        if not xpu_use_triton_kernel:
+            return "vllm.lora.punica_wrapper.punica_xpu.PunicaWrapperXPU"
+        else:
+            return "vllm.lora.punica_wrapper.punica_gpu.PunicaWrapperGPU"
 
     @classmethod
     def get_device_total_memory(cls, device_id: int = 0) -> int:
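With this change, get_punica_wrapper on XPU is gated by an environment variable: when XPU_USE_TRITON_KERNEL is unset or "0", the XPU-specific PunicaWrapperXPU is kept as before, and setting it to "1" routes LoRA operations through the Triton-based PunicaWrapperGPU path. Below is a hedged usage sketch; only the variable name and the two dotted wrapper paths come from the diff, and the vllm.platforms.xpu import location for XPUPlatform is an assumption.

import os

# Opt into the Triton LoRA kernels; the default "0" keeps PunicaWrapperXPU.
os.environ["XPU_USE_TRITON_KERNEL"] = "1"

# Assumed module path for XPUPlatform; adjust if it differs in your tree.
from vllm.platforms.xpu import XPUPlatform

print(XPUPlatform.get_punica_wrapper())
# Expected with the flag set:
# vllm.lora.punica_wrapper.punica_gpu.PunicaWrapperGPU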