[XPU] Fix crash due to removed VLLM_USE_V1 attribute (#28520)

Signed-off-by: chaojun-zhang <chaojun.zhang@intel.com>
Author: Chaojun Zhang (committed by GitHub), 2025-11-12 18:20:55 +08:00
parent d3ade61e42
commit a4730c1b4f

vllm/platforms/xpu.py

@@ -65,7 +65,6 @@ class XPUPlatform(Platform):
         if use_sparse:
             raise NotImplementedError("Sparse Attention is not supported on XPU.")
-        use_v1 = envs.VLLM_USE_V1
-        if not use_v1:
-            raise ValueError("XPU backend only supports V1.")
         if selected_backend == AttentionBackendEnum.TRITON_ATTN:
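
Note on the removed lines: vllm/envs.py resolves settings through a module-level __getattr__ (PEP 562), so once VLLM_USE_V1 was dropped from its variable table, merely evaluating envs.VLLM_USE_V1 raised AttributeError, which is the crash this commit fixes. Since V1 is the only remaining engine, the guard is dead code and is deleted rather than patched. A minimal, self-contained sketch of the failure mode (the registry layout below is hypothetical, not vLLM's actual envs.py):

# envs_sketch.py -- illustrative stand-in for vllm/envs.py.
_REGISTRY = {"VLLM_DEMO_FLAG": lambda: True}  # hypothetical variable table

def __getattr__(name: str):
    # PEP 562 module-level __getattr__: names missing from the table raise
    # AttributeError, exactly how the stale envs.VLLM_USE_V1 lookup crashed.
    try:
        return _REGISTRY[name]()
    except KeyError:
        raise AttributeError(
            f"module {__name__!r} has no attribute {name!r}"
        ) from None

# In another module:
#   import envs_sketch
#   envs_sketch.VLLM_DEMO_FLAG  # -> True
#   envs_sketch.VLLM_USE_V1     # -> AttributeError, the reported crash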
@@ -115,7 +114,9 @@ class XPUPlatform(Platform):
     @classmethod
     def get_vit_attn_backend(
         cls, head_size: int, dtype: torch.dtype
-    ) -> AttentionBackendEnum:
+    ) -> "AttentionBackendEnum":
+        from vllm.attention.backends.registry import AttentionBackendEnum
+
         return AttentionBackendEnum.FLASH_ATTN
 
     @classmethod
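
The get_vit_attn_backend hunk uses a standard decoupling pattern: quote the return annotation so it is not evaluated while the class body executes, and import AttentionBackendEnum inside the method so the registry module is only loaded at call time. A small self-contained sketch of the same pattern (DemoPlatform and BackendEnum are invented for illustration):

from enum import Enum

class DemoPlatform:
    @classmethod
    def get_vit_attn_backend(cls, head_size: int) -> "BackendEnum":
        # Quoted annotation: stored as a plain string, so BackendEnum need
        # not exist yet when this method is defined. In the real hunk the
        # lazy "from vllm.attention.backends.registry import ..." plays the
        # same role at call time.
        return BackendEnum.FLASH_ATTN

# Defined only after the class; an unquoted "-> BackendEnum" above would
# have raised NameError while the class body was being executed.
class BackendEnum(Enum):
    FLASH_ATTN = "flash_attn"

assert DemoPlatform.get_vit_attn_backend(64) is BackendEnum.FLASH_ATTN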