From 3112271f6e5d50b3d94a2efa88a5a8e77826b897 Mon Sep 17 00:00:00 2001
From: Yan Ma
Date: Mon, 7 Jul 2025 16:38:22 +0800
Subject: [PATCH] [XPU] log clean up for XPU platform (#20553)

Signed-off-by: yan
---
 vllm/_custom_ops.py   | 3 ++-
 vllm/platforms/xpu.py | 5 ++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py
index eb9d0b4058927..92db27f5b8dce 100644
--- a/vllm/_custom_ops.py
+++ b/vllm/_custom_ops.py
@@ -13,7 +13,8 @@ from vllm.scalar_type import ScalarType
 
 logger = init_logger(__name__)
 
-if not current_platform.is_tpu() and not current_platform.is_hpu():
+if not current_platform.is_tpu() and not current_platform.is_hpu()\
+        and not current_platform.is_xpu():
     try:
         import vllm._C
     except ImportError as e:
diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py
index 39828d321edee..e2871c1064926 100644
--- a/vllm/platforms/xpu.py
+++ b/vllm/platforms/xpu.py
@@ -37,7 +37,7 @@ class XPUPlatform(Platform):
                              dtype: torch.dtype, kv_cache_dtype: Optional[str],
                              block_size: int, use_v1: bool,
                              use_mla: bool) -> str:
-        if selected_backend != _Backend.IPEX:
+        if selected_backend is not None and selected_backend != _Backend.IPEX:
             logger.info("Cannot use %s backend on XPU.", selected_backend)
         use_v1 = envs.VLLM_USE_V1
         if not use_v1:
@@ -133,8 +133,7 @@ class XPUPlatform(Platform):
 
     @classmethod
     def is_pin_memory_available(cls):
-        logger.warning("Pin memory is not supported on XPU.")
-        return False
+        return True
 
     @classmethod
     def get_current_memory_usage(cls,
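
Below is a minimal, self-contained sketch (not part of the patch itself) of the platform-guarded import pattern that the _custom_ops.py hunk extends: the compiled vllm._C extension is attempted only on platforms that actually ship it, so XPU now skips the import instead of emitting an ImportError warning at startup. The _FakePlatform class is a hypothetical stand-in used purely for illustration; the real object is vllm.platforms.current_platform.

    # Illustration only: stand-in for vllm.platforms.current_platform.
    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)


    class _FakePlatform:
        """Hypothetical platform object; mirrors the is_*() query methods."""

        def __init__(self, name: str):
            self.name = name

        def is_tpu(self) -> bool:
            return self.name == "tpu"

        def is_hpu(self) -> bool:
            return self.name == "hpu"

        def is_xpu(self) -> bool:
            return self.name == "xpu"


    current_platform = _FakePlatform("xpu")

    # Same shape as the patched check: only try to load the compiled
    # extension on platforms that build vllm._C, so XPU stays quiet here.
    if not current_platform.is_tpu() and not current_platform.is_hpu() \
            and not current_platform.is_xpu():
        try:
            import vllm._C  # noqa: F401
        except ImportError as e:
            logger.warning("Failed to import vllm._C: %r", e)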