From 471ddb99a059bc32486d566e369887a7bc618c14 Mon Sep 17 00:00:00 2001
From: sihao_li <165983188+1643661061leo@users.noreply.github.com>
Date: Wed, 24 Dec 2025 13:34:33 +0800
Subject: [PATCH] [XPU] Remove distributed_executor_backend check (#30760)

Signed-off-by: sihao.li
Co-authored-by: Kunshang Ji
---
 vllm/platforms/xpu.py | 27 ---------------------------
 1 file changed, 27 deletions(-)

diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py
index 2e39a216a10a0..8fecd9e3e65d6 100644
--- a/vllm/platforms/xpu.py
+++ b/vllm/platforms/xpu.py
@@ -7,7 +7,6 @@
 from typing import TYPE_CHECKING, Optional
 
 import torch
 
-import vllm.envs as envs
 from vllm.attention.backends.registry import AttentionBackendEnum
 from vllm.logger import init_logger
@@ -168,32 +167,6 @@ class XPUPlatform(Platform):
         if vllm_config.kv_transfer_config is not None:
             vllm_config.kv_transfer_config.enable_permute_local_kv = True
 
-        if parallel_config.distributed_executor_backend is None:
-            if parallel_config.world_size > 1:
-                parallel_config.distributed_executor_backend = "ray"
-            else:
-                parallel_config.distributed_executor_backend = "uni"
-        elif parallel_config.distributed_executor_backend == "mp":
-            # FIXME(kunshang):
-            # spawn needs calling `if __name__ == '__main__':`
-            # fork is not supported for xpu start new process.
-            if envs.VLLM_WORKER_MULTIPROC_METHOD != "spawn":
-                os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
-                logger.warning(
-                    "Please use spawn as start method if you want to use mp."
-                )
-        elif (
-            parallel_config.distributed_executor_backend != "ray"
-            and parallel_config.distributed_executor_backend != "uni"
-            and parallel_config.distributed_executor_backend != "external_launcher"
-        ):
-            logger.warning(
-                "%s is not supported on XPU, fallback to ray distributed"
-                " executor backend.",
-                parallel_config.distributed_executor_backend,
-            )
-            parallel_config.distributed_executor_backend = "ray"
-
         if model_config and model_config.use_mla:
             logger.info(
                 "MLA is enabled on a non-GPU platform; forcing chunked "