From c40784c7947acc247e857643d1046335b6d547cd Mon Sep 17 00:00:00 2001
From: Ratnam Parikh <114774508+ratnampa@users.noreply.github.com>
Date: Tue, 8 Jul 2025 19:44:23 -0700
Subject: [PATCH] [BugFix][Intel GPU] Use refactored API for dist_backend in V1 worker (#20596)

Signed-off-by: ratnampa
---
 vllm/v1/worker/xpu_worker.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/v1/worker/xpu_worker.py b/vllm/v1/worker/xpu_worker.py
index dc52accfbd390..da271b2159afc 100644
--- a/vllm/v1/worker/xpu_worker.py
+++ b/vllm/v1/worker/xpu_worker.py
@@ -148,11 +148,11 @@ class XPUWorker(Worker):
         os.environ["CCL_ATL_TRANSPORT"] = ENV_CCL_ATL_TRANSPORT
         os.environ["LOCAL_WORLD_SIZE"] = ENV_LOCAL_WORLD_SIZE
         os.environ["LOCAL_RANK"] = str(self.local_rank)
 
-        dist_backend = "ccl"
         init_worker_distributed_environment(self.vllm_config, self.rank,
                                             self.distributed_init_method,
-                                            self.local_rank, dist_backend)
+                                            self.local_rank,
+                                            current_platform.dist_backend)
         # global all_reduce needed for overall oneccl warm up
         torch.distributed.all_reduce(torch.zeros(1).xpu())
 
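
Note (editorial addition, not applied by the patch): the change removes the hard-coded "ccl" backend and instead forwards whatever the active platform object reports. Below is a minimal Python sketch of that pattern; the class, function names, and signatures are simplified stand-ins assumed from the diff, not the actual vLLM APIs.

# Illustrative sketch only. Assumes the platform object exposes a
# `dist_backend` attribute, as the diff's `current_platform.dist_backend` suggests.
from dataclasses import dataclass


@dataclass(frozen=True)
class _Platform:
    """Minimal stand-in for a vLLM platform object."""
    dist_backend: str = "gloo"


# Stand-in for `current_platform` as it might resolve on an Intel GPU machine.
current_platform = _Platform(dist_backend="ccl")


def init_worker_distributed_environment(rank: int, init_method: str,
                                        local_rank: int,
                                        backend: str) -> None:
    # The real helper would pass this backend on to
    # torch.distributed.init_process_group; here we only show which value
    # the worker now forwards.
    print(f"rank={rank} local_rank={local_rank} backend={backend}")


if __name__ == "__main__":
    # The worker no longer hard-codes "ccl"; it uses the platform's value.
    init_worker_distributed_environment(0, "tcp://127.0.0.1:29500", 0,
                                        current_platform.dist_backend)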