diff --git a/vllm/v1/worker/xpu_worker.py b/vllm/v1/worker/xpu_worker.py
index dc52accfbd390..da271b2159afc 100644
--- a/vllm/v1/worker/xpu_worker.py
+++ b/vllm/v1/worker/xpu_worker.py
@@ -148,11 +148,11 @@ class XPUWorker(Worker):
         os.environ["CCL_ATL_TRANSPORT"] = ENV_CCL_ATL_TRANSPORT
         os.environ["LOCAL_WORLD_SIZE"] = ENV_LOCAL_WORLD_SIZE
         os.environ["LOCAL_RANK"] = str(self.local_rank)
-        dist_backend = "ccl"
         init_worker_distributed_environment(self.vllm_config, self.rank,
                                             self.distributed_init_method,
-                                            self.local_rank, dist_backend)
+                                            self.local_rank,
+                                            current_platform.dist_backend)
         # global all_reduce needed for overall oneccl warm up
         torch.distributed.all_reduce(torch.zeros(1).xpu())