[BugFix][Intel GPU] Use refactored API for dist_backend in V1 worker (#20596)

Signed-off-by: ratnampa <ratnam.parikh@intel.com>
Ratnam Parikh 2025-07-08 19:44:23 -07:00 committed by GitHub
parent baed180aa0
commit c40784c794

@@ -148,11 +148,11 @@ class XPUWorker(Worker):
         os.environ["CCL_ATL_TRANSPORT"] = ENV_CCL_ATL_TRANSPORT
         os.environ["LOCAL_WORLD_SIZE"] = ENV_LOCAL_WORLD_SIZE
         os.environ["LOCAL_RANK"] = str(self.local_rank)
-        dist_backend = "ccl"
         init_worker_distributed_environment(self.vllm_config, self.rank,
                                             self.distributed_init_method,
-                                            self.local_rank, dist_backend)
+                                            self.local_rank,
+                                            current_platform.dist_backend)
         # global all_reduce needed for overall oneccl warm up
         torch.distributed.all_reduce(torch.zeros(1).xpu())
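
For context, a minimal sketch (not the vLLM source) of the pattern this commit adopts: the XPU worker stops hard-coding the "ccl" backend string and instead forwards the value exposed by the platform abstraction. The import path and the expected per-platform values here are assumptions inferred from the diff above.

from vllm.platforms import current_platform  # assumed import path for the platform abstraction

def pick_dist_backend() -> str:
    # Before this commit the XPU worker set:  dist_backend = "ccl"
    # After it, the worker forwards the platform-provided value instead.
    return current_platform.dist_backend

if __name__ == "__main__":
    # On an Intel XPU build this is expected to be "ccl"; other platforms
    # (e.g. CUDA) expose their own backend string, so the worker no longer
    # needs a platform-specific literal.
    print(pick_dist_backend())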