From 2f86f710ddeb61d15201c0add5174a4cba3a9ae7 Mon Sep 17 00:00:00 2001
From: Tyler Michael Smith
Date: Fri, 18 Jul 2025 18:48:12 +0000
Subject: [PATCH] Fix precommit

Signed-off-by: Tyler Michael Smith
---
 vllm/distributed/parallel_state.py | 10 ++++++----
 vllm/envs.py                       |  4 ++--
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py
index 10a2ce3e05464..db045f3d1b6cd 100644
--- a/vllm/distributed/parallel_state.py
+++ b/vllm/distributed/parallel_state.py
@@ -222,13 +222,15 @@ class GroupCoordinator:
 
         for ranks in group_ranks:
             device_group = torch.distributed.new_group(
-                ranks, backend=torch_distributed_backend,
+                ranks,
+                backend=torch_distributed_backend,
                 timeout=envs.VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS)
             # a group with `gloo` backend, to allow direct coordination between
             # processes through the CPU.
-            cpu_group = torch.distributed.new_group(ranks,
-                                                    backend="gloo",
-                                                    timeout=envs.VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS)
+            cpu_group = torch.distributed.new_group(
+                ranks,
+                backend="gloo",
+                timeout=envs.VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS)
             if self.rank in ranks:
                 self.ranks = ranks
                 self.world_size = len(ranks)
diff --git a/vllm/envs.py b/vllm/envs.py
index 821a86291f42a..c7a4181a89e94 100755
--- a/vllm/envs.py
+++ b/vllm/envs.py
@@ -507,7 +507,7 @@ environment_variables: dict[str, Callable[[], Any]] = {
     lambda: int(os.getenv("VLLM_IMAGE_FETCH_TIMEOUT", "5")),
 
     # Timeout for torch distributed calls
-    "VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS": 
-    lambda: maybe_convert_int(os.getenv("VLLM_IMAGE_FETCH_TIMEOUT", None)),
+    "VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS":
+    lambda: maybe_convert_int(os.getenv("VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS", None)),
 
     # Timeout for fetching videos when serving multimodal models