Fix precommit

Signed-off-by: Tyler Michael Smith <tysmith@redhat.com>
This commit is contained in:
Tyler Michael Smith 2025-07-18 18:48:12 +00:00
parent feeb17303d
commit 2f86f710dd
2 changed files with 7 additions and 5 deletions

View File

@@ -222,13 +222,15 @@ class GroupCoordinator:
for ranks in group_ranks: for ranks in group_ranks:
device_group = torch.distributed.new_group( device_group = torch.distributed.new_group(
ranks, backend=torch_distributed_backend, ranks,
backend=torch_distributed_backend,
timeout=envs.VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS) timeout=envs.VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS)
# a group with `gloo` backend, to allow direct coordination between # a group with `gloo` backend, to allow direct coordination between
# processes through the CPU. # processes through the CPU.
cpu_group = torch.distributed.new_group(ranks, cpu_group = torch.distributed.new_group(
backend="gloo", ranks,
timeout=envs.VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS) backend="gloo",
timeout=envs.VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS)
if self.rank in ranks: if self.rank in ranks:
self.ranks = ranks self.ranks = ranks
self.world_size = len(ranks) self.world_size = len(ranks)

View File

@@ -507,7 +507,7 @@ environment_variables: dict[str, Callable[[], Any]] = {
lambda: int(os.getenv("VLLM_IMAGE_FETCH_TIMEOUT", "5")), lambda: int(os.getenv("VLLM_IMAGE_FETCH_TIMEOUT", "5")),
# Timeout for torch distributed calls # Timeout for torch distributed calls
"VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS": "VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS":
lambda: maybe_convert_int(os.getenv("VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS", None)), lambda: maybe_convert_int(os.getenv("VLLM_DISTRIBUTED_INIT_TIMEOUT_SECONDS", None)),
# Timeout for fetching videos when serving multimodal models # Timeout for fetching videos when serving multimodal models