From 7de45db9a5b95073c3f99eec75ae510d347d625f Mon Sep 17 00:00:00 2001
From: Ning Xie
Date: Sun, 3 Aug 2025 15:55:20 +0800
Subject: [PATCH] [Misc] update doc comment for send (#22026)

Signed-off-by: Andy Xie
---
 .../device_communicators/base_device_communicator.py       | 2 +-
 vllm/distributed/device_communicators/cuda_communicator.py | 2 +-
 vllm/distributed/parallel_state.py                          | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/vllm/distributed/device_communicators/base_device_communicator.py b/vllm/distributed/device_communicators/base_device_communicator.py
index dc5923cdc5a0d..127a340fc6c6d 100644
--- a/vllm/distributed/device_communicators/base_device_communicator.py
+++ b/vllm/distributed/device_communicators/base_device_communicator.py
@@ -219,7 +219,7 @@ class DeviceCommunicatorBase:
         return output_tensor
 
     def send(self, tensor: torch.Tensor, dst: Optional[int] = None) -> None:
-        """Sends a tensor to the destination rank in a non-blocking way"""
+        """Sends a tensor to the destination rank in a blocking way"""
         """NOTE: `dst` is the local rank of the destination rank."""
         if dst is None:
             dst = (self.rank_in_group + 1) % self.world_size
diff --git a/vllm/distributed/device_communicators/cuda_communicator.py b/vllm/distributed/device_communicators/cuda_communicator.py
index e4804691f0f65..4ab8f3d938fcf 100644
--- a/vllm/distributed/device_communicators/cuda_communicator.py
+++ b/vllm/distributed/device_communicators/cuda_communicator.py
@@ -179,7 +179,7 @@ class CudaCommunicator(DeviceCommunicatorBase):
         return output.movedim(0, dim).contiguous()
 
     def send(self, tensor: torch.Tensor, dst: Optional[int] = None) -> None:
-        """Sends a tensor to the destination rank in a non-blocking way"""
+        """Sends a tensor to the destination rank in a blocking way"""
         """NOTE: `dst` is the local rank of the destination rank."""
         if dst is None:
             dst = (self.rank_in_group + 1) % self.world_size
diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py
index 1f7a14920c418..ee581124db510 100644
--- a/vllm/distributed/parallel_state.py
+++ b/vllm/distributed/parallel_state.py
@@ -782,7 +782,7 @@ class GroupCoordinator:
         torch.distributed.barrier(group=self.cpu_group)
 
     def send(self, tensor: torch.Tensor, dst: Optional[int] = None) -> None:
-        """Sends a tensor to the destination rank in a non-blocking way"""
+        """Sends a tensor to the destination rank in a blocking way"""
        """NOTE: `dst` is the local rank of the destination rank."""
        self.device_communicator.send(tensor, dst)
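
Context for the docstring change: these wrappers ultimately issue point-to-point sends (via torch.distributed or a NCCL communicator), which block the sending rank until the transfer is handed off rather than returning immediately. A minimal sketch with plain torch.distributed, not part of this patch and purely illustrative, assuming the default process group is already initialized with at least two ranks:

import torch
import torch.distributed as dist

def ping_pong(rank: int) -> None:
    # Round-trip a small tensor between rank 0 and rank 1 using blocking
    # point-to-point ops. dist.send() returns only once the tensor buffer
    # is safe to reuse, so the peer rank must post the matching recv() or
    # the sender stalls here.
    payload = torch.ones(4)
    if rank == 0:
        dist.send(payload, dst=1)
        dist.recv(payload, src=1)
    else:
        dist.recv(payload, src=0)
        dist.send(payload, dst=0)

The GroupCoordinator.send/recv pair behaves the same way at the call site, with `dst`/`src` given as local ranks within the group, which is why describing send as blocking is the accurate wording.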