From 2655d7ab830dfa87ba9254d296eca44bb1faab16 Mon Sep 17 00:00:00 2001
From: Tyler Michael Smith
Date: Thu, 25 Sep 2025 11:16:06 -0400
Subject: [PATCH] [Logging] Remove TORCH_NCCL_AVOID_RECORD_STREAMS to squash a warning (#25532)

Signed-off-by: Tyler Michael Smith
Signed-off-by: yewentao256
---
 vllm/v1/worker/gpu_worker.py | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py
index 9082bbfd8f8e6..8c75e8914857b 100644
--- a/vllm/v1/worker/gpu_worker.py
+++ b/vllm/v1/worker/gpu_worker.py
@@ -155,14 +155,6 @@ class Worker(WorkerBase):
 
     def init_device(self):
         if self.device_config.device.type == "cuda":
-            # torch.distributed.all_reduce does not free the input tensor until
-            # the synchronization point. This causes the memory usage to grow
-            # as the number of all_reduce calls increases. This env var disables
-            # this behavior.
-            # Related issue:
-            # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
-            os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"
-
             # This env var set by Ray causes exceptions with graph building.
             os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
             self.device = torch.device(f"cuda:{self.local_rank}")
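
For reference, a minimal sketch of how the CUDA branch of init_device() reads after this patch, reconstructed only from the context lines in the diff above. The Worker class, device_type attribute, and usage below are simplified stand-ins for illustration, not the actual vLLM implementation.

    # Sketch (assumption): simplified stand-in for the patched init_device() path.
    import os

    import torch


    class Worker:
        def __init__(self, device_type: str, local_rank: int):
            # Stand-in for self.device_config.device.type in the real worker.
            self.device_type = device_type
            self.local_rank = local_rank

        def init_device(self) -> None:
            if self.device_type == "cuda":
                # TORCH_NCCL_AVOID_RECORD_STREAMS is no longer set here, which
                # avoids the warning referenced in the patch subject.
                # This env var set by Ray causes exceptions with graph building.
                os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
                self.device = torch.device(f"cuda:{self.local_rank}")


    # Usage: a worker for local rank 0 selects cuda:0.
    worker = Worker(device_type="cuda", local_rank=0)
    worker.init_device()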