From 26c0406555a5e782a4591953389477bcf9695d10 Mon Sep 17 00:00:00 2001
From: Yang Fan
Date: Mon, 21 Apr 2025 18:25:21 +0800
Subject: [PATCH] [Bugfix] Fix distributed bug in Qwen2.5-VL & Qwen2.5-Omni
 (#16907)

---
 vllm/model_executor/models/qwen2_5_vl.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/vllm/model_executor/models/qwen2_5_vl.py b/vllm/model_executor/models/qwen2_5_vl.py
index d5bc3446edb8f..30980316ecfc7 100644
--- a/vllm/model_executor/models/qwen2_5_vl.py
+++ b/vllm/model_executor/models/qwen2_5_vl.py
@@ -198,9 +198,8 @@ class Qwen2_5_VisionMLP(nn.Module):
 
 def all_gather_interleave(local_tensor, hidden_size: int, tp_size: int):
     """All-gather the input tensor interleavely across model parallel group."""
-    import torch.distributed as dist
     gathered_tensors = [torch.zeros_like(local_tensor) for _ in range(tp_size)]
-    dist.all_gather(gathered_tensors, local_tensor)
+    parallel_state.get_tp_group().all_gather(gathered_tensors, local_tensor)
 
     gathered_tensors_split = [
         torch.split(tensor, hidden_size // tp_size, -1)
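
Note (illustrative, not part of the patch): the bug fixed above is that the old
code called torch.distributed's all_gather without a process-group argument,
which defaults to the world group, while the output buffer list is sized for
the tensor-parallel group (tp_size). Whenever the TP group is smaller than the
world (e.g. with data or pipeline parallelism alongside TP), the collective
involves the wrong set of ranks and fails or hangs. The sketch below reproduces
the idea with plain torch.distributed; the world size of 4, TP degree of 2,
gloo backend, and subgroup layout are assumptions for the demo, and the raw
subgroup handle stands in for vLLM's parallel_state.get_tp_group().

    # run with: torchrun --nproc_per_node=4 demo.py
    import torch
    import torch.distributed as dist

    def main() -> None:
        dist.init_process_group(backend="gloo")
        world_size = dist.get_world_size()
        rank = dist.get_rank()
        tp_size = 2  # assumed TP degree; half the world in this demo

        # Build disjoint TP subgroups, e.g. ranks {0,1} and {2,3} for a
        # world of 4. Every rank must call new_group() for every subgroup,
        # including the ones it does not belong to.
        tp_group = None
        for start in range(0, world_size, tp_size):
            ranks = list(range(start, start + tp_size))
            group = dist.new_group(ranks)
            if rank in ranks:
                tp_group = group

        local = torch.full((2,), float(rank))
        gathered = [torch.zeros_like(local) for _ in range(tp_size)]

        # Buggy variant: dist.all_gather(gathered, local) would run on the
        # default (world) group, so len(gathered) == tp_size would not match
        # the number of participating ranks (world_size) when tp_size is
        # smaller than world_size.
        dist.all_gather(gathered, local, group=tp_group)  # fixed: TP subgroup

        # Each rank now holds only its TP peers' tensors, mirroring what the
        # patched all_gather_interleave expects before splitting/interleaving.
        print(f"rank {rank}: {[t.tolist() for t in gathered]}")

    if __name__ == "__main__":
        main()

The patched vLLM code reaches the same end by routing the collective through
parallel_state.get_tp_group(), which carries the correct subgroup instead of
relying on torch.distributed's default world group.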