[TPU] Support multi-host inference (#7457)
This commit is contained in:
parent 16422ea76f
commit a08df8322e
@@ -8,7 +8,7 @@ vLLM supports Google Cloud TPUs using PyTorch XLA.
 Requirements
 ------------
 
-* Google Cloud TPU VM (single host)
+* Google Cloud TPU VM (single & multi host)
 * TPU versions: v5e, v5p, v4
 * Python: 3.10
 
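To illustrate what the updated requirement enables, here is a minimal usage sketch. It assumes a Ray cluster already spans all TPU hosts before vLLM is started; the model name and tensor_parallel_size below are illustrative placeholders, not values taken from this commit.

# Minimal sketch (assumption: a Ray cluster already connects the TPU hosts;
# the model and parallelism values are hypothetical examples).
from vllm import LLM, SamplingParams

# tensor_parallel_size counts all TPU chips across the hosts in the cluster.
llm = LLM(model="meta-llama/Meta-Llama-3-8B", tensor_parallel_size=8)
outputs = llm.generate(["Hello, my name is"], SamplingParams(max_tokens=32))
for out in outputs:
    print(out.outputs[0].text)

From the API side, a multi-host run looks the same as a single-host one; the code change below is what maps each worker to the right TPU chip on its own host.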
@@ -1,3 +1,4 @@
+import ray
 import torch
 import torch.distributed as dist
 from torch.distributed import ProcessGroup
@@ -18,9 +19,15 @@ class TpuCommunicator:
             return
         self.disabled = False
 
-        local_rank = dist.get_rank(group)
-        world_size = dist.get_world_size(group)
-        pjrt.initialize_multiprocess(local_rank, world_size)
+        # NOTE(woosuk): When using TP > 1 on TPUs, every TPU on the same node
+        # must be used together. Therefore, the local rank and world size can
+        # be simply calculated as follows.
+        global_rank = dist.get_rank(group)
+        global_world_size = dist.get_world_size(group)
+        num_nodes = len(ray.nodes())
+        local_world_size = global_world_size // num_nodes
+        local_rank = global_rank % local_world_size
+        pjrt.initialize_multiprocess(local_rank, local_world_size)
         xr._init_world_size_ordinal()
 
     def all_reduce(self, x: torch.Tensor) -> torch.Tensor:
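To make the rank arithmetic concrete, here is a small standalone sketch of the same calculation. The 2-node, 4-chip-per-node topology is a made-up example, and a plain integer stands in for len(ray.nodes()) so the snippet runs without a cluster.

# Standalone sketch of the local-rank derivation above.
# The 2-node x 4-chip topology is a hypothetical example; no Ray needed.
global_world_size = 8          # total TPU chips across all hosts
num_nodes = 2                  # stand-in for len(ray.nodes())
local_world_size = global_world_size // num_nodes   # 4 chips per host

for global_rank in range(global_world_size):
    local_rank = global_rank % local_world_size      # chip index on its host
    node_id = global_rank // local_world_size        # which host it lands on
    print(f"global_rank={global_rank} -> node={node_id}, local_rank={local_rank}")

Because Ray places consecutive global ranks on the same node, ranks 0-3 become local ranks 0-3 on node 0 and ranks 4-7 become local ranks 0-3 on node 1, which is exactly what pjrt.initialize_multiprocess expects per host.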