[TPU] Use Ray for default distributed backend (#8389)

Woosuk Kwon 2024-09-11 20:31:51 -07:00 committed by GitHub
parent f842a7aff1
commit b71c956deb


@@ -869,6 +869,13 @@ class ParallelConfig:
f"distributed executor backend " f"distributed executor backend "
f"'{self.distributed_executor_backend}'.") f"'{self.distributed_executor_backend}'.")
if current_platform.is_tpu() and self.world_size > 1:
if self.distributed_executor_backend is None:
self.distributed_executor_backend = "ray"
if self.distributed_executor_backend != "ray":
raise ValueError(
"TPU backend only supports Ray for distributed inference.")
if self.distributed_executor_backend is None and self.world_size > 1: if self.distributed_executor_backend is None and self.world_size > 1:
# We use multiprocessing by default if world_size fits on the # We use multiprocessing by default if world_size fits on the
# current node and we aren't in a ray placement group. # current node and we aren't in a ray placement group.
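
For readers skimming the diff, below is a minimal, self-contained sketch of the default-then-validate behavior this commit adds. ParallelConfigSketch and its constructor arguments are hypothetical stand-ins for vLLM's ParallelConfig and its platform check; only the two-step logic (default the backend to "ray" on multi-chip TPU, then reject anything else) mirrors the diff:

    from typing import Optional


    class ParallelConfigSketch:
        """Illustrative stand-in for vLLM's ParallelConfig backend check."""

        def __init__(self, world_size: int, is_tpu: bool,
                     distributed_executor_backend: Optional[str] = None):
            self.world_size = world_size
            self.distributed_executor_backend = distributed_executor_backend
            if is_tpu and world_size > 1:
                # Default to Ray on multi-chip TPU, then reject anything else.
                if self.distributed_executor_backend is None:
                    self.distributed_executor_backend = "ray"
                if self.distributed_executor_backend != "ray":
                    raise ValueError("TPU backend only supports Ray for "
                                     "distributed inference.")


    # On TPU with world_size > 1, the backend defaults to "ray".
    cfg = ParallelConfigSketch(world_size=4, is_tpu=True)
    assert cfg.distributed_executor_backend == "ray"

    # An explicit non-Ray backend on multi-chip TPU raises ValueError.
    try:
        ParallelConfigSketch(world_size=4, is_tpu=True,
                             distributed_executor_backend="mp")
    except ValueError as e:
        print(e)

Note the ordering: because the None case is rewritten to "ray" before the inequality check, the ValueError fires only for an explicitly requested non-Ray backend, never for the unset default.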