From 7ea3cd7c3e9fa1db06cdf8ad1973237b061b7d64 Mon Sep 17 00:00:00 2001
From: Mengqing Cao
Date: Mon, 25 Nov 2024 13:14:56 +0800
Subject: [PATCH] [Refactor][MISC] del redundant code in ParallelConfig.postinit (#10614)

Signed-off-by: MengqingCao
---
 vllm/config.py | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/vllm/config.py b/vllm/config.py
index 4ea56a14cabba..dcdaf58b5ccdb 100644
--- a/vllm/config.py
+++ b/vllm/config.py
@@ -998,20 +998,15 @@ class ParallelConfig:
                 raise ValueError(f"worker-use-ray can't be used with "
                                  f"distributed executor backend "
                                  f"'{self.distributed_executor_backend}'.")
-
-        if current_platform.is_tpu() and self.world_size > 1:
+        ray_only_devices = ["tpu", "hpu"]
+        if (current_platform.device_type in ray_only_devices
+                and self.world_size > 1):
             if self.distributed_executor_backend is None:
                 self.distributed_executor_backend = "ray"
             if self.distributed_executor_backend != "ray":
                 raise ValueError(
-                    "TPU backend only supports Ray for distributed inference.")
-
-        if current_platform.is_hpu() and self.world_size > 1:
-            if self.distributed_executor_backend is None:
-                self.distributed_executor_backend = "ray"
-            if self.distributed_executor_backend != "ray":
-                raise ValueError(
-                    "HPU backend only supports Ray for distributed inference.")
+                    f"{current_platform.device_type.upper()} backend only "
+                    "supports Ray for distributed inference.")
 
         if self.distributed_executor_backend is None and self.world_size > 1:
             # We use multiprocessing by default if world_size fits on the
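
For reference, below is a minimal standalone sketch of the consolidated check, with the platform object stubbed out so it runs without vLLM installed. The Platform dataclass and check_distributed_backend helper are illustrative stand-ins, not part of vLLM's API; only the ray_only_devices logic mirrors the patch.

    # Standalone sketch of the refactored ParallelConfig check (assumptions:
    # Platform and check_distributed_backend are hypothetical stand-ins for
    # vllm.platforms.current_platform and ParallelConfig.__post_init__).
    from dataclasses import dataclass
    from typing import Optional


    @dataclass
    class Platform:
        # device_type mirrors the string values the patch compares against
        # ("tpu", "hpu", "cuda", ...).
        device_type: str


    def check_distributed_backend(platform: Platform,
                                  world_size: int,
                                  backend: Optional[str]) -> Optional[str]:
        """TPU and HPU only support Ray for distributed inference:
        default to it when unset, reject anything else."""
        ray_only_devices = ["tpu", "hpu"]
        if platform.device_type in ray_only_devices and world_size > 1:
            if backend is None:
                backend = "ray"
            if backend != "ray":
                raise ValueError(
                    f"{platform.device_type.upper()} backend only "
                    "supports Ray for distributed inference.")
        return backend


    # Usage: an unset backend on TPU defaults to "ray"; an explicit
    # non-Ray backend on HPU raises.
    assert check_distributed_backend(Platform("tpu"), 2, None) == "ray"
    try:
        check_distributed_backend(Platform("hpu"), 4, "mp")
    except ValueError as e:
        print(e)  # HPU backend only supports Ray for distributed inference.

A note on the design choice visible in the diff: keying on the device_type string instead of per-platform predicates (is_tpu(), is_hpu()) collapses two duplicated branches into one, and supporting another Ray-only device becomes a one-entry change to the ray_only_devices list rather than a copied block.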