Mirror of https://git.datalinker.icu/vllm-project/vllm.git (synced 2026-01-19 01:14:31 +08:00)
Make vllm compatible with verl (#12824)
Co-authored-by: zhangshulai <zhangshulai@bytedance.com>
parent ef533d25fb
commit 433c4a4923
@@ -1024,13 +1024,6 @@ def initialize_model_parallel(
     backend = backend or torch.distributed.get_backend(
         get_world_group().device_group)

-    if (world_size
-            != tensor_model_parallel_size * pipeline_model_parallel_size):
-        raise RuntimeError(
-            f"world_size ({world_size}) is not equal to "
-            f"tensor_model_parallel_size ({tensor_model_parallel_size}) x "
-            f"pipeline_model_parallel_size ({pipeline_model_parallel_size})")
-
     # Build the tensor model-parallel groups.
     num_tensor_model_parallel_groups: int = (world_size //
                                              tensor_model_parallel_size)
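The hunk above drops the strict requirement that world_size equal tensor_model_parallel_size * pipeline_model_parallel_size, presumably so that an external launcher such as verl can drive vLLM from a larger process group. As a rough illustration of the grouping arithmetic that the remaining context lines perform, here is a minimal standalone sketch; it is not vLLM code, the helper name sketch_tp_groups is made up, and the divisibility assert is an illustrative assumption rather than something this commit adds. Every tensor_model_parallel_size consecutive ranks form one tensor-parallel group.

    # Standalone sketch; not part of the vLLM source tree.
    def sketch_tp_groups(world_size: int,
                         tensor_model_parallel_size: int,
                         pipeline_model_parallel_size: int) -> list[list[int]]:
        # Illustrative assumption: world_size is still a multiple of TP * PP.
        assert world_size % (tensor_model_parallel_size *
                             pipeline_model_parallel_size) == 0
        num_tensor_model_parallel_groups = world_size // tensor_model_parallel_size
        # Each group is a block of consecutive ranks of width TP.
        return [
            list(range(i * tensor_model_parallel_size,
                       (i + 1) * tensor_model_parallel_size))
            for i in range(num_tensor_model_parallel_groups)
        ]

    # Example: 8 ranks, TP=4, PP=2 -> [[0, 1, 2, 3], [4, 5, 6, 7]]
    print(sketch_tp_groups(8, 4, 2))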
@@ -101,7 +101,7 @@ class ExecutorWithExternalLauncher(UniProcExecutor):
         # - MASTER_PORT
         distributed_init_method = "env://"
         rank = int(os.environ["RANK"])
-        local_rank = rank
+        local_rank = int(os.environ["LOCAL_RANK"])
         is_driver_worker = True
         kwargs = dict(
             vllm_config=self.vllm_config,
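For context on the second hunk: ExecutorWithExternalLauncher leaves process-group setup to whoever started the processes, so the worker reads its identity back from the environment and hands torch.distributed the "env://" rendezvous. Below is a minimal standalone sketch of that contract, not vLLM code, under the assumptions of a single process on one node, the gloo backend, and variables exported by hand instead of by torchrun or verl's launcher.

    # Standalone sketch of the env:// rendezvous contract; not vLLM code.
    import os

    import torch.distributed as dist

    # An external launcher (torchrun, or the framework driving vLLM) would
    # normally export these before spawning the worker processes.
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    os.environ.setdefault("RANK", "0")
    os.environ.setdefault("WORLD_SIZE", "1")
    os.environ.setdefault("LOCAL_RANK", "0")

    rank = int(os.environ["RANK"])               # global rank
    local_rank = int(os.environ["LOCAL_RANK"])   # device index on this node

    # "env://" tells torch.distributed to read MASTER_ADDR/PORT, RANK and
    # WORLD_SIZE from the environment instead of an explicit TCP address.
    dist.init_process_group(backend="gloo", init_method="env://")
    print(f"rank={rank}, local_rank={local_rank}, world={dist.get_world_size()}")
    dist.destroy_process_group()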