Make vllm compatible with verl (#12824)

Co-authored-by: zhangshulai <zhangshulai@bytedance.com>
Author: ZSL98, 2025-02-07 11:54:20 +08:00, committed by GitHub
parent ef533d25fb
commit 433c4a4923
2 changed files with 1 addition and 8 deletions

vllm/distributed/parallel_state.py

@@ -1024,13 +1024,6 @@ def initialize_model_parallel(
     backend = backend or torch.distributed.get_backend(
         get_world_group().device_group)
-
-    if (world_size
-            != tensor_model_parallel_size * pipeline_model_parallel_size):
-        raise RuntimeError(
-            f"world_size ({world_size}) is not equal to "
-            f"tensor_model_parallel_size ({tensor_model_parallel_size}) x "
-            f"pipeline_model_parallel_size ({pipeline_model_parallel_size})")
 
     # Build the tensor model-parallel groups.
     num_tensor_model_parallel_groups: int = (world_size //
                                              tensor_model_parallel_size)
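
Note: a minimal sketch (not vllm code; all numbers are hypothetical) of why this strict equality check is removed. When vllm is driven by an external launcher such as verl, torch.distributed is initialized by the trainer, so world_size spans every training process, while tensor_model_parallel_size x pipeline_model_parallel_size describes only a single engine's slice of it. The equality then no longer holds, but a weaker even-tiling invariant still does:

    # Sketch: verl-style external launch where one trainer world hosts
    # several vllm engine replicas. All numbers are made up.
    world_size = 16                    # every process started by the trainer
    tensor_model_parallel_size = 4     # TP degree of a single vllm engine
    pipeline_model_parallel_size = 1   # PP degree of a single vllm engine

    engine_size = tensor_model_parallel_size * pipeline_model_parallel_size

    # The removed check would have rejected this legitimate setup:
    assert world_size != engine_size

    # The weaker invariant that still has to hold: the engine replicas
    # tile the launcher's world evenly.
    assert world_size % engine_size == 0
    num_engine_replicas = world_size // engine_size
    print(f"{num_engine_replicas} engine replicas share a world of {world_size}")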

vllm/executor/uniproc_executor.py

@@ -101,7 +101,7 @@ class ExecutorWithExternalLauncher(UniProcExecutor):
         # - MASTER_PORT
         distributed_init_method = "env://"
         rank = int(os.environ["RANK"])
-        local_rank = rank
+        local_rank = int(os.environ["LOCAL_RANK"])
         is_driver_worker = True
         kwargs = dict(
             vllm_config=self.vllm_config,
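
Note: a minimal sketch (hypothetical values, not vllm code) of why local_rank must be read from LOCAL_RANK instead of reusing the global RANK. torchrun-compatible launchers export both variables; on every node after the first the two differ, and the old local_rank = rank would select a GPU index the node does not have:

    import os

    # Pretend to be process 5 of a 2-node x 4-GPU job whose environment
    # was set up by an external launcher such as torchrun.
    os.environ["RANK"] = "5"        # global rank, counted across both nodes
    os.environ["LOCAL_RANK"] = "1"  # position on this node: its second GPU

    rank = int(os.environ["RANK"])
    local_rank = int(os.environ["LOCAL_RANK"])   # the fixed behaviour

    assert 0 <= local_rank < 4, "a 4-GPU node only has device indices 0..3"
    # The old behaviour, local_rank = rank, would yield 5 here and point at
    # a CUDA device index that does not exist on this node.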