[BugFix] [DP/EP] Fix slow execution when BS <= DP (#25407)
Signed-off-by: Matthew Bonanni <mbonanni@redhat.com>
Co-authored-by: Robert Shaw <114415538+robertgshaw2-redhat@users.noreply.github.com>
Co-authored-by: Chris Bamford <chrisbam4d@gmail.com>
Signed-off-by: yewentao256 <zhyanwentao@126.com>
parent 864bbe36f0
commit 25dd155e60
--- a/vllm/v1/worker/gpu_model_runner.py
+++ b/vllm/v1/worker/gpu_model_runner.py
@@ -55,7 +55,7 @@ from vllm.sampling_params import SamplingType
 from vllm.sequence import IntermediateTensors, PoolerOutput
 from vllm.tasks import GenerationTask, PoolingTask, SupportedTask
 from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler,
-                        GiB_bytes, check_use_alibi, get_dtype_size,
+                        GiB_bytes, cdiv, check_use_alibi, get_dtype_size,
                         is_pin_memory_available,
                         length_from_prompt_token_ids_or_embeds, round_up,
                         supports_dynamo)
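For reference, cdiv is ceiling division from vllm.utils, which the next hunk relies on. A minimal standalone equivalent (a sketch, not necessarily the library's exact source):

    # Ceiling division on non-negative ints: cdiv(9, 4) == 3, while 9 // 4 == 2.
    def cdiv(a: int, b: int) -> int:
        # Equivalent to math.ceil(a / b), but exact integer arithmetic.
        return -(a // -b)

    assert cdiv(9, 4) == 3
    assert cdiv(8, 4) == 2
    assert cdiv(1, 4) == 1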
--- a/vllm/v1/worker/gpu_model_runner.py
+++ b/vllm/v1/worker/gpu_model_runner.py
@@ -2913,12 +2913,13 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin):
             # Note: Overriding max_query_len to be the prefill tokens
             max_query_len = num_prefill_tokens
         elif uniform_decode:
-            num_reqs = num_tokens // max_query_len
             assert not create_mixed_batch
+            num_reqs = cdiv(num_tokens, max_query_len)
             assert num_reqs <= max_num_reqs, \
                 "Do not capture num_reqs > max_num_reqs for uniform batch"
             num_scheduled_tokens_list = [max_query_len] * num_reqs
             if num_tokens % max_query_len != 0:
-                num_scheduled_tokens_list[-1] += num_tokens % max_query_len
+                num_scheduled_tokens_list[-1] = num_tokens % max_query_len
         else:
             num_reqs = min(num_tokens, max_num_reqs)
             min_tokens_per_req = num_tokens // num_reqs
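Why this resizing change matters: with num_tokens = 9 and max_query_len = 4, the old floor division yielded two requests of [4, 4] and then folded the remainder into the last one, producing [4, 5] and breaking the invariant that every request in a uniform decode batch has at most max_query_len tokens. A standalone sketch contrasting the two behaviors (illustrative values, not vLLM code):

    def old_schedule(num_tokens: int, max_query_len: int) -> list[int]:
        num_reqs = num_tokens // max_query_len        # 9 // 4 == 2
        sched = [max_query_len] * num_reqs            # [4, 4]
        if num_tokens % max_query_len != 0:
            sched[-1] += num_tokens % max_query_len   # [4, 5]: last request too long
        return sched

    def new_schedule(num_tokens: int, max_query_len: int) -> list[int]:
        num_reqs = -(num_tokens // -max_query_len)    # cdiv(9, 4) == 3
        sched = [max_query_len] * num_reqs            # [4, 4, 4]
        if num_tokens % max_query_len != 0:
            sched[-1] = num_tokens % max_query_len    # [4, 4, 1]: all <= max_query_len
        return sched

    assert old_schedule(9, 4) == [4, 5]
    assert new_schedule(9, 4) == [4, 4, 1]
    assert sum(new_schedule(9, 4)) == 9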
--- a/vllm/v1/worker/gpu_worker.py
+++ b/vllm/v1/worker/gpu_worker.py
@@ -487,7 +487,7 @@ class Worker(WorkerBase):
                 sort_by="self_cuda_time_total"))
 
     def execute_dummy_batch(self) -> None:
-        self.model_runner._dummy_run(1)
+        self.model_runner._dummy_run(1, uniform_decode=True)
 
     def add_lora(self, lora_request: LoRARequest) -> bool:
        return self.model_runner.add_lora(lora_request)
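The worker change closes the loop on the "BS <= DP" slowdown: under data parallelism, ranks that receive no real requests still execute a dummy batch, and a 1-token dummy batch not flagged as uniform decode need not match any captured decode cudagraph shape, forcing slow eager execution. A hypothetical sketch of the dispatch effect (names and captured sizes are illustrative, not vLLM's internal API):

    # Assumed set of batch sizes for which decode cudagraphs were captured
    # at startup; at runtime a matching batch replays the graph.
    CAPTURED_DECODE_SIZES = {1, 2, 4, 8}

    def dispatch(num_tokens: int, uniform_decode: bool) -> str:
        if uniform_decode and num_tokens in CAPTURED_DECODE_SIZES:
            return "replay decode cudagraph"   # fast path taken after the fix
        return "run eager"                     # slow path the old dummy run hit

    # An idle DP rank executing its 1-token dummy batch:
    assert dispatch(1, uniform_decode=False) == "run eager"
    assert dispatch(1, uniform_decode=True) == "replay decode cudagraph"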