mirror of
https://git.datalinker.icu/vllm-project/vllm.git
synced 2025-12-09 12:15:40 +08:00
[Bugfix] Fix another incorrect MM data format in vllm bench throughput (#26462)
Signed-off-by: Huy Do <huydhn@gmail.com>
This commit is contained in:
parent
bb6d8c21f9
commit
8bd696fa53
@@ -59,16 +59,16 @@ def run_vllm(
|
||||
prompts: list[Union[TextPrompt, TokensPrompt]] = []
|
||||
sampling_params: list[SamplingParams] = []
|
||||
for request in requests:
|
||||
prompts.append(
|
||||
TokensPrompt(
|
||||
prompt_token_ids=request.prompt["prompt_token_ids"],
|
||||
multi_modal_data=request.multi_modal_data,
|
||||
)
|
||||
prompt = (
|
||||
TokensPrompt(prompt_token_ids=request.prompt["prompt_token_ids"])
|
||||
if "prompt_token_ids" in request.prompt
|
||||
else TextPrompt(
|
||||
prompt=request.prompt, multi_modal_data=request.multi_modal_data
|
||||
)
|
||||
else TextPrompt(prompt=request.prompt)
|
||||
)
|
||||
if request.multi_modal_data:
|
||||
assert isinstance(request.multi_modal_data, dict)
|
||||
prompt["multi_modal_data"] = request.multi_modal_data
|
||||
prompts.append(prompt)
|
||||
|
||||
sampling_params.append(
|
||||
SamplingParams(
|
||||
n=n,
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user