[Bugfix] Incorrect MM data format in vllm bench throughput (#26395)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Author: Cyrus Leung (committed by GitHub)
Date:   2025-10-08 13:52:19 +08:00
Commit: 0d4f48fa10 (parent 127c8b782a)


@@ -201,16 +201,16 @@ async def run_vllm_async(
     sampling_params: list[SamplingParams] = []
     lora_requests: list[Optional[LoRARequest]] = []
     for request in requests:
-        prompts.append(
-            TokensPrompt(
-                prompt_token_ids=request.prompt["prompt_token_ids"],
-                multi_modal_data=request.multi_modal_data,
-            )
+        prompt = (
+            TokensPrompt(prompt_token_ids=request.prompt["prompt_token_ids"])
             if "prompt_token_ids" in request.prompt
-            else TextPrompt(
-                prompt=request.prompt, multi_modal_data=request.multi_modal_data
-            )
+            else TextPrompt(prompt=request.prompt)
         )
+        if request.multi_modal_data:
+            assert isinstance(request.multi_modal_data, dict)
+            prompt["multi_modal_data"] = request.multi_modal_data
+        prompts.append(prompt)
+
         sampling_params.append(
             SamplingParams(
                 n=n,
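
The gist of the fix, as the diff shows: multi_modal_data used to be passed as a constructor field on both prompt types regardless of whether the request carried any; it is now attached via item assignment only when present, with an assert that it is a dict. Below is a minimal runnable sketch of the corrected construction logic. It uses simplified stand-in TypedDicts rather than importing vLLM's real TextPrompt/TokensPrompt, and the build_prompt helper and its parameters are illustrative, not part of the commit.

    from typing import Optional, TypedDict, Union


    class TextPrompt(TypedDict, total=False):
        # Stand-in for vllm.inputs.TextPrompt; in vLLM this is a TypedDict,
        # which is why the fix can attach multi_modal_data by item assignment.
        prompt: str
        multi_modal_data: dict


    class TokensPrompt(TypedDict, total=False):
        # Stand-in for vllm.inputs.TokensPrompt.
        prompt_token_ids: list[int]
        multi_modal_data: dict


    def build_prompt(
        request_prompt: Union[str, dict],
        multi_modal_data: Optional[dict],
    ) -> Union[TextPrompt, TokensPrompt]:
        # Mirrors the fixed loop body: choose the prompt type first...
        prompt: Union[TextPrompt, TokensPrompt]
        if isinstance(request_prompt, dict) and "prompt_token_ids" in request_prompt:
            prompt = TokensPrompt(prompt_token_ids=request_prompt["prompt_token_ids"])
        else:
            prompt = TextPrompt(prompt=request_prompt)
        # ...then attach multi_modal_data only when the request has any,
        # asserting it is already a plain dict.
        if multi_modal_data:
            assert isinstance(multi_modal_data, dict)
            prompt["multi_modal_data"] = multi_modal_data
        return prompt


    # Text-only request: no multi_modal_data key appears at all.
    print(build_prompt("Describe the image.", None))
    # Token-ids request with image data: the key is attached.
    print(build_prompt({"prompt_token_ids": [1, 2, 3]}, {"image": "img.png"}))

With the old code, a text-only request would still have produced a prompt dict carrying a multi_modal_data entry (possibly None), which is what the commit title flags as the incorrect format.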