Mirror of https://git.datalinker.icu/vllm-project/vllm.git (synced 2025-12-16 02:05:01 +08:00)
[CI] Fix pre commit issue (#20782)
Signed-off-by: yewentao256 <zhyanwentao@126.com>
parent d6902ce79f
commit 299252ea82
@@ -216,8 +216,8 @@ class ServingScores(OpenAIServing):
            # cross_encoder models defaults to using pad_token.
            tokenized_prompts = await asyncio.gather(*(
                tokenize_async(
                    text=t1,  # type: ignore[arg-type]
                    text_pair=t2,  # type: ignore[arg-type]
                    **tokenization_kwargs) for t1, t2 in input_pairs))
        else:
            # `llm as reranker` models defaults to not using pad_token.
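For context, the hunk above tokenizes (text, text_pair) inputs concurrently for cross-encoder scoring. The following is a minimal, self-contained sketch of that pattern, assuming a Hugging Face tokenizer; the tokenize_pairs helper and the run_in_executor wrapping are illustrative assumptions, not vLLM's actual ServingScores implementation.

# Illustrative sketch only: concurrent tokenization of (text, text_pair) pairs,
# in the spirit of the asyncio.gather call in the hunk above. Not vLLM code.
import asyncio
from functools import partial

from transformers import AutoTokenizer  # assumption: a Hugging Face tokenizer


async def tokenize_pairs(tokenizer, input_pairs, **tokenization_kwargs):
    """Tokenize each (t1, t2) pair off the event loop and gather the results."""
    loop = asyncio.get_running_loop()

    def tokenize_async(**kwargs):
        # The tokenizer call is blocking, so run it in the default executor.
        return loop.run_in_executor(None, partial(tokenizer, **kwargs))

    return await asyncio.gather(*(
        tokenize_async(text=t1, text_pair=t2, **tokenization_kwargs)
        for t1, t2 in input_pairs))


if __name__ == "__main__":
    # Cross-encoder models encode each pair jointly, so padding and truncation
    # apply to the combined (query, document) sequence.
    tok = AutoTokenizer.from_pretrained("cross-encoder/ms-marco-MiniLM-L-6-v2")
    pairs = [("what is vllm?", "vLLM is a high-throughput LLM serving engine.")]
    print(asyncio.run(tokenize_pairs(tok, pairs, truncation=True)))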