Remove token-adding chat embedding params (#10551)

Signed-off-by: Noam Gat <noamgat@gmail.com>
Noam Gat 2024-11-22 09:59:47 +02:00 committed by GitHub
parent b6374e09b0
commit 11fcf0e066
2 changed files with 4 additions and 18 deletions

vllm/entrypoints/openai/protocol.py

@@ -760,22 +760,6 @@ class EmbeddingChatRequest(OpenAIBaseModel):
     # doc: end-chat-embedding-pooling-params

     # doc: begin-chat-embedding-extra-params
-    add_generation_prompt: bool = Field(
-        default=True,
-        description=
-        ("If true, the generation prompt will be added to the chat template. "
-         "This is a parameter used by chat template in tokenizer config of the "
-         "model."),
-    )
-    continue_final_message: bool = Field(
-        default=False,
-        description=
-        ("If this is set, the chat will be formatted so that the final "
-         "message in the chat is open-ended, without any EOS tokens. The "
-         "model will continue this message rather than starting a new one. "
-         "This allows you to \"prefill\" part of the model's response for it. "
-         "Cannot be used at the same time as `add_generation_prompt`."),
-    )
     add_special_tokens: bool = Field(
         default=False,
         description=(

vllm/entrypoints/openai/serving_embedding.py

@@ -148,8 +148,10 @@ class OpenAIServingEmbedding(OpenAIServing):
                 chat_template=request.chat_template or self.chat_template,
                 chat_template_content_format=self.
                 chat_template_content_format,
-                add_generation_prompt=request.add_generation_prompt,
-                continue_final_message=request.continue_final_message,
+                # In embedding requests, we are not generating tokens,
+                # so there is no need to append extra tokens to the input.
+                add_generation_prompt=False,
+                continue_final_message=False,
                 truncate_prompt_tokens=truncate_prompt_tokens,
                 add_special_tokens=request.add_special_tokens,
             )
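
For context on why these flags never belonged in embedding requests, here is a minimal sketch, assuming the Hugging Face transformers apply_chat_template API (which vLLM's chat templating builds on); the model name is an illustrative choice, not taken from this commit.

# Sketch (not part of this commit): what add_generation_prompt actually
# appends. The model is an arbitrary ungated example.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")
messages = [{"role": "user", "content": "Embed this sentence."}]

# With the flag on, the template appends an empty assistant-turn header,
# i.e. extra tokens that cue a model to start generating.
with_prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True)

# With the flag off, the rendered prompt ends after the user turn, which
# is the text an embedding request actually wants to encode.
without_prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=False)

print(with_prompt)
print(without_prompt)

Since embeddings involve no generation, those trailing assistant-header tokens (and likewise an open-ended final message from continue_final_message) would only dilute the embedding of the user content, so the serving path now hardcodes both flags to False.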