From a30c093502f0a671969ab561aacc80bc430f8ed6 Mon Sep 17 00:00:00 2001
From: Roger Wang <136131678+ywang96@users.noreply.github.com>
Date: Thu, 20 Feb 2025 22:04:33 -0800
Subject: [PATCH] [Bugfix] Add `mm_processor_kwargs` to chat-related protocols
 (#13644)

---
 vllm/entrypoints/openai/protocol.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py
index 98ea6a46133ff..29f64d28bdf1c 100644
--- a/vllm/entrypoints/openai/protocol.py
+++ b/vllm/entrypoints/openai/protocol.py
@@ -974,6 +974,10 @@ class EmbeddingChatRequest(OpenAIBaseModel):
         description=("Additional kwargs to pass to the template renderer. "
                      "Will be accessible by the chat template."),
     )
+    mm_processor_kwargs: Optional[Dict[str, Any]] = Field(
+        default=None,
+        description=("Additional kwargs to pass to the HF processor."),
+    )
     priority: int = Field(
         default=0,
         description=(
@@ -1394,6 +1398,10 @@ class TokenizeChatRequest(OpenAIBaseModel):
         description=("Additional kwargs to pass to the template renderer. "
                      "Will be accessible by the chat template."),
     )
+    mm_processor_kwargs: Optional[Dict[str, Any]] = Field(
+        default=None,
+        description=("Additional kwargs to pass to the HF processor."),
+    )
 
     @model_validator(mode="before")
     @classmethod
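
The sketch below illustrates how a client might exercise the new `mm_processor_kwargs` field on the tokenize chat endpoint once this patch is applied. The server URL, model name, and the example processor kwarg (`num_crops`) are assumptions for illustration only; substitute values appropriate to your deployment and to the HF processor backing your model.

```python
# Minimal usage sketch (assumes a vLLM OpenAI-compatible server on localhost:8000
# and a multimodal model whose HF processor accepts a "num_crops" kwarg).
import requests

payload = {
    "model": "my-multimodal-model",  # assumed model name
    "messages": [
        {"role": "user", "content": "Describe this image."},
    ],
    # New field added by this patch: extra kwargs forwarded to the HF processor.
    "mm_processor_kwargs": {"num_crops": 4},  # example kwarg, processor-specific
}

resp = requests.post("http://localhost:8000/tokenize", json=payload)
resp.raise_for_status()
print(resp.json())  # e.g. token count and token IDs for the rendered chat
```

The same field is accepted by the embeddings chat request, so an analogous payload posted to the embeddings endpoint would carry `mm_processor_kwargs` through to the HF processor as well.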