From 8c87a9ad46dd8b972d4cd9c6cecb5b284c92f583 Mon Sep 17 00:00:00 2001
From: Chauncey
Date: Wed, 23 Apr 2025 15:24:09 +0800
Subject: [PATCH] [Bugfix] Fix AssertionError: skip_special_tokens=False is not supported for Mistral tokenizers (#16964)

Signed-off-by: chaunceyjiang
---
 .../openai/tool_parsers/mistral_tool_parser.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py
index bff6cb79ad536..f0000daa0a41c 100644
--- a/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py
+++ b/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py
@@ -72,10 +72,14 @@ class MistralToolParser(ToolParser):
 
     def adjust_request(
             self, request: ChatCompletionRequest) -> ChatCompletionRequest:
-        if request.tools and request.tool_choice != 'none':
-            # do not skip special tokens because mistral uses the special
-            # tokens to indicate the start and end of the tool calls
-            # information.
+        if not isinstance(
+                self.model_tokenizer, MistralTokenizer
+        ) and request.tools and request.tool_choice != 'none':
+            # Do not skip special tokens when using chat template
+            # with Mistral parser as TOOL_CALL token is needed
+            # for tool detection.
+            # Note: we don't want skip_special_tokens=False
+            # with MistralTokenizer as it is incompatible
             request.skip_special_tokens = False
         return request
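
For reference, a minimal, self-contained sketch of how the guarded logic reads after this patch. The class scaffolding and import paths below are assumptions for illustration (the real method lives on vllm's MistralToolParser); only the body of adjust_request mirrors the diff above.

```python
# Sketch only: a stripped-down stand-in for vllm's MistralToolParser,
# showing the adjust_request guard introduced by this patch.
from vllm.entrypoints.openai.protocol import ChatCompletionRequest
from vllm.transformers_utils.tokenizers import MistralTokenizer


class MistralToolParserSketch:

    def __init__(self, tokenizer):
        # The real parser stores the model tokenizer here.
        self.model_tokenizer = tokenizer

    def adjust_request(
            self, request: ChatCompletionRequest) -> ChatCompletionRequest:
        # Only keep special tokens for non-Mistral tokenizers: the chat
        # template path needs the TOOL_CALL special token in the output to
        # detect tool calls, while MistralTokenizer raises an AssertionError
        # when skip_special_tokens=False is requested.
        if not isinstance(
                self.model_tokenizer, MistralTokenizer
        ) and request.tools and request.tool_choice != 'none':
            request.skip_special_tokens = False
        return request
```

The design choice, as described in the patched comments: the isinstance check keeps the previous behavior for Hugging Face style tokenizers (which need the special tokens for tool detection) while leaving skip_special_tokens untouched for MistralTokenizer, which does not support disabling it.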