Support LongCat-Flash-Chat tool call (#24083)

Signed-off-by: 许文卿 <xwq391974@alibaba-inc.com>
Signed-off-by: yewentao256 <zhyanwentao@126.com>
This commit is contained in:
Xu Wenqing 2025-09-26 17:25:39 +08:00 committed by yewentao256
parent fa55373af1
commit ced693e845
3 changed files with 50 additions and 0 deletions

View File

@ -310,6 +310,15 @@ Flags:
* For non-reasoning: `--tool-call-parser hunyuan_a13b`
* For reasoning: `--tool-call-parser hunyuan_a13b --reasoning-parser hunyuan_a13b --enable_reasoning`
### LongCat-Flash-Chat Models (`longcat`)
Supported models:
* `meituan-longcat/LongCat-Flash-Chat`
* `meituan-longcat/LongCat-Flash-Chat-FP8`
Flags: `--tool-call-parser longcat`
### GLM-4.5 Models (`glm45`)
Supported models:

View File

@ -14,6 +14,7 @@ from .jamba_tool_parser import JambaToolParser
from .kimi_k2_tool_parser import KimiK2ToolParser
from .llama4_pythonic_tool_parser import Llama4PythonicToolParser
from .llama_tool_parser import Llama3JsonToolParser
from .longcat_tool_parser import LongcatFlashToolParser
from .minimax_tool_parser import MinimaxToolParser
from .mistral_tool_parser import MistralToolParser
from .openai_tool_parser import OpenAIToolParser
@ -36,6 +37,7 @@ __all__ = [
"Llama3JsonToolParser",
"JambaToolParser",
"Llama4PythonicToolParser",
"LongcatFlashToolParser",
"PythonicToolParser",
"Phi4MiniJsonToolParser",
"DeepSeekV3ToolParser",

View File

@ -0,0 +1,39 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import regex as re
from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import (
ToolParserManager)
from vllm.entrypoints.openai.tool_parsers.hermes_tool_parser import (
Hermes2ProToolParser)
from vllm.transformers_utils.tokenizer import AnyTokenizer
@ToolParserManager.register_module("longcat")
class LongcatFlashToolParser(Hermes2ProToolParser):
    """Tool-call parser for LongCat-Flash-Chat models.

    Reuses the Hermes-2-Pro parsing logic unchanged, swapping in the
    LongCat-specific ``<longcat_tool_call>`` delimiter tokens.
    """

    def __init__(self, tokenizer: AnyTokenizer):
        super().__init__(tokenizer)

        # LongCat wraps each tool call in its own delimiter pair.
        self.tool_call_start_token: str = "<longcat_tool_call>"
        self.tool_call_end_token: str = "</longcat_tool_call>"

        # First alternative: a fully delimited call; second: a call whose
        # closing tag has not arrived yet (mid-stream output).
        self.tool_call_regex = re.compile(
            r"<longcat_tool_call>(.*?)</longcat_tool_call>|<longcat_tool_call>(.*)",
            re.DOTALL)

        encode = self.model_tokenizer.encode
        decode = self.model_tokenizer.decode

        # Token-id forms of the delimiters (no special tokens added).
        self.tool_call_start_token_ids = encode(self.tool_call_start_token,
                                                add_special_tokens=False)
        self.tool_call_end_token_ids = encode(self.tool_call_end_token,
                                              add_special_tokens=False)

        # Per-token string pieces of each delimiter, used by the inherited
        # streaming logic to detect partially emitted delimiters.
        self.tool_call_start_token_array = [
            decode([tid]) for tid in self.tool_call_start_token_ids
        ]
        self.tool_call_end_token_array = [
            decode([tid]) for tid in self.tool_call_end_token_ids
        ]