From b8b904795d3033c21bcb8f5c36e135e75dc1baf2 Mon Sep 17 00:00:00 2001
From: Lucia Fang <116399278+luccafong@users.noreply.github.com>
Date: Sat, 31 May 2025 03:38:56 -0700
Subject: [PATCH] fix security issue of logging llm output (#18980)

Signed-off-by: Lu Fang
Co-authored-by: Lucia (Lu) Fang
---
 vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py
index b403a146716d..00690ad79a7a 100644
--- a/vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py
+++ b/vllm/entrypoints/openai/tool_parsers/phi4mini_tool_parser.py
@@ -68,8 +68,8 @@ class Phi4MiniJsonToolParser(ToolParser):
                          len(function_call_arr))
         except json.JSONDecodeError as e:
             logger.error(
-                "Failed to parse function calls from model output: %s. "
-                "Error: %s", model_output, str(e))
+                "Failed to parse function calls from model output. "
+                "Error: %s", str(e))
 
         tool_calls: list[ToolCall] = [
             ToolCall(
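
Note: the patch keeps the parse failure observable while dropping the raw completion
from the log message, since model output can contain prompt or user data that should
not reach log files. Below is a minimal, self-contained sketch of the same pattern,
independent of vLLM's parser classes; parse_tool_calls is a hypothetical helper used
only for illustration, not the project's API.

    import json
    import logging

    logger = logging.getLogger(__name__)


    def parse_tool_calls(model_output: str) -> list:
        """Parse a JSON list of tool calls from raw model output."""
        try:
            return json.loads(model_output)
        except json.JSONDecodeError as e:
            # Safe: records that parsing failed and why, without echoing
            # the (potentially sensitive) model output into the logs.
            logger.error(
                "Failed to parse function calls from model output. "
                "Error: %s", str(e))
            return []


    if __name__ == "__main__":
        logging.basicConfig(level=logging.INFO)
        # Invalid JSON: returns [] and emits only the error message,
        # never the offending text itself.
        print(parse_tool_calls("not valid json"))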