vllm/tests/entrypoints/test_responses_utils.py
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
from openai.types.responses.response_function_tool_call_output_item import (
    ResponseFunctionToolCallOutputItem,
)
from openai.types.responses.response_output_message import ResponseOutputMessage
from openai.types.responses.response_output_text import ResponseOutputText
from openai.types.responses.response_reasoning_item import (
    Content,
    ResponseReasoningItem,
    Summary,
)

from vllm.entrypoints.responses_utils import (
    construct_chat_message_with_tool_call,
    convert_tool_responses_to_completions_format,
)
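

# Helpers under test (behavior as exercised by the assertions below):
# - convert_tool_responses_to_completions_format wraps a flat Responses API
#   tool schema into the nested Chat Completions form
#   {"type": "function", "function": {...}}.
# - construct_chat_message_with_tool_call maps Responses output items
#   (reasoning items, function call outputs, output messages) onto chat
#   messages with the matching role, content, and reasoning fields.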
class TestResponsesUtils:
    """Tests for the responses_utils conversion helpers."""

    def test_convert_tool_responses_to_completions_format(self):
        """Test basic conversion of a flat tool schema to nested format."""
        input_tool = {
            "type": "function",
            "name": "get_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {"type": "string"},
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location", "unit"],
            },
        }

        result = convert_tool_responses_to_completions_format(input_tool)

        assert result == {"type": "function", "function": input_tool}

    def test_construct_chat_message_with_tool_call(self):
        """Test conversion of response output items into chat messages."""
        # Reasoning item with reasoning_text content: the text becomes the
        # assistant message's "reasoning" field.
        item = ResponseReasoningItem(
            id="lol",
            summary=[],
            type="reasoning",
            content=[
                Content(
                    text="Leroy Jenkins",
                    type="reasoning_text",
                )
            ],
            encrypted_content=None,
            status=None,
        )
        formatted_item = construct_chat_message_with_tool_call(item)
        assert formatted_item["role"] == "assistant"
        assert formatted_item["reasoning"] == "Leroy Jenkins"
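
        # Reasoning item with only a summary: the summary text is used as
        # the reasoning.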
        item = ResponseReasoningItem(
            id="lol",
            summary=[
                Summary(
                    text='Hmm, the user has just started with a simple "Hello,"',
                    type="summary_text",
                )
            ],
            type="reasoning",
            content=None,
            encrypted_content=None,
            status=None,
        )
        formatted_item = construct_chat_message_with_tool_call(item)
        assert formatted_item["role"] == "assistant"
        assert (
            formatted_item["reasoning"]
            == 'Hmm, the user has just started with a simple "Hello,"'
        )
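
        # Function call output: becomes a tool message keyed by its call_id.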
        tool_call_output = ResponseFunctionToolCallOutputItem(
            id="temp_id",
            type="function_call_output",
            call_id="temp",
            output="1234",
            status="completed",
        )
        formatted_item = construct_chat_message_with_tool_call(tool_call_output)
        assert formatted_item["role"] == "tool"
        assert formatted_item["content"] == "1234"
        assert formatted_item["tool_call_id"] == "temp"
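
        # Reasoning item carrying only encrypted content cannot be converted
        # and should raise.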
        item = ResponseReasoningItem(
            id="lol",
            summary=[],
            type="reasoning",
            content=None,
            encrypted_content="TOP_SECRET_MESSAGE",
            status=None,
        )
        with pytest.raises(ValueError):
            construct_chat_message_with_tool_call(item)
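
        # Output message: its text content becomes plain assistant content.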
        output_item = ResponseOutputMessage(
            id="msg_bf585bbbe3d500e0",
            content=[
                ResponseOutputText(
                    annotations=[],
                    text="dongyi",
                    type="output_text",
                    logprobs=None,
                )
            ],
            role="assistant",
            status="completed",
            type="message",
        )
        formatted_item = construct_chat_message_with_tool_call(output_item)
        assert formatted_item["role"] == "assistant"
        assert formatted_item["content"] == "dongyi"