[Easy][Test] Simplify test_function_tool_use with multiple parametrizes (#19269)

Signed-off-by: Lu Fang <lufang@fb.com>
Lu Fang 2025-06-07 09:19:09 +08:00 committed by GitHub
parent e010688f50
commit 6e0cd10f72
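Stacked @pytest.mark.parametrize decorators make pytest collect the Cartesian product of their value lists, so the four decorators in the diff below generate the same eight (stream, tool_choice, enable_thinking) combinations that the hand-written TestCase list spelled out (model_name contributes a factor of one). A minimal self-contained sketch of the mechanism; the test name here is illustrative, not part of this change:

import pytest


@pytest.mark.parametrize("stream", [True, False])
@pytest.mark.parametrize("tool_choice", ["auto", "required"])
@pytest.mark.parametrize("enable_thinking", [True, False])
def test_cross_product(stream: bool, tool_choice: str,
                       enable_thinking: bool):
    # pytest generates one test per combination: 2 * 2 * 2 = 8 cases.
    assert tool_choice in ("auto", "required")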


@@ -1,8 +1,6 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-from typing import NamedTuple
-
 import openai  # use the official client for correctness check
 import pytest
 import pytest_asyncio
 
@@ -39,53 +37,14 @@ async def client(server):
     yield async_client
 
 
-class TestCase(NamedTuple):
-    model_name: str
-    stream: bool
-    tool_choice: str
-    enable_thinking: bool
-
-
 @pytest.mark.asyncio
-@pytest.mark.parametrize(
-    "test_case",
-    [
-        TestCase(model_name=MODEL_NAME,
-                 stream=True,
-                 tool_choice="auto",
-                 enable_thinking=False),
-        TestCase(model_name=MODEL_NAME,
-                 stream=False,
-                 tool_choice="auto",
-                 enable_thinking=False),
-        TestCase(model_name=MODEL_NAME,
-                 stream=True,
-                 tool_choice="required",
-                 enable_thinking=False),
-        TestCase(model_name=MODEL_NAME,
-                 stream=False,
-                 tool_choice="required",
-                 enable_thinking=False),
-        TestCase(model_name=MODEL_NAME,
-                 stream=True,
-                 tool_choice="auto",
-                 enable_thinking=True),
-        TestCase(model_name=MODEL_NAME,
-                 stream=False,
-                 tool_choice="auto",
-                 enable_thinking=True),
-        TestCase(model_name=MODEL_NAME,
-                 stream=True,
-                 tool_choice="required",
-                 enable_thinking=True),
-        TestCase(model_name=MODEL_NAME,
-                 stream=False,
-                 tool_choice="required",
-                 enable_thinking=True),
-    ],
-)
-async def test_function_tool_use(client: openai.AsyncOpenAI,
-                                 test_case: TestCase):
+@pytest.mark.parametrize("model_name", [MODEL_NAME])
+@pytest.mark.parametrize("stream", [True, False])
+@pytest.mark.parametrize("tool_choice", ["auto", "required"])
+@pytest.mark.parametrize("enable_thinking", [True, False])
+async def test_function_tool_use(client: openai.AsyncOpenAI, model_name: str,
+                                 stream: bool, tool_choice: str,
+                                 enable_thinking: bool):
     tools = [
         {
             "type": "function",
@@ -174,16 +133,16 @@ async def test_function_tool_use(client: openai.AsyncOpenAI,
             "forecast for the next 5 days, in fahrenheit?",
         },
     ]
-    if not test_case.stream:
+    if not stream:
         # Non-streaming test
         chat_completion = await client.chat.completions.create(
             messages=messages,
-            model=test_case.model_name,
+            model=model_name,
             tools=tools,
-            tool_choice=test_case.tool_choice,
+            tool_choice=tool_choice,
             extra_body={
                 "chat_template_kwargs": {
-                    "enable_thinking": test_case.enable_thinking
+                    "enable_thinking": enable_thinking
                 }
             })
 
@@ -191,20 +150,20 @@ async def test_function_tool_use(client: openai.AsyncOpenAI,
         assert len(chat_completion.choices[0].message.tool_calls) > 0
     else:
         # Streaming test
-        stream = await client.chat.completions.create(
+        output_stream = await client.chat.completions.create(
             messages=messages,
-            model=test_case.model_name,
+            model=model_name,
             tools=tools,
-            tool_choice=test_case.tool_choice,
+            tool_choice=tool_choice,
             stream=True,
             extra_body={
                 "chat_template_kwargs": {
-                    "enable_thinking": test_case.enable_thinking
+                    "enable_thinking": enable_thinking
                 }
             })
 
         output = []
-        async for chunk in stream:
+        async for chunk in output_stream:
             if chunk.choices and chunk.choices[0].delta.tool_calls:
                 output.extend(chunk.choices[0].delta.tool_calls)
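One detail worth noting: the local variable holding the streaming response is renamed from stream to output_stream, since stream is now a parametrized argument of the test function and rebinding it inside the body would shadow that value.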