diff --git a/tests/entrypoints/openai/test_response_api_with_harmony.py b/tests/entrypoints/openai/test_response_api_with_harmony.py
index b882a2f9326e2..400779064ef51 100644
--- a/tests/entrypoints/openai/test_response_api_with_harmony.py
+++ b/tests/entrypoints/openai/test_response_api_with_harmony.py
@@ -698,6 +698,22 @@ async def test_function_calling_required(client: OpenAI, model_name: str):
     )
 
 
+@pytest.mark.asyncio
+@pytest.mark.parametrize("model_name", [MODEL_NAME])
+async def test_system_message_with_tools(client: OpenAI, model_name: str):
+    from vllm.entrypoints.harmony_utils import get_system_message
+
+    # Test with custom tools enabled - commentary channel should be available
+    sys_msg = get_system_message(with_custom_tools=True)
+    valid_channels = sys_msg.content[0].channel_config.valid_channels
+    assert "commentary" in valid_channels
+
+    # Test with custom tools disabled - commentary channel should be removed
+    sys_msg = get_system_message(with_custom_tools=False)
+    valid_channels = sys_msg.content[0].channel_config.valid_channels
+    assert "commentary" not in valid_channels
+
+
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model_name", [MODEL_NAME])
 async def test_function_calling_full_history(client: OpenAI, model_name: str):
diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py
index ab4bf75102f43..2336158ac51ba 100644
--- a/vllm/entrypoints/openai/serving_chat.py
+++ b/vllm/entrypoints/openai/serving_chat.py
@@ -1575,7 +1575,9 @@ class OpenAIServingChat(OpenAIServing):
         sys_msg = get_system_message(
             reasoning_effort=request.reasoning_effort,
             browser_description=None,
-            python_description=None)
+            python_description=None,
+            with_custom_tools=request.tools is not None
+        )
         messages.append(sys_msg)
 
         # Add developer message.