[ez] move harmony utils to parser folder (#30117)

Signed-off-by: Andrew Xia <axia@fb.com>
Co-authored-by: Andrew Xia <axia@fb.com>
This commit is contained in:
Andrew Xia 2025-12-06 14:34:34 -08:00 committed by GitHub
parent 671427efbf
commit 421125d03a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 9 additions and 9 deletions

View File

@@ -4,7 +4,7 @@
 from openai.types.responses import ResponseFunctionToolCall, ResponseReasoningItem
 from openai_harmony import Author, Message, Role, TextContent
-from vllm.entrypoints.harmony_utils import (
+from vllm.entrypoints.openai.parser.harmony_utils import (
     has_custom_tools,
     parse_input_to_harmony_message,
     parse_output_message,

View File

@@ -726,7 +726,7 @@ async def test_function_calling_required(client: OpenAI, model_name: str):
 @pytest.mark.asyncio
 @pytest.mark.parametrize("model_name", [MODEL_NAME])
 async def test_system_message_with_tools(client: OpenAI, model_name: str):
-    from vllm.entrypoints.harmony_utils import get_system_message
+    from vllm.entrypoints.openai.parser.harmony_utils import get_system_message
     # Test with custom tools enabled - commentary channel should be available
     sys_msg = get_system_message(with_custom_tools=True)

View File

@@ -19,7 +19,7 @@ from vllm import envs
 from vllm.entrypoints.chat_utils import (
     ChatTemplateContentFormatOption,
 )
-from vllm.entrypoints.harmony_utils import (
+from vllm.entrypoints.openai.parser.harmony_utils import (
     get_encoding,
     get_streamable_parser_for_assistant,
     render_for_completion,

View File

@@ -21,7 +21,8 @@ from vllm.entrypoints.chat_utils import (
     get_history_tool_calls_cnt,
     make_tool_call_id,
 )
-from vllm.entrypoints.harmony_utils import (
+from vllm.entrypoints.logger import RequestLogger
+from vllm.entrypoints.openai.parser.harmony_utils import (
     get_developer_message,
     get_stop_tokens_for_assistant_actions,
     get_streamable_parser_for_assistant,
@@ -30,7 +31,6 @@ from vllm.entrypoints.harmony_utils import (
     parse_input_to_harmony_message,
     render_for_completion,
 )
-from vllm.entrypoints.logger import RequestLogger
 from vllm.entrypoints.openai.protocol import (
     ChatCompletionLogProb,
     ChatCompletionLogProbs,

View File

@@ -64,7 +64,8 @@ from vllm.entrypoints.context import (
     SimpleContext,
     StreamingHarmonyContext,
 )
-from vllm.entrypoints.harmony_utils import (
+from vllm.entrypoints.logger import RequestLogger
+from vllm.entrypoints.openai.parser.harmony_utils import (
     construct_harmony_previous_input_messages,
     get_developer_message,
     get_stop_tokens_for_assistant_actions,
@@ -76,7 +77,6 @@ from vllm.entrypoints.harmony_utils import (
     parse_response_input,
     render_for_completion,
 )
-from vllm.entrypoints.logger import RequestLogger
 from vllm.entrypoints.openai.protocol import (
     DeltaMessage,
     ErrorResponse,

View File

@@ -4,7 +4,7 @@ import json
 from collections.abc import Sequence
 from typing import TYPE_CHECKING
-from vllm.entrypoints.harmony_utils import parse_output_into_messages
+from vllm.entrypoints.openai.parser.harmony_utils import parse_output_into_messages
 from vllm.entrypoints.openai.protocol import (
     ChatCompletionRequest,
     DeltaMessage,

View File

@@ -5,7 +5,7 @@ from collections.abc import Sequence
 from transformers import PreTrainedTokenizerBase
-from vllm.entrypoints.harmony_utils import parse_chat_output
+from vllm.entrypoints.openai.parser.harmony_utils import parse_chat_output
 from vllm.entrypoints.openai.protocol import ChatCompletionRequest, DeltaMessage
 from vllm.entrypoints.tool_server import ToolServer
 from vllm.logger import init_logger