diff --git a/tests/tool_use/test_ernie45_moe_tool_parser.py b/tests/tool_use/test_ernie45_moe_tool_parser.py
index ee9da4fd6464..8fbbbba32538 100644
--- a/tests/tool_use/test_ernie45_moe_tool_parser.py
+++ b/tests/tool_use/test_ernie45_moe_tool_parser.py
@@ -15,7 +15,7 @@ from vllm.entrypoints.openai.protocol import (
 )
 from vllm.entrypoints.openai.tool_parsers.ernie45_tool_parser import Ernie45ToolParser
 from vllm.tokenizers import TokenizerLike
-from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally
+from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
 from vllm.transformers_utils.tokenizer import get_tokenizer
 
 # Use a common model that is likely to be available
diff --git a/tests/tool_use/test_jamba_tool_parser.py b/tests/tool_use/test_jamba_tool_parser.py
index 2413b983fe87..c7ca024f3a76 100644
--- a/tests/tool_use/test_jamba_tool_parser.py
+++ b/tests/tool_use/test_jamba_tool_parser.py
@@ -11,7 +11,7 @@ from partial_json_parser.core.options import Allow
 from vllm.entrypoints.openai.protocol import DeltaMessage, FunctionCall, ToolCall
 from vllm.entrypoints.openai.tool_parsers.jamba_tool_parser import JambaToolParser
 from vllm.tokenizers import TokenizerLike
-from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally
+from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
 from vllm.transformers_utils.tokenizer import get_tokenizer
 
 pytestmark = pytest.mark.cpu_test
diff --git a/tests/tool_use/test_qwen3coder_tool_parser.py b/tests/tool_use/test_qwen3coder_tool_parser.py
index 3cf1f4ef89f1..864bb0d0c06c 100644
--- a/tests/tool_use/test_qwen3coder_tool_parser.py
+++ b/tests/tool_use/test_qwen3coder_tool_parser.py
@@ -18,7 +18,7 @@ from vllm.entrypoints.openai.tool_parsers.qwen3coder_tool_parser import (
 )
 from vllm.entrypoints.openai.tool_parsers.qwen3xml_tool_parser import Qwen3XMLToolParser
 from vllm.tokenizers import TokenizerLike
-from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally
+from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
 from vllm.transformers_utils.tokenizer import get_tokenizer
 
 pytestmark = pytest.mark.cpu_test
diff --git a/tests/tool_use/test_seed_oss_tool_parser.py b/tests/tool_use/test_seed_oss_tool_parser.py
index 8e1ad5e9cedc..d94df61128c9 100644
--- a/tests/tool_use/test_seed_oss_tool_parser.py
+++ b/tests/tool_use/test_seed_oss_tool_parser.py
@@ -16,7 +16,7 @@ from vllm.entrypoints.openai.protocol import (
 )
 from vllm.entrypoints.openai.tool_parsers.seed_oss_tool_parser import SeedOssToolParser
 from vllm.tokenizers import TokenizerLike
-from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally
+from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
 from vllm.transformers_utils.tokenizer import get_tokenizer
 
 pytestmark = pytest.mark.cpu_test
diff --git a/tests/tool_use/test_xlam_tool_parser.py b/tests/tool_use/test_xlam_tool_parser.py
index a1852c368eeb..fdcdd4038131 100644
--- a/tests/tool_use/test_xlam_tool_parser.py
+++ b/tests/tool_use/test_xlam_tool_parser.py
@@ -14,7 +14,7 @@ from vllm.entrypoints.openai.protocol import (
 )
 from vllm.entrypoints.openai.tool_parsers.xlam_tool_parser import xLAMToolParser
 from vllm.tokenizers import TokenizerLike
-from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally
+from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
 from vllm.transformers_utils.tokenizer import get_tokenizer
 
 pytestmark = pytest.mark.cpu_test
diff --git a/tests/utils_/test_argparse_utils.py b/tests/utils_/test_argparse_utils.py
index 0ea4a43d2602..2d969b8c9347 100644
--- a/tests/utils_/test_argparse_utils.py
+++ b/tests/utils_/test_argparse_utils.py
@@ -10,7 +10,7 @@ import yaml
 from transformers import AutoTokenizer
 from pydantic import ValidationError
 
-from vllm.transformers_utils.detokenizer_utils import convert_ids_list_to_tokens
+from vllm.tokenizers.detokenizer_utils import convert_ids_list_to_tokens
 from vllm.utils.argparse_utils import FlexibleArgumentParser
 
 from ..utils import flat_product
diff --git a/vllm/transformers_utils/detokenizer_utils.py b/vllm/tokenizers/detokenizer_utils.py
similarity index 100%
rename from vllm/transformers_utils/detokenizer_utils.py
rename to vllm/tokenizers/detokenizer_utils.py
diff --git a/vllm/v1/engine/detokenizer.py b/vllm/v1/engine/detokenizer.py
index c55240c40f6f..6c0acd9a9f59 100644
--- a/vllm/v1/engine/detokenizer.py
+++ b/vllm/v1/engine/detokenizer.py
@@ -9,7 +9,7 @@ from tokenizers.decoders import DecodeStream
 from transformers import PreTrainedTokenizerFast
 
 from vllm.logger import init_logger
-from vllm.transformers_utils.detokenizer_utils import (
+from vllm.tokenizers.detokenizer_utils import (
     TokenizerLike,
     convert_prompt_ids_to_tokens,
     detokenize_incrementally,
diff --git a/vllm/v1/engine/logprobs.py b/vllm/v1/engine/logprobs.py
index 1c8f808bc25b..599725b6de91 100644
--- a/vllm/v1/engine/logprobs.py
+++ b/vllm/v1/engine/logprobs.py
@@ -12,7 +12,7 @@ from vllm.logprobs import (
     create_prompt_logprobs,
     create_sample_logprobs,
 )
-from vllm.transformers_utils.detokenizer_utils import (
+from vllm.tokenizers.detokenizer_utils import (
     TokenizerLike,
     convert_ids_list_to_tokens,
 )