[Chore] Move detokenizer_utils to vllm/tokenizers (#29727)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Cyrus Leung authored 2025-11-29 22:25:17 +08:00, committed by GitHub
parent fe3398fab2
commit fa59fe417f
GPG Key ID: B5690EEEBB952194
9 changed files with 8 additions and 8 deletions
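
For downstream code that imports these helpers, the change is purely a module-path swap; the exported names themselves are unchanged. A minimal before/after sketch (import names taken from the hunks below; no call signatures are assumed):

# Before this commit:
# from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally

# After this commit:
from vllm.tokenizers.detokenizer_utils import (
    TokenizerLike,
    convert_ids_list_to_tokens,
    convert_prompt_ids_to_tokens,
    detokenize_incrementally,
)

Imports of the tokenizer protocol itself (from vllm.tokenizers import TokenizerLike) and of vllm.transformers_utils.tokenizer.get_tokenizer appear only as unchanged context lines in the hunks below.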

View File

@@ -15,7 +15,7 @@ from vllm.entrypoints.openai.protocol import (
 )
 from vllm.entrypoints.openai.tool_parsers.ernie45_tool_parser import Ernie45ToolParser
 from vllm.tokenizers import TokenizerLike
-from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally
+from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
 from vllm.transformers_utils.tokenizer import get_tokenizer
 # Use a common model that is likely to be available

View File

@@ -11,7 +11,7 @@ from partial_json_parser.core.options import Allow
 from vllm.entrypoints.openai.protocol import DeltaMessage, FunctionCall, ToolCall
 from vllm.entrypoints.openai.tool_parsers.jamba_tool_parser import JambaToolParser
 from vllm.tokenizers import TokenizerLike
-from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally
+from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
 from vllm.transformers_utils.tokenizer import get_tokenizer
 pytestmark = pytest.mark.cpu_test

View File

@@ -18,7 +18,7 @@ from vllm.entrypoints.openai.tool_parsers.qwen3coder_tool_parser import (
 )
 from vllm.entrypoints.openai.tool_parsers.qwen3xml_tool_parser import Qwen3XMLToolParser
 from vllm.tokenizers import TokenizerLike
-from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally
+from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
 from vllm.transformers_utils.tokenizer import get_tokenizer
 pytestmark = pytest.mark.cpu_test

View File

@@ -16,7 +16,7 @@ from vllm.entrypoints.openai.protocol import (
 )
 from vllm.entrypoints.openai.tool_parsers.seed_oss_tool_parser import SeedOssToolParser
 from vllm.tokenizers import TokenizerLike
-from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally
+from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
 from vllm.transformers_utils.tokenizer import get_tokenizer
 pytestmark = pytest.mark.cpu_test

View File

@@ -14,7 +14,7 @@ from vllm.entrypoints.openai.protocol import (
 )
 from vllm.entrypoints.openai.tool_parsers.xlam_tool_parser import xLAMToolParser
 from vllm.tokenizers import TokenizerLike
-from vllm.transformers_utils.detokenizer_utils import detokenize_incrementally
+from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
 from vllm.transformers_utils.tokenizer import get_tokenizer
 pytestmark = pytest.mark.cpu_test

View File

@@ -10,7 +10,7 @@ import yaml
 from transformers import AutoTokenizer
 from pydantic import ValidationError
-from vllm.transformers_utils.detokenizer_utils import convert_ids_list_to_tokens
+from vllm.tokenizers.detokenizer_utils import convert_ids_list_to_tokens
 from vllm.utils.argparse_utils import FlexibleArgumentParser
 from ..utils import flat_product

View File

@@ -9,7 +9,7 @@ from tokenizers.decoders import DecodeStream
 from transformers import PreTrainedTokenizerFast
 from vllm.logger import init_logger
-from vllm.transformers_utils.detokenizer_utils import (
+from vllm.tokenizers.detokenizer_utils import (
     TokenizerLike,
     convert_prompt_ids_to_tokens,
     detokenize_incrementally,

View File

@@ -12,7 +12,7 @@ from vllm.logprobs import (
     create_prompt_logprobs,
     create_sample_logprobs,
 )
-from vllm.transformers_utils.detokenizer_utils import (
+from vllm.tokenizers.detokenizer_utils import (
     TokenizerLike,
     convert_ids_list_to_tokens,
 )