From 64251f48df0ed16fb67f12ece26ab6c7ea730e74 Mon Sep 17 00:00:00 2001
From: Cyrus Leung
Date: Sat, 13 Dec 2025 20:42:39 +0800
Subject: [PATCH] [Chore] Adjust tokenizer import to avoid circular imports
 (#30601)

Signed-off-by: DarkLight1337
---
 benchmarks/backend_request_func.py                         | 2 +-
 tests/entrypoints/openai/test_serving_engine.py            | 2 +-
 tests/entrypoints/test_chat_utils.py                       | 3 ++-
 tests/models/language/generation/test_mistral.py           | 2 +-
 tests/models/multimodal/generation/test_voxtral.py         | 2 +-
 tests/models/multimodal/processing/test_common.py          | 7 ++-----
 tests/reasoning/test_mistral_reasoning_parser.py           | 2 +-
 tests/reasoning/utils.py                                   | 2 +-
 tests/tokenizers_/test_detokenize.py                       | 2 +-
 tests/tool_use/test_mistral_tool_parser.py                 | 7 ++-----
 vllm/entrypoints/chat_utils.py                             | 3 ++-
 vllm/entrypoints/llm.py                                    | 3 ++-
 vllm/entrypoints/openai/serving_engine.py                  | 4 +++-
 vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py | 3 ++-
 vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py  | 3 ++-
 .../entrypoints/openai/tool_parsers/mistral_tool_parser.py | 5 ++++-
 vllm/entrypoints/pooling/score/serving.py                  | 3 ++-
 vllm/entrypoints/utils.py                                  | 2 +-
 vllm/model_executor/models/pixtral.py                      | 3 ++-
 vllm/model_executor/models/voxtral.py                      | 3 ++-
 vllm/reasoning/mistral_reasoning_parser.py                 | 2 +-
 vllm/v1/engine/input_processor.py                          | 3 ++-
 vllm/v1/structured_output/backend_xgrammar.py              | 3 ++-
 23 files changed, 40 insertions(+), 31 deletions(-)

diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py
index d69d74ca61f54..831b76b66e096 100644
--- a/benchmarks/backend_request_func.py
+++ b/benchmarks/backend_request_func.py
@@ -620,7 +620,7 @@ def get_tokenizer(
         kwargs["use_fast"] = False
     if tokenizer_mode == "mistral":
         try:
-            from vllm.tokenizers import MistralTokenizer
+            from vllm.tokenizers.mistral import MistralTokenizer
         except ImportError as e:
             raise ImportError(
                 "MistralTokenizer requires vllm package.\n"
diff --git a/tests/entrypoints/openai/test_serving_engine.py b/tests/entrypoints/openai/test_serving_engine.py
index 956a06dc5487c..192c7cafb7493 100644
--- a/tests/entrypoints/openai/test_serving_engine.py
+++ b/tests/entrypoints/openai/test_serving_engine.py
@@ -10,7 +10,7 @@ import pytest
 from vllm.config import ModelConfig
 from vllm.entrypoints.openai.serving_engine import OpenAIServing
 from vllm.entrypoints.openai.serving_models import OpenAIServingModels
-from vllm.tokenizers import MistralTokenizer
+from vllm.tokenizers.mistral import MistralTokenizer
 
 
 @pytest.fixture()
diff --git a/tests/entrypoints/test_chat_utils.py b/tests/entrypoints/test_chat_utils.py
index 40059c9041541..a87a4c35d3dc7 100644
--- a/tests/entrypoints/test_chat_utils.py
+++ b/tests/entrypoints/test_chat_utils.py
@@ -29,7 +29,8 @@ from vllm.multimodal.utils import (
     encode_image_base64,
     encode_video_base64,
 )
-from vllm.tokenizers import MistralTokenizer, get_tokenizer
+from vllm.tokenizers import get_tokenizer
+from vllm.tokenizers.mistral import MistralTokenizer
 from vllm.utils.serial_utils import tensor2base64
 
 from ..models.registry import HF_EXAMPLE_MODELS
diff --git a/tests/models/language/generation/test_mistral.py b/tests/models/language/generation/test_mistral.py
index e2d6271e2faed..bc8bb05c284e6 100644
--- a/tests/models/language/generation/test_mistral.py
+++ b/tests/models/language/generation/test_mistral.py
@@ -10,7 +10,7 @@ from vllm.entrypoints.openai.tool_parsers.mistral_tool_parser import (
     MistralToolParser,
 )
 from vllm.sampling_params import SamplingParams
-from vllm.tokenizers import MistralTokenizer
+from vllm.tokenizers.mistral import MistralTokenizer
 
 from ...utils import check_logprobs_close
 
diff --git a/tests/models/multimodal/generation/test_voxtral.py b/tests/models/multimodal/generation/test_voxtral.py
index 9e9087cb0fc4d..0eaef49e2395c 100644
--- a/tests/models/multimodal/generation/test_voxtral.py
+++ b/tests/models/multimodal/generation/test_voxtral.py
@@ -9,7 +9,7 @@ from mistral_common.audio import Audio
 from mistral_common.protocol.instruct.chunk import AudioChunk, RawAudio, TextChunk
 from mistral_common.protocol.instruct.messages import UserMessage
 
-from vllm.tokenizers import MistralTokenizer
+from vllm.tokenizers.mistral import MistralTokenizer
 
 from ....conftest import AudioTestAssets
 from ....utils import RemoteOpenAIServer
diff --git a/tests/models/multimodal/processing/test_common.py b/tests/models/multimodal/processing/test_common.py
index 2e032ac4ca526..67861ebfc44e4 100644
--- a/tests/models/multimodal/processing/test_common.py
+++ b/tests/models/multimodal/processing/test_common.py
@@ -22,11 +22,8 @@ from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalDataDict
 from vllm.multimodal.cache import MultiModalProcessorOnlyCache
 from vllm.multimodal.inputs import MultiModalInputs, batched_tensors_equal
 from vllm.multimodal.processing import BaseMultiModalProcessor, InputProcessingContext
-from vllm.tokenizers import (
-    MistralTokenizer,
-    TokenizerLike,
-    cached_tokenizer_from_config,
-)
+from vllm.tokenizers import TokenizerLike, cached_tokenizer_from_config
+from vllm.tokenizers.mistral import MistralTokenizer
 
 from ....multimodal.utils import random_audio, random_image, random_video
 from ...registry import (
diff --git a/tests/reasoning/test_mistral_reasoning_parser.py b/tests/reasoning/test_mistral_reasoning_parser.py
index 01592fd0782a9..d6da723f80b08 100644
--- a/tests/reasoning/test_mistral_reasoning_parser.py
+++ b/tests/reasoning/test_mistral_reasoning_parser.py
@@ -5,7 +5,7 @@ import pytest
 
 from tests.reasoning.utils import run_reasoning_extraction_mistral
 from vllm.reasoning import ReasoningParser, ReasoningParserManager
-from vllm.tokenizers import MistralTokenizer
+from vllm.tokenizers.mistral import MistralTokenizer
 
 parser_name = "mistral"
 
diff --git a/tests/reasoning/utils.py b/tests/reasoning/utils.py
index 695312a0cadfe..a020fb8e97161 100644
--- a/tests/reasoning/utils.py
+++ b/tests/reasoning/utils.py
@@ -4,7 +4,7 @@
 
 from vllm.entrypoints.openai.protocol import ChatCompletionRequest, DeltaMessage
 from vllm.reasoning import ReasoningParser
-from vllm.tokenizers import MistralTokenizer
+from vllm.tokenizers.mistral import MistralTokenizer
 
 
 class StreamingReasoningReconstructor:
diff --git a/tests/tokenizers_/test_detokenize.py b/tests/tokenizers_/test_detokenize.py
index ae1d6b0956722..d307993d04df9 100644
--- a/tests/tokenizers_/test_detokenize.py
+++ b/tests/tokenizers_/test_detokenize.py
@@ -8,7 +8,7 @@ import pytest
 from transformers import AutoTokenizer, PreTrainedTokenizer, PreTrainedTokenizerFast
 
 from vllm.sampling_params import SamplingParams
-from vllm.tokenizers import MistralTokenizer
+from vllm.tokenizers.mistral import MistralTokenizer
 from vllm.v1.engine import EngineCoreRequest
 from vllm.v1.engine.detokenizer import (
     FastIncrementalDetokenizer,
diff --git a/tests/tool_use/test_mistral_tool_parser.py b/tests/tool_use/test_mistral_tool_parser.py
index 2dd0399cb8eeb..d498863317e8d 100644
--- a/tests/tool_use/test_mistral_tool_parser.py
+++ b/tests/tool_use/test_mistral_tool_parser.py
@@ -13,12 +13,9 @@ from partial_json_parser.core.options import Allow
 
 from vllm.entrypoints.openai.protocol import DeltaMessage, DeltaToolCall
 from vllm.entrypoints.openai.tool_parsers.mistral_tool_parser import MistralToolParser
-from vllm.tokenizers import (
-    MistralTokenizer,
-    TokenizerLike,
-    get_tokenizer,
-)
+from vllm.tokenizers import TokenizerLike, get_tokenizer
 from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
+from vllm.tokenizers.mistral import MistralTokenizer
 
 
 @pytest.fixture(scope="module")
diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py
index 5a15dec6f84c1..6a8dfe3cd9e38 100644
--- a/vllm/entrypoints/chat_utils.py
+++ b/vllm/entrypoints/chat_utils.py
@@ -49,7 +49,8 @@ from vllm.logger import init_logger
 from vllm.model_executor.models import SupportsMultiModal
 from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalDataDict, MultiModalUUIDDict
 from vllm.multimodal.utils import MEDIA_CONNECTOR_REGISTRY, MediaConnector
-from vllm.tokenizers import MistralTokenizer, TokenizerLike
+from vllm.tokenizers import TokenizerLike
+from vllm.tokenizers.mistral import MistralTokenizer
 from vllm.transformers_utils.chat_templates import get_chat_template_fallback_path
 from vllm.transformers_utils.processor import cached_get_processor
 from vllm.utils import random_uuid
diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py
index 6440b702f4fa6..31319cf64aeb8 100644
--- a/vllm/entrypoints/llm.py
+++ b/vllm/entrypoints/llm.py
@@ -72,7 +72,8 @@ from vllm.platforms import current_platform
 from vllm.pooling_params import PoolingParams
 from vllm.sampling_params import BeamSearchParams, RequestOutputKind, SamplingParams
 from vllm.tasks import PoolingTask
-from vllm.tokenizers import MistralTokenizer, TokenizerLike
+from vllm.tokenizers import TokenizerLike
+from vllm.tokenizers.mistral import MistralTokenizer
 from vllm.usage.usage_lib import UsageContext
 from vllm.utils.collection_utils import as_iter, is_list_of
 from vllm.utils.counter import Counter
diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py
index a799432baeb40..d83a7c8d59f39 100644
--- a/vllm/entrypoints/openai/serving_engine.py
+++ b/vllm/entrypoints/openai/serving_engine.py
@@ -117,7 +117,9 @@ from vllm.outputs import CompletionOutput, PoolingRequestOutput, RequestOutput
 from vllm.pooling_params import PoolingParams
 from vllm.reasoning import ReasoningParser, ReasoningParserManager
 from vllm.sampling_params import BeamSearchParams, SamplingParams
-from vllm.tokenizers import DeepseekV32Tokenizer, MistralTokenizer, TokenizerLike
+from vllm.tokenizers import TokenizerLike
+from vllm.tokenizers.deepseekv32 import DeepseekV32Tokenizer
+from vllm.tokenizers.mistral import MistralTokenizer
 from vllm.tracing import (
     contains_trace_headers,
     extract_trace_headers,
diff --git a/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py
index 19c1c83268ed4..14cf2f38b70cc 100644
--- a/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py
+++ b/vllm/entrypoints/openai/tool_parsers/hermes_tool_parser.py
@@ -22,7 +22,8 @@ from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import (
     ToolParser,
 )
 from vllm.logger import init_logger
-from vllm.tokenizers import MistralTokenizer, TokenizerLike
+from vllm.tokenizers import TokenizerLike
+from vllm.tokenizers.mistral import MistralTokenizer
 
 logger = init_logger(__name__)
 
diff --git a/vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py
index 4655da8dd4542..92b09917c2521 100644
--- a/vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py
+++ b/vllm/entrypoints/openai/tool_parsers/jamba_tool_parser.py
@@ -21,7 +21,8 @@ from vllm.entrypoints.openai.protocol import (
 from vllm.entrypoints.openai.tool_parsers import ToolParser
 from vllm.entrypoints.openai.tool_parsers.utils import extract_intermediate_diff
 from vllm.logger import init_logger
-from vllm.tokenizers import MistralTokenizer, TokenizerLike
+from vllm.tokenizers import TokenizerLike
+from vllm.tokenizers.mistral import MistralTokenizer
 
 logger = init_logger(__name__)
 
diff --git a/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py
index bc827f045606c..f60c379d26711 100644
--- a/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py
+++ b/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py
@@ -6,6 +6,7 @@ from collections.abc import Sequence
 from enum import Enum, auto
 from random import choices
 from string import ascii_letters, digits
+from typing import Any
 
 import ijson
 import regex as re
@@ -24,7 +25,8 @@ from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import (
     ToolParser,
 )
 from vllm.logger import init_logger
-from vllm.tokenizers import MistralTokenizer, TokenizerLike
+from vllm.tokenizers import TokenizerLike
+from vllm.tokenizers.mistral import MistralTokenizer
 
 logger = init_logger(__name__)
 
@@ -84,6 +86,7 @@ class MistralToolParser(ToolParser):
 
         # initialize properties used for state when parsing tool calls in
         # streaming mode
+        self.prev_tool_call_arr: list[dict[str, Any]] = []
         self.current_tool_id: int = -1
         self.streaming_state: StreamingState = StreamingState.WAITING_FOR_TOOL_START
 
diff --git a/vllm/entrypoints/pooling/score/serving.py b/vllm/entrypoints/pooling/score/serving.py
index e5a66783005a6..f574d8bcebb40 100644
--- a/vllm/entrypoints/pooling/score/serving.py
+++ b/vllm/entrypoints/pooling/score/serving.py
@@ -38,7 +38,8 @@ from vllm.inputs.data import TokensPrompt
 from vllm.logger import init_logger
 from vllm.lora.request import LoRARequest
 from vllm.outputs import PoolingRequestOutput, ScoringRequestOutput
-from vllm.tokenizers import MistralTokenizer, TokenizerLike
+from vllm.tokenizers import TokenizerLike
+from vllm.tokenizers.mistral import MistralTokenizer
 from vllm.utils.async_utils import make_async, merge_async_iterators
 
 logger = init_logger(__name__)
diff --git a/vllm/entrypoints/utils.py b/vllm/entrypoints/utils.py
index daeeb995bc749..f4a633c69cb0b 100644
--- a/vllm/entrypoints/utils.py
+++ b/vllm/entrypoints/utils.py
@@ -30,7 +30,7 @@ from vllm.entrypoints.openai.protocol import (
 from vllm.entrypoints.openai.serving_models import LoRAModulePath
 from vllm.logger import init_logger
 from vllm.platforms import current_platform
-from vllm.tokenizers import MistralTokenizer
+from vllm.tokenizers.mistral import MistralTokenizer
 from vllm.utils.argparse_utils import FlexibleArgumentParser
 
 logger = init_logger(__name__)
diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py
index faf2d80d24bba..555e6ea4b8cb2 100644
--- a/vllm/model_executor/models/pixtral.py
+++ b/vllm/model_executor/models/pixtral.py
@@ -59,7 +59,8 @@ from vllm.multimodal.processing import (
 from vllm.multimodal.profiling import BaseDummyInputsBuilder, ProcessorInputs
 from vllm.platforms import current_platform
 from vllm.sequence import IntermediateTensors
-from vllm.tokenizers import MistralTokenizer, cached_tokenizer_from_config
+from vllm.tokenizers import cached_tokenizer_from_config
+from vllm.tokenizers.mistral import MistralTokenizer
 from vllm.utils.tensor_schema import TensorSchema, TensorShape
 
 from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP
diff --git a/vllm/model_executor/models/voxtral.py b/vllm/model_executor/models/voxtral.py
index 7b408248ec74c..331f0c54ecfbc 100644
--- a/vllm/model_executor/models/voxtral.py
+++ b/vllm/model_executor/models/voxtral.py
@@ -51,7 +51,8 @@ from vllm.multimodal.processing import (
 )
 from vllm.multimodal.profiling import BaseDummyInputsBuilder, ProcessorInputs
 from vllm.sequence import IntermediateTensors
-from vllm.tokenizers import MistralTokenizer, cached_tokenizer_from_config
+from vllm.tokenizers import cached_tokenizer_from_config
+from vllm.tokenizers.mistral import MistralTokenizer
 
 from .interfaces import SupportsLoRA, SupportsMultiModal, SupportsTranscription
 from .utils import init_vllm_registered_model, maybe_prefix
diff --git a/vllm/reasoning/mistral_reasoning_parser.py b/vllm/reasoning/mistral_reasoning_parser.py
index 3206dbb29fe2e..de3d1296ec734 100644
--- a/vllm/reasoning/mistral_reasoning_parser.py
+++ b/vllm/reasoning/mistral_reasoning_parser.py
@@ -10,7 +10,7 @@ from vllm.entrypoints.openai.protocol import (
 from vllm.logger import init_logger
 from vllm.reasoning import ReasoningParser
 from vllm.reasoning.basic_parsers import BaseThinkingReasoningParser
-from vllm.tokenizers import MistralTokenizer
+from vllm.tokenizers.mistral import MistralTokenizer
 
 logger = init_logger(__name__)
 
diff --git a/vllm/v1/engine/input_processor.py b/vllm/v1/engine/input_processor.py
index 5bd18cc064cb5..65e0c845b0afa 100644
--- a/vllm/v1/engine/input_processor.py
+++ b/vllm/v1/engine/input_processor.py
@@ -19,7 +19,8 @@ from vllm.multimodal.processing import EncDecMultiModalProcessor
 from vllm.multimodal.utils import argsort_mm_positions
 from vllm.pooling_params import PoolingParams
 from vllm.sampling_params import SamplingParams
-from vllm.tokenizers import MistralTokenizer, TokenizerLike
+from vllm.tokenizers import TokenizerLike
+from vllm.tokenizers.mistral import MistralTokenizer
 from vllm.utils import length_from_prompt_token_ids_or_embeds
 from vllm.v1.engine import EngineCoreRequest
 from vllm.v1.metrics.stats import MultiModalCacheStats
diff --git a/vllm/v1/structured_output/backend_xgrammar.py b/vllm/v1/structured_output/backend_xgrammar.py
index 826ee08caa4e2..c5e7165026d1b 100644
--- a/vllm/v1/structured_output/backend_xgrammar.py
+++ b/vllm/v1/structured_output/backend_xgrammar.py
@@ -10,7 +10,8 @@ import torch
 import vllm.envs
 from vllm.logger import init_logger
 from vllm.sampling_params import SamplingParams
-from vllm.tokenizers import DeepseekV32Tokenizer, MistralTokenizer
+from vllm.tokenizers.deepseekv32 import DeepseekV32Tokenizer
+from vllm.tokenizers.mistral import MistralTokenizer
 from vllm.utils.import_utils import LazyLoader
 from vllm.v1.structured_output.backend_types import (
    StructuredOutputBackend,
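
Note: why moving from "from vllm.tokenizers import X" to "from
vllm.tokenizers.mistral import X" helps. Pulling a name out of a package
__init__ requires the whole package to finish initializing first, so a module
that is imported while that __init__ is still executing sees a partially
initialized package, and "from pkg import Name" raises ImportError; importing
the leaf module directly never waits on the package __init__. The sketch below
is a minimal, self-contained reproduction of that general pattern only — all
names in it (pkg, engine, mistral, consumer, MistralTok) are invented and do
not mirror vLLM's actual import graph.

    # demo_circular_import.py — illustrative sketch, not vLLM code.
    import subprocess
    import sys
    import tempfile
    import textwrap
    from pathlib import Path

    root = Path(tempfile.mkdtemp())
    (root / "pkg").mkdir()

    files = {
        # The package __init__ eagerly imports submodules, so any module
        # reached while this block runs sees a partially initialized 'pkg'.
        "pkg/__init__.py": """
            from pkg.engine import Engine
            from pkg.mistral import MistralTok
        """,
        # Back-edge: an import chain under the package leads to a module
        # that itself needs the tokenizer class, closing the cycle.
        "pkg/engine.py": """
            import consumer

            class Engine: ...
        """,
        # A leaf module with no further imports of its own.
        "pkg/mistral.py": """
            class MistralTok: ...
        """,
        # 'from pkg import MistralTok' would fail here: pkg/__init__.py is
        # still on its first line, so 'MistralTok' is not bound on the
        # package yet. Importing the leaf module directly succeeds even
        # though pkg/__init__.py has not finished executing.
        "consumer.py": """
            from pkg.mistral import MistralTok

            class Consumer: ...
        """,
    }
    for name, body in files.items():
        (root / name).write_text(textwrap.dedent(body))

    # Run in a subprocess so the demo does not pollute this interpreter's
    # sys.modules; with '-c', the working directory is on sys.path.
    proc = subprocess.run(
        [sys.executable, "-c", "import pkg; print('pkg imported cleanly')"],
        cwd=root,
        capture_output=True,
        text=True,
    )
    print(proc.stdout or proc.stderr)

Switching consumer.py to "from pkg import MistralTok" and rerunning reproduces
the classic failure on CPython 3.8+: ImportError: cannot import name
'MistralTok' from partially initialized module 'pkg' (most likely due to a
circular import). That is the failure shape this patch avoids by importing
MistralTokenizer from vllm.tokenizers.mistral directly.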