From fbd8595c5c6f969dfa6cf33e5a371d93d55025fb Mon Sep 17 00:00:00 2001
From: Isotr0py
Date: Sun, 10 Aug 2025 02:42:21 +0800
Subject: [PATCH] [Bugfix] Fix basic models tests hanging due to mm processor
 creation (#22571)

Signed-off-by: Isotr0py
---
 vllm/multimodal/registry.py | 28 +++++++++++++++++++++++-----
 1 file changed, 23 insertions(+), 5 deletions(-)

diff --git a/vllm/multimodal/registry.py b/vllm/multimodal/registry.py
index a101f2a55f5d..ded56cca8099 100644
--- a/vllm/multimodal/registry.py
+++ b/vllm/multimodal/registry.py
@@ -138,8 +138,8 @@ class MultiModalRegistry:
         if not model_config.is_multimodal_model:
             return False
 
-        processor = self.create_processor(model_config, disable_cache=False)
-        supported_modalities = processor.info.get_supported_mm_limits()
+        info = self._create_processing_info(model_config, tokenizer=None)
+        supported_modalities = info.get_supported_mm_limits()
 
         mm_config = model_config.get_multimodal_config()
 
@@ -278,6 +278,26 @@ class MultiModalRegistry:
         model_cls, _ = get_model_architecture(model_config)
         return model_cls
 
+    def _create_processing_ctx(
+        self,
+        model_config: "ModelConfig",
+        tokenizer: Optional[AnyTokenizer] = None,
+    ) -> InputProcessingContext:
+        if tokenizer is None and not model_config.skip_tokenizer_init:
+            tokenizer = cached_tokenizer_from_config(model_config)
+        return InputProcessingContext(model_config, tokenizer)
+
+    def _create_processing_info(
+        self,
+        model_config: "ModelConfig",
+        *,
+        tokenizer: Optional[AnyTokenizer] = None,
+    ) -> BaseProcessingInfo:
+        model_cls = self._get_model_cls(model_config)
+        factories = self._processor_factories[model_cls]
+        ctx = self._create_processing_ctx(model_config, tokenizer)
+        return factories.info(ctx)
+
     def create_processor(
         self,
         model_config: "ModelConfig",
@@ -291,15 +311,13 @@ class MultiModalRegistry:
         if not model_config.is_multimodal_model:
             raise ValueError(f"{model_config.model} is not a multimodal model")
 
-        if tokenizer is None and not model_config.skip_tokenizer_init:
-            tokenizer = cached_tokenizer_from_config(model_config)
         if disable_cache is None:
             disable_cache = not model_config.enable_mm_processor_cache
 
         model_cls = self._get_model_cls(model_config)
         factories = self._processor_factories[model_cls]
 
-        ctx = InputProcessingContext(model_config, tokenizer)
+        ctx = self._create_processing_ctx(model_config, tokenizer)
 
         cache = None if disable_cache else self._get_processor_cache(
             model_config)