[feat] Enable mm caching for transformers backend (#21358)

Signed-off-by: raushan <raushan@huggingface.co>
Raushan Turganbay 2025-07-22 17:18:46 +02:00 committed by GitHub
parent b194557a6c
commit f38ee34a0a
4 changed files with 7 additions and 18 deletions


@@ -18,7 +18,7 @@ These models are what we list in [supported-text-models][supported-text-models]
 ### Transformers
-vLLM also supports model implementations that are available in Transformers. This does not currently work for all models, but most decoder language models and common vision language models are supported! Vision-language models currently accept only image inputs, and require setting `--disable_mm_preprocessor_cache` when running. Support for video inputs and caching of multi-modal preprocessors will be added in future releases.
+vLLM also supports model implementations that are available in Transformers. This does not currently work for all models, but most decoder language models and common vision language models are supported! Vision-language models currently accept only image inputs. Support for video inputs will be added in future releases.
 To check if the modeling backend is Transformers, you can simply do this:
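Reviewer note: after this change a Transformers-backend vision-language model can be launched without the old workaround flag. The snippet below is only an illustrative sketch (the model name is a placeholder, not part of this commit); it assumes the `model_impl` engine argument and `LLM.apply_model` helper described in the vLLM docs.

from vllm import LLM

# Placeholder model name for illustration; any HF vision-language model works
# the same way. No --disable_mm_preprocessor_cache flag is needed anymore.
llm = LLM(
    model="llava-hf/llava-1.5-7b-hf",
    model_impl="transformers",  # force the Transformers modeling backend
)

# Print the class of the loaded model to check that the Transformers backend
# (rather than a native vLLM implementation) is in use.
llm.apply_model(lambda model: print(type(model)))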


@@ -186,8 +186,6 @@ VLM_TEST_SETTINGS = {
         image_size_factors=[(0.25, 0.5, 1.0)],
         vllm_runner_kwargs={
             "model_impl": "transformers",
-            "disable_mm_preprocessor_cache": True,
-            "enable_prefix_caching": False,
         },
         marks=[pytest.mark.core_model],
     ),
@@ -205,8 +203,6 @@ VLM_TEST_SETTINGS = {
 #         image_size_factors=[(0.25, 0.5, 1.0)],
 #         vllm_runner_kwargs={
 #             "model_impl": "transformers",
-#             "disable_mm_preprocessor_cache": True,
-#             "enable_prefix_caching": False,
 #         },
 #         marks=[pytest.mark.core_model],
 #     ),
@@ -223,8 +219,6 @@ VLM_TEST_SETTINGS = {
         image_size_factors=[(0.25, 0.2, 0.15)],
         vllm_runner_kwargs={
             "model_impl": "transformers",
-            "disable_mm_preprocessor_cache": True,
-            "enable_prefix_caching": False,
         },
         marks=[large_gpu_mark(min_gb=32)],
     ),
@@ -239,8 +233,6 @@ VLM_TEST_SETTINGS = {
         image_size_factors=[(0.25, 0.5, 1.0)],
         vllm_runner_kwargs={
             "model_impl": "auto",
-            "disable_mm_preprocessor_cache": True,
-            "enable_prefix_caching": False,
         },
         auto_cls=AutoModelForImageTextToText,
         marks=[pytest.mark.core_model],
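Reviewer note: the four test entries above only drop the two workaround kwargs; everything else in `VLM_TEST_SETTINGS` is unchanged. For context, a rough sketch of what those kwargs controlled at the engine level (placeholder model name, not taken from the diff):

from vllm import LLM

# Before this commit, Transformers-backend VLM tests had to opt out of both
# caches; after it, the defaults (caching enabled) are expected to work.
llm = LLM(
    model="llava-hf/llava-1.5-7b-hf",       # placeholder, not from the diff
    model_impl="transformers",
    # disable_mm_preprocessor_cache=True,   # old workaround, no longer needed
    # enable_prefix_caching=False,          # old workaround, no longer needed
)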


@@ -315,11 +315,6 @@ class MultiModalProcessor(BaseMultiModalProcessor[MultiModalProcessingInfo]):
         Apply HF Processor on prompt text and multi-modal data together,
         outputting token IDs and processed tensors.
         """
-        if return_mm_hashes:
-            raise ValueError(
-                "TransformersForMultimodalLM doesn't support mm hashing yet! "
-                "Probably you didn't set `disable_mm_preprocessor_cache=True`")
-
         if tokenization_kwargs is None:
             tokenization_kwargs = {}
@@ -375,12 +370,14 @@ class MultiModalProcessor(BaseMultiModalProcessor[MultiModalProcessingInfo]):
                 num_image_patches),
         )
+        mm_hashes = self._hash_mm_items(mm_items, hf_processor_mm_kwargs,
+                                        tokenization_kwargs)
+
         return MultiModalInputs(
             type="multimodal",
             prompt=prompt,
             prompt_token_ids=prompt_ids,
             mm_kwargs=mm_kwargs,
-            mm_hashes=None,
+            mm_hashes=mm_hashes,
             mm_placeholders=mm_placeholders,
         )
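Reviewer note: with the early `return_mm_hashes` guard removed, the processor now derives per-item hashes via `_hash_mm_items` and returns them in `MultiModalInputs`, which is what lets the preprocessor cache and the prefix cache recognize repeated media. A minimal, self-contained sketch of the underlying idea (not vLLM's actual `_hash_mm_items` implementation):

import hashlib
import pickle


def hash_mm_item(item_bytes: bytes, processor_kwargs: dict) -> str:
    """Stable content hash for one multi-modal item (illustrative helper)."""
    # Cover both the raw media content and the processor kwargs, so the same
    # image processed with different settings gets a different cache key.
    kwargs_blob = pickle.dumps(sorted(processor_kwargs.items()))
    return hashlib.sha256(item_bytes + kwargs_blob).hexdigest()


# Identical inputs map to identical keys, which is what makes caching possible.
assert hash_mm_item(b"<image bytes>", {"do_resize": True}) == \
       hash_mm_item(b"<image bytes>", {"do_resize": True})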


@@ -406,9 +406,9 @@ def need_extra_keys(request: Request) -> bool:
     # Multimodal requests need to include the MM hash.
     # LoRA requests need to include the LoRA ID.
     # Request with provided cache salt need to include the salt.
-    return bool(request.mm_positions) or (request.lora_request
-                                          is not None) or (request.cache_salt
-                                                           is not None)
+    return bool(request.mm_hashes) or (request.lora_request
+                                       is not None) or (request.cache_salt
+                                                        is not None)


 def _gen_mm_extra_hash_keys(request: Request, start_token_idx: int,
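Reviewer note: `need_extra_keys` now looks at `request.mm_hashes`, so prefix-cache block hashes are extended with the hash of the media behind the multi-modal tokens. The toy example below is a hedged sketch of why that matters, not vLLM's block-hashing code: two prompts can share the same placeholder token IDs while referring to different images, so the media hash has to participate in the block hash to keep their KV blocks apart.

import hashlib
from typing import Optional


def block_hash(parent_hash: Optional[str], token_ids: tuple,
               extra_keys: tuple = ()) -> str:
    """Toy block hash: chains the parent hash, token IDs, and extra keys."""
    payload = repr((parent_hash, token_ids, extra_keys)).encode()
    return hashlib.sha256(payload).hexdigest()


# Same token IDs, different image hashes -> different cache blocks.
assert block_hash(None, (1, 2, 3), ("mmhash-A",)) != \
       block_hash(None, (1, 2, 3), ("mmhash-B",))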