From e936e401debe7fba64d6462666d7dc632bc76357 Mon Sep 17 00:00:00 2001
From: Isotr0py <2037008807@qq.com>
Date: Mon, 30 Jun 2025 18:16:16 +0800
Subject: [PATCH] [Bugfix] Fix processor initialization in transformers 4.53.0
 (#20244)

Signed-off-by: Isotr0py <2037008807@qq.com>
---
 vllm/inputs/registry.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/vllm/inputs/registry.py b/vllm/inputs/registry.py
index 66e78833f52af..fc6e190e54806 100644
--- a/vllm/inputs/registry.py
+++ b/vllm/inputs/registry.py
@@ -5,7 +5,9 @@ from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, NamedTuple, Optional, Union
 
 import torch
+from packaging.version import Version
 from transformers import BatchFeature, PretrainedConfig, ProcessorMixin
+from transformers import __version__ as TRANSFORMERS_VERSION
 from typing_extensions import TypeVar
 
 from vllm.jsontree import JSONTree, json_map_leaves
@@ -128,9 +130,13 @@ class InputProcessingContext(InputContext):
         /,
         **kwargs: object,
     ) -> _P:
+        # Transformers 4.53.0 has issue with passing tokenizer to
+        # initialize processor. We disable it for this version.
+        # See: https://github.com/vllm-project/vllm/issues/20224
+        if Version(TRANSFORMERS_VERSION) != Version("4.53.0"):
+            kwargs["tokenizer"] = self.tokenizer
         return super().get_hf_processor(
             typ,
-            tokenizer=self.tokenizer,
             **kwargs,
         )