diff --git a/vllm/inputs/registry.py b/vllm/inputs/registry.py
index 66e78833f52af..fc6e190e54806 100644
--- a/vllm/inputs/registry.py
+++ b/vllm/inputs/registry.py
@@ -5,7 +5,9 @@ from dataclasses import dataclass
 from typing import TYPE_CHECKING, Any, NamedTuple, Optional, Union
 
 import torch
+from packaging.version import Version
 from transformers import BatchFeature, PretrainedConfig, ProcessorMixin
+from transformers import __version__ as TRANSFORMERS_VERSION
 from typing_extensions import TypeVar
 
 from vllm.jsontree import JSONTree, json_map_leaves
@@ -128,9 +130,13 @@ class InputProcessingContext(InputContext):
         /,
         **kwargs: object,
     ) -> _P:
+        # Transformers 4.53.0 has issue with passing tokenizer to
+        # initialize processor. We disable it for this version.
+        # See: https://github.com/vllm-project/vllm/issues/20224
+        if Version(TRANSFORMERS_VERSION) != Version("4.53.0"):
+            kwargs["tokenizer"] = self.tokenizer
         return super().get_hf_processor(
             typ,
-            tokenizer=self.tokenizer,
             **kwargs,
         )
 
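For context, here is a minimal standalone sketch of the version-gating pattern the patch relies on: `packaging.version.Version` parses the installed `transformers.__version__` so that the exact 4.53.0 release can be special-cased. The `build_processor_kwargs` helper below is illustrative only and is not part of vLLM.

```python
# Illustrative sketch (not vLLM code): gate a kwarg on the installed
# transformers version, mirroring the check added in the patch above.
from packaging.version import Version
from transformers import __version__ as TRANSFORMERS_VERSION


def build_processor_kwargs(tokenizer, **kwargs):
    # transformers 4.53.0 mishandles an explicitly passed tokenizer when
    # constructing a processor (see
    # https://github.com/vllm-project/vllm/issues/20224), so only forward
    # the tokenizer on other versions.
    if Version(TRANSFORMERS_VERSION) != Version("4.53.0"):
        kwargs["tokenizer"] = tokenizer
    return kwargs
```

Note that the comparison is against the exact version, so patch releases such as 4.53.1 would still have the tokenizer passed through.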