Skip models that cannot currently init on Transformers v5 (#28471)
Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
parent 69d0e90313
commit 51c599f0ec
@@ -872,12 +872,12 @@ steps:
   optional: true
   commands:
     - pip install --upgrade git+https://github.com/huggingface/transformers
-    - pytest -v -s tests/models/test_initialization.py
+    - pytest -v -s tests/models/test_initialization.py -k 'not (Gemma3 or ModernBert or Qwen2_5_VL or Qwen2_5vl or Qwen2VL or TransformersMultiModalEmbeddingModel or TransformersMultiModalForSequenceClassification or Ultravox or Phi4Multimodal or LlavaNextVideo or MiniCPMO or Lfm2Moe or PaliGemma or RobertaForSequenceClassification or Ovis2_5 or Fuyu or DeepseekOCR or KimiVL)'
     - pytest -v -s tests/models/test_transformers.py
-    - pytest -v -s tests/models/multimodal/processing/
-    - pytest -v -s tests/models/multimodal/test_mapping.py
+    # - pytest -v -s tests/models/multimodal/processing/
+    - pytest -v -s tests/models/multimodal/test_mapping.py -k 'not (Gemma3 or Qwen2VL or Qwen2_5_VL)'
     - python3 examples/offline_inference/basic/chat.py
-    - python3 examples/offline_inference/vision_language.py --model-type qwen2_5_vl
+    # - python3 examples/offline_inference/vision_language.py --model-type qwen2_5_vl
     # Whisper needs spawn method to avoid deadlock
     - VLLM_WORKER_MULTIPROC_METHOD=spawn python3 examples/offline_inference/audio_language.py --model-type whisper
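The widened `-k 'not (...)'` filters deselect by substring: pytest matches the expression against each generated test ID, and parametrized IDs embed the model architecture name, so the listed models are skipped while the rest of the suite still runs. A minimal sketch of that mechanism (the model list and test name below are illustrative, not vLLM's actual ones):

# Sketch of how `pytest -k 'not (Gemma3 or ...)'` deselects models by name.
# The -k expression is matched against each test ID; parametrized IDs embed
# the model architecture, so excluded substrings knock out those cases.
import pytest

MODEL_ARCHS = [
    "Gemma3ForCausalLM",
    "LlamaForCausalLM",
    "KimiVLForConditionalGeneration",
]

@pytest.mark.parametrize("model_arch", MODEL_ARCHS)
def test_can_initialize(model_arch: str):
    # `pytest -k 'not (Gemma3 or KimiVL)' this_file.py` runs only the
    # LlamaForCausalLM case; the other two IDs match excluded substrings
    # and are reported as deselected.
    assert model_arch.endswith(("CausalLM", "ConditionalGeneration"))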
@@ -82,7 +82,8 @@ enable_hf_transfer()

 class DisabledTqdm(tqdm):
     def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs, disable=True)
+        kwargs["disable"] = True
+        super().__init__(*args, **kwargs)


 def get_lock(model_name_or_path: str | Path, cache_dir: str | None = None):
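The `DisabledTqdm` change fixes a call-convention bug: `super().__init__(*args, **kwargs, disable=True)` raises `TypeError: ... got multiple values for keyword argument 'disable'` whenever a caller also supplies `disable` in `kwargs`, while assigning into `kwargs` first simply overrides the caller's value. A standalone repro, assuming that duplicate-keyword failure is what the change guards against:

# Repro of the duplicate-keyword failure the DisabledTqdm fix avoids
# (assumption: some caller passes `disable` explicitly to the progress bar).
class Base:
    def __init__(self, disable: bool = False):
        self.disable = disable

class Broken(Base):
    def __init__(self, *args, **kwargs):
        # If kwargs already contains "disable", this supplies the keyword
        # twice and raises TypeError at call time.
        super().__init__(*args, **kwargs, disable=True)

class Fixed(Base):
    def __init__(self, *args, **kwargs):
        kwargs["disable"] = True  # overwrite the caller's value instead
        super().__init__(*args, **kwargs)

assert Fixed(disable=False).disable is True
try:
    Broken(disable=False)
except TypeError as err:
    print(err)  # "... got multiple values for keyword argument 'disable'"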
@@ -13,7 +13,6 @@ from transformers import (
     BatchFeature,
     WhisperConfig,
     WhisperFeatureExtractor,
     WhisperProcessor,
 )
-from transformers.models.whisper.modeling_whisper import sinusoids
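Dropping this import removes a dependency on the private `transformers.models.whisper.modeling_whisper` module, whose contents are free to move between major versions. For reference, a self-contained sketch of what that `sinusoids` helper computes, reconstructed from its public behavior (the upstream code may differ in detail):

# Sketch of Whisper's sinusoidal positional-embedding table: sin and cos
# halves over a geometric progression of timescales, concatenated per row.
import math
import torch

def sinusoids(length: int, channels: int, max_timescale: float = 10000) -> torch.Tensor:
    """Return a (length, channels) table of concatenated sin/cos embeddings."""
    if channels % 2 != 0:
        raise ValueError(f"channels must be even, got {channels}")
    # Inverse timescales form a geometric progression across half the channels.
    log_increment = math.log(max_timescale) / (channels // 2 - 1)
    inv_timescales = torch.exp(-log_increment * torch.arange(channels // 2))
    scaled_time = torch.arange(length)[:, None].float() * inv_timescales[None, :]
    return torch.cat([scaled_time.sin(), scaled_time.cos()], dim=1)

print(sinusoids(1500, 384).shape)  # torch.Size([1500, 384]), Whisper-tiny sizes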
@@ -660,16 +659,6 @@ class WhisperProcessingInfo(BaseProcessingInfo):
     def get_hf_config(self) -> WhisperConfig:
         return self.ctx.get_hf_config(WhisperConfig)

-    def get_hf_processor(self, **kwargs: object) -> WhisperProcessor:
-        # HACK: Transformers 4.53.2 has issue with whisper tokenizer to
-        # initialize processor. We use a monkeypatch to fix it here.
-        # See: https://github.com/vllm-project/vllm/issues/20224
-        processor_class = WhisperProcessor
-        tokenizer_class = ("WhisperTokenizer", "WhisperTokenizerFast")
-        if processor_class.tokenizer_class != tokenizer_class:
-            processor_class.tokenizer_class = tokenizer_class
-        return self.ctx.get_hf_processor(processor_class, **kwargs)
-
     def get_supported_mm_limits(self) -> Mapping[str, int | None]:
         return {"audio": 1}
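The deleted override existed only to monkeypatch `WhisperProcessor.tokenizer_class` so the processor could resolve both the slow and fast tokenizer on Transformers 4.53.2 (https://github.com/vllm-project/vllm/issues/20224); with the upstream issue resolved, the base class's default processor loading suffices. A stub illustration of the class-attribute patch pattern the hack used (the stub class here is hypothetical, standing in for transformers' WhisperProcessor):

# Class-attribute monkeypatch pattern, applied here to a stub.
class WhisperProcessorStub:
    # Older metadata named only the slow tokenizer, so the processor
    # could not resolve WhisperTokenizerFast when loading.
    tokenizer_class = "WhisperTokenizer"

wanted = ("WhisperTokenizer", "WhisperTokenizerFast")
if WhisperProcessorStub.tokenizer_class != wanted:
    # Patching the class (not an instance) affects every later construction.
    WhisperProcessorStub.tokenizer_class = wanted

assert WhisperProcessorStub.tokenizer_class == wanted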