[CI/Build] Bump test transformers version (#10106)

Signed-off-by: Isotr0py <2037008807@qq.com>
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Co-authored-by: DarkLight1337 <tlleungac@connect.ust.hk>
Isotr0py authored 2024-12-06 00:05:52 +08:00, committed by GitHub
parent 571da8fc43
commit 998eeafe58
5 changed files with 3 additions and 35 deletions
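
In short: the test dependency pin for transformers moves from 4.45.2 up to 4.46.3. With the pin at 4.46.3, every skipif gate below that checks for an older release (`< "4.46.0"`, `< "4.46.2"`) can never fire again, so those gates and their now-unused `import transformers` lines are deleted; the one gate that matched on `startswith("4.46")` would have skipped its tests under the new pin as well, so removing it re-enables them. The Pixtral engine test's GPU-memory requirement is also raised from 24 GB to 48 GB.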

File 1 of 5

@@ -550,7 +550,7 @@ tqdm==4.66.6
     #   transformers
 tqdm-multiprocess==0.0.11
     # via lm-eval
-transformers==4.45.2
+transformers==4.46.3
     # via
     #   lm-eval
     #   peft
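
The `# via` annotations mark this file as pip-compile-generated output (each pin lists the packages that require it), so the new version is normally picked up by regenerating the lock file rather than by hand-editing the pin.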

File 2 of 5

@@ -6,7 +6,6 @@ from pathlib import PosixPath
 from typing import Type

 import pytest
-import transformers
 from transformers import AutoModelForVision2Seq
 from transformers.utils import is_flash_attn_2_available
@@ -187,12 +186,6 @@ VLM_TEST_SETTINGS = {
         comparator=check_outputs_equal,
         max_tokens=8,
         dtype="bfloat16",
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.2",
-                reason="Model broken in HF, see huggingface/transformers#34379"
-            ),
-        ]
     ),
     "fuyu": VLMTestInfo(
         models=["adept/fuyu-8b"],
@@ -243,13 +236,7 @@ VLM_TEST_SETTINGS = {
         max_model_len=8192,
         max_num_seqs=2,
         auto_cls=AutoModelForVision2Seq,
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.0",
-                reason="Model introduced in HF >= 4.46.0"
-            ),
-            large_gpu_mark(min_gb=48),
-        ],
+        marks=[large_gpu_mark(min_gb=48)],
     ),
     "intern_vl": VLMTestInfo(
         models=[
@@ -318,12 +305,6 @@ VLM_TEST_SETTINGS = {
         auto_cls=AutoModelForVision2Seq,
         vllm_output_post_proc=model_utils.llava_video_vllm_to_hf_output,
         image_sizes=[((1669, 2560), (2560, 1669), (183, 488), (488, 183))],
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.2",
-                reason="Model broken with changes in transformers 4.46"
-            )
-        ],
     ),
     "minicpmv_25": VLMTestInfo(
         models=["openbmb/MiniCPM-Llama3-V-2_5"],
@@ -404,10 +385,6 @@ VLM_TEST_SETTINGS = {
             cuda_device_count_stateless() < 2,
             reason="Need at least 2 GPUs to run the test.",
         ),
-        pytest.mark.skipif(
-            transformers.__version__ < "4.46.2",
-            reason="Model broken in HF, see huggingface/transformers#34379"
-        )
     ],
     **COMMON_BROADCAST_SETTINGS  # type: ignore
 ),
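
Note that the deleted gates compare `transformers.__version__` as a plain string. That happens to give the right answer for the values used here, but lexicographic order is not version order: as strings, `"4.9.0" < "4.46.0"` is False even though 4.9.0 is the older release. A minimal sketch of the same gate with a real semantic comparison, using `packaging` (MIN_TRANSFORMERS and requires_recent_transformers are illustrative names, not vLLM identifiers):

# Sketch only, not part of this diff: a version gate like the ones removed
# above, but using packaging.version instead of string comparison.
import pytest
import transformers
from packaging.version import Version

# Illustrative threshold, mirroring the "4.46.2" checks deleted above.
MIN_TRANSFORMERS = Version("4.46.2")

requires_recent_transformers = pytest.mark.skipif(
    Version(transformers.__version__) < MIN_TRANSFORMERS,
    reason=f"needs transformers >= {MIN_TRANSFORMERS}, "
           f"found {transformers.__version__}",
)

A mark built this way could be dropped into the `marks=[...]` lists above unchanged.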

File 3 of 5

@@ -228,7 +228,7 @@ def test_model_engine(vllm_runner, model: str, dtype: str) -> None:
                       name_1="output")


-@large_gpu_test(min_gb=24)
+@large_gpu_test(min_gb=48)
 @pytest.mark.parametrize(
     "prompt,expected_ranges",
     [(_create_engine_inputs_hf(IMG_URLS[:1]), [{
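
`large_gpu_test` (like `large_gpu_mark` above) is vLLM's own test helper for skipping on small GPUs. As a rough sketch of the underlying pattern only (vLLM's actual implementation and signature may differ, and needs_gpu_memory is a hypothetical name), such a mark can be derived from `torch.cuda`:

# Sketch of a "needs at least N GiB of GPU memory" mark, in the spirit of
# the large_gpu_test helper used above; not vLLM's real implementation.
import pytest
import torch

def needs_gpu_memory(min_gb: int) -> pytest.MarkDecorator:
    # Skip outright when no CUDA device is visible.
    if not torch.cuda.is_available():
        return pytest.mark.skip(reason="Requires a CUDA GPU")
    # Skip when the first visible device is too small.
    total_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3
    return pytest.mark.skipif(
        total_gb < min_gb,
        reason=f"Requires at least {min_gb} GiB of GPU memory, "
               f"found {total_gb:.0f} GiB",
    )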

File 4 of 5

@@ -2,7 +2,6 @@ from typing import List, Type

 import pytest
 import torch.nn.functional as F
-import transformers
 from transformers import AutoModelForVision2Seq

 from ....conftest import IMAGE_ASSETS, HfRunner, PromptImageInput, VllmRunner
@@ -86,9 +85,6 @@ def _run_test(
     )


-@pytest.mark.skipif(transformers.__version__.startswith("4.46"),
-                    reason="Model broken with changes in transformers 4.46")
 @pytest.mark.core_model
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dtype", ["half"])
 def test_models_text(
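
Unlike the minimum-version gates, this one matched on a prefix: any 4.46.x release, including the 4.46.3 now being pinned, would have been skipped, so the mark has to go with the bump. If a known-broken range ever needs gating again, `packaging.specifiers` can express the exact range rather than a whole series; a sketch (BROKEN_RANGE is an illustrative range, not taken from this commit):

# Sketch: skip on the exact broken release range instead of a string prefix.
import pytest
import transformers
from packaging.specifiers import SpecifierSet

BROKEN_RANGE = SpecifierSet(">=4.46.0,<4.46.2")  # illustrative example

skip_broken_transformers = pytest.mark.skipif(
    transformers.__version__ in BROKEN_RANGE,
    reason=f"model broken on transformers {BROKEN_RANGE}",
)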

File 5 of 5

@@ -1,7 +1,6 @@
 from unittest.mock import patch

 import pytest
-import transformers
 from transformers import PretrainedConfig

 from vllm import LLM
@@ -11,10 +10,6 @@ from .registry import HF_EXAMPLE_MODELS

 @pytest.mark.parametrize("model_arch", HF_EXAMPLE_MODELS.get_supported_archs())
 def test_can_initialize(model_arch):
-    if (model_arch in {"Idefics3ForConditionalGeneration", "GlmForCausalLM"}
-            and transformers.__version__ < "4.46.0"):
-        pytest.skip(reason="Model introduced in HF >= 4.46.0")
     model_info = HF_EXAMPLE_MODELS.get_hf_info(model_arch)
     if not model_info.is_available_online:
         pytest.skip("Model is not available online")