[CI/Build] Bump test transformers version (#10106)
Signed-off-by: Isotr0py <2037008807@qq.com>
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Co-authored-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
parent 571da8fc43
commit 998eeafe58
@@ -550,7 +550,7 @@ tqdm==4.66.6
     #   transformers
 tqdm-multiprocess==0.0.11
     # via lm-eval
-transformers==4.45.2
+transformers==4.46.3
     # via
     #   lm-eval
     #   peft
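As a quick sanity check when running the suite locally, one can verify that the installed transformers actually meets the new pin. A minimal sketch, assuming the pin above comes from a pip-compile-generated test requirements file (the helper below is illustrative, not part of this commit):

# Illustrative only: confirm the installed transformers matches the new pin.
from packaging.version import Version

import transformers

REQUIRED = Version("4.46.3")  # mirrors the bumped pin above

def check_test_pin() -> None:
    installed = Version(transformers.__version__)
    if installed < REQUIRED:
        raise RuntimeError(
            f"transformers {installed} is older than the test pin {REQUIRED}; "
            "reinstall the test requirements before running the suite.")

check_test_pin()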
@@ -6,7 +6,6 @@ from pathlib import PosixPath
 from typing import Type
 
 import pytest
-import transformers
 from transformers import AutoModelForVision2Seq
 from transformers.utils import is_flash_attn_2_available
 
@@ -187,12 +186,6 @@ VLM_TEST_SETTINGS = {
         comparator=check_outputs_equal,
         max_tokens=8,
         dtype="bfloat16",
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.2",
-                reason="Model broken in HF, see huggingface/transformers#34379"
-            ),
-        ]
     ),
     "fuyu": VLMTestInfo(
         models=["adept/fuyu-8b"],
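Incidentally, the guards being deleted compared version strings lexicographically (transformers.__version__ < "4.46.2"), which misorders versions once a component gains a digit: as strings, "4.100.0" < "4.46.2", even though 4.100 is numerically newer. If such a gate is ever needed again, a sketch of a more robust variant (skip_below is a hypothetical helper, not vLLM API):

import pytest
from packaging.version import Version

import transformers

def skip_below(min_version: str, reason: str):
    """Return a skipif mark that compares versions numerically, not as strings."""
    return pytest.mark.skipif(
        Version(transformers.__version__) < Version(min_version),
        reason=reason,
    )

# Usage, mirroring the deleted marks= entries:
#   marks=[skip_below("4.46.2",
#                     "Model broken in HF, see huggingface/transformers#34379")]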
@@ -243,13 +236,7 @@ VLM_TEST_SETTINGS = {
         max_model_len=8192,
         max_num_seqs=2,
         auto_cls=AutoModelForVision2Seq,
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.0",
-                reason="Model introduced in HF >= 4.46.0"
-            ),
-            large_gpu_mark(min_gb=48),
-        ],
+        marks=[large_gpu_mark(min_gb=48)],
     ),
     "intern_vl": VLMTestInfo(
         models=[
@@ -318,12 +305,6 @@ VLM_TEST_SETTINGS = {
         auto_cls=AutoModelForVision2Seq,
         vllm_output_post_proc=model_utils.llava_video_vllm_to_hf_output,
         image_sizes=[((1669, 2560), (2560, 1669), (183, 488), (488, 183))],
-        marks=[
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.2",
-                reason="Model broken with changes in transformers 4.46"
-            )
-        ],
     ),
     "minicpmv_25": VLMTestInfo(
         models=["openbmb/MiniCPM-Llama3-V-2_5"],
@@ -404,10 +385,6 @@ VLM_TEST_SETTINGS = {
                 cuda_device_count_stateless() < 2,
                 reason="Need at least 2 GPUs to run the test.",
             ),
-            pytest.mark.skipif(
-                transformers.__version__ < "4.46.2",
-                reason="Model broken in HF, see huggingface/transformers#34379"
-            )
         ],
         **COMMON_BROADCAST_SETTINGS  # type: ignore
     ),
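For readers unfamiliar with the cuda_device_count_stateless() call kept in the context above: it counts GPUs without initializing CUDA state in the current process, which matters because calling torch.cuda.device_count() can poison later fork-based workers. A rough sketch of the idea via NVML (vLLM's real implementation differs, and also honors CUDA_VISIBLE_DEVICES):

def cuda_device_count_stateless() -> int:
    """Count CUDA devices without touching torch.cuda state.
    Sketch only, assuming pynvml is installed."""
    try:
        import pynvml
        pynvml.nvmlInit()
        try:
            return pynvml.nvmlDeviceGetCount()
        finally:
            pynvml.nvmlShutdown()
    except Exception:
        return 0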
@@ -228,7 +228,7 @@ def test_model_engine(vllm_runner, model: str, dtype: str) -> None:
                          name_1="output")
 
 
-@large_gpu_test(min_gb=24)
+@large_gpu_test(min_gb=48)
 @pytest.mark.parametrize(
     "prompt,expected_ranges",
     [(_create_engine_inputs_hf(IMG_URLS[:1]), [{
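The large_gpu_test(min_gb=...) decorator raised here from 24 to 48 GB gates a test on available device memory. A minimal sketch of how such a gate can be built (assumed shape; vLLM's actual helper lives in its test utilities and may differ):

import pytest
import torch

def large_gpu_mark(min_gb: int):
    """Build a skipif mark requiring `min_gb` GiB on the first CUDA device."""
    if torch.cuda.is_available():
        total_gb = torch.cuda.get_device_properties(0).total_memory / 2**30
    else:
        total_gb = 0.0
    return pytest.mark.skipif(
        total_gb < min_gb,
        reason=f"Need at least {min_gb} GiB of GPU memory ({total_gb:.0f} found)",
    )

def large_gpu_test(*, min_gb: int):
    """Decorator form used above, e.g. @large_gpu_test(min_gb=48)."""
    return large_gpu_mark(min_gb)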
@@ -2,7 +2,6 @@ from typing import List, Type
 
 import pytest
 import torch.nn.functional as F
-import transformers
 from transformers import AutoModelForVision2Seq
 
 from ....conftest import IMAGE_ASSETS, HfRunner, PromptImageInput, VllmRunner
@@ -86,9 +85,6 @@ def _run_test(
     )
 
 
-@pytest.mark.skipif(transformers.__version__.startswith("4.46"),
-                    reason="Model broken with changes in transformers 4.46")
-@pytest.mark.core_model
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("dtype", ["half"])
 def test_models_text(
@@ -1,7 +1,6 @@
 from unittest.mock import patch
 
 import pytest
-import transformers
 from transformers import PretrainedConfig
 
 from vllm import LLM
@@ -11,10 +10,6 @@ from .registry import HF_EXAMPLE_MODELS
 
 @pytest.mark.parametrize("model_arch", HF_EXAMPLE_MODELS.get_supported_archs())
 def test_can_initialize(model_arch):
-    if (model_arch in {"Idefics3ForConditionalGeneration", "GlmForCausalLM"}
-            and transformers.__version__ < "4.46.0"):
-        pytest.skip(reason="Model introduced in HF >= 4.46.0")
-
     model_info = HF_EXAMPLE_MODELS.get_hf_info(model_arch)
     if not model_info.is_available_online:
         pytest.skip("Model is not available online")
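The hard-coded architecture check removed from test_can_initialize could instead live as metadata on the registry entry itself; a sketch of that alternative (the min_transformers_version field and the helper are hypothetical, not vLLM's actual registry API):

from dataclasses import dataclass
from typing import Optional

import pytest
import transformers
from packaging.version import Version

@dataclass
class _ExampleModelInfo:
    """Hypothetical registry entry; vLLM's HF_EXAMPLE_MODELS entries differ."""
    model: str
    is_available_online: bool = True
    min_transformers_version: Optional[str] = None

def skip_if_too_old(info: _ExampleModelInfo) -> None:
    """Skip based on registry metadata instead of hard-coded arch names."""
    required = info.min_transformers_version
    if required and Version(transformers.__version__) < Version(required):
        pytest.skip(f"Needs transformers >= {required}")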