Mirror of https://git.datalinker.icu/vllm-project/vllm.git (synced 2025-12-15 20:16:17 +08:00)
[V0 Deprecation][TPU] Remove V1 flag check from tests (#22248)

Signed-off-by: NickLucche <nlucches@redhat.com>

parent: 74333ae2f6
commit: 0c275ad5ad
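
Each of the three hunks below deletes the same module-level guard from a TPU test file. For context, a minimal sketch of the removed pattern (the surrounding module is hypothetical): pytest evaluates pytest.skip(..., allow_module_level=True) at import/collection time, so the whole file used to be skipped unless the V1 engine flag was set; with V0 deprecated and V1 the default, the guard had become dead code.

import pytest

from vllm import envs

# Module-level skip, evaluated when pytest imports the file. Because V1 is
# now the default engine, `not envs.VLLM_USE_V1` no longer triggers, and the
# guard can be deleted outright.
if not envs.VLLM_USE_V1:
    pytest.skip(
        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
        allow_module_level=True,
    )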
@@ -12,17 +12,10 @@ import torch_xla
 import torch_xla.core
 import torch_xla.core.xla_model
 
-from vllm import envs
 from vllm.attention.layer import MultiHeadAttention
 from vllm.attention.selector import _cached_get_attn_backend
 from vllm.platforms import current_platform
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.fixture(autouse=True)
 def clear_cache():
@@ -4,19 +4,12 @@
 import openai
 import pytest
 
-from vllm import envs
 from vllm.multimodal.utils import encode_image_base64, fetch_image
 from vllm.platforms import current_platform
 
 from ...entrypoints.openai.test_vision import TEST_IMAGE_URLS
 from ...utils import RemoteOpenAIServer
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.fixture(scope="session")
 def base64_encoded_image() -> dict[str, str]:
@@ -4,16 +4,10 @@ import random
 
 import pytest
 
-from vllm import LLM, envs
+from vllm import LLM
 from vllm.platforms import current_platform
 from vllm.sampling_params import SamplingParams
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.mark.parametrize("model_name", ["Qwen/Qwen2.5-1.5B-Instruct"])
 @pytest.mark.skipif(not current_platform.is_tpu(),
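
Note that the TPU gating itself is unchanged: it lives in the per-test skipif marker visible in the context lines of the last hunk, not in the deleted engine-flag check. A minimal sketch of that remaining pattern; the reason string and test body are assumptions for illustration:

import pytest

from vllm.platforms import current_platform


# Per-test platform gate: skips on non-TPU hosts regardless of any
# engine-version flag. Reason text and test body here are assumed.
@pytest.mark.skipif(not current_platform.is_tpu(),
                    reason="This test requires a TPU")
def test_runs_on_tpu_only():
    ...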