[V0 Deprecation][TPU] Remove V1 flag check from tests (#22248)

Signed-off-by: NickLucche <nlucches@redhat.com>
Author: Nicolò Lucchesi (authored 2025-08-05 15:53:23 +02:00, committed via GitHub)
Parent: 74333ae2f6
Commit: 0c275ad5ad
3 changed files with 1 addition and 21 deletions


@@ -12,17 +12,10 @@ import torch_xla
 import torch_xla.core
 import torch_xla.core.xla_model
 
-from vllm import envs
 from vllm.attention.layer import MultiHeadAttention
 from vllm.attention.selector import _cached_get_attn_backend
 from vllm.platforms import current_platform
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.fixture(autouse=True)
 def clear_cache():


@@ -4,19 +4,12 @@
 import openai
 import pytest
 
-from vllm import envs
 from vllm.multimodal.utils import encode_image_base64, fetch_image
 from vllm.platforms import current_platform
 
 from ...entrypoints.openai.test_vision import TEST_IMAGE_URLS
 from ...utils import RemoteOpenAIServer
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.fixture(scope="session")
 def base64_encoded_image() -> dict[str, str]:


@@ -4,16 +4,10 @@ import random
 import pytest
 
-from vllm import LLM, envs
+from vllm import LLM
 from vllm.platforms import current_platform
 from vllm.sampling_params import SamplingParams
 
-if not envs.VLLM_USE_V1:
-    pytest.skip(
-        "Skipping V1 tests. Rerun with `VLLM_USE_V1=1` to test.",
-        allow_module_level=True,
-    )
-
 
 @pytest.mark.parametrize("model_name", ["Qwen/Qwen2.5-1.5B-Instruct"])
 @pytest.mark.skipif(not current_platform.is_tpu(),