[CI/Build][AMD] Skip if flash_attn_varlen_func not available in test_aiter_flash_attn.py (#29043)

Signed-off-by: Randall Smith <ransmith@amd.com>
Co-authored-by: Randall Smith <ransmith@amd.com>

commit 3d84ef9054
parent 4d01b64284
Author: rasmith
Date:   2025-11-20 14:39:49 -06:00 (committed by GitHub)

@@ -6,6 +6,7 @@ import pytest
 import torch
 
 import vllm.v1.attention.backends.rocm_aiter_fa  # noqa: F401
+from vllm.attention.utils.fa_utils import is_flash_attn_varlen_func_available
 from vllm.platforms import current_platform
 
 NUM_HEADS = [(4, 4), (8, 2)]
@@ -100,6 +101,8 @@ def test_varlen_with_paged_kv(
     num_blocks: int,
     q_dtype: torch.dtype | None,
 ) -> None:
+    if not is_flash_attn_varlen_func_available():
+        pytest.skip("flash_attn_varlen_func required to run this test.")
     torch.set_default_device("cuda")
     current_platform.seed_everything(0)
     num_seqs = len(seq_lens)
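
The guard added here lives inside the test body, so is_flash_attn_varlen_func_available() is evaluated only when the test actually runs. As a sketch of an alternative (not what this commit does), the same check could drive pytest's standard skipif marker so the test is skipped at collection time; this assumes the helper is cheap and safe to call while tests are being collected, and the simplified test below is hypothetical:

```python
import pytest
import torch

from vllm.attention.utils.fa_utils import is_flash_attn_varlen_func_available
from vllm.platforms import current_platform

# Collection-time variant of the guard added in this commit: the skipif
# marker evaluates the availability check once, when tests are collected,
# instead of calling pytest.skip() inside the running test body.
@pytest.mark.skipif(
    not is_flash_attn_varlen_func_available(),
    reason="flash_attn_varlen_func required to run this test.",
)
def test_varlen_with_paged_kv_guarded() -> None:
    # Hypothetical trimmed-down body; the real test in
    # test_aiter_flash_attn.py takes parametrized arguments.
    torch.set_default_device("cuda")
    current_platform.seed_everything(0)
```

One point in favor of the in-body pytest.skip() that the commit uses: the check runs under the same fixtures and parametrization as the test itself, rather than once at import/collection time.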