[CI/Build][AMD] Skip if flash_attn_varlen_func not available in test_aiter_flash_attn.py (#29043)
Signed-off-by: Randall Smith <ransmith@amd.com>
Co-authored-by: Randall Smith <ransmith@amd.com>
parent 4d01b64284
commit 3d84ef9054
test_aiter_flash_attn.py

@@ -6,6 +6,7 @@ import pytest
 import torch
 
 import vllm.v1.attention.backends.rocm_aiter_fa  # noqa: F401
+from vllm.attention.utils.fa_utils import is_flash_attn_varlen_func_available
 from vllm.platforms import current_platform
 
 NUM_HEADS = [(4, 4), (8, 2)]
@@ -100,6 +101,8 @@ def test_varlen_with_paged_kv(
     num_blocks: int,
     q_dtype: torch.dtype | None,
 ) -> None:
+    if not is_flash_attn_varlen_func_available():
+        pytest.skip("flash_attn_varlen_func required to run this test.")
     torch.set_default_device("cuda")
     current_platform.seed_everything(0)
     num_seqs = len(seq_lens)
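
For comparison, the same availability check can also gate an entire test module rather than a single test body. The sketch below is not part of this commit: it assumes only that is_flash_attn_varlen_func_available() is importable on the host, and the placeholder test function is hypothetical.

# A minimal sketch (not part of this commit), assuming the same helper,
# is_flash_attn_varlen_func_available(), is importable on the host.
# It gates every test in the module with pytest.mark.skipif instead of
# the inline pytest.skip() call the commit adds inside the test body.
import pytest

from vllm.attention.utils.fa_utils import is_flash_attn_varlen_func_available

# Applied to every test collected from this module.
pytestmark = pytest.mark.skipif(
    not is_flash_attn_varlen_func_available(),
    reason="flash_attn_varlen_func required to run these tests.",
)


def test_flash_attn_varlen_available() -> None:
    # Hypothetical placeholder test; runs only when the kernel is available.
    assert is_flash_attn_varlen_func_available()

A module-level marker skips the tests during collection, whereas the inline check the commit uses runs at test time; both approaches avoid failures on AMD hosts where flash_attn_varlen_func is unavailable.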