From 5e5a7eb16f121f05e19c8bdf88247744ab9d1b83 Mon Sep 17 00:00:00 2001
From: rasmith
Date: Thu, 20 Nov 2025 14:45:56 -0600
Subject: [PATCH] [CI/Build] Make test_attention_selector.py run tests on
 correct platform (#29064)

Signed-off-by: Randall Smith
Signed-off-by: rasmith
Co-authored-by: Randall Smith
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
---
 tests/kernels/attention/test_attention_selector.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tests/kernels/attention/test_attention_selector.py b/tests/kernels/attention/test_attention_selector.py
index 3b8e939300a27..9be56a33f76c8 100644
--- a/tests/kernels/attention/test_attention_selector.py
+++ b/tests/kernels/attention/test_attention_selector.py
@@ -7,6 +7,7 @@
 import pytest
 import torch
 from vllm.attention.selector import _cached_get_attn_backend, get_attn_backend
+from vllm.platforms import current_platform
 from vllm.platforms.cpu import CpuPlatform
 from vllm.platforms.cuda import CudaPlatform
 from vllm.platforms.rocm import RocmPlatform
@@ -47,9 +48,11 @@ DEVICE_MLA_BLOCK_SIZES = {
 
 
 def generate_params():
+    is_rocm = current_platform.is_rocm()
     params = []
+    device_list = ["cuda", "cpu"] if not is_rocm else ["hip", "cpu"]
     for use_mla in [True, False]:
-        for device in ["cuda", "hip", "cpu"]:
+        for device in device_list:
             backends = (
                 DEVICE_MLA_BACKENDS[device]
                 if use_mla
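
Reviewer note (not part of the patch): the change gates the test device matrix on the detected platform, so ROCm CI no longer parametrizes tests over the CUDA-only "cuda" device and other builds skip "hip". Below is a minimal self-contained sketch of that pattern, assuming only that vllm.platforms.current_platform.is_rocm() returns True on ROCm builds (it is the helper imported in the diff above); the test name, parameter order, and test body are hypothetical placeholders, not code from the actual suite.

    import pytest

    from vllm.platforms import current_platform


    def generate_params():
        # ROCm builds cannot run the CUDA-only backends, so the matrix
        # substitutes "hip" for "cuda"; "cpu" is exercised everywhere.
        devices = ["hip", "cpu"] if current_platform.is_rocm() else ["cuda", "cpu"]
        return [(device, use_mla) for device in devices for use_mla in (True, False)]


    # Hypothetical test wiring, shown only to illustrate how the generated
    # parameters feed pytest's parametrization.
    @pytest.mark.parametrize("device, use_mla", generate_params())
    def test_runs_only_on_supported_devices(device, use_mla):
        # Placeholder assertion; the real test selects and inspects a backend.
        assert device in ("cuda", "hip", "cpu")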