From c5ebe040ac2871bb41587df1357db867aba709a8 Mon Sep 17 00:00:00 2001
From: Jeremy Reizenstein <669761+bottler@users.noreply.github.com>
Date: Sun, 6 Jul 2025 03:37:59 +0100
Subject: [PATCH] test_attention compat with coming xformers change (#20487)

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
---
 tests/kernels/attention/test_attention.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/kernels/attention/test_attention.py b/tests/kernels/attention/test_attention.py
index 7269d19183bf2..2e0b4efebfdb1 100644
--- a/tests/kernels/attention/test_attention.py
+++ b/tests/kernels/attention/test_attention.py
@@ -450,7 +450,8 @@ def test_multi_query_kv_attention(
         start += seq_len
         # xformers.AttentionBias to Tensor for use in reference impl.
         alibi_bias = [
-            b.materialize(b.shape, device=device).squeeze() for b in attn_bias
+            b.materialize((1, num_query_heads, i, i), device=device).squeeze()
+            for b, i in zip(attn_bias, seq_lens)
         ]
     else:
         attn_bias = BlockDiagonalCausalMask.from_seqlens(seq_lens)
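
Note for reviewers (not part of the patch): xformers' AttentionBias.materialize(shape, ...)
renders a bias object into a dense tensor of the requested shape. The old call relied on the
bias object exposing a .shape attribute, which the coming xformers change drops, so the test
now computes the shape explicitly from seq_lens and num_query_heads. Below is a minimal
runnable sketch of the new call, assuming each attn_bias entry is a per-sequence
LowerTriangularMaskWithTensorBias as built by this test's ALiBi path; the random bias
tensors, sizes, and cpu device are placeholders, not values from the test:

    import torch
    from xformers.ops.fmha.attn_bias import LowerTriangularMaskWithTensorBias

    num_query_heads, seq_lens, device = 4, [7, 11], "cpu"

    # One bias object per sequence, each wrapping a (1, heads, seq, seq) tensor.
    attn_bias = [
        LowerTriangularMaskWithTensorBias(
            torch.randn(1, num_query_heads, i, i, device=device))
        for i in seq_lens
    ]

    # Explicit shape replaces the removed b.shape; squeeze() drops the
    # size-1 batch dimension, leaving (heads, seq, seq) for the reference impl.
    alibi_bias = [
        b.materialize((1, num_query_heads, i, i), device=device).squeeze()
        for b, i in zip(attn_bias, seq_lens)
    ]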