From 75e9d4979658f19c8a507a241acd6be8b83a8f55 Mon Sep 17 00:00:00 2001
From: Junlin Zhou
Date: Tue, 25 Feb 2025 18:13:09 +0800
Subject: [PATCH] [Bugfix] Initialize attention bias on the same device as
 Query/Key/Value (#13468)

---
 vllm/attention/backends/xformers.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/vllm/attention/backends/xformers.py b/vllm/attention/backends/xformers.py
index ec8e1f2ee5a6..9fa76634e1fc 100644
--- a/vllm/attention/backends/xformers.py
+++ b/vllm/attention/backends/xformers.py
@@ -673,7 +673,9 @@ class XFormersImpl(AttentionImpl[XFormersMetadata]):
 
                     # Cross-attention mask is non-causal
                     attn_bias = BlockDiagonalMask.from_seqlens(
-                        attn_metadata.seq_lens, attn_metadata.encoder_seq_lens)
+                        attn_metadata.seq_lens,
+                        attn_metadata.encoder_seq_lens,
+                        device=query.device)
 
                 # Encoder branch of encoder-decoder model uses
                 # attn_metadata.encoder_seq_lens
@@ -683,7 +685,7 @@ class XFormersImpl(AttentionImpl[XFormersMetadata]):
 
                     # Encoder self-attention mask is non-causal
                     attn_bias = BlockDiagonalMask.from_seqlens(
-                        attn_metadata.encoder_seq_lens)
+                        attn_metadata.encoder_seq_lens, device=query.device)
 
                 # Self-attention block of encoder-only model just
                 # uses the seq_lens directly.
@@ -692,7 +694,7 @@ class XFormersImpl(AttentionImpl[XFormersMetadata]):
 
                     # Encoder self-attention mask is non-causal
                     attn_bias = BlockDiagonalMask.from_seqlens(
-                        attn_metadata.seq_lens)
+                        attn_metadata.seq_lens, device=query.device)
 
                 # Self-attention block of decoder branch just
                 # uses the seq_lens directly
@@ -701,7 +703,7 @@ class XFormersImpl(AttentionImpl[XFormersMetadata]):
 
                     # Decoder self-attention mask is causal
                     attn_bias = BlockDiagonalCausalMask.from_seqlens(
-                        attn_metadata.seq_lens)
+                        attn_metadata.seq_lens, device=query.device)
                 else:
                     raise ValueError("Unknown AttentionType: %s", attn_type)
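
Reviewer's note (not part of the patch): a minimal sketch of the failure mode
this change addresses. Without an explicit device, BlockDiagonalMask.from_seqlens
creates its sequence-length bookkeeping tensors on the default (CPU) device,
which can trigger a device-mismatch error once the bias is used alongside CUDA
query/key/value tensors; passing device=query.device, as the patch does, builds
the mask on the right device from the start. The shapes and seq_lens below are
made-up illustrative values, and the snippet assumes an xformers version whose
from_seqlens accepts the device keyword and a machine with a CUDA device.

    import torch
    from xformers.ops.fmha.attn_bias import BlockDiagonalMask

    # Hypothetical per-sequence lengths for a batch of two prompts.
    seq_lens = [3, 5]
    # Query laid out as xformers expects: (batch, tokens, heads, head_dim).
    query = torch.randn(1, sum(seq_lens), 8, 64, device="cuda")

    # Before the patch: the mask's metadata lands on the CPU by default,
    # mismatching the CUDA query/key/value tensors.
    attn_bias = BlockDiagonalMask.from_seqlens(seq_lens)

    # After the patch: the mask lives on the same device as Q/K/V.
    attn_bias = BlockDiagonalMask.from_seqlens(seq_lens, device=query.device)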