From 51e5b3e3c422cdd81e3c1bd2b9abd025e53ae986 Mon Sep 17 00:00:00 2001
From: Matthew Bonanni
Date: Mon, 15 Dec 2025 14:45:21 -0500
Subject: [PATCH] [Bugfix] Fix ViT with FlashAttention on ROCm (#30703)

Signed-off-by: Matthew Bonanni
---
 vllm/attention/layer.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py
index 47daf6d138431..7ef77db8fbb5b 100644
--- a/vllm/attention/layer.py
+++ b/vllm/attention/layer.py
@@ -464,7 +464,10 @@ class MultiHeadAttention(nn.Module):
         }
 
         self.fa_version = None
-        if self.attn_backend == AttentionBackendEnum.FLASH_ATTN:
+        if (
+            self.attn_backend == AttentionBackendEnum.FLASH_ATTN
+            and current_platform.is_cuda()
+        ):
             self.fa_version = get_flash_attn_version()
             assert self._flash_attn_varlen_func is not None
             self._flash_attn_varlen_func = functools.partial(
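
Below is a minimal, self-contained sketch of the gating pattern the hunk introduces: the FlashAttention version is only resolved when the selected backend is FLASH_ATTN and the platform is CUDA, so on ROCm fa_version stays None and is never bound into the varlen call. The Platform, get_flash_attn_version, and flash_attn_varlen_func definitions here are hypothetical stand-ins for the corresponding vLLM/flash-attn symbols, not the real APIs.

# Minimal sketch (not vLLM's actual code) of the platform-gated fa_version
# binding. All helper definitions below are hypothetical stand-ins.
import functools
from enum import Enum, auto


class AttentionBackendEnum(Enum):
    FLASH_ATTN = auto()


class Platform:
    def __init__(self, device_type: str) -> None:
        self.device_type = device_type

    def is_cuda(self) -> bool:
        return self.device_type == "cuda"


def get_flash_attn_version() -> int:
    # Stand-in: the real helper inspects the installed (CUDA) flash-attn build.
    return 2


def flash_attn_varlen_func(q, k, v, fa_version=None):
    # Stand-in for the varlen attention entry point.
    return f"varlen(fa_version={fa_version})"


def bind_varlen_func(backend: AttentionBackendEnum, platform: Platform):
    func = flash_attn_varlen_func
    # Only query and bind fa_version when the backend is FLASH_ATTN *and*
    # the platform is CUDA; on ROCm the version query does not apply.
    if backend == AttentionBackendEnum.FLASH_ATTN and platform.is_cuda():
        func = functools.partial(func, fa_version=get_flash_attn_version())
    return func


print(bind_varlen_func(AttentionBackendEnum.FLASH_ATTN, Platform("cuda"))(None, None, None))
print(bind_varlen_func(AttentionBackendEnum.FLASH_ATTN, Platform("rocm"))(None, None, None))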