diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py
index 47daf6d138431..7ef77db8fbb5b 100644
--- a/vllm/attention/layer.py
+++ b/vllm/attention/layer.py
@@ -464,7 +464,10 @@ class MultiHeadAttention(nn.Module):
         }
 
         self.fa_version = None
-        if self.attn_backend == AttentionBackendEnum.FLASH_ATTN:
+        if (
+            self.attn_backend == AttentionBackendEnum.FLASH_ATTN
+            and current_platform.is_cuda()
+        ):
             self.fa_version = get_flash_attn_version()
             assert self._flash_attn_varlen_func is not None
             self._flash_attn_varlen_func = functools.partial(