[Bugfix] Honor --mm_encoder_attn_backend when used (#27124)

Co-authored-by: Bradley D <4551889+bradleyhd@users.noreply.github.com>
Co-authored-by: Roger Wang <hey@rogerw.io>
This commit is contained in: authored by Bradley D on 2025-10-23 05:09:52 -07:00, committed by GitHub
parent 3a4255c7c4
commit 570c3e1cd4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 10 additions and 1 deletions

View File

@@ -93,12 +93,15 @@ def check_upstream_fa_availability(dtype: torch.dtype):
def maybe_get_vit_flash_attn_backend(
attn_backend: _Backend, use_upstream_fa: bool
attn_backend: _Backend,
use_upstream_fa: bool,
attn_backend_override: _Backend | None = None,
) -> tuple[_Backend, Callable]:
if (
attn_backend != _Backend.FLASH_ATTN
and attn_backend != _Backend.ROCM_AITER_FA
and check_upstream_fa_availability(torch.get_default_dtype())
and attn_backend_override is None
):
attn_backend = _Backend.FLASH_ATTN
use_upstream_fa = True
@@ -499,6 +502,7 @@ class MultiHeadAttention(nn.Module):
maybe_get_vit_flash_attn_backend(
self.attn_backend,
use_upstream_fa,
attn_backend_override=attn_backend_override,
)
)

View File

@@ -299,6 +299,7 @@ class DotsVisionAttention(nn.Module):
maybe_get_vit_flash_attn_backend(
self.attn_backend,
self.use_upstream_fa,
attn_backend_override=attn_backend_override,
)
)
if self.attn_backend not in {

View File

@@ -206,6 +206,7 @@ class Ernie4_5_VisionAttention(nn.Module):
maybe_get_vit_flash_attn_backend(
self.attn_backend,
self.use_upstream_fa,
attn_backend_override=attn_backend_override,
)
)

View File

@@ -296,6 +296,7 @@ class Glm4vVisionAttention(nn.Module):
maybe_get_vit_flash_attn_backend(
self.attn_backend,
self.use_upstream_fa,
attn_backend_override=attn_backend_override,
)
)

View File

@@ -364,6 +364,7 @@ class Qwen2VisionAttention(nn.Module):
maybe_get_vit_flash_attn_backend(
self.attn_backend,
self.use_upstream_fa,
attn_backend_override=attn_backend_override,
)
)

View File

@@ -259,6 +259,7 @@ class Siglip2Attention(nn.Module):
maybe_get_vit_flash_attn_backend(
self.attn_backend,
self.use_upstream_fa,
attn_backend_override=attn_backend_override,
)
)