[Bugfix] Fallback ViT attn backend to SDPA for blackwell (#25851)

Signed-off-by: Roger Wang <hey@rogerw.io>
Signed-off-by: simon-mo <simon.mo@hey.com>
Author:    Roger Wang
Date:      2025-09-28 23:03:51 -07:00
Committer: simon-mo
Commit:    ab5b6459df (parent 8ce5d3198d)

2 changed files with 7 additions and 9 deletions

vllm/model_executor/models/qwen3_vl.py

@@ -66,7 +66,7 @@ from vllm.multimodal.processing import (BaseMultiModalProcessor,
                                          PromptReplacement, PromptUpdate,
                                          PromptUpdateDetails)
 from vllm.multimodal.profiling import BaseDummyInputsBuilder
-from vllm.platforms import _Backend, current_platform
+from vllm.platforms import _Backend
 from vllm.sequence import IntermediateTensors
 from vllm.transformers_utils.config import uses_mrope
 from vllm.utils import is_list_of
@@ -335,14 +335,6 @@ class Qwen3_VisionTransformer(nn.Module):
         }:
             raise RuntimeError(
                 f"Qwen3-VL does not support {self.attn_backend} backend now.")
-        if current_platform.is_device_capability(
-                100) and self.attn_backend != _Backend.TORCH_SDPA:
-            # TODO(Roger/Wentao): remove this after FA
-            # or XFORMERS's issue fixed on Blackwell
-            logger.info_once("Qwen3-VL vision attention does not support "
-                             f"{self.attn_backend} backend on Blackwell now. "
-                             "Vision attention backend is set to TORCH_SDPA.")
-            self.attn_backend = _Backend.TORCH_SDPA
         self.blocks = nn.ModuleList([
             Qwen3_VisionBlock(
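
The net effect of this removal: the Qwen3-VL vision tower no longer special-cases Blackwell itself and simply uses whatever backend the platform layer returns. A minimal sketch of the resulting call path, assuming a CUDA build of vLLM with a visible GPU (the head_size and dtype values here are made-up example inputs, not what Qwen3-VL actually derives from its config):

import torch

from vllm.platforms import current_platform

# Hypothetical example values; the model computes these from its config.
head_size, dtype = 80, torch.bfloat16
backend = current_platform.get_vit_attn_backend(head_size, dtype)
# On Blackwell GPUs this now returns _Backend.TORCH_SDPA directly,
# so the per-model override deleted above is redundant.
print(backend)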

vllm/platforms/cuda.py

@@ -205,6 +205,12 @@ class CudaPlatformBase(Platform):
     @classmethod
     def get_vit_attn_backend(cls, head_size: int,
                              dtype: torch.dtype) -> _Backend:
+        # For Blackwell GPUs, force TORCH_SDPA for now.
+        # See https://github.com/facebookresearch/xformers/issues/1317#issuecomment-3199392579 # noqa: E501
+        if cls.has_device_capability(100):
+            return _Backend.TORCH_SDPA
         if dtype not in (torch.float16, torch.bfloat16):
             return _Backend.XFORMERS
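
For clarity, below is a standalone sketch of the selection order this hunk establishes. It is a simplified stand-in, not the actual vLLM implementation; branches past the two shown in the diff are elided. Note the semantics change relative to the removed per-model code: has_device_capability(100) in vLLM matches compute capability >= 10.0, whereas the old is_device_capability(100) was an exact match.

from enum import Enum, auto

import torch


class _Backend(Enum):  # stand-in for vllm.platforms._Backend
    TORCH_SDPA = auto()
    XFORMERS = auto()


def get_vit_attn_backend(capability: int, dtype: torch.dtype) -> _Backend:
    """Simplified mirror of the hunk above; capability is major * 10 + minor."""
    # Blackwell (>= 10.0) short-circuits to SDPA until the FlashAttention /
    # xFormers issue linked in the diff is fixed.
    if capability >= 100:
        return _Backend.TORCH_SDPA
    # Non-half-precision dtypes fall back to xFormers.
    if dtype not in (torch.float16, torch.bfloat16):
        return _Backend.XFORMERS
    raise NotImplementedError("remaining branches are elided in this diff")


assert get_vit_attn_backend(100, torch.bfloat16) is _Backend.TORCH_SDPA  # Blackwell
assert get_vit_attn_backend(90, torch.float32) is _Backend.XFORMERS     # Hopper, fp32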