[Bugfix] Disable cascade attention with FlashInfer (#26130)

Signed-off-by: mgoin <mgoin64@gmail.com>
Signed-off-by: Michael Goin <mgoin64@gmail.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Author: Michael Goin <mgoin64@gmail.com>
Date: 2025-10-02 19:30:37 -04:00
Committed by: GitHub
Parent: 13cdc02173
Commit: f1fc2107a3

@@ -29,7 +29,6 @@ from vllm.utils.flashinfer import (can_use_trtllm_attention,
                                    flashinfer_disable_q_quantization,
                                    supports_trtllm_attention,
                                    use_trtllm_attention)
-from vllm.v1.attention.backends.flash_attn import use_cascade_attention
 # yapf conflicts with isort for this block
 # yapf: disable
 from vllm.v1.attention.backends.utils import (AttentionCGSupport,
@@ -677,7 +676,9 @@ class FlashInferMetadataBuilder(AttentionMetadataBuilder[FlashInferMetadata]):
             # TODO: The cascade wrapper currently does not support setting
             # kv cache dtype to something different from query dtype.
             return False
-        return use_cascade_attention(*args, **kwargs)
+        # TODO: Cascade attention doesn't work, disable it for now
+        # return use_cascade_attention(*args, **kwargs)
+        return False


 class FlashInferImpl(AttentionImpl):
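
In effect, the FlashInfer metadata builder's use_cascade_attention hook no longer defers to the shared FlashAttention heuristic and instead reports that cascade attention is unsupported. Below is a minimal sketch of this opt-out pattern; the class and helper names (AttentionMetadataBuilderSketch, shared_prefix_heuristic, FlashInferLikeMetadataBuilder) are simplified stand-ins for illustration, not the real vLLM classes.

def shared_prefix_heuristic(*args, **kwargs) -> bool:
    # Placeholder for the real use_cascade_attention() heuristic in
    # vllm.v1.attention.backends.flash_attn, which checks whether the batch
    # shares a long enough common prefix to benefit from cascade attention.
    return True


class AttentionMetadataBuilderSketch:
    def use_cascade_attention(self, *args, **kwargs) -> bool:
        # Default behavior: let the shared heuristic decide.
        return shared_prefix_heuristic(*args, **kwargs)


class FlashInferLikeMetadataBuilder(AttentionMetadataBuilderSketch):
    def use_cascade_attention(self, *args, **kwargs) -> bool:
        # Cascade attention currently misbehaves with this backend, so the
        # override short-circuits to False regardless of the heuristic.
        return False


if __name__ == "__main__":
    builder = FlashInferLikeMetadataBuilder()
    assert builder.use_cascade_attention() is False

With this structure, re-enabling cascade attention later only requires restoring the call to the shared heuristic in the overridden method, which is why the original call is kept as a comment in the diff above.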