diff --git a/vllm/v1/attention/backends/flashinfer.py b/vllm/v1/attention/backends/flashinfer.py
index 688e681f0591..15a252734d4d 100755
--- a/vllm/v1/attention/backends/flashinfer.py
+++ b/vllm/v1/attention/backends/flashinfer.py
@@ -29,7 +29,6 @@ from vllm.utils.flashinfer import (can_use_trtllm_attention,
                                    flashinfer_disable_q_quantization,
                                    supports_trtllm_attention,
                                    use_trtllm_attention)
-from vllm.v1.attention.backends.flash_attn import use_cascade_attention
 # yapf conflicts with isort for this block
 # yapf: disable
 from vllm.v1.attention.backends.utils import (AttentionCGSupport,
@@ -677,7 +676,9 @@ class FlashInferMetadataBuilder(AttentionMetadataBuilder[FlashInferMetadata]):
             # TODO: The cascade wrapper currently does not support setting
             # kv cache dtype to something different from query dtype.
             return False
-        return use_cascade_attention(*args, **kwargs)
+        # TODO: Cascade attention doesn't work, disable it for now
+        # return use_cascade_attention(*args, **kwargs)
+        return False
 
 
 class FlashInferImpl(AttentionImpl):