diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py
index 1d0714305cc8..0bedd80e5ecf 100644
--- a/vllm/platforms/rocm.py
+++ b/vllm/platforms/rocm.py
@@ -109,7 +109,8 @@ def use_rocm_custom_paged_attention(qtype: torch.dtype, head_size: int,
     ON_MI250_MI300 = any(arch in GPU_ARCH for arch in ["gfx90a", "gfx942"])
 
     # rocm custom page attention not support on navi (gfx1*)
-    return (ON_MI250_MI300 and not ON_NAVI and (sliding_window == 0)
+    return (ON_MI250_MI300 and not ON_NAVI
+            and (sliding_window == 0 or sliding_window == (-1, -1))
             and (qtype == torch.half or qtype == torch.bfloat16)
             and (head_size == 64 or head_size == 128)
             and (block_size == 16 or block_size == 32)
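
For context, below is a minimal standalone sketch of the gating predicate this hunk produces. The surrounding definitions (GPU_ARCH, ON_NAVI) are not shown in the hunk, so the gpu_arch parameter and the on_navi check here are assumptions for illustration, not code copied from the file; the (-1, -1) comparison follows the FlashAttention-style window_size sentinel that conventionally means "no sliding window".

import torch

def use_rocm_custom_paged_attention_sketch(qtype: torch.dtype,
                                           head_size: int,
                                           block_size: int,
                                           sliding_window,
                                           gpu_arch: str) -> bool:
    # gpu_arch stands in for the queried device architecture string
    # (e.g. "gfx90a", "gfx942", "gfx1100"); an assumption in this sketch.
    on_mi250_mi300 = any(arch in gpu_arch for arch in ["gfx90a", "gfx942"])
    on_navi = "gfx1" in gpu_arch  # custom paged attention unsupported on Navi

    # After the patch, both a sliding window of 0 and the (-1, -1)
    # "disabled" sentinel pass the check, instead of only 0.
    return (on_mi250_mi300 and not on_navi
            and (sliding_window == 0 or sliding_window == (-1, -1))
            and (qtype == torch.half or qtype == torch.bfloat16)
            and (head_size == 64 or head_size == 128)
            and (block_size == 16 or block_size == 32))

The net effect of the one-line change: callers that represent a disabled sliding window as the tuple (-1, -1) rather than 0 no longer fall off the custom paged-attention fast path on MI250/MI300.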