# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import random
from typing import Optional

import pytest
import torch

from tests.kernels.allclose_default import get_default_atol, get_default_rtol
from tests.kernels.utils import opcheck
from vllm import _custom_ops as ops
from vllm.attention.layer import Attention, MultiHeadAttention
from vllm.platforms import current_platform
from vllm.utils import get_max_shared_memory_bytes

if not current_platform.is_rocm():
    from xformers import ops as xops
    from xformers.ops.fmha.attn_bias import BlockDiagonalCausalMask

    from tests.kernels.utils import make_alibi_bias

FLOAT32_BYTES = torch.finfo(torch.float).bits // 8
# This will change depending on the compute capability.
# - 512 as a buffer
MAX_SEQ_LEN = get_max_shared_memory_bytes() // FLOAT32_BYTES - 512
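# (MAX_SEQ_LEN is bounded by shared memory: the v1 kernel stages one float32
# logit per token there, so divide the shared-memory size by 4 bytes and keep
# 512 elements as headroom.)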
# There may not be enough gpu memory due to large NUM_BLOCKS.
# Reduce NUM_BLOCKS when it happens.
NUM_BLOCKS = 4321  # Arbitrary values for testing
PARTITION_SIZE = 512
PARTITION_SIZE_ROCM = 256
DTYPES = [torch.bfloat16]
NUM_GEN_SEQS = [7]  # Arbitrary values for testing
NUM_PREFILL_SEQS = [3]  # Arbitrary values for testing
NUM_HEADS = [(40, 40), (64, 8)]  # Arbitrary values for testing

# This should be kept in sync with get_supported_head_sizes() in
# vllm.attention.ops.paged_attn.PagedAttention
HEAD_SIZES = [32, 80, 128, 256]

BLOCK_SIZES = [16, 32]
USE_ALIBI = [False, True]
KV_CACHE_DTYPE = ["auto", "fp8"]
SEEDS = [0]
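# Use a second device when more than one GPU is available so non-default
# device placement is exercised as well.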
CUDA_DEVICES = [
    f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)
]


def ref_masked_attention(
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    scale: float,
    attn_mask: Optional[torch.Tensor] = None,
) -> torch.Tensor:
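    # query: [num_query_tokens, num_heads, head_size]; key/value:
    # [num_kv_tokens, num_heads, head_size]. The einsum computes per-head
    # QK^T scores of shape [num_heads, num_query_tokens, num_kv_tokens].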
    attn_weights = scale * torch.einsum("qhd,khd->hqk", query, key).float()
    if attn_mask is not None:
        attn_weights = attn_weights + attn_mask.float()
    attn_weights = torch.softmax(attn_weights, dim=-1).to(value.dtype)
    out = torch.einsum("hqk,khd->qhd", attn_weights, value)
    return out


def ref_single_query_cached_kv_attention(
    output: torch.Tensor,
    query: torch.Tensor,
    num_queries_per_kv: int,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    block_tables: torch.Tensor,
    seq_lens: torch.Tensor,
    scale: float,
    alibi_slopes: Optional[torch.Tensor],
) -> None:
    num_query_heads = query.shape[1]
    num_kv_heads = value_cache.shape[1]
    head_size = value_cache.shape[2]
    block_size = value_cache.shape[3]
    num_seqs = query.shape[0]
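    # Cache layout: key_cache is [num_blocks, num_kv_heads, head_size // x,
    # block_size, x] (the head dim is packed in groups of x elements), while
    # value_cache is [num_blocks, num_kv_heads, head_size, block_size].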

    block_tables_lst = block_tables.cpu().tolist()
    seq_lens_lst = seq_lens.cpu().tolist()
    for i in range(num_seqs):
        q = query[i].unsqueeze(0)
        block_table = block_tables_lst[i]
        seq_len = int(seq_lens_lst[i])

        keys_lst: list[torch.Tensor] = []
        values_lst: list[torch.Tensor] = []
        for j in range(seq_len):
            block_number = int(block_table[j // block_size])
            block_offset = j % block_size

            k = key_cache[block_number, :, :, block_offset, :]
            k = k.reshape(num_kv_heads, head_size)
            keys_lst.append(k)

            v = value_cache[block_number, :, :, block_offset]
            values_lst.append(v)
        keys = torch.stack(keys_lst, dim=0)
        values = torch.stack(values_lst, dim=0)
        if num_queries_per_kv > 1:
            # Handle MQA and GQA
            keys = torch.repeat_interleave(keys, num_queries_per_kv, dim=1)
            values = torch.repeat_interleave(values, num_queries_per_kv, dim=1)

        alibi_bias = None
        if alibi_slopes is not None:
            # Create the ALiBi bias used in the paged attention kernel.
            position_ids = torch.arange(seq_len).int()
            alibi_bias = (position_ids - seq_len + 1).float()
            alibi_bias = alibi_slopes.view(-1, 1, 1) * alibi_bias.view(
                1, 1, -1)
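            # For each head, the bias at key position j is
            # slope * (j - (seq_len - 1)): zero at the final (query) position
            # and growing in magnitude with distance.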

        out = ref_masked_attention(q, keys, values, scale, alibi_bias)
        out = out.view(num_query_heads, head_size)
        output[i].copy_(out, non_blocking=True)


@pytest.mark.parametrize(
    "version",
    ["v1", "v2"] if not current_platform.is_rocm() else ["v1", "v2", "rocm"])
@pytest.mark.parametrize("num_seqs", NUM_GEN_SEQS)
@pytest.mark.parametrize("num_heads", NUM_HEADS)
@pytest.mark.parametrize("head_size", HEAD_SIZES)
@pytest.mark.parametrize("use_alibi", USE_ALIBI)
@pytest.mark.parametrize("block_size", BLOCK_SIZES)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("kv_cache_dtype", KV_CACHE_DTYPE)
@pytest.mark.parametrize("seed", SEEDS)
@pytest.mark.parametrize("device", CUDA_DEVICES)
def test_paged_attention(
    kv_cache_factory,
    version: str,
    num_seqs: int,
    num_heads: tuple[int, int],
    head_size: int,
    use_alibi: bool,
    block_size: int,
    dtype: torch.dtype,
    kv_cache_dtype: str,
    seed: int,
    device: str,
) -> None:
    if ((kv_cache_dtype == "fp8" and head_size % 16)
            or (version == "rocm" and head_size not in (64, 128))):
        pytest.skip("fp8 KV cache requires head_size divisible by 16; "
                    "the ROCm kernel only supports head sizes 64 and 128")

    if (version == "rocm" and current_platform.is_navi()
            and (kv_cache_dtype == "fp8" or head_size != 128
                 or block_size != 16 or use_alibi)):
        pytest.skip("unsupported configuration for the ROCm kernel on Navi")

    global PARTITION_SIZE

    current_platform.seed_everything(seed)
    torch.set_default_device(device)
    scale = float(1.0 / (head_size**0.5))
    num_query_heads, num_kv_heads = num_heads
    query = torch.empty(num_seqs, num_query_heads, head_size, dtype=dtype)
    query.uniform_(-scale, scale)

    assert num_query_heads % num_kv_heads == 0
    num_queries_per_kv = num_query_heads // num_kv_heads
    alibi_slopes = None
    if use_alibi:
        alibi_slopes = torch.randn(num_query_heads, dtype=torch.float)

    seq_lens = [random.randint(1, MAX_SEQ_LEN) for _ in range(num_seqs)]
    seq_lens[-1] = MAX_SEQ_LEN
    max_seq_len = max(seq_lens)
    seq_lens = torch.tensor(seq_lens, dtype=torch.int)

    # Create the block tables.
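    # Each sequence gets max_num_blocks_per_seq random physical block numbers;
    # duplicates across sequences are harmless since this test only reads.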
    max_num_blocks_per_seq = (max_seq_len + block_size - 1) // block_size
    block_tables_lst: list[list[int]] = []
    for _ in range(num_seqs):
        block_table = [
            random.randint(0, NUM_BLOCKS - 1)
            for _ in range(max_num_blocks_per_seq)
        ]
        block_tables_lst.append(block_table)

    block_tables = torch.tensor(block_tables_lst, dtype=torch.int)

    # Create the KV caches.
    key_caches, value_caches = kv_cache_factory(NUM_BLOCKS, block_size, 1,
                                                num_kv_heads, head_size,
                                                kv_cache_dtype, dtype, seed,
                                                device)
    key_cache, value_cache = key_caches[0], value_caches[0]

    # Using default kv_scale
    k_scale = v_scale = torch.tensor(1.0, dtype=torch.float32, device=device)

    # Call the paged attention kernel.
    output = torch.empty_like(query)
    if version == "v1":
        ops.paged_attention_v1(
            output,
            query,
            key_cache,
            value_cache,
            num_kv_heads,
            scale,
            block_tables,
            seq_lens,
            block_size,
            max_seq_len,
            alibi_slopes,
            kv_cache_dtype,
            k_scale,
            v_scale,
        )
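
        # opcheck validates the underlying _C custom op's torch.library
        # registration (schema, fake/meta impls). The five trailing scalars
        # should be tp_rank and the block-sparse parameters at their default
        # values (0, 0, 0, 64, 0).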
        opcheck(torch.ops._C.paged_attention_v1,
                (output, query, key_cache, value_cache, num_kv_heads, scale,
                 block_tables, seq_lens, block_size, max_seq_len, alibi_slopes,
                 kv_cache_dtype, k_scale, v_scale, 0, 0, 0, 64, 0),
                cond=(head_size == HEAD_SIZES[0]
                      and block_size == BLOCK_SIZES[0]))

    elif version in ("v2", "rocm"):
        if current_platform.is_rocm() and version == "rocm":
            PARTITION_SIZE = PARTITION_SIZE_ROCM

        num_partitions = ((max_seq_len + PARTITION_SIZE - 1) // PARTITION_SIZE)
        assert PARTITION_SIZE % block_size == 0
        num_seqs, num_heads, head_size = output.shape
        tmp_output = torch.empty(
            size=(num_seqs, num_heads, num_partitions, head_size),
            dtype=output.dtype,
        )
        exp_sums = torch.empty(
            size=(num_seqs, num_heads, num_partitions),
            dtype=torch.float32,
        )
        max_logits = torch.empty_like(exp_sums)
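        # v2 splits each sequence into PARTITION_SIZE-token partitions: the
        # kernel writes per-partition results to tmp_output along with softmax
        # statistics (exp_sums, max_logits), then reduces them into output.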
        if version == "v2":
            ops.paged_attention_v2(
                output,
                exp_sums,
                max_logits,
                tmp_output,
                query,
                key_cache,
                value_cache,
                num_kv_heads,
                scale,
                block_tables,
                seq_lens,
                block_size,
                max_seq_len,
                alibi_slopes,
                kv_cache_dtype,
                k_scale,
                v_scale,
            )

            opcheck(torch.ops._C.paged_attention_v2,
                    (output, exp_sums, max_logits, tmp_output, query,
                     key_cache, value_cache, num_kv_heads, scale, block_tables,
                     seq_lens, block_size, max_seq_len, alibi_slopes,
                     kv_cache_dtype, k_scale, v_scale, 0, 0, 0, 64, 0),
                    cond=(head_size == HEAD_SIZES[0]
                          and block_size == BLOCK_SIZES[0]))

        else:
            ops.paged_attention_rocm(
                output,
                exp_sums,
                max_logits,
                tmp_output,
                query,
                key_cache,
                value_cache,
                num_kv_heads,
                scale,
                block_tables,
                seq_lens,
                None,
                block_size,
                max_seq_len,
                alibi_slopes,
                kv_cache_dtype,
                k_scale,
                v_scale,
            )

            opcheck(torch.ops._rocm_C.paged_attention,
                    (output, exp_sums, max_logits, tmp_output, query,
                     key_cache, value_cache, num_kv_heads, scale, block_tables,
                     seq_lens, None, block_size, max_seq_len, alibi_slopes,
                     kv_cache_dtype, k_scale, v_scale),
                    cond=(head_size == HEAD_SIZES[0]
                          and block_size == BLOCK_SIZES[0]))

    else:
        raise AssertionError(f"Unknown version: {version}")

    # Run the reference implementation.
    if kv_cache_dtype == "fp8":
        # Convert cache data back to dtype.
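        # x is the number of cache-dtype elements that fit in 16 bytes; the
        # key cache packs the head dim in groups of x (see layout note above).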
        x = 16 // torch.tensor([], dtype=dtype).element_size()
        key_cache_shape = (NUM_BLOCKS, num_kv_heads, head_size // x,
                           block_size, x)
        dequantized_key_cache = torch.empty(size=key_cache_shape,
                                            dtype=dtype,
                                            device=device)
        ops.convert_fp8(dequantized_key_cache, key_cache)
        key_cache = dequantized_key_cache

        value_cache_shape = value_cache.shape
        dequantized_value_cache = torch.empty(size=value_cache_shape,
                                              dtype=dtype,
                                              device=device)
        ops.convert_fp8(dequantized_value_cache, value_cache)
        value_cache = dequantized_value_cache

    ref_output = torch.empty_like(query)
    ref_single_query_cached_kv_attention(
        ref_output,
        query,
        num_queries_per_kv,
        key_cache,
        value_cache,
        block_tables,
        seq_lens,
        scale,
        alibi_slopes,
    )

    # NOTE(woosuk): Due to the kernel-level differences in the two
    # implementations, there is a small numerical difference in the two
    # outputs. Thus, we use a relaxed tolerance for the test.
    atol = get_default_atol(output) if current_platform.is_rocm() else 1e-3
    rtol = get_default_rtol(output) if current_platform.is_rocm() else 1e-5

    # NOTE(zhaoyang): FP8 KV Cache will introduce quantization error,
    # so we use a relaxed tolerance for the test.
    if kv_cache_dtype == "fp8":
        atol, rtol = 1e-2, 1e-5
    torch.testing.assert_close(output, ref_output, atol=atol, rtol=rtol)


def ref_multi_query_kv_attention(
    cu_seq_lens: list[int],
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    scale: float,
    alibi_bias: Optional[list[torch.Tensor]],
    dtype: torch.dtype,
) -> torch.Tensor:
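    # cu_seq_lens holds cumulative token offsets ([0, len0, len0 + len1, ...])
    # that slice the packed query/key/value tensors into per-sequence chunks.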
    num_seqs = len(cu_seq_lens) - 1
    ref_outputs: list[torch.Tensor] = []
    if alibi_bias:
        assert len(alibi_bias) == num_seqs
    for i in range(num_seqs):
        start_idx = cu_seq_lens[i]
        end_idx = cu_seq_lens[i + 1]
        seq_len = end_idx - start_idx

        # Create attention mask. ALiBi already includes a tril causal mask.
        if alibi_bias:
            attn_mask = alibi_bias[i]
        else:
            attn_mask = torch.triu(torch.ones(seq_len, seq_len, dtype=dtype),
                                   diagonal=1)
            attn_mask = attn_mask * torch.finfo(dtype).min
            attn_mask = attn_mask.to(dtype=dtype)

        ref_output = ref_masked_attention(
            query[start_idx:end_idx],
            key[start_idx:end_idx],
            value[start_idx:end_idx],
            scale,
            attn_mask=attn_mask,
        )
        ref_outputs.append(ref_output)

    return torch.cat(ref_outputs, dim=0)
@pytest.mark.parametrize("num_seqs", NUM_PREFILL_SEQS)
|
|
@pytest.mark.parametrize("num_heads", NUM_HEADS)
|
|
@pytest.mark.parametrize("head_size", HEAD_SIZES)
|
|
@pytest.mark.parametrize("dtype", DTYPES)
|
|
@pytest.mark.parametrize("seed", SEEDS)
|
|
@pytest.mark.parametrize("device", CUDA_DEVICES)
|
|
@pytest.mark.skipif(current_platform.is_rocm(),
|
|
reason="Xformers backend is not supported on ROCm.")
|
|
@torch.inference_mode()
|
|
def test_multi_query_kv_attention(
    num_seqs: int,
    num_heads: tuple[int, int],
    head_size: int,
    dtype: torch.dtype,
    seed: int,
    device: str,
    use_alibi: bool = False,
) -> None:
    current_platform.seed_everything(seed)
    torch.set_default_device(device)
    # MAX_SEQ_LEN sometimes causes OOM in the reference implementation.
    # As the xformers library is already tested with its own tests, we can use
    # a smaller MAX_SEQ_LEN here.
    max_len = min(MAX_SEQ_LEN, 4096)
    seq_lens = random.sample(range(1, max_len), num_seqs)
    num_tokens = sum(seq_lens)

    scale = float(1.0 / (head_size**0.5))
    num_query_heads, num_kv_heads = num_heads
    qkv = torch.empty(num_tokens,
                      num_query_heads + 2 * num_kv_heads,
                      head_size,
                      dtype=dtype)
    qkv.uniform_(-scale, scale)
    query, key, value = qkv.split(
        [num_query_heads, num_kv_heads, num_kv_heads], dim=1)
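
    # With MQA/GQA (num_kv_heads < num_query_heads), repeat each KV head so
    # key and value end up with one head per query head.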
    num_queries_per_kv = num_query_heads // num_kv_heads
    if num_queries_per_kv > 1:
        # Handle MQA and GQA
        key = torch.repeat_interleave(key, num_queries_per_kv, dim=1)
        value = torch.repeat_interleave(value, num_queries_per_kv, dim=1)
    alibi_bias = None
    if use_alibi:
        alibi_slopes = torch.randn(num_query_heads, dtype=torch.float)
        attn_bias = make_alibi_bias(alibi_slopes, num_kv_heads, dtype,
                                    seq_lens)
        output = torch.empty_like(query)
        start = 0
        # Dynamic sequence length not supported with custom attn_bias.
        for i, seq_len in enumerate(seq_lens):
            end = start + seq_len
            out = xops.memory_efficient_attention_forward(
                query[None, start:end],
                key[None, start:end],
                value[None, start:end],
                attn_bias=attn_bias[i],
                p=0.0,
                scale=scale)
            output[start:end].copy_(out.view_as(query[start:end]))
            start += seq_len
        # xformers.AttentionBias to Tensor for use in reference impl.
        alibi_bias = [
            b.materialize((1, num_query_heads, i, i), device=device).squeeze()
            for b, i in zip(attn_bias, seq_lens)
        ]
    else:
        attn_bias = BlockDiagonalCausalMask.from_seqlens(seq_lens)
        output = xops.memory_efficient_attention_forward(
            query.unsqueeze(0),
            key.unsqueeze(0),
            value.unsqueeze(0),
            attn_bias=attn_bias,
            p=0.0,
            scale=scale,
        )
        output = output.squeeze(0)

    cu_seq_lens = [0]
    for seq_len in seq_lens:
        cu_seq_lens.append(cu_seq_lens[-1] + seq_len)
    ref_output = ref_multi_query_kv_attention(
        cu_seq_lens,
        query,
        key,
        value,
        scale,
        alibi_bias,
        dtype,
    )
    atol = get_default_atol(output) if current_platform.is_rocm() else 1e-3
    rtol = get_default_rtol(output) if current_platform.is_rocm() else 1e-5
    torch.testing.assert_close(output, ref_output, atol=atol, rtol=rtol)
@pytest.mark.parametrize("num_seqs", NUM_PREFILL_SEQS)
|
|
@pytest.mark.parametrize("num_heads", NUM_HEADS)
|
|
@pytest.mark.parametrize("head_size", [64])
|
|
@pytest.mark.parametrize("dtype", DTYPES)
|
|
@pytest.mark.parametrize("seed", SEEDS)
|
|
@pytest.mark.parametrize("device", CUDA_DEVICES)
|
|
@pytest.mark.skipif(current_platform.is_rocm(),
|
|
reason="Xformers backend is not supported on ROCm.")
|
|
@torch.inference_mode()
|
|
def test_multi_query_kv_attention_with_alibi(
|
|
num_seqs: int,
|
|
num_heads: tuple[int, int],
|
|
head_size: int,
|
|
dtype: torch.dtype,
|
|
seed: int,
|
|
device: str,
|
|
) -> None:
|
|
return test_multi_query_kv_attention(
|
|
num_seqs,
|
|
num_heads,
|
|
head_size,
|
|
dtype,
|
|
seed,
|
|
device,
|
|
use_alibi=True,
|
|
)
|
|
|
|
|
|
@pytest.mark.parametrize("attention_cls", [Attention, MultiHeadAttention])
|
|
def test_num_heads_not_divisble_by_num_kv_heads(attention_cls: type) -> None:
|
|
head_size = 64
|
|
scale = float(1.0 / (head_size**0.5))
|
|
num_heads = 16
|
|
num_kv_heads = 5
|
|
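    # 16 query heads cannot be evenly divided among 5 KV heads, so
    # constructing the attention layer should raise.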
    with pytest.raises(AssertionError):
        _ = attention_cls(
            num_heads=num_heads,
            head_size=head_size,
            scale=scale,
            num_kv_heads=num_kv_heads,
        )