# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from unittest.mock import patch

import pytest
import torch

from vllm.attention.selector import _cached_get_attn_backend, get_attn_backend
from vllm.platforms.cpu import CpuPlatform
from vllm.platforms.cuda import CudaPlatform
from vllm.platforms.rocm import RocmPlatform
from vllm.utils import STR_BACKEND_ENV_VAR, STR_FLASH_ATTN_VAL, STR_INVALID_VAL


@pytest.fixture(autouse=True)
def clear_cache():
    """Clear lru cache to ensure each test case runs without caching."""
    _cached_get_attn_backend.cache_clear()
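

# Note: the selector memoizes its result (see _cached_get_attn_backend), so
# without the autouse fixture above a backend chosen under one patched
# platform could leak into later parametrized cases.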


# Define MLA and non-MLA backends separately
DEVICE_MLA_BACKENDS = {
    "cuda": [
        "TRITON_MLA", "FLASHMLA", "FLASHINFER_MLA", "FLASH_ATTN_MLA",
        "CUTLASS_MLA"
    ],
    "hip": ["TRITON_MLA", "ROCM_AITER_MLA"],
    "cpu": [],
}

DEVICE_REGULAR_ATTN_BACKENDS = {
    "cuda": ["XFORMERS", "FLASHINFER", "FLASH_ATTN"],
    "hip": ["ROCM_FLASH"],
    "cpu": ["TORCH_SDPA"],
}

DEVICE_MLA_BLOCK_SIZES = {
    "cuda": [16, 64],  # CUDA supports both standard and extended block sizes
    "hip": [16, 1],  # HIP requires special handling for block_size=1
    # "cpu": [16]  # CPU uses fixed block size from test cases
    "cpu": [],  # FIXME(woosuk): Temporarily disable CPU tests
}


def generate_params():
    params = []
    for use_mla in [True, False]:
        for device in ["cuda", "hip", "cpu"]:
            backends = (DEVICE_MLA_BACKENDS[device] if use_mla else
                        DEVICE_REGULAR_ATTN_BACKENDS[device])
            for name in backends:
                block_sizes = (DEVICE_MLA_BLOCK_SIZES[device]
                               if use_mla else [16])
                for block_size in block_sizes:
                    params.append(
                        pytest.param(
                            device,
                            name,
                            use_mla,
                            block_size,
                            id=(f"{device}_{name}_mla_{str(use_mla)[0]}"
                                f"_blks{block_size}")))
    return params
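

# For orientation: generate_params() encodes each combination in the test id,
# e.g. "cuda_FLASHMLA_mla_T_blks64" or "cpu_TORCH_SDPA_mla_F_blks16", so a
# single case can be selected with `pytest -k`.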
@pytest.mark.parametrize("device, name, use_mla, block_size",
|
|
generate_params())
|
|
def test_env(
|
|
device: str,
|
|
name: str,
|
|
use_mla: bool,
|
|
block_size: int,
|
|
monkeypatch: pytest.MonkeyPatch,
|
|
):
|
|
"""Test attention backend selection with valid device-backend pairs."""
|
|
with monkeypatch.context() as m:
|
|
m.setenv("VLLM_USE_V1", "1")
|
|
m.setenv(STR_BACKEND_ENV_VAR, name)
|
|
m.setenv("VLLM_MLA_DISABLE", "1" if use_mla else "0")
|
|
|
|
if device == "cpu":
|
|
with patch("vllm.attention.selector.current_platform",
|
|
CpuPlatform()):
|
|
backend = get_attn_backend(16, torch.float16, None, block_size)
|
|
assert backend.get_name() == "TORCH_SDPA"
|
|
|
|
elif device == "hip":
|
|
with patch("vllm.attention.selector.current_platform",
|
|
RocmPlatform()):
|
|
if use_mla:
|
|
# ROCm MLA backend logic:
|
|
# - TRITON_MLA: supported when block_size != 1
|
|
# - ROCM_AITER_MLA: supported when block_size == 1
|
|
# If backend is forced but doesn't match block_size,
|
|
# should raise ValueError
|
|
|
|
if name == "TRITON_MLA" and block_size == 1:
|
|
# TRITON_MLA doesn't support block_size == 1
|
|
with pytest.raises(ValueError) as exc_info:
|
|
get_attn_backend(16,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
assert f"The selected backend, {name}" in str(
|
|
exc_info.value)
|
|
elif name == "ROCM_AITER_MLA" and block_size != 1:
|
|
# ROCM_AITER_MLA only supports block_size == 1
|
|
with pytest.raises(ValueError) as exc_info:
|
|
get_attn_backend(16,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
assert f"The selected backend, {name}" in str(
|
|
exc_info.value)
|
|
else:
|
|
# Valid backend-block_size combination
|
|
backend = get_attn_backend(16,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
expected = name
|
|
assert backend.get_name() == expected
|
|
else:
|
|
backend = get_attn_backend(16,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
expected = "TRITON_ATTN"
|
|
assert backend.get_name() == expected
|
|
|
|
elif device == "cuda":
|
|
with patch("vllm.attention.selector.current_platform",
|
|
CudaPlatform()):
|
|
if use_mla:
|
|
# CUDA MLA backend logic:
|
|
# - CUTLASS_MLA: only supported with block_size == 128
|
|
# and Blackwell GPUs (SM 10.0), V1 only
|
|
# - FLASHINFER_MLA: only supported on Blackwell GPUs
|
|
# (SM 10.0+), V1 only
|
|
# - FLASHMLA: only supported with block_size == 64
|
|
# - FLASH_ATTN_MLA: V1 only
|
|
# - TRITON_MLA: fallback for other cases
|
|
|
|
if name == "CUTLASS_MLA":
|
|
if block_size != 128:
|
|
# CUTLASS_MLA only supports block_size == 128
|
|
pytest.skip(
|
|
"CUTLASS_MLA only supports block_size 128")
|
|
else:
|
|
backend = get_attn_backend(16,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
expected = "CUTLASS_MLA"
|
|
assert backend.get_name() == expected
|
|
elif name == "FLASHINFER_MLA":
|
|
if block_size not in [32, 64]:
|
|
# FlashInfer MLA only supports block_size 32 or 64
|
|
pytest.skip(
|
|
"FlashInfer MLA only supports block_size 32 "
|
|
"or 64")
|
|
else:
|
|
backend = get_attn_backend(16,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
expected = "FLASHINFER_MLA"
|
|
assert backend.get_name() == expected
|
|
elif name == "FLASHMLA":
|
|
if block_size != 64:
|
|
# FlashMLA only supports block_size == 64
|
|
pytest.skip("FlashMLA only supports block_size 64")
|
|
else:
|
|
from vllm.v1.attention.backends.mla.flashmla import ( # noqa: E501
|
|
is_flashmla_supported)
|
|
is_supported, _ = is_flashmla_supported()
|
|
if not is_supported:
|
|
pytest.skip(
|
|
"FlashMLA not supported on this platform")
|
|
else:
|
|
backend = get_attn_backend(16,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
expected = name
|
|
assert backend.get_name() == expected
|
|
elif name == "FLASH_ATTN_MLA":
|
|
backend = get_attn_backend(16,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
expected = "FLASH_ATTN_MLA"
|
|
assert backend.get_name() == expected
|
|
else:
|
|
# TRITON_MLA or other fallback
|
|
backend = get_attn_backend(16,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
expected = "TRITON_MLA"
|
|
assert backend.get_name() == expected
|
|
elif name == "FLASHINFER":
|
|
backend = get_attn_backend(16,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
expected = "FLASHINFER"
|
|
assert backend.get_name() == expected
|
|
elif name == "XFORMERS":
|
|
backend = get_attn_backend(32,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
expected = "XFORMERS"
|
|
assert backend.get_name() == expected
|
|
elif name == "FLASH_ATTN":
|
|
backend = get_attn_backend(32,
|
|
torch.float16,
|
|
None,
|
|
block_size,
|
|
use_mla=use_mla)
|
|
expected = "FLASH_ATTN"
|
|
assert backend.get_name() == expected
|
|
|
|
|
|
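

# A minimal, self-contained sketch of the pattern test_env exercises: force a
# backend through the env var, pin the platform, and read back the selection.
# The helper name and the FLASH_ATTN choice are illustrative; the function is
# not collected by pytest.
def _example_force_backend(monkeypatch: pytest.MonkeyPatch) -> str:
    with monkeypatch.context() as m, patch(
            "vllm.attention.selector.current_platform", CudaPlatform()):
        m.setenv("VLLM_USE_V1", "1")
        m.setenv(STR_BACKEND_ENV_VAR, "FLASH_ATTN")
        # head_size=32, dtype=fp16, default kv-cache dtype, block_size=16
        return get_attn_backend(32, torch.float16, None, 16).get_name()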
@pytest.mark.parametrize("device", ["cpu", "cuda"])
|
|
def test_fp32_fallback(
|
|
device: str,
|
|
monkeypatch: pytest.MonkeyPatch,
|
|
):
|
|
"""Test attention backend selection with fp32."""
|
|
with monkeypatch.context() as m:
|
|
m.setenv("VLLM_USE_V1", "1")
|
|
|
|
if device == "cpu":
|
|
with patch("vllm.attention.selector.current_platform",
|
|
CpuPlatform()):
|
|
backend = get_attn_backend(16, torch.float32, None, 16)
|
|
assert backend.get_name() == "TORCH_SDPA"
|
|
|
|
elif device == "cuda":
|
|
with patch("vllm.attention.selector.current_platform",
|
|
CudaPlatform()):
|
|
backend = get_attn_backend(16, torch.float32, None, 16)
|
|
assert backend.get_name() == "FLEX_ATTENTION"
|
|
|
|
|
|


def test_flash_attn(monkeypatch: pytest.MonkeyPatch):
    """Test FlashAttn validation."""
    # TODO: When testing for v1, pipe in `use_v1` as an argument to
    # get_attn_backend

    pytest.skip("Skipping as current backend selector does not "
                "handle fallbacks when a backend is set via env var.")

    with monkeypatch.context() as m:
        m.setenv(STR_BACKEND_ENV_VAR, STR_FLASH_ATTN_VAL)

        # Unsupported CUDA arch (FlashAttention requires SM 8.0+)
        monkeypatch.setattr(torch.cuda, "get_device_capability",
                            lambda _=None: (7, 5))
        backend = get_attn_backend(16, torch.float16, None, 16)
        assert backend.get_name() != STR_FLASH_ATTN_VAL

        # Reset the monkeypatch for subsequent checks
        monkeypatch.undo()

        # Unsupported data type
        backend = get_attn_backend(16, torch.float8_e4m3fn, None, 16)
        assert backend.get_name() != STR_FLASH_ATTN_VAL

        # Unsupported kv cache data type
        backend = get_attn_backend(16, torch.float16, "fp8", 16)
        assert backend.get_name() != STR_FLASH_ATTN_VAL

        # Unsupported block size
        backend = get_attn_backend(16, torch.float16, None, 8)
        assert backend.get_name() != STR_FLASH_ATTN_VAL

        # flash-attn is not installed: simulate by masking the module
        import sys
        original_module = sys.modules.get('vllm_flash_attn')
        monkeypatch.setitem(sys.modules, 'vllm_flash_attn', None)
        backend = get_attn_backend(16, torch.float16, None, 16)
        assert backend.get_name() != STR_FLASH_ATTN_VAL

        # Restore the original module if it existed
        if original_module is not None:
            monkeypatch.setitem(sys.modules, 'vllm_flash_attn',
                                original_module)
        else:
            monkeypatch.delitem(sys.modules, 'vllm_flash_attn', raising=False)

        # Unsupported head size
        backend = get_attn_backend(17, torch.float16, None, 16)
        assert backend.get_name() != STR_FLASH_ATTN_VAL


def test_invalid_env(monkeypatch: pytest.MonkeyPatch):
    """Test that invalid attention backend names raise ValueError."""
    with monkeypatch.context() as m, patch(
            "vllm.attention.selector.current_platform", CudaPlatform()):
        m.setenv("VLLM_USE_V1", "1")
        m.setenv(STR_BACKEND_ENV_VAR, STR_INVALID_VAL)

        # Should raise ValueError for invalid backend
        with pytest.raises(ValueError) as exc_info:
            get_attn_backend(32, torch.float16, None, 16)
        assert "Invalid value 'INVALID'" in str(exc_info.value)
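

# Example invocations (file name assumed; adjust to your checkout):
#     pytest test_attention_selector.py -k "cuda_FLASH_ATTN"
#     pytest test_attention_selector.py::test_invalid_env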