# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
# fmt: off
# ruff: noqa: E501
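"""Benchmark block-FP8 GEMM kernels: DeepGEMM vs. vLLM Triton vs. vLLM CUTLASS.

For each (m, n, k) shape, every implementation is validated against a BF16
reference matmul and then timed; latency, TFLOPS, effective memory bandwidth,
and numerical differences are reported in tables at the end.
"""
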
import time

import torch

from vllm import _custom_ops as ops
from vllm.model_executor.layers.quantization.utils.fp8_utils import (
    per_token_group_quant_fp8,
    w8a8_block_fp8_matmul,
)
from vllm.triton_utils import triton
from vllm.utils.deep_gemm import (
    calc_diff,
    fp8_gemm_nt,
    get_col_major_tma_aligned_tensor,
    per_block_cast_to_fp8,
)


def benchmark_shape(m: int,
                    n: int,
                    k: int,
                    warmup: int = 100,
                    repeat: int = 10000,
                    verbose: bool = False) -> dict:
    """Benchmark all implementations for a specific (m, n, k) shape."""
    if verbose:
        print(f"\n=== Benchmarking shape: m={m}, n={n}, k={k} ===")

    # Create test tensors
    A = torch.randn((m, k), device='cuda', dtype=torch.bfloat16)
    B = torch.randn((n, k), device='cuda', dtype=torch.bfloat16)

    # Reference result in BF16
    torch.cuda.synchronize()
    C_ref = A @ B.t()

    # Block size configuration
    block_size = [128, 128]

    # Pre-quantize B for all implementations
    # (weights can be pre-quantized offline)
    B_deepgemm, B_scale_deepgemm = per_block_cast_to_fp8(B, block_size, use_ue8m0=True)
    B_vllm, B_scale_vllm = per_block_cast_to_fp8(B, block_size, use_ue8m0=True)
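
    # Quantization granularity: weights carry one scale per 128x128 block;
    # activations (quantized below) carry one scale per 128-element group
    # along k.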
    # Pre-quantize A for all implementations
    A_deepgemm, A_scale_deepgemm = per_token_group_quant_fp8(A, block_size[1])
    A_scale_deepgemm = get_col_major_tma_aligned_tensor(A_scale_deepgemm)
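    # (DeepGEMM consumes the activation scales in a column-major,
    # TMA-aligned layout, hence the conversion step above.)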
    C_deepgemm = torch.empty((m, n), device='cuda', dtype=torch.bfloat16)
    A_vllm, A_scale_vllm = per_token_group_quant_fp8(A, block_size[1])
    A_vllm_cutlass, A_scale_vllm_cutlass = per_token_group_quant_fp8(
        A, block_size[1], column_major_scales=True)

    # === DeepGEMM Implementation ===
    def deepgemm_gemm():
        fp8_gemm_nt((A_deepgemm, A_scale_deepgemm),
                    (B_deepgemm, B_scale_deepgemm),
                    C_deepgemm)
        return C_deepgemm

    # === vLLM Triton Implementation ===
    def vllm_triton_gemm():
        return w8a8_block_fp8_matmul(A_vllm,
                                     B_vllm,
                                     A_scale_vllm,
                                     B_scale_vllm,
                                     block_size,
                                     output_dtype=torch.bfloat16)

    # === vLLM CUTLASS Implementation ===
    def vllm_cutlass_gemm():
        return ops.cutlass_scaled_mm(A_vllm_cutlass,
                                     B_vllm.T,
                                     scale_a=A_scale_vllm_cutlass,
                                     scale_b=B_scale_vllm.T,
                                     out_dtype=torch.bfloat16)
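    # (cutlass_scaled_mm takes B as (k, n), so the (n, k) weight and its
    # block scales are passed transposed.)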

    # Run correctness check first
    if verbose:
        print("Running correctness check...")
    C_deepgemm = deepgemm_gemm()
    C_vllm_triton = vllm_triton_gemm()
    C_vllm_cutlass = vllm_cutlass_gemm()

    deepgemm_diff = calc_diff(C_deepgemm, C_ref)
    vllm_triton_diff = calc_diff(C_vllm_triton, C_ref)
    vllm_cutlass_diff = calc_diff(C_vllm_cutlass, C_ref)

    if verbose:
        print(f"DeepGEMM vs Reference difference: {deepgemm_diff:.6f}")
        print(f"vLLM Triton vs Reference difference: {vllm_triton_diff:.6f}")
        print(f"vLLM CUTLASS vs Reference difference: {vllm_cutlass_diff:.6f}")
        print("vLLM Triton vs DeepGEMM difference: "
              f"{calc_diff(C_vllm_triton, C_deepgemm):.6f}")
        print("vLLM CUTLASS vs DeepGEMM difference: "
              f"{calc_diff(C_vllm_cutlass, C_deepgemm):.6f}")

    # Benchmark implementations
    implementations = {
        "DeepGEMM": deepgemm_gemm,
        "vLLM Triton": vllm_triton_gemm,
        "vLLM CUTLASS": vllm_cutlass_gemm
    }

    benchmark_results = {
        "shape": {
            "m": m,
            "n": n,
            "k": k
        },
        "implementations": {}
    }

    for name, func in implementations.items():
        # Warmup
        for _ in range(warmup):
            func()
        torch.cuda.synchronize()

        # Timing loop
        start = time.time()
        for _ in range(repeat):
            func()
        torch.cuda.synchronize()
        end = time.time()
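        # Wall-clock timing with a single synchronize after the loop
        # amortizes kernel-launch overhead across all `repeat` iterations.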

        # Calculate timing and TFLOPS
        avg_time_ms = (end - start) / repeat * 1000
        avg_time_us = avg_time_ms * 1000
        tflops = 2 * m * n * k / (avg_time_ms * 1e-3) / 1e12
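        # Bytes moved per GEMM: FP8 A (m*k) and FP8 B (k*n) at 1 byte per
        # element, plus the BF16 output (m*n) at 2 bytes; scale tensors are
        # small enough to ignore.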
        gb_s = (m * k + k * n + m * n * 2) / 1e9 / (avg_time_ms * 1e-3)

        benchmark_results["implementations"][name] = {
            "time_ms": avg_time_ms,
            "time_us": avg_time_us,
            "tflops": tflops,
            "gb_s": gb_s,
            "diff": {
                "DeepGEMM":
                0.0 if name == "DeepGEMM" else calc_diff(func(), C_deepgemm),
                "Reference":
                deepgemm_diff if name == "DeepGEMM" else
                (vllm_triton_diff
                 if name == "vLLM Triton" else vllm_cutlass_diff)
            }
        }

        if verbose:
            print(
                f"{name}: {avg_time_ms:.3f} ms, {tflops:.2f} TFLOPS, {gb_s:.2f} GB/s"
            )

    # Calculate speedups
    baseline = benchmark_results["implementations"]["DeepGEMM"]["time_ms"]
    for name, data in benchmark_results["implementations"].items():
        if name != "DeepGEMM":
            speedup = baseline / data["time_ms"]
            benchmark_results["implementations"][name][
                "speedup_vs_deepgemm"] = speedup
            if verbose:
                print(f"DeepGEMM is {1/speedup:.2f}x "
                      f"{'faster' if 1/speedup > 1 else 'slower'} than {name}")

    vllm_triton_time = benchmark_results["implementations"]["vLLM Triton"][
        "time_ms"]
    vllm_cutlass_time = benchmark_results["implementations"]["vLLM CUTLASS"][
        "time_ms"]
    cutlass_vs_triton = vllm_triton_time / vllm_cutlass_time
    benchmark_results["implementations"]["vLLM CUTLASS"][
        "speedup_vs_triton"] = cutlass_vs_triton
    if verbose:
        print(
            f"vLLM CUTLASS is {cutlass_vs_triton:.2f}x "
            f"{'faster' if cutlass_vs_triton > 1 else 'slower'} than vLLM Triton"
        )

    return benchmark_results
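

# A minimal usage sketch (assumes this file is importable as
# `benchmark_fp8_block_dense_gemm`; adjust the module name to match where
# the file actually lives). `run_benchmarks()` below runs the full sweep;
# this is just for probing a single shape interactively:
#
#   from benchmark_fp8_block_dense_gemm import benchmark_shape
#   result = benchmark_shape(64, 4096, 7168, verbose=True)
#   print(result["implementations"]["vLLM CUTLASS"]["speedup_vs_deepgemm"])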


def format_table_row(values, widths):
    """Format a row with specified column widths."""
    return "| " + " | ".join(f"{val:{w}}"
                             for val, w in zip(values, widths)) + " |"


def print_table(headers, rows, title=None):
    """Print a table with headers and rows."""
    if title:
        print(f"\n{title}")

    # Calculate column widths based on headers and data
    widths = [
        max(len(str(h)), max(len(str(row[i])) for row in rows))
        for i, h in enumerate(headers)
    ]

    # Create separator line
    separator = "+-" + "-+-".join("-" * w for w in widths) + "-+"

    # Print table
    print(separator)
    print(format_table_row(headers, widths))
    print(separator)
    for row in rows:
        print(format_table_row(row, widths))
    print(separator)


def format_speedup(value):
    """Format speedup value with indicator if it's faster or slower."""
    return f"{value:.2f}x {'faster' if value > 1.0 else 'slower'}"


def run_benchmarks(verbose: bool = False):
    """Run benchmarks for a set of common shapes."""
    print("===== STARTING FP8 GEMM BENCHMARK =====")

    # Make sure we're using the GPU
    if not torch.cuda.is_available():
        print("CUDA not available! Tests require GPU.")
        return

    # Print system information
    print(f"PyTorch version: {torch.__version__}")
    print(f"CUDA version: {torch.version.cuda}")
    print(f"Triton version: {triton.__version__}")
    print(f"Using device: {torch.cuda.get_device_name()}")

    # Enable TF32 for better performance (affects FP32 matmuls only)
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True

    # Set seeds for reproducibility
    torch.manual_seed(42)
    torch.cuda.manual_seed(42)

    # Define benchmark shapes (m, n, k)
    shapes = [
        (8, 4096, 7168),
        (8, 7168, 18432),
        (8, 18432, 7168),
        (64, 4096, 7168),
        (64, 7168, 18432),
        (64, 18432, 7168),
        (64, 24576, 1536),
        (64, 32768, 512),
        (64, 7168, 16384),
        (128, 4096, 7168),
        (128, 7168, 18432),
        (128, 18432, 7168),
        (1024, 4096, 7168),
        (1024, 18432, 7168),
        (2048, 4096, 7168),
        (4096, 4096, 7168),
    ]
    # NOTE: this second assignment overrides the sweep above; keep (or
    # comment out) whichever list you want to run.
    shapes = [
        # (64, 2112, 7168),
        (64, 24576, 1536),
        (64, 32768, 512),
        (64, 7168, 16384),
        (64, 4096, 7168),
        (64, 7168, 2048),
        # (128, 2112, 7168),
        (128, 24576, 1536),
        (128, 32768, 512),
        (128, 7168, 16384),
        (128, 4096, 7168),
        (128, 7168, 2048),
        # (4096, 2112, 7168),
        (4096, 24576, 1536),
        (4096, 32768, 512),
        (4096, 7168, 16384),
        (4096, 4096, 7168),
        (4096, 7168, 2048),
    ]

    all_results = []
    for m, n, k in shapes:
        result = benchmark_shape(m, n, k, verbose=verbose)
        all_results.append(result)

    # Print results in a nicely formatted table
    print("\n===== PERFORMANCE COMPARISON =====")

    # Print DeepGEMM table
    deepgemm_headers = ["m", "n", "k", "Time (μs)", "TFLOPS", "GB/s"]
    deepgemm_rows = []
    for result in all_results:
        shape = result["shape"]
        impl_data = result["implementations"]["DeepGEMM"]
        deepgemm_rows.append([
            shape["m"], shape["n"], shape["k"], f"{impl_data['time_us']:.1f}",
            f"{impl_data['tflops']:.1f}", f"{impl_data['gb_s']:.1f}"
        ])

    print_table(deepgemm_headers,
                deepgemm_rows,
                title="DeepGEMM Implementation:")

    # Print vLLM Triton table
    triton_headers = [
        "m", "n", "k", "Time (μs)", "TFLOPS", "GB/s", "vs DeepGEMM"
    ]
    triton_rows = []
    for result in all_results:
        shape = result["shape"]
        impl_data = result["implementations"]["vLLM Triton"]
        speedup = impl_data.get("speedup_vs_deepgemm", 1.0)
        triton_rows.append([
            shape["m"], shape["n"], shape["k"], f"{impl_data['time_us']:.1f}",
            f"{impl_data['tflops']:.1f}", f"{impl_data['gb_s']:.1f}",
            format_speedup(speedup)
        ])

    print_table(triton_headers,
                triton_rows,
                title="vLLM Triton Implementation:")

    # Print vLLM CUTLASS table
    cutlass_headers = [
        "m", "n", "k", "Time (μs)", "TFLOPS", "GB/s", "vs DeepGEMM",
        "vs Triton"
    ]
    cutlass_rows = []
    for result in all_results:
        shape = result["shape"]
        impl_data = result["implementations"]["vLLM CUTLASS"]
        vs_deepgemm = impl_data.get("speedup_vs_deepgemm", 1.0)
        vs_triton = impl_data.get("speedup_vs_triton", 1.0)
        cutlass_rows.append([
            shape["m"], shape["n"], shape["k"], f"{impl_data['time_us']:.1f}",
            f"{impl_data['tflops']:.1f}", f"{impl_data['gb_s']:.1f}",
            format_speedup(vs_deepgemm),
            format_speedup(vs_triton)
        ])

    print_table(cutlass_headers,
                cutlass_rows,
                title="vLLM CUTLASS Implementation:")

    # Calculate and print averages
    print("\n===== AVERAGE PERFORMANCE =====")

    implementations = ["DeepGEMM", "vLLM Triton", "vLLM CUTLASS"]
    avg_metrics = {
        impl: {
            "tflops": 0,
            "gb_s": 0,
            "time_ms": 0
        }
        for impl in implementations
    }

    for result in all_results:
        for impl in implementations:
            impl_data = result["implementations"][impl]
            avg_metrics[impl]["tflops"] += impl_data["tflops"]
            avg_metrics[impl]["gb_s"] += impl_data["gb_s"]
            avg_metrics[impl]["time_ms"] += impl_data["time_ms"]

    num_shapes = len(all_results)
    avg_headers = ["Implementation", "Avg TFLOPS", "Avg GB/s", "Avg Time (ms)"]
    avg_rows = []

    for impl in implementations:
        avg_tflops = avg_metrics[impl]["tflops"] / num_shapes
        avg_mem_bw = avg_metrics[impl]["gb_s"] / num_shapes
        avg_time = avg_metrics[impl]["time_ms"] / num_shapes
        avg_rows.append([
            impl, f"{avg_tflops:.2f}", f"{avg_mem_bw:.2f}", f"{avg_time:.2f}"
        ])

    print_table(avg_headers, avg_rows)

    # Calculate average speedups
    avg_speedups = {
        "DeepGEMM vs vLLM Triton": 0,
        "DeepGEMM vs vLLM CUTLASS": 0,
        "vLLM CUTLASS vs vLLM Triton": 0
    }

    for result in all_results:
        deepgemm_time = result["implementations"]["DeepGEMM"]["time_ms"]
        vllm_triton_time = result["implementations"]["vLLM Triton"]["time_ms"]
        vllm_cutlass_time = result["implementations"]["vLLM CUTLASS"][
            "time_ms"]

        avg_speedups[
            "DeepGEMM vs vLLM Triton"] += vllm_triton_time / deepgemm_time
        avg_speedups[
            "DeepGEMM vs vLLM CUTLASS"] += vllm_cutlass_time / deepgemm_time
        avg_speedups[
            "vLLM CUTLASS vs vLLM Triton"] += vllm_triton_time / vllm_cutlass_time

    print("\n===== AVERAGE SPEEDUPS =====")
    speedup_headers = ["Comparison", "Speedup"]
    speedup_rows = []
    for comparison, total in avg_speedups.items():
        avg_speedup = total / num_shapes
        status = "faster" if avg_speedup > 1 else "slower"
        speedup_rows.append([comparison, f"{avg_speedup:.2f}x {status}"])

    print_table(speedup_headers, speedup_rows)

    # Average accuracy comparison
    print("\n===== ACCURACY COMPARISON =====")
    avg_diff = {impl: 0 for impl in implementations}

    for result in all_results:
        for impl in implementations:
            avg_diff[impl] += result["implementations"][impl]["diff"][
                "Reference"]

    diff_headers = ["Implementation", "Avg Diff vs Reference"]
    diff_rows = []
    for impl in implementations:
        diff_rows.append([impl, f"{avg_diff[impl] / num_shapes:.6f}"])

    print_table(diff_headers, diff_rows)


if __name__ == "__main__":
    run_benchmarks(verbose=False)