mirror of
https://git.datalinker.icu/vllm-project/vllm.git
synced 2026-04-07 23:37:06 +08:00
Signed-off-by: Nick Hill <nhill@redhat.com> Signed-off-by: Lucas Kabela <lucaskabela@meta.com> Signed-off-by: Max de Bayser <mbayser@br.ibm.com> Signed-off-by: Andrew Sansom <andrew@protopia.ai> Signed-off-by: Boyuan Feng <boyuan@meta.com> Signed-off-by: Boyuan Feng <fby.1994@gmail.com> Signed-off-by: boyuanfeng <boyuan@meta.com> Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> Signed-off-by: JartX <sagformas@epdcenter.es> Signed-off-by: Chendi Xue <Chendi.Xue@intel.com> Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com> Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk> Signed-off-by: Chen Zhang <zhangch99@outlook.com> Signed-off-by: Roger Wang <hey@rogerw.io> Signed-off-by: mgoin <mgoin64@gmail.com> Signed-off-by: wwl2755 <wangwenlong2755@gmail.com> Signed-off-by: Manoel Marques <manoel.marques@ibm.com> Signed-off-by: Manoel Marques <manoelmrqs@gmail.com> Signed-off-by: Isotr0py <mozf@mail2.sysu.edu.cn> Signed-off-by: pengdrumli <pengdrumli@tencent.com> Signed-off-by: windsonsea <haifeng.yao@daocloud.io> Signed-off-by: Woosuk Kwon <woosuk@thinkingmachines.ai> Signed-off-by: Woosuk Kwon <woosuk.kwon@berkeley.edu> Signed-off-by: Huamin Li <3ericli@gmail.com> Signed-off-by: simondanielsson <simon.danielsson99@hotmail.com> Signed-off-by: Rahul Tuli <rtuli@redhat.com> Signed-off-by: Yang <lymailforjob@gmail.com> Signed-off-by: Debolina Roy <debroy@redhat.com> Signed-off-by: David Chen <530634352@qq.com> Signed-off-by: wangzi <3220100013@zju.edu.cn> Signed-off-by: Eldar Kurtic <8884008+eldarkurtic@users.noreply.github.com> Signed-off-by: NickLucche <nlucches@redhat.com> Signed-off-by: Yizhou Liu <liu_yizhou@outlook.com> Signed-off-by: Sara Kokkila Schumacher <saraks@ibm.com> Signed-off-by: Csrayz <jover@cmbchina.com> Signed-off-by: ivyilike <pww123@cmbchina.com> Signed-off-by: Burkhard Ringlein <ngl@zurich.ibm.com> Signed-off-by: Bowen Wang <abmfy@icloud.com> Signed-off-by: qqma <qqma@amazon.com> Signed-off-by: ElizaWszola 
<ewszola@redhat.com> Signed-off-by: Lu Fang <fanglu@fb.com> Signed-off-by: Zhuohan Li <zhuohan123@gmail.com> Signed-off-by: Luka Govedič <lgovedic@redhat.com> Signed-off-by: luka <lgovedic@redhat.com> Signed-off-by: Luka Govedič <ProExpertProg@users.noreply.github.com> Signed-off-by: Or Ozeri <oro@il.ibm.com> Signed-off-by: Johnny Yang <johnnyyang@google.com> Signed-off-by: Alec Solder <alecs@fb.com> Signed-off-by: Alec S <10566873+alecsolder@users.noreply.github.com> Signed-off-by: Russell Bryant <rbryant@redhat.com> Signed-off-by: Matthew Bonanni <mbonanni@redhat.com> Signed-off-by: Alexander Matveev <amatveev@redhat.com> Signed-off-by: yewentao256 <zhyanwentao@126.com> Signed-off-by: liuye.hj <liuye.hj@alibaba-inc.com> Signed-off-by: Kunshang Ji <kunshang.ji@intel.com> Signed-off-by: Lucia Fang <116399278+luccafong@users.noreply.github.com> Signed-off-by: Michael Goin <mgoin64@gmail.com> Signed-off-by: Varun Sundar Rabindranath <vsundarr@redhat.com> Signed-off-by: Ming Yang <minos.future@gmail.com> Signed-off-by: Zhikaiiii <1658973216@qq.com> Signed-off-by: Andreas Hartel <andreas.hartel@aleph-alpha.com> Signed-off-by: Jee Jee Li <pandaleefree@gmail.com> Signed-off-by: vllmellm <vllm.ellm@embeddedllm.com> Signed-off-by: wuxibin <wuxibin@bytedance.com> Signed-off-by: youkaichao <youkaichao@gmail.com> Signed-off-by: Peter Pan <Peter.Pan@daocloud.io> Signed-off-by: Peter Pan <peter.pan@daocloud.io> Signed-off-by: Nicolò Lucchesi<nicolo.lucchesi@gmail.com> Signed-off-by: Thomas Parnell <tpa@zurich.ibm.com> Signed-off-by: Sage Moore <sage@neuralmagic.com> Signed-off-by: Lucas Wilkinson <lwilkins@redhat.com> Signed-off-by: Lucas Wilkinson <LucasWilkinson@users.noreply.github.com> Signed-off-by: Tyler Michael Smith <tyler@neuralmagic.com> Signed-off-by: Bill Nell <bnell@redhat.com> Signed-off-by: Shreeasish Kumar <shreeasish@rivosinc.com> Signed-off-by: Weida Hong <wdhongtw@google.com> Signed-off-by: Ekagra Ranjan <3116519+ekagra-ranjan@users.noreply.github.com> 
Signed-off-by: Hashem Hashemi <hashem.hashemi@amd.com> Signed-off-by: Hashem Hashemi <159079214+amd-hhashemi@users.noreply.github.com> Signed-off-by: Amir Samani <asamani@nvidia.com> Signed-off-by: ElizaWszola <elizaw.9289@gmail.com> Signed-off-by: jiahanc <173873397+jiahanc@users.noreply.github.com> Signed-off-by: ilmarkov <markovilya197@gmail.com> Signed-off-by: Gregory Shtrasberg <Gregory.Shtrasberg@amd.com> Signed-off-by: Jialin Ouyang <Jialin.Ouyang@gmail.com> Signed-off-by: rouchenzi <ruochenwen@gmail.com> Signed-off-by: rouchenzi <40842833+rouchenzi@users.noreply.github.com> Signed-off-by: Andrew Xia <axia@meta.com> Signed-off-by: Kourosh Hakhamaneshi <kourosh@anyscale.com> Signed-off-by: Corey Lowman <clowman1993@gmail.com> Signed-off-by: jpvillam <jpvillam@amd.com> Signed-off-by: dougbtv <dosmith@redhat.com> Signed-off-by: Chenxi Yang <cxyang@fb.com> Signed-off-by: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Signed-off-by: ahao-anyscale <ahao@anyscale.com> Signed-off-by: Yan Lu <luyan@nvidia.com> Signed-off-by: baxingpiaochong <771405853@qq.com> Signed-off-by: Kyle Sayers <kylesayrs@gmail.com> Signed-off-by: Nikhil Gupta <nikhil.gupta2@arm.com> Signed-off-by: Yong Hoon Shin <yhshin@meta.com> Signed-off-by: Benjamin Chislett <benjamin.chislett@centml.ai> Signed-off-by: Benjamin Chislett <bchislett@nvidia.com> Signed-off-by: Ben Browning <bbrownin@redhat.com> Signed-off-by: Chengji Yao <chengjiyao@google.com> Signed-off-by: jiang1.li <jiang1.li@intel.com> Signed-off-by: Jackmin801 <ongjackm@gmail.com> Signed-off-by: Jonas M. Kübler <44084297+jmkuebler@users.noreply.github.com> Signed-off-by: taohui <taohui3@gmail.com> Signed-off-by: rongfu.leng <rongfu.leng@daocloud.io> Signed-off-by: Shu Wang <shuw@nvidia.com> Signed-off-by: Shu Wang. 
<shuw@nvidia.com> Signed-off-by: Tyler Michael Smith <tlrmchlsmth@gmail.com> Signed-off-by: Duncan Moss <djm.moss@gmail.com> Signed-off-by: Shiyan Deng <dsy842974287@meta.com> Signed-off-by: Wei Wei <wwei6@meta.com> Signed-off-by: Saman Keon <samanamp@outlook.com> Signed-off-by: yangxurui <yangxurui@meituan.com> Signed-off-by: nicole-lihui <nicole.li@daocloud.io> Signed-off-by: courage17340 <courage17340@163.com> Signed-off-by: Jacob Kahn <jacobkahn1@gmail.com> Signed-off-by: Fadi Arafeh <fadi.arafeh@arm.com> Signed-off-by: Agata Dobrzyniewicz <adobrzyniewicz@habana.ai> Signed-off-by: zxw <1020938856@qq.com> Signed-off-by: wang.yuqi <noooop@126.com> Signed-off-by: Cyrus Leung <cyrus.tl.leung@gmail.com> Signed-off-by: chenlang <chen.lang5@zte.com.cn> Signed-off-by: Jonas Kuebler <kuebj@amazon.com> Signed-off-by: AlonKejzman <alonkeizman@gmail.com> Signed-off-by: Tao Hui <taohui3@gmail.com> Signed-off-by: Matthew Bonanni <mbonanni001@gmail.com> Signed-off-by: Tomer Asida <57313761+tomeras91@users.noreply.github.com> Signed-off-by: Aleksandr Malyshev <maleksan@amd.com> Signed-off-by: Eugene Khvedchenia <ekhvedchenia@nvidia.com> Signed-off-by: Eugene Khvedchenya <ekhvedchenya@gmail.com> Signed-off-by: yiting.jiang <yiting.jiang@daocloud.io> Signed-off-by: xaguilar <Xavier.AguilarFruto@amd.com> Signed-off-by: Iceber Gu <caiwei95@hotmail.com> Signed-off-by: Tao He <linzhu.ht@alibaba-inc.com> Signed-off-by: Icey <1790571317@qq.com> Signed-off-by: 许文卿 <xwq391974@alibaba-inc.com> Signed-off-by: Chih-Chieh-Yang <7364402+cyang49@users.noreply.github.com> Co-authored-by: Nick Hill <nhill@redhat.com> Co-authored-by: Lucas Kabela <lucasakabela@gmail.com> Co-authored-by: Maximilien de Bayser <mbayser@br.ibm.com> Co-authored-by: Andrew Sansom <andrew@protopia.ai> Co-authored-by: Boyuan Feng <boyuan@meta.com> Co-authored-by: Luka Govedič <ProExpertProg@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> 
Co-authored-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> Co-authored-by: JartX <sagformas@epdcenter.es> Co-authored-by: Chendi.Xue <chendi.xue@intel.com> Co-authored-by: Chauncey <chaunceyjiang@gmail.com> Co-authored-by: xin.li <xin.li@daocloud.io> Co-authored-by: Cyrus Leung <tlleungac@connect.ust.hk> Co-authored-by: Chen Zhang <zhangch99@outlook.com> Co-authored-by: Roger Wang <hey@rogerw.io> Co-authored-by: Michael Goin <mgoin64@gmail.com> Co-authored-by: Wenlong Wang <wangwenlong2755@gmail.com> Co-authored-by: Manoel Marques <manoelmrqs@gmail.com> Co-authored-by: Isotr0py <mozf@mail2.sysu.edu.cn> Co-authored-by: lirong <56789630+lirong-lirong@users.noreply.github.com> Co-authored-by: Michael Yao <haifeng.yao@daocloud.io> Co-authored-by: Woosuk Kwon <woosuk.kwon@berkeley.edu> Co-authored-by: Huamin Li <3ericli@gmail.com> Co-authored-by: Lu Fang <30275821+houseroad@users.noreply.github.com> Co-authored-by: Simon Danielsson <70206058+simondanielsson@users.noreply.github.com> Co-authored-by: Rahul Tuli <rtuli@redhat.com> Co-authored-by: Claude <noreply@anthropic.com> Co-authored-by: Yang Liu <127183760+KKSK-DON@users.noreply.github.com> Co-authored-by: Deboleina <debroy@redhat.com> Co-authored-by: yinz-aizip <yinz@aizip.ai> Co-authored-by: WeiQing Chen <40507679+david6666666@users.noreply.github.com> Co-authored-by: wangzi <3220100013@zju.edu.cn> Co-authored-by: Eldar Kurtić <8884008+eldarkurtic@users.noreply.github.com> Co-authored-by: Nicolò Lucchesi <nlucches@redhat.com> Co-authored-by: Ye (Charlotte) Qi <yeq@meta.com> Co-authored-by: Yizhou <136800916+yiz-liu@users.noreply.github.com> Co-authored-by: Sara-KS <50249410+Sara-KS@users.noreply.github.com> Co-authored-by: Csrayz <jover@cmbchina.com> Co-authored-by: ivyilike <pww123@cmbchina.com> Co-authored-by: Burkhard Ringlein <ngl@zurich.ibm.com> Co-authored-by: Bowen Wang <abmfy@icloud.com> Co-authored-by: Tyler Michael Smith <tyler@neuralmagic.com> Co-authored-by: Daisy-Ma-coder 
<daisy.ma.0117@gmail.com> Co-authored-by: qqma <qqma@amazon.com> Co-authored-by: ElizaWszola <ewszola@redhat.com> Co-authored-by: Lucia Fang <116399278+luccafong@users.noreply.github.com> Co-authored-by: Zhuohan Li <zhuohan123@gmail.com> Co-authored-by: Simon Mo <simon.mo@hey.com> Co-authored-by: Or Ozeri <oro@il.ibm.com> Co-authored-by: Johnny Yang <24908445+jcyang43@users.noreply.github.com> Co-authored-by: Chengji Yao <chengjiyao@google.com> Co-authored-by: Alec S <10566873+alecsolder@users.noreply.github.com> Co-authored-by: Alec Solder <alecs@fb.com> Co-authored-by: Russell Bryant <rbryant@redhat.com> Co-authored-by: Matthew Bonanni <mbonanni@redhat.com> Co-authored-by: Robert Shaw <114415538+robertgshaw2-redhat@users.noreply.github.com> Co-authored-by: Chris Bamford <chrisbam4d@gmail.com> Co-authored-by: Alexander Matveev <59768536+alexm-redhat@users.noreply.github.com> Co-authored-by: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Co-authored-by: JJJYmmm <92386084+JJJYmmm@users.noreply.github.com> Co-authored-by: liuye.hj <liuye.hj@alibaba-inc.com> Co-authored-by: Kunshang Ji <kunshang.ji@intel.com> Co-authored-by: Lucia (Lu) Fang <fanglu@meta.com> Co-authored-by: Varun Sundar Rabindranath <varunsundar08@gmail.com> Co-authored-by: Varun Sundar Rabindranath <vsundarr@redhat.com> Co-authored-by: Ming Yang <yming@meta.com> Co-authored-by: Zhikaiiii <55917203+Zhikaiiii@users.noreply.github.com> Co-authored-by: Andreas Hartel <andreas@hartel.me> Co-authored-by: Jee Jee Li <pandaleefree@gmail.com> Co-authored-by: vllmellm <vllm.ellm@embeddedllm.com> Co-authored-by: Joel <wuxibin89@163.com> Co-authored-by: youkaichao <youkaichao@gmail.com> Co-authored-by: Mark McLoughlin <markmc@redhat.com> Co-authored-by: Peter Pan <peter.pan@daocloud.io> Co-authored-by: Nicolò Lucchesi <nicolo.lucchesi@gmail.com> Co-authored-by: Fanli Lin <fanli.lin@intel.com> Co-authored-by: Thomas Parnell <tpa@zurich.ibm.com> Co-authored-by: Lucas Wilkinson 
<LucasWilkinson@users.noreply.github.com> Co-authored-by: Sage Moore <sage@neuralmagic.com> Co-authored-by: yewentao256 <zhyanwentao@126.com> Co-authored-by: bnellnm <49004751+bnellnm@users.noreply.github.com> Co-authored-by: rivos-shreeasish <shreeasish@rivosinc.com> Co-authored-by: Chih-Chieh Yang <chih.chieh.yang@ibm.com> Co-authored-by: Weida Hong <wdhongtw@gmail.com> Co-authored-by: Ekagra Ranjan <3116519+ekagra-ranjan@users.noreply.github.com> Co-authored-by: Hashem Hashemi <159079214+amd-hhashemi@users.noreply.github.com> Co-authored-by: Amir Samani <samani@ualberta.ca> Co-authored-by: Luka Govedič <lgovedic@redhat.com> Co-authored-by: jiahanc <173873397+jiahanc@users.noreply.github.com> Co-authored-by: Ilya Markov <markovilya197@gmail.com> Co-authored-by: Gregory Shtrasberg <156009573+gshtras@users.noreply.github.com> Co-authored-by: Jialin Ouyang <Jialin.Ouyang@gmail.com> Co-authored-by: rouchenzi <40842833+rouchenzi@users.noreply.github.com> Co-authored-by: Andrew Xia <axia@meta.com> Co-authored-by: kourosh hakhamaneshi <31483498+kouroshHakha@users.noreply.github.com> Co-authored-by: Corey Lowman <clowman1993@gmail.com> Co-authored-by: Juan Villamizar <100237675+jpvillam-amd@users.noreply.github.com> Co-authored-by: jpvillam <jpvillam@amd.com> Co-authored-by: Doug Smith <dosmith@redhat.com> Co-authored-by: Chenxi Yang <cxyang@cs.utexas.edu> Co-authored-by: Chenxi Yang <cxyang@fb.com> Co-authored-by: ahao-anyscale <ahao@anyscale.com> Co-authored-by: 0xNullPath <luyanfcp@foxmail.com> Co-authored-by: baxingpiaochong <771405853@qq.com> Co-authored-by: Benjamin Chislett <bchislett@nvidia.com> Co-authored-by: Kyle Sayers <kylesayrs@gmail.com> Co-authored-by: Nikhil Gupta <nikhil.gupta2@arm.com> Co-authored-by: Yong Hoon Shin <48474650+sarckk@users.noreply.github.com> Co-authored-by: lhsjohn <huashuoli@tencent.com> Co-authored-by: Ben Browning <bbrownin@redhat.com> Co-authored-by: Li, Jiang <jiang1.li@intel.com> Co-authored-by: Jackmin801 
<56836461+Jackmin801@users.noreply.github.com> Co-authored-by: Jonas M. Kübler <44084297+jmkuebler@users.noreply.github.com> Co-authored-by: Tao Hui <taohui3@gmail.com> Co-authored-by: rongfu.leng <rongfu.leng@daocloud.io> Co-authored-by: Shu Wang <shuw@nvidia.com> Co-authored-by: Tyler Michael Smith <tlrmchlsmth@gmail.com> Co-authored-by: Duncan Moss <djm.moss@gmail.com> Co-authored-by: Shiyan Deng <dsy842974287@meta.com> Co-authored-by: Wei Wei <wwei6@meta.com> Co-authored-by: Saman A. Pour <samanamp@outlook.com> Co-authored-by: XuruiYang <530534756@qq.com> Co-authored-by: yangxurui <yangxurui@meituan.com> Co-authored-by: Nicole LiHui 🥜 <nicolelihui@outlook.com> Co-authored-by: courage17340 <courage17340@users.noreply.github.com> Co-authored-by: Jacob Kahn <jacobkahn1@gmail.com> Co-authored-by: Nicole LiHui 🥜 <nicole.li@daocloud.io> Co-authored-by: Fadi Arafeh <115173828+fadara01@users.noreply.github.com> Co-authored-by: Agata Dobrzyniewicz <160237065+adobrzyn@users.noreply.github.com> Co-authored-by: yyzxw <34639446+yyzxw@users.noreply.github.com> Co-authored-by: wang.yuqi <noooop@126.com> Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com> Co-authored-by: chenlang <chen.lang5@zte.com.cn> Co-authored-by: chenlang <10346245@zte.com.cn> Co-authored-by: AlonKejzman <alonkeizman@gmail.com> Co-authored-by: tomeras91 <57313761+tomeras91@users.noreply.github.com> Co-authored-by: Aleksandr Malyshev <164964928+maleksan85@users.noreply.github.com> Co-authored-by: Aleksandr Malyshev <maleksan@amd.com> Co-authored-by: Doug Lehr <douglehr@amd.com> Co-authored-by: Eugene Khvedchenya <ekhvedchenya@gmail.com> Co-authored-by: yitingdc <59356937+yitingdc@users.noreply.github.com> Co-authored-by: xaguilar-amd <xavier.aguilarfruto@amd.com> Co-authored-by: Iceber Gu <caiwei95@hotmail.com> Co-authored-by: Tao He <linzhu.ht@alibaba-inc.com> Co-authored-by: Icey <1790571317@qq.com> Co-authored-by: Xu Wenqing <121550081+Xu-Wenqing@users.noreply.github.com> Co-authored-by: Chih-Chieh 
Yang <7364402+cyang49@users.noreply.github.com> Co-authored-by: RishiAstra <40644327+RishiAstra@users.noreply.github.com>
421 lines
19 KiB
Plaintext
421 lines
19 KiB
Plaintext
/*
|
|
* Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
|
|
#include <torch/all.h>
|
|
#include <cutlass/arch/arch.h>
|
|
|
|
#include <ATen/cuda/CUDAContext.h>
|
|
#include <c10/cuda/CUDAGuard.h>
|
|
#include <c10/cuda/CUDAStream.h>
|
|
|
|
#include "cute/tensor.hpp"
|
|
#include "cutlass/tensor_ref.h"
|
|
#include "cutlass/epilogue/collective/default_epilogue.hpp"
|
|
#include "cutlass/epilogue/thread/linear_combination.h"
|
|
#include "cutlass/gemm/dispatch_policy.hpp"
|
|
#include "cutlass/gemm/group_array_problem_shape.hpp"
|
|
#include "cutlass/gemm/collective/collective_builder.hpp"
|
|
#include "cutlass/epilogue/collective/collective_builder.hpp"
|
|
#include "cutlass/gemm/device/gemm_universal_adapter.h"
|
|
#include "cutlass/gemm/kernel/gemm_universal.hpp"
|
|
|
|
#include "cutlass/util/command_line.h"
|
|
#include "cutlass/util/distribution.h"
|
|
#include "cutlass/util/host_tensor.h"
|
|
#include "cutlass/util/packed_stride.hpp"
|
|
#include "cutlass/util/tensor_view_io.h"
|
|
#include "cutlass/util/reference/device/gemm.h"
|
|
#include "cutlass/util/reference/device/tensor_compare.h"
|
|
#include "cutlass/util/reference/host/tensor_fill.h"
|
|
#include "cutlass/util/reference/host/gett.hpp"
|
|
#include "cutlass/util/reference/host/tensor_norm.h"
|
|
#include "cutlass/util/reference/host/tensor_compare.h"
|
|
#include <cassert>
|
|
|
|
// Pull in CuTe names (Shape, _1, _128, make_shape, ...) used throughout
// the kernel/config code below.
using namespace cute;
|
// Device kernel that fills the per-expert pointer/layout arrays consumed by
// the grouped GEMM: for each expert it computes base pointers into the packed
// A/B/C tensors, the A/B block-scale tensors and the per-expert alpha, and
// writes the CUTLASS scale-factor layouts (SFA/SFB). Launched as a single
// block with one thread per expert (see the <<<1, num_experts>>> launch in
// __CALL_GET_STARTS_KERNEL_BLOCKSCALE).
//
// The *_base_as_int parameters are the base addresses of the flat tensors;
// expert_offsets/sf_offsets give each expert's row offset into A/C and the
// A-scale tensor respectively; problem_sizes_as_shapes is laid out as
// [num_experts, 3] = (m, n, k) per expert.
template <typename ElementAB, typename ElementC, typename ElementSF,
          typename ElementAccumulator, typename LayoutSFA, typename LayoutSFB,
          typename ScaleConfig>
__global__ void __get_group_gemm_starts(
    ElementAB** a_offsets, ElementAB** b_offsets, ElementC** out_offsets,
    ElementSF** a_scales_offsets, ElementSF** b_scales_offsets,
    ElementAccumulator** alpha_offsets, LayoutSFA* layout_sfa_base_as_int,
    LayoutSFB* layout_sfb_base_as_int, ElementAB* a_base_as_int,
    ElementAB* b_base_as_int, ElementC* out_base_as_int,
    ElementSF* a_scales_base_as_int, ElementSF* b_scales_base_as_int,
    ElementAccumulator* alphas_base_as_int, const int32_t* expert_offsets,
    const int32_t* sf_offsets, const int32_t* problem_sizes_as_shapes,
    const int K, const int N) {
  int64_t expert_id = threadIdx.x;
  // NOTE(review): threadIdx.x is always < blockDim.x <= gridDim.x * blockDim.x,
  // so this guard can never fire — it is effectively dead code. A bound
  // against the expert count would be the meaningful check; confirm intent.
  if (expert_id >= gridDim.x * blockDim.x) {
    return;
  }
  // Originally int32_t but upcasting to int64_t to avoid overflow
  // during offset calculations
  int64_t expert_offset = static_cast<int64_t>(expert_offsets[expert_id]);
  int64_t sf_offset = static_cast<int64_t>(sf_offsets[expert_id]);
  // size for block in block scale.
  int64_t group_size = 16;
  // Per-expert problem shape (m, n, k) from the flat [E, 3] int32 tensor.
  int64_t m = static_cast<int64_t>(problem_sizes_as_shapes[expert_id * 3]);
  int64_t n = static_cast<int64_t>(problem_sizes_as_shapes[expert_id * 3 + 1]);
  int64_t k = static_cast<int64_t>(problem_sizes_as_shapes[expert_id * 3 + 2]);
  // n and k must be uniform across experts, and k must be even because two
  // FP4 values are packed per byte (hence the half_k stride below).
  assert((m >= 0 && n == N && k == K && k % 2 == 0) &&
         "unexpected problem sizes");

  int64_t half_k = static_cast<int64_t>(k / 2);
  int64_t group_k = static_cast<int64_t>(k / group_size);
  // Shape of A as uint8/byte = [M, K // 2]
  // Shape of B as uint8/byte = [E, N, K // 2]
  a_offsets[expert_id] = a_base_as_int + expert_offset * half_k;

  b_offsets[expert_id] = b_base_as_int + expert_id * n * half_k;
  // Shape of C = [M, N]
  out_offsets[expert_id] = out_base_as_int + expert_offset * n;
  // Shape of a_scale = [sum(sf_sizes), K // group_size]
  a_scales_offsets[expert_id] = a_scales_base_as_int + sf_offset * group_k;

  assert((reinterpret_cast<uintptr_t>(a_scales_offsets[expert_id]) % 128) ==
             0 &&
         "TMA requires 128-byte alignment");

  // Shape of B scale = [E, N, K // group_size]
  b_scales_offsets[expert_id] = b_scales_base_as_int + expert_id * n * group_k;
  assert((reinterpret_cast<uintptr_t>(b_scales_offsets[expert_id]) % 128) ==
             0 &&
         "TMA requires 128-byte alignment");
  // Shape of alpha = [E]
  alpha_offsets[expert_id] = alphas_base_as_int + expert_id;

  // Per-expert scale-factor layout slots consumed by the CUTLASS mainloop.
  LayoutSFA* layout_sfa_ptr = layout_sfa_base_as_int + expert_id;
  LayoutSFB* layout_sfb_ptr = layout_sfb_base_as_int + expert_id;

  *layout_sfa_ptr = ScaleConfig::tile_atom_to_shape_SFA(cute::make_shape(
      static_cast<int>(m), static_cast<int>(n), static_cast<int>(k), 1));
  *layout_sfb_ptr = ScaleConfig::tile_atom_to_shape_SFB(cute::make_shape(
      static_cast<int>(m), static_cast<int>(n), static_cast<int>(k), 1));
}
|
|
|
|
// Expands to one `else if` arm that dispatches __get_group_gemm_starts for a
// given (AB element, scale-factor, output) type combination. The expansion
// deliberately begins with `else if` so multiple invocations chain after the
// sentinel `if (false) {}` in run_get_group_gemm_starts. Relies on the
// caller's locals being in scope: out_tensors, num_experts, stream, and the
// *_starts / layout_sf* / base tensors passed to the kernel. Launches a
// single block of num_experts threads (one thread per expert).
// (No comments inside the macro body: a `//` comment before a `\` would
// splice the next line into the comment.)
#define __CALL_GET_STARTS_KERNEL_BLOCKSCALE(ELEMENT_AB_TYPE, SF_TYPE,         \
                                            TENSOR_C_TYPE, C_TYPE, LayoutSFA, \
                                            LayoutSFB, ScaleConfig)           \
  else if (out_tensors.dtype() == TENSOR_C_TYPE) {                            \
    __get_group_gemm_starts<ELEMENT_AB_TYPE, C_TYPE, SF_TYPE, float,          \
                            LayoutSFA, LayoutSFB, ScaleConfig>                 \
        <<<1, num_experts, 0, stream>>>(                                      \
            static_cast<ELEMENT_AB_TYPE**>(a_starts.data_ptr()),              \
            static_cast<ELEMENT_AB_TYPE**>(b_starts.data_ptr()),              \
            static_cast<C_TYPE**>(out_starts.data_ptr()),                     \
            static_cast<SF_TYPE**>(a_scales_starts.data_ptr()),               \
            static_cast<SF_TYPE**>(b_scales_starts.data_ptr()),               \
            static_cast<float**>(alpha_starts.data_ptr()),                    \
            reinterpret_cast<LayoutSFA*>(layout_sfa.data_ptr()),              \
            reinterpret_cast<LayoutSFB*>(layout_sfb.data_ptr()),              \
            static_cast<ELEMENT_AB_TYPE*>(a_tensors.data_ptr()),              \
            static_cast<ELEMENT_AB_TYPE*>(b_tensors.data_ptr()),              \
            static_cast<C_TYPE*>(out_tensors.data_ptr()),                     \
            static_cast<SF_TYPE*>(a_scales.data_ptr()),                       \
            static_cast<SF_TYPE*>(b_scales.data_ptr()),                       \
            static_cast<float*>(alphas.data_ptr()),                           \
            static_cast<int32_t*>(expert_offsets.data_ptr()),                 \
            static_cast<int32_t*>(sf_offsets.data_ptr()),                     \
            static_cast<int32_t*>(problem_sizes.data_ptr()), K, N);           \
  }
|
|
|
|
// Host-side launcher for __get_group_gemm_starts. Validates the trailing
// dimensions, then dispatches the pointer-setup kernel on the current CUDA
// stream based on the output dtype (bfloat16 or float16); any other dtype
// trips the TORCH_CHECK in the trailing else. The first eight tensors are
// the int64 pointer arrays the kernel fills in; the rest provide base
// addresses and per-expert metadata.
template <typename LayoutSFA, typename LayoutSFB, typename ScaleConfig>
void run_get_group_gemm_starts(
    const torch::Tensor& a_starts, const torch::Tensor& b_starts,
    const torch::Tensor& out_starts, const torch::Tensor& a_scales_starts,
    const torch::Tensor& b_scales_starts, const torch::Tensor& alpha_starts,
    const torch::Tensor& layout_sfa, const torch::Tensor& layout_sfb,
    /*these are used for their base addresses*/
    torch::Tensor const& a_tensors, torch::Tensor const& b_tensors,
    torch::Tensor const& out_tensors, torch::Tensor const& a_scales,
    torch::Tensor const& b_scales, torch::Tensor const& alphas,
    torch::Tensor const& expert_offsets, torch::Tensor const& sf_offsets,
    torch::Tensor const& problem_sizes, int M, int N, int K) {
  int num_experts = (int)expert_offsets.size(0);
  auto stream = at::cuda::getCurrentCUDAStream(a_tensors.device().index());

  TORCH_CHECK(out_tensors.size(1) == N,
              "Output tensor shape doesn't match expected shape");
  // A and B store two FP4 values per byte, so the byte-wise trailing dim is
  // K / 2.
  TORCH_CHECK(K / 2 == b_tensors.size(2),
              "b_tensors(dim = 2) and a_tensors(dim = 1) trailing"
              " dimension must match");
  // Sentinel branch: each macro expansion below begins with `else if`, so a
  // leading `if (false) {}` is required to anchor the chain.
  if (false) {
  }
  //(ELEMENT_AB_TYPE, BS_TYPE, TENSOR_C_TYPE, C_TYPE, LayoutSFA, LayoutSFB,
  // ScaleConfig)
  __CALL_GET_STARTS_KERNEL_BLOCKSCALE(
      cutlass::float_e2m1_t, cutlass::float_ue4m3_t, torch::kBFloat16,
      cutlass::bfloat16_t, LayoutSFA, LayoutSFB, ScaleConfig)
  __CALL_GET_STARTS_KERNEL_BLOCKSCALE(cutlass::float_e2m1_t,
                                      cutlass::float_ue4m3_t, torch::kFloat16,
                                      half, LayoutSFA, LayoutSFB, ScaleConfig)
  else {
    TORCH_CHECK(false, "Invalid output type (must be float16 or bfloat16)");
  }
}
|
|
|
|
// Runs the NVFP4 blockwise-scaled grouped GEMM on SM100 via CUTLASS's
// ptr-array (grouped) kernel. Builds the collective mainloop/epilogue types
// for a 1-SM 128x128x128 tile config, materializes per-expert pointer and
// stride arrays on device, then initializes and launches the GEMM on the
// current CUDA stream. OutType selects the C/D element type (bfloat16 or
// half). Alpha scaling is applied per expert through the epilogue's
// alpha_ptr_array.
template <typename OutType>
void run_fp4_blockwise_scaled_group_mm(
    torch::Tensor& output, const torch::Tensor& a, const torch::Tensor& b,
    const torch::Tensor& a_blockscale, const torch::Tensor& b_blockscales,
    const torch::Tensor& alphas, const torch::Tensor& problem_sizes,
    const torch::Tensor& expert_offsets, const torch::Tensor& sf_offsets, int M,
    int N, int K) {
  using ProblemShape =
      cutlass::gemm::GroupProblemShape<Shape<int32_t, int32_t, int32_t>>;
  using ElementType = cutlass::float_e2m1_t;
  using ElementSFType = cutlass::float_ue4m3_t;
  using ElementA = cutlass::nv_float4_t<cutlass::float_e2m1_t>;
  using ElementB = cutlass::nv_float4_t<cutlass::float_e2m1_t>;

  using ElementC = OutType;
  using ElementD = ElementC;
  using ElementAccumulator = float;
  // Layout definitions
  using LayoutA = cutlass::layout::RowMajor;
  using LayoutB = cutlass::layout::ColumnMajor;
  using LayoutC = cutlass::layout::RowMajor;
  using LayoutD = LayoutC;

  // Alignment constraints
  static constexpr int AlignmentA = 32;
  static constexpr int AlignmentB = 32;
  static constexpr int AlignmentC = 128 / cutlass::sizeof_bits<ElementC>::value;
  static constexpr int AlignmentD = 128 / cutlass::sizeof_bits<ElementD>::value;

  // Architecture definitions
  using ArchTag = cutlass::arch::Sm100;
  using EpilogueOperatorClass =
      cutlass::arch::OpClassTensorOp;  // Epilogue Operator class tag
  using MainloopOperatorClass =
      cutlass::arch::OpClassBlockScaledTensorOp;  // Mainloop Operator class tag
  using StageCountType =
      cutlass::gemm::collective::StageCountAuto;  // Stage count maximized based
                                                  // on the tile size

  using ClusterShape = Shape<_1, _1, _1>;
  struct MMA1SMConfig {
    using MmaTileShape = Shape<_128, _128, _128>;
    using KernelSchedule = cutlass::gemm::
        KernelPtrArrayTmaWarpSpecialized1SmNvf4Sm100;  // Kernel to launch
    using EpilogueSchedule =
        cutlass::epilogue::PtrArrayTmaWarpSpecialized1Sm;  // Epilogue to launch
  };

  // Epilogue is built first so the mainloop's stage count can carve out the
  // epilogue's shared-memory footprint below.
  using CollectiveEpilogue =
      typename cutlass::epilogue::collective::CollectiveBuilder<
          ArchTag, EpilogueOperatorClass, typename MMA1SMConfig::MmaTileShape,
          ClusterShape, Shape<_128, _64>, ElementAccumulator,
          ElementAccumulator, ElementC, LayoutC*, AlignmentC, ElementD,
          LayoutC*, AlignmentD,
          typename MMA1SMConfig::EpilogueSchedule>::CollectiveOp;

  using CollectiveMainloop =
      typename cutlass::gemm::collective::CollectiveBuilder<
          ArchTag, MainloopOperatorClass, ElementA, LayoutA*, AlignmentA,
          ElementB, LayoutB*, AlignmentB, ElementAccumulator,
          typename MMA1SMConfig::MmaTileShape, ClusterShape,
          cutlass::gemm::collective::StageCountAutoCarveout<static_cast<int>(
              sizeof(typename CollectiveEpilogue::SharedStorage))>,
          typename MMA1SMConfig::KernelSchedule>::CollectiveOp;

  using GemmKernel =
      cutlass::gemm::kernel::GemmUniversal<ProblemShape, CollectiveMainloop,
                                           CollectiveEpilogue>;

  using Gemm1SM = cutlass::gemm::device::GemmUniversalAdapter<GemmKernel>;
  using Gemm = Gemm1SM;
  using StrideA = typename Gemm::GemmKernel::InternalStrideA;
  using StrideB = typename Gemm::GemmKernel::InternalStrideB;
  using StrideC = typename Gemm::GemmKernel::InternalStrideC;
  using StrideD = typename Gemm::GemmKernel::InternalStrideD;

  using LayoutSFA =
      typename Gemm::GemmKernel::CollectiveMainloop::InternalLayoutSFA;
  using LayoutSFB =
      typename Gemm::GemmKernel::CollectiveMainloop::InternalLayoutSFB;
  using ScaleConfig =
      typename Gemm::GemmKernel::CollectiveMainloop::Sm1xxBlkScaledConfig;

  using UnderlyingProblemShape = ProblemShape::UnderlyingProblemShape;
  int num_experts = static_cast<int>(expert_offsets.size(0));
  auto options_int =
      torch::TensorOptions().dtype(torch::kInt64).device(a.device());

  // Device-side arrays of per-expert pointers (stored as int64) filled in by
  // run_get_group_gemm_starts below.
  torch::Tensor a_ptrs = torch::empty(num_experts, options_int);
  torch::Tensor b_ptrs = torch::empty(num_experts, options_int);
  torch::Tensor out_ptrs = torch::empty(num_experts, options_int);
  torch::Tensor a_scales_ptrs = torch::empty(num_experts, options_int);
  torch::Tensor b_scales_ptrs = torch::empty(num_experts, options_int);
  torch::Tensor alpha_ptrs = torch::empty(num_experts, options_int);
  torch::Tensor layout_sfa = torch::empty({num_experts, 5}, options_int);
  torch::Tensor layout_sfb = torch::empty({num_experts, 5}, options_int);
  // Per-expert leading-dimension strides. The *2 converts byte strides of
  // the packed-FP4 tensors back to element (FP4) strides — assumes a and b
  // are the packed uint8 views; confirm against the caller.
  torch::Tensor c_strides1 =
      torch::full({num_experts}, output.stride(0), options_int);
  torch::Tensor a_strides1 =
      torch::full({num_experts}, a.stride(0) * 2, options_int);
  torch::Tensor b_strides1 =
      torch::full({num_experts}, b.stride(1) * 2, options_int);

  run_get_group_gemm_starts<LayoutSFA, LayoutSFB, ScaleConfig>(
      a_ptrs, b_ptrs, out_ptrs, a_scales_ptrs, b_scales_ptrs, alpha_ptrs,
      layout_sfa, layout_sfb, a, b, output, a_blockscale, b_blockscales, alphas,
      expert_offsets, sf_offsets, problem_sizes, M, N, K);

  // Create an instance of the GEMM
  Gemm gemm_op;

  // Initialize problem_sizes_as_shapes correctly
  UnderlyingProblemShape* problem_sizes_as_shapes =
      static_cast<UnderlyingProblemShape*>(problem_sizes.data_ptr());

  // Set the Scheduler info
  cutlass::KernelHardwareInfo hw_info;
  using RasterOrderOptions = typename cutlass::gemm::kernel::detail::
      PersistentTileSchedulerSm100GroupParams<
          typename ProblemShape::UnderlyingProblemShape>::RasterOrderOptions;
  typename Gemm::GemmKernel::TileSchedulerArguments scheduler;
  scheduler.raster_order = RasterOrderOptions::AlongM;
  hw_info.device_id = a.get_device();
  // Cache the SM count per device to avoid re-querying on every call.
  // NOTE(review): this static map is mutated without synchronization; if this
  // function can be called from multiple threads concurrently, access should
  // be guarded — confirm the caller's threading model.
  static std::unordered_map<int, int> cached_sm_counts;
  if (cached_sm_counts.find(hw_info.device_id) == cached_sm_counts.end()) {
    cached_sm_counts[hw_info.device_id] =
        cutlass::KernelHardwareInfo::query_device_multiprocessor_count(
            hw_info.device_id);
  }
  // NOTE(review): min(x, INT_MAX) is a no-op for an int; this looks like a
  // placeholder for a real SM-count cap — confirm intent.
  hw_info.sm_count = min(cached_sm_counts[hw_info.device_id], INT_MAX);

  // Mainloop Arguments
  typename GemmKernel::MainloopArguments mainloop_args{
      static_cast<const ElementType**>(a_ptrs.data_ptr()),
      static_cast<StrideA*>(a_strides1.data_ptr()),
      static_cast<const ElementType**>(b_ptrs.data_ptr()),
      static_cast<StrideB*>(b_strides1.data_ptr()),
      static_cast<const ElementSFType**>(a_scales_ptrs.data_ptr()),
      reinterpret_cast<LayoutSFA*>(layout_sfa.data_ptr()),
      static_cast<const ElementSFType**>(b_scales_ptrs.data_ptr()),
      reinterpret_cast<LayoutSFB*>(layout_sfb.data_ptr())};

  // Epilogue Arguments
  typename GemmKernel::EpilogueArguments epilogue_args{
      {},  // epilogue.thread
      nullptr,  // no C operand (no bias/source accumulation)
      static_cast<StrideC*>(c_strides1.data_ptr()),
      static_cast<ElementD**>(out_ptrs.data_ptr()),
      static_cast<StrideC*>(c_strides1.data_ptr())};
  // Per-expert alpha scaling through the fused epilogue; stride {0,0,1}
  // selects one alpha per group.
  auto& fusion_args = epilogue_args.thread;
  fusion_args.alpha_ptr_array =
      reinterpret_cast<float**>(alpha_ptrs.data_ptr());
  fusion_args.dAlpha = {_0{}, _0{}, 1};

  // Gemm Arguments
  typename GemmKernel::Arguments args{
      cutlass::gemm::GemmUniversalMode::kGrouped,
      {num_experts, problem_sizes_as_shapes, nullptr},
      mainloop_args,
      epilogue_args,
      hw_info,
      scheduler};

  size_t workspace_size = Gemm::get_workspace_size(args);
  auto const workspace_options =
      torch::TensorOptions().dtype(torch::kUInt8).device(a.device());
  auto workspace = torch::empty(workspace_size, workspace_options);

  const cudaStream_t stream = at::cuda::getCurrentCUDAStream(a.get_device());

  auto can_implement_status = gemm_op.can_implement(args);
  TORCH_CHECK(can_implement_status == cutlass::Status::kSuccess,
              "Failed to implement GEMM");

  // Run the GEMM
  auto status = gemm_op.initialize(args, workspace.data_ptr());
  TORCH_CHECK(status == cutlass::Status::kSuccess, "Failed to initialize GEMM");

  status = gemm_op.run(args, workspace.data_ptr(), stream);
  TORCH_CHECK(status == cutlass::Status::kSuccess, "Failed to run GEMM");
}
|
|
|
|
#if defined ENABLE_NVFP4_SM100 && ENABLE_NVFP4_SM100
|
|
constexpr auto FLOAT4_E2M1X2 = at::ScalarType::Byte;
|
|
constexpr auto SF_DTYPE = at::ScalarType::Float8_e4m3fn;
|
|
#endif
|
|
|
|
// Input-validation helpers for the torch-facing entry point below.
// CHECK_TYPE / CHECK_TH_CUDA / CHECK_CONTIGUOUS each expand to a single
// TORCH_CHECK; `m` is the human-readable tensor name used in the message.
#define CHECK_TYPE(x, st, m) \
  TORCH_CHECK(x.scalar_type() == st, ": Inconsistency of Tensor type:", m)
#define CHECK_TH_CUDA(x, m) \
  TORCH_CHECK(x.is_cuda(), m, ": must be a CUDA tensor.")
#define CHECK_CONTIGUOUS(x, m) \
  TORCH_CHECK(x.is_contiguous(), m, ": must be contiguous.")
// CHECK_INPUT runs all three checks. Wrapped in do { } while (0) so the
// macro behaves as a single statement: the original three-statement
// expansion would silently run only the first check when used as the body
// of an unbraced `if`/`else` (e.g. `if (cond) CHECK_INPUT(x, st, m);`).
#define CHECK_INPUT(x, st, m) \
  do {                        \
    CHECK_TH_CUDA(x, m);      \
    CHECK_CONTIGUOUS(x, m);   \
    CHECK_TYPE(x, st, m);     \
  } while (0)
|
|
|
|
// Torch-facing entry point for the NVFP4 blockwise-scaled grouped GEMM
// (mixture-of-experts style: one GEMM per expert with shared N and K).
//
// output:        [M, N], float16 or bfloat16; written in place.
// a:             packed FP4 activations as uint8, [M, K/2].
// b:             packed FP4 weights as uint8, [E, N, K/2].
// a_blockscale:  FP8-e4m3 scales, rank 2 [sum(sf_sizes), K/group_size].
// b_blockscales: FP8-e4m3 scales, rank 3 [E, N, K/group_size].
// alphas:        float32, one scale per expert.
// problem_sizes: int32 [E, 3] per-expert (m, n, k).
// expert_offsets / sf_offsets: int32 row offsets per expert.
//
// Only compiled when ENABLE_NVFP4_SM100 is set (SM100+, CUDA 12.8+);
// otherwise raises NotImplementedError.
void cutlass_fp4_group_mm(
    torch::Tensor& output, const torch::Tensor& a, const torch::Tensor& b,
    const torch::Tensor& a_blockscale, const torch::Tensor& b_blockscales,
    const torch::Tensor& alphas, const torch::Tensor& problem_sizes,
    const torch::Tensor& expert_offsets, const torch::Tensor& sf_offsets) {
#if defined ENABLE_NVFP4_SM100 && ENABLE_NVFP4_SM100
  // Input validation
  CHECK_INPUT(a, FLOAT4_E2M1X2, "a");
  CHECK_INPUT(b, FLOAT4_E2M1X2, "b");
  CHECK_INPUT(a_blockscale, SF_DTYPE, "a_blockscale");
  CHECK_INPUT(b_blockscales, SF_DTYPE, "b_blockscales");
  CHECK_INPUT(alphas, at::ScalarType::Float, "alphas");

  TORCH_CHECK(a_blockscale.dim() == 2,
              "expected a_blockscale to be of shape [num_experts, rounded_m,"
              " k // group_size], observed rank: ",
              a_blockscale.dim())
  TORCH_CHECK(b_blockscales.dim() == 3,
              "expected b_blockscale to be of shape: "
              " [num_experts, n, k // group_size], observed rank: ",
              b_blockscales.dim())
  TORCH_CHECK(problem_sizes.dim() == 2, "problem_sizes must be a 2D tensor");
  TORCH_CHECK(problem_sizes.size(1) == 3,
              "problem_sizes must have the shape (num_experts, 3)");
  TORCH_CHECK(problem_sizes.size(0) == expert_offsets.size(0),
              "Number of experts in problem_sizes must match expert_offsets");
  TORCH_CHECK(problem_sizes.dtype() == torch::kInt32,
              "problem_sizes must be int32.");

  int M = static_cast<int>(a.size(0));
  int N = static_cast<int>(b.size(1));
  int E = static_cast<int>(b.size(0));
  // b's trailing dim holds two FP4 values per byte, so K is twice its size.
  int K = static_cast<int>(2 * b.size(2));

  // Dispatch on output dtype. Anything other than bfloat16 falls through to
  // the half instantiation; a non-float16 dtype is then rejected inside
  // run_get_group_gemm_starts' macro chain.
  if (output.scalar_type() == torch::kBFloat16) {
    run_fp4_blockwise_scaled_group_mm<cutlass::bfloat16_t>(
        output, a, b, a_blockscale, b_blockscales, alphas, problem_sizes,
        expert_offsets, sf_offsets, M, N, K);
  } else {
    run_fp4_blockwise_scaled_group_mm<cutlass::half_t>(
        output, a, b, a_blockscale, b_blockscales, alphas, problem_sizes,
        expert_offsets, sf_offsets, M, N, K);
  }
#else
  TORCH_CHECK_NOT_IMPLEMENTED(
      false,
      "No compiled cutlass_fp4_group_mm kernel, vLLM must "
      "be compiled with ENABLE_NVFP4_SM100 for SM100+ and CUDA "
      "12.8 or above.");
#endif
}
|