mirror of
https://git.datalinker.icu/vllm-project/vllm.git
synced 2026-05-07 05:52:20 +08:00
Signed-off-by: Nick Hill <nhill@redhat.com> Signed-off-by: Lucas Kabela <lucaskabela@meta.com> Signed-off-by: Max de Bayser <mbayser@br.ibm.com> Signed-off-by: Andrew Sansom <andrew@protopia.ai> Signed-off-by: Boyuan Feng <boyuan@meta.com> Signed-off-by: Boyuan Feng <fby.1994@gmail.com> Signed-off-by: boyuanfeng <boyuan@meta.com> Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> Signed-off-by: JartX <sagformas@epdcenter.es> Signed-off-by: Chendi Xue <Chendi.Xue@intel.com> Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com> Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk> Signed-off-by: Chen Zhang <zhangch99@outlook.com> Signed-off-by: Roger Wang <hey@rogerw.io> Signed-off-by: mgoin <mgoin64@gmail.com> Signed-off-by: wwl2755 <wangwenlong2755@gmail.com> Signed-off-by: Manoel Marques <manoel.marques@ibm.com> Signed-off-by: Manoel Marques <manoelmrqs@gmail.com> Signed-off-by: Isotr0py <mozf@mail2.sysu.edu.cn> Signed-off-by: pengdrumli <pengdrumli@tencent.com> Signed-off-by: windsonsea <haifeng.yao@daocloud.io> Signed-off-by: Woosuk Kwon <woosuk@thinkingmachines.ai> Signed-off-by: Woosuk Kwon <woosuk.kwon@berkeley.edu> Signed-off-by: Huamin Li <3ericli@gmail.com> Signed-off-by: simondanielsson <simon.danielsson99@hotmail.com> Signed-off-by: Rahul Tuli <rtuli@redhat.com> Signed-off-by: Yang <lymailforjob@gmail.com> Signed-off-by: Debolina Roy <debroy@redhat.com> Signed-off-by: David Chen <530634352@qq.com> Signed-off-by: wangzi <3220100013@zju.edu.cn> Signed-off-by: Eldar Kurtic <8884008+eldarkurtic@users.noreply.github.com> Signed-off-by: NickLucche <nlucches@redhat.com> Signed-off-by: Yizhou Liu <liu_yizhou@outlook.com> Signed-off-by: Sara Kokkila Schumacher <saraks@ibm.com> Signed-off-by: Csrayz <jover@cmbchina.com> Signed-off-by: ivyilike <pww123@cmbchina.com> Signed-off-by: Burkhard Ringlein <ngl@zurich.ibm.com> Signed-off-by: Bowen Wang <abmfy@icloud.com> Signed-off-by: qqma <qqma@amazon.com> Signed-off-by: ElizaWszola 
<ewszola@redhat.com> Signed-off-by: Lu Fang <fanglu@fb.com> Signed-off-by: Zhuohan Li <zhuohan123@gmail.com> Signed-off-by: Luka Govedič <lgovedic@redhat.com> Signed-off-by: luka <lgovedic@redhat.com> Signed-off-by: Luka Govedič <ProExpertProg@users.noreply.github.com> Signed-off-by: Or Ozeri <oro@il.ibm.com> Signed-off-by: Johnny Yang <johnnyyang@google.com> Signed-off-by: Alec Solder <alecs@fb.com> Signed-off-by: Alec S <10566873+alecsolder@users.noreply.github.com> Signed-off-by: Russell Bryant <rbryant@redhat.com> Signed-off-by: Matthew Bonanni <mbonanni@redhat.com> Signed-off-by: Alexander Matveev <amatveev@redhat.com> Signed-off-by: yewentao256 <zhyanwentao@126.com> Signed-off-by: liuye.hj <liuye.hj@alibaba-inc.com> Signed-off-by: Kunshang Ji <kunshang.ji@intel.com> Signed-off-by: Lucia Fang <116399278+luccafong@users.noreply.github.com> Signed-off-by: Michael Goin <mgoin64@gmail.com> Signed-off-by: Varun Sundar Rabindranath <vsundarr@redhat.com> Signed-off-by: Ming Yang <minos.future@gmail.com> Signed-off-by: Zhikaiiii <1658973216@qq.com> Signed-off-by: Andreas Hartel <andreas.hartel@aleph-alpha.com> Signed-off-by: Jee Jee Li <pandaleefree@gmail.com> Signed-off-by: vllmellm <vllm.ellm@embeddedllm.com> Signed-off-by: wuxibin <wuxibin@bytedance.com> Signed-off-by: youkaichao <youkaichao@gmail.com> Signed-off-by: Peter Pan <Peter.Pan@daocloud.io> Signed-off-by: Peter Pan <peter.pan@daocloud.io> Signed-off-by: Nicolò Lucchesi<nicolo.lucchesi@gmail.com> Signed-off-by: Thomas Parnell <tpa@zurich.ibm.com> Signed-off-by: Sage Moore <sage@neuralmagic.com> Signed-off-by: Lucas Wilkinson <lwilkins@redhat.com> Signed-off-by: Lucas Wilkinson <LucasWilkinson@users.noreply.github.com> Signed-off-by: Tyler Michael Smith <tyler@neuralmagic.com> Signed-off-by: Bill Nell <bnell@redhat.com> Signed-off-by: Shreeasish Kumar <shreeasish@rivosinc.com> Signed-off-by: Weida Hong <wdhongtw@google.com> Signed-off-by: Ekagra Ranjan <3116519+ekagra-ranjan@users.noreply.github.com> 
Signed-off-by: Hashem Hashemi <hashem.hashemi@amd.com> Signed-off-by: Hashem Hashemi <159079214+amd-hhashemi@users.noreply.github.com> Signed-off-by: Amir Samani <asamani@nvidia.com> Signed-off-by: ElizaWszola <elizaw.9289@gmail.com> Signed-off-by: jiahanc <173873397+jiahanc@users.noreply.github.com> Signed-off-by: ilmarkov <markovilya197@gmail.com> Signed-off-by: Gregory Shtrasberg <Gregory.Shtrasberg@amd.com> Signed-off-by: Jialin Ouyang <Jialin.Ouyang@gmail.com> Signed-off-by: rouchenzi <ruochenwen@gmail.com> Signed-off-by: rouchenzi <40842833+rouchenzi@users.noreply.github.com> Signed-off-by: Andrew Xia <axia@meta.com> Signed-off-by: Kourosh Hakhamaneshi <kourosh@anyscale.com> Signed-off-by: Corey Lowman <clowman1993@gmail.com> Signed-off-by: jpvillam <jpvillam@amd.com> Signed-off-by: dougbtv <dosmith@redhat.com> Signed-off-by: Chenxi Yang <cxyang@fb.com> Signed-off-by: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Signed-off-by: ahao-anyscale <ahao@anyscale.com> Signed-off-by: Yan Lu <luyan@nvidia.com> Signed-off-by: baxingpiaochong <771405853@qq.com> Signed-off-by: Kyle Sayers <kylesayrs@gmail.com> Signed-off-by: Nikhil Gupta <nikhil.gupta2@arm.com> Signed-off-by: Yong Hoon Shin <yhshin@meta.com> Signed-off-by: Benjamin Chislett <benjamin.chislett@centml.ai> Signed-off-by: Benjamin Chislett <bchislett@nvidia.com> Signed-off-by: Ben Browning <bbrownin@redhat.com> Signed-off-by: Chengji Yao <chengjiyao@google.com> Signed-off-by: jiang1.li <jiang1.li@intel.com> Signed-off-by: Jackmin801 <ongjackm@gmail.com> Signed-off-by: Jonas M. Kübler <44084297+jmkuebler@users.noreply.github.com> Signed-off-by: taohui <taohui3@gmail.com> Signed-off-by: rongfu.leng <rongfu.leng@daocloud.io> Signed-off-by: Shu Wang <shuw@nvidia.com> Signed-off-by: Shu Wang. 
<shuw@nvidia.com> Signed-off-by: Tyler Michael Smith <tlrmchlsmth@gmail.com> Signed-off-by: Duncan Moss <djm.moss@gmail.com> Signed-off-by: Shiyan Deng <dsy842974287@meta.com> Signed-off-by: Wei Wei <wwei6@meta.com> Signed-off-by: Saman Keon <samanamp@outlook.com> Signed-off-by: yangxurui <yangxurui@meituan.com> Signed-off-by: nicole-lihui <nicole.li@daocloud.io> Signed-off-by: courage17340 <courage17340@163.com> Signed-off-by: Jacob Kahn <jacobkahn1@gmail.com> Signed-off-by: Fadi Arafeh <fadi.arafeh@arm.com> Signed-off-by: Agata Dobrzyniewicz <adobrzyniewicz@habana.ai> Signed-off-by: zxw <1020938856@qq.com> Signed-off-by: wang.yuqi <noooop@126.com> Signed-off-by: Cyrus Leung <cyrus.tl.leung@gmail.com> Signed-off-by: chenlang <chen.lang5@zte.com.cn> Signed-off-by: Jonas Kuebler <kuebj@amazon.com> Signed-off-by: AlonKejzman <alonkeizman@gmail.com> Signed-off-by: Tao Hui <taohui3@gmail.com> Signed-off-by: Matthew Bonanni <mbonanni001@gmail.com> Signed-off-by: Tomer Asida <57313761+tomeras91@users.noreply.github.com> Signed-off-by: Aleksandr Malyshev <maleksan@amd.com> Signed-off-by: Eugene Khvedchenia <ekhvedchenia@nvidia.com> Signed-off-by: Eugene Khvedchenya <ekhvedchenya@gmail.com> Signed-off-by: yiting.jiang <yiting.jiang@daocloud.io> Signed-off-by: xaguilar <Xavier.AguilarFruto@amd.com> Signed-off-by: Iceber Gu <caiwei95@hotmail.com> Signed-off-by: Tao He <linzhu.ht@alibaba-inc.com> Signed-off-by: Icey <1790571317@qq.com> Signed-off-by: 许文卿 <xwq391974@alibaba-inc.com> Signed-off-by: Chih-Chieh-Yang <7364402+cyang49@users.noreply.github.com> Co-authored-by: Nick Hill <nhill@redhat.com> Co-authored-by: Lucas Kabela <lucasakabela@gmail.com> Co-authored-by: Maximilien de Bayser <mbayser@br.ibm.com> Co-authored-by: Andrew Sansom <andrew@protopia.ai> Co-authored-by: Boyuan Feng <boyuan@meta.com> Co-authored-by: Luka Govedič <ProExpertProg@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> 
Co-authored-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> Co-authored-by: JartX <sagformas@epdcenter.es> Co-authored-by: Chendi.Xue <chendi.xue@intel.com> Co-authored-by: Chauncey <chaunceyjiang@gmail.com> Co-authored-by: xin.li <xin.li@daocloud.io> Co-authored-by: Cyrus Leung <tlleungac@connect.ust.hk> Co-authored-by: Chen Zhang <zhangch99@outlook.com> Co-authored-by: Roger Wang <hey@rogerw.io> Co-authored-by: Michael Goin <mgoin64@gmail.com> Co-authored-by: Wenlong Wang <wangwenlong2755@gmail.com> Co-authored-by: Manoel Marques <manoelmrqs@gmail.com> Co-authored-by: Isotr0py <mozf@mail2.sysu.edu.cn> Co-authored-by: lirong <56789630+lirong-lirong@users.noreply.github.com> Co-authored-by: Michael Yao <haifeng.yao@daocloud.io> Co-authored-by: Woosuk Kwon <woosuk.kwon@berkeley.edu> Co-authored-by: Huamin Li <3ericli@gmail.com> Co-authored-by: Lu Fang <30275821+houseroad@users.noreply.github.com> Co-authored-by: Simon Danielsson <70206058+simondanielsson@users.noreply.github.com> Co-authored-by: Rahul Tuli <rtuli@redhat.com> Co-authored-by: Claude <noreply@anthropic.com> Co-authored-by: Yang Liu <127183760+KKSK-DON@users.noreply.github.com> Co-authored-by: Deboleina <debroy@redhat.com> Co-authored-by: yinz-aizip <yinz@aizip.ai> Co-authored-by: WeiQing Chen <40507679+david6666666@users.noreply.github.com> Co-authored-by: wangzi <3220100013@zju.edu.cn> Co-authored-by: Eldar Kurtić <8884008+eldarkurtic@users.noreply.github.com> Co-authored-by: Nicolò Lucchesi <nlucches@redhat.com> Co-authored-by: Ye (Charlotte) Qi <yeq@meta.com> Co-authored-by: Yizhou <136800916+yiz-liu@users.noreply.github.com> Co-authored-by: Sara-KS <50249410+Sara-KS@users.noreply.github.com> Co-authored-by: Csrayz <jover@cmbchina.com> Co-authored-by: ivyilike <pww123@cmbchina.com> Co-authored-by: Burkhard Ringlein <ngl@zurich.ibm.com> Co-authored-by: Bowen Wang <abmfy@icloud.com> Co-authored-by: Tyler Michael Smith <tyler@neuralmagic.com> Co-authored-by: Daisy-Ma-coder 
<daisy.ma.0117@gmail.com> Co-authored-by: qqma <qqma@amazon.com> Co-authored-by: ElizaWszola <ewszola@redhat.com> Co-authored-by: Lucia Fang <116399278+luccafong@users.noreply.github.com> Co-authored-by: Zhuohan Li <zhuohan123@gmail.com> Co-authored-by: Simon Mo <simon.mo@hey.com> Co-authored-by: Or Ozeri <oro@il.ibm.com> Co-authored-by: Johnny Yang <24908445+jcyang43@users.noreply.github.com> Co-authored-by: Chengji Yao <chengjiyao@google.com> Co-authored-by: Alec S <10566873+alecsolder@users.noreply.github.com> Co-authored-by: Alec Solder <alecs@fb.com> Co-authored-by: Russell Bryant <rbryant@redhat.com> Co-authored-by: Matthew Bonanni <mbonanni@redhat.com> Co-authored-by: Robert Shaw <114415538+robertgshaw2-redhat@users.noreply.github.com> Co-authored-by: Chris Bamford <chrisbam4d@gmail.com> Co-authored-by: Alexander Matveev <59768536+alexm-redhat@users.noreply.github.com> Co-authored-by: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Co-authored-by: JJJYmmm <92386084+JJJYmmm@users.noreply.github.com> Co-authored-by: liuye.hj <liuye.hj@alibaba-inc.com> Co-authored-by: Kunshang Ji <kunshang.ji@intel.com> Co-authored-by: Lucia (Lu) Fang <fanglu@meta.com> Co-authored-by: Varun Sundar Rabindranath <varunsundar08@gmail.com> Co-authored-by: Varun Sundar Rabindranath <vsundarr@redhat.com> Co-authored-by: Ming Yang <yming@meta.com> Co-authored-by: Zhikaiiii <55917203+Zhikaiiii@users.noreply.github.com> Co-authored-by: Andreas Hartel <andreas@hartel.me> Co-authored-by: Jee Jee Li <pandaleefree@gmail.com> Co-authored-by: vllmellm <vllm.ellm@embeddedllm.com> Co-authored-by: Joel <wuxibin89@163.com> Co-authored-by: youkaichao <youkaichao@gmail.com> Co-authored-by: Mark McLoughlin <markmc@redhat.com> Co-authored-by: Peter Pan <peter.pan@daocloud.io> Co-authored-by: Nicolò Lucchesi <nicolo.lucchesi@gmail.com> Co-authored-by: Fanli Lin <fanli.lin@intel.com> Co-authored-by: Thomas Parnell <tpa@zurich.ibm.com> Co-authored-by: Lucas Wilkinson 
<LucasWilkinson@users.noreply.github.com> Co-authored-by: Sage Moore <sage@neuralmagic.com> Co-authored-by: yewentao256 <zhyanwentao@126.com> Co-authored-by: bnellnm <49004751+bnellnm@users.noreply.github.com> Co-authored-by: rivos-shreeasish <shreeasish@rivosinc.com> Co-authored-by: Chih-Chieh Yang <chih.chieh.yang@ibm.com> Co-authored-by: Weida Hong <wdhongtw@gmail.com> Co-authored-by: Ekagra Ranjan <3116519+ekagra-ranjan@users.noreply.github.com> Co-authored-by: Hashem Hashemi <159079214+amd-hhashemi@users.noreply.github.com> Co-authored-by: Amir Samani <samani@ualberta.ca> Co-authored-by: Luka Govedič <lgovedic@redhat.com> Co-authored-by: jiahanc <173873397+jiahanc@users.noreply.github.com> Co-authored-by: Ilya Markov <markovilya197@gmail.com> Co-authored-by: Gregory Shtrasberg <156009573+gshtras@users.noreply.github.com> Co-authored-by: Jialin Ouyang <Jialin.Ouyang@gmail.com> Co-authored-by: rouchenzi <40842833+rouchenzi@users.noreply.github.com> Co-authored-by: Andrew Xia <axia@meta.com> Co-authored-by: kourosh hakhamaneshi <31483498+kouroshHakha@users.noreply.github.com> Co-authored-by: Corey Lowman <clowman1993@gmail.com> Co-authored-by: Juan Villamizar <100237675+jpvillam-amd@users.noreply.github.com> Co-authored-by: jpvillam <jpvillam@amd.com> Co-authored-by: Doug Smith <dosmith@redhat.com> Co-authored-by: Chenxi Yang <cxyang@cs.utexas.edu> Co-authored-by: Chenxi Yang <cxyang@fb.com> Co-authored-by: ahao-anyscale <ahao@anyscale.com> Co-authored-by: 0xNullPath <luyanfcp@foxmail.com> Co-authored-by: baxingpiaochong <771405853@qq.com> Co-authored-by: Benjamin Chislett <bchislett@nvidia.com> Co-authored-by: Kyle Sayers <kylesayrs@gmail.com> Co-authored-by: Nikhil Gupta <nikhil.gupta2@arm.com> Co-authored-by: Yong Hoon Shin <48474650+sarckk@users.noreply.github.com> Co-authored-by: lhsjohn <huashuoli@tencent.com> Co-authored-by: Ben Browning <bbrownin@redhat.com> Co-authored-by: Li, Jiang <jiang1.li@intel.com> Co-authored-by: Jackmin801 
<56836461+Jackmin801@users.noreply.github.com> Co-authored-by: Jonas M. Kübler <44084297+jmkuebler@users.noreply.github.com> Co-authored-by: Tao Hui <taohui3@gmail.com> Co-authored-by: rongfu.leng <rongfu.leng@daocloud.io> Co-authored-by: Shu Wang <shuw@nvidia.com> Co-authored-by: Tyler Michael Smith <tlrmchlsmth@gmail.com> Co-authored-by: Duncan Moss <djm.moss@gmail.com> Co-authored-by: Shiyan Deng <dsy842974287@meta.com> Co-authored-by: Wei Wei <wwei6@meta.com> Co-authored-by: Saman A. Pour <samanamp@outlook.com> Co-authored-by: XuruiYang <530534756@qq.com> Co-authored-by: yangxurui <yangxurui@meituan.com> Co-authored-by: Nicole LiHui 🥜 <nicolelihui@outlook.com> Co-authored-by: courage17340 <courage17340@users.noreply.github.com> Co-authored-by: Jacob Kahn <jacobkahn1@gmail.com> Co-authored-by: Nicole LiHui 🥜 <nicole.li@daocloud.io> Co-authored-by: Fadi Arafeh <115173828+fadara01@users.noreply.github.com> Co-authored-by: Agata Dobrzyniewicz <160237065+adobrzyn@users.noreply.github.com> Co-authored-by: yyzxw <34639446+yyzxw@users.noreply.github.com> Co-authored-by: wang.yuqi <noooop@126.com> Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com> Co-authored-by: chenlang <chen.lang5@zte.com.cn> Co-authored-by: chenlang <10346245@zte.com.cn> Co-authored-by: AlonKejzman <alonkeizman@gmail.com> Co-authored-by: tomeras91 <57313761+tomeras91@users.noreply.github.com> Co-authored-by: Aleksandr Malyshev <164964928+maleksan85@users.noreply.github.com> Co-authored-by: Aleksandr Malyshev <maleksan@amd.com> Co-authored-by: Doug Lehr <douglehr@amd.com> Co-authored-by: Eugene Khvedchenya <ekhvedchenya@gmail.com> Co-authored-by: yitingdc <59356937+yitingdc@users.noreply.github.com> Co-authored-by: xaguilar-amd <xavier.aguilarfruto@amd.com> Co-authored-by: Iceber Gu <caiwei95@hotmail.com> Co-authored-by: Tao He <linzhu.ht@alibaba-inc.com> Co-authored-by: Icey <1790571317@qq.com> Co-authored-by: Xu Wenqing <121550081+Xu-Wenqing@users.noreply.github.com> Co-authored-by: Chih-Chieh 
Yang <7364402+cyang49@users.noreply.github.com> Co-authored-by: RishiAstra <40644327+RishiAstra@users.noreply.github.com>
579 lines
24 KiB
Plaintext
579 lines
24 KiB
Plaintext
/*
|
|
* Adapted from https://github.com/NVIDIA/TensorRT-LLM/blob/v0.7.1/cpp/tensorrt_llm/kernels/mixtureOfExperts/moe_kernels.cu
|
|
* Copyright (c) 2024, The vLLM team.
|
|
* SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
|
|
* SPDX-License-Identifier: Apache-2.0
|
|
*
|
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
* you may not use this file except in compliance with the License.
|
|
* You may obtain a copy of the License at
|
|
*
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
*
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
* See the License for the specific language governing permissions and
|
|
* limitations under the License.
|
|
*/
|
|
#include <torch/all.h>
|
|
#include <ATen/cuda/CUDAContext.h>
|
|
#include <c10/cuda/CUDAGuard.h>
|
|
#include "../cuda_compat.h"
|
|
#include "../cub_helpers.h"
|
|
|
|
// Compile-time-friendly min/max helpers used below to size vectorized loads
// (BYTES_PER_LDG) and per-thread work (VECs_PER_THREAD). Arguments are fully
// parenthesized so the expansion is safe inside larger expressions.
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
|
|
|
|
namespace vllm {
|
|
namespace moe {
|
|
|
|
/// Aligned array type, used to issue vectorized (up to 16-byte) loads from
/// global memory. Stands in for the CUTLASS AlignedArray to avoid the
/// dependency; reinterpret_cast'ing a float* to AlignedArray<float, N>*
/// lets the compiler emit a single wide load per element.
template <
    typename T,
    /// Number of elements in the array
    int N,
    /// Alignment requirement in bytes
    int Alignment = sizeof(T) * N
>
class alignas(Alignment) AlignedArray {
  // BUGFIX: was `float data[N]`, which silently ignored the template
  // parameter T and gave the wrong element type/size for any T != float.
  // Layout is unchanged for the file's only instantiation (T = float).
  T data[N];
};
|
|
|
|
// ====================== Softmax things ===============================
|
|
// We have our own implementation of softmax here so we can support transposing the output
|
|
// in the softmax kernel when we extend this module to support expert-choice routing.
|
|
template <int TPB>
|
|
__launch_bounds__(TPB) __global__
|
|
void moeSoftmax(const float* input, const bool* finished, float* output, const int num_cols)
|
|
{
|
|
using BlockReduce = cub::BlockReduce<float, TPB>;
|
|
__shared__ typename BlockReduce::TempStorage tmpStorage;
|
|
|
|
__shared__ float normalizing_factor;
|
|
__shared__ float float_max;
|
|
|
|
const int thread_row_offset = blockIdx.x * num_cols;
|
|
|
|
float threadData(-FLT_MAX);
|
|
|
|
// Don't touch finished rows.
|
|
if ((finished != nullptr) && finished[blockIdx.x])
|
|
{
|
|
return;
|
|
}
|
|
|
|
for (int ii = threadIdx.x; ii < num_cols; ii += TPB)
|
|
{
|
|
const int idx = thread_row_offset + ii;
|
|
threadData = max(static_cast<float>(input[idx]), threadData);
|
|
}
|
|
|
|
const float maxElem = BlockReduce(tmpStorage).Reduce(threadData, CubMaxOp());
|
|
if (threadIdx.x == 0)
|
|
{
|
|
float_max = maxElem;
|
|
}
|
|
__syncthreads();
|
|
|
|
threadData = 0;
|
|
|
|
for (int ii = threadIdx.x; ii < num_cols; ii += TPB)
|
|
{
|
|
const int idx = thread_row_offset + ii;
|
|
threadData += exp((static_cast<float>(input[idx]) - float_max));
|
|
}
|
|
|
|
const auto Z = BlockReduce(tmpStorage).Reduce(threadData, CubAddOp());
|
|
|
|
if (threadIdx.x == 0)
|
|
{
|
|
normalizing_factor = 1.f / Z;
|
|
}
|
|
__syncthreads();
|
|
|
|
for (int ii = threadIdx.x; ii < num_cols; ii += TPB)
|
|
{
|
|
const int idx = thread_row_offset + ii;
|
|
const float val = exp((static_cast<float>(input[idx]) - float_max)) * normalizing_factor;
|
|
output[idx] = val;
|
|
}
|
|
}
|
|
|
|
template <int TPB, typename IndType>
|
|
__launch_bounds__(TPB) __global__ void moeTopK(
|
|
const float* inputs_after_softmax,
|
|
const bool* finished,
|
|
float* output,
|
|
IndType* indices,
|
|
int* source_rows,
|
|
const int num_experts,
|
|
const int k,
|
|
const int start_expert,
|
|
const int end_expert)
|
|
{
|
|
|
|
using cub_kvp = cub::KeyValuePair<int, float>;
|
|
using BlockReduce = cub::BlockReduce<cub_kvp, TPB>;
|
|
__shared__ typename BlockReduce::TempStorage tmpStorage;
|
|
|
|
cub_kvp thread_kvp;
|
|
cub::ArgMax arg_max;
|
|
|
|
const int num_rows = gridDim.x;
|
|
const int block_row = blockIdx.x;
|
|
|
|
const bool row_is_active = finished ? !finished[block_row] : true;
|
|
const int thread_read_offset = blockIdx.x * num_experts;
|
|
for (int k_idx = 0; k_idx < k; ++k_idx)
|
|
{
|
|
thread_kvp.key = 0;
|
|
thread_kvp.value = -1.f; // This is OK because inputs are probabilities
|
|
|
|
cub_kvp inp_kvp;
|
|
for (int expert = threadIdx.x; expert < num_experts; expert += TPB)
|
|
{
|
|
const int idx = thread_read_offset + expert;
|
|
inp_kvp.key = expert;
|
|
inp_kvp.value = inputs_after_softmax[idx];
|
|
|
|
for (int prior_k = 0; prior_k < k_idx; ++prior_k)
|
|
{
|
|
const int prior_winning_expert = indices[k * block_row + prior_k];
|
|
|
|
if (prior_winning_expert == expert)
|
|
{
|
|
inp_kvp = thread_kvp;
|
|
}
|
|
}
|
|
|
|
thread_kvp = arg_max(inp_kvp, thread_kvp);
|
|
}
|
|
|
|
const cub_kvp result_kvp = BlockReduce(tmpStorage).Reduce(thread_kvp, arg_max);
|
|
if (threadIdx.x == 0)
|
|
{
|
|
// Ignore experts the node isn't responsible for with expert parallelism
|
|
const int expert = result_kvp.key;
|
|
const bool node_uses_expert = expert >= start_expert && expert < end_expert;
|
|
const bool should_process_row = row_is_active && node_uses_expert;
|
|
|
|
const int idx = k * block_row + k_idx;
|
|
output[idx] = result_kvp.value;
|
|
indices[idx] = should_process_row ? (expert - start_expert) : num_experts;
|
|
assert(indices[idx] >= 0);
|
|
source_rows[idx] = k_idx * num_rows + block_row;
|
|
}
|
|
__syncthreads();
|
|
}
|
|
}
|
|
|
|
// ====================== TopK softmax things ===============================
|
|
|
|
/*
  A Top-K gating softmax written to exploit when the number of experts in the MoE layers
  are a small power of 2. This allows us to cleanly share the rows among the threads in
  a single warp and eliminate communication between warps (so no need to use shared mem).

  It fuses the softmax, max and argmax into a single kernel.

  Limitations:
  1) This implementation is optimized for when the number of experts is a small power of 2.
     Additionally, it also supports the case where the number of experts is a multiple of 64,
     which is still faster than computing the softmax and top-k separately (only tested on
     CUDA so far).
  2) This implementation assumes k is small, but will work for any k.
*/
|
|
|
|
// Fused softmax + top-k for the gating network. Each row (token) is handled by a
// sub-group of THREADS_PER_ROW threads within one warp; all cross-thread reductions
// use warp shuffles, so no shared memory is needed.
//
// Template parameters:
//   VPT             - values (floats) processed per thread.
//   NUM_EXPERTS     - row width; compile-time so the row fits exactly across a sub-group.
//   WARPS_PER_CTA   - warps per thread block.
//   BYTES_PER_LDG   - bytes per vectorized global load (power of 2, <= 16).
//   WARP_SIZE_PARAM - threads per warp (32 on CUDA; 64 for ROCm wave64 builds).
//   IndType         - integer type of the output expert indices.
template <int VPT, int NUM_EXPERTS, int WARPS_PER_CTA, int BYTES_PER_LDG, int WARP_SIZE_PARAM, typename IndType>
__launch_bounds__(WARPS_PER_CTA* WARP_SIZE_PARAM) __global__
    void topkGatingSoftmax(const float* input, const bool* finished, float* output, const int num_rows, IndType* indices,
        int* source_rows, const int k, const int start_expert, const int end_expert)
{
    // We begin by enforcing compile time assertions and setting up compile time constants.
    static_assert(BYTES_PER_LDG == (BYTES_PER_LDG & -BYTES_PER_LDG), "BYTES_PER_LDG must be power of 2");
    static_assert(BYTES_PER_LDG <= 16, "BYTES_PER_LDG must be leq 16");

    // Number of bytes each thread pulls in per load
    static constexpr int ELTS_PER_LDG = BYTES_PER_LDG / sizeof(float);
    static constexpr int ELTS_PER_ROW = NUM_EXPERTS;
    static constexpr int THREADS_PER_ROW = ELTS_PER_ROW / VPT;
    static constexpr int LDG_PER_THREAD = VPT / ELTS_PER_LDG;

    // Restrictions based on previous section.
    static_assert(VPT % ELTS_PER_LDG == 0, "The elements per thread must be a multiple of the elements per ldg");
    static_assert(WARP_SIZE_PARAM % THREADS_PER_ROW == 0, "The threads per row must cleanly divide the threads per warp");
    static_assert(THREADS_PER_ROW == (THREADS_PER_ROW & -THREADS_PER_ROW), "THREADS_PER_ROW must be power of 2");
    static_assert(THREADS_PER_ROW <= WARP_SIZE_PARAM, "THREADS_PER_ROW can be at most warp size");

    // We have NUM_EXPERTS elements per row. We specialize for small #experts
    static constexpr int ELTS_PER_WARP = WARP_SIZE_PARAM * VPT;
    static constexpr int ROWS_PER_WARP = ELTS_PER_WARP / ELTS_PER_ROW;
    static constexpr int ROWS_PER_CTA = WARPS_PER_CTA * ROWS_PER_WARP;

    // Restrictions for previous section.
    static_assert(ELTS_PER_WARP % ELTS_PER_ROW == 0, "The elts per row must cleanly divide the total elt per warp");

    // ===================== From this point, we finally start computing run-time variables. ========================

    // Compute CTA and warp rows. We pack multiple rows into a single warp, and a block contains WARPS_PER_CTA warps.
    // Thus, each block processes a chunk of rows. We start by computing the start row for each block.
    const int cta_base_row = blockIdx.x * ROWS_PER_CTA;

    // Now, using the base row per thread block, we compute the base row per warp.
    const int warp_base_row = cta_base_row + threadIdx.y * ROWS_PER_WARP;

    // The threads in a warp are split into sub-groups that will work on a row.
    // We compute row offset for each thread sub-group
    const int thread_row_in_warp = threadIdx.x / THREADS_PER_ROW;
    const int thread_row = warp_base_row + thread_row_in_warp;

    // Threads with indices out of bounds should early exit here.
    if (thread_row >= num_rows)
    {
        return;
    }
    const bool row_is_active = finished ? !finished[thread_row] : true;

    // We finally start setting up the read pointers for each thread. First, each thread jumps to the start of the
    // row it will read.
    const float* thread_row_ptr = input + thread_row * ELTS_PER_ROW;

    // Now, we compute the group each thread belongs to in order to determine the first column to start loads.
    const int thread_group_idx = threadIdx.x % THREADS_PER_ROW;
    const int first_elt_read_by_thread = thread_group_idx * ELTS_PER_LDG;
    const float* thread_read_ptr = thread_row_ptr + first_elt_read_by_thread;

    // Determine the pointer type to use to read in the data depending on the BYTES_PER_LDG template param. In theory,
    // this can support all powers of 2 up to 16.
    // NOTE(woosuk): The original implementation uses CUTLASS aligned array here.
    // We defined our own aligned array and use it here to avoid the dependency on CUTLASS.
    using AccessType = AlignedArray<float, ELTS_PER_LDG>;

    // Finally, we pull in the data from global mem (vectorized, BYTES_PER_LDG at a time).
    float row_chunk[VPT];
    AccessType* row_chunk_vec_ptr = reinterpret_cast<AccessType*>(&row_chunk);
    const AccessType* vec_thread_read_ptr = reinterpret_cast<const AccessType*>(thread_read_ptr);
#pragma unroll
    for (int ii = 0; ii < LDG_PER_THREAD; ++ii)
    {
        row_chunk_vec_ptr[ii] = vec_thread_read_ptr[ii * THREADS_PER_ROW];
    }

    // First, we perform a max reduce within the thread. We can do the max in fp16 safely (I think) and just
    // convert to float afterwards for the exp + sum reduction.
    float thread_max = row_chunk[0];
#pragma unroll
    for (int ii = 1; ii < VPT; ++ii)
    {
        thread_max = max(thread_max, row_chunk[ii]);
    }

    // Now, we find the max within the thread group and distribute among the threads. We use a butterfly reduce.
#pragma unroll
    for (int mask = THREADS_PER_ROW / 2; mask > 0; mask /= 2)
    {
        thread_max = max(thread_max, VLLM_SHFL_XOR_SYNC_WIDTH(thread_max, mask, THREADS_PER_ROW));
    }

    // From this point, thread max in all the threads have the max within the row.
    // Now, we subtract the max from each element in the thread and take the exp. We also compute the thread local sum.
    float row_sum = 0;
#pragma unroll
    for (int ii = 0; ii < VPT; ++ii)
    {
        row_chunk[ii] = expf(row_chunk[ii] - thread_max);
        row_sum += row_chunk[ii];
    }

    // Now, we perform the sum reduce within each thread group. Similar to the max reduce, we use a butterfly pattern.
#pragma unroll
    for (int mask = THREADS_PER_ROW / 2; mask > 0; mask /= 2)
    {
        row_sum += VLLM_SHFL_XOR_SYNC_WIDTH(row_sum, mask, THREADS_PER_ROW);
    }

    // From this point, all threads have the max and the sum for their rows in the thread_max and thread_sum variables
    // respectively. Finally, we can scale the rows for the softmax. Technically, for top-k gating we don't need to
    // compute the entire softmax row. We can likely look at the maxes and only compute for the top-k values in the row.
    // However, this kernel will likely not be a bottle neck and it seems better to closer match torch and find the
    // argmax after computing the softmax.
    const float reciprocal_row_sum = 1.f / row_sum;

#pragma unroll
    for (int ii = 0; ii < VPT; ++ii)
    {
        row_chunk[ii] = row_chunk[ii] * reciprocal_row_sum;
    }

    // Now, row_chunk contains the softmax of the row chunk. Now, I want to find the topk elements in each row, along
    // with the max index.
    int start_col = first_elt_read_by_thread;
    static constexpr int COLS_PER_GROUP_LDG = ELTS_PER_LDG * THREADS_PER_ROW;

    for (int k_idx = 0; k_idx < k; ++k_idx)
    {
        // First, each thread does the local argmax
        float max_val = row_chunk[0];
        int expert = start_col;
#pragma unroll
        for (int ldg = 0, col = start_col; ldg < LDG_PER_THREAD; ++ldg, col += COLS_PER_GROUP_LDG)
        {
#pragma unroll
            for (int ii = 0; ii < ELTS_PER_LDG; ++ii)
            {
                float val = row_chunk[ldg * ELTS_PER_LDG + ii];

                // No check on the experts here since columns with the smallest index are processed first and only
                // updated if > (not >=)
                if (val > max_val)
                {
                    max_val = val;
                    expert = col + ii;
                }
            }
        }

        // Now, we perform the argmax reduce. We use the butterfly pattern so threads reach consensus about the max.
        // This will be useful for K > 1 so that the threads can agree on "who" had the max value. That thread can
        // then blank out their max with -inf and the warp can run more iterations...
#pragma unroll
        for (int mask = THREADS_PER_ROW / 2; mask > 0; mask /= 2)
        {
            float other_max = VLLM_SHFL_XOR_SYNC_WIDTH(max_val, mask, THREADS_PER_ROW);
            int other_expert = VLLM_SHFL_XOR_SYNC_WIDTH(expert, mask, THREADS_PER_ROW);

            // We want lower indices to "win" in every thread so we break ties this way
            if (other_max > max_val || (other_max == max_val && other_expert < expert))
            {
                max_val = other_max;
                expert = other_expert;
            }
        }

        // Write the max for this k iteration to global memory.
        if (thread_group_idx == 0)
        {
            // Add a guard to ignore experts not included by this node
            const bool node_uses_expert = expert >= start_expert && expert < end_expert;
            const bool should_process_row = row_is_active && node_uses_expert;

            // The lead thread from each sub-group will write out the final results to global memory. (This will be
            // a single thread per row of the input/output matrices.)
            const int idx = k * thread_row + k_idx;
            output[idx] = max_val;
            indices[idx] = should_process_row ? (expert - start_expert) : NUM_EXPERTS;
            source_rows[idx] = k_idx * num_rows + thread_row;
        }

        // Finally, we clear the value in the thread with the current max if there is another iteration to run.
        if (k_idx + 1 < k)
        {
            const int ldg_group_for_expert = expert / COLS_PER_GROUP_LDG;
            const int thread_to_clear_in_group = (expert / ELTS_PER_LDG) % THREADS_PER_ROW;

            // Only the thread in the group which produced the max will reset the "winning" value to -inf.
            if (thread_group_idx == thread_to_clear_in_group)
            {
                const int offset_for_expert = expert % ELTS_PER_LDG;
                // Safe to set to any negative value since row_chunk values must be between 0 and 1.
                row_chunk[ldg_group_for_expert * ELTS_PER_LDG + offset_for_expert] = -10000.f;
            }
        }
    }
}
|
|
|
|
namespace detail
{
// Constructs some constants needed to partition the work across threads at compile time.
//
// EXPERTS:         row width in floats (number of experts).
// BYTES_PER_LDG:   bytes per vectorized load (power of 2, <= 16).
// WARP_SIZE_PARAM: threads per warp (32 on CUDA, 64 on ROCm wave64).
template <int EXPERTS, int BYTES_PER_LDG, int WARP_SIZE_PARAM>
struct TopkConstants
{
    static constexpr int ELTS_PER_LDG = BYTES_PER_LDG / sizeof(float);
    // Either one warp's worth of loads covers more than the row (first branch)
    // or the row splits into an exact number of full warp-wide loads.
    static_assert(EXPERTS / (ELTS_PER_LDG * WARP_SIZE_PARAM) == 0 || EXPERTS % (ELTS_PER_LDG * WARP_SIZE_PARAM) == 0, "");
    // Vectorized loads each thread issues — at least one. Written as a constexpr
    // ternary instead of the MAX() macro so the struct has no macro dependency.
    static constexpr int VECs_PER_THREAD =
        (1 > EXPERTS / (ELTS_PER_LDG * WARP_SIZE_PARAM)) ? 1 : (EXPERTS / (ELTS_PER_LDG * WARP_SIZE_PARAM));
    // Values (floats) processed per thread.
    static constexpr int VPT = VECs_PER_THREAD * ELTS_PER_LDG;
    // Threads cooperating on one row.
    static constexpr int THREADS_PER_ROW = EXPERTS / VPT;
    // Rows handled by one warp (was `static const`; constexpr for consistency
    // with the other members).
    static constexpr int ROWS_PER_WARP = WARP_SIZE_PARAM / THREADS_PER_ROW;
};
} // namespace detail
|
|
|
|
template <int EXPERTS, int WARPS_PER_TB, int WARP_SIZE_PARAM, int MAX_BYTES_PER_LDG, typename IndType>
|
|
void topkGatingSoftmaxLauncherHelper(const float* input, const bool* finished, float* output, IndType* indices,
|
|
int* source_row, const int num_rows, const int k, const int start_expert, const int end_expert, cudaStream_t stream)
|
|
{
|
|
static constexpr int BYTES_PER_LDG = MIN(MAX_BYTES_PER_LDG, sizeof(float) * EXPERTS);
|
|
using Constants = detail::TopkConstants<EXPERTS, BYTES_PER_LDG, WARP_SIZE_PARAM>;
|
|
static constexpr int VPT = Constants::VPT;
|
|
static constexpr int ROWS_PER_WARP = Constants::ROWS_PER_WARP;
|
|
const int num_warps = (num_rows + ROWS_PER_WARP - 1) / ROWS_PER_WARP;
|
|
const int num_blocks = (num_warps + WARPS_PER_TB - 1) / WARPS_PER_TB;
|
|
|
|
dim3 block_dim(WARP_SIZE_PARAM, WARPS_PER_TB);
|
|
topkGatingSoftmax<VPT, EXPERTS, WARPS_PER_TB, BYTES_PER_LDG, WARP_SIZE_PARAM><<<num_blocks, block_dim, 0, stream>>>(
|
|
input, finished, output, num_rows, indices, source_row, k, start_expert, end_expert);
|
|
}
|
|
|
|
#ifndef USE_ROCM
// Launches the fused softmax+top-k kernel for a compile-time expert count.
// NOTE: this macro expands inside topkGatingSoftmaxKernelLauncher and refers to
// its parameters (gating_output, topk_weights, topk_indices,
// token_expert_indices, num_tokens, topk, num_experts, stream) by name.
// CUDA builds support only a warp size of 32, enforced at compile time.
#define LAUNCH_SOFTMAX(NUM_EXPERTS, WARPS_PER_TB, MAX_BYTES)                          \
    static_assert(WARP_SIZE == 32,                                                    \
        "Unsupported warp size. Only 32 is supported for CUDA");                      \
    topkGatingSoftmaxLauncherHelper<NUM_EXPERTS, WARPS_PER_TB, WARP_SIZE, MAX_BYTES>( \
        gating_output, nullptr, topk_weights, topk_indices,                           \
        token_expert_indices, num_tokens, topk, 0, num_experts, stream);
#else
// ROCm variant: WARP_SIZE may be 32 or 64 depending on the target GPU, so the
// warp size is dispatched at runtime to the matching template instantiation.
#define LAUNCH_SOFTMAX(NUM_EXPERTS, WARPS_PER_TB, MAX_BYTES)                          \
    if (WARP_SIZE == 64) {                                                            \
        topkGatingSoftmaxLauncherHelper<NUM_EXPERTS, WARPS_PER_TB, 64, MAX_BYTES>(    \
            gating_output, nullptr, topk_weights, topk_indices,                       \
            token_expert_indices, num_tokens, topk, 0, num_experts, stream);          \
    } else if (WARP_SIZE == 32) {                                                     \
        topkGatingSoftmaxLauncherHelper<NUM_EXPERTS, WARPS_PER_TB, 32, MAX_BYTES>(    \
            gating_output, nullptr, topk_weights, topk_indices,                       \
            token_expert_indices, num_tokens, topk, 0, num_experts, stream);          \
    } else {                                                                          \
        assert(false && "Unsupported warp size. Only 32 and 64 are supported for ROCm"); \
    }
#endif
|
|
|
|
// Host-side dispatcher for the fused softmax + top-k gating computation.
// For the expert counts enumerated below it launches the single fused kernel;
// for any other count it falls back to a two-kernel pipeline (moeSoftmax
// followed by moeTopK) that stages the full softmax result in
// softmax_workspace.
template <typename IndType>
void topkGatingSoftmaxKernelLauncher(
    // [num_tokens, num_experts] router logits.
    const float* gating_output,
    // [num_tokens, topk] output gating weights.
    float* topk_weights,
    // [num_tokens, topk] output expert indices.
    IndType* topk_indices,
    // [num_tokens, topk] output source-row indices (k_idx * num_tokens + token).
    int* token_expert_indices,
    // Scratch buffer; only dereferenced on the fallback (default) path.
    float* softmax_workspace,
    const int num_tokens,
    const int num_experts,
    const int topk,
    cudaStream_t stream) {
  // Launch-tuning constants for the fused kernel instantiations.
  static constexpr int WARPS_PER_TB = 4;
  static constexpr int BYTES_PER_LDG_POWER_OF_2 = 16;
#ifndef USE_ROCM
  // Narrower loads for expert counts that are multiples of 64 but not powers of 2.
  static constexpr int BYTES_PER_LDG_MULTIPLE_64 = 8;
#endif
  // Each case instantiates the fused kernel for a fixed, compile-time expert count.
  switch (num_experts) {
    case 1:
      LAUNCH_SOFTMAX(1, WARPS_PER_TB, BYTES_PER_LDG_POWER_OF_2);
      break;
    case 2:
      LAUNCH_SOFTMAX(2, WARPS_PER_TB, BYTES_PER_LDG_POWER_OF_2);
      break;
    case 4:
      LAUNCH_SOFTMAX(4, WARPS_PER_TB, BYTES_PER_LDG_POWER_OF_2);
      break;
    case 8:
      LAUNCH_SOFTMAX(8, WARPS_PER_TB, BYTES_PER_LDG_POWER_OF_2);
      break;
    case 16:
      LAUNCH_SOFTMAX(16, WARPS_PER_TB, BYTES_PER_LDG_POWER_OF_2);
      break;
    case 32:
      LAUNCH_SOFTMAX(32, WARPS_PER_TB, BYTES_PER_LDG_POWER_OF_2);
      break;
    case 64:
      LAUNCH_SOFTMAX(64, WARPS_PER_TB, BYTES_PER_LDG_POWER_OF_2);
      break;
    case 128:
      LAUNCH_SOFTMAX(128, WARPS_PER_TB, BYTES_PER_LDG_POWER_OF_2);
      break;
    case 256:
      LAUNCH_SOFTMAX(256, WARPS_PER_TB, BYTES_PER_LDG_POWER_OF_2);
      break;
    case 512:
      LAUNCH_SOFTMAX(512, WARPS_PER_TB, BYTES_PER_LDG_POWER_OF_2);
      break;
    // (CUDA only) support multiples of 64 when num_experts is not power of 2.
    // ROCm uses WARP_SIZE 64 so 8 bytes loading won't fit for some of num_experts,
    // alternatively we can test 4 bytes loading and enable it in future.
#ifndef USE_ROCM
    case 192:
      LAUNCH_SOFTMAX(192, WARPS_PER_TB, BYTES_PER_LDG_MULTIPLE_64);
      break;
    case 320:
      LAUNCH_SOFTMAX(320, WARPS_PER_TB, BYTES_PER_LDG_MULTIPLE_64);
      break;
    case 384:
      LAUNCH_SOFTMAX(384, WARPS_PER_TB, BYTES_PER_LDG_MULTIPLE_64);
      break;
    case 448:
      LAUNCH_SOFTMAX(448, WARPS_PER_TB, BYTES_PER_LDG_MULTIPLE_64);
      break;
    case 576:
      LAUNCH_SOFTMAX(576, WARPS_PER_TB, BYTES_PER_LDG_MULTIPLE_64);
      break;
#endif
    // Fallback: generic two-pass path for any other expert count.
    default: {
      TORCH_CHECK(softmax_workspace != nullptr,
          "softmax_workspace must be provided for num_experts that are not a power of 2 or multiple of 64.");
      static constexpr int TPB = 256;
      // Pass 1: full softmax over the logits, staged in the workspace.
      moeSoftmax<TPB><<<num_tokens, TPB, 0, stream>>>(
          gating_output, nullptr, softmax_workspace, num_experts);
      // Pass 2: top-k selection over the staged softmax values.
      moeTopK<TPB><<<num_tokens, TPB, 0, stream>>>(
          softmax_workspace, nullptr, topk_weights, topk_indices, token_expert_indices,
          num_experts, topk, 0, num_experts);
    }
  }
}
|
|
|
|
} // namespace moe
|
|
} // namespace vllm
|
|
|
|
// Computes softmax over the router logits and selects the top-k experts per
// token, writing the gating weights, expert indices, and source-row mapping.
// Supports int32, uint32, and int64 index tensors.
void topk_softmax(
    torch::Tensor& topk_weights,          // [num_tokens, topk]
    torch::Tensor& topk_indices,          // [num_tokens, topk]
    torch::Tensor& token_expert_indices,  // [num_tokens, topk]
    torch::Tensor& gating_output)         // [num_tokens, num_experts]
{
    const int num_experts = gating_output.size(-1);
    const auto num_tokens = gating_output.numel() / num_experts;
    const int topk = topk_weights.size(-1);

    // The fallback (two-pass) path inside the launcher needs a scratch buffer
    // large enough to hold the full softmax output; the fused path does not.
    const bool is_pow_2 = (num_experts != 0) && ((num_experts & (num_experts - 1)) == 0);
    const bool needs_workspace = !is_pow_2 || num_experts > 256;
    const int64_t workspace_size = needs_workspace ? num_tokens * num_experts : 0;

    const at::cuda::OptionalCUDAGuard device_guard(device_of(gating_output));
    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    torch::Tensor softmax_workspace = torch::empty({workspace_size}, gating_output.options());

    // Single launch path shared by every supported index dtype; the pointer
    // type of `indices_ptr` selects the launcher instantiation.
    const auto launch = [&](auto* indices_ptr) {
        vllm::moe::topkGatingSoftmaxKernelLauncher(
            gating_output.data_ptr<float>(),
            topk_weights.data_ptr<float>(),
            indices_ptr,
            token_expert_indices.data_ptr<int>(),
            softmax_workspace.data_ptr<float>(),
            num_tokens,
            num_experts,
            topk,
            stream);
    };

    switch (topk_indices.scalar_type()) {
        case at::ScalarType::Int:
            launch(topk_indices.data_ptr<int>());
            break;
        case at::ScalarType::UInt32:
            launch(topk_indices.data_ptr<uint32_t>());
            break;
        default:
            // Any remaining dtype must be int64.
            TORCH_CHECK(topk_indices.scalar_type() == at::ScalarType::Long);
            launch(topk_indices.data_ptr<int64_t>());
            break;
    }
}
|