#include <torch/all.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <cub/cub.cuh>

#include <ATen/ATen.h>
#include <ATen/cuda/Atomic.cuh>

#include "../cuda_compat.h"
#include "../dispatch_utils.h"

#define CEILDIV(x, y) (((x) + (y) - 1) / (y))
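
// This file implements the device-side helpers for fused MoE layers:
//  * moe_align_block_size: buckets the flattened top-k expert assignments so
//    that each expert's token count is padded to a multiple of `block_size`,
//    producing the sorted token order plus per-block expert ids consumed by
//    the grouped expert GEMM.
//  * moe_sum: reduces the per-expert outputs over the top-k dimension.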

namespace vllm {
namespace moe {
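
// moe_align_block_size_kernel runs as a single thread block:
//   1. every thread pre-fills sorted_token_ids with `numel`, the sentinel
//      value for padding slots;
//   2. the block builds a per-expert histogram of topk_ids in shared memory;
//   3. one thread per expert rounds its count up to a multiple of block_size,
//      and a cub::BlockScan turns the rounded counts into an exclusive prefix
//      sum (`cumsum`) whose final entry is the padded token total;
//   4. the per-block expert ids are written out.
//
// Worked example (values chosen here for illustration, not from the source):
// with num_experts = 4, block_size = 4, and per-expert token counts
// {3, 0, 5, 1}, the rounded counts are {4, 0, 8, 4}, so
// cumsum = {0, 4, 4, 12, 16}, total_tokens_post_pad = 16, and
// expert_ids = {0, 2, 2, 3}.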
template <typename scalar_t>
__global__ void moe_align_block_size_kernel(
    const scalar_t* __restrict__ topk_ids,
    int32_t* __restrict__ sorted_token_ids, int32_t* __restrict__ expert_ids,
    int32_t* __restrict__ total_tokens_post_pad, int32_t num_experts,
    int32_t padded_num_experts, int32_t experts_per_warp, int32_t block_size,
    size_t numel, int32_t* __restrict__ cumsum, int32_t max_num_tokens_padded) {
  extern __shared__ int32_t shared_counts[];

  // Initialize sorted_token_ids with numel
  for (size_t it = threadIdx.x; it < max_num_tokens_padded; it += blockDim.x) {
    sorted_token_ids[it] = numel;
  }

  const int warp_id = threadIdx.x / WARP_SIZE;
  const int my_expert_start = warp_id * experts_per_warp;

  for (int i = 0; i < experts_per_warp; ++i) {
    if (my_expert_start + i < padded_num_experts) {
      shared_counts[warp_id * experts_per_warp + i] = 0;
    }
  }

  __syncthreads();

  const size_t tid = threadIdx.x;
  const size_t stride = blockDim.x;

  for (size_t i = tid; i < numel; i += stride) {
    int expert_id = topk_ids[i];
    if (expert_id >= num_experts) {
      continue;
    }
    int warp_idx = expert_id / experts_per_warp;
    int expert_offset = expert_id % experts_per_warp;
    atomicAdd(&shared_counts[warp_idx * experts_per_warp + expert_offset], 1);
  }

  __syncthreads();

  // Compute prefix sum over token counts per expert
  using BlockScan = cub::BlockScan<int32_t, 1024>;
  __shared__ typename BlockScan::TempStorage temp_storage;

  int expert_count = 0;
  int expert_id = threadIdx.x;
  if (expert_id < num_experts) {
    int warp_idx = expert_id / experts_per_warp;
    int expert_offset = expert_id % experts_per_warp;
    expert_count = shared_counts[warp_idx * experts_per_warp + expert_offset];
    expert_count = CEILDIV(expert_count, block_size) * block_size;
  }

  int cumsum_val;
  BlockScan(temp_storage).ExclusiveSum(expert_count, cumsum_val);
  if (expert_id <= num_experts) {
    cumsum[expert_id] = cumsum_val;
  }

  if (expert_id == num_experts) {
    *total_tokens_post_pad = cumsum_val;
  }

  __syncthreads();

  if (threadIdx.x < num_experts) {
    for (int i = cumsum[threadIdx.x]; i < cumsum[threadIdx.x + 1];
         i += block_size) {
      expert_ids[i / block_size] = threadIdx.x;
    }
  }

  // Fill remaining expert_ids with 0
  const size_t fill_start_idx = cumsum[num_experts] / block_size + threadIdx.x;
  const size_t expert_ids_size = CEILDIV(max_num_tokens_padded, block_size);
  for (size_t i = fill_start_idx; i < expert_ids_size; i += blockDim.x) {
    expert_ids[i] = 0;
  }
}
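
// Second pass of the general path: with cumsum_buffer holding the exclusive
// prefix sums, every token atomically claims the next slot in its expert's
// segment and writes its flat index into sorted_token_ids. The grid-stride
// loop makes correctness independent of the launched grid size.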
template <typename scalar_t>
__global__ void count_and_sort_expert_tokens_kernel(
    const scalar_t* __restrict__ topk_ids,
    int32_t* __restrict__ sorted_token_ids, int32_t* __restrict__ cumsum_buffer,
    size_t numel, int32_t num_experts) {
  const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
  const size_t stride = blockDim.x * gridDim.x;

  for (size_t i = tid; i < numel; i += stride) {
    int32_t expert_id = topk_ids[i];
    if (expert_id >= num_experts) {
      continue;
    }
    int32_t rank_post_pad = atomicAdd(&cumsum_buffer[expert_id], 1);
    sorted_token_ids[rank_post_pad] = i;
  }
}
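
// moe_sum_kernel: one thread block per token. Each thread strides over the
// hidden dimension and accumulates the TOPK expert outputs for its elements;
// TOPK is a compile-time constant, so the inner loop fully unrolls.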
template <typename scalar_t, int TOPK>
__global__ void moe_sum_kernel(
    scalar_t* __restrict__ out,          // [..., d]
    const scalar_t* __restrict__ input,  // [..., topk, d]
    const int d) {
  const int64_t token_idx = blockIdx.x;
  for (int64_t idx = threadIdx.x; idx < d; idx += blockDim.x) {
    scalar_t x = 0.0;
#pragma unroll
    for (int k = 0; k < TOPK; ++k) {
      x += VLLM_LDG(&input[token_idx * TOPK * d + k * d + idx]);
    }
    out[token_idx * d + idx] = x;
  }
}
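
// Single-block variant used for small batches (few tokens, few experts).
// Instead of atomics it keeps one row of per-expert counts per thread in
// shared memory (tokens_cnts), serially reduces them into per-expert totals,
// and scatters the token indices in the same launch, so alignment and
// sorting finish in one kernel.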
template <typename scalar_t>
__global__ void moe_align_block_size_small_batch_expert_kernel(
    const scalar_t* __restrict__ topk_ids,
    int32_t* __restrict__ sorted_token_ids, int32_t* __restrict__ expert_ids,
    int32_t* __restrict__ total_tokens_post_pad, int32_t num_experts,
    int32_t block_size, size_t numel, int32_t max_num_tokens_padded) {
  // Initialize sorted_token_ids with numel
  for (size_t it = threadIdx.x; it < max_num_tokens_padded; it += blockDim.x) {
    sorted_token_ids[it] = numel;
  }

  const size_t tid = threadIdx.x;
  const size_t stride = blockDim.x;

  extern __shared__ int32_t shared_mem[];
  int32_t* cumsum = shared_mem;
  int32_t* tokens_cnts = (int32_t*)(shared_mem + num_experts + 1);

  for (int i = 0; i < num_experts; ++i) {
    tokens_cnts[(threadIdx.x + 1) * num_experts + i] = 0;
  }

  for (size_t i = tid; i < numel; i += stride) {
    ++tokens_cnts[(threadIdx.x + 1) * num_experts + topk_ids[i]];
  }

  __syncthreads();

  if (threadIdx.x < num_experts) {
    tokens_cnts[threadIdx.x] = 0;
    for (int i = 1; i <= blockDim.x; ++i) {
      tokens_cnts[i * num_experts + threadIdx.x] +=
          tokens_cnts[(i - 1) * num_experts + threadIdx.x];
    }
  }

  __syncthreads();

  if (threadIdx.x == 0) {
    cumsum[0] = 0;
    for (int i = 1; i <= num_experts; ++i) {
      cumsum[i] =
          cumsum[i - 1] +
          CEILDIV(tokens_cnts[blockDim.x * num_experts + i - 1], block_size) *
              block_size;
    }
    *total_tokens_post_pad = static_cast<int32_t>(cumsum[num_experts]);
  }

  __syncthreads();

  if (threadIdx.x < num_experts) {
    for (int i = cumsum[threadIdx.x]; i < cumsum[threadIdx.x + 1];
         i += block_size) {
      expert_ids[i / block_size] = threadIdx.x;
    }
  }

  // Fill remaining expert_ids with 0
  const size_t fill_start_idx = cumsum[num_experts] / block_size + threadIdx.x;
  const size_t expert_ids_size = CEILDIV(max_num_tokens_padded, block_size);
  for (size_t i = fill_start_idx; i < expert_ids_size; i += blockDim.x) {
    expert_ids[i] = 0;
  }

  for (size_t i = tid; i < numel; i += stride) {
    int32_t expert_id = topk_ids[i];
    int32_t rank_post_pad =
        tokens_cnts[threadIdx.x * num_experts + expert_id] + cumsum[expert_id];
    sorted_token_ids[rank_post_pad] = i;
    ++tokens_cnts[threadIdx.x * num_experts + expert_id];
  }
}

}  // namespace moe
}  // namespace vllm

// taken from
// https://github.com/sgl-project/sglang/blob/8b5f83ed3b7d2a49ad5c5cd5aa61c5d502f47dbc
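//
// Host entry point: pads each expert's token count to a multiple of
// block_size and fills sorted_token_ids / experts_ids / num_tokens_post_pad
// for the grouped expert GEMM. sorted_token_ids must be pre-sized by the
// caller to the worst-case padded length; unused slots keep the
// topk_ids.numel() sentinel written by the kernels.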
void moe_align_block_size(torch::Tensor topk_ids, int64_t num_experts,
                          int64_t block_size, torch::Tensor sorted_token_ids,
                          torch::Tensor experts_ids,
                          torch::Tensor num_tokens_post_pad) {
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  int64_t padded_num_experts =
      ((num_experts + WARP_SIZE - 1) / WARP_SIZE) * WARP_SIZE;
  int experts_per_warp = WARP_SIZE;
  int threads = 1024;
  threads = ((threads + WARP_SIZE - 1) / WARP_SIZE) * WARP_SIZE;

  // BlockScan uses 1024 threads and assigns one thread per expert.
  TORCH_CHECK(padded_num_experts < 1024,
              "padded_num_experts must be less than 1024");
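
  // Two code paths: a fused single-kernel variant for small workloads, and a
  // general variant that builds the padded layout in one kernel and scatters
  // the token indices in a second. The numel < 1024 && num_experts <= 64
  // check below selects the fused path.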
  VLLM_DISPATCH_INTEGRAL_AND_UNSIGNED_TYPES(
      topk_ids.scalar_type(), "moe_align_block_size_kernel", [&] {
        // Allocate the device-side buffer for the per-expert prefix sums
        // (`cumsum`).
        auto options_int =
            torch::TensorOptions().dtype(torch::kInt).device(topk_ids.device());
        torch::Tensor cumsum_buffer =
            torch::empty({num_experts + 1}, options_int);
        bool small_batch_expert_mode =
            (topk_ids.numel() < 1024) && (num_experts <= 64);
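
        // For the fused path, dynamic shared memory holds the
        // (num_experts + 1)-entry cumsum followed by (threads + 1) rows of
        // per-thread expert counts (see the kernel's shared_mem layout).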
        if (small_batch_expert_mode) {
          const int32_t threads = max((int32_t)num_experts, WARP_SIZE);
          const int32_t shared_mem_size =
              ((threads + 1) * num_experts + (num_experts + 1)) *
              sizeof(int32_t);

          auto small_batch_expert_kernel =
              vllm::moe::moe_align_block_size_small_batch_expert_kernel<
                  scalar_t>;
          small_batch_expert_kernel<<<1, threads, shared_mem_size, stream>>>(
              topk_ids.data_ptr<scalar_t>(),
              sorted_token_ids.data_ptr<int32_t>(),
              experts_ids.data_ptr<int32_t>(),
              num_tokens_post_pad.data_ptr<int32_t>(), num_experts, block_size,
              topk_ids.numel(), sorted_token_ids.size(0));
        } else {
          auto align_kernel = vllm::moe::moe_align_block_size_kernel<scalar_t>;

          size_t num_warps = CEILDIV(padded_num_experts, experts_per_warp);
          size_t shared_mem_size =
              num_warps * experts_per_warp * sizeof(int32_t);

          align_kernel<<<1, threads, shared_mem_size, stream>>>(
              topk_ids.data_ptr<scalar_t>(),
              sorted_token_ids.data_ptr<int32_t>(),
              experts_ids.data_ptr<int32_t>(),
              num_tokens_post_pad.data_ptr<int32_t>(), num_experts,
              padded_num_experts, experts_per_warp, block_size,
              topk_ids.numel(), cumsum_buffer.data_ptr<int32_t>(),
              sorted_token_ids.size(0));
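
          // The scatter pass reuses cumsum_buffer as live per-expert write
          // cursors; the grid is capped at 65535 blocks, and the kernel's
          // grid-stride loop covers any remainder.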
          const int block_threads = std::min(256, (int)threads);
          const int num_blocks =
              (topk_ids.numel() + block_threads - 1) / block_threads;
          const int max_blocks = 65535;
          const int actual_blocks = std::min(num_blocks, max_blocks);

          auto sort_kernel =
              vllm::moe::count_and_sort_expert_tokens_kernel<scalar_t>;
          sort_kernel<<<actual_blocks, block_threads, 0, stream>>>(
              topk_ids.data_ptr<scalar_t>(),
              sorted_token_ids.data_ptr<int32_t>(),
              cumsum_buffer.data_ptr<int32_t>(), topk_ids.numel(), num_experts);
        }
      });
}
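
// Host entry point for the top-k reduction:
// out[token, :] = sum_k input[token, k, :]. topk values 2-4 dispatch to the
// unrolled kernel; anything else falls back to at::sum_out.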
void moe_sum(torch::Tensor& input,   // [num_tokens, topk, hidden_size]
             torch::Tensor& output)  // [num_tokens, hidden_size]
{
  const int hidden_size = input.size(-1);
  const auto num_tokens = output.numel() / hidden_size;
  const int topk = input.size(1);

  dim3 grid(num_tokens);
  dim3 block(std::min(hidden_size, 1024));
  const at::cuda::OptionalCUDAGuard device_guard(device_of(output));
  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  switch (topk) {
    case 2:
      VLLM_DISPATCH_FLOATING_TYPES(input.scalar_type(), "moe_sum_kernel", [&] {
        vllm::moe::moe_sum_kernel<scalar_t, 2><<<grid, block, 0, stream>>>(
            output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(),
            hidden_size);
      });
      break;

    case 3:
      VLLM_DISPATCH_FLOATING_TYPES(input.scalar_type(), "moe_sum_kernel", [&] {
        vllm::moe::moe_sum_kernel<scalar_t, 3><<<grid, block, 0, stream>>>(
            output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(),
            hidden_size);
      });
      break;

    case 4:
      VLLM_DISPATCH_FLOATING_TYPES(input.scalar_type(), "moe_sum_kernel", [&] {
        vllm::moe::moe_sum_kernel<scalar_t, 4><<<grid, block, 0, stream>>>(
            output.data_ptr<scalar_t>(), input.data_ptr<scalar_t>(),
            hidden_size);
      });
      break;

    default:
      at::sum_out(output, input, 1);
      break;
  }
}
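
// Illustrative call sequence (a sketch only; the output sizing below mirrors
// how vLLM's Python wrapper allocates these tensors and is an assumption
// here, not something defined in this file):
//
//   const int64_t num_experts = 8, block_size = 16;
//   torch::Tensor topk_ids = ...;  // [num_tokens, topk], integer expert ids
//   const int64_t max_num_tokens_padded =
//       topk_ids.numel() + num_experts * (block_size - 1);
//   auto opts = topk_ids.options().dtype(torch::kInt32);
//   torch::Tensor sorted_token_ids =
//       torch::empty({max_num_tokens_padded}, opts);
//   torch::Tensor experts_ids =
//       torch::empty({CEILDIV(max_num_tokens_padded, block_size)}, opts);
//   torch::Tensor num_tokens_post_pad = torch::empty({1}, opts);
//   moe_align_block_size(topk_ids, num_experts, block_size, sorted_token_ids,
//                        experts_ids, num_tokens_post_pad);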