# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from typing import Optional, Union

import torch

import vllm._custom_ops as ops
from tests.kernels.quant_utils import per_block_cast_to_int8
from tests.kernels.quantization.nvfp4_utils import (FLOAT4_E2M1_MAX,
                                                    FLOAT8_E4M3_MAX)
from vllm.model_executor.layers.activation import SiluAndMul
from vllm.model_executor.layers.fused_moe import fused_experts, fused_topk
from vllm.model_executor.layers.fused_moe.config import FusedMoEQuantConfig
from vllm.model_executor.layers.fused_moe.fused_batched_moe import (
    BatchedPrepareAndFinalize, BatchedTritonExperts, NaiveBatchedExperts)
from vllm.model_executor.layers.fused_moe.modular_kernel import (
    FusedMoEModularKernel)
from vllm.model_executor.layers.fused_moe.utils import (
    moe_kernel_quantize_input)
from vllm.utils import round_up
from vllm.utils.deep_gemm import per_block_cast_to_fp8


def triton_moe(
    a: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    topk_weight: torch.Tensor,
    topk_ids: torch.Tensor,
    w1_scale: Optional[torch.Tensor] = None,
    w2_scale: Optional[torch.Tensor] = None,
    a1_scale: Optional[torch.Tensor] = None,
    a2_scale: Optional[torch.Tensor] = None,
    quant_dtype: Optional[torch.dtype] = None,
    per_act_token_quant: bool = False,
    block_shape: Optional[list[int]] = None,
) -> torch.Tensor:
    quant_config = FusedMoEQuantConfig.make(
        quant_dtype,
        per_act_token_quant=per_act_token_quant,
        block_shape=block_shape,
        w1_scale=w1_scale,
        w2_scale=w2_scale,
        a1_scale=a1_scale,
        a2_scale=a2_scale,
    )

    return fused_experts(a,
                         w1,
                         w2,
                         topk_weight,
                         topk_ids,
                         quant_config=quant_config)


def batched_moe(
    a: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    topk_weight: torch.Tensor,
    topk_ids: torch.Tensor,
    w1_scale: Optional[torch.Tensor] = None,
    w2_scale: Optional[torch.Tensor] = None,
    a1_scale: Optional[torch.Tensor] = None,
    a2_scale: Optional[torch.Tensor] = None,
    quant_dtype: Optional[torch.dtype] = None,
    per_act_token_quant: bool = False,
    block_shape: Optional[list[int]] = None,
) -> torch.Tensor:
    max_num_tokens = round_up(a.shape[0], 64)

    quant_config = FusedMoEQuantConfig.make(
        quant_dtype,
        per_act_token_quant=per_act_token_quant,
        block_shape=block_shape,
        w1_scale=w1_scale,
        w2_scale=w2_scale,
        a1_scale=a1_scale,
        a2_scale=a2_scale,
    )

    fused_experts = FusedMoEModularKernel(
        BatchedPrepareAndFinalize(max_num_tokens,
                                  num_dispatchers=1,
                                  num_local_experts=w1.shape[0],
                                  rank=0),
        BatchedTritonExperts(
            max_num_tokens=max_num_tokens,
            num_dispatchers=1,
            quant_config=quant_config,
        ),
    )

    return fused_experts(a, w1, w2, topk_weight, topk_ids)


def naive_batched_moe(
    a: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    topk_weight: torch.Tensor,
    topk_ids: torch.Tensor,
    w1_scale: Optional[torch.Tensor] = None,
    w2_scale: Optional[torch.Tensor] = None,
    a1_scale: Optional[torch.Tensor] = None,
    a2_scale: Optional[torch.Tensor] = None,
    quant_dtype: Optional[torch.dtype] = None,
    per_act_token_quant: bool = False,
    block_shape: Optional[list[int]] = None,
) -> torch.Tensor:
    max_num_tokens = round_up(a.shape[0], 64)

    quant_config = FusedMoEQuantConfig.make(
        quant_dtype,
        per_act_token_quant=per_act_token_quant,
        block_shape=block_shape,
        w1_scale=w1_scale,
        w2_scale=w2_scale,
        a1_scale=a1_scale,
        a2_scale=a2_scale,
    )

    fused_experts = FusedMoEModularKernel(
        BatchedPrepareAndFinalize(max_num_tokens,
                                  num_dispatchers=1,
                                  num_local_experts=w1.shape[0],
                                  rank=0),
        NaiveBatchedExperts(
            max_num_tokens=max_num_tokens,
            num_dispatchers=1,
            quant_config=quant_config,
        ),
    )

    return fused_experts(a, w1, w2, topk_weight, topk_ids)


def chunk_scales(scales: Optional[torch.Tensor], start: int,
                 end: int) -> Optional[torch.Tensor]:
    if scales is not None:
        if scales.numel() == 1:
            return scales
        else:
            return scales[start:end]
    return None


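# Illustrative sketch (not part of the original test utilities): how
# chunk_scales treats a per-tensor scale (numel == 1) versus per-token scales
# when slicing out a token range. Runs on CPU; the values are arbitrary.
def _example_chunk_scales() -> None:
    per_tensor = torch.tensor([0.5])
    per_token = torch.rand(32, 1)
    # A single-element scale is shared by every chunk and returned unchanged.
    assert chunk_scales(per_tensor, 0, 16) is per_tensor
    # Per-token scales are sliced down to the requested token range.
    assert chunk_scales(per_token, 16, 32).shape == (16, 1)
    # Missing scales stay missing.
    assert chunk_scales(None, 0, 16) is None

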
def make_quantized_test_activations(
    E: int,
    m: int,
    k: int,
    in_dtype: torch.dtype,
    quant_dtype: Optional[torch.dtype] = None,
    block_shape: Optional[list[int]] = None,
    per_act_token_quant: bool = False,
) -> tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
    a = torch.randn((E, m, k), device="cuda", dtype=in_dtype) / 10
    a_q = a
    a_scale = None

    if quant_dtype is not None:
        assert (quant_dtype == torch.float8_e4m3fn
                or quant_dtype == torch.int8), "only fp8/int8 supported"
        a_q = torch.zeros_like(a, dtype=quant_dtype)
        a_scale_l = [None] * E
        for e in range(E):
            a_q[e], a_scale_l[e] = moe_kernel_quantize_input(
                a[e], None, quant_dtype, per_act_token_quant, block_shape)
        a_scale = torch.stack(a_scale_l)

        if not per_act_token_quant and block_shape is None:
            a_scale = a_scale.view(E, 1, 1)

    return a, a_q, a_scale


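# Illustrative sketch (not part of the original file): per-tensor fp8
# quantization of activations for 4 experts. Assumes a CUDA device with fp8
# support; the shapes below are arbitrary example values.
def _example_quantized_activations() -> None:
    a, a_q, a_scale = make_quantized_test_activations(
        E=4,
        m=16,
        k=128,
        in_dtype=torch.bfloat16,
        quant_dtype=torch.float8_e4m3fn,
    )
    assert a.shape == a_q.shape == (4, 16, 128)
    assert a_q.dtype == torch.float8_e4m3fn
    # With neither per-token nor blocked quantization, one scale per expert.
    assert a_scale is not None and a_scale.shape == (4, 1, 1)

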
def moe_quantize_weights(
    w: torch.Tensor,
    w_s: Optional[torch.Tensor],
    quant_dtype: Union[torch.dtype, str, None],
    per_token_quant: bool,
    block_shape: Optional[list[int]],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]:
    assert (quant_dtype == torch.float8_e4m3fn or quant_dtype == torch.int8
            or quant_dtype == "nvfp4"), "only fp8/int8/nvfp4 supported"

    w_gs = None

    if block_shape is not None:
        assert not per_token_quant
        if quant_dtype == torch.int8:
            w, w_s = per_block_cast_to_int8(w, block_shape)
        elif quant_dtype == torch.float8_e4m3fn:
            w, w_s = per_block_cast_to_fp8(w, block_shape)
        elif quant_dtype == "nvfp4":
            raise RuntimeError("blocked quantization not supported for nvfp4")
        else:
            raise RuntimeError(f"Unsupported quant type {quant_dtype}")
    else:
        if quant_dtype == torch.int8:
            w, w_s = ops.scaled_int8_quant(
                w, w_s, use_per_token_if_dynamic=per_token_quant)
        elif quant_dtype == torch.float8_e4m3fn:
            w, w_s = ops.scaled_fp8_quant(
                w, w_s, use_per_token_if_dynamic=per_token_quant)
        elif quant_dtype == "nvfp4":
            assert not per_token_quant
            w_amax = torch.abs(w).max().to(torch.float32)
            w_gs = FLOAT8_E4M3_MAX * FLOAT4_E2M1_MAX / w_amax
            w, w_s = ops.scaled_fp4_quant(w, w_gs)
        else:
            raise RuntimeError(f"Unsupported quant type {quant_dtype}")

    return w, w_s, w_gs


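# Illustrative sketch (not part of the original file): dynamic per-tensor fp8
# quantization of a single expert weight via moe_quantize_weights. Assumes a
# CUDA device with fp8 support; the nvfp4 global scale (w_gs) is only
# produced on the "nvfp4" path and stays None here.
def _example_moe_quantize_weights_fp8() -> None:
    w = torch.randn((64, 128), device="cuda", dtype=torch.bfloat16)
    w_q, w_s, w_gs = moe_quantize_weights(w,
                                          None,
                                          torch.float8_e4m3fn,
                                          per_token_quant=False,
                                          block_shape=None)
    assert w_q.dtype == torch.float8_e4m3fn
    assert w_s is not None and w_gs is None

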
def make_test_weight(
    e: int,
    rows: int,
    cols: int,
    in_dtype: torch.dtype = torch.bfloat16,
    quant_dtype: Union[torch.dtype, str, None] = None,
    block_shape: Optional[list[int]] = None,
    per_out_ch_quant: bool = False,
) -> tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor],
           Optional[torch.Tensor]]:
    w_16 = torch.randn((e, rows, cols), device="cuda", dtype=in_dtype) / 15
    w_gs = None

    if quant_dtype is not None:
        w_l = [None] * e
        w_s_l = [None] * e
        w_gs_l = [None] * e
        for idx in range(e):
            w_l[idx], w_s_l[idx], w_gs_l[idx] = moe_quantize_weights(
                w_16[idx], None, quant_dtype, per_out_ch_quant, block_shape)

        w = torch.stack(w_l)
        w_s = torch.stack(w_s_l)
        if e > 0 and w_gs_l[0] is not None:
            w_gs = torch.stack(w_gs_l)
        if w_s.ndim == 2:
            assert w_s.shape[-1] == 1
            w_s = w_s.view(-1, 1, 1)

        if block_shape is not None:
            block_n, block_k = block_shape
            n_tiles = (rows + block_n - 1) // block_n
            k_tiles = (cols + block_k - 1) // block_k
            assert w_s.shape == (e, n_tiles, k_tiles)
    else:
        w = w_16
        w_s = None
        w_gs = None

    return w_16, w, w_s, w_gs


def make_test_weights(
    e: int,
    n: int,
    k: int,
    in_dtype: torch.dtype = torch.bfloat16,
    quant_dtype: Union[torch.dtype, str, None] = None,
    block_shape: Optional[list[int]] = None,
    per_out_ch_quant: bool = False,
) -> tuple[tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor],
                 Optional[torch.Tensor]],
           tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor],
                 Optional[torch.Tensor]]]:
    return (
        make_test_weight(e, 2 * n, k, in_dtype, quant_dtype, block_shape,
                         per_out_ch_quant),
        make_test_weight(e, k, n, in_dtype, quant_dtype, block_shape,
                         per_out_ch_quant),
    )


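# Illustrative sketch (not part of the original file): the helpers above are
# typically combined along these lines to compare the fused Triton path with
# the naive batched reference on unquantized bf16 weights. Assumes a CUDA
# device; the shapes, top-k and tolerances are arbitrary example values.
def _example_compare_triton_and_batched(e: int = 8,
                                        m: int = 32,
                                        n: int = 128,
                                        k: int = 256) -> None:
    dtype = torch.bfloat16
    a = torch.randn((m, k), device="cuda", dtype=dtype) / 10
    score = torch.randn((m, e), device="cuda", dtype=dtype)
    (_, w1, _, _), (_, w2, _, _) = make_test_weights(e, n, k, in_dtype=dtype)
    topk_weight, topk_ids, _ = fused_topk(a, score.float(), 2, False)
    out_triton = triton_moe(a, w1, w2, topk_weight, topk_ids)
    out_batched = naive_batched_moe(a, w1, w2, topk_weight, topk_ids)
    torch.testing.assert_close(out_triton, out_batched, atol=2e-2, rtol=2e-2)

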
def per_token_cast_to_fp8(
        x: torch.Tensor,
        block_size: int = 128) -> tuple[torch.Tensor, torch.Tensor]:
    assert x.dim() == 2
    m, n = x.shape
    pad_size = (block_size - (n % block_size)) % block_size
    x = torch.nn.functional.pad(x,
                                (0, pad_size), value=0) if pad_size > 0 else x
    x_view = x.view(m, -1, block_size)
    x_amax = x_view.abs().float().amax(dim=2).view(m, -1).clamp(1e-4)
    fp8_data = (x_view * (448.0 / x_amax.unsqueeze(2))).to(torch.float8_e4m3fn)
    return fp8_data.view(m, n + pad_size)[:, :n], (x_amax / 448.0).view(m, -1)


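# Illustrative sketch (not part of the original file): per_token_cast_to_fp8
# pads the last dimension to a multiple of block_size, scales each block so
# its amax maps to the fp8 maximum of 448, and returns the unpadded fp8 data
# plus one scale per (row, block). Assumes float8_e4m3fn is available.
def _example_per_token_cast_to_fp8() -> None:
    x = torch.randn(4, 200)
    x_fp8, scales = per_token_cast_to_fp8(x, block_size=128)
    assert x_fp8.shape == x.shape and x_fp8.dtype == torch.float8_e4m3fn
    # 200 columns pad up to 256, i.e. two 128-wide blocks per row.
    assert scales.shape == (4, 2)

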
def make_test_quant_config(
    e: int,
    n: int,
    k: int,
    in_dtype: torch.dtype,
    quant_dtype: Union[torch.dtype, str, None] = None,
    per_act_token_quant: bool = False,
    block_shape: Optional[list[int]] = None,
) -> tuple[torch.Tensor, torch.Tensor, FusedMoEQuantConfig]:
    (_, w1, w1_s, w1_gs), (_, w2, w2_s, w2_gs) = make_test_weights(
        e,
        n,
        k,
        in_dtype,
        quant_dtype,
        per_out_ch_quant=per_act_token_quant,
        block_shape=block_shape,
    )

    # Hacky/trivial scales for nvfp4.
    a1_gscale: Optional[torch.Tensor] = None
    a2_gscale: Optional[torch.Tensor] = None
    if quant_dtype == "nvfp4":
        a1_gscale = torch.ones((e, ), device="cuda", dtype=torch.float32)
        a2_gscale = torch.ones((e, ), device="cuda", dtype=torch.float32)
        a1_scale = a1_gscale
        a2_scale = a2_gscale
    else:
        a1_scale = None
        a2_scale = None

    return w1, w2, FusedMoEQuantConfig.make(
        quant_dtype,
        per_act_token_quant=per_act_token_quant,
        block_shape=block_shape,
        w1_scale=w1_s,
        w2_scale=w2_s,
        a1_gscale=a1_gscale,
        a2_gscale=a2_gscale,
        a1_scale=a1_scale,
        a2_scale=a2_scale,
        # TODO: make sure this is handled properly
        g1_alphas=(1 / w1_gs) if w1_gs is not None else None,
        g2_alphas=(1 / w2_gs) if w2_gs is not None else None,
    )


def fused_moe(
    hidden_states: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    score: torch.Tensor,
    topk: int,
    renormalize: bool = False,
    quant_config: Optional[FusedMoEQuantConfig] = None,
    global_num_experts: int = -1,
    expert_map: Optional[torch.Tensor] = None,
) -> torch.Tensor:
    topk_weights, topk_ids, _ = fused_topk(hidden_states, score.float(), topk,
                                           renormalize)
    return fused_experts(hidden_states,
                         w1,
                         w2,
                         topk_weights,
                         topk_ids,
                         global_num_experts=global_num_experts,
                         expert_map=expert_map,
                         quant_config=quant_config)


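# Illustrative sketch (not part of the original file): building an fp8
# block-quantized config with make_test_quant_config and running it through
# the fused_moe wrapper above. Assumes a CUDA device with fp8 support; the
# [128, 128] block shape and the other sizes are arbitrary example values.
def _example_fused_moe_fp8_block(e: int = 8,
                                 m: int = 32,
                                 n: int = 128,
                                 k: int = 256) -> None:
    a = torch.randn((m, k), device="cuda", dtype=torch.bfloat16) / 10
    score = torch.randn((m, e), device="cuda", dtype=torch.bfloat16)
    w1, w2, quant_config = make_test_quant_config(
        e,
        n,
        k,
        in_dtype=torch.bfloat16,
        quant_dtype=torch.float8_e4m3fn,
        block_shape=[128, 128],
    )
    out = fused_moe(a, w1, w2, score, topk=2, quant_config=quant_config)
    assert out.shape == (m, k)

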
# CustomOp?
class BaselineMM(torch.nn.Module):

    def __init__(
        self,
        b: torch.Tensor,
        out_dtype: torch.dtype,
    ):
        super().__init__()
        self.b = b.to(dtype=torch.float32)
        self.out_dtype = out_dtype

    def forward(
            self,
            a: torch.Tensor) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
        return torch.mm(a.to(dtype=torch.float32),
                        self.b).to(self.out_dtype), None


class TestMLP(torch.nn.Module):

    def __init__(
        self,
        w1: torch.Tensor,
        w2: torch.Tensor,
        out_dtype: torch.dtype,
    ):
        super().__init__()
        self.gate_up_proj = BaselineMM(w1, out_dtype)
        self.down_proj = BaselineMM(w2, out_dtype)
        self.act_fn = SiluAndMul()

    def forward(self, x):
        x, _ = self.gate_up_proj(x)
        x = self.act_fn(x)
        x, _ = self.down_proj(x)
        return x


def make_naive_shared_experts(
    N: int,
    K: int,
    in_dtype: torch.dtype = torch.bfloat16,
) -> torch.nn.Module:
    w1 = torch.randn((K, N * 2), device="cuda", dtype=in_dtype) / 15
    w2 = torch.randn((N, K), device="cuda", dtype=in_dtype) / 15
    return TestMLP(w1, w2, out_dtype=in_dtype)


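# Illustrative sketch (not part of the original file): the naive shared-expert
# MLP maps (num_tokens, K) activations through a K -> 2*N gate/up projection,
# SiluAndMul, and an N -> K down projection, so the output shape matches the
# input. Assumes a CUDA device; the sizes are arbitrary example values.
def _example_naive_shared_experts() -> None:
    mlp = make_naive_shared_experts(N=128, K=256)
    x = torch.randn(8, 256, device="cuda", dtype=torch.bfloat16)
    assert mlp(x).shape == x.shape

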
class RealMLP(torch.nn.Module):

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        w1: torch.Tensor,
        w2: torch.Tensor,
        hidden_act: str = "silu",
        quant_config=None,
        reduce_results: bool = True,
        prefix: str = "",
        w1_s: Optional[torch.Tensor] = None,
        w2_s: Optional[torch.Tensor] = None,
    ) -> None:
        from vllm.model_executor.layers.linear import (
            MergedColumnParallelLinear, RowParallelLinear)

        super().__init__()
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config,
            prefix=f"{prefix}.gate_up_proj")
        self.gate_up_proj.register_parameter(
            "weight", torch.nn.Parameter(w1, requires_grad=False))
        self.gate_up_proj.register_parameter(
            "weight_scale", torch.nn.Parameter(w1_s, requires_grad=False))
        self.gate_up_proj.register_parameter(
            "input_scale",
            None)  #torch.nn.Parameter(None, requires_grad=False))
        self.down_proj = RowParallelLinear(intermediate_size,
                                           hidden_size,
                                           bias=False,
                                           quant_config=quant_config,
                                           reduce_results=reduce_results,
                                           prefix=f"{prefix}.down_proj")
        self.down_proj.register_parameter(
            "weight", torch.nn.Parameter(w2, requires_grad=False))
        self.down_proj.register_parameter(
            "weight_scale", torch.nn.Parameter(w2_s, requires_grad=False))
        self.down_proj.register_parameter(
            "input_scale",
            None)  #torch.nn.Parameter(None, requires_grad=False))
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x


def make_shared_experts(
    N: int,
    K: int,
    in_dtype: torch.dtype = torch.bfloat16,
    quant_dtype: Union[torch.dtype, str, None] = None,
) -> torch.nn.Module:
    from vllm.model_executor.layers.quantization.fp8 import Fp8Config

    (_, w1, w1_s, _), (_, w2, w2_s, _) = make_test_weights(
        1,
        N,
        K,
        in_dtype=in_dtype,
        quant_dtype=quant_dtype,
    )
    old_dtype = torch.get_default_dtype()
    try:
        torch.set_default_dtype(in_dtype)
        if quant_dtype == torch.float8_e4m3fn:
            w1 = w1[0].transpose(0, 1)
            w2 = w2[0].transpose(0, 1)
            w1_s = w1_s[0].transpose(0, 1) if w1_s is not None else None
            w2_s = w2_s[0].transpose(0, 1) if w2_s is not None else None
            quant_config = Fp8Config(True)
        else:
            w1 = w1[0]
            w2 = w2[0]
            w1_s = None
            w2_s = None
            quant_config = None

        return RealMLP(K,
                       N,
                       w1,
                       w2,
                       "silu",
                       quant_config,
                       w1_s=w1_s,
                       w2_s=w2_s)
    finally:
        torch.set_default_dtype(old_dtype)