# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from typing import Optional

import pytest
import torch

from tests.kernels.utils import torch_experts
from vllm import _custom_ops as ops
from vllm.config import VllmConfig, set_current_vllm_config
from vllm.model_executor.layers.fused_moe.config import (
    fp8_w8a8_moe_quant_config)
from vllm.model_executor.layers.fused_moe.cutlass_moe import (
    CutlassBatchedExpertsFp8)
from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk
from vllm.model_executor.layers.fused_moe.modular_kernel import (
    FusedMoEModularKernel)
from vllm.platforms import current_platform
from vllm.utils import cdiv

from ...utils import multi_gpu_test
from .parallel_utils import ProcessGroupInfo, parallel_launch

try:
    from pplx_kernels import AllToAll
    from pplx_kernels.nvshmem import (nvshmem_alloc_empty_unique_id,
                                      nvshmem_finalize, nvshmem_get_unique_id,
                                      nvshmem_init)
    has_pplx = True
except ImportError:
    has_pplx = False

requires_pplx = pytest.mark.skipif(
    not has_pplx,
    reason="Requires PPLX kernels",
)

NUM_EXPERTS = [40, 64]
TOP_KS = [6, 8]
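

# Number of rows that rank `r` owns when `num` rows are split across `w`
# ranks; the remainder is spread over the lowest-numbered ranks.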
def rank_chunk(num, r, w):
    rem = num % w
    return (num // w) + (1 if r < rem else 0)
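

# Slice tensor `t` along dim 0 into the chunk owned by rank `r`, with chunk
# sizes matching rank_chunk.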
def chunk_by_rank(t, r, w):
    num = t.shape[0]
    chunk = rank_chunk(num, r, w)
    rem = num % w
    if rem == 0 or r < rem:
        return t[(r * chunk):(r + 1) * chunk].contiguous()
    else:
        long_chunks = (num // w + 1) * rem
        short_chunks = (r - rem) * chunk
        start = long_chunks + short_chunks
        return t[start:start + chunk].contiguous()
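

# Build a FusedMoEModularKernel from a PPLX all-to-all prepare/finalize stage
# and CUTLASS batched fp8 experts, run it on this rank's chunk of the batch,
# and return this rank's output rows.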
def pplx_cutlass_moe(
    pgi: ProcessGroupInfo,
    dp_size: int,
    a: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    w1_scale: torch.Tensor,
    w2_scale: torch.Tensor,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    a1_scale: torch.Tensor,
    out_dtype,
    per_act_token: bool,
    per_out_ch: bool,
    group_name: Optional[str],
):
    from vllm.model_executor.layers.fused_moe.pplx_prepare_finalize import (
        PplxPrepareAndFinalize)
    assert torch.cuda.current_device() == pgi.local_rank

    num_tokens, hidden_dim = a.shape
    intermediate_dim = w2.shape[2]
    num_experts = w1.shape[0]
    block_size = hidden_dim  # TODO support more cases
    device = pgi.device
    rank = pgi.rank
    world_size = pgi.world_size
    rank_num_tokens = rank_chunk(num_tokens, rank, world_size)
    max_num_tokens = rank_chunk(num_tokens, 0, world_size)
    topk = topk_ids.shape[1]

    if block_size == hidden_dim:
        scale_elems = 4  # hack to circumvent pplx data format requirements
    else:
        scale_elems = (hidden_dim + block_size - 1) // block_size

    args = dict(
        max_num_tokens=max_num_tokens,
        num_experts=num_experts,
        experts_per_token=topk,
        rank=rank,
        world_size=world_size,
        dp_size=dp_size,
        hidden_dim=hidden_dim,
        hidden_dim_bytes=hidden_dim,  # because a.dtype.itemsize == 1
        hidden_dim_scale_bytes=scale_elems * torch.float32.itemsize,
    )
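
    # Use the NVSHMEM-backed internode all-to-all when no process group name
    # is given; otherwise the intranode variant over the named group.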
    if group_name is None:
        ata = AllToAll.internode(**args)
    else:
        args["group_name"] = group_name
        ata = AllToAll.intranode(**args)

    w1 = w1.to(device)
    w2 = w2.to(device)
    w1_scale = w1_scale.to(device)
    w2_scale = w2_scale.to(device)
    a1_scale = a1_scale.to(device)

    assert num_experts % world_size == 0
    num_local_experts = cdiv(num_experts, world_size)
    num_dispatchers = pgi.world_size // dp_size

    prepare_finalize = PplxPrepareAndFinalize(
        ata,
        max_num_tokens=max_num_tokens,
        num_local_experts=num_local_experts,
        num_dispatchers=num_dispatchers)
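
    # Per-expert leading-dimension strides for the two grouped GEMMs: the
    # first GEMM maps hidden_dim -> 2 * intermediate_dim, the second maps
    # intermediate_dim -> hidden_dim.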
    ab_strides1 = torch.full((num_local_experts, ),
                             hidden_dim,
                             device="cuda",
                             dtype=torch.int64)
    ab_strides2 = torch.full((num_local_experts, ),
                             intermediate_dim,
                             device="cuda",
                             dtype=torch.int64)
    c_strides1 = torch.full((num_local_experts, ),
                            2 * intermediate_dim,
                            device="cuda",
                            dtype=torch.int64)
    c_strides2 = torch.full((num_local_experts, ),
                            hidden_dim,
                            device="cuda",
                            dtype=torch.int64)
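
    # Each rank owns a contiguous slice of the experts, so the weight scales
    # are chunked by rank; with per-tensor activation quant, each rank
    # indexes its own row of a1_scale instead.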
    experts = CutlassBatchedExpertsFp8(
        num_local_experts, num_dispatchers, out_dtype, ab_strides1,
        ab_strides2, c_strides1, c_strides2,
        fp8_w8a8_moe_quant_config(
            per_act_token_quant=per_act_token,
            per_out_ch_quant=per_out_ch,
            w1_scale=chunk_by_rank(w1_scale, rank, world_size),
            w2_scale=chunk_by_rank(w2_scale, rank, world_size),
            a1_scale=chunk_by_rank(a1_scale, rank, world_size)
            if per_act_token else a1_scale[rank]))

    fused_cutlass_experts = FusedMoEModularKernel(
        prepare_finalize,
        experts,
    )

    a_chunk = chunk_by_rank(a, rank, world_size).to(device)
    chunk_topk_weight = chunk_by_rank(topk_weights, rank,
                                      world_size).to(device)
    chunk_topk_ids = chunk_by_rank(topk_ids, rank,
                                   world_size).to(torch.uint32).to(device)

    out = fused_cutlass_experts(
        a_chunk,
        chunk_by_rank(w1, rank, world_size),
        chunk_by_rank(w2, rank, world_size),
        chunk_topk_weight,
        chunk_topk_ids,
        global_num_experts=num_experts,
        expert_map=None,  # TODO
    )

    torch.cuda.synchronize()

    ata.destroy()
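
    # The batched all-to-all pads each rank's output up to max_num_tokens;
    # only the first rank_num_tokens rows are valid, hence the slice.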
    return out[:rank_num_tokens]


vllm_config = VllmConfig()
vllm_config.scheduler_config.max_num_seqs = 128
vllm_config.scheduler_config.max_model_len = 8192
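

# Per-rank worker run by parallel_launch: set up the communication backend,
# run the distributed PPLX + CUTLASS path, and compare this rank's slice of
# the output against an unquantized torch reference.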
def _pplx_moe(
    pgi: ProcessGroupInfo,
    dp_size: int,
    a: torch.Tensor,
    w1: torch.Tensor,
    w2: torch.Tensor,
    w1_scale: torch.Tensor,
    w2_scale: torch.Tensor,
    topk_weights: torch.Tensor,
    topk_ids: torch.Tensor,
    a1_scale: torch.Tensor,
    out_dtype,
    a_full: torch.Tensor,
    w1_full: torch.Tensor,
    w2_full: torch.Tensor,
    per_act_token: bool,
    per_out_ch: bool,
    use_internode: bool,
):
    try:
        if use_internode:
            uid = nvshmem_get_unique_id(
            ) if pgi.rank == 0 else nvshmem_alloc_empty_unique_id()
            torch.distributed.broadcast(uid, src=0)
            nvshmem_init(uid, pgi.rank, pgi.world_size)
            # Internode mode uses NVSHMEM directly; there is no process
            # group name to pass along.
            group_name = None
        else:
            group_ranks = list(range(pgi.world_size))
            cpu_group = torch.distributed.new_group(group_ranks,
                                                    backend="gloo")
            group_name = cpu_group.group_name
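
        # Reference output: unquantized torch MoE over the full batch; each
        # rank then checks its own slice against the distributed result.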
        with set_current_vllm_config(vllm_config):
            torch_output = torch_experts(a_full, w1_full, w2_full,
                                         topk_weights, topk_ids)
            pplx_output = pplx_cutlass_moe(pgi, dp_size, a, w1, w2, w1_scale,
                                           w2_scale, topk_weights, topk_ids,
                                           a1_scale, out_dtype, per_act_token,
                                           per_out_ch, group_name)

        torch_output = chunk_by_rank(torch_output, pgi.rank,
                                     pgi.world_size).to(pplx_output.device)

        # Uncomment if more debugging is needed
        # print("PPLX OUT:", pplx_output)
        # print("TORCH OUT:", torch_output)

        torch.testing.assert_close(pplx_output,
                                   torch_output,
                                   atol=0.05,
                                   rtol=0)
    finally:
        if use_internode:
            nvshmem_finalize()
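

# End-to-end test on 2 GPUs: quantize per-expert fp8 weights, then verify
# the PPLX + CUTLASS MoE against a torch reference built from the
# dequantized weights.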
@pytest.mark.parametrize("m", [2, 224])
@pytest.mark.parametrize("n", [3072])
@pytest.mark.parametrize("k", [1536])
@pytest.mark.parametrize("e", NUM_EXPERTS)
@pytest.mark.parametrize("topk", TOP_KS)
@pytest.mark.parametrize("per_act_token", [True, False])
@pytest.mark.parametrize("per_out_ch", [True, False])
@pytest.mark.parametrize("world_dp_size", [[2, 1]])  #, [4, 2]])
@pytest.mark.parametrize("use_internode", [False])
@multi_gpu_test(num_gpus=2)
@pytest.mark.skipif(
    (lambda x: x is None or not ops.cutlass_group_gemm_supported(x.to_int()))(
        current_platform.get_device_capability()),
    reason="Grouped gemm is not supported on this GPU type.")
@requires_pplx
def test_cutlass_moe_pplx(
    m: int,
    n: int,
    k: int,
    e: int,
    topk: int,
    per_act_token: bool,
    per_out_ch: bool,
    world_dp_size: tuple[int, int],
    use_internode: bool,
):
    current_platform.seed_everything(7)

    with set_current_vllm_config(vllm_config):

        dtype = torch.half

        a = torch.randn((m, k), device="cuda", dtype=dtype) / 10.0
        w1 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) / 10.0
        w2 = torch.randn((e, k, n), device="cuda", dtype=dtype) / 10.0

        n_b_scales = 2 * n if per_out_ch else 1
        k_b_scales = k if per_out_ch else 1

        w1_q = torch.empty((e, 2 * n, k),
                           device="cuda",
                           dtype=torch.float8_e4m3fn)
        w2_q = torch.empty((e, k, n), device="cuda", dtype=torch.float8_e4m3fn)
        w1_scale = torch.empty((e, n_b_scales, 1),
                               device="cuda",
                               dtype=torch.float32)
        w2_scale = torch.empty((e, k_b_scales, 1),
                               device="cuda",
                               dtype=torch.float32)
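
        # Quantize each expert's weights to fp8, with per-output-channel
        # scales when per_out_ch is set and per-tensor scales otherwise.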
        for expert in range(e):
            w1_q[expert], w1_scale[expert] = ops.scaled_fp8_quant(
                w1[expert], use_per_token_if_dynamic=per_out_ch)
            w2_q[expert], w2_scale[expert] = ops.scaled_fp8_quant(
                w2[expert], use_per_token_if_dynamic=per_out_ch)
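
        # Dequantized copies of the fp8 weights serve as the reference
        # weights for torch_experts.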
        w1_d = torch.empty_like(w1)
        w2_d = torch.empty_like(w2)
        for expert in range(e):
            w1_d[expert] = (w1_q[expert].float() * w1_scale[expert]).half()
            w2_d[expert] = (w2_q[expert].float() * w2_scale[expert]).half()

        score = torch.randn((m, e), device="cuda", dtype=dtype)
        topk_weights, topk_ids, _ = fused_topk(a,
                                               score,
                                               topk,
                                               renormalize=False)

        world_size, dp_size = world_dp_size
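
        # One activation scale per token for per-token quant; otherwise a
        # single scale, replicated so each rank can index its own row.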
        a_scale1 = torch.randn(
            (m if per_act_token else 1, 1), device="cuda",
            dtype=torch.float32) / 10.0
        if not per_act_token:
            a_scale1 = a_scale1.repeat(world_size, 1)

        parallel_launch(world_size, _pplx_moe, dp_size, a, w1_q, w2_q,
                        w1_scale, w2_scale, topk_weights, topk_ids, a_scale1,
                        dtype, a, w1_d, w2_d, per_act_token, per_out_ch,
                        use_internode)