#pragma once

#include <optional>
#include <vector>

#include <torch/library.h>

#include "core/scalar_type.hpp"
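
// Returns a non-owning alias of a CUDA tensor: the result is built with
// torch::from_blob over the same storage, so it shares the data but does not
// extend its lifetime.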
torch::Tensor weak_ref_tensor(torch::Tensor& tensor) {
  // Ensure tensor is on CUDA
  if (!tensor.is_cuda()) {
    throw std::runtime_error("Tensor must be on CUDA device");
  }

  // Get the raw data pointer
  void* data_ptr = tensor.data_ptr();

  // Get tensor sizes and strides
  std::vector<int64_t> sizes = tensor.sizes().vec();
  std::vector<int64_t> strides = tensor.strides().vec();

  // Get tensor options (dtype, device)
  auto options = tensor.options();

  // Create a new tensor from the raw data pointer
  auto new_tensor = torch::from_blob(data_ptr, sizes, strides, options);

  return new_tensor;
}
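
// PagedAttention decode kernels. V1 processes each sequence in a single pass;
// V2 splits long sequences into partitions and reduces the partial results
// via exp_sums / max_logits / tmp_out.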
void paged_attention_v1(
    torch::Tensor& out, torch::Tensor& query, torch::Tensor& key_cache,
    torch::Tensor& value_cache, int64_t num_kv_heads, double scale,
    torch::Tensor& block_tables, torch::Tensor& seq_lens, int64_t block_size,
    int64_t max_seq_len, const std::optional<torch::Tensor>& alibi_slopes,
    const std::string& kv_cache_dtype, torch::Tensor& k_scale,
    torch::Tensor& v_scale, const int64_t tp_rank,
    const int64_t blocksparse_local_blocks,
    const int64_t blocksparse_vert_stride, const int64_t blocksparse_block_size,
    const int64_t blocksparse_head_sliding_step);

void paged_attention_v2(
    torch::Tensor& out, torch::Tensor& exp_sums, torch::Tensor& max_logits,
    torch::Tensor& tmp_out, torch::Tensor& query, torch::Tensor& key_cache,
    torch::Tensor& value_cache, int64_t num_kv_heads, double scale,
    torch::Tensor& block_tables, torch::Tensor& seq_lens, int64_t block_size,
    int64_t max_seq_len, const std::optional<torch::Tensor>& alibi_slopes,
    const std::string& kv_cache_dtype, torch::Tensor& k_scale,
    torch::Tensor& v_scale, const int64_t tp_rank,
    const int64_t blocksparse_local_blocks,
    const int64_t blocksparse_vert_stride, const int64_t blocksparse_block_size,
    const int64_t blocksparse_head_sliding_step);
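
// Non-ROCm attention helpers: merge_attn_states combines prefix and suffix
// attention outputs using their log-sum-exp values; the two
// convert_vertical_slash_indexes variants build the block/column index
// metadata used by vertical-slash sparse attention.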
#ifndef USE_ROCM
void merge_attn_states(torch::Tensor& output,
                       std::optional<torch::Tensor> output_lse,
                       const torch::Tensor& prefix_output,
                       const torch::Tensor& prefix_lse,
                       const torch::Tensor& suffix_output,
                       const torch::Tensor& suffix_lse);

void convert_vertical_slash_indexes(
    torch::Tensor& block_count,      // [BATCH, N_HEADS, NUM_ROWS]
    torch::Tensor& block_offset,     // [BATCH, N_HEADS, NUM_ROWS, NNZ_S]
    torch::Tensor& column_count,     // [BATCH, N_HEADS, NUM_ROWS]
    torch::Tensor& column_index,     // [BATCH, N_HEADS, NUM_ROWS, NNZ_V]
    torch::Tensor q_seqlens,         // [BATCH, ]
    torch::Tensor kv_seqlens,        // [BATCH, ]
    torch::Tensor vertical_indexes,  // [BATCH, N_HEADS, NNZ_V]
    torch::Tensor slash_indexes,     // [BATCH, N_HEADS, NNZ_S]
    int64_t context_size, int64_t block_size_M, int64_t block_size_N,
    bool causal);

void convert_vertical_slash_indexes_mergehead(
    torch::Tensor& block_count,            // [BATCH, N_HEADS, NUM_ROWS]
    torch::Tensor& block_offset,           // [BATCH, N_HEADS, NUM_ROWS, NNZ_S]
    torch::Tensor& column_count,           // [BATCH, N_HEADS, NUM_ROWS]
    torch::Tensor& column_index,           // [BATCH, N_HEADS, NUM_ROWS, NNZ_V]
    torch::Tensor q_seqlens,               // [BATCH, ]
    torch::Tensor kv_seqlens,              // [BATCH, ]
    torch::Tensor vertical_indexes,        // [BATCH, N_HEADS, NNZ_V]
    torch::Tensor slash_indexes,           // [BATCH, N_HEADS, NNZ_S]
    torch::Tensor vertical_indices_count,  // [N_HEADS, ]
    torch::Tensor slash_indices_count, int64_t context_size,
    int64_t block_size_M, int64_t block_size_N, bool causal);
#endif
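
// Normalization kernels (RMSNorm, fused residual-add RMSNorm, PolyNorm) and
// in-place repetition-penalty application on logits.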
void rms_norm(torch::Tensor& out, torch::Tensor& input, torch::Tensor& weight,
              double epsilon);

void fused_add_rms_norm(torch::Tensor& input, torch::Tensor& residual,
                        torch::Tensor& weight, double epsilon);

void poly_norm(torch::Tensor& out, torch::Tensor& input, torch::Tensor& weight,
               torch::Tensor& bias, double epsilon);

void apply_repetition_penalties_(torch::Tensor& logits,
                                 const torch::Tensor& prompt_mask,
                                 const torch::Tensor& output_mask,
                                 const torch::Tensor& repetition_penalties);
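
// Fused normalization + quantization: RMSNorm (optionally fused with the
// residual add) followed by static FP8 quantization or dynamic per-token
// quantization.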
void rms_norm_static_fp8_quant(torch::Tensor& out, torch::Tensor& input,
                               torch::Tensor& weight, torch::Tensor& scale,
                               double epsilon);

void fused_add_rms_norm_static_fp8_quant(torch::Tensor& out,
                                         torch::Tensor& input,
                                         torch::Tensor& residual,
                                         torch::Tensor& weight,
                                         torch::Tensor& scale, double epsilon);

void rms_norm_dynamic_per_token_quant(torch::Tensor& out,
                                      torch::Tensor const& input,
                                      torch::Tensor const& weight,
                                      torch::Tensor& scales,
                                      double const epsilon,
                                      std::optional<torch::Tensor> scale_ub,
                                      std::optional<torch::Tensor> residual);
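
// Applies rotary positional embeddings to query (and optionally key) in place
// using a precomputed cos/sin cache; is_neox selects the GPT-NeoX rotation
// layout.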
void rotary_embedding(torch::Tensor& positions, torch::Tensor& query,
                      std::optional<torch::Tensor> key, int64_t head_size,
                      torch::Tensor& cos_sin_cache, bool is_neox);
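
// Fused activation kernels for gated MLPs (act(x[..., :d]) * x[..., d:]),
// their quantizing variants, and plain GELU approximations.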
void silu_and_mul(torch::Tensor& out, torch::Tensor& input);

void silu_and_mul_quant(torch::Tensor& out, torch::Tensor& input,
                        torch::Tensor& scale);

#ifndef USE_ROCM
void silu_and_mul_nvfp4_quant(torch::Tensor& out,
                              torch::Tensor& output_block_scale,
                              torch::Tensor& input,
                              torch::Tensor& input_global_scale);
#endif

void silu_mul_fp8_quant_deep_gemm_cuda(
    const at::Tensor& input,   // (E, T, 2*H)
    const at::Tensor& counts,  // (E)
    at::Tensor& y_q,           // (E, T, H) [OUT]
    at::Tensor& y_s,           // (E, T, H//group_size) [OUT]
    int64_t group_size, bool use_ue8m0, int64_t num_parallel_tokens);

void mul_and_silu(torch::Tensor& out, torch::Tensor& input);

void gelu_and_mul(torch::Tensor& out, torch::Tensor& input);

void gelu_tanh_and_mul(torch::Tensor& out, torch::Tensor& input);

void fatrelu_and_mul(torch::Tensor& out, torch::Tensor& input,
                     double threshold);

void swigluoai_and_mul(torch::Tensor& out, torch::Tensor& input,
                       double alpha = 1.702, double limit = 7.0);

void gelu_new(torch::Tensor& out, torch::Tensor& input);

void gelu_fast(torch::Tensor& out, torch::Tensor& input);

void gelu_quick(torch::Tensor& out, torch::Tensor& input);
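
// CUTLASS-based MLA (Multi-head Latent Attention) decode over the compressed
// KV cache, and a helper that exposes a CPU tensor as a CUDA-addressable view.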
void cutlass_mla_decode(torch::Tensor const& out, torch::Tensor const& q_nope,
                        torch::Tensor const& q_pe,
                        torch::Tensor const& kv_c_and_k_pe_cache,
                        torch::Tensor const& seq_lens,
                        torch::Tensor const& page_table, double scale);

torch::Tensor get_cuda_view_from_cpu_tensor(torch::Tensor& cpu_tensor);
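
// AWQ 4-bit quantized GEMM / dequantization kernels, plus a generic
// column-permutation helper.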
#ifndef USE_ROCM

torch::Tensor awq_gemm(torch::Tensor _in_feats, torch::Tensor _kernel,
                       torch::Tensor _scaling_factors, torch::Tensor _zeros,
                       int64_t split_k_iters);

torch::Tensor awq_dequantize(torch::Tensor _kernel,
                             torch::Tensor _scaling_factors,
                             torch::Tensor _zeros, int64_t split_k_iters,
                             int64_t thx, int64_t thy);

torch::Tensor permute_cols(torch::Tensor const& A, torch::Tensor const& perm);
#endif
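
// GGML/GGUF quantized-weight kernels: dequantization, quantized matrix-vector
// and matrix-matrix products, and their MoE variants.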
torch::Tensor ggml_dequantize(torch::Tensor W, int64_t type, int64_t m,
                              int64_t n,
                              std::optional<at::ScalarType> const& dtype);

torch::Tensor ggml_mul_mat_vec_a8(torch::Tensor W, torch::Tensor X,
                                  int64_t type, int64_t row);

torch::Tensor ggml_mul_mat_a8(torch::Tensor W, torch::Tensor X, int64_t type,
                              int64_t row);

torch::Tensor ggml_moe_a8(torch::Tensor X, torch::Tensor W,
                          torch::Tensor sorted_token_ids,
                          torch::Tensor expert_ids,
                          torch::Tensor num_tokens_post_padded, int64_t type,
                          int64_t row, int64_t top_k, int64_t tokens);

torch::Tensor ggml_moe_a8_vec(torch::Tensor X, torch::Tensor W,
                              torch::Tensor topk_ids, int64_t top_k,
                              int64_t type, int64_t row, int64_t tokens);

int64_t ggml_moe_get_block_size(int64_t type);
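
// CUTLASS scaled-GEMM support: per-capability feature checks and dense
// FP8/FP4 scaled matrix multiplies.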
#ifndef USE_ROCM

bool cutlass_scaled_mm_supports_fp4(int64_t cuda_device_capability);
bool cutlass_scaled_mm_supports_fp8(int64_t cuda_device_capability);
bool cutlass_scaled_mm_supports_block_fp8(int64_t cuda_device_capability);
bool cutlass_group_gemm_supported(int64_t cuda_device_capability);

void cutlass_scaled_fp4_mm(torch::Tensor& D, torch::Tensor const& A,
                           torch::Tensor const& B, torch::Tensor const& A_sf,
                           torch::Tensor const& B_sf,
                           torch::Tensor const& alpha);

void cutlass_scaled_mm(torch::Tensor& out, torch::Tensor const& a,
                       torch::Tensor const& b, torch::Tensor const& a_scales,
                       torch::Tensor const& b_scales,
                       std::optional<torch::Tensor> const& bias);
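
// Grouped CUTLASS GEMMs for MoE layers and the helpers that build per-expert
// problem sizes and offsets (including the pplx all-to-all layout).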
void cutlass_moe_mm(
    torch::Tensor& out_tensors, torch::Tensor const& a_tensors,
    torch::Tensor const& b_tensors, torch::Tensor const& a_scales,
    torch::Tensor const& b_scales, torch::Tensor const& expert_offsets,
    torch::Tensor const& problem_sizes, torch::Tensor const& a_strides,
    torch::Tensor const& b_strides, torch::Tensor const& c_strides,
    bool per_act_token, bool per_out_ch);

void cutlass_fp4_group_mm(
    torch::Tensor& output, const torch::Tensor& a, const torch::Tensor& b,
    const torch::Tensor& a_blockscale, const torch::Tensor& b_blockscales,
    const torch::Tensor& alphas, const torch::Tensor& problem_sizes,
    const torch::Tensor& expert_offsets, const torch::Tensor& sf_offsets);

void get_cutlass_moe_mm_data(
    const torch::Tensor& topk_ids, torch::Tensor& expert_offsets,
    torch::Tensor& problem_sizes1, torch::Tensor& problem_sizes2,
    torch::Tensor& input_permutation, torch::Tensor& output_permutation,
    const int64_t num_experts, const int64_t n, const int64_t k,
    const std::optional<torch::Tensor>& blockscale_offsets);

void get_cutlass_moe_mm_problem_sizes(
    const torch::Tensor& topk_ids, torch::Tensor& problem_sizes1,
    torch::Tensor& problem_sizes2, const int64_t num_experts, const int64_t n,
    const int64_t k, const std::optional<torch::Tensor>& blockscale_offsets);

void get_cutlass_pplx_moe_mm_data(torch::Tensor& expert_offsets,
                                  torch::Tensor& problem_sizes1,
                                  torch::Tensor& problem_sizes2,
                                  const torch::Tensor& expert_num_tokens,
                                  const int64_t num_local_experts,
                                  const int64_t padded_m, const int64_t n,
                                  const int64_t k);
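
// Scaled GEMM with asymmetric zero-point (azp) support, and structured-sparse
// scaled GEMM together with its weight-compression helper.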
void cutlass_scaled_mm_azp(torch::Tensor& out, torch::Tensor const& a,
                           torch::Tensor const& b,
                           torch::Tensor const& a_scales,
                           torch::Tensor const& b_scales,
                           torch::Tensor const& azp_adj,
                           std::optional<torch::Tensor> const& azp,
                           std::optional<torch::Tensor> const& bias);

bool cutlass_sparse_scaled_mm_supported(int64_t cuda_device_capability);

void cutlass_scaled_sparse_mm(torch::Tensor& out, torch::Tensor const& a,
                              torch::Tensor const& b, torch::Tensor const& e,
                              torch::Tensor const& a_scales,
                              torch::Tensor const& b_scales,
                              std::optional<torch::Tensor> const& bias);

std::vector<torch::Tensor> cutlass_sparse_compress(torch::Tensor const& a);
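
// NVFP4 quantization (per-tensor and per-expert batched) and per-token-group
// FP8 / INT8 quantization helpers.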
void scaled_fp4_quant(torch::Tensor& output, torch::Tensor const& input,
                      torch::Tensor& output_scale,
                      torch::Tensor const& input_scale);

void scaled_fp4_experts_quant(
    torch::Tensor& output, torch::Tensor& output_scale,
    torch::Tensor const& input, torch::Tensor const& input_global_scale,
    torch::Tensor const& input_offset_by_experts,
    torch::Tensor const& output_scale_offset_by_experts);

void per_token_group_quant_fp8(const torch::Tensor& input,
                               torch::Tensor& output_q, torch::Tensor& output_s,
                               int64_t group_size, double eps, double fp8_min,
                               double fp8_max, bool scale_ue8m0);

void per_token_group_quant_int8(const torch::Tensor& input,
                                torch::Tensor& output_q,
                                torch::Tensor& output_s, int64_t group_size,
                                double eps, double int8_min, double int8_max);
#endif
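
// INT8 activation quantization: static per-tensor scale or dynamic scales,
// each with an optional asymmetric zero point (azp).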
void static_scaled_int8_quant(torch::Tensor& out, torch::Tensor const& input,
                              torch::Tensor const& scale,
                              std::optional<torch::Tensor> const& azp);

void dynamic_scaled_int8_quant(torch::Tensor& out, torch::Tensor const& input,
                               torch::Tensor& scales,
                               std::optional<torch::Tensor> const& azp);
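
// GPTQ quantized GEMM (optionally via the exllama kernel) and the weight
// shuffle used to prepare the exllama layout.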
torch::Tensor gptq_gemm(torch::Tensor a, torch::Tensor b_q_weight,
                        torch::Tensor b_gptq_qzeros,
                        torch::Tensor b_gptq_scales, torch::Tensor b_g_idx,
                        bool use_exllama, int64_t bit);

void gptq_shuffle(torch::Tensor q_weight, torch::Tensor q_perm, int64_t bit);
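
// FP8 activation quantization: static per-tensor scale, dynamic per-tensor
// scale, and dynamic per-token scales with an optional upper bound (scale_ub).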
void static_scaled_fp8_quant(torch::Tensor& out, torch::Tensor const& input,
                             torch::Tensor const& scale);

void dynamic_scaled_fp8_quant(torch::Tensor& out, torch::Tensor const& input,
                              torch::Tensor& scale);

void dynamic_per_token_scaled_fp8_quant(
    torch::Tensor& out, torch::Tensor const& input, torch::Tensor& scale,
    std::optional<torch::Tensor> const& scale_ub);
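
// Mamba selective-scan forward kernel; updates ssm_states in place and uses
// pad_slot_id to mark padded cache slots.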
void selective_scan_fwd(const torch::Tensor& u, const torch::Tensor& delta,
                        const torch::Tensor& A, const torch::Tensor& B,
                        const torch::Tensor& C,
                        const std::optional<torch::Tensor>& D_,
                        const std::optional<torch::Tensor>& z_,
                        const std::optional<torch::Tensor>& delta_bias_,
                        bool delta_softplus,
                        const std::optional<torch::Tensor>& query_start_loc,
                        const std::optional<torch::Tensor>& cache_indices,
                        const std::optional<torch::Tensor>& has_initial_state,
                        const torch::Tensor& ssm_states, int64_t pad_slot_id);
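
// CPU MoE path for dynamically 4-bit-quantized expert weights (packed w13 and
// w2 projections).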
torch::Tensor dynamic_4bit_int_moe_cpu(
    torch::Tensor x, torch::Tensor topk_ids, torch::Tensor topk_weights,
    torch::Tensor w13_packed, torch::Tensor w2_packed, int64_t H, int64_t I,
    int64_t I2, int64_t group_size, bool apply_router_weight_on_input,
    int64_t activation_kind);
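
// Custom all-reduce for tensor parallelism over CUDA IPC buffers: fptr_t
// carries an opaque pointer to the underlying C++ context, and the helpers
// below manage IPC handles, CUDA-graph buffer registration, and shared
// buffers.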
using fptr_t = int64_t;
fptr_t init_custom_ar(const std::vector<int64_t>& fake_ipc_ptrs,
                      torch::Tensor& rank_data, int64_t rank,
                      bool fully_connected);
void all_reduce(fptr_t _fa, torch::Tensor& inp, torch::Tensor& out,
                fptr_t reg_buffer, int64_t reg_buffer_sz_bytes);
void dispose(fptr_t _fa);
int64_t meta_size();
void register_buffer(fptr_t _fa, const std::vector<int64_t>& fake_ipc_ptrs);
std::tuple<std::vector<int64_t>, std::vector<int64_t>>
get_graph_buffer_ipc_meta(fptr_t _fa);
void register_graph_buffers(fptr_t _fa,
                            const std::vector<std::vector<int64_t>>& handles,
                            const std::vector<std::vector<int64_t>>& offsets);
std::tuple<int64_t, torch::Tensor> allocate_shared_buffer_and_handle(
    int64_t size);
int64_t open_mem_handle(torch::Tensor& mem_handle);
void free_shared_buffer(int64_t buffer);
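
// Hadamard-transform kernel (Hadacore); transforms x, optionally in place.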
torch::Tensor hadacore_transform(torch::Tensor& x, bool inplace);
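
// ROCm-only QuickReduce all-reduce: a quantization-assisted all-reduce with a
// selectable quantization level and optional bf16-to-half casting.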
#ifdef USE_ROCM
fptr_t init_custom_qr(int64_t rank, int64_t world_size,
                      std::optional<int64_t> qr_max_size = std::nullopt);
void qr_destroy(fptr_t _fa);
torch::Tensor qr_get_handle(fptr_t _fa);
void qr_open_handles(fptr_t _fa, const std::vector<torch::Tensor>& handles);
void qr_all_reduce(fptr_t _fa, torch::Tensor& inp, torch::Tensor& out,
                   int64_t quant_level, bool cast_bf2half = false);
int64_t qr_max_size();
#endif