#pragma once

#include "../../../attention/attention_dtypes.h"
#include <assert.h>
#include <float.h>
#include <stdint.h>
#include <type_traits>

namespace vllm {
#ifndef USE_ROCM

namespace fp8 {
#ifdef ENABLE_FP8

template <typename Tout, typename Tin>
__inline__ __device__ Tout vec_conversion(
    const Tin& x, const __nv_fp8_interpretation_t fp8_type = __NV_E4M3) {
  return x;
}
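
// The unspecialized template above is an identity pass-through (meaningful
// when Tout == Tin); the specializations below, and their
// scaled_vec_conversion counterparts, do the real fp8 <-> half/bf16/float
// packing and unpacking work.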

// float -> c10::Float8_e4m3fn
template <>
__inline__ __device__ c10::Float8_e4m3fn
vec_conversion<c10::Float8_e4m3fn, float>(
    const float& a, const __nv_fp8_interpretation_t fp8_type) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
  return static_cast<c10::Float8_e4m3fn>(a);
#else
  return c10::Float8_e4m3fn(__nv_cvt_float_to_fp8(a, __NV_SATFINITE, fp8_type),
                            c10::Float8_e4m3fn::from_bits());
#endif
}
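
// Note: pre-SM80 builds take c10's software cast above; SM80+ builds convert
// with hardware saturation (__NV_SATFINITE) and reinterpret the returned raw
// byte as a c10 fp8 value via from_bits().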

#if 0  // Disable the following code to reduce the binary size.
// fp8 -> half
template <>
__inline__ __device__ uint16_t vec_conversion<uint16_t, uint8_t>(
    const uint8_t &a, const __nv_fp8_interpretation_t fp8_type) {
  __half_raw res = __nv_cvt_fp8_to_halfraw(a, fp8_type);
  return res.x;
}

// fp8x2 -> half2
template <>
__inline__ __device__ uint32_t vec_conversion<uint32_t, uint16_t>(
    const uint16_t &a, const __nv_fp8_interpretation_t fp8_type) {
  union {
    uint16_t u16[2];
    uint32_t u32;
  } tmp;
  __half2_raw res = __nv_cvt_fp8x2_to_halfraw2(a, fp8_type);
  tmp.u16[0] = res.x;
  tmp.u16[1] = res.y;
  return tmp.u32;
}

// fp8x4 -> half2x2
template <>
__inline__ __device__ uint2 vec_conversion<uint2, uint32_t>(
    const uint32_t &a, const __nv_fp8_interpretation_t fp8_type) {
  union {
    uint2 u32x2;
    uint32_t u32[2];
  } tmp;
  tmp.u32[0] = vec_conversion<uint32_t, uint16_t>((uint16_t)a, fp8_type);
  tmp.u32[1] =
      vec_conversion<uint32_t, uint16_t>((uint16_t)(a >> 16U), fp8_type);
  return tmp.u32x2;
}

// fp8x8 -> half2x4
template <>
__inline__ __device__ uint4 vec_conversion<uint4, uint2>(
    const uint2 &a, const __nv_fp8_interpretation_t fp8_type) {
  union {
    uint4 u64x2;
    uint2 u64[2];
  } tmp;
  tmp.u64[0] = vec_conversion<uint2, uint32_t>(a.x, fp8_type);
  tmp.u64[1] = vec_conversion<uint2, uint32_t>(a.y, fp8_type);
  return tmp.u64x2;
}

// fp8 -> __nv_bfloat16
template <>
__inline__ __device__ __nv_bfloat16 vec_conversion<__nv_bfloat16, uint8_t>(
    const uint8_t &a, const __nv_fp8_interpretation_t fp8_type) {
  // Note there is no direct convert function from fp8 to bf16.
  // fp8 -> half
  __half_raw res = __nv_cvt_fp8_to_halfraw(a, fp8_type);
  // half -> float -> bf16
  float tmp = half_to_float(res.x);
  return __float2bfloat16(tmp);
}

// fp8x2 -> __nv_bfloat162
template <>
__inline__ __device__ __nv_bfloat162 vec_conversion<__nv_bfloat162, uint16_t>(
    const uint16_t &a, const __nv_fp8_interpretation_t fp8_type) {
  __nv_bfloat162 res;
  res.x = vec_conversion<__nv_bfloat16, uint8_t>((uint8_t)a, fp8_type);
  res.y = vec_conversion<__nv_bfloat16, uint8_t>((uint8_t)(a >> 8U), fp8_type);
  return res;
}

// fp8x4 -> bf16_4_t
template <>
__inline__ __device__ bf16_4_t vec_conversion<bf16_4_t, uint32_t>(
    const uint32_t &a, const __nv_fp8_interpretation_t fp8_type) {
  bf16_4_t res;
  res.x = vec_conversion<__nv_bfloat162, uint16_t>((uint16_t)a, fp8_type);
  res.y =
      vec_conversion<__nv_bfloat162, uint16_t>((uint16_t)(a >> 16U), fp8_type);
  return res;
}

// fp8x8 -> bf16_8_t
template <>
__inline__ __device__ bf16_8_t vec_conversion<bf16_8_t, uint2>(
    const uint2 &a, const __nv_fp8_interpretation_t fp8_type) {
  bf16_4_t tmp1, tmp2;
  tmp1 = vec_conversion<bf16_4_t, uint32_t>(a.x, fp8_type);
  tmp2 = vec_conversion<bf16_4_t, uint32_t>(a.y, fp8_type);
  bf16_8_t res;
  res.x = tmp1.x;
  res.y = tmp1.y;
  res.z = tmp2.x;
  res.w = tmp2.y;
  return res;
}

// fp8 -> float
template <>
__inline__ __device__ float
vec_conversion<float, uint8_t>(const uint8_t &a,
                               const __nv_fp8_interpretation_t fp8_type) {
  // fp8 -> half
  uint16_t tmp = vec_conversion<uint16_t, uint8_t>(a, fp8_type);
  // half -> float
  return half_to_float(tmp);
}

// fp8x2 -> float2
template <>
__inline__ __device__ float2 vec_conversion<float2, uint16_t>(
    const uint16_t &a, const __nv_fp8_interpretation_t fp8_type) {
  // fp8x2 -> half2
  uint32_t tmp = vec_conversion<uint32_t, uint16_t>(a, fp8_type);
  // half2 -> float2
  return half2_to_float2(tmp);
}

// fp8x4 -> float4
template <>
__inline__ __device__ Float4_ vec_conversion<Float4_, uint32_t>(
    const uint32_t &a, const __nv_fp8_interpretation_t fp8_type) {
  Float4_ res;
  res.x = vec_conversion<float2, uint16_t>((uint16_t)a, fp8_type);
  res.y = vec_conversion<float2, uint16_t>((uint16_t)(a >> 16U), fp8_type);
  return res;
}

// fp8x8 -> float8
template <>
__inline__ __device__ Float8_ vec_conversion<Float8_, uint2>(
    const uint2 &a, const __nv_fp8_interpretation_t fp8_type) {
  Float4_ tmp1, tmp2;
  tmp1 = vec_conversion<Float4_, uint32_t>(a.x, fp8_type);
  tmp2 = vec_conversion<Float4_, uint32_t>(a.y, fp8_type);
  Float8_ res;
  res.x = tmp1.x;
  res.y = tmp1.y;
  res.z = tmp2.x;
  res.w = tmp2.y;
  return res;
}

// half -> fp8
template <>
__inline__ __device__ uint8_t vec_conversion<uint8_t, uint16_t>(
    const uint16_t &a, const __nv_fp8_interpretation_t fp8_type) {
  __half_raw tmp;
  tmp.x = a;
  __nv_fp8_storage_t res =
      __nv_cvt_halfraw_to_fp8(tmp, __NV_SATFINITE, fp8_type);
  return (uint8_t)res;
}

// bf16 -> fp8
template <>
__inline__ __device__ uint8_t vec_conversion<uint8_t, __nv_bfloat16>(
    const __nv_bfloat16 &a, const __nv_fp8_interpretation_t fp8_type) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
  assert(false);
#else
  __nv_fp8_storage_t res = __nv_cvt_bfloat16raw_to_fp8(
      __nv_bfloat16_raw(a), __NV_SATFINITE, fp8_type);
  return (uint8_t)res;
#endif
}

// float -> fp8
template <>
__inline__ __device__ uint8_t vec_conversion<uint8_t, float>(
    const float &a, const __nv_fp8_interpretation_t fp8_type) {
  __nv_fp8_storage_t res = __nv_cvt_float_to_fp8(a, __NV_SATFINITE, fp8_type);
  return (uint8_t)res;
}

// fp8x4 -> float4
template <>
__inline__ __device__ float4 vec_conversion<float4, uint32_t>(
    const uint32_t &a, const __nv_fp8_interpretation_t fp8_type) {
  Float4_ tmp = vec_conversion<Float4_, uint32_t>(a, fp8_type);
  float4 res = make_float4(tmp.x.x, tmp.x.y, tmp.y.x, tmp.y.y);
  return res;
}

template <>
__inline__ __device__ uint32_t vec_conversion<uint32_t, float2>(
    const float2 &a, const __nv_fp8_interpretation_t fp8_type) {
  union {
    half2 float16;
    uint32_t uint32;
  };

  float16 = __float22half2_rn(a);
  return uint32;
}
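
// The anonymous union above type-puns the rounded half2 into its raw 32-bit
// pattern; packed fp16 vectors are carried through this file as
// uint16_t/uint32_t/uint2/uint4.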

template <>
__inline__ __device__ uint2 vec_conversion<uint2, Float4_>(
    const Float4_ &a, const __nv_fp8_interpretation_t fp8_type) {
  uint2 b;
  float2 val;
  val.x = a.x.x;
  val.y = a.x.y;
  b.x = vec_conversion<uint32_t, float2>(val, fp8_type);

  val.x = a.y.x;
  val.y = a.y.y;
  b.y = vec_conversion<uint32_t, float2>(val, fp8_type);

  return b;
}

template <>
__inline__ __device__ float4 vec_conversion<float4, Float4_>(
    const Float4_ &a, const __nv_fp8_interpretation_t fp8_type) {
  float4 b;
  b.x = a.x.x;
  b.y = a.x.y;
  b.z = a.y.x;
  b.w = a.y.y;
  return b;
}

template <>
__inline__ __device__ uint4 vec_conversion<uint4, Float8_>(
    const Float8_ &a, const __nv_fp8_interpretation_t fp8_type) {
  uint4 b;
  b.x = vec_conversion<uint32_t, float2>(a.x, fp8_type);
  b.y = vec_conversion<uint32_t, float2>(a.y, fp8_type);
  b.z = vec_conversion<uint32_t, float2>(a.z, fp8_type);
  b.w = vec_conversion<uint32_t, float2>(a.w, fp8_type);
  return b;
}

template <>
__inline__ __device__ __nv_bfloat162 vec_conversion<__nv_bfloat162, float2>(
    const float2 &a, const __nv_fp8_interpretation_t fp8_type) {
  __nv_bfloat162 b;
  from_float(b, a);
  return b;
}

template <>
__inline__ __device__ bf16_4_t vec_conversion<bf16_4_t, Float4_>(
    const Float4_ &a, const __nv_fp8_interpretation_t fp8_type) {
  bf16_4_t b;
  from_float(b, a);
  return b;
}

template <>
__inline__ __device__ bf16_8_t vec_conversion<bf16_8_t, Float8_>(
    const Float8_ &a, const __nv_fp8_interpretation_t fp8_type) {
  bf16_8_t b;
  from_float(b, a);
  return b;
}
#endif

/* Scaled and vectorized conversions, for data exchange between the high- and
   low-precision domains.

   Convention for the scale in this API: FP8_data = Quantize(HP_data / scale),
   i.e.
     Quantize(HP / scale) => FP8
     Dequant(FP8) * scale => HP
 */
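
// Worked example of the convention (illustrative numbers only): with
// scale = 2.0f, a value HP = 6.0f is stored as Quantize(6.0f / 2.0f), i.e.
// the fp8 encoding of 3.0f, and read back as Dequant(fp8) * 2.0f == 6.0f
// (exactly here, since 3.0 is representable in both e4m3 and e5m2).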

template <typename Tout, typename Tin>
__inline__ __device__ Tout scaled_vec_conversion(
    const Tin& x, const float scale, const __nv_fp8_interpretation_t fp8_type) {
  return x;
}

// fp8 -> half
template <>
__inline__ __device__ uint16_t scaled_vec_conversion<uint16_t, uint8_t>(
    const uint8_t& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  __half_raw tmp = __nv_cvt_fp8_to_halfraw(a, fp8_type);
  return float_to_half(half_to_float(tmp.x) * scale);
}

// fp8x2 -> half2
template <>
__inline__ __device__ uint32_t scaled_vec_conversion<uint32_t, uint16_t>(
    const uint16_t& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  union {
    uint16_t u16[2];
    uint32_t u32;
  } tmp;
  __half2_raw res = __nv_cvt_fp8x2_to_halfraw2(a, fp8_type);
  tmp.u16[0] = float_to_half(half_to_float(res.x) * scale);
  tmp.u16[1] = float_to_half(half_to_float(res.y) * scale);
  return tmp.u32;
}

// fp8x4 -> half2x2
template <>
__inline__ __device__ uint2 scaled_vec_conversion<uint2, uint32_t>(
    const uint32_t& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  union {
    uint2 u32x2;
    uint32_t u32[2];
  } tmp;
  tmp.u32[0] =
      scaled_vec_conversion<uint32_t, uint16_t>((uint16_t)a, scale, fp8_type);
  tmp.u32[1] = scaled_vec_conversion<uint32_t, uint16_t>((uint16_t)(a >> 16U),
                                                         scale, fp8_type);
  return tmp.u32x2;
}

// fp8x8 -> half2x4
template <>
__inline__ __device__ uint4
scaled_vec_conversion<uint4, uint2>(const uint2& a, const float scale,
                                    const __nv_fp8_interpretation_t fp8_type) {
  union {
    uint4 u64x2;
    uint2 u64[2];
  } tmp;
  tmp.u64[0] = scaled_vec_conversion<uint2, uint32_t>(a.x, scale, fp8_type);
  tmp.u64[1] = scaled_vec_conversion<uint2, uint32_t>(a.y, scale, fp8_type);
  return tmp.u64x2;
}

// fp8 -> __nv_bfloat16
template <>
__inline__ __device__ __nv_bfloat16
scaled_vec_conversion<__nv_bfloat16, uint8_t>(
    const uint8_t& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  // Note there is no direct convert function from fp8 to bf16.
  // fp8 -> half
  __half_raw res = __nv_cvt_fp8_to_halfraw(a, fp8_type);
  // half -> float -> bf16
  float tmp = half_to_float(res.x);
  return __float2bfloat16(tmp * scale);
}

// fp8x2 -> __nv_bfloat162
template <>
__inline__ __device__ __nv_bfloat162
scaled_vec_conversion<__nv_bfloat162, uint16_t>(
    const uint16_t& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  __nv_bfloat162 res;
  res.x = scaled_vec_conversion<__nv_bfloat16, uint8_t>((uint8_t)a, scale,
                                                        fp8_type);
  res.y = scaled_vec_conversion<__nv_bfloat16, uint8_t>((uint8_t)(a >> 8U),
                                                        scale, fp8_type);
  return res;
}

// fp8x4 -> bf16_4_t
template <>
__inline__ __device__ bf16_4_t scaled_vec_conversion<bf16_4_t, uint32_t>(
    const uint32_t& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  bf16_4_t res;
  res.x = scaled_vec_conversion<__nv_bfloat162, uint16_t>((uint16_t)a, scale,
                                                          fp8_type);
  res.y = scaled_vec_conversion<__nv_bfloat162, uint16_t>((uint16_t)(a >> 16U),
                                                          scale, fp8_type);
  return res;
}

// fp8x8 -> bf16_8_t
template <>
__inline__ __device__ bf16_8_t scaled_vec_conversion<bf16_8_t, uint2>(
    const uint2& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  bf16_4_t tmp1, tmp2;
  tmp1 = scaled_vec_conversion<bf16_4_t, uint32_t>(a.x, scale, fp8_type);
  tmp2 = scaled_vec_conversion<bf16_4_t, uint32_t>(a.y, scale, fp8_type);
  bf16_8_t res;
  res.x = tmp1.x;
  res.y = tmp1.y;
  res.z = tmp2.x;
  res.w = tmp2.y;
  return res;
}

// fp8 -> float
template <>
__inline__ __device__ float scaled_vec_conversion<float, uint8_t>(
    const uint8_t& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  // fp8 -> half
  __half_raw res = __nv_cvt_fp8_to_halfraw(a, fp8_type);
  uint16_t tmp = res.x;

  // half -> float
  return half_to_float(tmp) * scale;
}

// fp8x2 -> float2
template <>
__inline__ __device__ float2 scaled_vec_conversion<float2, uint16_t>(
    const uint16_t& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  // fp8x2 -> half2
  uint32_t tmp = scaled_vec_conversion<uint32_t, uint16_t>(a, scale, fp8_type);
  // half2 -> float2
  return half2_to_float2(tmp);
}

// fp8x4 -> float4
template <>
__inline__ __device__ Float4_ scaled_vec_conversion<Float4_, uint32_t>(
    const uint32_t& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  Float4_ res;
  res.x = scaled_vec_conversion<float2, uint16_t>((uint16_t)a, scale, fp8_type);
  res.y = scaled_vec_conversion<float2, uint16_t>((uint16_t)(a >> 16U), scale,
                                                  fp8_type);
  return res;
}

// fp8x8 -> float8
template <>
__inline__ __device__ Float8_ scaled_vec_conversion<Float8_, uint2>(
    const uint2& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  Float4_ tmp1, tmp2;
  tmp1 = scaled_vec_conversion<Float4_, uint32_t>(a.x, scale, fp8_type);
  tmp2 = scaled_vec_conversion<Float4_, uint32_t>(a.y, scale, fp8_type);
  Float8_ res;
  res.x = tmp1.x;
  res.y = tmp1.y;
  res.z = tmp2.x;
  res.w = tmp2.y;
  return res;
}

// half -> fp8
template <>
__inline__ __device__ uint8_t scaled_vec_conversion<uint8_t, uint16_t>(
    const uint16_t& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  __nv_fp8_storage_t res =
      __nv_cvt_float_to_fp8(half_to_float(a) / scale, __NV_SATFINITE, fp8_type);
  return (uint8_t)res;
}

// bf16 -> fp8
template <>
__inline__ __device__ uint8_t scaled_vec_conversion<uint8_t, __nv_bfloat16>(
    const __nv_bfloat16& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 800
  assert(false);
#else
  __nv_fp8_storage_t res = __nv_cvt_float_to_fp8(__bfloat162float(a) / scale,
                                                 __NV_SATFINITE, fp8_type);
  return (uint8_t)res;
#endif
  __builtin_unreachable();  // Suppress missing return statement warning
}

// float -> fp8
template <>
__inline__ __device__ uint8_t scaled_vec_conversion<uint8_t, float>(
    const float& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  __nv_fp8_storage_t res =
      __nv_cvt_float_to_fp8(a / scale, __NV_SATFINITE, fp8_type);
  return (uint8_t)res;
}

// fp8x4 -> float4
template <>
__inline__ __device__ float4 scaled_vec_conversion<float4, uint32_t>(
    const uint32_t& a, const float scale,
    const __nv_fp8_interpretation_t fp8_type) {
  Float4_ tmp = scaled_vec_conversion<Float4_, uint32_t>(a, scale, fp8_type);
  float4 res = make_float4(tmp.x.x, tmp.x.y, tmp.y.x, tmp.y.y);
  return res;
}
#endif  // ENABLE_FP8

template <typename Tout, typename Tin, Fp8KVCacheDataType kv_dt>
__inline__ __device__ Tout convert(const Tin& x) {
#if 0  // Disable the following code to reduce the binary size.
  if constexpr (kv_dt == Fp8KVCacheDataType::kFp8E4M3) {
    return vec_conversion<Tout, Tin>(x, __NV_E4M3);
  } else if constexpr (kv_dt == Fp8KVCacheDataType::kFp8E5M2) {
    return vec_conversion<Tout, Tin>(x, __NV_E5M2);
  }
#endif
  assert(false);
  __builtin_unreachable();  // Suppress missing return statement warning
}
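
// With the block above compiled out to reduce binary size, convert() traps at
// runtime; scaled_convert() below is the path that remains functional (when
// ENABLE_FP8 is defined).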

template <typename Tout, typename Tin, Fp8KVCacheDataType kv_dt>
__inline__ __device__ Tout scaled_convert(const Tin& x, const float scale) {
#ifdef ENABLE_FP8
  if constexpr (kv_dt == Fp8KVCacheDataType::kFp8E4M3) {
    return scaled_vec_conversion<Tout, Tin>(x, scale, __NV_E4M3);
  } else if constexpr (kv_dt == Fp8KVCacheDataType::kFp8E5M2) {
    return scaled_vec_conversion<Tout, Tin>(x, scale, __NV_E5M2);
  }
#endif
  assert(false);
  __builtin_unreachable();  // Suppress missing return statement warning
}
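
// Illustrative use (kv_cache, idx, and scale are hypothetical names, not part
// of this header): dequantize one fp8 KV-cache byte back to a packed half
// value inside a kernel, assuming an E4M3 cache:
//
//   uint8_t q = kv_cache[idx];
//   uint16_t h = scaled_convert<uint16_t, uint8_t,
//                               Fp8KVCacheDataType::kFp8E4M3>(q, scale);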

// The following macro is used to dispatch the conversion function based on
// the data type of the key and value cache. FN is a macro that invokes a
// function templated on <typename scalar_t, typename cache_t,
// Fp8KVCacheDataType kv_dt>.
#define DISPATCH_BY_KV_CACHE_DTYPE(SRC_DTYPE, KV_DTYPE, FN)                  \
  if (KV_DTYPE == "auto") {                                                  \
    if (SRC_DTYPE == at::ScalarType::Float) {                                \
      FN(float, float, vllm::Fp8KVCacheDataType::kAuto);                     \
    } else if (SRC_DTYPE == at::ScalarType::Half) {                          \
      FN(uint16_t, uint16_t, vllm::Fp8KVCacheDataType::kAuto);               \
    } else if (SRC_DTYPE == at::ScalarType::BFloat16) {                      \
      FN(__nv_bfloat16, __nv_bfloat16, vllm::Fp8KVCacheDataType::kAuto);     \
    } else {                                                                 \
      TORCH_CHECK(false, "Unsupported input type of kv cache: ", SRC_DTYPE); \
    }                                                                        \
  } else {                                                                   \
    if (KV_DTYPE == "fp8" || KV_DTYPE == "fp8_e4m3") {                       \
      if (SRC_DTYPE == at::ScalarType::Float) {                              \
        FN(float, uint8_t, vllm::Fp8KVCacheDataType::kFp8E4M3);              \
      } else if (SRC_DTYPE == at::ScalarType::Half) {                        \
        FN(uint16_t, uint8_t, vllm::Fp8KVCacheDataType::kFp8E4M3);           \
      } else if (SRC_DTYPE == at::ScalarType::BFloat16) {                    \
        FN(__nv_bfloat16, uint8_t, vllm::Fp8KVCacheDataType::kFp8E4M3);      \
      } else {                                                               \
        TORCH_CHECK(false,                                                   \
                    "Unsupported input type of kv cache: ", SRC_DTYPE);      \
      }                                                                      \
    } else if (KV_DTYPE == "fp8_e5m2") {                                     \
      if (SRC_DTYPE == at::ScalarType::Float) {                              \
        FN(float, uint8_t, vllm::Fp8KVCacheDataType::kFp8E5M2);              \
      } else if (SRC_DTYPE == at::ScalarType::Half) {                        \
        FN(uint16_t, uint8_t, vllm::Fp8KVCacheDataType::kFp8E5M2);           \
      } else if (SRC_DTYPE == at::ScalarType::BFloat16) {                    \
        FN(__nv_bfloat16, uint8_t, vllm::Fp8KVCacheDataType::kFp8E5M2);      \
      } else {                                                               \
        TORCH_CHECK(false,                                                   \
                    "Unsupported input type of kv cache: ", SRC_DTYPE);      \
      }                                                                      \
    } else {                                                                 \
      TORCH_CHECK(false, "Unsupported data type of kv cache: ", KV_DTYPE);   \
    }                                                                        \
  }
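
// Hypothetical usage sketch (CALL_COPY_KERNEL, copy_to_cache_kernel, and the
// launch arguments are illustrative, not part of this header): dispatch a
// cache-conversion kernel on the runtime source and cache dtypes.
//
//   #define CALL_COPY_KERNEL(SCALAR_T, CACHE_T, KV_DT) \
//     copy_to_cache_kernel<SCALAR_T, CACHE_T, KV_DT>   \
//         <<<grid, block, 0, stream>>>(src_ptr, cache_ptr, scale);
//   DISPATCH_BY_KV_CACHE_DTYPE(src.scalar_type(), kv_cache_dtype,
//                              CALL_COPY_KERNEL);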

}  // namespace fp8
#endif  // not USE_ROCM
}  // namespace vllm