# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Test model set-up and weight loading for quark-quantized models.

Run `pytest tests/quantization/test_quark.py`.

See also `tests/kernels/moe/test_mxfp4_moe.py`.
"""

import importlib.metadata
import os
from dataclasses import dataclass
from importlib.util import find_spec

import huggingface_hub
import lm_eval
import pytest
import torch
from packaging import version

from vllm.model_executor.layers.quantization.quark.quark import (  # noqa: E501
    QuarkLinearMethod, QuarkW8A8Fp8, QuarkW8A8Int8)
from vllm.platforms import current_platform

from .reference_mxfp4 import dq_mxfp4_torch, qdq_mxfp4_torch

QUARK_MXFP4_AVAILABLE = find_spec("quark") is not None and version.parse(
    importlib.metadata.version("amd-quark")) >= version.parse('0.8.99')

if QUARK_MXFP4_AVAILABLE:
    from quark.torch.export.nn.modules.realquantizer import (
        StaticScaledRealQuantizer)
    from quark.torch.kernel import mx as mx_kernel
    from quark.torch.quantization.config.config import FP4PerGroupSpec

# Probe one of the private `amd` checkpoints to decide whether the tests
# that need them can run.
try:
    huggingface_hub.list_repo_refs(
        "amd/Llama-3.3-70B-Instruct-WMXFP4-AMXFP4-KVFP8-Scale-UINT8-SQ")
    HF_HUB_AMD_ORG_ACCESS = True
except huggingface_hub.errors.RepositoryNotFoundError:
    HF_HUB_AMD_ORG_ACCESS = False
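
# Whether the probe above succeeds depends on the Hugging Face credentials
# available in the environment (for example, a token with read access to the
# amd organization supplied via `HF_TOKEN`).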


@pytest.fixture(scope="function", autouse=True)
def enable_pickle(monkeypatch):
    """`LLM.apply_model` requires pickling a function."""
    monkeypatch.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1")


@pytest.mark.parametrize('kv_cache_dtype', ['auto', 'fp8'])
@pytest.mark.parametrize('tp', [1])
def test_quark_fp8_w_per_tensor_a_per_tensor(vllm_runner, kv_cache_dtype, tp):
    model_path = "amd/Llama-3.1-8B-Instruct-FP8-KV-Quark-test"
    with vllm_runner(model_path,
                     kv_cache_dtype=kv_cache_dtype,
                     tensor_parallel_size=tp) as llm:

        def check_model(model):
            layer = model.model.layers[0]

            qkv_proj = layer.self_attn.qkv_proj

            assert isinstance(qkv_proj.quant_method, QuarkLinearMethod)
            assert isinstance(qkv_proj.scheme, QuarkW8A8Fp8)

            if isinstance(qkv_proj.scheme, QuarkW8A8Fp8):
                # Per-tensor scheme: input and weight scales are 0-dim
                # scalars.
                assert len(qkv_proj.input_scale.shape) == 0
                assert qkv_proj.weight.dtype is current_platform.fp8_dtype()
                assert len(qkv_proj.weight_scale.shape) == 0

        llm.apply_model(check_model)

        output = llm.generate_greedy("Hello my name is", max_tokens=20)
        assert output


@pytest.mark.parametrize('tp', [1])
def test_quark_fp8_w_per_channel_a_per_token(vllm_runner, tp):
    model_path = "amd/Qwen2.5-1.5B-Instruct-ptpc-Quark-ts"
    with vllm_runner(model_path, tensor_parallel_size=tp) as llm:

        def check_model(model):
            layer = model.model.layers[0]

            qkv_proj = layer.self_attn.qkv_proj

            assert isinstance(qkv_proj.quant_method, QuarkLinearMethod)
            assert isinstance(qkv_proj.scheme, QuarkW8A8Fp8)

            if isinstance(qkv_proj.scheme, QuarkW8A8Fp8):
                # Per-channel weight scales: a column vector with one scale
                # per channel.
                assert qkv_proj.weight.dtype is current_platform.fp8_dtype()
                assert (qkv_proj.weight_scale.shape[0] ==
                        qkv_proj.weight.shape[1])
                assert qkv_proj.weight_scale.shape[1] == 1

        llm.apply_model(check_model)

        output = llm.generate_greedy("Hello my name is", max_tokens=20)
        assert output


@pytest.mark.parametrize('tp', [1])
def test_quark_int8_w_per_tensor_a_per_tensor(vllm_runner, tp):
    model_path = "amd/Llama-3.1-8B-Instruct-w-int8-a-int8-sym-test"
    with vllm_runner(model_path, tensor_parallel_size=tp) as llm:

        def check_model(model):
            layer = model.model.layers[0]

            qkv_proj = layer.self_attn.qkv_proj

            assert isinstance(qkv_proj.quant_method, QuarkLinearMethod)
            assert isinstance(qkv_proj.scheme, QuarkW8A8Int8)

        llm.apply_model(check_model)

        output = llm.generate_greedy("Hello my name is", max_tokens=20)
        assert output


def test_quark_fp8_parity(vllm_runner):
    # The same tiny checkpoint exported with Quark's quant method and with
    # the plain fp8 quant method must load to identical weights.
    quark_model_id = "amd-quark/llama-tiny-fp8-quark-quant-method"
    fp8_model_id = "amd-quark/llama-tiny-fp8-quant-method"

    llm_kwargs = {
        "tensor_parallel_size": 1,
        "enforce_eager": True,
        "gpu_memory_utilization": 0.1,
    }
    with (vllm_runner(quark_model_id, **llm_kwargs) as quark_handle,
          vllm_runner(fp8_model_id, **llm_kwargs) as fp8_handle):

        def get_state_dict(model):
            return {k: v.cpu() for k, v in model.state_dict().items()}

        # apply_model returns a list with one entry per worker; with
        # tensor_parallel_size=1 the trailing comma unpacks the single entry.
        quark_state_dict, = quark_handle.apply_model(get_state_dict)
        fp8_state_dict, = fp8_handle.apply_model(get_state_dict)

        assert fp8_state_dict.keys() == quark_state_dict.keys()

        for key in fp8_state_dict:
            assert torch.equal(fp8_state_dict[key], quark_state_dict[key])


@dataclass
class ModelCase:
    model_id: str
    tp: int


@dataclass
class GSM8KAccuracyTestConfig:
    model_name: str
    expected_value: float

    def get_model_args(self) -> str:
        return (f"pretrained={self.model_name},"
                "dtype=auto,add_bos_token=True,tensor_parallel_size=8,"
                "gpu_memory_utilization=0.7,max_model_len=38768")


ACCURACY_CONFIGS = [
    # Private model; requires read access to huggingface.co/amd.
    GSM8KAccuracyTestConfig(
        model_name="amd/DeepSeek-R1-WMXFP4-AMXFP4-Scale-UINT8-MoE-Quant",
        expected_value=0.96),
]
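
# For illustration, a hypothetical config such as
#   GSM8KAccuracyTestConfig(model_name="org/model", expected_value=0.5)
# renders its lm_eval model args as:
#   "pretrained=org/model,dtype=auto,add_bos_token=True,tensor_parallel_size=8,
#    gpu_memory_utilization=0.7,max_model_len=38768"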
@pytest.mark.parametrize("config", ACCURACY_CONFIGS)
|
|
@pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE,
|
|
reason="amd-quark>=0.9 is not available")
|
|
@pytest.mark.skipif(
|
|
not HF_HUB_AMD_ORG_ACCESS,
|
|
reason="Read access to huggingface.co/amd is required for this test.")
|
|
def test_mxfp4_gsm8k_correctness(config: GSM8KAccuracyTestConfig):
|
|
if torch.cuda.device_count() < 8:
|
|
pytest.skip(
|
|
f"This test requires >=8 gpus, got only {torch.cuda.device_count()}"
|
|
)
|
|
|
|
task = "gsm8k"
|
|
rtol = 0.03
|
|
|
|
os.environ["VLLM_USE_TRITON_FLASH_ATTN"] = "0"
|
|
|
|
results = lm_eval.simple_evaluate(
|
|
model="vllm",
|
|
model_args=config.get_model_args(),
|
|
tasks=task,
|
|
batch_size=64,
|
|
num_fewshot=8,
|
|
)
|
|
|
|
EXPECTED_VALUE = config.excepted_value
|
|
measured_value = results["results"][task]["exact_match,strict-match"]
|
|
assert (measured_value - rtol < EXPECTED_VALUE
|
|
and measured_value + rtol > EXPECTED_VALUE
|
|
), f"Expected: {EXPECTED_VALUE} | Measured: {measured_value}"
|
|
|
|
del os.environ["VLLM_USE_TRITON_FLASH_ATTN"]
|
|
|
|
|
|
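
# Background for the two MXFP4 kernel tests below: the MX format quantizes
# groups of 32 elements along the last axis, each group sharing a single e8m0
# (power-of-two) scale, which is why every 32-element group is scaled
# differently to exercise scales across several orders of magnitude.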


@pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE,
                    reason="amd-quark>=0.9 is not available")
@pytest.mark.parametrize("float_dtype", [torch.bfloat16, torch.float16])
@pytest.mark.parametrize("scalings",
                         [[2.3, 0.03, 7.3, 0.1, 0.004, 17.3, 1e4, 1e-4]])
def test_mxfp4_fused_qdq_match_quark(float_dtype: torch.dtype,
                                     scalings: list[float]):
    torch.manual_seed(0)

    hidden_size = 64 * 32
    inp = (torch.rand(1, hidden_size, dtype=float_dtype, device="cuda") -
           0.5) * 2
    # Scale each group of 32 elements differently so that the per-group
    # scales vary widely.
    for i in range(hidden_size // 32):
        inp[:, i * 32:(i + 1) *
            32] = inp[:, i * 32:(i + 1) * 32] * scalings[i % len(scalings)]

    inp_kernel = inp.clone()
    inp_kernel_clone = inp_kernel.clone()

    res_hip = mx_kernel.qdq_mxfp4_hip(inp_kernel_clone, "even")
    res_torch = qdq_mxfp4_torch(inp_kernel, "even")

    for i in range(hidden_size // 32):
        assert torch.all(torch.isfinite(res_hip[:, i * 32:(i + 1) * 32]))
        assert torch.all(torch.isfinite(res_torch[:, i * 32:(i + 1) * 32]))

        torch.testing.assert_close(res_hip[:, i * 32:(i + 1) * 32],
                                   res_torch[:, i * 32:(i + 1) * 32])


@pytest.mark.skipif(not QUARK_MXFP4_AVAILABLE,
                    reason="amd-quark>=0.9 is not available")
@pytest.mark.parametrize("float_dtype", [torch.bfloat16, torch.float16])
@pytest.mark.parametrize("scalings",
                         [[2.3, 0.03, 7.3, 0.1, 0.004, 17.3, 1e4, 1e-4]])
def test_mxfp4_dequant_kernel_match_quark(float_dtype: torch.dtype,
                                          scalings: list[float]):
    qspec = FP4PerGroupSpec(
        ch_axis=-1,
        group_size=32,
        scale_format="e8m0",
        scale_calculation_mode="even",
        is_dynamic=False,
    ).to_quantization_spec()

    weight_quantizer = StaticScaledRealQuantizer(
        qspec=qspec,
        quantizer=None,
        reorder=False,
        real_quantized=True,
        float_dtype=float_dtype,
        device="cuda",
    )

    observer = qspec.observer_cls(qspec, device="cuda")

    hidden_size = 512
    shape = (11008, hidden_size)

    w = (torch.rand(shape, device="cuda", dtype=float_dtype) - 0.5) * 2

    # Make it so that different groups have different scales.
    for i in range(hidden_size // 32):
        w[:, i * 32:(i + 1) *
          32] = w[:, i * 32:(i + 1) * 32] * scalings[i % len(scalings)]

    # Compute static per-group scales, quantize the weight to packed MXFP4,
    # then compare the HIP dequant kernel against the torch reference.
    observer(w)
    scale, _ = observer._calculate_qparams()
    weight_quantizer.scale = scale

    w_mxfp4 = weight_quantizer.to_real_quantize_params(w).to("cuda")
    weight_quantizer.maybe_convert_and_transpose_scale()

    scale = weight_quantizer.scale

    out_hip = mx_kernel.dq_mxfp4_hip(w_mxfp4, scale, float_dtype)
    out_torch = dq_mxfp4_torch(w_mxfp4, scale, float_dtype)

    assert torch.equal(out_hip, out_torch)