# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import asyncio
import gc
import json
import os
import pathlib
import subprocess
import sys
from typing import Any

import pytest
import torch

import vllm.model_executor.model_loader.tensorizer
from vllm import LLM, SamplingParams
from vllm.engine.arg_utils import EngineArgs
# yapf: disable
from vllm.model_executor.model_loader.tensorizer import (TensorizerConfig,
                                                         TensorSerializer,
                                                         is_vllm_tensorized,
                                                         open_stream,
                                                         tensorize_vllm_model)
from vllm.model_executor.model_loader.tensorizer_loader import (
    BLACKLISTED_TENSORIZER_ARGS)
# yapf: enable
from vllm.utils import PlaceholderModule

from ..utils import VLLM_PATH, RemoteOpenAIServer
from .conftest import DummyExecutor, assert_from_collective_rpc

try:
    import tensorizer
    from tensorizer import EncryptionParams
except ImportError:
    tensorizer = PlaceholderModule("tensorizer")  # type: ignore[assignment]
    EncryptionParams = tensorizer.placeholder_attr("EncryptionParams")


class TensorizerCaughtError(Exception):
    pass


EXAMPLES_PATH = VLLM_PATH / "examples"

pytest_plugins = ("pytest_asyncio", )

prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95, seed=0)

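# Helper run in the worker process via collective_rpc: wraps
# `obj.<method_name>` so that the expected error raised while the worker
# loads the model is re-raised as TensorizerCaughtError.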
def patch_init_and_catch_error(self, obj, method_name,
                               expected_error: type[Exception]):
    original = getattr(obj, method_name, None)
    if original is None:
        raise ValueError(f"Method '{method_name}' not found.")

    def wrapper(*args, **kwargs):
        try:
            return original(*args, **kwargs)
        except expected_error as err:
            raise TensorizerCaughtError from err

    setattr(obj, method_name, wrapper)

    self.load_model()

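# Asserts that, with `obj.<method_name>` patched on the workers, loading the
# model raises the expected error type (surfaced as TensorizerCaughtError).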
def assert_specific_tensorizer_error_is_raised(
    executor,
    obj: Any,
    method_name: str,
    expected_error: type[Exception],
):
    with pytest.raises(TensorizerCaughtError):
        executor.collective_rpc(patch_init_and_catch_error,
                                args=(
                                    obj,
                                    method_name,
                                    expected_error,
                                ))

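# cURL availability gates the tests that stream serialized tensors.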
def is_curl_installed():
    try:
        subprocess.check_call(['curl', '--version'])
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        return False

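# Writes a random Tensorizer encryption key to `keyfile_path`, creating any
# missing parent directories.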
def write_keyfile(keyfile_path: str):
    encryption_params = EncryptionParams.random()
    pathlib.Path(keyfile_path).parent.mkdir(parents=True, exist_ok=True)
    with open(keyfile_path, 'wb') as f:
        f.write(encryption_params.key)

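# Serializes a vLLM model with encryption, then checks that deserializing it
# through load_format="tensorizer" reproduces the original outputs.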
@pytest.mark.skipif(not is_curl_installed(), reason="cURL is not installed")
def test_deserialized_encrypted_vllm_model_has_same_outputs(
        model_ref, vllm_runner, tmp_path, model_path):
    args = EngineArgs(model=model_ref)
    with vllm_runner(model_ref) as vllm_model:
        key_path = tmp_path / model_ref / "model.key"
        write_keyfile(key_path)

        outputs = vllm_model.generate(prompts, sampling_params)

    config_for_serializing = TensorizerConfig(tensorizer_uri=str(model_path),
                                              encryption_keyfile=str(key_path))

    tensorize_vllm_model(args, config_for_serializing)

    config_for_deserializing = TensorizerConfig(
        tensorizer_uri=str(model_path), encryption_keyfile=str(key_path))

    with vllm_runner(model_ref,
                     load_format="tensorizer",
                     model_loader_extra_config=config_for_deserializing
                     ) as loaded_vllm_model:
        deserialized_outputs = loaded_vllm_model.generate(
            prompts, sampling_params)

    assert outputs == deserialized_outputs

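# Serializes a plain HuggingFace model directly with TensorSerializer and
# checks that vLLM can deserialize it with identical greedy outputs.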
def test_deserialized_hf_model_has_same_outputs(hf_runner, vllm_runner,
                                                tmp_path, model_ref,
                                                model_path):
    with hf_runner(model_ref) as hf_model:
        max_tokens = 50
        outputs = hf_model.generate_greedy(prompts, max_tokens=max_tokens)
        with open_stream(model_path, "wb+") as stream:
            serializer = TensorSerializer(stream)
            serializer.write_module(hf_model.model)

    with vllm_runner(model_ref,
                     load_format="tensorizer",
                     model_loader_extra_config=TensorizerConfig(
                         tensorizer_uri=str(model_path),
                         num_readers=1,
                     )) as loaded_hf_model:
        deserialized_outputs = loaded_hf_model.generate_greedy(
            prompts, max_tokens=max_tokens)

    assert outputs == deserialized_outputs

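# Passing a TensorizerConfig without load_format="tensorizer" should fail:
# the extra config keys are unexpected for the default ("auto") load format.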
def test_load_without_tensorizer_load_format(vllm_runner, capfd, model_ref):
    model = None
    try:
        model = vllm_runner(
            model_ref,
            model_loader_extra_config=TensorizerConfig(tensorizer_uri="test"))
        pytest.fail("Expected RuntimeError for extra config keys")
    except RuntimeError:
        out, err = capfd.readouterr()
        combined_output = out + err
        assert ("ValueError: Unexpected extra config keys for load "
                "format auto") in combined_output
    finally:
        del model
        gc.collect()
        torch.cuda.empty_cache()

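# Same idea as above, but with an explicitly conflicting load format
# ("safetensors") instead of the implicit default.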
def test_raise_value_error_on_invalid_load_format(vllm_runner, capfd,
                                                  model_ref):
    model = None
    try:
        model = vllm_runner(
            model_ref,
            load_format="safetensors",
            model_loader_extra_config=TensorizerConfig(tensorizer_uri="test"))
        pytest.fail("Expected RuntimeError for extra config keys")
    except RuntimeError:
        out, err = capfd.readouterr()
        combined_output = out + err
        assert ("ValueError: Unexpected extra config keys "
                "for load format safetensors") in combined_output
    finally:
        del model
        gc.collect()
        torch.cuda.empty_cache()

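# Serialized shards of a TP > 1 model are addressed by a rank format
# template (e.g. '%04d') in tensorizer_uri; a plain path must be rejected.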
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires 2 GPUs")
def test_tensorizer_with_tp_path_without_template(vllm_runner, capfd):
    try:
        model_ref = "EleutherAI/pythia-1.4b"
        tensorized_path = f"s3://tensorized/{model_ref}/fp16/model.tensors"

        vllm_runner(
            model_ref,
            load_format="tensorizer",
            model_loader_extra_config=TensorizerConfig(
                tensorizer_uri=tensorized_path,
                num_readers=1,
                s3_endpoint="object.ord1.coreweave.com",
            ),
            tensor_parallel_size=2,
            disable_custom_all_reduce=True,
        )
    except RuntimeError:
        out, err = capfd.readouterr()
        combined_output = out + err
        assert ("ValueError: For a sharded model, tensorizer_uri "
                "should include a string format template like '%04d' "
                "to be formatted with the rank "
                "of the shard") in combined_output

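# End-to-end TP=2 flow: serialize an encrypted two-shard model, then verify
# the deserialized shards match the outputs of the unsharded base model.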
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Requires 2 GPUs")
def test_deserialized_encrypted_vllm_model_with_tp_has_same_outputs(
        vllm_runner, tmp_path):
    model_ref = "EleutherAI/pythia-1.4b"
    # record outputs from un-sharded un-tensorized model
    with vllm_runner(
            model_ref,
            disable_custom_all_reduce=True,
            enforce_eager=True,
    ) as base_model:
        outputs = base_model.generate(prompts, sampling_params)

    # load model with two shards and serialize with encryption
    model_path = str(tmp_path / model_ref / "model-%02d.tensors")
    key_path = tmp_path / (model_ref + ".key")

    tensorizer_config = TensorizerConfig(
        tensorizer_uri=model_path,
        encryption_keyfile=str(key_path),
    )

    tensorize_vllm_model(
        engine_args=EngineArgs(
            model=model_ref,
            tensor_parallel_size=2,
            disable_custom_all_reduce=True,
            enforce_eager=True,
        ),
        tensorizer_config=tensorizer_config,
    )
    assert os.path.isfile(model_path % 0), "Serialization subprocess failed"
    assert os.path.isfile(model_path % 1), "Serialization subprocess failed"

    with vllm_runner(
            model_ref,
            tensor_parallel_size=2,
            load_format="tensorizer",
            disable_custom_all_reduce=True,
            enforce_eager=True,
            model_loader_extra_config=tensorizer_config) as loaded_vllm_model:
        deserialized_outputs = loaded_vllm_model.generate(
            prompts, sampling_params)

    assert outputs == deserialized_outputs

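# Round-trips a vLLM-serialized model (no encryption) and confirms both that
# is_vllm_tensorized() recognizes it and that outputs are unchanged.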
@pytest.mark.flaky(reruns=3)
def test_vllm_tensorized_model_has_same_outputs(model_ref, vllm_runner,
                                                tmp_path, model_path):
    gc.collect()
    torch.cuda.empty_cache()
    config = TensorizerConfig(tensorizer_uri=str(model_path))
    args = EngineArgs(model=model_ref)

    with vllm_runner(model_ref) as vllm_model:
        outputs = vllm_model.generate(prompts, sampling_params)

    tensorize_vllm_model(args, config)
    assert is_vllm_tensorized(config)

    with vllm_runner(model_ref,
                     load_format="tensorizer",
                     model_loader_extra_config=config) as loaded_vllm_model:
        deserialized_outputs = loaded_vllm_model.generate(
            prompts, sampling_params)

    assert outputs == deserialized_outputs

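# `just_serialize_model_tensors` is a conftest fixture that, as used here,
# provides a directory containing only the serialized model.tensors file.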
def test_load_with_just_model_tensors(just_serialize_model_tensors, model_ref):
    # For backwards compatibility, ensure Tensorizer models can still be
    # loaded for inference by passing the model reference name, not a
    # local/S3 dir, along with the location of the model tensors

    model_dir = just_serialize_model_tensors

    extra_config = {"tensorizer_uri": f"{model_dir}/model.tensors"}

    # Start the OpenAI API server
    args = [
        "--load-format",
        "tensorizer",
        "--model-loader-extra-config",
        json.dumps(extra_config),
    ]

    with RemoteOpenAIServer(model_ref, args):
        # This test only concerns itself with being able to load the model
        # and successfully initialize the server
        pass

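# Verifies that serialization_kwargs on TensorizerConfig are forwarded
# verbatim to tensorizer's TensorSerializer constructor, by monkey-patching
# TensorSerializer.__init__ inside the worker process and capturing kwargs.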
def test_assert_serialization_kwargs_passed_to_tensor_serializer(tmp_path):
    serialization_params = {
        "limit_cpu_concurrency": 2,
    }
    model_ref = "facebook/opt-125m"
    model_path = tmp_path / (model_ref + ".tensors")
    config = TensorizerConfig(tensorizer_uri=str(model_path),
                              serialization_kwargs=serialization_params)
    llm = LLM(model=model_ref)

    def serialization_test(self, *args, **kwargs):
        # This runs in the ephemeral worker process, so monkey-patching
        # will actually work, and cleanup is guaranteed, so there's no need
        # to reset anything afterwards.

        original_dict = serialization_params
        to_compare = {}

        original = tensorizer.serialization.TensorSerializer.__init__

        def tensorizer_serializer_wrapper(self, *args, **kwargs):
            nonlocal to_compare
            to_compare = kwargs.copy()
            return original(self, *args, **kwargs)

        tensorizer.serialization.TensorSerializer.__init__ = (
            tensorizer_serializer_wrapper)

        tensorizer_config = TensorizerConfig(**kwargs["tensorizer_config"])
        self.save_tensorized_model(tensorizer_config=tensorizer_config)
        # True iff every expected serialization kwarg appears in the
        # captured kwargs with the same value.
        return (to_compare | original_dict) == to_compare

    kwargs = {"tensorizer_config": config.to_serializable()}

    assert assert_from_collective_rpc(llm, serialization_test, kwargs)

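# Verifies that deserialization_kwargs reach TensorDeserializer.__init__:
# the illegal num_readers value should surface as a TypeError.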
def test_assert_deserialization_kwargs_passed_to_tensor_deserializer(
        tmp_path, capfd):
    deserialization_kwargs = {
        "num_readers": "bar",  # illegal value
    }

    serialization_params = {
        "limit_cpu_concurrency": 2,
    }

    model_ref = "facebook/opt-125m"
    model_path = tmp_path / (model_ref + ".tensors")
    config = TensorizerConfig(tensorizer_uri=str(model_path),
                              serialization_kwargs=serialization_params)

    args = EngineArgs(model=model_ref)
    tensorize_vllm_model(args, config)

    loader_tc = TensorizerConfig(
        tensorizer_uri=str(model_path),
        deserialization_kwargs=deserialization_kwargs,
    )

    engine_args = EngineArgs(
        model="facebook/opt-125m",
        load_format="tensorizer",
        model_loader_extra_config=loader_tc.to_serializable(),
    )

    vllm_config = engine_args.create_engine_config()
    executor = DummyExecutor(vllm_config)

    assert_specific_tensorizer_error_is_raised(
        executor,
        tensorizer.serialization.TensorDeserializer,
        "__init__",
        TypeError,
    )

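# Verifies that stream_kwargs reach open_stream(): the illegal mode should
# surface as a ValueError.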
def test_assert_stream_kwargs_passed_to_tensor_deserializer(tmp_path, capfd):
    deserialization_kwargs = {
        "num_readers": 1,
    }

    serialization_params = {
        "limit_cpu_concurrency": 2,
    }

    model_ref = "facebook/opt-125m"
    model_path = tmp_path / (model_ref + ".tensors")
    config = TensorizerConfig(tensorizer_uri=str(model_path),
                              serialization_kwargs=serialization_params)

    args = EngineArgs(model=model_ref)
    tensorize_vllm_model(args, config)

    stream_kwargs = {"mode": "foo"}  # illegal mode

    loader_tc = TensorizerConfig(
        tensorizer_uri=str(model_path),
        deserialization_kwargs=deserialization_kwargs,
        stream_kwargs=stream_kwargs,
    )

    engine_args = EngineArgs(
        model="facebook/opt-125m",
        load_format="tensorizer",
        model_loader_extra_config=loader_tc.to_serializable(),
    )

    vllm_config = engine_args.create_engine_config()
    executor = DummyExecutor(vllm_config)

    assert_specific_tensorizer_error_is_raised(
        executor,
        vllm.model_executor.model_loader.tensorizer,
        "open_stream",
        ValueError,
    )

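# Exercises the user-facing entrypoints: serialize with the example script,
# then boot `vllm serve` on the result and wait for startup to complete.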
@pytest.mark.asyncio
async def test_serialize_and_serve_entrypoints(tmp_path):
    model_ref = "facebook/opt-125m"

    suffix = "test"
    try:
        result = subprocess.run([
            sys.executable,
            f"{VLLM_PATH}/examples/others/tensorize_vllm_model.py", "--model",
            model_ref, "serialize", "--serialized-directory",
            str(tmp_path), "--suffix", suffix, "--serialization-kwargs",
            '{"limit_cpu_concurrency": 4}'
        ],
                                check=True,
                                capture_output=True,
                                text=True)
    except subprocess.CalledProcessError as e:
        print("Tensorizing failed.")
        print("STDOUT:\n", e.stdout)
        print("STDERR:\n", e.stderr)
        raise

    assert "Successfully serialized" in result.stdout

    # Next, try to serve with `vllm serve`
    model_uri = tmp_path / "vllm" / model_ref / suffix / "model.tensors"

    model_loader_extra_config = {
        "tensorizer_uri": str(model_uri),
        "stream_kwargs": {
            "force_http": False,
        },
        "deserialization_kwargs": {
            "verify_hash": True,
            "num_readers": 8,
        }
    }

    cmd = [
        "-m", "vllm.entrypoints.cli.main", "serve", "--host", "localhost",
        "--load-format", "tensorizer", model_ref,
        "--model-loader-extra-config",
        json.dumps(model_loader_extra_config, indent=2)
    ]

    proc = await asyncio.create_subprocess_exec(
        sys.executable,
        *cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
    )

    assert proc.stdout is not None
    fut = proc.stdout.readuntil(b"Application startup complete.")

    try:
        await asyncio.wait_for(fut, 180)
    except asyncio.TimeoutError:
        pytest.fail("Server did not start successfully")
    finally:
        proc.terminate()
        await proc.communicate()

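# Every argument in BLACKLISTED_TENSORIZER_ARGS must be rejected when passed
# through model_loader_extra_config at load time.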
@pytest.mark.parametrize("illegal_value", BLACKLISTED_TENSORIZER_ARGS)
def test_blacklisted_parameter_for_loading(tmp_path, vllm_runner, capfd,
                                           illegal_value):
    serialization_params = {
        "limit_cpu_concurrency": 2,
    }

    model_ref = "facebook/opt-125m"
    model_path = tmp_path / (model_ref + ".tensors")
    config = TensorizerConfig(tensorizer_uri=str(model_path),
                              serialization_kwargs=serialization_params)

    args = EngineArgs(model=model_ref)
    tensorize_vllm_model(args, config)

    loader_tc = {"tensorizer_uri": str(model_path), illegal_value: "foo"}

    try:
        vllm_runner(
            model_ref,
            load_format="tensorizer",
            model_loader_extra_config=loader_tc,
        )
    except RuntimeError:
        out, err = capfd.readouterr()
        combined_output = out + err
        assert (f"ValueError: {illegal_value} is not an allowed "
                f"Tensorizer argument.") in combined_output