# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
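"""End-to-end tests for the Prometheus metrics exposed by vLLM's
OpenAI-compatible server.

The tests below start a ``RemoteOpenAIServer`` under several CLI
configurations, drive it with completion requests, scrape the ``/metrics``
endpoint, and check both that the expected metric families are present and
that their values match the traffic that was sent.
"""
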
import asyncio
import subprocess
import sys
import tempfile
import time
from http import HTTPStatus

import openai
import pytest
import pytest_asyncio
import requests
from prometheus_client.parser import text_string_to_metric_families
from transformers import AutoTokenizer

from vllm import version

from ...utils import RemoteOpenAIServer

MODEL_NAME = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
PREV_MINOR_VERSION = version._prev_minor_version()


@pytest.fixture(scope="module", params=[True])
def use_v1(request):
    # Module-scoped variant of run_with_both_engines
    #
    # Use this fixture to run a test with both v0 and v1, and
    # also to conditionalize the test logic e.g.
    #
    # def test_metrics_exist(use_v1, server, client):
    #     ...
    #     expected = EXPECTED_METRICS_V1 if use_v1 else EXPECTED_METRICS
    #     for metric in expected:
    #         assert metric in response.text
    #
    # @skip_v1 wouldn't work here because this is a module-level
    # fixture - per-function decorators would have no effect
    yield request.param


@pytest.fixture(scope="module")
def default_server_args():
    return [
        # use half precision for speed and memory savings in CI environment
        "--dtype",
        "bfloat16",
        "--max-model-len",
        "1024",
        "--enforce-eager",
        "--max-num-seqs",
        "128",
    ]


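# The server fixture is parametrized over several CLI configurations so the
# metrics are exercised with chunked prefill, without the frontend
# multiprocessing layer, and with deprecated metrics re-enabled via
# --show-hidden-metrics-for-version=<previous minor version>.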
@pytest.fixture(scope="module",
                params=[
                    "",
                    "--enable-chunked-prefill",
                    "--disable-frontend-multiprocessing",
                    f"--show-hidden-metrics-for-version={PREV_MINOR_VERSION}",
                ])
def server(use_v1, default_server_args, request):
    if request.param:
        default_server_args.append(request.param)
    env_dict = dict(VLLM_USE_V1='1' if use_v1 else '0')
    with RemoteOpenAIServer(MODEL_NAME, default_server_args,
                            env_dict=env_dict) as remote_server:
        yield remote_server


@pytest_asyncio.fixture
async def client(server):
    async with server.get_async_client() as cl:
        yield cl


_PROMPT = "Hello my name is Robert and I love magic"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
_TOKENIZED_PROMPT = tokenizer(_PROMPT)["input_ids"]

_NUM_REQUESTS = 10
_NUM_PROMPT_TOKENS_PER_REQUEST = len(_TOKENIZED_PROMPT)
_NUM_GENERATION_TOKENS_PER_REQUEST = 10

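# The expected values below follow from the traffic generated in
# test_metrics_counts: _NUM_REQUESTS requests, each with
# _NUM_PROMPT_TOKENS_PER_REQUEST prompt tokens and
# _NUM_GENERATION_TOKENS_PER_REQUEST generated tokens. Note that the
# time_per_output_token histogram only observes the gaps between consecutive
# output tokens, so each request is expected to contribute max_tokens - 1
# samples (the first token is accounted for by time_to_first_token).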
# {metric_family: [(suffix, expected_value)]}
EXPECTED_VALUES = {
    "vllm:time_to_first_token_seconds": [("_count", _NUM_REQUESTS)],
    "vllm:time_per_output_token_seconds":
    [("_count", _NUM_REQUESTS * (_NUM_GENERATION_TOKENS_PER_REQUEST - 1))],
    "vllm:e2e_request_latency_seconds": [("_count", _NUM_REQUESTS)],
    "vllm:request_queue_time_seconds": [("_count", _NUM_REQUESTS)],
    "vllm:request_inference_time_seconds": [("_count", _NUM_REQUESTS)],
    "vllm:request_prefill_time_seconds": [("_count", _NUM_REQUESTS)],
    "vllm:request_decode_time_seconds": [("_count", _NUM_REQUESTS)],
    "vllm:request_prompt_tokens":
    [("_sum", _NUM_REQUESTS * _NUM_PROMPT_TOKENS_PER_REQUEST),
     ("_count", _NUM_REQUESTS)],
    "vllm:request_generation_tokens":
    [("_sum", _NUM_REQUESTS * _NUM_GENERATION_TOKENS_PER_REQUEST),
     ("_count", _NUM_REQUESTS)],
    "vllm:request_params_n": [("_count", _NUM_REQUESTS)],
    "vllm:request_params_max_tokens": [
        ("_sum", _NUM_REQUESTS * _NUM_GENERATION_TOKENS_PER_REQUEST),
        ("_count", _NUM_REQUESTS)
    ],
    "vllm:iteration_tokens_total":
    [("_sum", _NUM_REQUESTS *
      (_NUM_PROMPT_TOKENS_PER_REQUEST + _NUM_GENERATION_TOKENS_PER_REQUEST)),
     ("_count", _NUM_REQUESTS * _NUM_GENERATION_TOKENS_PER_REQUEST)],
    "vllm:prompt_tokens": [("_total",
                            _NUM_REQUESTS * _NUM_PROMPT_TOKENS_PER_REQUEST)],
"vllm:generation_tokens": [
|
|
("_total", _NUM_REQUESTS * _NUM_PROMPT_TOKENS_PER_REQUEST)
|
|
],
|
|
"vllm:request_success": [("_total", _NUM_REQUESTS)],
|
|
}
|
|
|
|
|
|
@pytest.mark.asyncio
async def test_metrics_counts(server: RemoteOpenAIServer,
                              client: openai.AsyncClient, use_v1: bool):
    for _ in range(_NUM_REQUESTS):
        # sending a request triggers the metrics to be logged.
        await client.completions.create(
            model=MODEL_NAME,
            prompt=_TOKENIZED_PROMPT,
            max_tokens=_NUM_GENERATION_TOKENS_PER_REQUEST)

    response = requests.get(server.url_for("metrics"))
    print(response.text)
    assert response.status_code == HTTPStatus.OK

    # Loop over all expected metric_families
    for metric_family, suffix_values_list in EXPECTED_VALUES.items():
        if ((use_v1 and metric_family not in EXPECTED_METRICS_V1)
                or (not server.show_hidden_metrics
                    and metric_family in HIDDEN_DEPRECATED_METRICS)):
            continue

        found_metric = False

        # Check to see if the metric_family is found in the prom endpoint.
        for family in text_string_to_metric_families(response.text):
            if family.name == metric_family:
                found_metric = True

                # Check that each suffix is found in the prom endpoint.
                for suffix, expected_value in suffix_values_list:
                    metric_name_w_suffix = f"{metric_family}{suffix}"
                    found_suffix = False

                    for sample in family.samples:
                        if sample.name == metric_name_w_suffix:
                            found_suffix = True

                            # For each suffix, make sure the value matches
                            # what we expect.
                            assert sample.value == expected_value, (
                                f"{metric_name_w_suffix} expected value of "
                                f"{expected_value} did not match found value "
                                f"{sample.value}")
                            break
                    assert found_suffix, (
                        f"Did not find {metric_name_w_suffix} in prom endpoint"
                    )
                break

        assert found_metric, (f"Did not find {metric_family} in prom endpoint")


EXPECTED_METRICS = [
    "vllm:num_requests_running",
    "vllm:num_requests_waiting",
    "vllm:gpu_cache_usage_perc",
    "vllm:time_to_first_token_seconds_sum",
    "vllm:time_to_first_token_seconds_bucket",
    "vllm:time_to_first_token_seconds_count",
    "vllm:time_per_output_token_seconds_sum",
    "vllm:time_per_output_token_seconds_bucket",
    "vllm:time_per_output_token_seconds_count",
    "vllm:e2e_request_latency_seconds_sum",
    "vllm:e2e_request_latency_seconds_bucket",
    "vllm:e2e_request_latency_seconds_count",
    "vllm:request_queue_time_seconds_sum",
    "vllm:request_queue_time_seconds_bucket",
    "vllm:request_queue_time_seconds_count",
    "vllm:request_inference_time_seconds_sum",
    "vllm:request_inference_time_seconds_bucket",
    "vllm:request_inference_time_seconds_count",
    "vllm:request_prefill_time_seconds_sum",
    "vllm:request_prefill_time_seconds_bucket",
    "vllm:request_prefill_time_seconds_count",
    "vllm:request_decode_time_seconds_sum",
    "vllm:request_decode_time_seconds_bucket",
    "vllm:request_decode_time_seconds_count",
    "vllm:request_prompt_tokens_sum",
    "vllm:request_prompt_tokens_bucket",
    "vllm:request_prompt_tokens_count",
    "vllm:request_generation_tokens_sum",
    "vllm:request_generation_tokens_bucket",
    "vllm:request_generation_tokens_count",
    "vllm:request_params_n_sum",
    "vllm:request_params_n_bucket",
    "vllm:request_params_n_count",
    "vllm:request_params_max_tokens_sum",
    "vllm:request_params_max_tokens_bucket",
    "vllm:request_params_max_tokens_count",
    "vllm:iteration_tokens_total",
    "vllm:num_preemptions_total",
    "vllm:prompt_tokens_total",
    "vllm:generation_tokens_total",
    "vllm:request_success_total",
    "vllm:cache_config_info",
    # labels in cache_config_info
    "block_size",
    "cache_dtype",
    "cpu_offload_gb",
    "enable_prefix_caching",
    "gpu_memory_utilization",
    "num_cpu_blocks",
    "num_gpu_blocks",
    "num_gpu_blocks_override",
    "sliding_window",
    "swap_space_bytes",
]

EXPECTED_METRICS_V1 = [
    "vllm:num_requests_running",
    "vllm:num_requests_waiting",
    "vllm:gpu_cache_usage_perc",
    "vllm:gpu_prefix_cache_queries",
    "vllm:gpu_prefix_cache_hits",
    "vllm:kv_cache_usage_perc",
    "vllm:prefix_cache_queries",
    "vllm:prefix_cache_hits",
    "vllm:num_preemptions_total",
    "vllm:prompt_tokens_total",
    "vllm:generation_tokens_total",
    "vllm:iteration_tokens_total",
    "vllm:cache_config_info",
    "vllm:request_success_total",
    "vllm:request_prompt_tokens_sum",
    "vllm:request_prompt_tokens_bucket",
    "vllm:request_prompt_tokens_count",
    "vllm:request_generation_tokens_sum",
    "vllm:request_generation_tokens_bucket",
    "vllm:request_generation_tokens_count",
    "vllm:request_params_n_sum",
    "vllm:request_params_n_bucket",
    "vllm:request_params_n_count",
    "vllm:request_params_max_tokens_sum",
    "vllm:request_params_max_tokens_bucket",
    "vllm:request_params_max_tokens_count",
    "vllm:time_per_output_token_seconds_sum",
    "vllm:time_per_output_token_seconds_bucket",
    "vllm:time_per_output_token_seconds_count",
    "vllm:time_to_first_token_seconds_sum",
    "vllm:time_to_first_token_seconds_bucket",
    "vllm:time_to_first_token_seconds_count",
    "vllm:inter_token_latency_seconds_sum",
    "vllm:inter_token_latency_seconds_bucket",
    "vllm:inter_token_latency_seconds_count",
    "vllm:e2e_request_latency_seconds_sum",
    "vllm:e2e_request_latency_seconds_bucket",
    "vllm:e2e_request_latency_seconds_count",
    "vllm:request_queue_time_seconds_sum",
    "vllm:request_queue_time_seconds_bucket",
    "vllm:request_queue_time_seconds_count",
    "vllm:request_inference_time_seconds_sum",
    "vllm:request_inference_time_seconds_bucket",
    "vllm:request_inference_time_seconds_count",
    "vllm:request_prefill_time_seconds_sum",
    "vllm:request_prefill_time_seconds_bucket",
    "vllm:request_prefill_time_seconds_count",
    "vllm:request_decode_time_seconds_sum",
    "vllm:request_decode_time_seconds_bucket",
    "vllm:request_decode_time_seconds_count",
]

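# Metrics listed here are deprecated and hidden by default; the tests only
# assert on them when the server was started with
# --show-hidden-metrics-for-version (see the server fixture above), which is
# what server.show_hidden_metrics reflects.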
HIDDEN_DEPRECATED_METRICS: list[str] = [
    "vllm:gpu_cache_usage_perc",
    "vllm:gpu_prefix_cache_queries",
    "vllm:gpu_prefix_cache_hits",
    "vllm:time_per_output_token_seconds_sum",
    "vllm:time_per_output_token_seconds_bucket",
    "vllm:time_per_output_token_seconds_count",
]


@pytest.mark.asyncio
async def test_metrics_exist(server: RemoteOpenAIServer,
                             client: openai.AsyncClient, use_v1: bool):
    # sending a request triggers the metrics to be logged.
    await client.completions.create(model=MODEL_NAME,
                                    prompt="Hello, my name is",
                                    max_tokens=5,
                                    temperature=0.0)

    response = requests.get(server.url_for("metrics"))
    assert response.status_code == HTTPStatus.OK

    for metric in (EXPECTED_METRICS_V1 if use_v1 else EXPECTED_METRICS):
        if (metric in HIDDEN_DEPRECATED_METRICS
                and not server.show_hidden_metrics):
            continue
        assert metric in response.text


@pytest.mark.asyncio
async def test_abort_metrics_reset(server: RemoteOpenAIServer,
                                   client: openai.AsyncClient, use_v1: bool):

    running_requests, waiting_requests, kv_cache_usage = (
        _get_running_metrics_from_api(server, use_v1))

    # Expect no running requests or kvcache usage
    assert running_requests == 0
    assert waiting_requests == 0
    assert kv_cache_usage == 0.0

    # Start some long-running requests that we can abort
    tasks = []
    for _ in range(3):
        task = asyncio.create_task(
            client.completions.create(
                model=MODEL_NAME,
                prompt=_TOKENIZED_PROMPT,
                max_tokens=100,  # Long generation to give time to abort
                temperature=0.0))
        tasks.append(task)

    # Wait a bit for requests to start processing
    await asyncio.sleep(0.5)

    # Check that we have running requests
    running_requests, waiting_requests, kv_cache_usage = (
        _get_running_metrics_from_api(server, use_v1))

    # Expect running requests and kvcache usage
    assert running_requests > 0
    assert kv_cache_usage > 0

    # Cancel all tasks to abort the requests
    for task in tasks:
        task.cancel()

    # Wait for cancellations to be processed
    await asyncio.sleep(1.0)

    # Check that metrics have reset to zero
    response = requests.get(server.url_for("metrics"))
    assert response.status_code == HTTPStatus.OK

    # Verify running and waiting requests counts and KV cache usage are zero
    running_requests_after, waiting_requests_after, kv_cache_usage_after = (
        _get_running_metrics_from_api(server, use_v1))

    assert running_requests_after == 0, \
        (f"Expected 0 running requests after abort, got "
         f"{running_requests_after}")
    assert waiting_requests_after == 0, \
        (f"Expected 0 waiting requests after abort, got "
         f"{waiting_requests_after}")
    assert kv_cache_usage_after == 0, \
        (f"Expected 0% KV cache usage after abort, got "
         f"{kv_cache_usage_after}")


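# Helper that scrapes the /metrics endpoint with prometheus_client and pulls
# out the scheduler gauges used by test_abort_metrics_reset. The KV cache
# usage gauge is named vllm:kv_cache_usage_perc in V1 and
# vllm:gpu_cache_usage_perc in V0.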
def _get_running_metrics_from_api(server: RemoteOpenAIServer, use_v1: bool):
    """Return (running_count, waiting_count, kv_cache_usage)"""

    response = requests.get(server.url_for("metrics"))
    assert response.status_code == HTTPStatus.OK

    # Extract the running/waiting request counts and KV cache usage
    running_requests, waiting_requests, kv_cache_usage = None, None, None

    kv_cache_usage_metric = ("vllm:kv_cache_usage_perc"
                             if use_v1 else "vllm:gpu_cache_usage_perc")

    for family in text_string_to_metric_families(response.text):
        if family.name == "vllm:num_requests_running":
            for sample in family.samples:
                if sample.name == "vllm:num_requests_running":
                    running_requests = sample.value
                    break
        elif family.name == "vllm:num_requests_waiting":
            for sample in family.samples:
                if sample.name == "vllm:num_requests_waiting":
                    waiting_requests = sample.value
                    break
        elif family.name == kv_cache_usage_metric:
            for sample in family.samples:
                if sample.name == kv_cache_usage_metric:
                    kv_cache_usage = sample.value
                    break

    assert running_requests is not None
    assert waiting_requests is not None
    assert kv_cache_usage is not None

    return running_requests, waiting_requests, kv_cache_usage


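# run_batch exposes its own metrics server when --enable-metrics is passed;
# this test launches it as a subprocess on a fixed host/port, polls until the
# HTTP endpoint responds, and then checks that /metrics is served.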
def test_metrics_exist_run_batch(use_v1: bool):
    input_batch = """{"custom_id": "request-0", "method": "POST", "url": "/v1/embeddings", "body": {"model": "intfloat/multilingual-e5-small", "input": "You are a helpful assistant."}}"""  # noqa: E501

    base_url = "0.0.0.0"
    port = "8001"
    server_url = f"http://{base_url}:{port}"

    with tempfile.NamedTemporaryFile(
            "w") as input_file, tempfile.NamedTemporaryFile(
                "r") as output_file:
        input_file.write(input_batch)
        input_file.flush()
        proc = subprocess.Popen([
            sys.executable,
            "-m",
            "vllm.entrypoints.openai.run_batch",
            "-i",
            input_file.name,
            "-o",
            output_file.name,
            "--model",
            "intfloat/multilingual-e5-small",
            "--enable-metrics",
            "--url",
            base_url,
            "--port",
            port,
        ],
                                env={"VLLM_USE_V1": "1"})

        def is_server_up(url):
            try:
                response = requests.get(url)
                return response.status_code == 200
            except requests.ConnectionError:
                return False

        while not is_server_up(server_url):
            time.sleep(1)

        response = requests.get(server_url + "/metrics")
        assert response.status_code == HTTPStatus.OK

        proc.wait()