# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import asyncio
from contextlib import ExitStack
from typing import Optional
from unittest.mock import MagicMock

import pytest

from vllm import SamplingParams
from vllm.assets.image import ImageAsset
from vllm.config import VllmConfig
from vllm.engine.arg_utils import AsyncEngineArgs
from vllm.inputs import PromptType
from vllm.outputs import RequestOutput
from vllm.platforms import current_platform
from vllm.sampling_params import RequestOutputKind
from vllm.utils import set_default_torch_num_threads
from vllm.v1.engine.async_llm import AsyncLLM
from vllm.v1.metrics.loggers import LoggingStatLogger

if not current_platform.is_cuda():
    pytest.skip(reason="V1 currently only supported on CUDA.",
                allow_module_level=True)

TEXT_ENGINE_ARGS = AsyncEngineArgs(
    model="meta-llama/Llama-3.2-1B-Instruct",
    enforce_eager=True,
)

VISION_ENGINE_ARGS = AsyncEngineArgs(model="Qwen/Qwen2-VL-2B-Instruct",
                                     enforce_eager=True)

TEXT_PROMPT = "Hello my name is Robert and"

VISION_PROMPT_TEMPLATE = (
    "<|im_start|>system\nYou are a helpful assistant.<|im_end|>"
    "\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>"
    "What is in the image?<|im_end|>\n"
    "<|im_start|>assistant\n")
VISION_PROMPT = {
    "prompt": VISION_PROMPT_TEMPLATE,
    "multi_modal_data": {
        "image": ImageAsset("stop_sign").pil_image
    },
}


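# Shared driver for the tests below. Returns (num_generated_tokens,
# request_id) so callers can assert on both. With DELTA outputs each
# iteration carries only the newly generated tokens, so counts are
# accumulated; otherwise each output carries the full sequence and the
# count is simply replaced. When `cancel_after` is set, the helper returns
# early once that many tokens have arrived, simulating a client that
# walks away from the stream.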
async def generate(
    engine: AsyncLLM,
    request_id: str,
    prompt: PromptType,
    output_kind: RequestOutputKind,
    max_tokens: int,
    n: int = 1,
    prompt_logprobs: Optional[int] = None,
    cancel_after: Optional[int] = None,
) -> tuple[int, str]:
    # Ensure generate doesn't complete too fast for cancellation test.
    await asyncio.sleep(0.2)

    count = 0
    sampling_params = SamplingParams(
        max_tokens=max_tokens,
        ignore_eos=True,
        output_kind=output_kind,
        temperature=0.5,
        seed=33,
        n=n,
        prompt_logprobs=prompt_logprobs,
    )
    async for out in engine.generate(request_id=request_id,
                                     prompt=prompt,
                                     sampling_params=sampling_params):

        num_tokens = sum(len(output.token_ids) for output in out.outputs)
        if output_kind == RequestOutputKind.DELTA:
            count += num_tokens
        else:
            count = num_tokens

        if cancel_after is not None and count >= cancel_after:
            return count, request_id

        await asyncio.sleep(0.0)

    return count, request_id


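# Drives NUM_REQUESTS concurrent streams against a single engine and checks
# that every request yields exactly NUM_EXPECTED_TOKENS (deterministic
# because `generate` sets ignore_eos=True).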
@pytest.mark.parametrize(
    "output_kind", [RequestOutputKind.DELTA, RequestOutputKind.FINAL_ONLY])
@pytest.mark.parametrize(
    "engine_args,prompt",
    [(TEXT_ENGINE_ARGS, TEXT_PROMPT), (VISION_ENGINE_ARGS, VISION_PROMPT)],
)
@pytest.mark.asyncio
async def test_load(
    monkeypatch: pytest.MonkeyPatch,
    output_kind: RequestOutputKind,
    engine_args: AsyncEngineArgs,
    prompt: PromptType,
):
    # TODO(rickyx): Remove monkeypatch once we have a better way to test V1
    # so that in the future when we switch, we don't have to change all the
    # tests.
    with monkeypatch.context() as m, ExitStack() as after:
        m.setenv("VLLM_USE_V1", "1")

        with set_default_torch_num_threads(1):
            engine = AsyncLLM.from_engine_args(engine_args)
        after.callback(engine.shutdown)

        NUM_REQUESTS = 100
        NUM_EXPECTED_TOKENS = 10

        request_ids = [f"request-{i}" for i in range(NUM_REQUESTS)]

        # Create concurrent requests.
        tasks = []
        for request_id in request_ids:
            tasks.append(
                asyncio.create_task(
                    generate(engine, request_id, prompt, output_kind,
                             NUM_EXPECTED_TOKENS)))

        # Confirm that we got all the EXPECTED tokens from the requests.
        done, pending = await asyncio.wait(tasks,
                                           return_when=asyncio.FIRST_EXCEPTION)
        for task in pending:
            task.cancel()
        for task in done:
            num_generated_tokens, request_id = await task
            assert num_generated_tokens == NUM_EXPECTED_TOKENS, (
                f"{request_id} generated {num_generated_tokens} but "
                f"expected {NUM_EXPECTED_TOKENS}")

        assert not engine.output_processor.has_unfinished_requests()


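# Aborts a subset of in-flight requests by cancelling their asyncio tasks,
# mirroring how the API server handles client disconnects, then verifies
# that untouched requests are unaffected and the engine can serve new work.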
@pytest.mark.parametrize(
    "output_kind", [RequestOutputKind.DELTA, RequestOutputKind.FINAL_ONLY])
@pytest.mark.parametrize(
    "engine_args,prompt",
    [(TEXT_ENGINE_ARGS, TEXT_PROMPT), (VISION_ENGINE_ARGS, VISION_PROMPT)],
)
@pytest.mark.asyncio
async def test_abort(
    monkeypatch: pytest.MonkeyPatch,
    output_kind: RequestOutputKind,
    engine_args: AsyncEngineArgs,
    prompt: PromptType,
):

    with monkeypatch.context() as m, ExitStack() as after:
        m.setenv("VLLM_USE_V1", "1")

        with set_default_torch_num_threads(1):
            engine = AsyncLLM.from_engine_args(engine_args)
        after.callback(engine.shutdown)

        NUM_REQUESTS = 100
        NUM_EXPECTED_TOKENS = 100
        NUM_EXPECTED_TOKENS_LONG = 50000
        REQUEST_IDS_TO_ABORT = range(1, 100, 10)
        PARALLEL_SAMPLE_REQ_IDS = range(1, 100, 15)

        request_ids = [f"request-{i}" for i in range(NUM_REQUESTS)]

        # Create concurrent requests.
        tasks: list[asyncio.Task] = []
        for idx, request_id in enumerate(request_ids):
            max_tokens = (NUM_EXPECTED_TOKENS_LONG
                          if idx in REQUEST_IDS_TO_ABORT else
                          NUM_EXPECTED_TOKENS)
            n = 3 if idx in PARALLEL_SAMPLE_REQ_IDS else 1
            tasks.append(
                asyncio.create_task(
                    generate(engine, request_id, prompt, output_kind,
                             max_tokens, n)))

        # API server cancels requests when they disconnect.
        for idx in REQUEST_IDS_TO_ABORT:
            tasks[idx].cancel()
            await asyncio.sleep(0.1)

        # Confirm the other requests are okay.
        for idx, task in enumerate(tasks):
            # Confirm that it was actually canceled.
            if idx in REQUEST_IDS_TO_ABORT:
                with pytest.raises(asyncio.CancelledError):
                    await task
            else:
                # Otherwise, make sure the request was not impacted.
                num_generated_tokens, request_id = await task
                n = 3 if idx in PARALLEL_SAMPLE_REQ_IDS else 1
                expected_tokens = NUM_EXPECTED_TOKENS * n
                assert num_generated_tokens == expected_tokens, (
                    f"{request_id} generated {num_generated_tokens} but "
                    f"expected {expected_tokens}")

        # Make sure all aborted requests were really aborted.
        assert not engine.output_processor.has_unfinished_requests()

        # Confirm we can do another generation.
        request_id = f"request-{REQUEST_IDS_TO_ABORT[0]}"
        task = asyncio.create_task(
            generate(engine, request_id, prompt, output_kind,
                     NUM_EXPECTED_TOKENS))
        num_generated_tokens, request_id = await task
        assert num_generated_tokens == NUM_EXPECTED_TOKENS
        assert not engine.output_processor.has_unfinished_requests()


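# Unlike test_abort, which cancels consumer tasks, this test aborts
# server-side via engine.abort() with a list of request ids, so the
# streams end normally and aborted requests surface partial results.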
@pytest.mark.parametrize(
    "output_kind", [RequestOutputKind.DELTA, RequestOutputKind.FINAL_ONLY])
@pytest.mark.asyncio
async def test_multi_abort(
    monkeypatch: pytest.MonkeyPatch,
    output_kind: RequestOutputKind,
):

    with monkeypatch.context() as m, ExitStack() as after:
        m.setenv("VLLM_USE_V1", "1")

        with set_default_torch_num_threads(1):
            engine = AsyncLLM.from_engine_args(TEXT_ENGINE_ARGS)
        after.callback(engine.shutdown)

        NUM_REQUESTS = 50
        NUM_EXPECTED_TOKENS = 100
        NUM_EXPECTED_TOKENS_LONG = 50000
        REQUEST_IDS_TO_ABORT = [5, 10, 15, 20, 25]
        PARALLEL_SAMPLE_REQ_IDS = [5, 15, 30, 35]

        request_ids = [f"request-{i}" for i in range(NUM_REQUESTS)]

        # Create concurrent requests.
        tasks: list[asyncio.Task] = []
        for idx, request_id in enumerate(request_ids):
            max_tokens = (NUM_EXPECTED_TOKENS_LONG
                          if idx in REQUEST_IDS_TO_ABORT else
                          NUM_EXPECTED_TOKENS)
            n = 3 if idx in PARALLEL_SAMPLE_REQ_IDS else 1
            tasks.append(
                asyncio.create_task(
                    generate(engine, request_id, TEXT_PROMPT, output_kind,
                             max_tokens, n)))

        # Let requests start.
        await asyncio.sleep(0.5)

        # Use multi-abort to abort multiple requests at once.
        abort_request_ids = [request_ids[i] for i in REQUEST_IDS_TO_ABORT]
        await engine.abort(abort_request_ids)

        # Wait for all tasks to complete.
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Verify results.
        for idx, result in enumerate(results):
            if idx in REQUEST_IDS_TO_ABORT:
                # Aborted requests should return partial results.
                assert isinstance(result, tuple), (
                    f"Request {idx} should have completed with "
                    f"partial results")
                num_generated_tokens, request_id = result
                # Should have generated some tokens before abort.
                assert num_generated_tokens > 0, (
                    f"Aborted request "
                    f"{request_id} should have generated some tokens")
            else:
                # Non-aborted requests should complete normally.
                assert isinstance(result, tuple), (
                    f"Request {idx} should have completed successfully")
                num_generated_tokens, request_id = result
                n = 3 if idx in PARALLEL_SAMPLE_REQ_IDS else 1
                expected_tokens = NUM_EXPECTED_TOKENS * n
                assert num_generated_tokens == expected_tokens, (
                    f"{request_id} generated {num_generated_tokens} but "
                    f"expected {expected_tokens}")

        # Make sure all aborted requests were cleaned up.
        assert not engine.output_processor.has_unfinished_requests()


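# `finished` is the stream-termination signal consumers rely on, so it
# must be False on every intermediate output and True only on the last.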
@pytest.mark.parametrize("n", [1, 3])
|
|
@pytest.mark.parametrize(
|
|
"engine_args,prompt",
|
|
[(TEXT_ENGINE_ARGS, TEXT_PROMPT), (VISION_ENGINE_ARGS, VISION_PROMPT)],
|
|
)
|
|
@pytest.mark.asyncio
|
|
async def test_finished_flag(
|
|
monkeypatch: pytest.MonkeyPatch,
|
|
n: int,
|
|
engine_args: AsyncEngineArgs,
|
|
prompt: PromptType,
|
|
):
|
|
|
|
with monkeypatch.context() as m, ExitStack() as after:
|
|
m.setenv("VLLM_USE_V1", "1")
|
|
|
|
with set_default_torch_num_threads(1):
|
|
engine = AsyncLLM.from_engine_args(engine_args)
|
|
after.callback(engine.shutdown)
|
|
|
|
sampling_params = SamplingParams(
|
|
max_tokens=100,
|
|
output_kind=RequestOutputKind.DELTA,
|
|
temperature=1.0,
|
|
seed=33,
|
|
n=n,
|
|
)
|
|
outputs = [
|
|
out
|
|
async for out in engine.generate(request_id="request-33",
|
|
prompt=prompt,
|
|
sampling_params=sampling_params)
|
|
]
|
|
|
|
# Assert only the last output has the finished flag set
|
|
assert all(not out.finished for out in outputs[:-1])
|
|
assert outputs[-1].finished
|
|
|
|
|
|
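# The consumer stops iterating once `cancel_after` tokens have arrived;
# closing the async generator is expected to abort the request on the
# engine side, leaving no unfinished requests behind.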
@pytest.mark.parametrize(
    "engine_args,prompt",
    [(TEXT_ENGINE_ARGS, TEXT_PROMPT), (VISION_ENGINE_ARGS, VISION_PROMPT)],
)
@pytest.mark.asyncio
async def test_mid_stream_cancellation(monkeypatch: pytest.MonkeyPatch,
                                       engine_args: AsyncEngineArgs,
                                       prompt: PromptType):
    """Test that requests can be cancelled mid-stream."""
    with monkeypatch.context() as m, ExitStack() as after:
        m.setenv("VLLM_USE_V1", "1")

        with set_default_torch_num_threads(1):
            engine = AsyncLLM.from_engine_args(engine_args)
        after.callback(engine.shutdown)

        NUM_REQUESTS = 100
        NUM_TOKENS = 1000
        NUM_EXPECTED_TOKENS = 20

        request_ids = [f"request-{i}" for i in range(NUM_REQUESTS)]

        # Create concurrent requests that will be cancelled mid-stream.
        tasks = []
        for request_id in request_ids:
            tasks.append(
                asyncio.create_task(
                    generate(
                        engine,
                        request_id,
                        prompt,
                        RequestOutputKind.DELTA,
                        NUM_TOKENS,
                        cancel_after=NUM_EXPECTED_TOKENS,
                    )))

        # Wait for all tasks to complete.
        results = await asyncio.gather(*tasks)

        # Verify all tasks were cancelled at the expected point.
        for num_generated_tokens, request_id in results:
            assert num_generated_tokens == NUM_EXPECTED_TOKENS, (
                f"{request_id} generated {num_generated_tokens} tokens but "
                f"expected to cancel after {NUM_EXPECTED_TOKENS}")

        # Make sure no requests are left hanging.
        assert not engine.output_processor.has_unfinished_requests()

        # Confirm we can reuse the request id after the cancellations.
        request_id = request_ids[0]
        task = asyncio.create_task(
            generate(engine, request_id, prompt, RequestOutputKind.DELTA,
                     NUM_EXPECTED_TOKENS))
        num_generated_tokens, request_id = await task
        assert num_generated_tokens == NUM_EXPECTED_TOKENS
        assert not engine.output_processor.has_unfinished_requests()


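# Subclasses LoggingStatLogger, replacing `log` with a MagicMock so the
# test can assert the engine invoked it without parsing log output.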
class MockLoggingStatLogger(LoggingStatLogger):

    def __init__(self, vllm_config: VllmConfig, engine_index: int = 0):
        super().__init__(vllm_config, engine_index)
        self.log = MagicMock()


@pytest.mark.asyncio
async def test_customize_loggers(monkeypatch):
    """Test that we can customize the loggers.

    If a customized logger is provided at the init, it should
    be added to the default loggers.
    """

    with monkeypatch.context() as m, ExitStack() as after:
        m.setenv("VLLM_USE_V1", "1")

        with set_default_torch_num_threads(1):
            engine = AsyncLLM.from_engine_args(
                TEXT_ENGINE_ARGS,
                stat_loggers=[MockLoggingStatLogger],
            )
        after.callback(engine.shutdown)

        await engine.do_log_stats()

        stat_loggers = engine.logger_manager.per_engine_logger_dict
        assert len(stat_loggers) == 1
        # LoggingStatLogger + MockLoggingStatLogger
        assert len(stat_loggers[0]) == 2
        stat_loggers[0][0].log.assert_called_once()


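# data_parallel_rank pins a request to a specific data-parallel engine.
# With the default single-engine setup only rank 0 exists, so rank 1
# should be rejected with a ValueError.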
@pytest.mark.asyncio(scope="module")
async def test_dp_rank_argument(monkeypatch: pytest.MonkeyPatch):
    with monkeypatch.context() as m, ExitStack() as after:
        m.setenv("VLLM_USE_V1", "1")

        with set_default_torch_num_threads(1):
            engine = AsyncLLM.from_engine_args(TEXT_ENGINE_ARGS)
        after.callback(engine.shutdown)

        sampling_params = SamplingParams(max_tokens=100,
                                         output_kind=RequestOutputKind.DELTA,
                                         temperature=1.0,
                                         seed=33)

        # Test with valid DP rank.
        async for _ in engine.generate(request_id="request-34",
                                       prompt=TEXT_PROMPT,
                                       sampling_params=sampling_params,
                                       data_parallel_rank=0):
            pass

        # Test with out-of-range DP rank.
        with pytest.raises(ValueError):
            async for _ in engine.generate(request_id="request-35",
                                           prompt=TEXT_PROMPT,
                                           sampling_params=sampling_params,
                                           data_parallel_rank=1):
                pass


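# `errored` is a property, and properties are looked up on the class, so
# a dead engine is simulated by patching type(engine) rather than the
# instance; patch.object restores the original property on exit.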
@pytest.mark.asyncio
async def test_check_health(monkeypatch: pytest.MonkeyPatch):
    """Test that check_health returns normally for a healthy engine
    and raises EngineDeadError when the engine is dead.
    """
    from unittest.mock import patch

    from vllm.v1.engine.exceptions import EngineDeadError

    with monkeypatch.context() as m, ExitStack() as after:
        m.setenv("VLLM_USE_V1", "1")

        with set_default_torch_num_threads(1):
            engine = AsyncLLM.from_engine_args(TEXT_ENGINE_ARGS)
        after.callback(engine.shutdown)

        # Test 1: Healthy engine should not raise any exception.
        await engine.check_health()

        # Test 2: Mock the errored property to simulate a dead engine.
        with patch.object(type(engine),
                          'errored',
                          new_callable=lambda: property(lambda self: True)
                          ), pytest.raises(EngineDeadError):
            await engine.check_health()

        # Test 3: Verify the healthy engine still works after the mock.
        await engine.check_health()


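# A server-side abort should still end the stream cleanly: the consumer
# receives one final RequestOutput with finished=True and finish_reason
# "abort" instead of an exception.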
@pytest.mark.parametrize(
    "output_kind", [RequestOutputKind.DELTA, RequestOutputKind.FINAL_ONLY])
@pytest.mark.asyncio
async def test_abort_final_output(
    monkeypatch: pytest.MonkeyPatch,
    output_kind: RequestOutputKind,
):
    """Test that abort() returns a final output with correct information."""

    with monkeypatch.context() as m, ExitStack() as after:
        m.setenv("VLLM_USE_V1", "1")

        with set_default_torch_num_threads(1):
            engine = AsyncLLM.from_engine_args(TEXT_ENGINE_ARGS)
        after.callback(engine.shutdown)

        request_id = "test-abort-final-output"

        # Start a long-running request.
        sampling_params = SamplingParams(
            max_tokens=3000,  # Long enough to allow abort
            ignore_eos=True,
            output_kind=output_kind,
            temperature=0.5,
            seed=42,
        )

        outputs: list[RequestOutput] = []
        generated = asyncio.create_task(
            collect_outputs(engine, request_id, TEXT_PROMPT, sampling_params,
                            outputs))

        # Let it generate some tokens.
        await asyncio.sleep(0.5)

        # Abort the request.
        await engine.abort(request_id)

        # Wait for generation to complete and return the final output.
        final_output = await generated

        # Verify we got a final output.
        assert final_output is not None
        assert final_output.finished
        assert len(final_output.outputs) == 1

        assert final_output.outputs[0].finish_reason == "abort"
        assert final_output.outputs[0].stop_reason is None

        # Verify num_cached_tokens is set correctly.
        assert hasattr(final_output, 'num_cached_tokens')
        assert final_output.num_cached_tokens >= 0

        # If we got intermediate outputs, verify they are consistent.
        if output_kind == RequestOutputKind.DELTA:
            # For DELTA, the sum of all intermediate tokens should be <= the
            # final token count.
            token_count = sum(
                len(output.outputs[0].token_ids) for output in outputs)
            assert token_count > 0
            # This would ordinarily be 0, but could end up > 0 if the
            # final abort is coalesced with another chunk in the output queue.
            assert len(final_output.outputs[0].token_ids) >= 0
        else:
            # For FINAL_ONLY, we should only get the final output.
            assert len(outputs) == 0
            assert len(final_output.outputs[0].token_ids) > 0

        assert not engine.output_processor.has_unfinished_requests()


async def collect_outputs(
    engine: AsyncLLM,
    request_id: str,
    prompt: PromptType,
    sampling_params: SamplingParams,
    outputs_list: list[RequestOutput],
) -> Optional[RequestOutput]:
    """Helper to collect outputs and return the final one."""
    final_output: Optional[RequestOutput] = None
    async for output in engine.generate(request_id=request_id,
                                        prompt=prompt,
                                        sampling_params=sampling_params):
        if not output.finished:
            outputs_list.append(output)
        final_output = output
    return final_output