mirror of
https://git.datalinker.icu/vllm-project/vllm.git
synced 2026-04-08 08:57:02 +08:00
Signed-off-by: Nick Hill <nhill@redhat.com> Signed-off-by: Lucas Kabela <lucaskabela@meta.com> Signed-off-by: Max de Bayser <mbayser@br.ibm.com> Signed-off-by: Andrew Sansom <andrew@protopia.ai> Signed-off-by: Boyuan Feng <boyuan@meta.com> Signed-off-by: Boyuan Feng <fby.1994@gmail.com> Signed-off-by: boyuanfeng <boyuan@meta.com> Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> Signed-off-by: JartX <sagformas@epdcenter.es> Signed-off-by: Chendi Xue <Chendi.Xue@intel.com> Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com> Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk> Signed-off-by: Chen Zhang <zhangch99@outlook.com> Signed-off-by: Roger Wang <hey@rogerw.io> Signed-off-by: mgoin <mgoin64@gmail.com> Signed-off-by: wwl2755 <wangwenlong2755@gmail.com> Signed-off-by: Manoel Marques <manoel.marques@ibm.com> Signed-off-by: Manoel Marques <manoelmrqs@gmail.com> Signed-off-by: Isotr0py <mozf@mail2.sysu.edu.cn> Signed-off-by: pengdrumli <pengdrumli@tencent.com> Signed-off-by: windsonsea <haifeng.yao@daocloud.io> Signed-off-by: Woosuk Kwon <woosuk@thinkingmachines.ai> Signed-off-by: Woosuk Kwon <woosuk.kwon@berkeley.edu> Signed-off-by: Huamin Li <3ericli@gmail.com> Signed-off-by: simondanielsson <simon.danielsson99@hotmail.com> Signed-off-by: Rahul Tuli <rtuli@redhat.com> Signed-off-by: Yang <lymailforjob@gmail.com> Signed-off-by: Debolina Roy <debroy@redhat.com> Signed-off-by: David Chen <530634352@qq.com> Signed-off-by: wangzi <3220100013@zju.edu.cn> Signed-off-by: Eldar Kurtic <8884008+eldarkurtic@users.noreply.github.com> Signed-off-by: NickLucche <nlucches@redhat.com> Signed-off-by: Yizhou Liu <liu_yizhou@outlook.com> Signed-off-by: Sara Kokkila Schumacher <saraks@ibm.com> Signed-off-by: Csrayz <jover@cmbchina.com> Signed-off-by: ivyilike <pww123@cmbchina.com> Signed-off-by: Burkhard Ringlein <ngl@zurich.ibm.com> Signed-off-by: Bowen Wang <abmfy@icloud.com> Signed-off-by: qqma <qqma@amazon.com> Signed-off-by: ElizaWszola <ewszola@redhat.com> Signed-off-by: Lu Fang <fanglu@fb.com> Signed-off-by: Zhuohan Li <zhuohan123@gmail.com> Signed-off-by: Luka Govedič <lgovedic@redhat.com> Signed-off-by: luka <lgovedic@redhat.com> Signed-off-by: Luka Govedič <ProExpertProg@users.noreply.github.com> Signed-off-by: Or Ozeri <oro@il.ibm.com> Signed-off-by: Johnny Yang <johnnyyang@google.com> Signed-off-by: Alec Solder <alecs@fb.com> Signed-off-by: Alec S <10566873+alecsolder@users.noreply.github.com> Signed-off-by: Russell Bryant <rbryant@redhat.com> Signed-off-by: Matthew Bonanni <mbonanni@redhat.com> Signed-off-by: Alexander Matveev <amatveev@redhat.com> Signed-off-by: yewentao256 <zhyanwentao@126.com> Signed-off-by: liuye.hj <liuye.hj@alibaba-inc.com> Signed-off-by: Kunshang Ji <kunshang.ji@intel.com> Signed-off-by: Lucia Fang <116399278+luccafong@users.noreply.github.com> Signed-off-by: Michael Goin <mgoin64@gmail.com> Signed-off-by: Varun Sundar Rabindranath <vsundarr@redhat.com> Signed-off-by: Ming Yang <minos.future@gmail.com> Signed-off-by: Zhikaiiii <1658973216@qq.com> Signed-off-by: Andreas Hartel <andreas.hartel@aleph-alpha.com> Signed-off-by: Jee Jee Li <pandaleefree@gmail.com> Signed-off-by: vllmellm <vllm.ellm@embeddedllm.com> Signed-off-by: wuxibin <wuxibin@bytedance.com> Signed-off-by: youkaichao <youkaichao@gmail.com> Signed-off-by: Peter Pan <Peter.Pan@daocloud.io> Signed-off-by: Peter Pan <peter.pan@daocloud.io> Signed-off-by: Nicolò Lucchesi<nicolo.lucchesi@gmail.com> Signed-off-by: Thomas Parnell <tpa@zurich.ibm.com> Signed-off-by: Sage Moore <sage@neuralmagic.com> Signed-off-by: Lucas Wilkinson <lwilkins@redhat.com> Signed-off-by: Lucas Wilkinson <LucasWilkinson@users.noreply.github.com> Signed-off-by: Tyler Michael Smith <tyler@neuralmagic.com> Signed-off-by: Bill Nell <bnell@redhat.com> Signed-off-by: Shreeasish Kumar <shreeasish@rivosinc.com> Signed-off-by: Weida Hong <wdhongtw@google.com> Signed-off-by: Ekagra Ranjan <3116519+ekagra-ranjan@users.noreply.github.com> Signed-off-by: Hashem Hashemi <hashem.hashemi@amd.com> Signed-off-by: Hashem Hashemi <159079214+amd-hhashemi@users.noreply.github.com> Signed-off-by: Amir Samani <asamani@nvidia.com> Signed-off-by: ElizaWszola <elizaw.9289@gmail.com> Signed-off-by: jiahanc <173873397+jiahanc@users.noreply.github.com> Signed-off-by: ilmarkov <markovilya197@gmail.com> Signed-off-by: Gregory Shtrasberg <Gregory.Shtrasberg@amd.com> Signed-off-by: Jialin Ouyang <Jialin.Ouyang@gmail.com> Signed-off-by: rouchenzi <ruochenwen@gmail.com> Signed-off-by: rouchenzi <40842833+rouchenzi@users.noreply.github.com> Signed-off-by: Andrew Xia <axia@meta.com> Signed-off-by: Kourosh Hakhamaneshi <kourosh@anyscale.com> Signed-off-by: Corey Lowman <clowman1993@gmail.com> Signed-off-by: jpvillam <jpvillam@amd.com> Signed-off-by: dougbtv <dosmith@redhat.com> Signed-off-by: Chenxi Yang <cxyang@fb.com> Signed-off-by: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Signed-off-by: ahao-anyscale <ahao@anyscale.com> Signed-off-by: Yan Lu <luyan@nvidia.com> Signed-off-by: baxingpiaochong <771405853@qq.com> Signed-off-by: Kyle Sayers <kylesayrs@gmail.com> Signed-off-by: Nikhil Gupta <nikhil.gupta2@arm.com> Signed-off-by: Yong Hoon Shin <yhshin@meta.com> Signed-off-by: Benjamin Chislett <benjamin.chislett@centml.ai> Signed-off-by: Benjamin Chislett <bchislett@nvidia.com> Signed-off-by: Ben Browning <bbrownin@redhat.com> Signed-off-by: Chengji Yao <chengjiyao@google.com> Signed-off-by: jiang1.li <jiang1.li@intel.com> Signed-off-by: Jackmin801 <ongjackm@gmail.com> Signed-off-by: Jonas M. Kübler <44084297+jmkuebler@users.noreply.github.com> Signed-off-by: taohui <taohui3@gmail.com> Signed-off-by: rongfu.leng <rongfu.leng@daocloud.io> Signed-off-by: Shu Wang <shuw@nvidia.com> Signed-off-by: Shu Wang. <shuw@nvidia.com> Signed-off-by: Tyler Michael Smith <tlrmchlsmth@gmail.com> Signed-off-by: Duncan Moss <djm.moss@gmail.com> Signed-off-by: Shiyan Deng <dsy842974287@meta.com> Signed-off-by: Wei Wei <wwei6@meta.com> Signed-off-by: Saman Keon <samanamp@outlook.com> Signed-off-by: yangxurui <yangxurui@meituan.com> Signed-off-by: nicole-lihui <nicole.li@daocloud.io> Signed-off-by: courage17340 <courage17340@163.com> Signed-off-by: Jacob Kahn <jacobkahn1@gmail.com> Signed-off-by: Fadi Arafeh <fadi.arafeh@arm.com> Signed-off-by: Agata Dobrzyniewicz <adobrzyniewicz@habana.ai> Signed-off-by: zxw <1020938856@qq.com> Signed-off-by: wang.yuqi <noooop@126.com> Signed-off-by: Cyrus Leung <cyrus.tl.leung@gmail.com> Signed-off-by: chenlang <chen.lang5@zte.com.cn> Signed-off-by: Jonas Kuebler <kuebj@amazon.com> Signed-off-by: AlonKejzman <alonkeizman@gmail.com> Signed-off-by: Tao Hui <taohui3@gmail.com> Signed-off-by: Matthew Bonanni <mbonanni001@gmail.com> Signed-off-by: Tomer Asida <57313761+tomeras91@users.noreply.github.com> Signed-off-by: Aleksandr Malyshev <maleksan@amd.com> Signed-off-by: Eugene Khvedchenia <ekhvedchenia@nvidia.com> Signed-off-by: Eugene Khvedchenya <ekhvedchenya@gmail.com> Signed-off-by: yiting.jiang <yiting.jiang@daocloud.io> Signed-off-by: xaguilar <Xavier.AguilarFruto@amd.com> Signed-off-by: Iceber Gu <caiwei95@hotmail.com> Signed-off-by: Tao He <linzhu.ht@alibaba-inc.com> Signed-off-by: Icey <1790571317@qq.com> Signed-off-by: 许文卿 <xwq391974@alibaba-inc.com> Signed-off-by: Chih-Chieh-Yang <7364402+cyang49@users.noreply.github.com> Co-authored-by: Nick Hill <nhill@redhat.com> Co-authored-by: Lucas Kabela <lucasakabela@gmail.com> Co-authored-by: Maximilien de Bayser <mbayser@br.ibm.com> Co-authored-by: Andrew Sansom <andrew@protopia.ai> Co-authored-by: Boyuan Feng <boyuan@meta.com> Co-authored-by: Luka Govedič <ProExpertProg@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> Co-authored-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> Co-authored-by: JartX <sagformas@epdcenter.es> Co-authored-by: Chendi.Xue <chendi.xue@intel.com> Co-authored-by: Chauncey <chaunceyjiang@gmail.com> Co-authored-by: xin.li <xin.li@daocloud.io> Co-authored-by: Cyrus Leung <tlleungac@connect.ust.hk> Co-authored-by: Chen Zhang <zhangch99@outlook.com> Co-authored-by: Roger Wang <hey@rogerw.io> Co-authored-by: Michael Goin <mgoin64@gmail.com> Co-authored-by: Wenlong Wang <wangwenlong2755@gmail.com> Co-authored-by: Manoel Marques <manoelmrqs@gmail.com> Co-authored-by: Isotr0py <mozf@mail2.sysu.edu.cn> Co-authored-by: lirong <56789630+lirong-lirong@users.noreply.github.com> Co-authored-by: Michael Yao <haifeng.yao@daocloud.io> Co-authored-by: Woosuk Kwon <woosuk.kwon@berkeley.edu> Co-authored-by: Huamin Li <3ericli@gmail.com> Co-authored-by: Lu Fang <30275821+houseroad@users.noreply.github.com> Co-authored-by: Simon Danielsson <70206058+simondanielsson@users.noreply.github.com> Co-authored-by: Rahul Tuli <rtuli@redhat.com> Co-authored-by: Claude <noreply@anthropic.com> Co-authored-by: Yang Liu <127183760+KKSK-DON@users.noreply.github.com> Co-authored-by: Deboleina <debroy@redhat.com> Co-authored-by: yinz-aizip <yinz@aizip.ai> Co-authored-by: WeiQing Chen <40507679+david6666666@users.noreply.github.com> Co-authored-by: wangzi <3220100013@zju.edu.cn> Co-authored-by: Eldar Kurtić <8884008+eldarkurtic@users.noreply.github.com> Co-authored-by: Nicolò Lucchesi <nlucches@redhat.com> Co-authored-by: Ye (Charlotte) Qi <yeq@meta.com> Co-authored-by: Yizhou <136800916+yiz-liu@users.noreply.github.com> Co-authored-by: Sara-KS <50249410+Sara-KS@users.noreply.github.com> Co-authored-by: Csrayz <jover@cmbchina.com> Co-authored-by: ivyilike <pww123@cmbchina.com> Co-authored-by: Burkhard Ringlein <ngl@zurich.ibm.com> Co-authored-by: Bowen Wang <abmfy@icloud.com> Co-authored-by: Tyler Michael Smith <tyler@neuralmagic.com> Co-authored-by: Daisy-Ma-coder <daisy.ma.0117@gmail.com> Co-authored-by: qqma <qqma@amazon.com> Co-authored-by: ElizaWszola <ewszola@redhat.com> Co-authored-by: Lucia Fang <116399278+luccafong@users.noreply.github.com> Co-authored-by: Zhuohan Li <zhuohan123@gmail.com> Co-authored-by: Simon Mo <simon.mo@hey.com> Co-authored-by: Or Ozeri <oro@il.ibm.com> Co-authored-by: Johnny Yang <24908445+jcyang43@users.noreply.github.com> Co-authored-by: Chengji Yao <chengjiyao@google.com> Co-authored-by: Alec S <10566873+alecsolder@users.noreply.github.com> Co-authored-by: Alec Solder <alecs@fb.com> Co-authored-by: Russell Bryant <rbryant@redhat.com> Co-authored-by: Matthew Bonanni <mbonanni@redhat.com> Co-authored-by: Robert Shaw <114415538+robertgshaw2-redhat@users.noreply.github.com> Co-authored-by: Chris Bamford <chrisbam4d@gmail.com> Co-authored-by: Alexander Matveev <59768536+alexm-redhat@users.noreply.github.com> Co-authored-by: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Co-authored-by: JJJYmmm <92386084+JJJYmmm@users.noreply.github.com> Co-authored-by: liuye.hj <liuye.hj@alibaba-inc.com> Co-authored-by: Kunshang Ji <kunshang.ji@intel.com> Co-authored-by: Lucia (Lu) Fang <fanglu@meta.com> Co-authored-by: Varun Sundar Rabindranath <varunsundar08@gmail.com> Co-authored-by: Varun Sundar Rabindranath <vsundarr@redhat.com> Co-authored-by: Ming Yang <yming@meta.com> Co-authored-by: Zhikaiiii <55917203+Zhikaiiii@users.noreply.github.com> Co-authored-by: Andreas Hartel <andreas@hartel.me> Co-authored-by: Jee Jee Li <pandaleefree@gmail.com> Co-authored-by: vllmellm <vllm.ellm@embeddedllm.com> Co-authored-by: Joel <wuxibin89@163.com> Co-authored-by: youkaichao <youkaichao@gmail.com> Co-authored-by: Mark McLoughlin <markmc@redhat.com> Co-authored-by: Peter Pan <peter.pan@daocloud.io> Co-authored-by: Nicolò Lucchesi <nicolo.lucchesi@gmail.com> Co-authored-by: Fanli Lin <fanli.lin@intel.com> Co-authored-by: Thomas Parnell <tpa@zurich.ibm.com> Co-authored-by: Lucas Wilkinson <LucasWilkinson@users.noreply.github.com> Co-authored-by: Sage Moore <sage@neuralmagic.com> Co-authored-by: yewentao256 <zhyanwentao@126.com> Co-authored-by: bnellnm <49004751+bnellnm@users.noreply.github.com> Co-authored-by: rivos-shreeasish <shreeasish@rivosinc.com> Co-authored-by: Chih-Chieh Yang <chih.chieh.yang@ibm.com> Co-authored-by: Weida Hong <wdhongtw@gmail.com> Co-authored-by: Ekagra Ranjan <3116519+ekagra-ranjan@users.noreply.github.com> Co-authored-by: Hashem Hashemi <159079214+amd-hhashemi@users.noreply.github.com> Co-authored-by: Amir Samani <samani@ualberta.ca> Co-authored-by: Luka Govedič <lgovedic@redhat.com> Co-authored-by: jiahanc <173873397+jiahanc@users.noreply.github.com> Co-authored-by: Ilya Markov <markovilya197@gmail.com> Co-authored-by: Gregory Shtrasberg <156009573+gshtras@users.noreply.github.com> Co-authored-by: Jialin Ouyang <Jialin.Ouyang@gmail.com> Co-authored-by: rouchenzi <40842833+rouchenzi@users.noreply.github.com> Co-authored-by: Andrew Xia <axia@meta.com> Co-authored-by: kourosh hakhamaneshi <31483498+kouroshHakha@users.noreply.github.com> Co-authored-by: Corey Lowman <clowman1993@gmail.com> Co-authored-by: Juan Villamizar <100237675+jpvillam-amd@users.noreply.github.com> Co-authored-by: jpvillam <jpvillam@amd.com> Co-authored-by: Doug Smith <dosmith@redhat.com> Co-authored-by: Chenxi Yang <cxyang@cs.utexas.edu> Co-authored-by: Chenxi Yang <cxyang@fb.com> Co-authored-by: ahao-anyscale <ahao@anyscale.com> Co-authored-by: 0xNullPath <luyanfcp@foxmail.com> Co-authored-by: baxingpiaochong <771405853@qq.com> Co-authored-by: Benjamin Chislett <bchislett@nvidia.com> Co-authored-by: Kyle Sayers <kylesayrs@gmail.com> Co-authored-by: Nikhil Gupta <nikhil.gupta2@arm.com> Co-authored-by: Yong Hoon Shin <48474650+sarckk@users.noreply.github.com> Co-authored-by: lhsjohn <huashuoli@tencent.com> Co-authored-by: Ben Browning <bbrownin@redhat.com> Co-authored-by: Li, Jiang <jiang1.li@intel.com> Co-authored-by: Jackmin801 <56836461+Jackmin801@users.noreply.github.com> Co-authored-by: Jonas M. Kübler <44084297+jmkuebler@users.noreply.github.com> Co-authored-by: Tao Hui <taohui3@gmail.com> Co-authored-by: rongfu.leng <rongfu.leng@daocloud.io> Co-authored-by: Shu Wang <shuw@nvidia.com> Co-authored-by: Tyler Michael Smith <tlrmchlsmth@gmail.com> Co-authored-by: Duncan Moss <djm.moss@gmail.com> Co-authored-by: Shiyan Deng <dsy842974287@meta.com> Co-authored-by: Wei Wei <wwei6@meta.com> Co-authored-by: Saman A. Pour <samanamp@outlook.com> Co-authored-by: XuruiYang <530534756@qq.com> Co-authored-by: yangxurui <yangxurui@meituan.com> Co-authored-by: Nicole LiHui 🥜 <nicolelihui@outlook.com> Co-authored-by: courage17340 <courage17340@users.noreply.github.com> Co-authored-by: Jacob Kahn <jacobkahn1@gmail.com> Co-authored-by: Nicole LiHui 🥜 <nicole.li@daocloud.io> Co-authored-by: Fadi Arafeh <115173828+fadara01@users.noreply.github.com> Co-authored-by: Agata Dobrzyniewicz <160237065+adobrzyn@users.noreply.github.com> Co-authored-by: yyzxw <34639446+yyzxw@users.noreply.github.com> Co-authored-by: wang.yuqi <noooop@126.com> Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com> Co-authored-by: chenlang <chen.lang5@zte.com.cn> Co-authored-by: chenlang <10346245@zte.com.cn> Co-authored-by: AlonKejzman <alonkeizman@gmail.com> Co-authored-by: tomeras91 <57313761+tomeras91@users.noreply.github.com> Co-authored-by: Aleksandr Malyshev <164964928+maleksan85@users.noreply.github.com> Co-authored-by: Aleksandr Malyshev <maleksan@amd.com> Co-authored-by: Doug Lehr <douglehr@amd.com> Co-authored-by: Eugene Khvedchenya <ekhvedchenya@gmail.com> Co-authored-by: yitingdc <59356937+yitingdc@users.noreply.github.com> Co-authored-by: xaguilar-amd <xavier.aguilarfruto@amd.com> Co-authored-by: Iceber Gu <caiwei95@hotmail.com> Co-authored-by: Tao He <linzhu.ht@alibaba-inc.com> Co-authored-by: Icey <1790571317@qq.com> Co-authored-by: Xu Wenqing <121550081+Xu-Wenqing@users.noreply.github.com> Co-authored-by: Chih-Chieh Yang <7364402+cyang49@users.noreply.github.com> Co-authored-by: RishiAstra <40644327+RishiAstra@users.noreply.github.com>
1149 lines
48 KiB
Python
1149 lines
48 KiB
Python
# SPDX-License-Identifier: Apache-2.0
|
||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||
"""Common tests for testing .generate() functionality for single / multiple
|
||
image, embedding, and video support for different VLMs in vLLM.
|
||
"""
|
||
import math
|
||
import os
|
||
from collections import defaultdict
|
||
from pathlib import PosixPath
|
||
|
||
import pytest
|
||
from transformers import (AutoModel, AutoModelForImageTextToText,
|
||
AutoModelForTextToWaveform)
|
||
|
||
from vllm.platforms import current_platform
|
||
from vllm.utils import identity
|
||
|
||
from ....conftest import (IMAGE_ASSETS, AudioTestAssets, HfRunner,
|
||
ImageTestAssets, VideoTestAssets, VllmRunner)
|
||
from ....utils import (create_new_process_for_each_test, large_gpu_mark,
|
||
multi_gpu_marks)
|
||
from ...utils import check_outputs_equal
|
||
from .vlm_utils import custom_inputs, model_utils, runners
|
||
from .vlm_utils.case_filtering import get_parametrized_options
|
||
from .vlm_utils.types import (CustomTestOptions, ExpandableVLMTestArgs,
|
||
VLMTestInfo, VLMTestType)
|
||
|
||
# This hack is needed for phi3v & paligemma models
|
||
# ROCm Triton FA can run into shared memory issues with these models,
|
||
# use other backends in the meantime
|
||
# FIXME (mattwong, gshtrasb, hongxiayan)
|
||
if current_platform.is_rocm():
|
||
os.environ["VLLM_USE_TRITON_FLASH_ATTN"] = "0"
|
||
|
||
# yapf: disable
|
||
COMMON_BROADCAST_SETTINGS = {
|
||
"test_type": VLMTestType.IMAGE,
|
||
"dtype": "half",
|
||
"max_tokens": 5,
|
||
"tensor_parallel_size": 2,
|
||
"hf_model_kwargs": {"device_map": "auto"},
|
||
"image_size_factors": [(.25, 0.5, 1.0)],
|
||
"distributed_executor_backend": (
|
||
"ray",
|
||
"mp",
|
||
)
|
||
}
|
||
|
||
### Test configuration for specific models
|
||
# NOTE: The convention of the test settings below is to lead each test key
|
||
# with the name of the model arch used in the test, using underscores in place
|
||
# of hyphens; this makes it more convenient to filter tests for a specific kind
|
||
# of model. For example....
|
||
#
|
||
# To run all test types for a specific key:
|
||
# use the k flag to substring match with a leading square bracket; if the
|
||
# model arch happens to be a substring of another one, you can add a
|
||
# trailing hyphen. E.g.,
|
||
# - pytest $TEST_FILE -k "[llava-"
|
||
# prevents matching on "[llava_next-" & will match just the enabled cases
|
||
# for llava, i.e., single image, image embedding, and custom input tests.
|
||
#
|
||
# To run a test for a Test Info for just one of multiple models:
|
||
# use the k flag to substring match the model name, e.g.,
|
||
# - pytest $TEST_FILE -k OpenGVLab/InternVL2-1B
|
||
# prevents matching on nGVLab/InternVL2-2B.
|
||
#
|
||
# You can also combine substrings to match more granularly.
|
||
# ex 1:
|
||
# pytest $TEST_FILE -k "test_single_image and OpenGVLab/InternVL2-1B"
|
||
# will run only test_single_image* for OpenGVLab/InternVL2-1B; this would
|
||
# match both wrappers for single image tests, since it also matches
|
||
# test_single_image_heavy (which forks if we have a distributed backend)
|
||
# ex 2:
|
||
# pytest $TEST_FILE -k "[llava- or [intern_vl-"
|
||
# will run all of the tests for only llava & internvl.
|
||
#
|
||
# NOTE you can add --collect-only to any of the above commands to see
|
||
# which cases would be selected and deselected by pytest. In general,
|
||
# this is a good idea for checking your command first, since tests are slow.
|
||
|
||
VLM_TEST_SETTINGS = {
|
||
#### Core tests to always run in the CI
|
||
"llava": VLMTestInfo(
|
||
models=["llava-hf/llava-1.5-7b-hf"],
|
||
test_type=(
|
||
VLMTestType.EMBEDDING,
|
||
VLMTestType.IMAGE,
|
||
VLMTestType.CUSTOM_INPUTS
|
||
),
|
||
prompt_formatter=lambda img_prompt: f"USER: {img_prompt}\nASSISTANT:",
|
||
convert_assets_to_embeddings=model_utils.get_llava_embeddings,
|
||
max_model_len=4096,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
vllm_output_post_proc=model_utils.llava_image_vllm_to_hf_output,
|
||
custom_test_opts=[CustomTestOptions(
|
||
inputs=custom_inputs.multi_image_multi_aspect_ratio_inputs(
|
||
formatter=lambda img_prompt: f"USER: {img_prompt}\nASSISTANT:"
|
||
),
|
||
limit_mm_per_prompt={"image": 4},
|
||
)],
|
||
# TODO: Revert to "auto" when CPU backend can use torch > 2.6
|
||
dtype="bfloat16" if current_platform.is_cpu() else "auto",
|
||
marks=[pytest.mark.core_model, pytest.mark.cpu_model],
|
||
),
|
||
"paligemma": VLMTestInfo(
|
||
models=["google/paligemma-3b-mix-224"],
|
||
test_type=VLMTestType.IMAGE,
|
||
prompt_formatter=identity,
|
||
img_idx_to_prompt = lambda idx: "",
|
||
# Paligemma uses its own sample prompts because the default one fails
|
||
single_image_prompts=IMAGE_ASSETS.prompts({
|
||
"stop_sign": "caption es",
|
||
"cherry_blossom": "What is in the picture?",
|
||
}),
|
||
auto_cls=AutoModelForImageTextToText,
|
||
vllm_output_post_proc=model_utils.paligemma_vllm_to_hf_output,
|
||
dtype="bfloat16",
|
||
marks=[pytest.mark.skip(reason="vLLM does not support PrefixLM attention mask")], # noqa: E501
|
||
),
|
||
"qwen2_5_vl": VLMTestInfo(
|
||
models=["Qwen/Qwen2.5-VL-3B-Instruct"],
|
||
test_type=(
|
||
VLMTestType.IMAGE,
|
||
VLMTestType.MULTI_IMAGE,
|
||
VLMTestType.VIDEO
|
||
),
|
||
prompt_formatter=lambda img_prompt: f"<|im_start|>User\n{img_prompt}<|im_end|>\n<|im_start|>assistant\n", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: "<|vision_start|><|image_pad|><|vision_end|>", # noqa: E501
|
||
video_idx_to_prompt=lambda idx: "<|vision_start|><|video_pad|><|vision_end|>", # noqa: E501
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
vllm_output_post_proc=model_utils.qwen2_vllm_to_hf_output,
|
||
image_size_factors=[(), (0.25,), (0.25, 0.25, 0.25), (0.25, 0.2, 0.15)],
|
||
marks=[pytest.mark.core_model, pytest.mark.cpu_model],
|
||
),
|
||
"qwen2_5_omni": VLMTestInfo(
|
||
models=["Qwen/Qwen2.5-Omni-3B"],
|
||
test_type=(
|
||
VLMTestType.IMAGE,
|
||
VLMTestType.MULTI_IMAGE,
|
||
VLMTestType.VIDEO
|
||
),
|
||
prompt_formatter=lambda img_prompt: f"<|im_start|>User\n{img_prompt}<|im_end|>\n<|im_start|>assistant\n", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: "<|vision_bos|><|IMAGE|><|vision_eos|>", # noqa: E501
|
||
video_idx_to_prompt=lambda idx: "<|vision_bos|><|VIDEO|><|vision_eos|>", # noqa: E501
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
num_logprobs= 6 if current_platform.is_cpu() else 5,
|
||
auto_cls=AutoModelForTextToWaveform,
|
||
vllm_output_post_proc=model_utils.qwen2_vllm_to_hf_output,
|
||
patch_hf_runner=model_utils.qwen2_5_omni_patch_hf_runner,
|
||
image_size_factors=[(), (0.25,), (0.25, 0.25, 0.25), (0.25, 0.2, 0.15)],
|
||
marks=[pytest.mark.core_model, pytest.mark.cpu_model],
|
||
),
|
||
"ultravox": VLMTestInfo(
|
||
models = ["fixie-ai/ultravox-v0_5-llama-3_2-1b"],
|
||
test_type=VLMTestType.AUDIO,
|
||
prompt_formatter=lambda audio_prompt: f"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{audio_prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", # noqa: E501
|
||
audio_idx_to_prompt=lambda idx: "<|audio|>",
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
auto_cls=AutoModel,
|
||
hf_output_post_proc=model_utils.ultravox_trunc_hf_output,
|
||
marks=[pytest.mark.core_model, pytest.mark.cpu_model],
|
||
),
|
||
#### Transformers fallback to test
|
||
## To reduce test burden, we only test batching arbitrary image size
|
||
# Dynamic image length and number of patches
|
||
"llava-onevision-transformers": VLMTestInfo(
|
||
models=["llava-hf/llava-onevision-qwen2-0.5b-ov-hf"],
|
||
test_type=VLMTestType.IMAGE,
|
||
prompt_formatter=lambda vid_prompt: f"<|im_start|>user\n{vid_prompt}<|im_end|>\n<|im_start|>assistant\n", # noqa: E501
|
||
max_model_len=16384,
|
||
hf_model_kwargs=model_utils.llava_onevision_hf_model_kwargs("llava-hf/llava-onevision-qwen2-0.5b-ov-hf"), # noqa: E501
|
||
auto_cls=AutoModelForImageTextToText,
|
||
vllm_output_post_proc=model_utils.llava_onevision_vllm_to_hf_output,
|
||
image_size_factors=[(0.25, 0.5, 1.0)],
|
||
vllm_runner_kwargs={
|
||
"model_impl": "transformers",
|
||
"default_torch_num_threads": 1,
|
||
},
|
||
# FIXME: Investigate why the test hangs
|
||
# when processing the 3rd prompt in vLLM
|
||
marks=[pytest.mark.core_model, pytest.mark.skip(reason="Test hangs")],
|
||
),
|
||
"idefics3-transformers": VLMTestInfo(
|
||
models=["HuggingFaceTB/SmolVLM-256M-Instruct"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=lambda img_prompt:f"<|begin_of_text|>User:{img_prompt}<end_of_utterance>\nAssistant:", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: ")\n",
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
get_stop_token_ids=lambda tok: [tok.eos_id, tok.eot_id],
|
||
hf_output_post_proc=model_utils.minicpmv_trunc_hf_output,
|
||
patch_hf_runner=model_utils.minicpmv_25_patch_hf_runner,
|
||
# FIXME: https://huggingface.co/openbmb/MiniCPM-V-2_6/discussions/55
|
||
marks=[pytest.mark.skip("HF import fails")],
|
||
),
|
||
"minicpmo_26": VLMTestInfo(
|
||
models=["openbmb/MiniCPM-o-2_6"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=lambda img_prompt: f"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{img_prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: "()\n",
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
get_stop_token_ids=lambda tok: tok.convert_tokens_to_ids(['<|im_end|>', '<|endoftext|>']), # noqa: E501
|
||
hf_output_post_proc=model_utils.minicpmv_trunc_hf_output,
|
||
patch_hf_runner=model_utils.minicpmo_26_patch_hf_runner,
|
||
# FIXME: https://huggingface.co/openbmb/MiniCPM-o-2_6/discussions/49
|
||
marks=[pytest.mark.skip("HF import fails")],
|
||
),
|
||
"minicpmv_26": VLMTestInfo(
|
||
models=["openbmb/MiniCPM-V-2_6"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=lambda img_prompt: f"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{img_prompt}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: "()\n",
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
get_stop_token_ids=lambda tok: tok.convert_tokens_to_ids(['<|im_end|>', '<|endoftext|>']), # noqa: E501
|
||
hf_output_post_proc=model_utils.minicpmv_trunc_hf_output,
|
||
patch_hf_runner=model_utils.minicpmv_26_patch_hf_runner,
|
||
),
|
||
"minimax_vl_01": VLMTestInfo(
|
||
models=["MiniMaxAI/MiniMax-VL-01"],
|
||
prompt_formatter=lambda img_prompt: f"<beginning_of_sentence>user: {img_prompt} assistant:<end_of_sentence>", # noqa: E501
|
||
img_idx_to_prompt=lambda _: "<image>",
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
max_model_len=8192,
|
||
max_num_seqs=4,
|
||
dtype="bfloat16",
|
||
hf_output_post_proc=model_utils.minimax_vl_01_hf_output,
|
||
patch_hf_runner=model_utils.minimax_vl_01_patch_hf_runner,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
marks=[large_gpu_mark(min_gb=80)],
|
||
),
|
||
"molmo": VLMTestInfo(
|
||
models=["allenai/Molmo-7B-D-0924"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=identity,
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
patch_hf_runner=model_utils.molmo_patch_hf_runner,
|
||
),
|
||
"ovis1_6-gemma2": VLMTestInfo(
|
||
models=["AIDC-AI/Ovis1.6-Gemma2-9B"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=lambda img_prompt: f"<bos><start_of_turn>user\n{img_prompt}<end_of_turn>\n<start_of_turn>model\n", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: "<image>\n", # noqa: E501
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
dtype="half",
|
||
# use sdpa mode for hf runner since ovis2 didn't work with flash_attn
|
||
hf_model_kwargs={"llm_attn_implementation": "sdpa"},
|
||
patch_hf_runner=model_utils.ovis_patch_hf_runner,
|
||
marks=[large_gpu_mark(min_gb=32)],
|
||
),
|
||
"ovis2": VLMTestInfo(
|
||
models=["AIDC-AI/Ovis2-1B"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=lambda img_prompt: f"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n{img_prompt}<|im_end|>\n<|im_start|>assistant\n", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: "<image>\n", # noqa: E501
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
dtype="half",
|
||
# use sdpa mode for hf runner since ovis2 didn't work with flash_attn
|
||
hf_model_kwargs={"llm_attn_implementation": "sdpa"},
|
||
patch_hf_runner=model_utils.ovis_patch_hf_runner,
|
||
),
|
||
"ovis2_5": VLMTestInfo(
|
||
models=["AIDC-AI/Ovis2.5-2B"],
|
||
test_type=(
|
||
VLMTestType.IMAGE,
|
||
VLMTestType.MULTI_IMAGE,
|
||
VLMTestType.VIDEO
|
||
),
|
||
prompt_formatter=lambda img_prompt: f"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n{img_prompt}<|im_end|>\n<|im_start|>assistant\n", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: "<image>\n", # noqa: E501
|
||
video_idx_to_prompt=lambda idx: "<video>\n",
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
dtype="half",
|
||
num_logprobs=10,
|
||
patch_hf_runner=model_utils.ovis2_5_patch_hf_runner,
|
||
hf_model_kwargs={"revision": "refs/pr/5"},
|
||
),
|
||
"phi3v": VLMTestInfo(
|
||
models=["microsoft/Phi-3.5-vision-instruct"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=lambda img_prompt: f"<|user|>\n{img_prompt}<|end|>\n<|assistant|>\n", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: f"<|image_{idx}|>\n",
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
runner="generate",
|
||
# use sdpa mode for hf runner since phi3v didn't work with flash_attn
|
||
hf_model_kwargs={"_attn_implementation": "sdpa"},
|
||
use_tokenizer_eos=True,
|
||
vllm_output_post_proc=model_utils.phi3v_vllm_to_hf_output,
|
||
num_logprobs=10,
|
||
),
|
||
"pixtral_hf": VLMTestInfo(
|
||
models=["nm-testing/pixtral-12b-FP8-dynamic"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=lambda img_prompt: f"<s>[INST]{img_prompt}[/INST]",
|
||
img_idx_to_prompt=lambda idx: "[IMG]",
|
||
max_model_len=8192,
|
||
max_num_seqs=2,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
marks=[large_gpu_mark(min_gb=48)],
|
||
),
|
||
"qwen_vl": VLMTestInfo(
|
||
models=["Qwen/Qwen-VL"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=identity,
|
||
img_idx_to_prompt=lambda idx: f"Picture {idx}: <img></img>\n",
|
||
max_model_len=1024,
|
||
max_num_seqs=2,
|
||
vllm_output_post_proc=model_utils.qwen_vllm_to_hf_output,
|
||
prompt_path_encoder=model_utils.qwen_prompt_path_encoder,
|
||
# FIXME: https://github.com/huggingface/transformers/issues/38358
|
||
marks=[pytest.mark.skip("Model initialization fails")],
|
||
),
|
||
"qwen2_vl": VLMTestInfo(
|
||
models=["Qwen/Qwen2-VL-2B-Instruct"],
|
||
test_type=(
|
||
VLMTestType.IMAGE,
|
||
VLMTestType.MULTI_IMAGE,
|
||
VLMTestType.VIDEO
|
||
),
|
||
prompt_formatter=lambda img_prompt: f"<|im_start|>User\n{img_prompt}<|im_end|>\n<|im_start|>assistant\n", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: "<|vision_start|><|image_pad|><|vision_end|>", # noqa: E501
|
||
video_idx_to_prompt=lambda idx: "<|vision_start|><|video_pad|><|vision_end|>", # noqa: E501
|
||
multi_image_prompt="Picture 1: <vlm_image>\nPicture 2: <vlm_image>\nDescribe these two images with one paragraph respectively.", # noqa: E501
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
vllm_output_post_proc=model_utils.qwen2_vllm_to_hf_output,
|
||
image_size_factors=[(), (0.25,), (0.25, 0.25, 0.25), (0.25, 0.2, 0.15)],
|
||
marks=[pytest.mark.cpu_model],
|
||
),
|
||
"skywork_r1v": VLMTestInfo(
|
||
models=["Skywork/Skywork-R1V-38B"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=lambda img_prompt: f"<|begin▁of▁sentence|><|User|>\n{img_prompt}<|Assistant|><think>\n", # noqa: E501
|
||
single_image_prompts=IMAGE_ASSETS.prompts({
|
||
"stop_sign": "<image>\nWhat's the content in the center of the image?", # noqa: E501
|
||
"cherry_blossom": "<image>\nWhat is the season?",
|
||
}),
|
||
multi_image_prompt="<image>\n<image>\nDescribe the two images in short.", # noqa: E501
|
||
max_model_len=4096,
|
||
use_tokenizer_eos=True,
|
||
patch_hf_runner=model_utils.skyworkr1v_patch_hf_runner,
|
||
marks=[large_gpu_mark(min_gb=80)],
|
||
),
|
||
"smolvlm": VLMTestInfo(
|
||
models=["HuggingFaceTB/SmolVLM2-2.2B-Instruct"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=lambda img_prompt: f"<|im_start|>User:{img_prompt}<end_of_utterance>\nAssistant:", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: "<image>",
|
||
max_model_len=8192,
|
||
max_num_seqs=2,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
hf_output_post_proc=model_utils.smolvlm_trunc_hf_output,
|
||
),
|
||
"tarsier": VLMTestInfo(
|
||
models=["omni-research/Tarsier-7b"],
|
||
test_type=(VLMTestType.IMAGE, VLMTestType.MULTI_IMAGE),
|
||
prompt_formatter=lambda img_prompt: f"USER: {img_prompt} ASSISTANT:",
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
patch_hf_runner=model_utils.tarsier_patch_hf_runner,
|
||
),
|
||
"tarsier2": VLMTestInfo(
|
||
models=["omni-research/Tarsier2-Recap-7b"],
|
||
test_type=(
|
||
VLMTestType.IMAGE,
|
||
VLMTestType.MULTI_IMAGE,
|
||
VLMTestType.VIDEO,
|
||
),
|
||
prompt_formatter=lambda img_prompt: f"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n{img_prompt}<|im_end|>\n<|im_start|>assistant\n", # noqa: E501
|
||
img_idx_to_prompt=lambda idx: "<|vision_start|><|image_pad|><|vision_end|>", # noqa: E501
|
||
video_idx_to_prompt=lambda idx: "<|vision_start|><|video_pad|><|vision_end|>", # noqa: E501
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
image_size_factors=[(), (0.25,), (0.25, 0.25, 0.25), (0.25, 0.2, 0.15)],
|
||
marks=[pytest.mark.skip("Model initialization hangs")],
|
||
),
|
||
### Tensor parallel / multi-gpu broadcast tests
|
||
"chameleon-broadcast": VLMTestInfo(
|
||
models=["facebook/chameleon-7b"],
|
||
prompt_formatter=lambda img_prompt: f"USER: {img_prompt}\nASSISTANT:",
|
||
max_model_len=4096,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
vllm_output_post_proc = lambda vllm_output, model: vllm_output[:2],
|
||
hf_output_post_proc = lambda hf_output, model: hf_output[:2],
|
||
comparator=check_outputs_equal,
|
||
marks=multi_gpu_marks(num_gpus=2),
|
||
**COMMON_BROADCAST_SETTINGS # type: ignore
|
||
),
|
||
"llava-broadcast": VLMTestInfo(
|
||
models=["llava-hf/llava-1.5-7b-hf"],
|
||
prompt_formatter=lambda img_prompt: f"USER: {img_prompt}\nASSISTANT:",
|
||
max_model_len=4096,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
vllm_output_post_proc=model_utils.llava_image_vllm_to_hf_output,
|
||
marks=multi_gpu_marks(num_gpus=2),
|
||
**COMMON_BROADCAST_SETTINGS # type: ignore
|
||
),
|
||
"llava_next-broadcast": VLMTestInfo(
|
||
models=["llava-hf/llava-v1.6-mistral-7b-hf"],
|
||
prompt_formatter=lambda img_prompt: f"[INST] {img_prompt} [/INST]",
|
||
max_model_len=10240,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
vllm_output_post_proc=model_utils.llava_image_vllm_to_hf_output,
|
||
marks=multi_gpu_marks(num_gpus=2),
|
||
**COMMON_BROADCAST_SETTINGS # type: ignore
|
||
),
|
||
### Custom input edge-cases for specific models
|
||
"intern_vl-diff-patches": VLMTestInfo(
|
||
models=["OpenGVLab/InternVL2-2B"],
|
||
prompt_formatter=lambda img_prompt: f"<|im_start|>User\n{img_prompt}<|im_end|>\n<|im_start|>Assistant\n", # noqa: E501
|
||
test_type=VLMTestType.CUSTOM_INPUTS,
|
||
max_model_len=4096,
|
||
use_tokenizer_eos=True,
|
||
patch_hf_runner=model_utils.internvl_patch_hf_runner,
|
||
custom_test_opts=[
|
||
CustomTestOptions(
|
||
inputs=inp,
|
||
limit_mm_per_prompt={"image": 2},
|
||
) for inp in custom_inputs.different_patch_input_cases_internvl()
|
||
],
|
||
),
|
||
"llava_onevision-multiple-images": VLMTestInfo(
|
||
models=["llava-hf/llava-onevision-qwen2-0.5b-ov-hf"],
|
||
test_type=VLMTestType.CUSTOM_INPUTS,
|
||
max_model_len=16384,
|
||
max_num_seqs=2,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
hf_model_kwargs=model_utils.llava_onevision_hf_model_kwargs("llava-hf/llava-onevision-qwen2-0.5b-ov-hf"), # noqa: E501
|
||
vllm_output_post_proc=model_utils.llava_onevision_vllm_to_hf_output,
|
||
custom_test_opts=[CustomTestOptions(
|
||
inputs=custom_inputs.multi_image_multi_aspect_ratio_inputs(
|
||
formatter=lambda vid_prompt: f"<|im_start|>user\n{vid_prompt}<|im_end|>\n<|im_start|>assistant\n", # noqa: E501
|
||
),
|
||
limit_mm_per_prompt={"image": 4},
|
||
)],
|
||
),
|
||
# regression test for https://github.com/vllm-project/vllm/issues/15122
|
||
"qwen2_5_vl-windows-attention": VLMTestInfo(
|
||
models=["Qwen/Qwen2.5-VL-3B-Instruct"],
|
||
test_type=VLMTestType.CUSTOM_INPUTS,
|
||
max_model_len=4096,
|
||
max_num_seqs=2,
|
||
auto_cls=AutoModelForImageTextToText,
|
||
vllm_output_post_proc=model_utils.qwen2_vllm_to_hf_output,
|
||
custom_test_opts=[CustomTestOptions(
|
||
inputs=custom_inputs.windows_attention_image_qwen2_5_vl(),
|
||
limit_mm_per_prompt={"image": 1},
|
||
)],
|
||
),
|
||
}
|
||
# yapf: enable
|
||
|
||
|
||
def _mark_splits(
|
||
test_settings: dict[str, VLMTestInfo],
|
||
*,
|
||
num_groups: int,
|
||
) -> dict[str, VLMTestInfo]:
|
||
name_by_test_info_id = {id(v): k for k, v in test_settings.items()}
|
||
test_infos_by_model = defaultdict[str, list[VLMTestInfo]](list)
|
||
|
||
for info in test_settings.values():
|
||
for model in info.models:
|
||
test_infos_by_model[model].append(info)
|
||
|
||
models = sorted(test_infos_by_model.keys())
|
||
split_size = math.ceil(len(models) / num_groups)
|
||
|
||
new_test_settings = dict[str, VLMTestInfo]()
|
||
|
||
for i in range(num_groups):
|
||
models_in_group = models[i * split_size:(i + 1) * split_size]
|
||
|
||
for model in models_in_group:
|
||
for info in test_infos_by_model[model]:
|
||
new_marks = (info.marks or []) + [pytest.mark.split(group=i)]
|
||
new_info = info._replace(marks=new_marks)
|
||
new_test_settings[name_by_test_info_id[id(info)]] = new_info
|
||
|
||
missing_keys = test_settings.keys() - new_test_settings.keys()
|
||
assert not missing_keys, f"Missing keys: {missing_keys}"
|
||
|
||
return new_test_settings
|
||
|
||
|
||
VLM_TEST_SETTINGS = _mark_splits(VLM_TEST_SETTINGS, num_groups=2)
|
||
|
||
|
||
### Test wrappers
|
||
# Wrappers around the core test running func for:
|
||
# - single image
|
||
# - multi-image
|
||
# - image embeddings
|
||
# - video
|
||
# - audio
|
||
# - custom inputs
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.IMAGE,
|
||
create_new_process_for_each_test=False,
|
||
))
|
||
def test_single_image_models(
|
||
tmp_path: PosixPath,
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
image_assets: ImageTestAssets,
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_single_image_test(
|
||
tmp_path=tmp_path,
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
image_assets=image_assets,
|
||
)
|
||
|
||
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.MULTI_IMAGE,
|
||
create_new_process_for_each_test=False,
|
||
))
|
||
def test_multi_image_models(
|
||
tmp_path: PosixPath,
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
image_assets: ImageTestAssets,
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_multi_image_test(
|
||
tmp_path=tmp_path,
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
image_assets=image_assets,
|
||
)
|
||
|
||
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.EMBEDDING,
|
||
create_new_process_for_each_test=False,
|
||
))
|
||
def test_image_embedding_models(
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
image_assets: ImageTestAssets,
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_embedding_test(
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
image_assets=image_assets,
|
||
)
|
||
|
||
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.VIDEO,
|
||
create_new_process_for_each_test=False,
|
||
))
|
||
def test_video_models(
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
video_assets: VideoTestAssets,
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_video_test(
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
video_assets=video_assets,
|
||
)
|
||
|
||
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.AUDIO,
|
||
create_new_process_for_each_test=False,
|
||
))
|
||
def test_audio_models(
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
audio_assets: AudioTestAssets,
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_audio_test(
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
audio_assets=audio_assets,
|
||
)
|
||
|
||
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.CUSTOM_INPUTS,
|
||
create_new_process_for_each_test=False,
|
||
))
|
||
def test_custom_inputs_models(
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_custom_inputs_test(
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
)
|
||
|
||
|
||
#### Tests filtering for things running each test as a new process
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.IMAGE,
|
||
create_new_process_for_each_test=True,
|
||
))
|
||
@create_new_process_for_each_test()
|
||
def test_single_image_models_heavy(
|
||
tmp_path: PosixPath,
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
image_assets: ImageTestAssets,
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_single_image_test(
|
||
tmp_path=tmp_path,
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
image_assets=image_assets,
|
||
)
|
||
|
||
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.MULTI_IMAGE,
|
||
create_new_process_for_each_test=True,
|
||
))
|
||
@create_new_process_for_each_test()
|
||
def test_multi_image_models_heavy(
|
||
tmp_path: PosixPath,
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
image_assets: ImageTestAssets,
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_multi_image_test(
|
||
tmp_path=tmp_path,
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
image_assets=image_assets,
|
||
)
|
||
|
||
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.EMBEDDING,
|
||
create_new_process_for_each_test=True,
|
||
))
|
||
@create_new_process_for_each_test()
|
||
def test_image_embedding_models_heavy(
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
image_assets: ImageTestAssets,
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_embedding_test(
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
image_assets=image_assets,
|
||
)
|
||
|
||
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.VIDEO,
|
||
create_new_process_for_each_test=True,
|
||
))
|
||
def test_video_models_heavy(
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
video_assets: VideoTestAssets,
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_video_test(
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
video_assets=video_assets,
|
||
)
|
||
|
||
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.AUDIO,
|
||
create_new_process_for_each_test=True,
|
||
))
|
||
def test_audio_models_heavy(
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
audio_assets: AudioTestAssets,
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_audio_test(
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
audio_assets=audio_assets,
|
||
)
|
||
|
||
|
||
@pytest.mark.parametrize(
|
||
"model_type,test_case",
|
||
get_parametrized_options(
|
||
VLM_TEST_SETTINGS,
|
||
test_type=VLMTestType.CUSTOM_INPUTS,
|
||
create_new_process_for_each_test=True,
|
||
))
|
||
@create_new_process_for_each_test()
|
||
def test_custom_inputs_models_heavy(
|
||
model_type: str,
|
||
test_case: ExpandableVLMTestArgs,
|
||
hf_runner: type[HfRunner],
|
||
vllm_runner: type[VllmRunner],
|
||
):
|
||
model_test_info = VLM_TEST_SETTINGS[model_type]
|
||
runners.run_custom_inputs_test(
|
||
model_test_info=model_test_info,
|
||
test_case=test_case,
|
||
hf_runner=hf_runner,
|
||
vllm_runner=vllm_runner,
|
||
)
|