# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
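"""Tests for chat message parsing (parse_chat_messages / parse_chat_messages_futures),
covering multimodal content, per-item media UUIDs, and interleaved placeholders."""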

import warnings
from collections.abc import Mapping
from typing import Literal, Optional

import pytest
from mistral_common.tokens.tokenizers.base import SpecialTokenPolicy, SpecialTokens
from mistral_common.tokens.tokenizers.tekken import SpecialTokenInfo, Tekkenizer

from vllm.assets.audio import AudioAsset
from vllm.assets.image import ImageAsset
from vllm.assets.video import VideoAsset
from vllm.config import ModelConfig
from vllm.entrypoints.chat_utils import (
    _try_extract_ast,
    load_chat_template,
    parse_chat_messages,
    parse_chat_messages_futures,
    resolve_chat_template_content_format,
    resolve_hf_chat_template,
)
from vllm.multimodal import MultiModalDataDict, MultiModalUUIDDict
from vllm.multimodal.utils import (
    encode_audio_base64,
    encode_image_base64,
    encode_video_base64,
)
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.transformers_utils.tokenizers.mistral import MistralTokenizer

from ..models.registry import HF_EXAMPLE_MODELS
from ..utils import VLLM_PATH

EXAMPLES_DIR = VLLM_PATH / "examples"

PHI3V_MODEL_ID = "microsoft/Phi-3.5-vision-instruct"
ULTRAVOX_MODEL_ID = "fixie-ai/ultravox-v0_5-llama-3_2-1b"
QWEN2AUDIO_MODEL_ID = "Qwen/Qwen2-Audio-7B-Instruct"
QWEN2VL_MODEL_ID = "Qwen/Qwen2-VL-2B-Instruct"
QWEN25VL_MODEL_ID = "Qwen/Qwen2.5-VL-3B-Instruct"
QWEN25OMNI_MODEL_ID = "Qwen/Qwen2.5-Omni-7B"
LLAMA_GUARD_MODEL_ID = "meta-llama/Llama-Guard-3-1B"
HERMES_MODEL_ID = "NousResearch/Hermes-3-Llama-3.1-8B"
MISTRAL_MODEL_ID = "mistralai/Mistral-Small-3.1-24B-Instruct-2503"


@pytest.fixture(scope="function")
def phi3v_model_config():
    return ModelConfig(
        PHI3V_MODEL_ID,
        runner="generate",
        trust_remote_code=True,
        limit_mm_per_prompt={"image": 2},
    )


@pytest.fixture(scope="function")
def phi3v_model_config_mm_interleaved():
    return ModelConfig(
        PHI3V_MODEL_ID,
        runner="generate",
        trust_remote_code=True,
        interleave_mm_strings=True,
        limit_mm_per_prompt={"image": 2},
    )


@pytest.fixture(scope="module")
def phi3v_tokenizer():
    return get_tokenizer(PHI3V_MODEL_ID)


@pytest.fixture(scope="function")
def qwen2_audio_model_config():
    return ModelConfig(
        QWEN2AUDIO_MODEL_ID,
        runner="generate",
        trust_remote_code=True,
        limit_mm_per_prompt={"audio": 1},
    )


@pytest.fixture(scope="module")
def qwen2_audio_tokenizer():
    return get_tokenizer(QWEN2AUDIO_MODEL_ID)


@pytest.fixture(scope="function")
def qwen25omni_model_config_mm_interleaved():
    return ModelConfig(
        QWEN25OMNI_MODEL_ID,
        runner="generate",
        interleave_mm_strings=True,
        limit_mm_per_prompt={"image": 2, "audio": 1, "video": 1},
    )


@pytest.fixture(scope="module")
def qwen25omni_tokenizer():
    return get_tokenizer(QWEN25OMNI_MODEL_ID)


@pytest.fixture(scope="function")
def mistral_model_config():
    return ModelConfig(
        MISTRAL_MODEL_ID,
        runner="generate",
        limit_mm_per_prompt={"image": 2},
    )


@pytest.fixture(scope="module")
def mistral_tokenizer():
    return get_tokenizer(MISTRAL_MODEL_ID)


@pytest.fixture(scope="module")
def image_url():
    image = ImageAsset("cherry_blossom")
    base64 = encode_image_base64(image.pil_image)
    return f"data:image/jpeg;base64,{base64}"


@pytest.fixture(scope="module")
def video_url():
    video = VideoAsset("baby_reading", 1)
    base64 = encode_video_base64(video.np_ndarrays)
    return f"data:video/jpeg;base64,{base64}"


@pytest.fixture(scope="module")
def audio_url():
    audio = AudioAsset("mary_had_lamb")
    base64 = encode_audio_base64(*audio.audio_and_sample_rate)
    return f"data:audio/ogg;base64,{base64}"
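

# Shared assertion helpers used by the parsing tests below.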
def _assert_mm_data_is_image_input(
    mm_data: Optional[MultiModalDataDict],
    image_count: int,
    skipped_image_indices: Optional[list] = None,
) -> None:
    assert mm_data is not None
    assert set(mm_data.keys()) == {"image"}

    image_data = mm_data.get("image")
    assert image_data is not None

    assert isinstance(image_data, list) and len(image_data) == image_count
    if skipped_image_indices is not None:
        for i in skipped_image_indices:
            assert image_data[i] is None


def _assert_mm_uuids(
    mm_uuids: Optional[MultiModalUUIDDict],
    media_count: int,
    expected_uuids: list[Optional[str]],
    modality: str = "image",
) -> None:
    if len(expected_uuids) > 0:
        assert mm_uuids is not None
        assert modality in mm_uuids

        image_uuids = mm_uuids.get(modality)
        assert image_uuids is not None

        assert isinstance(image_uuids, list) and len(image_uuids) == media_count

        assert image_uuids == expected_uuids
    else:
        assert mm_uuids is None


ModalityType = Literal["image", "video", "audio"]
MultiModalDataCounts = Mapping[ModalityType, int]


def _assert_mm_data_inputs(
    mm_data: Optional[MultiModalDataDict],
    data_count: MultiModalDataCounts,
    skipped_media_indices: Optional[dict[str, list]] = None,  # modality -> list[int]
) -> None:
    assert mm_data is not None
    assert set(data_count.keys()) == (set(mm_data.keys()))

    for modality, n in data_count.items():
        modality_data = mm_data.get(modality)
        assert modality_data is not None
        assert isinstance(modality_data, list) and len(modality_data) == n

        if skipped_media_indices is not None:
            skipped_media_indices_for_modality = skipped_media_indices.get(modality)
            assert skipped_media_indices_for_modality is not None
            for i in skipped_media_indices_for_modality:
                assert modality_data[i] is None
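

# Single- and multi-image parsing tests, with and without per-item UUIDs
# (sync and async variants).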
def test_parse_chat_messages_single_image(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "text", "text": "What's in the image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\nWhat's in the image?",
    }]
    _assert_mm_data_is_image_input(mm_data, 1)
    _assert_mm_uuids(mm_uuids, 1, expected_uuids=[None])


def test_parse_chat_messages_single_image_with_uuid(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid = str(hash(image_url))
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid},
                {"type": "text", "text": "What's in the image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\nWhat's in the image?",
    }]
    _assert_mm_data_is_image_input(mm_data, 1)
    _assert_mm_uuids(mm_uuids, 1, expected_uuids=[image_uuid])


def test_parse_chat_messages_single_empty_image_with_uuid(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid = str(hash(image_url))
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": None, "uuid": image_uuid},
                {"type": "text", "text": "What's in the image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\nWhat's in the image?",
    }]
    _assert_mm_data_is_image_input(mm_data, 1, skipped_image_indices=[0])
    _assert_mm_uuids(mm_uuids, 1, expected_uuids=[image_uuid])


def test_parse_chat_messages_single_image_with_bad_uuid_format(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid = str(hash(image_url))
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {
                    "type": "image_url",
                    "image_url": {"url": image_url, "uuid": image_uuid},
                    "bad_uuid_key": image_uuid,
                },
                {"type": "text", "text": "What's in the image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\nWhat's in the image?",
    }]
    _assert_mm_data_is_image_input(mm_data, 1)
    _assert_mm_uuids(mm_uuids, 1, expected_uuids=[None])


def test_parse_chat_messages_multiple_images_with_uuids(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid1 = "my_uuid_1"
    image_uuid2 = "my_uuid_2"

    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid1},
                {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid2},
                {"type": "text", "text": "What's in the image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\n<|image_2|>\nWhat's in the image?",
    }]
    _assert_mm_data_is_image_input(mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[image_uuid1, image_uuid2])


def test_parse_chat_messages_multiple_empty_images_with_uuids(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid1 = "my_uuid_1"
    image_uuid2 = "my_uuid_2"

    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": None, "uuid": image_uuid1},
                {"type": "image_url", "image_url": None, "uuid": image_uuid2},
                {"type": "text", "text": "What's in the image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\n<|image_2|>\nWhat's in the image?",
    }]
    _assert_mm_data_is_image_input(mm_data, 2, skipped_image_indices=[0, 1])
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[image_uuid1, image_uuid2])


def test_parse_chat_messages_mixed_empty_images_with_uuids(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid1 = "my_uuid_1"
    image_uuid2 = "my_uuid_2"

    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid1},
                {"type": "image_url", "image_url": None, "uuid": image_uuid2},
                {"type": "text", "text": "What's in the image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\n<|image_2|>\nWhat's in the image?",
    }]
    _assert_mm_data_is_image_input(mm_data, 2, skipped_image_indices=[1])
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[image_uuid1, image_uuid2])


@pytest.mark.asyncio
async def test_parse_chat_messages_single_image_with_uuid_async(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid = str(hash(image_url))
    conversation, mm_future, mm_uuids = parse_chat_messages_futures(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid},
                {"type": "text", "text": "What's in the image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\nWhat's in the image?",
    }]
    _assert_mm_data_is_image_input(await mm_future, 1)
    _assert_mm_uuids(mm_uuids, 1, expected_uuids=[image_uuid])


@pytest.mark.asyncio
async def test_parse_chat_messages_empty_image_with_uuid_async(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid = str(hash(image_url))
    conversation, mm_future, mm_uuids = parse_chat_messages_futures(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": None, "uuid": image_uuid},
                {"type": "text", "text": "What's in the image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\nWhat's in the image?",
    }]
    _assert_mm_data_is_image_input(await mm_future, 1, skipped_image_indices=[0])
    _assert_mm_uuids(mm_uuids, 1, expected_uuids=[image_uuid])


@pytest.mark.asyncio
async def test_parse_chat_messages_multiple_images_with_uuids_async(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid1 = "my_uuid_1"
    image_uuid2 = "my_uuid_2"

    conversation, mm_future, mm_uuids = parse_chat_messages_futures(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid1},
                {
                    "type": "image_pil",
                    "image_pil": ImageAsset("cherry_blossom").pil_image,
                    "uuid": image_uuid2,
                },
                {"type": "text", "text": "What's in these images?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\n<|image_2|>\nWhat's in these images?",
    }]
    _assert_mm_data_is_image_input(await mm_future, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[image_uuid1, image_uuid2])


@pytest.mark.asyncio
async def test_parse_chat_messages_multiple_empty_images_with_uuids_async(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid1 = "my_uuid_1"
    image_uuid2 = "my_uuid_2"

    conversation, mm_future, mm_uuids = parse_chat_messages_futures(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": None, "uuid": image_uuid1},
                {"type": "image_pil", "image_pil": None, "uuid": image_uuid2},
                {"type": "text", "text": "What's in these images?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\n<|image_2|>\nWhat's in these images?",
    }]
    _assert_mm_data_is_image_input(await mm_future, 2, skipped_image_indices=[0, 1])
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[image_uuid1, image_uuid2])


@pytest.mark.asyncio
async def test_parse_chat_messages_multiple_images_with_partial_uuids_async(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid2 = "my_uuid_2"

    conversation, mm_future, mm_uuids = parse_chat_messages_futures(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}},
                {
                    "type": "image_pil",
                    "image_pil": ImageAsset("cherry_blossom").pil_image,
                    "uuid": image_uuid2,
                },
                {"type": "text", "text": "What's in these images?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\n<|image_2|>\nWhat's in these images?",
    }]
    _assert_mm_data_is_image_input(await mm_future, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[None, image_uuid2])


def test_parse_chat_messages_empty_system(mistral_model_config, mistral_tokenizer):
    # Test string format
    conversation, _, _ = parse_chat_messages(
        [
            {"role": "system", "content": ""},
            {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
        ],
        mistral_model_config,
        mistral_tokenizer,
        content_format="string",
    )
    assert conversation == [
        {"role": "system", "content": ""},
        {"role": "user", "content": "Who are you?"},
    ]

    # Test openai format
    conversation, _, _ = parse_chat_messages(
        [
            {"role": "system", "content": ""},
            {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
        ],
        mistral_model_config,
        mistral_tokenizer,
        content_format="openai",
    )
    assert conversation == [
        {"role": "system", "content": [{"type": "text", "text": ""}]},
        {"role": "user", "content": [{"type": "text", "text": "Who are you?"}]},
    ]


@pytest.mark.asyncio
async def test_parse_chat_messages_single_image_async(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    conversation, mm_future, mm_uuids = parse_chat_messages_futures(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "text", "text": "What's in the image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\nWhat's in the image?",
    }]
    _assert_mm_data_is_image_input(await mm_future, 1)
    _assert_mm_uuids(mm_uuids, 1, expected_uuids=[None])


def test_parse_chat_messages_multiple_images(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "image_pil", "image_pil": ImageAsset("cherry_blossom").pil_image},
                {"type": "text", "text": "What's in these images?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\n<|image_2|>\nWhat's in these images?",
    }]
    _assert_mm_data_is_image_input(mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[None, None])


def test_parse_chat_messages_empty_pil_image_with_uuid(
    phi3v_model_config, phi3v_tokenizer
):
    uuid = "abcd"
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "image_pil", "image_pil": None, "uuid": uuid},
                {"type": "text", "text": "What's in this image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\nWhat's in this image?",
    }]
    _assert_mm_data_is_image_input(mm_data, 1, skipped_image_indices=[0])
    _assert_mm_uuids(mm_uuids, 1, expected_uuids=[uuid])


def test_parse_chat_messages_empty_image_embeds_with_uuid(
    phi3v_model_config, phi3v_tokenizer
):
    uuid = "abcd"
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "image_embeds", "image_embeds": None, "uuid": uuid},
                {"type": "text", "text": "What's in this image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\nWhat's in this image?",
    }]
    assert mm_data is not None
    assert "image" in mm_data
    assert mm_data["image"] is None
    _assert_mm_uuids(mm_uuids, 1, expected_uuids=[uuid])


@pytest.mark.asyncio
async def test_parse_chat_messages_empty_image_embeds_with_uuid_async(
    phi3v_model_config, phi3v_tokenizer
):
    uuid = "abcd"
    conversation, mm_future, mm_uuids = parse_chat_messages_futures(
        [{
            "role": "user",
            "content": [
                {"type": "image_embeds", "image_embeds": None, "uuid": uuid},
                {"type": "text", "text": "What's in this image?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\nWhat's in this image?",
    }]
    mm_data = await mm_future
    assert mm_data is not None
    assert "image" in mm_data
    assert mm_data["image"] is None
    _assert_mm_uuids(mm_uuids, 1, expected_uuids=[uuid])


@pytest.mark.asyncio
async def test_parse_chat_messages_multiple_images_async(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    conversation, mm_future, mm_uuids = parse_chat_messages_futures(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "image_pil", "image_pil": ImageAsset("cherry_blossom").pil_image},
                {"type": "text", "text": "What's in these images?"},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\n<|image_2|>\nWhat's in these images?",
    }]
    _assert_mm_data_is_image_input(await mm_future, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[None, None])


def test_parse_chat_messages_placeholder_already_in_prompt(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "image_url", "image_url": {"url": image_url}},
                {
                    "type": "text",
                    "text": "What's in <|image_1|> and how does it compare to <|image_2|>?",  # noqa: E501
                },
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )
    assert conversation == [{
        "role": "user",
        "content": "What's in <|image_1|> and how does it compare to <|image_2|>?",
    }]
    _assert_mm_data_is_image_input(mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[None, None])


def test_parse_chat_messages_placeholder_one_already_in_prompt(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "image_url", "image_url": {"url": image_url}},
                {
                    "type": "text",
                    "text": "What's in <|image_1|> and how does it compare to the other one?",  # noqa: E501
                },
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_2|>\nWhat's in <|image_1|> and how does it compare to the "
        "other one?",
    }]
    _assert_mm_data_is_image_input(mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[None, None])


def test_parse_chat_messages_multiple_images_across_messages(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [
            {
                "role": "user",
                "content": [
                    {"type": "image_url", "image_url": {"url": image_url}},
                    {"type": "text", "text": "What's in this image?"},
                ],
            },
            {"role": "assistant", "content": "Some stuff."},
            {
                "role": "user",
                "content": [
                    {"type": "image_url", "image_url": {"url": image_url}},
                    {"type": "text", "text": "What about this one?"},
                ],
            },
        ],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [
        {"role": "user", "content": "<|image_1|>\nWhat's in this image?"},
        {"role": "assistant", "content": "Some stuff."},
        {"role": "user", "content": "<|image_2|>\nWhat about this one?"},
    ]
    _assert_mm_data_is_image_input(mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[None, None])


def test_parse_chat_messages_multiple_images_with_uuids_across_messages(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    image_uuid = str(hash(image_url))
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [
            {
                "role": "user",
                "content": [
                    {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid},
                    {"type": "text", "text": "What's in this image?"},
                ],
            },
            {"role": "assistant", "content": "Some stuff."},
            {
                "role": "user",
                "content": [
                    {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid},
                    {"type": "text", "text": "What about this one?"},
                ],
            },
        ],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [
        {"role": "user", "content": "<|image_1|>\nWhat's in this image?"},
        {"role": "assistant", "content": "Some stuff."},
        {"role": "user", "content": "<|image_2|>\nWhat about this one?"},
    ]
    _assert_mm_data_is_image_input(mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[image_uuid, image_uuid])


def test_parse_chat_messages_context_text_format(phi3v_model_config, phi3v_tokenizer):
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [
            {"role": "user", "content": [{"type": "text", "text": "What's in this text?"}]},
            {"role": "assistant", "content": "Some stuff."},
            {"role": "user", "content": "What about this one?"},
        ],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="openai",
    )

    assert conversation == [
        {
            "role": "user",
            "content": [{"type": "text", "text": "What's in this text?"}],
        },
        {
            "role": "assistant",
            "content": [{"type": "text", "text": "Some stuff."}],
        },
        {
            "role": "user",
            "content": [{"type": "text", "text": "What about this one?"}],
        },
    ]
    assert mm_data is None
    assert mm_uuids is None


def test_parse_chat_messages_rejects_too_many_images_in_one_message(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message="coroutine 'async_get_and_parse_image' was never awaited",
        )
        with pytest.raises(ValueError, match="At most"):
            parse_chat_messages(
                [{
                    "role": "user",
                    "content": [
                        {"type": "image_url", "image_url": {"url": image_url}},
                        {"type": "image_url", "image_url": {"url": image_url}},
                        {"type": "image_url", "image_url": {"url": image_url}},
                        {"type": "text", "text": "What's in these images?"},
                    ],
                }],
                phi3v_model_config,
                phi3v_tokenizer,
                content_format="string",
            )


def test_parse_chat_messages_rejects_too_many_images_across_messages(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore",
            message="coroutine 'async_get_and_parse_image' was never awaited",
        )
        with pytest.raises(ValueError, match="At most"):
            parse_chat_messages(
                [
                    {
                        "role": "user",
                        "content": [
                            {"type": "image_url", "image_url": {"url": image_url}},
                            {"type": "text", "text": "What's in this image?"},
                        ],
                    },
                    {"role": "assistant", "content": "Some stuff."},
                    {
                        "role": "user",
                        "content": [
                            {"type": "image_url", "image_url": {"url": image_url}},
                            {"type": "image_url", "image_url": {"url": image_url}},
                            {"type": "text", "text": "What about these two?"},
                        ],
                    },
                ],
                phi3v_model_config,
                phi3v_tokenizer,
                content_format="string",
            )


def test_parse_chat_messages_multiple_images_uncommon_input(
    phi3v_model_config, phi3v_tokenizer, image_url
):
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                "What's in these images?",
                {"image_url": image_url},
                {"image_url": image_url},
            ],
        }],
        phi3v_model_config,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "<|image_1|>\n<|image_2|>\nWhat's in these images?",
    }]
    _assert_mm_data_is_image_input(mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[None, None])


def test_parse_chat_messages_multiple_images_interleave(
    phi3v_model_config_mm_interleaved, phi3v_tokenizer, image_url
):
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role": "user",
            "content": [
                {"type": "text", "text": "I need you to compare this image"},
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "text", "text": "and this one"},
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "text", "text": "Do they have differences?"},
            ],
        }],
        phi3v_model_config_mm_interleaved,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "I need you to compare this image\n<|image_1|>\nand this one\n<|image_2|>\n"  # noqa: E501
        "Do they have differences?",
    }]
    _assert_mm_data_is_image_input(mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[None, None])


@pytest.mark.asyncio
async def test_parse_chat_messages_multiple_images_interleave_async(
    phi3v_model_config_mm_interleaved, phi3v_tokenizer, image_url
):
    conversation, mm_data, mm_uuids = parse_chat_messages_futures(
        [{
            "role": "user",
            "content": [
                {"type": "text", "text": "I need you to compare this image"},
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "text", "text": "and this one"},
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "text", "text": "Do they have differences?"},
            ],
        }],
        phi3v_model_config_mm_interleaved,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "I need you to compare this image\n<|image_1|>\nand this one\n<|image_2|>\n"  # noqa: E501
        "Do they have differences?",
    }]
    _assert_mm_data_is_image_input(await mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[None, None])


@pytest.mark.asyncio
async def test_parse_chat_messages_multiple_images_with_uuids_interleave_async(
    phi3v_model_config_mm_interleaved, phi3v_tokenizer, image_url
):
    image_uuid = str(hash(image_url))
    conversation, mm_data, mm_uuids = parse_chat_messages_futures(
        [{
            "role": "user",
            "content": [
                {"type": "text", "text": "I need you to compare this image"},
                {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid},
                {"type": "text", "text": "and this one"},
                {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid},
                {"type": "text", "text": "Do they have differences?"},
            ],
        }],
        phi3v_model_config_mm_interleaved,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role": "user",
        "content": "I need you to compare this image\n<|image_1|>\nand this one\n<|image_2|>\n"  # noqa: E501
        "Do they have differences?",
    }]
    _assert_mm_data_is_image_input(await mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[image_uuid, image_uuid])


def test_parse_chat_messages_multiple_images_multiple_messages_interleave(
    phi3v_model_config_mm_interleaved, phi3v_tokenizer, image_url
):
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's on this image?"},
                    {"type": "image_url", "image_url": {"url": image_url}},
                    {"type": "text", "text": "Be accurate."},
                ],
            },
            {"role": "assistant", "content": "Some stuff."},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's on this image?"},
                    {"type": "image_url", "image_url": {"url": image_url}},
                ],
            },
        ],
        phi3v_model_config_mm_interleaved,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [
        {
            "role": "user",
            "content": "What's on this image?\n<|image_1|>\nBe accurate.",
        },
        {"role": "assistant", "content": "Some stuff."},
        {"role": "user", "content": "What's on this image?\n<|image_2|>"},
    ]
    _assert_mm_data_is_image_input(mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[None, None])


def test_parse_chat_messages_multiple_images_with_uuids_multiple_messages_interleave(  # noqa: E501
    phi3v_model_config_mm_interleaved, phi3v_tokenizer, image_url
):
    image_uuid = str(hash(image_url))
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's on this image?"},
                    {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid},
                    {"type": "text", "text": "Be accurate."},
                ],
            },
            {"role": "assistant", "content": "Some stuff."},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's on this image?"},
                    {"type": "image_url", "image_url": {"url": image_url}, "uuid": image_uuid},
                ],
            },
        ],
        phi3v_model_config_mm_interleaved,
        phi3v_tokenizer,
        content_format="string",
    )

    assert conversation == [
        {
            "role": "user",
            "content": "What's on this image?\n<|image_1|>\nBe accurate.",
        },
        {"role": "assistant", "content": "Some stuff."},
        {"role": "user", "content": "What's on this image?\n<|image_2|>"},
    ]
    _assert_mm_data_is_image_input(mm_data, 2)
    _assert_mm_uuids(mm_uuids, 2, expected_uuids=[image_uuid, image_uuid])
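

# Interleaved multi-modal (image / audio / video) tests using the Qwen2.5-Omni
# chat template placeholders.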
def test_parse_chat_messages_multiple_modals_multiple_messages_interleave(
    qwen25omni_model_config_mm_interleaved,
    qwen25omni_tokenizer,
    image_url,
    video_url,
    audio_url,
):
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's on this image?"},
                    {"type": "image_url", "image_url": {"url": image_url}},
                    {"type": "text", "text": "Now listen to this audio"},
                    {"type": "audio_url", "audio_url": {"url": audio_url}},
                ],
            },
            {"role": "assistant", "content": "Some stuff."},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's on this image?"},
                    {"type": "image_url", "image_url": {"url": image_url}},
                    {"type": "text", "text": "And what's in the video?"},
                    {"type": "video_url", "video_url": {"url": video_url}},
                ],
            },
        ],
        qwen25omni_model_config_mm_interleaved,
        qwen25omni_tokenizer,
        content_format="string",
    )

    assert conversation == [
        {
            "role": "user",
            "content": "What's on this image?\n<|vision_start|><|IMAGE|><|vision_end|>\n"
            "Now listen to this audio\nAudio 1: <|audio_bos|><|AUDIO|><|audio_eos|>",  # noqa: E501
        },
        {"role": "assistant", "content": "Some stuff."},
        {
            "role": "user",
            "content": "What's on this image?\n<|vision_start|><|IMAGE|><|vision_end|>\n"
            "And what's in the video?\n<|vision_start|><|VIDEO|><|vision_end|>",
        },
    ]

    _assert_mm_data_inputs(mm_data, {"image": 2, "video": 1, "audio": 1})
    _assert_mm_uuids(mm_uuids, 2, modality="image", expected_uuids=[None, None])
    _assert_mm_uuids(mm_uuids, 1, modality="video", expected_uuids=[None])
    _assert_mm_uuids(mm_uuids, 1, modality="audio", expected_uuids=[None])


def test_parse_chat_messages_multiple_modals_with_uuids_multiple_messages_interleave(  # noqa: E501
    qwen25omni_model_config_mm_interleaved,
    qwen25omni_tokenizer,
    image_url,
    video_url,
    audio_url,
):
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's on this image?"},
                    {"type": "image_url", "image_url": {"url": image_url}, "uuid": "image_123"},
                    {"type": "text", "text": "Now listen to this audio"},
                    {"type": "audio_url", "audio_url": {"url": audio_url}, "uuid": "audio_123"},
                ],
            },
            {"role": "assistant", "content": "Some stuff."},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "What's on this image?"},
                    {"type": "image_url", "image_url": {"url": image_url}, "uuid": "image_123"},
                    {"type": "text", "text": "And what's in the video?"},
                    {"type": "video_url", "video_url": {"url": video_url}, "uuid": "video_123"},
                ],
            },
        ],
        qwen25omni_model_config_mm_interleaved,
        qwen25omni_tokenizer,
        content_format="string",
    )

    assert conversation == [
        {
            "role": "user",
            "content": "What's on this image?\n<|vision_start|><|IMAGE|><|vision_end|>\n"
            "Now listen to this audio\nAudio 1: <|audio_bos|><|AUDIO|><|audio_eos|>",  # noqa: E501
        },
        {"role": "assistant", "content": "Some stuff."},
        {
            "role": "user",
            "content": "What's on this image?\n<|vision_start|><|IMAGE|><|vision_end|>\n"
            "And what's in the video?\n<|vision_start|><|VIDEO|><|vision_end|>",
        },
    ]

    _assert_mm_data_inputs(mm_data, {"image": 2, "video": 1, "audio": 1})
    _assert_mm_uuids(
        mm_uuids, 2, modality="image", expected_uuids=["image_123", "image_123"]
    )
    _assert_mm_uuids(mm_uuids, 1, modality="video", expected_uuids=["video_123"])
    _assert_mm_uuids(mm_uuids, 1, modality="audio", expected_uuids=["audio_123"])
def test_parse_chat_messages_multiple_modals_with_uuids_multiple_empty_media_messages_interleave(  # noqa: E501
    qwen25omni_model_config_mm_interleaved,
    qwen25omni_tokenizer,
    image_url,
    video_url,
    audio_url,
):
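    """Media parts given only as ``None`` payloads with UUIDs: placeholders
    are still inserted, the items are reported via ``skipped_media_indices``,
    and their UUIDs are preserved."""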
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [
            {
                "role":
                "user",
                "content": [
                    {
                        "type": "text",
                        "text": "What's on this image?"
                    },
                    {
                        "type": "image_url",
                        "image_url": None,
                        "uuid": "image_123",
                    },
                    {
                        "type": "text",
                        "text": "Now listen to this audio"
                    },
                    {
                        "type": "audio_url",
                        "audio_url": None,
                        "uuid": "audio_123",
                    },
                ],
            },
            {
                "role": "assistant",
                "content": "Some stuff."
            },
            {
                "role":
                "user",
                "content": [
                    {
                        "type": "text",
                        "text": "What's on this image?"
                    },
                    {
                        "type": "image_url",
                        "image_url": None,
                        "uuid": "image_123",
                    },
                    {
                        "type": "text",
                        "text": "And what's in the video?"
                    },
                    {
                        "type": "video_url",
                        "video_url": None,
                        "uuid": "video_123",
                    },
                ],
            },
        ],
        qwen25omni_model_config_mm_interleaved,
        qwen25omni_tokenizer,
        content_format="string",
    )

    assert conversation == [
        {
            "role":
            "user",
            "content":
            "What's on this image?\n<|vision_start|><|IMAGE|><|vision_end|>\n"
            "Now listen to this audio\nAudio 1: <|audio_bos|><|AUDIO|><|audio_eos|>",  # noqa: E501
        },
        {
            "role": "assistant",
            "content": "Some stuff."
        },
        {
            "role":
            "user",
            "content":
            "What's on this image?\n<|vision_start|><|IMAGE|><|vision_end|>\n"
            "And what's in the video?\n<|vision_start|><|VIDEO|><|vision_end|>",
        },
    ]

    _assert_mm_data_inputs(mm_data, {
        "image": 2,
        "video": 1,
        "audio": 1
    },
                           skipped_media_indices={
                               "image": [0, 1],
                               "video": [0],
                               "audio": [0]
                           })
    _assert_mm_uuids(mm_uuids,
                     2,
                     modality="image",
                     expected_uuids=["image_123", "image_123"])
    _assert_mm_uuids(mm_uuids,
                     1,
                     modality="video",
                     expected_uuids=["video_123"])
    _assert_mm_uuids(mm_uuids,
                     1,
                     modality="audio",
                     expected_uuids=["audio_123"])


def test_parse_chat_messages_multiple_modals_with_partial_uuids_multiple_messages_interleave(  # noqa: E501
    qwen25omni_model_config_mm_interleaved,
    qwen25omni_tokenizer,
    image_url,
    video_url,
    audio_url,
):
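    """Only some media items carry a UUID; items without one should be
    reported with a ``None`` UUID in ``mm_uuids``."""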
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [
            {
                "role":
                "user",
                "content": [
                    {
                        "type": "text",
                        "text": "What's on this image?"
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": image_url
                        },
                        "uuid": "image_123",
                    },
                    {
                        "type": "text",
                        "text": "Now listen to this audio"
                    },
                    {
                        "type": "audio_url",
                        "audio_url": {
                            "url": audio_url
                        }
                    },
                ],
            },
            {
                "role": "assistant",
                "content": "Some stuff."
            },
            {
                "role":
                "user",
                "content": [
                    {
                        "type": "text",
                        "text": "What's on this image?"
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": image_url
                        }
                    },
                    {
                        "type": "text",
                        "text": "And what's in the video?"
                    },
                    {
                        "type": "video_url",
                        "video_url": {
                            "url": video_url
                        },
                        "uuid": "video_123",
                    },
                ],
            },
        ],
        qwen25omni_model_config_mm_interleaved,
        qwen25omni_tokenizer,
        content_format="string",
    )

    assert conversation == [
        {
            "role":
            "user",
            "content":
            "What's on this image?\n<|vision_start|><|IMAGE|><|vision_end|>\n"
            "Now listen to this audio\nAudio 1: <|audio_bos|><|AUDIO|><|audio_eos|>",  # noqa: E501
        },
        {
            "role": "assistant",
            "content": "Some stuff."
        },
        {
            "role":
            "user",
            "content":
            "What's on this image?\n<|vision_start|><|IMAGE|><|vision_end|>\n"
            "And what's in the video?\n<|vision_start|><|VIDEO|><|vision_end|>",
        },
    ]

    _assert_mm_data_inputs(mm_data, {"image": 2, "video": 1, "audio": 1})
    _assert_mm_uuids(mm_uuids,
                     2,
                     modality="image",
                     expected_uuids=["image_123", None])
    _assert_mm_uuids(mm_uuids,
                     1,
                     modality="video",
                     expected_uuids=["video_123"])
    _assert_mm_uuids(mm_uuids, 1, modality="audio", expected_uuids=[None])


def test_parse_chat_messages_multiple_images_interleave_with_placeholders(
    phi3v_model_config_mm_interleaved,
    phi3v_tokenizer,
    image_url,
):
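    """A prompt that already spells out ``<|image_1|>``/``<|image_2|>``
    placeholders alongside interleaved image items should raise a
    ``ValueError``."""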
    with pytest.raises(
            ValueError,
            match=r"Found more '<|image_1|>' placeholders in input prompt "
            "than actual multimodal data items.",
    ):
        parse_chat_messages(
            [{
                "role":
                "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": image_url
                        }
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": image_url
                        }
                    },
                    {
                        "type":
                        "text",
                        "text":
                        "I need you to compare this image\n<|image_1|>\nand this one\n<|image_2|>\n"  # noqa: E501
                        "Do they have differences?",
                    },
                ],
            }],
            phi3v_model_config_mm_interleaved,
            phi3v_tokenizer,
            content_format="string",
        )


@pytest.mark.parametrize(
    "model",
    [
        QWEN2VL_MODEL_ID,  # tokenizer.chat_template is of type str
        HERMES_MODEL_ID,  # tokenizer.chat_template is of type dict
    ],
)
@pytest.mark.parametrize("use_tools", [True, False])
def test_resolve_hf_chat_template(sample_json_schema, model, use_tools):
    """Check that the resolved chat template is a string for HF models."""
    model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
    model_info.check_available_online(on_fail="skip")

    model_config = ModelConfig(
        model,
        tokenizer=model_info.tokenizer or model,
        tokenizer_mode=model_info.tokenizer_mode,
        revision=model_info.revision,
        trust_remote_code=model_info.trust_remote_code,
        hf_overrides=model_info.hf_overrides,
        skip_tokenizer_init=model_info.skip_tokenizer_init,
        enforce_eager=model_info.enforce_eager,
        dtype=model_info.dtype)

    # Build the tokenizer
    tokenizer = get_tokenizer(
        model,
        trust_remote_code=model_config.trust_remote_code,
    )

    tools = ([{
        "type": "function",
        "function": {
            "name": "dummy_function_name",
            "description": "This is a dummy function",
            "parameters": sample_json_schema,
        },
    }] if use_tools else None)

    # Test detecting the tokenizer's chat_template
    chat_template = resolve_hf_chat_template(
        tokenizer,
        chat_template=None,
        tools=tools,
        model_config=model_config,
    )
    assert isinstance(chat_template, str)


# NOTE: Qwen2-Audio's default chat template is defined inside the processor
# class instead of `tokenizer_config.json`
# yapf: disable
@pytest.mark.parametrize(
    ("model", "expected_format"),
    [(PHI3V_MODEL_ID, "string"),
     (QWEN2VL_MODEL_ID, "openai"),
     (QWEN25VL_MODEL_ID, "openai"),
     (ULTRAVOX_MODEL_ID, "string"),
     (QWEN2AUDIO_MODEL_ID, "openai"),
     (LLAMA_GUARD_MODEL_ID, "openai")],
)
# yapf: enable
def test_resolve_content_format_hf_defined(model, expected_format):
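    """The HF-defined chat template should be detected and resolve to the
    expected content format."""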
    model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
    model_info.check_available_online(on_fail="skip")

    model_config = ModelConfig(
        model,
        tokenizer=model_info.tokenizer or model,
        tokenizer_mode=model_info.tokenizer_mode,
        revision=model_info.revision,
        trust_remote_code=model_info.trust_remote_code,
        hf_overrides=model_info.hf_overrides,
        skip_tokenizer_init=model_info.skip_tokenizer_init,
        enforce_eager=model_info.enforce_eager,
        dtype=model_info.dtype)

    tokenizer = get_tokenizer(
        model,
        trust_remote_code=model_config.trust_remote_code,
    )

    # Test detecting the tokenizer's chat_template
    chat_template = resolve_hf_chat_template(
        tokenizer,
        chat_template=None,
        tools=None,
        model_config=model_config,
    )
    assert isinstance(chat_template, str)

    print("[TEXT]")
    print(chat_template)
    print("[AST]")
    print(_try_extract_ast(chat_template))

    resolved_format = resolve_chat_template_content_format(
        None,  # Test detecting the tokenizer's chat_template
        None,
        "auto",
        tokenizer,
        model_config=model_config,
    )

    assert resolved_format == expected_format


# yapf: disable
@pytest.mark.parametrize(
    ("model", "expected_format"),
    [("Salesforce/blip2-opt-2.7b", "string"),
     ("facebook/chameleon-7b", "string"),
     ("deepseek-ai/deepseek-vl2-tiny", "string"),
     ("adept/fuyu-8b", "string"),
     ("google/paligemma-3b-mix-224", "string"),
     ("Qwen/Qwen-VL", "string"),
     ("Qwen/Qwen-VL-Chat", "string")],
)
# yapf: enable
def test_resolve_content_format_fallbacks(model, expected_format):
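    """Models without an HF-defined chat template should still resolve to a
    fallback template string and the expected content format."""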
    model_info = HF_EXAMPLE_MODELS.find_hf_info(model)
    model_info.check_available_online(on_fail="skip")

    model_config = ModelConfig(
        model,
        tokenizer=model_info.tokenizer or model,
        tokenizer_mode=model_info.tokenizer_mode,
        revision=model_info.revision,
        trust_remote_code=model_info.trust_remote_code,
        hf_overrides=model_info.hf_overrides,
        skip_tokenizer_init=model_info.skip_tokenizer_init,
        enforce_eager=model_info.enforce_eager,
        dtype=model_info.dtype)

    tokenizer = get_tokenizer(
        model_config.tokenizer,
        trust_remote_code=model_config.trust_remote_code,
    )

    # Test detecting the tokenizer's chat_template
    chat_template = resolve_hf_chat_template(
        tokenizer,
        chat_template=None,
        tools=None,
        model_config=model_config,
    )
    assert isinstance(chat_template, str)

    print("[TEXT]")
    print(chat_template)
    print("[AST]")
    print(_try_extract_ast(chat_template))

    resolved_format = resolve_chat_template_content_format(
        None,  # Test detecting the tokenizer's chat_template
        None,
        "auto",
        tokenizer,
        model_config=model_config,
    )

    assert resolved_format == expected_format


# yapf: disable
@pytest.mark.parametrize(
    ("template_path", "expected_format"),
    [("template_alpaca.jinja", "string"),
     ("template_baichuan.jinja", "string"),
     ("template_chatglm.jinja", "string"),
     ("template_chatglm2.jinja", "string"),
     ("template_chatml.jinja", "string"),
     ("template_dse_qwen2_vl.jinja", "openai"),
     ("template_falcon_180b.jinja", "string"),
     ("template_falcon.jinja", "string"),
     ("template_inkbot.jinja", "string"),
     ("template_teleflm.jinja", "string"),
     ("template_vlm2vec.jinja", "openai"),
     ("tool_chat_template_granite_20b_fc.jinja", "string"),
     ("tool_chat_template_hermes.jinja", "string"),
     ("tool_chat_template_internlm2_tool.jinja", "string"),
     ("tool_chat_template_llama3.1_json.jinja", "openai"),
     ("tool_chat_template_llama3.2_json.jinja", "openai"),
     ("tool_chat_template_mistral_parallel.jinja", "string"),
     ("tool_chat_template_mistral.jinja", "string")],
)
# yapf: enable
def test_resolve_content_format_examples(template_path, expected_format):
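    """Each example template under ``EXAMPLES_DIR`` should be detected as the
    expected content format, using a dummy tokenizer with no template of its
    own."""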
    model_config = ModelConfig(
        PHI3V_MODEL_ID,  # Dummy
        tokenizer=PHI3V_MODEL_ID,  # Dummy
        trust_remote_code=True,
    )

    dummy_tokenizer = get_tokenizer(
        PHI3V_MODEL_ID,  # Dummy
        trust_remote_code=model_config.trust_remote_code,
    )
    dummy_tokenizer.chat_template = None

    chat_template = load_chat_template(EXAMPLES_DIR / template_path)
    assert isinstance(chat_template, str)

    print("[TEXT]")
    print(chat_template)
    print("[AST]")
    print(_try_extract_ast(chat_template))

    resolved_format = resolve_chat_template_content_format(
        chat_template,
        None,
        "auto",
        dummy_tokenizer,
        model_config=model_config,
    )

    assert resolved_format == expected_format


def test_parse_chat_messages_include_thinking_chunk(mistral_model_config,
                                                    mistral_tokenizer):
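    """Thinking chunks in system/assistant messages should be flattened into
    plain text chunks in the parsed conversation."""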
    messages = [{
        "role":
        "system",
        "content": [{
            "type": "text",
            "text": "You are a helpful assistant."
        }, {
            "type":
            "thinking",
            "closed":
            True,
            "thinking":
            "Only return the answer when you are confident."
        }]
    }, {
        "role": "user",
        "content": "What is 2+2?"
    }, {
        "role":
        "assistant",
        "content": [{
            "type": "text",
            "text": "Let me think about it."
        }, {
            "type": "thinking",
            "closed": True,
            "thinking": "2+2 = 4"
        }, {
            "type": "text",
            "text": "The answer is 4.",
        }],
    }]

    conversation_with_thinking, _, _ = parse_chat_messages(
        messages,
        mistral_model_config,
        mistral_tokenizer,
        content_format="openai",
    )

    expected_conversation = [{
        "role":
        "system",
        "content": [{
            "type": "text",
            "text": "You are a helpful assistant."
        }, {
            "type": "text",
            "text": "Only return the answer when you are confident."
        }],
    }, {
        "role":
        "user",
        "content": [{
            "type": "text",
            "text": "What is 2+2?"
        }],
    }, {
        "role":
        "assistant",
        "content": [
            {
                "type": "text",
                "text": "Let me think about it."
            },
            {
                "type": "text",
                "text": "2+2 = 4"
            },
            {
                "type": "text",
                "text": "The answer is 4."
            },
        ]
    }]

    assert conversation_with_thinking == expected_conversation


def test_apply_mistral_chat_template_thinking_chunk():
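    """Thinking chunks should be rendered with [THINK]...[/THINK] markers by
    the Mistral chat template; the special tokens are patched onto the
    tokenizer below until a pre-configured tokenizer is released."""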
    # Moved import here to avoid yapf and isort conflicts
    from vllm.entrypoints.chat_utils import apply_mistral_chat_template
    messages = [{
        "role":
        "system",
        "content": [{
            "type": "text",
            "text": "You are a helpful assistant."
        }, {
            "type":
            "thinking",
            "closed":
            True,
            "thinking":
            "Only return the answer when you are confident."
        }]
    }, {
        "role": "user",
        "content": "What is 2+2?"
    }, {
        "role":
        "assistant",
        "content": [{
            "type": "text",
            "text": "Let me think about it."
        }, {
            "type": "thinking",
            "closed": True,
            "thinking": "2+2 = 4"
        }, {
            "type": "text",
            "text": "The answer is 4.",
        }],
    }, {
        "role": "user",
        "content": "Thanks, what is 3+3?"
    }]

    # TODO(Julien): upon model release change to a tokenizer already configured.
    # =================================================================
    mistral_tokenizer = MistralTokenizer.from_pretrained(
        "mistralai/Devstral-Small-2507")
    assert isinstance(mistral_tokenizer.tokenizer, Tekkenizer)
    # Add think special tokens to the tokenizer
    mistral_tokenizer.tokenizer._all_special_tokens[35] = SpecialTokenInfo(
        rank=35, is_control=True, token_str=SpecialTokens.begin_think.value)
    mistral_tokenizer.tokenizer._all_special_tokens[36] = SpecialTokenInfo(
        rank=36, is_control=True, token_str=SpecialTokens.end_think.value)
    mistral_tokenizer.tokenizer._special_tokens_reverse_vocab = {
        k: v
        for k, v in
        mistral_tokenizer.tokenizer._special_tokens_reverse_vocab.items()
        if v not in {35, 36}
    }
    mistral_tokenizer.tokenizer._special_tokens_reverse_vocab[
        SpecialTokens.begin_think.value] = 35
    mistral_tokenizer.tokenizer._special_tokens_reverse_vocab[
        SpecialTokens.end_think.value] = 36
    mistral_tokenizer.instruct.BEGIN_THINK = 35
    mistral_tokenizer.instruct.END_THINK = 36
    # =================================================================

    tokens_ids = apply_mistral_chat_template(mistral_tokenizer,
                                             messages,
                                             chat_template=None,
                                             tools=None)

    string_tokens = mistral_tokenizer.mistral.decode(
        tokens_ids, special_token_policy=SpecialTokenPolicy.KEEP)

    expected_tokens = (
        r"<s>[SYSTEM_PROMPT]You are a helpful assistant.[THINK]Only return the"
        r" answer when you are confident.[/THINK][/SYSTEM_PROMPT]"
        r"[INST]What is 2+2?[/INST]"
        r"Let me think about it.[THINK]2+2 = 4[/THINK]The answer is 4.</s>"
        r"[INST]Thanks, what is 3+3?[/INST]")

    assert string_tokens == expected_tokens


def test_parse_chat_messages_single_empty_audio_with_uuid(
    qwen2_audio_model_config,
    qwen2_audio_tokenizer,
):
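    """An empty ``input_audio`` payload with a UUID still yields an audio
    placeholder in the prompt, and the UUID is recorded."""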
    audio_uuid = "abcd"
    conversation, mm_data, mm_uuids = parse_chat_messages(
        [{
            "role":
            "user",
            "content": [
                {
                    "type": "input_audio",
                    "input_audio": {},
                    "uuid": audio_uuid,
                },
                {
                    "type": "text",
                    "text": "What does the audio say?"
                },
            ],
        }],
        qwen2_audio_model_config,
        qwen2_audio_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role":
        "user",
        "content":
        "Audio 1: <|audio_bos|><|AUDIO|><|audio_eos|>\nWhat does the audio say?"
    }]
    _assert_mm_data_inputs(mm_data, {"audio": 1})
    _assert_mm_uuids(mm_uuids,
                     1,
                     modality="audio",
                     expected_uuids=[audio_uuid])


@pytest.mark.asyncio
async def test_parse_chat_messages_single_empty_audio_with_uuid_async(
    qwen2_audio_model_config,
    qwen2_audio_tokenizer,
):
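    """Async variant of the test above: ``parse_chat_messages_futures``
    returns a future for the multimodal data, which is awaited before
    asserting."""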
    audio_uuid = "abcd"
    conversation, mm_future, mm_uuids = parse_chat_messages_futures(
        [{
            "role":
            "user",
            "content": [
                {
                    "type": "input_audio",
                    "input_audio": {},
                    "uuid": audio_uuid,
                },
                {
                    "type": "text",
                    "text": "What does the audio say?"
                },
            ],
        }],
        qwen2_audio_model_config,
        qwen2_audio_tokenizer,
        content_format="string",
    )

    assert conversation == [{
        "role":
        "user",
        "content":
        "Audio 1: <|audio_bos|><|AUDIO|><|audio_eos|>\nWhat does the audio say?"
    }]
    _assert_mm_data_inputs(await mm_future, {"audio": 1})
    _assert_mm_uuids(mm_uuids,
                     1,
                     modality="audio",
                     expected_uuids=[audio_uuid])