# ruff: noqa: E501
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from __future__ import annotations

import json
from dataclasses import fields
from enum import Enum
from typing import TYPE_CHECKING, Any

import jsonschema
import pytest
import regex as re
import torch
from pydantic import BaseModel

from tests.reasoning.utils import run_reasoning_extraction
from vllm.config import StructuredOutputsConfig
from vllm.distributed import cleanup_dist_env_and_memory
from vllm.entrypoints.llm import LLM
from vllm.outputs import RequestOutput
from vllm.platforms import current_platform
from vllm.reasoning.abs_reasoning_parsers import ReasoningParserManager
from vllm.sampling_params import (GuidedDecodingParams, SamplingParams,
                                  StructuredOutputsParams)

if TYPE_CHECKING:
    from vllm.config import TokenizerMode

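# Speculative decoding configs used by the parametrized tests below.
# NGRAM_SPEC_CONFIG uses the built-in "[ngram]" prompt-lookup drafting
# (no separate draft model), proposing up to 5 tokens per step, while
# EAGLE_SPEC_CONFIG drafts with an EAGLE model. Both are passed through as
# `speculative_config` to check that grammar-constrained decoding stays
# correct when speculative tokens are accepted or rejected.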
NGRAM_SPEC_CONFIG = {
    "model": "[ngram]",
    "num_speculative_tokens": 5,
    "prompt_lookup_max": 5,
    "prompt_lookup_min": 1,
}

EAGLE_SPEC_CONFIG = {
    "method": "eagle",
    "model": "yuhuili/EAGLE-LLaMA3.1-Instruct-8B",
    "num_speculative_tokens": 5,
}

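# Each entry is (model_name, backend, tokenizer_mode, speculative_config).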
PARAMS_MODELS_BACKENDS_TOKENIZER_MODE = [
    ("mistralai/Ministral-8B-Instruct-2410", "xgrammar", "auto", None),
    ("mistralai/Ministral-8B-Instruct-2410", "guidance", "auto", None),
    ("mistralai/Ministral-8B-Instruct-2410", "lm-format-enforcer", "auto",
     None),
    ("mistralai/Ministral-8B-Instruct-2410", "xgrammar", "mistral", None),
    ("Qwen/Qwen2.5-1.5B-Instruct", "xgrammar", "auto", None),
    ("Qwen/Qwen2.5-1.5B-Instruct", "lm-format-enforcer", "auto", None),
    # FIXME: These tests are flaky on CI and are disabled for now.
    # Tracking in Issue #24402
    # ("mistralai/Ministral-8B-Instruct-2410", "outlines", "auto", None),
    # ("mistralai/Ministral-8B-Instruct-2410", "outlines", "mistral", None),
    # ("Qwen/Qwen2.5-1.5B-Instruct", "guidance", "auto"),
    ("mistralai/Ministral-8B-Instruct-2410", "outlines", "auto",
     NGRAM_SPEC_CONFIG),
    ("mistralai/Ministral-8B-Instruct-2410", "guidance", "auto",
     NGRAM_SPEC_CONFIG),
    ("Qwen/Qwen2.5-1.5B-Instruct", "xgrammar", "auto", NGRAM_SPEC_CONFIG),
    ("meta-llama/Meta-Llama-3.1-8B-Instruct", "xgrammar", "auto",
     EAGLE_SPEC_CONFIG)
]

PARAMS_MODELS_TOKENIZER_MODE = [
    ("mistralai/Ministral-8B-Instruct-2410", "auto"),
    ("Qwen/Qwen2.5-1.5B-Instruct", "auto"),
]


class CarType(str, Enum):
    sedan = "sedan"
    suv = "SUV"
    truck = "Truck"
    coupe = "Coupe"


class CarDescription(BaseModel):
    brand: str
    model: str
    car_type: CarType


def test_guided_decoding_deprecated():
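    # GuidedDecodingParams is the legacy spelling of StructuredOutputsParams;
    # the two must stay field-compatible so that existing callers keep
    # working.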
    with pytest.warns(DeprecationWarning,
                      match="GuidedDecodingParams is deprecated.*"):
        guided_decoding = GuidedDecodingParams(json_object=True)

    structured_outputs = StructuredOutputsParams(json_object=True)
    assert fields(guided_decoding) == fields(structured_outputs)

    with pytest.warns(DeprecationWarning,
                      match="guided_decoding is deprecated.*"):
        sp1 = SamplingParams(guided_decoding=guided_decoding)

    with pytest.warns(DeprecationWarning,
                      match="guided_decoding is deprecated.*"):
        sp2 = SamplingParams.from_optional(guided_decoding=guided_decoding)

    assert sp1 == sp2
    assert sp1.structured_outputs == guided_decoding


@pytest.mark.skip_global_cleanup
@pytest.mark.parametrize(
    "model_name, backend, tokenizer_mode, speculative_config",
    PARAMS_MODELS_BACKENDS_TOKENIZER_MODE)
def test_structured_output(
    monkeypatch: pytest.MonkeyPatch,
    sample_json_schema: dict[str, Any],
    unsupported_json_schema: dict[str, Any],
    sample_sql_ebnf: str,
    sample_sql_lark: str,
    sample_regex: str,
    sample_structured_outputs_choices: str,
    backend: str,
    tokenizer_mode: str,
    model_name: str,
    speculative_config: dict[str, Any],
):
    monkeypatch.setenv("VLLM_USE_V1", "1")

    if current_platform.is_tpu() and speculative_config:
        pytest.skip("TPU does not support speculative decoding")

    # Don't use eager execution on TPUs because we want to test for no
    # recompilation at runtime
    enforce_eager = bool(not current_platform.is_tpu())
    # Use a single LLM instance for several scenarios to
    # speed up the test suite.
    llm = LLM(model=model_name,
              enforce_eager=enforce_eager,
              max_model_len=1024,
              structured_outputs_config=dict(backend=backend,
                                             disable_any_whitespace=backend
                                             in {"xgrammar", "guidance"}),
              seed=120,
              tokenizer_mode=tokenizer_mode,
              speculative_config=speculative_config)

    #
    # Test 1: Generate JSON output based on a provided schema
    #
    sampling_params = SamplingParams(
        temperature=1.0,
        max_tokens=4096,
        structured_outputs=StructuredOutputsParams(json=sample_json_schema))

    prompt = ("Give an example JSON for an employee profile that fits this "
              "schema. Make the response as short as possible. Schema: "
              f"{sample_json_schema}")
    outputs = llm.generate(
        [prompt] * 2,
        sampling_params=sampling_params,
        use_tqdm=True,
    )

    assert outputs is not None

    for output in outputs:
        assert output is not None
        assert isinstance(output, RequestOutput)
        prompt = output.prompt

        generated_text = output.outputs[0].text
        assert generated_text is not None
        if backend != 'lm-format-enforcer':
            assert "\n" not in generated_text
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
        try:
            output_json = json.loads(generated_text)
        except json.JSONDecodeError as e:
            pytest.fail(
                f"Invalid JSON from backend={backend}: {generated_text!r}\n"
                f"Schema: {sample_json_schema}\nError: {e}")
        jsonschema.validate(instance=output_json, schema=sample_json_schema)

    #
    # Test 2: Generate JSON object without a schema
    #
    if backend != "outlines":
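        # json_object=True constrains decoding to any syntactically valid
        # JSON object, with no particular schema; n=2 requests two
        # completions per prompt, both of which are validated below.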
        sampling_params = SamplingParams(
            temperature=1.0,
            max_tokens=4096,
            n=2,
            structured_outputs=StructuredOutputsParams(json_object=True))

        outputs = llm.generate(prompts=(
            "Generate a JSON object with curly braces for a person with "
            "name and age fields for John Smith who is 31 years old. "
            "Make the response as short as possible."),
                               sampling_params=sampling_params,
                               use_tqdm=True)

        assert outputs is not None
        for output in outputs:
            assert output is not None
            assert isinstance(output, RequestOutput)

            for i in range(2):
                generated_text = output.outputs[i].text
                print(generated_text)
                assert generated_text is not None

                # Parse to verify it is a valid JSON object
                parsed_json = json.loads(generated_text)
                assert isinstance(parsed_json, dict)

    #
    # Test 3: test a jsonschema incompatible with xgrammar
    #
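    # The backends differ here: xgrammar rejects schemas that use features
    # it does not support, while the remaining backends accept the request.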
    sampling_params = SamplingParams(
        temperature=1.0,
        max_tokens=4096,
        structured_outputs=StructuredOutputsParams(
            json=unsupported_json_schema))
    if backend.startswith("xgrammar"):
        with pytest.raises(ValueError,
                           match="The provided JSON schema contains features "
                           "not supported by xgrammar."):

            prompt = (f"Give an example JSON for an employee profile that "
                      f"fits this schema: {unsupported_json_schema}. "
                      f"Make the response as short as possible.")
            llm.generate(
                [prompt] * 2,
                sampling_params=sampling_params,
                use_tqdm=True,
            )
    else:
        prompt = (f"Give an example JSON object for a grade that "
                  f"fits this schema: {unsupported_json_schema}. "
                  f"Make the response as short as possible.")
        outputs = llm.generate(
            prompt,
            sampling_params=sampling_params,
            use_tqdm=True,
        )
        assert outputs is not None
        for output in outputs:
            assert output is not None
            assert isinstance(output, RequestOutput)
            generated_text = output.outputs[0].text
            assert generated_text is not None
            print(generated_text)

            # Parse to verify it is valid JSON
            parsed_json = json.loads(generated_text)
            assert isinstance(parsed_json, dict)

    if backend not in ["outlines", "lm-format-enforcer"]:
        #
        # Test 4: Generate SQL statement using EBNF grammar
        #
        sampling_params = SamplingParams(
            temperature=0.8,
            top_p=0.95,
            max_tokens=1000,
            structured_outputs=StructuredOutputsParams(
                grammar=sample_sql_ebnf))
        outputs = llm.generate(
            ("Generate a sql statement that selects col_1 from "
             "table_1 where it is equal to 1. Make the response as short as "
             "possible."),
            sampling_params=sampling_params,
            use_tqdm=True,
        )

        assert outputs is not None
        for output in outputs:
            assert output is not None
            assert isinstance(output, RequestOutput)
            prompt = output.prompt

            generated_text = output.outputs[0].text
            assert generated_text is not None

            # remove spaces for comparison b/c we removed them in the grammar
            ground_truth = "SELECT col_1 from table_1 where col_1 = 1".replace(
                " ", "")

            assert generated_text.strip() == ground_truth

            print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")

        #
        # Test 5: Generate SQL statement using Lark grammar
        #
        sampling_params = SamplingParams(
            temperature=0.8,
            top_p=0.95,
            max_tokens=1000,
            structured_outputs=StructuredOutputsParams(
                grammar=sample_sql_lark))
        outputs = llm.generate(
            ("Generate a sql statement that selects col_1 from "
             "table_1 where it is equal to 1. Make the response as short as "
             "possible."),
            sampling_params=sampling_params,
            use_tqdm=True,
        )

        assert outputs is not None
        for output in outputs:
            assert output is not None
            assert isinstance(output, RequestOutput)
            prompt = output.prompt

            generated_text = output.outputs[0].text
            assert generated_text is not None

            # use Lark to parse the output, and make sure it's a valid parse tree
            from lark import Lark
            parser = Lark(sample_sql_lark)
            parser.parse(generated_text)

            # remove spaces for comparison b/c we removed them in the grammar
            ground_truth = "SELECT col_1 from table_1 where col_1 = 1".replace(
                " ", "")

            assert generated_text.strip() == ground_truth

            print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")

        #
        # Test 6: Test invalid grammar input
        #
        sampling_params = SamplingParams(
            temperature=0.8,
            top_p=0.95,
            max_tokens=1000,
            structured_outputs=StructuredOutputsParams(
                grammar="not a grammar"))
        with pytest.raises(ValueError, match="Failed to convert the grammar "):
            llm.generate(
                ("Generate a sql statement that selects col_1 from "
                 "table_1 where it is equal to 1. Make the response as short "
                 "as possible."),
                sampling_params=sampling_params,
                use_tqdm=True,
            )

    #
    # Test 7: Generate text based on a regex pattern
    #
    sampling_params = SamplingParams(
        temperature=0.8,
        top_p=0.95,
        structured_outputs=StructuredOutputsParams(regex=sample_regex))

    prompt = (f"Give an example IPv4 address with this regex: {sample_regex}. "
              f"Make the response as short as possible.")
    outputs = llm.generate(
        [prompt] * 2,
        sampling_params=sampling_params,
        use_tqdm=True,
    )

    assert outputs is not None
    for output in outputs:
        assert output is not None
        assert isinstance(output, RequestOutput)
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(generated_text)
        assert generated_text is not None
        assert re.fullmatch(sample_regex, generated_text) is not None
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")

    #
    # Test 8: Generate text based on a list of choices
    #
    sampling_params = SamplingParams(
        temperature=0.8,
        top_p=0.95,
        structured_outputs=StructuredOutputsParams(
            choice=sample_structured_outputs_choices))

    outputs = llm.generate(
        ("The best language for type-safe systems programming is "
         "(Make the response as short as possible.) "),
        sampling_params=sampling_params,
        use_tqdm=True,
    )
    assert outputs is not None
    for output in outputs:
        assert output is not None
        assert isinstance(output, RequestOutput)
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(generated_text)
        assert generated_text is not None
        assert generated_text in sample_structured_outputs_choices
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")

    #
    # Test 9: Generate structured output using a Pydantic model with an enum
    #
    json_schema = CarDescription.model_json_schema()
    sampling_params = SamplingParams(
        temperature=1.0,
        max_tokens=1000,
        structured_outputs=StructuredOutputsParams(json=json_schema))

    outputs = llm.generate(
        ("Generate a JSON with the brand, model and car_type of the most "
         "iconic car from the 90's. Make the response as short as "
         "possible."),
        sampling_params=sampling_params,
        use_tqdm=True,
    )

    assert outputs is not None

    for output in outputs:
        assert output is not None
        assert isinstance(output, RequestOutput)
        prompt = output.prompt

        generated_text = output.outputs[0].text
        assert generated_text is not None
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
        try:
            output_json = json.loads(generated_text)
        except json.JSONDecodeError as e:
            pytest.fail(
                f"Invalid JSON from backend={backend}: {generated_text!r}\n"
                f"Schema: {json_schema}\nError: {e}")
        jsonschema.validate(instance=output_json, schema=json_schema)

    #
    # Test 10: Generate structured output with minLength and maxLength
    #
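    # minLength == maxLength == 50 forces the "description" value to be
    # exactly 50 characters long.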
    min_length = 50
    max_length = 50
    json_schema = {
        "type": "object",
        "properties": {
            "description": {
                "type": "string",
                "maxLength": max_length,
                "minLength": min_length
            }
        },
        "required": ["description"],
        "additionalProperties": False
    }

    sampling_params = SamplingParams(
        temperature=1.0,
        max_tokens=4096,
        structured_outputs=StructuredOutputsParams(json=json_schema))

    outputs = llm.generate(
        ("Generate a description of a frog using 50 characters. "
         "Make the response as short as possible."),
        sampling_params=sampling_params,
        use_tqdm=True,
    )

    assert outputs is not None

    for output in outputs:
        assert output is not None
        assert isinstance(output, RequestOutput)
        prompt = output.prompt

        generated_text = output.outputs[0].text
        assert generated_text is not None
        print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}")
        try:
            output_json = json.loads(generated_text)
        except json.JSONDecodeError as e:
            pytest.fail(
                f"Invalid JSON from backend={backend}: {generated_text!r}\n"
                f"Schema: {json_schema}\nError: {e}")
        jsonschema.validate(instance=output_json, schema=json_schema)

    if backend not in ["outlines", "lm-format-enforcer"]:
        #
        # Test 11: Generate structured output using structural_tag format
        #
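        # A structural tag leaves generation unconstrained until a trigger
        # string (here "<function=") is emitted; the text between "begin" and
        # "end" must then conform to the attached JSON schema, e.g.
        # ...free text... <function=get_weather>{"city": "Paris"}</function>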
        structural_tag_config = {
            "type":
            "structural_tag",
            "structures": [{
                "begin": "<function=get_weather>",
                "schema": {
                    "type": "object",
                    "properties": {
                        "city": {
                            "type": "string"
                        }
                    },
                    "additionalProperties": False
                },
                "end": "</function>"
            }],
            "triggers": ["<function="]
        }

        sampling_params = SamplingParams(
            temperature=0.0,
            max_tokens=4096,
            structured_outputs=StructuredOutputsParams(
                structural_tag=json.dumps(structural_tag_config)))

        prompt = """
You have access to the following function to retrieve the weather in a city:

{
    "name": "get_weather",
    "parameters": {
        "city": {
            "param_type": "string",
            "description": "The city to get the weather for",
            "required": True
        }
    }
}

If you choose to call a function ONLY reply in the following format:
<{start_tag}={function_name}>{parameters}{end_tag}
where

start_tag => `<function`
parameters => a JSON dict with the function argument name
              as key and function argument value as value.
end_tag => `</function>`

Here is an example,
<function=example_function_name>{"example_name": "example_value"}</function>

Reminder:
- Function calls MUST follow the specified format
- Required parameters MUST be specified
- Only call one function at a time
- Put the entire function call reply on one line
- Always add your sources when using search results to answer the user query

You are a helpful assistant.

Given the previous instructions, what is the weather in New York City? \
Make the response as short as possible.
"""

        # Change this once other backends support structural_tag
        outputs = llm.generate(prompt,
                               sampling_params=sampling_params,
                               use_tqdm=True)
        assert outputs is not None

        for output in outputs:
            assert output is not None
            assert isinstance(output, RequestOutput)
            generated_text = output.outputs[0].text
            assert generated_text is not None

            # Search for function call pattern in the response
            function_call_pattern = r'<function=get_weather>(.*?)</function>'
            matches = re.findall(function_call_pattern, generated_text)

            if not matches:
                print(f"Warning: No function calls found in response: "
                      f"{generated_text!r}")
                continue

            # Take the first function call if multiple are found
            json_str = matches[0]
            try:
                json_content = json.loads(json_str)
                assert "city" in json_content
                assert isinstance(json_content["city"], str)
                print(f"Found valid function call: {generated_text!r}")
            except (json.JSONDecodeError, AssertionError) as e:
                pytest.fail("Invalid function call format: "
                            f"{generated_text!r}\nError: {str(e)}")


@pytest.mark.skip_global_cleanup
@pytest.mark.parametrize(
    "model_name, backend, tokenizer_mode, reasoning_parser, speculative_config",  # noqa: E501
    [
        ("deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", "xgrammar", "auto",
         "deepseek_r1", NGRAM_SPEC_CONFIG),
        ("Qwen/Qwen3-1.7B", "xgrammar", "auto", "deepseek_r1", None),
    ],
)
def test_structured_output_with_reasoning_matrices(
    monkeypatch: pytest.MonkeyPatch,
    backend: str,
    tokenizer_mode: TokenizerMode,
    reasoning_parser: str,
    model_name: str,
    speculative_config: dict[str, Any] | None,
):
    monkeypatch.setenv("VLLM_USE_V1", "1")

    if current_platform.is_tpu() and speculative_config:
        pytest.skip("TPU does not support speculative decoding")

    # Use a single LLM instance for several scenarios to
    # speed up the test suite.
    llm = LLM(
        model=model_name,
        # Don't use eager execution on TPUs because we want to test for no
        # recompilation at runtime
        enforce_eager=bool(not current_platform.is_tpu()),
        max_model_len=1024,
        max_num_seqs=16,
        structured_outputs_config=dict(backend=backend,
                                       disable_any_whitespace=backend
                                       in {"xgrammar", "guidance"},
                                       reasoning_parser=reasoning_parser),
        tokenizer_mode=tokenizer_mode,
        speculative_config=speculative_config,
    )
    tokenizer = llm.get_tokenizer()
    reasoner = ReasoningParserManager.get_reasoning_parser(reasoning_parser)(
        tokenizer=tokenizer)

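    # The reasoning parser splits the raw completion into a reasoning
    # segment and the final answer; only the final answer is expected to
    # satisfy the JSON schema.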
    reasoning_prompt = "Solve the following math problem step-by-step, then provide the final answer as a JSON object with a single key 'result'. Make sure to correct your reasoning if any issues arise.\nProblem: What is 5 * 8 + 2?"  # noqa: E501
    reasoning_schema = {
        "type": "object",
        "properties": {
            "result": {
                "type": "integer"
            }
        },
        "required": ["result"],
        "additionalProperties": False
    }
    if "Qwen3" in model_name:
        reasoning_prompt += "<think>\n"

    sampling_params = SamplingParams(
        temperature=0.1,
        max_tokens=8192,
        structured_outputs=StructuredOutputsParams(json=reasoning_schema),
    )
    outputs = llm.generate(
        [reasoning_prompt],
        sampling_params=sampling_params,
        use_tqdm=True,
    )

    assert outputs is not None
    output = outputs[0]
    assert output is not None and isinstance(output, RequestOutput)
    prompt = output.prompt
    generated_text = output.outputs[0].text
    reasoning_content, content = run_reasoning_extraction(
        reasoner, [generated_text])
    print(
        f"Prompt: {prompt!r}\nReasoning: {reasoning_content!r}\nContent: {content!r}"
    )

    assert content is not None and reasoning_content is not None
    output_json = json.loads(content)
    jsonschema.validate(instance=output_json, schema=reasoning_schema)


@pytest.mark.skip_global_cleanup
@pytest.mark.parametrize("model_name, tokenizer_mode",
                         PARAMS_MODELS_TOKENIZER_MODE)
def test_structured_output_auto_mode(
    monkeypatch: pytest.MonkeyPatch,
    unsupported_json_schema: dict[str, Any],
    model_name: str,
    tokenizer_mode: str,
):
    monkeypatch.setenv("VLLM_USE_V1", "1")

    llm = LLM(model=model_name,
              max_model_len=1024,
              structured_outputs_config=dict(backend="auto"),
              tokenizer_mode=tokenizer_mode)

    sampling_params = SamplingParams(
        temperature=1.0,
        max_tokens=1000,
        structured_outputs=StructuredOutputsParams(
            json=unsupported_json_schema))

    prompts = (
        "Give an example JSON object for a grade "
        "that fits this schema: "
        f"{unsupported_json_schema}. Make the response as short as possible.")
    # This would fail with the default of "xgrammar", but in "auto" mode
    # the fallback to a compatible backend is handled automatically.
    outputs = llm.generate(prompts,
                           sampling_params=sampling_params,
                           use_tqdm=True)
    # Make sure `auto` backend handling doesn't mess up sampling_params
    # and that we can reuse it without error.
    outputs.extend(
        llm.generate(prompts, sampling_params=sampling_params, use_tqdm=True))

    assert outputs is not None
    for output in outputs:
        assert output is not None
        assert isinstance(output, RequestOutput)
        generated_text = output.outputs[0].text
        assert generated_text is not None
        print(generated_text)

        # Parse to verify it is valid JSON
        parsed_json = json.loads(generated_text)
        assert isinstance(parsed_json, dict)


@pytest.mark.skip_global_cleanup
def test_guidance_no_additional_properties(monkeypatch: pytest.MonkeyPatch):
    monkeypatch.setenv("VLLM_USE_V1", "1")

    llm = LLM(model="Qwen/Qwen2.5-1.5B-Instruct",
              max_model_len=1024,
              structured_outputs_config=dict(
                  backend="guidance",
                  disable_any_whitespace=True,
                  disable_additional_properties=True))

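    # With disable_additional_properties=True, the guidance backend behaves
    # as if "additionalProperties": false were set on the schema, so only
    # the declared keys a1..a3 may appear even though the prompt asks for
    # a1..a20.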
    schema = {
        'type': 'object',
        'properties': {
            'a1': {
                'type': 'string'
            },
            'a2': {
                'type': 'string'
            },
            'a3': {
                'type': 'string'
            }
        },
        'required': ['a1', 'a2', 'a3'],
    }

    prompt = (
        "<|im_start|>system\nYou are Qwen, created by Alibaba Cloud. You are a "
        "helpful assistant.<|im_end|>\n<|im_start|>user\nPlease generate a "
        "large JSON object with key-value pairs a1=b1, a2=b2, ..., a20=b20. "
        "Make the response as short as possible."
        "<|im_end|>\n<|im_start|>assistant\n")

    def generate_with_backend(backend):
        structured_outputs_params = StructuredOutputsParams(
            json=schema,
            backend=backend,
            disable_any_whitespace=True,
            disable_additional_properties=True)
        sampling_params = SamplingParams(
            temperature=0,
            max_tokens=256,
            structured_outputs=structured_outputs_params)

        outputs = llm.generate(prompt, sampling_params=sampling_params)
        assert outputs is not None
        generated_text = outputs[0].outputs[0].text
        assert generated_text is not None
        parsed_json = json.loads(generated_text)
        assert isinstance(parsed_json, dict)
        jsonschema.validate(instance=parsed_json, schema=schema)
        return parsed_json

    generated = generate_with_backend("guidance")
    assert "a1" in generated
    assert "a2" in generated
    assert "a3" in generated
    assert "a4" not in generated
    assert "a5" not in generated
    assert "a6" not in generated


@pytest.mark.parametrize("backend", ["guidance", "xgrammar", "outlines"])
def test_structured_output_batched_with_non_structured_outputs_requests(
    monkeypatch: pytest.MonkeyPatch,
    sample_json_schema: dict[str, Any],
    backend: str,
):
    monkeypatch.setenv("VLLM_USE_V1", "1")

    # Don't use eager execution on TPUs because we want to test for no
    # recompilation at runtime
    enforce_eager = bool(not current_platform.is_tpu())

    llm = LLM(
        model="meta-llama/Meta-Llama-3.1-8B-Instruct",
        enforce_eager=enforce_eager,
        max_model_len=1024,
        structured_outputs_config=StructuredOutputsConfig(
            backend=backend,
            disable_any_whitespace=backend in {"xgrammar", "guidance"},
        ),
    )

    structured_outputs_prompt = (
        "Give an example JSON for an employee profile that fits this "
        "schema. Make the response as short as possible. Schema: "
        f"{sample_json_schema}")

    non_structured_outputs_prompt = "The diameter of the Earth in kilometers is "

    prompts = [structured_outputs_prompt, non_structured_outputs_prompt]
    sampling_params = [
        SamplingParams(temperature=1.0,
                       max_tokens=400,
                       structured_outputs=StructuredOutputsParams(
                           json=sample_json_schema)),
        # No max tokens, temp=0 to assert on contents
        SamplingParams(
            seed=42,
            temperature=0,
            top_p=1.0,
        ),
    ]

    outputs = llm.generate(prompts=prompts,
                           sampling_params=sampling_params,
                           use_tqdm=True)

    assert outputs is not None

    # Free memory as soon as possible, since failed assertions
    # will short-circuit and not free up memory
    del llm
    torch.cuda.empty_cache()
    cleanup_dist_env_and_memory()

    for index, output in enumerate(outputs):
        assert output is not None
        assert isinstance(output, RequestOutput)
        prompt = output.prompt

        generated_text = output.outputs[0].text
        assert generated_text is not None
        print(f"Prompt:\n{prompt!r}\nGenerated text:\n{generated_text!r}")

        if index == 0:
            # First prompt uses structured outputs, so expect valid JSON
            assert "\n" not in generated_text
            output_json = json.loads(generated_text)
            jsonschema.validate(instance=output_json,
                                schema=sample_json_schema)
        else:
            # Second prompt does not use structured outputs; we cannot assert
            # on the exact output, but we can expect it to be factual
            assert "12,742" in generated_text

            # non-structured-outputs requests should not return valid JSON here
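            # (json.JSONDecodeError is a subclass of ValueError)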
            with pytest.raises(ValueError):
                output_json = json.loads(generated_text)