# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from typing import Optional

import numpy as np
import pytest
import torch

from vllm.platforms import current_platform
from vllm.utils import is_pin_memory_available, make_tensor_with_pad
from vllm.v1.sample.logits_processor import LogitsProcessors
from vllm.v1.sample.metadata import SamplingMetadata
from vllm.v1.sample.sampler import Sampler

PIN_MEMORY_AVAILABLE = is_pin_memory_available()
MAX_NUM_REQS = 256
VOCAB_SIZE = 1024
NUM_OUTPUT_TOKENS = 20
CUDA_DEVICES = [
    f"{current_platform.device_type}:{i}"
    for i in range(1 if current_platform.device_count() == 1 else 2)
]
MAX_NUM_PROMPT_TOKENS = 64


def _create_fake_logits(batch_size: int, vocab_size: int) -> torch.Tensor:
    fake_logits = torch.full((batch_size, vocab_size), 1e-2, dtype=torch.float)
    return fake_logits
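
# NOTE: every fake logit starts out at the same value (1e-2), so any token
# whose logit is later raised or lowered by a penalty or mask becomes the
# strict argmax or argmin of its row; the assertions in the tests below rely
# on this property.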


def _create_penalty_tensor(batch_size: int, penalty_value: float,
                           device: torch.device) -> torch.Tensor:
    return torch.full((batch_size, ),
                      fill_value=penalty_value,
                      dtype=torch.float,
                      device=device)


def _create_prompt_tokens_tensor(
    prompt_token_ids: list[list[int]],
    vocab_size: int,
    device: torch.device,
) -> torch.Tensor:
    return make_tensor_with_pad(
        prompt_token_ids,
        pad=vocab_size,
        device=device,
        dtype=torch.int64,
        pin_memory=False,
    )


def _create_allowed_token_ids(
    batch_size: int,
    vocab_size: int,
    num_allowed_token_ids: int,
    device: torch.device,
) -> Optional[torch.Tensor]:
    mask: Optional[torch.Tensor] = None
    for i in range(batch_size):
        if i % 2 == 1:
            continue
        if mask is None:
            mask = torch.zeros((batch_size, vocab_size),
                               dtype=torch.bool,
                               device=device)
        start = min(i, vocab_size - 1)
        end = min(i + num_allowed_token_ids, vocab_size - 1)
        mask[i, start:end] = True
    return mask
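
# NOTE: a True entry marks a position that the sampler is expected to
# suppress: test_sampler_allowed_token_ids below asserts that exactly the
# True positions end up at -inf after apply_allowed_token_ids. Odd batch
# indices are skipped above, so their rows carry no True entries at all.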


def _create_bad_words_token_ids(
    batch_size: int,
    vocab_size: int,
    bad_words_lengths: tuple[int, ...],
) -> dict[int, list[list[int]]]:
    bad_words_token_ids = {}
    for batch_idx in range(batch_size):
        token_ids_single_batch = []
        for bad_words_length in bad_words_lengths:
            token_ids = np.random.choice(vocab_size,
                                         size=bad_words_length,
                                         replace=True).tolist()
            token_ids_single_batch.append(token_ids)
        bad_words_token_ids[batch_idx] = token_ids_single_batch
    if batch_size >= 2:
        # Test no bad_words for some batch
        no_bad_words_batch_idx = np.random.choice(batch_size)
        bad_words_token_ids.pop(no_bad_words_batch_idx, None)
    return bad_words_token_ids
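
# The result maps batch index -> list of bad-word token-id sequences; e.g. for
# bad_words_lengths=(1, 3) each request gets one single-token bad word and one
# three-token bad word (the actual token ids are random). One randomly chosen
# request may be dropped from the dict to also cover the "no bad words" case.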


# Returns all last tokens of bad word sequences that share the same prefix
# as `given_prefix` (excluding the last token).
def _collect_suffixes_with_same_prefix(
        given_prefix: list[int],
        bad_words_token_ids: list[list[int]]) -> list[int]:
    return [bwt[-1] for bwt in bad_words_token_ids if bwt[:-1] == given_prefix]
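
# For example, given_prefix=[3, 5] with bad words [[3, 5, 9], [3, 5, 2], [4, 1]]
# yields [9, 2]: both three-token bad words share the prefix, so both of their
# final tokens must be banned once that prefix has been generated.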


# Generate a token id that does not appear as the first token of any bad word.
def _generate_valid_token_id(bad_words_token_ids: list[list[int]],
                             vocab_size: int) -> int:
    forbidden_start_tokens = set()
    for bad_word in bad_words_token_ids:
        forbidden_start_tokens.add(bad_word[0])
    # Get a safe token that's not in forbidden starts
    safe_token_candidates = list(
        set(range(vocab_size)) - forbidden_start_tokens)
    # Pick a random safe token
    return np.random.choice(safe_token_candidates)


def _update_output_token_ids_for_bad_words(
        metadata: SamplingMetadata, vocab_size: int) -> dict[int, list[int]]:
    bad_words_last_tokens = {}
    for batch_idx, bad_words_token_ids in metadata.bad_words_token_ids.items():
        output_token_ids = metadata.output_token_ids[batch_idx]
        bad_words_last_token: list[int] = []
        for i, bad_word_token_ids in enumerate(bad_words_token_ids):
            if len(bad_word_token_ids) == 1:
                # Single token id always affects logits
                bad_words_last_token.append(bad_word_token_ids[0])
            else:
                prefix_length = len(bad_word_token_ids) - 1
                has_bad_words = np.random.choice([True, False])
                if has_bad_words:
                    prefix = bad_word_token_ids[:-1]
                    output_token_ids[-prefix_length:] = prefix
                    # Collect all last tokens from other bad words
                    # that share this prefix
                    bad_words_last_token.extend(
                        _collect_suffixes_with_same_prefix(
                            prefix, bad_words_token_ids))
                    break  # Maximum one update to output_token_ids
                else:  # Make sure no accidental match to bad words
                    output_token_ids[-1] = _generate_valid_token_id(
                        bad_words_token_ids, vocab_size)
        bad_words_last_tokens[batch_idx] = bad_words_last_token
    return bad_words_last_tokens
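
# The returned dict maps batch index -> the bad-word final tokens that the
# sampler should mask for that request, given the (possibly rewritten) output
# history above; test_sampler_bad_words checks that exactly these token ids
# are set to -inf and that all other logits are left untouched.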


def _create_default_sampling_metadata(
    num_output_tokens: int,
    batch_size: int,
    vocab_size: int,
    device: torch.device,
) -> SamplingMetadata:
    output_token_ids: list[list[int]] = []
    prompt_token_ids: list[list[int]] = []
    for _ in range(batch_size):
        output_token_ids.append(
            np.random.randint(0, vocab_size, size=num_output_tokens).tolist())
        prompt_token_ids.append(
            np.random.randint(0,
                              vocab_size,
                              size=np.random.randint(
                                  1, MAX_NUM_PROMPT_TOKENS)).tolist())
    fake_sampling_metadata = SamplingMetadata(
        temperature=torch.full((batch_size, ), 0.0),
        all_greedy=True,
        all_random=False,
        top_p=None,
        top_k=None,
        generators={},
        max_num_logprobs=0,
        prompt_token_ids=_create_prompt_tokens_tensor(prompt_token_ids,
                                                      vocab_size, device),
        output_token_ids=output_token_ids,
        frequency_penalties=_create_penalty_tensor(batch_size, 0.0, device),
        presence_penalties=_create_penalty_tensor(batch_size, 0.0, device),
        repetition_penalties=_create_penalty_tensor(batch_size, 1.0, device),
        no_penalties=True,
        allowed_token_ids_mask=None,
        bad_words_token_ids={},
        logitsprocs=LogitsProcessors(),
    )
    return fake_sampling_metadata
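
# The defaults above describe a fully greedy batch with every constraint
# disabled (0.0 and 1.0 are the no-op values for the respective penalties,
# there is no allowed-token mask and no bad words), so each test only has to
# overwrite the single field it exercises before calling the sampler.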


def _create_weighted_output_token_list(
        batch_size: int,
        vocab_size: int) -> tuple[list[list[int]], list[list[int]]]:
    """
    Creates an output token list where each token occurs a distinct
    number of times.

    For each batch, a random subset of token IDs is selected from the
    vocabulary. The selected tokens are then added to the output token
    list, each with a different frequency.

    Returns:
        tuple[list[list[int]], list[list[int]]]:
            - The first element is the output token list, where each sublist
              corresponds to a batch and contains tokens with weighted
              frequencies.
            - The second element is a list of distinct token IDs for each
              batch, ordered by their frequency in the corresponding output
              list.
    """
    output_token_ids: list[list[int]] = []
    sorted_token_ids_in_output: list[list[int]] = []
    for _ in range(batch_size):
        distinct_token_ids = np.random.choice(vocab_size,
                                              size=np.random.randint(1, 10),
                                              replace=False).tolist()
        sorted_token_ids_in_output.append(distinct_token_ids)
        output_token_ids_for_batch = []
        for index, token_id in enumerate(distinct_token_ids):
            output_token_ids_for_batch.extend(
                [token_id for _ in range(index + 1)])
        output_token_ids.append(output_token_ids_for_batch)
    return output_token_ids, sorted_token_ids_in_output
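
# For instance, distinct token ids [7, 3, 11] expand to the output
# [7, 3, 3, 11, 11, 11]: the i-th distinct token appears i+1 times, so the
# last entry of each sorted list is always the most frequent token, which is
# what test_sampler_frequency_penalty keys its assertions on.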


@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("batch_size", [1, 2, 32])
@pytest.mark.parametrize("presence_penalty", [-2.0, 2.0])
def test_sampler_presence_penalty(device: str, batch_size: int,
                                  presence_penalty: float):
    """
    Test to verify that when the presence penalty is enabled, tokens
    are penalized based on their presence in the existing output.
    """
    torch.set_default_device(device)
    # Create fake logits where each token is assigned the same
    # logit value.
    fake_logits = _create_fake_logits(batch_size, VOCAB_SIZE)
    sampling_metadata = _create_default_sampling_metadata(
        NUM_OUTPUT_TOKENS, batch_size, VOCAB_SIZE, torch.device(device))
    output_token_ids = sampling_metadata.output_token_ids
    sampling_metadata.presence_penalties = _create_penalty_tensor(
        batch_size, presence_penalty, torch.device(device))
    sampling_metadata.no_penalties = False
    sampler = Sampler()
    logits = sampler.apply_penalties(fake_logits, sampling_metadata)
    logits = logits.cpu()
    for batch_idx in range(batch_size):
        # Since all tokens initially have the same logits, the non-penalized
        # token ID will be the one with the highest logit value, while the
        # penalized token ID will be the one with the lowest logit value.
        non_penalized_token_id = logits[batch_idx].argmax().item()
        penalized_token_id = logits[batch_idx].argmin().item()
        if presence_penalty > 0:
            # If `presence_penalty` is set to a value greater than 0, it
            # indicates a preference for new tokens over those already
            # present in the output.
            # Verify that the penalized token ID exists in the output,
            # while the non-penalized token ID does not.
            assert penalized_token_id in output_token_ids[batch_idx]
            assert non_penalized_token_id not in output_token_ids[batch_idx]
        elif presence_penalty < 0:
            # If `presence_penalty` is set to a value less than 0, it indicates
            # a preference for existing tokens over new ones. Verify that the
            # non-penalized token ID exists in the output, while the penalized
            # token ID does not.
            assert non_penalized_token_id in output_token_ids[batch_idx]
            assert penalized_token_id not in output_token_ids[batch_idx]


@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("batch_size", [1, 2, 32])
@pytest.mark.parametrize("frequency_penalty", [-2.0, 2.0])
def test_sampler_frequency_penalty(device: str, batch_size: int,
                                   frequency_penalty: float):
    """
    Test to verify that when the frequency penalty is enabled, tokens are
    penalized based on their frequency of occurrence in the output.
    """
    torch.set_default_device(device)
    # Create fake logits where each token is assigned the same
    # logit value.
    fake_logits = _create_fake_logits(batch_size, VOCAB_SIZE)
    sampling_metadata = _create_default_sampling_metadata(
        NUM_OUTPUT_TOKENS, batch_size, VOCAB_SIZE, torch.device(device))
    sampling_metadata.frequency_penalties = _create_penalty_tensor(
        batch_size, frequency_penalty, torch.device(device))
    output_token_ids, sorted_token_ids_in_output = \
        _create_weighted_output_token_list(
            batch_size,
            VOCAB_SIZE,
        )
    sampling_metadata.output_token_ids = output_token_ids
    sampling_metadata.no_penalties = False
    sampler = Sampler()
    logits = sampler.apply_penalties(fake_logits, sampling_metadata)
    logits = logits.cpu()
    for batch_idx in range(batch_size):
        non_penalized_token_id = logits[batch_idx].argmax().item()
        penalized_token_id = logits[batch_idx].argmin().item()
        distinct_sorted_token_ids_in_output = sorted_token_ids_in_output[
            batch_idx]
        most_frequent_token_id = distinct_sorted_token_ids_in_output[
            len(distinct_sorted_token_ids_in_output) - 1]
        if frequency_penalty > 0:
            # If `frequency_penalty` is set to > 0, it indicates
            # a preference for new tokens over existing ones. Verify that the
            # non-penalized token ID is not present in the output, while the
            # most penalized token is the one that occurs most frequently in
            # the output.
            assert (non_penalized_token_id
                    not in distinct_sorted_token_ids_in_output)
            assert penalized_token_id == most_frequent_token_id
        elif frequency_penalty < 0:
            # If `frequency_penalty` is set to < 0, it indicates
            # a preference for existing tokens over new ones. Verify that the
            # non-penalized token ID is the one that occurs most frequently
            # in the output, while the penalized token ID is one that has not
            # yet appeared.
            assert non_penalized_token_id == most_frequent_token_id
            assert penalized_token_id not in distinct_sorted_token_ids_in_output


@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("batch_size", [1, 2, 32])
@pytest.mark.parametrize("repetition_penalty", [0.1, 1.9])
def test_sampler_repetition_penalty(device: str, batch_size: int,
                                    repetition_penalty: float):
    """
    Test to verify that when the repetition penalty is enabled, tokens
    are penalized based on their presence in the prompt or the existing
    output.
    """
    torch.set_default_device(device)
    # Create fake logits where each token is assigned the same
    # logit value.
    fake_logits = _create_fake_logits(batch_size, VOCAB_SIZE)
    sampling_metadata = _create_default_sampling_metadata(
        NUM_OUTPUT_TOKENS, batch_size, VOCAB_SIZE, torch.device(device))
    sampling_metadata.repetition_penalties = _create_penalty_tensor(
        batch_size, repetition_penalty, torch.device(device))
    sampling_metadata.no_penalties = False
    sampler = Sampler()
    logits = sampler.apply_penalties(fake_logits, sampling_metadata)
    logits = logits.cpu()
    for batch_idx in range(batch_size):
        non_penalized_token_id = logits[batch_idx].argmax().item()
        penalized_token_id = logits[batch_idx].argmin().item()
        prompt_tokens = sampling_metadata.prompt_token_ids[
            batch_idx][:].tolist()
        output_tokens = sampling_metadata.output_token_ids[batch_idx]
        if repetition_penalty > 1.0:
            # If `repetition_penalty` > 1.0, verify that the non-penalized
            # token ID has not been seen before, while the penalized token ID
            # exists either in the prompt or the output.
            assert (non_penalized_token_id not in prompt_tokens
                    and non_penalized_token_id not in output_tokens)
            assert (penalized_token_id in prompt_tokens
                    or penalized_token_id in output_tokens)
        elif repetition_penalty < 1.0:
            # If `repetition_penalty` < 1.0, verify that the penalized
            # token ID has not been seen before, while the non-penalized
            # token ID exists either in the prompt or the output.
            assert (penalized_token_id not in prompt_tokens
                    and penalized_token_id not in output_tokens)
            assert (non_penalized_token_id in prompt_tokens
                    or non_penalized_token_id in output_tokens)


@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("batch_size", [1, 2, 32])
@pytest.mark.parametrize("num_allowed_token_ids", [0, 1, 2])
def test_sampler_allowed_token_ids(device: str, batch_size: int,
                                   num_allowed_token_ids: int):
    """
    Test to verify that when an allowed-token-ids mask is present, exactly
    the masked logit positions are set to -inf while all other positions
    are left untouched.
    """
    torch.set_default_device(device)
    # Create fake logits where each token is assigned the same
    # logit value.
    fake_logits = _create_fake_logits(batch_size, VOCAB_SIZE)
    sampling_metadata = _create_default_sampling_metadata(
        NUM_OUTPUT_TOKENS, batch_size, VOCAB_SIZE, torch.device(device))
    mask = _create_allowed_token_ids(
        batch_size=batch_size,
        vocab_size=VOCAB_SIZE,
        num_allowed_token_ids=num_allowed_token_ids,
        device=device,
    )
    sampling_metadata.allowed_token_ids_mask = mask
    sampler = Sampler()
    logits = sampler.apply_allowed_token_ids(fake_logits, sampling_metadata)
    logits = logits.cpu()
    for batch_idx in range(batch_size):
        logits_for_req = logits[batch_idx]
        if batch_idx % 2 == 1:
            assert torch.all(logits_for_req != -float("inf"))
            continue
        for token_id in range(VOCAB_SIZE):
            start = min(batch_idx, VOCAB_SIZE - 1)
            end = min(batch_idx + num_allowed_token_ids, VOCAB_SIZE - 1)
            if token_id >= start and token_id < end:
                assert logits_for_req[token_id] == -float(
                    "inf"), f"{batch_idx}, {token_id}"
            else:
                assert logits_for_req[token_id] != -float("inf")


@pytest.mark.parametrize("device", CUDA_DEVICES)
@pytest.mark.parametrize("batch_size", [1, 2, 32])
@pytest.mark.parametrize("bad_words_lengths", [(1, ), (1, 3), (2, 2)])
def test_sampler_bad_words(device: str, batch_size: int,
                           bad_words_lengths: tuple[int, ...]):
    """
    Test to verify that when the bad words restriction is present, tokens
    are penalized based on their match with the bad words.
    """
    torch.set_default_device(device)
    # Create fake logits where each token is assigned the same
    # logit value.
    fake_logits = _create_fake_logits(batch_size, VOCAB_SIZE)
    sampling_metadata = _create_default_sampling_metadata(
        NUM_OUTPUT_TOKENS, batch_size, VOCAB_SIZE, torch.device(device))
    sampling_metadata.bad_words_token_ids = _create_bad_words_token_ids(
        batch_size, VOCAB_SIZE, bad_words_lengths)
    bad_words_last_tokens = _update_output_token_ids_for_bad_words(
        sampling_metadata, VOCAB_SIZE)
    sampler = Sampler()
    logits = sampler.apply_bad_words(fake_logits, sampling_metadata)
    logits = logits.cpu()
    for batch_idx in range(batch_size):
        logits_for_req = logits[batch_idx]
        for token_id in range(VOCAB_SIZE):
            if (batch_idx in bad_words_last_tokens
                    and token_id in bad_words_last_tokens[batch_idx]):
                assert logits_for_req[token_id] == -float("inf")
            else:
                assert logits_for_req[token_id] != -float("inf")