# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""
Unit tests for MultiModalRegistry.supports_multimodal_inputs and
Qwen2.5-VL visual component loading behavior.
"""

import pytest

from vllm.multimodal import MULTIMODAL_REGISTRY

from ..models.utils import build_model_context


@pytest.mark.parametrize(
    "model_id,limit_mm_per_prompt,expected",
    [
        # Text-only model: no multimodal input support.
        ("Qwen/Qwen2-0.5B-Instruct", {}, False),
        # Multimodal model with default limits: supported.
        ("Qwen/Qwen2.5-VL-3B-Instruct", {}, True),
        # All modality limits set to 0: effectively text-only.
        ("Qwen/Qwen2.5-VL-3B-Instruct", {
            "image": 0,
            "video": 0
        }, False),
        # Only images disabled; video inputs are still allowed.
        ("Qwen/Qwen2.5-VL-3B-Instruct", {
            "image": 0
        }, True),
    ],
)
@pytest.mark.core_model
def test_supports_multimodal_inputs(model_id, limit_mm_per_prompt, expected):
    """Test supports_multimodal_inputs returns correct boolean for various
    configs."""
    ctx = build_model_context(
        model_id,
        limit_mm_per_prompt=limit_mm_per_prompt,
    )
    assert MULTIMODAL_REGISTRY.supports_multimodal_inputs(
        ctx.model_config) is expected
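

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original test file): how the same registry
# check can be driven outside the parametrized test. It reuses only the names
# already imported above (build_model_context, MULTIMODAL_REGISTRY) and the
# model ID and limit settings from the last two parametrized cases; the
# helper name below is purely illustrative.
def _example_registry_check() -> None:
    # Disabling both modalities makes the VL model effectively text-only,
    # so the registry reports no multimodal input support.
    ctx = build_model_context(
        "Qwen/Qwen2.5-VL-3B-Instruct",
        limit_mm_per_prompt={"image": 0, "video": 0},
    )
    assert not MULTIMODAL_REGISTRY.supports_multimodal_inputs(ctx.model_config)

    # Disabling only images leaves video enabled, so support is still reported.
    ctx = build_model_context(
        "Qwen/Qwen2.5-VL-3B-Instruct",
        limit_mm_per_prompt={"image": 0},
    )
    assert MULTIMODAL_REGISTRY.supports_multimodal_inputs(ctx.model_config)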