mirror of
https://git.datalinker.icu/vllm-project/vllm.git
synced 2026-04-13 18:27:05 +08:00
test
Signed-off-by: Roger Wang <hey@rogerw.me>
This commit is contained in:
parent
32c0155774
commit
f96a3cc713
@ -2,6 +2,7 @@
|
||||
import weakref
|
||||
|
||||
import pytest
|
||||
from PIL import Image
|
||||
|
||||
from vllm import LLM
|
||||
from vllm.distributed import cleanup_dist_env_and_memory
|
||||
@ -118,6 +119,29 @@ def test_chat_multi_image(vision_llm, image_urls: list[str]):
|
||||
assert len(outputs) >= 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize("image_urls",
                         [[TEST_IMAGE_URLS[0], TEST_IMAGE_URLS[1]]])
def test_chat_multi_pil_image(vision_llm, image_urls: list[str]):
    """Smoke-test LLM.chat() on one user turn carrying several PIL images.

    Mirrors test_chat_multi_image but passes in-process PIL.Image objects
    rather than URL strings as the image content entries.
    """
    # NOTE(review): Image.open() expects a path or file object; presumably
    # TEST_IMAGE_URLS resolves to locally-available files here — confirm
    # against the fixture definition.
    loaded = []
    for url in image_urls:
        loaded.append(Image.open(url))

    # One content entry per image, followed by a single text prompt.
    content = [{"type": "image", "image": img} for img in loaded]
    content.append({
        "type": "text",
        "text": "What's in this image?"
    })

    messages = [{"role": "user", "content": content}]

    outputs = vision_llm.chat(messages)
    # len() is never negative, so this only verifies chat() completed
    # without raising — same (intentionally weak) check as the sibling test.
    assert len(outputs) >= 0
|
||||
|
||||
|
||||
def test_llm_chat_tokenization_no_double_bos(text_llm):
|
||||
"""
|
||||
LLM.chat() should not add special tokens when using chat templates.
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user