From f96a3cc71317133600203b43a5f243b8f3b041bd Mon Sep 17 00:00:00 2001
From: Roger Wang
Date: Fri, 9 May 2025 13:31:08 -0700
Subject: [PATCH] test

Signed-off-by: Roger Wang
---
 tests/entrypoints/llm/test_chat.py | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/tests/entrypoints/llm/test_chat.py b/tests/entrypoints/llm/test_chat.py
index 742a666834457..d6519b70882b9 100644
--- a/tests/entrypoints/llm/test_chat.py
+++ b/tests/entrypoints/llm/test_chat.py
@@ -2,6 +2,7 @@
 import weakref
 
 import pytest
 
 from vllm import LLM
 from vllm.distributed import cleanup_dist_env_and_memory
+from vllm.multimodal.utils import fetch_image
@@ -118,6 +119,29 @@ def test_chat_multi_image(vision_llm, image_urls: list[str]):
     assert len(outputs) >= 0
 
 
+@pytest.mark.parametrize("image_urls",
+                         [[TEST_IMAGE_URLS[0], TEST_IMAGE_URLS[1]]])
+def test_chat_multi_pil_image(vision_llm, image_urls: list[str]):
+    # Exercise the chat path that accepts in-memory PIL images (rather than
+    # URL strings). PIL's Image.open cannot read an HTTP URL directly, so the
+    # test downloads each URL into a PIL.Image via vLLM's fetch_image helper.
+    images = [fetch_image(image_url) for image_url in image_urls]
+
+    messages = [{
+        "role":
+        "user",
+        "content": [
+            *({
+                "type": "image",
+                "image": image
+            } for image in images),
+            {
+                "type": "text",
+                "text": "What's in this image?"
+            },
+        ],
+    }]
+    outputs = vision_llm.chat(messages)
+    assert len(outputs) >= 0
+
+
 def test_llm_chat_tokenization_no_double_bos(text_llm):
     """
     LLM.chat() should not add special tokens when using chat templates.