diff --git a/tests/entrypoints/llm/test_chat.py b/tests/entrypoints/llm/test_chat.py
index 742a666834457..d6519b70882b9 100644
--- a/tests/entrypoints/llm/test_chat.py
+++ b/tests/entrypoints/llm/test_chat.py
@@ -2,6 +2,7 @@
 import weakref
 
 import pytest
 
 from vllm import LLM
 from vllm.distributed import cleanup_dist_env_and_memory
+from vllm.multimodal.utils import fetch_image
@@ -118,6 +119,30 @@ def test_chat_multi_image(vision_llm, image_urls: list[str]):
     assert len(outputs) >= 0
 
 
+@pytest.mark.parametrize("image_urls",
+                         [[TEST_IMAGE_URLS[0], TEST_IMAGE_URLS[1]]])
+def test_chat_multi_pil_image(vision_llm, image_urls: list[str]):
+    # PIL's Image.open() cannot read HTTP URLs; fetch each one into a
+    # PIL.Image first so the chat API receives in-memory image objects.
+    images = [fetch_image(image_url) for image_url in image_urls]
+
+    messages = [{
+        "role":
+        "user",
+        "content": [
+            *({
+                "type": "image",
+                "image": image
+            } for image in images),
+            {
+                "type": "text",
+                "text": "What's in this image?"
+            },
+        ],
+    }]
+    outputs = vision_llm.chat(messages)
+    assert len(outputs) >= 0
+
+
 def test_llm_chat_tokenization_no_double_bos(text_llm):
     """
     LLM.chat() should not add special tokens when using chat templates.