Mirror of https://git.datalinker.icu/vllm-project/vllm.git (synced 2025-12-21 04:55:01 +08:00)
[CI/Build] Use vLLM client's user agent to fetch images (#23561)
Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
parent 56dcf4e7e9
commit 6fd45e7b8a
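The change itself is small: the tests previously downloaded images with a bare `requests` call, which sends Python's default User-Agent, and now go through vLLM's own `fetch_image` helper so the download identifies itself as the vLLM client, presumably so hosts that filter generic `python-requests` traffic don't reject the test downloads. A minimal before/after sketch (`image_url` is a placeholder, any HTTP(S) image URL works):

```python
image_url = "https://example.com/image.jpg"  # placeholder URL for illustration

# Before: plain requests download; sends requests' default
# "python-requests/x.y" User-Agent header.
import requests
from PIL import Image

image_before = Image.open(requests.get(image_url, stream=True).raw)

# After: vLLM's helper, which performs the same download using the
# vLLM client's User-Agent and returns a PIL image directly.
from vllm.multimodal.utils import fetch_image

image_after = fetch_image(image_url)
```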
```diff
@@ -6,8 +6,6 @@ import json
 import openai
 import pytest
 import pytest_asyncio
-import requests
-from PIL import Image
 from transformers import AutoProcessor
 
 from vllm.multimodal.utils import encode_image_base64, fetch_image
@@ -36,7 +34,7 @@ EXPECTED_MM_BEAM_SEARCH_RES = [
     ],
     [
         "The image shows a Venn diagram with three over",
-        "The image shows a Venn diagram with three intersect",
+        "This image shows a Venn diagram with three intersect",
     ],
     [
         "This image displays a gradient of colors ranging from",
@@ -88,7 +86,7 @@ def get_hf_prompt_tokens(model_name, content, image_url):
         "role": "user",
         "content": f"{placeholder}{content}",
     }]
-    images = [Image.open(requests.get(image_url, stream=True).raw)]
+    images = [fetch_image(image_url)]
 
     prompt = processor.tokenizer.apply_chat_template(
         messages, tokenize=False, add_generation_prompt=True)
```
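For context, a user-agent-aware fetcher boils down to something like the sketch below. This is an illustration only, not vLLM's actual implementation: the real `fetch_image` in `vllm.multimodal.utils` handles more than plain HTTP (for example `data:` URLs) and derives its User-Agent from the installed vLLM version. All names in the sketch are illustrative.

```python
from io import BytesIO

import requests
from PIL import Image

# Illustrative UA string; vLLM builds its own from the package version.
FAKE_VLLM_USER_AGENT = "vLLM/0.0.0"

def fetch_image_sketch(image_url: str, image_mode: str = "RGB") -> Image.Image:
    # Override requests' default User-Agent with the client's own.
    resp = requests.get(image_url, headers={"User-Agent": FAKE_VLLM_USER_AGENT})
    resp.raise_for_status()
    # Decode the response body into a PIL image in the requested mode.
    return Image.open(BytesIO(resp.content)).convert(image_mode)
```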
```diff
@@ -5,7 +5,6 @@ import json
 
 import pytest
 import requests
-from PIL import Image
 from transformers import AutoProcessor
 
 from vllm.entrypoints.openai.protocol import EmbeddingResponse
@@ -64,7 +63,7 @@ def get_hf_prompt_tokens(model_name, content, image_url):
 
     placeholder = "<|image_1|> "
     prompt = f"{placeholder}{content}"
-    images = [Image.open(requests.get(image_url, stream=True).raw)]
+    images = [fetch_image(image_url)]
     inputs = processor(prompt, images, return_tensors="pt")
     return inputs.input_ids.shape[1]
```
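The first file also imports `encode_image_base64` alongside `fetch_image`, which is how these OpenAI-compatible server tests typically turn a fetched image into a request payload. A hedged usage sketch follows: the data-URL pattern is the standard one for OpenAI-style chat requests, but treating `encode_image_base64` as taking a PIL image and returning a base64 string is an assumption based on its use in these tests.

```python
from vllm.multimodal.utils import encode_image_base64, fetch_image

image_url = "https://example.com/venn_diagram.png"  # placeholder URL

image = fetch_image(image_url)          # fetched with vLLM's User-Agent
image_b64 = encode_image_base64(image)  # assumed to return a base64 str

# Embed the image as a data URL in an OpenAI-style chat message.
message_content = [
    {"type": "text", "text": "What is shown in this image?"},
    {"type": "image_url",
     "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}},
]
```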