[Frontend][3/N] Improve all pooling task | Support binary embedding response (#27066)

Signed-off-by: wang.yuqi <noooop@126.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com>
wang.yuqi 2025-10-22 18:38:57 +08:00 committed by GitHub
parent a4c29e6e82
commit 1f633b8632
12 changed files with 691 additions and 230 deletions

View File

@ -6,10 +6,16 @@
python examples/online_serving/pooling/cohere_rerank_client.py
```
## Embedding embed_dtype usage
## Embedding requests base64 encoding_format usage
```bash
python examples/online_serving/pooling/embedding_embed_dtype_client.py
python examples/online_serving/pooling/embedding_requests_base64_client.py
```
## Embedding requests bytes encoding_format usage
```bash
python examples/online_serving/pooling/embedding_requests_bytes_client.py
```
## Jinaai rerank usage

View File

@ -12,7 +12,11 @@ import base64
import requests
import torch
from vllm.entrypoints.openai.protocol import EMBED_DTYPE_TO_TORCH_DTYPE
from vllm.utils.serial_utils import (
EMBED_DTYPE_TO_TORCH_DTYPE,
ENDIANNESS,
binary2tensor,
)
def post_http_request(prompt: dict, api_url: str) -> requests.Response:
@ -34,24 +38,25 @@ def main(args):
api_url = f"http://{args.host}:{args.port}/v1/embeddings"
model_name = args.model
for embed_dtype, torch_dtype in EMBED_DTYPE_TO_TORCH_DTYPE.items():
# The OpenAI client does not support the embed_dtype and endianness parameters.
for embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE:
for endianness in ENDIANNESS:
prompt = {
"model": model_name,
"input": "vLLM is great!",
"encoding_format": "base64",
"embed_dtype": embed_dtype,
"endianness": endianness,
}
response = post_http_request(prompt=prompt, api_url=api_url)
embedding = []
for data in response.json()["data"]:
embedding.append(
torch.frombuffer(
base64.b64decode(data["embedding"]), dtype=torch_dtype
).to(torch.float32)
)
binary = base64.b64decode(data["embedding"])
tensor = binary2tensor(binary, (-1,), embed_dtype, endianness)
embedding.append(tensor.to(torch.float32))
embedding = torch.cat(embedding)
print(embed_dtype, embedding.shape)
print(embed_dtype, endianness, embedding.shape)
if __name__ == "__main__":

View File

@ -0,0 +1,66 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Example Python client for embedding API using vLLM API server
NOTE:
start a supported embeddings model server with `vllm serve`, e.g.
vllm serve intfloat/e5-small
"""
import argparse
import json
import requests
import torch
from vllm.utils.serial_utils import (
EMBED_DTYPE_TO_TORCH_DTYPE,
ENDIANNESS,
MetadataItem,
decode_pooling_output,
)
def post_http_request(prompt: dict, api_url: str) -> requests.Response:
headers = {"User-Agent": "Test Client"}
response = requests.post(api_url, headers=headers, json=prompt)
return response
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="localhost")
parser.add_argument("--port", type=int, default=8000)
parser.add_argument("--model", type=str, default="intfloat/e5-small")
return parser.parse_args()
def main(args):
api_url = f"http://{args.host}:{args.port}/v1/embeddings"
model_name = args.model
# The OpenAI client does not support the bytes encoding_format.
# The OpenAI client does not support the embed_dtype and endianness parameters.
for embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE:
for endianness in ENDIANNESS:
prompt = {
"model": model_name,
"input": "vLLM is great!",
"encoding_format": "bytes",
"embed_dtype": embed_dtype,
"endianness": endianness,
}
response = post_http_request(prompt=prompt, api_url=api_url)
metadata = json.loads(response.headers["metadata"])
body = response.content
items = [MetadataItem(**x) for x in metadata["data"]]
embedding = decode_pooling_output(items=items, body=body)
embedding = [x.to(torch.float32) for x in embedding]
embedding = torch.cat(embedding)
print(embed_dtype, endianness, embedding.shape)
if __name__ == "__main__":
args = parse_args()
main(args)
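For readers who prefer not to import vLLM on the client side, the bytes response can also be decoded with numpy alone. The sketch below is illustrative and not part of this commit: it assumes the same `/v1/embeddings` endpoint and the `intfloat/e5-small` model from the docstring above, and restricts itself to `embed_dtype="float32"` with little-endian byte order (bfloat16 and fp8 need the dtype-view trick from `vllm/utils/serial_utils.py`). Each entry in the `metadata` header carries `index`, `start`, `end`, and `shape` offsets into the raw body.

```python
# Minimal sketch (not part of this PR): decode a "bytes" embedding response
# with numpy only, assuming embed_dtype="float32" and endianness="little".
import json

import numpy as np
import requests

response = requests.post(
    "http://localhost:8000/v1/embeddings",
    json={
        "model": "intfloat/e5-small",
        "input": ["vLLM is great!"],
        "encoding_format": "bytes",
        "embed_dtype": "float32",
        "endianness": "little",
    },
)
metadata = json.loads(response.headers["metadata"])
body = response.content  # concatenation of all embeddings' raw bytes

embeddings = []
for item in metadata["data"]:
    chunk = body[item["start"] : item["end"]]
    # "<f4" is little-endian float32; other embed_dtype values need other views.
    vector = np.frombuffer(chunk, dtype="<f4").reshape(item["shape"])
    embeddings.append(vector)

print(len(embeddings), embeddings[0].shape)
```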

View File

@ -2,6 +2,7 @@
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import base64
import json
import numpy as np
import openai
@ -15,11 +16,17 @@ from tests.models.language.pooling.embed_utils import run_embedding_correctness_
from tests.models.utils import check_embeddings_close
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.openai.protocol import (
EMBED_DTYPE_TO_TORCH_DTYPE,
EmbeddingResponse,
PoolingResponse,
)
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils.serial_utils import (
EMBED_DTYPE_TO_TORCH_DTYPE,
ENDIANNESS,
MetadataItem,
binary2tensor,
decode_pooling_output,
)
MODEL_NAME = "intfloat/multilingual-e5-small"
DUMMY_CHAT_TEMPLATE = """{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\\n'}}{% endfor %}""" # noqa: E501
@ -250,8 +257,8 @@ async def test_batch_base64_embedding(
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_base64_embed_dtype(
hf_model, server: RemoteOpenAIServer, client: openai.AsyncOpenAI, model_name: str
async def test_base64_embed_dtype_and_endianness(
server: RemoteOpenAIServer, client: openai.AsyncOpenAI, model_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
@ -262,7 +269,8 @@ async def test_base64_embed_dtype(
)
float_data = [d.embedding for d in responses_float.data]
for embed_dtype, torch_dtype in EMBED_DTYPE_TO_TORCH_DTYPE.items():
for embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE:
for endianness in ENDIANNESS:
responses_base64 = requests.post(
server.url_for("/v1/embeddings"),
json={
@ -270,16 +278,15 @@ async def test_base64_embed_dtype(
"input": input_texts, "input": input_texts,
"encoding_format": "base64", "encoding_format": "base64",
"embed_dtype": embed_dtype, "embed_dtype": embed_dtype,
"endianness": endianness,
}, },
) )
base64_data = [] base64_data = []
for data in responses_base64.json()["data"]: for data in responses_base64.json()["data"]:
base64_data.append( binary = base64.b64decode(data["embedding"])
torch.frombuffer(base64.b64decode(data["embedding"]), dtype=torch_dtype) tensor = binary2tensor(binary, (-1,), embed_dtype, endianness)
.to(torch.float32) base64_data.append(tensor.to(torch.float32).tolist())
.tolist()
)
check_embeddings_close( check_embeddings_close(
embeddings_0_lst=float_data, embeddings_0_lst=float_data,
@ -292,14 +299,56 @@ async def test_base64_embed_dtype(
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_base64_embed_dtype_not_supported(
hf_model, server: RemoteOpenAIServer, model_name: str
async def test_bytes_embed_dtype_and_endianness(
server: RemoteOpenAIServer, client: openai.AsyncOpenAI, model_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
]
bad_embed_dtype = "bad_embed_dtype"
responses_float = await client.embeddings.create(
input=input_texts, model=model_name, encoding_format="float"
)
float_data = [d.embedding for d in responses_float.data]
for embed_dtype in list(EMBED_DTYPE_TO_TORCH_DTYPE.keys()):
for endianness in ENDIANNESS:
responses_bytes = requests.post(
server.url_for("/v1/embeddings"),
json={
"model": model_name,
"input": input_texts,
"encoding_format": "bytes",
"embed_dtype": embed_dtype,
"endianness": endianness,
},
)
metadata = json.loads(responses_bytes.headers["metadata"])
body = responses_bytes.content
items = [MetadataItem(**x) for x in metadata["data"]]
bytes_data = decode_pooling_output(items=items, body=body)
bytes_data = [x.to(torch.float32).tolist() for x in bytes_data]
check_embeddings_close(
embeddings_0_lst=float_data,
embeddings_1_lst=bytes_data,
name_0="float_data",
name_1="bytes_data",
tol=1e-2,
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("param_name", ["encoding_format", "embed_dtype", "endianness"])
async def test_params_not_supported(
server: RemoteOpenAIServer, model_name: str, param_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
]
responses_base64 = requests.post(
server.url_for("/v1/embeddings"),
@ -307,14 +356,13 @@ async def test_base64_embed_dtype_not_supported(
"model": model_name, "model": model_name,
"input": input_texts, "input": input_texts,
"encoding_format": "base64", "encoding_format": "base64",
"embed_dtype": bad_embed_dtype, param_name: f"bad_{param_name}",
}, },
) )
assert responses_base64.status_code == 400 assert responses_base64.status_code == 400
assert responses_base64.json()["error"]["message"].startswith( assert "literal_error" in responses_base64.json()["error"]["message"]
f"embed_dtype={bad_embed_dtype!r} is not supported." assert f"bad_{param_name}" in responses_base64.json()["error"]["message"]
)
@pytest.mark.asyncio

View File

@ -2,6 +2,7 @@
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import base64
import json
import numpy as np
import pytest
@ -10,8 +11,15 @@ import torch
from tests.models.utils import check_embeddings_close
from tests.utils import RemoteOpenAIServer
from vllm.entrypoints.openai.protocol import EMBED_DTYPE_TO_TORCH_DTYPE, PoolingResponse
from vllm.entrypoints.openai.protocol import PoolingResponse
from vllm.transformers_utils.tokenizer import get_tokenizer
from vllm.utils.serial_utils import (
EMBED_DTYPE_TO_TORCH_DTYPE,
ENDIANNESS,
MetadataItem,
binary2tensor,
decode_pooling_output,
)
MODEL_NAME = "internlm/internlm2-1_8b-reward" MODEL_NAME = "internlm/internlm2-1_8b-reward"
DUMMY_CHAT_TEMPLATE = """{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\\n'}}{% endfor %}""" # noqa: E501 DUMMY_CHAT_TEMPLATE = """{% for message in messages %}{{message['role'] + ': ' + message['content'] + '\\n'}}{% endfor %}""" # noqa: E501
@ -251,7 +259,9 @@ async def test_batch_base64_pooling(server: RemoteOpenAIServer, model_name: str)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_base64_embed_dtype(server: RemoteOpenAIServer, model_name: str):
async def test_base64_embed_dtype_and_endianness(
server: RemoteOpenAIServer, model_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
]
@ -268,7 +278,8 @@ async def test_base64_embed_dtype(server: RemoteOpenAIServer, model_name: str):
responses_float = PoolingResponse.model_validate(float_response.json())
float_data = [np.array(d.data).squeeze(-1).tolist() for d in responses_float.data]
for embed_dtype, torch_dtype in EMBED_DTYPE_TO_TORCH_DTYPE.items():
for embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE:
for endianness in ENDIANNESS:
responses_base64 = requests.post(
url,
json={
@ -276,16 +287,15 @@ async def test_base64_embed_dtype(server: RemoteOpenAIServer, model_name: str):
"input": input_texts, "input": input_texts,
"encoding_format": "base64", "encoding_format": "base64",
"embed_dtype": embed_dtype, "embed_dtype": embed_dtype,
"endianness": endianness,
},
)
base64_data = []
for data in responses_base64.json()["data"]:
base64_data.append(
torch.frombuffer(base64.b64decode(data["data"]), dtype=torch_dtype)
.to(torch.float32)
.tolist()
)
binary = base64.b64decode(data["data"])
tensor = binary2tensor(binary, (-1,), embed_dtype, endianness)
base64_data.append(tensor.to(torch.float32).tolist())
check_embeddings_close(
embeddings_0_lst=float_data,
@ -298,14 +308,63 @@ async def test_base64_embed_dtype(server: RemoteOpenAIServer, model_name: str):
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_base64_embed_dtype_not_supported(
async def test_bytes_embed_dtype_and_endianness(
server: RemoteOpenAIServer, model_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
]
bad_embed_dtype = "bad_embed_dtype"
url = server.url_for("pooling")
float_response = requests.post(
url,
json={
"model": model_name,
"input": input_texts,
"encoding_format": "float",
},
)
responses_float = PoolingResponse.model_validate(float_response.json())
float_data = [np.array(d.data).squeeze(-1).tolist() for d in responses_float.data]
for embed_dtype in list(EMBED_DTYPE_TO_TORCH_DTYPE.keys()):
for endianness in ENDIANNESS:
responses_bytes = requests.post(
url,
json={
"model": model_name,
"input": input_texts,
"encoding_format": "bytes",
"embed_dtype": embed_dtype,
"endianness": endianness,
},
)
metadata = json.loads(responses_bytes.headers["metadata"])
body = responses_bytes.content
items = [MetadataItem(**x) for x in metadata["data"]]
bytes_data = decode_pooling_output(items=items, body=body)
bytes_data = [x.to(torch.float32).view(-1).tolist() for x in bytes_data]
check_embeddings_close(
embeddings_0_lst=float_data,
embeddings_1_lst=bytes_data,
name_0="float_data",
name_1="bytes_data",
tol=1e-2,
)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.parametrize("param_name", ["encoding_format", "embed_dtype", "endianness"])
async def test_params_not_supported(
server: RemoteOpenAIServer, model_name: str, param_name: str
):
input_texts = [
"The best thing about vLLM is that it supports many different models",
]
responses_base64 = requests.post(
server.url_for("pooling"),
@ -313,14 +372,13 @@ async def test_base64_embed_dtype_not_supported(
"model": model_name, "model": model_name,
"input": input_texts, "input": input_texts,
"encoding_format": "base64", "encoding_format": "base64",
"embed_dtype": bad_embed_dtype, param_name: f"bad_{param_name}",
}, },
) )
assert responses_base64.status_code == 400 assert responses_base64.status_code == 400
assert responses_base64.json()["error"]["message"].startswith( assert "literal_error" in responses_base64.json()["error"]["message"]
f"embed_dtype={bad_embed_dtype!r} is not supported." assert f"bad_{param_name}" in responses_base64.json()["error"]["message"]
)
@pytest.mark.asyncio

View File

@ -0,0 +1,40 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import pytest
import torch
from tests.models.utils import check_embeddings_close
from vllm.utils.serial_utils import (
EMBED_DTYPE_TO_TORCH_DTYPE,
ENDIANNESS,
binary2tensor,
tensor2binary,
)
@pytest.mark.parametrize("endianness", ENDIANNESS)
@pytest.mark.parametrize("embed_dtype", EMBED_DTYPE_TO_TORCH_DTYPE.keys())
@torch.inference_mode
def test_encode_and_decode(embed_dtype: str, endianness: str):
for i in range(10):
tensor = torch.rand(2, 3, 5, 7, 11, 13, device="cpu", dtype=torch.float32)
shape = tensor.shape
binary = tensor2binary(tensor, embed_dtype, endianness)
new_tensor = binary2tensor(binary, shape, embed_dtype, endianness).to(
torch.float32
)
if embed_dtype in ["float32", "float16"]:
torch.testing.assert_close(tensor, new_tensor, atol=0.001, rtol=0.001)
elif embed_dtype == "bfloat16":
torch.testing.assert_close(tensor, new_tensor, atol=0.01, rtol=0.01)
else: # for fp8
torch.testing.assert_close(tensor, new_tensor, atol=0.1, rtol=0.1)
check_embeddings_close(
embeddings_0_lst=tensor.view(1, -1),
embeddings_1_lst=new_tensor.view(1, -1),
name_0="gt",
name_1="new",
tol=1e-2,
)

View File

@ -58,12 +58,14 @@ from vllm.entrypoints.openai.protocol import (
CompletionResponse,
DetokenizeRequest,
DetokenizeResponse,
EmbeddingBytesResponse,
EmbeddingRequest,
EmbeddingResponse,
ErrorInfo,
ErrorResponse,
IOProcessorResponse,
LoadLoRAAdapterRequest,
PoolingBytesResponse,
PoolingRequest,
PoolingResponse,
RerankRequest,
@ -681,7 +683,10 @@ async def create_completion(request: CompletionRequest, raw_request: Request):
)
@with_cancellation
@load_aware_call
async def create_embedding(request: EmbeddingRequest, raw_request: Request):
async def create_embedding(
request: EmbeddingRequest,
raw_request: Request,
):
handler = embedding(raw_request)
if handler is None:
return base(raw_request).create_error_response(
@ -701,6 +706,12 @@ async def create_embedding(request: EmbeddingRequest, raw_request: Request):
)
elif isinstance(generator, EmbeddingResponse):
return JSONResponse(content=generator.model_dump())
elif isinstance(generator, EmbeddingBytesResponse):
return StreamingResponse(
content=generator.body,
headers={"metadata": generator.metadata},
media_type=generator.media_type,
)
assert_never(generator)
@ -733,6 +744,12 @@ async def create_pooling(request: PoolingRequest, raw_request: Request):
)
elif isinstance(generator, (PoolingResponse, IOProcessorResponse)):
return JSONResponse(content=generator.model_dump())
elif isinstance(generator, PoolingBytesResponse):
return StreamingResponse(
content=generator.body,
headers={"metadata": generator.metadata},
media_type=generator.media_type,
)
assert_never(generator)

View File

@ -48,6 +48,12 @@ from openai.types.responses.response_reasoning_item import (
Content as ResponseReasoningTextContent,
)
from vllm.utils.serial_utils import (
EmbedDType,
EncodingFormat,
Endianness,
)
# Backward compatibility for OpenAI client versions
try: # For older openai versions (< 1.100.0)
from openai.types.responses import ResponseTextConfig
@ -84,18 +90,6 @@ from vllm.sampling_params import (
from vllm.utils import random_uuid
from vllm.utils.import_utils import resolve_obj_by_qualname
EMBED_DTYPE_TO_TORCH_DTYPE = {
"float32": torch.float32,
"float16": torch.float16,
"bfloat16": torch.bfloat16,
# I'm not sure if other platforms' CPUs support the fp8 data format.
# EMBED_DTYPE only uses the fp8 data representation,
# does not use fp8 computation, and only occurs on the CPU.
# Apologize for any possible break.
"fp8_e4m3": torch.float8_e4m3fn,
"fp8_e5m2": torch.float8_e5m2,
}
logger = init_logger(__name__)
_LONG_INFO = torch.iinfo(torch.long)
@ -1531,7 +1525,7 @@ class EmbeddingCompletionRequest(OpenAIBaseModel):
# https://platform.openai.com/docs/api-reference/embeddings
model: str | None = None
input: list[int] | list[list[int]] | str | list[str]
encoding_format: Literal["float", "base64"] = "float"
encoding_format: EncodingFormat = "float"
dimensions: int | None = None
user: str | None = None
truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None
@ -1564,11 +1558,20 @@ class EmbeddingCompletionRequest(OpenAIBaseModel):
default=None,
description="Whether to normalize the embeddings outputs. Default is True.",
)
embed_dtype: str = Field(
embed_dtype: EmbedDType = Field(
default="float32",
description=(
"What dtype to use for base64 encoding. Default to using "
"float32 for base64 encoding to match the OpenAI python client behavior."
"What dtype to use for encoding. Default to using float32 for base64 "
"encoding to match the OpenAI python client behavior. "
"This parameter will affect base64 and binary_response."
),
)
endianness: Endianness = Field(
default="native",
description=(
"What endianness to use for encoding. Default to using native for "
"base64 encoding to match the OpenAI python client behavior."
"This parameter will affect base64 and binary_response."
),
)
# --8<-- [end:embedding-extra-params]
@ -1585,7 +1588,7 @@ class EmbeddingChatRequest(OpenAIBaseModel):
model: str | None = None
messages: list[ChatCompletionMessageParam]
encoding_format: Literal["float", "base64"] = "float"
encoding_format: EncodingFormat = "float"
dimensions: int | None = None
user: str | None = None
truncate_prompt_tokens: Annotated[int, Field(ge=-1)] | None = None
@ -1650,11 +1653,20 @@ class EmbeddingChatRequest(OpenAIBaseModel):
default=None,
description="Whether to normalize the embeddings outputs. Default is True.",
)
embed_dtype: str = Field(
embed_dtype: EmbedDType = Field(
default="float32",
description=(
"Which dtype to use for base64 encoding. Defaults to float32 "
"to match OpenAI API."
"What dtype to use for encoding. Default to using float32 for base64 "
"encoding to match the OpenAI python client behavior. "
"This parameter will affect base64 and binary_response."
),
)
endianness: Endianness = Field(
default="native",
description=(
"What endianness to use for encoding. Default to using native for "
"base64 encoding to match the OpenAI python client behavior."
"This parameter will affect base64 and binary_response."
),
)
# --8<-- [end:chat-embedding-extra-params]
@ -1701,11 +1713,21 @@ class IOProcessorRequest(OpenAIBaseModel, Generic[T]):
""" """
activation: bool = False activation: bool = False
embed_dtype: str = Field( encoding_format: EncodingFormat = "float"
embed_dtype: EmbedDType = Field(
default="float32", default="float32",
description=( description=(
"What dtype to use for base64 encoding. Default to using " "What dtype to use for encoding. Default to using float32 for base64 "
"float32 for base64 encoding to match the OpenAI python client behavior." "encoding to match the OpenAI python client behavior. "
"This parameter will affect base64 and binary_response."
),
)
endianness: Endianness = Field(
default="native",
description=(
"What endianness to use for encoding. Default to using native for "
"base64 encoding to match the OpenAI python client behavior."
"This parameter will affect base64 and binary_response."
),
)
@ -1905,6 +1927,12 @@ class EmbeddingResponse(OpenAIBaseModel):
usage: UsageInfo
class EmbeddingBytesResponse(OpenAIBaseModel):
body: list[bytes]
metadata: str
media_type: str = "application/octet-stream"
class PoolingResponseData(OpenAIBaseModel):
index: int
object: str = "pooling"
@ -1920,6 +1948,12 @@ class PoolingResponse(OpenAIBaseModel):
usage: UsageInfo
class PoolingBytesResponse(OpenAIBaseModel):
body: list[bytes]
metadata: str
media_type: str = "application/octet-stream"
class ScoreResponseData(OpenAIBaseModel):
index: int
object: str = "score"

View File

@ -1,18 +1,19 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import json
from collections.abc import AsyncGenerator, Mapping
from typing import Any, Final, cast
import torch
from fastapi import Request
from typing_extensions import override
from fastapi.responses import Response
from typing_extensions import assert_never, override
from vllm.engine.protocol import EngineClient
from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption
from vllm.entrypoints.logger import RequestLogger
from vllm.entrypoints.openai.protocol import (
EMBED_DTYPE_TO_TORCH_DTYPE,
EmbeddingBytesResponse,
EmbeddingChatRequest,
EmbeddingCompletionRequest,
EmbeddingRequest,
@ -28,7 +29,6 @@ from vllm.entrypoints.openai.serving_engine import (
TextTokensPrompt,
)
from vllm.entrypoints.openai.serving_models import OpenAIServingModels
from vllm.entrypoints.openai.utils import encoding_pooling_output
from vllm.entrypoints.renderer import RenderConfig
from vllm.inputs.data import TokensPrompt as EngineTokensPrompt
from vllm.logger import init_logger
@ -41,6 +41,13 @@ from vllm.outputs import (
from vllm.pooling_params import PoolingParams
from vllm.utils.async_utils import merge_async_iterators
from vllm.utils.collection_utils import chunk_list
from vllm.utils.serial_utils import (
EmbedDType,
EncodingFormat,
Endianness,
encode_pooling_bytes,
encode_pooling_output,
)
logger = init_logger(__name__)
@ -68,12 +75,6 @@ class EmbeddingMixin(OpenAIServing):
) -> ErrorResponse | None:
ctx = cast(EmbeddingServeContext, ctx)
try:
if ctx.request.embed_dtype not in EMBED_DTYPE_TO_TORCH_DTYPE:
return self.create_error_response(
f"embed_dtype={ctx.request.embed_dtype!r} is not supported. "
f"Supported types: {EMBED_DTYPE_TO_TORCH_DTYPE.keys()}"
)
ctx.lora_request = self._maybe_get_adapters(ctx.request)
tokenizer = await self.engine_client.get_tokenizer()
@ -121,17 +122,25 @@ class EmbeddingMixin(OpenAIServing):
def _build_response(
self,
ctx: ServeContext,
) -> EmbeddingResponse | ErrorResponse:
) -> EmbeddingResponse | Response | ErrorResponse:
final_res_batch_checked = cast(list[PoolingRequestOutput], ctx.final_res_batch)
encoding_format: EncodingFormat = ctx.request.encoding_format
embed_dtype: EmbedDType = ctx.request.embed_dtype
endianness: Endianness = ctx.request.endianness
def encode_float_base64():
items: list[EmbeddingResponseData] = []
num_prompt_tokens = 0
final_res_batch_checked = cast(list[PoolingRequestOutput], ctx.final_res_batch)
for idx, final_res in enumerate(final_res_batch_checked):
item = EmbeddingResponseData(
index=idx,
embedding=encoding_pooling_output(
final_res, ctx.request.encoding_format, ctx.request.embed_dtype
embedding=encode_pooling_output(
final_res,
encoding_format=encoding_format,
embed_dtype=embed_dtype,
endianness=endianness,
),
)
prompt_token_ids = final_res.prompt_token_ids
@ -152,6 +161,32 @@ class EmbeddingMixin(OpenAIServing):
usage=usage,
)
def encode_bytes():
body, items, usage = encode_pooling_bytes(
pooling_outputs=final_res_batch_checked,
embed_dtype=embed_dtype,
endianness=endianness,
)
metadata = {
"id": ctx.request_id,
"created": ctx.created_time,
"model": ctx.model_name,
"data": items,
"usage": usage,
}
return EmbeddingBytesResponse(
body=body,
metadata=json.dumps(metadata),
)
if encoding_format == "float" or encoding_format == "base64":
return encode_float_base64()
elif encoding_format == "bytes":
return encode_bytes()
else:
assert_never(encoding_format)
def _get_max_position_embeddings(self) -> int:
"""Get the model's effective maximum sequence length for chunking."""
return self.model_config.max_model_len
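For reference, when encoding_format="bytes" the handler above returns the raw tensors in the response body and packs everything that would otherwise be the JSON response into a single "metadata" response header. Based on encode_pooling_bytes later in this PR, that header for a two-prompt float32 request against a 384-dimension embedding model would look roughly like the sketch below; every concrete value is illustrative, not captured from a running server.

```python
# Illustrative shape of the "metadata" response header (all values made up).
metadata = {
    "id": "embd-abc123",
    "created": 1761100000,
    "model": "intfloat/e5-small",
    "data": [
        # start/end are byte offsets into the raw response body;
        # 384 float32 values occupy 1536 bytes.
        {"index": 0, "embed_dtype": "float32", "endianness": "little",
         "start": 0, "end": 1536, "shape": [384]},
        {"index": 1, "embed_dtype": "float32", "endianness": "little",
         "start": 1536, "end": 3072, "shape": [384]},
    ],
    "usage": {"prompt_tokens": 12, "total_tokens": 12},
}
```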

View File

@ -2,14 +2,12 @@
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import asyncio
import base64
import json
import time
from collections.abc import AsyncGenerator
from typing import Final, Literal, cast
from typing import Final, cast
import jinja2
import numpy as np
import torch
from fastapi import Request
from typing_extensions import assert_never
@ -17,10 +15,10 @@ from vllm.engine.protocol import EngineClient
from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption
from vllm.entrypoints.logger import RequestLogger
from vllm.entrypoints.openai.protocol import (
EMBED_DTYPE_TO_TORCH_DTYPE,
ErrorResponse,
IOProcessorRequest,
IOProcessorResponse,
PoolingBytesResponse,
PoolingChatRequest,
PoolingCompletionRequest,
PoolingRequest,
@ -30,33 +28,23 @@ from vllm.entrypoints.openai.protocol import (
)
from vllm.entrypoints.openai.serving_engine import OpenAIServing
from vllm.entrypoints.openai.serving_models import OpenAIServingModels
from vllm.entrypoints.openai.utils import encoding_pooling_output
from vllm.entrypoints.renderer import RenderConfig
from vllm.entrypoints.utils import _validate_truncation_size
from vllm.logger import init_logger
from vllm.outputs import PoolingOutput, PoolingRequestOutput
from vllm.outputs import PoolingRequestOutput
from vllm.tasks import SupportedTask
from vllm.utils.async_utils import merge_async_iterators
from vllm.utils.serial_utils import (
EmbedDType,
EncodingFormat,
Endianness,
encode_pooling_bytes,
encode_pooling_output,
)
logger = init_logger(__name__)
def _get_data(
output: PoolingOutput,
encoding_format: Literal["float", "base64"],
) -> list[float] | str:
if encoding_format == "float":
return output.data.tolist()
elif encoding_format == "base64":
# Force to use float32 for base64 encoding
# to match the OpenAI python client behavior
pt_float32 = output.data.to(dtype=torch.float32)
pooling_bytes = np.array(pt_float32, dtype="float32").tobytes()
return base64.b64encode(pooling_bytes).decode("utf-8")
assert_never(encoding_format)
class OpenAIServingPooling(OpenAIServing):
def __init__(
self,
@ -86,7 +74,7 @@ class OpenAIServingPooling(OpenAIServing):
self,
request: PoolingRequest,
raw_request: Request | None = None,
) -> PoolingResponse | IOProcessorResponse | ErrorResponse:
) -> PoolingResponse | IOProcessorResponse | PoolingBytesResponse | ErrorResponse:
"""
See https://platform.openai.com/docs/api-reference/embeddings/create
for the API specification. This API mimics the OpenAI Embedding API.
@ -95,12 +83,6 @@ class OpenAIServingPooling(OpenAIServing):
if error_check_ret is not None:
return error_check_ret
if request.embed_dtype not in EMBED_DTYPE_TO_TORCH_DTYPE:
return self.create_error_response(
f"embed_dtype={request.embed_dtype!r} is not supported. "
f"Supported types: {EMBED_DTYPE_TO_TORCH_DTYPE.keys()}"
)
model_name = self.models.model_name()
request_id = f"pool-{self._base_request_id(raw_request)}"
@ -256,6 +238,7 @@ class OpenAIServingPooling(OpenAIServing):
model_name,
request.encoding_format,
request.embed_dtype,
request.endianness,
)
except asyncio.CancelledError:
return self.create_error_response("Client disconnected")
@ -271,16 +254,23 @@ class OpenAIServingPooling(OpenAIServing):
request_id: str,
created_time: int,
model_name: str,
encoding_format: Literal["float", "base64"],
embed_dtype: str,
) -> PoolingResponse:
encoding_format: EncodingFormat,
embed_dtype: EmbedDType,
endianness: Endianness,
) -> PoolingResponse | PoolingBytesResponse:
def encode_float_base64():
items: list[PoolingResponseData] = []
num_prompt_tokens = 0
for idx, final_res in enumerate(final_res_batch):
item = PoolingResponseData(
index=idx,
data=encoding_pooling_output(final_res, encoding_format, embed_dtype),
data=encode_pooling_output(
final_res,
encoding_format=encoding_format,
embed_dtype=embed_dtype,
endianness=endianness,
),
)
prompt_token_ids = final_res.prompt_token_ids
@ -300,6 +290,32 @@ class OpenAIServingPooling(OpenAIServing):
usage=usage,
)
def encode_bytes():
body, items, usage = encode_pooling_bytes(
pooling_outputs=final_res_batch,
embed_dtype=embed_dtype,
endianness=endianness,
)
metadata = {
"id": request_id,
"created": created_time,
"model": model_name,
"data": items,
"usage": usage,
}
return PoolingBytesResponse(
body=body,
metadata=json.dumps(metadata),
)
if encoding_format == "float" or encoding_format == "base64":
return encode_float_base64()
elif encoding_format == "bytes":
return encode_bytes()
else:
assert_never(encoding_format)
def _build_render_config(self, request: PoolingCompletionRequest) -> RenderConfig:
return RenderConfig(
max_length=self.max_model_len,

View File

@ -1,33 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import base64
from typing import Literal
import torch
from typing_extensions import assert_never
from vllm import PoolingRequestOutput
from vllm.entrypoints.openai.protocol import EMBED_DTYPE_TO_TORCH_DTYPE
def encoding_pooling_output(
output: PoolingRequestOutput,
encoding_format: Literal["float", "base64"],
embed_dtype: str,
) -> list[float] | str:
if encoding_format == "float":
return output.outputs.data.tolist()
elif encoding_format == "base64":
assert embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE
torch_dtype = EMBED_DTYPE_TO_TORCH_DTYPE[embed_dtype]
embedding_bytes = (
output.outputs.data.to(torch_dtype)
.flatten()
.contiguous()
.view(torch.uint8)
.numpy()
.tobytes()
)
return base64.b64encode(embedding_bytes).decode("utf-8")
assert_never(encoding_format)

vllm/utils/serial_utils.py Normal file
View File

@ -0,0 +1,169 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
import base64
import sys
from dataclasses import dataclass
from typing import Literal
import numpy as np
import torch
from typing_extensions import assert_never
from vllm import PoolingRequestOutput
sys_byteorder = sys.byteorder
EMBED_DTYPE_TO_TORCH_DTYPE = {
"float32": torch.float32,
"float16": torch.float16,
"bfloat16": torch.bfloat16,
# Not all CPU platforms are guaranteed to support the fp8 data formats.
# EMBED_DTYPE only uses fp8 as a storage representation; it does not
# perform fp8 computation, and the conversion only happens on the CPU.
# Apologies if this breaks on an unsupported platform.
"fp8_e4m3": torch.float8_e4m3fn,
"fp8_e5m2": torch.float8_e5m2,
}
EMBED_DTYPE_TO_TORCH_DTYPE_VIEW = {
"float32": torch.float32,
"float16": torch.float16,
# numpy does not support bfloat16 and fp8
"bfloat16": torch.float16,
"fp8_e4m3": torch.uint8,
"fp8_e5m2": torch.uint8,
}
EMBED_DTYPE_TO_NUMPY_DTYPE_VIEW = {
"float32": np.float32,
"float16": np.float16,
# numpy does not support bfloat16 and fp8
"bfloat16": np.float16,
"fp8_e4m3": np.uint8,
"fp8_e5m2": np.uint8,
}
ENDIANNESS = ["native", "big", "little"]
EmbedDType = Literal["float32", "float16", "bfloat16", "fp8_e4m3", "fp8_e5m2"]
Endianness = Literal["native", "big", "little"]
EncodingFormat = Literal["float", "base64", "bytes"]
def tensor2binary(
tensor: torch.Tensor, embed_dtype: EmbedDType, endianness: Endianness
) -> bytes:
assert isinstance(tensor, torch.Tensor)
assert embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE
assert endianness in ENDIANNESS
torch_dtype = EMBED_DTYPE_TO_TORCH_DTYPE[embed_dtype]
torch_view_dtype = EMBED_DTYPE_TO_TORCH_DTYPE_VIEW[embed_dtype]
np_array = (
tensor.to(torch_dtype).flatten().contiguous().view(torch_view_dtype).numpy()
)
if endianness != "native" and endianness != sys_byteorder:
np_array = np_array.byteswap()
return np_array.tobytes()
def binary2tensor(
binary: bytes,
shape: tuple[int, ...],
embed_dtype: EmbedDType,
endianness: Endianness,
) -> torch.Tensor:
assert embed_dtype in EMBED_DTYPE_TO_TORCH_DTYPE
assert embed_dtype in EMBED_DTYPE_TO_NUMPY_DTYPE_VIEW
assert endianness in ENDIANNESS
torch_dtype = EMBED_DTYPE_TO_TORCH_DTYPE[embed_dtype]
np_dtype = EMBED_DTYPE_TO_NUMPY_DTYPE_VIEW[embed_dtype]
np_array = np.frombuffer(binary, dtype=np_dtype).reshape(shape)
if endianness != "native" and endianness != sys_byteorder:
np_array = np_array.byteswap()
return torch.from_numpy(np_array).view(torch_dtype)
def encode_pooling_output(
output: PoolingRequestOutput,
encoding_format: EncodingFormat,
embed_dtype: EmbedDType,
endianness: Endianness,
) -> list[float] | str | bytes:
if encoding_format == "float":
return output.outputs.data.tolist()
elif encoding_format == "base64":
embedding_bytes = tensor2binary(output.outputs.data, embed_dtype, endianness)
return base64.b64encode(embedding_bytes).decode("utf-8")
elif encoding_format == "bytes":
return tensor2binary(output.outputs.data, embed_dtype, endianness)
assert_never(encoding_format)
@dataclass
class MetadataItem:
index: int
embed_dtype: EmbedDType
endianness: Endianness
start: int
end: int
shape: tuple[int, ...]
def encode_pooling_bytes(
pooling_outputs: list[PoolingRequestOutput],
embed_dtype: EmbedDType,
endianness: Endianness,
):
num_prompt_tokens = 0
items: list[dict] = []
body = []
offset = 0
for idx, output in enumerate(pooling_outputs):
binary = tensor2binary(
tensor=output.outputs.data,
embed_dtype=embed_dtype,
endianness=endianness,
)
size = len(binary)
item = {
"index": idx,
"embed_dtype": embed_dtype,
"endianness": endianness,
"start": offset,
"end": offset + size,
"shape": output.outputs.data.shape,
}
body.append(binary)
items.append(item)
prompt_token_ids = output.prompt_token_ids
num_prompt_tokens += len(prompt_token_ids)
offset += size
usage = {
"prompt_tokens": num_prompt_tokens,
"total_tokens": num_prompt_tokens,
}
return body, items, usage
def decode_pooling_output(items: list[MetadataItem], body: bytes) -> list[torch.Tensor]:
items.sort(key=lambda x: x.index)
tensor_list: list[torch.Tensor] = []
for item in items:
binary = body[item.start : item.end]
tensor = binary2tensor(binary, item.shape, item.embed_dtype, item.endianness)
tensor_list.append(tensor)
return tensor_list
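Taken together, the helpers above form a symmetric encode/decode pair. The snippet below is a small usage sketch, not part of this commit, that mirrors the new unit test: it round-trips a tensor through tensor2binary and binary2tensor using bfloat16 storage and an explicit big-endian byte order.

```python
# Usage sketch (not part of this PR): round-trip a tensor through the
# serialization helpers defined above.
import torch

from vllm.utils.serial_utils import binary2tensor, tensor2binary

tensor = torch.randn(4, 384, dtype=torch.float32)

# Encode as big-endian bfloat16 bytes, then decode back.
binary = tensor2binary(tensor, embed_dtype="bfloat16", endianness="big")
assert len(binary) == tensor.numel() * 2  # bfloat16 is 2 bytes per element

restored = binary2tensor(binary, tensor.shape, "bfloat16", "big").to(torch.float32)
# bfloat16 keeps ~3 significant decimal digits, so use a loose tolerance.
torch.testing.assert_close(tensor, restored, atol=1e-2, rtol=1e-2)
```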