Mirror of https://git.datalinker.icu/vllm-project/vllm.git
Enable request body OpenAPI spec for OpenAI endpoints (#865)
commit becd7a56f1
parent 75471386de
@@ -178,7 +178,8 @@ def create_logprobs(token_ids: List[int],
 
 
 @app.post("/v1/chat/completions")
-async def create_chat_completion(raw_request: Request):
+async def create_chat_completion(request: ChatCompletionRequest,
+                                 raw_request: Request):
     """Completion API similar to OpenAI's API.
 
     See https://platform.openai.com/docs/api-reference/chat/create
@@ -188,7 +189,6 @@ async def create_chat_completion(raw_request: Request):
         - function_call (Users should implement this by themselves)
         - logit_bias (to be supported by vLLM engine)
     """
-    request = ChatCompletionRequest(**await raw_request.json())
     logger.info(f"Received chat completion request: {request}")
 
     error_check_ret = await check_model(request)
@@ -348,7 +348,7 @@ async def create_chat_completion(raw_request: Request):
 
 
 @app.post("/v1/completions")
-async def create_completion(raw_request: Request):
+async def create_completion(request: CompletionRequest, raw_request: Request):
     """Completion API similar to OpenAI's API.
 
     See https://platform.openai.com/docs/api-reference/completions/create
@@ -361,7 +361,6 @@ async def create_completion(raw_request: Request):
           suffix)
         - logit_bias (to be supported by vLLM engine)
     """
-    request = CompletionRequest(**await raw_request.json())
     logger.info(f"Received completion request: {request}")
 
     error_check_ret = await check_model(request)
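Why the signature change enables the OpenAPI request body: when the Pydantic model is declared as a handler parameter instead of being parsed by hand from raw_request.json(), FastAPI both validates the JSON body and includes the model's schema in the auto-generated OpenAPI spec (served at /openapi.json and /docs). A minimal standalone sketch of that pattern, not vLLM's actual server; the app, model, and route names below are hypothetical:

# Minimal sketch, assuming only FastAPI + Pydantic; not vLLM's server.
# Declaring the model as a parameter makes FastAPI parse/validate the body
# and publish its schema in the generated OpenAPI spec.
from fastapi import FastAPI, Request
from pydantic import BaseModel

app = FastAPI()


class EchoRequest(BaseModel):  # hypothetical request model
    model: str
    prompt: str
    max_tokens: int = 16


@app.post("/v1/echo")  # hypothetical route
async def create_echo(request: EchoRequest, raw_request: Request):
    # `request` is already parsed and validated by FastAPI; `raw_request`
    # remains available for headers, client-disconnect checks, etc.
    return {"model": request.model, "echo": request.prompt}

With the old style (request = Model(**await raw_request.json())), the body never shows up in the spec, and malformed JSON typically surfaces as a server error rather than a structured 422 validation response.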