mirror of
https://git.datalinker.icu/vllm-project/vllm.git
synced 2025-12-10 06:45:01 +08:00
[Frontend] enable custom logging for the uvicorn server (OpenAI API server) (#18403)
Signed-off-by: François Paupier <francois.paupier@gmail.com> Co-authored-by: Cyrus Leung <cyrus.tl.leung@gmail.com>
This commit is contained in:
parent
ebb1ec9318
commit
20133cfee2
@ -5,6 +5,7 @@ import atexit
|
||||
import gc
|
||||
import importlib
|
||||
import inspect
|
||||
import json
|
||||
import multiprocessing
|
||||
import os
|
||||
import signal
|
||||
@ -16,7 +17,6 @@ from collections.abc import AsyncIterator
|
||||
from contextlib import asynccontextmanager
|
||||
from functools import partial
|
||||
from http import HTTPStatus
|
||||
from json import JSONDecodeError
|
||||
from typing import Annotated, Any, Optional
|
||||
|
||||
import prometheus_client
|
||||
@ -930,7 +930,7 @@ async def invocations(raw_request: Request):
|
||||
"""
|
||||
try:
|
||||
body = await raw_request.json()
|
||||
except JSONDecodeError as e:
|
||||
except json.JSONDecodeError as e:
|
||||
raise HTTPException(status_code=HTTPStatus.BAD_REQUEST.value,
|
||||
detail=f"JSON decode error: {e}") from e
|
||||
|
||||
@ -1003,6 +1003,18 @@ if envs.VLLM_ALLOW_RUNTIME_LORA_UPDATING:
|
||||
return Response(status_code=200, content=response)
|
||||
|
||||
|
||||
def load_log_config(log_config_file: Optional[str]) -> Optional[dict]:
    """Load a logging configuration (for uvicorn/vllm) from a JSON file.

    Args:
        log_config_file: Path to a JSON logging-config file, or ``None``/""
            when no custom config was requested.

    Returns:
        The parsed config dict, or ``None`` when no path was given or the
        file could not be read/parsed (a warning is logged in that case so
        the server falls back to its default logging rather than crashing).
    """
    if not log_config_file:
        return None
    try:
        with open(log_config_file) as f:
            return json.load(f)
    # Narrow catch: OSError covers missing/unreadable files, ValueError
    # covers json.JSONDecodeError. A bare `except Exception` would also
    # swallow programming errors and hide real bugs.
    except (OSError, ValueError) as e:
        logger.warning("Failed to load log config from file %s: error %s",
                       log_config_file, e)
        return None
|
||||
|
||||
|
||||
def build_app(args: Namespace) -> FastAPI:
|
||||
if args.disable_fastapi_docs:
|
||||
app = FastAPI(openapi_url=None,
|
||||
@ -1324,6 +1336,11 @@ async def run_server_worker(listen_address,
|
||||
|
||||
server_index = client_config.get("client_index", 0) if client_config else 0
|
||||
|
||||
# Load logging config for uvicorn if specified
|
||||
log_config = load_log_config(args.log_config_file)
|
||||
if log_config is not None:
|
||||
uvicorn_kwargs['log_config'] = log_config
|
||||
|
||||
async with build_async_engine_client(args, client_config) as engine_client:
|
||||
app = build_app(args)
|
||||
|
||||
|
||||
@ -11,6 +11,7 @@ import ssl
|
||||
from collections.abc import Sequence
|
||||
from typing import Optional, Union, get_args
|
||||
|
||||
import vllm.envs as envs
|
||||
from vllm.engine.arg_utils import AsyncEngineArgs, optional_type
|
||||
from vllm.entrypoints.chat_utils import (ChatTemplateContentFormatOption,
|
||||
validate_chat_template)
|
||||
@ -243,6 +244,13 @@ def make_arg_parser(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
|
||||
" into OpenAI API format, the name register in this plugin can be used "
|
||||
"in ``--tool-call-parser``.")
|
||||
|
||||
parser.add_argument(
|
||||
"--log-config-file",
|
||||
type=str,
|
||||
default=envs.VLLM_LOGGING_CONFIG_PATH,
|
||||
help="Path to logging config JSON file for both vllm and uvicorn",
|
||||
)
|
||||
|
||||
parser = AsyncEngineArgs.add_cli_args(parser)
|
||||
|
||||
parser.add_argument('--max-log-len',
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user