[BugFix] respect VLLM_LOGGING_LEVEL in logger (#29761)
Signed-off-by: Boyuan Feng <boyuan@meta.com>
parent 0037b5746a
commit 3b221cb661
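Every hunk below serves one fix: the "vllm" logger's level was hard-coded to "DEBUG" in DEFAULT_LOGGING_CONFIG, so the VLLM_LOGGING_LEVEL environment variable was silently ignored. A minimal standalone sketch of the fixed behavior, assuming a default of "INFO" when the variable is unset (the default is inferred from the updated test below, not vLLM's actual module layout):

import logging
import logging.config
import os

# Resolve the level from the environment, as the fixed config now does.
level = os.getenv("VLLM_LOGGING_LEVEL", "INFO")

logging.config.dictConfig(
    {
        "version": 1,
        "disable_existing_loggers": False,
        # Before the fix, the "level" here was the literal "DEBUG",
        # so the environment variable never took effect.
        "loggers": {"vllm": {"level": level, "propagate": False}},
    }
)
# With VLLM_LOGGING_LEVEL unset in the shell, the logger lands on INFO.
assert logging.getLogger("vllm").level == logging.INFO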
@@ -1174,6 +1174,7 @@ def caplog_mp_spawn(tmp_path, monkeypatch):
         "level": level,
         "filename": log_path.as_posix(),
     }
+    config["loggers"]["vllm"]["level"] = level
 
     config_path.write_text(json.dumps(config))
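The extra line in the caplog_mp_spawn fixture matters because handler levels alone cannot lower the threshold: records are filtered at the logger before any handler sees them, so a DEBUG handler under an INFO logger stays silent. A standard-library sketch of that filtering rule (handler and logger names here are illustrative, not the fixture's actual code):

import logging
import logging.config

config = {
    "version": 1,
    "handlers": {"out": {"class": "logging.StreamHandler", "level": "DEBUG"}},
    "loggers": {"vllm": {"handlers": ["out"], "level": "INFO", "propagate": False}},
}
logging.config.dictConfig(config)
logging.getLogger("vllm").debug("dropped at the logger")  # not emitted

config["loggers"]["vllm"]["level"] = "DEBUG"  # what the fixture now sets
logging.config.dictConfig(config)
logging.getLogger("vllm").debug("reaches the handler")  # emitted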
@@ -716,7 +716,7 @@ def test_is_chunked_prefill_supported(
 ):
     model_config = ModelConfig(model_id, trust_remote_code=True)
     assert model_config.attn_type == expected_attn_type
-    with caplog_vllm.at_level(level=logging.DEBUG):
+    with caplog_vllm.at_level(level=logging.DEBUG, logger="vllm"):
         assert model_config.is_chunked_prefill_supported == expected_result
         assert reason in caplog_vllm.text
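With the "vllm" logger no longer pinned to DEBUG, caplog_vllm.at_level(level=logging.DEBUG) by itself is not enough: without a logger name, pytest's at_level only adjusts the root logger, and DEBUG records are still filtered on the "vllm" logger before they can be captured. Passing logger="vllm" raises that specific logger inside the block; the next hunk applies the same fix to the prefix-caching test. A standalone sketch using pytest's stock caplog fixture (caplog_vllm is vLLM's own wrapper around it, and the message text here is invented):

import logging

def test_debug_record_is_captured(caplog):
    log = logging.getLogger("vllm")
    with caplog.at_level(logging.DEBUG, logger="vllm"):
        log.debug("some debug-level reason")
    assert "some debug-level reason" in caplog.text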
@@ -835,7 +835,7 @@ def test_is_prefix_caching_supported(
 ):
     model_config = ModelConfig(model_id, trust_remote_code=True)
     assert model_config.attn_type == expected_attn_type
-    with caplog_vllm.at_level(level=logging.DEBUG):
+    with caplog_vllm.at_level(level=logging.DEBUG, logger="vllm"):
         assert model_config.is_prefix_caching_supported == expected_result
         assert reason in caplog_vllm.text
@@ -57,7 +57,7 @@ def test_default_vllm_root_logger_configuration(monkeypatch):
     _configure_vllm_root_logger()
 
     logger = logging.getLogger("vllm")
-    assert logger.level == logging.DEBUG
+    assert logger.level == logging.INFO
     assert not logger.propagate
 
     handler = logger.handlers[0]
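The expected level flips from DEBUG to INFO because the configured level now tracks VLLM_LOGGING_LEVEL, whose default appears to be "INFO" when the variable is unset. A sketch of the level following an explicit override, using only the standard library:

import logging
import logging.config
import os

os.environ["VLLM_LOGGING_LEVEL"] = "WARNING"  # simulate a user override
level = os.getenv("VLLM_LOGGING_LEVEL", "INFO")
logging.config.dictConfig({"version": 1, "loggers": {"vllm": {"level": level}}})
assert logging.getLogger("vllm").level == logging.WARNING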
@@ -524,7 +524,7 @@ def mp_function(**kwargs):
 
 
 def test_caplog_mp_fork(caplog_vllm, caplog_mp_fork):
-    with caplog_vllm.at_level(logging.DEBUG), caplog_mp_fork():
+    with caplog_vllm.at_level(logging.DEBUG, logger="vllm"), caplog_mp_fork():
         import multiprocessing
 
         ctx = multiprocessing.get_context("fork")
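The fork-based test gets the same logger="vllm" treatment. One property the "fork" start method provides for free: the child inherits the parent's entire logging tree, so a level raised before forking is already in effect inside the child. A standalone illustration (caplog_mp_fork is vLLM's test fixture and is not reproduced here; "fork" is unavailable on Windows):

import logging
import multiprocessing

def _child() -> None:
    # The DEBUG level set in the parent is still in effect after fork.
    logging.getLogger("vllm").debug("inherited across fork")

if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger("vllm").setLevel(logging.DEBUG)
    ctx = multiprocessing.get_context("fork")
    p = ctx.Process(target=_child)
    p.start()
    p.join()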
@@ -62,7 +62,7 @@ DEFAULT_LOGGING_CONFIG = {
     "loggers": {
         "vllm": {
             "handlers": ["vllm"],
-            "level": "DEBUG",
+            "level": envs.VLLM_LOGGING_LEVEL,
             "propagate": False,
         },
     },
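envs.VLLM_LOGGING_LEVEL is vLLM's accessor for the environment variable; because DEFAULT_LOGGING_CONFIG is a module-level dict, the value is baked in when vllm.logger is first imported. A sketch of the pattern, with the "INFO" default as an assumption consistent with the test change above:

import os

def _logging_level() -> str:
    # Stand-in for envs.VLLM_LOGGING_LEVEL (assumed default: "INFO").
    return os.getenv("VLLM_LOGGING_LEVEL", "INFO")

DEFAULT_LOGGING_CONFIG = {
    "loggers": {
        "vllm": {
            "handlers": ["vllm"],
            "level": _logging_level(),  # was the literal "DEBUG"
            "propagate": False,
        },
    },
}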
@@ -175,6 +175,9 @@ def _configure_vllm_root_logger() -> None:
         vllm_handler["stream"] = envs.VLLM_LOGGING_STREAM
         vllm_handler["formatter"] = "vllm_color" if _use_color() else "vllm"
 
+        vllm_loggers = logging_config["loggers"]["vllm"]
+        vllm_loggers["level"] = envs.VLLM_LOGGING_LEVEL
+
     if envs.VLLM_LOGGING_CONFIG_PATH:
         if not path.exists(envs.VLLM_LOGGING_CONFIG_PATH):
             raise RuntimeError(
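The two added assignments mirror the handler tweaks just above them: after the handler's stream and formatter are set from the environment, the logger's own level is synced to VLLM_LOGGING_LEVEL as well, so the runtime configuration path agrees with the static default from the previous hunk. An abridged sketch of the flow (key names follow the diff; the rest of the function body is elided, and os.getenv stands in for envs.VLLM_LOGGING_LEVEL):

import copy
import logging.config
import os

def configure_vllm_root_logger(default_config: dict) -> None:
    logging_config = copy.deepcopy(default_config)
    # ... handler "stream" and "formatter" adjustments elided ...
    vllm_loggers = logging_config["loggers"]["vllm"]
    vllm_loggers["level"] = os.getenv("VLLM_LOGGING_LEVEL", "INFO")
    logging.config.dictConfig(logging_config)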