[V1][Minor] Print KV cache size in token counts (#13596)

Signed-off-by: Woosuk Kwon <woosuk.kwon@berkeley.edu>
This commit is contained in:
Woosuk Kwon 2025-02-20 09:24:31 -08:00 committed by GitHub
parent 34aad515c8
commit d3ea50113c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -519,11 +519,13 @@ def _get_kv_cache_config_uniform_type(vllm_config: VllmConfig,
"num_gpu_blocks_override=%d", num_blocks, num_gpu_blocks_override)
num_blocks = num_gpu_blocks_override
logger.info("# GPU blocks: %d", num_blocks)
max_concurrency = (num_blocks * vllm_config.cache_config.block_size /
vllm_config.model_config.max_model_len)
num_tokens = num_blocks * vllm_config.cache_config.block_size
num_tokens_str = f"{num_tokens:,}"
logger.info("GPU KV cache size: %s tokens", num_tokens_str)
max_model_len_str = f"{vllm_config.model_config.max_model_len:,}"
max_concurrency = num_tokens / vllm_config.model_config.max_model_len
logger.info("Maximum concurrency for %s tokens per request: %.2fx",
vllm_config.model_config.max_model_len, max_concurrency)
max_model_len_str, max_concurrency)
per_layer_size = page_size * num_blocks