From 12e6c0b41c197c5079d6ef285930e0dd3bb60c37 Mon Sep 17 00:00:00 2001
From: Michael Goin
Date: Tue, 13 May 2025 23:36:17 -0400
Subject: [PATCH] [Bugfix][V1] Fix FlashInfer V1 backend using the wrong
 VllmConfig (#18086)

---
 vllm/v1/attention/backends/flashinfer.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/vllm/v1/attention/backends/flashinfer.py b/vllm/v1/attention/backends/flashinfer.py
index dcc33cffb1d7..1c4f7f62fa67 100755
--- a/vllm/v1/attention/backends/flashinfer.py
+++ b/vllm/v1/attention/backends/flashinfer.py
@@ -14,8 +14,7 @@ import vllm.envs as envs
 from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl,
                                               AttentionType)
 from vllm.attention.layer import Attention
-from vllm.config import (VllmConfig, get_current_vllm_config,
-                         get_layers_from_vllm_config)
+from vllm.config import VllmConfig, get_layers_from_vllm_config
 from vllm.logger import init_logger
 from vllm.v1.attention.backends.flash_attn import use_cascade_attention
 from vllm.v1.attention.backends.utils import CommonAttentionMetadata
@@ -215,7 +214,7 @@ class FlashInferMetadataBuilder:

         # Global hyperparameters shared by all attention layers
         self.global_hyperparameters: Optional[PerLayerParameters] = None

-        self.vllm_config = get_current_vllm_config()
+        self.vllm_config = runner.vllm_config
         self.kv_cache_spec = kv_cache_spec
         self.block_table = block_table
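
For illustration, a minimal standalone sketch of the pattern this change moves to. This is not vLLM code: Config, Runner, MetadataBuilder, and get_current_config are hypothetical names standing in for VllmConfig, the model runner, FlashInferMetadataBuilder, and get_current_vllm_config. The point is that a builder should take its config from the runner that constructs it, not from a process-global lookup that may be unset or belong to a different engine instance.

# Minimal standalone sketch (hypothetical names, not vLLM code) of why the
# builder should read the config from its runner instead of a global lookup.

from dataclasses import dataclass
from typing import Optional


@dataclass
class Config:
    max_model_len: int


_current_config: Optional[Config] = None  # ambient global, may never be set


def get_current_config() -> Optional[Config]:
    # Global lookup: returns whatever config happens to be "current",
    # which can be None or a config from another engine instance.
    return _current_config


class Runner:
    def __init__(self, config: Config):
        self.config = config


class MetadataBuilder:
    def __init__(self, runner: Runner):
        # Buggy pattern: self.config = get_current_config()  -> may be None
        # Fixed pattern: take the config from the owning runner.
        self.config = runner.config


if __name__ == "__main__":
    runner = Runner(Config(max_model_len=4096))
    builder = MetadataBuilder(runner)
    # The builder always sees the same config as its runner, even though
    # no global config was ever installed in this process.
    assert builder.config is runner.config
    assert get_current_config() is None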