From cb72a0ef0135060c4bd3c9187c7532d710786c0a Mon Sep 17 00:00:00 2001
From: bk-201
Date: Sat, 20 Dec 2025 16:36:13 +0000
Subject: [PATCH] fix pre-commit

Signed-off-by: bk-201
---
 vllm/v1/engine/input_processor.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/vllm/v1/engine/input_processor.py b/vllm/v1/engine/input_processor.py
index acae5c5108afa..01d0b7f50f45e 100644
--- a/vllm/v1/engine/input_processor.py
+++ b/vllm/v1/engine/input_processor.py
@@ -463,9 +463,10 @@ class InputProcessor:
         # When enable_tower_connector_lora is True, multi-modal embeddings
         # vary depending on the LoRA request. Therefore, the mm_hash must be
         # generated based on the LoRA request to prevent incorrect cache hits.
+        lora_config = self.lora_config
         lora_kwargs = (
             msgspec.structs.asdict(lora_request)
-            if lora_request and self.lora_config.enable_tower_connector_lora
+            if lora_request and lora_config and lora_config.enable_tower_connector_lora
             else {}
         )
         lora_kwargs = {k: v for k, v in lora_kwargs.items() if v is not None}