From 8863bd2b74501cf79037fee9da269e6b0b2d5d34 Mon Sep 17 00:00:00 2001 From: bk-201 Date: Mon, 13 Oct 2025 02:41:29 +0000 Subject: [PATCH] lora: remove stale review comment and mark hardcoded limit_per_prompt with TODO Signed-off-by: bk-201 --- vllm/lora/models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/lora/models.py b/vllm/lora/models.py index bfb65bbab9015..0de2b4ceec9bf 100644 --- a/vllm/lora/models.py +++ b/vllm/lora/models.py @@ -390,12 +390,12 @@ class LoRAModelManager: ) else: self.supports_mm_lora = False - if self.supports_mm_lora: # 从init传进来就可以了,不需要model_config了 + if self.supports_mm_lora: self.mm_mapping: MultiModelKeys = self.model.get_mm_mapping() self.mm_config = model_config.multimodal_config # limit_per_prompt: int = max( # self.info.get_allowed_mm_limits().values()) - limit_per_prompt = 5 + limit_per_prompt = 5 # TODO # For vision tower # max_num_batched_tokens = encoder_budget