From 493c275352bddf7d4877602e19b8bd29d662de63 Mon Sep 17 00:00:00 2001
From: Vensen
Date: Mon, 23 Jun 2025 11:40:28 +0800
Subject: [PATCH] Fix(models/siglip): Add compatibility for Gemma models
 quantized by llm-compressor (#19643)

Signed-off-by: Vensenmu
---
 vllm/model_executor/models/gemma3_mm.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/vllm/model_executor/models/gemma3_mm.py b/vllm/model_executor/models/gemma3_mm.py
index 3a1c14978b45b..619d2aa674919 100644
--- a/vllm/model_executor/models/gemma3_mm.py
+++ b/vllm/model_executor/models/gemma3_mm.py
@@ -479,6 +479,7 @@ class Gemma3ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP,
         "model.vision_tower.": "vision_tower.",
         "model.multi_modal_projector.": "multi_modal_projector.",
         "lm_head.": "language_model.lm_head.",
+        "vision_tower.vision_model.": "vision_model.",
     })
 
     def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):