Fix(models/siglip): Add compatibility for Gemma models quantized by llm-compressor (#19643)

Signed-off-by: Vensenmu <vensenmu@gmail.com>
This commit was authored by Vensen on 2025-06-23 11:40:28 +08:00 and committed via GitHub.
parent f39ab2d4bd
commit 493c275352
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -479,6 +479,7 @@ class Gemma3ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP,
"model.vision_tower.": "vision_tower.",
"model.multi_modal_projector.": "multi_modal_projector.",
"lm_head.": "language_model.lm_head.",
"vision_tower.vision_model.": "vision_model.",
})
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):