[Quant] Molmo SupportsQuant (#13336)

This commit is contained in:
Kyle Sayers 2025-02-18 00:34:47 -05:00 committed by GitHub
parent 932b51cedd
commit 88787bce1d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -52,7 +52,8 @@ from vllm.multimodal.profiling import BaseDummyInputsBuilder, ProcessorInputs
from vllm.sequence import IntermediateTensors
from vllm.utils import JSONTree, json_map_leaves
-from .interfaces import SupportsLoRA, SupportsMultiModal, SupportsPP
+from .interfaces import (SupportsLoRA, SupportsMultiModal, SupportsPP,
+                         SupportsQuant)
from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn,
is_pp_missing_parameter,
make_empty_intermediate_tensors_factory, make_layers,
@@ -633,7 +634,8 @@ class MolmoDecoderNormAfterLayer(MolmoDecoderLayer):
return hidden_states, residual
-class MolmoVisionBackbone(nn.Module):
+class MolmoVisionBackbone(nn.Module, SupportsQuant):
+    packed_modules_mapping = {"merged_linear": ["gate_proj", "up_proj"]}
def __init__(
self,
@@ -794,7 +796,7 @@ class MolmoVisionBackbone(nn.Module):
@support_torch_compile
-class MolmoModel(nn.Module):
+class MolmoModel(nn.Module, SupportsQuant):
def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
super().__init__()
@@ -1402,8 +1404,8 @@ class MolmoMultiModalProcessor(BaseMultiModalProcessor[MolmoProcessingInfo]):
@MULTIMODAL_REGISTRY.register_processor(MolmoMultiModalProcessor,
info=MolmoProcessingInfo,
dummy_inputs=MolmoDummyInputsBuilder)
-class MolmoForCausalLM(nn.Module, SupportsMultiModal, SupportsPP,
-                       SupportsLoRA):
+class MolmoForCausalLM(nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA,
+                       SupportsQuant):
hf_to_vllm_mapper = WeightsMapper(
orig_to_new_substr={
# vision backbone mapping