diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py
index 23141124e69e1..f43a40a0bfd34 100644
--- a/vllm/model_executor/models/llama.py
+++ b/vllm/model_executor/models/llama.py
@@ -57,7 +57,7 @@ class LlamaMLP(nn.Module):
         hidden_size: int,
         intermediate_size: int,
         hidden_act: str,
-        quant_config: Optional[QKVParallelLinear] = None,
+        quant_config: Optional[QuantizationConfig] = None,
         bias: bool = False,
     ) -> None:
         super().__init__()