mirror of
https://git.datalinker.icu/vllm-project/vllm.git
synced 2025-12-21 02:25:42 +08:00
[Minor] Fix small typo in llama.py: QKVParallelLinear -> QuantizationConfig (#4991)
This commit is contained in:
parent
eb6d3c264d
commit
a36de682d4
@@ -57,7 +57,7 @@ class LlamaMLP(nn.Module):
|
||||
hidden_size: int,
|
||||
intermediate_size: int,
|
||||
hidden_act: str,
|
||||
quant_config: Optional[QKVParallelLinear] = None,
|
||||
quant_config: Optional[QuantizationConfig] = None,
|
||||
bias: bool = False,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user