# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from typing import Any, Optional, Union

import torch

from vllm import _custom_ops as ops
from vllm.logger import init_logger
from vllm.model_executor.layers.fused_moe.layer import FusedMoE
from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase,
                                               UnquantizedLinearMethod)
from vllm.model_executor.layers.quantization import QuantizationMethods
from vllm.model_executor.layers.quantization.base_config import (
    QuantizationConfig, QuantizeMethodBase)
from vllm.model_executor.parameter import (GroupQuantScaleParameter,
                                           PackedvLLMParameter)

logger = init_logger(__name__)


class AWQConfig(QuantizationConfig):
    """Config class for AWQ.

    Reference: https://arxiv.org/abs/2306.00978
    """

    def __init__(
        self,
        weight_bits: int,
        group_size: int,
        zero_point: bool,
        modules_to_not_convert: Optional[list[str]] = None,
    ) -> None:
        super().__init__()
        self.weight_bits = weight_bits
        self.group_size = group_size
        self.zero_point = zero_point
        self.modules_to_not_convert = modules_to_not_convert or []

        if self.weight_bits != 4:
            raise ValueError(
                "Currently, only 4-bit weight quantization is supported for "
                f"AWQ, but got {self.weight_bits} bits.")
        self.pack_factor = 32 // self.weight_bits
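        # With 4-bit weights this is 32 // 4 == 8: eight quantized values
        # are packed into each int32 word of qweight/qzeros.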

    def __repr__(self) -> str:
        return (f"AWQConfig(weight_bits={self.weight_bits}, "
                f"group_size={self.group_size}, "
                f"zero_point={self.zero_point}, "
                f"modules_to_not_convert={self.modules_to_not_convert})")

    def get_name(self) -> QuantizationMethods:
        return "awq"

    def get_supported_act_dtypes(self) -> list[torch.dtype]:
        return [torch.half]

    @classmethod
    def get_min_capability(cls) -> int:
        # The AWQ kernel only supports Turing or newer GPUs
        # (compute capability 7.5+).
        return 75

    @staticmethod
    def get_config_filenames() -> list[str]:
        return [
            "quant_config.json",  # E.g., casperhansen/vicuna-7b-v1.5-awq
            # E.g., abhinavkulkarni/mosaicml-mpt-7b-instruct-w4-g128-awq
            "quantize_config.json",
        ]
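
    # Sketch of a typical checkpoint config parsed below (field names taken
    # from the key lists in from_config(); values illustrative):
    #   {"w_bit": 4, "q_group_size": 128, "zero_point": true}
    # which maps to
    #   AWQConfig(weight_bits=4, group_size=128, zero_point=True,
    #             modules_to_not_convert=[])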

    @classmethod
    def from_config(cls, config: dict[str, Any]) -> "AWQConfig":
        weight_bits = cls.get_from_keys(config, ["w_bit", "bits"])
        group_size = cls.get_from_keys(config, ["q_group_size", "group_size"])
        zero_point = cls.get_from_keys(config, ["zero_point"])
        modules_to_not_convert = cls.get_from_keys_or(
            config, ["modules_to_not_convert"], None)
        return cls(weight_bits, group_size, zero_point, modules_to_not_convert)

    def get_quant_method(
        self, layer: torch.nn.Module, prefix: str
    ) -> Optional[Union["LinearMethodBase", "QuantizeMethodBase"]]:
        if isinstance(layer, LinearBase):
            if is_layer_skipped_awq(prefix, self.modules_to_not_convert):
                return UnquantizedLinearMethod()
            return AWQLinearMethod(self)
        elif isinstance(layer, FusedMoE):
            # Lazy import to avoid circular import.
            from .awq_marlin import AWQMarlinConfig, AWQMoEMethod
            from .moe_wna16 import MoeWNA16Config
            from .utils.marlin_utils import check_moe_marlin_supports_layer
            if not check_moe_marlin_supports_layer(layer, self.group_size):
                logger.warning_once(
                    f"Layer '{prefix}' is not supported by AWQMoeMarlin. "
                    "Falling back to Moe WNA16 kernels.")
                config = {
                    "quant_method": "awq",
                    "bits": self.weight_bits,
                    "group_size": self.group_size,
                    "zero_point": self.zero_point,
                    "lm_head": False,
                }
                return MoeWNA16Config.from_config(config).get_quant_method(
                    layer, prefix)
            # Marlin supports this layer: route it through the fused
            # AWQ-Marlin MoE method.
            marlin_compatible_config_dict = {
                "quant_method": "awq",
                "bits": self.weight_bits,
                "group_size": self.group_size,
                "zero_point": self.zero_point,
                "lm_head": False,
                "modules_to_not_convert": self.modules_to_not_convert,
            }
            awq_marlin_config = AWQMarlinConfig.from_config(
                marlin_compatible_config_dict)
            return AWQMoEMethod(awq_marlin_config, layer.moe_config)
        return None
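

# Note: matching below is by substring, so an entry in modules_to_not_convert
# skips any layer whose prefix contains it (illustrative, hypothetical names:
# "visual" would skip "visual.blocks.0.attn.qkv").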
def is_layer_skipped_awq(prefix: str, modules_to_not_convert: list[str]):
    return any(module_name in prefix for module_name in modules_to_not_convert)


class AWQLinearMethod(LinearMethodBase):
    """Linear method for AWQ.

    Args:
        quant_config: The AWQ quantization config.
    """

    def __init__(self, quant_config: AWQConfig):
        self.quant_config = quant_config

    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: list[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        # Normalize group_size: -1 means one group spanning the whole
        # input dimension.
        if self.quant_config.group_size != -1:
            group_size = self.quant_config.group_size
        else:
            group_size = input_size

        if input_size_per_partition % group_size != 0:
            raise ValueError(
                "The input size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        output_size_per_partition = sum(output_partition_sizes)
        if output_size_per_partition % self.quant_config.pack_factor != 0:
            raise ValueError(
                "The output size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        weight_loader = extra_weight_attrs.get("weight_loader")
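        # qweight packs pack_factor 4-bit values into each int32 along the
        # output dimension, so its shape is (input_size_per_partition,
        # output_size_per_partition // pack_factor).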
        qweight = PackedvLLMParameter(
            data=torch.empty(
                input_size_per_partition,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            input_dim=0,
            output_dim=1,
            packed_dim=1,
            packed_factor=self.quant_config.pack_factor,
            weight_loader=weight_loader)

        num_groups = input_size_per_partition // group_size

        qzeros = PackedvLLMParameter(
            data=torch.empty(
                num_groups,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            input_dim=0,
            output_dim=1,
            packed_dim=1,
            packed_factor=self.quant_config.pack_factor,
            weight_loader=weight_loader)
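
        # One scale in params_dtype per (group, output channel); qzeros above
        # packs the per-group zero points the same way qweight is packed.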
        scales = GroupQuantScaleParameter(data=torch.empty(
            num_groups,
            output_size_per_partition,
            dtype=params_dtype,
        ),
                                          input_dim=0,
                                          output_dim=1,
                                          weight_loader=weight_loader)

        layer.register_parameter("qweight", qweight)
        layer.register_parameter("qzeros", qzeros)
        layer.register_parameter("scales", scales)

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
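        # Once loading is complete, cast the custom vLLM parameter subclasses
        # back to plain nn.Parameters; their loader metadata is no longer
        # needed at runtime.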
        layer.qweight = torch.nn.Parameter(layer.qweight.data,
                                           requires_grad=False)
        layer.qzeros = torch.nn.Parameter(layer.qzeros.data,
                                          requires_grad=False)
        layer.scales = torch.nn.Parameter(layer.scales.data,
                                          requires_grad=False)

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        qweight = layer.qweight
        scales = layer.scales
        qzeros = layer.qzeros
        pack_factor = self.quant_config.pack_factor
        out_shape = (x.shape[:-1] + (qweight.shape[-1] * pack_factor, ))
        reshaped_x = x.reshape(-1, x.shape[-1])
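
        # Heuristic: the fused AWQ GEMM kernel is fastest at small batch
        # sizes, while for larger batches it is typically faster to
        # dequantize to fp16 once and use a dense matmul.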
        # num_tokens >= threshold
        FP16_MATMUL_HEURISTIC_CONDITION = x.shape[:-1].numel() >= 256

        if FP16_MATMUL_HEURISTIC_CONDITION:
            out = ops.awq_dequantize(qweight, scales, qzeros, 0, 0, 0)
            out = torch.matmul(reshaped_x, out)
        else:
            out = ops.awq_gemm(reshaped_x, qweight, scales, qzeros,
                               pack_factor)
        if bias is not None:
            out.add_(bias)
        return out.reshape(out_shape)