[Feature][Quantization] auto_round support for mixed bits quantization (#23812)

Signed-off-by: n1ck-guo <heng.guo@intel.com>
Signed-off-by: Heng Guo <heng.guo@intel.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Author: Heng Guo 2025-10-21 06:23:30 +08:00, committed by GitHub
commit 87778d5f00 (parent f9e7ad5400)


@@ -436,6 +436,12 @@ class AutoRoundConfig(QuantizationConfig):
         return None

     def get_quant_method(self, layer: torch.nn.Module, prefix: str):
+        if prefix and self.extra_config:
+            for layer_name in self.extra_config:
+                if (
+                    layer_name == prefix or layer_name == f"model.{prefix}"
+                ) and self.extra_config[layer_name].get("bits", 16) >= 16:
+                    return UnquantizedLinearMethod()
         if (
             current_platform.is_cpu()
             or current_platform.is_xpu()
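
The added block lets a mixed-bit AutoRound checkpoint keep selected layers in full precision: any layer whose extra_config entry records 16 or more bits is served by UnquantizedLinearMethod instead of a quantized kernel. Below is a minimal standalone sketch of that lookup, assuming an extra_config of the shape the diff implies (a mapping from layer name to per-layer overrides such as {"bits": 16}); the helper name is_unquantized and the sample layer names are hypothetical, not part of the patch. Checkpoint layer names may carry a leading "model." that the vLLM prefix lacks, which is why both forms are compared.

    # Hypothetical helper mirroring the matching logic in the diff above.
    def is_unquantized(prefix: str, extra_config: dict[str, dict]) -> bool:
        """Return True if the layer at `prefix` should stay in full precision."""
        if not (prefix and extra_config):
            return False
        for layer_name, overrides in extra_config.items():
            # Match either the bare vLLM prefix or the checkpoint-style name.
            if layer_name in (prefix, f"model.{prefix}"):
                # bits >= 16 marks a layer that was excluded from quantization.
                if overrides.get("bits", 16) >= 16:
                    return True
        return False

    # Hypothetical example: keep lm_head in full precision, quantize a projection to 4 bits.
    extra_config = {
        "model.lm_head": {"bits": 16},
        "model.layers.0.mlp.gate_proj": {"bits": 4},
    }
    assert is_unquantized("lm_head", extra_config)
    assert not is_unquantized("layers.0.mlp.gate_proj", extra_config)

Note that the loop keeps scanning when a name matches but its bit width is below 16, so a 4-bit entry simply falls through to the normal quantized path later in get_quant_method.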