[Quant] Fix use_mla TypeError and support loading pure-sparsity Compressed Tensors configs (#12711)

Author: Kyle Sayers, 2025-02-04 02:27:11 -05:00 (committed by GitHub)
parent bb392af434
commit 4896d0c2dd
2 changed files with 8 additions and 2 deletions


@@ -1000,8 +1000,9 @@ class ModelConfig:
         # have fp8 for both weights and activations.
         if self.quantization == "compressed-tensors":
             quant_config = self._parse_quant_hf_config()
-            for group_name, cfg in quant_config.get("config_groups",
-                                                    ("", {})).items():
+            for group_name, cfg in quant_config.get("config_groups", {
+                    "": {}
+            }).items():
                 act_cfg = cfg.get("input_activations", {})
                 act_type = None if act_cfg is None else act_cfg.get("type", "")
                 w_cfg = cfg.get("weights", {})
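The hunk above fixes the default passed to dict.get: the old default was a tuple, which has no .items(), so loading a checkpoint whose quantization config lacks a "config_groups" entry crashed during the fp8 check. A minimal sketch of the failure and the fix; the quant_config dict below is a hypothetical stand-in, not a real checkpoint config:

# Hypothetical stand-in for a parsed quantization config with no
# "config_groups" key (e.g. a pure-sparsity Compressed Tensors config).
quant_config = {"format": "sparse-bitmask"}

# Old default: a tuple, which has no .items(), so iteration raises
# "AttributeError: 'tuple' object has no attribute 'items'".
try:
    for group_name, cfg in quant_config.get("config_groups",
                                            ("", {})).items():
        pass
except AttributeError as exc:
    print(f"old default fails: {exc}")

# New default: a dict with a single empty group, so the loop runs once
# with empty sub-configs and every .get() below degrades gracefully.
for group_name, cfg in quant_config.get("config_groups", {"": {}}).items():
    act_cfg = cfg.get("input_activations", {})
    act_type = None if act_cfg is None else act_cfg.get("type", "")
    print(repr(group_name), repr(act_type))  # '' ''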


@@ -424,6 +424,11 @@ class CompressedTensorsConfig(QuantizationConfig):
                                          or input_quant is not None,
                                          weight_quant=weight_quant,
                                          input_quant=input_quant)
+        elif weight_quant is None:
+            logger.warning_once("Acceleration for non-quantized schemes is "
+                                "not supported by Compressed Tensors. "
+                                "Falling back to UnquantizedLinearMethod")
+            return None
+        else:
             # Find the quant_scheme
             scheme = self._get_scheme_from_parts(  # type: ignore
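The second hunk handles config groups that carry no weight quantization at all (pure sparsity): instead of falling through to _get_scheme_from_parts and failing, the method now warns and returns None, which the caller treats as "use the plain linear path". A self-contained sketch of that fallback contract, with hypothetical stand-in classes and function names; only the weight_quant-is-None branching mirrors the diff:

from typing import Any, Optional

class UnquantizedLinearMethod:
    """Stand-in for the default, non-quantized linear compute path."""

class QuantizedLinearMethod:
    """Hypothetical stand-in for a real quantized compute path."""
    def __init__(self, scheme: Any) -> None:
        self.scheme = scheme

def get_scheme(weight_quant: Optional[dict],
               input_quant: Optional[dict]) -> Optional[Any]:
    # Mirrors the new branch: a group with no weight quantization
    # (e.g. sparsity-only) yields no quantization scheme at all.
    if weight_quant is None:
        print("warning: non-quantized scheme, falling back")
        return None
    return (weight_quant, input_quant)

def select_linear_method(weight_quant, input_quant):
    # A None scheme means "no quantized kernel applies"; the caller
    # falls back to the unquantized method instead of raising.
    scheme = get_scheme(weight_quant, input_quant)
    if scheme is None:
        return UnquantizedLinearMethod()
    return QuantizedLinearMethod(scheme)

# Pure-sparsity group: weights are pruned but not quantized.
method = select_linear_method(weight_quant=None, input_quant=None)
print(type(method).__name__)  # UnquantizedLinearMethod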