[Quant] Fix use_mla TypeError and support loading pure-sparsity Compressed Tensors configs (#12711)
This commit is contained in:
parent bb392af434
commit 4896d0c2dd
@@ -1000,8 +1000,9 @@ class ModelConfig:
         # have fp8 for both weights and activations.
         if self.quantization == "compressed-tensors":
             quant_config = self._parse_quant_hf_config()
-            for group_name, cfg in quant_config.get("config_groups",
-                                                    ("", {})).items():
+            for group_name, cfg in quant_config.get("config_groups", {
+                    "": {}
+            }).items():
                 act_cfg = cfg.get("input_activations", {})
                 act_type = None if act_cfg is None else act_cfg.get("type", "")
                 w_cfg = cfg.get("weights", {})
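Why this hunk fixes the crash: dict.get only applies its default when the key is missing, and the old default ("", {}) is a tuple, which has no .items() method, so any checkpoint whose compressed-tensors config lacks "config_groups" (e.g. a pure-sparsity config) failed on this line. The new default {"": {}} is a dict with a single unnamed, empty group, so the loop body runs once and every nested .get() falls through to its own default. A minimal, self-contained sketch of the two behaviours (plain Python, not vLLM code; quant_config stands in for the parsed HF quant config of a pure-sparsity checkpoint):

# Hypothetical parsed quant config with no "config_groups" key.
quant_config: dict = {}

# Old default: a tuple has no .items(), so this line raised
# AttributeError: 'tuple' object has no attribute 'items'.
# quant_config.get("config_groups", ("", {})).items()

# New default: one unnamed group with an empty config, so the loop runs
# once and each nested .get() returns its own fallback.
for group_name, cfg in quant_config.get("config_groups", {"": {}}).items():
    act_cfg = cfg.get("input_activations", {})
    act_type = None if act_cfg is None else act_cfg.get("type", "")
    w_cfg = cfg.get("weights", {})
    print(repr(group_name), repr(act_type), w_cfg)  # -> '' '' {}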
@@ -424,6 +424,11 @@ class CompressedTensorsConfig(QuantizationConfig):
                 or input_quant is not None,
                 weight_quant=weight_quant,
                 input_quant=input_quant)
+        elif weight_quant is None:
+            logger.warning_once("Acceleration for non-quantized schemes is "
+                                "not supported by Compressed Tensors. "
+                                "Falling back to UnquantizedLinearMethod")
+            return None
         else:
             # Find the quant_scheme
             scheme = self._get_scheme_from_parts(  # type: ignore
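The new elif handles pure-sparsity checkpoints: their config describes only sparsity, so weight_quant is None and there is no quantization scheme to build. Instead of falling into _get_scheme_from_parts, the method now warns once and returns None; per the warning text, the caller then falls back to UnquantizedLinearMethod for the layer. A rough standalone sketch of that dispatch shape, with hypothetical names (select_scheme is not the vLLM API, and stdlib logging stands in for vLLM's logger.warning_once):

import logging
from typing import Optional

logger = logging.getLogger("compressed_tensors_sketch")

def select_scheme(weight_quant: Optional[dict],
                  input_quant: Optional[dict]) -> Optional[str]:
    """Return a scheme name, or None to request the unquantized fallback."""
    if weight_quant is None:
        # Pure-sparsity configs land here: nothing to quantize, so warn
        # and let the caller use an unquantized linear method rather than
        # crashing inside scheme construction.
        logger.warning("Acceleration for non-quantized schemes is not "
                       "supported by Compressed Tensors. Falling back to "
                       "UnquantizedLinearMethod")
        return None
    # Stand-in for _get_scheme_from_parts(weight_quant, input_quant).
    return "quantized_scheme"

assert select_scheme(None, None) is None                  # pure sparsity -> fallback
assert select_scheme({"num_bits": 8}, None) is not None   # weight-only unaffected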