Mistral-format support for compressed-tensors (#16803)

Signed-off-by: mgoin <mgoin64@gmail.com>
This commit is contained in:
Michael Goin 2025-04-23 06:46:23 -06:00 committed by GitHub
parent ce17db8085
commit aa72d9a4ea
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -690,6 +690,9 @@ def load_params_config(model: Union[str, Path], revision: Optional[str],
"quant_method": "fp8",
"activation_scheme": "static"
}
elif quantization.get("quant_method") == "compressed-tensors":
# Pass through the quantization config to compressed-tensors
quantization_config = quantization
else:
raise ValueError(
f"Found unknown quantization='{quantization}' in config")
@@ -707,6 +710,7 @@ def load_params_config(model: Union[str, Path], revision: Optional[str],
if config_type == "multimodal":
multimodal_config = config_dict.pop("vision_encoder")
quantization_config = config_dict.get("quantization_config", {})
config_dict = {
"text_config": config_dict,
@@ -714,6 +718,8 @@ def load_params_config(model: Union[str, Path], revision: Optional[str],
}
config_dict["architectures"] = ["PixtralForConditionalGeneration"]
config_dict["model_type"] = "pixtral"
if quantization_config:
config_dict["quantization_config"] = quantization_config
config_dict.update(kwargs)