[Feature] models: pass layer prefix to replace_linear_class for per-layer quantization routing. Addresses #23239 (#23556)

Signed-off-by: Shrey Gupta <shreyg1303@gmail.com>
Shrey Gupta 2025-08-28 08:42:44 +05:30 committed by GitHub
parent a69693e38f
commit 1b7b161a09
2 changed files with 18 additions and 8 deletions
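"Per-layer quantization routing" here means the quantization config can make a different decision for each replaced layer once it knows the layer's qualified name. Below is a minimal, hypothetical sketch of that idea; `PrefixAwareQuantConfig` is illustrative only and is not vLLM's `QuantizationConfig` API.

```python
# Hypothetical sketch of per-layer quantization routing keyed on the layer
# prefix that replace_linear_class now receives. PrefixAwareQuantConfig is
# illustrative only; it is not vLLM's QuantizationConfig API.
from typing import Optional


class PrefixAwareQuantConfig:

    def __init__(self, ignored_prefixes: list[str]):
        self.ignored_prefixes = ignored_prefixes

    def get_quant_method(self, layer, prefix: str) -> Optional[str]:
        # Layers whose qualified name starts with an ignored prefix stay
        # unquantized; everything else takes the quantized path.
        if any(prefix.startswith(p) for p in self.ignored_prefixes):
            return None
        return "quantized"


cfg = PrefixAwareQuantConfig(ignored_prefixes=["vision.blocks.0."])
print(cfg.get_quant_method(None, prefix="vision.blocks.0.mlp.fc1"))  # None
print(cfg.get_quant_method(None, prefix="vision.blocks.1.mlp.fc1"))  # quantized
```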


@@ -408,13 +408,17 @@ class DeepseekVLV2ForCausalLM(nn.Module, SupportsMultiModal, SupportsPP):
             if isinstance(module, nn.Linear):
                 parent, attr_name = self._get_parent_and_attr(vit, name)
                 if isinstance(parent, timm.layers.Mlp) and attr_name == "fc1":
-                    new_linear = replace_linear_class(module, "colwise",
-                                                      quant_config)
+                    new_linear = replace_linear_class(module,
+                                                      "colwise",
+                                                      quant_config,
+                                                      prefix=name)
                     setattr(parent, attr_name, new_linear)
                 elif isinstance(parent,
                                 timm.layers.Mlp) and attr_name == "fc2":
-                    new_linear = replace_linear_class(module, "rowwise",
-                                                      quant_config)
+                    new_linear = replace_linear_class(module,
+                                                      "rowwise",
+                                                      quant_config,
+                                                      prefix=name)
                     setattr(parent, attr_name, new_linear)
         return vit
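The `name` forwarded as `prefix` above is the qualified module name produced while walking the vision tower (presumably via `named_modules()`). The snippet below is an assumed, toy illustration of what such names look like, not the actual DeepSeek-VL2 module layout.

```python
# Assumed illustration of where a qualified name like `name` above comes from:
# named_modules() yields dotted paths for every submodule. The toy layout is
# not the actual DeepSeek-VL2 vision tower.
import torch.nn as nn

vit = nn.ModuleDict({
    "blocks": nn.ModuleList([
        nn.ModuleDict({
            "mlp": nn.Sequential(nn.Linear(8, 16), nn.Linear(16, 8)),
        }),
    ]),
})

for name, module in vit.named_modules():
    if isinstance(module, nn.Linear):
        print(name)  # -> "blocks.0.mlp.0", "blocks.0.mlp.1"
```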


@@ -106,8 +106,11 @@ def can_enable_torch_compile(vllm_config: VllmConfig) -> bool:
 def replace_linear_class(
-    linear: nn.Linear, style: Literal["colwise", "rowwise"],
-    quant_config: QuantizationConfig
+    linear: nn.Linear,
+    style: Literal["colwise", "rowwise"],
+    quant_config: QuantizationConfig,
+    *,
+    prefix: str = "",
 ) -> Union[ColumnParallelLinear, RowParallelLinear, ReplicatedLinear]:
     """
     Replace nn.Linear with one of vLLM's tensor parallel linear classes.
@@ -141,6 +144,7 @@ def replace_linear_class(
         output_size=linear.out_features,
         bias=linear.bias is not None,
         quant_config=quant_config,
+        prefix=prefix,
         return_bias=False,
         **vllm_linear_kwargs,
     )
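The new parameter is keyword-only (the bare `*`), so existing positional callers are unaffected and new call sites must name it explicitly. The following is a self-contained, toy mirror of the pattern, assuming only PyTorch; `ToyParallelLinear` and `toy_replace_linear_class` are stand-ins, not vLLM classes.

```python
# Self-contained toy mirror of the pattern in this hunk: a keyword-only
# `prefix` is accepted by the replacement helper and threaded into the new
# layer's constructor, where a per-layer decision could be made. These are
# stand-in classes, not vLLM's ColumnParallelLinear/RowParallelLinear.
from typing import Literal, Optional

import torch.nn as nn


class ToyParallelLinear(nn.Linear):

    def __init__(self, input_size: int, output_size: int, *, bias: bool,
                 quant_config: Optional[object] = None, prefix: str = ""):
        super().__init__(input_size, output_size, bias=bias)
        # A real parallel linear layer could consult the quant config here,
        # e.g. quant_config.get_quant_method(self, prefix=prefix).
        self.prefix = prefix
        self.quant_config = quant_config


def toy_replace_linear_class(
    linear: nn.Linear,
    style: Literal["colwise", "rowwise"],
    quant_config: Optional[object] = None,
    *,
    prefix: str = "",
) -> ToyParallelLinear:
    # Column/row sharding is collapsed into one class for brevity.
    return ToyParallelLinear(
        linear.in_features,
        linear.out_features,
        bias=linear.bias is not None,
        quant_config=quant_config,
        prefix=prefix,
    )


fc1 = nn.Linear(1024, 4096)
new_fc1 = toy_replace_linear_class(fc1, "colwise", None, prefix="blocks.0.mlp.fc1")
print(new_fc1.prefix)  # blocks.0.mlp.fc1
```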
@@ -557,8 +561,10 @@ class TransformersBase(nn.Module, SupportsQuant, SupportsLoRA, SupportsPP):
                 generator = (p for p in tp_plan if re.match(p, qual_name))
                 pattern = next(generator, None)
                 style = tp_plan.get(pattern, "replicate")
-                new_module = replace_linear_class(child_module, style,
-                                                  self.quant_config)
+                new_module = replace_linear_class(child_module,
+                                                  style,
+                                                  self.quant_config,
+                                                  prefix=qual_name)
                 setattr(module, child_name, new_module)
                 log_replacement(qual_name, child_module, new_module)
             else:
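Here the same `qual_name` that drives the `tp_plan` lookup is also forwarded as the quantization prefix. A self-contained sketch of that lookup follows; the `tp_plan` patterns below are made up, not a real model's plan.

```python
# Self-contained sketch of the tp_plan lookup shown above. The matched
# qual_name both selects the parallel style and is now forwarded as
# prefix=qual_name. The patterns here are illustrative only.
import re

tp_plan = {
    r"layers\.\d+\.mlp\.gate_proj": "colwise",
    r"layers\.\d+\.mlp\.down_proj": "rowwise",
}

qual_name = "layers.3.mlp.down_proj"
generator = (p for p in tp_plan if re.match(p, qual_name))
pattern = next(generator, None)
style = tp_plan.get(pattern, "replicate")
print(style)  # rowwise
```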