[Bugfix] Fix precision loss in LoRA-wrapped RowParallelLinear by fusing bias into GEMM (#28972)

Signed-off-by: prashanth058 <prashanth.dannamaneni@uipath.com>
Author: prashanth058, committed by GitHub on 2025-11-19 19:50:37 -08:00
Parent: a8c536829c
Commit: 0cca9b4d13


@@ -63,23 +63,18 @@ class RowParallelLinearWithLoRA(BaseLinearLayerWithLoRA):
         input_parallel = splitted_input[self.tp_rank].contiguous()

         # Matrix multiply.
-        output_parallel = self.apply(input_parallel)
+        bias_ = (
+            None
+            if (self.tp_rank > 0 or self.base_layer.skip_bias_add)
+            else self.base_layer.bias
+        )
+        output_parallel = self.apply(input_parallel, bias_)
         if self.base_layer.reduce_results and self.tp_size > 1:
-            output_ = tensor_model_parallel_all_reduce(output_parallel)
+            output = tensor_model_parallel_all_reduce(output_parallel)
         else:
-            output_ = output_parallel
-        if not self.base_layer.skip_bias_add:
-            output = (
-                output_ + self.base_layer.bias
-                if self.base_layer.bias is not None
-                else output_
-            )
-            output_bias = None
-        else:
-            output = output_
-            output_bias = self.base_layer.bias
+            output = output_parallel
+        output_bias = self.base_layer.bias if self.base_layer.skip_bias_add else None

         if not self.base_layer.return_bias:
             return output
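The `bias_` gate above matters under tensor parallelism: each rank computes only a partial GEMM over its input shard, and the all-reduce sums those partials, so the bias must enter the sum exactly once. Below is a minimal sketch of that invariant, simulating the ranks with plain matmuls rather than vLLM's layers; `tp_size`, `x_shards`, and `partials` are illustrative names, not project code.

```python
import torch

torch.manual_seed(0)
tp_size, in_features, out_features = 2, 64, 32
x = torch.randn(4, in_features)
w = torch.randn(out_features, in_features)
b = torch.randn(out_features)

# RowParallelLinear shards the input (reduction) dimension across ranks.
x_shards = x.chunk(tp_size, dim=-1)
w_shards = w.chunk(tp_size, dim=-1)

# Fuse the bias into the local GEMM only on rank 0, mirroring
# `bias_ = None if (self.tp_rank > 0 or ...) else self.base_layer.bias`.
partials = [
    xs @ ws.t() + (b if rank == 0 else 0)
    for rank, (xs, ws) in enumerate(zip(x_shards, w_shards))
]

# The all-reduce is a sum over ranks; the bias is counted exactly once.
output = sum(partials)
assert torch.allclose(output, x @ w.t() + b, atol=1e-5)
```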
@@ -120,7 +115,7 @@ class RowParallelLinearWithShardedLoRA(RowParallelLinearWithLoRA):
         return lora_b

     def apply(self, x: torch.Tensor, bias: torch.Tensor | None = None) -> torch.Tensor:
-        output = self.base_layer.quant_method.apply(self.base_layer, x)
+        output = self.base_layer.quant_method.apply(self.base_layer, x, bias)
         x = x.view(-1, x.shape[-1])
         output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape
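Threading `bias` through to `quant_method.apply` lets the underlying kernel apply it as part of the GEMM epilogue rather than as a separate elementwise add afterwards, which is the fusion the commit title refers to. One plausible way the separate add costs precision is an extra rounding to the output dtype; the standalone PyTorch sketch below (not vLLM's kernels) illustrates that, assuming the fused path adds the bias before a single down-cast to bf16 while the unfused path rounds the GEMM result first and then adds a bf16 bias.

```python
import torch

torch.manual_seed(0)
x = torch.randn(8, 256)
w = torch.randn(128, 256)
b = torch.randn(128)

# fp32 reference for measuring error.
ref = x @ w.t() + b

# Fused-style: bias added before the single down-cast to bf16.
fused = (x @ w.t() + b).to(torch.bfloat16)

# Unfused-style: GEMM output rounded to bf16 first, bias added afterwards,
# i.e. the extra rounding step the old forward() introduced.
unfused = (x @ w.t()).to(torch.bfloat16) + b.to(torch.bfloat16)

print("fused   max abs error:", (fused.float() - ref).abs().max().item())
print("unfused max abs error:", (unfused.float() - ref).abs().max().item())
# The unfused path typically shows the larger of the two errors.
```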