diff --git a/vllm/lora/layers/row_parallel_linear.py b/vllm/lora/layers/row_parallel_linear.py
index 2ef1bd98fc612..95517b1aee263 100644
--- a/vllm/lora/layers/row_parallel_linear.py
+++ b/vllm/lora/layers/row_parallel_linear.py
@@ -63,23 +63,18 @@ class RowParallelLinearWithLoRA(BaseLinearLayerWithLoRA):
         input_parallel = splitted_input[self.tp_rank].contiguous()
 
         # Matrix multiply.
-        output_parallel = self.apply(input_parallel)
+        bias_ = (
+            None
+            if (self.tp_rank > 0 or self.base_layer.skip_bias_add)
+            else self.base_layer.bias
+        )
+        output_parallel = self.apply(input_parallel, bias_)
         if self.base_layer.reduce_results and self.tp_size > 1:
-            output_ = tensor_model_parallel_all_reduce(output_parallel)
+            output = tensor_model_parallel_all_reduce(output_parallel)
         else:
-            output_ = output_parallel
-
-        if not self.base_layer.skip_bias_add:
-            output = (
-                output_ + self.base_layer.bias
-                if self.base_layer.bias is not None
-                else output_
-            )
-            output_bias = None
-        else:
-            output = output_
-            output_bias = self.base_layer.bias
+            output = output_parallel
+        output_bias = self.base_layer.bias if self.base_layer.skip_bias_add else None
 
         if not self.base_layer.return_bias:
             return output
         return output, output_bias
@@ -120,7 +115,7 @@ class RowParallelLinearWithShardedLoRA(RowParallelLinearWithLoRA):
         return lora_b
 
     def apply(self, x: torch.Tensor, bias: torch.Tensor | None = None) -> torch.Tensor:
-        output = self.base_layer.quant_method.apply(self.base_layer, x)
+        output = self.base_layer.quant_method.apply(self.base_layer, x, bias)
 
         x = x.view(-1, x.shape[-1])
         output, out_orig_shape = output.view(-1, output.shape[-1]), output.shape
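
For intuition, here is a standalone sketch of the invariant the first hunk preserves. This is illustration only, not vLLM code: `row_parallel_forward`, the per-rank shard lists, and the single-process "all-reduce" via `sum` are invented stand-ins. In a row-parallel linear layer each rank computes a partial matmul whose results are summed by an all-reduce, so exactly one rank may fuse the bias before the reduction, and `skip_bias_add` must suppress the fusion entirely so the caller receives the bias separately.

```python
import torch


def row_parallel_forward(
    x_shards: list[torch.Tensor],  # per-rank input shards, each (batch, in_dim / tp)
    w_shards: list[torch.Tensor],  # per-rank weight shards, each (out_dim, in_dim / tp)
    bias: torch.Tensor | None,     # full bias, (out_dim,), replicated on every rank
    skip_bias_add: bool,
) -> tuple[torch.Tensor, torch.Tensor | None]:
    partials = []
    for tp_rank, (x, w) in enumerate(zip(x_shards, w_shards)):
        # Mirrors the patch: only rank 0 fuses the bias, and only when the
        # caller has not asked to defer it. If every rank added the bias,
        # the reduced output would contain tp_size copies of it.
        bias_ = None if (tp_rank > 0 or skip_bias_add) else bias
        out = x @ w.t()
        if bias_ is not None:
            out = out + bias_
        partials.append(out)
    # Single-process stand-in for tensor_model_parallel_all_reduce.
    output = torch.stack(partials).sum(dim=0)
    output_bias = bias if skip_bias_add else None
    return output, output_bias


# Quick check: splitting the input dimension across two "ranks" reproduces
# the unsharded result, with the bias counted exactly once.
x = torch.randn(4, 8)
w = torch.randn(3, 8)
b = torch.randn(3)
out, _ = row_parallel_forward(
    [x[:, :4], x[:, 4:]], [w[:, :4], w[:, 4:]], bias=b, skip_bias_add=False
)
assert torch.allclose(out, x @ w.t() + b, atol=1e-5)
```

The second hunk makes the sharded-LoRA override consistent with this scheme: it forwards the `bias` argument into `quant_method.apply` so the base GEMM can fuse the rank-gated bias instead of silently dropping it.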