From c8fd97f26d05aff5a4603177c75aaccf4e6de11b Mon Sep 17 00:00:00 2001
From: Tyler Michael Smith
Date: Mon, 15 Jul 2024 13:05:52 -0400
Subject: [PATCH] [Kernel] Use CUTLASS kernels for the FP8 layers with Bias (#6270)

---
 vllm/model_executor/layers/quantization/utils/w8a8_utils.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/vllm/model_executor/layers/quantization/utils/w8a8_utils.py b/vllm/model_executor/layers/quantization/utils/w8a8_utils.py
index 30a82e1b547aa..f290a6830c91b 100644
--- a/vllm/model_executor/layers/quantization/utils/w8a8_utils.py
+++ b/vllm/model_executor/layers/quantization/utils/w8a8_utils.py
@@ -112,7 +112,7 @@ def apply_fp8_linear(
     # If dynamic, layer.input_scale is None and x_scale computed from x.
     # If static, layer.input_scale is scalar and x_scale is input_scale.
 
-    if bias is None and cutlass_fp8_supported:
+    if cutlass_fp8_supported:
         qinput, x_scale = ops.scaled_fp8_quant(input, input_scale)
 
         # Fused GEMM_DQ
@@ -120,7 +120,8 @@
                                      weight,
                                      out_dtype=input.dtype,
                                      scale_a=x_scale,
-                                     scale_b=weight_scale)
+                                     scale_b=weight_scale,
+                                     bias=bias)
 
     else:
         qinput, x_scale = ops.scaled_fp8_quant(input,
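
Note (not part of the patch): a minimal sketch of what the CUTLASS FP8 path looks like once the bias is passed through to cutlass_scaled_mm, as the diff above does. It assumes vllm._custom_ops exposes scaled_fp8_quant and cutlass_scaled_mm with the call shapes shown in the diff; the wrapper name fp8_linear_cutlass_sketch is a hypothetical helper for illustration, and the non-CUTLASS fallback branch is elided.

# Illustrative sketch only: the CUTLASS branch of apply_fp8_linear after this
# change, with the bias fused into the cutlass_scaled_mm epilogue instead of
# forcing the fallback path whenever a bias is present.
from typing import Optional

import torch

from vllm import _custom_ops as ops  # assumed import path, as used in w8a8_utils.py


def fp8_linear_cutlass_sketch(
        input: torch.Tensor,
        weight: torch.Tensor,  # FP8 weight prepared for cutlass_scaled_mm
        weight_scale: torch.Tensor,
        input_scale: Optional[torch.Tensor],
        bias: Optional[torch.Tensor] = None) -> torch.Tensor:
    # Quantize activations to FP8. With dynamic quantization input_scale is
    # None and x_scale is computed from the input; with static quantization
    # x_scale is simply the provided input_scale.
    qinput, x_scale = ops.scaled_fp8_quant(input, input_scale)

    # Fused GEMM + dequant; the bias (if any) is applied inside the kernel.
    return ops.cutlass_scaled_mm(qinput,
                                 weight,
                                 out_dtype=input.dtype,
                                 scale_a=x_scale,
                                 scale_b=weight_scale,
                                 bias=bias)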