[Quantization] fix marlin w8a8 check (#30961)

Signed-off-by: Jinzhen Lin <jinzhen.ljz@antgroup.com>
Jinzhen Lin authored 2025-12-19 23:33:22 +08:00, committed by GitHub
parent 23a1946e3b
commit 5fbfa8d9ef

@@ -11,7 +11,6 @@ from vllm.model_executor.layers.quantization.utils.marlin_utils import (
     marlin_make_workspace_new,
     marlin_permute_bias,
     marlin_permute_scales,
-    marlin_quant_input,
     should_use_atomic_add_reduce,
 )
 from vllm.model_executor.utils import replace_parameter
@@ -63,13 +62,11 @@ def apply_fp8_marlin_linear(
     inputs = reshaped_x
     a_scales = None
     if input_dtype is not None and input_dtype.itemsize == 1:
-        if input_dtype != torch.float8_e4m3fn:
-            raise RuntimeError("FP8 weight + INT8 activation is not supported.")
-        inputs, a_scales = marlin_quant_input(inputs, torch.float8_e4m3fn)
+        # inputs, a_scales = marlin_quant_input(inputs, torch.float8_e4m3fn)
+        raise RuntimeError("Marlin W8A8 is not supported.")
     output = ops.gptq_marlin_gemm(
-        a=reshaped_x,
+        a=inputs,
         c=None,
         b_q_weight=weight,
         b_bias=bias,
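
Note on the check itself (not part of the diff): the guard keys on the activation dtype's byte size, so any 1-byte dtype (torch.int8 as well as torch.float8_e4m3fn) now hits the new RuntimeError in this FP8-weight path instead of being quantized via marlin_quant_input. A minimal standalone sketch of that behavior, with a made-up helper name and no vLLM imports:

    from typing import Optional

    import torch


    def is_quantized_activation(input_dtype: Optional[torch.dtype]) -> bool:
        # Same condition as the diff: any 1-byte activation dtype is treated
        # as a quantized (W8A8-style) input.
        return input_dtype is not None and input_dtype.itemsize == 1


    for dtype in (None, torch.bfloat16, torch.int8, torch.float8_e4m3fn):
        if is_quantized_activation(dtype):
            # apply_fp8_marlin_linear would now raise here:
            print(f"{dtype}: RuntimeError('Marlin W8A8 is not supported.')")
        else:
            print(f"{dtype}: falls through to the 16-bit-activation (W8A16) path")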