[Bugfix] fix _get_quant_method of FusedMoE for deepseekV3.2 on non-NV… (#30057)

Signed-off-by: tjp_zju <tanjianpingzju1990@gmail.com>
Author: tjp_zju, 2025-12-14 18:20:51 +08:00 (committed by GitHub)
parent 0bb0bae436
commit 6ecc1e411b

@@ -17,6 +17,9 @@ from vllm.model_executor.layers.fused_moe.layer import (
     FusedMoEMethodBase,
     FusedMoeWeightScaleSupported,
 )
+from vllm.model_executor.layers.fused_moe.unquantized_fused_moe_method import (
+    UnquantizedFusedMoEMethod,
+)
 from vllm.model_executor.layers.linear import LinearBase, UnquantizedLinearMethod
 from vllm.model_executor.layers.quantization import QuantizationMethods
 from vllm.model_executor.layers.quantization.base_config import (
@@ -162,6 +165,8 @@ class MoeWNA16Config(QuantizationConfig):
         self, layer: torch.nn.Module, prefix: str
     ) -> Optional["QuantizeMethodBase"]:
         if is_layer_skipped_quant(prefix, self.modules_to_not_convert):
+            if isinstance(layer, FusedMoE):
+                return UnquantizedFusedMoEMethod(layer.moe_config)
             return UnquantizedLinearMethod()
         elif isinstance(layer, LinearBase):
             # Avoid circular import
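
For context, a minimal, self-contained sketch of the dispatch this patch fixes: when a module listed in modules_to_not_convert is a FusedMoE layer, get_quant_method now returns an UnquantizedFusedMoEMethod built from layer.moe_config instead of falling through to UnquantizedLinearMethod. The classes below are simplified stand-ins, not the real vLLM types; only the isinstance-based routing mirrors the patched code.

# Stand-in classes; the real ones live under vllm.model_executor.layers.*
class FusedMoE:
    def __init__(self, moe_config):
        self.moe_config = moe_config

class LinearBase:
    pass

class UnquantizedLinearMethod:
    pass

class UnquantizedFusedMoEMethod:
    def __init__(self, moe_config):
        self.moe_config = moe_config

def get_quant_method(layer, skip_quant):
    """Route a quantization-skipped layer to a method matching its type."""
    if skip_quant:
        # Before this patch, a skipped FusedMoE layer fell through to
        # UnquantizedLinearMethod, which does not match an MoE layer.
        if isinstance(layer, FusedMoE):
            return UnquantizedFusedMoEMethod(layer.moe_config)
        return UnquantizedLinearMethod()
    return None

# A skipped MoE layer now gets the MoE-specific unquantized method,
# while skipped linear layers keep the linear one.
moe_layer = FusedMoE(moe_config={"num_experts": 8})
assert isinstance(get_quant_method(moe_layer, skip_quant=True),
                  UnquantizedFusedMoEMethod)
assert isinstance(get_quant_method(LinearBase(), skip_quant=True),
                  UnquantizedLinearMethod)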