From 3de8a858b332e4ad57a4d395304295472fba7fad Mon Sep 17 00:00:00 2001
From: Robert Shaw
Date: Wed, 24 Dec 2025 17:48:41 +0000
Subject: [PATCH] updated

Signed-off-by: Robert Shaw
---
 vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py b/vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py
index ebd9e3a4a8f2a..ec1e410608d68 100644
--- a/vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py
+++ b/vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py
@@ -325,8 +325,11 @@ class AiterExperts(mk.FusedMoEPermuteExpertsUnpermute):
         expert_tokens_meta: mk.ExpertTokensMetadata | None,
         apply_router_weight_on_input: bool,
     ):
+        # TODO(rob): rocm_aiter_fused_experts uses self.quant_config's
+        # a_scales for static quantization. Update this to fit better
+        # with the interface once all quant integrations are complete.
         assert a1q_scale is None
-        assert a2_scale is None
+        assert a2_scale == self.quant_config.a2_scale
         assert expert_tokens_meta is None
         result = rocm_aiter_fused_experts(