From e5d35d62f5bd308abf05ee0bfc23bc17c5c46be2 Mon Sep 17 00:00:00 2001
From: Varun Sundar Rabindranath
Date: Thu, 12 Jun 2025 00:28:12 -0400
Subject: [PATCH] [BugFix] Force registration of w8a8_block_fp8_matmul_deepgemm
 via lazy import (#19514)

Signed-off-by: Varun Sundar Rabindranath
Co-authored-by: Varun Sundar Rabindranath
---
 vllm/model_executor/layers/quantization/utils/fp8_utils.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/vllm/model_executor/layers/quantization/utils/fp8_utils.py b/vllm/model_executor/layers/quantization/utils/fp8_utils.py
index 3d67c09de58e..754650ebeffb 100644
--- a/vllm/model_executor/layers/quantization/utils/fp8_utils.py
+++ b/vllm/model_executor/layers/quantization/utils/fp8_utils.py
@@ -143,6 +143,7 @@ def apply_w8a8_block_fp8_linear(
             column_major_scales=True,
         )
 
+        import vllm.model_executor.layers.quantization.deepgemm  # noqa: F401
         output = torch.ops.vllm.w8a8_block_fp8_matmul_deepgemm(
             q_input,
             weight,
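
Context for the fix: torch.ops.vllm.w8a8_block_fp8_matmul_deepgemm only resolves after the
vllm.model_executor.layers.quantization.deepgemm module has been imported, because the op is
registered as a side effect of that import. The added lazy import forces the registration to
run right before the call site. Below is a minimal, self-contained Python sketch of this
import-side-effect registration pattern, assuming PyTorch 2.4+'s torch.library.custom_op;
the names "demo_lib" and "scaled_matmul" are illustrative assumptions, not the actual vLLM
registration code.

    # Hypothetical sketch: defining a custom op registers it under torch.ops.
    # In vLLM the equivalent registration runs when the deepgemm module is
    # imported, which is why the lazy import above is needed before the call.
    import torch


    # Registration happens when this decorator executes, i.e. at import time
    # of the module that defines the op.
    @torch.library.custom_op("demo_lib::scaled_matmul", mutates_args=())
    def scaled_matmul(a: torch.Tensor, b: torch.Tensor, scale: float) -> torch.Tensor:
        return (a @ b) * scale


    if __name__ == "__main__":
        a = torch.randn(4, 8)
        b = torch.randn(8, 16)
        # The torch.ops.demo_lib namespace only resolves after registration has run.
        out = torch.ops.demo_lib.scaled_matmul(a, b, 0.5)
        print(out.shape)  # torch.Size([4, 16])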