From e6a226efba6f11b0c0b32bc460f0f6fe315801d2 Mon Sep 17 00:00:00 2001
From: Wentao Ye <44945378+yewentao256@users.noreply.github.com>
Date: Tue, 30 Sep 2025 14:13:03 -0400
Subject: [PATCH] [Bug] Fix AttributeError: 'QKVParallelLinear' object has no attribute 'orig_dtype' (#25958)

Signed-off-by: yewentao256
---
 .../compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py
index d42ae22c5139..5ad1b15b7ed5 100644
--- a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py
+++ b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w8a8_fp8.py
@@ -66,6 +66,7 @@ class CompressedTensorsW8A8Fp8(CompressedTensorsScheme):
         output_size_per_partition = sum(output_partition_sizes)
         layer.logical_widths = output_partition_sizes
         layer.weight_block_size = None
+        layer.orig_dtype = params_dtype

         if self.strategy == QuantizationStrategy.BLOCK:
             assert self.weight_block_size is not None
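For context, a minimal, self-contained sketch of the failure mode this patch addresses (this is not vLLM code; `_ToyLayer`, `_ToyFp8Scheme`, and their methods are illustrative stand-ins only): a scheme that attaches metadata to the layer module in `create_weights()` and later reads `layer.orig_dtype` will raise `AttributeError` unless that attribute is set up front, which is what the added line does.

```python
import torch


class _ToyLayer:
    """Stand-in for a linear layer module; attributes are attached dynamically."""
    pass


class _ToyFp8Scheme:
    """Illustrative scheme: create_weights() records metadata that later steps read."""

    def create_weights(self, layer: _ToyLayer, params_dtype: torch.dtype) -> None:
        layer.logical_widths = [128, 128, 128]
        layer.weight_block_size = None
        # Without this assignment, any later access to layer.orig_dtype raises
        # AttributeError -- the symptom reported in #25958.
        layer.orig_dtype = params_dtype

    def apply(self, layer: _ToyLayer, x: torch.Tensor) -> torch.Tensor:
        # A downstream step that casts the output back to the original dtype.
        return x.to(layer.orig_dtype)


if __name__ == "__main__":
    layer = _ToyLayer()
    scheme = _ToyFp8Scheme()
    scheme.create_weights(layer, params_dtype=torch.bfloat16)
    out = scheme.apply(layer, torch.randn(4, 128, dtype=torch.float32))
    print(out.dtype)  # torch.bfloat16
```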