[Bugfix] DeepSeek Accuracy (#14476)

Signed-off-by: Lucas Wilkinson <lwilkins@redhat.com>
Author: Lucas Wilkinson  2025-03-08 11:47:03 -05:00, committed by GitHub
parent 206e2577fa
commit db84f5eb3b


@@ -222,8 +222,7 @@ from vllm.model_executor.layers.quantization.utils.fp8_utils import (
     Fp8LinearGenericOp, current_platform_fp8_dtype, is_fp8)
 from vllm.model_executor.layers.quantization.utils.quant_utils import (
     scaled_quantize)
-from vllm.model_executor.layers.rotary_embedding import (
-    DeepseekScalingRotaryEmbedding, RotaryEmbedding)
+from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding
 from vllm.utils import cdiv, round_down

 try:
@@ -626,9 +625,12 @@ class MLACommonImpl(MLAAttentionImpl[M], Generic[M]):
         self.qk_head_dim = qk_head_dim
         self.v_head_dim = v_head_dim
-        self.rotary_emb = rotary_emb
-        self.use_yarn_rope = isinstance(rotary_emb,
-                                        DeepseekScalingRotaryEmbedding)
+        # Hack for V1 for now to avoid torch library overhead (since we are
+        # already inside an attention custom op), pull out the forward
+        # method from the rotary embedding and call it directly
+        # TODO(lucas): we should probably find a cleaner way to do this
+        self.rotary_emb = rotary_emb._forward_method
         self.q_proj = q_proj
         self.kv_b_proj = kv_b_proj
         self.o_proj = o_proj
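
For context, here is a minimal, self-contained sketch of the pattern this hunk relies on: instead of invoking the rotary-embedding module through its normal call path (which in vLLM routes through the torch custom-op / library dispatch layer), the constructor grabs the already-dispatched bound forward method once and calls it directly on every attention step. This is illustrative only; SketchRotaryEmbedding and the way it sets _forward_method are stand-ins mirroring the diff, not the vLLM classes.

# Minimal sketch (not vLLM code) of pulling out a bound forward method to
# skip per-call dispatch overhead. All names below are hypothetical.
import torch
from torch import nn


class SketchRotaryEmbedding(nn.Module):
    """Stand-in rotary-embedding layer; real rope math is omitted."""

    def __init__(self) -> None:
        super().__init__()
        # Cache the bound forward method once, mimicking how a custom-op
        # wrapper can expose its selected implementation directly.
        self._forward_method = self.forward

    def forward(self, positions: torch.Tensor, q: torch.Tensor,
                k: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        # Identity transform keeps the sketch short; a real layer would
        # rotate q and k according to `positions`.
        return q, k


rope = SketchRotaryEmbedding()
positions = torch.arange(4)
q = torch.randn(4, 8)
k = torch.randn(4, 8)

# Normal path: nn.Module.__call__ (hooks, plus any op-dispatch layer on top).
q1, k1 = rope(positions, q, k)

# Pattern from the diff: grab the bound method once and call it directly,
# since the caller is already inside an attention custom op.
rope_fn = rope._forward_method
q2, k2 = rope_fn(positions, q, k)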