[Perf] Reduce MLA CPU overheads in V1 (#14384)

Signed-off-by: Lucas Wilkinson <lwilkinson@neuralmagic.com>
Signed-off-by: Lucas Wilkinson <lwilkins@redhat.com>
commit dae6896977
parent c34eeec58d
Author: Lucas Wilkinson
Date: 2025-03-06 22:59:14 -05:00 (committed by GitHub)
2 changed files with 18 additions and 6 deletions

@@ -161,8 +161,13 @@ class RotaryEmbedding(CustomOp):
     ) -> Tuple[torch.Tensor, torch.Tensor]:
         from vllm import _custom_ops as ops
 
-        self.cos_sin_cache = self.cos_sin_cache.to(query.device,
-                                                   dtype=query.dtype)
+        # __setattr__ in nn.Module (called by `self.cos_sin_cache = ...`)
+        # is expensive, so avoid calling it if possible
+        if self.cos_sin_cache.device != query.device or \
+            self.cos_sin_cache.dtype != query.dtype:
+            self.cos_sin_cache = self.cos_sin_cache.to(query.device,
+                                                       dtype=query.dtype)
+
         # ops.rotary_embedding()/batched_rotary_embedding()
         # are in-place operations that update the query and key tensors.
         if offsets is not None:
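For context on this hunk: nn.Module.__setattr__ intercepts every attribute assignment to check whether the value is a Parameter, buffer, or submodule, so re-assigning cos_sin_cache on every forward call costs CPU time even when .to() is a no-op that returns the same tensor. A minimal standalone sketch of the pattern (the RopeCache class is illustrative, not the vLLM code):

import torch
import torch.nn as nn


class RopeCache(nn.Module):
    """Illustrative stand-in for RotaryEmbedding's cos_sin_cache handling."""

    def __init__(self) -> None:
        super().__init__()
        self.cos_sin_cache = torch.randn(4096, 128)

    def move_unguarded(self, query: torch.Tensor) -> None:
        # Old behavior: every call goes through nn.Module.__setattr__,
        # even when .to() is a no-op returning the same tensor.
        self.cos_sin_cache = self.cos_sin_cache.to(query.device,
                                                   dtype=query.dtype)

    def move_guarded(self, query: torch.Tensor) -> None:
        # New behavior: only pay the __setattr__ cost when the cache
        # actually has to change device or dtype.
        if (self.cos_sin_cache.device != query.device
                or self.cos_sin_cache.dtype != query.dtype):
            self.cos_sin_cache = self.cos_sin_cache.to(query.device,
                                                       dtype=query.dtype)

The guarded version skips the __setattr__ machinery entirely on the steady-state path; the tensor itself is untouched in both cases.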

@@ -222,8 +222,8 @@ from vllm.model_executor.layers.quantization.utils.fp8_utils import (
     apply_fp8_linear_generic, current_platform_fp8_dtype, is_fp8)
 from vllm.model_executor.layers.quantization.utils.quant_utils import (
     scaled_quantize)
-from vllm.model_executor.layers.rotary_embedding import (
-    DeepseekScalingRotaryEmbedding, RotaryEmbedding)
+from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding
 from vllm.platforms import current_platform
 from vllm.utils import cdiv, round_down
 
 try:
@@ -627,8 +627,15 @@ class MLACommonImpl(MLAAttentionImpl[M], Generic[M]):
         self.v_head_dim = v_head_dim
 
         self.rotary_emb = rotary_emb
-        self.use_yarn_rope = isinstance(rotary_emb,
-                                        DeepseekScalingRotaryEmbedding)
+
+        if current_platform.is_cuda():
+            # Hack for V1 for now to avoid torch library overhead (since we are
+            # already inside an attention custom op), pull out the forward
+            # method from the rotary embedding and call it directly (and avoid
+            # calling forward_native, when we can call forward_cuda)
+            # TODO(lucas): we should probably find a cleaner way to do this
+            self.rotary_emb = rotary_emb.forward_cuda
+
         self.q_proj = q_proj
         self.kv_b_proj = kv_b_proj
         self.o_proj = o_proj
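For context on this hunk: vLLM's CustomOp normally routes __call__ through a dispatch layer that selects forward_cuda, forward_native, etc. on each invocation. Binding forward_cuda once in __init__ turns the hot-path call into a plain bound-method call. A hypothetical sketch of the difference (DispatchingOp is illustrative, not the vLLM implementation):

import torch


class DispatchingOp:
    """Hypothetical stand-in for a CustomOp-style dispatching wrapper."""

    def forward_native(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2.0  # portable reference path

    def forward_cuda(self, x: torch.Tensor) -> torch.Tensor:
        return x * 2.0  # CUDA fast path (same math in this sketch)

    def __call__(self, x: torch.Tensor) -> torch.Tensor:
        # Per-call dispatch: re-decides the implementation every time.
        if torch.cuda.is_available():
            return self.forward_cuda(x)
        return self.forward_native(x)


op = DispatchingOp()

# Dispatching call: pays the selection overhead on every invocation.
y1 = op(torch.ones(4))

# The commit's pattern: resolve once at init time, call directly after.
bound = op.forward_cuda
y2 = bound(torch.ones(4))

This is also why self.rotary_emb can hold either the module or the bound forward_cuda method: both are callable with the same arguments, so downstream call sites are unchanged.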