Apply torch.compile to fused_moe/grouped_topk (#12637)

commit 3194039c0e
parent 4f4d427ac2
Michael Goin committed on 2025-02-01 11:16:19 -05:00 (via GitHub)
2 changed files with 3 additions and 2 deletions


@@ -759,6 +759,7 @@ def fused_topk(
 # This is used by the Deepseek-V2 and Deepseek-V3 model
+@torch.compile(dynamic=True, backend=current_platform.simple_compile_backend)
 def grouped_topk(hidden_states: torch.Tensor,
                  gating_output: torch.Tensor,
                  topk: int,
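
This hunk decorates grouped_topk with torch.compile, using dynamic=True so varying token counts do not trigger recompilation, and letting current_platform.simple_compile_backend pick a backend suited to the running platform. Below is a minimal runnable sketch of the same pattern applied to a simplified group-limited top-k routine; grouped_topk_sketch and its body are illustrative stand-ins rather than vLLM's actual grouped_topk, and the default torch.compile backend is used in place of vLLM's platform hook.

import torch


@torch.compile(dynamic=True)  # dynamic=True: tolerate varying token counts
def grouped_topk_sketch(gating_output: torch.Tensor, topk: int,
                        num_expert_group: int, topk_group: int):
    # Simplified group-limited routing: score each expert group by its best
    # expert, keep only the top groups, then pick top-k experts within them.
    num_tokens = gating_output.shape[0]
    experts_per_group = gating_output.shape[1] // num_expert_group
    scores = torch.softmax(gating_output, dim=-1)
    group_scores = scores.view(num_tokens, num_expert_group,
                               -1).max(dim=-1).values
    group_idx = torch.topk(group_scores, k=topk_group, dim=-1).indices
    group_mask = torch.zeros_like(group_scores)
    group_mask.scatter_(1, group_idx, 1)
    score_mask = group_mask.unsqueeze(-1).expand(
        num_tokens, num_expert_group,
        experts_per_group).reshape(num_tokens, -1)
    masked_scores = scores.masked_fill(score_mask == 0, 0.0)
    return torch.topk(masked_scores, k=topk, dim=-1)


# Example: 64 experts in 8 groups; route each of 4 tokens to 6 experts
# drawn from its best 3 groups.
weights, ids = grouped_topk_sketch(torch.randn(4, 64), topk=6,
                                   num_expert_group=8, topk_group=3)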


@@ -27,6 +27,7 @@ from torch import nn
 from transformers import PretrainedConfig
 from vllm.attention import Attention, AttentionMetadata
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, ModelConfig, VllmConfig
 from vllm.distributed import (get_pp_group,
                               get_tensor_model_parallel_world_size,
@@ -566,8 +567,7 @@ class DeepseekV3DecoderLayer(nn.Module):
         return hidden_states, residual
-# TODO(simon): check whether we support torch compile for Deepseek V3
-# @support_torch_compile
+@support_torch_compile
 class DeepseekV3Model(nn.Module):
     fall_back_to_pt_during_load = False
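
This hunk resolves the TODO and applies support_torch_compile to DeepseekV3Model directly. As a rough sketch of what such a class-level decorator provides, assuming it ultimately routes the module's forward through torch.compile: compile_forward below is a hypothetical stand-in, not vLLM's support_torch_compile, which additionally hooks into the engine's compilation configuration.

import torch
from torch import nn


def compile_forward(cls):
    # Hypothetical stand-in for a support_torch_compile-style decorator:
    # swap the class's forward for a torch.compile-wrapped version.
    cls.forward = torch.compile(cls.forward, dynamic=True)
    return cls


@compile_forward
class TinyModel(nn.Module):

    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(16, 16)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(self.proj(x))


model = TinyModel()
out = model(torch.randn(2, 16))  # first call triggers compilation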