From 91276c57210b36997861af706a48ac784573ed4c Mon Sep 17 00:00:00 2001
From: Jee Jee Li
Date: Fri, 28 Mar 2025 21:14:09 +0800
Subject: [PATCH] [Model] Adding torch compile annotations to chatglm (#15624)

Signed-off-by: Jee Jee Li
---
 vllm/model_executor/models/chatglm.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py
index 14dca23b3934..a51a0af9e2bc 100644
--- a/vllm/model_executor/models/chatglm.py
+++ b/vllm/model_executor/models/chatglm.py
@@ -10,6 +10,7 @@
 from torch import nn
 from torch.nn import LayerNorm
 from vllm.attention import Attention
+from vllm.compilation.decorators import support_torch_compile
 from vllm.config import CacheConfig, VllmConfig
 from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size
 from vllm.model_executor.layers.activation import SiluAndMul
@@ -293,6 +294,7 @@ class GLMTransformer(nn.Module):
         return hidden_states
 
 
+@support_torch_compile
 class ChatGLMModel(nn.Module):

     def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
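
For context, the sketch below restates the pattern this patch introduces: applying @support_torch_compile directly above the model class definition so that vLLM can route the model's forward pass through its torch.compile integration when compilation is enabled. Only the decorator placement, the imports, and the constructor signature come from the hunks above; the abbreviated class body and the comments are illustrative, not part of the patch.

    from torch import nn

    from vllm.compilation.decorators import support_torch_compile
    from vllm.config import VllmConfig


    @support_torch_compile
    class ChatGLMModel(nn.Module):
        # Decorating the class opts its forward pass into vLLM's
        # torch.compile machinery when compilation is enabled at runtime.

        def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
            super().__init__()
            ...  # embeddings, transformer layers, etc. are unchanged by this patch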