# SPDX-License-Identifier: Apache-2.0
"""Inference-only HF format GLM-4 model compatible with THUDM weights."""
from vllm.config import VllmConfig
from vllm.model_executor.models.llama import LlamaForCausalLM

from .interfaces import SupportsV0Only
from .utils import PPMissingLayer


class GlmForCausalLM(LlamaForCausalLM, SupportsV0Only):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__(vllm_config=vllm_config, prefix=prefix)
        # Hack Llama model to fit HF format GLM implementation
        # Attention difference between GLM and Llama:
        # 1. Half partial rotary_dim and no Neox style.
        # 2. There is no bias for o_proj in attention
        for layer in self.model.layers:
            if not isinstance(layer, PPMissingLayer):
                layer.self_attn.rotary_emb.rotary_dim //= 2
                layer.self_attn.rotary_emb.is_neox_style = False
                layer.self_attn.o_proj.bias = None
                layer.self_attn.o_proj.skip_bias_add = True
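
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the upstream module): what the two rotary
# tweaks above amount to. Llama applies Neox-style RoPE to the full head
# dimension, pairing element i with element i + rotary_dim // 2; the GLM
# layout instead rotates only the first half of the head dimension and pairs
# adjacent elements (2*i, 2*i + 1), i.e. GPT-J / interleaved style. The
# helper below is a plain reference implementation of that layout for a
# single position with the default base of 10000; it is not vLLM code.
# ---------------------------------------------------------------------------
def _glm_style_rope_reference(q, position, base=10000.0):
    import torch

    head_size = q.shape[-1]
    rotary_dim = head_size // 2  # "half partial rotary_dim"
    q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
    inv_freq = 1.0 / (base**(torch.arange(0, rotary_dim, 2).float() / rotary_dim))
    angles = position * inv_freq
    cos, sin = angles.cos(), angles.sin()
    # Interleaved (non-Neox) pairing: elements (2*i, 2*i + 1) form one rotation.
    x1, x2 = q_rot[..., 0::2], q_rot[..., 1::2]
    rotated = torch.stack((x1 * cos - x2 * sin, x1 * sin + x2 * cos), dim=-1)
    return torch.cat((rotated.flatten(-2), q_pass), dim=-1)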
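
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only). The model is inference-only and is
# normally reached through vLLM's offline `LLM` entry point rather than
# instantiated directly; `SupportsV0Only` means it runs on the V0 engine.
# The checkpoint name below is an assumption: any HF-format GLM-4 checkpoint
# with THUDM-style weights should resolve to this class.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from vllm import LLM, SamplingParams

    llm = LLM(model="THUDM/glm-4-9b-chat")  # assumed checkpoint name
    params = SamplingParams(temperature=0.7, max_tokens=64)
    for out in llm.generate(["Hello from GLM-4."], params):
        print(out.outputs[0].text)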