# SPDX-License-Identifier: Apache-2.0

# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only MiniCPM model compatible with HuggingFace weights."""
import math
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union

import torch
from torch import nn
from transformers import PretrainedConfig

from vllm.attention import Attention, AttentionMetadata
from vllm.compilation.decorators import support_torch_compile
from vllm.config import CacheConfig, VllmConfig
from vllm.distributed import (get_pp_group, get_tensor_model_parallel_rank,
                              get_tensor_model_parallel_world_size,
                              tensor_model_parallel_all_reduce)
from vllm.model_executor.layers.activation import FatreluAndMul, SiluAndMul
from vllm.model_executor.layers.fused_moe import fused_moe
from vllm.model_executor.layers.layernorm import RMSNorm
from vllm.model_executor.layers.linear import (MergedColumnParallelLinear,
                                               QKVParallelLinear,
                                               ReplicatedLinear,
                                               RowParallelLinear)
from vllm.model_executor.layers.logits_processor import LogitsProcessor
from vllm.model_executor.layers.quantization import QuantizationConfig
from vllm.model_executor.layers.rotary_embedding import get_rope
from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler
from vllm.model_executor.layers.vocab_parallel_embedding import (
    DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding)
from vllm.model_executor.model_loader.weight_utils import default_weight_loader
from vllm.model_executor.sampling_metadata import SamplingMetadata
from vllm.model_executor.utils import set_weight_attrs
from vllm.sequence import IntermediateTensors

from .interfaces import SupportsLoRA, SupportsPP
from .utils import (AutoWeightsLoader, is_pp_missing_parameter,
                    make_empty_intermediate_tensors_factory, make_layers,
                    maybe_prefix)

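# The classes below are assembled bottom-up: MiniCPMMoE / MiniCPMMLP and
# MiniCPMAttention form MiniCPMDecoderLayer, a stack of decoder layers forms
# MiniCPMModel, and MiniCPMForCausalLM adds the LM head and sampling glue.
# As a rough usage sketch (the checkpoint name is only an illustrative
# example), this file is exercised indirectly through vLLM's entry point:
#
#     from vllm import LLM, SamplingParams
#     llm = LLM(model="openbmb/MiniCPM-2B-sft-bf16", trust_remote_code=True)
#     outputs = llm.generate(["Hello"], SamplingParams(max_tokens=16))
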
class MiniCPMMoE(nn.Module):
    """A tensor-parallel MoE implementation that shards each expert
    across all ranks.

    Each expert's weights are sharded across all ranks and a fused MoE
    kernel is used for the forward pass, and finally we reduce the outputs
    across ranks.
    """

    def __init__(
        self,
        num_experts: int,
        top_k: int,
        hidden_size: int,
        intermediate_size: int,
        params_dtype: Optional[torch.dtype] = None,
        tp_size: Optional[int] = None,
    ):
        super().__init__()
        self.tp_size = tp_size or get_tensor_model_parallel_world_size()
        self.num_total_experts = num_experts
        self.top_k = top_k
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size // self.tp_size

        if params_dtype is None:
            params_dtype = torch.get_default_dtype()
        self.params_dtype = params_dtype

        self.gate = ReplicatedLinear(self.hidden_size,
                                     self.num_total_experts,
                                     bias=False,
                                     params_dtype=self.params_dtype,
                                     quant_config=None)

        self.ws = nn.Parameter(
            torch.empty(self.num_total_experts,
                        2 * self.intermediate_size,
                        self.hidden_size,
                        device="cuda",
                        dtype=self.params_dtype))
        self.w2s = nn.Parameter(
            torch.empty(self.num_total_experts,
                        self.hidden_size,
                        self.intermediate_size,
                        device="cuda",
                        dtype=self.params_dtype))

        set_weight_attrs(self.ws, {
            "weight_loader": self.weight_loader,
        })
        set_weight_attrs(self.w2s, {
            "weight_loader": self.weight_loader,
        })

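    # Checkpoint layout assumed by the loader below (mirroring the shapes
    # allocated in __init__): each expert i ships three matrices named
    # "experts.i.w1.weight" (gate), "experts.i.w3.weight" (up) and
    # "experts.i.w2.weight" (down). w1/w3 are stacked into the first and
    # second halves of `ws`, w2 goes into `w2s`, and every rank keeps only
    # its 1/tp_size slice of the intermediate dimension.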
    def weight_loader(self, param: nn.Parameter, loaded_weight: torch.Tensor,
                      weight_name: str, expert_id: int):
        tp_rank = get_tensor_model_parallel_rank()
        param_data = param.data
        shard_size = self.intermediate_size
        shard = slice(tp_rank * shard_size, (tp_rank + 1) * shard_size)
        if weight_name.endswith("w1.weight"):
            param_data[expert_id, 0:shard_size, :] = loaded_weight[shard, :]
        if weight_name.endswith("w3.weight"):
            param_data[expert_id,
                       shard_size:2 * shard_size, :] = loaded_weight[shard, :]
        if weight_name.endswith("w2.weight"):
            param_data[expert_id, :, :] = loaded_weight[:, shard]

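    # Forward pass: the replicated gate produces router logits, fused_moe
    # picks the top-k experts per token (renormalize=True rescales the top-k
    # routing weights to sum to 1) and runs the expert GEMMs on this rank's
    # weight slices in a fused kernel; the all-reduce at the end then sums
    # the partial outputs across tensor-parallel ranks.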
    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        num_tokens, hidden_size = hidden_states.shape
        hidden_states = hidden_states.view(-1, self.hidden_size)
        # router_logits: (num_tokens, n_experts)
        router_logits, _ = self.gate(hidden_states)
        final_hidden_states = fused_moe(hidden_states,
                                        self.ws,
                                        self.w2s,
                                        router_logits,
                                        self.top_k,
                                        renormalize=True,
                                        inplace=True)

        if self.tp_size > 1:
            final_hidden_states = tensor_model_parallel_all_reduce(
                final_hidden_states)

        return final_hidden_states.view(num_tokens, hidden_size)


class MiniCPMMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        hidden_act_param: float,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config)
        self.down_proj = RowParallelLinear(intermediate_size,
                                           hidden_size,
                                           bias=False,
                                           quant_config=quant_config)
        if hidden_act == "silu":
            self.act_fn = SiluAndMul()
        elif hidden_act == "fatrelu":
            self.act_fn = FatreluAndMul(threshold=hidden_act_param)
        else:
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu and fatrelu are supported for now.")

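    # Gated MLP: gate_up_proj packs gate_proj and up_proj into one matmul,
    # and the activation helpers split the result in half, apply the gate
    # activation to the first half and multiply it with the second half,
    # i.e. roughly down_proj(act(gate) * up). For "fatrelu" the gate
    # activation is (as the helper's name suggests) a thresholded ReLU
    # parameterized by hidden_act_param.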
    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x


class MiniCPMAttention(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        rope_theta: float = 10000,
        rope_scaling: Optional[Dict[str, Any]] = None,
        max_position_embeddings: int = 8192,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than or equal to the TP size, so
            # we partition the KV heads across the tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than the TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
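        # Example (illustrative numbers): with 32 query heads, 8 KV heads and
        # tp_size=4, each rank holds 8 query heads and 2 KV heads; with only
        # 2 KV heads and tp_size=4, each rank falls back to a single
        # replicated KV head (the max(1, ...) above).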
        self.head_dim = hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings

        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=False,
            quant_config=quant_config,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
        )

        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=max_position_embeddings,
            base=rope_theta,
            rope_scaling=rope_scaling,
        )
        # set rope as fp32 instead of bf16
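        # Recomputing the cache keeps the cos/sin table in fp32 rather than
        # the model dtype; forward() likewise upcasts q/k to float before
        # applying rotary embeddings and casts back afterwards, which appears
        # intended to match MiniCPM's reference numerics.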
        self.rotary_emb.cos_sin_cache = self.rotary_emb._compute_cos_sin_cache(
        )
        self.attn = Attention(self.num_heads,
                              self.head_dim,
                              self.scaling,
                              num_kv_heads=self.num_kv_heads,
                              cache_config=cache_config,
                              quant_config=quant_config,
                              prefix=f"{prefix}.attn")

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        orig_dtype = q.dtype
        q, k = q.float(), k.float()
        q, k = self.rotary_emb(positions, q, k)
        q, k = q.to(orig_dtype), k.to(orig_dtype)
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        output, _ = self.o_proj(attn_output)
        return output


class MiniCPMDecoderLayer(nn.Module):

    def __init__(
        self,
        config: PretrainedConfig,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.config = config
        self.cache_config = cache_config
        self.quant_config = quant_config
        self.hidden_size = config.hidden_size
        self.rope_theta = getattr(config, "rope_theta", 10000)
        self.rope_scaling = getattr(config, "rope_scaling", None)
        self.max_position_embeddings = getattr(config,
                                               "max_position_embeddings", 8192)
        self.prefix = prefix
        self._init_attn_block()
        self._init_ffn_block()

    def _init_attn_block(self):
        self.input_layernorm = RMSNorm(self.config.hidden_size,
                                       eps=self.config.rms_norm_eps)
        self.self_attn = MiniCPMAttention(
            hidden_size=self.hidden_size,
            num_heads=self.config.num_attention_heads,
            num_kv_heads=self.config.num_key_value_heads,
            rope_theta=self.rope_theta,
            rope_scaling=self.rope_scaling,
            max_position_embeddings=self.max_position_embeddings,
            cache_config=self.cache_config,
            quant_config=self.quant_config,
            prefix=f"{self.prefix}.self_attn",
        )

    def _init_ffn_block(self):
        self.post_attention_layernorm = RMSNorm(self.config.hidden_size,
                                                eps=self.config.rms_norm_eps)
        self.num_experts = getattr(self.config, "num_experts", 0)
        if self.num_experts == 0:
            self.mlp = MiniCPMMLP(
                hidden_size=self.hidden_size,
                intermediate_size=self.config.intermediate_size,
                hidden_act=self.config.hidden_act,
                hidden_act_param=getattr(self.config, "hidden_act_param", 0.),
                quant_config=self.quant_config,
            )
        else:
            self.mlp = MiniCPMMoE(
                num_experts=self.config.num_experts,
                top_k=self.config.num_experts_per_tok,
                hidden_size=self.config.hidden_size,
                intermediate_size=self.config.intermediate_size)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
        residual: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Self Attention
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )
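        # MiniCPM scales each residual branch by scale_depth / sqrt(num_layers)
        # instead of adding the sub-layer output directly; the same factor is
        # applied after the MLP below.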
        hidden_states = residual + hidden_states * \
            (self.config.scale_depth / math.sqrt(self.config.num_hidden_layers))

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states * \
            (self.config.scale_depth / math.sqrt(self.config.num_hidden_layers))

        return hidden_states, None


@support_torch_compile
class MiniCPMModel(nn.Module):

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()

        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        lora_config = vllm_config.lora_config

        self.config = config
        self.cache_config = cache_config
        self.quant_config = quant_config
        self.padding_idx = config.pad_token_id
        lora_vocab = (lora_config.lora_extra_vocab_size *
                      (lora_config.max_loras or 1)) if lora_config else 0
        self.vocab_size = config.vocab_size + lora_vocab
        self.org_vocab_size = config.vocab_size
        self.embed_tokens = VocabParallelEmbedding(
            self.vocab_size,
            config.hidden_size,
            org_num_embeddings=config.vocab_size,
        )
        self.num_experts = getattr(self.config, "num_experts", 0)
        self._init_layers(prefix, config, cache_config, quant_config)
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.make_empty_intermediate_tensors = (
            make_empty_intermediate_tensors_factory(
                ["hidden_states", "residual"], self.config.hidden_size))

    def _init_layers(
        self,
        prefix: str,
        config: PretrainedConfig,
        cache_config: Optional[CacheConfig],
        quant_config: Optional[QuantizationConfig],
    ):
        self.start_layer, self.end_layer, self.layers = make_layers(
            config.num_hidden_layers,
            lambda prefix: MiniCPMDecoderLayer(
                config, cache_config, quant_config, prefix=prefix),
            prefix=f"{prefix}.layers")

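    # MiniCPM multiplies token embeddings by config.scale_emb. Note that
    # forward() uses a caller-provided inputs_embeds tensor as-is, so such a
    # tensor should presumably carry this scaling already.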
    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        embedding = self.embed_tokens(input_ids)
        return embedding * self.config.scale_emb

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> Union[torch.Tensor, IntermediateTensors]:
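        # Pipeline parallelism: the first PP rank embeds input_ids (or takes
        # inputs_embeds), later ranks receive hidden_states/residual via
        # IntermediateTensors, and only the last rank applies the final
        # RMSNorm before returning.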
        if get_pp_group().is_first_rank:
            if inputs_embeds is not None:
                hidden_states = inputs_embeds
            else:
                hidden_states = self.get_input_embeddings(input_ids)
            residual = None
        else:
            hidden_states = intermediate_tensors["hidden_states"]
            residual = intermediate_tensors["residual"]

        for i in range(self.start_layer, self.end_layer):
            layer = self.layers[i]
            hidden_states, residual = layer(
                positions,
                hidden_states,
                kv_caches[i - self.start_layer],
                attn_metadata,
                residual,
            )
        if not get_pp_group().is_last_rank:
            return IntermediateTensors({
                "hidden_states": hidden_states,
                "residual": residual
            })
        hidden_states = self.norm(hidden_states)
        return hidden_states

    def load_weights(self, weights: Iterable[Tuple[str,
                                                   torch.Tensor]]) -> Set[str]:
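        # Checkpoint tensors for the q/k/v and gate/up projections are merged
        # into the fused qkv_proj / gate_up_proj parameters. For example (name
        # pattern assumed from the usual HF Llama-style layout), a tensor
        # named "...self_attn.q_proj.weight" is loaded into the "q" shard of
        # "...self_attn.qkv_proj.weight".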
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        expert_params_mapping = [
            # (param_name, weight_name, expert_id)
            ("ws" if weight_name in ["w1", "w3"] else "w2s",
             f"experts.{expert_id}.{weight_name}.weight", expert_id)
            for expert_id in range(self.num_experts)
            for weight_name in ["w1", "w2", "w3"]
        ]
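        # Loading order for each checkpoint tensor: try the stacked q/k/v and
        # gate/up mapping first, then the per-expert MoE mapping, and only
        # fall back to the parameter's default weight_loader if neither
        # matched (the for/else blocks below run their else branch only when
        # no break occurred).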
        params_dict = dict(self.named_parameters())
        loaded_params: Set[str] = set()
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            if ("rotary_emb.cos_cached" in name
                    or "rotary_emb.sin_cached" in name):
                # Models trained using ColossalAI may include these tensors in
                # the checkpoint. Skip them.
                continue
            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                if is_pp_missing_parameter(name, self):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                for param_name, weight_name, expert_id in expert_params_mapping:
                    if weight_name not in name:
                        continue
                    name = name.replace(weight_name, param_name)
                    if is_pp_missing_parameter(name, self):
                        continue
                    param = params_dict[name]
                    weight_loader = param.weight_loader
                    weight_loader(param,
                                  loaded_weight,
                                  weight_name,
                                  expert_id=expert_id)
                    break
                else:
                    # Skip loading extra bias for GPTQ models.
                    if name.endswith(".bias") and name not in params_dict:
                        continue
                    if is_pp_missing_parameter(name, self):
                        continue
                    param = params_dict[name]
                    weight_loader = getattr(param, "weight_loader",
                                            default_weight_loader)
                    weight_loader(param, loaded_weight)
            loaded_params.add(name)
        return loaded_params

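# MiniCPMForCausalLM wraps MiniCPMModel with the (optionally weight-tied)
# LM head, MiniCPM's width scaling for logits (see compute_logits), and the
# packed-module / embedding metadata that vLLM's LoRA support reads from the
# class attributes below.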
class MiniCPMForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    # LoRA specific attributes
    supported_lora_modules = [
        "qkv_proj",
        "o_proj",
        "gate_up_proj",
        "down_proj",
        "embed_tokens",
        "lm_head",
    ]
    embedding_modules = {
        "embed_tokens": "input_embeddings",
        "lm_head": "output_embeddings",
    }
    embedding_padding_modules = ["lm_head"]

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        cache_config = vllm_config.cache_config
        quant_config = vllm_config.quant_config
        lora_config = vllm_config.lora_config

        self.prefix = prefix
        self.vllm_config = vllm_config
        self.config = config
        self.lora_config = lora_config
        self.cache_config = cache_config
        self.quant_config = quant_config

        self.model = self._init_model(vllm_config=vllm_config,
                                      prefix=maybe_prefix(prefix, "model"))

        unpadded_vocab_size = config.vocab_size
        if lora_config:
            unpadded_vocab_size += lora_config.lora_extra_vocab_size
        self.lm_head = ParallelLMHead(
            unpadded_vocab_size,
            config.hidden_size,
            org_num_embeddings=config.vocab_size,
            padding_size=DEFAULT_VOCAB_PADDING_SIZE
            # We need bigger padding if using lora for kernel
            # compatibility
            if not lora_config else lora_config.lora_vocab_padding_size,
            quant_config=quant_config,
        )
        if config.tie_word_embeddings:
            self.lm_head = self.lm_head.tie_weights(self.model.embed_tokens)
        self.scale_width = self.config.hidden_size / self.config.dim_model_base

        self.logits_processor = LogitsProcessor(unpadded_vocab_size,
                                                config.vocab_size)
        self.sampler = get_sampler()
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors)

    def _init_model(self, *, vllm_config: VllmConfig, prefix: str = ""):
        return MiniCPMModel(vllm_config=vllm_config, prefix=prefix)

    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        return self.model.get_input_embeddings(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   attn_metadata, intermediate_tensors,
                                   inputs_embeds)
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
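        # MiniCPM divides hidden states by scale_width
        # (= hidden_size / dim_model_base, set in __init__) before projecting
        # to vocabulary logits, a width-scaling factor from the MiniCPM
        # recipe.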
        hidden_states = hidden_states / self.scale_width
        logits = self.logits_processor(self.lm_head, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[Tuple[str,
                                                   torch.Tensor]]) -> Set[str]:
        loader = AutoWeightsLoader(
            self,
            skip_prefixes=(["lm_head."]
                           if self.config.tie_word_embeddings else None),
        )
        return loader.load_weights(weights)