* Fix AttributeError: MixtralModel object has no attribute org_vocab_size.
* Make LoRA logic for Mistral and Mixtral the same

---------

Co-authored-by: Pernekhan Utemuratov <pernekhan@deepinfra.com>
parent 25e86b6a61
commit 31348dff03
@@ -285,15 +285,19 @@ class MixtralModel(nn.Module):
         self,
         config: MixtralConfig,
         linear_method: Optional[LinearMethodBase] = None,
+        lora_config: Optional[LoRAConfig] = None,
     ) -> None:
         super().__init__()
         self.padding_idx = config.pad_token_id
-        self.vocab_size = config.vocab_size
+        lora_vocab = (lora_config.lora_extra_vocab_size *
+                      (lora_config.max_loras or 1)) if lora_config else 0
+        self.vocab_size = config.vocab_size + lora_vocab
+        self.org_vocab_size = config.vocab_size

         self.embed_tokens = VocabParallelEmbedding(
-            config.vocab_size,
+            self.vocab_size,
             config.hidden_size,
-            org_num_embeddings=self.org_vocab_size,
+            org_num_embeddings=config.vocab_size,
         )
         self.layers = nn.ModuleList([
             MixtralDecoderLayer(config, linear_method=linear_method)
@@ -350,7 +354,9 @@ class MixtralForCausalLM(nn.Module):
         super().__init__()
         self.config = config
         self.linear_method = linear_method
-        self.model = MixtralModel(config, linear_method)
+        self.model = MixtralModel(config,
+                                  linear_method,
+                                  lora_config=lora_config)
         self.unpadded_vocab_size = config.vocab_size
         if lora_config:
             self.unpadded_vocab_size += lora_config.lora_extra_vocab_size
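For readers tracing the fix: the old code passed org_num_embeddings=self.org_vocab_size to VocabParallelEmbedding, but MixtralModel never assigned self.org_vocab_size, which raised the AttributeError; the new code defines it and sizes the embedding like Llama/Mistral do. Below is a minimal, self-contained sketch of the vocab-size arithmetic the changed __init__ now performs. LoRAStub and its default values (256 extra tokens, 4 LoRA slots) are hypothetical stand-ins for vLLM's LoRAConfig, used only to make the example runnable; the 32000 base vocab is Mixtral's tokenizer size.

from dataclasses import dataclass
from typing import Optional

@dataclass
class LoRAStub:
    # Illustrative stand-in for vLLM's LoRAConfig; only the two fields
    # referenced by the hunk above are modeled here.
    lora_extra_vocab_size: int = 256
    max_loras: Optional[int] = 4

def padded_vocab_size(base_vocab_size: int,
                      lora_config: Optional[LoRAStub]) -> int:
    # Same expression as the new MixtralModel.__init__: reserve extra
    # embedding rows for every LoRA slot, or none when LoRA is disabled.
    lora_vocab = (lora_config.lora_extra_vocab_size *
                  (lora_config.max_loras or 1)) if lora_config else 0
    return base_vocab_size + lora_vocab

# With 4 LoRA slots of 256 extra tokens each, the embedding table grows
# from 32000 to 33024 rows, while org_vocab_size stays at 32000.
assert padded_vocab_size(32000, LoRAStub()) == 33024
assert padded_vocab_size(32000, None) == 32000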