Align LoRA code between Mistral and Mixtral (fixes #2875) (#2880)

* Fix AttributeError: MixtralModel object has no attribute org_vocab_size.

* Make LoRA logic for Mistral and Mixtral the same

---------

Co-authored-by: Pernekhan Utemuratov <pernekhan@deepinfra.com>
Philipp Moritz, 2024-02-14 16:00:43 -08:00, committed by GitHub
parent 25e86b6a61
commit 31348dff03

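For context on the first bullet: a minimal sketch, using a plain nn.Embedding as a stand-in for vLLM's VocabParallelEmbedding, of why issue #2875 raised at model construction. The removed lines in the first hunk below passed self.org_vocab_size to the embedding without ever assigning it, so building the model failed immediately. The class and argument names here are hypothetical, not the actual module.

import torch.nn as nn

class BrokenModel(nn.Module):
    # Mirrors the pre-fix constructor shape from the removed lines below.
    def __init__(self, vocab_size: int, hidden_size: int) -> None:
        super().__init__()
        self.vocab_size = vocab_size
        # self.org_vocab_size is never assigned, so the next line raises
        # AttributeError: 'BrokenModel' object has no attribute 'org_vocab_size'
        self.embed_tokens = nn.Embedding(self.org_vocab_size, hidden_size)

# BrokenModel(32000, 4096)  # raises AttributeError during __init__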

@@ -285,15 +285,19 @@ class MixtralModel(nn.Module):
         self,
         config: MixtralConfig,
         linear_method: Optional[LinearMethodBase] = None,
+        lora_config: Optional[LoRAConfig] = None,
     ) -> None:
         super().__init__()
         self.padding_idx = config.pad_token_id
-        self.vocab_size = config.vocab_size
+        lora_vocab = (lora_config.lora_extra_vocab_size *
+                      (lora_config.max_loras or 1)) if lora_config else 0
+        self.vocab_size = config.vocab_size + lora_vocab
+        self.org_vocab_size = config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(
-            config.vocab_size,
+            self.vocab_size,
             config.hidden_size,
-            org_num_embeddings=self.org_vocab_size,
+            org_num_embeddings=config.vocab_size,
         )
         self.layers = nn.ModuleList([
             MixtralDecoderLayer(config, linear_method=linear_method)
@@ -350,7 +354,9 @@ class MixtralForCausalLM(nn.Module):
         super().__init__()
         self.config = config
         self.linear_method = linear_method
-        self.model = MixtralModel(config, linear_method)
+        self.model = MixtralModel(config,
+                                  linear_method,
+                                  lora_config=lora_config)
         self.unpadded_vocab_size = config.vocab_size
         if lora_config:
             self.unpadded_vocab_size += lora_config.lora_extra_vocab_size
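
As a usage note on the aligned sizing logic: a small worked example of the arithmetic the new code performs. The concrete numbers (a 32000-token base vocab, lora_extra_vocab_size=256, max_loras=4) are illustrative assumptions, not values taken from this diff.

# Illustrative values; real ones come from the HF config and LoRAConfig.
vocab_size = 32000             # config.vocab_size
lora_extra_vocab_size = 256    # lora_config.lora_extra_vocab_size (assumed)
max_loras = 4                  # lora_config.max_loras (assumed)

lora_vocab = lora_extra_vocab_size * (max_loras or 1)
padded_vocab_size = vocab_size + lora_vocab    # 33024 rows allocated in embed_tokens
org_vocab_size = vocab_size                    # 32000, passed as org_num_embeddings
unpadded_vocab_size = vocab_size + lora_extra_vocab_size  # 32256, as in MixtralForCausalLM

print(lora_vocab, padded_vocab_size, unpadded_vocab_size)  # 1024 33024 32256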