[Misc] Delete LoRA-related redundancy code (#17841)

Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
Jee Jee Li 2025-05-08 21:02:21 +08:00 committed by GitHub
parent 015815fe01
commit a944f8ede7
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 3 additions and 17 deletions


@@ -504,9 +504,7 @@ class Grok1ForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
             self.unpadded_vocab_size,
             config.hidden_size,
             org_num_embeddings=config.vocab_size,
-            padding_size=DEFAULT_VOCAB_PADDING_SIZE
-            # We need bigger padding if using lora for kernel compatibility
-            if not lora_config else lora_config.lora_vocab_padding_size,
+            padding_size=DEFAULT_VOCAB_PADDING_SIZE,
             quant_config=quant_config,
             prefix=maybe_prefix(prefix, "lm_head"),
         )
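
Note on the two padding hunks (this one and the Phi4MM one below): padding_size tells ParallelLMHead to round the vocabulary up to a multiple of that value so the lm_head weight keeps a kernel-friendly shape; the deleted branch picked lora_config.lora_vocab_padding_size when LoRA was enabled, since (per the removed comment) LoRA kernels needed a bigger padding. A minimal standalone sketch of that rounding; the helper name and the sample values (64 as the default padding, 256 as a typical LoRA padding) are illustrative assumptions for this sketch, not values taken from the diff:

# Standalone sketch: how a vocab size gets rounded up to a multiple of
# padding_size. Helper name and sample values are illustrative assumptions.
def pad_vocab_size(vocab_size: int, pad_to: int) -> int:
    """Round vocab_size up to the next multiple of pad_to."""
    return ((vocab_size + pad_to - 1) // pad_to) * pad_to

print(pad_vocab_size(32000, 64))   # 32000 (already a multiple of 64)
print(pad_vocab_size(32003, 64))   # 32064
print(pad_vocab_size(32003, 256))  # 32256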


@@ -334,14 +334,6 @@ class DeciLMForCausalLM(nn.Module, SupportsLoRA, SupportsPP, HasNoOps):
     }

     # LoRA specific attributes
-    supported_lora_modules = [
-        "qkv_proj",
-        "o_proj",
-        "gate_up_proj",
-        "down_proj",
-        "embed_tokens",
-        "lm_head",
-    ]
     embedding_modules = {
         "embed_tokens": "input_embeddings",
         "lm_head": "output_embeddings",


@@ -955,11 +955,7 @@ class Phi4MMForCausalLM(nn.Module, SupportsLoRA, SupportsMultiModal):
             self.unpadded_vocab_size,
             config.hidden_size,
             org_num_embeddings=config.vocab_size,
-            padding_size=(
-                DEFAULT_VOCAB_PADDING_SIZE
-                # We need bigger padding if using lora for kernel
-                # compatibility
-                if not lora_config else lora_config.lora_vocab_padding_size),
+            padding_size=DEFAULT_VOCAB_PADDING_SIZE,
             quant_config=quant_config,
         )
         if config.tie_word_embeddings: