[Misc] Delete LoRA-related redundancy code (#17841)

Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
Jee Jee Li 2025-05-08 21:02:21 +08:00 committed by GitHub
parent 015815fe01
commit a944f8ede7
4 changed files with 3 additions and 17 deletions


@@ -344,7 +344,7 @@ class LoRAModelManager(AdapterModelManager):
 self.supported_lora_modules = get_supported_lora_modules(self.model)
 assert self.supported_lora_modules, "No supported LoRA modules found in"
-f"{self.model.__class__.__name__}."
+f" {self.model.__class__.__name__}."
 if lora_config.long_lora_scaling_factors:
     # We need to replace rotary emb layer to do batch computation
     # for long lora.
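
Note: the per-model `supported_lora_modules` lists become redundant because the manager above now derives the set from the loaded model via `get_supported_lora_modules(self.model)`; that is why the static list is deleted from `DeciLMForCausalLM` below. A minimal sketch of that kind of runtime discovery, assuming a hypothetical registry of LoRA-capable layer types (this is an illustration, not vLLM's actual helper):

import torch.nn as nn

# Hypothetical registry: layer classes that have a LoRA-wrapped counterpart.
# vLLM keeps an equivalent mapping internally; the contents here are assumed.
LORA_CAPABLE_LAYER_TYPES = (nn.Linear, nn.Embedding)

def discover_supported_lora_modules(model: nn.Module) -> list[str]:
    """Walk the model once and collect the leaf names (e.g. "qkv_proj",
    "lm_head") of every LoRA-capable submodule, so no per-model static
    list is needed. Illustrative sketch only."""
    supported: list[str] = []
    for name, module in model.named_modules():
        if isinstance(module, LORA_CAPABLE_LAYER_TYPES):
            leaf = name.rsplit(".", 1)[-1]
            if leaf and leaf not in supported:
                supported.append(leaf)
    return supported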


@@ -504,9 +504,7 @@ class Grok1ForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
     self.unpadded_vocab_size,
     config.hidden_size,
     org_num_embeddings=config.vocab_size,
-    padding_size=DEFAULT_VOCAB_PADDING_SIZE
-    # We need bigger padding if using lora for kernel compatibility
-    if not lora_config else lora_config.lora_vocab_padding_size,
+    padding_size=DEFAULT_VOCAB_PADDING_SIZE,
     quant_config=quant_config,
     prefix=maybe_prefix(prefix, "lm_head"),
 )
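
Both this hunk and the `Phi4MMForCausalLM` hunk below stop switching the `lm_head` padding to `lora_config.lora_vocab_padding_size` and always use `DEFAULT_VOCAB_PADDING_SIZE`. Vocab padding only rounds the output-projection size up to a multiple of the chosen alignment; a small worked sketch of that rounding, with illustrative numbers (the 64/256 values are assumptions, not necessarily vLLM's defaults):

def pad_vocab_size(vocab_size: int, pad_to: int) -> int:
    # Round vocab_size up to the nearest multiple of pad_to.
    return ((vocab_size + pad_to - 1) // pad_to) * pad_to

# A 32000-token vocab is already aligned to both example paddings:
print(pad_vocab_size(32000, 64))   # 32000
print(pad_vocab_size(32000, 256))  # 32000
# An unaligned vocab shows the difference the old LoRA-specific padding made:
print(pad_vocab_size(32011, 64))   # 32064
print(pad_vocab_size(32011, 256))  # 32256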


@@ -334,14 +334,6 @@ class DeciLMForCausalLM(nn.Module, SupportsLoRA, SupportsPP, HasNoOps):
 }
 # LoRA specific attributes
-supported_lora_modules = [
-    "qkv_proj",
-    "o_proj",
-    "gate_up_proj",
-    "down_proj",
-    "embed_tokens",
-    "lm_head",
-]
 embedding_modules = {
     "embed_tokens": "input_embeddings",
     "lm_head": "output_embeddings",


@@ -955,11 +955,7 @@ class Phi4MMForCausalLM(nn.Module, SupportsLoRA, SupportsMultiModal):
     self.unpadded_vocab_size,
     config.hidden_size,
     org_num_embeddings=config.vocab_size,
-    padding_size=(
-        DEFAULT_VOCAB_PADDING_SIZE
-        # We need bigger padding if using lora for kernel
-        # compatibility
-        if not lora_config else lora_config.lora_vocab_padding_size),
+    padding_size=DEFAULT_VOCAB_PADDING_SIZE,
     quant_config=quant_config,
 )
 if config.tie_word_embeddings: