Clean up unused padding_idx variables across many model definitions (#13240)

Signed-off-by: Tyler Michael Smith <tyler@neuralmagic.com>
Author: Tyler Michael Smith, 2025-03-04 16:27:00 -05:00 (committed by GitHub)
parent 288ca110f6
commit 4f5b059f14
30 changed files with 1 addition and 35 deletions
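
Every hunk below removes the same dead assignment: the constructor stored config.pad_token_id in self.padding_idx, but the attribute was never read afterwards and was never passed to the embedding layer. A minimal before/after sketch of the pattern, illustrative only, using nn.Embedding as a stand-in for vLLM's VocabParallelEmbedding and a made-up ToyConfig:

import torch.nn as nn


class ToyConfig:
    # Hypothetical config values, just to make the sketch runnable.
    pad_token_id = 0
    vocab_size = 32000
    hidden_size = 4096


class ToyModelBefore(nn.Module):
    """Shape of the constructors before this commit."""

    def __init__(self, config):
        super().__init__()
        self.padding_idx = config.pad_token_id  # assigned but never used again
        self.vocab_size = config.vocab_size
        # padding_idx was never forwarded to the embedding layer
        self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size)


class ToyModelAfter(nn.Module):
    """Same constructor after the cleanup: the dead assignment is gone."""

    def __init__(self, config):
        super().__init__()
        self.vocab_size = config.vocab_size
        self.embed_tokens = nn.Embedding(self.vocab_size, config.hidden_size)


model = ToyModelAfter(ToyConfig())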

@@ -370,7 +370,6 @@ class ArcticModel(nn.Module):
         cache_config = vllm_config.cache_config
         quant_config = vllm_config.quant_config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(
             self.vocab_size,

@@ -267,7 +267,6 @@ class BaiChuanModel(nn.Module):
         quant_config = vllm_config.quant_config
         self.config = config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(

@@ -725,7 +725,6 @@ class BartModel(nn.Module):
         self.config = config
-        self.padding_idx = config.pad_token_id
         lora_vocab = (lora_config.lora_extra_vocab_size *
                       (lora_config.max_loras or 1)) if lora_config else 0
         self.vocab_size = config.vocab_size + lora_vocab
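
Several of the constructors touched here (Bart above, and Exaone, Granite, Llama, and others below) also widen the vocabulary to reserve slots for LoRA-added tokens; that arithmetic is unaffected by removing padding_idx. A quick worked example with assumed numbers:

# Assumed illustrative values; in vLLM they come from vllm_config.lora_config.
lora_extra_vocab_size = 256  # extra token slots reserved per adapter
max_loras = 2                # maximum number of LoRA adapters served at once
base_vocab_size = 32000

lora_vocab = lora_extra_vocab_size * (max_loras or 1)  # 256 * 2 = 512
vocab_size = base_vocab_size + lora_vocab              # 32512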

@@ -851,7 +851,6 @@ class ChameleonModel(nn.Module):
         quant_config = vllm_config.quant_config
         self.config = config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(
             self.vocab_size,

@@ -339,7 +339,6 @@ class DeepseekModel(nn.Module):
         cache_config = vllm_config.cache_config
         quant_config = vllm_config.quant_config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(

@@ -570,7 +570,6 @@ class DeepseekV2Model(nn.Module):
         cache_config = vllm_config.cache_config
         quant_config = vllm_config.quant_config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         if get_pp_group().is_first_rank:

@@ -313,7 +313,6 @@ class ExaoneModel(nn.Module):
         lora_config = vllm_config.lora_config
         self.config = config
-        self.padding_idx = config.pad_token_id
         lora_vocab = ((lora_config.lora_extra_vocab_size *
                        (lora_config.max_loras or 1)) if lora_config else 0)
         self.vocab_size = config.vocab_size + lora_vocab

@@ -592,7 +592,6 @@ class Florence2LanguageModel(nn.Module):
         self.config = config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.shared = BartScaledWordEmbedding(self.vocab_size, config.d_model)

@@ -255,7 +255,6 @@ class FuyuForCausalLM(nn.Module, SupportsMultiModal, SupportsPP):
         self.config = config
         self.multimodal_config = multimodal_config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.text_config.vocab_size
         self.image_token_id = _IMAGE_TOKEN_ID
         self.image_feature_size = config.patch_size**2 * config.num_channels

@@ -260,7 +260,6 @@ class GraniteModel(nn.Module):
         lora_config = vllm_config.lora_config
         self.config = config
-        self.padding_idx = config.pad_token_id
         lora_vocab = (lora_config.lora_extra_vocab_size *
                       (lora_config.max_loras or 1)) if lora_config else 0
         self.vocab_size = config.vocab_size + lora_vocab

@@ -252,7 +252,6 @@ class GraniteMoeModel(nn.Module):
         quant_config = vllm_config.quant_config
         lora_config = vllm_config.lora_config
-        self.padding_idx = config.pad_token_id
         lora_vocab = (lora_config.lora_extra_vocab_size *
                       (lora_config.max_loras or 1)) if lora_config else 0
         self.vocab_size = config.vocab_size + lora_vocab

@@ -404,7 +404,6 @@ class Idefics3Model(nn.Module):
         quant_config = vllm_config.quant_config
         self.config = config
-        self.padding_idx = self.config.text_config.pad_token_id
         self.vocab_size = self.config.text_config.vocab_size
         self.vision_model = Idefics3VisionTransformer(
             config.vision_config,

@@ -261,7 +261,6 @@ class InternLM2Model(nn.Module):
         quant_config = vllm_config.quant_config
         self.config = config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.tok_embeddings = VocabParallelEmbedding(
             config.vocab_size,

@@ -271,7 +271,6 @@ class JambaModel(nn.Module):
         lora_config = vllm_config.lora_config
         self.config = config
-        self.padding_idx = config.pad_token_id
         lora_vocab = ((lora_config.lora_extra_vocab_size *
                        (lora_config.max_loras or 1)) if lora_config else 0)
         self.vocab_size = config.vocab_size + lora_vocab

@@ -302,7 +302,6 @@ class LlamaModel(nn.Module):
         self.config = config
         self.quant_config = quant_config
-        self.padding_idx = config.pad_token_id
         lora_vocab = (lora_config.lora_extra_vocab_size *
                       (lora_config.max_loras or 1)) if lora_config else 0
         self.vocab_size = config.vocab_size + lora_vocab

@@ -90,7 +90,6 @@ class MambaModel(nn.Module):
         is_lora_enabled = bool(lora_config)
         self.config = config
-        self.padding_idx = config.pad_token_id
         lora_vocab = ((lora_config.lora_extra_vocab_size *
                        (lora_config.max_loras or 1)) if lora_config else 0)
         self.vocab_size = config.vocab_size + lora_vocab

@@ -365,7 +365,6 @@ class MiniCPMModel(nn.Module):
         self.config = config
         self.cache_config = cache_config
         self.quant_config = quant_config
-        self.padding_idx = config.pad_token_id
         lora_vocab = (lora_config.lora_extra_vocab_size *
                       (lora_config.max_loras or 1)) if lora_config else 0
         self.vocab_size = config.vocab_size + lora_vocab

@@ -254,7 +254,6 @@ class MixtralModel(nn.Module):
         quant_config = vllm_config.quant_config
         lora_config = vllm_config.lora_config
-        self.padding_idx = config.pad_token_id
         lora_vocab = (lora_config.lora_extra_vocab_size *
                       (lora_config.max_loras or 1)) if lora_config else 0
         self.vocab_size = config.vocab_size + lora_vocab

@@ -302,7 +302,6 @@ class MixtralModel(nn.Module):
         cache_config = vllm_config.cache_config
         quant_config = vllm_config.quant_config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(

@@ -1031,7 +1031,6 @@ class MllamaTextModel(nn.Module):
         cache_config = vllm_config.cache_config
         quant_config = vllm_config.quant_config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(config.vocab_size + 8,
                                                    config.hidden_size)

@@ -300,7 +300,6 @@ class NemotronModel(nn.Module):
         lora_config = vllm_config.lora_config
         self.config = config
-        self.padding_idx = config.pad_token_id
         lora_vocab = (lora_config.lora_extra_vocab_size *
                       (lora_config.max_loras or 1)) if lora_config else 0
         self.vocab_size = config.vocab_size + lora_vocab

@@ -252,7 +252,6 @@ class OlmoeModel(nn.Module):
         cache_config = vllm_config.cache_config
         quant_config = vllm_config.quant_config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(

@@ -200,7 +200,6 @@ class OPTDecoder(nn.Module):
     ):
         super().__init__()
         self.config = config
-        self.padding_idx = config.pad_token_id
         self.max_target_positions = config.max_position_embeddings
         self.vocab_size = config.vocab_size

@@ -217,7 +217,6 @@ class OrionModel(nn.Module):
         quant_config = vllm_config.quant_config
         self.config = config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(
             config.vocab_size,

@@ -441,7 +441,6 @@ class PhiMoEModel(nn.Module):
         quant_config = vllm_config.quant_config
         lora_config = vllm_config.lora_config
-        self.padding_idx = config.pad_token_id
         lora_vocab = ((lora_config.lora_extra_vocab_size *
                        (lora_config.max_loras or 1)) if lora_config else 0)
         self.vocab_size = config.vocab_size + lora_vocab

@@ -284,7 +284,6 @@ class Qwen2Model(nn.Module):
         self.config = config
         self.quant_config = quant_config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         if get_pp_group().is_first_rank or (config.tie_word_embeddings

@@ -325,7 +325,6 @@ class Qwen2MoeModel(nn.Module):
         cache_config = vllm_config.cache_config
         quant_config = vllm_config.quant_config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
         self.embed_tokens = VocabParallelEmbedding(

@@ -269,7 +269,6 @@ class SolarModel(nn.Module):
         lora_config = vllm_config.lora_config
         self.config = config
-        self.padding_idx = config.pad_token_id
         lora_vocab = ((lora_config.lora_extra_vocab_size *
                        (lora_config.max_loras or 1)) if lora_config else 0)
         self.vocab_size = config.vocab_size + lora_vocab

@@ -212,10 +212,8 @@ class Starcoder2Model(nn.Module):
         quant_config = vllm_config.quant_config
         self.config = config
-        self.padding_idx = config.pad_token_id
         self.vocab_size = config.vocab_size
-        # TODO: consider padding_idx (currently removed)
         self.embed_tokens = VocabParallelEmbedding(
             config.vocab_size,
             config.hidden_size,

@@ -49,10 +49,7 @@ class WhisperAudioInputs(TypedDict):
 class WhisperPositionalEmbedding(nn.Embedding):

-    def __init__(self,
-                 num_positions: int,
-                 embedding_dim: int,
-                 padding_idx: Optional[int] = None):
+    def __init__(self, num_positions: int, embedding_dim: int):
         super().__init__(num_positions, embedding_dim)

     def forward(self, position_ids):

@@ -359,7 +356,6 @@ class WhisperEncoder(nn.Module):
         config = vllm_config.model_config.hf_config
         embed_dim = config.d_model
         self.num_mel_bins = config.num_mel_bins
-        self.padding_idx = config.pad_token_id
         self.max_source_positions = config.max_source_positions
         self.embed_scale = (math.sqrt(embed_dim)
                             if config.scale_embedding else 1.0)
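
The Whisper change is the only one that touches a call signature rather than a stray attribute: WhisperPositionalEmbedding.__init__ accepted a padding_idx argument but never forwarded it to nn.Embedding.__init__, so dropping the parameter leaves behavior unchanged. A rough sketch of the class after this commit, with a hypothetical usage; the forward body is assumed to be a plain weight lookup, and the sizes are made up:

import torch
import torch.nn as nn


class WhisperPositionalEmbedding(nn.Embedding):

    def __init__(self, num_positions: int, embedding_dim: int):
        super().__init__(num_positions, embedding_dim)

    def forward(self, position_ids):
        # Assumed body: index the learned position table directly.
        return self.weight[position_ids]


# Hypothetical usage with made-up sizes.
embed_positions = WhisperPositionalEmbedding(num_positions=448, embedding_dim=384)
positions = torch.arange(4)
print(embed_positions(positions).shape)  # torch.Size([4, 384])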