[Misc] Remove more get_input_embeddings_v0 (#25857)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Signed-off-by: yewentao256 <zhyanwentao@126.com>
Cyrus Leung 2025-09-29 16:03:37 +08:00 committed by yewentao256
parent b765adccd7
commit ea55445b8d
4 changed files with 7 additions and 83 deletions


@@ -45,8 +45,7 @@ from vllm.sequence import IntermediateTensors
 from .interfaces import (MultiModalEmbeddings, SupportsMultiModal,
                          SupportsTranscription)
 from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn,
-                    init_vllm_registered_model, maybe_prefix,
-                    merge_multimodal_embeddings)
+                    init_vllm_registered_model, maybe_prefix)

 logger = init_logger(__name__)


@@ -41,7 +41,6 @@ from vllm.multimodal.processing import (BaseMultiModalProcessor,
 from vllm.multimodal.profiling import BaseDummyInputsBuilder
 from vllm.platforms import _Backend
 from vllm.sequence import IntermediateTensors
-from vllm.transformers_utils.config import uses_mrope
 from vllm.utils import is_list_of
 from vllm.utils.tensor_schema import TensorSchema, TensorShape
@@ -50,7 +49,7 @@ from .interfaces import (MultiModalEmbeddings, SupportsLoRA,
 from .siglip import SiglipMLP
 from .utils import (AutoWeightsLoader, WeightsMapper,
                     init_vllm_registered_model, is_pp_missing_parameter,
-                    maybe_prefix, merge_multimodal_embeddings)
+                    maybe_prefix)
 from .vision import get_vit_attn_backend

 logger = init_logger(__name__)
@@ -1450,32 +1449,6 @@ class BaseKeyeModule(nn.Module):
             multimodal_embeddings += video_embeddings
         return multimodal_embeddings

-    def get_input_embeddings_v0(
-        self,
-        input_ids: torch.Tensor,
-        image_input: Optional[Any] = None,
-        video_input: Optional[Any] = None,
-    ) -> torch.Tensor:
-        inputs_embeds = self.get_input_embeddings(input_ids)
-        if image_input is not None:
-            image_embeds = self._process_image_input(image_input)
-            inputs_embeds = merge_multimodal_embeddings(
-                input_ids,
-                inputs_embeds,
-                image_embeds,
-                placeholder_token_id=self.config.image_token_id,
-            )
-
-        if video_input is not None:
-            video_embeds = self._process_video_input(video_input)
-            inputs_embeds = merge_multimodal_embeddings(
-                input_ids,
-                inputs_embeds,
-                video_embeds,
-                placeholder_token_id=self.config.video_token_id,
-            )
-        return inputs_embeds
-
     def forward(
         self,
         input_ids: torch.Tensor,
@@ -1500,23 +1473,6 @@ class BaseKeyeModule(nn.Module):
         if intermediate_tensors is not None:
             inputs_embeds = None
-        elif inputs_embeds is None:
-            image_input = self._parse_and_validate_image_input(**kwargs)
-            video_input = self._parse_and_validate_video_input(**kwargs)
-
-            if image_input is None and video_input is None:
-                inputs_embeds = None
-            else:
-                if uses_mrope(self.config):
-                    assert positions.ndim == 2 and positions.size(0) == 3, (
-                        "multimodal section rotary embedding requires "
-                        f"(3, seq_len) positions, but got {positions.size()}")
-                inputs_embeds = self.get_input_embeddings_v0(
-                    input_ids,
-                    image_input=image_input,
-                    video_input=video_input,
-                )
-                input_ids = None

         hidden_states = self.language_model.model(
             input_ids=input_ids,
             positions=positions,
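
For context, the V1 code path that makes these deletions safe resolves multimodal inputs before forward() runs: the model keeps get_multimodal_embeddings (unchanged above) plus the SupportsMultiModal.get_input_embeddings interface method, and the runner hands forward() the already-merged inputs_embeds. A minimal sketch of that calling convention follows; build_inputs_embeds and mm_kwargs are hypothetical names, and this illustrates the flow rather than the actual vLLM runner code.

import torch

# Hypothetical sketch: how a V1-style caller obtains inputs_embeds up
# front, making the removed get_input_embeddings_v0 fallback unnecessary.
def build_inputs_embeds(model, input_ids: torch.Tensor, **mm_kwargs):
    # mm_kwargs: parsed image/video tensors from the request (assumed).
    mm_embeds = model.get_multimodal_embeddings(**mm_kwargs)
    # The interface method scatters mm_embeds into the text embeddings
    # at the placeholder token positions.
    return model.get_input_embeddings(input_ids, mm_embeds)

forward() then receives inputs_embeds directly, so the uses_mrope check and the merge fallback deleted above have no remaining caller.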


@@ -44,13 +44,7 @@ from vllm.utils.tensor_schema import TensorSchema, TensorShape
 from .idefics2_vision_model import Idefics2VisionTransformer
 from .interfaces import MultiModalEmbeddings, SupportsLoRA, SupportsMultiModal
 from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn,
-                    init_vllm_registered_model, maybe_prefix,
-                    merge_multimodal_embeddings)
-
-# <|endoftext10|> (see vocab.json in hf model)
-_IMAGE_PLACEHOLDER_TOKEN_ID = 200010
-# <|endoftext11|>
-_AUDIO_PLACEHOLDER_TOKEN_ID = 200011
+                    init_vllm_registered_model, maybe_prefix)

 _AUDIO_MAX_SOUNDFILE_SIZE = 241_000
@@ -1371,35 +1365,6 @@ class Phi4MultimodalForCausalLM(nn.Module, SupportsLoRA, SupportsMultiModal):
         return multimodal_embeddings

-    def get_input_embeddings_v0(
-        self,
-        input_ids: torch.Tensor,
-        image_input: Optional[Phi4MMImagePixelInputs] = None,
-        audio_input: Optional[Phi4MMAudioFeatureInputs] = None,
-    ) -> torch.Tensor:
-        audio_projection_mode = 'speech'
-        inputs_embeds = self.get_input_embeddings(input_ids)
-        if image_input is not None:
-            image_embeds = self._process_image_input(image_input)
-            inputs_embeds = merge_multimodal_embeddings(
-                input_ids,
-                inputs_embeds,
-                image_embeds,
-                placeholder_token_id=_IMAGE_PLACEHOLDER_TOKEN_ID,
-            )
-            audio_projection_mode = 'vision'
-
-        if audio_input is not None:
-            audio_embeds = self._process_audio_input(
-                audio_input, audio_projection_mode=audio_projection_mode)
-            inputs_embeds = merge_multimodal_embeddings(
-                input_ids,
-                inputs_embeds,
-                audio_embeds,
-                placeholder_token_id=_AUDIO_PLACEHOLDER_TOKEN_ID,
-            )
-        return inputs_embeds
-
     def forward(
         self,
         input_ids: torch.Tensor,
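
Both removed helpers reduce to the same operation: scatter each modality's embeddings into the text embeddings wherever the corresponding placeholder token sits. A self-contained sketch of that scatter (an illustrative approximation, not the surviving _merge_multimodal_embeddings implementation):

import torch

def scatter_mm_embeddings(input_ids: torch.Tensor,
                          inputs_embeds: torch.Tensor,
                          mm_embeds: torch.Tensor,
                          placeholder_token_id: int) -> torch.Tensor:
    # Each position holding the placeholder token receives one multimodal
    # embedding, in order; all other positions keep their text embedding.
    mask = input_ids == placeholder_token_id
    assert int(mask.sum()) == mm_embeds.shape[0], "placeholder count mismatch"
    out = inputs_embeds.clone()
    out[mask] = mm_embeds.to(dtype=inputs_embeds.dtype)
    return out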


@@ -10,6 +10,7 @@ import torch
 import torch.nn as nn
 from torch.func import functional_call
 from transformers import PretrainedConfig
+from typing_extensions import deprecated

 import vllm.envs as envs
 from vllm.config import VllmConfig
@@ -439,6 +440,9 @@ def _merge_multimodal_embeddings(
     return inputs_embeds

+@deprecated("`merge_multimodal_embeddings` has been replaced with "
+            "`SupportsMultiModal.get_input_embeddings` and will be "
+            "removed in v0.12.")
 def merge_multimodal_embeddings(
     input_ids: torch.Tensor,
     inputs_embeds: torch.Tensor,
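
typing_extensions.deprecated (PEP 702) flags call sites for type checkers and also emits a DeprecationWarning at runtime, so existing out-of-tree callers of merge_multimodal_embeddings keep working until the v0.12 removal. A standalone demonstration, using hypothetical function names:

import warnings

from typing_extensions import deprecated

@deprecated("old_merge() has been replaced with new_merge() and will be "
            "removed in a future release.")
def old_merge() -> None:
    pass

# Calling the decorated function emits a DeprecationWarning at runtime.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    old_merge()

assert any(issubclass(w.category, DeprecationWarning) for w in caught)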