[Docs] add __init__.py to vllm/model_executor/layers/quantization/compressed_tensors/transform (#24974)

Signed-off-by: samzong <samzong.lu@gmail.com>
samzong authored 2025-09-20 02:32:27 +08:00, committed by GitHub
parent 2506ce5189
commit 138f0d1e75
5 changed files with 5 additions and 4 deletions
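For context on the title: the API reference is generated by importing packages, and a directory without an `__init__.py` is presumably skipped by that walk, which would leave the `transform` submodule out of the generated docs. A quick sanity check one might run after this change; the module path is taken from the commit title:

    import importlib

    # With the new __init__.py in place the directory is a regular package:
    # __file__ points at the marker file (it would be None for an implicit
    # namespace package).
    mod = importlib.import_module(
        "vllm.model_executor.layers.quantization.compressed_tensors.transform"
    )
    print(mod.__file__)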

vllm/model_executor/models/blip2.py

@@ -680,7 +680,7 @@ class Blip2ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP,
                 batch.

         Info:
-            [Blip2ImageInputs][]
+            [`Blip2ImageInputs`][vllm.model_executor.models.blip2.Blip2ImageInputs]
         """
         if intermediate_tensors is not None:

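The fix applied in each file follows the same mkdocstrings convention: a bare `[Name][]` cross-reference only resolves when `Name` is unambiguous to the docs build, while the fully qualified `[title][full.dotted.path]` form resolves from any page. A minimal sketch of the two styles, using hypothetical names rather than the actual vLLM classes:

    class ExampleModel:
        """Toy class illustrating the cross-reference styles (names are made up).

        Shorthand form, which breaks when the target is not in scope:

            [ExampleImageInputs][]

        Fully qualified form used throughout this commit, which resolves
        from any documentation page:

            [`ExampleImageInputs`][vllm.model_executor.models.example.ExampleImageInputs]
        """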
vllm/model_executor/models/llava.py

@@ -737,7 +737,7 @@ class LlavaForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP):
             inputs_embeds: Optional tensor of input embeddings.

         Info:
-            [LlavaImageInputs][]
+            [`LlavaImageInputs`][vllm.model_executor.models.llava.LlavaImageInputs]
         """
         if intermediate_tensors is not None:
             inputs_embeds = None

vllm/model_executor/models/llava_next.py

@@ -527,7 +527,8 @@ class LlavaNextForConditionalGeneration(nn.Module, SupportsMultiModal,
         Unlike in LLaVA-1.5, the number of image tokens inputted to the language
         model depends on the original size of the input image. Including the
         original image token in the input, the required number of image tokens
-        is given by [get_llava_next_image_feature_size][].
+        is given by [`LlavaNextProcessingInfo.get_num_image_tokens`][vllm.\
+        model_executor.models.llava_next.LlavaNextProcessingInfo.get_num_image_tokens].

         This way, the `positions` and `attn_metadata` are consistent
         with the `input_ids`.
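The paragraph above explains that LLaVA-NeXT's image-token count depends on the input resolution. As a rough illustration of that dependence (a simplified sketch, not vLLM's actual `get_num_image_tokens`; the 336 px tile size and 576 tokens per tile are assumed values for a CLIP ViT-L/14 tower, and best-fit grid selection and unpadding/newline tokens are omitted):

    import math

    def approx_llava_next_image_tokens(width: int, height: int) -> int:
        # Illustrative only: LLaVA-NeXT ("anyres") tiles the input into
        # fixed-size sub-images, so a larger image yields more tiles and
        # therefore more image tokens; a low-resolution overview tile is
        # always included as well.
        tile_px = 336                       # assumed vision tower input size
        tokens_per_tile = (336 // 14) ** 2  # 576 for a ViT-L/14 tower
        tiles = math.ceil(width / tile_px) * math.ceil(height / tile_px)
        return (tiles + 1) * tokens_per_tile  # +1 for the overview tile

    # E.g. a 672x672 input -> (4 + 1) * 576 = 2880 tokens,
    # while 336x336 -> (1 + 1) * 576 = 1152.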
@@ -540,7 +541,7 @@
             inputs_embeds: Optional tensor of input embeddings.

         Info:
-            [LlavaNextImageInputs][]
+            [`LlavaNextImageInputs`][vllm.model_executor.models.llava_next.LlavaNextImageInputs]
         """
         if intermediate_tensors is not None:
             inputs_embeds = None