PaliGemma 2 support (#11142)

Jani Monoses 2024-12-13 09:40:07 +02:00 committed by GitHub
parent be39e3cd18
commit 7cd7409142
3 changed files with 25 additions and 3 deletions


@@ -664,9 +664,9 @@ Text Generation (``--task generate``)
     - ✅︎
     - ✅︎
   * - :code:`PaliGemmaForConditionalGeneration`
-    - PaliGemma
+    - PaliGemma, PaliGemma 2
     - T + I\ :sup:`E`
-    - :code:`google/paligemma-3b-pt-224`, :code:`google/paligemma-3b-mix-224`, etc.
+    - :code:`google/paligemma-3b-pt-224`, :code:`google/paligemma-3b-mix-224`, :code:`google/paligemma2-3b-ft-docci-448`, etc.
     -
     - ✅︎
     -


@@ -137,6 +137,18 @@ def run_paligemma(question: str, modality: str):
     return llm, prompt, stop_token_ids
 
 
+# PaliGemma 2
+def run_paligemma2(question: str, modality: str):
+    assert modality == "image"
+    # PaliGemma 2 has special prompt format for VQA
+    prompt = "caption en"
+    llm = LLM(model="google/paligemma2-3b-ft-docci-448",
+              mm_cache_preprocessor=args.mm_cache_preprocessor)
+    stop_token_ids = None
+    return llm, prompt, stop_token_ids
+
+
 # Chameleon
 def run_chameleon(question: str, modality: str):
     assert modality == "image"
@@ -473,6 +485,7 @@ model_example_map = {
     "fuyu": run_fuyu,
     "phi3_v": run_phi3v,
     "paligemma": run_paligemma,
+    "paligemma2": run_paligemma2,
     "chameleon": run_chameleon,
     "minicpmv": run_minicpmv,
     "blip-2": run_blip2,


@@ -105,6 +105,11 @@ def input_processor_for_paligemma(ctx: InputContext,
         orig_prompt_ids.remove(hf_config.image_token_index)
     new_prompt = f"{image_token_str_pad}{bos_token}{orig_prompt}\n"
 
+    # The PaliGemma 2 tokenizer does not include a starting BOS token
+    if orig_prompt_ids[0] != hf_config.bos_token_id:
+        orig_prompt_ids = [hf_config.bos_token_id] + orig_prompt_ids
+
     new_token_ids = image_token_ids_pad + orig_prompt_ids + [108]  # newline
 
     # NOTE: Create a defensive copy of the original inputs
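The effect of the BOS fix-up is easiest to see at the token level. A small illustration with placeholder IDs (the values below are made up; real Gemma vocabularies use different IDs and hundreds of image tokens):

    # Sequence layout built by input_processor_for_paligemma,
    # with hypothetical token IDs.
    image_token_index = 9000  # hypothetical <image> token id
    bos_token_id = 2          # hypothetical BOS id
    orig_prompt_ids = [5000, 5001]  # hypothetical "caption en" tokens

    # PaliGemma 2's tokenizer omits BOS, so prepend it when missing.
    if orig_prompt_ids[0] != bos_token_id:
        orig_prompt_ids = [bos_token_id] + orig_prompt_ids

    image_token_ids_pad = [image_token_index] * 4  # real models use many more
    new_token_ids = image_token_ids_pad + orig_prompt_ids + [108]  # 108 = "\n"
    # -> [9000, 9000, 9000, 9000, 2, 5000, 5001, 108]

This leaves PaliGemma 1 inputs unchanged: its tokenizer already emits BOS first, so the guard is a no-op there.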
@@ -149,7 +154,11 @@ class PaliGemmaForConditionalGeneration(nn.Module, SupportsMultiModal,
             projection_dim=config.vision_config.projection_dim)
 
         self.quant_config = quant_config
-        config.text_config.architectures = ["GemmaForCausalLM"]
+
+        if config.text_config.model_type == "gemma":
+            config.text_config.architectures = ["GemmaForCausalLM"]
+        else:
+            config.text_config.architectures = ["Gemma2ForCausalLM"]
         self.language_model = init_vllm_registered_model(
             vllm_config=vllm_config,
             hf_config=config.text_config,
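The constructor change works because the checkpoint config already distinguishes the two generations: PaliGemma ships a Gemma text_config (model_type "gemma") while PaliGemma 2 ships a Gemma 2 one, so the right backbone can be picked without any new flags. A quick way to confirm this against the Hub (hypothetical verification snippet, not part of the commit; assumes access to the gated checkpoints):

    # Inspect text_config.model_type to see which backbone vLLM will pick.
    from transformers import AutoConfig

    for name in ("google/paligemma-3b-pt-224",
                 "google/paligemma2-3b-ft-docci-448"):
        cfg = AutoConfig.from_pretrained(name)
        print(name, "->", cfg.text_config.model_type)
    # Expected: "gemma" for PaliGemma, "gemma2" for PaliGemma 2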