mirror of
https://git.datalinker.icu/vllm-project/vllm.git
synced 2026-03-25 00:01:25 +08:00
fix mm_hash: scope multimodal cache keys to the LoRA adapter
Signed-off-by: bk-201 <joy25810@foxmail.com>
This commit is contained in:
parent
8aedddd546
commit
f3a55ff958
@ -458,6 +458,28 @@ class InputProcessor:
|
||||
else:
|
||||
mm_uuids = None
|
||||
|
||||
# When enable_tower_connector_lora is True, multi-modal embeddings
|
||||
# vary depending on the LoRA request. Therefore, the mm_hash must be
|
||||
# generated based on the LoRA request to prevent incorrect cache hits.
|
||||
lora_config = self.lora_config
|
||||
if (
|
||||
mm_uuids
|
||||
and lora_request
|
||||
and lora_config
|
||||
and lora_config.enable_tower_connector_lora
|
||||
):
|
||||
|
||||
def add_mm_lora_prefix(val):
    # Scope a multimodal UUID (or each UUID in a list of them) to the
    # active LoRA adapter by prepending the adapter's name, so cache
    # keys cannot collide across adapters. None entries are left as-is.
    prefix = lora_request.lora_name
    if not isinstance(val, list):
        return f"{prefix}:{val}"
    return [None if v is None else f"{prefix}:{v}" for v in val]
|
||||
|
||||
mm_uuids = {k: add_mm_lora_prefix(v) for k, v in mm_uuids.items()}
|
||||
|
||||
# Process inputs, which includes:
|
||||
# 1. Tokenize text prompt, with LoRA request if one exists.
|
||||
# 2. For multimodal models with a merged preprocessor, preprocess
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user