[Misc] Adjust max_position_embeddings for LoRA compatibility (#8957)

commit 1cabfcefb6 (parent be76e5aabf)
Author: Jee Jee Li
Date: 2024-09-30 20:57:39 +08:00
Committed by: GitHub


@@ -1037,9 +1037,17 @@ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
             assert supports_lora(
                 self.model
             ), f"{self.model.__class__.__name__} does not support LoRA yet."
 
             if supports_multimodal(self.model):
                 logger.warning("Regarding multimodal models, vLLM currently "
                                "only supports adding LoRA to language model.")
+            # It's necessary to distinguish between the max_position_embeddings
+            # of VLMs and LLMs.
+            if hasattr(self.model.config, "max_position_embeddings"):
+                max_pos_embeddings = self.model.config.max_position_embeddings
+            else:
+                max_pos_embeddings = (
+                    self.model.config.text_config.max_position_embeddings)
+
             self.lora_manager = LRUCacheWorkerLoRAManager(
                 self.scheduler_config.max_num_seqs,
@@ -1049,8 +1057,7 @@ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
                 self.device,
                 self.model.embedding_modules,
                 self.model.embedding_padding_modules,
-                max_position_embeddings=self.model.config.
-                max_position_embeddings,
+                max_position_embeddings=max_pos_embeddings,
             )
             self.model = self.lora_manager.create_lora_manager(self.model)
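
The fallback introduced above can be exercised in isolation. Below is a minimal sketch of the same lookup logic; the SimpleNamespace objects are hypothetical stand-ins for an LLM config and a VLM config (which nests the value under text_config), not real vLLM or HuggingFace types:

    from types import SimpleNamespace

    def get_max_pos_embeddings(config):
        # LLM configs expose max_position_embeddings at the top level;
        # VLM configs typically nest it under text_config.
        if hasattr(config, "max_position_embeddings"):
            return config.max_position_embeddings
        return config.text_config.max_position_embeddings

    # Hypothetical stand-in configs for illustration only.
    llm_config = SimpleNamespace(max_position_embeddings=4096)
    vlm_config = SimpleNamespace(
        text_config=SimpleNamespace(max_position_embeddings=32768))

    assert get_max_pos_embeddings(llm_config) == 4096
    assert get_max_pos_embeddings(vlm_config) == 32768

Checking the live config object with hasattr, rather than branching on the model class, keeps the lookup working across architectures without maintaining a list of VLM types.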