Mirror of https://git.datalinker.icu/vllm-project/vllm.git (synced 2025-12-10 04:05:01 +08:00)
[Misc] Adjust max_position_embeddings for LoRA compatibility (#8957)
commit 1cabfcefb6
parent be76e5aabf
@@ -1037,9 +1037,17 @@ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
             assert supports_lora(
                 self.model
             ), f"{self.model.__class__.__name__} does not support LoRA yet."

             if supports_multimodal(self.model):
                 logger.warning("Regarding multimodal models, vLLM currently "
                                "only supports adding LoRA to language model.")
+            # It's necessary to distinguish between the max_position_embeddings
+            # of VLMs and LLMs.
+            if hasattr(self.model.config, "max_position_embeddings"):
+                max_pos_embeddings = self.model.config.max_position_embeddings
+            else:
+                max_pos_embeddings = (
+                    self.model.config.text_config.max_position_embeddings)
+
             self.lora_manager = LRUCacheWorkerLoRAManager(
                 self.scheduler_config.max_num_seqs,
@@ -1049,8 +1057,7 @@ class GPUModelRunnerBase(ModelRunnerBase[TModelInputForGPU]):
                 self.device,
                 self.model.embedding_modules,
                 self.model.embedding_padding_modules,
-                max_position_embeddings=self.model.config.
-                max_position_embeddings,
+                max_position_embeddings=max_pos_embeddings,
             )
             self.model = self.lora_manager.create_lora_manager(self.model)

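The point of the change: the old code passed self.model.config.max_position_embeddings directly to LRUCacheWorkerLoRAManager, but multimodal models (VLMs) keep the language model's limit nested under config.text_config rather than on the top-level config, so that attribute access fails. The commit resolves the value first and passes the result. Below is a minimal, self-contained sketch of that fallback; the helper name resolve_max_position_embeddings and the SimpleNamespace configs are illustrative stand-ins, not part of vLLM's API.

    from types import SimpleNamespace


    def resolve_max_position_embeddings(config) -> int:
        """Prefer the top-level value (text-only LLMs); fall back to the
        nested text_config used by multimodal models (VLMs)."""
        if hasattr(config, "max_position_embeddings"):
            return config.max_position_embeddings
        return config.text_config.max_position_embeddings


    # Text-only LLM: the limit lives on the top-level config.
    llm_config = SimpleNamespace(max_position_embeddings=4096)

    # VLM: the language model's limit is nested under text_config.
    vlm_config = SimpleNamespace(
        text_config=SimpleNamespace(max_position_embeddings=8192))

    assert resolve_max_position_embeddings(llm_config) == 4096
    assert resolve_max_position_embeddings(vlm_config) == 8192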