diff --git a/tests/lora/test_qwenvl.py b/tests/lora/test_qwenvl.py
index ec9990fee0f67..dfddbbd168894 100644
--- a/tests/lora/test_qwenvl.py
+++ b/tests/lora/test_qwenvl.py
@@ -15,6 +15,7 @@ class TestConfig:
     max_num_seqs: int = 2
     max_loras: int = 2
     max_lora_rank: int = 32
+    enable_mm_lora: bool = True
     max_model_len: int = 8192
     gpu_memory_utilization: float = 0.85
     mm_processor_kwargs: dict[str, int] | None = None
@@ -49,6 +50,7 @@ class Qwen2VLTester:
             enable_lora=True,
             max_loras=self.config.max_loras,
             max_lora_rank=self.config.max_lora_rank,
+            enable_mm_lora=self.config.enable_mm_lora,
             trust_remote_code=True,
             gpu_memory_utilization=self.config.gpu_memory_utilization,
             mm_processor_kwargs=self.config.mm_processor_kwargs,
diff --git a/vllm/config/lora.py b/vllm/config/lora.py
index 6a8fd6359aadd..23a46b9632cd1 100644
--- a/vllm/config/lora.py
+++ b/vllm/config/lora.py
@@ -55,6 +55,9 @@ class LoRAConfig:
     per prompt. When run in offline mode, the lora IDs for n modalities
     will be automatically assigned to 1-n with the names of the modalities
     in alphabetic order."""
+    enable_mm_lora: bool = False
+    """If `True`, LoRA support for multimodal models will be enabled. Currently,
+    only the qwenvl series models support this feature. The default is False."""
 
     def compute_hash(self) -> str:
         """
diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py
index fd07cded7bc51..978f553d7b8a1 100644
--- a/vllm/engine/arg_utils.py
+++ b/vllm/engine/arg_utils.py
@@ -484,6 +484,7 @@ class EngineArgs:
     fully_sharded_loras: bool = LoRAConfig.fully_sharded_loras
     max_cpu_loras: int | None = LoRAConfig.max_cpu_loras
     lora_dtype: str | torch.dtype | None = LoRAConfig.lora_dtype
+    enable_mm_lora: bool = LoRAConfig.enable_mm_lora
 
     ray_workers_use_nsight: bool = ParallelConfig.ray_workers_use_nsight
     num_gpu_blocks_override: int | None = CacheConfig.num_gpu_blocks_override
@@ -985,6 +986,11 @@ class EngineArgs:
             "--lora-dtype",
             **lora_kwargs["lora_dtype"],
         )
+        lora_group.add_argument(
+            "--enable-mm-lora",
+            action=argparse.BooleanOptionalAction,
+            **lora_kwargs["enable_mm_lora"],
+        )
         lora_group.add_argument("--max-cpu-loras", **lora_kwargs["max_cpu_loras"])
         lora_group.add_argument(
             "--fully-sharded-loras", **lora_kwargs["fully_sharded_loras"]
@@ -1660,6 +1666,7 @@ class EngineArgs:
             default_mm_loras=self.default_mm_loras,
             fully_sharded_loras=self.fully_sharded_loras,
             lora_dtype=self.lora_dtype,
+            enable_mm_lora=self.enable_mm_lora,
             max_cpu_loras=self.max_cpu_loras
             if self.max_cpu_loras and self.max_cpu_loras > 0
             else None,
diff --git a/vllm/lora/models.py b/vllm/lora/models.py
index e8b8ddb6841e4..dc6b0790f36d8 100644
--- a/vllm/lora/models.py
+++ b/vllm/lora/models.py
@@ -367,10 +367,11 @@ class LoRAModelManager:
         if self.supports_mm:
             model_config: ModelConfig = vllm_config.model_config
             self.mm_mapping: MultiModelKeys = self.model.get_mm_mapping()
-            self.info = MULTIMODAL_REGISTRY.create_processor(model_config).info
-            self.supports_mm_lora = self.supports_mm and hasattr(
-                self.info, "get_num_mm_encoder_tokens"
-            )
+            if self.lora_config.enable_mm_lora:
+                self.info = MULTIMODAL_REGISTRY.create_processor(model_config).info
+                self.supports_mm_lora = self.supports_mm and hasattr(
+                    self.info, "get_num_mm_encoder_tokens"
+                )
 
         if not self.supports_mm_lora:
             return
@@ -380,7 +381,6 @@ class LoRAModelManager:
             vllm_config.scheduler_config,
             MULTIMODAL_REGISTRY,
         )
-        self.mm_mapping: MultiModelKeys = self.model.get_mm_mapping()
         limit_per_prompt: int = max(self.info.get_allowed_mm_limits().values())
 
         # For vision tower
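
Usage sketch (not part of the diff): assuming the `LLM` entrypoint forwards engine keyword arguments to `EngineArgs` the same way the test above does, the new `enable_mm_lora` flag could be exercised offline roughly as follows. The checkpoint name, LoRA path, and prompt are placeholders, not part of this change.

```python
# Hedged sketch of exercising the new enable_mm_lora flag offline.
# The model checkpoint, LoRA path, and prompt below are placeholders.
from vllm import LLM
from vllm.lora.request import LoRARequest

llm = LLM(
    model="Qwen/Qwen2-VL-7B-Instruct",  # any Qwen-VL checkpoint with a matching LoRA
    enable_lora=True,
    max_loras=2,
    max_lora_rank=32,
    enable_mm_lora=True,  # new flag introduced by this diff; defaults to False
    trust_remote_code=True,
)

outputs = llm.generate(
    "Describe the image.",
    lora_request=LoRARequest("qwenvl-lora", 1, "/path/to/qwenvl-lora"),
)
print(outputs[0].outputs[0].text)
```

The server-side equivalent would be the `--enable-mm-lora` / `--no-enable-mm-lora` pair registered via `argparse.BooleanOptionalAction` in the `vllm/engine/arg_utils.py` hunk above.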