rename enable_mm_lora argument to enable_tower_connector_lora

Signed-off-by: bk-201 <joy25810@foxmail.com>
bk-201 2025-12-11 06:46:53 +00:00
parent e2ea025ee3
commit 27448490f1
4 changed files with 8 additions and 8 deletions

View File

@@ -15,7 +15,7 @@ class TestConfig:
     max_num_seqs: int = 2
     max_loras: int = 2
     max_lora_rank: int = 32
-    enable_mm_lora: bool = True
+    enable_tower_connector_lora: bool = True
     max_model_len: int = 8192
     gpu_memory_utilization: float = 0.85
     mm_processor_kwargs: dict[str, int] | None = None
@@ -50,7 +50,7 @@ class Qwen2VLTester:
             enable_lora=True,
             max_loras=self.config.max_loras,
             max_lora_rank=self.config.max_lora_rank,
-            enable_mm_lora=self.config.enable_mm_lora,
+            enable_tower_connector_lora=self.config.enable_tower_connector_lora,
             trust_remote_code=True,
             gpu_memory_utilization=self.config.gpu_memory_utilization,
             mm_processor_kwargs=self.config.mm_processor_kwargs,
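For orientation, a minimal offline-usage sketch that exercises the renamed keyword from this test. Only the enable_tower_connector_lora keyword and the neighbouring LoRA arguments come from the diff; the model name, the adapter path, and the LoRARequest call are illustrative assumptions.

from vllm import LLM
from vllm.lora.request import LoRARequest

# Hypothetical offline usage; model and adapter paths are placeholders.
llm = LLM(
    model="Qwen/Qwen2-VL-2B-Instruct",  # assumed model choice, not from the diff
    enable_lora=True,
    max_loras=2,
    max_lora_rank=32,
    enable_tower_connector_lora=True,  # renamed from enable_mm_lora in this commit
    trust_remote_code=True,
)
outputs = llm.generate(
    "Describe the image.",
    lora_request=LoRARequest("vision_adapter", 1, "/path/to/adapter"),  # placeholder adapter
)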

View File

@@ -55,7 +55,7 @@ class LoRAConfig:
     per prompt. When run in offline mode, the lora IDs for n modalities
     will be automatically assigned to 1-n with the names of the modalities
     in alphabetic order."""
-    enable_mm_lora: bool = False
+    enable_tower_connector_lora: bool = False
     """If `True`, LoRA support for multimodal models will be enabled. Currently,
     only the qwenvl series models support this feature. The default is False."""
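A short sketch of setting the renamed config field directly. The import path is assumed to match upstream vLLM (vllm.config); the field names come from the hunk above.

from vllm.config import LoRAConfig

# enable_tower_connector_lora defaults to False, per the hunk above.
lora_config = LoRAConfig(
    max_loras=2,
    max_lora_rank=32,
    enable_tower_connector_lora=True,  # opt in to LoRA on the vision tower / connector
)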

View File

@@ -486,7 +486,7 @@ class EngineArgs:
     fully_sharded_loras: bool = LoRAConfig.fully_sharded_loras
     max_cpu_loras: int | None = LoRAConfig.max_cpu_loras
     lora_dtype: str | torch.dtype | None = LoRAConfig.lora_dtype
-    enable_mm_lora: bool = LoRAConfig.enable_mm_lora
+    enable_tower_connector_lora: bool = LoRAConfig.enable_tower_connector_lora
     ray_workers_use_nsight: bool = ParallelConfig.ray_workers_use_nsight
     num_gpu_blocks_override: int | None = CacheConfig.num_gpu_blocks_override
@@ -1008,8 +1008,8 @@ class EngineArgs:
             **lora_kwargs["lora_dtype"],
         )
         lora_group.add_argument(
-            "--enable-mm-lora",
-            **lora_kwargs["enable_mm_lora"],
+            "--enable-tower-connector-lora",
+            **lora_kwargs["enable_tower_connector_lora"],
         )
         lora_group.add_argument("--max-cpu-loras", **lora_kwargs["max_cpu_loras"])
         lora_group.add_argument(
@@ -1687,7 +1687,7 @@ class EngineArgs:
             default_mm_loras=self.default_mm_loras,
             fully_sharded_loras=self.fully_sharded_loras,
             lora_dtype=self.lora_dtype,
-            enable_mm_lora=self.enable_mm_lora,
+            enable_tower_connector_lora=self.enable_tower_connector_lora,
             max_cpu_loras=self.max_cpu_loras
             if self.max_cpu_loras and self.max_cpu_loras > 0
             else None,
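The same switch is reachable through the engine arguments; the sketch below assumes the upstream import path vllm.engine.arg_utils.EngineArgs and its create_engine_config() entry point. On the command line, the equivalent is the --enable-tower-connector-lora flag registered in the hunk at line 1008.

from vllm.engine.arg_utils import EngineArgs

engine_args = EngineArgs(
    model="Qwen/Qwen2-VL-2B-Instruct",  # assumed model choice, not from the diff
    enable_lora=True,
    max_loras=2,
    max_lora_rank=32,
    enable_tower_connector_lora=True,  # forwarded into LoRAConfig (hunk at line 1687)
)
vllm_config = engine_args.create_engine_config()
assert vllm_config.lora_config.enable_tower_connector_lora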

View File

@@ -128,7 +128,7 @@ class LoRAModelManager:
         if self.supports_mm and vllm_config is not None:
             model_config: ModelConfig = vllm_config.model_config
             self.mm_mapping: MultiModelKeys = self.model.get_mm_mapping()
-            if self.lora_config.enable_mm_lora:
+            if self.lora_config.enable_tower_connector_lora:
                 self.info = MULTIMODAL_REGISTRY.create_processor(model_config).info
         self.supports_mm_lora = self.supports_mm and hasattr(
             self.info, "get_num_mm_encoder_tokens"