Mirror of https://git.datalinker.icu/vllm-project/vllm.git (synced 2025-12-10 03:15:20 +08:00)
[Bugfix] Fix GPUModelRunner has no attribute lora_manager (#24762)
Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
parent 89e08d6d18
commit 99bfef841f
@@ -63,8 +63,7 @@ class LoRAModelRunnerMixin:
     def _set_active_loras(self, prompt_lora_mapping: tuple[int, ...],
                           token_lora_mapping: tuple[int, ...],
                           lora_requests: set[LoRARequest]) -> None:
-        if not self.lora_manager:
-            raise RuntimeError("LoRA is not enabled.")
+        self._ensure_lora_enabled()
 
         # Set is_prefill to True, so we always use the SGMV kernels on
         # non-cuda platforms.
@@ -75,6 +74,11 @@ class LoRAModelRunnerMixin:
                                   is_prefill=True)
         self.lora_manager.set_active_adapters(lora_requests, lora_mapping)
 
+    def _ensure_lora_enabled(self) -> None:
+        if not hasattr(self, "lora_manager"):
+            raise RuntimeError(
+                "LoRA is not enabled. Use --enable-lora to enable LoRA.")
+
     def set_active_loras(self, input_batch: InputBatch,
                          num_scheduled_tokens: np.ndarray) -> None:
 
@@ -172,21 +176,17 @@ class LoRAModelRunnerMixin:
         self.lora_manager.remove_all_adapters()
 
     def add_lora(self, lora_request: LoRARequest) -> bool:
-        if not self.lora_manager:
-            raise RuntimeError("LoRA is not enabled.")
+        self._ensure_lora_enabled()
         return self.lora_manager.add_adapter(lora_request)
 
     def remove_lora(self, lora_id: int) -> bool:
-        if not self.lora_manager:
-            raise RuntimeError("LoRA is not enabled.")
+        self._ensure_lora_enabled()
         return self.lora_manager.remove_adapter(lora_id)
 
     def pin_lora(self, lora_id: int) -> bool:
-        if not self.lora_manager:
-            raise RuntimeError("LoRA is not enabled.")
+        self._ensure_lora_enabled()
         return self.lora_manager.pin_adapter(lora_id)
 
     def list_loras(self) -> set[int]:
-        if not self.lora_manager:
-            raise RuntimeError("LoRA is not enabled.")
+        self._ensure_lora_enabled()
         return self.lora_manager.list_adapters()
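For context, a minimal standalone sketch (using assumed names, not the actual vLLM classes) of the failure mode this commit fixes: when LoRA is disabled, the mixin never assigns self.lora_manager, so the old truthiness guard `if not self.lora_manager` raises AttributeError before the intended RuntimeError can be reached. Guarding with hasattr(), as the new _ensure_lora_enabled() does, restores the friendly error.

# Sketch only: LoRAMixinSketch and maybe_setup_lora are hypothetical stand-ins.

class LoRAMixinSketch:
    def maybe_setup_lora(self, enable_lora: bool) -> None:
        # lora_manager is only assigned when LoRA is enabled.
        if enable_lora:
            self.lora_manager = object()  # stand-in for the real manager

    def add_lora_old(self) -> bool:
        # Old guard: reads self.lora_manager before it exists, so a runner
        # created without --enable-lora dies with AttributeError.
        if not self.lora_manager:
            raise RuntimeError("LoRA is not enabled.")
        return True

    def add_lora_new(self) -> bool:
        # New guard: hasattr() is safe even when the attribute was never set,
        # so the caller sees the intended RuntimeError.
        if not hasattr(self, "lora_manager"):
            raise RuntimeError(
                "LoRA is not enabled. Use --enable-lora to enable LoRA.")
        return True


runner = LoRAMixinSketch()
runner.maybe_setup_lora(enable_lora=False)

try:
    runner.add_lora_old()
except AttributeError as e:
    print("old guard:", e)  # ... object has no attribute 'lora_manager'

try:
    runner.add_lora_new()
except RuntimeError as e:
    print("new guard:", e)  # LoRA is not enabled. Use --enable-lora to enable LoRA.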