[Misc] Remove the chunked prefill warning for LoRA (#16925)
Signed-off-by: Jee Jee Li <pandaleefree@gmail.com>
parent 5b794cae8d
commit c9acbf1141
@@ -2686,13 +2686,6 @@ class LoRAConfig:
         elif isinstance(self.lora_dtype, str):
             self.lora_dtype = getattr(torch, self.lora_dtype)
 
-    def verify_with_scheduler_config(self, scheduler_config: SchedulerConfig):
-        # Reminder: Please update docs/source/features/compatibility_matrix.md
-        # If the feature combo become valid
-        if scheduler_config.chunked_prefill_enabled:
-            logger.warning("LoRA with chunked prefill is still experimental "
-                           "and may be unstable.")
-
     def verify_lora_support(self):
         if self.long_lora_scaling_factors is not None and envs.VLLM_USE_V1:
             raise ValueError(
@@ -3820,8 +3813,6 @@ class VllmConfig:
         if self.lora_config:
             self.lora_config.verify_with_cache_config(self.cache_config)
             self.lora_config.verify_with_model_config(self.model_config)
-            self.lora_config.verify_with_scheduler_config(
-                self.scheduler_config)
             self.lora_config.verify_lora_support()
         if self.prompt_adapter_config:
             self.prompt_adapter_config.verify_with_model_config(
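For readers skimming the diff: the deleted hook was purely advisory. Below is a minimal, self-contained sketch of the behavior this commit removes; the `SchedulerConfig` and `LoRAConfig` classes here are simplified stand-ins for illustration, not vLLM's real config classes.

import logging
from dataclasses import dataclass

logging.basicConfig()
logger = logging.getLogger("sketch")


@dataclass
class SchedulerConfig:
    # Simplified stand-in: only the flag the removed check consulted.
    chunked_prefill_enabled: bool = False


@dataclass
class LoRAConfig:
    # The hook deleted by this commit: it only logged a warning and
    # never rejected the LoRA + chunked prefill combination.
    def verify_with_scheduler_config(self, scheduler_config: SchedulerConfig):
        if scheduler_config.chunked_prefill_enabled:
            logger.warning("LoRA with chunked prefill is still experimental "
                           "and may be unstable.")


# Before this commit, VllmConfig invoked the hook during config
# verification; after it, enabling chunked prefill together with LoRA
# emits no warning at all.
LoRAConfig().verify_with_scheduler_config(
    SchedulerConfig(chunked_prefill_enabled=True))

Since the check had no enforcing effect, removing both the method and its call site simplifies the config verification path, presumably because the combination is no longer considered experimental.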