[Misc] Remove deprecated names (#10817)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
Cyrus Leung 2024-12-03 14:52:57 +08:00 committed by GitHub
parent ef51831ee8
commit 3257d449fa
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 31 additions and 102 deletions
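The change is uniform across the files below: the informal "@overload  # DEPRECATED" comment convention is replaced with the PEP 702 deprecated decorator from typing_extensions, and the module-level __getattr__ shims that kept renamed input types importable are deleted outright. As a minimal sketch of the decorator pattern (the class and signatures here are illustrative, not copied from vllm):

# Sketch only: a hypothetical Engine stands in for the vllm classes below.
from typing import overload

from typing_extensions import deprecated


class Engine:
    @overload
    @deprecated("'inputs' will be renamed to 'prompt'")
    def add_request(self, request_id: str, *, inputs: str) -> None: ...

    @overload
    def add_request(self, request_id: str, prompt: str) -> None: ...

    def add_request(self, request_id, prompt=None, *, inputs=None):
        # The runtime implementation still accepts both spellings during
        # the transition; only the overload stub carries the deprecation.
        if inputs is not None:
            prompt = inputs
        print(f"queued {request_id}: {prompt!r}")

Because @overload stubs are never executed at runtime, decorating one with @deprecated is chiefly a signal to type checkers such as mypy and pyright, which flag call sites that resolve to the deprecated signature.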

View File

@@ -6,6 +6,8 @@ from typing import (Any, AsyncGenerator, Callable, Coroutine, Dict, Iterable,
List, Mapping, Optional, Set, Tuple, Type, Union, overload)
from weakref import ReferenceType
from typing_extensions import deprecated
import vllm.envs as envs
from vllm.config import (DecodingConfig, LoRAConfig, ModelConfig,
ParallelConfig, SchedulerConfig, VllmConfig)
@@ -422,7 +424,8 @@ class _AsyncLLMEngine(LLMEngine):
return await (
self.get_tokenizer_group().get_lora_tokenizer_async(lora_request))
@overload # DEPRECATED
@overload
@deprecated("'inputs' will be renamed to 'prompt'")
async def add_request_async(
self,
request_id: str,
@@ -894,7 +897,8 @@ class AsyncLLMEngine(EngineClient):
# This method does not need to be async, but kept that way
# for backwards compatibility.
@overload # DEPRECATED
@overload
@deprecated("'inputs' will be renamed to 'prompt'")
def add_request(
self,
request_id: str,

View File

@@ -10,7 +10,7 @@ from typing import Sequence as GenericSequence
from typing import Set, Type, Union, cast, overload
import torch
from typing_extensions import TypeVar
from typing_extensions import TypeVar, deprecated
import vllm.envs as envs
from vllm.config import (DecodingConfig, LoRAConfig, ModelConfig,
@@ -719,7 +719,8 @@ class LLMEngine:
def stop_remote_worker_execution_loop(self) -> None:
self.model_executor.stop_remote_worker_execution_loop()
@overload # DEPRECATED
@overload
@deprecated("'inputs' will be renamed to 'prompt'")
def add_request(
self,
request_id: str,

View File

@@ -2,6 +2,8 @@ from dataclasses import dataclass
from enum import Enum
from typing import List, Mapping, Optional, Union, overload
from typing_extensions import deprecated
from vllm import PoolingParams
from vllm.inputs import PromptType
from vllm.lora.request import LoRARequest
@@ -32,7 +34,8 @@ class RPCProcessRequest:
prompt_adapter_request: Optional[PromptAdapterRequest] = None
priority: int = 0
@overload # DEPRECATED
@overload
@deprecated("'inputs' will be renamed to 'prompt'")
def __init__(
self,
*,

View File

@@ -9,6 +9,7 @@ import cloudpickle
import psutil
import zmq
import zmq.asyncio
from typing_extensions import deprecated
from zmq import Frame # type: ignore[attr-defined]
from zmq.asyncio import Socket
@@ -414,7 +415,8 @@ class MQLLMEngineClient(EngineClient):
def dead_error(self) -> BaseException:
return ENGINE_DEAD_ERROR(self._errored_with)
@overload # DEPRECATED
@overload
@deprecated("'inputs' will be renamed to 'prompt'")
def generate(
self,
*,
@@ -485,7 +487,8 @@ class MQLLMEngineClient(EngineClient):
lora_request, trace_headers,
prompt_adapter_request, priority)
@overload # DEPRECATED
@overload
@deprecated("'inputs' will be renamed to 'prompt'")
def encode(
self,
*,

View File

@@ -6,6 +6,7 @@ from typing import (Any, ClassVar, Dict, List, Optional, Sequence, Tuple, Type,
Union, cast, overload)
from tqdm import tqdm
from typing_extensions import deprecated
from vllm import envs
from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput,
@@ -256,6 +257,7 @@ class LLM:
tokenizer_group.tokenizer = get_cached_tokenizer(tokenizer)
@overload # LEGACY: single (prompt + optional token ids)
@deprecated("'prompt_token_ids' will become part of 'prompts'")
def generate(
self,
prompts: str,
@@ -268,6 +270,7 @@ class LLM:
...
@overload # LEGACY: multi (prompt + optional token ids)
@deprecated("'prompt_token_ids' will become part of 'prompts'")
def generate(
self,
prompts: List[str],
@@ -280,6 +283,7 @@ class LLM:
...
@overload # LEGACY: single (token ids + optional prompt)
@deprecated("'prompt_token_ids' will become part of 'prompts'")
def generate(
self,
prompts: Optional[str] = None,
@@ -293,6 +297,7 @@ class LLM:
...
@overload # LEGACY: multi (token ids + optional prompt)
@deprecated("'prompt_token_ids' will become part of 'prompts'")
def generate(
self,
prompts: Optional[List[str]] = None,
@@ -306,6 +311,7 @@ class LLM:
...
@overload # LEGACY: single or multi token ids [pos-only]
@deprecated("'prompt_token_ids' will become part of 'prompts'")
def generate(
self,
prompts: None,
@@ -671,6 +677,7 @@ class LLM:
)
@overload # LEGACY: single (prompt + optional token ids)
@deprecated("'prompt_token_ids' will become part of 'prompts'")
def encode(
self,
prompts: str,
@@ -683,6 +690,7 @@ class LLM:
...
@overload # LEGACY: multi (prompt + optional token ids)
@deprecated("'prompt_token_ids' will become part of 'prompts'")
def encode(
self,
prompts: List[str],
@@ -695,6 +703,7 @@ class LLM:
...
@overload # LEGACY: single (token ids + optional prompt)
@deprecated("'prompt_token_ids' will become part of 'prompts'")
def encode(
self,
prompts: Optional[str] = None,
@@ -708,6 +717,7 @@ class LLM:
...
@overload # LEGACY: multi (token ids + optional prompt)
@deprecated("'prompt_token_ids' will become part of 'prompts'")
def encode(
self,
prompts: Optional[List[str]] = None,
@@ -721,6 +731,7 @@ class LLM:
...
@overload # LEGACY: single or multi token ids [pos-only]
@deprecated("'prompt_token_ids' will become part of 'prompts'")
def encode(
self,
prompts: None,

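For LLM.generate and LLM.encode, the overloads being marked are the legacy prompt_token_ids call style. A hedged migration sketch, assuming the TokensPrompt TypedDict exported by vllm.inputs; the model name and token ids are placeholders:

# Illustrative migration for the legacy overloads flagged above.
from vllm import LLM, SamplingParams
from vllm.inputs import TokensPrompt

llm = LLM(model="facebook/opt-125m")  # placeholder model
params = SamplingParams(max_tokens=16)

# Legacy style: now resolves to a @deprecated overload.
outputs = llm.generate(prompt_token_ids=[[1, 2, 3]], sampling_params=params)

# Current style: the token ids travel inside the prompt object itself.
outputs = llm.generate(TokensPrompt(prompt_token_ids=[1, 2, 3]),
                       sampling_params=params)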
View File

@@ -38,34 +38,3 @@ __all__ = [
"InputProcessingContext",
"InputRegistry",
]
def __getattr__(name: str):
import warnings
if name == "PromptInput":
msg = ("PromptInput has been renamed to PromptType. "
"The original name will be removed in an upcoming version.")
warnings.warn(DeprecationWarning(msg), stacklevel=2)
return PromptType
if name == "LLMInputs":
msg = ("LLMInputs has been renamed to DecoderOnlyInputs. "
"The original name will be removed in an upcoming version.")
warnings.warn(DeprecationWarning(msg), stacklevel=2)
return DecoderOnlyInputs
if name == "EncoderDecoderLLMInputs":
msg = (
"EncoderDecoderLLMInputs has been renamed to EncoderDecoderInputs. "
"The original name will be removed in an upcoming version.")
warnings.warn(DeprecationWarning(msg), stacklevel=2)
return EncoderDecoderInputs
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
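The block deleted above is a PEP 562 module-level __getattr__ hook, which Python consults only when normal attribute lookup on the module fails. In isolation, the shim pattern being removed looks like this (OldName and NewName are stand-ins for the vllm type names):

# Generic form of the deprecation shim; placed at the bottom of a module,
# it aliases OldName to NewName while emitting a warning.
import warnings


class NewName:  # the surviving public name
    pass


def __getattr__(name: str):
    if name == "OldName":
        warnings.warn(
            "OldName has been renamed to NewName. The original name "
            "will be removed in an upcoming version.",
            DeprecationWarning,
            stacklevel=2,
        )
        return NewName
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")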

View File

@@ -358,34 +358,3 @@ def to_enc_dec_tuple_list(
return [(enc_dec_prompt["encoder_prompt"],
enc_dec_prompt["decoder_prompt"])
for enc_dec_prompt in enc_dec_prompts]
def __getattr__(name: str):
import warnings
if name == "PromptInput":
msg = ("PromptInput has been renamed to PromptType. "
"The original name will be removed in an upcoming version.")
warnings.warn(DeprecationWarning(msg), stacklevel=2)
return PromptType
if name == "LLMInputs":
msg = ("LLMInputs has been renamed to DecoderOnlyInputs. "
"The original name will be removed in an upcoming version.")
warnings.warn(DeprecationWarning(msg), stacklevel=2)
return DecoderOnlyInputs
if name == "EncoderDecoderLLMInputs":
msg = (
"EncoderDecoderLLMInputs has been renamed to EncoderDecoderInputs. "
"The original name will be removed in an upcoming version.")
warnings.warn(DeprecationWarning(msg), stacklevel=2)
return EncoderDecoderInputs
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
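With the shims gone from both vllm.inputs modules, the old aliases now fail fast instead of warning. A quick check of the new behavior, assuming no other re-export keeps the names alive:

# After this commit, the renamed aliases raise immediately.
import vllm.inputs

try:
    _ = vllm.inputs.LLMInputs  # previously warned, returned DecoderOnlyInputs
except AttributeError as exc:
    print(exc)  # module 'vllm.inputs' has no attribute 'LLMInputs'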

View File

@@ -32,9 +32,8 @@ from vllm.model_executor.models.utils import (AutoWeightsLoader, WeightsMapper,
maybe_prefix,
merge_multimodal_embeddings)
from vllm.multimodal import MULTIMODAL_REGISTRY
from vllm.multimodal.base import MultiModalInputs
from vllm.multimodal.image import cached_get_image_processor
from vllm.multimodal.inputs import NestedTensors
from vllm.multimodal.inputs import MultiModalKwargs, NestedTensors
from vllm.multimodal.utils import (cached_get_tokenizer,
repeat_and_pad_placeholder_tokens)
from vllm.sequence import IntermediateTensors
@@ -451,7 +450,7 @@ def get_max_multimodal_tokens(ctx):
def input_mapper_for_aria(ctx, data):
return MultiModalInputs(data)
return MultiModalKwargs(data)
def input_processor(ctx, llm_inputs):

View File

@@ -27,18 +27,3 @@ __all__ = [
"MULTIMODAL_REGISTRY",
"MultiModalRegistry",
]
def __getattr__(name: str):
import warnings
if name == "MultiModalInputs":
msg = ("MultiModalInputs has been renamed to MultiModalKwargs. "
"The original name will take another meaning in an upcoming "
"version.")
warnings.warn(DeprecationWarning(msg), stacklevel=2)
return MultiModalKwargs
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

View File

@@ -433,18 +433,3 @@ class MultiModalPlaceholderMap:
return MultiModalPlaceholderMap.IndexMap(src=src_indices,
dest=dest_indices)
def __getattr__(name: str):
import warnings
if name == "MultiModalInputs":
msg = ("MultiModalInputs has been renamed to MultiModalKwargs. "
"The original name will take another meaning in an upcoming "
"version.")
warnings.warn(DeprecationWarning(msg), stacklevel=2)
return MultiModalKwargs
raise AttributeError(f"module {__name__!r} has no attribute {name!r}")