[Deprecation] Remove tokenizer setter (#30400)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
Cyrus Leung 2025-12-11 03:10:58 +08:00 committed by GitHub
parent a9e4106f28
commit e72d65b959
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 1 addition and 24 deletions

View File

@ -9,7 +9,7 @@ import cloudpickle
import torch.nn as nn
from pydantic import ValidationError
from tqdm.auto import tqdm
from typing_extensions import TypeVar, deprecated
from typing_extensions import TypeVar
from vllm.beam_search import (
BeamSearchInstance,
@ -73,7 +73,6 @@ from vllm.pooling_params import PoolingParams
from vllm.sampling_params import BeamSearchParams, RequestOutputKind, SamplingParams
from vllm.tasks import PoolingTask
from vllm.tokenizers import MistralTokenizer, TokenizerLike
from vllm.tokenizers.hf import get_cached_tokenizer
from vllm.usage.usage_lib import UsageContext
from vllm.utils.collection_utils import as_iter, is_list_of
from vllm.utils.counter import Counter
@ -367,16 +366,6 @@ class LLM:
def get_tokenizer(self) -> TokenizerLike:
return self.llm_engine.get_tokenizer()
@deprecated("`set_tokenizer` is deprecated and will be removed in v0.13.")
def set_tokenizer(self, tokenizer: TokenizerLike) -> None:
# While CachedTokenizer is dynamic, have no choice but
# compare class name. Misjudgment will arise from
# user-defined tokenizer started with 'Cached'
if tokenizer.__class__.__name__.startswith("Cached"):
self.llm_engine.tokenizer = tokenizer
else:
self.llm_engine.tokenizer = get_cached_tokenizer(tokenizer)
def reset_mm_cache(self) -> None:
self.input_processor.clear_mm_cache()
self.llm_engine.reset_mm_cache()

View File

@ -701,10 +701,6 @@ class AsyncLLM(EngineClient):
def tokenizer(self) -> TokenizerLike | None:
return self.input_processor.tokenizer
@tokenizer.setter
def tokenizer(self, tokenizer: TokenizerLike | None) -> None:
self.input_processor.tokenizer = tokenizer
async def get_tokenizer(self) -> TokenizerLike:
if self.tokenizer is None:
raise ValueError(

View File

@ -64,10 +64,6 @@ class InputProcessor:
def tokenizer(self) -> TokenizerLike | None:
return self.input_preprocessor.tokenizer
@tokenizer.setter
def tokenizer(self, tokenizer: TokenizerLike | None) -> None:
self.input_preprocessor.tokenizer = tokenizer
def _validate_logprobs(
self,
params: SamplingParams,

View File

@ -358,10 +358,6 @@ class LLMEngine:
def tokenizer(self) -> TokenizerLike | None:
return self.input_processor.tokenizer
@tokenizer.setter
def tokenizer(self, tokenizer: TokenizerLike | None) -> None:
self.input_processor.tokenizer = tokenizer
def get_tokenizer(self) -> TokenizerLike:
if self.tokenizer is None:
raise ValueError(