From d007387aa742c25f60d9b35bc103cbaf753114c8 Mon Sep 17 00:00:00 2001
From: Mingliang Li
Date: Wed, 10 Dec 2025 12:05:51 +0800
Subject: [PATCH] [Bugfix] Cache added_vocab to avoid per-token overhead
 (#30351)

Signed-off-by: limingliang
Co-authored-by: limingliang
---
 vllm/tokenizers/deepseekv32.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/vllm/tokenizers/deepseekv32.py b/vllm/tokenizers/deepseekv32.py
index b0490dacbe2d4..5c4936b5e7ad3 100644
--- a/vllm/tokenizers/deepseekv32.py
+++ b/vllm/tokenizers/deepseekv32.py
@@ -17,6 +17,8 @@ class DeepseekV32Tokenizer(HfTokenizer):
         self.name_or_path = (
             tokenizer.name_or_path if hasattr(tokenizer, "name_or_path") else ""
         )
+        self._added_vocab = self.tokenizer.get_added_vocab()
+        self._added_vocab_size = len(self._added_vocab)
 
     @classmethod
     def from_pretrained(
@@ -98,7 +100,7 @@ class DeepseekV32Tokenizer(HfTokenizer):
 
     def __len__(self) -> int:
         # is an added token in DeepseekV32 tokenizer
-        return self.vocab_size + len(self.get_added_vocab())
+        return self.vocab_size + self._added_vocab_size
 
     def __call__(
         self,
@@ -120,7 +122,7 @@ class DeepseekV32Tokenizer(HfTokenizer):
         return self.tokenizer.get_vocab()
 
     def get_added_vocab(self) -> dict[str, int]:
-        return self.tokenizer.get_added_vocab()
+        return self._added_vocab.copy()
 
     def encode(
     self,
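
Note (illustrative, not part of the patch): the fix matters because __len__
and get_added_vocab() can be called once per generated token, and HuggingFace
tokenizers may rebuild the added-vocab dict on every get_added_vocab() call.
The standalone sketch below shows the effect; DummyTokenizer is a hypothetical
stand-in for the real HF tokenizer, not vLLM code.

    # Micro-benchmark sketch: per-call dict construction vs. a cached size.
    import time

    class DummyTokenizer:
        def get_added_vocab(self):
            # Simulate non-trivial dict construction on every call.
            return {f"<extra_{i}>": 128000 + i for i in range(1000)}

    tok = DummyTokenizer()

    start = time.perf_counter()
    for _ in range(100_000):
        n = len(tok.get_added_vocab())  # old path: rebuilds the dict each call
    uncached = time.perf_counter() - start

    cached_size = len(tok.get_added_vocab())  # new path: computed once in __init__
    start = time.perf_counter()
    for _ in range(100_000):
        n = cached_size
    cached = time.perf_counter() - start

    print(f"uncached: {uncached:.3f}s  cached: {cached:.3f}s")

Returning self._added_vocab.copy() from get_added_vocab() keeps the cached
dict safe from mutation by callers, at the cost of one copy per explicit
call; __len__ avoids even that by using the precomputed _added_vocab_size.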