diff --git a/vllm/v1/structured_output/backend_xgrammar.py b/vllm/v1/structured_output/backend_xgrammar.py
index f8a2df43dd90e..826ee08caa4e2 100644
--- a/vllm/v1/structured_output/backend_xgrammar.py
+++ b/vllm/v1/structured_output/backend_xgrammar.py
@@ -10,7 +10,7 @@ import torch
 import vllm.envs
 from vllm.logger import init_logger
 from vllm.sampling_params import SamplingParams
-from vllm.tokenizers import MistralTokenizer
+from vllm.tokenizers import DeepseekV32Tokenizer, MistralTokenizer
 from vllm.utils.import_utils import LazyLoader
 from vllm.v1.structured_output.backend_types import (
     StructuredOutputBackend,
@@ -56,6 +56,27 @@ class XgrammarBackend(StructuredOutputBackend):
                 stop_token_ids=stop_token_ids,
                 add_prefix_space=True,
             )
+        elif isinstance(self.tokenizer, DeepseekV32Tokenizer):
+            # Copied from xgr.TokenizerInfo.from_huggingface(), since we are
+            # using a custom tokenizer wrapper here.
+            vocab_dict = self.tokenizer.get_vocab()
+            tokenizer_vocab_size = max(len(vocab_dict), self.tokenizer.max_token_id + 1)
+            vocab_size = self.vocab_size or tokenizer_vocab_size
+            # Maintain the tokenizer's own indexing; unassigned ids stay "".
+            encoded_vocab = [""] * vocab_size
+            for token, idx in vocab_dict.items():
+                if idx < vocab_size:
+                    encoded_vocab[idx] = token
+            stop_token_ids = [self.tokenizer.eos_token_id]
+            backend_str = self.tokenizer.tokenizer.backend_tokenizer.to_str()
+            metadata = xgr.TokenizerInfo._detect_metadata_from_hf(backend_str)
+            tokenizer_info = xgr.TokenizerInfo(
+                encoded_vocab=encoded_vocab,
+                vocab_type=metadata["vocab_type"],
+                vocab_size=vocab_size,
+                stop_token_ids=stop_token_ids,
+                add_prefix_space=metadata["add_prefix_space"],
+            )
         else:
             tokenizer_info = xgr.TokenizerInfo.from_huggingface(
                 self.tokenizer,
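
For reviewers, a minimal standalone sketch of the dense-vocab reconstruction the new elif branch performs; build_encoded_vocab and the toy inputs are illustrative, not part of the patch:

    # A minimal sketch, assuming a toy vocab dict; the patch reads the real
    # one from the DeepseekV32 tokenizer wrapper via get_vocab().
    def build_encoded_vocab(vocab_dict: dict[str, int], vocab_size: int) -> list[str]:
        # Slot i holds the token whose id is i, preserving the tokenizer's
        # indexing; ids with no token (holes/padding) remain "".
        encoded_vocab = [""] * vocab_size
        for token, idx in vocab_dict.items():
            if idx < vocab_size:
                encoded_vocab[idx] = token
        return encoded_vocab

    # Toy usage: ids 0 and 2 are assigned, id 1 is a hole.
    assert build_encoded_vocab({"<eos>": 0, "hi": 2}, 4) == ["<eos>", "", "hi", ""]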