Mirror of https://git.datalinker.icu/vllm-project/vllm.git (synced 2025-12-24 14:06:32 +08:00)
[Bugfix] Fix the issue where DeepSeek v3.2 cannot use structured_output (#30371)
Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com>
parent 434ac76a7c
commit 9db78f34dc
@@ -10,7 +10,7 @@ import torch
 import vllm.envs
 from vllm.logger import init_logger
 from vllm.sampling_params import SamplingParams
-from vllm.tokenizers import MistralTokenizer
+from vllm.tokenizers import DeepseekV32Tokenizer, MistralTokenizer
 from vllm.utils.import_utils import LazyLoader
 from vllm.v1.structured_output.backend_types import (
     StructuredOutputBackend,
@@ -56,6 +56,27 @@ class XgrammarBackend(StructuredOutputBackend):
                 stop_token_ids=stop_token_ids,
                 add_prefix_space=True,
             )
+        elif isinstance(self.tokenizer, DeepseekV32Tokenizer):
+            # copy from xgr.TokenizerInfo.from_huggingface()
+            # because we are using a custom tokenizer wrapper here.
+            vocab_dict = self.tokenizer.get_vocab()
+            tokenizer_vocab_size = max(len(vocab_dict), self.tokenizer.max_token_id + 1)
+            vocab_size = self.vocab_size or tokenizer_vocab_size
+            # maintain tokenizer's indexing
+            encoded_vocab = [""] * vocab_size
+            for token, idx in vocab_dict.items():
+                if idx < vocab_size:
+                    encoded_vocab[idx] = token
+            stop_token_ids = [self.tokenizer.eos_token_id]
+            backend_str = self.tokenizer.tokenizer.backend_tokenizer.to_str()
+            metadata = xgr.TokenizerInfo._detect_metadata_from_hf(backend_str)
+            tokenizer_info = xgr.TokenizerInfo(
+                encoded_vocab=encoded_vocab,
+                vocab_type=metadata["vocab_type"],
+                vocab_size=vocab_size,
+                stop_token_ids=stop_token_ids,
+                add_prefix_space=metadata["add_prefix_space"],
+            )
         else:
             tokenizer_info = xgr.TokenizerInfo.from_huggingface(
                 self.tokenizer,
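
For context, the new DeepseekV32Tokenizer branch reproduces what xgr.TokenizerInfo.from_huggingface() does internally, because the DeepSeek v3.2 tokenizer is wrapped in a custom class that xgrammar cannot introspect directly. Below is a minimal standalone sketch of the same construction, not the vLLM code itself: the model id is a placeholder, and it assumes a Hugging Face fast tokenizer so that backend_tokenizer.to_str() is available.

import xgrammar as xgr
from transformers import AutoTokenizer

# Placeholder model id -- substitute whichever tokenizer you actually use.
hf_tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-V3.2-Exp")

vocab_dict = hf_tokenizer.get_vocab()
# Some tokenizers have ids beyond len(vocab), so size by the largest id as well.
vocab_size = max(len(vocab_dict), max(vocab_dict.values()) + 1)

# Keep the tokenizer's own id -> token mapping, padding unused slots with "".
encoded_vocab = [""] * vocab_size
for token, idx in vocab_dict.items():
    if idx < vocab_size:
        encoded_vocab[idx] = token

# Let xgrammar detect vocab_type / add_prefix_space from the fast tokenizer's
# serialized JSON, as the patched branch does via _detect_metadata_from_hf().
metadata = xgr.TokenizerInfo._detect_metadata_from_hf(
    hf_tokenizer.backend_tokenizer.to_str()
)

tokenizer_info = xgr.TokenizerInfo(
    encoded_vocab=encoded_vocab,
    vocab_type=metadata["vocab_type"],
    vocab_size=vocab_size,
    stop_token_ids=[hf_tokenizer.eos_token_id],
    add_prefix_space=metadata["add_prefix_space"],
)

The resulting tokenizer_info is what the xgrammar backend then uses to build its grammar compiler, which is why constructing it correctly for the wrapped tokenizer is enough to restore structured_output for DeepSeek v3.2.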