[Bugfix] Fix Incremental Detokenization with tokenizers == 0.22.0 (#24159)

Signed-off-by: Fanli Lin <fanli.lin@intel.com>
Signed-off-by: Fanli Lin <fanli0116@gmail.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Fanli Lin authored 2025-09-04 17:47:08 +08:00, committed by GitHub
parent 3efb9f4d95
commit 2c301ee2eb

@@ -234,7 +234,7 @@ class FastIncrementalDetokenizer(BaseIncrementalDetokenizer):
         try:
             token = self.stream.step(self.tokenizer, next_token_id)
         except Exception as e:
-            if str(e) != INVALID_PREFIX_ERR_MSG:
+            if not str(e).startswith(INVALID_PREFIX_ERR_MSG):
                 raise e
             # Recover from edge case where tokenizer can produce non-monotonic,
             # invalid UTF-8 output, which breaks the internal state of
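
Why the check changed: with tokenizers 0.22.0 the message raised by
DecodeStream.step no longer equals the known prefix exactly (detail text
follows it), so the exact comparison re-raised an error the code is able
to recover from. A minimal sketch of the relaxed check, using an
illustrative error string rather than vLLM's actual constant:

# Illustrative value only; not the exact constant from vLLM nor the
# exact message tokenizers emits.
INVALID_PREFIX_ERR_MSG = "Invalid prefix detected"

def is_invalid_prefix_error(exc: Exception) -> bool:
    # tokenizers 0.22.0 appends detail after the known prefix, so an
    # exact equality test on str(exc) no longer matches.
    return str(exc).startswith(INVALID_PREFIX_ERR_MSG)

assert is_invalid_prefix_error(Exception("Invalid prefix detected: byte 0x80"))
assert is_invalid_prefix_error(Exception("Invalid prefix detected"))
assert not is_invalid_prefix_error(Exception("some unrelated failure"))
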
@@ -243,7 +243,8 @@ class FastIncrementalDetokenizer(BaseIncrementalDetokenizer):
             # the tokenizers DecodeStream.
             logger.warning(
                 "Encountered invalid prefix detokenization error"
                 " for request %s, resetting decode stream.", self.request_id)
-            self.stream = DecodeStream(self.skip_special_tokens)
+            self.stream = DecodeStream(
+                skip_special_tokens=self.skip_special_tokens)
             token = self.stream.step(self.tokenizer, next_token_id)
         return token
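
Why the constructor call changed: passing skip_special_tokens positionally
can bind to the wrong parameter when DecodeStream's signature gains or
reorders arguments between tokenizers releases, which is what the keyword
form guards against here. A standalone sketch of the stream usage,
assuming only the tokenizers package; "gpt2" is purely an illustrative
public tokenizer:

from tokenizers import Tokenizer
from tokenizers.decoders import DecodeStream

tokenizer = Tokenizer.from_pretrained("gpt2")

# Keyword argument: unambiguous across tokenizers 0.21.x and 0.22.0,
# the incompatibility this commit works around.
stream = DecodeStream(skip_special_tokens=False)

pieces = []
for token_id in tokenizer.encode("incremental detokenization").ids:
    piece = stream.step(tokenizer, token_id)  # None until a chunk decodes
    if piece is not None:
        pieces.append(piece)
print("".join(pieces))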