[Bugfix] fixed inconsistent finish_reason handling between V0 and V1 engines (#27555)

Signed-off-by: chaunceyjiang <chaunceyjiang@gmail.com>
This commit is contained in:
Chauncey 2025-10-28 10:18:08 +08:00 committed by GitHub
parent 255e34ca50
commit 61fbfe5274
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@@ -42,13 +42,6 @@ def remove_all(lst: list, items_to_remove: set) -> list:
def check_stop(
request: Request, max_model_len: int, pooler_output: torch.Tensor | None = None
) -> bool:
if (
request.num_tokens >= max_model_len
or request.num_output_tokens >= request.max_tokens
):
request.status = RequestStatus.FINISHED_LENGTH_CAPPED
return True
if request.pooling_params:
if pooler_output is not None:
request.status = RequestStatus.FINISHED_STOPPED
@@ -70,4 +63,10 @@ def check_stop(
request.status = RequestStatus.FINISHED_STOPPED
request.stop_reason = last_token_id
return True
if (
request.num_tokens >= max_model_len
or request.num_output_tokens >= request.max_tokens
):
request.status = RequestStatus.FINISHED_LENGTH_CAPPED
return True
return False