Fix test_max_model_len in tests/entrypoints/llm/test_generate.py (#19451)

Signed-off-by: Lu Fang <lufang@fb.com>
This commit is contained in:
Lu Fang 2025-06-11 12:54:59 +08:00 committed by GitHub
parent a45b979d9f
commit 2b1e2111b0
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

View File

@ -125,4 +125,7 @@ def test_max_model_len():
for output in outputs:
num_total_tokens = len(output.prompt_token_ids) + len(
output.outputs[0].token_ids)
assert num_total_tokens == max_model_len
# Total tokens must not exceed max_model_len.
# It can be fewer if generation finishes for other reasons (e.g., EOS)
# before reaching the absolute model length limit.
assert num_total_tokens <= max_model_len