diff --git a/vllm/core/scheduler.py b/vllm/core/scheduler.py
index faf392d26085..d6379f93c5a5 100644
--- a/vllm/core/scheduler.py
+++ b/vllm/core/scheduler.py
@@ -190,7 +190,7 @@ class Scheduler:
                     break
 
                 num_prompt_tokens = seq_group.get_seqs()[0].get_len()
-                if num_prompt_tokens >= self.scheduler_config.max_seq_len:
+                if num_prompt_tokens > self.scheduler_config.max_seq_len:
                     logger.warning(
                         f"Input prompt ({num_prompt_tokens} tokens) is too long"
                         " and exceeds limit of "
diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py
index 8936eda70501..1fdb2d04d53a 100644
--- a/vllm/engine/llm_engine.py
+++ b/vllm/engine/llm_engine.py
@@ -300,7 +300,7 @@ class LLMEngine:
                     continue
 
                 # Check if the sequence has reached max_seq_len.
-                if (seq.get_len() >=
+                if (seq.get_len() >
                         self.scheduler.scheduler_config.max_seq_len):
                     self.scheduler.free_seq(
                         seq, SequenceStatus.FINISHED_LENGTH_CAPPED)
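
Both hunks relax the same boundary condition: a sequence whose length is exactly equal to max_seq_len is no longer treated as over the limit; only sequences strictly longer than max_seq_len are rejected in the scheduler or length-capped in the engine. A minimal standalone sketch of the before/after behavior (not vLLM code; the limit of 2048 tokens is a hypothetical value, not taken from the patch):

# Minimal sketch of the boundary change; MAX_SEQ_LEN is a hypothetical limit.
MAX_SEQ_LEN = 2048

def too_long_before(num_tokens: int) -> bool:
    # Old check (>=): a prompt of exactly MAX_SEQ_LEN tokens is rejected.
    return num_tokens >= MAX_SEQ_LEN

def too_long_after(num_tokens: int) -> bool:
    # New check (>): only prompts strictly longer than MAX_SEQ_LEN are rejected.
    return num_tokens > MAX_SEQ_LEN

for n in (MAX_SEQ_LEN - 1, MAX_SEQ_LEN, MAX_SEQ_LEN + 1):
    print(n, too_long_before(n), too_long_after(n))
# 2047: False, False  -> accepted by both checks
# 2048: True,  False  -> rejected before the patch, accepted after
# 2049: True,  True   -> rejected by both checks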