From 18ed3132d2bfe1df9a74729457b69243955221e8 Mon Sep 17 00:00:00 2001
From: Chengyang LIU <464004340@qq.com>
Date: Sun, 30 Mar 2025 19:39:56 -0700
Subject: [PATCH] [Misc] update the comments (#15780)

Signed-off-by: chengyang liu
Co-authored-by: chengyang liu
---
 vllm/v1/worker/gpu_model_runner.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py
index 8071c98b269fd..e3df2a62e67f4 100644
--- a/vllm/v1/worker/gpu_model_runner.py
+++ b/vllm/v1/worker/gpu_model_runner.py
@@ -673,7 +673,7 @@ class GPUModelRunner(LoRAModelRunnerMixin):
         # use two kernels for cascade attention. Let's imagine:
         # Request 3's input query: [D]
         # Request 3's kv cache: [A, B, C, D]
-        # Request 3's num_computed_tokens: 4 (i.e., [A, B, C, D])
+        # Request 3's num_computed_tokens: 3 (i.e., [A, B, C])
         # If we use [A, B, C, D] as the common prefix for Request 1-3,
         # then Request 3 will be processed only by the first kernel,
         # and the second kernel will get an empty input. While this is not
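
For context, here is a minimal sketch of the constraint the corrected comment describes. It is not vLLM's actual implementation; the helper name `cap_common_prefix_len` and the example values are illustrative only. The idea is that the shared-prefix length handled by the first cascade-attention kernel must not cover a request's entire KV cache, otherwise the second (per-request suffix) kernel would get an empty input for that request, which is why `num_computed_tokens` (3, not 4, for Request 3) is the relevant bound.

```python
# Hypothetical sketch (not vLLM's actual code): cap the cascade-attention
# common prefix so every request keeps a non-empty suffix for the second
# kernel. The names mirror the comment in gpu_model_runner.py but the
# function itself is an assumption for illustration.
import numpy as np


def cap_common_prefix_len(common_prefix_len: int,
                          num_computed_tokens: np.ndarray) -> int:
    """Clamp the shared-prefix length used by the first cascade kernel.

    If the common prefix covered a request's whole KV cache (e.g. all of
    [A, B, C, D] for Request 3), the suffix kernel would receive an empty
    input for that request. Capping at min(num_computed_tokens) leaves at
    least the token currently being computed in every request's suffix.
    """
    return min(common_prefix_len, int(num_computed_tokens.min()))


# Scenario from the comment: Requests 1-3 share [A, B, C, D]; Request 3's
# num_computed_tokens is 3 (D is the token still being computed), so the
# usable common prefix is capped at 3 rather than 4.
num_computed_tokens = np.array([7, 5, 3])
print(cap_common_prefix_len(4, num_computed_tokens))  # -> 3
```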