From 93e5f3c5fb4a4bbd49610efb96aad30df95fca66 Mon Sep 17 00:00:00 2001
From: SnowCharm
Date: Sat, 12 Apr 2025 22:54:37 +0800
Subject: [PATCH] [Perf] Optimize Preparing Inputs for GPU Model Runner
 (#16484)

Signed-off-by: snowcharm
Co-authored-by: Nick Hill
---
 vllm/v1/worker/gpu_model_runner.py | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py
index 0e70d77e1b7e7..70e8bd75ec94e 100644
--- a/vllm/v1/worker/gpu_model_runner.py
+++ b/vllm/v1/worker/gpu_model_runner.py
@@ -484,14 +484,10 @@ class GPUModelRunner(LoRAModelRunnerMixin):
         self.input_batch.block_table.commit(num_reqs)
 
         # Get the number of scheduled tokens for each request.
-        # TODO: The Python loop can be slow. Optimize.
-        num_scheduled_tokens = np.empty(num_reqs, dtype=np.int32)
-        max_num_scheduled_tokens = 0
-        for i, req_id in enumerate(self.input_batch.req_ids):
-            num_tokens = scheduler_output.num_scheduled_tokens[req_id]
-            num_scheduled_tokens[i] = num_tokens
-            max_num_scheduled_tokens = max(max_num_scheduled_tokens,
-                                           num_tokens)
+        req_ids = self.input_batch.req_ids
+        tokens = [scheduler_output.num_scheduled_tokens[i] for i in req_ids]
+        num_scheduled_tokens = np.array(tokens, dtype=np.int32)
+        max_num_scheduled_tokens = max(tokens)
 
         # Get request indices.
         # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2]
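
Illustrative note (not part of the patch): the change replaces a per-request
Python loop, which filled a preallocated numpy array one element at a time
while tracking the maximum in Python, with a single list comprehension
followed by a bulk np.array() conversion and a built-in max(). The sketch
below is a minimal, self-contained micro-benchmark of the two paths; the
batch size, request-id format, and the num_scheduled_tokens_map dict are
stand-ins invented for this sketch, not names from vllm.

    # Micro-benchmark sketch: old per-element loop vs. new bulk-conversion
    # path. All inputs here are synthetic placeholders.
    import timeit

    import numpy as np

    num_reqs = 1024
    req_ids = [f"req-{n}" for n in range(num_reqs)]
    num_scheduled_tokens_map = {rid: (n % 16) + 1
                                for n, rid in enumerate(req_ids)}

    def old_path():
        # Former code shape: fill a preallocated int32 array element by
        # element and keep a running maximum in Python.
        num_scheduled_tokens = np.empty(num_reqs, dtype=np.int32)
        max_num_scheduled_tokens = 0
        for i, req_id in enumerate(req_ids):
            num_tokens = num_scheduled_tokens_map[req_id]
            num_scheduled_tokens[i] = num_tokens
            max_num_scheduled_tokens = max(max_num_scheduled_tokens,
                                           num_tokens)
        return num_scheduled_tokens, max_num_scheduled_tokens

    def new_path():
        # Patched code shape: one comprehension, then bulk conversion and
        # a single max() over the plain Python list.
        tokens = [num_scheduled_tokens_map[i] for i in req_ids]
        num_scheduled_tokens = np.array(tokens, dtype=np.int32)
        max_num_scheduled_tokens = max(tokens)
        return num_scheduled_tokens, max_num_scheduled_tokens

    if __name__ == "__main__":
        # Both paths must produce identical results.
        assert np.array_equal(old_path()[0], new_path()[0])
        assert old_path()[1] == new_path()[1]
        for name, fn in (("loop", old_path), ("comprehension", new_path)):
            print(name, timeit.timeit(fn, number=1000))

The win comes from doing one bulk conversion in numpy's C layer instead of
num_reqs separate per-element array writes and Python-level max() calls.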