From 5a0a6dfd55e1b9b2b518e0d2e91bd2c1241a7694 Mon Sep 17 00:00:00 2001
From: Nick Hill
Date: Tue, 4 Nov 2025 07:38:16 -0800
Subject: [PATCH] [BugFix] Fix incorrect preallocated sampled_token_ids tensor size (#28025)

Signed-off-by: Nick Hill
---
 vllm/v1/worker/gpu_model_runner.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py
index e700c09038e28..177542ed96c8e 100644
--- a/vllm/v1/worker/gpu_model_runner.py
+++ b/vllm/v1/worker/gpu_model_runner.py
@@ -524,7 +524,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin):
         self._draft_token_ids: list[list[int]] | torch.Tensor | None = None
         self.transfer_event = torch.cuda.Event()
         self.sampled_token_ids_pinned_cpu = torch.empty(
-            (self.max_model_len, 1),
+            (self.max_num_reqs, 1),
             dtype=torch.int64,
             device="cpu",
             pin_memory=self.pin_memory,
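
Illustrative note (not part of the patch): the sketch below shows why the pinned
CPU staging buffer is sized by the maximum number of in-flight requests rather
than the maximum model (sequence) length. Sampling yields one token id per
request per step, so a buffer with max_num_reqs rows is sufficient; sizing it by
max_model_len over-allocates without matching what is actually copied. The
variable names and values here are hypothetical stand-ins, not vLLM's actual
GPUModelRunner code.

import torch

# Assumed limits for illustration only.
max_num_reqs = 256          # assumed scheduler cap on concurrent requests
num_scheduled_reqs = 8      # requests scheduled in the current step

# Preallocated pinned CPU buffer: one row per possible request, one token each.
# pin_memory is hedged on CUDA availability so the sketch also runs on CPU-only builds.
sampled_token_ids_pinned_cpu = torch.empty(
    (max_num_reqs, 1),
    dtype=torch.int64,
    device="cpu",
    pin_memory=torch.cuda.is_available(),
)

# Stand-in for the sampler output: one sampled token id per scheduled request.
sampler_output = torch.randint(0, 32_000, (num_scheduled_reqs, 1), dtype=torch.int64)

# Only the first num_scheduled_reqs rows of the preallocated buffer are used,
# which is why max_num_reqs (not max_model_len) bounds the allocation.
sampled_token_ids_pinned_cpu[:num_scheduled_reqs].copy_(sampler_output, non_blocking=True)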