[BugFix] Fix incorrect preallocated sampled_token_ids tensor size (#28025)

Signed-off-by: Nick Hill <nhill@redhat.com>
Nick Hill 2025-11-04 07:38:16 -08:00 committed by GitHub
parent 938772af03
commit 5a0a6dfd55


@@ -524,7 +524,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin):
         self._draft_token_ids: list[list[int]] | torch.Tensor | None = None
         self.transfer_event = torch.cuda.Event()
         self.sampled_token_ids_pinned_cpu = torch.empty(
-            (self.max_model_len, 1),
+            (self.max_num_reqs, 1),
             dtype=torch.int64,
             device="cpu",
             pin_memory=self.pin_memory,
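
The change shrinks the preallocated pinned host buffer from max_model_len rows to max_num_reqs rows: the sampler emits at most one token id per in-flight request per step, so the device-to-host transfer never needs more than max_num_reqs slots, whereas max_model_len (the maximum sequence length) can be far larger and wastes pinned memory. Below is a minimal standalone sketch of the pattern, not the vLLM code itself; the buffer names and the 256 / 32000 constants are illustrative assumptions.

import torch

max_num_reqs = 256  # assumed upper bound on concurrently scheduled requests
use_cuda = torch.cuda.is_available()

# Preallocate once: one int64 slot per request. Pinned (page-locked) memory
# lets the GPU-to-CPU copy below run asynchronously.
sampled_token_ids_pinned_cpu = torch.empty(
    (max_num_reqs, 1),
    dtype=torch.int64,
    device="cpu",
    pin_memory=use_cuda,
)

if use_cuda:
    transfer_event = torch.cuda.Event()

    # Stand-in for the token ids sampled on the GPU in one decode step.
    num_reqs = 32  # requests actually scheduled this step (<= max_num_reqs)
    sampled_gpu = torch.randint(
        0, 32000, (num_reqs, 1), dtype=torch.int64, device="cuda"
    )

    # Copy only the rows for the active requests, without blocking the host.
    sampled_token_ids_pinned_cpu[:num_reqs].copy_(sampled_gpu, non_blocking=True)
    transfer_event.record()
    transfer_event.synchronize()  # ensure the copy finished before reading on CPU
    print(sampled_token_ids_pinned_cpu[:num_reqs].squeeze(1).tolist())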