[Core][Bookkeeping Optimization] Update against numpy view of is_token_ids tensor (#27618)

Signed-off-by: Jialin Ouyang <Jialin.Ouyang@gmail.com>
This commit is contained in:
Jialin Ouyang 2025-10-28 01:13:10 -07:00 committed by GitHub
parent d34f5fe939
commit b46e4a06f1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 3 additions and 2 deletions

View File

@@ -108,9 +108,10 @@ class InputBatch:
pin_memory=False,
)
self.token_ids_cpu = self.token_ids_cpu_tensor.numpy()
self.is_token_ids = torch.zeros(
self.is_token_ids_tensor = torch.zeros(
(max_num_reqs, max_model_len), device="cpu", dtype=bool, pin_memory=False
)
self.is_token_ids = self.is_token_ids_tensor.numpy()
# Store prompt embeddings per request to avoid OOM from large upfront
# allocation if max_model_len is big.
# Maps req_index -> tensor of shape (num_prompt_tokens, hidden_size)

View File

@@ -1103,7 +1103,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin):
out=self.input_ids.cpu[:total_num_scheduled_tokens],
)
if self.enable_prompt_embeds:
is_token_ids = self.input_batch.is_token_ids.flatten()
is_token_ids = self.input_batch.is_token_ids_tensor.flatten()
torch.index_select(
is_token_ids,
0,