[Model Runner V2] Optimize CUDA graph capture time (#29275)

Signed-off-by: Woosuk Kwon <woosuk.kwon@berkeley.edu>
This commit is contained in:
Woosuk Kwon 2025-11-23 11:15:32 -08:00 committed by GitHub
parent b004c00418
commit 62d54ba46d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 5 additions and 1 deletions

View File

@@ -106,7 +106,10 @@ class CudaGraphManager:
input_buffers.query_start_loc.np[: batch_size + 1] = np.arange(batch_size + 1)
input_buffers.query_start_loc.np[batch_size:] = batch_size
input_buffers.query_start_loc.copy_to_gpu()
input_buffers.seq_lens[:batch_size] = self.max_model_len
# HACK(woosuk): To optimize warmup time, we use 1 (instead of max_model_len)
# for seq_lens. This leads to a mismatch between seq_lens (GPU) and
# seq_lens_np (CPU), which might cause issues in some attention backends.
input_buffers.seq_lens[:batch_size] = 1
input_buffers.seq_lens[batch_size:] = 0
input_block_tables = [x[:batch_size] for x in block_tables.input_block_tables]

View File

@@ -313,6 +313,7 @@ class GPUModelRunner(LoRAModelRunnerMixin, KVConnectorModelRunnerMixin):
return 0
start_time = time.perf_counter()
gc.collect()
torch.cuda.empty_cache()
start_free_gpu_memory = torch.cuda.mem_get_info()[0]