From 9f0608fc166ba0173dac4a470753464b969c7043 Mon Sep 17 00:00:00 2001
From: zhrrr <43847754+izhuhaoran@users.noreply.github.com>
Date: Thu, 26 Jun 2025 05:03:17 +0800
Subject: [PATCH] [Bugfix] default set cuda_graph_sizes to max_num_seqs for v1
 engine (#20062)

Signed-off-by: izhuhaoran
---
 vllm/config.py | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/vllm/config.py b/vllm/config.py
index 96ea47a0dce38..e90ad5e9c8b65 100644
--- a/vllm/config.py
+++ b/vllm/config.py
@@ -2042,11 +2042,12 @@ class SchedulerConfig:
     NOTE: This will be replaced by speculative config in the future; it is
     present to enable correctness tests until then."""
 
-    cuda_graph_sizes: list[int] = field(default_factory=lambda: [512])
-    """Cuda graph capture sizes, default is 512.
-    1. if one value is provided, then the capture list would follow the
+    cuda_graph_sizes: list[int] = field(default_factory=list)
+    """CUDA graph capture sizes.
+    1. If no value is provided, it defaults to [max_num_seqs].
+    2. If one value is provided, the capture list follows the
     pattern: [1, 2, 4] + [i for i in range(8, cuda_graph_sizes + 1, 8)]
-    2. more than one value (e.g. 1 2 128) is provided, then the capture list
+    3. If more than one value is provided (e.g. 1 2 128), the capture list
     will follow the provided list."""
 
     delay_factor: float = 0.0
@@ -2211,6 +2212,10 @@ class SchedulerConfig:
                 self.max_num_partial_prefills, self.max_long_partial_prefills,
                 self.long_prefill_token_threshold)
 
+        # If cuda_graph_sizes is not specified, default it to [max_num_seqs].
+        if not self.cuda_graph_sizes:
+            self.cuda_graph_sizes = [self.max_num_seqs]
+
     @model_validator(mode='after')
     def _verify_args(self) -> Self:
         if (self.max_num_batched_tokens < self.max_model_len
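
For illustration, a minimal sketch of how the three documented cases resolve
to a capture list. resolve_cuda_graph_sizes is a hypothetical helper name
invented for this note, not part of vLLM's API; it mirrors the docstring
above rather than the engine's actual capture-list construction.

    # Hypothetical helper (not vLLM API): mirrors the three documented cases.
    def resolve_cuda_graph_sizes(cuda_graph_sizes: list[int],
                                 max_num_seqs: int) -> list[int]:
        # Case 1: nothing provided -> default to [max_num_seqs], which is
        # what the patched __post_init__ does; then fall through to case 2.
        if not cuda_graph_sizes:
            cuda_graph_sizes = [max_num_seqs]
        # Case 2: a single value -> expand to [1, 2, 4, 8, 16, ..., value].
        if len(cuda_graph_sizes) == 1:
            upper = cuda_graph_sizes[0]
            return [1, 2, 4] + [i for i in range(8, upper + 1, 8)]
        # Case 3: an explicit list -> used as provided.
        return cuda_graph_sizes

    # For example, with max_num_seqs=32:
    #   resolve_cuda_graph_sizes([], 32)          -> [1, 2, 4, 8, 16, 24, 32]
    #   resolve_cuda_graph_sizes([16], 32)        -> [1, 2, 4, 8, 16]
    #   resolve_cuda_graph_sizes([1, 2, 128], 32) -> [1, 2, 128]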