diff --git a/vllm/config.py b/vllm/config.py
index ca71accdfa33f..253a7880bd11d 100644
--- a/vllm/config.py
+++ b/vllm/config.py
@@ -3174,11 +3174,13 @@ class VllmConfig:
             # and avoid any potential issues with the inductor.
             self.compilation_config.custom_ops = ["none"]
             self.compilation_config.use_cudagraph = True
-            self.compilation_config.use_inductor = True
+            # Run eagerly: disable inductor and piecewise compilation while
+            # keeping CUDA graph capture enabled (guard removed below in
+            # gpu_model_runner.py: capture_model).
+            self.compilation_config.use_inductor = False
             self.compilation_config.cudagraph_num_of_warmups = 1
             self.compilation_config.pass_config.enable_fusion = False
             self.compilation_config.pass_config.enable_reshape = False
-            self.compilation_config.level = CompilationLevel.PIECEWISE
+            self.compilation_config.level = CompilationLevel.NO_COMPILATION
 
         self._set_cudagraph_sizes()
 
diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py
index fda5dc3e5babf..9697864048ecb 100644
--- a/vllm/v1/worker/gpu_model_runner.py
+++ b/vllm/v1/worker/gpu_model_runner.py
@@ -840,12 +840,6 @@ class GPUModelRunner:
         gc.collect()
 
     def capture_model(self) -> None:
-        if not self.use_cuda_graph:
-            logger.warning(
-                "Skipping CUDA graph capture. Please add "
-                "-O %s to use CUDA graphs.", CompilationLevel.PIECEWISE)
-            return
-
         start_time = time.perf_counter()
         start_free_gpu_memory = torch.cuda.mem_get_info()[0]
 