From 3a765bd5e1891b8c6454e60b56c2405fbe35bb9e Mon Sep 17 00:00:00 2001
From: Woosuk Kwon
Date: Sun, 17 Dec 2023 01:51:12 -0800
Subject: [PATCH] Temporarily enforce eager mode for GPTQ models (#2154)

---
 vllm/config.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/vllm/config.py b/vllm/config.py
index fa22a705aa229..a2b2050240f57 100644
--- a/vllm/config.py
+++ b/vllm/config.py
@@ -185,6 +185,11 @@ class ModelConfig:
             self.max_context_len_to_capture = self.max_model_len
         self.max_context_len_to_capture = min(self.max_context_len_to_capture,
                                               self.max_model_len)
+        if self.quantization == "gptq" and not self.enforce_eager:
+            # Related issue: https://github.com/vllm-project/vllm/issues/2147
+            logger.warning("GPTQ does not support CUDA graph yet. Disabling "
+                           "CUDA graph.")
+            self.enforce_eager = True
 
     def verify_with_parallel_config(
         self,
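
Illustrative note (not part of the patch): the guard added above makes GPTQ models fall back to eager execution whenever CUDA graph capture would otherwise have been used. Below is a minimal standalone sketch of the same check, using a hypothetical ModelConfigSketch class (not vLLM's real ModelConfig), reduced to the two attributes the patch reads and writes:

    import logging

    logger = logging.getLogger(__name__)


    class ModelConfigSketch:
        """Hypothetical stand-in for vllm.config.ModelConfig, reduced to the
        quantization and enforce_eager attributes touched by the patch."""

        def __init__(self, quantization=None, enforce_eager=False):
            self.quantization = quantization
            self.enforce_eager = enforce_eager
            # Mirror of the patched check: GPTQ kernels cannot be captured
            # into CUDA graphs yet (vllm-project/vllm#2147), so fall back to
            # eager mode and warn the user.
            if self.quantization == "gptq" and not self.enforce_eager:
                logger.warning("GPTQ does not support CUDA graph yet. "
                               "Disabling CUDA graph.")
                self.enforce_eager = True


    cfg = ModelConfigSketch(quantization="gptq")
    assert cfg.enforce_eager  # CUDA graphs stay disabled for GPTQ models

The check is placed in the config constructor rather than at graph-capture time so that every downstream consumer of the config sees a consistent enforce_eager value.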