Temporarily enforce eager mode for GPTQ models (#2154)
commit 3a765bd5e1
parent 26c52a5ea6
vllm/config.py
@@ -185,6 +185,11 @@ class ModelConfig:
             self.max_context_len_to_capture = self.max_model_len
         self.max_context_len_to_capture = min(self.max_context_len_to_capture,
                                               self.max_model_len)
+        if self.quantization == "gptq" and not self.enforce_eager:
+            # Related issue: https://github.com/vllm-project/vllm/issues/2147
+            logger.warning("GPTQ does not support CUDA graph yet. Disabling "
+                           "CUDA graph.")
+            self.enforce_eager = True
 
     def verify_with_parallel_config(
         self,
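
A minimal sketch (not part of the commit) of the user-visible effect, assuming a placeholder GPTQ checkpoint name: after this change, constructing an engine for a GPTQ model with enforce_eager left at its default of False logs the warning above and falls back to eager mode instead of attempting CUDA graph capture.

# Sketch only; the model name is an illustrative placeholder.
from vllm import LLM

# enforce_eager defaults to False. The new check in ModelConfig sees
# quantization == "gptq", logs "GPTQ does not support CUDA graph yet.
# Disabling CUDA graph.", and forces enforce_eager = True, so the model
# runs eagerly until GPTQ gains CUDA graph support.
llm = LLM(model="TheBloke/Llama-2-7B-Chat-GPTQ", quantization="gptq")
outputs = llm.generate("Hello, my name is")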