From a869baca73eb90ae7bd18402915dc4bfc36cf06b Mon Sep 17 00:00:00 2001
From: Cyrus Leung
Date: Mon, 26 May 2025 22:49:22 +0800
Subject: [PATCH] [Bugfix] Fix Llama GGUF initialization (#18717)

Signed-off-by: DarkLight1337
---
 vllm/model_executor/models/llama.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py
index 6584980f6dc2..d36b6466c0bb 100644
--- a/vllm/model_executor/models/llama.py
+++ b/vllm/model_executor/models/llama.py
@@ -208,7 +208,7 @@ class LlamaAttention(nn.Module):
                  quant_config: Optional[QuantizationConfig]) -> None:
         is_neox_style = True
         is_gguf = quant_config and quant_config.get_name() == "gguf"
-        if is_gguf and self.config.model_type == "llama":
+        if is_gguf and config.model_type == "llama":
             is_neox_style = False
         self.rotary_emb = get_rope(
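
For context, a minimal sketch of the failure mode and the fix. This is an
illustrative stand-in, not the vLLM source: DummyConfig, DummyGGUFQuantConfig,
and Attention are hypothetical names, and the assumption is that
LlamaAttention.__init__ never assigns self.config, so reading it there would
presumably raise AttributeError while a GGUF Llama model is being constructed.

    from dataclasses import dataclass
    from typing import Optional


    @dataclass
    class DummyConfig:
        # Stand-in for the Hugging Face model config.
        model_type: str = "llama"


    class DummyGGUFQuantConfig:
        # Stand-in for vLLM's GGUF quantization config.
        def get_name(self) -> str:
            return "gguf"


    class Attention:
        def __init__(self, config: DummyConfig,
                     quant_config: Optional[DummyGGUFQuantConfig]) -> None:
            is_neox_style = True
            is_gguf = quant_config and quant_config.get_name() == "gguf"
            # Buggy version: `self.config` is never assigned in this
            # constructor, so this line would crash during initialization:
            #     if is_gguf and self.config.model_type == "llama":
            # Fixed version reads the constructor argument directly:
            if is_gguf and config.model_type == "llama":
                is_neox_style = False
            self.is_neox_style = is_neox_style


    attn = Attention(DummyConfig(), DummyGGUFQuantConfig())
    assert attn.is_neox_style is False  # GGUF Llama path disables neox style

The one-character nature of the fix (dropping the `self.` prefix) is why the
diffstat reports a single changed line.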