From 2b30afa4420cbada6dd9084de3ee7eb19142b7ff Mon Sep 17 00:00:00 2001
From: nopperl <54780682+nopperl@users.noreply.github.com>
Date: Thu, 4 Sep 2025 20:59:16 +0900
Subject: [PATCH] Use hidden_size_per_head as head_size fallback (#24221)

Signed-off-by: nopperl <54780682+nopperl@users.noreply.github.com>
---
 vllm/config/__init__.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/vllm/config/__init__.py b/vllm/config/__init__.py
index 7c2b497022658..941aff8919a92 100644
--- a/vllm/config/__init__.py
+++ b/vllm/config/__init__.py
@@ -1426,6 +1426,11 @@ class ModelConfig:
         if getattr(self.hf_text_config, "head_dim", None) is not None:
             return self.hf_text_config.head_dim
 
+        # NOTE: Some models (such as PLaMo2.1) use `hidden_size_per_head`
+        if getattr(self.hf_text_config, "hidden_size_per_head",
+                   None) is not None:
+            return self.hf_text_config.hidden_size_per_head
+
         # FIXME(woosuk): This may not be true for all models.
         return (self.hf_text_config.hidden_size //
                 self.hf_text_config.num_attention_heads)
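
For reference, a minimal standalone sketch of the head-size resolution order that the patched method follows; `resolve_head_size` and `text_config` are illustrative names standing in for the method on ModelConfig and `self.hf_text_config`, not vLLM API:

    def resolve_head_size(text_config) -> int:
        # 1. Prefer an explicit `head_dim` if the HF text config provides one.
        if getattr(text_config, "head_dim", None) is not None:
            return text_config.head_dim
        # 2. New fallback from this patch: some models (e.g. PLaMo2.1) expose
        #    the per-head size as `hidden_size_per_head` instead of `head_dim`.
        if getattr(text_config, "hidden_size_per_head", None) is not None:
            return text_config.hidden_size_per_head
        # 3. Last resort: derive it from hidden_size / num_attention_heads,
        #    which the existing FIXME notes may not hold for all models.
        return text_config.hidden_size // text_config.num_attention_heads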