From 5e13c07d00df0ca906c4c06110c277421202d3e2 Mon Sep 17 00:00:00 2001
From: RonaldBXu <72748153+RonaldBXu@users.noreply.github.com>
Date: Tue, 27 May 2025 22:09:14 -0700
Subject: [PATCH] [V1] [Bugfix] eagle bugfix and enable correct lm_head for
 multimodal (2) (#18781)

Signed-off-by: Ronald Xu
---
 vllm/transformers_utils/configs/eagle.py | 2 +-
 vllm/v1/spec_decode/eagle.py             | 6 +++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/vllm/transformers_utils/configs/eagle.py b/vllm/transformers_utils/configs/eagle.py
index 377523efefc30..a43e4746cb6c6 100644
--- a/vllm/transformers_utils/configs/eagle.py
+++ b/vllm/transformers_utils/configs/eagle.py
@@ -70,7 +70,7 @@ class EAGLEConfig(PretrainedConfig):

         if self.model is not None:
             for k, v in self.model.to_dict().items():
-                if not hasattr(self, k):
+                if k not in kwargs:
                     setattr(self, k, v)

     @classmethod
diff --git a/vllm/v1/spec_decode/eagle.py b/vllm/v1/spec_decode/eagle.py
index 971b06758c214..1ca8564231659 100644
--- a/vllm/v1/spec_decode/eagle.py
+++ b/vllm/v1/spec_decode/eagle.py
@@ -9,6 +9,7 @@
 from vllm.distributed.parallel_state import get_pp_group
 from vllm.forward_context import set_forward_context
 from vllm.logger import init_logger
 from vllm.model_executor.model_loader import get_model
+from vllm.model_executor.models import supports_multimodal
 from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM
 from vllm.v1.attention.backends.flash_attn import (CommonAttentionMetadata,
                                                    FlashAttentionMetadata)
@@ -346,7 +347,10 @@ class EagleProposer:
         if self.vllm_config.speculative_config.method != "eagle3" and \
                 hasattr(target_model, "lm_head"):
             logger.info("Loading EAGLE LM head weights from the target model.")
-            self.model.lm_head = target_model.lm_head
+            if supports_multimodal(target_model):
+                self.model.lm_head = target_model.get_language_model().lm_head
+            else:
+                self.model.lm_head = target_model.lm_head

     @torch.inference_mode()
     def dummy_run(
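
Reviewer note (not part of the patch): below is a minimal, hypothetical sketch
of the lm_head sharing behavior the second hunk introduces. The stand-in
classes are invented for illustration only; the names supports_multimodal(),
get_language_model(), and the lm_head attribute are the only ones taken from
the diff above.

# Illustrative sketch, assuming a multimodal wrapper keeps its LM head on an
# inner language model rather than on the wrapper itself.
from dataclasses import dataclass, field


@dataclass
class LMHead:
    vocab_size: int = 32000


@dataclass
class LanguageModel:
    lm_head: LMHead = field(default_factory=LMHead)


@dataclass
class TextOnlyTarget:
    # Text-only targets expose lm_head directly on the top-level module.
    lm_head: LMHead = field(default_factory=LMHead)


@dataclass
class MultimodalTarget:
    # Hypothetical multimodal wrapper: the LM head lives on the inner language
    # model, hence the get_language_model() indirection in the patch.
    language_model: LanguageModel = field(default_factory=LanguageModel)

    def get_language_model(self) -> LanguageModel:
        return self.language_model


def supports_multimodal(model) -> bool:
    # Stand-in for vllm.model_executor.models.supports_multimodal.
    return isinstance(model, MultimodalTarget)


def share_lm_head(draft, target) -> None:
    # Mirrors the patched branch in EagleProposer.load_model: pick the LM head
    # from the inner language model for multimodal targets, otherwise from the
    # target model directly.
    if supports_multimodal(target):
        draft.lm_head = target.get_language_model().lm_head
    else:
        draft.lm_head = target.lm_head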