Revert "[V1] [Bugfix] eagle bugfix and enable correct lm_head for multimodal (#18034)" (#18600)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
Cyrus Leung 2025-05-23 17:18:22 +08:00 committed by GitHub
parent a1fe24d961
commit fbb13a2c15
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 3 additions and 6 deletions

View File

@@ -70,7 +70,8 @@ class EAGLEConfig(PretrainedConfig):
if self.model is not None:
for k, v in self.model.to_dict().items():
setattr(self, k, v)
if not hasattr(self, k):
setattr(self, k, v)
@classmethod
def from_pretrained(

View File

@@ -9,7 +9,6 @@ from vllm.distributed.parallel_state import get_pp_group
from vllm.forward_context import set_forward_context
from vllm.logger import init_logger
from vllm.model_executor.model_loader import get_model
from vllm.model_executor.models import supports_multimodal
from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM
from vllm.triton_utils import tl, triton
from vllm.v1.attention.backends.flash_attn import FlashAttentionMetadata
@@ -311,10 +310,7 @@ class EagleProposer:
if self.vllm_config.speculative_config.method != "eagle3" and \
hasattr(target_model, "lm_head"):
logger.info("Loading EAGLE LM head weights from the target model.")
if supports_multimodal(target_model):
self.model.lm_head = target_model.get_language_model().lm_head
else:
self.model.lm_head = target_model.lm_head
self.model.lm_head = target_model.lm_head
@torch.inference_mode()
def dummy_run(