Mirror of https://git.datalinker.icu/vllm-project/vllm.git, synced 2025-12-15 18:25:45 +08:00
[V1] [Bugfix] eagle bugfix and enable correct lm_head for multimodal (#18034)
Signed-off-by: Ronald Xu <ronaldxu@amazon.com>
parent 60cad94b86
commit 4c611348a7
@@ -70,8 +70,7 @@ class EAGLEConfig(PretrainedConfig):
         if self.model is not None:
             for k, v in self.model.to_dict().items():
-                if not hasattr(self, k):
-                    setattr(self, k, v)
+                setattr(self, k, v)

     @classmethod
     def from_pretrained(
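The first hunk is the "eagle bugfix" half of the commit title: EAGLEConfig previously copied a key from the draft model's config onto itself only when the attribute was missing, but PretrainedConfig pre-populates many attributes with defaults, so the hasattr guard could silently keep an inherited default and discard the draft model's value. Dropping the guard lets the draft config always win. Below is a minimal sketch of the behavioral difference, not taken from the commit; tie_word_embeddings is just an illustrative key that PretrainedConfig sets by default.

    # Sketch only: shows why "if not hasattr(self, k)" skipped real config
    # values. PretrainedConfig.__init__ already sets tie_word_embeddings,
    # so the attribute always exists and the guard never fires.
    from transformers import PretrainedConfig

    cfg = PretrainedConfig()                 # tie_word_embeddings defaults to True
    draft = {"tie_word_embeddings": False}   # hypothetical draft-model config value

    for k, v in draft.items():               # old behavior: guard keeps the default
        if not hasattr(cfg, k):
            setattr(cfg, k, v)
    assert cfg.tie_word_embeddings is True    # the draft model's value was lost

    for k, v in draft.items():               # new behavior: draft config always wins
        setattr(cfg, k, v)
    assert cfg.tie_word_embeddings is False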
@@ -9,6 +9,7 @@ from vllm.distributed.parallel_state import get_pp_group
 from vllm.forward_context import set_forward_context
 from vllm.logger import init_logger
 from vllm.model_executor.model_loader import get_model
+from vllm.model_executor.models import supports_multimodal
 from vllm.model_executor.models.llama_eagle3 import Eagle3LlamaForCausalLM
 from vllm.triton_utils import tl, triton
 from vllm.v1.attention.backends.flash_attn import FlashAttentionMetadata
@@ -310,7 +311,10 @@ class EagleProposer:
         if self.vllm_config.speculative_config.method != "eagle3" and \
                 hasattr(target_model, "lm_head"):
             logger.info("Loading EAGLE LM head weights from the target model.")
-            self.model.lm_head = target_model.lm_head
+            if supports_multimodal(target_model):
+                self.model.lm_head = target_model.get_language_model().lm_head
+            else:
+                self.model.lm_head = target_model.lm_head

     @torch.inference_mode()
     def dummy_run(
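The last two hunks cover the multimodal half of the title: on a multimodal target model the lm_head sits on the inner language model rather than on the wrapper, so the old single assignment could miss it. The new branch asks supports_multimodal() and reaches through get_language_model() when it applies. A self-contained sketch of that dispatch, using stand-in nn.Module classes instead of real vLLM models and a hasattr test in place of vLLM's supports_multimodal interface check:

    # Sketch only: TextOnlyModel/MultiModalModel are stand-ins, and
    # hasattr(..., "get_language_model") approximates supports_multimodal().
    import torch.nn as nn

    class TextOnlyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.lm_head = nn.Linear(16, 32)          # head on the top-level model

    class MultiModalModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.language_model = TextOnlyModel()     # head on the inner LM

        def get_language_model(self):
            return self.language_model

    def resolve_lm_head(target_model):
        # Mirrors the commit's branch: multimodal wrappers expose the head
        # via their inner language model; text-only models expose it directly.
        if hasattr(target_model, "get_language_model"):
            return target_model.get_language_model().lm_head
        return target_model.lm_head

    assert isinstance(resolve_lm_head(TextOnlyModel()), nn.Linear)
    assert isinstance(resolve_lm_head(MultiModalModel()), nn.Linear)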