[Bug] [Spec Dec]: Fix kv_cache dtype mismatch for Eagle3 drafter on FP8 target (#24505)
Signed-off-by: vllmellm <vllm.ellm@embeddedllm.com>
parent 17871983a2
commit 8c54610265
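Summary of the change, as read from the diff below: the Eagle3 drafter's LlamaDecoderLayer was previously constructed without a CacheConfig, so the attention it builds fell back to the default ("auto") kv-cache dtype, while a target model served with an FP8 kv cache allocates its cache in FP8; the drafter and target then disagreed on the kv-cache dtype. The patch threads cache_config from the VllmConfig that is active during model construction into every drafter layer so both sides share the same kv-cache dtype. Hedged illustrative sketches follow each hunk.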
@@ -9,7 +9,7 @@ import torch.nn as nn
 from transformers import LlamaConfig
 
 from vllm.compilation.decorators import support_torch_compile
-from vllm.config import VllmConfig
+from vllm.config import CacheConfig, VllmConfig, get_current_vllm_config
 from vllm.logger import init_logger
 from vllm.model_executor.layers.layernorm import RMSNorm
 from vllm.model_executor.layers.linear import QKVParallelLinear
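The widened import brings in CacheConfig, whose cache_dtype field records the kv-cache dtype selected for the engine (e.g. "auto" or "fp8"), and get_current_vllm_config, which returns the VllmConfig active while a model is being built. A minimal, hedged sketch of reading that dtype, using only the two names imported above (how cache_dtype is consumed downstream is not shown here):

```python
# Hedged sketch: reading the engine-selected kv-cache dtype during model
# construction. Assumes a vLLM build exposing the imports from the hunk above.
from vllm.config import get_current_vllm_config

cache_config = get_current_vllm_config().cache_config
kv_dtype = cache_config.cache_dtype  # e.g. "auto", "fp8", "fp8_e4m3", "fp8_e5m2"
```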
@@ -33,10 +33,14 @@ class LlamaDecoderLayer(LlamaDecoderLayer):
     def __init__(
         self,
         config: LlamaConfig,
+        cache_config: Optional[CacheConfig] = None,
         quant_config: Optional[QuantizationConfig] = None,
         prefix: str = "",
     ) -> None:
-        super().__init__(config, quant_config=quant_config, prefix=prefix)
+        super().__init__(config,
+                         cache_config=cache_config,
+                         quant_config=quant_config,
+                         prefix=prefix)
 
         # override qkv
         self.self_attn.qkv_proj = QKVParallelLinear(
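The drafter layer previously dropped cache_config when calling into the shared LlamaDecoderLayer base; it now forwards it, so whatever attention the base class builds sees the same kv-cache dtype as the target. A minimal toy sketch of the forwarding pattern (ToyCacheConfig, ToyBaseLayer and ToyDraftLayer are made up for illustration and are not vLLM's real classes):

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class ToyCacheConfig:
    cache_dtype: str = "auto"  # "auto" = follow the model dtype, or e.g. "fp8"

class ToyBaseLayer:
    def __init__(self, cache_config: Optional[ToyCacheConfig] = None) -> None:
        # The base layer (stand-in for the shared decoder layer) picks the
        # kv-cache dtype from cache_config; None silently means "auto".
        self.kv_cache_dtype = cache_config.cache_dtype if cache_config else "auto"

class ToyDraftLayer(ToyBaseLayer):
    def __init__(self, cache_config: Optional[ToyCacheConfig] = None) -> None:
        # Before the fix the drafter subclass did not forward cache_config, so
        # the base always saw None and fell back to "auto" even on an FP8 target.
        super().__init__(cache_config=cache_config)

target = ToyBaseLayer(ToyCacheConfig("fp8"))
drafter = ToyDraftLayer(ToyCacheConfig("fp8"))
assert target.kv_cache_dtype == drafter.kv_cache_dtype == "fp8"
```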
@@ -114,6 +118,8 @@ class LlamaModel(nn.Module):
             speculative_config.draft_model_config.hf_config
         self.vocab_size = self.config.vocab_size
 
+        current_vllm_config = get_current_vllm_config()
+
         self.embed_tokens = VocabParallelEmbedding(
             self.config.vocab_size,
             self.config.hidden_size,
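This hunk fetches the VllmConfig that is active while the drafter is being built; its cache_config records the kv-cache dtype the engine actually allocates, which is what the layers constructed below need. Conceptually, get_current_vllm_config is a "current configuration" accessor set up around model construction. A minimal sketch of that pattern with contextvars, purely to illustrate the idea (not vLLM's implementation; all names are invented):

```python
import contextvars
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Optional

@dataclass
class ToyVllmConfig:
    kv_cache_dtype: str = "auto"

_current: contextvars.ContextVar[Optional[ToyVllmConfig]] = contextvars.ContextVar(
    "current_config", default=None)

@contextmanager
def set_current_config(cfg: ToyVllmConfig):
    # Make cfg the ambient config for everything built inside the block.
    token = _current.set(cfg)
    try:
        yield
    finally:
        _current.reset(token)

def get_current_config() -> ToyVllmConfig:
    cfg = _current.get()
    return cfg if cfg is not None else ToyVllmConfig()  # fall back to defaults

# Model-building code can read the ambient config without threading it
# through every constructor argument:
with set_current_config(ToyVllmConfig(kv_cache_dtype="fp8")):
    assert get_current_config().kv_cache_dtype == "fp8"
```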
@@ -123,6 +129,7 @@ class LlamaModel(nn.Module):
         self.layers = nn.ModuleList([
             LlamaDecoderLayer(
                 config=self.config,
+                cache_config=current_vllm_config.cache_config,
                 prefix=maybe_prefix(prefix, f"layers.{start_layer_id}"),
             )
         ])
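For reference, the configuration that exercises this code path is an FP8 kv-cache target paired with an Eagle3 drafter. A hedged usage sketch against vLLM's offline API follows; the argument names mirror the public speculative-decoding docs for recent releases, the checkpoint names are placeholders, and the exact fields accepted in speculative_config may differ across vLLM versions:

```python
from vllm import LLM, SamplingParams

# Placeholders: substitute the target and Eagle3 drafter checkpoints you use.
llm = LLM(
    model="meta-llama/Llama-3.1-8B-Instruct",   # target served with an FP8 kv cache
    kv_cache_dtype="fp8",
    speculative_config={
        "method": "eagle3",
        "model": "yuhuili/EAGLE3-LLaMA3.1-Instruct-8B",
        "num_speculative_tokens": 3,
    },
)
out = llm.generate(["The capital of France is"], SamplingParams(max_tokens=16))
print(out[0].outputs[0].text)
```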