From cf6bbcb49324c24fc0f6f9381400c299c9c2d7ac Mon Sep 17 00:00:00 2001
From: Concurrensee
Date: Mon, 13 Jan 2025 01:05:06 -0600
Subject: [PATCH] [Misc] Fix Deepseek V2 fp8 kv-scale remapping (#11947)

Signed-off-by: Yida Wu
---
 vllm/model_executor/models/deepseek_v2.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py
index 9132040545863..d83cafaf998ab 100644
--- a/vllm/model_executor/models/deepseek_v2.py
+++ b/vllm/model_executor/models/deepseek_v2.py
@@ -45,7 +45,8 @@ from vllm.model_executor.layers.rotary_embedding import get_rope
 from vllm.model_executor.layers.sampler import SamplerOutput, get_sampler
 from vllm.model_executor.layers.vocab_parallel_embedding import (
     ParallelLMHead, VocabParallelEmbedding)
-from vllm.model_executor.model_loader.weight_utils import default_weight_loader
+from vllm.model_executor.model_loader.weight_utils import (
+    default_weight_loader, maybe_remap_kv_scale_name)
 from vllm.model_executor.sampling_metadata import SamplingMetadata
 from vllm.sequence import IntermediateTensors
 
@@ -635,6 +636,11 @@ class DeepseekV2ForCausalLM(nn.Module, SupportsPP):
                     if name.endswith(".bias") and name not in params_dict:
                         continue
 
+                    # Remapping the name of FP8 kv-scale.
+                    name = maybe_remap_kv_scale_name(name, params_dict)
+                    if name is None:
+                        continue
+
                     if is_pp_missing_parameter(name, self):
                         continue
 
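
Note for reviewers: maybe_remap_kv_scale_name comes from vllm.model_executor.model_loader.weight_utils and is already called in the load_weights loops of other vLLM models; as the patch shows, a None return means the checkpoint tensor has no matching parameter and should be skipped. The snippet below is a minimal, self-contained sketch of that remapping pattern, assuming legacy FP8 checkpoints name their scales "...self_attn.kv_scale" (or ".k_scale"/".v_scale") while the model registers them as "...self_attn.attn.k_scale"/".attn.v_scale". remap_kv_scale_name_sketch is a hypothetical stand-in written for illustration, not vLLM's actual implementation.

# Illustrative sketch only: a toy remapper mimicking the behavior the patch
# relies on. The exact source/target naming conventions are assumptions.
from typing import Optional


def remap_kv_scale_name_sketch(name: str,
                               params_dict: dict) -> Optional[str]:
    # Legacy checkpoints store one combined scale per attention layer; this
    # toy maps it onto the k-scale slot (a real loader would also apply it
    # to the v-scale).
    if name.endswith(".kv_scale"):
        remapped = name.replace(".kv_scale", ".attn.k_scale")
        return remapped if remapped in params_dict else None
    # Newer checkpoints store separate k/v scales outside the inner ".attn".
    for suffix in (".k_scale", ".v_scale"):
        if name.endswith(suffix) and ".attn" not in name:
            remapped = name.replace(suffix, f".attn{suffix}")
            return remapped if remapped in params_dict else None
    # Anything else is left untouched.
    return name


if __name__ == "__main__":
    params = {"model.layers.0.self_attn.attn.k_scale": 1.0,
              "model.layers.0.self_attn.attn.v_scale": 1.0}
    print(remap_kv_scale_name_sketch(
        "model.layers.0.self_attn.k_scale", params))
    # -> model.layers.0.self_attn.attn.k_scale
    print(remap_kv_scale_name_sketch(
        "model.layers.99.self_attn.k_scale", params))
    # -> None: no matching parameter in this toy params_dict, so the
    #    calling loop would `continue` exactly as in the patch above.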