[Bugfix] Fix broken MTP weight loading for FP8 KV Scales (#27227)

Signed-off-by: Benjamin Chislett <bchislett@nvidia.com>
Benjamin Chislett 2025-10-21 01:51:44 -04:00 committed by GitHub
parent 5ff5d94e77
commit f381cf2302

@@ -16,7 +16,10 @@ from vllm.model_executor.layers.vocab_parallel_embedding import (
     ParallelLMHead,
     VocabParallelEmbedding,
 )
-from vllm.model_executor.model_loader.weight_utils import default_weight_loader
+from vllm.model_executor.model_loader.weight_utils import (
+    default_weight_loader,
+    maybe_remap_kv_scale_name,
+)
 from vllm.platforms import current_platform
 from vllm.sequence import IntermediateTensors
@@ -278,6 +281,10 @@ class DeepSeekMTP(nn.Module, SupportsPP):
                 if name.endswith(".bias") and name not in params_dict:
                     continue
+                name = maybe_remap_kv_scale_name(name, params_dict)
+                if name is None:
+                    continue
                 # According to DeepSeek-V3 Technical Report, MTP modules
                 # shares embedding layer. We only load the first weights.
                 if (
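
The second hunk is the actual fix: FP8 checkpoints store KV-cache scaling factors under names that may not match the parameter names registered on the module, so without a remapping step the lookup in params_dict presumably fails and MTP weight loading breaks. Below is a minimal, self-contained sketch of that pattern; the helper, the checkpoint names, and the ".attn." layout are illustrative assumptions, not vLLM's actual maybe_remap_kv_scale_name implementation.

# A minimal sketch, assuming hypothetical checkpoint/parameter names; this
# illustrates the remapping pattern, it is not vLLM's actual helper.
def remap_kv_scale_name(name: str, params_dict: dict) -> str | None:
    """Illustrative stand-in for maybe_remap_kv_scale_name.

    FP8 checkpoints may store KV-cache scales under a name that does not
    match the registered parameter name. Try a remapped candidate and
    return None when no destination parameter exists, so the caller can
    skip the entry instead of hitting a missing-key error.
    """
    if name.endswith((".k_scale", ".v_scale")) and name not in params_dict:
        module_prefix, scale_suffix = name.rsplit(".", 1)
        candidate = f"{module_prefix}.attn.{scale_suffix}"  # assumed layout
        return candidate if candidate in params_dict else None
    return name

# Usage in a simplified load loop, mirroring the shape of the diff above:
params_dict = {"layers.0.self_attn.attn.k_scale": None}
for name in ["layers.0.self_attn.k_scale", "layers.0.mlp.k_scale"]:
    name = remap_kv_scale_name(name, params_dict)
    if name is None:
        continue  # scale has no destination parameter; skip it
    # ... load the weight into params_dict[name] ...

The key design point the diff follows is that a remapped name can come back as None, and such entries must be skipped rather than loaded, which is why the added "if name is None: continue" accompanies the remap call.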