[BugFix] Fix shared storage connector KV load to only load attention layers (#21428)

Signed-off-by: David Chen <530634352@qq.com>
Author: WeiQing Chen
Date: 2025-07-26 22:07:40 +08:00 (committed by GitHub)
parent a40a8506df
commit 97d6c30cc9


@@ -156,8 +156,16 @@ class SharedStorageConnector(KVConnectorBase_V1):
             logger.info("Inject KV cache of %d tokens to the paged memory",
                         len(request.slot_mapping))
             for layer_name in forward_context.no_compile_layers:
-                attn_layer = forward_context.no_compile_layers[layer_name]
-                kv_cache_layer = attn_layer.kv_cache[\
+                layer = forward_context.no_compile_layers[layer_name]
+
+                # Only process layers that have a kv_cache attribute
+                # (attention layers); skip non-attention layers such as
+                # FusedMoE / MLP.
+                kv_cache_attr = getattr(layer, 'kv_cache', None)
+                if kv_cache_attr is None:
+                    continue
+
+                kv_cache_layer = kv_cache_attr[ \
                     forward_context.virtual_engine]
                 filename = self._generate_filename_debug(
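
Below is a minimal, self-contained sketch of the guard this change introduces. The layer classes and names are illustrative stand-ins, not vLLM's actual modules; it only shows why the getattr(layer, 'kv_cache', None) check is needed: forward_context.no_compile_layers can contain non-attention modules (e.g. FusedMoE / MLP blocks) with no kv_cache attribute, which is the failure mode this change guards against.

# Illustrative sketch (hypothetical classes, not vLLM's real ones): only
# layers that expose a kv_cache attribute are touched when injecting KV.
class FakeAttentionLayer:
    def __init__(self, num_virtual_engines: int):
        # One KV-cache entry per virtual engine (placeholder strings here).
        self.kv_cache = [f"kv_tensor_ve{i}" for i in range(num_virtual_engines)]

class FakeMoELayer:
    """Stands in for FusedMoE / MLP layers, which carry no kv_cache."""

no_compile_layers = {
    "model.layers.0.self_attn": FakeAttentionLayer(num_virtual_engines=1),
    "model.layers.0.mlp": FakeMoELayer(),
}
virtual_engine = 0

for layer_name, layer in no_compile_layers.items():
    # Skip layers without a kv_cache attribute; treating every entry as an
    # attention layer would fail on the MoE/MLP entries.
    kv_cache_attr = getattr(layer, "kv_cache", None)
    if kv_cache_attr is None:
        continue
    kv_cache_layer = kv_cache_attr[virtual_engine]
    print(f"would inject KV into {layer_name}: {kv_cache_layer}")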