# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
"""Base class for attention-like layers."""

from abc import ABC, abstractmethod
from typing import TYPE_CHECKING

from vllm.config import VllmConfig
from vllm.v1.kv_cache_interface import KVCacheSpec

if TYPE_CHECKING:
    from vllm.attention.backends.abstract import AttentionBackend


class AttentionLayerBase(ABC):
    """
    Base class for attention-like layers (Attention, Mamba, etc.)
    that support the v1 engine.

    This provides a common interface for getting attention backends
    from different layer types.
    """

    @abstractmethod
    def get_attn_backend(self) -> type["AttentionBackend"]:
        """Get the attention backend class for this layer."""
        pass

    @abstractmethod
    def get_kv_cache_spec(self, vllm_config: VllmConfig) -> KVCacheSpec | None:
        """
        Get the KV cache spec for this layer.
        May be None if the layer does not need KV cache.
        """
        pass
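

# --- Illustrative sketch (not part of the upstream file) -------------------
# A hypothetical subclass showing how a concrete layer could satisfy this
# interface. `_ExampleCacheFreeLayer` and its choices are assumptions for
# illustration only; real layers (e.g. vllm.attention.layer.Attention)
# return their concrete backend class and build a KVCacheSpec from their
# own head size, dtype, and block size.
class _ExampleCacheFreeLayer(AttentionLayerBase):
    """Hypothetical layer that uses an attention backend but no KV cache."""

    def get_attn_backend(self) -> type["AttentionBackend"]:
        # Imported lazily here (it is only a TYPE_CHECKING import above).
        # Returning the abstract base keeps this sketch self-contained; a
        # real layer would return a specific backend implementation.
        from vllm.attention.backends.abstract import AttentionBackend

        return AttentionBackend

    def get_kv_cache_spec(self, vllm_config: VllmConfig) -> KVCacheSpec | None:
        # Per the base-class docstring, a layer that does not need KV cache
        # (e.g. some Mamba-style mixers) returns None here.
        return None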