// vllm/csrc/attention.cpp

#include <torch/extension.h>

// Attention for the decoding phase: each sequence contributes a single query
// token that attends to its cached key/value blocks.
void single_query_cached_kv_attention(
  torch::Tensor& out,           // attention output, one row per sequence
  torch::Tensor& query,         // one query token per sequence
  torch::Tensor& key_cache,     // paged key cache
  torch::Tensor& value_cache,   // paged value cache
  float scale,                  // softmax scaling factor (typically 1/sqrt(head_size))
  torch::Tensor& block_tables,  // maps each sequence to its cache block indices
  torch::Tensor& context_lens,  // context length of each sequence
  int block_size,               // number of tokens per cache block
  int max_context_len);         // maximum context length in the batch

// Attention for the prompt (prefill) phase: each sequence may contribute
// multiple query tokens, delimited by cumulative query lengths.
void multi_query_cached_kv_attention(
  torch::Tensor& cu_query_lens,  // cumulative query lengths per sequence
  torch::Tensor& out,            // remaining parameters as in
  torch::Tensor& query,          // single_query_cached_kv_attention above
  torch::Tensor& key_cache,
  torch::Tensor& value_cache,
  float scale,
  torch::Tensor& block_tables,
  torch::Tensor& context_lens,
  int block_size,
  int max_context_len);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def(
    "single_query_cached_kv_attention",
    &single_query_cached_kv_attention,
    "Compute the attention between an input query and the cached key/value tensors");
  m.def(
    "multi_query_cached_kv_attention",
    &multi_query_cached_kv_attention,
    "Compute the attention between multiple input queries and the cached key/value tensors");
}
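
// Usage sketch (not part of the original file): once a CUDA/C++ implementation
// of the two kernels above is compiled into this extension (e.g. with
// torch.utils.cpp_extension.load or a setup.py build), the bindings can be
// driven from Python roughly as follows. The module name, the kernel source
// file, and the tensor shapes below are illustrative assumptions; the KV cache
// layout is defined by the kernel implementation and is left elided here.
//
//   import torch
//   from torch.utils.cpp_extension import load
//
//   attention_ops = load(
//       name="attention_ops",
//       sources=["csrc/attention.cpp", "csrc/attention_kernels.cu"],  # hypothetical kernel file
//   )
//
//   num_seqs, num_heads, head_size = 4, 8, 64
//   num_blocks, block_size = 128, 16
//   max_context_len = 64
//   max_blocks_per_seq = (max_context_len + block_size - 1) // block_size
//
//   out = torch.empty(num_seqs, num_heads, head_size, device="cuda", dtype=torch.float16)
//   query = torch.randn_like(out)
//   key_cache = ...    # paged key cache; layout defined by the kernel implementation
//   value_cache = ...  # paged value cache; likewise kernel-defined
//   block_tables = torch.randint(0, num_blocks, (num_seqs, max_blocks_per_seq),
//                                device="cuda", dtype=torch.int32)
//   context_lens = torch.full((num_seqs,), max_context_len, device="cuda", dtype=torch.int32)
//   scale = head_size ** -0.5
//
//   attention_ops.single_query_cached_kv_attention(
//       out, query, key_cache, value_cache, scale,
//       block_tables, context_lens, block_size, max_context_len)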