Mirror of https://git.datalinker.icu/vllm-project/vllm.git, synced 2026-01-10 17:24:30 +08:00
[Bugfix] Safeguard against missing backend in AttentionBackendEnum (#28846)
Signed-off-by: jesse <szxfml@gmail.com>
Signed-off-by: Song Zhixin <szxfml@gmail.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
parent 439368496d
commit 285eaa4285
@@ -310,7 +310,8 @@ class Attention(nn.Module, AttentionLayerBase):
             kv_sharing_target_layer_name,
             **extra_impl_args,
         )
-        self.backend = AttentionBackendEnum[self.attn_backend.get_name()]
+        backend_name = self.attn_backend.get_name()
+        self.backend = AttentionBackendEnum.__members__.get(backend_name)
         self.dtype = dtype
 
         # For cuda-alike (CUDA and ROCM) and cpu platforms, we control how
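For context, a minimal sketch of why this change is a safeguard (the two-member enum and the backend name below are hypothetical stand-ins, not the real AttentionBackendEnum): subscripting an Enum class by member name raises KeyError when the name is not a registered member, whereas Enum.__members__ is a plain name-to-member mapping whose .get() returns None for unknown names.

from enum import Enum

# Hypothetical, trimmed-down stand-in for vLLM's AttentionBackendEnum;
# the real enum lists every registered attention backend.
class AttentionBackendEnum(Enum):
    FLASH_ATTN = "FLASH_ATTN"
    TORCH_SDPA = "TORCH_SDPA"

# e.g. an out-of-tree backend whose name is not a member of the enum
name = "CUSTOM_OOT_BACKEND"

# Old behavior: Enum subscripting raises KeyError for a missing name.
try:
    backend = AttentionBackendEnum[name]
except KeyError:
    backend = None

# New behavior: __members__.get() returns None instead of raising,
# so a backend missing from the enum no longer crashes layer init.
backend = AttentionBackendEnum.__members__.get(name)
print(backend)  # None

With the fix, self.backend is simply None for an unrecognized backend name, and any downstream code that branches on the backend can treat None as "no known enum member" rather than having to catch a KeyError at construction time.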