From 395aa823ea456ef4f4677b7a43c37806307be2bc Mon Sep 17 00:00:00 2001
From: Woosuk Kwon
Date: Thu, 28 Mar 2024 21:12:24 -0700
Subject: [PATCH] [Misc] Minor type annotation fix (#3716)

---
 vllm/attention/selector.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/vllm/attention/selector.py b/vllm/attention/selector.py
index 42f4284c6c775..c2ec4376c9f3c 100644
--- a/vllm/attention/selector.py
+++ b/vllm/attention/selector.py
@@ -1,4 +1,5 @@
 from functools import lru_cache
+from typing import Type
 
 import torch
 
@@ -10,7 +11,7 @@ logger = init_logger(__name__)
 
 
 @lru_cache(maxsize=None)
-def get_attn_backend(dtype: torch.dtype) -> AttentionBackend:
+def get_attn_backend(dtype: torch.dtype) -> Type[AttentionBackend]:
     if _can_use_flash_attn(dtype):
         logger.info("Using FlashAttention backend.")
         from vllm.attention.backends.flash_attn import (  # noqa: F401
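
Note: the fix reflects that get_attn_backend returns the backend *class*, not an instance of it, so the correct annotation is Type[AttentionBackend] rather than AttentionBackend. Below is a minimal, self-contained sketch of the distinction; the stand-in classes are illustrative placeholders, not vLLM's real definitions.

    from typing import Type


    class AttentionBackend:
        """Stand-in for vllm's abstract AttentionBackend base class."""


    class FlashAttentionBackend(AttentionBackend):
        """Stand-in for a concrete backend subclass."""


    def get_backend_class() -> Type[AttentionBackend]:
        # Returns the class object itself, so the annotation must be
        # Type[AttentionBackend] -- this is the case in the patch above.
        return FlashAttentionBackend


    def get_backend_instance() -> AttentionBackend:
        # Returns an instance, so the bare class name is correct here.
        return FlashAttentionBackend()

Annotating a class-returning function with the bare class name would make type checkers such as mypy treat call sites (e.g. get_attn_backend(dtype).make_metadata(...)) as instance accesses, which is why the one-line change matters despite being "minor".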