From 819d548e8a4e56f0e68ea8b4f9bf41f759548191 Mon Sep 17 00:00:00 2001
From: yihong
Date: Wed, 9 Apr 2025 15:59:02 +0800
Subject: [PATCH] [BugFix] logger is not callable (#16312)

Signed-off-by: yihong0618
---
 vllm/attention/backends/hpu_attn.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/attention/backends/hpu_attn.py b/vllm/attention/backends/hpu_attn.py
index 55b03bbf32e4b..cede9915efcf3 100644
--- a/vllm/attention/backends/hpu_attn.py
+++ b/vllm/attention/backends/hpu_attn.py
@@ -149,8 +149,8 @@ class HPUAttentionImpl(AttentionImpl, torch.nn.Module):
             self.fused_scaled_dot_product_attention = ModuleFusedSDPA(
                 FusedSDPA)
         except ImportError:
-            logger().warning("Could not import HPU FusedSDPA kernel. "
-                             "vLLM will use native implementation.")
+            logger.warning("Could not import HPU FusedSDPA kernel. "
+                           "vLLM will use native implementation.")
 
         suppored_head_sizes = HPUPagedAttention.get_supported_head_sizes()
        if head_size not in suppored_head_sizes:
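Why the one-character change matters: vLLM's module-level `logger` is a `logging.Logger`-style instance (obtained via `vllm.logger.init_logger`), not a factory function, so `logger().warning(...)` raises `TypeError: 'Logger' object is not callable` before any warning can be emitted. Below is a minimal, self-contained sketch of the failure mode; it uses the standard library's `logging.getLogger` as a stand-in for vLLM's logger, and the guarded import mirrors (as an assumption) the one in `hpu_attn.py`:

```python
import logging

# Stand-in for vLLM's module-level logger; vllm.logger.init_logger is
# assumed here to return a logging.Logger-like instance.
logger = logging.getLogger(__name__)

try:
    # Assumed import path, modeled on the guarded import in hpu_attn.py.
    from habana_frameworks.torch.hpex.kernels import FusedSDPA  # noqa: F401
except ImportError:
    # Buggy form -- a Logger instance is not callable, so this line
    # would raise TypeError instead of logging anything:
    #     logger().warning("Could not import HPU FusedSDPA kernel.")
    #
    # Fixed form -- call the .warning() method on the instance:
    logger.warning("Could not import HPU FusedSDPA kernel. "
                   "vLLM will use native implementation.")
```

On a machine without the Habana stack installed, the import fails and the fixed call logs the warning as intended; with the buggy `logger().warning(...)` form, the `except ImportError` block itself would crash.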