From 72ad2735823e23b4e1cc79b7c73c3a5f3c093ab0 Mon Sep 17 00:00:00 2001
From: QiliangCui
Date: Wed, 16 Jul 2025 17:25:26 -0700
Subject: [PATCH] Remove torch_xla.tpu.version() from pallas.py. (#21065)

Signed-off-by: Qiliang Cui
---
 vllm/v1/attention/backends/pallas.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/vllm/v1/attention/backends/pallas.py b/vllm/v1/attention/backends/pallas.py
index b7fc1ffeb65e6..52e12a1a506f5 100644
--- a/vllm/v1/attention/backends/pallas.py
+++ b/vllm/v1/attention/backends/pallas.py
@@ -167,10 +167,6 @@ class PallasAttentionBackendImpl(AttentionImpl):
                                       "are not implemented for "
                                       "PallasAttentionBackendImpl")
 
-        tpu_version = torch_xla.tpu.version()
-        if tpu_version < 4:
-            raise NotImplementedError("TPU version must be 4 or higher.")
-
     def forward(
         self,
         layer: AttentionLayer,