Remove torch_xla.tpu.version() from pallas.py. (#21065)
Signed-off-by: Qiliang Cui <derrhein@gmail.com>
parent 01513a334a
commit 72ad273582
@@ -167,10 +167,6 @@ class PallasAttentionBackendImpl(AttentionImpl):
                                       "are not implemented for "
                                       "PallasAttentionBackendImpl")
 
-        tpu_version = torch_xla.tpu.version()
-        if tpu_version < 4:
-            raise NotImplementedError("TPU version must be 4 or higher.")
-
     def forward(
         self,
         layer: AttentionLayer,
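For reference, the dropped lines amounted to a standalone guard that rejected TPU generations older than v4 when the backend was constructed. The sketch below is illustrative only, not vLLM's current code: the helper name is hypothetical, and depending on the torch_xla release the version() helper may need to be imported from a different submodule than shown.

# Minimal sketch of the guard removed by this commit; assumes torch_xla is
# installed and the process is attached to a TPU. The helper name is
# hypothetical, and the exact import path of the tpu module may vary across
# torch_xla releases (the removed line called torch_xla.tpu.version() directly).
import torch_xla


def assert_tpu_v4_or_newer() -> None:
    tpu_version = torch_xla.tpu.version()  # integer TPU generation, e.g. 4
    if tpu_version < 4:
        raise NotImplementedError("TPU version must be 4 or higher.")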