From 87efc681dbd57ab8e522c1d5cf50dc6ee5f7db2d Mon Sep 17 00:00:00 2001
From: Huamin Li <3ericli@gmail.com>
Date: Tue, 14 Oct 2025 11:54:12 -0700
Subject: [PATCH] llama4_vision_rope: add HIP override to accept (q, k) and
 avoid (positions, q, k) mismatch (#26790)

Signed-off-by: Huamin Li <3ericli@gmail.com>
---
 .../layers/rotary_embedding/llama4_vision_rope.py | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py b/vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py
index efef8877bcaae..6241cb5abbc8e 100644
--- a/vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py
+++ b/vllm/model_executor/layers/rotary_embedding/llama4_vision_rope.py
@@ -78,3 +78,10 @@ class Llama4VisionRotaryEmbedding(RotaryEmbedding):
         key: torch.Tensor | None = None,
     ) -> tuple[torch.Tensor, torch.Tensor | None]:
         return self.forward_native(query, key)
+
+    def forward_hip(  # type: ignore[override]
+        self,
+        query: torch.Tensor,
+        key: torch.Tensor | None = None,
+    ) -> tuple[torch.Tensor, torch.Tensor | None]:
+        return self.forward_native(query, key)
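
For context, a minimal sketch of why the override matters, assuming a
CustomOp-style dispatcher like the one vLLM uses to route a layer's call to
forward_native / forward_cuda / forward_hip depending on the platform. The
class names and dispatch logic below are simplified illustrations, not the
library's actual code; only the (positions, q, k) vs. (q, k) signature
mismatch is taken from the patch itself.

    # Illustrative sketch only -- NOT part of the patch. Names here
    # (SimplifiedRope, SimplifiedVisionRope, the __call__ dispatch) are
    # hypothetical stand-ins modeled on the vLLM code above.
    import torch


    class SimplifiedRope:
        """Base RoPE: platform methods take (positions, query, key)."""

        def forward_native(self, positions, query, key=None):
            return query, key  # real code would rotate q/k by positions

        def forward_hip(self, positions, query, key=None):
            # Hypothetical HIP path with the base-class signature.
            return self.forward_native(positions, query, key)

        def __call__(self, *args, **kwargs):
            # Hypothetical dispatch: pick the HIP path on ROCm builds.
            on_rocm = torch.version.hip is not None
            impl = self.forward_hip if on_rocm else self.forward_native
            return impl(*args, **kwargs)


    class SimplifiedVisionRope(SimplifiedRope):
        """Vision RoPE is called as rope(q, k) -- no positions arg."""

        def forward_native(self, query, key=None):  # type: ignore[override]
            return query, key

        # Without this matching override, a ROCm build would dispatch
        # rope(q, k) into the inherited forward_hip above, binding q to
        # `positions` and k to `query` -- the (positions, q, k) mismatch
        # the patch fixes.
        def forward_hip(self, query, key=None):  # type: ignore[override]
            return self.forward_native(query, key)


    q = torch.zeros(1, 4)
    k = torch.zeros(1, 4)
    out_q, out_k = SimplifiedVisionRope()(q, k)  # safe on CUDA and ROCm

The patch applies the same pattern the class already uses for its CUDA path:
forward_hip mirrors forward_cuda and simply delegates to forward_native, so
both accelerator backends see the (q, k) signature the vision tower calls
with.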