diff --git a/vllm/model_executor/models/ernie45_vl.py b/vllm/model_executor/models/ernie45_vl.py
index e6ac0e6a0b992..372675178ccc3 100644
--- a/vllm/model_executor/models/ernie45_vl.py
+++ b/vllm/model_executor/models/ernie45_vl.py
@@ -1404,9 +1404,8 @@ class Ernie4_5_VLMoeForConditionalGeneration(
         else:
             self.visual_token_mask = None
 
-    @classmethod
     def get_mrope_input_positions(
-        cls,
+        self,
         input_tokens: list[int],
         hf_config: PretrainedConfig,
         image_grid_thw: list[list[int]] | torch.Tensor,
diff --git a/vllm/model_executor/models/glm4v.py b/vllm/model_executor/models/glm4v.py
index a247ba55c51a0..2de1e48109521 100644
--- a/vllm/model_executor/models/glm4v.py
+++ b/vllm/model_executor/models/glm4v.py
@@ -619,9 +619,8 @@ class GLM4VForCausalLM(
 
         return self.transformer.vision(pixel_values)
 
-    @classmethod
     def get_mrope_input_positions(
-        cls,
+        self,
         input_tokens: list[int],
         hf_config: PretrainedConfig,
         image_grid_thw: list[list[int]] | torch.Tensor,
diff --git a/vllm/model_executor/models/keye_vl1_5.py b/vllm/model_executor/models/keye_vl1_5.py
index 9a9a46995af9e..13e5b2d5f1575 100644
--- a/vllm/model_executor/models/keye_vl1_5.py
+++ b/vllm/model_executor/models/keye_vl1_5.py
@@ -594,9 +594,8 @@ class KeyeVL1_5ForConditionalGeneration(
             new_video_embeds.append(video_embeds[start:end])
         return tuple(new_video_embeds)
 
-    @classmethod
     def get_mrope_input_positions(
-        cls,
+        self,
         input_tokens: list[int],
         hf_config: PretrainedConfig,
         image_grid_thw: list[list[int]] | torch.Tensor,
diff --git a/vllm/model_executor/models/qwen2_5_omni_thinker.py b/vllm/model_executor/models/qwen2_5_omni_thinker.py
index c40b97a2c4e09..a5d6004faf381 100644
--- a/vllm/model_executor/models/qwen2_5_omni_thinker.py
+++ b/vllm/model_executor/models/qwen2_5_omni_thinker.py
@@ -986,9 +986,8 @@ class Qwen2_5OmniThinkerForConditionalGeneration(
     def get_language_model(self) -> torch.nn.Module:
         return self.language_model
 
-    @classmethod
     def get_mrope_input_positions(
-        cls,
+        self,
         input_tokens: list[int],
         hf_config: PretrainedConfig,
         image_grid_thw: list[list[int]] | torch.Tensor,
diff --git a/vllm/model_executor/models/qwen2_5_vl.py b/vllm/model_executor/models/qwen2_5_vl.py
index e49387648ae35..4f3c7fdb69365 100644
--- a/vllm/model_executor/models/qwen2_5_vl.py
+++ b/vllm/model_executor/models/qwen2_5_vl.py
@@ -1078,9 +1078,8 @@ class Qwen2_5_VLForConditionalGeneration(
 
     supports_encoder_tp_data = True
 
-    @classmethod
     def get_mrope_input_positions(
-        cls,
+        self,
         input_tokens: list[int],
         hf_config: PretrainedConfig,
         image_grid_thw: list[list[int]] | torch.Tensor,
diff --git a/vllm/model_executor/models/qwen3_omni_moe_thinker.py b/vllm/model_executor/models/qwen3_omni_moe_thinker.py
index 1176c559bffef..cdef1bdaedc50 100755
--- a/vllm/model_executor/models/qwen3_omni_moe_thinker.py
+++ b/vllm/model_executor/models/qwen3_omni_moe_thinker.py
@@ -1421,7 +1421,6 @@ class Qwen3OmniMoeThinkerForConditionalGeneration(
 
         return loaded_weights
 
-    @classmethod
     def get_mrope_input_positions(
         self,
         input_tokens: list[int],
diff --git a/vllm/model_executor/models/qwen3_vl.py b/vllm/model_executor/models/qwen3_vl.py
index 0ece93791954a..e9e16762e525a 100644
--- a/vllm/model_executor/models/qwen3_vl.py
+++ b/vllm/model_executor/models/qwen3_vl.py
@@ -1480,9 +1480,8 @@ class Qwen3VLForConditionalGeneration(
                 )
         return mm_input_by_modality
 
-    @classmethod
     def get_mrope_input_positions(
-        cls,
+        self,
         input_tokens: list[int],
         hf_config: PretrainedConfig,
         image_grid_thw: list[list[int]] | torch.Tensor,
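
Every hunk above applies the same refactor: get_mrope_input_positions drops its @classmethod decorator and takes self instead of cls (qwen3_omni_moe_thinker.py already used self and only loses the decorator), so the method is now invoked on a constructed model instance rather than on the class. The snippet below is a minimal standalone sketch of that general pattern; all names in it (PositionProviderBefore, PositionProviderAfter, get_positions, offset) are hypothetical and not taken from this patch.

# Illustration of the classmethod -> instance-method refactor pattern.
class PositionProviderBefore:
    @classmethod
    def get_positions(cls, tokens: list[int]) -> list[int]:
        # As a classmethod, no per-instance state is reachable here.
        return list(range(len(tokens)))

class PositionProviderAfter:
    def __init__(self, offset: int = 0) -> None:
        self.offset = offset

    def get_positions(self, tokens: list[int]) -> list[int]:
        # As an instance method, per-instance state (self.offset) is available.
        return [i + self.offset for i in range(len(tokens))]

# Call sites shift from class-level to instance-level invocation:
print(PositionProviderBefore.get_positions([7, 8, 9]))           # [0, 1, 2]
print(PositionProviderAfter(offset=5).get_positions([7, 8, 9]))  # [5, 6, 7]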