From cf3eacfe58fa9e745c2854782ada884a9f992cf7 Mon Sep 17 00:00:00 2001
From: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Date: Thu, 11 Dec 2025 20:45:23 +0000
Subject: [PATCH] Standardise `get_rope` to use
 `rope_parameters["partial_rotary_factor"]`, not `rotary_dim` (#30389)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
---
 benchmarks/kernels/benchmark_mrope.py | 1 -
 benchmarks/kernels/benchmark_rope.py | 4 +-
 tests/compile/test_functionalization.py | 5 +-
 tests/kernels/core/test_mrope.py | 2 -
 tests/kernels/core/test_pos_encoding.py | 12 +-
 vllm/config/utils.py | 18 +-
 .../layers/rotary_embedding/__init__.py | 370 +++++++++---------
 vllm/model_executor/models/afmoe.py | 1 -
 vllm/model_executor/models/apertus.py | 1 -
 vllm/model_executor/models/arctic.py | 1 -
 vllm/model_executor/models/baichuan.py | 1 -
 vllm/model_executor/models/bailing_moe.py | 4 +-
 vllm/model_executor/models/bamba.py | 7 +-
 vllm/model_executor/models/chameleon.py | 1 -
 vllm/model_executor/models/chatglm.py | 7 +-
 vllm/model_executor/models/commandr.py | 1 -
 vllm/model_executor/models/config.py | 12 +-
 vllm/model_executor/models/dbrx.py | 1 -
 vllm/model_executor/models/deepseek_v2.py | 4 -
 vllm/model_executor/models/dots1.py | 1 -
 vllm/model_executor/models/ernie45_moe.py | 1 -
 vllm/model_executor/models/exaone.py | 1 -
 vllm/model_executor/models/exaone4.py | 1 -
 vllm/model_executor/models/falcon.py | 1 -
 vllm/model_executor/models/falcon_h1.py | 7 +-
 vllm/model_executor/models/gemma.py | 1 -
 vllm/model_executor/models/gemma2.py | 1 -
 vllm/model_executor/models/gemma3.py | 1 -
 vllm/model_executor/models/gemma3n.py | 1 -
 vllm/model_executor/models/glm4.py | 2 -
 vllm/model_executor/models/glm4_1v.py | 2 +-
 vllm/model_executor/models/glm4_moe.py | 1 -
 vllm/model_executor/models/gpt_j.py | 5 +-
 vllm/model_executor/models/gpt_neox.py | 1 -
 vllm/model_executor/models/gpt_oss.py | 1 -
 vllm/model_executor/models/granite.py | 1 -
 vllm/model_executor/models/granitemoe.py | 1 -
 .../model_executor/models/granitemoehybrid.py | 1 -
 vllm/model_executor/models/grok1.py | 1 -
 vllm/model_executor/models/hunyuan_v1.py | 2 -
 vllm/model_executor/models/internlm2.py | 1 -
 vllm/model_executor/models/lfm2.py | 1 -
 vllm/model_executor/models/lfm2_moe.py | 1 -
 vllm/model_executor/models/llama.py | 1 -
 vllm/model_executor/models/llama4.py | 1 -
 vllm/model_executor/models/minicpm.py | 1 -
 vllm/model_executor/models/minicpm3.py | 1 -
 vllm/model_executor/models/minimax_m2.py | 6 +-
 vllm/model_executor/models/minimax_text_01.py | 7 +-
 vllm/model_executor/models/mixtral.py | 1 -
 vllm/model_executor/models/mllama4.py | 2 +-
 vllm/model_executor/models/modernbert.py | 1 -
 vllm/model_executor/models/molmo.py | 1 -
 vllm/model_executor/models/nemotron.py | 1 -
 vllm/model_executor/models/nemotron_nas.py | 1 -
 vllm/model_executor/models/olmo.py | 1 -
 vllm/model_executor/models/olmo2.py | 1 -
 vllm/model_executor/models/olmoe.py | 1 -
 vllm/model_executor/models/openpangu.py | 2 -
 vllm/model_executor/models/orion.py | 1 -
 vllm/model_executor/models/ouro.py | 1 -
 vllm/model_executor/models/persimmon.py | 1 -
 vllm/model_executor/models/phi.py | 12 +-
 vllm/model_executor/models/phimoe.py | 1 -
 vllm/model_executor/models/plamo2.py | 1 -
 vllm/model_executor/models/plamo3.py | 1 -
 vllm/model_executor/models/qwen.py | 1 -
 vllm/model_executor/models/qwen2.py | 1 -
 vllm/model_executor/models/qwen2_5_vl.py | 2 +-
 vllm/model_executor/models/qwen2_moe.py | 1 -
 vllm/model_executor/models/qwen2_vl.py | 2 +-
vllm/model_executor/models/qwen3.py | 1 - vllm/model_executor/models/qwen3_moe.py | 1 - vllm/model_executor/models/qwen3_next.py | 1 - .../models/qwen3_omni_moe_thinker.py | 2 +- vllm/model_executor/models/qwen3_vl.py | 2 +- vllm/model_executor/models/seed_oss.py | 1 - vllm/model_executor/models/solar.py | 1 - vllm/model_executor/models/stablelm.py | 1 - vllm/model_executor/models/starcoder2.py | 1 - vllm/model_executor/models/step3_text.py | 1 - vllm/model_executor/models/zamba2.py | 1 - vllm/transformers_utils/config.py | 17 +- 83 files changed, 260 insertions(+), 314 deletions(-) diff --git a/benchmarks/kernels/benchmark_mrope.py b/benchmarks/kernels/benchmark_mrope.py index 83bd91917508f..09de5fa822f86 100644 --- a/benchmarks/kernels/benchmark_mrope.py +++ b/benchmarks/kernels/benchmark_mrope.py @@ -99,7 +99,6 @@ def benchmark_mrope( # the parameters to compute the q k v size based on tp_size mrope_helper_class = get_rope( head_size=head_dim, - rotary_dim=head_dim, max_position=max_position, is_neox_style=is_neox_style, rope_parameters=rope_parameters, diff --git a/benchmarks/kernels/benchmark_rope.py b/benchmarks/kernels/benchmark_rope.py index 074b7a440b612..7a1bc050bb33f 100644 --- a/benchmarks/kernels/benchmark_rope.py +++ b/benchmarks/kernels/benchmark_rope.py @@ -32,8 +32,8 @@ def get_benchmark(head_size, rotary_dim, is_neox_style, device): def benchmark(batch_size, seq_len, num_heads, provider): dtype = torch.bfloat16 max_position = 8192 - base = 10000 - rope = get_rope(head_size, rotary_dim, max_position, base, is_neox_style) + rope_parameters = {"partial_rotary_factor": rotary_dim / head_size} + rope = get_rope(head_size, max_position, is_neox_style, rope_parameters) rope = rope.to(dtype=dtype, device=device) cos_sin_cache = rope.cos_sin_cache.to(dtype=torch.float, device=device) diff --git a/tests/compile/test_functionalization.py b/tests/compile/test_functionalization.py index 7585915892700..ad5ead36e2310 100644 --- a/tests/compile/test_functionalization.py +++ b/tests/compile/test_functionalization.py @@ -128,14 +128,12 @@ class TestFusedAddRMSNorm(torch.nn.Module): class TestRotaryEmbedding(torch.nn.Module): - def __init__(self, head_dim=64, rotary_dim=None, max_position=2048, base=10000): + def __init__(self, head_dim=64, max_position=2048, base=10000): super().__init__() self.head_dim = head_dim - self.rotary_dim = rotary_dim or head_dim self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.rotary_dim, max_position=max_position, rope_parameters={"rope_type": "default", "rope_theta": base}, ) @@ -170,7 +168,6 @@ class TestRotaryEmbeddingSliceScatter(torch.nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position, rope_parameters={"rope_type": "default", "rope_theta": base}, ) diff --git a/tests/kernels/core/test_mrope.py b/tests/kernels/core/test_mrope.py index 4e1559a049bf9..ba5d593b2d355 100644 --- a/tests/kernels/core/test_mrope.py +++ b/tests/kernels/core/test_mrope.py @@ -116,7 +116,6 @@ def test_mrope( mrope_helper_class = get_rope( head_size=head_dim, - rotary_dim=head_dim, max_position=max_position, is_neox_style=is_neox_style, rope_parameters=config.rope_parameters, @@ -185,7 +184,6 @@ def test_mrope_torch_compile_tracing( mrope_helper_class = get_rope( head_size=head_dim, - rotary_dim=head_dim, max_position=max_position, is_neox_style=is_neox_style, rope_parameters=config.rope_parameters, diff --git a/tests/kernels/core/test_pos_encoding.py b/tests/kernels/core/test_pos_encoding.py index 
a8ed3825689d3..d18f01314c8f5 100644 --- a/tests/kernels/core/test_pos_encoding.py +++ b/tests/kernels/core/test_pos_encoding.py @@ -83,8 +83,12 @@ def test_rotary_embedding( torch.set_default_device(device) if rotary_dim is None: rotary_dim = head_size - rope_parameters = {"rope_type": "default", "rope_theta": rope_theta} - rope = get_rope(head_size, rotary_dim, max_position, is_neox_style, rope_parameters) + rope_parameters = { + "rope_type": "default", + "rope_theta": rope_theta, + "partial_rotary_factor": rotary_dim / head_size, + } + rope = get_rope(head_size, max_position, is_neox_style, rope_parameters) rope = rope.to(dtype=dtype, device=torch.get_default_device()) positions = torch.randint(0, max_position, (batch_size, seq_len)) @@ -150,9 +154,9 @@ def test_rope_module_cache(): if rotary_dim is None: rotary_dim = head_size rope_parameters["rope_theta"] = rope_theta + rope_parameters["partial_rotary_factor"] = rotary_dim / head_size rope = get_rope( head_size, - rotary_dim, max_position, is_neox_style, rope_parameters, @@ -177,9 +181,9 @@ def test_rope_module_cache(): if rotary_dim is None: rotary_dim = head_size rope_parameters["rope_theta"] = rope_theta + rope_parameters["partial_rotary_factor"] = rotary_dim / head_size rope = get_rope( head_size, - rotary_dim, max_position, is_neox_style, rope_parameters, diff --git a/vllm/config/utils.py b/vllm/config/utils.py index 93da3fd417ace..470296517deb1 100644 --- a/vllm/config/utils.py +++ b/vllm/config/utils.py @@ -73,14 +73,28 @@ def get_field(cls: ConfigType, name: str) -> Field: ) -def getattr_iter(object: object, names: Iterable[str], default: Any) -> Any: +def getattr_iter( + object: object, names: Iterable[str], default: Any, warn: bool = False +) -> Any: """ A helper function that retrieves an attribute from an object which may have multiple possible names. This is useful when fetching attributes from arbitrary `transformers.PretrainedConfig` instances. + + In the case where the first name in `names` is the preferred name, and + any other names are deprecated aliases, setting `warn=True` will log a + warning when a deprecated name is used. """ - for name in names: + for i, name in enumerate(names): if hasattr(object, name): + if warn and i > 0: + logger.warning_once( + "%s contains a deprecated attribute name '%s'. 
" + "Please use the preferred attribute name '%s' instead.", + type(object).__name__, + name, + names[0], + ) return getattr(object, name) return default diff --git a/vllm/model_executor/layers/rotary_embedding/__init__.py b/vllm/model_executor/layers/rotary_embedding/__init__.py index 4dff984f92be6..452b87ea4e7a5 100644 --- a/vllm/model_executor/layers/rotary_embedding/__init__.py +++ b/vllm/model_executor/layers/rotary_embedding/__init__.py @@ -25,7 +25,6 @@ _ROPE_DICT: dict[tuple, RotaryEmbedding] = {} def get_rope( head_size: int, - rotary_dim: int, max_position: int, is_neox_style: bool = True, rope_parameters: dict[str, Any] | None = None, @@ -54,12 +53,15 @@ def get_rope( else: dual_chunk_attention_args = None - partial_rotary_factor = 1.0 - if rope_parameters is not None: - partial_rotary_factor = rope_parameters.get("partial_rotary_factor", 1.0) + rope_parameters = rope_parameters or {} + base = rope_parameters.get("rope_theta", 10000) + scaling_type = rope_parameters.get("rope_type", "default") + partial_rotary_factor = rope_parameters.get("partial_rotary_factor", 1.0) + + if partial_rotary_factor <= 0.0 or partial_rotary_factor > 1.0: + raise ValueError(f"{partial_rotary_factor=} must be between 0.0 and 1.0") + rotary_dim = int(head_size * partial_rotary_factor) - if partial_rotary_factor < 1.0: - rotary_dim = int(rotary_dim * partial_rotary_factor) key = ( head_size, rotary_dim, @@ -72,7 +74,6 @@ def get_rope( if key in _ROPE_DICT: return _ROPE_DICT[key] - base = rope_parameters["rope_theta"] if rope_parameters else 10000 if dual_chunk_attention_config is not None: extra_kwargs = { k: v @@ -88,109 +89,76 @@ def get_rope( dtype, **extra_kwargs, ) - elif not rope_parameters: - rotary_emb = RotaryEmbedding( + elif scaling_type == "default": + if "mrope_section" in rope_parameters: + rotary_emb = MRotaryEmbedding( + head_size, + rotary_dim, + max_position, + base, + is_neox_style, + dtype, + mrope_section=rope_parameters["mrope_section"], + mrope_interleaved=rope_parameters.get("mrope_interleaved", False), + ) + else: + rotary_emb = RotaryEmbedding( + head_size, + rotary_dim, + max_position, + base, + is_neox_style, + dtype, + ) + elif scaling_type == "llama3": + scaling_factor = rope_parameters["factor"] + low_freq_factor = rope_parameters["low_freq_factor"] + high_freq_factor = rope_parameters["high_freq_factor"] + original_max_position = rope_parameters["original_max_position_embeddings"] + rotary_emb = Llama3RotaryEmbedding( + head_size, + rotary_dim, + max_position, + base, + is_neox_style, + dtype, + scaling_factor, + low_freq_factor, + high_freq_factor, + original_max_position, + ) + elif scaling_type == "mllama4": + rotary_emb = Llama4VisionRotaryEmbedding( head_size, rotary_dim, max_position, base, is_neox_style, dtype ) - else: - scaling_type = rope_parameters["rope_type"] - - if scaling_type == "llama3": - scaling_factor = rope_parameters["factor"] - low_freq_factor = rope_parameters["low_freq_factor"] - high_freq_factor = rope_parameters["high_freq_factor"] - original_max_position = rope_parameters["original_max_position_embeddings"] - rotary_emb = Llama3RotaryEmbedding( - head_size, - rotary_dim, - max_position, - base, - is_neox_style, - dtype, - scaling_factor, - low_freq_factor, - high_freq_factor, - original_max_position, - ) - elif scaling_type == "mllama4": - rotary_emb = Llama4VisionRotaryEmbedding( - head_size, rotary_dim, max_position, base, is_neox_style, dtype - ) - elif scaling_type == "default": - if "mrope_section" in rope_parameters: - rotary_emb = 
MRotaryEmbedding( - head_size, - rotary_dim, - max_position, - base, - is_neox_style, - dtype, - mrope_section=rope_parameters["mrope_section"], - mrope_interleaved=rope_parameters.get("mrope_interleaved", False), - ) - else: - rotary_emb = RotaryEmbedding( - head_size, - rotary_dim, - max_position, - base, - is_neox_style, - dtype, - ) - elif scaling_type == "linear": - scaling_factor = rope_parameters["factor"] - rotary_emb = LinearScalingRotaryEmbedding( - head_size, - rotary_dim, - max_position, - base, - is_neox_style, - scaling_factor, - dtype, - ) - elif scaling_type == "ntk": - scaling_factor = rope_parameters["factor"] - mixed_b = rope_parameters.get("mixed_b") - rotary_emb = NTKScalingRotaryEmbedding( - head_size, - rotary_dim, - max_position, - base, - is_neox_style, - scaling_factor, - dtype, - mixed_b, - ) - elif scaling_type == "dynamic": - if "alpha" in rope_parameters: - scaling_alpha = rope_parameters["alpha"] - rotary_emb = DynamicNTKAlphaRotaryEmbedding( - head_size, - rotary_dim, - max_position, - base, - is_neox_style, - scaling_alpha, - dtype, - ) - elif "factor" in rope_parameters: - scaling_factor = rope_parameters["factor"] - rotary_emb = DynamicNTKScalingRotaryEmbedding( - head_size, - rotary_dim, - max_position, - base, - is_neox_style, - scaling_factor, - dtype, - ) - else: - raise ValueError( - "Dynamic rope scaling must contain either 'alpha' or 'factor' field" - ) - elif scaling_type == "xdrope": + elif scaling_type == "linear": + scaling_factor = rope_parameters["factor"] + rotary_emb = LinearScalingRotaryEmbedding( + head_size, + rotary_dim, + max_position, + base, + is_neox_style, + scaling_factor, + dtype, + ) + elif scaling_type == "ntk": + scaling_factor = rope_parameters["factor"] + mixed_b = rope_parameters.get("mixed_b") + rotary_emb = NTKScalingRotaryEmbedding( + head_size, + rotary_dim, + max_position, + base, + is_neox_style, + scaling_factor, + dtype, + mixed_b, + ) + elif scaling_type == "dynamic": + if "alpha" in rope_parameters: scaling_alpha = rope_parameters["alpha"] - rotary_emb = XDRotaryEmbedding( + rotary_emb = DynamicNTKAlphaRotaryEmbedding( head_size, rotary_dim, max_position, @@ -198,67 +166,66 @@ def get_rope( is_neox_style, scaling_alpha, dtype, - xdrope_section=rope_parameters["xdrope_section"], ) - elif scaling_type == "yarn": + elif "factor" in rope_parameters: scaling_factor = rope_parameters["factor"] - original_max_position = rope_parameters["original_max_position_embeddings"] - extra_kwargs = { - k: v - for k, v in rope_parameters.items() - if k - in ( - "extrapolation_factor", - "attn_factor", - "beta_fast", - "beta_slow", - "apply_yarn_scaling", - "truncate", - ) - } - if "mrope_section" in rope_parameters: - extra_kwargs.pop("apply_yarn_scaling", None) - rotary_emb = MRotaryEmbedding( - head_size, - rotary_dim, - original_max_position, - base, - is_neox_style, - dtype, - mrope_section=rope_parameters["mrope_section"], - mrope_interleaved=rope_parameters.get("mrope_interleaved", False), - scaling_factor=scaling_factor, - **extra_kwargs, - ) - else: - rotary_emb = YaRNScalingRotaryEmbedding( - head_size, - rotary_dim, - original_max_position, - base, - is_neox_style, - scaling_factor, - dtype, - **extra_kwargs, - ) - elif scaling_type in ["deepseek_yarn", "deepseek_llama_scaling"]: - scaling_factor = rope_parameters["factor"] - original_max_position = rope_parameters["original_max_position_embeddings"] - # assert max_position == original_max_position * scaling_factor - extra_kwargs = { - k: v - for k, v in 
rope_parameters.items() - if k - in ( - "extrapolation_factor", - "attn_factor", - "beta_fast", - "beta_slow", - "mscale", - "mscale_all_dim", - ) - } - rotary_emb = DeepseekScalingRotaryEmbedding( + rotary_emb = DynamicNTKScalingRotaryEmbedding( + head_size, + rotary_dim, + max_position, + base, + is_neox_style, + scaling_factor, + dtype, + ) + else: + raise ValueError( + "Dynamic rope scaling must contain either 'alpha' or 'factor' field" + ) + elif scaling_type == "xdrope": + scaling_alpha = rope_parameters["alpha"] + rotary_emb = XDRotaryEmbedding( + head_size, + rotary_dim, + max_position, + base, + is_neox_style, + scaling_alpha, + dtype, + xdrope_section=rope_parameters["xdrope_section"], + ) + elif scaling_type == "yarn": + scaling_factor = rope_parameters["factor"] + original_max_position = rope_parameters["original_max_position_embeddings"] + extra_kwargs = { + k: v + for k, v in rope_parameters.items() + if k + in ( + "extrapolation_factor", + "attn_factor", + "beta_fast", + "beta_slow", + "apply_yarn_scaling", + "truncate", + ) + } + if "mrope_section" in rope_parameters: + extra_kwargs.pop("apply_yarn_scaling", None) + rotary_emb = MRotaryEmbedding( + head_size, + rotary_dim, + original_max_position, + base, + is_neox_style, + dtype, + mrope_section=rope_parameters["mrope_section"], + mrope_interleaved=rope_parameters.get("mrope_interleaved", False), + scaling_factor=scaling_factor, + **extra_kwargs, + ) + else: + rotary_emb = YaRNScalingRotaryEmbedding( head_size, rotary_dim, original_max_position, @@ -268,28 +235,55 @@ def get_rope( dtype, **extra_kwargs, ) - elif scaling_type == "longrope": - short_factor = rope_parameters["short_factor"] - long_factor = rope_parameters["long_factor"] - original_max_position = rope_parameters["original_max_position_embeddings"] - extra_kwargs = { - k: v - for k, v in rope_parameters.items() - if k in ("short_mscale", "long_mscale") - } - rotary_emb = Phi3LongRoPEScaledRotaryEmbedding( - head_size, - rotary_dim, - max_position, - original_max_position, - base, - is_neox_style, - dtype, - short_factor, - long_factor, - **extra_kwargs, + elif scaling_type in ["deepseek_yarn", "deepseek_llama_scaling"]: + scaling_factor = rope_parameters["factor"] + original_max_position = rope_parameters["original_max_position_embeddings"] + # assert max_position == original_max_position * scaling_factor + extra_kwargs = { + k: v + for k, v in rope_parameters.items() + if k + in ( + "extrapolation_factor", + "attn_factor", + "beta_fast", + "beta_slow", + "mscale", + "mscale_all_dim", ) - else: - raise ValueError(f"Unknown RoPE scaling type {scaling_type}") + } + rotary_emb = DeepseekScalingRotaryEmbedding( + head_size, + rotary_dim, + original_max_position, + base, + is_neox_style, + scaling_factor, + dtype, + **extra_kwargs, + ) + elif scaling_type == "longrope": + short_factor = rope_parameters["short_factor"] + long_factor = rope_parameters["long_factor"] + original_max_position = rope_parameters["original_max_position_embeddings"] + extra_kwargs = { + k: v + for k, v in rope_parameters.items() + if k in ("short_mscale", "long_mscale") + } + rotary_emb = Phi3LongRoPEScaledRotaryEmbedding( + head_size, + rotary_dim, + max_position, + original_max_position, + base, + is_neox_style, + dtype, + short_factor, + long_factor, + **extra_kwargs, + ) + else: + raise ValueError(f"Unknown RoPE scaling type {scaling_type}") _ROPE_DICT[key] = rotary_emb return rotary_emb diff --git a/vllm/model_executor/models/afmoe.py b/vllm/model_executor/models/afmoe.py index 
85827d54c911a..3ced52c2050d6 100644 --- a/vllm/model_executor/models/afmoe.py +++ b/vllm/model_executor/models/afmoe.py @@ -241,7 +241,6 @@ class AfmoeAttention(nn.Module): if self.is_local_attention: self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config["rope_parameters"], is_neox_style=True, diff --git a/vllm/model_executor/models/apertus.py b/vllm/model_executor/models/apertus.py index 2a8be29d8d306..e3f97a718b0f4 100644 --- a/vllm/model_executor/models/apertus.py +++ b/vllm/model_executor/models/apertus.py @@ -226,7 +226,6 @@ class ApertusAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=is_neox_style, diff --git a/vllm/model_executor/models/arctic.py b/vllm/model_executor/models/arctic.py index 266d29a8d9b2b..0200984c0ec85 100644 --- a/vllm/model_executor/models/arctic.py +++ b/vllm/model_executor/models/arctic.py @@ -314,7 +314,6 @@ class ArcticAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/baichuan.py b/vllm/model_executor/models/baichuan.py index beb22995a0719..ee4a1dbd6df94 100644 --- a/vllm/model_executor/models/baichuan.py +++ b/vllm/model_executor/models/baichuan.py @@ -189,7 +189,6 @@ class BaiChuanAttention(nn.Module): else: self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=rope_parameters, ) diff --git a/vllm/model_executor/models/bailing_moe.py b/vllm/model_executor/models/bailing_moe.py index 0143e140af265..4bccee7521749 100644 --- a/vllm/model_executor/models/bailing_moe.py +++ b/vllm/model_executor/models/bailing_moe.py @@ -127,11 +127,11 @@ class BailingAttention(nn.Module): prefix=f"{prefix}.dense", ) - self.rotary_dim = getattr(config, "rotary_dim", self.head_dim) + rotary_dim = getattr(config, "rotary_dim", self.head_dim) + config.rope_parameters["partial_rotary_factor"] = rotary_dim / self.head_dim self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.rotary_dim, max_position=config.max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/bamba.py b/vllm/model_executor/models/bamba.py index 00d742f84ef79..22631bbc5489b 100644 --- a/vllm/model_executor/models/bamba.py +++ b/vllm/model_executor/models/bamba.py @@ -178,14 +178,11 @@ class BambaAttentionDecoderLayer(nn.Module): self.scaling = self.head_dim**-0.5 self.max_position_embeddings = max_position_embeddings - if hasattr(config, "attn_rotary_emb"): - rotary_dim = config.attn_rotary_emb # for backward compatibility - else: - rotary_dim = self.head_dim # default + rotary_dim = getattr(config, "attn_rotary_emb", self.head_dim) + config.rope_parameters["partial_rotary_factor"] = rotary_dim / self.head_dim self.rotary_emb = get_rope( head_size=self.head_dim, - rotary_dim=rotary_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py index dfc05a366b286..176c5cd14c6e2 100644 --- a/vllm/model_executor/models/chameleon.py +++ b/vllm/model_executor/models/chameleon.py @@ -314,7 +314,6 @@ class ChameleonAttention(nn.Module): 
self.k_norm = ChameleonLayerNorm((self.num_kv_heads, self.head_dim)) self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=rope_parameters, ) diff --git a/vllm/model_executor/models/chatglm.py b/vllm/model_executor/models/chatglm.py index 3d485fdd0a2e1..26181d1c9bae4 100644 --- a/vllm/model_executor/models/chatglm.py +++ b/vllm/model_executor/models/chatglm.py @@ -99,13 +99,16 @@ class GLMAttention(nn.Module): # https://huggingface.co/zai-org/chatglm3-6b-32k/blob/e210410255278dd9d74463cf396ba559c0ef801c/modeling_chatglm.py#L141 rope_ratio = getattr(config, "rope_ratio", 1.0) max_positions = getattr(config, "seq_length", 8192) - rope_parameters = {"rope_type": "default", "rope_theta": 10000 * rope_ratio} + rope_parameters = { + "rope_type": "default", + "rope_theta": 10000 * rope_ratio, + "partial_rotary_factor": 0.5, + } # NOTE: zai-org/cogagent-9b-20241220 uses original_rope=False, # which is equivalent to is_neox_style=True is_neox_style = not config.original_rope self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim // 2, max_position=max_positions, rope_parameters=rope_parameters, is_neox_style=is_neox_style, diff --git a/vllm/model_executor/models/commandr.py b/vllm/model_executor/models/commandr.py index f837502c468f1..63a93eaa2d4f3 100644 --- a/vllm/model_executor/models/commandr.py +++ b/vllm/model_executor/models/commandr.py @@ -175,7 +175,6 @@ class CohereAttention(nn.Module): ) self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=False, diff --git a/vllm/model_executor/models/config.py b/vllm/model_executor/models/config.py index 8de793941b8c3..06cc92ee88180 100644 --- a/vllm/model_executor/models/config.py +++ b/vllm/model_executor/models/config.py @@ -42,9 +42,10 @@ class GteNewModelConfig(VerifyAndUpdateConfig): config.hidden_act = "geglu" head_dim = config.hidden_size // config.num_attention_heads + rotary_dim = getattr(config, "rotary_emb_dim", head_dim) + config.rope_parameters["partial_rotary_factor"] = rotary_dim / head_dim config.rotary_kwargs = { "head_size": head_dim, - "rotary_dim": getattr(config, "rotary_emb_dim", head_dim), "max_position": config.max_position_embeddings, "rope_parameters": config.rope_parameters, } @@ -77,9 +78,11 @@ class JinaRobertaModelConfig(VerifyAndUpdateConfig): if not model_config.enforce_eager: max_position = round_up(max_position, 8) + rotary_dim = getattr(config, "rotary_emb_dim", head_dim) + config.rope_parameters["partial_rotary_factor"] = rotary_dim / head_dim + config.rotary_kwargs = { "head_size": head_dim, - "rotary_dim": getattr(config, "rotary_emb_dim", head_dim), "max_position": max_position, "rope_parameters": config.rope_parameters, } @@ -113,12 +116,10 @@ class NomicBertModelConfig(VerifyAndUpdateConfig): config.num_hidden_layers = config.n_layer head_dim = config.hidden_size // config.num_attention_heads - rotary_emb_dim = int(head_dim * config.rotary_emb_fraction) max_trained_positions = getattr(config, "max_trained_positions", 2048) config.rotary_kwargs = { "head_size": head_dim, - "rotary_dim": rotary_emb_dim, "max_position": max_trained_positions, "rope_parameters": config.rope_parameters, } @@ -240,9 +241,10 @@ class SnowflakeGteNewModelConfig(VerifyAndUpdateConfig): config.hidden_act = "geglu" head_dim = config.hidden_size // config.num_attention_heads + rotary_dim = getattr(config, "rotary_emb_dim", head_dim) + 
config.rope_parameters["partial_rotary_factor"] = rotary_dim / head_dim config.rotary_kwargs = { "head_size": head_dim, - "rotary_dim": getattr(config, "rotary_emb_dim", head_dim), "max_position": config.max_position_embeddings, "rope_parameters": config.rope_parameters, } diff --git a/vllm/model_executor/models/dbrx.py b/vllm/model_executor/models/dbrx.py index 946baffc8817a..db4fe61b0d85f 100644 --- a/vllm/model_executor/models/dbrx.py +++ b/vllm/model_executor/models/dbrx.py @@ -222,7 +222,6 @@ class DbrxAttention(nn.Module): ) self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position, rope_parameters=rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index 0b6513789aea8..a9fa76deecbd2 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -156,7 +156,6 @@ class DeepseekAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, ) @@ -499,7 +498,6 @@ class DeepseekV2Attention(nn.Module): self.rotary_emb = get_rope( qk_rope_head_dim, - rotary_dim=qk_rope_head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=False, @@ -1018,7 +1016,6 @@ class DeepseekV2MLAAttention(nn.Module): self.rotary_emb = get_rope( qk_rope_head_dim, - rotary_dim=qk_rope_head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=False, @@ -1038,7 +1035,6 @@ class DeepseekV2MLAAttention(nn.Module): if self.is_v32: self.indexer_rope_emb = get_rope( qk_rope_head_dim, - rotary_dim=qk_rope_head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/dots1.py b/vllm/model_executor/models/dots1.py index 3beee9f864634..870a37039f151 100644 --- a/vllm/model_executor/models/dots1.py +++ b/vllm/model_executor/models/dots1.py @@ -250,7 +250,6 @@ class Dots1Attention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, ) diff --git a/vllm/model_executor/models/ernie45_moe.py b/vllm/model_executor/models/ernie45_moe.py index 278ba45e9684c..fbbd31a485383 100644 --- a/vllm/model_executor/models/ernie45_moe.py +++ b/vllm/model_executor/models/ernie45_moe.py @@ -288,7 +288,6 @@ class Ernie4_5_MoeAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=rope_parameters, is_neox_style=False, diff --git a/vllm/model_executor/models/exaone.py b/vllm/model_executor/models/exaone.py index acf651ed24988..039e7cf68e52b 100644 --- a/vllm/model_executor/models/exaone.py +++ b/vllm/model_executor/models/exaone.py @@ -167,7 +167,6 @@ class ExaoneAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=is_neox_style, diff --git a/vllm/model_executor/models/exaone4.py b/vllm/model_executor/models/exaone4.py index cb710a7ec5cf9..b4b7a798fd050 100644 --- a/vllm/model_executor/models/exaone4.py +++ b/vllm/model_executor/models/exaone4.py @@ -176,7 +176,6 @@ class Exaone4Attention(nn.Module): set_default_rope_theta(config, default_theta=1000000) self.rotary_emb = get_rope( 
self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=is_neox_style, diff --git a/vllm/model_executor/models/falcon.py b/vllm/model_executor/models/falcon.py index 32d9e7b925597..7cdfcae0e718d 100644 --- a/vllm/model_executor/models/falcon.py +++ b/vllm/model_executor/models/falcon.py @@ -167,7 +167,6 @@ class FalconAttention(nn.Module): max_position_embeddings = getattr(config, "max_position_embeddings", 8192) self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, ) diff --git a/vllm/model_executor/models/falcon_h1.py b/vllm/model_executor/models/falcon_h1.py index a1c1263f8d724..bfb6b1a1f160d 100644 --- a/vllm/model_executor/models/falcon_h1.py +++ b/vllm/model_executor/models/falcon_h1.py @@ -242,14 +242,11 @@ class FalconH1AttentionDecoderLayer(nn.Module): self.scaling = self.head_dim**-0.5 self.max_position_embeddings = max_position_embeddings - if hasattr(config, "attn_rotary_emb"): - rotary_dim = config.attn_rotary_emb # for backward compatibility - else: - rotary_dim = self.head_dim # default + rotary_dim = getattr(config, "attn_rotary_emb", self.head_dim) + config.rope_parameters["partial_rotary_factor"] = rotary_dim / self.head_dim self.rotary_emb = get_rope( head_size=self.head_dim, - rotary_dim=rotary_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/gemma.py b/vllm/model_executor/models/gemma.py index dd5a74c8ed005..7304a728067f4 100644 --- a/vllm/model_executor/models/gemma.py +++ b/vllm/model_executor/models/gemma.py @@ -174,7 +174,6 @@ class GemmaAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index cb36e04824588..fe6ec5ff83dec 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -152,7 +152,6 @@ class Gemma2Attention(nn.Module): ) self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/gemma3.py b/vllm/model_executor/models/gemma3.py index 73176eba95ed5..40f6d100c767e 100644 --- a/vllm/model_executor/models/gemma3.py +++ b/vllm/model_executor/models/gemma3.py @@ -176,7 +176,6 @@ class Gemma3Attention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/gemma3n.py b/vllm/model_executor/models/gemma3n.py index f4427c9fd1d10..4d446f51c2ecb 100644 --- a/vllm/model_executor/models/gemma3n.py +++ b/vllm/model_executor/models/gemma3n.py @@ -384,7 +384,6 @@ class Gemma3nAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/glm4.py b/vllm/model_executor/models/glm4.py index 9adfa942b99fa..2cd11e66c752b 100644 --- a/vllm/model_executor/models/glm4.py +++ b/vllm/model_executor/models/glm4.py @@ -81,7 +81,6 @@ class Glm4Attention(nn.Module): 
config.rope_parameters.setdefault("partial_rotary_factor", 0.5) self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) self.head_dim = head_dim or hidden_size // self.total_num_heads - self.rotary_dim = self.head_dim self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim self.scaling = self.head_dim**-0.5 @@ -103,7 +102,6 @@ class Glm4Attention(nn.Module): ) self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.rotary_dim, max_position=max_position, rope_parameters=config.rope_parameters, is_neox_style=False, diff --git a/vllm/model_executor/models/glm4_1v.py b/vllm/model_executor/models/glm4_1v.py index de091f03e881c..786482d77a1d2 100644 --- a/vllm/model_executor/models/glm4_1v.py +++ b/vllm/model_executor/models/glm4_1v.py @@ -678,9 +678,9 @@ class Glm4vVisionTransformer(nn.Module): head_dim = self.hidden_size // self.num_heads self.rotary_pos_emb = get_rope( head_size=head_dim, - rotary_dim=head_dim // 2, max_position=8192, is_neox_style=True, + rope_parameters={"partial_rotary_factor": 0.5}, ) self.blocks = nn.ModuleList( [ diff --git a/vllm/model_executor/models/glm4_moe.py b/vllm/model_executor/models/glm4_moe.py index 8cae5ee425e4d..541d3b2beff83 100644 --- a/vllm/model_executor/models/glm4_moe.py +++ b/vllm/model_executor/models/glm4_moe.py @@ -285,7 +285,6 @@ class Glm4MoeAttention(nn.Module): config.rope_parameters.setdefault("partial_rotary_factor", 0.5) self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, ) diff --git a/vllm/model_executor/models/gpt_j.py b/vllm/model_executor/models/gpt_j.py index f0a34c47da54c..f32ac2639435c 100644 --- a/vllm/model_executor/models/gpt_j.py +++ b/vllm/model_executor/models/gpt_j.py @@ -95,12 +95,13 @@ class GPTJAttention(nn.Module): scaling = self.head_size**-0.5 assert getattr(config, "rotary", True) assert config.rotary_dim % 2 == 0 + rope_parameters = getattr(config, "rope_parameters", {}) + rope_parameters["partial_rotary_factor"] = config.rotary_dim / self.head_size max_position_embeddings = getattr(config, "max_position_embeddings", 8192) self.rotary_emb = get_rope( self.head_size, - rotary_dim=config.rotary_dim, max_position=max_position_embeddings, - rope_parameters=getattr(config, "rope_parameters", None), + rope_parameters=rope_parameters, is_neox_style=False, ) self.attn = Attention( diff --git a/vllm/model_executor/models/gpt_neox.py b/vllm/model_executor/models/gpt_neox.py index 212d605c17285..c4d11b488f38b 100644 --- a/vllm/model_executor/models/gpt_neox.py +++ b/vllm/model_executor/models/gpt_neox.py @@ -92,7 +92,6 @@ class GPTNeoXAttention(nn.Module): max_position_embeddings = getattr(config, "max_position_embeddings", 8192) self.rotary_emb = get_rope( self.head_size, - rotary_dim=self.head_size, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, ) diff --git a/vllm/model_executor/models/gpt_oss.py b/vllm/model_executor/models/gpt_oss.py index cff16b7a7a8cd..6a92cf1533213 100644 --- a/vllm/model_executor/models/gpt_oss.py +++ b/vllm/model_executor/models/gpt_oss.py @@ -67,7 +67,6 @@ class OAIAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=config.max_position_embeddings, dtype=torch.float32, rope_parameters={ diff --git a/vllm/model_executor/models/granite.py b/vllm/model_executor/models/granite.py index 76519c4660f15..82c945f5ad5ec 100644 --- a/vllm/model_executor/models/granite.py +++ 
b/vllm/model_executor/models/granite.py @@ -160,7 +160,6 @@ class GraniteAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, ) diff --git a/vllm/model_executor/models/granitemoe.py b/vllm/model_executor/models/granitemoe.py index b038400a1262a..0b1064b6343e3 100644 --- a/vllm/model_executor/models/granitemoe.py +++ b/vllm/model_executor/models/granitemoe.py @@ -190,7 +190,6 @@ class GraniteMoeAttention(nn.Module): ) self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position, rope_parameters=rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/granitemoehybrid.py b/vllm/model_executor/models/granitemoehybrid.py index 1d9c2f5df4a55..3434716b83789 100644 --- a/vllm/model_executor/models/granitemoehybrid.py +++ b/vllm/model_executor/models/granitemoehybrid.py @@ -271,7 +271,6 @@ class GraniteMoeHybridAttention(nn.Module): if config.position_embedding_type == "rope": self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=config.max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/grok1.py b/vllm/model_executor/models/grok1.py index 6f62a1d11e52e..0a2e5cf39ffd8 100644 --- a/vllm/model_executor/models/grok1.py +++ b/vllm/model_executor/models/grok1.py @@ -181,7 +181,6 @@ class Grok1Attention(nn.Module): ) self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position, rope_parameters=rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/hunyuan_v1.py b/vllm/model_executor/models/hunyuan_v1.py index ccdfa3fe175f1..0e82e84c4edbe 100644 --- a/vllm/model_executor/models/hunyuan_v1.py +++ b/vllm/model_executor/models/hunyuan_v1.py @@ -199,7 +199,6 @@ class HunYuanAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, @@ -305,7 +304,6 @@ class HunYuanCrossAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/internlm2.py b/vllm/model_executor/models/internlm2.py index c79934e121447..3ca8864618628 100644 --- a/vllm/model_executor/models/internlm2.py +++ b/vllm/model_executor/models/internlm2.py @@ -140,7 +140,6 @@ class InternLM2Attention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=rope_parameters, ) diff --git a/vllm/model_executor/models/lfm2.py b/vllm/model_executor/models/lfm2.py index a4a994f97a2f8..142ad3d6d1d1a 100644 --- a/vllm/model_executor/models/lfm2.py +++ b/vllm/model_executor/models/lfm2.py @@ -143,7 +143,6 @@ class Lfm2Attention(nn.Module): ) self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/lfm2_moe.py b/vllm/model_executor/models/lfm2_moe.py index c8669de72dd09..70804e0a843e8 100644 --- a/vllm/model_executor/models/lfm2_moe.py +++ b/vllm/model_executor/models/lfm2_moe.py @@ -236,7 +236,6 @@ class Lfm2MoeAttention(nn.Module): ) self.rotary_emb = get_rope( self.head_dim, - 
rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/llama.py b/vllm/model_executor/models/llama.py index 167dfbca248ce..3507a2bc66c17 100644 --- a/vllm/model_executor/models/llama.py +++ b/vllm/model_executor/models/llama.py @@ -259,7 +259,6 @@ class LlamaAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=getattr(config, "rope_parameters", None), is_neox_style=is_neox_style, diff --git a/vllm/model_executor/models/llama4.py b/vllm/model_executor/models/llama4.py index 423be45e80149..7b3da3e10ab8a 100644 --- a/vllm/model_executor/models/llama4.py +++ b/vllm/model_executor/models/llama4.py @@ -243,7 +243,6 @@ class Llama4Attention(nn.Module): self.rotary_emb = ( get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=is_neox_style, diff --git a/vllm/model_executor/models/minicpm.py b/vllm/model_executor/models/minicpm.py index 67c462f4b25c4..f104018d3aa6c 100644 --- a/vllm/model_executor/models/minicpm.py +++ b/vllm/model_executor/models/minicpm.py @@ -277,7 +277,6 @@ class MiniCPMAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=rope_parameters, ) diff --git a/vllm/model_executor/models/minicpm3.py b/vllm/model_executor/models/minicpm3.py index 0a2bcbd7f6084..c7a54cea21544 100644 --- a/vllm/model_executor/models/minicpm3.py +++ b/vllm/model_executor/models/minicpm3.py @@ -120,7 +120,6 @@ class MiniCPM3Attention(nn.Module): self.rotary_emb = get_rope( self.qk_rope_head_dim, - rotary_dim=self.qk_rope_head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, ) diff --git a/vllm/model_executor/models/minimax_m2.py b/vllm/model_executor/models/minimax_m2.py index 3e6a9add9ec49..ee19288ae6852 100644 --- a/vllm/model_executor/models/minimax_m2.py +++ b/vllm/model_executor/models/minimax_m2.py @@ -199,9 +199,13 @@ class MiniMaxM2Attention(nn.Module): prefix=f"{prefix}.o_proj", ) + if ( + rope_parameters is not None + and "partial_rotary_factor" not in rope_parameters + ): + rope_parameters["partial_rotary_factor"] = rotary_dim / self.head_dim self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=rope_parameters, ) diff --git a/vllm/model_executor/models/minimax_text_01.py b/vllm/model_executor/models/minimax_text_01.py index 390de78cc27b4..4bfe3c391c26f 100644 --- a/vllm/model_executor/models/minimax_text_01.py +++ b/vllm/model_executor/models/minimax_text_01.py @@ -187,7 +187,6 @@ class MiniMaxText01Attention(nn.Module): num_heads: int, head_dim: int, num_kv_heads: int, - rotary_dim: int, max_position: int = 4096 * 32, rope_parameters: dict | None = None, sliding_window: int | None = None, @@ -245,7 +244,6 @@ class MiniMaxText01Attention(nn.Module): ) self.rotary_emb = get_rope( head_size=self.head_dim, - rotary_dim=rotary_dim, max_position=max_position, rope_parameters=rope_parameters, is_neox_style=True, @@ -290,6 +288,8 @@ class MiniMaxText01DecoderLayer(nn.Module): head_dim = getattr(config, "head_dim", None) if head_dim is None: head_dim = config.hidden_size // config.num_attention_heads + rotary_dim = getattr(config, "rotary_dim", head_dim) + config.rope_parameters["partial_rotary_factor"] = 
rotary_dim / head_dim if hasattr(config, "max_model_len") and isinstance(config.max_model_len, int): max_position_embeddings = min( config.max_position_embeddings, config.max_model_len @@ -321,9 +321,6 @@ class MiniMaxText01DecoderLayer(nn.Module): hidden_size=self.hidden_size, num_heads=config.num_attention_heads, head_dim=head_dim, - rotary_dim=config.rotary_dim - if hasattr(config, "rotary_dim") - else head_dim, num_kv_heads=config.num_key_value_heads, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, diff --git a/vllm/model_executor/models/mixtral.py b/vllm/model_executor/models/mixtral.py index 50ec57e7a8053..e170c530ca29f 100644 --- a/vllm/model_executor/models/mixtral.py +++ b/vllm/model_executor/models/mixtral.py @@ -206,7 +206,6 @@ class MixtralAttention(nn.Module): ) self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/mllama4.py b/vllm/model_executor/models/mllama4.py index e944c0ee38aa1..fe963cc6644fb 100644 --- a/vllm/model_executor/models/mllama4.py +++ b/vllm/model_executor/models/mllama4.py @@ -295,11 +295,11 @@ class Llama4VisionAttention(nn.Module): rope_parameters = { "rope_type": "mllama4", "rope_theta": config.rope_parameters["rope_theta"], + "partial_rotary_factor": 0.5, } self.rotary_emb = get_rope( head_size=self.head_dim, - rotary_dim=config.hidden_size // config.num_attention_heads // 2, # number of image patches max_position=(config.image_size // config.patch_size) ** 2, rope_parameters=rope_parameters, diff --git a/vllm/model_executor/models/modernbert.py b/vllm/model_executor/models/modernbert.py index be36f761c63aa..4655ffa7b2f61 100644 --- a/vllm/model_executor/models/modernbert.py +++ b/vllm/model_executor/models/modernbert.py @@ -105,7 +105,6 @@ class ModernBertAttention(nn.Module): self.rotary_emb = get_rope( head_size=self.head_dim, - rotary_dim=self.head_dim, max_position=config.max_position_embeddings, rope_parameters=rope_parameters, dtype=torch.float16, diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index a6cd9ad16c188..71c6b1aa2e814 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -433,7 +433,6 @@ class MolmoAttention(nn.Module): # Rotary embeddings. 
self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=config.rope_parameters, ) diff --git a/vllm/model_executor/models/nemotron.py b/vllm/model_executor/models/nemotron.py index bf83ee5e42a15..21605015c470b 100644 --- a/vllm/model_executor/models/nemotron.py +++ b/vllm/model_executor/models/nemotron.py @@ -199,7 +199,6 @@ class NemotronAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, ) diff --git a/vllm/model_executor/models/nemotron_nas.py b/vllm/model_executor/models/nemotron_nas.py index 734fbc60709fa..19a942a5277cc 100644 --- a/vllm/model_executor/models/nemotron_nas.py +++ b/vllm/model_executor/models/nemotron_nas.py @@ -118,7 +118,6 @@ class DeciLMAttention(LlamaAttention): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=is_neox_style, diff --git a/vllm/model_executor/models/olmo.py b/vllm/model_executor/models/olmo.py index 3bbb4dd242262..dd7c27f10c531 100644 --- a/vllm/model_executor/models/olmo.py +++ b/vllm/model_executor/models/olmo.py @@ -102,7 +102,6 @@ class OlmoAttention(nn.Module): # Rotary embeddings. self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=config.rope_parameters, ) diff --git a/vllm/model_executor/models/olmo2.py b/vllm/model_executor/models/olmo2.py index 88e9c2d8541a1..b030c94b54cd5 100644 --- a/vllm/model_executor/models/olmo2.py +++ b/vllm/model_executor/models/olmo2.py @@ -146,7 +146,6 @@ class Olmo2Attention(nn.Module): rope_parameters = {"rope_type": "default", "rope_theta": rope_theta} self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=rope_parameters, ) diff --git a/vllm/model_executor/models/olmoe.py b/vllm/model_executor/models/olmoe.py index 1376583a99725..a5a926151c5c9 100644 --- a/vllm/model_executor/models/olmoe.py +++ b/vllm/model_executor/models/olmoe.py @@ -171,7 +171,6 @@ class OlmoeAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=True, diff --git a/vllm/model_executor/models/openpangu.py b/vllm/model_executor/models/openpangu.py index bddd9fa50957a..47abd7bf0b68a 100644 --- a/vllm/model_executor/models/openpangu.py +++ b/vllm/model_executor/models/openpangu.py @@ -352,7 +352,6 @@ class OpenPanguMLAAttention(nn.Module): } self.rotary_emb = get_rope( qk_rope_head_dim, - rotary_dim=qk_rope_head_dim, max_position=max_position_embeddings, rope_parameters=rope_parameters, is_neox_style=False, @@ -525,7 +524,6 @@ class OpenPanguEmbeddedAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=self.max_position_embeddings, rope_parameters=config.rope_parameters, is_neox_style=is_neox_style, diff --git a/vllm/model_executor/models/orion.py b/vllm/model_executor/models/orion.py index 544a44ed54681..9d9066c4ba619 100644 --- a/vllm/model_executor/models/orion.py +++ b/vllm/model_executor/models/orion.py @@ -135,7 +135,6 @@ class OrionAttention(nn.Module): self.rotary_emb = get_rope( self.head_dim, - rotary_dim=self.head_dim, max_position=max_position_embeddings, rope_parameters=rope_parameters, ) diff 
--git a/vllm/model_executor/models/ouro.py b/vllm/model_executor/models/ouro.py
index dcae92ed20881..829148b4c1fb7 100644
--- a/vllm/model_executor/models/ouro.py
+++ b/vllm/model_executor/models/ouro.py
@@ -166,7 +166,6 @@ class OuroAttention(nn.Module):
 
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position,
             rope_parameters=config.rope_parameters,
             dual_chunk_attention_config=dual_chunk_attention_config,
diff --git a/vllm/model_executor/models/persimmon.py b/vllm/model_executor/models/persimmon.py
index 8f26c68720a5c..b644603c5baa1 100644
--- a/vllm/model_executor/models/persimmon.py
+++ b/vllm/model_executor/models/persimmon.py
@@ -134,7 +134,6 @@ class PersimmonAttention(nn.Module):
 
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=self.max_position_embeddings,
             rope_parameters=config.rope_parameters,
         )
diff --git a/vllm/model_executor/models/phi.py b/vllm/model_executor/models/phi.py
index 253fbbc41330c..e01e9d47c545c 100644
--- a/vllm/model_executor/models/phi.py
+++ b/vllm/model_executor/models/phi.py
@@ -84,19 +84,18 @@ class PhiAttention(nn.Module):
         prefix: str = "",
     ):
         super().__init__()
-        self.total_num_heads = config.num_attention_heads
         self.hidden_size = config.hidden_size
-        self.head_size = self.hidden_size // self.total_num_heads
+        self.head_size = self.hidden_size // config.num_attention_heads
 
         tensor_model_parallel_world_size = get_tensor_model_parallel_world_size()
-        assert self.total_num_heads % tensor_model_parallel_world_size == 0
-        self.num_heads = self.total_num_heads // tensor_model_parallel_world_size
+        assert config.num_attention_heads % tensor_model_parallel_world_size == 0
+        self.num_heads = config.num_attention_heads // tensor_model_parallel_world_size
 
         # pylint: disable=C0103
         self.qkv_proj = QKVParallelLinear(
             self.hidden_size,
             self.head_size,
-            self.total_num_heads,
+            config.num_attention_heads,
             bias=True,
             quant_config=quant_config,
             prefix=f"{prefix}.qkv_proj",
@@ -109,13 +108,10 @@ class PhiAttention(nn.Module):
         )
 
         scaling = self.head_size**-0.5
-        rotary_dim = config.hidden_size // config.num_attention_heads
-        assert rotary_dim % 2 == 0
         max_position_embeddings = getattr(config, "max_position_embeddings", 2048)
 
         self.rotary_emb = get_rope(
             self.head_size,
-            rotary_dim=rotary_dim,
             max_position=max_position_embeddings,
             rope_parameters=config.rope_parameters,
         )
diff --git a/vllm/model_executor/models/phimoe.py b/vllm/model_executor/models/phimoe.py
index 49530776f8903..14f73d0c64586 100644
--- a/vllm/model_executor/models/phimoe.py
+++ b/vllm/model_executor/models/phimoe.py
@@ -352,7 +352,6 @@ class PhiMoEAttention(nn.Module):
         )
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position,
             rope_parameters=rope_parameters,
             is_neox_style=True,
diff --git a/vllm/model_executor/models/plamo2.py b/vllm/model_executor/models/plamo2.py
index 472de5590dcf8..6765ee0c5779c 100644
--- a/vllm/model_executor/models/plamo2.py
+++ b/vllm/model_executor/models/plamo2.py
@@ -574,7 +574,6 @@ class Plamo2AttentionMixer(nn.Module):
 
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position,
             rope_parameters=config.rope_parameters,
         )
diff --git a/vllm/model_executor/models/plamo3.py b/vllm/model_executor/models/plamo3.py
index 4aeb9d432dcc6..3557104d905cb 100644
--- a/vllm/model_executor/models/plamo3.py
+++ b/vllm/model_executor/models/plamo3.py
@@ -179,7 +179,6 @@ class Plamo3AttentionMixer(nn.Module):
 
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position,
             rope_parameters=rope_parameters,
         )
diff --git a/vllm/model_executor/models/qwen.py b/vllm/model_executor/models/qwen.py
index 12285cf9c1968..492ba2fb12145 100644
--- a/vllm/model_executor/models/qwen.py
+++ b/vllm/model_executor/models/qwen.py
@@ -114,7 +114,6 @@ class QWenAttention(nn.Module):
 
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position_embeddings,
             rope_parameters=rope_parameters,
         )
diff --git a/vllm/model_executor/models/qwen2.py b/vllm/model_executor/models/qwen2.py
index f5501bae78418..3af4a49cd77cc 100644
--- a/vllm/model_executor/models/qwen2.py
+++ b/vllm/model_executor/models/qwen2.py
@@ -164,7 +164,6 @@ class Qwen2Attention(nn.Module):
 
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position,
             rope_parameters=rope_parameters,
             dual_chunk_attention_config=dual_chunk_attention_config,
diff --git a/vllm/model_executor/models/qwen2_5_vl.py b/vllm/model_executor/models/qwen2_5_vl.py
index 3cc3a3a7873c6..fba06e34f6227 100644
--- a/vllm/model_executor/models/qwen2_5_vl.py
+++ b/vllm/model_executor/models/qwen2_5_vl.py
@@ -624,9 +624,9 @@ class Qwen2_5_VisionTransformer(nn.Module):
         head_dim = self.hidden_size // self.num_heads
         self.rotary_pos_emb = get_rope(
             head_size=head_dim,
-            rotary_dim=head_dim // 2,
             max_position=8192,
             is_neox_style=True,
+            rope_parameters={"partial_rotary_factor": 0.5},
         )
 
         self.attn_backend = get_vit_attn_backend(
diff --git a/vllm/model_executor/models/qwen2_moe.py b/vllm/model_executor/models/qwen2_moe.py
index cbc618f1abd08..2750f1864b81a 100644
--- a/vllm/model_executor/models/qwen2_moe.py
+++ b/vllm/model_executor/models/qwen2_moe.py
@@ -244,7 +244,6 @@ class Qwen2MoeAttention(nn.Module):
 
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position_embeddings,
             rope_parameters=rope_parameters,
             dual_chunk_attention_config=dual_chunk_attention_config,
diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py
index 608e90337f452..2c4ac2f8efff1 100644
--- a/vllm/model_executor/models/qwen2_vl.py
+++ b/vllm/model_executor/models/qwen2_vl.py
@@ -621,9 +621,9 @@ class Qwen2VisionTransformer(nn.Module):
         head_dim = embed_dim // num_heads
         self.rotary_pos_emb = get_rope(
             head_size=head_dim,
-            rotary_dim=head_dim // 2,
             max_position=8192,
             is_neox_style=True,
+            rope_parameters={"partial_rotary_factor": 0.5},
         )
 
         self.blocks = nn.ModuleList(
diff --git a/vllm/model_executor/models/qwen3.py b/vllm/model_executor/models/qwen3.py
index 7d2b3e5f9bc79..0d0da52ed7382 100644
--- a/vllm/model_executor/models/qwen3.py
+++ b/vllm/model_executor/models/qwen3.py
@@ -111,7 +111,6 @@ class Qwen3Attention(nn.Module):
 
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position,
             rope_parameters=rope_parameters,
             dual_chunk_attention_config=dual_chunk_attention_config,
diff --git a/vllm/model_executor/models/qwen3_moe.py b/vllm/model_executor/models/qwen3_moe.py
index c6984dc37c51c..0be81ecc7dd3a 100644
--- a/vllm/model_executor/models/qwen3_moe.py
+++ b/vllm/model_executor/models/qwen3_moe.py
@@ -269,7 +269,6 @@ class Qwen3MoeAttention(nn.Module):
 
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position_embeddings,
             rope_parameters=rope_parameters,
             dual_chunk_attention_config=dual_chunk_attention_config,
diff --git a/vllm/model_executor/models/qwen3_next.py b/vllm/model_executor/models/qwen3_next.py
index dd64e3983e381..6a5447ad0fed4 100644
--- a/vllm/model_executor/models/qwen3_next.py
+++ b/vllm/model_executor/models/qwen3_next.py
@@ -747,7 +747,6 @@ class Qwen3NextAttention(nn.Module):
 
         self.rotary_emb = get_rope(
             head_size=self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=config.max_position_embeddings,
             rope_parameters=config.rope_parameters,
             dual_chunk_attention_config=self.dual_chunk_attention_config,
diff --git a/vllm/model_executor/models/qwen3_omni_moe_thinker.py b/vllm/model_executor/models/qwen3_omni_moe_thinker.py
index dbe7bcd07576b..635c3bfdc65c7 100755
--- a/vllm/model_executor/models/qwen3_omni_moe_thinker.py
+++ b/vllm/model_executor/models/qwen3_omni_moe_thinker.py
@@ -333,9 +333,9 @@ class Qwen3Omni_VisionTransformer(nn.Module):
         head_dim = self.hidden_size // self.num_heads
         self.rotary_pos_emb = get_rope(
             head_size=head_dim,
-            rotary_dim=head_dim // 2,
             max_position=8192,
             is_neox_style=True,
+            rope_parameters={"partial_rotary_factor": 0.5},
         )
 
         self.blocks = nn.ModuleList(
diff --git a/vllm/model_executor/models/qwen3_vl.py b/vllm/model_executor/models/qwen3_vl.py
index f8e0ea6284994..fcd58c4d33cd7 100644
--- a/vllm/model_executor/models/qwen3_vl.py
+++ b/vllm/model_executor/models/qwen3_vl.py
@@ -340,9 +340,9 @@ class Qwen3_VisionTransformer(nn.Module):
         head_dim = self.hidden_size // self.num_heads
         self.rotary_pos_emb = get_rope(
             head_size=head_dim,
-            rotary_dim=head_dim // 2,
             max_position=8192,
             is_neox_style=True,
+            rope_parameters={"partial_rotary_factor": 0.5},
         )
 
         self.merger = Qwen3_VisionPatchMerger(
diff --git a/vllm/model_executor/models/seed_oss.py b/vllm/model_executor/models/seed_oss.py
index 267c60157506d..f25223c782552 100644
--- a/vllm/model_executor/models/seed_oss.py
+++ b/vllm/model_executor/models/seed_oss.py
@@ -161,7 +161,6 @@ class SeedOssAttention(nn.Module):
 
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position,
             rope_parameters=rope_parameters,
         )
diff --git a/vllm/model_executor/models/solar.py b/vllm/model_executor/models/solar.py
index 7bef56110cab7..964aa902704b3 100644
--- a/vllm/model_executor/models/solar.py
+++ b/vllm/model_executor/models/solar.py
@@ -160,7 +160,6 @@ class SolarAttention(nn.Module):
 
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position_embeddings,
             rope_parameters=config.rope_parameters,
         )
diff --git a/vllm/model_executor/models/stablelm.py b/vllm/model_executor/models/stablelm.py
index e879599ad3ead..ea4342882feb4 100644
--- a/vllm/model_executor/models/stablelm.py
+++ b/vllm/model_executor/models/stablelm.py
@@ -148,7 +148,6 @@ class StablelmAttention(nn.Module):
         )
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=self.config.max_position_embeddings,
             rope_parameters=self.config.rope_parameters,
         )
diff --git a/vllm/model_executor/models/starcoder2.py b/vllm/model_executor/models/starcoder2.py
index 46422f303ff43..569ca9b082cfa 100644
--- a/vllm/model_executor/models/starcoder2.py
+++ b/vllm/model_executor/models/starcoder2.py
@@ -112,7 +112,6 @@ class Starcoder2Attention(nn.Module):
         )
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=self.max_position_embeddings,
             rope_parameters=config.rope_parameters,
             is_neox_style=True,
diff --git a/vllm/model_executor/models/step3_text.py b/vllm/model_executor/models/step3_text.py
index 077cce84a98dd..7077f1a22e8d7 100644
--- a/vllm/model_executor/models/step3_text.py
+++ b/vllm/model_executor/models/step3_text.py
@@ -196,7 +196,6 @@ class Step3TextAttention(nn.Module):
         )
         self.rotary_emb = get_rope(
             self.head_dim,
-            rotary_dim=self.head_dim,
             max_position=max_position_embedding,
             rope_parameters=rope_parameters,
         )
diff --git a/vllm/model_executor/models/zamba2.py b/vllm/model_executor/models/zamba2.py
index 653b5b9beef7b..fe157887eea91 100644
--- a/vllm/model_executor/models/zamba2.py
+++ b/vllm/model_executor/models/zamba2.py
@@ -230,7 +230,6 @@ class Zamba2Attention(nn.Module):
         if config.use_mem_rope:
             self.rotary_emb = get_rope(
                 head_size=self.attention_head_dim,
-                rotary_dim=self.attention_head_dim,
                 max_position=config.max_position_embeddings,
                 rope_parameters=config.rope_parameters,
                 is_neox_style=True,
diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py
index d761802da9403..fb88c62dc5b23 100644
--- a/vllm/transformers_utils/config.py
+++ b/vllm/transformers_utils/config.py
@@ -306,8 +306,13 @@ def patch_rope_parameters(config: PretrainedConfig) -> None:
     """Provide backwards compatibility for RoPE."""
     from vllm.config.utils import getattr_iter
 
-    rope_theta_names = ("rope_theta", "rotary_emb_base")
-    rope_theta = getattr_iter(config, rope_theta_names, None)
+    # Older custom models may use non-standard field names
+    # which need patching for both Transformers v4 and v5.
+    names = ["rope_theta", "rotary_emb_base"]
+    rope_theta = getattr_iter(config, names, None, warn=True)
+    names = ["partial_rotary_factor", "rotary_pct", "rotary_emb_fraction"]
+    partial_rotary_factor = getattr_iter(config, names, None, warn=True)
+
     if Version(version("transformers")) < Version("5.0.0.dev0"):
         # Transformers v4 installed, legacy config fields may be present
         if (rope_scaling := getattr(config, "rope_scaling", None)) is not None:
@@ -316,14 +321,18 @@ def patch_rope_parameters(config: PretrainedConfig) -> None:
             if not hasattr(config, "rope_parameters"):
                 config.rope_parameters = {"rope_type": "default"}
             config.rope_parameters["rope_theta"] = rope_theta
-        partial_rotary_factor_names = ("partial_rotary_factor", "rotary_pct")
-        partial_rotary_factor = getattr_iter(config, partial_rotary_factor_names, None)
         if partial_rotary_factor is not None:
             if not hasattr(config, "rope_parameters"):
                 config.rope_parameters = {"rope_type": "default"}
             config.rope_parameters["partial_rotary_factor"] = partial_rotary_factor
     elif rope_theta is not None or hasattr(config, "rope_parameters"):
         # Transformers v5 installed
+        # Patch these fields in case they used non-standard names
+        if rope_theta is not None:
+            config.rope_theta = rope_theta
+        if partial_rotary_factor is not None:
+            config.partial_rotary_factor = partial_rotary_factor
+        # Standardize and validate RoPE parameters
        config.standardize_rope_params()
        config.validate_rope()
 
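Note for reviewers: every call-site hunk above relies on the same equivalence between the removed rotary_dim argument and the new rope_parameters["partial_rotary_factor"] entry. The snippet below is a minimal sketch of that mapping, assuming only that get_rope now derives the rotary dimension from head_size * partial_rotary_factor; the helper name infer_rotary_dim is illustrative and is not part of vLLM.

# Illustrative only: rough sketch of the rotary_dim <-> partial_rotary_factor
# mapping assumed by this patch (helper name is hypothetical, not vLLM API).
def infer_rotary_dim(head_size: int, rope_parameters: dict | None = None) -> int:
    """Derive the rotary dimension from the partial rotary factor."""
    rope_parameters = rope_parameters or {}
    return int(head_size * rope_parameters.get("partial_rotary_factor", 1.0))

# Old call site: get_rope(head_dim, rotary_dim=head_dim // 2, ...)
# New call site: get_rope(head_dim, rope_parameters={"partial_rotary_factor": 0.5}, ...)
assert infer_rotary_dim(128, {"partial_rotary_factor": 0.5}) == 64  # same as 128 // 2
assert infer_rotary_dim(128) == 128  # omitting the factor keeps the full head rotated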
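For the final patch_rope_parameters hunk, the following simplified, standalone sketch shows the legacy-name mapping in isolation. It operates on a plain dict, ignores the Transformers v4/v5 branching and PretrainedConfig handling, and the function name normalize_rope_parameters is hypothetical rather than vLLM's actual helper.

# Simplified sketch only; the real logic lives in patch_rope_parameters above.
LEGACY_ROPE_THETA_NAMES = ("rope_theta", "rotary_emb_base")
LEGACY_PARTIAL_ROTARY_NAMES = ("partial_rotary_factor", "rotary_pct", "rotary_emb_fraction")

def normalize_rope_parameters(hf_config: dict) -> dict:
    """Map legacy RoPE field names onto a standard rope_parameters dict."""
    rope_parameters = dict(hf_config.get("rope_parameters") or {"rope_type": "default"})
    for legacy_names, target in (
        (LEGACY_ROPE_THETA_NAMES, "rope_theta"),
        (LEGACY_PARTIAL_ROTARY_NAMES, "partial_rotary_factor"),
    ):
        value = next((hf_config[name] for name in legacy_names if name in hf_config), None)
        if value is not None:
            rope_parameters[target] = value
    return rope_parameters

# A GPT-NeoX-style config that still uses the legacy names:
print(normalize_rope_parameters({"rotary_emb_base": 10000, "rotary_pct": 0.25}))
# -> {'rope_type': 'default', 'rope_theta': 10000, 'partial_rotary_factor': 0.25}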