Mark invariant normalizer in Gemma as non-persistent (#19788)

Signed-off-by: Yu-Hang Tang <Tang.Maxin@gmail.com>
Yu-Hang "Maxin" Tang 2025-06-18 22:56:03 -07:00 committed by GitHub
parent e2148dc5ea
commit 83ca9ae47b
4 changed files with 29 additions and 3 deletions
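
The normalizer is a constant derived from the model config (hidden_size**0.5), so it should never be read from or written to a checkpoint. Registering it as a persistent buffer places it in the module's state_dict, where a weight loader (for example vLLM's "dummy" load format) can overwrite it; passing persistent=False keeps it out of the state_dict entirely. The standalone sketch below is not part of this commit: the `Toy` module and buffer names are illustrative, and only the `register_buffer(..., persistent=False)` call mirrors the actual change.

```python
# Minimal sketch (illustrative, not from this commit).
import torch
import torch.nn as nn


class Toy(nn.Module):

    def __init__(self, hidden_size: int = 16):
        super().__init__()
        normalizer = hidden_size**0.5
        # Persistent buffer: included in state_dict, so a loader can overwrite it.
        self.register_buffer("norm_persistent", torch.tensor(normalizer))
        # Non-persistent buffer: excluded from state_dict, keeps its computed value.
        self.register_buffer("norm_invariant",
                             torch.tensor(normalizer),
                             persistent=False)


m = Toy()
print(sorted(m.state_dict().keys()))  # ['norm_persistent'] -- the invariant buffer is absent
# Simulate a loader that fills every state_dict entry with dummy values.
m.load_state_dict({"norm_persistent": torch.tensor(0.0)})
print(m.norm_persistent.item())  # 0.0 -> the config-derived constant was clobbered
print(m.norm_invariant.item())   # 4.0 -> sqrt(16), untouched by loading
```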


@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+import numpy as np
+import pytest
+
+MODELS = ["google/gemma-2b", "google/gemma-2-2b", "google/gemma-3-4b-it"]
+
+
+@pytest.mark.parametrize("model", MODELS)
+def test_dummy_loader(vllm_runner, model: str) -> None:
+    with vllm_runner(
+            model,
+            load_format="dummy",
+    ) as llm:
+        normalizers = llm.collective_rpc(lambda self: self.worker.model_runner.
+                                         model.model.normalizer.cpu().item())
+        assert np.allclose(
+            normalizers,
+            llm.llm_engine.model_config.hf_config.hidden_size**0.5,
+            rtol=1e-3)
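
The "dummy" load format skips real checkpoint weights and fills the loaded tensors with placeholder values, so this test only passes once the normalizer is non-persistent: the buffer is then not part of the state_dict the loader touches and still equals hidden_size**0.5 on every worker (collected via collective_rpc). From a vLLM checkout it can be selected with something like `pytest -k test_dummy_loader` (the exact path of the new test file is not shown here).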


@@ -281,7 +281,9 @@ class GemmaModel(nn.Module):
         # data type such as bfloat16, not float32.
         # See https://github.com/huggingface/transformers/pull/29402
         normalizer = self.config.hidden_size**0.5
-        self.register_buffer("normalizer", torch.tensor(normalizer))
+        self.register_buffer("normalizer",
+                             torch.tensor(normalizer),
+                             persistent=False)
         self.make_empty_intermediate_tensors = (
             make_empty_intermediate_tensors_factory(
                 ["hidden_states", "residual"], config.hidden_size))


@@ -267,7 +267,9 @@ class Gemma2Model(nn.Module):
         # data type such as bfloat16, not float32.
         # See https://github.com/huggingface/transformers/pull/29402
         normalizer = self.config.hidden_size**0.5
-        self.register_buffer("normalizer", torch.tensor(normalizer))
+        self.register_buffer("normalizer",
+                             torch.tensor(normalizer),
+                             persistent=False)
         self.make_empty_intermediate_tensors = (
             make_empty_intermediate_tensors_factory(
                 ["hidden_states", "residual"], config.hidden_size))


@@ -371,7 +371,9 @@ class Gemma3Model(nn.Module):
         # data type such as bfloat16, not float32.
         # See https://github.com/huggingface/transformers/pull/29402
         normalizer = self.config.hidden_size**0.5
-        self.register_buffer("normalizer", torch.tensor(normalizer))
+        self.register_buffer("normalizer",
+                             torch.tensor(normalizer),
+                             persistent=False)
         self.make_empty_intermediate_tensors = (
             make_empty_intermediate_tensors_factory(
                 ["hidden_states", "residual"], config.hidden_size))