fix: use cache_salt for gpt-oss (#23186)

Signed-off-by: Marko Rosenmueller <5467316+dr75@users.noreply.github.com>
Marko Rosenmueller authored on 2025-08-19 20:12:25 +02:00; committed by GitHub
parent b94faf9d50
commit 80141bbf2f
3 changed files with 13 additions and 1 deletion
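The two production hunks below are the actual fix: the gpt-oss (harmony) chat path and the Responses path build EngineTokensPrompt directly and previously dropped request.cache_salt, losing prefix-cache isolation for those requests. As a client-side illustration (not part of this diff), a cache_salt can be passed through vLLM's OpenAI-compatible API roughly as follows; the server URL, model name, and use of extra_body are assumptions:

# Minimal client sketch, assuming a local vLLM server; not from this diff.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
resp = client.chat.completions.create(
    model="openai/gpt-oss-20b",  # assumed model name
    messages=[{"role": "user", "content": "Hello"}],
    # vLLM-specific extension field: requests with different salts never
    # share prefix-cache entries, even for identical token prefixes.
    extra_body={"cache_salt": "tenant-a"},
)
print(resp.choices[0].message.content)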


@@ -282,9 +282,11 @@ async def test_serving_chat_could_load_correct_generation_config():
     assert mock_engine.generate.call_args.args[1].repetition_penalty == 1.05


+@pytest.mark.parametrize("model_type", ["gpt_oss", "any"])
 @pytest.mark.asyncio
-async def test_serving_chat_did_set_correct_cache_salt():
+async def test_serving_chat_did_set_correct_cache_salt(model_type):
     mock_model_config = MockModelConfig()
+    mock_model_config.hf_config.model_type = model_type
     mock_engine = MagicMock(spec=MQLLMEngineClient)
     mock_engine.get_tokenizer.return_value = get_tokenizer(MODEL_NAME)
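The rest of the test body sits outside this hunk. As a hedged, distilled stand-in for the property it now checks per model_type (the stubbed prompt construction and names below are assumptions, not the actual test code):

import pytest
from unittest.mock import MagicMock

@pytest.mark.parametrize("model_type", ["gpt_oss", "any"])
def test_cache_salt_forwarded(model_type):
    # Stand-in for the serving path: regardless of model_type, a salt set
    # on the request must surface on the engine prompt.
    request = MagicMock(cache_salt="test-salt")
    engine_prompt = {"prompt_token_ids": [1, 2, 3]}
    if request.cache_salt is not None:
        engine_prompt["cache_salt"] = request.cache_salt
    assert engine_prompt["cache_salt"] == "test-salt"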


@@ -1483,4 +1483,9 @@ class OpenAIServingChat(OpenAIServing):
         # Render prompt token ids.
         prompt_token_ids = render_for_completion(messages)
         engine_prompt = EngineTokensPrompt(prompt_token_ids=prompt_token_ids)
+
+        # Add cache_salt if provided in the request
+        if request.cache_salt is not None:
+            engine_prompt["cache_salt"] = request.cache_salt
+
         return messages, [prompt_token_ids], [engine_prompt]
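The conditional key assignment works because EngineTokensPrompt is a dict-like typed prompt, so optional fields are attached only when present. A self-contained sketch of the same pattern, assuming a simplified TypedDict definition rather than vLLM's actual one:

# Sketch of the optional-field pattern above; the TypedDict shape is an
# assumption, simplified from vLLM's prompt types.
from typing import TypedDict

class EngineTokensPrompt(TypedDict, total=False):
    prompt_token_ids: list[int]
    cache_salt: str

def build_prompt(token_ids: list[int], cache_salt: str | None) -> EngineTokensPrompt:
    prompt = EngineTokensPrompt(prompt_token_ids=token_ids)
    if cache_salt is not None:
        prompt["cache_salt"] = cache_salt  # only set when provided
    return prompt

print(build_prompt([1, 2, 3], "tenant-a"))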


@@ -408,6 +408,11 @@ class OpenAIServingResponses(OpenAIServing):
             request, prev_response)
         prompt_token_ids = render_for_completion(messages)
         engine_prompt = EngineTokensPrompt(prompt_token_ids=prompt_token_ids)
+
+        # Add cache_salt if provided in the request
+        if request.cache_salt is not None:
+            engine_prompt["cache_salt"] = request.cache_salt
+
         return messages, [prompt_token_ids], [engine_prompt]

     async def responses_full_generator(
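With this hunk the same salt also flows through the Responses path used by gpt-oss. A hedged client sketch against the /v1/responses endpoint; the request shape, port, and model name are assumptions:

# Sketch of exercising the Responses path with a cache_salt; endpoint
# shape and model name are assumptions, not taken from this diff.
import requests

resp = requests.post(
    "http://localhost:8000/v1/responses",
    json={
        "model": "openai/gpt-oss-20b",
        "input": "Hello",
        "cache_salt": "tenant-a",  # now forwarded to the engine prompt
    },
    timeout=60,
)
print(resp.json())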