From 59d5d2c736028d8a67f230149ad6470adb1e71e5 Mon Sep 17 00:00:00 2001 From: "Li, Jiang" Date: Fri, 12 Sep 2025 18:51:01 +0800 Subject: [PATCH] [CI/Build] Skip prompt embeddings tests on V1-only CPU backend (#24721) Signed-off-by: jiang1.li --- tests/models/language/generation/test_common.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/models/language/generation/test_common.py b/tests/models/language/generation/test_common.py index 062258930fe1..a39f24c80f1c 100644 --- a/tests/models/language/generation/test_common.py +++ b/tests/models/language/generation/test_common.py @@ -119,6 +119,12 @@ def test_models(hf_runner, vllm_runner, example_prompts, model: str, # in parts of the operators pytest.skip(f"Skipping '{model}' model test with AITER kernel.") + # Note: can be removed when + # https://github.com/vllm-project/vllm/pull/24278 is finished + if current_platform.is_cpu() and use_prompt_embeds: + pytest.skip("Skipping use_prompt_embeds=True with " + "V1-only CPU backend.") + with hf_runner(model) as hf_model: hf_outputs = hf_model.generate_greedy_logprobs_limit( example_prompts, max_tokens, num_logprobs)