From 783644e4ac7d4ee324a1817bbe199fc5b557bc7d Mon Sep 17 00:00:00 2001
From: Andreas Karatzas
Date: Thu, 11 Dec 2025 21:54:56 -0600
Subject: [PATCH] [ROCm][CI] Skip multi-GPU speculative decoding tests when
 insufficient GPUs available (#30527)

Signed-off-by: Andreas Karatzas
---
 tests/v1/e2e/test_spec_decode.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/tests/v1/e2e/test_spec_decode.py b/tests/v1/e2e/test_spec_decode.py
index c8587659d6580..fcfc8bdce12e9 100644
--- a/tests/v1/e2e/test_spec_decode.py
+++ b/tests/v1/e2e/test_spec_decode.py
@@ -16,6 +16,16 @@ from vllm.platforms import current_platform
 
 MTP_SIMILARITY_RATE = 0.8
 
+def _skip_if_insufficient_gpus_for_tp(tp_size: int):
+    """Skip test if available GPUs < tp_size on ROCm."""
+    if current_platform.is_rocm():
+        available_gpus = torch.cuda.device_count()
+        if available_gpus < tp_size:
+            pytest.skip(
+                f"Test requires {tp_size} GPUs, but only {available_gpus} available"
+            )
+
+
 def get_test_prompts(mm_enabled: bool):
     prompt_types = ["repeat", "sentence"]
     if mm_enabled:
@@ -455,6 +465,8 @@ def test_eagle_correctness(
         m.setenv("VLLM_ROCM_USE_AITER", "1")
 
     method, model_name, spec_model_name, tp_size = model_setup
+    _skip_if_insufficient_gpus_for_tp(tp_size)
+
     max_model_len = 2048
     max_num_batched_tokens = 128 if enable_chunked_prefill else max_model_len
 
@@ -525,6 +537,7 @@ def test_mtp_correctness(
         m.setenv("VLLM_MLA_DISABLE", "1")
 
     method, model_name, tp_size = model_setup
+    _skip_if_insufficient_gpus_for_tp(tp_size)
 
     ref_llm = LLM(
         model=model_name,
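
Not part of the patch: a minimal, self-contained sketch of how the new helper
is meant to be used, reproducing the patched skip logic outside the vLLM test
suite. The test name test_needs_four_gpus and the tp_size value of 4 are
hypothetical; only the helper body and the current_platform.is_rocm() check
come from the patch itself. On ROCm builds of PyTorch, torch.cuda maps to HIP
devices, so torch.cuda.device_count() reports the visible AMD GPUs.

import pytest
import torch

from vllm.platforms import current_platform


def _skip_if_insufficient_gpus_for_tp(tp_size: int):
    """Skip test if available GPUs < tp_size on ROCm (verbatim from the patch)."""
    if current_platform.is_rocm():
        available_gpus = torch.cuda.device_count()
        if available_gpus < tp_size:
            pytest.skip(
                f"Test requires {tp_size} GPUs, but only {available_gpus} available"
            )


def test_needs_four_gpus():
    # Hypothetical multi-GPU test: on a ROCm host with fewer than 4 visible
    # devices, pytest reports this as SKIPPED instead of failing later during
    # tensor-parallel engine initialization.
    _skip_if_insufficient_gpus_for_tp(tp_size=4)
    # ...multi-GPU test body would run here...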