From 78e5e62bbf9d59c1ba1fac3ec57d7899d80e3e39 Mon Sep 17 00:00:00 2001
From: Divakar Verma <137818590+divakar-amd@users.noreply.github.com>
Date: Mon, 22 Dec 2025 19:28:19 -0600
Subject: [PATCH] [AMD][CI] fix v1/engine test_preprocess_error_handling (#31192)

Signed-off-by: Divakar Verma
---
 tests/v1/engine/test_preprocess_error_handling.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/tests/v1/engine/test_preprocess_error_handling.py b/tests/v1/engine/test_preprocess_error_handling.py
index 0586cc64fa104..821ac168d97a9 100644
--- a/tests/v1/engine/test_preprocess_error_handling.py
+++ b/tests/v1/engine/test_preprocess_error_handling.py
@@ -5,6 +5,7 @@
 import pytest
 import torch.cuda
 from vllm import LLM, SamplingParams
+from vllm.platforms import current_platform
 from vllm.v1.engine import EngineCoreRequest
 from vllm.v1.engine.core import EngineCore
 
@@ -14,6 +15,11 @@ MODEL_NAME = "hmellor/tiny-random-LlamaForCausalLM"
 
 def test_preprocess_error_handling(monkeypatch: pytest.MonkeyPatch):
     """Test that preprocessing errors are handled gracefully."""
+    if current_platform.is_rocm():
+        pytest.skip(
+            "Skipped on ROCm: this test only works with 'fork', but ROCm uses 'spawn'."
+        )
+
     assert not torch.cuda.is_initialized(), (
         "fork needs to be used for the engine "
         "core process and this isn't possible if cuda is already initialized"