From c7abff299021a9562dc8b50a82509dd2af28eddb Mon Sep 17 00:00:00 2001
From: Cyrus Leung
Date: Sun, 26 Oct 2025 19:44:27 +0800
Subject: [PATCH] Revert "[CI/Build] Use CPU for mm processing test on CI
 (#27522)" (#27531)

Signed-off-by: DarkLight1337
---
 .buildkite/test-pipeline.yaml | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index 0a7a871fb9c5c..3f1d50d558109 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -733,16 +733,14 @@ steps:
   commands:
   - pytest -v -s models/language/pooling_mteb_test
 
-- label: Multi-Modal Processor and Models Test (CPU) # 44min
+- label: Multi-Modal Processor Test # 44min
   timeout_in_minutes: 60
-  no_gpu: true
   source_file_dependencies:
   - vllm/
   - tests/models/multimodal
   commands:
   - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
   - pytest -v -s models/multimodal/processing
-  - pytest -v -s models/multimodal/test_mapping.py
 
 - label: Multi-Modal Models Test (Standard) # 60min
   timeout_in_minutes: 80
@@ -754,7 +752,7 @@ steps:
   commands:
   - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
   - pip freeze | grep -E 'torch'
-  - pytest -v -s models/multimodal -m core_model --ignore models/multimodal/test_mapping.py --ignore models/multimodal/generation/test_whisper.py --ignore models/multimodal/processing
+  - pytest -v -s models/multimodal -m core_model --ignore models/multimodal/generation/test_whisper.py --ignore models/multimodal/processing
   - cd .. && VLLM_WORKER_MULTIPROC_METHOD=spawn pytest -v -s tests/models/multimodal/generation/test_whisper.py -m core_model # Otherwise, mp_method="spawn" doesn't work
 
 - label: Multi-Modal Accuracy Eval (Small Models) # 50min
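
A minimal sketch for reproducing the restored "Multi-Modal Processor Test" step
locally, assuming a GPU-enabled vLLM development checkout with the test suite's
dependencies installed and the repository's tests/ directory as the working
directory (the working directory is an assumption; the two commands are taken
verbatim from the CI step above):

    # Extra dependency the CI step installs before the processor tests
    pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
    # Multi-modal processing tests that this revert moves back onto GPU runners
    pytest -v -s models/multimodal/processing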