From 72a8639b68964ba50a019856f2fabd3c4fdbaa3f Mon Sep 17 00:00:00 2001 From: Alexander Matveev <59768536+alexm-redhat@users.noreply.github.com> Date: Tue, 18 Mar 2025 17:39:21 -0400 Subject: [PATCH] [V1] TPU - CI/CD use smaller model (#15054) Signed-off-by: Alexander Matveev --- .buildkite/run-tpu-v1-test.sh | 20 ++++++++++---------- tests/v1/tpu/test_basic.py | 5 +++-- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.buildkite/run-tpu-v1-test.sh b/.buildkite/run-tpu-v1-test.sh index e396e8faf78c7..82f40c650f8cf 100755 --- a/.buildkite/run-tpu-v1-test.sh +++ b/.buildkite/run-tpu-v1-test.sh @@ -20,17 +20,17 @@ docker run --privileged --net host --shm-size=16G -it \ && python3 -m pip install pytest \ && python3 -m pip install lm_eval[api]==0.4.4 \ && echo TEST_1 \ - && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/v1/tpu/test_basic.py \ - && echo TEST_2 \ - && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/entrypoints/llm/test_accuracy.py::test_lm_eval_accuracy_v1_engine \ - && echo TEST_3 \ - && VLLM_USE_V1=1 pytest -s -v /workspace/vllm/tests/tpu/test_quantization_accuracy.py \ - && echo TEST_4 \ - && VLLM_USE_V1=1 python3 /workspace/vllm/examples/offline_inference/tpu.py" \ - && echo TEST_5 \ && VLLM_USE_V1=1 python3 /workspace/vllm/tests/tpu/test_compilation.py \ + && echo TEST_2 \ + && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/v1/tpu/test_basic.py \ + && echo TEST_3 \ + && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/entrypoints/llm/test_accuracy.py::test_lm_eval_accuracy_v1_engine \ + && echo TEST_4 \ + && VLLM_USE_V1=1 pytest -s -v /workspace/vllm/tests/tpu/test_quantization_accuracy.py \ + && echo TEST_5 \ + && VLLM_USE_V1=1 python3 /workspace/vllm/examples/offline_inference/tpu.py" \ + - -# TODO: Fix these tests +# TODO: This test fails because it uses RANDOM_SEED sampling # && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/tpu/test_custom_dispatcher.py \ diff --git a/tests/v1/tpu/test_basic.py b/tests/v1/tpu/test_basic.py 
index 241f49e4faea8..417483853916b 100644 --- a/tests/v1/tpu/test_basic.py +++ b/tests/v1/tpu/test_basic.py @@ -15,9 +15,10 @@ if TYPE_CHECKING: from tests.conftest import VllmRunner MODELS = [ + "Qwen/Qwen2.5-1.5B-Instruct", + # TODO: Enable these models with v6e # "Qwen/Qwen2-7B-Instruct", - "meta-llama/Llama-3.1-8B", - # TODO: Add models here as necessary + # "meta-llama/Llama-3.1-8B", ] TENSOR_PARALLEL_SIZES = [1]