[V1] TPU - CI/CD use smaller model (#15054)
Signed-off-by: Alexander Matveev <amatveev@redhat.com>
parent 99abb8b650
commit 72a8639b68
@@ -20,17 +20,17 @@ docker run --privileged --net host --shm-size=16G -it \
     && python3 -m pip install pytest \
     && python3 -m pip install lm_eval[api]==0.4.4 \
     && echo TEST_1 \
-    && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/v1/tpu/test_basic.py \
-    && echo TEST_2 \
-    && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/entrypoints/llm/test_accuracy.py::test_lm_eval_accuracy_v1_engine \
-    && echo TEST_3 \
-    && VLLM_USE_V1=1 pytest -s -v /workspace/vllm/tests/tpu/test_quantization_accuracy.py \
-    && echo TEST_4 \
-    && VLLM_USE_V1=1 python3 /workspace/vllm/examples/offline_inference/tpu.py" \
-    && echo TEST_5 \
     && VLLM_USE_V1=1 python3 /workspace/vllm/tests/tpu/test_compilation.py \
+    && echo TEST_2 \
+    && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/v1/tpu/test_basic.py \
+    && echo TEST_3 \
+    && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/entrypoints/llm/test_accuracy.py::test_lm_eval_accuracy_v1_engine \
+    && echo TEST_4 \
+    && VLLM_USE_V1=1 pytest -s -v /workspace/vllm/tests/tpu/test_quantization_accuracy.py \
+    && echo TEST_5 \
+    && VLLM_USE_V1=1 python3 /workspace/vllm/examples/offline_inference/tpu.py" \
 
 
-# TODO: Fix these tests
+# TODO: This test fails because it uses RANDOM_SEED sampling
 # && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/tpu/test_custom_dispatcher.py \
 
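Note on the hunk above: the new ordering runs the compilation test first and ends the docker bash -c string on the offline-inference example (TEST_5), whereas the old version closed the quote before TEST_5. For orientation only, below is a minimal, hypothetical sketch of the kind of offline-inference smoke test TEST_5 runs; the actual contents of examples/offline_inference/tpu.py are not part of this diff, and the model name and sampling settings are assumptions.

# Hypothetical sketch only, not the real examples/offline_inference/tpu.py.
import os

# Select the V1 engine, mirroring VLLM_USE_V1=1 in the CI script.
os.environ["VLLM_USE_V1"] = "1"

from vllm import LLM, SamplingParams

prompts = ["The capital of France is"]
sampling_params = SamplingParams(temperature=0.0, max_tokens=16)

# Smaller model, in line with this commit's intent (assumed here).
llm = LLM(model="Qwen/Qwen2.5-1.5B-Instruct", max_model_len=512)
for output in llm.generate(prompts, sampling_params):
    print(output.outputs[0].text)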
@@ -15,9 +15,10 @@ if TYPE_CHECKING:
     from tests.conftest import VllmRunner
 
 MODELS = [
+    "Qwen/Qwen2.5-1.5B-Instruct",
+    # TODO: Enable this models with v6e
     # "Qwen/Qwen2-7B-Instruct",
-    "meta-llama/Llama-3.1-8B",
-    # TODO: Add models here as necessary
+    # "meta-llama/Llama-3.1-8B",
 ]
 
 TENSOR_PARALLEL_SIZES = [1]
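The hunk above swaps the test model list to a smaller checkpoint: meta-llama/Llama-3.1-8B is commented out and Qwen/Qwen2.5-1.5B-Instruct takes its place (presumably in the TPU basic test; the file path is not shown in this extract). As a rough sketch, a parametrized test would typically consume MODELS and TENSOR_PARALLEL_SIZES along the following lines, assuming vLLM's vllm_runner pytest fixture from tests/conftest.py; the generation settings and assertion are assumptions, not the file's actual test body.

# Hypothetical sketch, the real test body is not shown in this diff.
import pytest

MODELS = ["Qwen/Qwen2.5-1.5B-Instruct"]
TENSOR_PARALLEL_SIZES = [1]


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("tensor_parallel_size", TENSOR_PARALLEL_SIZES)
def test_models(vllm_runner, monkeypatch, model: str,
                tensor_parallel_size: int) -> None:
    # Force the V1 engine, mirroring VLLM_USE_V1=1 in the CI script.
    monkeypatch.setenv("VLLM_USE_V1", "1")
    with vllm_runner(model,
                     tensor_parallel_size=tensor_parallel_size,
                     max_model_len=1024) as vllm_model:
        outputs = vllm_model.generate_greedy(["Hello, my name is"], 16)
    # Smoke check: one completion per prompt.
    assert len(outputs) == 1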