[CI][TPU] Temporarily Disable Quant Test on TPU (#15649)

Signed-off-by: rshaw@neuralmagic.com <robertgshaw2@gmail.com>
Author: Robert Shaw
Date: 2025-03-27 22:45:05 -04:00 (committed by GitHub)
parent b4245a48df
commit 8a49eea74b
2 changed files with 5 additions and 7 deletions


@@ -28,15 +28,16 @@ docker run --privileged --net host --shm-size=16G -it \
     && echo TEST_3 \
     && pytest -v -s /workspace/vllm/tests/entrypoints/llm/test_accuracy.py::test_lm_eval_accuracy_v1_engine \
     && echo TEST_4 \
-    && pytest -s -v /workspace/vllm/tests/tpu/test_quantization_accuracy.py \
-    && echo TEST_5 \
     && python3 /workspace/vllm/examples/offline_inference/tpu.py \
-    && echo TEST_6 \
+    && echo TEST_5 \
     && pytest -s -v /workspace/vllm/tests/tpu/worker/test_tpu_model_runner.py \
-    && echo TEST_7 \
+    && echo TEST_6 \
     && pytest -s -v /workspace/vllm/tests/v1/tpu/test_sampler.py" \
 # TODO: This test fails because it uses RANDOM_SEED sampling
 # && VLLM_USE_V1=1 pytest -v -s /workspace/vllm/tests/tpu/test_custom_dispatcher.py \
+# TODO: Re-enable this after fixing recompilation in quantization.
+# && echo TEST_4 \
+# && pytest -s -v /workspace/vllm/tests/tpu/test_quantization_accuracy.py \
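This hunk disables the quantization accuracy test by commenting it out of the CI command chain and renumbers the remaining TEST_N echo markers so the log labels stay contiguous. For comparison only, a hypothetical alternative (not what this commit does) would be to skip the test inside the module itself with a pytest marker; the test name and reason string below are illustrative, not vLLM's actual ones:

    import pytest

    # Hypothetical sketch: skip at the test level instead of the CI script.
    # This commit does NOT do this; it comments the test out of the CI chain.
    @pytest.mark.skip(
        reason="Temporarily disabled: recompilation issue in quantization on TPU")
    def test_quantization_accuracy():  # illustrative name only
        ...

A module-level skip keeps the test visible (reported as skipped) in every environment, whereas editing the CI script removes it from this pipeline only; the TODO comments above preserve the commands for easy re-enabling.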


@@ -31,14 +31,12 @@ TENSOR_PARALLEL_SIZES = [1]
                     reason="This is a basic test for TPU only")
 @pytest.mark.parametrize("model", MODELS)
 @pytest.mark.parametrize("max_tokens", [5])
-@pytest.mark.parametrize("enforce_eager", [True])
 @pytest.mark.parametrize("tensor_parallel_size", TENSOR_PARALLEL_SIZES)
 def test_models(
     vllm_runner: type[VllmRunner],
     monkeypatch: pytest.MonkeyPatch,
     model: str,
     max_tokens: int,
-    enforce_eager: bool,
     tensor_parallel_size: int,
 ) -> None:
     prompt = "The next numbers of the sequence " + ", ".join(
@@ -51,7 +49,6 @@ def test_models(
     with vllm_runner(
             model,
             max_model_len=8192,
-            enforce_eager=enforce_eager,
             gpu_memory_utilization=0.7,
             max_num_seqs=16,
             tensor_parallel_size=tensor_parallel_size) as vllm_model:
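Dropping the enforce_eager parametrization means test_models now always runs vLLM's default (compiled) execution path. For context, a rough sketch of what the removed flag controls when constructing an engine directly; the model name is a placeholder, not an entry from the test's MODELS list:

    from vllm import LLM

    # enforce_eager=True asks vLLM to skip graph compilation/capture and run
    # the model eagerly; the deleted parametrization forced this mode in CI.
    llm = LLM(model="some/model",   # placeholder, not from the test
              enforce_eager=True,
              max_model_len=8192)

Removing the eager-only axis both halves the parametrized matrix and keeps the TPU CI exercising the compiled path that production deployments use.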