diff --git a/.buildkite/scripts/hardware_ci/run-tpu-v1-test-part2.sh b/.buildkite/scripts/hardware_ci/run-tpu-v1-test-part2.sh
index d998c1f73b514..734a817fd1a06 100755
--- a/.buildkite/scripts/hardware_ci/run-tpu-v1-test-part2.sh
+++ b/.buildkite/scripts/hardware_ci/run-tpu-v1-test-part2.sh
@@ -4,8 +4,7 @@ set -xu
 
 remove_docker_container() {
-  docker rm -f tpu-test || true;
-  docker rm -f vllm-tpu || true;
+  docker rm -f tpu-test || true;
 }
 
 trap remove_docker_container EXIT
 
diff --git a/.buildkite/scripts/hardware_ci/run-tpu-v1-test.sh b/.buildkite/scripts/hardware_ci/run-tpu-v1-test.sh
index e565d4b246945..9e7b5a546243c 100755
--- a/.buildkite/scripts/hardware_ci/run-tpu-v1-test.sh
+++ b/.buildkite/scripts/hardware_ci/run-tpu-v1-test.sh
@@ -5,7 +5,6 @@ set -xu
 
 remove_docker_container() {
   docker rm -f tpu-test || true;
-  docker rm -f vllm-tpu || true;
 }
 
 trap remove_docker_container EXIT
diff --git a/.buildkite/scripts/tpu/config_v6e_1.env b/.buildkite/scripts/tpu/config_v6e_1.env
index 03ec116f698d2..c9e3c26571e76 100644
--- a/.buildkite/scripts/tpu/config_v6e_1.env
+++ b/.buildkite/scripts/tpu/config_v6e_1.env
@@ -1,6 +1,6 @@
 # Environment config
 TEST_NAME=llama8b
-CONTAINER_NAME=vllm-tpu
+CONTAINER_NAME=tpu-test
 
 # vllm config
 MODEL=meta-llama/Llama-3.1-8B-Instruct
diff --git a/.buildkite/scripts/tpu/docker_run_bm.sh b/.buildkite/scripts/tpu/docker_run_bm.sh
index 8959877a3c052..08e36611809d9 100755
--- a/.buildkite/scripts/tpu/docker_run_bm.sh
+++ b/.buildkite/scripts/tpu/docker_run_bm.sh
@@ -12,8 +12,6 @@ source /etc/environment
 source $ENV_FILE
 
 remove_docker_container() {
-  docker rm -f tpu-test || true;
-  docker rm -f vllm-tpu || true;
   docker rm -f $CONTAINER_NAME || true;
 }
 
diff --git a/.buildkite/scripts/tpu/quantized_v6e_1.env b/.buildkite/scripts/tpu/quantized_v6e_1.env
index bab34b3be3b9a..bd25c803081a6 100644
--- a/.buildkite/scripts/tpu/quantized_v6e_1.env
+++ b/.buildkite/scripts/tpu/quantized_v6e_1.env
@@ -1,6 +1,6 @@
 # Environment config
 TEST_NAME=llama8bw8a8
-CONTAINER_NAME=vllm-tpu
+CONTAINER_NAME=tpu-test
 
 # vllm config
 MODEL=RedHatAI/Meta-Llama-3.1-8B-Instruct-quantized.w8a8