mirror of https://git.datalinker.icu/vllm-project/vllm.git
synced 2025-12-09 05:34:55 +08:00
65 lines · 2.2 KiB · Bash · Executable File
#!/bin/bash

# This script builds the CPU docker image and runs the offline inference
# inside the container.
# It serves as a sanity check for compilation and basic model usage.
set -ex

# Allow binding to different cores / NUMA nodes so multiple CI jobs can share
# one host. All three values can be overridden from the environment.
CORE_RANGE=${CORE_RANGE:-0-16}          # host CPUs the container is pinned to
OMP_CORE_RANGE=${OMP_CORE_RANGE:-0-16}  # CPUs handed to OpenMP inside the container
NUMA_NODE=${NUMA_NODE:-0}               # NUMA node; also suffixes the container name

# Cap the number of parallel compile jobs during the docker build.
export CMAKE_BUILD_PARALLEL_LEVEL=32
|
|
|
|
# Remove any test container left over from a previous run. Registered as an
# EXIT trap so the container is also torn down no matter how this script ends.
remove_docker_container() {
  set -e
  docker rm -f "cpu-test-${NUMA_NODE}" || true
}
trap remove_docker_container EXIT
remove_docker_container
|
|
|
|
# Build the test image, with the build itself pinned to the requested
# cores and NUMA node.
numactl -C "$CORE_RANGE" -N "$NUMA_NODE" \
  docker build \
    --tag cpu-test-"$NUMA_NODE" \
    --target vllm-test \
    -f docker/Dockerfile.cpu .

# Start the container detached; --shm-size=4g is required for tensor parallel.
docker run -itd \
  --cpuset-cpus="$CORE_RANGE" \
  --cpuset-mems="$NUMA_NODE" \
  --entrypoint /bin/bash \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --privileged=true \
  -e HF_TOKEN \
  --env VLLM_CPU_KVCACHE_SPACE=16 \
  --env VLLM_CPU_CI_ENV=1 \
  -e E2E_OMP_THREADS="$OMP_CORE_RANGE" \
  --shm-size=4g \
  --name cpu-test-"$NUMA_NODE" \
  cpu-test-"$NUMA_NODE"
|
|
|
|
# Run the full CPU test sequence inside the already-running container.
# Arguments:
#   $1 - core range (passed by the caller; not read inside this function)
#   $2 - NUMA node, exported so every docker exec targets cpu-test-"$NUMA_NODE"
# Each docker exec body runs with its own `set -e` so a failing step aborts.
function cpu_tests() {
set -e
export NUMA_NODE=$2

# Sanity check: list the packages installed in the image under test.
docker exec cpu-test-"$NUMA_NODE" bash -c "
set -e
pip list"

# offline inference
docker exec cpu-test-"$NUMA_NODE" bash -c "
set -e
python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m"

# Run kernel tests
docker exec cpu-test-"$NUMA_NODE" bash -c "
set -e
pytest -x -v -s tests/kernels/test_onednn.py
pytest -x -v -s tests/kernels/attention/test_cpu_attn.py"

# basic online serving
# NOTE: single quotes are deliberate here — $E2E_OMP_THREADS and $server_pid
# must be expanded by the shell INSIDE the container, not by this script.
# The server is started in the background, polled until /v1/models responds
# (up to 600s), benchmarked, then terminated with SIGTERM.
docker exec cpu-test-"$NUMA_NODE" bash -c '
set -e
VLLM_CPU_OMP_THREADS_BIND=$E2E_OMP_THREADS vllm serve meta-llama/Llama-3.2-3B-Instruct --max-model-len 2048 &
server_pid=$!
timeout 600 bash -c "until curl localhost:8000/v1/models; do sleep 1; done" || exit 1
vllm bench serve \
--backend vllm \
--dataset-name random \
--model meta-llama/Llama-3.2-3B-Instruct \
--num-prompts 20 \
--endpoint /v1/completions
kill -s SIGTERM $server_pid &'
}
|
|
|
|
# All CPU tests are expected to finish in under 40 minutes; the 2h timeout is
# a hard backstop well above that.
# export -f is required so the child bash spawned by `timeout` can see the
# cpu_tests function definition.
export -f cpu_tests
timeout 2h bash -c "cpu_tests $CORE_RANGE $NUMA_NODE"
|