diff --git a/.buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh b/.buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh
index fdb8ec5393b3..b2e910e1ba8a 100644
--- a/.buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh
+++ b/.buildkite/lm-eval-harness/run-lm-eval-gsm-hf-baseline.sh
@@ -2,7 +2,7 @@
 # We can use this script to compute baseline accuracy on GSM for transformers.
 #
 # Make sure you have lm-eval-harness installed:
-# pip install git+https://github.com/EleutherAI/lm-evaluation-harness.git@9516087b81a61d0e220b22cc1b75be76de23bc10
+# pip install lm-eval==0.4.4

 usage() {
     echo``
diff --git a/.buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh b/.buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh
index de841d959a4e..4d32b49a4fac 100644
--- a/.buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh
+++ b/.buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh
@@ -3,7 +3,7 @@
 # We use this for fp8, which HF does not support.
 #
 # Make sure you have lm-eval-harness installed:
-# pip install lm-eval==0.4.3
+# pip install lm-eval==0.4.4

 usage() {
     echo``
diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index 427dc14513d4..66c7a8dd82c1 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -98,7 +98,6 @@ steps:
   - vllm/
   commands:
   - pip install -e ./plugins/vllm_add_dummy_model
-  - pip install git+https://github.com/EleutherAI/lm-evaluation-harness.git@a4987bba6e9e9b3f22bd3a6c1ecf0abd04fd5622#egg=lm_eval[api]
   - pytest -v -s entrypoints/llm --ignore=entrypoints/llm/test_lazy_outlines.py --ignore=entrypoints/llm/test_generate.py --ignore=entrypoints/llm/test_generate_multiple_loras.py --ignore=entrypoints/llm/test_guided_generate.py
   - pytest -v -s entrypoints/llm/test_lazy_outlines.py # it needs a clean process
   - pytest -v -s entrypoints/llm/test_generate.py # it needs a clean process
@@ -278,7 +277,6 @@ steps:
   - csrc/
   - vllm/model_executor/layers/quantization
   commands:
-  - pip install lm-eval
   - export VLLM_WORKER_MULTIPROC_METHOD=spawn
   - bash ./run-tests.sh -c configs/models-small.txt -t 1
@@ -492,6 +490,5 @@ steps:
   - csrc/
   - vllm/model_executor/layers/quantization
   commands:
-  - pip install lm-eval
   - export VLLM_WORKER_MULTIPROC_METHOD=spawn
   - bash ./run-tests.sh -c configs/models-large.txt -t 4
diff --git a/docs/source/quantization/fp8.rst b/docs/source/quantization/fp8.rst
index d7d9b21b4b94..aacd07a34ad4 100644
--- a/docs/source/quantization/fp8.rst
+++ b/docs/source/quantization/fp8.rst
@@ -106,7 +106,7 @@ Install ``vllm`` and ``lm-evaluation-harness``:

 .. code-block:: console

-    $ pip install vllm lm_eval==0.4.3
+    $ pip install vllm lm-eval==0.4.4

 Load and run the model in ``vllm``:

diff --git a/requirements-test.txt b/requirements-test.txt
index 37c3bd8ba879..997df9afac76 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -22,6 +22,7 @@
 timm # required for internvl test
 transformers_stream_generator # required for qwen-vl test
 matplotlib # required for qwen-vl test
 datamodel_code_generator # required for minicpm3 test
+lm-eval[api]==0.4.4 # required for model evaluation test
 # TODO: Add this after fully implementing llava(mantis)
 # git+https://github.com/TIGER-AI-Lab/Mantis.git # required for llava(mantis) test