Mirror of https://git.datalinker.icu/vllm-project/vllm.git (synced 2026-04-06 03:57:02 +08:00)

Merge branch 'main' into refactor-fp8-linear

This commit is contained in:
commit 38825fce0f
@@ -0,0 +1,14 @@
model_name: "Qwen/Qwen3-235B-A22B-Instruct-2507-FP8"
tasks:
- name: "mmlu_pro"
  metrics:
  - name: "exact_match,custom-extract"
    value: 0.82
limit: 250 # will run on 250 * 14 subjects = 3500 samples
num_fewshot: 5
enforce_eager: false # we use false to speed up the eval process
kv_cache_dtype: fp8 # we use fp8 to speed up the eval process
max_model_len: 40960
apply_chat_template: true
fewshot_as_multiturn: true
gen_kwargs: "temperature=0,top_p=1,top_k=0,max_gen_toks=5632,until=<|ENDANSWER|>"
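As a point of reference, the sketch below is not part of the diff; it simply loads a config like the one above and reads the fields the eval launcher consumes, falling back to the same defaults the launcher uses. The filename is an assumption, and PyYAML is assumed to be installed.

```python
# Illustrative only: load an eval config and inspect the fields used by the launcher.
import yaml

with open("Qwen3-235B-A22B-Instruct-2507-FP8.yaml") as f:  # path is hypothetical
    eval_config = yaml.safe_load(f)

print(eval_config["model_name"])                  # Qwen/Qwen3-235B-A22B-Instruct-2507-FP8
print(eval_config.get("enforce_eager", "true"))   # False (bool) -> forwarded to vLLM
print(eval_config.get("kv_cache_dtype", "auto"))  # fp8
print(eval_config.get("max_model_len", 4096))     # 40960
```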
@@ -1 +0,0 @@
Meta-Llama-4-Maverick-17B-128E-Instruct-FP8.yaml
@@ -0,0 +1 @@
Qwen3-235B-A22B-Instruct-2507-FP8.yaml
@@ -21,10 +21,13 @@ def launch_lm_eval(eval_config, tp_size):
    max_model_len = eval_config.get("max_model_len", 4096)
    batch_size = eval_config.get("batch_size", "auto")
    backend = eval_config.get("backend", "vllm")
    enforce_eager = eval_config.get("enforce_eager", "true")
    kv_cache_dtype = eval_config.get("kv_cache_dtype", "auto")
    model_args = (
        f"pretrained={eval_config['model_name']},"
        f"tensor_parallel_size={tp_size},"
        f"enforce_eager=true,"
        f"enforce_eager={enforce_eager},"
        f"kv_cache_dtype={kv_cache_dtype},"
        f"add_bos_token=true,"
        f"trust_remote_code={trust_remote_code},"
        f"max_model_len={max_model_len},"
@@ -37,8 +40,13 @@ def launch_lm_eval(eval_config, tp_size):
        limit=eval_config["limit"],
        # TODO(yeq): using chat template w/ fewshot_as_multiturn is supposed help
        # text models. however, this is regressing measured strict-match for
        # existing text models in CI, so only apply it for mm.
        apply_chat_template=backend == "vllm-vlm",
        # existing text models in CI, so only apply it for mm, or explicitly set
        apply_chat_template=eval_config.get(
            "apply_chat_template", backend == "vllm-vlm"
        ),
        fewshot_as_multiturn=eval_config.get("fewshot_as_multiturn", False),
        # Forward decoding and early-stop controls (e.g., max_gen_toks, until=...)
        gen_kwargs=eval_config.get("gen_kwargs"),
        batch_size=batch_size,
    )
    return results
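For illustration only, the following sketch shows the `model_args` string that the newly configurable `enforce_eager` and `kv_cache_dtype` settings produce. The config values mirror the Qwen3 YAML above, the `tp_size` value is assumed, and the surrounding lm-eval call is omitted.

```python
# Hypothetical inputs; only the string construction mirrors the diff above.
eval_config = {
    "model_name": "Qwen/Qwen3-235B-A22B-Instruct-2507-FP8",
    "enforce_eager": False,
    "kv_cache_dtype": "fp8",
    "max_model_len": 40960,
}
tp_size = 8  # assumed tensor-parallel size

enforce_eager = eval_config.get("enforce_eager", "true")
kv_cache_dtype = eval_config.get("kv_cache_dtype", "auto")
model_args = (
    f"pretrained={eval_config['model_name']},"
    f"tensor_parallel_size={tp_size},"
    f"enforce_eager={enforce_eager},"
    f"kv_cache_dtype={kv_cache_dtype},"
    f"max_model_len={eval_config.get('max_model_len', 4096)}"
)
print(model_args)
# pretrained=Qwen/Qwen3-235B-A22B-Instruct-2507-FP8,tensor_parallel_size=8,
# enforce_eager=False,kv_cache_dtype=fp8,max_model_len=40960
```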
@@ -1,184 +0,0 @@
steps:
- label: "Wait for container to be ready"
  key: wait-for-container-image
  agents:
    queue: A100
  plugins:
  - kubernetes:
      podSpec:
        containers:
        - image: badouralix/curl-jq
          command:
          - sh .buildkite/nightly-benchmarks/scripts/wait-for-image.sh
- label: "Cleanup H100"
  agents:
    queue: H100
  depends_on: ~
  command: docker system prune -a --volumes --force

- label: "A100"
  # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
  agents:
    queue: A100
  depends_on: wait-for-container-image
  if: build.branch == "main"
  plugins:
  - kubernetes:
      podSpec:
        priorityClassName: perf-benchmark
        containers:
        - image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
          command:
          - bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
          resources:
            limits:
              nvidia.com/gpu: 8
          volumeMounts:
          - name: devshm
            mountPath: /dev/shm
          env:
          - name: VLLM_USAGE_SOURCE
            value: ci-test
          - name: HF_TOKEN
            valueFrom:
              secretKeyRef:
                name: hf-token-secret
                key: token
        nodeSelector:
          nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB
        volumes:
        - name: devshm
          emptyDir:
            medium: Memory

- label: "H200"
  # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
  agents:
    queue: H200
  depends_on: wait-for-container-image
  if: build.branch == "main"
  plugins:
  - docker#v5.12.0:
      image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
      command:
      - bash
      - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
      mount-buildkite-agent: true
      propagate-environment: true
      ipc: host
      gpus: 4,5,6,7
      volumes:
      - /data/benchmark-hf-cache:/root/.cache/huggingface
      environment:
      - VLLM_USAGE_SOURCE
      - HF_TOKEN

#- block: "Run H100 Benchmark"
#  key: block-h100
#  depends_on: ~

- label: "H100"
  # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
  agents:
    queue: H100
  depends_on: wait-for-container-image
  if: build.branch == "main"
  plugins:
  - docker#v5.12.0:
      image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
      command:
      - bash
      - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
      mount-buildkite-agent: true
      propagate-environment: true
      ipc: host
      gpus: all # see CUDA_VISIBLE_DEVICES for actual GPUs used
      volumes:
      - /data/benchmark-hf-cache:/root/.cache/huggingface
      environment:
      - VLLM_USAGE_SOURCE
      - HF_TOKEN

# Premerge benchmark
- label: "A100"
  # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
  agents:
    queue: A100
  depends_on: wait-for-container-image
  if: build.branch != "main"
  plugins:
  - kubernetes:
      podSpec:
        priorityClassName: perf-benchmark
        containers:
        - image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
          command:
          - bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
          resources:
            limits:
              nvidia.com/gpu: 8
          volumeMounts:
          - name: devshm
            mountPath: /dev/shm
          env:
          - name: VLLM_USAGE_SOURCE
            value: ci-test
          - name: HF_TOKEN
            valueFrom:
              secretKeyRef:
                name: hf-token-secret
                key: token
        nodeSelector:
          nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB
        volumes:
        - name: devshm
          emptyDir:
            medium: Memory

- label: "H200"
  # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
  agents:
    queue: H200
  depends_on: wait-for-container-image
  if: build.branch != "main"
  plugins:
  - docker#v5.12.0:
      image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
      command:
      - bash
      - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
      mount-buildkite-agent: true
      propagate-environment: true
      ipc: host
      gpus: 4,5,6,7
      volumes:
      - /data/benchmark-hf-cache:/root/.cache/huggingface
      environment:
      - VLLM_USAGE_SOURCE
      - HF_TOKEN

#- block: "Run H100 Benchmark"
#  key: block-h100
#  depends_on: ~

- label: "H100"
  # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
  agents:
    queue: H100
  depends_on: wait-for-container-image
  if: build.branch != "main"
  plugins:
  - docker#v5.12.0:
      image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
      command:
      - bash
      - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
      mount-buildkite-agent: true
      propagate-environment: true
      ipc: host
      gpus: all # see CUDA_VISIBLE_DEVICES for actual GPUs used
      volumes:
      - /data/benchmark-hf-cache:/root/.cache/huggingface
      environment:
      - VLLM_USAGE_SOURCE
      - HF_TOKEN
@@ -1,28 +0,0 @@
# Nightly benchmark annotation

## Description

This file contains the downloading link for benchmarking results.

- [benchmarking pipeline](artifact://nightly-pipeline.yaml)
- [benchmarking results](artifact://results.zip)
- [benchmarking code](artifact://nightly-benchmarks.zip)

Please download the visualization scripts in the post

## Results reproduction

- Find the docker we use in `benchmarking pipeline`
- Deploy the docker, and inside the docker:
  - Download `nightly-benchmarks.zip`.
  - In the same folder, run the following code:

```bash
export HF_TOKEN=<your HF token>
apt update
apt install -y git
unzip nightly-benchmarks.zip
VLLM_SOURCE_CODE_LOC=./ bash .buildkite/nightly-benchmarks/scripts/run-nightly-benchmarks.sh
```

And the results will be inside `./benchmarks/results`.
@@ -1,39 +0,0 @@
# Nightly benchmark

This benchmark aims to:

- Provide performance clarity: Provide clarity on which one (vllm, tensorrt-llm, lmdeploy and SGLang) leads in performance in what workload.
- Be reproducible: one can run the exact same set of benchmarking commands inside the exact same docker by following reproducing instructions.

Latest results: [results link](https://blog.vllm.ai/2024/09/05/perf-update.html), scroll to the end.

Latest reproduction guide: [github issue link](https://github.com/vllm-project/vllm/issues/8176)

## Setup

- Docker images:
  - vLLM: `vllm/vllm-openai:v0.6.2`
  - SGLang: `lmsysorg/sglang:v0.3.2-cu121`
  - LMDeploy: `openmmlab/lmdeploy:v0.6.1-cu12`
  - TensorRT-LLM: `nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3`
    - *NOTE: we use r24.07 as the current implementation only works for this version. We are going to bump this up.*
  - Check [nightly-pipeline.yaml](nightly-pipeline.yaml) for the concrete docker images, specs and commands we use for the benchmark.
- Hardware
  - 8x Nvidia A100 GPUs
- Workload:
  - Dataset
    - ShareGPT dataset
    - Prefill-heavy dataset (in average 462 input tokens, 16 tokens as output)
    - Decode-heavy dataset (in average 462 input tokens, 256 output tokens)
    - Check [nightly-tests.json](tests/nightly-tests.json) for the concrete configuration of datasets we use.
  - Models: llama-3 8B, llama-3 70B.
    - We do not use llama 3.1 as it is incompatible with trt-llm r24.07. ([issue](https://github.com/NVIDIA/TensorRT-LLM/issues/2105)).
  - Average QPS (query per second): 2, 4, 8, 16, 32 and inf.
    - Queries are randomly sampled, and arrival patterns are determined via Poisson process, but all with fixed random seed.
  - Evaluation metrics: Throughput (higher the better), TTFT (time to the first token, lower the better), ITL (inter-token latency, lower the better).

## Known issues

- TRT-LLM crashes with Llama 3.1 8B [issue](https://github.com/NVIDIA/TensorRT-LLM/issues/2105).
- TGI does not support `ignore-eos` flag.
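The "Poisson process with fixed random seed" arrival pattern mentioned in the deleted README above can be illustrated with a short sketch. This is an explanatory example, not the benchmark client's actual code; the QPS value and the seed are arbitrary.

```python
# Illustrative only: request arrival times for a Poisson process at a given
# average QPS, with a fixed seed so the schedule is reproducible.
import numpy as np

def poisson_arrival_times(num_requests: int, qps: float, seed: int = 0) -> np.ndarray:
    rng = np.random.default_rng(seed)
    # Inter-arrival gaps of a Poisson process are exponential with mean 1/qps.
    gaps = rng.exponential(scale=1.0 / qps, size=num_requests)
    return np.cumsum(gaps)

print(poisson_arrival_times(num_requests=5, qps=4))  # seconds since start
```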
@@ -1,196 +0,0 @@
common_pod_spec: &common_pod_spec
  priorityClassName: perf-benchmark
  nodeSelector:
    nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB
  volumes:
  - name: devshm
    emptyDir:
      medium: Memory
  - name: hf-cache
    hostPath:
      path: /root/.cache/huggingface
      type: Directory

common_container_settings: &common_container_settings
  command:
  - bash .buildkite/nightly-benchmarks/scripts/run-nightly-benchmarks.sh
  resources:
    limits:
      nvidia.com/gpu: 8
  volumeMounts:
  - name: devshm
    mountPath: /dev/shm
  - name: hf-cache
    mountPath: /root/.cache/huggingface
  env:
  - name: VLLM_USAGE_SOURCE
    value: ci-test
  - name: HF_HOME
    value: /root/.cache/huggingface
  - name: VLLM_SOURCE_CODE_LOC
    value: /workspace/build/buildkite/vllm/performance-benchmark
  - name: HF_TOKEN
    valueFrom:
      secretKeyRef:
        name: hf-token-secret
        key: token

steps:
- block: ":rocket: Ready for comparing vllm against alternatives? This will take 4 hours."

- label: "A100 vllm step 10"
  priority: 100
  agents:
    queue: A100
  plugins:
  - kubernetes:
      podSpec:
        <<: *common_pod_spec
        containers:
        - image: vllm/vllm-openai:v0.6.2
          <<: *common_container_settings

- label: "A100 sglang benchmark"
  priority: 100
  agents:
    queue: A100
  plugins:
  - kubernetes:
      podSpec:
        <<: *common_pod_spec
        containers:
        - image: lmsysorg/sglang:v0.3.2-cu121
          <<: *common_container_settings

- label: "A100 lmdeploy benchmark"
  priority: 100
  agents:
    queue: A100
  plugins:
  - kubernetes:
      podSpec:
        <<: *common_pod_spec
        containers:
        - image: openmmlab/lmdeploy:v0.6.1-cu12
          <<: *common_container_settings

- label: "A100 trt llama-8B"
  priority: 100
  agents:
    queue: A100
  plugins:
  - kubernetes:
      podSpec:
        <<: *common_pod_spec
        containers:
        - image: nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3
          <<: *common_container_settings
          env:
          - name: VLLM_USAGE_SOURCE
            value: ci-test
          - name: HF_HOME
            value: /root/.cache/huggingface
          - name: VLLM_SOURCE_CODE_LOC
            value: /workspace/build/buildkite/vllm/performance-benchmark
          - name: HF_TOKEN
            valueFrom:
              secretKeyRef:
                name: hf-token-secret
                key: token
          - name: TEST_SELECTOR
            value: "llama8B"

- label: "A100 trt llama-70B"
  priority: 100
  agents:
    queue: A100
  plugins:
  - kubernetes:
      podSpec:
        <<: *common_pod_spec
        containers:
        - image: nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3
          <<: *common_container_settings
          env:
          - name: VLLM_USAGE_SOURCE
            value: ci-test
          - name: HF_HOME
            value: /root/.cache/huggingface
          - name: VLLM_SOURCE_CODE_LOC
            value: /workspace/build/buildkite/vllm/performance-benchmark
          - name: HF_TOKEN
            valueFrom:
              secretKeyRef:
                name: hf-token-secret
                key: token
          - name: TEST_SELECTOR
            value: "llama70B"

# FIXME(Kuntai): uncomment this after NVIDIA gives us their test docker image
# - label: "A100 trt benchmark"
#   priority: 100
#   agents:
#     queue: A100
#   plugins:
#   - kubernetes:
#       podSpec:
#         <<: *common_pod_spec
#         containers:
#         - image: nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3
#           <<: *common_container_settings

# FIXME(Kuntai): uncomment this after TGI supports `--ignore-eos`.
# - label: "A100 tgi benchmark"
#   priority: 100
#   agents:
#     queue: A100
#   plugins:
#   - kubernetes:
#       podSpec:
#         <<: *common_pod_spec
#         containers:
#         - image: ghcr.io/huggingface/text-generation-inference:2.2.0
#           <<: *common_container_settings

- wait

- label: "Collect the results"
  priority: 100
  agents:
    queue: A100
  plugins:
  - kubernetes:
      podSpec:
        <<: *common_pod_spec
        containers:
        - image: vllm/vllm-openai:v0.5.0.post1
          command:
          - bash .buildkite/nightly-benchmarks/scripts/nightly-annotate.sh
          resources:
            limits:
              nvidia.com/gpu: 8
          volumeMounts:
          - name: devshm
            mountPath: /dev/shm
          env:
          - name: VLLM_USAGE_SOURCE
            value: ci-test
          - name: VLLM_SOURCE_CODE_LOC
            value: /workspace/build/buildkite/vllm/performance-benchmark
          - name: HF_TOKEN
            valueFrom:
              secretKeyRef:
                name: hf-token-secret
                key: token

- block: ":rocket: check the results!"
@@ -1,26 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import argparse

from transformers import AutoTokenizer


def main(model, cachedir):
    # Load the tokenizer and save it to the specified directory
    tokenizer = AutoTokenizer.from_pretrained(model)
    tokenizer.save_pretrained(cachedir)
    print(f"Tokenizer saved to {cachedir}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Download and save Hugging Face tokenizer"
    )
    parser.add_argument("--model", type=str, required=True, help="Name of the model")
    parser.add_argument(
        "--cachedir", type=str, required=True, help="Directory to save the tokenizer"
    )

    args = parser.parse_args()
    main(args.model, args.cachedir)
@@ -1,97 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import argparse
import json
from pathlib import Path

import numpy as np
import pandas as pd
from tabulate import tabulate


def parse_arguments():
    parser = argparse.ArgumentParser(
        description="Parse command line arguments for summary-nightly-results script."
    )
    parser.add_argument(
        "--results-folder",
        type=str,
        required=True,
        help="The folder where the results are stored.",
    )
    parser.add_argument(
        "--description", type=str, required=True, help="Description of the results."
    )

    args = parser.parse_args()
    return args


def get_perf(df, method, model, metric):
    means = []

    for qps in [2, 4, 8, 16, "inf"]:
        target = df["Test name"].str.contains(model)
        target = target & df["Engine"].str.contains(method)
        target = target & df["Test name"].str.contains("qps_" + str(qps))
        filtered_df = df[target]

        if filtered_df.empty:
            means.append(0.0)
        else:
            means.append(filtered_df[metric].values[0])

    return np.array(means)


def get_perf_w_std(df, method, model, metric):
    if metric in ["TTFT", "ITL"]:
        mean = get_perf(df, method, model, "Mean " + metric + " (ms)")
        mean = mean.tolist()
        std = get_perf(df, method, model, "Std " + metric + " (ms)")
        if std.mean() == 0:
            std = None
        success = get_perf(df, method, model, "Successful req.")
        if std is not None:
            std = std / np.sqrt(success)
            std = std.tolist()

    else:
        assert metric == "Tput"
        mean = get_perf(df, method, model, "Input Tput (tok/s)") + get_perf(
            df, method, model, "Output Tput (tok/s)"
        )
        mean = mean.tolist()
        std = None

    return mean, std


def main(args):
    results_folder = Path(args.results_folder)

    results = []

    # collect results
    for test_file in results_folder.glob("*_nightly_results.json"):
        with open(test_file) as f:
            results = results + json.loads(f.read())

    # generate markdown table
    df = pd.DataFrame.from_dict(results)

    md_table = tabulate(df, headers="keys", tablefmt="pipe", showindex=False)

    with open(args.description) as f:
        description = f.read()

    description = description.format(nightly_results_benchmarking_table=md_table)

    with open("nightly_results.md", "w") as f:
        f.write(description)


if __name__ == "__main__":
    args = parse_arguments()
    main(args)
@@ -1,9 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

from lmdeploy.serve.openai.api_client import APIClient

api_client = APIClient("http://localhost:8000")
model_name = api_client.available_models[0]

print(model_name)
@@ -1,78 +0,0 @@
#!/bin/bash

set -ex
set -o pipefail


main() {

    (which wget && which curl) || (apt-get update && apt-get install -y wget curl)
    (which jq) || (apt-get update && apt-get -y install jq)
    (which zip) || (apt-get install -y zip)

    if [ ! -f /workspace/buildkite-agent ]; then
        echo "buildkite-agent binary not found. Skip plotting the results."
        exit 0
    fi

    # initial annotation
    #description="$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/nightly-descriptions.md"

    # download results
    cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
    mkdir -p results/
    /workspace/buildkite-agent artifact download 'results/*nightly_results.json' results/
    ls
    ls results/

    # upload benchmark results
    zip -r results.zip results/
    /workspace/buildkite-agent artifact upload "results.zip"

    # upload benchmarking scripts
    cd "$VLLM_SOURCE_CODE_LOC/"
    zip -r nightly-benchmarks.zip .buildkite/ benchmarks/
    /workspace/buildkite-agent artifact upload "nightly-benchmarks.zip"

    cd "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/"
    # upload benchmarking pipeline
    /workspace/buildkite-agent artifact upload "nightly-pipeline.yaml"

    cd "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/"
    /workspace/buildkite-agent annotate --style "success" --context "nightly-benchmarks-results" --append < nightly-annotation.md


    # The figures should be generated by a separate process outside the CI/CD pipeline

    # # generate figures
    # python3 -m pip install tabulate pandas matplotlib

    # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/generate-nightly-markdown.py \
    #   --description $description \
    #   --results-folder results/

    # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py \
    #   --description $description \
    #   --results-folder results/ \
    #   --dataset sharegpt

    # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py \
    #   --description $description \
    #   --results-folder results/ \
    #   --dataset sonnet_2048_128

    # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py \
    #   --description $description \
    #   --results-folder results/ \
    #   --dataset sonnet_128_2048

    # # upload results and figures
    # /workspace/buildkite-agent artifact upload "nightly_results*.png"
    # /workspace/buildkite-agent artifact upload $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/nightly-pipeline.yaml
    # /workspace/buildkite-agent artifact upload $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/tests/nightly-tests.json
    # /workspace/buildkite-agent annotate --style "success" --context "nightly-benchmarks-results" --append < nightly_results.md
}

main "$@"
@@ -1,464 +0,0 @@
#!/bin/bash

set -o pipefail
set -x

check_gpus() {
  # check the number of GPUs and GPU type.
  declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
  if [[ $gpu_count -gt 0 ]]; then
    echo "GPU found."
  else
    echo "Need at least 1 GPU to run benchmarking."
    exit 1
  fi
  declare -g gpu_type="$(nvidia-smi --query-gpu=name --format=csv,noheader | awk '{print $2}')"
  echo "GPU type is $gpu_type"
}

check_hf_token() {
  # check if HF_TOKEN is available and valid
  if [[ -z "$HF_TOKEN" ]]; then
    echo "Error: HF_TOKEN is not set."
    exit 1
  elif [[ ! "$HF_TOKEN" =~ ^hf_ ]]; then
    echo "Error: HF_TOKEN does not start with 'hf_'."
    exit 1
  else
    echo "HF_TOKEN is set and valid."
  fi
}

upload_to_buildkite() {
  # upload the benchmarking results to buildkite

  # if the agent binary is not found, skip uploading the results, exit 0
  if [ ! -f /workspace/buildkite-agent ]; then
    echo "buildkite-agent binary not found. Skip uploading the results."
    return 0
  fi
  # /workspace/buildkite-agent annotate --style "success" --context "benchmark-results" --append < $RESULTS_FOLDER/${CURRENT_LLM_SERVING_ENGINE}_nightly_results.md
  /workspace/buildkite-agent artifact upload "$RESULTS_FOLDER/*"
}

get_current_llm_serving_engine() {

  if which lmdeploy >/dev/null; then
    echo "Container: lmdeploy"
    export CURRENT_LLM_SERVING_ENGINE=lmdeploy
    return
  fi

  if [ -e /tgi-entrypoint.sh ]; then
    echo "Container: tgi"
    export CURRENT_LLM_SERVING_ENGINE=tgi
    return
  fi

  if which trtllm-build >/dev/null; then
    echo "Container: tensorrt-llm"
    export CURRENT_LLM_SERVING_ENGINE=trt
    return
  fi

  if [ -e /sgl-workspace ]; then
    echo "Container: sglang"
    export CURRENT_LLM_SERVING_ENGINE=sglang
    return
  fi

  if [ -e /vllm-workspace ]; then
    echo "Container: vllm"
    # move to a completely irrelevant directory, to avoid import vllm from current folder
    export CURRENT_LLM_SERVING_ENGINE=vllm

    return
  fi
}

json2args() {
  # transforms the JSON string to command line args, and '_' is replaced to '-'
  # example:
  # input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 }
  # output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1
  local json_string=$1
  local args=$(
    echo "$json_string" | jq -r '
      to_entries |
      map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
      join(" ")
    '
  )
  echo "$args"
}

kill_gpu_processes() {
  pkill -f '[p]ython'
  pkill -f '[p]ython3'
  pkill -f '[t]ritonserver'
  pkill -f '[p]t_main_thread'
  pkill -f '[t]ext-generation'
  pkill -f '[l]mdeploy'
  # vLLM now names the process with VLLM prefix after https://github.com/vllm-project/vllm/pull/21445
  pkill -f '[V]LLM'

  while [ "$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits | head -n 1)" -ge 1000 ]; do
    sleep 1
  done
}

wait_for_server() {
  # wait for vllm server to start
  # return 1 if vllm server crashes
  timeout 1200 bash -c '
    until curl -s localhost:8000/v1/completions > /dev/null; do
      sleep 1
    done' && return 0 || return 1
}

ensure_installed() {
  # Ensure that the given command is installed by apt-get
  local cmd=$1
  if ! which "$cmd" >/dev/null; then
    apt-get update && apt-get install -y "$cmd"
  fi
}

run_serving_tests() {
  # run serving tests using `vllm bench serve` command
  # $1: a json file specifying serving test cases

  local serving_test_file
  serving_test_file=$1

  # Iterate over serving tests
  jq -c '.[]' "$serving_test_file" | while read -r params; do
    # get the test name, and append the GPU type back to it.
    test_name=$(echo "$params" | jq -r '.test_name')

    # if TEST_SELECTOR is set, only run the test cases that match the selector
    if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
      echo "Skip test case $test_name."
      continue
    fi

    # prepend the current serving engine to the test name
    test_name=${CURRENT_LLM_SERVING_ENGINE}_${test_name}

    # get common parameters
    common_params=$(echo "$params" | jq -r '.common_parameters')
    model=$(echo "$common_params" | jq -r '.model')
    tp=$(echo "$common_params" | jq -r '.tp')
    dataset_name=$(echo "$common_params" | jq -r '.dataset_name')
    dataset_path=$(echo "$common_params" | jq -r '.dataset_path')
    port=$(echo "$common_params" | jq -r '.port')
    num_prompts=$(echo "$common_params" | jq -r '.num_prompts')
    reuse_server=$(echo "$common_params" | jq -r '.reuse_server')

    # get client and server arguments
    server_params=$(echo "$params" | jq -r ".${CURRENT_LLM_SERVING_ENGINE}_server_parameters")
    client_params=$(echo "$params" | jq -r ".${CURRENT_LLM_SERVING_ENGINE}_client_parameters")
    client_args=$(json2args "$client_params")
    qps_list=$(echo "$params" | jq -r '.qps_list')
    qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
    echo "Running over qps list $qps_list"

    # check if there is enough GPU to run the test
    if [[ $gpu_count -lt $tp ]]; then
      echo "Required num-shard $tp but only $gpu_count GPU found. Skip testcase $test_name."
      continue
    fi

    if [[ $reuse_server == "true" ]]; then
      echo "Reuse previous server for test case $test_name"
    else
      kill_gpu_processes
      bash "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/launch-server.sh" \
        "$server_params" "$common_params"
    fi

    if wait_for_server; then
      echo ""
      echo "$CURRENT_LLM_SERVING_ENGINE server is up and running."
    else
      echo ""
      echo "$CURRENT_LLM_SERVING_ENGINE failed to start within the timeout period."
      break
    fi

    # prepare tokenizer
    # this is required for lmdeploy.
    cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
    rm -rf /tokenizer_cache
    mkdir /tokenizer_cache
    python3 ../.buildkite/nightly-benchmarks/scripts/download-tokenizer.py \
      --model "$model" \
      --cachedir /tokenizer_cache
    cd "$VLLM_SOURCE_CODE_LOC/benchmarks"

    # change model name for lmdeploy (it will not follow standard hf name)
    if [[ "$CURRENT_LLM_SERVING_ENGINE" == "lmdeploy" ]]; then
      model=$(python ../.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py)
    fi

    # iterate over different QPS
    for qps in $qps_list; do
      # remove the surrounding single quote from qps
      if [[ "$qps" == *"inf"* ]]; then
        echo "qps was $qps"
        qps="inf"
        echo "now qps is $qps"
      fi

      new_test_name=$test_name"_qps_"$qps

      backend=$CURRENT_LLM_SERVING_ENGINE

      if [[ $backend = "trt" ]]; then
        backend="tensorrt-llm"
      fi

      if [[ "$backend" == *"vllm"* ]]; then
        backend="vllm"
      fi

      if [[ "$dataset_name" = "sharegpt" ]]; then

        client_command="vllm bench serve \
          --backend $backend \
          --tokenizer /tokenizer_cache \
          --model $model \
          --dataset-name $dataset_name \
          --dataset-path $dataset_path \
          --num-prompts $num_prompts \
          --port $port \
          --save-result \
          --result-dir $RESULTS_FOLDER \
          --result-filename ${new_test_name}.json \
          --request-rate $qps \
          --ignore-eos \
          $client_args"

      elif [[ "$dataset_name" = "sonnet" ]]; then

        sonnet_input_len=$(echo "$common_params" | jq -r '.sonnet_input_len')
        sonnet_output_len=$(echo "$common_params" | jq -r '.sonnet_output_len')
        sonnet_prefix_len=$(echo "$common_params" | jq -r '.sonnet_prefix_len')

        client_command="vllm bench serve \
          --backend $backend \
          --tokenizer /tokenizer_cache \
          --model $model \
          --dataset-name $dataset_name \
          --dataset-path $dataset_path \
          --num-prompts $num_prompts \
          --sonnet-input-len $sonnet_input_len \
          --sonnet-output-len $sonnet_output_len \
          --sonnet-prefix-len $sonnet_prefix_len \
          --port $port \
          --save-result \
          --result-dir $RESULTS_FOLDER \
          --result-filename ${new_test_name}.json \
          --request-rate $qps \
          --ignore-eos \
          $client_args"

      else

        echo "The dataset name must be either 'sharegpt' or 'sonnet'. Got $dataset_name."
        exit 1

      fi

      echo "Running test case $test_name with qps $qps"
      echo "Client command: $client_command"

      eval "$client_command"

      server_command="None"

      # record the benchmarking commands
      jq_output=$(jq -n \
        --arg server "$server_command" \
        --arg client "$client_command" \
        --arg gpu "$gpu_type" \
        --arg engine "$CURRENT_LLM_SERVING_ENGINE" \
        '{
          server_command: $server,
          client_command: $client,
          gpu_type: $gpu,
          engine: $engine
        }')
      echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands"

    done

  done

  kill_gpu_processes
}

run_genai_perf_tests() {
  # run genai-perf tests

  # $1: a json file specifying genai-perf test cases
  local genai_perf_test_file
  genai_perf_test_file=$1

  # Iterate over genai-perf tests
  jq -c '.[]' "$genai_perf_test_file" | while read -r params; do
    # get the test name, and append the GPU type back to it.
    test_name=$(echo "$params" | jq -r '.test_name')

    # if TEST_SELECTOR is set, only run the test cases that match the selector
    if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
      echo "Skip test case $test_name."
      continue
    fi

    # prepend the current serving engine to the test name
    test_name=${CURRENT_LLM_SERVING_ENGINE}_${test_name}

    # get common parameters
    common_params=$(echo "$params" | jq -r '.common_parameters')
    model=$(echo "$common_params" | jq -r '.model')
    tp=$(echo "$common_params" | jq -r '.tp')
    dataset_name=$(echo "$common_params" | jq -r '.dataset_name')
    dataset_path=$(echo "$common_params" | jq -r '.dataset_path')
    port=$(echo "$common_params" | jq -r '.port')
    num_prompts=$(echo "$common_params" | jq -r '.num_prompts')
    reuse_server=$(echo "$common_params" | jq -r '.reuse_server')

    # get client and server arguments
    server_params=$(echo "$params" | jq -r ".${CURRENT_LLM_SERVING_ENGINE}_server_parameters")
    qps_list=$(echo "$params" | jq -r '.qps_list')
    qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
    echo "Running over qps list $qps_list"

    # check if there is enough GPU to run the test
    if [[ $gpu_count -lt $tp ]]; then
      echo "Required num-shard $tp but only $gpu_count GPU found. Skip testcase $test_name."
      continue
    fi

    if [[ $reuse_server == "true" ]]; then
      echo "Reuse previous server for test case $test_name"
    else
      kill_gpu_processes
      bash "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/launch-server.sh" \
        "$server_params" "$common_params"
    fi

    if wait_for_server; then
      echo ""
      echo "$CURRENT_LLM_SERVING_ENGINE server is up and running."
    else
      echo ""
      echo "$CURRENT_LLM_SERVING_ENGINE failed to start within the timeout period."
      break
    fi

    # iterate over different QPS
    for qps in $qps_list; do
      # remove the surrounding single quote from qps
      if [[ "$qps" == *"inf"* ]]; then
        echo "qps was $qps"
        qps=$num_prompts
        echo "now qps is $qps"
      fi

      new_test_name=$test_name"_qps_"$qps
      backend=$CURRENT_LLM_SERVING_ENGINE

      if [[ "$backend" == *"vllm"* ]]; then
        backend="vllm"
      fi
      #TODO: add output dir.
      client_command="genai-perf profile \
        -m $model \
        --service-kind openai \
        --backend "$backend" \
        --endpoint-type chat \
        --streaming \
        --url localhost:$port \
        --request-rate $qps \
        --num-prompts $num_prompts \
        "

      echo "Client command: $client_command"

      eval "$client_command"

      #TODO: process/record outputs
    done
  done

  kill_gpu_processes

}

prepare_dataset() {

  # download sharegpt dataset
  cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
  wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json

  # duplicate sonnet by 4x, to allow benchmarking with input length 2048
  cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
  echo "" > sonnet_4x.txt
  for _ in {1..4}
  do
    cat sonnet.txt >> sonnet_4x.txt
  done

}

main() {

  # check if the environment variable is successfully injected from yaml

  check_gpus
  check_hf_token
  get_current_llm_serving_engine

  pip install -U transformers

  pip install -r requirements/dev.txt
  which genai-perf

  # check storage
  df -h

  ensure_installed wget
  ensure_installed curl
  ensure_installed jq
  # genai-perf dependency
  ensure_installed libb64-0d

  prepare_dataset

  cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
  declare -g RESULTS_FOLDER=results/
  mkdir -p $RESULTS_FOLDER
  BENCHMARK_ROOT="$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/"

  # run the test
  run_serving_tests "$BENCHMARK_ROOT/tests/nightly-tests.json"

  # run genai-perf tests
  run_genai_perf_tests "$BENCHMARK_ROOT/tests/genai-perf-tests.json"
  mv artifacts/ $RESULTS_FOLDER/

  # upload benchmark results to buildkite
  python3 -m pip install tabulate pandas
  python3 "$BENCHMARK_ROOT/scripts/summary-nightly-results.py"
  upload_to_buildkite

}

main "$@"
@@ -1,82 +0,0 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import datetime
import json
import os
from pathlib import Path

import pandas as pd
from tabulate import tabulate

results_folder = Path("results/")

# serving results and the keys that will be printed into markdown
serving_results = []
serving_column_mapping = {
    "test_name": "Test name",
    "gpu_type": "GPU",
    "completed": "Successful req.",
    "request_throughput": "Tput (req/s)",
    "mean_ttft_ms": "Mean TTFT (ms)",
    "std_ttft_ms": "Std TTFT (ms)",
    "median_ttft_ms": "Median TTFT (ms)",
    "mean_itl_ms": "Mean ITL (ms)",
    "std_itl_ms": "Std ITL (ms)",
    "median_itl_ms": "Median ITL (ms)",
    "mean_tpot_ms": "Mean TPOT (ms)",
    "std_tpot_ms": "Std TPOT (ms)",
    "median_tpot_ms": "Median TPOT (ms)",
    "total_token_throughput": "Total Token Tput (tok/s)",
    "output_throughput": "Output Tput (tok/s)",
    "total_input_tokens": "Total input tokens",
    "total_output_tokens": "Total output tokens",
    "engine": "Engine",
}

if __name__ == "__main__":
    # collect results
    for test_file in results_folder.glob("*.json"):
        with open(test_file) as f:
            raw_result = json.loads(f.read())

        # attach the benchmarking command to raw_result
        with open(test_file.with_suffix(".commands")) as f:
            command = json.loads(f.read())
        raw_result.update(command)

        # update the test name of this result
        raw_result.update({"test_name": test_file.stem})

        # add the result to raw_result
        serving_results.append(raw_result)
        continue

    serving_results = pd.DataFrame.from_dict(serving_results)

    if not serving_results.empty:
        serving_results = serving_results[list(serving_column_mapping.keys())].rename(
            columns=serving_column_mapping
        )

    serving_md_table_with_headers = tabulate(
        serving_results, headers="keys", tablefmt="pipe", showindex=False
    )
    # remove the first line of header
    serving_md_table_lines = serving_md_table_with_headers.split("\n")
    serving_md_table_without_header = "\n".join(serving_md_table_lines[2:])

    prefix = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    prefix = prefix + "_" + os.environ.get("CURRENT_LLM_SERVING_ENGINE")

    # document benchmarking results in markdown
    with open(results_folder / f"{prefix}_nightly_results.md", "w") as f:
        # document results with header.
        # for those who wants to reproduce our benchmark.
        f.write(serving_md_table_with_headers)
        f.write("\n")

    # document benchmarking results in json
    with open(results_folder / f"{prefix}_nightly_results.json", "w") as f:
        results = serving_results.to_dict(orient="records")
        f.write(json.dumps(results))
@@ -1,23 +0,0 @@
#!/bin/sh
TOKEN=$(curl -s -L "https://public.ecr.aws/token?service=public.ecr.aws&scope=repository:q9t5s3a7/vllm-ci-postmerge-repo:pull" | jq -r .token)
if [[ "$BUILDKITE_BRANCH" == "main" ]]; then
  URL="https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-postmerge-repo/manifests/$BUILDKITE_COMMIT"
else
  URL="https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-test-repo/manifests/$BUILDKITE_COMMIT"
fi

TIMEOUT_SECONDS=10

retries=0
while [ $retries -lt 1000 ]; do
  if [ "$(curl -s --max-time "$TIMEOUT_SECONDS" -L -H "Authorization: Bearer $TOKEN" -o /dev/null -w "%{http_code}" "$URL")" -eq 200 ]; then
    exit 0
  fi

  echo "Waiting for image to be available..."

  retries=$((retries + 1))
  sleep 5
done

exit 1
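Purely for illustration, the same "wait until the image manifest exists" loop as the deleted shell script above can be expressed in Python. The registry and token URLs are copied from the script; the commit tag is a placeholder and the `requests` dependency is an assumption.

```python
# Illustrative sketch of the wait-for-image polling loop; not part of the diff.
import time
import requests

TOKEN_URL = ("https://public.ecr.aws/token?service=public.ecr.aws"
             "&scope=repository:q9t5s3a7/vllm-ci-postmerge-repo:pull")
MANIFEST_URL = "https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-postmerge-repo/manifests/<commit>"

def wait_for_image(max_retries: int = 1000, timeout_s: int = 10) -> bool:
    token = requests.get(TOKEN_URL, timeout=timeout_s).json()["token"]
    for _ in range(max_retries):
        resp = requests.get(MANIFEST_URL, timeout=timeout_s,
                            headers={"Authorization": f"Bearer {token}"})
        if resp.status_code == 200:  # manifest exists, image is ready
            return True
        print("Waiting for image to be available...")
        time.sleep(5)
    return False
```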
@@ -2,40 +2,23 @@

## Introduction

This directory contains two sets of benchmark for vllm.

- Performance benchmark: benchmark vllm's performance under various workload, for **developers** to gain clarity on whether their PR improves/degrades vllm's performance
- Nightly benchmark: compare vllm's performance against alternatives (tgi, trt-llm and lmdeploy), for **the public** to know when to choose vllm.

See [vLLM performance dashboard](https://hud.pytorch.org/benchmark/llms?repoName=vllm-project%2Fvllm) for the latest performance benchmark results and [vLLM GitHub README](https://github.com/vllm-project/vllm/blob/main/README.md) for latest nightly benchmark results.
This directory contains a benchmarking suite for **developers** to run locally and gain clarity on whether their PR improves/degrades vllm's performance.
vLLM also maintains a continuous performance benchmark under [perf.vllm.ai](https://perf.vllm.ai/), hosted under PyTorch CI HUD.

## Performance benchmark quick overview

**Benchmarking Coverage**: latency, throughput and fix-qps serving on A100 (the support for FP8 benchmark on H100 is coming!) and Intel® Xeon® Processors, with different models.
**Benchmarking Coverage**: latency, throughput and fix-qps serving on B200, A100, H100, Intel® Xeon® Processors and Intel® Gaudi® 3 Accelerators with different models.

**Benchmarking Duration**: about 1hr.

**For benchmarking developers**: please try your best to constraint the duration of benchmarking to about 1 hr so that it won't take forever to run.

## Nightly benchmark quick overview

**Benchmarking Coverage**: Fix-qps serving on A100 (the support for FP8 benchmark on H100 is coming!) on Llama-3 8B, 70B and Mixtral 8x7B.

**Benchmarking engines**: vllm, TGI, trt-llm and lmdeploy.

**Benchmarking Duration**: about 3.5hrs.

## Trigger the benchmark

Performance benchmark will be triggered when:

- A PR being merged into vllm.
- Every commit for those PRs with `perf-benchmarks` label AND `ready` label.

Manually Trigger the benchmark
The benchmark needs to be triggered manually:

```bash
bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
bash .buildkite/performance-benchmarks/scripts/run-performance-benchmarks.sh
```

Runtime environment variables:
@@ -47,14 +30,11 @@ Runtime environment variables:
- `REMOTE_HOST`: IP for the remote vLLM service to benchmark. Default value is empty string.
- `REMOTE_PORT`: Port for the remote vLLM service to benchmark. Default value is empty string.

Nightly benchmark will be triggered when:

- Every commit for those PRs with `perf-benchmarks` label and `nightly-benchmarks` label.

## Performance benchmark details

See [performance-benchmarks-descriptions.md](performance-benchmarks-descriptions.md) for detailed descriptions, and use `tests/latency-tests.json`, `tests/throughput-tests.json`, `tests/serving-tests.json` to configure the test cases.
> NOTE: For Intel® Xeon® Processors, use `tests/latency-tests-cpu.json`, `tests/throughput-tests-cpu.json`, `tests/serving-tests-cpu.json` instead.
For Intel® Gaudi® 3 Accelerators, use `tests/latency-tests-hpu.json`, `tests/throughput-tests-hpu.json`, `tests/serving-tests-hpu.json` instead.
>

### Latency test

@@ -152,26 +132,3 @@ Here is an example using the script to compare result_a and result_b with Model,
A comparison diagram will be generated below the table.
Here is an example to compare between 96c/results_gnr_96c_091_tp2pp3 and 128c/results_gnr_128c_091_tp2pp3
<img width="1886" height="828" alt="image" src="https://github.com/user-attachments/assets/c02a43ef-25d0-4fd6-90e5-2169a28682dd" />

## Nightly test details

See [nightly-descriptions.md](nightly-descriptions.md) for the detailed description on test workload, models and docker containers of benchmarking other llm engines.

### Workflow

- The [nightly-pipeline.yaml](nightly-pipeline.yaml) specifies the docker containers for different LLM serving engines.
- Inside each container, we run [scripts/run-nightly-benchmarks.sh](scripts/run-nightly-benchmarks.sh), which will probe the serving engine of the current container.
- The `scripts/run-nightly-benchmarks.sh` will parse the workload described in [nightly-tests.json](tests/nightly-tests.json) and launch the right benchmark for the specified serving engine via `scripts/launch-server.sh`.
- At last, we run [scripts/summary-nightly-results.py](scripts/summary-nightly-results.py) to collect and plot the final benchmarking results, and update the results to buildkite.

### Nightly tests

In [nightly-tests.json](tests/nightly-tests.json), we include the command line arguments for benchmarking commands, together with the benchmarking test cases. The format is highly similar to performance benchmark.

### Docker containers

The docker containers for benchmarking are specified in `nightly-pipeline.yaml`.

WARNING: the docker versions are HARD-CODED and SHOULD BE ALIGNED WITH `nightly-descriptions.md`. The docker versions need to be hard-coded as there are several version-specific bug fixes inside `scripts/run-nightly-benchmarks.sh` and `scripts/launch-server.sh`.

WARNING: populating `trt-llm` to latest version is not easy, as it requires updating several protobuf files in [tensorrt-demo](https://github.com/neuralmagic/tensorrt-demo.git).
@@ -5,7 +5,7 @@
- Input length: 32 tokens.
- Output length: 128 tokens.
- Batch size: fixed (8).
- GPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- CPU Models: llama-3.1 8B.
- Evaluation metrics: end-to-end latency (mean, median, p99).

@@ -16,7 +16,7 @@
- Input length: randomly sample 200 prompts from ShareGPT dataset (with fixed random seed).
- Output length: the corresponding output length of these 200 prompts.
- Batch size: dynamically determined by vllm to achieve maximum throughput.
- GPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- CPU Models: llama-3.1 8B.
- Evaluation metrics: throughput.

@@ -28,7 +28,7 @@
- Output length: the corresponding output length of these 200 prompts.
- Batch size: dynamically determined by vllm and the arrival pattern of the requests.
- **Average QPS (query per second)**: 1, 4, 16 and inf. QPS = inf means all requests come at once. For other QPS values, the arrival time of each query is determined using a random Poisson process (with fixed random seed).
- GPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- We also added a speculative decoding test for llama-3 70B on GPU, under QPS 2
- CPU Models: llama-3.1 8B.
- Evaluation metrics: throughput, TTFT (time to the first token, with mean, median and p99), ITL (inter-token latency, with mean, median and p99).
@@ -392,7 +392,7 @@ if __name__ == "__main__":
    json_file = "benchmark_results.json"
    with open(results_folder / md_file, "w") as f:
        results = read_markdown(
            "../.buildkite/nightly-benchmarks/"
            "../.buildkite/performance-benchmarks/"
            + "performance-benchmarks-descriptions.md"
        )
        results = results.format(
@@ -15,6 +15,8 @@ check_gpus() {
    declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
  elif command -v amd-smi; then
    declare -g gpu_count=$(amd-smi list | grep 'GPU' | wc -l)
  elif command -v hl-smi; then
    declare -g gpu_count=$(hl-smi --list | grep -i "Module ID" | wc -l)
  fi

  if [[ $gpu_count -gt 0 ]]; then
@@ -23,10 +25,16 @@ check_gpus() {
    echo "Need at least 1 GPU to run benchmarking."
    exit 1
  fi

  declare -g arch_suffix=''

  if command -v nvidia-smi; then
    declare -g gpu_type=$(nvidia-smi --query-gpu=name --format=csv,noheader | awk '{print $2}')
  elif command -v amd-smi; then
    declare -g gpu_type=$(amd-smi static -g 0 -a | grep 'MARKET_NAME' | awk '{print $2}')
  elif command -v hl-smi; then
    declare -g gpu_type=$(hl-smi -q | grep "Product Name" | head -n 1 | awk -F ':' '{print $2}' | sed 's/^ *//')
    arch_suffix='-hpu'
  fi
  echo "GPU type is $gpu_type"
}
@@ -138,6 +146,10 @@ kill_gpu_processes() {
    while [ "$(amd-smi metric -g 0 | grep 'USED_VRAM' | awk '{print $2}')" -ge 1000 ]; do
      sleep 1
    done
  elif command -v hl-smi; then
    while [ "$(hl-smi -q | grep "Used" | head -n 1 | awk '{print $3}')" -ge 1000 ]; do
      sleep 1
    done
  fi

  # remove vllm config file
@@ -451,6 +463,7 @@ main() {
    ARCH='-cpu'
  else
    check_gpus
    ARCH="$arch_suffix"
  fi
  check_hf_token

@@ -469,7 +482,7 @@ main() {
  ensure_sharegpt_downloaded
  declare -g RESULTS_FOLDER=results/
  mkdir -p $RESULTS_FOLDER
  QUICK_BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/
  QUICK_BENCHMARK_ROOT=../.buildkite/performance-benchmarks/

  # dump vllm info via vllm collect-env
  env_output=$(vllm collect-env)
@@ -0,0 +1,55 @@
[
    {
        "test_name": "latency_llama8B_tp1",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "load_format": "dummy",
            "num-iters-warmup": 5,
            "num-iters": 15,
            "max-model-len": 256,
            "async-scheduling": ""
        }
    },
    {
        "test_name": "latency_llama70B_tp4",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
            "tensor_parallel_size": 4,
            "load_format": "dummy",
            "num-iters-warmup": 5,
            "num-iters": 15,
            "max-model-len": 256,
            "async-scheduling": ""
        }
    },
    {
        "test_name": "latency_mixtral8x7B_tp2",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "tensor_parallel_size": 2,
            "load_format": "dummy",
            "num-iters-warmup": 5,
            "num-iters": 15,
            "max-model-len": 256,
            "async-scheduling": ""
        }
    }
]
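For reference, the benchmark scripts convert such a "parameters" object into CLI flags with a json2args-style helper (underscores become dashes), as shown in the deleted run-nightly-benchmarks.sh earlier in this commit. A rough Python equivalent, using the first test entry above as input, might look like this; it is a sketch of the transformation, not the script the HPU tests actually call.

```python
# Illustrative only: turn a "parameters" dict into CLI flags, mirroring json2args.
params = {
    "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "tensor_parallel_size": 1,
    "load_format": "dummy",
    "num-iters-warmup": 5,
    "num-iters": 15,
    "max-model-len": 256,
    "async-scheduling": "",  # empty value becomes a bare flag
}

args = " ".join(
    f"--{key.replace('_', '-')} {value}".rstrip() for key, value in params.items()
)
print(args)
# --model meta-llama/Meta-Llama-3.1-8B-Instruct --tensor-parallel-size 1
# --load-format dummy --num-iters-warmup 5 --num-iters 15 --max-model-len 256 --async-scheduling
```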
@@ -0,0 +1,82 @@
[
    {
        "test_name": "serving_llama8B_tp1_sharegpt",
        "qps_list": [1, 4, 16, "inf"],
        "server_environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "server_parameters": {
            "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "swap_space": 16,
            "disable_log_stats": "",
            "load_format": "dummy",
            "max-model-len": 2048,
            "max-num-seqs": 256,
            "async-scheduling": ""
        },
        "client_parameters": {
            "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
            "backend": "vllm",
            "dataset_name": "sharegpt",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    },
    {
        "test_name": "serving_llama70B_tp4_sharegpt",
        "qps_list": [1, 4, 16, "inf"],
        "server_environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "server_parameters": {
            "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
            "tensor_parallel_size": 4,
            "swap_space": 16,
            "disable_log_stats": "",
            "load_format": "dummy",
            "max-model-len": 2048,
            "max-num-seqs": 256,
            "async-scheduling": ""
        },
        "client_parameters": {
            "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
            "backend": "vllm",
            "dataset_name": "sharegpt",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    },
    {
        "test_name": "serving_mixtral8x7B_tp2_sharegpt",
        "qps_list": [1, 4, 16, "inf"],
        "server_environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "server_parameters": {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "tensor_parallel_size": 2,
            "swap_space": 16,
            "disable_log_stats": "",
            "load_format": "dummy",
            "max-model-len": 2048,
            "max-num-seqs": 256,
            "async-scheduling": ""
        },
        "client_parameters": {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "backend": "vllm",
            "dataset_name": "sharegpt",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 200
        }
    }
]
@@ -0,0 +1,61 @@
[
    {
        "test_name": "throughput_llama8B_tp1",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
            "tensor_parallel_size": 1,
            "load_format": "dummy",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 1000,
            "backend": "vllm",
            "max-model-len": 2048,
            "max-num-seqs": 512,
            "async-scheduling": ""
        }
    },
    {
        "test_name": "throughput_llama70B_tp4",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
            "tensor_parallel_size": 4,
            "load_format": "dummy",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 1000,
            "backend": "vllm",
            "max-model-len": 2048,
            "max-num-seqs": 512,
            "async-scheduling": ""
        }
    },
    {
        "test_name": "throughput_mixtral8x7B_tp2",
        "environment_variables": {
            "PT_HPU_LAZY_MODE": 1,
            "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
            "VLLM_CONTIGUOUS_PA": 1,
            "VLLM_DEFRAG": 1
        },
        "parameters": {
            "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "tensor_parallel_size": 2,
            "load_format": "dummy",
            "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
            "num_prompts": 1000,
            "backend": "vllm",
            "max-model-len": 2048,
            "max-num-seqs": 512,
            "async-scheduling": ""
        }
    }
]
@ -20,7 +20,10 @@ trap remove_docker_container EXIT
|
||||
|
||||
# Run the image and test offline inference/tensor parallel
|
||||
docker run \
|
||||
--device /dev/dri \
|
||||
--device /dev/dri:/dev/dri \
|
||||
--net=host \
|
||||
--ipc=host \
|
||||
--privileged \
|
||||
-v /dev/dri/by-path:/dev/dri/by-path \
|
||||
--entrypoint="" \
|
||||
-e "HF_TOKEN=${HF_TOKEN}" \
|
||||
@ -42,7 +45,7 @@ docker run \
|
||||
pytest -v -s v1/sample --ignore=v1/sample/test_logprobs.py --ignore=v1/sample/test_logprobs_e2e.py
|
||||
pytest -v -s v1/worker --ignore=v1/worker/test_gpu_model_runner.py
|
||||
pytest -v -s v1/structured_output
|
||||
pytest -v -s v1/spec_decode --ignore=v1/spec_decode/test_max_len.py --ignore=v1/spec_decode/test_tree_attention.py
|
||||
pytest -v -s v1/spec_decode --ignore=v1/spec_decode/test_max_len.py --ignore=v1/spec_decode/test_tree_attention.py --ignore=v1/spec_decode/test_speculators_eagle3.py
|
||||
pytest -v -s v1/kv_connector/unit --ignore=v1/kv_connector/unit/test_multi_connector.py --ignore=v1/kv_connector/unit/test_nixl_connector.py --ignore=v1/kv_connector/unit/test_shared_storage_connector.py
|
||||
pytest -v -s v1/test_serial_utils.py
|
||||
'
|
||||
|
||||
@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euxo pipefail
|
||||
|
||||
# args: [THRESHOLD] [NUM_QUESTIONS] [START_PORT]
|
||||
THRESHOLD=${1:-0.25}
|
||||
NUM_Q=${2:-1319}
|
||||
PORT=${3:-8010}
|
||||
OUT_DIR=${OUT_DIR:-/tmp/vllm-scheduled}
|
||||
mkdir -p "${OUT_DIR}"
|
||||
|
||||
wait_for_server() {
|
||||
local port=$1
|
||||
timeout 600 bash -c '
|
||||
until curl -sf "http://127.0.0.1:'"$port"'/health" > /dev/null; do
|
||||
sleep 1
|
||||
done'
|
||||
}
|
||||
|
||||
MODEL="deepseek-ai/DeepSeek-V2-lite"
|
||||
BACKENDS=("deepep_high_throughput" "deepep_low_latency")
|
||||
|
||||
cleanup() {
|
||||
if [[ -n "${SERVER_PID:-}" ]] && kill -0 "${SERVER_PID}" 2>/dev/null; then
|
||||
kill "${SERVER_PID}" 2>/dev/null || true
|
||||
for _ in {1..20}; do
|
||||
kill -0 "${SERVER_PID}" 2>/dev/null || break
|
||||
sleep 0.5
|
||||
done
|
||||
kill -9 "${SERVER_PID}" 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
for BACK in "${BACKENDS[@]}"; do
|
||||
VLLM_DEEP_GEMM_WARMUP=skip \
|
||||
VLLM_ALL2ALL_BACKEND=$BACK \
|
||||
vllm serve "$MODEL" \
|
||||
--enforce-eager \
|
||||
--tensor-parallel-size 2 \
|
||||
--data-parallel-size 2 \
|
||||
--enable-expert-parallel \
|
||||
--enable-eplb \
|
||||
--trust-remote-code \
|
||||
--max-model-len 2048 \
|
||||
--port $PORT &
|
||||
SERVER_PID=$!
|
||||
wait_for_server $PORT
|
||||
|
||||
TAG=$(echo "$MODEL" | tr '/: \\n' '_____')
|
||||
OUT="${OUT_DIR}/${TAG}_${BACK}.json"
|
||||
python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port $PORT --num-questions ${NUM_Q} --save-results ${OUT}
|
||||
python3 - <<PY
|
||||
import json; acc=json.load(open('${OUT}'))['accuracy']
|
||||
print(f"${MODEL} ${BACK}: accuracy {acc:.3f}")
|
||||
assert acc >= ${THRESHOLD}, f"${MODEL} ${BACK} accuracy {acc}"
|
||||
PY
|
||||
|
||||
cleanup
|
||||
SERVER_PID=
|
||||
sleep 1
|
||||
PORT=$((PORT+1))
|
||||
done
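A typical manual invocation of this script mirrors the positional arguments documented in its header (threshold, number of questions, starting port); the values below simply match the scheduled CI job defined later in this change and are not otherwise required:

```bash
# Run the DeepSeek-V2-Lite EP/EPLB accuracy check with a 0.25 accuracy threshold,
# 200 GSM8K questions, and servers starting at port 8010
bash .buildkite/scripts/scheduled_integration_test/deepseek_v2_lite_ep_eplb.sh 0.25 200 8010
```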
|
||||
@ -0,0 +1,61 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euxo pipefail
|
||||
|
||||
# args: [THRESHOLD] [NUM_QUESTIONS] [START_PORT]
|
||||
THRESHOLD=${1:-0.8}
|
||||
NUM_Q=${2:-1319}
|
||||
PORT=${3:-8020}
|
||||
OUT_DIR=${OUT_DIR:-/tmp/vllm-scheduled}
|
||||
mkdir -p "${OUT_DIR}"
|
||||
|
||||
wait_for_server() {
|
||||
local port=$1
|
||||
timeout 600 bash -c '
|
||||
until curl -sf "http://127.0.0.1:'"$port"'/health" > /dev/null; do
|
||||
sleep 1
|
||||
done'
|
||||
}
|
||||
|
||||
MODEL="QWen/Qwen3-30B-A3B-FP8"
|
||||
BACKENDS=("deepep_high_throughput" "deepep_low_latency")
|
||||
|
||||
cleanup() {
|
||||
if [[ -n "${SERVER_PID:-}" ]] && kill -0 "${SERVER_PID}" 2>/dev/null; then
|
||||
kill "${SERVER_PID}" 2>/dev/null || true
|
||||
for _ in {1..20}; do
|
||||
kill -0 "${SERVER_PID}" 2>/dev/null || break
|
||||
sleep 0.5
|
||||
done
|
||||
kill -9 "${SERVER_PID}" 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
for BACK in "${BACKENDS[@]}"; do
|
||||
VLLM_DEEP_GEMM_WARMUP=skip \
|
||||
VLLM_ALL2ALL_BACKEND=$BACK \
|
||||
vllm serve "$MODEL" \
|
||||
--enforce-eager \
|
||||
--tensor-parallel-size 2 \
|
||||
--data-parallel-size 2 \
|
||||
--enable-expert-parallel \
|
||||
--trust-remote-code \
|
||||
--max-model-len 2048 \
|
||||
--port $PORT &
|
||||
SERVER_PID=$!
|
||||
wait_for_server $PORT
|
||||
|
||||
TAG=$(echo "$MODEL" | tr '/: \\n' '_____')
|
||||
OUT="${OUT_DIR}/${TAG}_${BACK}.json"
|
||||
python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port $PORT --num-questions ${NUM_Q} --save-results ${OUT}
|
||||
python3 - <<PY
|
||||
import json; acc=json.load(open('${OUT}'))['accuracy']
|
||||
print(f"${MODEL} ${BACK}: accuracy {acc:.3f}")
|
||||
assert acc >= ${THRESHOLD}, f"${MODEL} ${BACK} accuracy {acc}"
|
||||
PY
|
||||
|
||||
cleanup
|
||||
SERVER_PID=
|
||||
sleep 1
|
||||
PORT=$((PORT+1))
|
||||
done
|
||||
@ -38,7 +38,7 @@ steps:
|
||||
- label: Pytorch Nightly Dependency Override Check # 2min
|
||||
# if this test fails, it means the nightly torch version is not compatible with some
|
||||
# of the dependencies. Please check the error message and add the package to whitelist
|
||||
# in /vllm/tools/generate_nightly_torch_test.py
|
||||
# in /vllm/tools/pre_commit/generate_nightly_torch_test.py
|
||||
mirror_hardwares: [amdexperimental]
|
||||
agent_pool: mi325_1
|
||||
# grade: Blocking
|
||||
@ -286,7 +286,7 @@ steps:
|
||||
|
||||
- label: Engine Test # 25min
|
||||
timeout_in_minutes: 40
|
||||
mirror_hardwares: [amdexperimental]
|
||||
mirror_hardwares: [amdexperimental, amdproduction]
|
||||
agent_pool: mi325_1
|
||||
#grade: Blocking
|
||||
source_file_dependencies:
|
||||
@ -318,7 +318,7 @@ steps:
|
||||
|
||||
- label: V1 Test entrypoints # 35min
|
||||
timeout_in_minutes: 50
|
||||
mirror_hardwares: [amdexperimental]
|
||||
mirror_hardwares: [amdexperimental, amdproduction]
|
||||
agent_pool: mi325_1
|
||||
# grade: Blocking
|
||||
source_file_dependencies:
|
||||
@ -561,7 +561,7 @@ steps:
|
||||
|
||||
- label: Model Executor Test # 23min
|
||||
timeout_in_minutes: 35
|
||||
mirror_hardwares: [amdexperimental]
|
||||
mirror_hardwares: [amdexperimental, amdproduction]
|
||||
agent_pool: mi325_1
|
||||
# grade: Blocking
|
||||
source_file_dependencies:
|
||||
@ -908,7 +908,7 @@ steps:
|
||||
|
||||
- label: Quantized Models Test # 45 min
|
||||
timeout_in_minutes: 60
|
||||
mirror_hardwares: [amdexperimental]
|
||||
mirror_hardwares: [amdexperimental, amdproduction]
|
||||
agent_pool: mi325_1
|
||||
# grade: Blocking
|
||||
source_file_dependencies:
|
||||
|
||||
@ -38,7 +38,7 @@ steps:
|
||||
- label: Pytorch Nightly Dependency Override Check # 2min
|
||||
# if this test fails, it means the nightly torch version is not compatible with some
|
||||
# of the dependencies. Please check the error message and add the package to whitelist
|
||||
# in /vllm/tools/generate_nightly_torch_test.py
|
||||
# in /vllm/tools/pre_commit/generate_nightly_torch_test.py
|
||||
soft_fail: true
|
||||
source_file_dependencies:
|
||||
- requirements/nightly_torch_test.txt
|
||||
@ -205,6 +205,24 @@ steps:
|
||||
- VLLM_ALLOW_INSECURE_SERIALIZATION=1 RAY_DEDUP_LOGS=0 python3 rlhf_colocate.py
|
||||
- popd
|
||||
|
||||
- label: Distributed Tests (8 GPUs) # 4min
|
||||
timeout_in_minutes: 10
|
||||
gpu: h100
|
||||
num_gpus: 8
|
||||
working_dir: "/vllm-workspace/tests"
|
||||
source_file_dependencies:
|
||||
- examples/offline_inference/torchrun_dp_example.py
|
||||
- vllm/config/parallel.py
|
||||
- vllm/distributed/
|
||||
- vllm/v1/engine/llm_engine.py
|
||||
- vllm/v1/executor/uniproc_executor.py
|
||||
- vllm/v1/worker/gpu_worker.py
|
||||
commands:
|
||||
# https://github.com/NVIDIA/nccl/issues/1838
|
||||
- export NCCL_CUMEM_HOST_ENABLE=0
|
||||
# test with torchrun tp=2 and dp=4 with ep
|
||||
- torchrun --nproc-per-node=8 ../examples/offline_inference/torchrun_dp_example.py --tp-size=2 --pp-size=1 --dp-size=4 --enable-ep
|
||||
|
||||
- label: EPLB Algorithm Test # 5min
|
||||
timeout_in_minutes: 15
|
||||
working_dir: "/vllm-workspace/tests"
|
||||
@ -313,6 +331,15 @@ steps:
|
||||
- pip install -U git+https://github.com/robertgshaw2-redhat/lm-evaluation-harness.git@streaming-api
|
||||
- pytest -v -s entrypoints/openai/correctness/test_lmeval.py::test_lm_eval_accuracy_v1_engine
|
||||
|
||||
- label: V1 Test attention (H100) # 10min
|
||||
timeout_in_minutes: 30
|
||||
gpu: h100
|
||||
source_file_dependencies:
|
||||
- vllm/v1/attention
|
||||
- tests/v1/attention
|
||||
commands:
|
||||
- pytest -v -s v1/attention
|
||||
|
||||
- label: V1 Test others (CPU) # 5 mins
|
||||
source_file_dependencies:
|
||||
- vllm/
|
||||
@ -392,7 +419,7 @@ steps:
|
||||
--ignore=lora/test_deepseekv2_tp.py \
|
||||
--ignore=lora/test_gptoss.py \
|
||||
--ignore=lora/test_qwen3moe_tp.py
|
||||
|
||||
|
||||
parallelism: 4
|
||||
|
||||
- label: PyTorch Compilation Unit Tests # 15min
|
||||
@ -489,6 +516,8 @@ steps:
|
||||
- tests/kernels/moe
|
||||
- vllm/model_executor/layers/fused_moe/
|
||||
- vllm/distributed/device_communicators/
|
||||
- vllm/envs.py
|
||||
- vllm/config
|
||||
commands:
|
||||
- pytest -v -s kernels/moe --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT
|
||||
parallelism: 2
|
||||
@ -1115,7 +1144,7 @@ steps:
|
||||
- tests/weight_loading
|
||||
commands:
|
||||
- bash weight_loading/run_model_weight_loading_test.sh -c weight_loading/models-large.txt
|
||||
|
||||
|
||||
- label: NixlConnector PD accuracy tests (Distributed) # 30min
|
||||
timeout_in_minutes: 30
|
||||
working_dir: "/vllm-workspace/tests"
|
||||
@ -1157,6 +1186,19 @@ steps:
|
||||
- export VLLM_WORKER_MULTIPROC_METHOD=spawn
|
||||
- pytest -s -v test_lm_eval_correctness.py --config-list-file=configs/models-large.txt --tp-size=4
|
||||
|
||||
##### H100 test #####
|
||||
- label: LM Eval Large Models (H100) # optional
|
||||
gpu: h100
|
||||
optional: true
|
||||
num_gpus: 4
|
||||
working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
|
||||
source_file_dependencies:
|
||||
- csrc/
|
||||
- vllm/model_executor/layers/quantization
|
||||
commands:
|
||||
- export VLLM_USE_DEEP_GEMM=0 # We found Triton is faster than DeepGEMM for H100
|
||||
- pytest -s -v test_lm_eval_correctness.py --config-list-file=configs/models-large-hopper.txt --tp-size=4
|
||||
|
||||
##### H200 test #####
|
||||
- label: Distributed Tests (H200) # optional
|
||||
gpu: h200
|
||||
@ -1192,3 +1234,21 @@ steps:
|
||||
- .buildkite/scripts/run-prime-rl-test.sh
|
||||
commands:
|
||||
- bash .buildkite/scripts/run-prime-rl-test.sh
|
||||
|
||||
- label: DeepSeek V2-Lite Accuracy
|
||||
timeout_in_minutes: 60
|
||||
gpu: h100
|
||||
optional: true
|
||||
num_gpus: 4
|
||||
working_dir: "/vllm-workspace"
|
||||
commands:
|
||||
- bash .buildkite/scripts/scheduled_integration_test/deepseek_v2_lite_ep_eplb.sh 0.25 200 8010
|
||||
|
||||
- label: Qwen3-30B-A3B-FP8-block Accuracy
|
||||
timeout_in_minutes: 60
|
||||
gpu: h100
|
||||
optional: true
|
||||
num_gpus: 4
|
||||
working_dir: "/vllm-workspace"
|
||||
commands:
|
||||
- bash .buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep.sh 0.8 200 8020
|
||||
|
||||
2
.github/mergify.yml
vendored
2
.github/mergify.yml
vendored
@ -108,7 +108,7 @@ pull_request_rules:
|
||||
- files~=^benchmarks/
|
||||
- files~=^vllm/benchmarks/
|
||||
- files~=^tests/benchmarks/
|
||||
- files~=^\.buildkite/nightly-benchmarks/
|
||||
- files~=^\.buildkite/performance-benchmarks/
|
||||
actions:
|
||||
label:
|
||||
add:
|
||||
|
||||
@ -45,7 +45,7 @@ repos:
|
||||
- id: format-torch-nightly-test
|
||||
name: reformat nightly_torch_test.txt to be in sync with test.in
|
||||
language: python
|
||||
entry: python tools/generate_nightly_torch_test.py
|
||||
entry: python tools/pre_commit/generate_nightly_torch_test.py
|
||||
files: ^requirements/test\.(in|txt)$
|
||||
- id: mypy-local
|
||||
name: Run mypy locally for lowest supported Python version
|
||||
@ -78,12 +78,12 @@ repos:
|
||||
stages: [manual] # Only run in CI
|
||||
- id: shellcheck
|
||||
name: Lint shell scripts
|
||||
entry: tools/shellcheck.sh
|
||||
entry: tools/pre_commit/shellcheck.sh
|
||||
language: script
|
||||
types: [shell]
|
||||
- id: png-lint
|
||||
name: Lint PNG exports from excalidraw
|
||||
entry: tools/png-lint.sh
|
||||
entry: tools/pre_commit/png-lint.sh
|
||||
language: script
|
||||
types: [png]
|
||||
- id: signoff-commit
|
||||
@ -100,12 +100,12 @@ repos:
|
||||
stages: [commit-msg]
|
||||
- id: check-spdx-header
|
||||
name: Check SPDX headers
|
||||
entry: python tools/check_spdx_header.py
|
||||
entry: python tools/pre_commit/check_spdx_header.py
|
||||
language: python
|
||||
types: [python]
|
||||
- id: check-root-lazy-imports
|
||||
name: Check root lazy imports
|
||||
entry: python tools/check_init_lazy_imports.py
|
||||
entry: python tools/pre_commit/check_init_lazy_imports.py
|
||||
language: python
|
||||
types: [python]
|
||||
- id: check-filenames
|
||||
@ -119,11 +119,11 @@ repos:
|
||||
pass_filenames: false
|
||||
- id: update-dockerfile-graph
|
||||
name: Update Dockerfile dependency graph
|
||||
entry: tools/update-dockerfile-graph.sh
|
||||
entry: tools/pre_commit/update-dockerfile-graph.sh
|
||||
language: script
|
||||
- id: enforce-import-regex-instead-of-re
|
||||
name: Enforce import regex as re
|
||||
entry: python tools/enforce_regex_import.py
|
||||
entry: python tools/pre_commit/enforce_regex_import.py
|
||||
language: python
|
||||
types: [python]
|
||||
pass_filenames: false
|
||||
@ -131,7 +131,7 @@ repos:
|
||||
# forbid directly import triton
|
||||
- id: forbid-direct-triton-import
|
||||
name: "Forbid direct 'import triton'"
|
||||
entry: python tools/check_triton_import.py
|
||||
entry: python tools/pre_commit/check_triton_import.py
|
||||
language: python
|
||||
types: [python]
|
||||
pass_filenames: false
|
||||
@ -144,7 +144,7 @@ repos:
|
||||
additional_dependencies: [regex]
|
||||
- id: validate-config
|
||||
name: Validate configuration has default values and that each field has a docstring
|
||||
entry: python tools/validate_config.py
|
||||
entry: python tools/pre_commit/validate_config.py
|
||||
language: python
|
||||
additional_dependencies: [regex]
|
||||
# Keep `suggestion` last
|
||||
|
||||
@ -21,6 +21,7 @@ Join us at the [PyTorch Conference, October 22-23](https://events.linuxfoundatio
|
||||
|
||||
*Latest News* 🔥
|
||||
|
||||
- [2025/10] We hosted [vLLM Shanghai Meetup](https://mp.weixin.qq.com/s/__xb4OyOsImz-9eAVrdlcg) focused on hands-on vLLM inference optimization! Please find the meetup slides [here](https://drive.google.com/drive/folders/1KqwjsFJLfEsC8wlDugnrR61zsWHt94Q6).
|
||||
- [2025/09] We hosted [vLLM Toronto Meetup](https://luma.com/e80e0ymm) focused on tackling inference at scale and speculative decoding with speakers from NVIDIA and Red Hat! Please find the meetup slides [here](https://docs.google.com/presentation/d/1IYJYmJcu9fLpID5N5RbW_vO0XLo0CGOR14IXOjB61V8/edit?usp=sharing).
|
||||
- [2025/08] We hosted [vLLM Shenzhen Meetup](https://mp.weixin.qq.com/s/k8ZBO1u2_2odgiKWH_GVTQ) focusing on the ecosystem around vLLM! Please find the meetup slides [here](https://drive.google.com/drive/folders/1Ua2SVKVSu-wp5vou_6ElraDt2bnKhiEA).
|
||||
- [2025/08] We hosted [vLLM Singapore Meetup](https://www.sginnovate.com/event/vllm-sg-meet). We shared V1 updates, disaggregated serving and MLLM speedups with speakers from Embedded LLM, AMD, WekaIO, and A*STAR. Please find the meetup slides [here](https://drive.google.com/drive/folders/1ncf3GyqLdqFaB6IeB834E5TZJPLAOiXZ?usp=sharing).
|
||||
|
||||
@ -212,11 +212,24 @@ if ((AVX512_FOUND AND NOT AVX512_DISABLED) OR (ASIMD_FOUND AND NOT APPLE_SILICON
|
||||
# Build ACL with scons
|
||||
include(ProcessorCount)
|
||||
ProcessorCount(_NPROC)
|
||||
set(_scons_cmd
|
||||
scons -j${_NPROC}
|
||||
Werror=0 debug=0 neon=1 examples=0 embed_kernels=0 os=linux
|
||||
arch=armv8.2-a build=native benchmark_examples=0 fixed_format_kernels=1
|
||||
multi_isa=1 openmp=1 cppthreads=0
|
||||
)
|
||||
|
||||
# locate PyTorch's libgomp (e.g. site-packages/torch.libs/libgomp-947d5fa1.so.1.0.0)
|
||||
# and create a local shim dir with it
|
||||
include("${CMAKE_CURRENT_LIST_DIR}/utils.cmake")
|
||||
vllm_prepare_torch_gomp_shim(VLLM_TORCH_GOMP_SHIM_DIR)
|
||||
|
||||
if(NOT VLLM_TORCH_GOMP_SHIM_DIR STREQUAL "")
|
||||
list(APPEND _scons_cmd extra_link_flags=-L${VLLM_TORCH_GOMP_SHIM_DIR})
|
||||
endif()
|
||||
|
||||
execute_process(
|
||||
COMMAND scons -j${_NPROC}
|
||||
Werror=0 debug=0 neon=1 examples=0 embed_kernels=0 os=linux
|
||||
arch=armv8.2-a build=native benchmark_examples=0 fixed_format_kernels=1
|
||||
multi_isa=1 openmp=1 cppthreads=0
|
||||
COMMAND ${_scons_cmd}
|
||||
WORKING_DIRECTORY "$ENV{ACL_ROOT_DIR}"
|
||||
RESULT_VARIABLE _acl_rc
|
||||
)
|
||||
|
||||
@ -129,6 +129,44 @@ function (get_torch_gpu_compiler_flags OUT_GPU_FLAGS GPU_LANG)
|
||||
set(${OUT_GPU_FLAGS} ${GPU_FLAGS} PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
# Find libgomp that gets shipped with PyTorch wheel and create a shim dir with:
|
||||
# libgomp.so -> libgomp-<hash>.so...
|
||||
# libgomp.so.1 -> libgomp-<hash>.so...
|
||||
# OUTPUT: TORCH_GOMP_SHIM_DIR ("" if not found)
|
||||
function(vllm_prepare_torch_gomp_shim TORCH_GOMP_SHIM_DIR)
|
||||
set(${TORCH_GOMP_SHIM_DIR} "" PARENT_SCOPE)
|
||||
|
||||
# Use run_python to locate vendored libgomp; never throw on failure.
|
||||
run_python(_VLLM_TORCH_GOMP_PATH
|
||||
"
|
||||
import os, glob
|
||||
try:
|
||||
import torch
|
||||
torch_pkg = os.path.dirname(torch.__file__)
|
||||
site_root = os.path.dirname(torch_pkg)
|
||||
torch_libs = os.path.join(site_root, 'torch.libs')
|
||||
print(glob.glob(os.path.join(torch_libs, 'libgomp-*.so*'))[0])
|
||||
except:
|
||||
print('')
|
||||
"
|
||||
"failed to probe torch.libs for libgomp")
|
||||
|
||||
if(_VLLM_TORCH_GOMP_PATH STREQUAL "" OR NOT EXISTS "${_VLLM_TORCH_GOMP_PATH}")
|
||||
return()
|
||||
endif()
|
||||
|
||||
# Create shim under the build tree
|
||||
set(_shim "${CMAKE_BINARY_DIR}/gomp_shim")
|
||||
file(MAKE_DIRECTORY "${_shim}")
|
||||
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E rm -f "${_shim}/libgomp.so")
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E rm -f "${_shim}/libgomp.so.1")
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink "${_VLLM_TORCH_GOMP_PATH}" "${_shim}/libgomp.so")
|
||||
execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink "${_VLLM_TORCH_GOMP_PATH}" "${_shim}/libgomp.so.1")
|
||||
|
||||
set(${TORCH_GOMP_SHIM_DIR} "${_shim}" PARENT_SCOPE)
|
||||
endfunction()
|
||||
|
||||
# Macro for converting a `gencode` version number to a cmake version number.
|
||||
macro(string_to_ver OUT_VER IN_STR)
|
||||
string(REGEX REPLACE "\([0-9]+\)\([0-9]\)" "\\1.\\2" ${OUT_VER} ${IN_STR})
|
||||
|
||||
@ -361,13 +361,6 @@ RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist
|
||||
&& uv pip install --system dist/*.whl --verbose \
|
||||
--extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.')
|
||||
|
||||
# TODO (huydhn): Remove this once xformers is released for 2.9.0
|
||||
RUN --mount=type=cache,target=/root/.cache/uv bash - <<'BASH'
|
||||
. /etc/environment
|
||||
export TORCH_CUDA_ARCH_LIST='7.5 8.0+PTX 9.0a'
|
||||
uv pip install --system --no-build-isolation "git+https://github.com/facebookresearch/xformers@v0.0.32.post2"
|
||||
BASH
|
||||
|
||||
# Install FlashInfer pre-compiled kernel cache and binaries
|
||||
# https://docs.flashinfer.ai/installation.html
|
||||
RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
@ -495,7 +488,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
|
||||
else \
|
||||
BITSANDBYTES_VERSION="0.46.1"; \
|
||||
fi; \
|
||||
uv pip install --system accelerate hf_transfer modelscope "bitsandbytes>=${BITSANDBYTES_VERSION}" 'timm>=1.0.17' 'runai-model-streamer[s3,gcs]>=0.14.0'
|
||||
uv pip install --system accelerate hf_transfer modelscope "bitsandbytes>=${BITSANDBYTES_VERSION}" 'timm>=1.0.17' 'runai-model-streamer[s3,gcs]>=0.15.0'
|
||||
|
||||
ENV VLLM_USAGE_SOURCE production-docker-image
|
||||
|
||||
|
||||
@ -79,7 +79,7 @@ RUN echo 'ulimit -c 0' >> ~/.bashrc
|
||||
######################### BUILD IMAGE #########################
|
||||
FROM base AS vllm-build
|
||||
|
||||
ARG max_jobs=2
|
||||
ARG max_jobs=32
|
||||
ENV MAX_JOBS=${max_jobs}
|
||||
|
||||
ARG GIT_REPO_CHECK=0
|
||||
|
||||
@ -7,7 +7,7 @@ ARG PYTORCH_REPO="https://github.com/ROCm/pytorch.git"
|
||||
ARG PYTORCH_VISION_REPO="https://github.com/pytorch/vision.git"
|
||||
ARG FA_BRANCH="0e60e394"
|
||||
ARG FA_REPO="https://github.com/Dao-AILab/flash-attention.git"
|
||||
ARG AITER_BRANCH="eef23c7f"
|
||||
ARG AITER_BRANCH="9716b1b8"
|
||||
ARG AITER_REPO="https://github.com/ROCm/aiter.git"
|
||||
|
||||
FROM ${BASE_IMAGE} AS base
|
||||
|
||||
BIN
docs/assets/contributing/load-pattern-examples.png
Normal file
BIN
docs/assets/contributing/load-pattern-examples.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 577 KiB |
@ -5,4 +5,4 @@ nav:
|
||||
- complete.md
|
||||
- run-batch.md
|
||||
- vllm bench:
|
||||
- bench/*.md
|
||||
- bench/**/*.md
|
||||
|
||||
9
docs/cli/bench/sweep/plot.md
Normal file
9
docs/cli/bench/sweep/plot.md
Normal file
@ -0,0 +1,9 @@
|
||||
# vllm bench sweep plot
|
||||
|
||||
## JSON CLI Arguments
|
||||
|
||||
--8<-- "docs/cli/json_tip.inc.md"
|
||||
|
||||
## Options
|
||||
|
||||
--8<-- "docs/argparse/bench_sweep_plot.md"
|
||||
9
docs/cli/bench/sweep/serve.md
Normal file
9
docs/cli/bench/sweep/serve.md
Normal file
@ -0,0 +1,9 @@
|
||||
# vllm bench sweep serve
|
||||
|
||||
## JSON CLI Arguments
|
||||
|
||||
--8<-- "docs/cli/json_tip.inc.md"
|
||||
|
||||
## Options
|
||||
|
||||
--8<-- "docs/argparse/bench_sweep_serve.md"
|
||||
9
docs/cli/bench/sweep/serve_sla.md
Normal file
9
docs/cli/bench/sweep/serve_sla.md
Normal file
@ -0,0 +1,9 @@
|
||||
# vllm bench sweep serve_sla
|
||||
|
||||
## JSON CLI Arguments
|
||||
|
||||
--8<-- "docs/cli/json_tip.inc.md"
|
||||
|
||||
## Options
|
||||
|
||||
--8<-- "docs/argparse/bench_sweep_serve_sla.md"
|
||||
@ -2,6 +2,7 @@
|
||||
|
||||
We host regular meetups in San Francisco Bay Area every 2 months. We will share the project updates from the vLLM team and have guest speakers from the industry to share their experience and insights. Please find the materials of our previous meetups below:
|
||||
|
||||
- [vLLM Shanghai Meetup](https://mp.weixin.qq.com/s/__xb4OyOsImz-9eAVrdlcg), October 25th 2025. [[Slides]](https://drive.google.com/drive/folders/1KqwjsFJLfEsC8wlDugnrR61zsWHt94Q6)
|
||||
- [vLLM Toronto Meetup](https://luma.com/e80e0ymm), September 25th 2025. [[Slides]](https://docs.google.com/presentation/d/1IYJYmJcu9fLpID5N5RbW_vO0XLo0CGOR14IXOjB61V8/edit?usp=sharing)
|
||||
- [vLLM Shenzhen Meetup](https://mp.weixin.qq.com/s/k8ZBO1u2_2odgiKWH_GVTQ), August 30th 2025. [[Slides]](https://drive.google.com/drive/folders/1Ua2SVKVSu-wp5vou_6ElraDt2bnKhiEA)
|
||||
- [vLLM Singapore Meetup](https://www.sginnovate.com/event/vllm-sg-meet), August 27th 2025. [[Slides]](https://drive.google.com/drive/folders/1ncf3GyqLdqFaB6IeB834E5TZJPLAOiXZ?usp=sharing)
|
||||
|
||||
@ -9,7 +9,6 @@ vLLM provides comprehensive benchmarking tools for performance testing and evalu
|
||||
- **[Benchmark CLI](#benchmark-cli)**: `vllm bench` CLI tools and specialized benchmark scripts for interactive performance testing
|
||||
- **[Parameter sweeps](#parameter-sweeps)**: Automate `vllm bench` runs for multiple configurations
|
||||
- **[Performance benchmarks](#performance-benchmarks)**: Automated CI benchmarks for development
|
||||
- **[Nightly benchmarks](#nightly-benchmarks)**: Comparative benchmarks against alternatives
|
||||
|
||||
[Benchmark CLI]: #benchmark-cli
|
||||
|
||||
@ -321,6 +320,73 @@ The following arguments can be used to control the ramp-up:
|
||||
- `--ramp-up-start-rps`: The request rate at the beginning of the benchmark.
|
||||
- `--ramp-up-end-rps`: The request rate at the end of the benchmark.
|
||||
|
||||
##### Load Pattern Configuration
|
||||
|
||||
vLLM's benchmark serving script provides sophisticated load pattern simulation capabilities through three key parameters that control request generation and concurrency behavior:
|
||||
|
||||
###### Load Pattern Control Parameters
|
||||
|
||||
- `--request-rate`: Controls the target request generation rate (requests per second). Set to `inf` for maximum throughput testing or finite values for controlled load simulation.
|
||||
- `--burstiness`: Controls traffic variability using a Gamma distribution (range: > 0). Lower values create bursty traffic, higher values create uniform traffic.
|
||||
- `--max-concurrency`: Limits concurrent outstanding requests. If this argument is not provided, concurrency is unlimited. Set a value to simulate backpressure.
|
||||
|
||||
These parameters work together to create realistic load patterns with carefully chosen defaults. The `--request-rate` parameter defaults to `inf` (infinite), which sends all requests immediately for maximum throughput testing. When set to a finite value, request timing follows either a Poisson process (the default, `--burstiness=1.0`) or a Gamma distribution for more general arrival patterns.

The `--burstiness` parameter only takes effect when `--request-rate` is not infinite: a value of 1.0 creates natural Poisson traffic, lower values (0.1-0.5) create bursty patterns, and higher values (2.0-5.0) create uniform spacing. The `--max-concurrency` parameter defaults to `None` (unlimited) but can be set to simulate real-world constraints where a load balancer or API gateway limits concurrent connections. Combined, these parameters let you simulate everything from unrestricted stress testing (`--request-rate=inf`) to production-like scenarios with realistic arrival patterns and resource constraints.
|
||||
|
||||
The `--burstiness` parameter mathematically controls request arrival patterns using a Gamma distribution where:
|
||||
|
||||
- Shape parameter: `burstiness` value
|
||||
- Coefficient of Variation (CV): $\frac{1}{\sqrt{burstiness}}$
|
||||
- Traffic characteristics:
|
||||
- `burstiness = 0.1`: Highly bursty traffic (CV ≈ 3.16) - stress testing
|
||||
- `burstiness = 1.0`: Natural Poisson traffic (CV = 1.0) - realistic simulation
|
||||
- `burstiness = 5.0`: Uniform traffic (CV ≈ 0.45) - controlled load testing
|
||||
|
||||

|
||||
|
||||
*Figure: Load pattern examples for each use case. Top row: Request arrival timelines showing cumulative requests over time. Bottom row: Inter-arrival time distributions showing traffic variability patterns. Each column represents a different use case with its specific parameter settings and resulting traffic characteristics.*
|
||||
|
||||
Load Pattern Recommendations by Use Case:
|
||||
|
||||
| Use Case | Burstiness | Request Rate | Max Concurrency | Description |
|
||||
| --- | --- | --- | --- | --- |
|
||||
| Maximum Throughput | N/A | Infinite | Limited | **Most common**: Simulates load balancer/gateway limits with unlimited user demand |
|
||||
| Realistic Testing | 1.0 | Moderate (5-20) | Infinite | Natural Poisson traffic patterns for baseline performance |
|
||||
| Stress Testing | 0.1-0.5 | High (20-100) | Infinite | Challenging burst patterns to test resilience |
|
||||
| Latency Profiling | 2.0-5.0 | Low (1-10) | Infinite | Uniform load for consistent timing analysis |
|
||||
| Capacity Planning | 1.0 | Variable | Limited | Test resource limits with realistic constraints |
|
||||
| SLA Validation | 1.0 | Target rate | SLA limit | Production-like constraints for compliance testing |
|
||||
|
||||
These load patterns help evaluate different aspects of your vLLM deployment, from basic performance characteristics to resilience under challenging traffic conditions.
|
||||
|
||||
The **Maximum Throughput** pattern (`--request-rate=inf --max-concurrency=<limit>`) is the most commonly used configuration for production benchmarking. This simulates real-world deployment architectures where:
|
||||
|
||||
- Users send requests as fast as they can (infinite rate)
|
||||
- A load balancer or API gateway controls the maximum concurrent connections
|
||||
- The system operates at its concurrency limit, revealing true throughput capacity
|
||||
- `--burstiness` has no effect since request timing is not controlled when rate is infinite
|
||||
|
||||
This pattern helps determine optimal concurrency settings for your production load balancer configuration.
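As a concrete sketch, the two invocations below show how these flags combine for the most common patterns; the model, dataset path, and numeric values are placeholders to adapt to your own deployment, and both commands assume a vLLM server is already running on the default host and port:

```bash
# Maximum throughput: unlimited request rate, concurrency capped as a gateway would cap it
vllm bench serve \
  --model meta-llama/Meta-Llama-3.1-8B-Instruct \
  --dataset-name sharegpt \
  --dataset-path ./ShareGPT_V3_unfiltered_cleaned_split.json \
  --num-prompts 1000 \
  --request-rate inf \
  --max-concurrency 256

# Stress testing: finite request rate with bursty arrivals (low burstiness), no concurrency cap
vllm bench serve \
  --model meta-llama/Meta-Llama-3.1-8B-Instruct \
  --dataset-name sharegpt \
  --dataset-path ./ShareGPT_V3_unfiltered_cleaned_split.json \
  --num-prompts 1000 \
  --request-rate 50 \
  --burstiness 0.2
```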
|
||||
|
||||
To effectively configure load patterns, especially for **Capacity Planning** and **SLA Validation** use cases, you need to understand your system's resource limits. During startup, vLLM reports KV cache configuration that directly impacts your load testing parameters:
|
||||
|
||||
```text
|
||||
GPU KV cache size: 15,728,640 tokens
|
||||
Maximum concurrency for 8,192 tokens per request: 1920
|
||||
```
|
||||
|
||||
Where:
|
||||
|
||||
- GPU KV cache size: Total tokens that can be cached across all concurrent requests
|
||||
- Maximum concurrency: Theoretical maximum concurrent requests for the given `max_model_len`
|
||||
- Calculation: `max_concurrency = kv_cache_size / max_model_len`
|
||||
|
||||
Using KV cache metrics for load pattern configuration:
|
||||
|
||||
- For Capacity Planning: Set `--max-concurrency` to 80-90% of the reported maximum to test realistic resource constraints (see the quick sizing sketch after this list)
|
||||
- For SLA Validation: Use the reported maximum as your SLA limit to ensure compliance testing matches production capacity
|
||||
- For Realistic Testing: Monitor memory usage when approaching theoretical limits to understand sustainable request rates
|
||||
- Request rate guidance: Use the KV cache size to estimate sustainable request rates for your specific workload and sequence lengths
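Putting the startup log and the guidance above together, a quick back-of-the-envelope sizing might look like the following; the token counts are taken from the example log and should be replaced with the values your own deployment reports:

```bash
KV_CACHE_TOKENS=15728640   # "GPU KV cache size" from the startup log
MAX_MODEL_LEN=8192         # tokens per request in this example

MAX_CONCURRENCY=$((KV_CACHE_TOKENS / MAX_MODEL_LEN))   # 1920, matching the reported maximum
PLANNING_TARGET=$((MAX_CONCURRENCY * 85 / 100))        # ~1632, i.e. within the 80-90% capacity-planning band

echo "max concurrency: ${MAX_CONCURRENCY}, capacity-planning target: ${PLANNING_TARGET}"
```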
|
||||
|
||||
</details>
|
||||
|
||||
#### 📈 Offline Throughput Benchmark
|
||||
@ -994,7 +1060,7 @@ Follow these steps to run the script:
|
||||
Example command:
|
||||
|
||||
```bash
|
||||
python -m vllm.benchmarks.sweep.serve \
|
||||
vllm bench sweep serve \
|
||||
--serve-cmd 'vllm serve meta-llama/Llama-2-7b-chat-hf' \
|
||||
--bench-cmd 'vllm bench serve --model meta-llama/Llama-2-7b-chat-hf --backend vllm --endpoint /v1/completions --dataset-name sharegpt --dataset-path benchmarks/ShareGPT_V3_unfiltered_cleaned_split.json' \
|
||||
--serve-params benchmarks/serve_hparams.json \
|
||||
@ -1042,7 +1108,7 @@ For example, to ensure E2E latency within different target values for 99% of req
|
||||
Example command:
|
||||
|
||||
```bash
|
||||
python -m vllm.benchmarks.sweep.serve_sla \
|
||||
vllm bench sweep serve_sla \
|
||||
--serve-cmd 'vllm serve meta-llama/Llama-2-7b-chat-hf' \
|
||||
--bench-cmd 'vllm bench serve --model meta-llama/Llama-2-7b-chat-hf --backend vllm --endpoint /v1/completions --dataset-name sharegpt --dataset-path benchmarks/ShareGPT_V3_unfiltered_cleaned_split.json' \
|
||||
--serve-params benchmarks/serve_hparams.json \
|
||||
@ -1071,7 +1137,7 @@ The algorithm for adjusting the SLA variable is as follows:
|
||||
Example command:
|
||||
|
||||
```bash
|
||||
python -m vllm.benchmarks.sweep.plot benchmarks/results/<timestamp> \
|
||||
vllm bench sweep plot benchmarks/results/<timestamp> \
|
||||
--var-x max_concurrency \
|
||||
--row-by random_input_len \
|
||||
--col-by random_output_len \
|
||||
@ -1100,7 +1166,7 @@ docker run -it --entrypoint /bin/bash -v /data/huggingface:/root/.cache/huggingf
|
||||
Then, run below command inside the docker instance.
|
||||
|
||||
```bash
|
||||
bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
|
||||
bash .buildkite/performance-benchmarks/scripts/run-performance-benchmarks.sh
|
||||
```
|
||||
|
||||
When run, benchmark script generates results under **benchmark/results** folder, along with the benchmark_results.md and benchmark_results.json.
|
||||
@ -1118,7 +1184,7 @@ For more results visualization, check the [visualizing the results](https://gith
|
||||
|
||||
The latest performance results are hosted on the public [vLLM Performance Dashboard](https://hud.pytorch.org/benchmark/llms?repoName=vllm-project%2Fvllm).
|
||||
|
||||
More information on the performance benchmarks and their parameters can be found in [Benchmark README](https://github.com/intel-ai-tce/vllm/blob/more_cpu_models/.buildkite/nightly-benchmarks/README.md) and [performance benchmark description](../../.buildkite/nightly-benchmarks/performance-benchmarks-descriptions.md).
|
||||
More information on the performance benchmarks and their parameters can be found in [Benchmark README](https://github.com/intel-ai-tce/vllm/blob/more_cpu_models/.buildkite/nightly-benchmarks/README.md) and [performance benchmark description](../../.buildkite/performance-benchmarks/performance-benchmarks-descriptions.md).
|
||||
|
||||
### Continuous Benchmarking
|
||||
|
||||
@ -1143,11 +1209,3 @@ The benchmarking currently runs on a predefined set of models configured in the
|
||||
#### Viewing Results
|
||||
|
||||
All continuous benchmarking results are automatically published to the public [vLLM Performance Dashboard](https://hud.pytorch.org/benchmark/llms?repoName=vllm-project%2Fvllm).
|
||||
|
||||
## Nightly Benchmarks
|
||||
|
||||
These compare vLLM's performance against alternatives (`tgi`, `trt-llm`, and `lmdeploy`) when there are major updates of vLLM (e.g., bumping up to a new version). They are primarily intended for consumers to evaluate when to choose vLLM over other options and are triggered on every commit with both the `perf-benchmarks` and `nightly-benchmarks` labels.
|
||||
|
||||
The latest nightly benchmark results are shared in major release blog posts such as [vLLM v0.6.0](https://blog.vllm.ai/2024/09/05/perf-update.html).
|
||||
|
||||
More information on the nightly benchmarks and their parameters can be found [here](../../.buildkite/nightly-benchmarks/nightly-descriptions.md).
|
||||
|
||||
@ -41,11 +41,11 @@ You can add any other [engine-args](../configuration/engine_args.md) you need af
|
||||
create a custom Dockerfile on top of the base image with an extra layer that installs them:
|
||||
|
||||
```Dockerfile
|
||||
FROM vllm/vllm-openai:v0.9.0
|
||||
FROM vllm/vllm-openai:v0.11.0
|
||||
|
||||
# e.g. install the `audio` optional dependencies
|
||||
# NOTE: Make sure the version of vLLM matches the base image!
|
||||
RUN uv pip install --system vllm[audio]==0.9.0
|
||||
RUN uv pip install --system vllm[audio]==0.11.0
|
||||
```
|
||||
|
||||
!!! tip
|
||||
|
||||
@ -49,11 +49,14 @@ First, create a Kubernetes PVC and Secret for downloading and storing Hugging Fa
|
||||
metadata:
|
||||
name: hf-token-secret
|
||||
type: Opaque
|
||||
data:
|
||||
token: $(HF_TOKEN)
|
||||
stringData:
|
||||
token: "REPLACE_WITH_TOKEN"
|
||||
EOF
|
||||
```
|
||||
|
||||
Here, the `token` field stores your **Hugging Face access token**. For details on how to generate a token,
|
||||
see the [Hugging Face documentation](https://huggingface.co/docs/hub/en/security-tokens).
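If you prefer not to write the token into a manifest, an equivalent Secret can be created directly from the CLI; the secret and key names below mirror the manifest above:

```bash
kubectl create secret generic hf-token-secret \
  --from-literal=token="REPLACE_WITH_TOKEN"
```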
|
||||
|
||||
Next, start the vLLM server as a Kubernetes Deployment and Service:
|
||||
|
||||
??? console "Config"
|
||||
|
||||
@ -79,7 +79,7 @@ The `post_process*` methods take `PoolingRequestOutput` objects as input and gen
|
||||
The `validate_or_generate_params` method validates against the plugin any `SamplingParameters`/`PoolingParameters` received with the user request, or generates new ones if none are specified. It always returns the validated or generated parameters.
|
||||
The `output_to_response` method is used only for online serving and converts the plugin output to the `IOProcessorResponse` type that is then returned by the API Server. The implementation of the `/pooling` serving endpoint is available here [vllm/entrypoints/openai/serving_pooling.py](../../vllm/entrypoints/openai/serving_pooling.py).
|
||||
|
||||
An example implementation of a plugin that enables generating geotiff images with the PrithviGeospatialMAE model is available [here](https://github.com/IBM/terratorch/tree/main/terratorch/vllm/plugins/segmentation). Please, also refer to our online ([examples/online_serving/prithvi_geospatial_mae.py](../../examples/online_serving/prithvi_geospatial_mae.py)) and offline ([examples/offline_inference/prithvi_geospatial_mae_io_processor.py](../../examples/offline_inference/prithvi_geospatial_mae_io_processor.py)) inference examples.
|
||||
An example implementation of a plugin that enables generating geotiff images with the PrithviGeospatialMAE model is available [here](https://github.com/IBM/terratorch/tree/main/terratorch/vllm/plugins/segmentation). Please, also refer to our online ([examples/online_serving/pooling/prithvi_geospatial_mae.py](../../examples/online_serving/pooling/prithvi_geospatial_mae.py)) and offline ([examples/offline_inference/pooling/prithvi_geospatial_mae_io_processor.py](../../examples/offline_inference/pooling/prithvi_geospatial_mae_io_processor.py)) inference examples.
|
||||
|
||||
## Using an IO Processor plugin
|
||||
|
||||
|
||||
@ -14,11 +14,12 @@ vLLM currently supports the following reasoning models:
|
||||
| [DeepSeek-V3.1](https://huggingface.co/collections/deepseek-ai/deepseek-v31-68a491bed32bd77e7fca048f) | `deepseek_v3` | `json`, `regex` | ❌ |
|
||||
| [ERNIE-4.5-VL series](https://huggingface.co/baidu/ERNIE-4.5-VL-28B-A3B-PT) | `ernie45` | `json`, `regex` | ❌ |
|
||||
| [ERNIE-4.5-21B-A3B-Thinking](https://huggingface.co/baidu/ERNIE-4.5-21B-A3B-Thinking) | `ernie45` | `json`, `regex` | ✅ |
|
||||
| [QwQ-32B](https://huggingface.co/Qwen/QwQ-32B) | `deepseek_r1` | `json`, `regex` | ✅ |
|
||||
| [IBM Granite 3.2 language models](https://huggingface.co/collections/ibm-granite/granite-32-language-models-67b3bc8c13508f6d064cff9a) | `granite` | ❌ | ❌ |
|
||||
| [Qwen3 series](https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f) | `qwen3` | `json`, `regex` | ✅ |
|
||||
| [Hunyuan A13B series](https://huggingface.co/collections/tencent/hunyuan-a13b-685ec38e5b46321e3ea7c4be) | `hunyuan_a13b` | `json`, `regex` | ✅ |
|
||||
| [GLM-4.5 series](https://huggingface.co/collections/zai-org/glm-45-687c621d34bda8c9e4bf503b) | `glm45` | `json`, `regex` | ✅ |
|
||||
| [Hunyuan A13B series](https://huggingface.co/collections/tencent/hunyuan-a13b-685ec38e5b46321e3ea7c4be) | `hunyuan_a13b` | `json`, `regex` | ✅ |
|
||||
| [IBM Granite 3.2 language models](https://huggingface.co/collections/ibm-granite/granite-32-language-models-67b3bc8c13508f6d064cff9a) | `granite` | ❌ | ❌ |
|
||||
| [MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2) | `minimax_m2_append_think` | `json`, `regex` | ✅ |
|
||||
| [Qwen3 series](https://huggingface.co/collections/Qwen/qwen3-67dd247413f0e2e4f653967f) | `qwen3` | `json`, `regex` | ✅ |
|
||||
| [QwQ-32B](https://huggingface.co/Qwen/QwQ-32B) | `deepseek_r1` | `json`, `regex` | ✅ |
|
||||
|
||||
!!! note
|
||||
IBM Granite 3.2 and DeepSeek-V3.1 reasoning is disabled by default; to enable it, you must also pass `thinking=True` in your `chat_template_kwargs`.
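For example, with the OpenAI-compatible server this can be supplied per request through `chat_template_kwargs`; the model name below is just a placeholder for one of the models listed above:

```bash
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "ibm-granite/granite-3.2-8b-instruct",
        "messages": [{"role": "user", "content": "How many prime numbers are there below 30?"}],
        "chat_template_kwargs": {"thinking": true}
      }'
```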
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
# --8<-- [start:installation]
|
||||
|
||||
vLLM supports AMD GPUs with ROCm 6.3 or above.
|
||||
vLLM supports AMD GPUs with ROCm 6.3 or above and PyTorch 2.8.0 or above.
|
||||
|
||||
!!! tip
|
||||
[Docker](#set-up-using-docker) is the recommended way to use vLLM on ROCm.
|
||||
@ -28,57 +28,63 @@ Currently, there are no pre-built ROCm wheels.
|
||||
# --8<-- [end:pre-built-wheels]
|
||||
# --8<-- [start:build-wheel-from-source]
|
||||
|
||||
!!! tip
|
||||
- If the following installation steps do not work for you, please refer to [docker/Dockerfile.rocm_base](https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile.rocm_base); the Dockerfile itself documents a working sequence of installation steps.
|
||||
|
||||
0. Install prerequisites (skip if you are already in an environment/docker with the following installed):
|
||||
|
||||
- [ROCm](https://rocm.docs.amd.com/en/latest/deploy/linux/index.html)
|
||||
- [PyTorch](https://pytorch.org/)
|
||||
|
||||
For installing PyTorch, you can start from a fresh docker image, e.g, `rocm/pytorch:rocm6.4.3_ubuntu24.04_py3.12_pytorch_release_2.6.0`, `rocm/pytorch-nightly`. If you are using docker image, you can skip to Step 3.
|
||||
For installing PyTorch, you can start from a fresh docker image, e.g., `rocm/pytorch:rocm7.0_ubuntu22.04_py3.10_pytorch_release_2.8.0` or `rocm/pytorch-nightly`. If you are using a docker image, you can skip to Step 3.
|
||||
|
||||
Alternatively, you can install PyTorch using PyTorch wheels. You can check PyTorch installation guide in PyTorch [Getting Started](https://pytorch.org/get-started/locally/). Example:
|
||||
|
||||
```bash
|
||||
# Install PyTorch
|
||||
pip uninstall torch -y
|
||||
pip install --no-cache-dir torch torchvision --index-url https://download.pytorch.org/whl/rocm6.4
|
||||
pip install --no-cache-dir torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm7.0
|
||||
```
|
||||
|
||||
1. Install [Triton for ROCm](https://github.com/triton-lang/triton)
|
||||
1. Install [Triton for ROCm](https://github.com/ROCm/triton.git)
|
||||
|
||||
Install ROCm's Triton (the default triton-mlir branch) following the instructions from [ROCm/triton](https://github.com/ROCm/triton/blob/triton-mlir/README.md)
|
||||
Install ROCm's Triton following the instructions from [ROCm/triton](https://github.com/ROCm/triton.git)
|
||||
|
||||
```bash
|
||||
python3 -m pip install ninja cmake wheel pybind11
|
||||
pip uninstall -y triton
|
||||
git clone https://github.com/triton-lang/triton.git
|
||||
git clone https://github.com/ROCm/triton.git
|
||||
cd triton
|
||||
git checkout e5be006
|
||||
# git checkout $TRITON_BRANCH
|
||||
git checkout f9e5bf54
|
||||
if [ ! -f setup.py ]; then cd python; fi
|
||||
python3 setup.py install
|
||||
cd ../..
|
||||
```
|
||||
|
||||
!!! note
|
||||
If you see HTTP issue related to downloading packages during building triton, please try again as the HTTP error is intermittent.
|
||||
- The validated `$TRITON_BRANCH` can be found in the [docker/Dockerfile.rocm_base](https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile.rocm_base).
|
||||
- If you see HTTP issue related to downloading packages during building triton, please try again as the HTTP error is intermittent.
|
||||
|
||||
2. Optionally, if you choose to use CK flash attention, you can install [flash attention for ROCm](https://github.com/Dao-AILab/flash-attention)
|
||||
2. Optionally, if you choose to use CK flash attention, you can install [flash attention for ROCm](https://github.com/Dao-AILab/flash-attention.git)
|
||||
|
||||
Install ROCm's flash attention (v2.7.2) following the instructions from [ROCm/flash-attention](https://github.com/ROCm/flash-attention#amd-rocm-support)
|
||||
Alternatively, wheels intended for vLLM use can be accessed under the releases.
|
||||
Install ROCm's flash attention (v2.8.0) following the instructions from [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention#amd-rocm-support)
|
||||
|
||||
For example, for ROCm 6.3, suppose your gfx arch is `gfx90a`. To get your gfx architecture, run `rocminfo |grep gfx`.
|
||||
For example, for ROCm 7.0, suppose your gfx arch is `gfx942`. To get your gfx architecture, run `rocminfo |grep gfx`.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/Dao-AILab/flash-attention.git
|
||||
cd flash-attention
|
||||
git checkout 1a7f4dfa
|
||||
# git checkout $FA_BRANCH
|
||||
git checkout 0e60e394
|
||||
git submodule update --init
|
||||
GPU_ARCHS="gfx90a" python3 setup.py install
|
||||
GPU_ARCHS="gfx942" python3 setup.py install
|
||||
cd ..
|
||||
```
|
||||
|
||||
!!! note
|
||||
You might need to downgrade the "ninja" version to 1.10 as it is not used when compiling flash-attention-2 (e.g. `pip install ninja==1.10.2.4`)
|
||||
- The validated `$FA_BRANCH` can be found in the [docker/Dockerfile.rocm_base](https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile.rocm_base).
|
||||
|
||||
|
||||
3. If you choose to build AITER yourself to use a certain branch or commit, you can build AITER using the following steps:
|
||||
|
||||
@ -92,11 +98,13 @@ Currently, there are no pre-built ROCm wheels.
|
||||
```
|
||||
|
||||
!!! note
|
||||
You will need to config the `$AITER_BRANCH_OR_COMMIT` for your purpose.
|
||||
- You will need to config the `$AITER_BRANCH_OR_COMMIT` for your purpose.
|
||||
- The validated `$AITER_BRANCH_OR_COMMIT` can be found in the [docker/Dockerfile.rocm_base](https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile.rocm_base).
|
||||
|
||||
|
||||
4. Build vLLM. For example, vLLM on ROCM 6.3 can be built with the following steps:
|
||||
4. Build vLLM. For example, vLLM on ROCM 7.0 can be built with the following steps:
|
||||
|
||||
??? console "Commands"
|
||||
???+ console "Commands"
|
||||
|
||||
```bash
|
||||
pip install --upgrade pip
|
||||
@ -109,31 +117,48 @@ Currently, there are no pre-built ROCm wheels.
|
||||
scipy \
|
||||
huggingface-hub[cli,hf_transfer] \
|
||||
setuptools_scm
|
||||
pip install "numpy<2"
|
||||
pip install -r requirements/rocm.txt
|
||||
|
||||
# Build vLLM for MI210/MI250/MI300.
|
||||
export PYTORCH_ROCM_ARCH="gfx90a;gfx942"
|
||||
# To build for a single architecture (e.g., MI300) for faster installation (recommended):
|
||||
export PYTORCH_ROCM_ARCH="gfx942"
|
||||
|
||||
# To build vLLM for multiple arch MI210/MI250/MI300, use this instead
|
||||
# export PYTORCH_ROCM_ARCH="gfx90a;gfx942"
|
||||
|
||||
python3 setup.py develop
|
||||
```
|
||||
|
||||
This may take 5-10 minutes. Currently, `pip install .` does not work for ROCm installation.
|
||||
|
||||
!!! tip
|
||||
- Triton flash attention is used by default. For benchmarking purposes, it is recommended to run a warm-up step before collecting perf numbers.
|
||||
- Triton flash attention does not currently support sliding window attention. If using half precision, please use CK flash-attention for sliding window support.
|
||||
- To use CK flash-attention or PyTorch naive attention, please use this flag `export VLLM_USE_TRITON_FLASH_ATTN=0` to turn off triton flash attention.
|
||||
- The ROCm version of PyTorch, ideally, should match the ROCm driver version.
|
||||
|
||||
!!! tip
|
||||
- For MI300x (gfx942) users, to achieve optimal performance, please refer to [MI300x tuning guide](https://rocm.docs.amd.com/en/latest/how-to/tuning-guides/mi300x/index.html) for performance optimization and tuning tips on system and workflow level.
|
||||
For vLLM, please refer to [vLLM performance optimization](https://rocm.docs.amd.com/en/latest/how-to/tuning-guides/mi300x/workload.html#vllm-performance-optimization).
|
||||
For vLLM, please refer to [vLLM performance optimization](https://rocm.docs.amd.com/en/latest/how-to/rocm-for-ai/inference-optimization/vllm-optimization.html).
|
||||
|
||||
# --8<-- [end:build-wheel-from-source]
|
||||
# --8<-- [start:pre-built-images]
|
||||
|
||||
The [AMD Infinity hub for vLLM](https://hub.docker.com/r/rocm/vllm/tags) offers a prebuilt, optimized
|
||||
docker image designed for validating inference performance on the AMD Instinct™ MI300X accelerator.
|
||||
AMD also offers nightly prebuilt docker image from [Docker Hub](https://hub.docker.com/r/rocm/vllm-dev), which has vLLM and all its dependencies installed.
|
||||
|
||||
???+ console "Commands"
|
||||
```bash
|
||||
docker pull rocm/vllm-dev:nightly # to get the latest image
|
||||
docker run -it --rm \
|
||||
--network=host \
|
||||
--group-add=video \
|
||||
--ipc=host \
|
||||
--cap-add=SYS_PTRACE \
|
||||
--security-opt seccomp=unconfined \
|
||||
--device /dev/kfd \
|
||||
--device /dev/dri \
|
||||
-v <path/to/your/models>:/app/models \
|
||||
-e HF_HOME="/app/models" \
|
||||
rocm/vllm-dev:nightly
|
||||
```
|
||||
|
||||
!!! tip
|
||||
Please check [LLM inference performance validation on AMD Instinct MI300X](https://rocm.docs.amd.com/en/latest/how-to/performance-validation/mi300x/vllm-benchmark.html)
|
||||
@ -144,29 +169,29 @@ docker image designed for validating inference performance on the AMD Instinct
|
||||
|
||||
Building the Docker image from source is the recommended way to use vLLM with ROCm.
|
||||
|
||||
#### (Optional) Build an image with ROCm software stack
|
||||
??? info "(Optional) Build an image with ROCm software stack"
|
||||
|
||||
Build a docker image from [docker/Dockerfile.rocm_base](https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile.rocm_base) which setup ROCm software stack needed by the vLLM.
|
||||
**This step is optional as this rocm_base image is usually prebuilt and store at [Docker Hub](https://hub.docker.com/r/rocm/vllm-dev) under tag `rocm/vllm-dev:base` to speed up user experience.**
|
||||
If you choose to build this rocm_base image yourself, the steps are as follows.
|
||||
Build a docker image from [docker/Dockerfile.rocm_base](https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile.rocm_base), which sets up the ROCm software stack needed by vLLM.
|
||||
**This step is optional, as this rocm_base image is usually prebuilt and stored on [Docker Hub](https://hub.docker.com/r/rocm/vllm-dev) under the tag `rocm/vllm-dev:base` to speed up the user experience.**
|
||||
If you choose to build this rocm_base image yourself, the steps are as follows.
|
||||
|
||||
It is important that the user kicks off the docker build using buildkit. Either the user put DOCKER_BUILDKIT=1 as environment variable when calling docker build command, or the user needs to set up buildkit in the docker daemon configuration /etc/docker/daemon.json as follows and restart the daemon:
|
||||
It is important to kick off the docker build using BuildKit. Either set `DOCKER_BUILDKIT=1` as an environment variable when calling the `docker build` command, or set up buildkit in the docker daemon configuration /etc/docker/daemon.json as follows and restart the daemon:
|
||||
|
||||
```json
|
||||
{
|
||||
"features": {
|
||||
"buildkit": true
|
||||
```json
|
||||
{
|
||||
"features": {
|
||||
"buildkit": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
```
|
||||
|
||||
To build vllm on ROCm 6.3 for MI200 and MI300 series, you can use the default:
|
||||
To build vllm on ROCm 7.0 for MI200 and MI300 series, you can use the default:
|
||||
|
||||
```bash
|
||||
DOCKER_BUILDKIT=1 docker build \
|
||||
-f docker/Dockerfile.rocm_base \
|
||||
-t rocm/vllm-dev:base .
|
||||
```
|
||||
```bash
|
||||
DOCKER_BUILDKIT=1 docker build \
|
||||
-f docker/Dockerfile.rocm_base \
|
||||
-t rocm/vllm-dev:base .
|
||||
```
|
||||
|
||||
#### Build an image with vLLM
|
||||
|
||||
@ -181,7 +206,7 @@ It is important that the user kicks off the docker build using buildkit. Either
|
||||
}
|
||||
```
|
||||
|
||||
[docker/Dockerfile.rocm](https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile.rocm) uses ROCm 6.3 by default, but also supports ROCm 5.7, 6.0, 6.1, and 6.2, in older vLLM branches.
|
||||
[docker/Dockerfile.rocm](https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile.rocm) uses ROCm 7.0 by default, but also supports ROCm 5.7, 6.0, 6.1, 6.2, 6.3, and 6.4, in older vLLM branches.
|
||||
It provides flexibility to customize the build of docker image using the following arguments:
|
||||
|
||||
- `BASE_IMAGE`: specifies the base image used when running `docker build`. The default value `rocm/vllm-dev:base` is an image published and maintained by AMD. It is being built using [docker/Dockerfile.rocm_base](https://github.com/vllm-project/vllm/blob/main/docker/Dockerfile.rocm_base)
|
||||
@ -189,16 +214,16 @@ It provides flexibility to customize the build of docker image using the followi
|
||||
|
||||
Their values can be passed in when running `docker build` with `--build-arg` options.
|
||||
|
||||
To build vllm on ROCm 6.3 for MI200 and MI300 series, you can use the default:
|
||||
To build vllm on ROCm 7.0 for MI200 and MI300 series, you can use the default:
|
||||
|
||||
```bash
|
||||
DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile.rocm -t vllm-rocm .
|
||||
```
|
||||
???+ console "Commands"
|
||||
```bash
|
||||
DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile.rocm -t vllm-rocm .
|
||||
```
|
||||
|
||||
To run the above docker image `vllm-rocm`, use the below command:
|
||||
|
||||
??? console "Command"
|
||||
|
||||
???+ console "Commands"
|
||||
```bash
|
||||
docker run -it \
|
||||
--network=host \
|
||||
|
||||
@ -56,8 +56,10 @@ docker build -f docker/Dockerfile.xpu -t vllm-xpu-env --shm-size=4g .
|
||||
docker run -it \
|
||||
--rm \
|
||||
--network=host \
|
||||
--device /dev/dri \
|
||||
--device /dev/dri:/dev/dri \
|
||||
-v /dev/dri/by-path:/dev/dri/by-path \
|
||||
--ipc=host \
|
||||
--privileged \
|
||||
vllm-xpu-env
|
||||
```
|
||||
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
It's recommended to use [uv](https://docs.astral.sh/uv/), a very fast Python environment manager, to create and manage Python environments. Please follow the [documentation](https://docs.astral.sh/uv/#getting-started) to install `uv`. After installing `uv`, you can create a new Python environment using the following commands:
|
||||
On NVIDIA CUDA only, it's recommended to use [uv](https://docs.astral.sh/uv/), a very fast Python environment manager, to create and manage Python environments. Please follow the [documentation](https://docs.astral.sh/uv/#getting-started) to install `uv`. After installing `uv`, you can create a new Python environment using the following commands:
|
||||
|
||||
```bash
|
||||
uv venv --python 3.12 --seed
|
||||
|
||||
@ -56,16 +56,23 @@ def auto_mock(module, attr, max_mocks=50):
|
||||
)
|
||||
|
||||
|
||||
latency = auto_mock("vllm.benchmarks", "latency")
|
||||
serve = auto_mock("vllm.benchmarks", "serve")
|
||||
throughput = auto_mock("vllm.benchmarks", "throughput")
|
||||
bench_latency = auto_mock("vllm.benchmarks", "latency")
|
||||
bench_serve = auto_mock("vllm.benchmarks", "serve")
|
||||
bench_sweep_plot = auto_mock("vllm.benchmarks.sweep.plot", "SweepPlotArgs")
|
||||
bench_sweep_serve = auto_mock("vllm.benchmarks.sweep.serve", "SweepServeArgs")
|
||||
bench_sweep_serve_sla = auto_mock(
|
||||
"vllm.benchmarks.sweep.serve_sla", "SweepServeSLAArgs"
|
||||
)
|
||||
bench_throughput = auto_mock("vllm.benchmarks", "throughput")
|
||||
AsyncEngineArgs = auto_mock("vllm.engine.arg_utils", "AsyncEngineArgs")
|
||||
EngineArgs = auto_mock("vllm.engine.arg_utils", "EngineArgs")
|
||||
ChatCommand = auto_mock("vllm.entrypoints.cli.openai", "ChatCommand")
|
||||
CompleteCommand = auto_mock("vllm.entrypoints.cli.openai", "CompleteCommand")
|
||||
cli_args = auto_mock("vllm.entrypoints.openai", "cli_args")
|
||||
run_batch = auto_mock("vllm.entrypoints.openai", "run_batch")
|
||||
FlexibleArgumentParser = auto_mock("vllm.utils", "FlexibleArgumentParser")
|
||||
openai_cli_args = auto_mock("vllm.entrypoints.openai", "cli_args")
|
||||
openai_run_batch = auto_mock("vllm.entrypoints.openai", "run_batch")
|
||||
FlexibleArgumentParser = auto_mock(
|
||||
"vllm.utils.argparse_utils", "FlexibleArgumentParser"
|
||||
)
|
||||
|
||||
|
||||
class MarkdownFormatter(HelpFormatter):
|
||||
@ -112,6 +119,9 @@ class MarkdownFormatter(HelpFormatter):
|
||||
self._markdown_output.append(f"{action.help}\n\n")
|
||||
|
||||
if (default := action.default) != SUPPRESS:
|
||||
# Make empty string defaults visible
|
||||
if default == "":
|
||||
default = '""'
|
||||
self._markdown_output.append(f"Default: `{default}`\n\n")
|
||||
|
||||
def format_help(self):
|
||||
@ -148,17 +158,23 @@ def on_startup(command: Literal["build", "gh-deploy", "serve"], dirty: bool):
|
||||
|
||||
# Create parsers to document
|
||||
parsers = {
|
||||
# Engine args
|
||||
"engine_args": create_parser(EngineArgs.add_cli_args),
|
||||
"async_engine_args": create_parser(
|
||||
AsyncEngineArgs.add_cli_args, async_args_only=True
|
||||
),
|
||||
"serve": create_parser(cli_args.make_arg_parser),
|
||||
# CLI
|
||||
"serve": create_parser(openai_cli_args.make_arg_parser),
|
||||
"chat": create_parser(ChatCommand.add_cli_args),
|
||||
"complete": create_parser(CompleteCommand.add_cli_args),
|
||||
"bench_latency": create_parser(latency.add_cli_args),
|
||||
"bench_throughput": create_parser(throughput.add_cli_args),
|
||||
"bench_serve": create_parser(serve.add_cli_args),
|
||||
"run-batch": create_parser(run_batch.make_arg_parser),
|
||||
"run-batch": create_parser(openai_run_batch.make_arg_parser),
|
||||
# Benchmark CLI
|
||||
"bench_latency": create_parser(bench_latency.add_cli_args),
|
||||
"bench_serve": create_parser(bench_serve.add_cli_args),
|
||||
"bench_sweep_plot": create_parser(bench_sweep_plot.add_cli_args),
|
||||
"bench_sweep_serve": create_parser(bench_sweep_serve.add_cli_args),
|
||||
"bench_sweep_serve_sla": create_parser(bench_sweep_serve_sla.add_cli_args),
|
||||
"bench_throughput": create_parser(bench_throughput.add_cli_args),
|
||||
}
|
||||
|
||||
# Generate documentation for each parser
|
||||
|
||||
@ -45,6 +45,15 @@ vllm serve s3://core-llm/Llama-3-8b \
|
||||
|
||||
You can tune parameters using `--model-loader-extra-config`:
|
||||
|
||||
You can tune `distributed`, which controls whether distributed streaming is used. This is currently only supported on CUDA and ROCm devices, and it can significantly improve loading times from object storage or high-throughput network file shares.
|
||||
You can read more about distributed streaming [here](https://github.com/run-ai/runai-model-streamer/blob/master/docs/src/usage.md#distributed-streaming).
|
||||
|
||||
```bash
|
||||
vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \
|
||||
--load-format runai_streamer \
|
||||
--model-loader-extra-config '{"distributed":true}'
|
||||
```
|
||||
|
||||
You can tune `concurrency`, which controls the level of concurrency and the number of OS threads reading tensors from the file into the CPU buffer.
|
||||
When reading from S3, it is the number of client instances the host opens to the S3 server.
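For example, a sketch that raises the concurrency (the value `16` is illustrative, not a recommended default; tune it for your storage backend):

```bash
vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \
    --load-format runai_streamer \
    --model-loader-extra-config '{"concurrency":16}'
```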
|
||||
|
||||
|
||||
@ -30,11 +30,11 @@ If `--runner pooling` has been set (manually or automatically) but the model doe
|
||||
vLLM will attempt to automatically convert the model according to the architecture names
|
||||
shown in the table below.
|
||||
|
||||
| Architecture | `--convert` | Supported pooling tasks |
|
||||
|-------------------------------------------------|-------------|-------------------------------|
|
||||
| `*ForTextEncoding`, `*EmbeddingModel`, `*Model` | `embed` | `encode`, `embed` |
|
||||
| `*For*Classification`, `*ClassificationModel` | `classify` | `encode`, `classify`, `score` |
|
||||
| `*ForRewardModeling`, `*RewardModel` | `reward` | `encode` |
|
||||
| Architecture | `--convert` | Supported pooling tasks |
|
||||
|-------------------------------------------------|-------------|---------------------------------------|
|
||||
| `*ForTextEncoding`, `*EmbeddingModel`, `*Model` | `embed` | `token_embed`, `embed` |
|
||||
| `*For*Classification`, `*ClassificationModel` | `classify` | `token_classify`, `classify`, `score` |
|
||||
| `*ForRewardModeling`, `*RewardModel` | `reward` | `token_classify` |
|
||||
|
||||
!!! tip
|
||||
You can explicitly set `--convert <type>` to specify how to convert the model.
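For example, a sketch that makes the automatic embedding conversion explicit (the model name is illustrative):

```bash
vllm serve intfloat/e5-small --convert embed
```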
|
||||
@ -45,12 +45,14 @@ Each pooling model in vLLM supports one or more of these tasks according to
|
||||
[Pooler.get_supported_tasks][vllm.model_executor.layers.pooler.Pooler.get_supported_tasks],
|
||||
enabling the corresponding APIs:
|
||||
|
||||
| Task | APIs |
|
||||
|------------|--------------------------------------|
|
||||
| `encode` | `LLM.reward(...)` |
|
||||
| `embed` | `LLM.embed(...)`, `LLM.score(...)`\* |
|
||||
| `classify` | `LLM.classify(...)` |
|
||||
| `score` | `LLM.score(...)` |
|
||||
| Task | APIs |
|
||||
|------------------|-------------------------------------------------------------------------------|
|
||||
| `embed` | `LLM.embed(...)`, `LLM.score(...)`\*, `LLM.encode(..., pooling_task="embed")` |
|
||||
| `classify` | `LLM.classify(...)`, `LLM.encode(..., pooling_task="classify")` |
|
||||
| `score` | `LLM.score(...)` |
|
||||
| `token_classify` | `LLM.reward(...)`, `LLM.encode(..., pooling_task="token_classify")` |
|
||||
| `token_embed` | `LLM.encode(..., pooling_task="token_embed")` |
|
||||
| `plugin` | `LLM.encode(..., pooling_task="plugin")` |
|
||||
|
||||
\* The `LLM.score(...)` API falls back to the `embed` task if the model does not support the `score` task.
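As a sketch of that fallback, assuming an embedding model without a dedicated `score` task (the model name is illustrative):

```python
from vllm import LLM

llm = LLM(model="intfloat/e5-small", runner="pooling")

# No native `score` task here, so this falls back to the `embed` task:
# both texts are embedded and their cosine similarity is returned.
scores = llm.score("What is the capital of France?", "The capital of France is Paris.")
print(scores[0].outputs.score)
```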
|
||||
|
||||
@ -144,7 +146,6 @@ A code example can be found here: [examples/offline_inference/basic/score.py](..
|
||||
### `LLM.reward`
|
||||
|
||||
The [reward][vllm.LLM.reward] method is available to all reward models in vLLM.
|
||||
It returns the extracted hidden states directly.
|
||||
|
||||
```python
|
||||
from vllm import LLM
|
||||
@ -161,15 +162,17 @@ A code example can be found here: [examples/offline_inference/basic/reward.py](.
|
||||
### `LLM.encode`
|
||||
|
||||
The [encode][vllm.LLM.encode] method is available to all pooling models in vLLM.
|
||||
It returns the extracted hidden states directly.
|
||||
|
||||
!!! note
|
||||
Please use one of the more specific methods or set the task directly when using `LLM.encode`:
|
||||
|
||||
- For embeddings, use `LLM.embed(...)` or `pooling_task="embed"`.
|
||||
- For classification logits, use `LLM.classify(...)` or `pooling_task="classify"`.
|
||||
- For rewards, use `LLM.reward(...)` or `pooling_task="reward"`.
|
||||
- For similarity scores, use `LLM.score(...)`.
|
||||
- For rewards, use `LLM.reward(...)` or `pooling_task="token_classify"`.
|
||||
- For token classification, use `pooling_task="token_classify"`.
|
||||
- For multi-vector retrieval, use `pooling_task="token_embed"`.
|
||||
- For IO Processor Plugins, use `pooling_task="plugin"`.
|
||||
|
||||
```python
|
||||
from vllm import LLM
|
||||
@ -185,10 +188,47 @@ print(f"Data: {data!r}")
|
||||
|
||||
Our [OpenAI-Compatible Server](../serving/openai_compatible_server.md) provides endpoints that correspond to the offline APIs:
|
||||
|
||||
- [Pooling API](../serving/openai_compatible_server.md#pooling-api) is similar to `LLM.encode`, being applicable to all types of pooling models.
|
||||
- [Embeddings API](../serving/openai_compatible_server.md#embeddings-api) is similar to `LLM.embed`, accepting both text and [multi-modal inputs](../features/multimodal_inputs.md) for embedding models.
|
||||
- [Classification API](../serving/openai_compatible_server.md#classification-api) is similar to `LLM.classify` and is applicable to sequence classification models.
|
||||
- [Score API](../serving/openai_compatible_server.md#score-api) is similar to `LLM.score` for cross-encoder models.
|
||||
- [Pooling API](../serving/openai_compatible_server.md#pooling-api) is similar to `LLM.encode`, being applicable to all types of pooling models.
|
||||
|
||||
!!! note
|
||||
Please use one of the more specific APIs or set the task explicitly when using the [Pooling API](../serving/openai_compatible_server.md#pooling-api):
|
||||
|
||||
- For embeddings, use [Embeddings API](../serving/openai_compatible_server.md#embeddings-api) or `"task":"embed"`.
|
||||
- For classification logits, use [Classification API](../serving/openai_compatible_server.md#classification-api) or `"task":"classify"`.
|
||||
- For similarity scores, use [Score API](../serving/openai_compatible_server.md#score-api).
|
||||
- For rewards, use `"task":"token_classify"`.
|
||||
- For token classification, use `"task":"token_classify"`.
|
||||
- For multi-vector retrieval, use `"task":"token_embed"`.
|
||||
- For IO Processor Plugins, use `"task":"plugin"`.
|
||||
|
||||
```python
|
||||
# start a supported embeddings model server with `vllm serve`, e.g.
|
||||
# vllm serve intfloat/e5-small
|
||||
import requests
|
||||
|
||||
host = "localhost"
|
||||
port = "8000"
|
||||
model_name = "intfloat/e5-small"
|
||||
|
||||
api_url = f"http://{host}:{port}/pooling"
|
||||
|
||||
prompts = [
|
||||
"Hello, my name is",
|
||||
"The president of the United States is",
|
||||
"The capital of France is",
|
||||
"The future of AI is",
|
||||
]
|
||||
prompt = {"model": model_name, "input": prompts, "task": "embed"}
|
||||
|
||||
response = requests.post(api_url, json=prompt)
|
||||
|
||||
for output in response.json()["data"]:
|
||||
data = output["data"]
|
||||
print(f"Data: {data!r} (size={len(data)})")
|
||||
```
|
||||
|
||||
## Matryoshka Embeddings
|
||||
|
||||
@ -265,3 +305,16 @@ Expected output:
|
||||
```
|
||||
|
||||
An OpenAI client example can be found here: [examples/online_serving/pooling/openai_embedding_matryoshka_fy.py](../../examples/online_serving/pooling/openai_embedding_matryoshka_fy.py)
|
||||
|
||||
## Deprecated Features
|
||||
|
||||
### Encode task
|
||||
|
||||
We have split the `encode` task into two more specific token-wise tasks, `token_embed` and `token_classify` (a short migration sketch follows this list):
|
||||
|
||||
- `token_embed` is the same as `embed`, using normalization as the activation.
|
||||
- `token_classify` is the same as `classify`, using softmax as the default activation.
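A minimal migration sketch, assuming a pooling model that previously relied on the deprecated `encode` task (the model name is illustrative):

```python
from vllm import LLM

llm = LLM(model="BAAI/bge-m3", runner="pooling")

# Before: llm.encode(prompts) used the deprecated `encode` task.
# After: choose the token-wise task explicitly.
outputs = llm.encode(["Hello, my name is"], pooling_task="token_embed")
print(outputs[0].outputs.data.shape)  # one embedding vector per token
```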
|
||||
|
||||
### Remove softmax from PoolingParams
|
||||
|
||||
We are going to remove `softmax` and `activation` from `PoolingParams`. Instead, set `use_activation`, since `classify` and `token_classify` are actually allowed to use any activation function.
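A hedged sketch of the replacement (the model name and the exact plumbing of `PoolingParams` through `LLM.encode` are assumptions, not confirmed API details):

```python
from vllm import LLM, PoolingParams

llm = LLM(model="jason9693/Qwen2.5-1.5B-apeach", runner="pooling")

# use_activation=False requests raw logits instead of the default softmax.
outputs = llm.encode(
    ["vLLM is wonderful!"],
    pooling_task="token_classify",
    pooling_params=PoolingParams(use_activation=False),
)
print(outputs[0].outputs.data)
```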
|
||||
|
||||
@ -382,6 +382,7 @@ th {
|
||||
| `InternLM3ForCausalLM` | InternLM3 | `internlm/internlm3-8b-instruct`, etc. | ✅︎ | ✅︎ |
|
||||
| `JAISLMHeadModel` | Jais | `inceptionai/jais-13b`, `inceptionai/jais-13b-chat`, `inceptionai/jais-30b-v3`, `inceptionai/jais-30b-chat-v3`, etc. | | ✅︎ |
|
||||
| `JambaForCausalLM` | Jamba | `ai21labs/AI21-Jamba-1.5-Large`, `ai21labs/AI21-Jamba-1.5-Mini`, `ai21labs/Jamba-v0.1`, etc. | ✅︎ | ✅︎ |
|
||||
| `KimiLinearForCausalLM` | Kimi-Linear-48B-A3B-Base, Kimi-Linear-48B-A3B-Instruct | `moonshotai/Kimi-Linear-48B-A3B-Base`, `moonshotai/Kimi-Linear-48B-A3B-Instruct` | | ✅︎ |
|
||||
| `Lfm2ForCausalLM` | LFM2 | `LiquidAI/LFM2-1.2B`, `LiquidAI/LFM2-700M`, `LiquidAI/LFM2-350M`, etc. | ✅︎ | ✅︎ |
|
||||
| `Lfm2MoeForCausalLM` | LFM2MoE | `LiquidAI/LFM2-8B-A1B-preview`, etc. | ✅︎ | ✅︎ |
|
||||
| `LlamaForCausalLM` | Llama 3.1, Llama 3, Llama 2, LLaMA, Yi | `meta-llama/Meta-Llama-3.1-405B-Instruct`, `meta-llama/Meta-Llama-3.1-70B`, `meta-llama/Meta-Llama-3-70B-Instruct`, `meta-llama/Llama-2-70b-hf`, `01-ai/Yi-34B`, etc. | ✅︎ | ✅︎ |
|
||||
@ -390,6 +391,7 @@ th {
|
||||
| `MiMoForCausalLM` | MiMo | `XiaomiMiMo/MiMo-7B-RL`, etc. | ✅︎ | ✅︎ |
|
||||
| `MiniCPMForCausalLM` | MiniCPM | `openbmb/MiniCPM-2B-sft-bf16`, `openbmb/MiniCPM-2B-dpo-bf16`, `openbmb/MiniCPM-S-1B-sft`, etc. | ✅︎ | ✅︎ |
|
||||
| `MiniCPM3ForCausalLM` | MiniCPM3 | `openbmb/MiniCPM3-4B`, etc. | ✅︎ | ✅︎ |
|
||||
| `MiniMaxM2ForCausalLM` | MiniMax-M2 |`MiniMaxAI/MiniMax-M2`, etc. | | ✅︎ |
|
||||
| `MistralForCausalLM` | Mistral, Mistral-Instruct | `mistralai/Mistral-7B-v0.1`, `mistralai/Mistral-7B-Instruct-v0.1`, etc. | ✅︎ | ✅︎ |
|
||||
| `MixtralForCausalLM` | Mixtral-8x7B, Mixtral-8x7B-Instruct | `mistralai/Mixtral-8x7B-v0.1`, `mistralai/Mixtral-8x7B-Instruct-v0.1`, `mistral-community/Mixtral-8x22B-v0.1`, etc. | ✅︎ | ✅︎ |
|
||||
| `MPTForCausalLM` | MPT, MPT-Instruct, MPT-Chat, MPT-StoryWriter | `mosaicml/mpt-7b`, `mosaicml/mpt-7b-storywriter`, `mosaicml/mpt-30b`, etc. | | ✅︎ |
|
||||
@ -401,6 +403,7 @@ th {
|
||||
| `OLMoEForCausalLM` | OLMoE | `allenai/OLMoE-1B-7B-0924`, `allenai/OLMoE-1B-7B-0924-Instruct`, etc. | | ✅︎ |
|
||||
| `OPTForCausalLM` | OPT, OPT-IML | `facebook/opt-66b`, `facebook/opt-iml-max-30b`, etc. | ✅︎ | ✅︎ |
|
||||
| `OrionForCausalLM` | Orion | `OrionStarAI/Orion-14B-Base`, `OrionStarAI/Orion-14B-Chat`, etc. | | ✅︎ |
|
||||
| `OuroForCausalLM` | ouro | `ByteDance/Ouro-1.4B`, `ByteDance/Ouro-2.6B`, etc. | ✅︎ | |
|
||||
| `PhiForCausalLM` | Phi | `microsoft/phi-1_5`, `microsoft/phi-2`, etc. | ✅︎ | ✅︎ |
|
||||
| `Phi3ForCausalLM` | Phi-4, Phi-3 | `microsoft/Phi-4-mini-instruct`, `microsoft/Phi-4`, `microsoft/Phi-3-mini-4k-instruct`, `microsoft/Phi-3-mini-128k-instruct`, `microsoft/Phi-3-medium-128k-instruct`, etc. | ✅︎ | ✅︎ |
|
||||
| `PhiMoEForCausalLM` | Phi-3.5-MoE | `microsoft/Phi-3.5-MoE-instruct`, etc. | ✅︎ | ✅︎ |
|
||||
@ -774,7 +777,7 @@ The following table lists those that are tested in vLLM.
|
||||
| `CLIPModel` | CLIP | T / I | `openai/clip-vit-base-patch32`, `openai/clip-vit-large-patch14`, etc. | | |
|
||||
| `LlavaNextForConditionalGeneration`<sup>C</sup> | LLaVA-NeXT-based | T / I | `royokong/e5-v` | | ✅︎ |
|
||||
| `Phi3VForCausalLM`<sup>C</sup> | Phi-3-Vision-based | T + I | `TIGER-Lab/VLM2Vec-Full` | | ✅︎ |
|
||||
| `SiglipModel` | SigLIP | T / I | `google/siglip-base-patch16-224` | | |
|
||||
| `SiglipModel` | SigLIP, SigLIP2 | T / I | `google/siglip-base-patch16-224`, `google/siglip2-base-patch16-224` | | |
|
||||
| `*ForConditionalGeneration`<sup>C</sup>, `*ForCausalLM`<sup>C</sup>, etc. | Generative models | \* | N/A | \* | \* |
|
||||
|
||||
<sup>C</sup> Automatically converted into an embedding model via `--convert embed`. ([details](./pooling_models.md#model-conversion))
|
||||
|
||||
@ -638,7 +638,7 @@ Usually, the score for a sentence pair refers to the similarity between two sent
|
||||
|
||||
You can find the documentation for cross encoder models at [sbert.net](https://www.sbert.net/docs/package_reference/cross_encoder/cross_encoder.html).
|
||||
|
||||
Code example: [examples/online_serving/openai_cross_encoder_score.py](../../examples/online_serving/openai_cross_encoder_score.py)
|
||||
Code example: [examples/online_serving/pooling/openai_cross_encoder_score.py](../../examples/online_serving/pooling/openai_cross_encoder_score.py)
|
||||
|
||||
#### Single inference
|
||||
|
||||
@ -819,7 +819,7 @@ You can pass multi-modal inputs to scoring models by passing `content` including
|
||||
print("Scoring output:", response_json["data"][0]["score"])
|
||||
print("Scoring output:", response_json["data"][1]["score"])
|
||||
```
|
||||
Full example: [examples/online_serving/openai_cross_encoder_score_for_multimodal.py](../../examples/online_serving/openai_cross_encoder_score_for_multimodal.py)
|
||||
Full example: [examples/online_serving/pooling/openai_cross_encoder_score_for_multimodal.py](../../examples/online_serving/pooling/openai_cross_encoder_score_for_multimodal.py)
|
||||
|
||||
#### Extra parameters
|
||||
|
||||
|
||||
@ -38,6 +38,18 @@ python examples/offline_inference/pooling/multi_vector_retrieval.py
|
||||
python examples/offline_inference/pooling/ner.py
|
||||
```
|
||||
|
||||
## Prithvi Geospatial MAE usage
|
||||
|
||||
```bash
|
||||
python examples/offline_inference/pooling/prithvi_geospatial_mae.py
|
||||
```
|
||||
|
||||
## IO Processor Plugins for Prithvi Geospatial MAE
|
||||
|
||||
```bash
|
||||
python examples/offline_inference/pooling/prithvi_geospatial_mae_io_processor.py
|
||||
```
|
||||
|
||||
## Qwen3 reranker usage
|
||||
|
||||
```bash
|
||||
|
||||
@ -33,7 +33,7 @@ def main(args: Namespace):
|
||||
label_map = llm.llm_engine.vllm_config.model_config.hf_config.id2label
|
||||
|
||||
# Run inference
|
||||
outputs = llm.encode(prompts)
|
||||
outputs = llm.encode(prompts, pooling_task="token_classify")
|
||||
|
||||
for prompt, output in zip(prompts, outputs):
|
||||
logits = output.outputs.data
|
||||
|
||||
@ -9,10 +9,76 @@ To run this example:
|
||||
```bash
|
||||
$ torchrun --nproc-per-node=2 examples/offline_inference/torchrun_dp_example.py
|
||||
```
|
||||
|
||||
With custom parallelism settings:
|
||||
```bash
|
||||
$ torchrun --nproc-per-node=8 examples/offline_inference/torchrun_dp_example.py \
|
||||
--tp-size=2 --pp-size=1 --dp-size=4 --enable-ep
|
||||
```
|
||||
"""
|
||||
|
||||
import argparse
|
||||
|
||||
from vllm import LLM, SamplingParams
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Data-parallel inference with torchrun"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--tp-size",
|
||||
type=int,
|
||||
default=1,
|
||||
help="Tensor parallel size (default: 1)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--pp-size",
|
||||
type=int,
|
||||
default=1,
|
||||
help="Pipeline parallel size (default: 1)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--dp-size",
|
||||
type=int,
|
||||
default=2,
|
||||
help="Data parallel size (default: 2)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--enable-ep",
|
||||
action="store_true",
|
||||
help="Enable expert parallel (default: False)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--model",
|
||||
type=str,
|
||||
default="microsoft/Phi-mini-MoE-instruct",
|
||||
help="Model name or path (default: microsoft/Phi-mini-MoE-instruct)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--max-model-len",
|
||||
type=int,
|
||||
default=4096,
|
||||
help="Maximum model length (default: 4096)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--gpu-memory-utilization",
|
||||
type=float,
|
||||
default=0.6,
|
||||
help="GPU memory utilization (default: 0.6)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--seed",
|
||||
type=int,
|
||||
default=1,
|
||||
help="Random seed (default: 1)",
|
||||
)
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
args = parse_args()
|
||||
|
||||
|
||||
# Create prompts, the same across all ranks
|
||||
prompts = [
|
||||
"Hello, my name is",
|
||||
@ -30,15 +96,15 @@ sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
|
||||
# all ranks have the same random seed, so that sampling can be
|
||||
# deterministic across ranks.
|
||||
llm = LLM(
|
||||
model="microsoft/Phi-mini-MoE-instruct",
|
||||
tensor_parallel_size=1,
|
||||
data_parallel_size=2,
|
||||
pipeline_parallel_size=1,
|
||||
enable_expert_parallel=False,
|
||||
model=args.model,
|
||||
tensor_parallel_size=args.tp_size,
|
||||
data_parallel_size=args.dp_size,
|
||||
pipeline_parallel_size=args.pp_size,
|
||||
enable_expert_parallel=args.enable_ep,
|
||||
distributed_executor_backend="external_launcher",
|
||||
max_model_len=4096,
|
||||
gpu_memory_utilization=0.6,
|
||||
seed=1,
|
||||
max_model_len=args.max_model_len,
|
||||
gpu_memory_utilization=args.gpu_memory_utilization,
|
||||
seed=args.seed,
|
||||
)
|
||||
|
||||
dp_rank = llm.llm_engine.vllm_config.parallel_config.data_parallel_rank
|
||||
|
||||
@ -3,65 +3,95 @@
|
||||
## Cohere rerank usage
|
||||
|
||||
```bash
|
||||
# vllm serve BAAI/bge-reranker-base
|
||||
python examples/online_serving/pooling/cohere_rerank_client.py
|
||||
```
|
||||
|
||||
## Embedding requests base64 encoding_format usage
|
||||
|
||||
```bash
|
||||
# vllm serve intfloat/e5-small
|
||||
python examples/online_serving/pooling/embedding_requests_base64_client.py
|
||||
```
|
||||
|
||||
## Embedding requests bytes encoding_format usage
|
||||
|
||||
```bash
|
||||
# vllm serve intfloat/e5-small
|
||||
python examples/online_serving/pooling/embedding_requests_bytes_client.py
|
||||
```
|
||||
|
||||
## Jinaai rerank usage
|
||||
|
||||
```bash
|
||||
# vllm serve BAAI/bge-reranker-base
|
||||
python examples/online_serving/pooling/jinaai_rerank_client.py
|
||||
```
|
||||
|
||||
## Multi vector retrieval usage
|
||||
|
||||
```bash
|
||||
# vllm serve BAAI/bge-m3
|
||||
python examples/online_serving/pooling/multi_vector_retrieval_client.py
|
||||
```
|
||||
|
||||
## Named Entity Recognition (NER) usage
|
||||
|
||||
```bash
|
||||
# vllm serve boltuix/NeuroBERT-NER
|
||||
python examples/online_serving/pooling/ner_client.py
|
||||
```
|
||||
|
||||
## Openai chat embedding for multimodal usage
|
||||
## OpenAI chat embedding for multimodal usage
|
||||
|
||||
```bash
|
||||
python examples/online_serving/pooling/openai_chat_embedding_client_for_multimodal.py
|
||||
```
|
||||
|
||||
## Openai classification usage
|
||||
## OpenAI classification usage
|
||||
|
||||
```bash
|
||||
# vllm serve jason9693/Qwen2.5-1.5B-apeach
|
||||
python examples/online_serving/pooling/openai_classification_client.py
|
||||
```
|
||||
|
||||
## Openai embedding usage
|
||||
## OpenAI cross_encoder score usage
|
||||
|
||||
```bash
|
||||
# vllm serve BAAI/bge-reranker-v2-m3
|
||||
python examples/online_serving/pooling/openai_cross_encoder_score.py
|
||||
```
|
||||
|
||||
## OpenAI cross_encoder score for multimodal usage
|
||||
|
||||
```bash
|
||||
# vllm serve jinaai/jina-reranker-m0
|
||||
python examples/online_serving/pooling/openai_cross_encoder_score_for_multimodal.py
|
||||
```
|
||||
|
||||
## OpenAI embedding usage
|
||||
|
||||
```bash
|
||||
# vllm serve intfloat/e5-small
|
||||
python examples/online_serving/pooling/openai_embedding_client.py
|
||||
```
|
||||
|
||||
## Openai embedding matryoshka dimensions usage
|
||||
## OpenAI embedding matryoshka dimensions usage
|
||||
|
||||
```bash
|
||||
# vllm serve jinaai/jina-embeddings-v3 --trust-remote-code
|
||||
python examples/online_serving/pooling/openai_embedding_matryoshka_fy.py
|
||||
```
|
||||
|
||||
## Openai pooling usage
|
||||
## OpenAI pooling usage
|
||||
|
||||
```bash
|
||||
# vllm serve internlm/internlm2-1_8b-reward --trust-remote-code
|
||||
python examples/online_serving/pooling/openai_pooling_client.py
|
||||
```
|
||||
|
||||
## Online Prithvi Geospatial MAE usage
|
||||
|
||||
```bash
|
||||
python examples/online_serving/pooling/prithvi_geospatial_mae.py
|
||||
```
|
||||
|
||||
@ -9,9 +9,7 @@ torch==2.9.0
|
||||
torchaudio==2.9.0
|
||||
# These must be updated alongside torch
|
||||
torchvision==0.24.0 # Required for phi3v processor. See https://github.com/pytorch/vision?tab=readme-ov-file#installation for corresponding version
|
||||
# https://github.com/facebookresearch/xformers/releases/tag/v0.0.32.post1
|
||||
# xformers==0.0.32.post1; platform_system == 'Linux' and platform_machine == 'x86_64' # Requires PyTorch >= 2.8
|
||||
# Build from https://github.com/facebookresearch/xformers/releases/tag/v0.0.32.post1
|
||||
xformers==0.0.33+5d4b92a5.d20251029; platform_system == 'Linux' and platform_machine == 'x86_64' # Requires PyTorch >= 2.9
|
||||
# FlashInfer should be updated together with the Dockerfile
|
||||
flashinfer-python==0.4.1
|
||||
# Triton Kernels are needed for mxfp4 fused moe. (Should be updated alongside torch)
|
||||
triton_kernels @ git+https://github.com/triton-lang/triton.git@v3.5.0#subdirectory=python/triton_kernels
|
||||
|
||||
@ -13,6 +13,8 @@ ruff
|
||||
# Required for argparse hook only
|
||||
-f https://download.pytorch.org/whl/cpu
|
||||
cachetools
|
||||
cloudpickle
|
||||
py-cpuinfo
|
||||
msgspec
|
||||
pydantic
|
||||
torch
|
||||
|
||||
@ -42,6 +42,6 @@ tritonclient==2.51.0
|
||||
|
||||
numba == 0.61.2 # Required for N-gram speculative decoding
|
||||
numpy
|
||||
runai-model-streamer[s3,gcs]==0.14.0
|
||||
runai-model-streamer[s3,gcs]==0.15.0
|
||||
fastsafetensors>=0.1.10
|
||||
pydantic>=2.12 # 2.11 leads to error on python 3.13
|
||||
|
||||
@ -12,6 +12,6 @@ tensorizer==2.10.1
|
||||
packaging>=24.2
|
||||
setuptools>=77.0.3,<80.0.0
|
||||
setuptools-scm>=8
|
||||
runai-model-streamer[s3,gcs]==0.14.0
|
||||
runai-model-streamer[s3,gcs]==0.15.0
|
||||
conch-triton-kernels==1.2.1
|
||||
timm>=1.0.17
|
||||
|
||||
@ -50,7 +50,7 @@ tritonclient==2.51.0
|
||||
|
||||
numba == 0.61.2 # Required for N-gram speculative decoding
|
||||
numpy
|
||||
runai-model-streamer[s3,gcs]==0.14.0
|
||||
runai-model-streamer[s3,gcs]==0.15.0
|
||||
fastsafetensors>=0.1.10
|
||||
pydantic>=2.12 # 2.11 leads to error on python 3.13
|
||||
decord==0.6.0
|
||||
|
||||
@ -965,11 +965,11 @@ rsa==4.9.1
|
||||
# via google-auth
|
||||
rtree==1.4.0
|
||||
# via torchgeo
|
||||
runai-model-streamer==0.14.0
|
||||
runai-model-streamer==0.15.0
|
||||
# via -r requirements/test.in
|
||||
runai-model-streamer-gcs==0.14.0
|
||||
runai-model-streamer-gcs==0.15.0
|
||||
# via runai-model-streamer
|
||||
runai-model-streamer-s3==0.14.0
|
||||
runai-model-streamer-s3==0.15.0
|
||||
# via runai-model-streamer
|
||||
s3transfer==0.10.3
|
||||
# via boto3
|
||||
|
||||
@ -15,4 +15,4 @@ torchaudio
|
||||
torchvision
|
||||
--extra-index-url=https://download.pytorch.org/whl/xpu
|
||||
|
||||
intel-extension-for-pytorch @ https://intel-extension-for-pytorch.s3.us-east-1.amazonaws.com/ipex_dev/xpu/intel_extension_for_pytorch-2.8.10.post0%2Bxpu-cp312-cp312-linux_x86_64.whl
|
||||
intel-extension-for-pytorch @ https://intel-extension-for-pytorch.s3.us-east-1.amazonaws.com/ipex_dev/xpu/intel_extension_for_pytorch-2.8.10.post1%2Bxpu-cp312-cp312-linux_x86_64.whl
|
||||
|
||||
setup.py (4 changes)
@ -709,10 +709,10 @@ setup(
|
||||
ext_modules=ext_modules,
|
||||
install_requires=get_requirements(),
|
||||
extras_require={
|
||||
"bench": ["pandas", "datasets"],
|
||||
"bench": ["pandas", "matplotlib", "seaborn", "datasets"],
|
||||
"tensorizer": ["tensorizer==2.10.1"],
|
||||
"fastsafetensors": ["fastsafetensors >= 0.1.10"],
|
||||
"runai": ["runai-model-streamer[s3,gcs] >= 0.14.0"],
|
||||
"runai": ["runai-model-streamer[s3,gcs] >= 0.15.0"],
|
||||
"audio": [
|
||||
"librosa",
|
||||
"soundfile",
|
||||
|
||||
@ -1,10 +1,12 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
import asyncio
|
||||
|
||||
import pytest
|
||||
import torch
|
||||
|
||||
from vllm import LLM, SamplingParams
|
||||
from vllm import LLM, AsyncEngineArgs, AsyncLLMEngine, SamplingParams
|
||||
from vllm.device_allocator.cumem import CuMemAllocator
|
||||
from vllm.utils.mem_constants import GiB_bytes
|
||||
|
||||
@ -201,3 +203,42 @@ def test_deep_sleep():
|
||||
|
||||
# cmp output
|
||||
assert output[0].outputs[0].text == output2[0].outputs[0].text
|
||||
|
||||
|
||||
@create_new_process_for_each_test()
|
||||
def test_deep_sleep_async():
|
||||
async def test():
|
||||
model = "hmellor/tiny-random-LlamaForCausalLM"
|
||||
free, total = torch.cuda.mem_get_info()
|
||||
used_bytes_baseline = total - free # in case other process is running
|
||||
engine_args = AsyncEngineArgs(
|
||||
model=model,
|
||||
enable_sleep_mode=True,
|
||||
)
|
||||
|
||||
llm = AsyncLLMEngine.from_engine_args(engine_args)
|
||||
prompt = "How are you?"
|
||||
sampling_params = SamplingParams(temperature=0, max_tokens=10)
|
||||
outputs = llm.generate(prompt, sampling_params, request_id="test_request_id1")
|
||||
async for output in outputs:
|
||||
pass
|
||||
|
||||
# Put the engine to deep sleep
|
||||
await llm.sleep(level=2)
|
||||
|
||||
await llm.wake_up(tags=["weights"])
|
||||
await llm.collective_rpc("reload_weights")
|
||||
free_gpu_bytes_wake_up_w, total = torch.cuda.mem_get_info()
|
||||
used_bytes = total - free_gpu_bytes_wake_up_w - used_bytes_baseline
|
||||
assert used_bytes < 4 * GiB_bytes
|
||||
|
||||
# now allocate kv cache and cuda graph memory
|
||||
await llm.wake_up(tags=["kv_cache"])
|
||||
outputs2 = llm.generate(prompt, sampling_params, request_id="test_request_id2")
|
||||
async for output2 in outputs2:
|
||||
pass
|
||||
|
||||
# cmp output
|
||||
assert output.outputs[0].text == output2.outputs[0].text
|
||||
|
||||
asyncio.run(test())
|
||||
|
||||
@ -359,3 +359,126 @@ def test_random_mm_bucket_config_not_mutated(
|
||||
assert len(mm_data) >= 1
|
||||
for it in mm_data:
|
||||
assert it.get("type") == "image_url"
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_random_mm_video_sampling(hf_tokenizer: PreTrainedTokenizerBase) -> None:
|
||||
"""Test video sampling functionality in RandomMultiModalDataset."""
|
||||
ds = RandomMultiModalDataset(random_seed=42)
|
||||
|
||||
# Test with video bucket configuration
|
||||
bucket_config = {
|
||||
(64, 64, 1): 0.3, # Images
|
||||
(64, 64, 8): 0.7, # Videos
|
||||
}
|
||||
|
||||
limit_mm_per_prompt = {"image": 2, "video": 2}
|
||||
|
||||
samples = _collect_mm_samples(
|
||||
ds,
|
||||
hf_tokenizer,
|
||||
num_requests=5,
|
||||
base_items_per_request=1,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
)
|
||||
|
||||
assert len(samples) == 5
|
||||
|
||||
# Check that we have both images and videos
|
||||
video_count = 0
|
||||
image_count = 0
|
||||
|
||||
for s in samples:
|
||||
mm_data = cast(list[dict[str, Any]], s.multi_modal_data)
|
||||
assert len(mm_data) == 1
|
||||
|
||||
item = mm_data[0]
|
||||
if item.get("type") == "video_url":
|
||||
video_count += 1
|
||||
# Verify video URL format
|
||||
url = item.get("video_url", {}).get("url", "")
|
||||
assert url.startswith("data:video/mp4;base64,")
|
||||
elif item.get("type") == "image_url":
|
||||
image_count += 1
|
||||
# Verify image URL format
|
||||
url = item.get("image_url", {}).get("url", "")
|
||||
assert url.startswith("data:image/jpeg;base64,")
|
||||
|
||||
# Should have some videos due to 0.7 probability
|
||||
assert video_count > 0
|
||||
assert image_count > 0
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_random_mm_video_only_sampling(hf_tokenizer: PreTrainedTokenizerBase) -> None:
|
||||
"""Test sampling with only video buckets."""
|
||||
ds = RandomMultiModalDataset(random_seed=42)
|
||||
|
||||
bucket_config = {
|
||||
(64, 64, 8): 1.0, # Only videos
|
||||
}
|
||||
|
||||
limit_mm_per_prompt = {"image": 0, "video": 1}
|
||||
|
||||
samples = _collect_mm_samples(
|
||||
ds,
|
||||
hf_tokenizer,
|
||||
num_requests=3,
|
||||
base_items_per_request=1,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
)
|
||||
|
||||
assert len(samples) == 3
|
||||
|
||||
for s in samples:
|
||||
mm_data = cast(list[dict[str, Any]], s.multi_modal_data)
|
||||
assert len(mm_data) == 1
|
||||
|
||||
item = mm_data[0]
|
||||
assert item.get("type") == "video_url"
|
||||
url = item.get("video_url", {}).get("url", "")
|
||||
assert url.startswith("data:video/mp4;base64,")
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_random_mm_video_deterministic_sampling(
|
||||
hf_tokenizer: PreTrainedTokenizerBase,
|
||||
) -> None:
|
||||
"""Test that video sampling is deterministic with same seed."""
|
||||
seed = 123
|
||||
ds_a = RandomMultiModalDataset(random_seed=seed)
|
||||
ds_b = RandomMultiModalDataset(random_seed=seed)
|
||||
|
||||
bucket_config = {
|
||||
(64, 64, 8): 1.0, # Only videos
|
||||
}
|
||||
|
||||
limit_mm_per_prompt = {"image": 0, "video": 1}
|
||||
|
||||
a = _collect_mm_samples(
|
||||
ds_a,
|
||||
hf_tokenizer,
|
||||
num_requests=3,
|
||||
base_items_per_request=1,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
)
|
||||
|
||||
b = _collect_mm_samples(
|
||||
ds_b,
|
||||
hf_tokenizer,
|
||||
num_requests=3,
|
||||
base_items_per_request=1,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
)
|
||||
|
||||
fa = [_mm_fingerprint_sample(s) for s in a]
|
||||
fb = [_mm_fingerprint_sample(s) for s in b]
|
||||
assert fa == fb
|
||||
|
||||
tests/benchmarks/test_random_multimodal_dataset_video.py (new file, 398 lines)
@ -0,0 +1,398 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
|
||||
import base64
|
||||
import os
|
||||
from tempfile import NamedTemporaryFile
|
||||
from typing import Any, cast
|
||||
|
||||
import cv2
|
||||
import pytest
|
||||
from transformers import AutoTokenizer, PreTrainedTokenizerBase
|
||||
|
||||
from vllm.benchmarks.datasets import RandomMultiModalDataset, SampleRequest
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def hf_tokenizer() -> PreTrainedTokenizerBase:
|
||||
"""Use a small, commonly available tokenizer."""
|
||||
return AutoTokenizer.from_pretrained("gpt2")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def video_dataset() -> RandomMultiModalDataset:
|
||||
"""Create a RandomMultiModalDataset instance for testing."""
|
||||
return RandomMultiModalDataset(random_seed=42)
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_generate_synthetic_video_different_seeds():
|
||||
"""Test that different seeds produce different videos."""
|
||||
dataset1 = RandomMultiModalDataset(random_seed=123)
|
||||
dataset2 = RandomMultiModalDataset(random_seed=456)
|
||||
|
||||
width, height, num_frames = 64, 48, 8
|
||||
|
||||
video1 = dataset1.generate_synthetic_video(width, height, num_frames)
|
||||
video2 = dataset2.generate_synthetic_video(width, height, num_frames)
|
||||
|
||||
# Videos should be different due to different seeds
|
||||
assert video1["bytes"] != video2["bytes"]
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_map_config_to_modality(video_dataset: RandomMultiModalDataset):
|
||||
"""Test modality mapping for different configurations."""
|
||||
# Test image configuration (num_frames = 1)
|
||||
assert video_dataset.map_config_to_modality((256, 256, 1)) == "image"
|
||||
assert video_dataset.map_config_to_modality((720, 1280, 1)) == "image"
|
||||
|
||||
# Test video configurations (num_frames > 1)
|
||||
assert video_dataset.map_config_to_modality((256, 256, 8)) == "video"
|
||||
assert video_dataset.map_config_to_modality((720, 1280, 16)) == "video"
|
||||
assert video_dataset.map_config_to_modality((64, 64, 32)) == "video"
|
||||
|
||||
# Test invalid configurations
|
||||
with pytest.raises(ValueError, match="Invalid multimodal item configuration"):
|
||||
video_dataset.map_config_to_modality((256, 256, 0))
|
||||
|
||||
with pytest.raises(ValueError, match="Invalid multimodal item configuration"):
|
||||
video_dataset.map_config_to_modality((256, 256, -1))
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_generate_mm_item_video(video_dataset: RandomMultiModalDataset):
|
||||
"""Test generating multimodal items for video configurations."""
|
||||
# Test video item generation
|
||||
video_config = (64, 48, 8) # height, width, num_frames
|
||||
result = video_dataset.generate_mm_item(video_config)
|
||||
|
||||
# Check the result structure matches OpenAI API format
|
||||
assert isinstance(result, dict)
|
||||
assert result["type"] == "video_url"
|
||||
assert "video_url" in result
|
||||
assert "url" in result["video_url"]
|
||||
|
||||
# Check that the URL is a data URL with base64 encoded video
|
||||
url = result["video_url"]["url"]
|
||||
assert url.startswith("data:video/mp4;base64,")
|
||||
|
||||
# Decode and verify the video content
|
||||
base64_data = url.split(",")[1]
|
||||
video_bytes = base64.b64decode(base64_data)
|
||||
assert len(video_bytes) > 0
|
||||
|
||||
# Verify the video can be decoded
|
||||
with NamedTemporaryFile(suffix=".mp4", delete=False) as temp_file:
|
||||
temp_path = temp_file.name
|
||||
temp_file.write(video_bytes)
|
||||
|
||||
try:
|
||||
cap = cv2.VideoCapture(temp_path)
|
||||
assert cap.isOpened()
|
||||
|
||||
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
||||
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
||||
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
||||
|
||||
assert frame_count == 8
|
||||
assert frame_width == 48
|
||||
assert frame_height == 64
|
||||
|
||||
cap.release()
|
||||
finally:
|
||||
if os.path.exists(temp_path):
|
||||
os.unlink(temp_path)
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_generate_mm_item_image(video_dataset: RandomMultiModalDataset):
|
||||
"""Test generating multimodal items for image configurations."""
|
||||
# Test image item generation
|
||||
image_config = (64, 48, 1) # height, width, num_frames=1
|
||||
result = video_dataset.generate_mm_item(image_config)
|
||||
|
||||
# Check the result structure matches OpenAI API format
|
||||
assert isinstance(result, dict)
|
||||
assert result["type"] == "image_url"
|
||||
assert "image_url" in result
|
||||
assert "url" in result["image_url"]
|
||||
|
||||
# Check that the URL is a data URL with base64 encoded image
|
||||
url = result["image_url"]["url"]
|
||||
assert url.startswith("data:image/jpeg;base64,")
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_generate_mm_item_invalid_config(video_dataset: RandomMultiModalDataset):
|
||||
"""Test error handling for invalid configurations."""
|
||||
with pytest.raises(ValueError, match="Invalid multimodal item configuration"):
|
||||
video_dataset.generate_mm_item((256, 256, 0))
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_sample_with_video_buckets(
|
||||
video_dataset: RandomMultiModalDataset, hf_tokenizer: PreTrainedTokenizerBase
|
||||
):
|
||||
"""Test sampling with video bucket configurations."""
|
||||
# Configure bucket with video probability > 0
|
||||
bucket_config = {
|
||||
(64, 64, 1): 0.3, # Images
|
||||
(64, 64, 8): 0.7, # Videos
|
||||
}
|
||||
|
||||
limit_mm_per_prompt = {"image": 5, "video": 3}
|
||||
|
||||
samples = video_dataset.sample(
|
||||
tokenizer=hf_tokenizer,
|
||||
num_requests=5,
|
||||
base_items_per_request=2,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
input_len=20,
|
||||
output_len=5,
|
||||
)
|
||||
|
||||
assert len(samples) == 5
|
||||
|
||||
# Check that samples contain both images and videos
|
||||
video_count = 0
|
||||
image_count = 0
|
||||
|
||||
for sample in samples:
|
||||
assert isinstance(sample, SampleRequest)
|
||||
assert sample.multi_modal_data is not None
|
||||
assert isinstance(sample.multi_modal_data, list)
|
||||
|
||||
mm_data = cast(list[dict[str, Any]], sample.multi_modal_data)
|
||||
assert len(mm_data) == 2 # base_items_per_request
|
||||
|
||||
for item in mm_data:
|
||||
if item["type"] == "video_url":
|
||||
video_count += 1
|
||||
# Verify video URL format
|
||||
url = item["video_url"]["url"]
|
||||
assert url.startswith("data:video/mp4;base64,")
|
||||
elif item["type"] == "image_url":
|
||||
image_count += 1
|
||||
# Verify image URL format
|
||||
url = item["image_url"]["url"]
|
||||
assert url.startswith("data:image/jpeg;base64,")
|
||||
|
||||
# Should have some videos due to 0.7 probability
|
||||
assert video_count > 0
|
||||
assert image_count > 0
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_sample_video_only_buckets(
|
||||
video_dataset: RandomMultiModalDataset, hf_tokenizer: PreTrainedTokenizerBase
|
||||
):
|
||||
"""Test sampling with only video buckets."""
|
||||
bucket_config = {
|
||||
(64, 64, 8): 1.0, # Only videos
|
||||
}
|
||||
|
||||
limit_mm_per_prompt = {"image": 0, "video": 2}
|
||||
|
||||
samples = video_dataset.sample(
|
||||
tokenizer=hf_tokenizer,
|
||||
num_requests=3,
|
||||
base_items_per_request=1,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
input_len=20,
|
||||
output_len=5,
|
||||
)
|
||||
|
||||
assert len(samples) == 3
|
||||
|
||||
for sample in samples:
|
||||
assert isinstance(sample, SampleRequest)
|
||||
assert sample.multi_modal_data is not None
|
||||
assert isinstance(sample.multi_modal_data, list)
|
||||
|
||||
mm_data = cast(list[dict[str, Any]], sample.multi_modal_data)
|
||||
assert len(mm_data) == 1
|
||||
|
||||
item = mm_data[0]
|
||||
assert item["type"] == "video_url"
|
||||
url = item["video_url"]["url"]
|
||||
assert url.startswith("data:video/mp4;base64,")
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_sample_respects_video_limits(
|
||||
video_dataset: RandomMultiModalDataset, hf_tokenizer: PreTrainedTokenizerBase
|
||||
):
|
||||
"""Test that sampling respects video limits per prompt."""
|
||||
bucket_config = {
|
||||
(64, 64, 8): 1.0, # Only videos
|
||||
}
|
||||
|
||||
# Set very low video limit
|
||||
limit_mm_per_prompt = {"image": 0, "video": 1}
|
||||
|
||||
samples = video_dataset.sample(
|
||||
tokenizer=hf_tokenizer,
|
||||
num_requests=3,
|
||||
base_items_per_request=1,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
input_len=20,
|
||||
output_len=5,
|
||||
)
|
||||
|
||||
assert len(samples) == 3
|
||||
|
||||
for sample in samples:
|
||||
mm_data = cast(list[dict[str, Any]], sample.multi_modal_data)
|
||||
assert len(mm_data) <= 1 # Should respect video limit
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_sample_mixed_buckets_with_zero_probability(
|
||||
video_dataset: RandomMultiModalDataset, hf_tokenizer: PreTrainedTokenizerBase
|
||||
):
|
||||
"""Test sampling with mixed buckets including zero probability entries."""
|
||||
bucket_config = {
|
||||
(64, 64, 1): 0.5, # Images
|
||||
(64, 64, 8): 0.5, # Videos
|
||||
(128, 128, 16): 0.0, # Zero probability videos (should be ignored)
|
||||
}
|
||||
|
||||
limit_mm_per_prompt = {"image": 2, "video": 2}
|
||||
|
||||
samples = video_dataset.sample(
|
||||
tokenizer=hf_tokenizer,
|
||||
num_requests=4,
|
||||
base_items_per_request=2,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
input_len=20,
|
||||
output_len=5,
|
||||
)
|
||||
|
||||
assert len(samples) == 4
|
||||
|
||||
# Should only see 64x64 videos, not 128x128 videos
|
||||
for sample in samples:
|
||||
mm_data = cast(list[dict[str, Any]], sample.multi_modal_data)
|
||||
for item in mm_data:
|
||||
if item["type"] == "video_url":
|
||||
# Decode video to verify dimensions
|
||||
url = item["video_url"]["url"]
|
||||
base64_data = url.split(",")[1]
|
||||
video_bytes = base64.b64decode(base64_data)
|
||||
|
||||
with NamedTemporaryFile(suffix=".mp4", delete=False) as temp_file: # noqa
|
||||
temp_path = temp_file.name
|
||||
temp_file.write(video_bytes)
|
||||
|
||||
try:
|
||||
cap = cv2.VideoCapture(temp_path)
|
||||
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
|
||||
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
|
||||
cap.release()
|
||||
|
||||
# Should be 64x64, not 128x128
|
||||
assert frame_width == 64
|
||||
assert frame_height == 64
|
||||
finally:
|
||||
if os.path.exists(temp_path):
|
||||
os.unlink(temp_path)
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_sample_deterministic_with_videos(hf_tokenizer: PreTrainedTokenizerBase):
|
||||
"""Test that sampling with videos is deterministic with same seed."""
|
||||
dataset1 = RandomMultiModalDataset(random_seed=123)
|
||||
dataset2 = RandomMultiModalDataset(random_seed=123)
|
||||
|
||||
bucket_config = {
|
||||
(64, 64, 1): 0.3, # Images
|
||||
(64, 64, 8): 0.7, # Videos
|
||||
}
|
||||
|
||||
limit_mm_per_prompt = {"image": 2, "video": 2}
|
||||
|
||||
samples1 = dataset1.sample(
|
||||
tokenizer=hf_tokenizer,
|
||||
num_requests=3,
|
||||
base_items_per_request=1,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
input_len=20,
|
||||
output_len=5,
|
||||
)
|
||||
|
||||
samples2 = dataset2.sample(
|
||||
tokenizer=hf_tokenizer,
|
||||
num_requests=3,
|
||||
base_items_per_request=1,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
input_len=20,
|
||||
output_len=5,
|
||||
)
|
||||
|
||||
assert len(samples1) == len(samples2)
|
||||
|
||||
# Compare multimodal data
|
||||
for s1, s2 in zip(samples1, samples2):
|
||||
assert s1.multi_modal_data == s2.multi_modal_data
|
||||
|
||||
|
||||
@pytest.mark.benchmark
|
||||
def test_sample_different_seeds_produce_different_videos(
|
||||
hf_tokenizer: PreTrainedTokenizerBase,
|
||||
):
|
||||
"""Test that different seeds produce different video content."""
|
||||
dataset1 = RandomMultiModalDataset(random_seed=123)
|
||||
dataset2 = RandomMultiModalDataset(random_seed=456)
|
||||
|
||||
bucket_config = {
|
||||
(64, 64, 8): 1.0, # Only videos
|
||||
}
|
||||
|
||||
limit_mm_per_prompt = {"image": 0, "video": 1}
|
||||
|
||||
samples1 = dataset1.sample(
|
||||
tokenizer=hf_tokenizer,
|
||||
num_requests=2,
|
||||
base_items_per_request=1,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
input_len=20,
|
||||
output_len=5,
|
||||
)
|
||||
|
||||
samples2 = dataset2.sample(
|
||||
tokenizer=hf_tokenizer,
|
||||
num_requests=2,
|
||||
base_items_per_request=1,
|
||||
num_mm_items_range_ratio=0.0,
|
||||
limit_mm_per_prompt=limit_mm_per_prompt,
|
||||
bucket_config=bucket_config,
|
||||
input_len=20,
|
||||
output_len=5,
|
||||
)
|
||||
|
||||
# Video content should be different
|
||||
for s1, s2 in zip(samples1, samples2):
|
||||
mm_data1 = cast(list[dict[str, Any]], s1.multi_modal_data)
|
||||
mm_data2 = cast(list[dict[str, Any]], s2.multi_modal_data)
|
||||
|
||||
assert len(mm_data1) == len(mm_data2) == 1
|
||||
|
||||
url1 = mm_data1[0]["video_url"]["url"]
|
||||
url2 = mm_data2[0]["video_url"]["url"]
|
||||
|
||||
assert url1 != url2 # Different video content
|
||||
tests/compile/test_multimodal_compile.py (new file, 36 lines)
@ -0,0 +1,36 @@
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
|
||||
import pytest
|
||||
|
||||
from vllm.compilation.counter import compilation_counter
|
||||
from vllm.config.compilation import CompilationMode
|
||||
|
||||
|
||||
# forked needed to workaround https://github.com/vllm-project/vllm/issues/21073
|
||||
@pytest.mark.forked
|
||||
def test_qwen2_5_vl_compilation(vllm_runner, monkeypatch):
|
||||
"""Test that Qwen2.5-VL vision submodules are compiled.
|
||||
|
||||
This test verifies that the 3 vision submodules (Qwen2_5_VisionPatchEmbed,
|
||||
Qwen2_5_VisionBlock, and Qwen2_5_VisionPatchMerger) are properly tagged
|
||||
for compilation by checking that num_models_seen increases by at least 3.
|
||||
"""
|
||||
# Disable multiprocessing so that the counter is in the same process
|
||||
monkeypatch.setenv("VLLM_ENABLE_V1_MULTIPROCESSING", "0")
|
||||
|
||||
with (
|
||||
# NOTE: Qwen2.5-VL has 35 models in total - the LLM backend
|
||||
# Vision Patch Embed, Vision Patch Merger, and then 32 Vision Blocks
|
||||
# (one for each layer) - in the future, we should fix vLLM compilation
|
||||
# logic to handle this case and only compile the Vision submodules once
|
||||
# and reuse the compiled code for all layers
|
||||
# See https://github.com/vllm-project/vllm/issues/27590
|
||||
compilation_counter.expect(num_models_seen=35),
|
||||
vllm_runner(
|
||||
"Qwen/Qwen2.5-VL-3B-Instruct",
|
||||
max_model_len=2048,
|
||||
gpu_memory_utilization=0.7,
|
||||
compilation_config={"mode": CompilationMode.VLLM_COMPILE},
|
||||
) as _,
|
||||
):
|
||||
pass
|
||||
@ -831,8 +831,9 @@ class VllmRunner:
|
||||
images: PromptImageInput | None = None,
|
||||
videos: PromptVideoInput | None = None,
|
||||
audios: PromptAudioInput | None = None,
|
||||
return_logprobs: bool = False,
|
||||
**kwargs: Any,
|
||||
) -> list[tuple[list[list[int]], list[str]]]:
|
||||
) -> list[tuple[list[list[int]], list[str]]] | tuple[list, list]:
|
||||
inputs = self.get_inputs(prompts, images=images, videos=videos, audios=audios)
|
||||
|
||||
req_outputs = self.llm.generate(
|
||||
@ -840,18 +841,23 @@ class VllmRunner:
|
||||
)
|
||||
|
||||
outputs: list[tuple[list[list[int]], list[str]]] = []
|
||||
logprobs = []
|
||||
for req_output in req_outputs:
|
||||
prompt_str = req_output.prompt
|
||||
prompt_ids = req_output.prompt_token_ids
|
||||
req_sample_output_ids: list[list[int]] = []
|
||||
req_sample_output_strs: list[str] = []
|
||||
req_logprobs = []
|
||||
for sample in req_output.outputs:
|
||||
output_str = sample.text
|
||||
output_ids = list(sample.token_ids)
|
||||
req_sample_output_ids.append(prompt_ids + output_ids)
|
||||
req_sample_output_strs.append((prompt_str or "") + output_str)
|
||||
if sample.logprobs:
|
||||
req_logprobs.extend(sample.logprobs)
|
||||
outputs.append((req_sample_output_ids, req_sample_output_strs))
|
||||
return outputs
|
||||
logprobs.append(req_logprobs)
|
||||
return outputs if not return_logprobs else (outputs, logprobs)
|
||||
|
||||
@staticmethod
|
||||
def _final_steps_generate_w_logprobs(
|
||||
|
||||
@ -4,6 +4,8 @@
|
||||
import traceback
|
||||
import unittest
|
||||
|
||||
import numpy as np
|
||||
|
||||
from vllm.distributed.device_communicators.shm_object_storage import (
|
||||
SingleWriterShmRingBuffer,
|
||||
)
|
||||
@ -113,6 +115,69 @@ class TestSingleWriterShmRingBuffer(unittest.TestCase):
|
||||
self.assertEqual(self.ring_buffer.data_buffer_start, 0)
|
||||
self.assertEqual(self.ring_buffer.data_buffer_end, 0)
|
||||
|
||||
def test_allocation_cycles(self):
|
||||
buffer_size = 100
|
||||
ring = SingleWriterShmRingBuffer(data_buffer_size=buffer_size, create=True)
|
||||
|
||||
# tracking allocations for assertions
|
||||
allocated_bitmap = np.zeros(
|
||||
(buffer_size,), dtype=np.bool_
|
||||
) # addr -> is_allocated
|
||||
allocation_map = dict() # monotonic_id -> (addr, size)
|
||||
|
||||
def count_allocated(bitmap) -> int:
|
||||
return np.sum(bitmap).item()
|
||||
|
||||
def is_free_fn(a, b) -> bool:
|
||||
return True
|
||||
|
||||
def mark_allocated_with_assertion(id, addr, size):
|
||||
addr = addr % buffer_size
|
||||
self.assertEqual(count_allocated(allocated_bitmap[addr : addr + size]), 0)
|
||||
|
||||
allocated_bitmap[addr : addr + size] = True
|
||||
allocation_map[id] = (addr, size)
|
||||
|
||||
def mark_freed_with_assertion(id):
|
||||
self.assertTrue(id in allocation_map)
|
||||
|
||||
addr, size = allocation_map.pop(id)
|
||||
addr = addr % buffer_size
|
||||
self.assertEqual(
|
||||
count_allocated(allocated_bitmap[addr : addr + size]), size
|
||||
)
|
||||
|
||||
allocated_bitmap[addr : addr + size] = False
|
||||
|
||||
def ring_free(free_size=None):
|
||||
freed_ids = ring.free_buf(is_free_fn, free_size)
|
||||
for freed_id in freed_ids:
|
||||
mark_freed_with_assertion(freed_id)
|
||||
|
||||
def ring_allocate(allocate_size):
|
||||
allocate_size_with_md = allocate_size + ring.MD_SIZE
|
||||
try:
|
||||
addr, monotonic_id = ring.allocate_buf(allocate_size)
|
||||
mark_allocated_with_assertion(monotonic_id, addr, allocate_size_with_md)
|
||||
except MemoryError:
|
||||
# free 2x size for enough space if wrapping happened
|
||||
ring_free(allocate_size_with_md * 2)
|
||||
|
||||
# retry allocating
|
||||
addr, monotonic_id = ring.allocate_buf(allocate_size)
|
||||
mark_allocated_with_assertion(monotonic_id, addr, allocate_size_with_md)
|
||||
|
||||
# 1. allocation & free cycles
|
||||
for _ in range(33):
|
||||
# will consume 2 + 8 = 10 bytes per allocation
|
||||
ring_allocate(2)
|
||||
|
||||
# 2. free all allocations
|
||||
ring_free()
|
||||
|
||||
# 3. try allocate the largest possible buffer
|
||||
ring_allocate(buffer_size - ring.MD_SIZE)
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function demonstrating usage and running tests"""
|
||||
|
||||
@ -71,6 +71,26 @@ def test_multiple_sampling_params(llm: LLM):
|
||||
assert len(PROMPTS) == len(outputs)
|
||||
|
||||
|
||||
def test_multiple_priority(llm: LLM):
|
||||
# Generate works when priority is None
|
||||
outputs = llm.generate(PROMPTS, sampling_params=None, priority=None)
|
||||
assert len(PROMPTS) == len(outputs)
|
||||
|
||||
# Generate works when length of priority is same as the len(PROMPTS)
|
||||
outputs = llm.generate(PROMPTS, sampling_params=None, priority=[0] * len(PROMPTS))
|
||||
assert len(PROMPTS) == len(outputs)
|
||||
|
||||
# Exception raised, if the length of priority does not match the length of prompts
|
||||
with pytest.raises(ValueError):
|
||||
outputs = llm.generate(
|
||||
PROMPTS, sampling_params=None, priority=[0] * (len(PROMPTS) - 1)
|
||||
)
|
||||
|
||||
# Exception raised, if the priority list is empty
|
||||
with pytest.raises(ValueError):
|
||||
outputs = llm.generate(PROMPTS, sampling_params=None, priority=[])
|
||||
|
||||
|
||||
def test_max_model_len():
|
||||
max_model_len = 20
|
||||
llm = LLM(
|
||||
|
||||
@ -29,7 +29,7 @@ def multimodal_server(): # noqa: F811
|
||||
"--dtype",
|
||||
"half",
|
||||
"--max-model-len",
|
||||
"12800",
|
||||
"4096",
|
||||
"--enforce-eager",
|
||||
# lora config below
|
||||
"--enable-lora",
|
||||
|
||||
@ -26,6 +26,8 @@ def mcp_disabled_server(monkeypatch_module: pytest.MonkeyPatch):
|
||||
with monkeypatch_module.context() as m:
|
||||
m.setenv("VLLM_ENABLE_RESPONSES_API_STORE", "1")
|
||||
m.setenv("PYTHON_EXECUTION_BACKEND", "dangerously_use_uv")
|
||||
# Helps the model follow instructions better
|
||||
m.setenv("VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS", "1")
|
||||
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
|
||||
yield remote_server
|
||||
|
||||
@ -37,7 +39,9 @@ def mcp_enabled_server(monkeypatch_module: pytest.MonkeyPatch):
|
||||
with monkeypatch_module.context() as m:
|
||||
m.setenv("VLLM_ENABLE_RESPONSES_API_STORE", "1")
|
||||
m.setenv("PYTHON_EXECUTION_BACKEND", "dangerously_use_uv")
|
||||
m.setenv("GPT_OSS_SYSTEM_TOOL_MCP_LABELS", "code_interpreter,container")
|
||||
m.setenv("VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS", "code_interpreter,container")
|
||||
# Helps the model follow instructions better
|
||||
m.setenv("VLLM_GPT_OSS_HARMONY_SYSTEM_INSTRUCTIONS", "1")
|
||||
with RemoteOpenAIServer(MODEL_NAME, args) as remote_server:
|
||||
yield remote_server
|
||||
|
||||
@ -56,18 +60,15 @@ async def mcp_enabled_client(mcp_enabled_server):

@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.skip(reason="Code interpreter tool is not available in CI yet.")
async def test_mcp_tool_env_flag_enabled(mcp_enabled_client: OpenAI, model_name: str):
    response = await mcp_enabled_client.responses.create(
        model=model_name,
        # TODO: Ideally should be able to set max tool calls
        # to prevent multi-turn, but it is not currently supported
        # would speed up the test
        input=(
            "What's the first 4 digits after the decimal point of "
            "cube root of `19910212 * 20250910`? "
            "Show only the digits. The python interpreter is not stateful "
            "and you must print to see the output."
            "Execute the following code: "
            "import random; print(random.randint(1, 1000000))"
        ),
        instructions=(
            "You must use the Python tool to execute code. Never simulate execution."
        ),
        tools=[
            {
@ -77,26 +78,47 @@ async def test_mcp_tool_env_flag_enabled(mcp_enabled_client: OpenAI, model_name:
                "server_url": "http://localhost:8888",
            }
        ],
        extra_body={"enable_response_messages": True},
    )
    assert response is not None
    assert response.status == "completed"
    assert response.usage.output_tokens_details.tool_output_tokens > 0
    # Verify output messages: Tool calls and responses on analysis channel
    tool_call_found = False
    tool_response_found = False
    for message in response.output_messages:
        recipient = message.get("recipient")
        if recipient and recipient.startswith("python"):
            tool_call_found = True
            assert message.get("channel") == "analysis", (
                "Tool call should be on analysis channel"
            )
        author = message.get("author", {})
        if (
            author.get("role") == "tool"
            and author.get("name")
            and author.get("name").startswith("python")
        ):
            tool_response_found = True
            assert message.get("channel") == "analysis", (
                "Tool response should be on analysis channel"
            )

    assert tool_call_found, "Should have found at least one Python tool call"
    assert tool_response_found, "Should have found at least one Python tool response"
    for message in response.input_messages:
        assert message.get("author").get("role") != "developer", (
            "No developer messages should be present with valid mcp tool"
        )
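Note: the prompt above asks for the first four decimal digits of a cube root. As an offline sanity check (not part of the test file; the helper name and precision are arbitrary), one could compute it with Python's decimal module:

# Hedged sketch: verify the cube-root digits the prompt asks about.
from decimal import Decimal, getcontext

def first_decimal_digits_of_cbrt(n: int, digits: int = 4) -> str:
    getcontext().prec = 50  # ample precision for four decimal digits
    cbrt = Decimal(n) ** (Decimal(1) / Decimal(3))
    fractional = cbrt - int(cbrt)
    return str(fractional)[2 : 2 + digits]

print(first_decimal_digits_of_cbrt(19910212 * 20250910))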


@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
@pytest.mark.skip(reason="Code interpreter tool is not available in CI yet.")
async def test_mcp_tool_env_flag_disabled(mcp_disabled_client: OpenAI, model_name: str):
    response = await mcp_disabled_client.responses.create(
        model=model_name,
        # TODO: Ideally should be able to set max tool calls
        # to prevent multi-turn, but it is not currently supported
        # would speed up the test
        input=(
            "What's the first 4 digits after the decimal point of "
            "cube root of `19910212 * 20250910`? "
            "Show only the digits. The python interpreter is not stateful "
            "and you must print to see the output."
            "Execute the following code if the tool is present: "
            "import random; print(random.randint(1, 1000000))"
        ),
        tools=[
            {
@ -106,7 +128,34 @@ async def test_mcp_tool_env_flag_disabled(mcp_disabled_client: OpenAI, model_nam
                "server_url": "http://localhost:8888",
            }
        ],
        extra_body={"enable_response_messages": True},
    )
    assert response is not None
    assert response.status == "completed"
    assert response.usage.output_tokens_details.tool_output_tokens == 0
    # Verify output messages: No tool calls and responses
    tool_call_found = False
    tool_response_found = False
    for message in response.output_messages:
        recipient = message.get("recipient")
        if recipient and recipient.startswith("python"):
            tool_call_found = True
            assert message.get("channel") == "analysis", (
                "Tool call should be on analysis channel"
            )
        author = message.get("author", {})
        if (
            author.get("role") == "tool"
            and author.get("name")
            and author.get("name").startswith("python")
        ):
            tool_response_found = True
            assert message.get("channel") == "analysis", (
                "Tool response should be on analysis channel"
            )

    assert not tool_call_found, "Should not have a python call"
    assert not tool_response_found, "Should not have a tool response"
    for message in response.input_messages:
        assert message.get("author").get("role") != "developer", (
            "No developer messages should be present without a valid tool"
        )

@ -535,11 +535,17 @@ def get_place_to_travel():
    return "Paris"


def get_horoscope(sign):
    return f"{sign}: Next Tuesday you will befriend a baby otter."


def call_function(name, args):
    if name == "get_weather":
        return get_weather(**args)
    elif name == "get_place_to_travel":
        return get_place_to_travel()
    elif name == "get_horoscope":
        return get_horoscope(**args)
    else:
        raise ValueError(f"Unknown function: {name}")

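Note: call_function above is a plain dispatcher from a tool-call name to a local helper. A minimal usage sketch (argument values are illustrative only):

# Hedged sketch: dispatch a parsed tool call through the helper above.
result = call_function("get_horoscope", {"sign": "Aquarius"})
assert "baby otter" in result  # matches the canned string returned by get_horoscope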
@ -828,3 +834,126 @@ async def test_output_messages_enabled(client: OpenAI, model_name: str, server):
    assert response.status == "completed"
    assert len(response.input_messages) > 0
    assert len(response.output_messages) > 0


@pytest.mark.asyncio
@pytest.mark.parametrize("model_name", [MODEL_NAME])
async def test_function_call_with_previous_input_messages(
    client: OpenAI, model_name: str
):
    """Test function calling using previous_input_messages
    for multi-turn conversation with a function call"""

    # Define the get_horoscope tool
    tools = [
        {
            "type": "function",
            "name": "get_horoscope",
            "description": "Get today's horoscope for an astrological sign.",
            "parameters": {
                "type": "object",
                "properties": {
                    "sign": {"type": "string"},
                },
                "required": ["sign"],
                "additionalProperties": False,
            },
            "strict": True,
        }
    ]

    # Step 1: First call with the function tool
    stream_response = await client.responses.create(
        model=model_name,
        input="What is the horoscope for Aquarius today?",
        tools=tools,
        extra_body={"enable_response_messages": True},
        stream=True,
    )

    response = None
    async for event in stream_response:
        if event.type == "response.completed":
            response = event.response

    assert response is not None
    assert response.status == "completed"

    # Step 2: Parse the first output to find the function_call type
    function_call = None
    for item in response.output:
        if item.type == "function_call":
            function_call = item
            break

    assert function_call is not None, "Expected a function_call in the output"
    assert function_call.name == "get_horoscope"
    assert function_call.call_id is not None

    # Verify the format matches expectations
    args = json.loads(function_call.arguments)
    assert "sign" in args

    # Step 3: Call the get_horoscope function
    result = call_function(function_call.name, args)
    assert "Aquarius" in result
    assert "baby otter" in result

    # Get the input_messages and output_messages from the first response
    first_input_messages = response.input_messages
    first_output_messages = response.output_messages

    # Construct the full conversation history using previous_input_messages
    previous_messages = (
        first_input_messages
        + first_output_messages
        + [
            {
                "role": "tool",
                "name": "functions.get_horoscope",
                "content": [{"type": "text", "text": str(result)}],
            }
        ]
    )

    # Step 4: Make another responses.create() call with previous_input_messages
    stream_response_2 = await client.responses.create(
        model=model_name,
        tools=tools,
        input="",
        extra_body={
            "previous_input_messages": previous_messages,
            "enable_response_messages": True,
        },
        stream=True,
    )

    async for event in stream_response_2:
        if event.type == "response.completed":
            response_2 = event.response

    assert response_2 is not None
    assert response_2.status == "completed"
    assert response_2.output_text is not None

    # verify only one system message / developer message
    num_system_messages_input = 0
    num_developer_messages_input = 0
    num_function_call_input = 0
    for message_dict in response_2.input_messages:
        message = Message.from_dict(message_dict)
        if message.author.role == "system":
            num_system_messages_input += 1
        elif message.author.role == "developer":
            num_developer_messages_input += 1
        elif message.author.role == "tool":
            num_function_call_input += 1
    assert num_system_messages_input == 1
    assert num_developer_messages_input == 1
    assert num_function_call_input == 1

    # Verify the output makes sense - should contain information about the horoscope
    output_text = response_2.output_text.lower()
    assert (
        "aquarius" in output_text or "otter" in output_text or "tuesday" in output_text
    )

@ -27,8 +27,12 @@ def server():


@pytest.mark.asyncio
async def test_basic_completion_with_emoji(server):
@pytest.mark.parametrize("return_token_ids", [True, False, None])
async def test_basic_completion_with_emoji(server, return_token_ids: bool | None):
    """Test basic completion with emoji to verify token_ids field."""
    extra_body = None
    if return_token_ids is not None:
        extra_body = {"return_token_ids": return_token_ids}
    async with server.get_async_client() as client:
        # Test with return_token_ids enabled
        completion = await client.completions.create(
@ -37,7 +41,7 @@ async def test_basic_completion_with_emoji(server):
            max_tokens=10,
            temperature=0,
            logprobs=1,
            extra_body={"return_token_ids": True},
            extra_body=extra_body,
        )

        # Check the raw response to see the structure
@ -45,6 +49,12 @@ async def test_basic_completion_with_emoji(server):

        # Verify prompt_token_ids field is present in the completion response
        assert "prompt_token_ids" in completion_dict["choices"][0]
        if not return_token_ids:
            # If return_token_ids is False, token_ids should not be present
            assert completion_dict["choices"][0].get("token_ids") is None
            assert completion_dict["choices"][0].get("prompt_token_ids") is None
            # Skip further checks
            return
        assert isinstance(completion.choices[0].prompt_token_ids, list)

        # Check against the expected prompt token IDs

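Note: the return_token_ids flag exercised above is a vLLM-specific extension passed through the OpenAI SDK's extra_body. A minimal client-side sketch (base URL, API key, and model name are placeholders):

# Hedged sketch: request token IDs alongside a completion.
from openai import AsyncOpenAI

async def demo() -> None:
    client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
    completion = await client.completions.create(
        model="my-model",
        prompt="Hello 👋",
        max_tokens=10,
        extra_body={"return_token_ids": True},
    )
    # prompt_token_ids / token_ids are only populated when the flag is enabled
    print(completion.choices[0].prompt_token_ids)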
@ -651,3 +651,79 @@ async def test_serving_chat_did_set_correct_cache_salt(model_type):
    await serving_chat.create_chat_completion(req)
    engine_prompt = serving_chat._process_inputs.await_args_list[1].args[1]
    assert engine_prompt.get("cache_salt") == "test_salt"


@pytest.mark.asyncio
async def test_serving_chat_data_parallel_rank_extraction():
    """Test that data_parallel_rank is properly extracted from header and
    passed to engine."""
    mock_engine = MagicMock(spec=AsyncLLM)
    mock_engine.get_tokenizer.return_value = get_tokenizer(MODEL_NAME)
    mock_engine.errored = False
    mock_engine.model_config = MockModelConfig()
    mock_engine.processor = MagicMock()
    mock_engine.io_processor = MagicMock()

    # Mock the generate method to return an async generator
    async def mock_generate(*args, **kwargs):
        # Yield a fake RequestOutput
        from vllm.outputs import CompletionOutput, RequestOutput

        yield RequestOutput(
            request_id="test-request",
            prompt="test prompt",
            prompt_token_ids=[1, 2, 3],
            prompt_logprobs=None,
            outputs=[
                CompletionOutput(
                    index=0,
                    text="test response",
                    token_ids=[4, 5, 6],
                    cumulative_logprob=0.0,
                    logprobs=None,
                    finish_reason="stop",
                    stop_reason=None,
                )
            ],
            finished=True,
        )

    mock_engine.generate = AsyncMock(side_effect=mock_generate)

    serving_chat = _build_serving_chat(mock_engine)

    # Test when data_parallel_rank is present in header
    req = ChatCompletionRequest(
        model=MODEL_NAME,
        messages=[{"role": "user", "content": "what is 1+1?"}],
    )

    # Mock request with X-data-parallel-rank header
    mock_raw_request = MagicMock()
    mock_raw_request.headers = {"X-data-parallel-rank": "2"}
    mock_raw_request.state = MagicMock()

    with suppress(Exception):
        await serving_chat.create_chat_completion(req, mock_raw_request)

    # Verify that data_parallel_rank was passed to engine.generate
    assert "data_parallel_rank" in mock_engine.generate.call_args.kwargs
    assert mock_engine.generate.call_args.kwargs["data_parallel_rank"] == 2

    # Test when data_parallel_rank is not present (defaults to None)
    req_no_dp = ChatCompletionRequest(
        model=MODEL_NAME,
        messages=[{"role": "user", "content": "what is 2+2?"}],
    )

    # Mock request with no header
    mock_raw_request_no_dp = MagicMock()
    mock_raw_request_no_dp.headers = {}
    mock_raw_request_no_dp.state = MagicMock()

    with suppress(Exception):
        await serving_chat.create_chat_completion(req_no_dp, mock_raw_request_no_dp)

    # Verify that data_parallel_rank defaults to None
    assert "data_parallel_rank" in mock_engine.generate.call_args.kwargs
    assert mock_engine.generate.call_args.kwargs["data_parallel_rank"] is None

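Note: the X-data-parallel-rank header asserted on above can be supplied from a client through the OpenAI SDK's extra_headers. A minimal sketch (base URL and model name are placeholders):

# Hedged sketch: pin a chat request to a specific data-parallel rank via header.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
resp = client.chat.completions.create(
    model="my-model",
    messages=[{"role": "user", "content": "what is 1+1?"}],
    extra_headers={"X-data-parallel-rank": "2"},  # same header the test checks
)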
@ -6,10 +6,19 @@ from unittest.mock import MagicMock

import pytest
import pytest_asyncio
from openai.types.responses.tool import (
    CodeInterpreterContainerCodeInterpreterToolAuto,
    LocalShell,
    Mcp,
    Tool,
)

from vllm.entrypoints.context import ConversationContext
from vllm.entrypoints.openai.protocol import ErrorResponse, ResponsesRequest
from vllm.entrypoints.openai.serving_responses import OpenAIServingResponses
from vllm.entrypoints.openai.serving_responses import (
    OpenAIServingResponses,
    extract_tool_types,
)
from vllm.entrypoints.tool_server import ToolServer
from vllm.inputs.data import TokensPrompt as EngineTokensPrompt

@ -62,6 +71,45 @@ def mock_exit_stack():
    return MagicMock(spec=AsyncExitStack)


def test_extract_tool_types(monkeypatch: pytest.MonkeyPatch) -> None:
    tools: list[Tool] = []
    assert extract_tool_types(tools) == set()

    tools.append(LocalShell(type="local_shell"))
    assert extract_tool_types(tools) == {"local_shell"}

    tools.append(CodeInterpreterContainerCodeInterpreterToolAuto(type="auto"))
    assert extract_tool_types(tools) == {"local_shell", "auto"}

    tools.extend(
        [
            Mcp(type="mcp", server_label="random", server_url=""),
            Mcp(type="mcp", server_label="container", server_url=""),
            Mcp(type="mcp", server_label="code_interpreter", server_url=""),
            Mcp(type="mcp", server_label="web_search_preview", server_url=""),
        ]
    )
    # When envs.VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS is not set,
    # mcp tool types are all ignored.
    assert extract_tool_types(tools) == {"local_shell", "auto"}

    # container is allowed, it would be extracted
    monkeypatch.setenv("VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS", "container")
    assert extract_tool_types(tools) == {"local_shell", "auto", "container"}

    # code_interpreter and web_search_preview are allowed,
    # they would be extracted
    monkeypatch.setenv(
        "VLLM_GPT_OSS_SYSTEM_TOOL_MCP_LABELS", "code_interpreter,web_search_preview"
    )
    assert extract_tool_types(tools) == {
        "local_shell",
        "auto",
        "code_interpreter",
        "web_search_preview",
    }


class TestInitializeToolSessions:
    """Test class for _initialize_tool_sessions method"""

@ -125,6 +173,28 @@ class TestInitializeToolSessions:
        # Verify that init_tool_sessions was called
        assert mock_context.init_tool_sessions_called

    def test_validate_create_responses_input(
        self, serving_responses_instance, mock_context, mock_exit_stack
    ):
        request = ResponsesRequest(
            input="test input",
            previous_input_messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "What is my horoscope? I am an Aquarius.",
                        }
                    ],
                }
            ],
            previous_response_id="lol",
        )
        error = serving_responses_instance._validate_create_responses_input(request)
        assert error is not None
        assert error.error.type == "invalid_request_error"


class TestValidateGeneratorInput:
    """Test class for _validate_generator_input method"""

@ -2,6 +2,7 @@
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import requests
from prometheus_client.parser import text_string_to_metric_families

from ...utils import RemoteOpenAIServer

@ -31,12 +32,28 @@ def test_sleep_mode():
        assert response.status_code == 200
        assert response.json().get("is_sleeping") is True

        # check sleep metrics
        response = requests.get(remote_server.url_for("metrics"))
        assert response.status_code == 200
        awake, weights_offloaded, discard_all = _get_sleep_metrics_from_api(response)
        assert awake == 0
        assert weights_offloaded == 1
        assert discard_all == 0

        response = requests.post(remote_server.url_for("wake_up"))
        assert response.status_code == 200
        response = requests.get(remote_server.url_for("is_sleeping"))
        assert response.status_code == 200
        assert response.json().get("is_sleeping") is False

        # check sleep metrics
        response = requests.get(remote_server.url_for("metrics"))
        assert response.status_code == 200
        awake, weights_offloaded, discard_all = _get_sleep_metrics_from_api(response)
        assert awake == 1
        assert weights_offloaded == 0
        assert discard_all == 0

        # test wake up with tags
        response = requests.post(remote_server.url_for("sleep"), params={"level": "1"})
        assert response.status_code == 200
@ -59,3 +76,35 @@ def test_sleep_mode():
        response = requests.get(remote_server.url_for("is_sleeping"))
        assert response.status_code == 200
        assert response.json().get("is_sleeping") is False

        # check sleep metrics
        response = requests.get(remote_server.url_for("metrics"))
        assert response.status_code == 200
        awake, weights_offloaded, discard_all = _get_sleep_metrics_from_api(response)
        assert awake == 1
        assert weights_offloaded == 0
        assert discard_all == 0


def _get_sleep_metrics_from_api(response: requests.Response):
    """Return (awake, weights_offloaded, discard_all)"""

    awake, weights_offloaded, discard_all = None, None, None

    for family in text_string_to_metric_families(response.text):
        if family.name == "vllm:engine_sleep_state":
            for sample in family.samples:
                if sample.name == "vllm:engine_sleep_state":
                    for label_name, label_value in sample.labels.items():
                        if label_value == "awake":
                            awake = sample.value
                        elif label_value == "weights_offloaded":
                            weights_offloaded = sample.value
                        elif label_value == "discard_all":
                            discard_all = sample.value

    assert awake is not None
    assert weights_offloaded is not None
    assert discard_all is not None

    return awake, weights_offloaded, discard_all

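Note: _get_sleep_metrics_from_api above matches on label values only. A standalone sketch of the Prometheus exposition text it expects (the label name "state" is an assumption; only the values matter to the helper):

# Hedged sketch: parse a hand-written sample the way the helper does.
from prometheus_client.parser import text_string_to_metric_families

sample_text = (
    "# TYPE vllm:engine_sleep_state gauge\n"
    'vllm:engine_sleep_state{state="awake"} 1.0\n'
    'vllm:engine_sleep_state{state="weights_offloaded"} 0.0\n'
    'vllm:engine_sleep_state{state="discard_all"} 0.0\n'
)

for family in text_string_to_metric_families(sample_text):
    for sample in family.samples:
        print(sample.labels, sample.value)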
tests/entrypoints/openai/tool_parsers/conftest.py (new file, 12 lines)
@ -0,0 +1,12 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import pytest
from transformers import AutoTokenizer

from vllm.transformers_utils.tokenizer import AnyTokenizer


@pytest.fixture(scope="function")
def default_tokenizer() -> AnyTokenizer:
    return AutoTokenizer.from_pretrained("gpt2")
@ -2,17 +2,15 @@
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import pytest
from transformers import AutoTokenizer

from vllm.entrypoints.openai.protocol import ExtractedToolCallInformation
from vllm.entrypoints.openai.tool_parsers.llama_tool_parser import Llama3JsonToolParser
from vllm.transformers_utils.tokenizer import AnyTokenizer


@pytest.fixture
def parser():
    # Use a small tokenizer for testing
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    return Llama3JsonToolParser(tokenizer)
def parser(default_tokenizer: AnyTokenizer):
    return Llama3JsonToolParser(default_tokenizer)


def test_extract_tool_calls_simple(parser):

@ -11,6 +11,7 @@ from tests.entrypoints.openai.tool_parsers.utils import (
)
from vllm.entrypoints.openai.protocol import FunctionCall
from vllm.entrypoints.openai.tool_parsers import ToolParser, ToolParserManager
from vllm.transformers_utils.tokenizer import AnyTokenizer

# Test cases similar to pythonic parser but with Llama4 specific format
SIMPLE_FUNCTION_OUTPUT = "[get_weather(city='LA', metric='C')]"
@ -63,10 +64,9 @@ PYTHON_TAG_FUNCTION_OUTPUT = (


@pytest.mark.parametrize("streaming", [True, False])
def test_no_tool_call(streaming: bool):
    mock_tokenizer = MagicMock()
def test_no_tool_call(streaming: bool, default_tokenizer: AnyTokenizer):
    tool_parser: ToolParser = ToolParserManager.get_tool_parser("llama4_pythonic")(
        mock_tokenizer
        default_tokenizer
    )
    model_output = "How can I help you today?"

@ -205,11 +205,13 @@ TEST_CASES = [

@pytest.mark.parametrize("streaming, model_output, expected_tool_calls", TEST_CASES)
def test_tool_call(
    streaming: bool, model_output: str, expected_tool_calls: list[FunctionCall]
    streaming: bool,
    model_output: str,
    expected_tool_calls: list[FunctionCall],
    default_tokenizer: AnyTokenizer,
):
    mock_tokenizer = MagicMock()
    tool_parser: ToolParser = ToolParserManager.get_tool_parser("llama4_pythonic")(
        mock_tokenizer
        default_tokenizer
    )

    content, tool_calls = run_tool_extraction(
@ -222,10 +224,9 @@ def test_tool_call(
        assert actual.function == expected


def test_streaming_tool_call_with_large_steps():
    mock_tokenizer = MagicMock()
def test_streaming_tool_call_with_large_steps(default_tokenizer: AnyTokenizer):
    tool_parser: ToolParser = ToolParserManager.get_tool_parser("llama4_pythonic")(
        mock_tokenizer
        default_tokenizer
    )
    model_output_deltas = [
        "<|python_start|>[get_weather(city='LA', metric='C'), "
@ -245,11 +246,10 @@ def test_streaming_tool_call_with_large_steps():


@pytest.mark.parametrize("streaming", [False])
def test_regex_timeout_handling(streaming: bool):
def test_regex_timeout_handling(streaming: bool, default_tokenizer: AnyTokenizer):
    """test regex timeout is handled gracefully"""
    mock_tokenizer = MagicMock()
    tool_parser: ToolParser = ToolParserManager.get_tool_parser("llama4_pythonic")(
        mock_tokenizer
        default_tokenizer
    )

    fake_problematic_input = "hello world[A(A=" + "\t)A(A=,\t" * 2