diff --git a/.buildkite/generate_index.py b/.buildkite/generate_index.py
deleted file mode 100644
index bbed80ebe8476..0000000000000
--- a/.buildkite/generate_index.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-
-import argparse
-import os
-
-template = """<!DOCTYPE html>
-<html>
-    <body>
-        <h1>Links for vLLM</h1>
-        <a href="../{x86_wheel_html_escaped}">{x86_wheel}</a><br/>
-        <a href="../{arm_wheel_html_escaped}">{arm_wheel}</a><br/>
-    </body>
-</html>
-"""
-
-parser = argparse.ArgumentParser()
-parser.add_argument("--wheel", help="The wheel path.", required=True)
-args = parser.parse_args()
-
-filename = os.path.basename(args.wheel)
-
-with open("index.html", "w") as f:
- print(f"Generated index.html for {args.wheel}")
- # sync the abi tag with .buildkite/scripts/upload-wheels.sh
- if "x86_64" in filename:
- x86_wheel = filename
- arm_wheel = filename.replace("x86_64", "aarch64").replace(
- "manylinux1", "manylinux2014"
- )
- elif "aarch64" in filename:
- x86_wheel = filename.replace("aarch64", "x86_64").replace(
- "manylinux2014", "manylinux1"
- )
- arm_wheel = filename
- else:
- raise ValueError(f"Unsupported wheel: {filename}")
- # cloudfront requires escaping the '+' character
- f.write(
- template.format(
- x86_wheel=x86_wheel,
- x86_wheel_html_escaped=x86_wheel.replace("+", "%2B"),
- arm_wheel=arm_wheel,
- arm_wheel_html_escaped=arm_wheel.replace("+", "%2B"),
- )
- )
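For reference, the logic of the removed index generator as a standalone sketch: given one wheel filename it derives the sibling architecture's name (swapping `x86_64`/`aarch64` and the matching manylinux tag) and escapes `+` for CloudFront. The wheel filename below is hypothetical, for illustration only.

```python
# Standalone sketch of the filename handling from the removed generate_index.py.
filename = "vllm-0.0.0+cu128-cp38-abi3-manylinux1_x86_64.whl"  # hypothetical name

if "x86_64" in filename:
    x86_wheel = filename
    arm_wheel = filename.replace("x86_64", "aarch64").replace("manylinux1", "manylinux2014")
elif "aarch64" in filename:
    arm_wheel = filename
    x86_wheel = filename.replace("aarch64", "x86_64").replace("manylinux2014", "manylinux1")
else:
    raise ValueError(f"Unsupported wheel: {filename}")

# CloudFront requires escaping '+' in the generated hrefs.
print(x86_wheel.replace("+", "%2B"), arm_wheel.replace("+", "%2B"))
```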
diff --git a/.buildkite/lm-eval-harness/configs/Qwen2-1.5B-Instruct-W8A16-compressed-tensors.yaml b/.buildkite/lm-eval-harness/configs/Qwen2-1.5B-Instruct-W8A16-compressed-tensors.yaml
deleted file mode 100644
index 1bce7e7fdf146..0000000000000
--- a/.buildkite/lm-eval-harness/configs/Qwen2-1.5B-Instruct-W8A16-compressed-tensors.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-# For vllm script, with -t option (tensor parallel size).
-# bash .buildkite/lm-eval-harness/run-lm-eval-gsm-vllm-baseline.sh -m nm-testing/Qwen2-1.5B-Instruct-W8A16-Channelwise -b "auto" -l 1000 -f 5 -t 1
-model_name: "nm-testing/Qwen2-1.5B-Instruct-W8A16-Channelwise"
-tasks:
-- name: "gsm8k"
- metrics:
- - name: "exact_match,strict-match"
- value: 0.595
- - name: "exact_match,flexible-extract"
- value: 0.582
-limit: 1000
-num_fewshot: 5
diff --git a/.buildkite/lm-eval-harness/configs/Qwen3-235B-A22B-Instruct-2507-FP8.yaml b/.buildkite/lm-eval-harness/configs/Qwen3-235B-A22B-Instruct-2507-FP8.yaml
new file mode 100644
index 0000000000000..514c15d6098ed
--- /dev/null
+++ b/.buildkite/lm-eval-harness/configs/Qwen3-235B-A22B-Instruct-2507-FP8.yaml
@@ -0,0 +1,14 @@
+model_name: "Qwen/Qwen3-235B-A22B-Instruct-2507-FP8"
+tasks:
+ - name: "mmlu_pro"
+ metrics:
+ - name: "exact_match,custom-extract"
+ value: 0.82
+limit: 250 # will run on 250 * 14 subjects = 3500 samples
+num_fewshot: 5
+enforce_eager: false # we use false to speed up the eval process
+kv_cache_dtype: fp8 # we use fp8 to speed up the eval process
+max_model_len: 40960
+apply_chat_template: true
+fewshot_as_multiturn: true
+gen_kwargs: "temperature=0,top_p=1,top_k=0,max_gen_toks=5632,until=<|ENDANSWER|>"
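This config exercises the new optional knobs handled by `test_lm_eval_correctness.py` below. A minimal sketch of loading such a YAML file and checking the expected metrics against measured results; the tolerance value here is an assumption for illustration, not necessarily what the CI harness uses.

```python
import yaml  # PyYAML

RTOL = 0.08  # illustrative tolerance only

with open("Qwen3-235B-A22B-Instruct-2507-FP8.yaml") as f:
    eval_config = yaml.safe_load(f)

def check(results: dict) -> None:
    # `results` would come from lm_eval.simple_evaluate(...) in the harness.
    for task in eval_config["tasks"]:
        for metric in task["metrics"]:
            measured = results["results"][task["name"]][metric["name"]]
            expected = metric["value"]
            assert abs(measured - expected) <= RTOL, (
                f"{task['name']}:{metric['name']}={measured}, expected ~{expected}"
            )
```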
diff --git a/.buildkite/lm-eval-harness/configs/models-large-h100.txt b/.buildkite/lm-eval-harness/configs/models-large-h100.txt
deleted file mode 100644
index 4fb0b84bc4d81..0000000000000
--- a/.buildkite/lm-eval-harness/configs/models-large-h100.txt
+++ /dev/null
@@ -1 +0,0 @@
-Meta-Llama-4-Maverick-17B-128E-Instruct-FP8.yaml
diff --git a/.buildkite/lm-eval-harness/configs/models-large-hopper.txt b/.buildkite/lm-eval-harness/configs/models-large-hopper.txt
new file mode 100644
index 0000000000000..5552391d9eaba
--- /dev/null
+++ b/.buildkite/lm-eval-harness/configs/models-large-hopper.txt
@@ -0,0 +1 @@
+Qwen3-235B-A22B-Instruct-2507-FP8.yaml
diff --git a/.buildkite/lm-eval-harness/test_lm_eval_correctness.py b/.buildkite/lm-eval-harness/test_lm_eval_correctness.py
index f10de82b1d8e8..3627b760eddcf 100644
--- a/.buildkite/lm-eval-harness/test_lm_eval_correctness.py
+++ b/.buildkite/lm-eval-harness/test_lm_eval_correctness.py
@@ -21,10 +21,13 @@ def launch_lm_eval(eval_config, tp_size):
max_model_len = eval_config.get("max_model_len", 4096)
batch_size = eval_config.get("batch_size", "auto")
backend = eval_config.get("backend", "vllm")
+ enforce_eager = eval_config.get("enforce_eager", "true")
+ kv_cache_dtype = eval_config.get("kv_cache_dtype", "auto")
model_args = (
f"pretrained={eval_config['model_name']},"
f"tensor_parallel_size={tp_size},"
- f"enforce_eager=true,"
+ f"enforce_eager={enforce_eager},"
+ f"kv_cache_dtype={kv_cache_dtype},"
f"add_bos_token=true,"
f"trust_remote_code={trust_remote_code},"
f"max_model_len={max_model_len},"
@@ -37,8 +40,13 @@ def launch_lm_eval(eval_config, tp_size):
limit=eval_config["limit"],
        # TODO(yeq): using chat template w/ fewshot_as_multiturn is supposed to help
# text models. however, this is regressing measured strict-match for
- # existing text models in CI, so only apply it for mm.
- apply_chat_template=backend == "vllm-vlm",
+        # existing text models in CI, so only apply it for mm or when explicitly set
+ apply_chat_template=eval_config.get(
+ "apply_chat_template", backend == "vllm-vlm"
+ ),
+ fewshot_as_multiturn=eval_config.get("fewshot_as_multiturn", False),
+ # Forward decoding and early-stop controls (e.g., max_gen_toks, until=...)
+ gen_kwargs=eval_config.get("gen_kwargs"),
batch_size=batch_size,
)
return results
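With the hunk above, all of the new YAML keys are optional and fall back to the previous behavior when absent, so existing configs are unaffected. A quick sketch of the resulting defaults, using a hypothetical config dict that mirrors the `eval_config.get(...)` calls above:

```python
# Hypothetical eval_config with none of the new keys set.
eval_config = {"model_name": "some/model", "limit": 1000}
backend = eval_config.get("backend", "vllm")

# Defaults mirror the hunk above, i.e. the pre-existing behavior.
assert eval_config.get("enforce_eager", "true") == "true"
assert eval_config.get("kv_cache_dtype", "auto") == "auto"
assert eval_config.get("apply_chat_template", backend == "vllm-vlm") is False
assert eval_config.get("fewshot_as_multiturn", False) is False
assert eval_config.get("gen_kwargs") is None
```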
diff --git a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml b/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml
deleted file mode 100644
index 4259514940d3f..0000000000000
--- a/.buildkite/nightly-benchmarks/benchmark-pipeline.yaml
+++ /dev/null
@@ -1,184 +0,0 @@
-steps:
- - label: "Wait for container to be ready"
- key: wait-for-container-image
- agents:
- queue: A100
- plugins:
- - kubernetes:
- podSpec:
- containers:
- - image: badouralix/curl-jq
- command:
- - sh .buildkite/nightly-benchmarks/scripts/wait-for-image.sh
- - label: "Cleanup H100"
- agents:
- queue: H100
- depends_on: ~
- command: docker system prune -a --volumes --force
-
- - label: "A100"
- # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
- agents:
- queue: A100
- depends_on: wait-for-container-image
- if: build.branch == "main"
- plugins:
- - kubernetes:
- podSpec:
- priorityClassName: perf-benchmark
- containers:
- - image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
- command:
- - bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
- resources:
- limits:
- nvidia.com/gpu: 8
- volumeMounts:
- - name: devshm
- mountPath: /dev/shm
- env:
- - name: VLLM_USAGE_SOURCE
- value: ci-test
- - name: HF_TOKEN
- valueFrom:
- secretKeyRef:
- name: hf-token-secret
- key: token
- nodeSelector:
- nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB
- volumes:
- - name: devshm
- emptyDir:
- medium: Memory
-
- - label: "H200"
- # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
- agents:
- queue: H200
- depends_on: wait-for-container-image
- if: build.branch == "main"
- plugins:
- - docker#v5.12.0:
- image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
- command:
- - bash
- - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
- mount-buildkite-agent: true
- propagate-environment: true
- ipc: host
- gpus: 4,5,6,7
- volumes:
- - /data/benchmark-hf-cache:/root/.cache/huggingface
- environment:
- - VLLM_USAGE_SOURCE
- - HF_TOKEN
-
- #- block: "Run H100 Benchmark"
- #key: block-h100
- #depends_on: ~
-
- - label: "H100"
- # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
- agents:
- queue: H100
- depends_on: wait-for-container-image
- if: build.branch == "main"
- plugins:
- - docker#v5.12.0:
- image: public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:$BUILDKITE_COMMIT
- command:
- - bash
- - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
- mount-buildkite-agent: true
- propagate-environment: true
- ipc: host
- gpus: all # see CUDA_VISIBLE_DEVICES for actual GPUs used
- volumes:
- - /data/benchmark-hf-cache:/root/.cache/huggingface
- environment:
- - VLLM_USAGE_SOURCE
- - HF_TOKEN
-
- # Premerge benchmark
- - label: "A100"
- # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
- agents:
- queue: A100
- depends_on: wait-for-container-image
- if: build.branch != "main"
- plugins:
- - kubernetes:
- podSpec:
- priorityClassName: perf-benchmark
- containers:
- - image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
- command:
- - bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
- resources:
- limits:
- nvidia.com/gpu: 8
- volumeMounts:
- - name: devshm
- mountPath: /dev/shm
- env:
- - name: VLLM_USAGE_SOURCE
- value: ci-test
- - name: HF_TOKEN
- valueFrom:
- secretKeyRef:
- name: hf-token-secret
- key: token
- nodeSelector:
- nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB
- volumes:
- - name: devshm
- emptyDir:
- medium: Memory
-
- - label: "H200"
- # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
- agents:
- queue: H200
- depends_on: wait-for-container-image
- if: build.branch != "main"
- plugins:
- - docker#v5.12.0:
- image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
- command:
- - bash
- - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
- mount-buildkite-agent: true
- propagate-environment: true
- ipc: host
- gpus: 4,5,6,7
- volumes:
- - /data/benchmark-hf-cache:/root/.cache/huggingface
- environment:
- - VLLM_USAGE_SOURCE
- - HF_TOKEN
-
- #- block: "Run H100 Benchmark"
- #key: block-h100
- #depends_on: ~
-
- - label: "H100"
- # skip: "use this flag to conditionally skip the benchmark step, useful for PR testing"
- agents:
- queue: H100
- depends_on: wait-for-container-image
- if: build.branch != "main"
- plugins:
- - docker#v5.12.0:
- image: public.ecr.aws/q9t5s3a7/vllm-ci-test-repo:$BUILDKITE_COMMIT
- command:
- - bash
- - .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
- mount-buildkite-agent: true
- propagate-environment: true
- ipc: host
- gpus: all # see CUDA_VISIBLE_DEVICES for actual GPUs used
- volumes:
- - /data/benchmark-hf-cache:/root/.cache/huggingface
- environment:
- - VLLM_USAGE_SOURCE
- - HF_TOKEN
diff --git a/.buildkite/nightly-benchmarks/nightly-annotation.md b/.buildkite/nightly-benchmarks/nightly-annotation.md
deleted file mode 100644
index 466def07b6f1f..0000000000000
--- a/.buildkite/nightly-benchmarks/nightly-annotation.md
+++ /dev/null
@@ -1,28 +0,0 @@
-# Nightly benchmark annotation
-
-## Description
-
-This file contains the downloading link for benchmarking results.
-
-- [benchmarking pipeline](artifact://nightly-pipeline.yaml)
-- [benchmarking results](artifact://results.zip)
-- [benchmarking code](artifact://nightly-benchmarks.zip)
-
-Please download the visualization scripts in the post
-
-## Results reproduction
-
-- Find the docker we use in `benchmarking pipeline`
-- Deploy the docker, and inside the docker:
- - Download `nightly-benchmarks.zip`.
- - In the same folder, run the following code:
-
- ```bash
- export HF_TOKEN=
- apt update
- apt install -y git
- unzip nightly-benchmarks.zip
- VLLM_SOURCE_CODE_LOC=./ bash .buildkite/nightly-benchmarks/scripts/run-nightly-benchmarks.sh
- ```
-
-And the results will be inside `./benchmarks/results`.
diff --git a/.buildkite/nightly-benchmarks/nightly-descriptions.md b/.buildkite/nightly-benchmarks/nightly-descriptions.md
deleted file mode 100644
index 2ef36089b6afb..0000000000000
--- a/.buildkite/nightly-benchmarks/nightly-descriptions.md
+++ /dev/null
@@ -1,39 +0,0 @@
-
-# Nightly benchmark
-
-This benchmark aims to:
-
-- Provide performance clarity: Provide clarity on which one (vllm, tensorrt-llm, lmdeploy and SGLang) leads in performance in what workload.
-- Be reproducible: one can run the exact same set of benchmarking commands inside the exact same docker by following reproducing instructions.
-
-Latest results: [results link](https://blog.vllm.ai/2024/09/05/perf-update.html), scroll to the end.
-
-Latest reproduction guide: [github issue link](https://github.com/vllm-project/vllm/issues/8176)
-
-## Setup
-
-- Docker images:
- - vLLM: `vllm/vllm-openai:v0.6.2`
- - SGLang: `lmsysorg/sglang:v0.3.2-cu121`
- - LMDeploy: `openmmlab/lmdeploy:v0.6.1-cu12`
- - TensorRT-LLM: `nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3`
- - *NOTE: we use r24.07 as the current implementation only works for this version. We are going to bump this up.*
- - Check [nightly-pipeline.yaml](nightly-pipeline.yaml) for the concrete docker images, specs and commands we use for the benchmark.
-- Hardware
- - 8x Nvidia A100 GPUs
-- Workload:
- - Dataset
- - ShareGPT dataset
- - Prefill-heavy dataset (in average 462 input tokens, 16 tokens as output)
- - Decode-heavy dataset (in average 462 input tokens, 256 output tokens)
- - Check [nightly-tests.json](tests/nightly-tests.json) for the concrete configuration of datasets we use.
- - Models: llama-3 8B, llama-3 70B.
- - We do not use llama 3.1 as it is incompatible with trt-llm r24.07. ([issue](https://github.com/NVIDIA/TensorRT-LLM/issues/2105)).
- - Average QPS (query per second): 2, 4, 8, 16, 32 and inf.
- - Queries are randomly sampled, and arrival patterns are determined via Poisson process, but all with fixed random seed.
- - Evaluation metrics: Throughput (higher the better), TTFT (time to the first token, lower the better), ITL (inter-token latency, lower the better).
-
-## Known issues
-
-- TRT-LLM crashes with Llama 3.1 8B [issue](https://github.com/NVIDIA/TensorRT-LLM/issues/2105).
-- TGI does not support `ignore-eos` flag.
diff --git a/.buildkite/nightly-benchmarks/nightly-pipeline.yaml b/.buildkite/nightly-benchmarks/nightly-pipeline.yaml
deleted file mode 100644
index 199517e8b067c..0000000000000
--- a/.buildkite/nightly-benchmarks/nightly-pipeline.yaml
+++ /dev/null
@@ -1,196 +0,0 @@
-common_pod_spec: &common_pod_spec
- priorityClassName: perf-benchmark
- nodeSelector:
- nvidia.com/gpu.product: NVIDIA-A100-SXM4-80GB
- volumes:
- - name: devshm
- emptyDir:
- medium: Memory
- - name: hf-cache
- hostPath:
- path: /root/.cache/huggingface
- type: Directory
-
-common_container_settings: &common_container_settings
- command:
- - bash .buildkite/nightly-benchmarks/scripts/run-nightly-benchmarks.sh
- resources:
- limits:
- nvidia.com/gpu: 8
- volumeMounts:
- - name: devshm
- mountPath: /dev/shm
- - name: hf-cache
- mountPath: /root/.cache/huggingface
- env:
- - name: VLLM_USAGE_SOURCE
- value: ci-test
- - name: HF_HOME
- value: /root/.cache/huggingface
- - name: VLLM_SOURCE_CODE_LOC
- value: /workspace/build/buildkite/vllm/performance-benchmark
- - name: HF_TOKEN
- valueFrom:
- secretKeyRef:
- name: hf-token-secret
- key: token
-
-steps:
- - block: ":rocket: Ready for comparing vllm against alternatives? This will take 4 hours."
-
-
-
- - label: "A100 vllm step 10"
- priority: 100
- agents:
- queue: A100
- plugins:
- - kubernetes:
- podSpec:
- <<: *common_pod_spec
- containers:
- - image: vllm/vllm-openai:v0.6.2
- <<: *common_container_settings
-
-
-
- - label: "A100 sglang benchmark"
- priority: 100
- agents:
- queue: A100
- plugins:
- - kubernetes:
- podSpec:
- <<: *common_pod_spec
- containers:
- - image: lmsysorg/sglang:v0.3.2-cu121
- <<: *common_container_settings
-
- - label: "A100 lmdeploy benchmark"
- priority: 100
- agents:
- queue: A100
- plugins:
- - kubernetes:
- podSpec:
- <<: *common_pod_spec
- containers:
- - image: openmmlab/lmdeploy:v0.6.1-cu12
- <<: *common_container_settings
-
-
-
-
- - label: "A100 trt llama-8B"
- priority: 100
- agents:
- queue: A100
- plugins:
- - kubernetes:
- podSpec:
- <<: *common_pod_spec
- containers:
- - image: nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3
- <<: *common_container_settings
- env:
- - name: VLLM_USAGE_SOURCE
- value: ci-test
- - name: HF_HOME
- value: /root/.cache/huggingface
- - name: VLLM_SOURCE_CODE_LOC
- value: /workspace/build/buildkite/vllm/performance-benchmark
- - name: HF_TOKEN
- valueFrom:
- secretKeyRef:
- name: hf-token-secret
- key: token
- - name: TEST_SELECTOR
- value: "llama8B"
-
-
- - label: "A100 trt llama-70B"
- priority: 100
- agents:
- queue: A100
- plugins:
- - kubernetes:
- podSpec:
- <<: *common_pod_spec
- containers:
- - image: nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3
- <<: *common_container_settings
- env:
- - name: VLLM_USAGE_SOURCE
- value: ci-test
- - name: HF_HOME
- value: /root/.cache/huggingface
- - name: VLLM_SOURCE_CODE_LOC
- value: /workspace/build/buildkite/vllm/performance-benchmark
- - name: HF_TOKEN
- valueFrom:
- secretKeyRef:
- name: hf-token-secret
- key: token
- - name: TEST_SELECTOR
- value: "llama70B"
-
-
- # FIXME(Kuntai): uncomment this after NVIDIA gives us their test docker image
- # - label: "A100 trt benchmark"
- # priority: 100
- # agents:
- # queue: A100
- # plugins:
- # - kubernetes:
- # podSpec:
- # <<: *common_pod_spec
- # containers:
- # - image: nvcr.io/nvidia/tritonserver:24.07-trtllm-python-py3
- # <<: *common_container_settings
-
-
- # FIXME(Kuntai): uncomment this after TGI supports `--ignore-eos`.
- # - label: "A100 tgi benchmark"
- # priority: 100
- # agents:
- # queue: A100
- # plugins:
- # - kubernetes:
- # podSpec:
- # <<: *common_pod_spec
- # containers:
- # - image: ghcr.io/huggingface/text-generation-inference:2.2.0
- # <<: *common_container_settings
-
- - wait
-
- - label: "Collect the results"
- priority: 100
- agents:
- queue: A100
- plugins:
- - kubernetes:
- podSpec:
- <<: *common_pod_spec
- containers:
- - image: vllm/vllm-openai:v0.5.0.post1
- command:
- - bash .buildkite/nightly-benchmarks/scripts/nightly-annotate.sh
- resources:
- limits:
- nvidia.com/gpu: 8
- volumeMounts:
- - name: devshm
- mountPath: /dev/shm
- env:
- - name: VLLM_USAGE_SOURCE
- value: ci-test
- - name: VLLM_SOURCE_CODE_LOC
- value: /workspace/build/buildkite/vllm/performance-benchmark
- - name: HF_TOKEN
- valueFrom:
- secretKeyRef:
- name: hf-token-secret
- key: token
-
- - block: ":rocket: check the results!"
\ No newline at end of file
diff --git a/.buildkite/nightly-benchmarks/scripts/download-tokenizer.py b/.buildkite/nightly-benchmarks/scripts/download-tokenizer.py
deleted file mode 100644
index 8532ff7ef798c..0000000000000
--- a/.buildkite/nightly-benchmarks/scripts/download-tokenizer.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-
-import argparse
-
-from transformers import AutoTokenizer
-
-
-def main(model, cachedir):
- # Load the tokenizer and save it to the specified directory
- tokenizer = AutoTokenizer.from_pretrained(model)
- tokenizer.save_pretrained(cachedir)
- print(f"Tokenizer saved to {cachedir}")
-
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(
- description="Download and save Hugging Face tokenizer"
- )
- parser.add_argument("--model", type=str, required=True, help="Name of the model")
- parser.add_argument(
- "--cachedir", type=str, required=True, help="Directory to save the tokenizer"
- )
-
- args = parser.parse_args()
- main(args.model, args.cachedir)
diff --git a/.buildkite/nightly-benchmarks/scripts/generate-nightly-markdown.py b/.buildkite/nightly-benchmarks/scripts/generate-nightly-markdown.py
deleted file mode 100644
index 053fd52c35ae9..0000000000000
--- a/.buildkite/nightly-benchmarks/scripts/generate-nightly-markdown.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-
-import argparse
-import json
-from pathlib import Path
-
-import numpy as np
-import pandas as pd
-from tabulate import tabulate
-
-
-def parse_arguments():
- parser = argparse.ArgumentParser(
- description="Parse command line arguments for summary-nightly-results script."
- )
- parser.add_argument(
- "--results-folder",
- type=str,
- required=True,
- help="The folder where the results are stored.",
- )
- parser.add_argument(
- "--description", type=str, required=True, help="Description of the results."
- )
-
- args = parser.parse_args()
- return args
-
-
-def get_perf(df, method, model, metric):
- means = []
-
- for qps in [2, 4, 8, 16, "inf"]:
- target = df["Test name"].str.contains(model)
- target = target & df["Engine"].str.contains(method)
- target = target & df["Test name"].str.contains("qps_" + str(qps))
- filtered_df = df[target]
-
- if filtered_df.empty:
- means.append(0.0)
- else:
- means.append(filtered_df[metric].values[0])
-
- return np.array(means)
-
-
-def get_perf_w_std(df, method, model, metric):
- if metric in ["TTFT", "ITL"]:
- mean = get_perf(df, method, model, "Mean " + metric + " (ms)")
- mean = mean.tolist()
- std = get_perf(df, method, model, "Std " + metric + " (ms)")
- if std.mean() == 0:
- std = None
- success = get_perf(df, method, model, "Successful req.")
- if std is not None:
- std = std / np.sqrt(success)
- std = std.tolist()
-
- else:
- assert metric == "Tput"
- mean = get_perf(df, method, model, "Input Tput (tok/s)") + get_perf(
- df, method, model, "Output Tput (tok/s)"
- )
- mean = mean.tolist()
- std = None
-
- return mean, std
-
-
-def main(args):
- results_folder = Path(args.results_folder)
-
- results = []
-
- # collect results
- for test_file in results_folder.glob("*_nightly_results.json"):
- with open(test_file) as f:
- results = results + json.loads(f.read())
-
- # generate markdown table
- df = pd.DataFrame.from_dict(results)
-
- md_table = tabulate(df, headers="keys", tablefmt="pipe", showindex=False)
-
- with open(args.description) as f:
- description = f.read()
-
- description = description.format(nightly_results_benchmarking_table=md_table)
-
- with open("nightly_results.md", "w") as f:
- f.write(description)
-
-
-if __name__ == "__main__":
- args = parse_arguments()
- main(args)
diff --git a/.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py b/.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py
deleted file mode 100644
index ddea1d2b1b1ed..0000000000000
--- a/.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-
-from lmdeploy.serve.openai.api_client import APIClient
-
-api_client = APIClient("http://localhost:8000")
-model_name = api_client.available_models[0]
-
-print(model_name)
diff --git a/.buildkite/nightly-benchmarks/scripts/nightly-annotate.sh b/.buildkite/nightly-benchmarks/scripts/nightly-annotate.sh
deleted file mode 100644
index 69b6b146b3549..0000000000000
--- a/.buildkite/nightly-benchmarks/scripts/nightly-annotate.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/bash
-
-set -ex
-set -o pipefail
-
-
-main() {
-
- (which wget && which curl) || (apt-get update && apt-get install -y wget curl)
- (which jq) || (apt-get update && apt-get -y install jq)
- (which zip) || (apt-get install -y zip)
-
- if [ ! -f /workspace/buildkite-agent ]; then
- echo "buildkite-agent binary not found. Skip plotting the results."
- exit 0
- fi
-
- # initial annotation
- #description="$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/nightly-descriptions.md"
-
- # download results
- cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
- mkdir -p results/
- /workspace/buildkite-agent artifact download 'results/*nightly_results.json' results/
- ls
- ls results/
-
- # upload benchmark results
- zip -r results.zip results/
- /workspace/buildkite-agent artifact upload "results.zip"
-
- # upload benchmarking scripts
- cd "$VLLM_SOURCE_CODE_LOC/"
- zip -r nightly-benchmarks.zip .buildkite/ benchmarks/
- /workspace/buildkite-agent artifact upload "nightly-benchmarks.zip"
-
- cd "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/"
- # upload benchmarking pipeline
- /workspace/buildkite-agent artifact upload "nightly-pipeline.yaml"
-
- cd "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/"
- /workspace/buildkite-agent annotate --style "success" --context "nightly-benchmarks-results" --append < nightly-annotation.md
-
-
-
- # The figures should be generated by a separate process outside the CI/CD pipeline
-
- # # generate figures
- # python3 -m pip install tabulate pandas matplotlib
-
- # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/generate-nightly-markdown.py \
- # --description $description \
- # --results-folder results/
-
-
- # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py \
- # --description $description \
- # --results-folder results/ \
- # --dataset sharegpt
-
- # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py \
- # --description $description \
- # --results-folder results/ \
- # --dataset sonnet_2048_128
-
- # python3 $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/plot-nightly-results.py \
- # --description $description \
- # --results-folder results/ \
- # --dataset sonnet_128_2048
-
- # # upload results and figures
- # /workspace/buildkite-agent artifact upload "nightly_results*.png"
- # /workspace/buildkite-agent artifact upload $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/nightly-pipeline.yaml
- # /workspace/buildkite-agent artifact upload $VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/tests/nightly-tests.json
- # /workspace/buildkite-agent annotate --style "success" --context "nightly-benchmarks-results" --append < nightly_results.md
-}
-
-main "$@"
diff --git a/.buildkite/nightly-benchmarks/scripts/run-nightly-benchmarks.sh b/.buildkite/nightly-benchmarks/scripts/run-nightly-benchmarks.sh
deleted file mode 100644
index a00de940cbbb8..0000000000000
--- a/.buildkite/nightly-benchmarks/scripts/run-nightly-benchmarks.sh
+++ /dev/null
@@ -1,464 +0,0 @@
-#!/bin/bash
-
-set -o pipefail
-set -x
-
-check_gpus() {
- # check the number of GPUs and GPU type.
- declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
- if [[ $gpu_count -gt 0 ]]; then
- echo "GPU found."
- else
- echo "Need at least 1 GPU to run benchmarking."
- exit 1
- fi
- declare -g gpu_type="$(nvidia-smi --query-gpu=name --format=csv,noheader | awk '{print $2}')"
- echo "GPU type is $gpu_type"
-}
-
-check_hf_token() {
- # check if HF_TOKEN is available and valid
- if [[ -z "$HF_TOKEN" ]]; then
- echo "Error: HF_TOKEN is not set."
- exit 1
- elif [[ ! "$HF_TOKEN" =~ ^hf_ ]]; then
- echo "Error: HF_TOKEN does not start with 'hf_'."
- exit 1
- else
- echo "HF_TOKEN is set and valid."
- fi
-}
-
-
-upload_to_buildkite() {
- # upload the benchmarking results to buildkite
-
- # if the agent binary is not found, skip uploading the results, exit 0
- if [ ! -f /workspace/buildkite-agent ]; then
- echo "buildkite-agent binary not found. Skip uploading the results."
- return 0
- fi
- # /workspace/buildkite-agent annotate --style "success" --context "benchmark-results" --append < $RESULTS_FOLDER/${CURRENT_LLM_SERVING_ENGINE}_nightly_results.md
- /workspace/buildkite-agent artifact upload "$RESULTS_FOLDER/*"
-}
-
-
-get_current_llm_serving_engine() {
-
- if which lmdeploy >/dev/null; then
- echo "Container: lmdeploy"
- export CURRENT_LLM_SERVING_ENGINE=lmdeploy
- return
- fi
-
- if [ -e /tgi-entrypoint.sh ]; then
- echo "Container: tgi"
- export CURRENT_LLM_SERVING_ENGINE=tgi
- return
- fi
-
- if which trtllm-build >/dev/null; then
- echo "Container: tensorrt-llm"
- export CURRENT_LLM_SERVING_ENGINE=trt
- return
- fi
-
- if [ -e /sgl-workspace ]; then
- echo "Container: sglang"
- export CURRENT_LLM_SERVING_ENGINE=sglang
- return
- fi
-
- if [ -e /vllm-workspace ]; then
- echo "Container: vllm"
- # move to a completely irrelevant directory, to avoid import vllm from current folder
- export CURRENT_LLM_SERVING_ENGINE=vllm
-
- return
- fi
-}
-
-json2args() {
- # transforms the JSON string to command line args, and '_' is replaced to '-'
- # example:
- # input: { "model": "meta-llama/Llama-2-7b-chat-hf", "tensor_parallel_size": 1 }
- # output: --model meta-llama/Llama-2-7b-chat-hf --tensor-parallel-size 1
- local json_string=$1
- local args=$(
- echo "$json_string" | jq -r '
- to_entries |
- map("--" + (.key | gsub("_"; "-")) + " " + (.value | tostring)) |
- join(" ")
- '
- )
- echo "$args"
-}
-
-kill_gpu_processes() {
- pkill -f '[p]ython'
- pkill -f '[p]ython3'
- pkill -f '[t]ritonserver'
- pkill -f '[p]t_main_thread'
- pkill -f '[t]ext-generation'
- pkill -f '[l]mdeploy'
- # vLLM now names the process with VLLM prefix after https://github.com/vllm-project/vllm/pull/21445
- pkill -f '[V]LLM'
-
- while [ "$(nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits | head -n 1)" -ge 1000 ]; do
- sleep 1
- done
-}
-
-wait_for_server() {
- # wait for vllm server to start
- # return 1 if vllm server crashes
- timeout 1200 bash -c '
- until curl -s localhost:8000/v1/completions > /dev/null; do
- sleep 1
- done' && return 0 || return 1
-}
-
-ensure_installed() {
- # Ensure that the given command is installed by apt-get
- local cmd=$1
- if ! which "$cmd" >/dev/null; then
- apt-get update && apt-get install -y "$cmd"
- fi
-}
-
-run_serving_tests() {
- # run serving tests using `vllm bench serve` command
- # $1: a json file specifying serving test cases
-
- local serving_test_file
- serving_test_file=$1
-
- # Iterate over serving tests
- jq -c '.[]' "$serving_test_file" | while read -r params; do
- # get the test name, and append the GPU type back to it.
- test_name=$(echo "$params" | jq -r '.test_name')
-
- # if TEST_SELECTOR is set, only run the test cases that match the selector
- if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
- echo "Skip test case $test_name."
- continue
- fi
-
- # prepend the current serving engine to the test name
- test_name=${CURRENT_LLM_SERVING_ENGINE}_${test_name}
-
- # get common parameters
- common_params=$(echo "$params" | jq -r '.common_parameters')
- model=$(echo "$common_params" | jq -r '.model')
- tp=$(echo "$common_params" | jq -r '.tp')
- dataset_name=$(echo "$common_params" | jq -r '.dataset_name')
- dataset_path=$(echo "$common_params" | jq -r '.dataset_path')
- port=$(echo "$common_params" | jq -r '.port')
- num_prompts=$(echo "$common_params" | jq -r '.num_prompts')
- reuse_server=$(echo "$common_params" | jq -r '.reuse_server')
-
- # get client and server arguments
- server_params=$(echo "$params" | jq -r ".${CURRENT_LLM_SERVING_ENGINE}_server_parameters")
- client_params=$(echo "$params" | jq -r ".${CURRENT_LLM_SERVING_ENGINE}_client_parameters")
- client_args=$(json2args "$client_params")
- qps_list=$(echo "$params" | jq -r '.qps_list')
- qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
- echo "Running over qps list $qps_list"
-
- # check if there is enough GPU to run the test
- if [[ $gpu_count -lt $tp ]]; then
- echo "Required num-shard $tp but only $gpu_count GPU found. Skip testcase $test_name."
- continue
- fi
-
- if [[ $reuse_server == "true" ]]; then
- echo "Reuse previous server for test case $test_name"
- else
- kill_gpu_processes
- bash "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/launch-server.sh" \
- "$server_params" "$common_params"
- fi
-
- if wait_for_server; then
- echo ""
- echo "$CURRENT_LLM_SERVING_ENGINE server is up and running."
- else
- echo ""
- echo "$CURRENT_LLM_SERVING_ENGINE failed to start within the timeout period."
- break
- fi
-
- # prepare tokenizer
- # this is required for lmdeploy.
- cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
- rm -rf /tokenizer_cache
- mkdir /tokenizer_cache
- python3 ../.buildkite/nightly-benchmarks/scripts/download-tokenizer.py \
- --model "$model" \
- --cachedir /tokenizer_cache
- cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
-
-
- # change model name for lmdeploy (it will not follow standard hf name)
- if [[ "$CURRENT_LLM_SERVING_ENGINE" == "lmdeploy" ]]; then
- model=$(python ../.buildkite/nightly-benchmarks/scripts/get-lmdeploy-modelname.py)
- fi
-
- # iterate over different QPS
- for qps in $qps_list; do
- # remove the surrounding single quote from qps
- if [[ "$qps" == *"inf"* ]]; then
- echo "qps was $qps"
- qps="inf"
- echo "now qps is $qps"
- fi
-
- new_test_name=$test_name"_qps_"$qps
-
- backend=$CURRENT_LLM_SERVING_ENGINE
-
- if [[ $backend = "trt" ]]; then
- backend="tensorrt-llm"
- fi
-
- if [[ "$backend" == *"vllm"* ]]; then
- backend="vllm"
- fi
-
- if [[ "$dataset_name" = "sharegpt" ]]; then
-
- client_command="vllm bench serve \
- --backend $backend \
- --tokenizer /tokenizer_cache \
- --model $model \
- --dataset-name $dataset_name \
- --dataset-path $dataset_path \
- --num-prompts $num_prompts \
- --port $port \
- --save-result \
- --result-dir $RESULTS_FOLDER \
- --result-filename ${new_test_name}.json \
- --request-rate $qps \
- --ignore-eos \
- $client_args"
-
- elif [[ "$dataset_name" = "sonnet" ]]; then
-
- sonnet_input_len=$(echo "$common_params" | jq -r '.sonnet_input_len')
- sonnet_output_len=$(echo "$common_params" | jq -r '.sonnet_output_len')
- sonnet_prefix_len=$(echo "$common_params" | jq -r '.sonnet_prefix_len')
-
- client_command="vllm bench serve \
- --backend $backend \
- --tokenizer /tokenizer_cache \
- --model $model \
- --dataset-name $dataset_name \
- --dataset-path $dataset_path \
- --num-prompts $num_prompts \
- --sonnet-input-len $sonnet_input_len \
- --sonnet-output-len $sonnet_output_len \
- --sonnet-prefix-len $sonnet_prefix_len \
- --port $port \
- --save-result \
- --result-dir $RESULTS_FOLDER \
- --result-filename ${new_test_name}.json \
- --request-rate $qps \
- --ignore-eos \
- $client_args"
-
- else
-
- echo "The dataset name must be either 'sharegpt' or 'sonnet'. Got $dataset_name."
- exit 1
-
- fi
-
-
-
- echo "Running test case $test_name with qps $qps"
- echo "Client command: $client_command"
-
- eval "$client_command"
-
- server_command="None"
-
- # record the benchmarking commands
- jq_output=$(jq -n \
- --arg server "$server_command" \
- --arg client "$client_command" \
- --arg gpu "$gpu_type" \
- --arg engine "$CURRENT_LLM_SERVING_ENGINE" \
- '{
- server_command: $server,
- client_command: $client,
- gpu_type: $gpu,
- engine: $engine
- }')
- echo "$jq_output" >"$RESULTS_FOLDER/${new_test_name}.commands"
-
- done
-
- done
-
- kill_gpu_processes
-}
-
-run_genai_perf_tests() {
- # run genai-perf tests
-
- # $1: a json file specifying genai-perf test cases
- local genai_perf_test_file
- genai_perf_test_file=$1
-
- # Iterate over genai-perf tests
- jq -c '.[]' "$genai_perf_test_file" | while read -r params; do
- # get the test name, and append the GPU type back to it.
- test_name=$(echo "$params" | jq -r '.test_name')
-
- # if TEST_SELECTOR is set, only run the test cases that match the selector
- if [[ -n "$TEST_SELECTOR" ]] && [[ ! "$test_name" =~ $TEST_SELECTOR ]]; then
- echo "Skip test case $test_name."
- continue
- fi
-
- # prepend the current serving engine to the test name
- test_name=${CURRENT_LLM_SERVING_ENGINE}_${test_name}
-
- # get common parameters
- common_params=$(echo "$params" | jq -r '.common_parameters')
- model=$(echo "$common_params" | jq -r '.model')
- tp=$(echo "$common_params" | jq -r '.tp')
- dataset_name=$(echo "$common_params" | jq -r '.dataset_name')
- dataset_path=$(echo "$common_params" | jq -r '.dataset_path')
- port=$(echo "$common_params" | jq -r '.port')
- num_prompts=$(echo "$common_params" | jq -r '.num_prompts')
- reuse_server=$(echo "$common_params" | jq -r '.reuse_server')
-
- # get client and server arguments
- server_params=$(echo "$params" | jq -r ".${CURRENT_LLM_SERVING_ENGINE}_server_parameters")
- qps_list=$(echo "$params" | jq -r '.qps_list')
- qps_list=$(echo "$qps_list" | jq -r '.[] | @sh')
- echo "Running over qps list $qps_list"
-
- # check if there is enough GPU to run the test
- if [[ $gpu_count -lt $tp ]]; then
- echo "Required num-shard $tp but only $gpu_count GPU found. Skip testcase $test_name."
- continue
- fi
-
- if [[ $reuse_server == "true" ]]; then
- echo "Reuse previous server for test case $test_name"
- else
- kill_gpu_processes
- bash "$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/scripts/launch-server.sh" \
- "$server_params" "$common_params"
- fi
-
- if wait_for_server; then
- echo ""
- echo "$CURRENT_LLM_SERVING_ENGINE server is up and running."
- else
- echo ""
- echo "$CURRENT_LLM_SERVING_ENGINE failed to start within the timeout period."
- break
- fi
-
- # iterate over different QPS
- for qps in $qps_list; do
- # remove the surrounding single quote from qps
- if [[ "$qps" == *"inf"* ]]; then
- echo "qps was $qps"
- qps=$num_prompts
- echo "now qps is $qps"
- fi
-
- new_test_name=$test_name"_qps_"$qps
- backend=$CURRENT_LLM_SERVING_ENGINE
-
- if [[ "$backend" == *"vllm"* ]]; then
- backend="vllm"
- fi
- #TODO: add output dir.
- client_command="genai-perf profile \
- -m $model \
- --service-kind openai \
- --backend "$backend" \
- --endpoint-type chat \
- --streaming \
- --url localhost:$port \
- --request-rate $qps \
- --num-prompts $num_prompts \
- "
-
- echo "Client command: $client_command"
-
- eval "$client_command"
-
- #TODO: process/record outputs
- done
- done
-
- kill_gpu_processes
-
-}
-
-prepare_dataset() {
-
- # download sharegpt dataset
- cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
- wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json
-
- # duplicate sonnet by 4x, to allow benchmarking with input length 2048
- cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
- echo "" > sonnet_4x.txt
- for _ in {1..4}
- do
- cat sonnet.txt >> sonnet_4x.txt
- done
-
-}
-
-main() {
-
- # check if the environment variable is successfully injected from yaml
-
- check_gpus
- check_hf_token
- get_current_llm_serving_engine
-
- pip install -U transformers
-
- pip install -r requirements/dev.txt
- which genai-perf
-
- # check storage
- df -h
-
- ensure_installed wget
- ensure_installed curl
- ensure_installed jq
- # genai-perf dependency
- ensure_installed libb64-0d
-
- prepare_dataset
-
- cd "$VLLM_SOURCE_CODE_LOC/benchmarks"
- declare -g RESULTS_FOLDER=results/
- mkdir -p $RESULTS_FOLDER
- BENCHMARK_ROOT="$VLLM_SOURCE_CODE_LOC/.buildkite/nightly-benchmarks/"
-
- # run the test
- run_serving_tests "$BENCHMARK_ROOT/tests/nightly-tests.json"
-
- # run genai-perf tests
- run_genai_perf_tests "$BENCHMARK_ROOT/tests/genai-perf-tests.json"
- mv artifacts/ $RESULTS_FOLDER/
-
- # upload benchmark results to buildkite
- python3 -m pip install tabulate pandas
- python3 "$BENCHMARK_ROOT/scripts/summary-nightly-results.py"
- upload_to_buildkite
-
-}
-
-main "$@"
diff --git a/.buildkite/nightly-benchmarks/scripts/summary-nightly-results.py b/.buildkite/nightly-benchmarks/scripts/summary-nightly-results.py
deleted file mode 100644
index fb3b9d5e34e03..0000000000000
--- a/.buildkite/nightly-benchmarks/scripts/summary-nightly-results.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# SPDX-License-Identifier: Apache-2.0
-# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-
-import datetime
-import json
-import os
-from pathlib import Path
-
-import pandas as pd
-from tabulate import tabulate
-
-results_folder = Path("results/")
-
-# serving results and the keys that will be printed into markdown
-serving_results = []
-serving_column_mapping = {
- "test_name": "Test name",
- "gpu_type": "GPU",
- "completed": "Successful req.",
- "request_throughput": "Tput (req/s)",
- "mean_ttft_ms": "Mean TTFT (ms)",
- "std_ttft_ms": "Std TTFT (ms)",
- "median_ttft_ms": "Median TTFT (ms)",
- "mean_itl_ms": "Mean ITL (ms)",
- "std_itl_ms": "Std ITL (ms)",
- "median_itl_ms": "Median ITL (ms)",
- "mean_tpot_ms": "Mean TPOT (ms)",
- "std_tpot_ms": "Std TPOT (ms)",
- "median_tpot_ms": "Median TPOT (ms)",
- "total_token_throughput": "Total Token Tput (tok/s)",
- "output_throughput": "Output Tput (tok/s)",
- "total_input_tokens": "Total input tokens",
- "total_output_tokens": "Total output tokens",
- "engine": "Engine",
-}
-
-if __name__ == "__main__":
- # collect results
- for test_file in results_folder.glob("*.json"):
- with open(test_file) as f:
- raw_result = json.loads(f.read())
-
- # attach the benchmarking command to raw_result
- with open(test_file.with_suffix(".commands")) as f:
- command = json.loads(f.read())
- raw_result.update(command)
-
- # update the test name of this result
- raw_result.update({"test_name": test_file.stem})
-
- # add the result to raw_result
- serving_results.append(raw_result)
- continue
-
- serving_results = pd.DataFrame.from_dict(serving_results)
-
- if not serving_results.empty:
- serving_results = serving_results[list(serving_column_mapping.keys())].rename(
- columns=serving_column_mapping
- )
-
- serving_md_table_with_headers = tabulate(
- serving_results, headers="keys", tablefmt="pipe", showindex=False
- )
- # remove the first line of header
- serving_md_table_lines = serving_md_table_with_headers.split("\n")
- serving_md_table_without_header = "\n".join(serving_md_table_lines[2:])
-
- prefix = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
- prefix = prefix + "_" + os.environ.get("CURRENT_LLM_SERVING_ENGINE")
-
- # document benchmarking results in markdown
- with open(results_folder / f"{prefix}_nightly_results.md", "w") as f:
- # document results with header.
- # for those who wants to reproduce our benchmark.
- f.write(serving_md_table_with_headers)
- f.write("\n")
-
- # document benchmarking results in json
- with open(results_folder / f"{prefix}_nightly_results.json", "w") as f:
- results = serving_results.to_dict(orient="records")
- f.write(json.dumps(results))
diff --git a/.buildkite/nightly-benchmarks/scripts/wait-for-image.sh b/.buildkite/nightly-benchmarks/scripts/wait-for-image.sh
deleted file mode 100644
index 50e1ab0242202..0000000000000
--- a/.buildkite/nightly-benchmarks/scripts/wait-for-image.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/bin/sh
-TOKEN=$(curl -s -L "https://public.ecr.aws/token?service=public.ecr.aws&scope=repository:q9t5s3a7/vllm-ci-postmerge-repo:pull" | jq -r .token)
-if [[ "$BUILDKITE_BRANCH" == "main" ]]; then
- URL="https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-postmerge-repo/manifests/$BUILDKITE_COMMIT"
-else
- URL="https://public.ecr.aws/v2/q9t5s3a7/vllm-ci-test-repo/manifests/$BUILDKITE_COMMIT"
-fi
-
-TIMEOUT_SECONDS=10
-
-retries=0
-while [ $retries -lt 1000 ]; do
- if [ "$(curl -s --max-time "$TIMEOUT_SECONDS" -L -H "Authorization: Bearer $TOKEN" -o /dev/null -w "%{http_code}" "$URL")" -eq 200 ]; then
- exit 0
- fi
-
- echo "Waiting for image to be available..."
-
- retries=$((retries + 1))
- sleep 5
-done
-
-exit 1
diff --git a/.buildkite/nightly-benchmarks/README.md b/.buildkite/performance-benchmarks/README.md
similarity index 69%
rename from .buildkite/nightly-benchmarks/README.md
rename to .buildkite/performance-benchmarks/README.md
index e6f5c8b60f459..6d494f64f14fa 100644
--- a/.buildkite/nightly-benchmarks/README.md
+++ b/.buildkite/performance-benchmarks/README.md
@@ -2,40 +2,23 @@
## Introduction
-This directory contains two sets of benchmark for vllm.
-
-- Performance benchmark: benchmark vllm's performance under various workload, for **developers** to gain clarity on whether their PR improves/degrades vllm's performance
-- Nightly benchmark: compare vllm's performance against alternatives (tgi, trt-llm and lmdeploy), for **the public** to know when to choose vllm.
-
-See [vLLM performance dashboard](https://hud.pytorch.org/benchmark/llms?repoName=vllm-project%2Fvllm) for the latest performance benchmark results and [vLLM GitHub README](https://github.com/vllm-project/vllm/blob/main/README.md) for latest nightly benchmark results.
+This directory contains a benchmarking suite for **developers** to run locally and gain clarity on whether their PR improves/degrades vllm's performance.
+vLLM also maintains a continuous performance benchmark at [perf.vllm.ai](https://perf.vllm.ai/), hosted on the PyTorch CI HUD.
## Performance benchmark quick overview
-**Benchmarking Coverage**: latency, throughput and fix-qps serving on A100 (the support for FP8 benchmark on H100 is coming!) and Intel® Xeon® Processors, with different models.
+**Benchmarking Coverage**: latency, throughput and fixed-QPS serving on B200, A100, H100, Intel® Xeon® Processors and Intel® Gaudi® 3 Accelerators, with different models.
**Benchmarking Duration**: about 1hr.
 **For benchmarking developers**: please try your best to constrain the duration of benchmarking to about 1 hr so that it won't take forever to run.
-## Nightly benchmark quick overview
-
-**Benchmarking Coverage**: Fix-qps serving on A100 (the support for FP8 benchmark on H100 is coming!) on Llama-3 8B, 70B and Mixtral 8x7B.
-
-**Benchmarking engines**: vllm, TGI, trt-llm and lmdeploy.
-
-**Benchmarking Duration**: about 3.5hrs.
-
## Trigger the benchmark
-Performance benchmark will be triggered when:
-
-- A PR being merged into vllm.
-- Every commit for those PRs with `perf-benchmarks` label AND `ready` label.
-
-Manually Trigger the benchmark
+The benchmark needs to be triggered manually:
```bash
-bash .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
+bash .buildkite/performance-benchmarks/scripts/run-performance-benchmarks.sh
```
Runtime environment variables:
@@ -47,14 +30,11 @@ Runtime environment variables:
- `REMOTE_HOST`: IP for the remote vLLM service to benchmark. Default value is empty string.
- `REMOTE_PORT`: Port for the remote vLLM service to benchmark. Default value is empty string.
-Nightly benchmark will be triggered when:
-
-- Every commit for those PRs with `perf-benchmarks` label and `nightly-benchmarks` label.
-
## Performance benchmark details
See [performance-benchmarks-descriptions.md](performance-benchmarks-descriptions.md) for detailed descriptions, and use `tests/latency-tests.json`, `tests/throughput-tests.json`, `tests/serving-tests.json` to configure the test cases.
> NOTE: For Intel® Xeon® Processors, use `tests/latency-tests-cpu.json`, `tests/throughput-tests-cpu.json`, `tests/serving-tests-cpu.json` instead.
+> For Intel® Gaudi® 3 Accelerators, use `tests/latency-tests-hpu.json`, `tests/throughput-tests-hpu.json`, `tests/serving-tests-hpu.json` instead.
>
### Latency test
@@ -152,26 +132,3 @@ Here is an example using the script to compare result_a and result_b with Model,
A comparison diagram will be generated below the table.
Here is an example to compare between 96c/results_gnr_96c_091_tp2pp3 and 128c/results_gnr_128c_091_tp2pp3
-
-## Nightly test details
-
-See [nightly-descriptions.md](nightly-descriptions.md) for the detailed description on test workload, models and docker containers of benchmarking other llm engines.
-
-### Workflow
-
-- The [nightly-pipeline.yaml](nightly-pipeline.yaml) specifies the docker containers for different LLM serving engines.
-- Inside each container, we run [scripts/run-nightly-benchmarks.sh](scripts/run-nightly-benchmarks.sh), which will probe the serving engine of the current container.
-- The `scripts/run-nightly-benchmarks.sh` will parse the workload described in [nightly-tests.json](tests/nightly-tests.json) and launch the right benchmark for the specified serving engine via `scripts/launch-server.sh`.
-- At last, we run [scripts/summary-nightly-results.py](scripts/summary-nightly-results.py) to collect and plot the final benchmarking results, and update the results to buildkite.
-
-### Nightly tests
-
-In [nightly-tests.json](tests/nightly-tests.json), we include the command line arguments for benchmarking commands, together with the benchmarking test cases. The format is highly similar to performance benchmark.
-
-### Docker containers
-
-The docker containers for benchmarking are specified in `nightly-pipeline.yaml`.
-
-WARNING: the docker versions are HARD-CODED and SHOULD BE ALIGNED WITH `nightly-descriptions.md`. The docker versions need to be hard-coded as there are several version-specific bug fixes inside `scripts/run-nightly-benchmarks.sh` and `scripts/launch-server.sh`.
-
-WARNING: populating `trt-llm` to latest version is not easy, as it requires updating several protobuf files in [tensorrt-demo](https://github.com/neuralmagic/tensorrt-demo.git).
diff --git a/.buildkite/nightly-benchmarks/performance-benchmarks-descriptions.md b/.buildkite/performance-benchmarks/performance-benchmarks-descriptions.md
similarity index 92%
rename from .buildkite/nightly-benchmarks/performance-benchmarks-descriptions.md
rename to .buildkite/performance-benchmarks/performance-benchmarks-descriptions.md
index 8bb16bd3cf373..b9437ac5ca99a 100644
--- a/.buildkite/nightly-benchmarks/performance-benchmarks-descriptions.md
+++ b/.buildkite/performance-benchmarks/performance-benchmarks-descriptions.md
@@ -5,7 +5,7 @@
- Input length: 32 tokens.
- Output length: 128 tokens.
- Batch size: fixed (8).
-- GPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
+- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- CPU Models: llama-3.1 8B.
- Evaluation metrics: end-to-end latency (mean, median, p99).
@@ -16,7 +16,7 @@
- Input length: randomly sample 200 prompts from ShareGPT dataset (with fixed random seed).
- Output length: the corresponding output length of these 200 prompts.
- Batch size: dynamically determined by vllm to achieve maximum throughput.
-- GPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
+- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- CPU Models: llama-3.1 8B.
- Evaluation metrics: throughput.
@@ -28,7 +28,7 @@
- Output length: the corresponding output length of these 200 prompts.
- Batch size: dynamically determined by vllm and the arrival pattern of the requests.
- **Average QPS (query per second)**: 1, 4, 16 and inf. QPS = inf means all requests come at once. For other QPS values, the arrival time of each query is determined using a random Poisson process (with fixed random seed).
-- GPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
+- GPU/HPU Models: llama-3.1 8B, llama-3 70B, mixtral 8x7B.
- We also added a speculative decoding test for llama-3 70B on GPU, under QPS 2
- CPU Models: llama-3.1 8B.
- Evaluation metrics: throughput, TTFT (time to the first token, with mean, median and p99), ITL (inter-token latency, with mean, median and p99).
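The serving description above states that, for finite QPS values, request arrival times are drawn from a Poisson process with a fixed random seed. A minimal illustration of generating such arrival times (the function name and seed are illustrative, not the benchmark's actual implementation):

```python
import numpy as np

def poisson_arrival_times(num_requests: int, qps: float, seed: int = 0) -> np.ndarray:
    """Cumulative arrival times from exponential inter-arrival gaps with mean 1/qps."""
    rng = np.random.default_rng(seed)
    return np.cumsum(rng.exponential(scale=1.0 / qps, size=num_requests))

# e.g. 200 requests at an average of 4 QPS arrive over roughly 50 seconds
print(poisson_arrival_times(200, 4.0)[-1])
```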
diff --git a/.buildkite/nightly-benchmarks/scripts/compare-json-results.py b/.buildkite/performance-benchmarks/scripts/compare-json-results.py
similarity index 100%
rename from .buildkite/nightly-benchmarks/scripts/compare-json-results.py
rename to .buildkite/performance-benchmarks/scripts/compare-json-results.py
diff --git a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py b/.buildkite/performance-benchmarks/scripts/convert-results-json-to-markdown.py
similarity index 99%
rename from .buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py
rename to .buildkite/performance-benchmarks/scripts/convert-results-json-to-markdown.py
index a7544aeef4c74..80bb4d846a226 100644
--- a/.buildkite/nightly-benchmarks/scripts/convert-results-json-to-markdown.py
+++ b/.buildkite/performance-benchmarks/scripts/convert-results-json-to-markdown.py
@@ -392,7 +392,7 @@ if __name__ == "__main__":
json_file = "benchmark_results.json"
with open(results_folder / md_file, "w") as f:
results = read_markdown(
- "../.buildkite/nightly-benchmarks/"
+ "../.buildkite/performance-benchmarks/"
+ "performance-benchmarks-descriptions.md"
)
results = results.format(
diff --git a/.buildkite/nightly-benchmarks/scripts/launch-server.sh b/.buildkite/performance-benchmarks/scripts/launch-server.sh
similarity index 100%
rename from .buildkite/nightly-benchmarks/scripts/launch-server.sh
rename to .buildkite/performance-benchmarks/scripts/launch-server.sh
diff --git a/.buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh b/.buildkite/performance-benchmarks/scripts/run-performance-benchmarks.sh
similarity index 96%
rename from .buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
rename to .buildkite/performance-benchmarks/scripts/run-performance-benchmarks.sh
index 5a47576483bbf..99a5a5e334f8e 100644
--- a/.buildkite/nightly-benchmarks/scripts/run-performance-benchmarks.sh
+++ b/.buildkite/performance-benchmarks/scripts/run-performance-benchmarks.sh
@@ -15,6 +15,8 @@ check_gpus() {
declare -g gpu_count=$(nvidia-smi --list-gpus | wc -l)
elif command -v amd-smi; then
declare -g gpu_count=$(amd-smi list | grep 'GPU' | wc -l)
+ elif command -v hl-smi; then
+ declare -g gpu_count=$(hl-smi --list | grep -i "Module ID" | wc -l)
fi
if [[ $gpu_count -gt 0 ]]; then
@@ -23,10 +25,16 @@ check_gpus() {
echo "Need at least 1 GPU to run benchmarking."
exit 1
fi
+
+ declare -g arch_suffix=''
+
if command -v nvidia-smi; then
declare -g gpu_type=$(nvidia-smi --query-gpu=name --format=csv,noheader | awk '{print $2}')
elif command -v amd-smi; then
declare -g gpu_type=$(amd-smi static -g 0 -a | grep 'MARKET_NAME' | awk '{print $2}')
+ elif command -v hl-smi; then
+ declare -g gpu_type=$(hl-smi -q | grep "Product Name" | head -n 1 | awk -F ':' '{print $2}' | sed 's/^ *//')
+ arch_suffix='-hpu'
fi
echo "GPU type is $gpu_type"
}
@@ -138,6 +146,10 @@ kill_gpu_processes() {
while [ "$(amd-smi metric -g 0 | grep 'USED_VRAM' | awk '{print $2}')" -ge 1000 ]; do
sleep 1
done
+ elif command -v hl-smi; then
+ while [ "$(hl-smi -q | grep "Used" | head -n 1 | awk '{print $3}')" -ge 1000 ]; do
+ sleep 1
+ done
fi
# remove vllm config file
@@ -451,6 +463,7 @@ main() {
ARCH='-cpu'
else
check_gpus
+ ARCH="$arch_suffix"
fi
check_hf_token
@@ -469,7 +482,7 @@ main() {
ensure_sharegpt_downloaded
declare -g RESULTS_FOLDER=results/
mkdir -p $RESULTS_FOLDER
- QUICK_BENCHMARK_ROOT=../.buildkite/nightly-benchmarks/
+ QUICK_BENCHMARK_ROOT=../.buildkite/performance-benchmarks/
# dump vllm info via vllm collect-env
env_output=$(vllm collect-env)
diff --git a/.buildkite/nightly-benchmarks/tests/genai-perf-tests.json b/.buildkite/performance-benchmarks/tests/genai-perf-tests.json
similarity index 100%
rename from .buildkite/nightly-benchmarks/tests/genai-perf-tests.json
rename to .buildkite/performance-benchmarks/tests/genai-perf-tests.json
diff --git a/.buildkite/nightly-benchmarks/tests/latency-tests-cpu.json b/.buildkite/performance-benchmarks/tests/latency-tests-cpu.json
similarity index 100%
rename from .buildkite/nightly-benchmarks/tests/latency-tests-cpu.json
rename to .buildkite/performance-benchmarks/tests/latency-tests-cpu.json
diff --git a/.buildkite/performance-benchmarks/tests/latency-tests-hpu.json b/.buildkite/performance-benchmarks/tests/latency-tests-hpu.json
new file mode 100644
index 0000000000000..296380f72a668
--- /dev/null
+++ b/.buildkite/performance-benchmarks/tests/latency-tests-hpu.json
@@ -0,0 +1,55 @@
+[
+ {
+ "test_name": "latency_llama8B_tp1",
+ "environment_variables": {
+ "PT_HPU_LAZY_MODE": 1,
+ "VLLM_CONTIGUOUS_PA": 1,
+ "VLLM_DEFRAG": 1
+ },
+ "parameters": {
+ "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+ "tensor_parallel_size": 1,
+ "load_format": "dummy",
+ "num-iters-warmup": 5,
+ "num-iters": 15,
+ "max-model-len": 256,
+ "async-scheduling": ""
+ }
+ },
+ {
+ "test_name": "latency_llama70B_tp4",
+ "environment_variables": {
+ "PT_HPU_LAZY_MODE": 1,
+ "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+ "VLLM_CONTIGUOUS_PA": 1,
+ "VLLM_DEFRAG": 1
+ },
+ "parameters": {
+ "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+ "tensor_parallel_size": 4,
+ "load_format": "dummy",
+ "num-iters-warmup": 5,
+ "num-iters": 15,
+ "max-model-len": 256,
+ "async-scheduling": ""
+ }
+ },
+ {
+ "test_name": "latency_mixtral8x7B_tp2",
+ "environment_variables": {
+ "PT_HPU_LAZY_MODE": 1,
+ "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+ "VLLM_CONTIGUOUS_PA": 1,
+ "VLLM_DEFRAG": 1
+ },
+ "parameters": {
+ "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "tensor_parallel_size": 2,
+ "load_format": "dummy",
+ "num-iters-warmup": 5,
+ "num-iters": 15,
+ "max-model-len": 256,
+ "async-scheduling": ""
+ }
+ }
+]
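In these test definitions the parameter keys mix underscores and dashes; the benchmark scripts turn each key/value pair into a CLI flag and normalize underscores to dashes, following the same convention as the `json2args` helper shown in the removed run-nightly-benchmarks.sh above. A rough Python equivalent, for illustration only:

```python
# Rough Python equivalent of the shell/jq json2args helper:
# "--" + key (underscores -> dashes) + " " + value.
def json2args(params: dict) -> str:
    return " ".join(
        f"--{key.replace('_', '-')} {value}".rstrip() for key, value in params.items()
    )

print(json2args({"tensor_parallel_size": 1, "num-iters-warmup": 5, "max-model-len": 256}))
# --tensor-parallel-size 1 --num-iters-warmup 5 --max-model-len 256
```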
diff --git a/.buildkite/nightly-benchmarks/tests/latency-tests.json b/.buildkite/performance-benchmarks/tests/latency-tests.json
similarity index 100%
rename from .buildkite/nightly-benchmarks/tests/latency-tests.json
rename to .buildkite/performance-benchmarks/tests/latency-tests.json
diff --git a/.buildkite/nightly-benchmarks/tests/nightly-tests.json b/.buildkite/performance-benchmarks/tests/nightly-tests.json
similarity index 100%
rename from .buildkite/nightly-benchmarks/tests/nightly-tests.json
rename to .buildkite/performance-benchmarks/tests/nightly-tests.json
diff --git a/.buildkite/nightly-benchmarks/tests/serving-tests-cpu-snc2.json b/.buildkite/performance-benchmarks/tests/serving-tests-cpu-snc2.json
similarity index 100%
rename from .buildkite/nightly-benchmarks/tests/serving-tests-cpu-snc2.json
rename to .buildkite/performance-benchmarks/tests/serving-tests-cpu-snc2.json
diff --git a/.buildkite/nightly-benchmarks/tests/serving-tests-cpu-snc3.json b/.buildkite/performance-benchmarks/tests/serving-tests-cpu-snc3.json
similarity index 100%
rename from .buildkite/nightly-benchmarks/tests/serving-tests-cpu-snc3.json
rename to .buildkite/performance-benchmarks/tests/serving-tests-cpu-snc3.json
diff --git a/.buildkite/nightly-benchmarks/tests/serving-tests-cpu.json b/.buildkite/performance-benchmarks/tests/serving-tests-cpu.json
similarity index 100%
rename from .buildkite/nightly-benchmarks/tests/serving-tests-cpu.json
rename to .buildkite/performance-benchmarks/tests/serving-tests-cpu.json
diff --git a/.buildkite/performance-benchmarks/tests/serving-tests-hpu.json b/.buildkite/performance-benchmarks/tests/serving-tests-hpu.json
new file mode 100644
index 0000000000000..8c6b34bd9fa33
--- /dev/null
+++ b/.buildkite/performance-benchmarks/tests/serving-tests-hpu.json
@@ -0,0 +1,82 @@
+[
+ {
+ "test_name": "serving_llama8B_tp1_sharegpt",
+ "qps_list": [1, 4, 16, "inf"],
+ "server_environment_variables": {
+ "PT_HPU_LAZY_MODE": 1,
+ "VLLM_CONTIGUOUS_PA": 1,
+ "VLLM_DEFRAG": 1
+ },
+ "server_parameters": {
+ "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+ "tensor_parallel_size": 1,
+ "swap_space": 16,
+ "disable_log_stats": "",
+ "load_format": "dummy",
+ "max-model-len": 2048,
+ "max-num-seqs": 256,
+ "async-scheduling": ""
+ },
+ "client_parameters": {
+ "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+ "backend": "vllm",
+ "dataset_name": "sharegpt",
+ "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+ "num_prompts": 200
+ }
+ },
+ {
+ "test_name": "serving_llama70B_tp4_sharegpt",
+ "qps_list": [1, 4, 16, "inf"],
+ "server_environment_variables": {
+ "PT_HPU_LAZY_MODE": 1,
+ "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+ "VLLM_CONTIGUOUS_PA": 1,
+ "VLLM_DEFRAG": 1
+ },
+ "server_parameters": {
+ "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+ "tensor_parallel_size": 4,
+ "swap_space": 16,
+ "disable_log_stats": "",
+ "load_format": "dummy",
+ "max-model-len": 2048,
+ "max-num-seqs": 256,
+ "async-scheduling": ""
+ },
+ "client_parameters": {
+ "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+ "backend": "vllm",
+ "dataset_name": "sharegpt",
+ "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+ "num_prompts": 200
+ }
+ },
+ {
+ "test_name": "serving_mixtral8x7B_tp2_sharegpt",
+ "qps_list": [1, 4, 16, "inf"],
+ "server_environment_variables": {
+ "PT_HPU_LAZY_MODE": 1,
+ "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+ "VLLM_CONTIGUOUS_PA": 1,
+ "VLLM_DEFRAG": 1
+ },
+ "server_parameters": {
+ "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "tensor_parallel_size": 2,
+ "swap_space": 16,
+ "disable_log_stats": "",
+ "load_format": "dummy",
+ "max-model-len": 2048,
+ "max-num-seqs": 256,
+ "async-scheduling": ""
+ },
+ "client_parameters": {
+ "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "backend": "vllm",
+ "dataset_name": "sharegpt",
+ "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+ "num_prompts": 200
+ }
+ }
+]
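
The serving tests sweep `qps_list`, where the string `"inf"` denotes an unthrottled run. A small sketch of how a driver might expand that list into per-run request rates; the `--request-rate` flag name and the `float("inf")` mapping are assumptions about the benchmark client, not quoted from the suite.

```python
# Hedged sketch: expand the qps_list of one serving test into per-run request rates.
qps_list = [1, 4, 16, "inf"]

for qps in qps_list:
    request_rate = float("inf") if qps == "inf" else float(qps)
    run_name = f"serving_llama8B_tp1_sharegpt_qps_{qps}"
    print(run_name, "--request-rate", request_rate)
```
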
diff --git a/.buildkite/nightly-benchmarks/tests/serving-tests.json b/.buildkite/performance-benchmarks/tests/serving-tests.json
similarity index 100%
rename from .buildkite/nightly-benchmarks/tests/serving-tests.json
rename to .buildkite/performance-benchmarks/tests/serving-tests.json
diff --git a/.buildkite/nightly-benchmarks/tests/throughput-tests-cpu.json b/.buildkite/performance-benchmarks/tests/throughput-tests-cpu.json
similarity index 100%
rename from .buildkite/nightly-benchmarks/tests/throughput-tests-cpu.json
rename to .buildkite/performance-benchmarks/tests/throughput-tests-cpu.json
diff --git a/.buildkite/performance-benchmarks/tests/throughput-tests-hpu.json b/.buildkite/performance-benchmarks/tests/throughput-tests-hpu.json
new file mode 100644
index 0000000000000..3127bf2f6bce3
--- /dev/null
+++ b/.buildkite/performance-benchmarks/tests/throughput-tests-hpu.json
@@ -0,0 +1,61 @@
+[
+ {
+ "test_name": "throughput_llama8B_tp1",
+ "environment_variables": {
+ "PT_HPU_LAZY_MODE": 1,
+ "VLLM_CONTIGUOUS_PA": 1,
+ "VLLM_DEFRAG": 1
+ },
+ "parameters": {
+ "model": "meta-llama/Meta-Llama-3.1-8B-Instruct",
+ "tensor_parallel_size": 1,
+ "load_format": "dummy",
+ "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+ "num_prompts": 1000,
+ "backend": "vllm",
+ "max-model-len": 2048,
+ "max-num-seqs": 512,
+ "async-scheduling": ""
+ }
+ },
+ {
+ "test_name": "throughput_llama70B_tp4",
+ "environment_variables": {
+ "PT_HPU_LAZY_MODE": 1,
+ "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+ "VLLM_CONTIGUOUS_PA": 1,
+ "VLLM_DEFRAG": 1
+ },
+ "parameters": {
+ "model": "meta-llama/Meta-Llama-3.1-70B-Instruct",
+ "tensor_parallel_size": 4,
+ "load_format": "dummy",
+ "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+ "num_prompts": 1000,
+ "backend": "vllm",
+ "max-model-len": 2048,
+ "max-num-seqs": 512,
+ "async-scheduling": ""
+ }
+ },
+ {
+ "test_name": "throughput_mixtral8x7B_tp2",
+ "environment_variables": {
+ "PT_HPU_LAZY_MODE": 1,
+ "PT_HPU_ENABLE_LAZY_COLLECTIVES": 1,
+ "VLLM_CONTIGUOUS_PA": 1,
+ "VLLM_DEFRAG": 1
+ },
+ "parameters": {
+ "model": "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "tensor_parallel_size": 2,
+ "load_format": "dummy",
+ "dataset_path": "./ShareGPT_V3_unfiltered_cleaned_split.json",
+ "num_prompts": 1000,
+ "backend": "vllm",
+ "max-model-len": 2048,
+ "max-num-seqs": 512,
+ "async-scheduling": ""
+ }
+ }
+]
diff --git a/.buildkite/nightly-benchmarks/tests/throughput-tests.json b/.buildkite/performance-benchmarks/tests/throughput-tests.json
similarity index 100%
rename from .buildkite/nightly-benchmarks/tests/throughput-tests.json
rename to .buildkite/performance-benchmarks/tests/throughput-tests.json
diff --git a/.buildkite/release-pipeline.yaml b/.buildkite/release-pipeline.yaml
index 33b7114666fa2..fbfc923998f89 100644
--- a/.buildkite/release-pipeline.yaml
+++ b/.buildkite/release-pipeline.yaml
@@ -8,7 +8,7 @@ steps:
commands:
# #NOTE: torch_cuda_arch_list is derived from upstream PyTorch build files here:
# https://github.com/pytorch/pytorch/blob/main/.ci/aarch64_linux/aarch64_ci_build.sh#L7
- - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.9.1 --build-arg VLLM_MAIN_CUDA_VERSION=12.9 --build-arg torch_cuda_arch_list='8.7 8.9 9.0 10.0+PTX 12.0' --tag vllm-ci:build-image --target build --progress plain -f docker/Dockerfile ."
+ - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.9.1 --build-arg torch_cuda_arch_list='8.7 8.9 9.0 10.0+PTX 12.0' --tag vllm-ci:build-image --target build --progress plain -f docker/Dockerfile ."
- "mkdir artifacts"
- "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
- "bash .buildkite/scripts/upload-wheels.sh"
@@ -30,19 +30,6 @@ steps:
DOCKER_BUILDKIT: "1"
# x86 + CUDA builds
- - label: "Build wheel - CUDA 12.8"
- depends_on: ~
- id: build-wheel-cuda-12-8
- agents:
- queue: cpu_queue_postmerge
- commands:
- - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --build-arg CUDA_VERSION=12.8.1 --tag vllm-ci:build-image --target build --progress plain -f docker/Dockerfile ."
- - "mkdir artifacts"
- - "docker run --rm -v $(pwd)/artifacts:/artifacts_host vllm-ci:build-image bash -c 'cp -r dist /artifacts_host && chmod -R a+rw /artifacts_host'"
- - "bash .buildkite/scripts/upload-wheels.sh"
- env:
- DOCKER_BUILDKIT: "1"
-
- label: "Build wheel - CUDA 12.9"
depends_on: ~
id: build-wheel-cuda-12-9
@@ -109,31 +96,12 @@ steps:
- label: "Annotate release workflow"
depends_on:
- create-multi-arch-manifest
- - build-wheel-cuda-12-8
id: annotate-release-workflow
agents:
queue: cpu_queue_postmerge
commands:
- "bash .buildkite/scripts/annotate-release.sh"
- - label: "Build and publish TPU release image"
- depends_on: ~
- if: build.env("NIGHTLY") == "1"
- agents:
- queue: tpu_queue_postmerge
- commands:
- - "yes | docker system prune -a"
- - "git fetch --all"
- - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg USE_SCCACHE=1 --build-arg GIT_REPO_CHECK=1 --tag vllm/vllm-tpu:nightly --tag vllm/vllm-tpu:$BUILDKITE_COMMIT --progress plain -f docker/Dockerfile.tpu ."
- - "docker push vllm/vllm-tpu:nightly"
- - "docker push vllm/vllm-tpu:$BUILDKITE_COMMIT"
- plugins:
- - docker-login#v3.0.0:
- username: vllmbot
- password-env: DOCKERHUB_TOKEN
- env:
- DOCKER_BUILDKIT: "1"
-
- input: "Provide Release version here"
id: input-release-version
fields:
@@ -150,7 +118,7 @@ steps:
queue: cpu_queue_postmerge
commands:
- "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7"
- - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --build-arg VLLM_CPU_AVX512BF16=true --build-arg VLLM_CPU_AVX512VNNI=true --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:$(buildkite-agent meta-data get release-version) --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:latest --progress plain --target vllm-openai -f docker/Dockerfile.cpu ."
+ - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --build-arg VLLM_CPU_AVX512BF16=true --build-arg VLLM_CPU_AVX512VNNI=true --build-arg VLLM_CPU_AMXBF16=true --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:$(buildkite-agent meta-data get release-version) --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:latest --progress plain --target vllm-openai -f docker/Dockerfile.cpu ."
- "docker push public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:latest"
- "docker push public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:$(buildkite-agent meta-data get release-version)"
env:
diff --git a/.buildkite/scripts/annotate-release.sh b/.buildkite/scripts/annotate-release.sh
index fde48603ad3cd..df805e0850806 100755
--- a/.buildkite/scripts/annotate-release.sh
+++ b/.buildkite/scripts/annotate-release.sh
@@ -2,22 +2,29 @@
set -ex
-# Get release version and strip leading 'v' if present
-RELEASE_VERSION=$(buildkite-agent meta-data get release-version | sed 's/^v//')
-
-if [ -z "$RELEASE_VERSION" ]; then
- echo "Error: RELEASE_VERSION is empty. 'release-version' metadata might not be set or is invalid."
- exit 1
+# Get release version, default to 1.0.0.dev for nightly/per-commit builds
+RELEASE_VERSION=$(buildkite-agent meta-data get release-version 2>/dev/null | sed 's/^v//')
+if [ -z "${RELEASE_VERSION}" ]; then
+ RELEASE_VERSION="1.0.0.dev"
fi
buildkite-agent annotate --style 'info' --context 'release-workflow' << EOF
-To download the wheel:
+To download the wheel (by commit):
+\`\`\`
+aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux1_x86_64.whl .
+aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux2014_aarch64.whl .
+
+aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cu129-cp38-abi3-manylinux1_x86_64.whl .
+aws s3 cp s3://vllm-wheels/${BUILDKITE_COMMIT}/vllm-${RELEASE_VERSION}+cu129-cp38-abi3-manylinux2014_aarch64.whl .
+\`\`\`
+
+To download the wheel (by version):
\`\`\`
aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux1_x86_64.whl .
aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}/vllm-${RELEASE_VERSION}-cp38-abi3-manylinux2014_aarch64.whl .
-aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}+cu126/vllm-${RELEASE_VERSION}+cu126-cp38-abi3-manylinux1_x86_64.whl .
aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}+cu129/vllm-${RELEASE_VERSION}+cu129-cp38-abi3-manylinux1_x86_64.whl .
+aws s3 cp s3://vllm-wheels/${RELEASE_VERSION}+cu130/vllm-${RELEASE_VERSION}+cu130-cp38-abi3-manylinux1_x86_64.whl .
\`\`\`
To download and upload the image:
@@ -38,9 +45,10 @@ docker tag vllm/vllm-openai:aarch64 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64
docker push vllm/vllm-openai:latest-aarch64
docker push vllm/vllm-openai:v${RELEASE_VERSION}-aarch64
-docker manifest create vllm/vllm-openai:latest vllm/vllm-openai:latest-x86_64 vllm/vllm-openai:latest-aarch64 --amend
-docker manifest create vllm/vllm-openai:v${RELEASE_VERSION} vllm/vllm-openai:v${RELEASE_VERSION}-x86_64 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64 --amend
+docker manifest rm vllm/vllm-openai:latest
+docker manifest create vllm/vllm-openai:latest vllm/vllm-openai:latest-x86_64 vllm/vllm-openai:latest-aarch64
+docker manifest create vllm/vllm-openai:v${RELEASE_VERSION} vllm/vllm-openai:v${RELEASE_VERSION}-x86_64 vllm/vllm-openai:v${RELEASE_VERSION}-aarch64
docker manifest push vllm/vllm-openai:latest
docker manifest push vllm/vllm-openai:v${RELEASE_VERSION}
\`\`\`
-EOF
\ No newline at end of file
+EOF
diff --git a/.buildkite/scripts/generate-nightly-index.py b/.buildkite/scripts/generate-nightly-index.py
new file mode 100644
index 0000000000000..90286ad4c6e14
--- /dev/null
+++ b/.buildkite/scripts/generate-nightly-index.py
@@ -0,0 +1,369 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+# do not complain about line length (for docstring)
+# ruff: noqa: E501
+
+import argparse
+import json
+import re
+import sys
+from dataclasses import asdict, dataclass
+from pathlib import Path
+from typing import Any
+from urllib.parse import quote
+
+if not sys.version_info >= (3, 10):
+ raise RuntimeError("This script requires Python 3.10 or higher.")
+
+INDEX_HTML_TEMPLATE = """
+<!DOCTYPE html>
+<html>
+<body>
+{items}
+</body>
+</html>
+"""
+
+
+@dataclass
+class WheelFileInfo:
+ package_name: str
+ version: str
+ build_tag: str | None
+ python_tag: str
+ abi_tag: str
+ platform_tag: str
+ variant: str | None
+ filename: str
+
+
+def parse_from_filename(file: str) -> WheelFileInfo:
+ """
+ Parse wheel file name to extract metadata.
+
+ The format of wheel names:
+ {package_name}-{version}(-{build_tag})?-{python_tag}-{abi_tag}-{platform_tag}.whl
+ All versions could contain a variant like '+cu129' or '.cpu' or `.rocm` (or not).
+ Example:
+ vllm-0.11.0-cp38-abi3-manylinux1_x86_64.whl
+ vllm-0.10.2rc2+cu129-cp38-abi3-manylinux2014_aarch64.whl
+ vllm-0.11.1rc8.dev14+gaa384b3c0-cp38-abi3-manylinux2014_aarch64.whl
+ vllm-0.11.1rc8.dev14+gaa384b3c0.cu130-cp38-abi3-manylinux1_x86_64.whl
+ """
+ wheel_file_re = re.compile(
+        r"^(?P<package_name>.+)-(?P<version>[^-]+?)(-(?P<build_tag>[^-]+))?-(?P<python_tag>[^-]+)-(?P<abi_tag>[^-]+)-(?P<platform_tag>[^-]+)\.whl$"
+ )
+ match = wheel_file_re.match(file)
+ if not match:
+ raise ValueError(f"Invalid wheel file name: {file}")
+
+ package_name = match.group("package_name")
+ version = match.group("version")
+ build_tag = match.group("build_tag")
+ python_tag = match.group("python_tag")
+ abi_tag = match.group("abi_tag")
+ platform_tag = match.group("platform_tag")
+
+ # extract variant from version
+ variant = None
+ if "dev" in version:
+ ver_after_dev = version.split("dev")[-1]
+ if "." in ver_after_dev:
+ variant = ver_after_dev.split(".")[-1]
+ version = version.removesuffix("." + variant)
+ else:
+ if "+" in version:
+ version, variant = version.split("+")
+
+ return WheelFileInfo(
+ package_name=package_name,
+ version=version,
+ build_tag=build_tag,
+ python_tag=python_tag,
+ abi_tag=abi_tag,
+ platform_tag=platform_tag,
+ variant=variant,
+ filename=file,
+ )
+
+
+def generate_project_list(subdir_names: list[str]) -> str:
+ """
+ Generate project list HTML content linking to each project & variant sub-directory.
+ """
+ href_tags = []
+ for name in sorted(subdir_names):
+ name = name.strip("/").strip(".")
+        href_tags.append(f'    <a href="{name}/">{name}/</a><br/>')
+ return INDEX_HTML_TEMPLATE.format(items="\n".join(href_tags))
+
+
+def generate_package_index_and_metadata(
+ wheel_files: list[WheelFileInfo], wheel_base_dir: Path, index_base_dir: Path
+) -> tuple[str, str]:
+ """
+ Generate package index HTML content for a specific package, linking to actual wheel files.
+ """
+ href_tags = []
+ metadata = []
+ for file in sorted(wheel_files, key=lambda x: x.filename):
+ relative_path = (
+ wheel_base_dir.relative_to(index_base_dir, walk_up=True) / file.filename
+ )
+        # handle '+' in the URL, and avoid double-encoding '/' or already-encoded '%2B'
+ # NOTE: this is AWS S3 specific behavior!
+ file_path_quoted = quote(relative_path.as_posix(), safe=":%/")
+        href_tags.append(f'    <a href="{file_path_quoted}">{file.filename}</a><br/>')
+ file_meta = asdict(file)
+ file_meta["path"] = file_path_quoted
+ metadata.append(file_meta)
+ index_str = INDEX_HTML_TEMPLATE.format(items="\n".join(href_tags))
+ metadata_str = json.dumps(metadata, indent=2)
+ return index_str, metadata_str
+
+
+def generate_index_and_metadata(
+ whl_files: list[str],
+ wheel_base_dir: Path,
+ index_base_dir: Path,
+ default_variant: str | None = None,
+ alias_to_default: str | None = None,
+):
+ """
+ Generate index for all wheel files.
+
+ Args:
+ whl_files (list[str]): List of wheel files (must be directly under `wheel_base_dir`).
+ wheel_base_dir (Path): Base directory for wheel files.
+ index_base_dir (Path): Base directory to store index files.
+ default_variant (str | None): The default variant name, if any.
+ alias_to_default (str | None): Alias variant name for the default variant, if any.
+
+ First, parse all wheel files to extract metadata.
+ We need to collect all wheel files for each variant, and generate an index for it (in a sub-directory).
+ The index for the default variant (if any) is generated in the root index directory.
+
+ If `default_variant` is provided, all wheels must have variant suffixes, and the default variant index
+ is purely a copy of the corresponding variant index, with only the links adjusted.
+ Otherwise, all wheels without variant suffixes are treated as the default variant.
+
+    If `alias_to_default` is provided, an additional alias sub-directory is created; it has the same content
+    as the default variant index, with only the links adjusted accordingly.
+
+ Index directory structure:
+ index_base_dir/ (hosted at wheels.vllm.ai/{nightly,$commit,$version}/)
+ index.html # project list, linking to "vllm/" and other packages, and all variant sub-directories
+ vllm/
+ index.html # package index, pointing to actual files in wheel_base_dir (relative path)
+ metadata.json # machine-readable metadata for all wheels in this package
+ cpu/ # cpu variant sub-directory
+ index.html
+ vllm/
+ index.html
+ metadata.json
+ cu129/ # cu129 is actually the alias to default variant
+ index.html
+ vllm/
+ index.html
+ metadata.json
+ cu130/ # cu130 variant sub-directory
+ index.html
+ vllm/
+ index.html
+ metadata.json
+ ...
+
+ metadata.json stores a dump of all wheel files' metadata in a machine-readable format:
+ [
+ {
+ "package_name": "vllm",
+ "version": "0.10.2rc2",
+ "build_tag": null,
+ "python_tag": "cp38",
+ "abi_tag": "abi3",
+ "platform_tag": "manylinux2014_aarch64",
+ "variant": "cu129",
+ "filename": "vllm-0.10.2rc2+cu129-cp38-abi3-manylinux2014_aarch64.whl",
+ "path": "../vllm-0.10.2rc2%2Bcu129-cp38-abi3-manylinux2014_aarch64.whl" # to be concatenated with the directory URL and URL-encoded
+ },
+ ...
+ ]
+ """
+
+ parsed_files = [parse_from_filename(f) for f in whl_files]
+
+ if not parsed_files:
+ print("No wheel files found, skipping index generation.")
+ return
+
+ # Group by variant
+ variant_to_files: dict[str, list[WheelFileInfo]] = {}
+ for file in parsed_files:
+ variant = file.variant or "default"
+ if variant not in variant_to_files:
+ variant_to_files[variant] = []
+ variant_to_files[variant].append(file)
+
+ print(f"Found variants: {list(variant_to_files.keys())}")
+
+ # sanity check for default variant
+ if default_variant:
+ if "default" in variant_to_files:
+ raise ValueError(
+ "All wheel files must have variant suffixes when `default_variant` is specified."
+ )
+ if default_variant not in variant_to_files:
+ raise ValueError(
+ f"Default variant '{default_variant}' not found among wheel files."
+ )
+
+ if alias_to_default:
+ if "default" not in variant_to_files:
+ # e.g. only some wheels are uploaded to S3 currently
+ print(
+ "[WARN] Alias to default variant specified, but no default variant found."
+ )
+ elif alias_to_default in variant_to_files:
+ raise ValueError(
+ f"Alias variant name '{alias_to_default}' already exists among wheel files."
+ )
+ else:
+ variant_to_files[alias_to_default] = variant_to_files["default"].copy()
+ print(f"Alias variant '{alias_to_default}' created for default variant.")
+
+ # Generate index for each variant
+ subdir_names = set()
+ for variant, files in variant_to_files.items():
+ if variant == "default":
+ variant_dir = index_base_dir
+ else:
+ variant_dir = index_base_dir / variant
+ subdir_names.add(variant)
+
+ variant_dir.mkdir(parents=True, exist_ok=True)
+
+ # gather all package names in this variant
+ packages = set(f.package_name for f in files)
+ if variant == "default":
+ # these packages should also appear in the "project list"
+ # generate after all variants are processed
+ subdir_names = subdir_names.union(packages)
+ else:
+ # generate project list for this variant directly
+ project_list_str = generate_project_list(sorted(packages))
+ with open(variant_dir / "index.html", "w") as f:
+ f.write(project_list_str)
+
+ for package in packages:
+ # filter files belonging to this package only
+ package_files = [f for f in files if f.package_name == package]
+ package_dir = variant_dir / package
+ package_dir.mkdir(parents=True, exist_ok=True)
+ index_str, metadata_str = generate_package_index_and_metadata(
+ package_files, wheel_base_dir, package_dir
+ )
+ with open(package_dir / "index.html", "w") as f:
+ f.write(index_str)
+ with open(package_dir / "metadata.json", "w") as f:
+ f.write(metadata_str)
+
+ # Generate top-level project list index
+ project_list_str = generate_project_list(sorted(subdir_names))
+ with open(index_base_dir / "index.html", "w") as f:
+ f.write(project_list_str)
+
+
+if __name__ == "__main__":
+ """
+ Arguments:
+ --version : version string for the current build (e.g., commit hash)
+ --current-objects : path to JSON file containing current S3 objects listing in this version directory
+ --output-dir : directory to store generated index files
+ --alias-to-default : (optional) alias variant name for the default variant
+ """
+
+ parser = argparse.ArgumentParser(
+ description="Process nightly build wheel files to generate indices."
+ )
+ parser.add_argument(
+ "--version",
+ type=str,
+ required=True,
+ help="Version string for the current build (e.g., commit hash)",
+ )
+ parser.add_argument(
+ "--current-objects",
+ type=str,
+ required=True,
+ help="Path to JSON file containing current S3 objects listing in this version directory",
+ )
+ parser.add_argument(
+ "--output-dir",
+ type=str,
+ required=True,
+ help="Directory to store generated index files",
+ )
+ parser.add_argument(
+ "--alias-to-default",
+ type=str,
+ default=None,
+ help="Alias variant name for the default variant",
+ )
+
+ args = parser.parse_args()
+
+ version = args.version
+ if "/" in version or "\\" in version:
+ raise ValueError("Version string must not contain slashes.")
+ current_objects_path = Path(args.current_objects)
+ output_dir = Path(args.output_dir)
+ if not output_dir.exists():
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ # Read current objects JSON
+ with open(current_objects_path) as f:
+ current_objects: dict[str, list[dict[str, Any]]] = json.load(f)
+
+    # current_objects looks like the output of the list_objects_v2 S3 API:
+ """
+ "Contents": [
+ {
+ "Key": "e2f56c309d2a28899c68975a7e104502d56deb8f/vllm-0.11.2.dev363+ge2f56c309-cp38-abi3-manylinux1_x86_64.whl",
+ "LastModified": "2025-11-28T14:00:32+00:00",
+ "ETag": "\"37a38339c7cdb61ca737021b968075df-52\"",
+ "ChecksumAlgorithm": [
+ "CRC64NVME"
+ ],
+ "ChecksumType": "FULL_OBJECT",
+ "Size": 435649349,
+ "StorageClass": "STANDARD"
+ },
+ ...
+ ]
+ """
+
+ # Extract wheel file keys
+ wheel_files = []
+ for item in current_objects.get("Contents", []):
+ key: str = item["Key"]
+ if key.endswith(".whl"):
+ wheel_files.append(key.split("/")[-1]) # only the filename is used
+
+ print(f"Found {len(wheel_files)} wheel files for version {version}: {wheel_files}")
+
+ # Generate index and metadata, assuming wheels and indices are stored as:
+ # s3://vllm-wheels/{version}/
+    # s3://vllm-wheels/<index location>/
+ wheel_base_dir = Path(output_dir).parent / version
+ index_base_dir = Path(output_dir)
+
+ generate_index_and_metadata(
+ whl_files=wheel_files,
+ wheel_base_dir=wheel_base_dir,
+ index_base_dir=index_base_dir,
+ default_variant=None,
+ alias_to_default=args.alias_to_default,
+ )
+ print(f"Successfully generated index and metadata in {output_dir}")
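
A hedged usage sketch for the new index generator: load the script as a module (its argparse entry point only runs under `__main__`), parse the example wheel names from the `parse_from_filename` docstring, and show how `quote(..., safe=":%/")` escapes `+` without double-encoding. The file path assumes the snippet runs from the repository root; the values in comments are expectations, not captured output.

```python
import importlib.util
from urllib.parse import quote

# load the generator as a module; nothing outside the __main__ guard has side effects
spec = importlib.util.spec_from_file_location(
    "nightly_index", ".buildkite/scripts/generate-nightly-index.py"
)
nightly_index = importlib.util.module_from_spec(spec)
spec.loader.exec_module(nightly_index)

# example names taken from the parse_from_filename() docstring
for name in [
    "vllm-0.11.0-cp38-abi3-manylinux1_x86_64.whl",
    "vllm-0.10.2rc2+cu129-cp38-abi3-manylinux2014_aarch64.whl",
    "vllm-0.11.1rc8.dev14+gaa384b3c0-cp38-abi3-manylinux2014_aarch64.whl",
    "vllm-0.11.1rc8.dev14+gaa384b3c0.cu130-cp38-abi3-manylinux1_x86_64.whl",
]:
    info = nightly_index.parse_from_filename(name)
    print(info.version, info.variant)
    # expected variants: None, "cu129", None, "cu130"

# '+' must be percent-encoded in hrefs; '%' stays in `safe` so "%2B" is not double-encoded
print(quote("vllm-0.10.2rc2+cu129-cp38-abi3-manylinux2014_aarch64.whl", safe=":%/"))
# expected: vllm-0.10.2rc2%2Bcu129-cp38-abi3-manylinux2014_aarch64.whl
```
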
diff --git a/.buildkite/scripts/hardware_ci/run-amd-test.sh b/.buildkite/scripts/hardware_ci/run-amd-test.sh
index aa4cc7b35a543..864eb470bb0a7 100755
--- a/.buildkite/scripts/hardware_ci/run-amd-test.sh
+++ b/.buildkite/scripts/hardware_ci/run-amd-test.sh
@@ -59,7 +59,7 @@ while true; do
fi
done
-echo "--- Pulling container"
+echo "--- Pulling container"
image_name="rocm/vllm-ci:${BUILDKITE_COMMIT}"
container_name="rocm_${BUILDKITE_COMMIT}_$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 10; echo)"
docker pull "${image_name}"
@@ -78,17 +78,13 @@ HF_MOUNT="/root/.cache/huggingface"
commands=$@
echo "Commands:$commands"
-if [[ $commands == *"pytest -v -s basic_correctness/test_basic_correctness.py"* ]]; then
- commands=${commands//"pytest -v -s basic_correctness/test_basic_correctness.py"/"VLLM_USE_TRITON_FLASH_ATTN=0 pytest -v -s basic_correctness/test_basic_correctness.py"}
-fi
+commands=${commands//"pytest -v -s basic_correctness/test_basic_correctness.py"/"pytest -v -s basic_correctness/test_basic_correctness.py"}
if [[ $commands == *"pytest -v -s models/test_registry.py"* ]]; then
commands=${commands//"pytest -v -s models/test_registry.py"/"pytest -v -s models/test_registry.py -k 'not BambaForCausalLM and not GritLM and not Mamba2ForCausalLM and not Zamba2ForCausalLM'"}
fi
-if [[ $commands == *"pytest -v -s compile/test_basic_correctness.py"* ]]; then
- commands=${commands//"pytest -v -s compile/test_basic_correctness.py"/"VLLM_USE_TRITON_FLASH_ATTN=0 pytest -v -s compile/test_basic_correctness.py"}
-fi
+commands=${commands//"pytest -v -s compile/test_basic_correctness.py"/"pytest -v -s compile/test_basic_correctness.py"}
if [[ $commands == *"pytest -v -s lora"* ]]; then
commands=${commands//"pytest -v -s lora"/"VLLM_ROCM_CUSTOM_PAGED_ATTN=0 pytest -v -s lora"}
@@ -173,19 +169,28 @@ fi
PARALLEL_JOB_COUNT=8
MYPYTHONPATH=".."
-# check if the command contains shard flag, we will run all shards in parallel because the host have 8 GPUs.
+# Test that we're launching on the machine that has
+# proper access to GPUs
+render_gid=$(getent group render | cut -d: -f3)
+if [[ -z "$render_gid" ]]; then
+ echo "Error: 'render' group not found. This is required for GPU access." >&2
+ exit 1
+fi
+
+# check if the command contains the shard flag; if so, run all shards in parallel because the host has 8 GPUs.
if [[ $commands == *"--shard-id="* ]]; then
- # assign job count as the number of shards used
- commands=${commands//"--num-shards= "/"--num-shards=${PARALLEL_JOB_COUNT} "}
+ # assign job count as the number of shards used
+ commands=$(echo "$commands" | sed -E "s/--num-shards[[:blank:]]*=[[:blank:]]*[0-9]*/--num-shards=${PARALLEL_JOB_COUNT} /g" | sed 's/ \\ / /g')
for GPU in $(seq 0 $(($PARALLEL_JOB_COUNT-1))); do
# assign shard-id for each shard
- commands_gpu=${commands//"--shard-id= "/"--shard-id=${GPU} "}
+ commands_gpu=$(echo "$commands" | sed -E "s/--shard-id[[:blank:]]*=[[:blank:]]*[0-9]*/--shard-id=${GPU} /g" | sed 's/ \\ / /g')
echo "Shard ${GPU} commands:$commands_gpu"
echo "Render devices: $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES"
docker run \
--device /dev/kfd $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES \
--network=host \
--shm-size=16gb \
+ --group-add "$render_gid" \
--rm \
-e HIP_VISIBLE_DEVICES="${GPU}" \
-e HF_TOKEN \
@@ -217,8 +222,8 @@ else
--device /dev/kfd $BUILDKITE_AGENT_META_DATA_RENDER_DEVICES \
--network=host \
--shm-size=16gb \
+ --group-add "$render_gid" \
--rm \
- -e HIP_VISIBLE_DEVICES=0 \
-e HF_TOKEN \
-e AWS_ACCESS_KEY_ID \
-e AWS_SECRET_ACCESS_KEY \
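
The shard handling now rewrites `--num-shards=...` / `--shard-id=...` with `sed` instead of plain string substitution, so values that are already set get normalized too. Below is a Python rendering of the same substitutions for readability only; it mirrors the sed expressions above (`[[:blank:]]` becomes `[ \t]`) and is not part of the CI script.

```python
import re

PARALLEL_JOB_COUNT = 8
commands = "pytest -v -s kernels/moe --shard-id= --num-shards= "

# normalize any existing --num-shards value, then strip stray " \ " sequences,
# mirroring: sed -E "s/--num-shards[[:blank:]]*=[[:blank:]]*[0-9]*/--num-shards=8 /g" | sed 's/ \\ / /g'
commands = re.sub(r"--num-shards[ \t]*=[ \t]*[0-9]*",
                  f"--num-shards={PARALLEL_JOB_COUNT} ", commands).replace(" \\ ", " ")

for gpu in range(PARALLEL_JOB_COUNT):
    # each shard gets its own --shard-id and is later pinned to one GPU via HIP_VISIBLE_DEVICES
    commands_gpu = re.sub(r"--shard-id[ \t]*=[ \t]*[0-9]*",
                          f"--shard-id={gpu} ", commands).replace(" \\ ", " ")
    print(f"GPU {gpu}: {commands_gpu}")
```
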
diff --git a/.buildkite/scripts/hardware_ci/run-cpu-test-arm.sh b/.buildkite/scripts/hardware_ci/run-cpu-test-arm.sh
new file mode 100755
index 0000000000000..b5f6b2494792f
--- /dev/null
+++ b/.buildkite/scripts/hardware_ci/run-cpu-test-arm.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+
+# This script builds the CPU docker image and runs the offline inference inside the container.
+# It serves as a sanity check for compilation and basic model usage.
+set -ex
+
+# allow to bind to different cores
+CORE_RANGE=${CORE_RANGE:-0-16}
+OMP_CORE_RANGE=${OMP_CORE_RANGE:-0-16}
+
+export CMAKE_BUILD_PARALLEL_LEVEL=16
+
+# Setup cleanup
+remove_docker_container() {
+ set -e;
+ docker rm -f cpu-test || true;
+}
+trap remove_docker_container EXIT
+remove_docker_container
+
+# Try building the docker image
+docker build --tag cpu-test --target vllm-test -f docker/Dockerfile.cpu .
+
+# Run the image
+docker run -itd --cpuset-cpus="$CORE_RANGE" --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=16 --env VLLM_CPU_CI_ENV=1 -e E2E_OMP_THREADS="$OMP_CORE_RANGE" --shm-size=4g --name cpu-test cpu-test
+
+function cpu_tests() {
+ set -e
+
+ docker exec cpu-test bash -c "
+ set -e
+ pip list"
+
+ # offline inference
+ docker exec cpu-test bash -c "
+ set -e
+ python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m"
+
+ # Run kernel tests
+ docker exec cpu-test bash -c "
+ set -e
+ pytest -x -v -s tests/kernels/test_onednn.py
+ pytest -x -v -s tests/kernels/attention/test_cpu_attn.py"
+
+ # basic online serving
+ docker exec cpu-test bash -c '
+ set -e
+ VLLM_CPU_OMP_THREADS_BIND=$E2E_OMP_THREADS vllm serve Qwen/Qwen3-0.6B --max-model-len 2048 &
+ server_pid=$!
+ timeout 600 bash -c "until curl localhost:8000/v1/models; do sleep 1; done" || exit 1
+ vllm bench serve \
+ --backend vllm \
+ --dataset-name random \
+ --model Qwen/Qwen3-0.6B \
+ --num-prompts 20 \
+ --endpoint /v1/completions
+ kill -s SIGTERM $server_pid &'
+}
+
+# All CPU tests are expected to finish in less than 40 mins.
+export -f cpu_tests
+timeout 2h bash -c cpu_tests
diff --git a/.buildkite/scripts/hardware_ci/run-cpu-test-ppc64le.sh b/.buildkite/scripts/hardware_ci/run-cpu-test-ppc64le.sh
index 39ea180173081..3728f73fa2a36 100755
--- a/.buildkite/scripts/hardware_ci/run-cpu-test-ppc64le.sh
+++ b/.buildkite/scripts/hardware_ci/run-cpu-test-ppc64le.sh
@@ -25,20 +25,22 @@ function cpu_tests() {
# offline inference
podman exec -it "$container_id" bash -c "
+ export TORCH_COMPILE_DISABLE=1
set -xve
python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m" >> $HOME/test_basic.log
# Run basic model test
podman exec -it "$container_id" bash -c "
+ export TORCH_COMPILE_DISABLE=1
set -evx
pip install pytest pytest-asyncio einops peft Pillow soundfile transformers_stream_generator matplotlib
- pip install sentence-transformers datamodel_code_generator
+ pip install sentence-transformers datamodel_code_generator tblib
# Note: disable Bart until supports V1
# pytest -v -s tests/models/language/generation/test_bart.py -m cpu_model
- pytest -v -s tests/models/language/generation/test_common.py::test_models[False-5-32-openai-community/gpt2]
- pytest -v -s tests/models/language/generation/test_common.py::test_models[False-5-32-facebook/opt-125m]
- pytest -v -s tests/models/language/generation/test_common.py::test_models[False-5-32-google/gemma-1.1-2b-it]
+ pytest -v -s tests/models/language/generation/test_common.py::test_models[False-False-5-32-openai-community/gpt2]
+ pytest -v -s tests/models/language/generation/test_common.py::test_models[False-False-5-32-facebook/opt-125m]
+ pytest -v -s tests/models/language/generation/test_common.py::test_models[False-False-5-32-google/gemma-1.1-2b-it]
pytest -v -s tests/models/language/pooling/test_classification.py::test_models[float-jason9693/Qwen2.5-1.5B-apeach]
# TODO: Below test case tests/models/language/pooling/test_embedding.py::test_models[True-ssmits/Qwen2-7B-Instruct-embed-base] fails on ppc64le. Disabling it for time being.
# pytest -v -s tests/models/language/pooling/test_embedding.py -m cpu_model" >> $HOME/test_rest.log
diff --git a/.buildkite/scripts/hardware_ci/run-cpu-test.sh b/.buildkite/scripts/hardware_ci/run-cpu-test.sh
index 7927aef19e4eb..438fe522c8702 100644
--- a/.buildkite/scripts/hardware_ci/run-cpu-test.sh
+++ b/.buildkite/scripts/hardware_ci/run-cpu-test.sh
@@ -21,8 +21,8 @@ trap remove_docker_container EXIT
remove_docker_container
# Try building the docker image
-numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --tag cpu-test-"$NUMA_NODE" --target vllm-test -f docker/Dockerfile.cpu .
-numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --build-arg VLLM_CPU_DISABLE_AVX512="true" --tag cpu-test-"$NUMA_NODE"-avx2 --target vllm-test -f docker/Dockerfile.cpu .
+numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --progress plain --tag cpu-test-"$NUMA_NODE" --target vllm-test -f docker/Dockerfile.cpu .
+numactl -C "$CORE_RANGE" -N "$NUMA_NODE" docker build --progress plain --build-arg VLLM_CPU_DISABLE_AVX512="true" --tag cpu-test-"$NUMA_NODE"-avx2 --target vllm-test -f docker/Dockerfile.cpu .
# Run the image, setting --shm-size=4g for tensor parallel.
docker run -itd --cpuset-cpus="$CORE_RANGE" --cpuset-mems="$NUMA_NODE" --entrypoint /bin/bash -v ~/.cache/huggingface:/root/.cache/huggingface --privileged=true -e HF_TOKEN --env VLLM_CPU_KVCACHE_SPACE=16 --env VLLM_CPU_CI_ENV=1 -e E2E_OMP_THREADS="$OMP_CORE_RANGE" --shm-size=4g --name cpu-test-"$NUMA_NODE" cpu-test-"$NUMA_NODE"
@@ -49,6 +49,7 @@ function cpu_tests() {
# Run kernel tests
docker exec cpu-test-"$NUMA_NODE" bash -c "
set -e
+ pytest -x -v -s tests/kernels/attention/test_cpu_attn.py
pytest -x -v -s tests/kernels/test_onednn.py"
# Run basic model test
@@ -72,12 +73,11 @@ function cpu_tests() {
pytest -x -s -v \
tests/quantization/test_compressed_tensors.py::test_compressed_tensors_w8a8_logprobs"
- # Note: disable it until supports V1
- # Run AWQ test
- # docker exec cpu-test-"$NUMA_NODE" bash -c "
- # set -e
- # VLLM_USE_V1=0 pytest -x -s -v \
- # tests/quantization/test_ipex_quant.py"
+ # Run AWQ/GPTQ test
+ docker exec cpu-test-"$NUMA_NODE" bash -c "
+ set -e
+ pytest -x -s -v \
+ tests/quantization/test_cpu_wna16.py"
# Run multi-lora tests
docker exec cpu-test-"$NUMA_NODE" bash -c "
@@ -116,4 +116,4 @@ function cpu_tests() {
# All of CPU tests are expected to be finished less than 40 mins.
export -f cpu_tests
-timeout 2h bash -c "cpu_tests $CORE_RANGE $NUMA_NODE"
+timeout 2.5h bash -c "cpu_tests $CORE_RANGE $NUMA_NODE"
diff --git a/.buildkite/scripts/hardware_ci/run-xpu-test.sh b/.buildkite/scripts/hardware_ci/run-xpu-test.sh
index 250a64fdd071c..4d163399cfc6c 100644
--- a/.buildkite/scripts/hardware_ci/run-xpu-test.sh
+++ b/.buildkite/scripts/hardware_ci/run-xpu-test.sh
@@ -20,7 +20,10 @@ trap remove_docker_container EXIT
# Run the image and test offline inference/tensor parallel
docker run \
- --device /dev/dri \
+ --device /dev/dri:/dev/dri \
+ --net=host \
+ --ipc=host \
+ --privileged \
-v /dev/dri/by-path:/dev/dri/by-path \
--entrypoint="" \
-e "HF_TOKEN=${HF_TOKEN}" \
@@ -32,7 +35,7 @@ docker run \
echo $ZE_AFFINITY_MASK
pip install tblib==3.1.0
python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager
- python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 -O3 -O.cudagraph_mode=NONE
+ python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 -O3 -cc.cudagraph_mode=NONE
python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager -tp 2 --distributed-executor-backend ray
python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager -tp 2 --distributed-executor-backend mp
VLLM_ATTENTION_BACKEND=TRITON_ATTN python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager
@@ -42,7 +45,7 @@ docker run \
pytest -v -s v1/sample --ignore=v1/sample/test_logprobs.py --ignore=v1/sample/test_logprobs_e2e.py
pytest -v -s v1/worker --ignore=v1/worker/test_gpu_model_runner.py
pytest -v -s v1/structured_output
- pytest -v -s v1/spec_decode --ignore=v1/spec_decode/test_max_len.py --ignore=v1/spec_decode/test_tree_attention.py
- pytest -v -s v1/kv_connector/unit --ignore=v1/kv_connector/unit/test_multi_connector.py --ignore=v1/kv_connector/unit/test_nixl_connector.py --ignore=v1/kv_connector/unit/test_shared_storage_connector.py
+ pytest -v -s v1/spec_decode --ignore=v1/spec_decode/test_max_len.py --ignore=v1/spec_decode/test_tree_attention.py --ignore=v1/spec_decode/test_speculators_eagle3.py
+ pytest -v -s v1/kv_connector/unit --ignore=v1/kv_connector/unit/test_multi_connector.py --ignore=v1/kv_connector/unit/test_nixl_connector.py --ignore=v1/kv_connector/unit/test_shared_storage_connector.py --ignore=v1/kv_connector/unit/test_lmcache_integration.py
pytest -v -s v1/test_serial_utils.py
'
diff --git a/.buildkite/scripts/scheduled_integration_test/deepseek_v2_lite_ep_eplb.sh b/.buildkite/scripts/scheduled_integration_test/deepseek_v2_lite_ep_eplb.sh
new file mode 100644
index 0000000000000..8106f50f18f66
--- /dev/null
+++ b/.buildkite/scripts/scheduled_integration_test/deepseek_v2_lite_ep_eplb.sh
@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+set -euxo pipefail
+
+# args: [THRESHOLD] [NUM_QUESTIONS] [START_PORT]
+THRESHOLD=${1:-0.25}
+NUM_Q=${2:-1319}
+PORT=${3:-8010}
+OUT_DIR=${OUT_DIR:-/tmp/vllm-scheduled}
+mkdir -p "${OUT_DIR}"
+
+wait_for_server() {
+ local port=$1
+ timeout 600 bash -c '
+ until curl -sf "http://127.0.0.1:'"$port"'/health" > /dev/null; do
+ sleep 1
+ done'
+}
+
+MODEL="deepseek-ai/DeepSeek-V2-lite"
+
+# Set BACKENDS based on platform
+if command -v rocm-smi &> /dev/null || [[ -d /opt/rocm ]] || [[ -n "${ROCM_PATH:-}" ]]; then
+ # ROCm platform
+ BACKENDS=("allgather_reducescatter")
+ # Disable MOE padding for ROCm since it is causing eplb to fail
+ export VLLM_ROCM_MOE_PADDING=0
+else
+ # Non-ROCm platform (CUDA/other)
+ BACKENDS=("deepep_high_throughput" "deepep_low_latency")
+fi
+
+cleanup() {
+ if [[ -n "${SERVER_PID:-}" ]] && kill -0 "${SERVER_PID}" 2>/dev/null; then
+ kill "${SERVER_PID}" 2>/dev/null || true
+ for _ in {1..20}; do
+ kill -0 "${SERVER_PID}" 2>/dev/null || break
+ sleep 0.5
+ done
+ kill -9 "${SERVER_PID}" 2>/dev/null || true
+ fi
+}
+trap cleanup EXIT
+
+for BACK in "${BACKENDS[@]}"; do
+ VLLM_DEEP_GEMM_WARMUP=skip \
+ VLLM_ALL2ALL_BACKEND=$BACK \
+ vllm serve "$MODEL" \
+ --enforce-eager \
+ --tensor-parallel-size 2 \
+ --data-parallel-size 2 \
+ --enable-expert-parallel \
+ --enable-eplb \
+ --trust-remote-code \
+ --max-model-len 2048 \
+ --port $PORT &
+ SERVER_PID=$!
+ wait_for_server $PORT
+
+ TAG=$(echo "$MODEL" | tr '/: \\n' '_____')
+ OUT="${OUT_DIR}/${TAG}_${BACK}.json"
+ python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port $PORT --num-questions ${NUM_Q} --save-results ${OUT}
+  python3 - <<PY
+import json
+
+# assumes gsm8k_eval.py --save-results writes an "accuracy" field to ${OUT}
+acc = json.load(open("${OUT}"))["accuracy"]
+assert acc >= ${THRESHOLD}, f"${MODEL} ${BACK} accuracy {acc}"
+PY
+
+ cleanup
+ SERVER_PID=
+ sleep 1
+ PORT=$((PORT+1))
+done
diff --git a/.buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep_eplb.sh b/.buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep_eplb.sh
new file mode 100644
index 0000000000000..6a1bef275d047
--- /dev/null
+++ b/.buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep_eplb.sh
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+set -euxo pipefail
+
+# args: [THRESHOLD] [NUM_QUESTIONS] [START_PORT] [DATA_PARALLEL_SIZE] [TENSOR_PARALLEL_SIZE]
+THRESHOLD=${1:-0.8}
+NUM_Q=${2:-1319}
+PORT=${3:-8020}
+DATA_PARALLEL_SIZE=${4:-2}
+TENSOR_PARALLEL_SIZE=${5:-2}
+OUT_DIR=${OUT_DIR:-/tmp/vllm-scheduled}
+mkdir -p "${OUT_DIR}"
+
+wait_for_server() {
+ local port=$1
+ timeout 600 bash -c '
+ until curl -sf "http://127.0.0.1:'"$port"'/health" > /dev/null; do
+ sleep 1
+ done'
+}
+
+MODEL="QWen/Qwen3-30B-A3B-FP8"
+# Set BACKENDS based on platform
+if command -v rocm-smi &> /dev/null || [[ -d /opt/rocm ]] || [[ -n "${ROCM_PATH:-}" ]]; then
+ # ROCm platform
+ BACKENDS=("allgather_reducescatter")
+ # Disable MOE padding for ROCm since it is causing eplb to fail
+ export VLLM_ROCM_MOE_PADDING=0
+else
+ # Non-ROCm platform (CUDA/other)
+ BACKENDS=("deepep_high_throughput" "deepep_low_latency")
+fi
+
+cleanup() {
+ if [[ -n "${SERVER_PID:-}" ]] && kill -0 "${SERVER_PID}" 2>/dev/null; then
+ kill "${SERVER_PID}" 2>/dev/null || true
+ for _ in {1..20}; do
+ kill -0 "${SERVER_PID}" 2>/dev/null || break
+ sleep 0.5
+ done
+ kill -9 "${SERVER_PID}" 2>/dev/null || true
+ fi
+}
+trap cleanup EXIT
+
+for BACK in "${BACKENDS[@]}"; do
+ VLLM_DEEP_GEMM_WARMUP=skip \
+ VLLM_ALL2ALL_BACKEND=$BACK \
+ vllm serve "$MODEL" \
+ --enforce-eager \
+ --enable-eplb \
+ --eplb-config '{"window_size":10, "step_interval":100, "num_redundant_experts":0, "log_balancedness":true}' \
+ --tensor-parallel-size ${TENSOR_PARALLEL_SIZE} \
+ --data-parallel-size ${DATA_PARALLEL_SIZE} \
+ --enable-expert-parallel \
+ --trust-remote-code \
+ --max-model-len 2048 \
+ --port $PORT &
+ SERVER_PID=$!
+ wait_for_server $PORT
+
+ TAG=$(echo "$MODEL" | tr '/: \\n' '_____')
+ OUT="${OUT_DIR}/${TAG}_${BACK}.json"
+ python3 tests/evals/gsm8k/gsm8k_eval.py --host http://127.0.0.1 --port $PORT --num-questions ${NUM_Q} --save-results ${OUT}
+  python3 - <<PY
+import json
+
+# assumes gsm8k_eval.py --save-results writes an "accuracy" field to ${OUT}
+acc = json.load(open("${OUT}"))["accuracy"]
+assert acc >= ${THRESHOLD}, f"${MODEL} ${BACK} accuracy {acc}"
+PY
+
+ cleanup
+ SERVER_PID=
+ sleep 1
+ PORT=$((PORT+1))
+done
diff --git a/.buildkite/scripts/upload-wheels.sh b/.buildkite/scripts/upload-wheels.sh
index 945c5e48c0090..05accb9cf16d0 100644
--- a/.buildkite/scripts/upload-wheels.sh
+++ b/.buildkite/scripts/upload-wheels.sh
@@ -2,6 +2,28 @@
set -ex
+# ======== part 0: setup ========
+
+BUCKET="vllm-wheels"
+INDICES_OUTPUT_DIR="indices"
+DEFAULT_VARIANT_ALIAS="cu129" # align with vLLM_MAIN_CUDA_VERSION in vllm/envs.py
+PYTHON=${PYTHON_PROG:=python3} # try to read from env var, otherwise use python3
+SUBPATH=$BUILDKITE_COMMIT
+S3_COMMIT_PREFIX="s3://$BUCKET/$SUBPATH/"
+
+# detect if python3.10+ is available
+has_new_python=$($PYTHON -c "print(1 if __import__('sys').version_info >= (3,10) else 0)")
+if [[ "$has_new_python" -eq 0 ]]; then
+ # use new python from docker
+ docker pull python:3-slim
+ PYTHON="docker run --rm -v $(pwd):/app -w /app python:3-slim python3"
+fi
+
+echo "Using python interpreter: $PYTHON"
+echo "Python version: $($PYTHON --version)"
+
+# ========= part 1: collect, rename & upload the wheel ==========
+
# Assume wheels are in artifacts/dist/*.whl
wheel_files=(artifacts/dist/*.whl)
@@ -10,74 +32,69 @@ if [[ ${#wheel_files[@]} -ne 1 ]]; then
echo "Error: Expected exactly one wheel file in artifacts/dist/, but found ${#wheel_files[@]}"
exit 1
fi
-
-# Get the single wheel file
wheel="${wheel_files[0]}"
-# Detect architecture and rename 'linux' to appropriate manylinux version
-arch=$(uname -m)
-if [[ $arch == "x86_64" ]]; then
- manylinux_version="manylinux1"
-elif [[ $arch == "aarch64" ]]; then
- manylinux_version="manylinux2014"
-else
- echo "Warning: Unknown architecture $arch, using manylinux1 as default"
- manylinux_version="manylinux1"
-fi
+# current build image uses ubuntu 20.04, which corresponds to manylinux_2_31
+# refer to https://github.com/mayeut/pep600_compliance?tab=readme-ov-file#acceptable-distros-to-build-wheels
+manylinux_version="manylinux_2_31"
# Rename 'linux' to the appropriate manylinux version in the wheel filename
+if [[ "$wheel" != *"linux"* ]]; then
+ echo "Error: Wheel filename does not contain 'linux': $wheel"
+ exit 1
+fi
new_wheel="${wheel/linux/$manylinux_version}"
mv -- "$wheel" "$new_wheel"
wheel="$new_wheel"
+echo "Renamed wheel to: $wheel"
# Extract the version from the wheel
version=$(unzip -p "$wheel" '**/METADATA' | grep '^Version: ' | cut -d' ' -f2)
-echo "Version: $version"
+echo "Version in wheel: $version"
+pure_version="${version%%+*}"
+echo "Pure version (without variant): $pure_version"
-normal_wheel="$wheel" # Save the original wheel filename
+# copy wheel to its own bucket
+aws s3 cp "$wheel" "$S3_COMMIT_PREFIX"
-# If the version contains "dev", rename it to v1.0.0.dev for consistency
-if [[ $version == *dev* ]]; then
- suffix="${version##*.}"
- if [[ $suffix == cu* ]]; then
- new_version="1.0.0.dev+${suffix}"
- else
- new_version="1.0.0.dev"
- fi
- new_wheel="${wheel/$version/$new_version}"
- # use cp to keep both files in the artifacts directory
- cp -- "$wheel" "$new_wheel"
- wheel="$new_wheel"
- version="$new_version"
-fi
+# ========= part 2: generate and upload indices ==========
+# generate indices for all existing wheels in the commit directory
+# this script might be run multiple times if multiple variants are being built,
+# so we need to keep the window for "TOCTOU" issues small
+# (i.e., one process generating indices while another uploads a new wheel),
+# which means no time-consuming operations should happen below
-# Upload the wheel to S3
-python3 .buildkite/generate_index.py --wheel "$normal_wheel"
+# list all wheels in the commit directory
+echo "Existing wheels on S3:"
+aws s3 ls "$S3_COMMIT_PREFIX"
+obj_json="objects.json"
+aws s3api list-objects-v2 --bucket "$BUCKET" --prefix "$SUBPATH/" --delimiter / --output json > "$obj_json"
+mkdir -p "$INDICES_OUTPUT_DIR"
-# generate index for this commit
-aws s3 cp "$wheel" "s3://vllm-wheels/$BUILDKITE_COMMIT/"
-aws s3 cp "$normal_wheel" "s3://vllm-wheels/$BUILDKITE_COMMIT/"
-
-if [[ $normal_wheel == *"cu129"* ]]; then
- # only upload index.html for cu129 wheels (default wheels) as it
- # is available on both x86 and arm64
- aws s3 cp index.html "s3://vllm-wheels/$BUILDKITE_COMMIT/vllm/index.html"
- aws s3 cp "s3://vllm-wheels/nightly/index.html" "s3://vllm-wheels/$BUILDKITE_COMMIT/index.html"
+# call the script to generate indices for all existing wheels
+# these indices use relative paths, which work as long as they sit next to the wheel directory in S3,
+# i.e., the wheels are always in s3://vllm-wheels/<commit>/
+# and the indices can be placed in /<commit>/, or /nightly/, or /<version>/
+if [[ ! -z "$DEFAULT_VARIANT_ALIAS" ]]; then
+ alias_arg="--alias-to-default $DEFAULT_VARIANT_ALIAS"
else
- echo "Skipping index files for non-cu129 wheels"
+ alias_arg=""
fi
-# generate index for nightly
-aws s3 cp "$wheel" "s3://vllm-wheels/nightly/"
-aws s3 cp "$normal_wheel" "s3://vllm-wheels/nightly/"
+$PYTHON .buildkite/scripts/generate-nightly-index.py --version "$SUBPATH" --current-objects "$obj_json" --output-dir "$INDICES_OUTPUT_DIR" $alias_arg
-if [[ $normal_wheel == *"cu129"* ]]; then
- # only upload index.html for cu129 wheels (default wheels) as it
- # is available on both x86 and arm64
- aws s3 cp index.html "s3://vllm-wheels/nightly/vllm/index.html"
-else
- echo "Skipping index files for non-cu129 wheels"
+# copy indices to /<commit>/ unconditionally
+echo "Uploading indices to $S3_COMMIT_PREFIX"
+aws s3 cp --recursive "$INDICES_OUTPUT_DIR/" "$S3_COMMIT_PREFIX"
+
+# copy to /nightly/ only if it is on the main branch and not a PR
+if [[ "$BUILDKITE_BRANCH" == "main" && "$BUILDKITE_PULL_REQUEST" == "false" ]]; then
+ echo "Uploading indices to overwrite /nightly/"
+ aws s3 cp --recursive "$INDICES_OUTPUT_DIR/" "s3://$BUCKET/nightly/"
fi
-aws s3 cp "$wheel" "s3://vllm-wheels/$version/"
-aws s3 cp index.html "s3://vllm-wheels/$version/vllm/index.html"
+# copy to /<version>/ only if the version does not contain "dev"
+if [[ "$version" != *"dev"* ]]; then
+ echo "Uploading indices to overwrite /$pure_version/"
+ aws s3 cp --recursive "$INDICES_OUTPUT_DIR/" "s3://$BUCKET/$pure_version/"
+fi
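
The rewritten upload flow publishes the generated indices to up to three prefixes. A short sketch of the destination logic, mirroring the shell above; bucket and prefix names follow the script's variables, and nothing here runs in CI.

```python
# Hedged sketch of where upload-wheels.sh copies the generated indices.
def index_destinations(version: str, commit: str, branch: str, pull_request: str) -> list[str]:
    pure_version = version.split("+", 1)[0]   # "0.11.0+cu130" -> "0.11.0"
    dests = [f"s3://vllm-wheels/{commit}/"]   # always publish next to the wheels
    if branch == "main" and pull_request == "false":
        dests.append("s3://vllm-wheels/nightly/")
    if "dev" not in version:                  # release builds also get /<version>/
        dests.append(f"s3://vllm-wheels/{pure_version}/")
    return dests


print(index_destinations("0.11.0+cu130", "abc123", "main", "false"))
```
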
diff --git a/.buildkite/test-amd.yaml b/.buildkite/test-amd.yaml
index 56e7b1083b17e..687b6b08507c7 100644
--- a/.buildkite/test-amd.yaml
+++ b/.buildkite/test-amd.yaml
@@ -48,8 +48,8 @@ steps:
commands:
- bash standalone_tests/pytorch_nightly_dependency.sh
-- label: Async Engine, Inputs, Utils, Worker Test # 36min
- timeout_in_minutes: 50
+- label: Async Engine, Inputs, Utils, Worker Test # 10min
+ timeout_in_minutes: 15
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
# grade: Blocking
@@ -61,8 +61,8 @@ steps:
- pytest -v -s -m 'not cpu_test' multimodal
- pytest -v -s utils_
-- label: Async Engine, Inputs, Utils, Worker Test (CPU) # 4 mins
- timeout_in_minutes: 10
+- label: Async Engine, Inputs, Utils, Worker, Config Test (CPU) # 15min
+ timeout_in_minutes: 20
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
# grade: Blocking
@@ -72,14 +72,18 @@ steps:
- tests/test_outputs.py
- tests/multimodal
- tests/standalone_tests/lazy_imports.py
+ - tests/tokenizers_
- tests/transformers_utils
+ - tests/config
no_gpu: true
commands:
- python3 standalone_tests/lazy_imports.py
- pytest -v -s test_inputs.py
- pytest -v -s test_outputs.py
- pytest -v -s -m 'cpu_test' multimodal
+ - pytest -v -s tokenizers_
- pytest -v -s transformers_utils
+ - pytest -v -s config
- label: Python-only Installation Test # 10min
timeout_in_minutes: 20
@@ -187,7 +191,7 @@ steps:
- tests/distributed/test_utils
- tests/distributed/test_pynccl
- tests/distributed/test_events
- - tests/compile/test_basic_correctness
+ - tests/compile/fullgraph/test_basic_correctness.py
- examples/offline_inference/rlhf.py
- examples/offline_inference/rlhf_colocate.py
- tests/examples/offline_inference/data_parallel.py
@@ -215,7 +219,7 @@ steps:
- TP_SIZE=1 DP_SIZE=4 pytest -v -s v1/distributed/test_hybrid_lb_dp.py
- pytest -v -s v1/engine/test_engine_core_client.py::test_kv_cache_events_dp
- pytest -v -s distributed/test_utils.py
- - pytest -v -s compile/test_basic_correctness.py
+ - pytest -v -s compile/fullgraph/test_basic_correctness.py
- pytest -v -s distributed/test_pynccl.py
- pytest -v -s distributed/test_events.py
- pytest -v -s distributed/test_symm_mem_allreduce.py
@@ -226,6 +230,27 @@ steps:
- VLLM_ALLOW_INSECURE_SERIALIZATION=1 RAY_DEDUP_LOGS=0 python3 rlhf_colocate.py
- popd
+- label: Distributed Tests (8 GPUs) # 4min
+ timeout_in_minutes: 10
+ mirror_hardwares: [amdexperimental]
+ agent_pool: mi325_8
+ # grade: Blocking
+ gpu: h100
+ num_gpus: 8
+ working_dir: "/vllm-workspace/tests"
+ source_file_dependencies:
+ - examples/offline_inference/torchrun_dp_example.py
+ - vllm/config/parallel.py
+ - vllm/distributed/
+ - vllm/v1/engine/llm_engine.py
+ - vllm/v1/executor/uniproc_executor.py
+ - vllm/v1/worker/gpu_worker.py
+ commands:
+ # https://github.com/NVIDIA/nccl/issues/1838
+ #- export NCCL_CUMEM_HOST_ENABLE=0
+ # test with torchrun tp=2 and dp=4 with ep
+ - torchrun --nproc-per-node=8 ../examples/offline_inference/torchrun_dp_example.py --tp-size=2 --pp-size=1 --dp-size=4 --enable-ep
+
- label: EPLB Algorithm Test # 5min
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
@@ -238,11 +263,11 @@ steps:
commands:
- pytest -v -s distributed/test_eplb_algo.py
-- label: EPLB Execution Test # 5min
+- label: EPLB Execution Test # 10min
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_4
# grade: Blocking
- timeout_in_minutes: 15
+ timeout_in_minutes: 20
working_dir: "/vllm-workspace/tests"
num_gpus: 4
source_file_dependencies:
@@ -250,6 +275,7 @@ steps:
- tests/distributed/test_eplb_execute.py
commands:
- pytest -v -s distributed/test_eplb_execute.py
+ - pytest -v -s distributed/test_eplb_spec_decode.py
- label: Metrics, Tracing Test # 12min
timeout_in_minutes: 20
@@ -273,7 +299,7 @@ steps:
- label: Regression Test # 7min
timeout_in_minutes: 20
- mirror_hardwares: [amdexperimental, amdproduction]
+ mirror_hardwares: [amdexperimental, amdproduction, amdtentative]
agent_pool: mi325_1
grade: Blocking
source_file_dependencies:
@@ -284,23 +310,20 @@ steps:
- pytest -v -s test_regression.py
working_dir: "/vllm-workspace/tests" # optional
-- label: Engine Test # 25min
- timeout_in_minutes: 40
+- label: Engine Test # 9min
+ timeout_in_minutes: 15
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
- #grade: Blocking
+ # grade: Blocking
source_file_dependencies:
- vllm/
- tests/engine
- - tests/tokenization
- tests/test_sequence
- tests/test_config
- tests/test_logger
- tests/test_vllm_port
commands:
- pytest -v -s engine test_sequence.py test_config.py test_logger.py test_vllm_port.py
- # OOM in the CI unless we run this separately
- - pytest -v -s tokenization
- label: V1 Test e2e + engine # 30min
timeout_in_minutes: 45
@@ -318,7 +341,7 @@ steps:
- label: V1 Test entrypoints # 35min
timeout_in_minutes: 50
- mirror_hardwares: [amdexperimental]
+ mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
# grade: Blocking
source_file_dependencies:
@@ -337,6 +360,7 @@ steps:
- tests/v1
commands:
# split the test to avoid interference
+ - uv pip install --system -r /vllm-workspace/requirements/kv_connectors.txt
- pytest -v -s -m 'not cpu_test' v1/core
- pytest -v -s v1/executor
- pytest -v -s v1/kv_offload
@@ -348,10 +372,34 @@ steps:
- pytest -v -s -m 'not cpu_test' v1/metrics
- pytest -v -s v1/test_oracle.py
- pytest -v -s v1/test_request.py
+ - pytest -v -s v1/test_outputs.py
# Integration test for streaming correctness (requires special branch).
- pip install -U git+https://github.com/robertgshaw2-redhat/lm-evaluation-harness.git@streaming-api
- pytest -v -s entrypoints/openai/correctness/test_lmeval.py::test_lm_eval_accuracy_v1_engine
+# TODO: Add the "V1 Test attention (MI300)" test group
+
+- label: V1 Test attention (H100) # 10min
+ mirror_hardwares: [amdexperimental]
+ agent_pool: mi325_1
+ # grade: Blocking
+ timeout_in_minutes: 30
+ gpu: h100
+ source_file_dependencies:
+ - vllm/v1/attention
+ - tests/v1/attention
+ commands:
+ - pytest -v -s v1/attention
+
+- label: V1 Test attention (B200) # 10min
+ timeout_in_minutes: 30
+ gpu: b200
+ source_file_dependencies:
+ - vllm/v1/attention
+ - tests/v1/attention
+ commands:
+ - VLLM_DISABLE_FLASHINFER_PREFILL=1 pytest -v -s v1/attention # TODO: FI prefill is bugged and causes incorrectness, fix this
+
- label: V1 Test others (CPU) # 5 mins
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
@@ -441,7 +489,7 @@ steps:
--ignore=lora/test_llm_with_multi_loras.py \
--ignore=lora/test_olmoe_tp.py \
--ignore=lora/test_deepseekv2_tp.py \
- --ignore=lora/test_gptoss.py \
+ --ignore=lora/test_gptoss_tp.py \
--ignore=lora/test_qwen3moe_tp.py
parallelism: 4
@@ -455,17 +503,12 @@ steps:
- vllm/
- tests/compile
commands:
- - pytest -v -s compile/test_pass_manager.py
- - pytest -v -s compile/test_fusion.py
- - pytest -v -s compile/test_fusion_attn.py
- - pytest -v -s compile/test_functionalization.py
- - pytest -v -s compile/test_silu_mul_quant_fusion.py
- # - pytest -v -s compile/test_sequence_parallelism.py
- # - pytest -v -s compile/test_async_tp.py
- - pytest -v -s compile/test_fusion_all_reduce.py
- - pytest -v -s compile/test_decorator.py
- - pytest -v -s compile/test_noop_elimination.py
- - pytest -v -s compile/test_aot_compile.py
+ # Run unit tests defined directly under compile/,
+ # not including subdirectories, which are usually heavier
+ # tests covered elsewhere.
+ # Use `find` to launch multiple instances of pytest so that
+ # they do not suffer from https://github.com/vllm-project/vllm/issues/28965
+ - "find compile/ -maxdepth 1 -name 'test_*.py' -exec pytest -s -v {} \\\\;"
- label: PyTorch Fullgraph Smoke Test # 15min
timeout_in_minutes: 30
@@ -477,11 +520,14 @@ steps:
- vllm/
- tests/compile
commands:
- - pytest -v -s compile/test_basic_correctness.py
- - pytest -v -s compile/piecewise/
+ # Run smoke tests under fullgraph directory, except test_full_graph.py
+ # as it is a heavy test that is covered in other steps.
+ # Use `find` to launch multiple instances of pytest so that
+ # they do not suffer from https://github.com/vllm-project/vllm/issues/28965
+ - "find compile/fullgraph/ -name 'test_*.py' -not -name 'test_full_graph.py' -exec pytest -s -v {} \\\\;"
-- label: PyTorch Fullgraph Test # 22min
- timeout_in_minutes: 35
+- label: PyTorch Fullgraph Test # 27min
+ timeout_in_minutes: 40
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
# grade: Blocking
@@ -490,8 +536,23 @@ steps:
- vllm/
- tests/compile
commands:
- - pytest -v -s compile/test_full_graph.py
- - pytest -v -s compile/test_fusions_e2e.py
+ - pytest -v -s compile/fullgraph/test_full_graph.py -k 'not test_fp8_kv_scale_compile'
+ # Limit to no custom ops to reduce running time
+ # Wrap with quotes to escape yaml and avoid starting -k string with a -
+ - "pytest -v -s compile/distributed/test_fusions_e2e.py -k 'TRITON and not +quant_fp8 and not Llama-4'"
+
+- label: Cudagraph test
+ timeout_in_minutes: 20
+ mirror_hardwares: [amdexperimental, amdproduction]
+ agent_pool: mi325_1
+ source_file_dependencies:
+ - tests/v1/cudagraph
+ - vllm/v1/cudagraph_dispatcher.py
+ - vllm/config/compilation.py
+ - vllm/compilation
+ commands:
+ - pytest -v -s v1/cudagraph/test_cudagraph_dispatch.py
+ - pytest -v -s v1/cudagraph/test_cudagraph_mode.py
- label: Kernels Core Operation Test # 48min
timeout_in_minutes: 75
@@ -543,6 +604,8 @@ steps:
- tests/kernels/moe
- vllm/model_executor/layers/fused_moe/
- vllm/distributed/device_communicators/
+ - vllm/envs.py
+ - vllm/config
commands:
- pytest -v -s kernels/moe --shard-id=$$BUILDKITE_PARALLEL_JOB --num-shards=$$BUILDKITE_PARALLEL_JOB_COUNT
parallelism: 2
@@ -561,10 +624,13 @@ steps:
- label: Model Executor Test # 23min
timeout_in_minutes: 35
+ torch_nightly: true
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
# grade: Blocking
source_file_dependencies:
+ - vllm/engine/arg_utils.py
+ - vllm/config/model.py
- vllm/model_executor
- tests/model_executor
- tests/entrypoints/openai/test_tensorizer_entrypoint.py
@@ -616,9 +682,9 @@ steps:
- uv pip install --system torchao==0.13.0
- VLLM_TEST_FORCE_LOAD_FORMAT=auto pytest -v -s quantization/ --ignore quantization/test_blackwell_moe.py
-- label: LM Eval Small Models # 53min
- timeout_in_minutes: 75
- mirror_hardwares: [amdexperimental]
+- label: LM Eval Small Models # 15min
+ timeout_in_minutes: 20
+ mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
# grade: Blocking
source_file_dependencies:
@@ -627,16 +693,17 @@ steps:
commands:
- pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-small.txt --tp-size=1
-- label: OpenAI API correctness # 22min
- timeout_in_minutes: 30
- mirror_hardwares: [amdexperimental]
+- label: OpenAI API correctness # 10min
+ timeout_in_minutes: 15
+ mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
# grade: Blocking
source_file_dependencies:
- csrc/
- vllm/entrypoints/openai/
- vllm/model_executor/models/whisper.py
- commands: # LMEval+Transcription WER check
+ commands: # LMEval
+ # Transcription WER check is skipped because encoder-decoder models are not supported on ROCm, see https://github.com/vllm-project/vllm/issues/27442
- pytest -s entrypoints/openai/correctness/
- label: OpenAI-Compatible Tool Use # 23 min
@@ -686,6 +753,7 @@ steps:
torch_nightly: true
source_file_dependencies:
- vllm/model_executor/models/
+ - vllm/transformers_utils/
- tests/models/test_initialization.py
commands:
# Only when vLLM model source is modified - test initialization of a large
@@ -858,10 +926,11 @@ steps:
- pytest -v -s models/multimodal -m core_model --ignore models/multimodal/generation/test_whisper.py --ignore models/multimodal/processing
- cd .. && VLLM_WORKER_MULTIPROC_METHOD=spawn pytest -v -s tests/models/multimodal/generation/test_whisper.py -m core_model # Otherwise, mp_method="spawn" doesn't work
-- label: Multi-Modal Accuracy Eval (Small Models) # 50min
- mirror_hardwares: [amdexperimental]
- agent_pool: mi325_1
+- label: Multi-Modal Accuracy Eval (Small Models) # 10min
timeout_in_minutes: 70
+ mirror_hardwares: [amdexperimental, amdproduction]
+ agent_pool: mi325_1
+ # grade: Blocking
working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
source_file_dependencies:
- vllm/multimodal/
@@ -932,16 +1001,17 @@ steps:
- label: Transformers Nightly Models Test
mirror_hardwares: [amdexperimental]
agent_pool: mi325_1
+ # grade: Blocking
working_dir: "/vllm-workspace/"
optional: true
commands:
- pip install --upgrade git+https://github.com/huggingface/transformers
- - pytest -v -s tests/models/test_initialization.py
+ - pytest -v -s tests/models/test_initialization.py -k 'not (Gemma3 or ModernBert or Qwen2_5_VL or Qwen2_5vl or Qwen2VL or TransformersMultiModalEmbeddingModel or TransformersMultiModalForSequenceClassification or Ultravox or Phi4Multimodal or LlavaNextVideo or MiniCPMO or Lfm2Moe or PaliGemma or RobertaForSequenceClassification or Ovis2_5 or Fuyu or DeepseekOCR or KimiVL)'
- pytest -v -s tests/models/test_transformers.py
- - pytest -v -s tests/models/multimodal/processing/
- - pytest -v -s tests/models/multimodal/test_mapping.py
+ # - pytest -v -s tests/models/multimodal/processing/
+ - pytest -v -s tests/models/multimodal/test_mapping.py -k 'not (Gemma3 or Qwen2VL or Qwen2_5_VL)'
- python3 examples/offline_inference/basic/chat.py
- - python3 examples/offline_inference/vision_language.py --model-type qwen2_5_vl
+ # - python3 examples/offline_inference/vision_language.py --model-type qwen2_5_vl
# Whisper needs spawn method to avoid deadlock
- VLLM_WORKER_MULTIPROC_METHOD=spawn python3 examples/offline_inference/audio_language.py --model-type whisper
@@ -959,11 +1029,16 @@ steps:
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
- vllm/v1/attention/backends/flashinfer.py
+ - vllm/v1/attention/backends/mla/cutlass_mla.py
+ - vllm/v1/attention/backends/mla/flashinfer_mla.py
+ - vllm/platforms/cuda.py
+ - vllm/attention/selector.py
commands:
- nvidia-smi
- python3 examples/offline_inference/basic/chat.py
# Attention
# num_heads2 broken by https://github.com/flashinfer-ai/flashinfer/issues/1353
+ - pytest -v -s tests/kernels/attention/test_attention_selector.py
- pytest -v -s tests/kernels/attention/test_flashinfer.py -k 'not num_heads2'
- pytest -v -s tests/kernels/attention/test_flashinfer_trtllm_attention.py
- pytest -v -s tests/kernels/attention/test_cutlass_mla_decode.py
@@ -981,7 +1056,7 @@ steps:
- pytest -v -s tests/kernels/moe/test_ocp_mx_moe.py
- pytest -v -s tests/kernels/moe/test_flashinfer.py
-- label: Blackwell Fusion Tests # 30 min
+- label: Blackwell Fusion and Compile Tests # 30 min
timeout_in_minutes: 40
working_dir: "/vllm-workspace/"
gpu: b200
@@ -999,13 +1074,40 @@ steps:
- pytest -v -s tests/compile/test_fusion_attn.py
- pytest -v -s tests/compile/test_silu_mul_quant_fusion.py
# this runner has 2 GPUs available even though num_gpus=2 is not set
- - pytest -v -s tests/compile/test_fusion_all_reduce.py
- - pytest -v -s tests/compile/test_fusions_e2e.py
+ - pytest -v -s tests/compile/distributed/test_fusion_all_reduce.py
+ # Limit to the Inductor partition, no-custom-ops, and allreduce & attn fusion configurations to reduce running time
+ # Wrap with quotes to escape YAML
+ - "pytest -v -s tests/compile/distributed/test_fusions_e2e.py::test_tp2_attn_quant_allreduce_rmsnorm -k 'True and not +quant_fp8 and not +rms_norm'"
+ # test_fp8_kv_scale_compile requires FlashAttention (not supported on default L4/L40)
+ - pytest -v -s tests/compile/distributed/test_full_graph.py::test_fp8_kv_scale_compile
-- label: Blackwell GPT-OSS Eval
- timeout_in_minutes: 60
+- label: Blackwell Fusion E2E Tests # 30 min
+ timeout_in_minutes: 40
working_dir: "/vllm-workspace/"
gpu: b200
+ optional: true
+ num_gpus: 2
+ source_file_dependencies:
+ - csrc/quantization/fp4/
+ - vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
+ - vllm/v1/attention/backends/flashinfer.py
+ - vllm/compilation/
+ # can affect pattern matching
+ - vllm/model_executor/layers/layernorm.py
+ - vllm/model_executor/layers/activation.py
+ - vllm/model_executor/layers/quantization/input_quant_fp8.py
+ - tests/compile/distributed/test_fusions_e2e.py
+ - tests/compile/fullgraph/test_full_graph.py
+ commands:
+ - nvidia-smi
+ # Run all e2e fusion tests
+ - pytest -v -s tests/compile/distributed/test_fusions_e2e.py
+
+- label: ROCm GPT-OSS Eval
+ timeout_in_minutes: 60
+ working_dir: "/vllm-workspace/"
+ agent_pool: mi325_1
+ mirror_hardwares: [amdexperimental, amdproduction]
optional: true # run on nightlies
source_file_dependencies:
- tests/evals/gpt_oss
@@ -1014,7 +1116,7 @@ steps:
- vllm/v1/attention/backends/flashinfer.py
commands:
- uv pip install --system 'gpt-oss[eval]==0.0.5'
- - pytest -s -v tests/evals/gpt_oss/test_gpqa_correctness.py --model openai/gpt-oss-20b --metric 0.58
+ - VLLM_ROCM_USE_AITER_MHA=0 VLLM_ROCM_USE_AITER=1 VLLM_USE_AITER_UNIFIED_ATTENTION=1 pytest -s -v tests/evals/gpt_oss/test_gpqa_correctness.py --model openai/gpt-oss-20b --metric 0.58
- label: Blackwell Quantized MoE Test
timeout_in_minutes: 60
@@ -1104,7 +1206,7 @@ steps:
- vllm/worker/worker_base.py
- vllm/v1/engine/
- vllm/v1/worker/
- - tests/compile/test_basic_correctness.py
+ - tests/compile/fullgraph/test_basic_correctness.py
- tests/compile/test_wrapper.py
- tests/distributed/
- tests/entrypoints/llm/test_collective_rpc.py
@@ -1117,7 +1219,7 @@ steps:
- TP_SIZE=1 DP_SIZE=2 pytest -v -s v1/distributed/test_external_lb_dp.py
- DP_SIZE=2 pytest -v -s v1/entrypoints/openai/test_multi_api_servers.py
- pytest -v -s entrypoints/llm/test_collective_rpc.py
- - pytest -v -s ./compile/test_basic_correctness.py
+ - pytest -v -s ./compile/fullgraph/test_basic_correctness.py
- pytest -v -s ./compile/test_wrapper.py
- VLLM_TEST_SAME_HOST=1 torchrun --nproc-per-node=4 distributed/test_same_node.py | grep 'Same node test passed'
- VLLM_TEST_SAME_HOST=1 VLLM_TEST_WITH_DEFAULT_DEVICE_SET=1 torchrun --nproc-per-node=4 distributed/test_same_node.py | grep 'Same node test passed'
@@ -1218,6 +1320,11 @@ steps:
- pytest -v -s -x lora/test_llm_with_multi_loras.py
- pytest -v -s -x lora/test_olmoe_tp.py
+ # Disabled for now because the MXFP4 backend on non-CUDA platforms
+ # doesn't support LoRA yet
+ #- pytest -v -s -x lora/test_gptoss_tp.py
+
- label: Weight Loading Multiple GPU Test # 33min
timeout_in_minutes: 45
mirror_hardwares: [amdexperimental]
@@ -1230,7 +1337,7 @@ steps:
- vllm/
- tests/weight_loading
commands:
- - bash weight_loading/run_model_weight_loading_test.sh -c weight_loading/models.txt
+ - bash weight_loading/run_model_weight_loading_test.sh -c weight_loading/models-amd.txt
- label: Weight Loading Multiple GPU Test - Large Models # optional
mirror_hardwares: [amdexperimental]
@@ -1238,17 +1345,17 @@ steps:
# grade: Blocking
working_dir: "/vllm-workspace/tests"
num_gpus: 2
- gpu: a100
optional: true
source_file_dependencies:
- vllm/
- tests/weight_loading
commands:
- - bash weight_loading/run_model_weight_loading_test.sh -c weight_loading/models-large.txt
+ - bash weight_loading/run_model_weight_loading_test.sh -c weight_loading/models-large-amd.txt
- label: NixlConnector PD accuracy tests (Distributed) # 30min
mirror_hardwares: [amdexperimental]
agent_pool: mi325_4
+ # grade: Blocking
timeout_in_minutes: 30
working_dir: "/vllm-workspace/tests"
num_gpus: 4
@@ -1263,6 +1370,9 @@ steps:
##### A100 test #####
- label: Distributed Tests (A100) # optional
+ mirror_hardwares: [amdexperimental]
+ agent_pool: mi325_4
+ # grade: Blocking
gpu: a100
optional: true
num_gpus: 4
@@ -1277,6 +1387,9 @@ steps:
- pytest -v -s -x lora/test_mixtral.py
- label: LM Eval Large Models # optional
+ mirror_hardwares: [amdexperimental, amdproduction]
+ agent_pool: mi325_4
+ # grade: Blocking
gpu: a100
optional: true
num_gpus: 4
@@ -1288,19 +1401,41 @@ steps:
- export VLLM_WORKER_MULTIPROC_METHOD=spawn
- pytest -s -v test_lm_eval_correctness.py --config-list-file=configs/models-large.txt --tp-size=4
+##### H100 test #####
+- label: LM Eval Large Models (H100) # optional
+ mirror_hardwares: [amdexperimental, amdproduction]
+ agent_pool: mi325_4
+ # grade: Blocking
+ gpu: h100
+ optional: true
+ num_gpus: 4
+ working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
+ source_file_dependencies:
+ - csrc/
+ - vllm/model_executor/layers/quantization
+ commands:
+ - export VLLM_USE_DEEP_GEMM=0 # We found Triton is faster than DeepGEMM for H100
+ - pytest -s -v test_lm_eval_correctness.py --config-list-file=configs/models-large-hopper.txt --tp-size=4
+
##### H200 test #####
- label: Distributed Tests (H200) # optional
+ mirror_hardwares: [amdexperimental]
+ agent_pool: mi325_2
+ # grade: Blocking
gpu: h200
optional: true
working_dir: "/vllm-workspace/"
num_gpus: 2
commands:
- - pytest -v -s tests/compile/test_async_tp.py
- - pytest -v -s tests/compile/test_sequence_parallelism.py
- - pytest -v -s tests/compile/test_fusion_all_reduce.py
- - pytest -v -s tests/compile/test_fusions_e2e.py::test_tp2_attn_quant_allreduce_rmsnorm
+ - pytest -v -s tests/compile/distributed/test_async_tp.py
+ - pytest -v -s tests/compile/distributed/test_sequence_parallelism.py
+ - pytest -v -s tests/compile/distributed/test_fusion_all_reduce.py
+ #- pytest -v -s tests/compile/distributed/test_fusions_e2e.py::test_tp2_attn_quant_allreduce_rmsnorm
+ - "pytest -v -s tests/compile/distributed/test_fusions_e2e.py -k 'not Llama-4'"
+ - pytest -v -s tests/compile/distributed/test_sequence_parallel.py
- pytest -v -s tests/distributed/test_context_parallel.py
- CUDA_VISIBLE_DEVICES=1,2 VLLM_ALL2ALL_BACKEND=deepep_high_throughput VLLM_USE_DEEP_GEMM=1 VLLM_LOGGING_LEVEL=DEBUG python3 examples/offline_inference/data_parallel.py --model Qwen/Qwen1.5-MoE-A2.7B --tp-size=1 --dp-size=2 --max-model-len 2048
+ - pytest -v -s tests/v1/distributed/test_dbo.py
##### B200 test #####
- label: Distributed Tests (B200) # optional
@@ -1311,6 +1446,7 @@ steps:
commands:
- pytest -v -s tests/distributed/test_context_parallel.py
- pytest -v -s tests/distributed/test_nccl_symm_mem_allreduce.py
+ - pytest -v -s tests/v1/distributed/test_dbo.py
##### RL Integration Tests #####
- label: Prime-RL Integration Test # 15min
@@ -1326,3 +1462,27 @@ steps:
- .buildkite/scripts/run-prime-rl-test.sh
commands:
- bash .buildkite/scripts/run-prime-rl-test.sh
+
+- label: DeepSeek V2-Lite Accuracy
+ mirror_hardwares: [amdexperimental]
+ agent_pool: mi325_4
+ # grade: Blocking
+ timeout_in_minutes: 60
+ gpu: h100
+ optional: true
+ num_gpus: 4
+ working_dir: "/vllm-workspace"
+ commands:
+ - bash .buildkite/scripts/scheduled_integration_test/deepseek_v2_lite_ep_eplb.sh 0.25 200 8010
+
+- label: Qwen3-30B-A3B-FP8-block Accuracy
+ mirror_hardwares: [amdexperimental]
+ agent_pool: mi325_4
+ # grade: Blocking
+ timeout_in_minutes: 60
+ gpu: h100
+ optional: true
+ num_gpus: 4
+ working_dir: "/vllm-workspace"
+ commands:
+ - bash .buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep_eplb.sh 0.8 200 8020
diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml
index d556073cd1049..9f2107fb1e5ab 100644
--- a/.buildkite/test-pipeline.yaml
+++ b/.buildkite/test-pipeline.yaml
@@ -25,6 +25,7 @@
# and $$BUILDKITE_PARALLEL_JOB_COUNT environment variables.
# working_dir(str): specify the place where the command should execute, default to /vllm-workspace/tests
# source_file_dependencies(list): the list of prefixes to opt-in the test for, if empty, the test will always run.
+# autorun_on_main (bool): defaults to false; if true, the test runs automatically when a commit is pushed to the main branch.
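+# Example (hypothetical step; the label and test path below are placeholders, not real tests):
+#   - label: Fast Sanity Check
+#     autorun_on_main: true
+#     commands:
+#       - pytest -v -s tests/fast_sanity_check.py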
# When adding a test
# - If the test belongs to an existing group, add it there
@@ -56,22 +57,26 @@ steps:
- pytest -v -s -m 'not cpu_test' multimodal
- pytest -v -s utils_
-- label: Async Engine, Inputs, Utils, Worker Test (CPU) # 4 mins
- timeout_in_minutes: 10
+- label: Async Engine, Inputs, Utils, Worker, Config Test (CPU) # 15min
+ timeout_in_minutes: 20
source_file_dependencies:
- vllm/
- tests/test_inputs.py
- tests/test_outputs.py
- tests/multimodal
- tests/standalone_tests/lazy_imports.py
+ - tests/tokenizers_
- tests/transformers_utils
+ - tests/config
no_gpu: true
commands:
- python3 standalone_tests/lazy_imports.py
- pytest -v -s test_inputs.py
- pytest -v -s test_outputs.py
- pytest -v -s -m 'cpu_test' multimodal
+ - pytest -v -s tokenizers_
- pytest -v -s transformers_utils
+ - pytest -v -s config
- label: Python-only Installation Test # 10min
timeout_in_minutes: 20
@@ -164,7 +169,7 @@ steps:
- tests/distributed/test_utils
- tests/distributed/test_pynccl
- tests/distributed/test_events
- - tests/compile/test_basic_correctness
+ - tests/compile/fullgraph/test_basic_correctness.py
- examples/offline_inference/rlhf.py
- examples/offline_inference/rlhf_colocate.py
- tests/examples/offline_inference/data_parallel.py
@@ -189,12 +194,13 @@ steps:
# test with internal dp
- python3 ../examples/offline_inference/data_parallel.py --enforce-eager
- TP_SIZE=2 DP_SIZE=2 pytest -v -s v1/distributed/test_async_llm_dp.py
+ - TP_SIZE=2 DP_SIZE=2 pytest -v -s v1/distributed/test_eagle_dp.py
- TP_SIZE=2 DP_SIZE=2 pytest -v -s v1/distributed/test_external_lb_dp.py
- TP_SIZE=1 DP_SIZE=4 pytest -v -s v1/distributed/test_internal_lb_dp.py
- TP_SIZE=1 DP_SIZE=4 pytest -v -s v1/distributed/test_hybrid_lb_dp.py
- pytest -v -s v1/engine/test_engine_core_client.py::test_kv_cache_events_dp
- pytest -v -s distributed/test_utils.py
- - pytest -v -s compile/test_basic_correctness.py
+ - pytest -v -s compile/fullgraph/test_basic_correctness.py
- pytest -v -s distributed/test_pynccl.py
- pytest -v -s distributed/test_events.py
- pytest -v -s distributed/test_symm_mem_allreduce.py
@@ -232,8 +238,8 @@ steps:
commands:
- pytest -v -s distributed/test_eplb_algo.py
-- label: EPLB Execution Test # 5min
- timeout_in_minutes: 15
+- label: EPLB Execution Test # 10min
+ timeout_in_minutes: 20
working_dir: "/vllm-workspace/tests"
num_gpus: 4
source_file_dependencies:
@@ -241,6 +247,7 @@ steps:
- tests/distributed/test_eplb_execute.py
commands:
- pytest -v -s distributed/test_eplb_execute.py
+ - pytest -v -s distributed/test_eplb_spec_decode.py
- label: Metrics, Tracing Test # 12min
timeout_in_minutes: 20
@@ -271,21 +278,18 @@ steps:
- pytest -v -s test_regression.py
working_dir: "/vllm-workspace/tests" # optional
-- label: Engine Test # 25min
- timeout_in_minutes: 40
+- label: Engine Test # 9min
+ timeout_in_minutes: 15
mirror_hardwares: [amdexperimental]
source_file_dependencies:
- vllm/
- tests/engine
- - tests/tokenization
- tests/test_sequence
- tests/test_config
- tests/test_logger
- tests/test_vllm_port
commands:
- pytest -v -s engine test_sequence.py test_config.py test_logger.py test_vllm_port.py
- # OOM in the CI unless we run this separately
- - pytest -v -s tokenization
- label: V1 Test e2e + engine # 30min
timeout_in_minutes: 45
@@ -315,6 +319,7 @@ steps:
- vllm/
- tests/v1
commands:
+ - uv pip install --system -r /vllm-workspace/requirements/kv_connectors.txt
# split the test to avoid interference
- pytest -v -s -m 'not cpu_test' v1/core
- pytest -v -s v1/executor
@@ -327,6 +332,7 @@ steps:
- pytest -v -s -m 'not cpu_test' v1/metrics
- pytest -v -s v1/test_oracle.py
- pytest -v -s v1/test_request.py
+ - pytest -v -s v1/test_outputs.py
# Integration test for streaming correctness (requires special branch).
- pip install -U git+https://github.com/robertgshaw2-redhat/lm-evaluation-harness.git@streaming-api
- pytest -v -s entrypoints/openai/correctness/test_lmeval.py::test_lm_eval_accuracy_v1_engine
@@ -340,6 +346,27 @@ steps:
commands:
- pytest -v -s v1/attention
+- label: Batch Invariance Tests (H100) # 10min
+ timeout_in_minutes: 25
+ gpu: h100
+ source_file_dependencies:
+ - vllm/
+ - tests/v1/determinism/
+ commands:
+ - export VLLM_WORKER_MULTIPROC_METHOD=spawn
+ - pip install pytest-timeout pytest-forked
+ - pytest -v -s v1/determinism/test_batch_invariance.py
+ - pytest -v -s v1/determinism/test_rms_norm_batch_invariant.py
+
+- label: V1 Test attention (B200) # 10min
+ timeout_in_minutes: 30
+ gpu: b200
+ source_file_dependencies:
+ - vllm/v1/attention
+ - tests/v1/attention
+ commands:
+ - VLLM_DISABLE_FLASHINFER_PREFILL=1 pytest -v -s v1/attention # TODO: FI prefill is bugged and causes incorrectness, fix this
+
- label: V1 Test others (CPU) # 5 mins
source_file_dependencies:
- vllm/
@@ -417,7 +444,7 @@ steps:
--ignore=lora/test_llm_with_multi_loras.py \
--ignore=lora/test_olmoe_tp.py \
--ignore=lora/test_deepseekv2_tp.py \
- --ignore=lora/test_gptoss.py \
+ --ignore=lora/test_gptoss_tp.py \
--ignore=lora/test_qwen3moe_tp.py
parallelism: 4
@@ -430,15 +457,12 @@ steps:
- vllm/
- tests/compile
commands:
- - pytest -v -s compile/test_pass_manager.py
- - pytest -v -s compile/test_fusion.py
- - pytest -v -s compile/test_fusion_attn.py
- - pytest -v -s compile/test_functionalization.py
- - pytest -v -s compile/test_silu_mul_quant_fusion.py
- - pytest -v -s compile/test_fusion_all_reduce.py
- - pytest -v -s compile/test_decorator.py
- - pytest -v -s compile/test_noop_elimination.py
- - pytest -v -s compile/test_aot_compile.py
+ # Run the unit tests defined directly under compile/,
+ # excluding subdirectories, which contain heavier tests
+ # covered elsewhere.
+ # Use `find` to launch multiple instances of pytest so that
+ # they do not suffer from https://github.com/vllm-project/vllm/issues/28965
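+ # (each matching test file runs in its own pytest process)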
+ - "find compile/ -maxdepth 1 -name 'test_*.py' -exec pytest -s -v {} \\\\;"
- label: PyTorch Fullgraph Smoke Test # 15min
timeout_in_minutes: 30
@@ -448,19 +472,25 @@ steps:
- vllm/
- tests/compile
commands:
- - pytest -v -s compile/test_basic_correctness.py
- - pytest -v -s compile/piecewise/
+ # Run the smoke tests under the fullgraph directory, except test_full_graph.py,
+ # which is a heavy test covered in other steps.
+ # Use `find` to launch multiple instances of pytest so that
+ # they do not suffer from https://github.com/vllm-project/vllm/issues/28965
+ - "find compile/fullgraph/ -name 'test_*.py' -not -name 'test_full_graph.py' -exec pytest -s -v {} \\\\;"
-- label: PyTorch Fullgraph Test # 22min
- timeout_in_minutes: 35
+- label: PyTorch Fullgraph Test # 27min
+ timeout_in_minutes: 40
mirror_hardwares: [amdexperimental]
torch_nightly: true
source_file_dependencies:
- vllm/
- tests/compile
commands:
- - pytest -v -s compile/test_full_graph.py
- - pytest -v -s compile/test_fusions_e2e.py
+ # FP8 KV scales are not supported on SM89; tested on Blackwell instead
+ - pytest -v -s compile/fullgraph/test_full_graph.py -k 'not test_fp8_kv_scale_compile'
+ # Limit to the no-custom-ops configurations to reduce running time
+ # Wrap with quotes to escape YAML and avoid starting the -k string with a -
+ - "pytest -v -s compile/distributed/test_fusions_e2e.py -k 'TRITON and not +quant_fp8 and not Llama-4'"
- label: Cudagraph test
timeout_in_minutes: 20
@@ -532,10 +562,32 @@ steps:
commands:
- pytest -v -s kernels/mamba
+- label: Kernels DeepGEMM Test (H100)
+ timeout_in_minutes: 45
+ gpu: h100
+ num_gpus: 1
+ source_file_dependencies:
+ - tools/install_deepgemm.sh
+ - vllm/utils/deep_gemm.py
+ - vllm/model_executor/layers/fused_moe
+ - vllm/model_executor/layers/quantization
+ - tests/kernels/quantization/test_block_fp8.py
+ - tests/kernels/moe/test_deepgemm.py
+ - tests/kernels/moe/test_batched_deepgemm.py
+ - tests/kernels/attention/test_deepgemm_attention.py
+ commands:
+ - pytest -v -s kernels/quantization/test_block_fp8.py -k deep_gemm
+ - pytest -v -s kernels/moe/test_deepgemm.py
+ - pytest -v -s kernels/moe/test_batched_deepgemm.py
+ - pytest -v -s kernels/attention/test_deepgemm_attention.py
+
- label: Model Executor Test # 23min
timeout_in_minutes: 35
+ torch_nightly: true
mirror_hardwares: [amdexperimental]
source_file_dependencies:
+ - vllm/engine/arg_utils.py
+ - vllm/config/model.py
- vllm/model_executor
- tests/model_executor
- tests/entrypoints/openai/test_tensorizer_entrypoint.py
@@ -579,6 +631,7 @@ steps:
# we can only upgrade after this is resolved
# TODO(jerryzh168): resolve the above comment
- uv pip install --system torchao==0.13.0 --index-url https://download.pytorch.org/whl/cu129
+ - uv pip install --system conch-triton-kernels
- VLLM_TEST_FORCE_LOAD_FORMAT=auto pytest -v -s quantization/ --ignore quantization/test_blackwell_moe.py
- label: LM Eval Small Models # 53min
@@ -587,6 +640,7 @@ steps:
source_file_dependencies:
- csrc/
- vllm/model_executor/layers/quantization
+ autorun_on_main: true
commands:
- pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=configs/models-small.txt --tp-size=1
@@ -638,6 +692,7 @@ steps:
torch_nightly: true
source_file_dependencies:
- vllm/model_executor/models/
+ - vllm/transformers_utils/
- tests/models/test_initialization.py
commands:
# Only when vLLM model source is modified - test initialization of a large
@@ -764,14 +819,24 @@ steps:
commands:
- pytest -v -s models/language/pooling_mteb_test
-- label: Multi-Modal Processor Test # 44min
+- label: Multi-Modal Processor Test (CPU)
+ timeout_in_minutes: 60
+ source_file_dependencies:
+ - vllm/
+ - tests/models/multimodal
+ no_gpu: true
+ commands:
+ - pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
+ - pytest -v -s models/multimodal/processing --ignore models/multimodal/processing/test_tensor_schema.py
+
+- label: Multi-Modal Processor Test
timeout_in_minutes: 60
source_file_dependencies:
- vllm/
- tests/models/multimodal
commands:
- pip install git+https://github.com/TIGER-AI-Lab/Mantis.git
- - pytest -v -s models/multimodal/processing
+ - pytest -v -s models/multimodal/processing/test_tensor_schema.py
- label: Multi-Modal Models Test (Standard) # 60min
timeout_in_minutes: 80
@@ -848,6 +913,7 @@ steps:
- label: Transformers Nightly Models Test
working_dir: "/vllm-workspace/"
optional: true
+ soft_fail: true
commands:
- pip install --upgrade git+https://github.com/huggingface/transformers
- pytest -v -s tests/models/test_initialization.py
@@ -873,11 +939,16 @@ steps:
- vllm/model_executor/layers/fused_moe/flashinfer_cutlass_prepare_finalize.py
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
- vllm/v1/attention/backends/flashinfer.py
+ - vllm/v1/attention/backends/mla/cutlass_mla.py
+ - vllm/v1/attention/backends/mla/flashinfer_mla.py
+ - vllm/platforms/cuda.py
+ - vllm/attention/selector.py
commands:
- nvidia-smi
- python3 examples/offline_inference/basic/chat.py
# Attention
# num_heads2 broken by https://github.com/flashinfer-ai/flashinfer/issues/1353
+ - pytest -v -s tests/kernels/attention/test_attention_selector.py
- pytest -v -s tests/kernels/attention/test_flashinfer.py -k 'not num_heads2'
- pytest -v -s tests/kernels/attention/test_flashinfer_trtllm_attention.py
- pytest -v -s tests/kernels/attention/test_cutlass_mla_decode.py
@@ -894,8 +965,9 @@ steps:
- pytest -v -s tests/kernels/moe/test_nvfp4_moe.py
- pytest -v -s tests/kernels/moe/test_ocp_mx_moe.py
- pytest -v -s tests/kernels/moe/test_flashinfer.py
+ - pytest -v -s tests/kernels/moe/test_cutedsl_moe.py
-- label: Blackwell Fusion Tests # 30 min
+- label: Blackwell Fusion and Compile Tests # 30 min
timeout_in_minutes: 40
working_dir: "/vllm-workspace/"
gpu: b200
@@ -903,18 +975,50 @@ steps:
- csrc/quantization/fp4/
- vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
- vllm/v1/attention/backends/flashinfer.py
+ - vllm/v1/worker/
+ - vllm/v1/cudagraph_dispatcher.py
+ - vllm/compilation/
+ # can affect pattern matching
+ - vllm/model_executor/layers/layernorm.py
+ - vllm/model_executor/layers/activation.py
+ - vllm/model_executor/layers/quantization/input_quant_fp8.py
+ - tests/compile/test_fusion_attn.py
+ - tests/compile/test_silu_mul_quant_fusion.py
+ - tests/compile/distributed/test_fusion_all_reduce.py
+ - tests/compile/distributed/test_fusions_e2e.py
+ - tests/compile/fullgraph/test_full_graph.py
+ commands:
+ - nvidia-smi
+ - pytest -v -s tests/compile/test_fusion_attn.py
+ - pytest -v -s tests/compile/test_silu_mul_quant_fusion.py
+ # this runner has 2 GPUs available even though num_gpus=2 is not set
+ - pytest -v -s tests/compile/distributed/test_fusion_all_reduce.py
+ # Limit to the Inductor partition, no-custom-ops, and allreduce & attn fusion configurations to reduce running time
+ # Wrap with quotes to escape YAML
+ - "pytest -v -s tests/compile/distributed/test_fusions_e2e.py::test_tp2_attn_quant_allreduce_rmsnorm -k 'True and not +quant_fp8 and not +rms_norm'"
+ # test_fp8_kv_scale_compile requires FlashAttention (not supported on default L4/L40)
+ - pytest -v -s tests/compile/fullgraph/test_full_graph.py::test_fp8_kv_scale_compile
+
+- label: Blackwell Fusion E2E Tests # 30 min
+ timeout_in_minutes: 40
+ working_dir: "/vllm-workspace/"
+ gpu: b200
+ optional: true
+ num_gpus: 2
+ source_file_dependencies:
+ - csrc/quantization/fp4/
+ - vllm/model_executor/layers/quantization/utils/flashinfer_utils.py
+ - vllm/v1/attention/backends/flashinfer.py
- vllm/compilation/
# can affect pattern matching
- vllm/model_executor/layers/layernorm.py
- vllm/model_executor/layers/activation.py
- vllm/model_executor/layers/quantization/input_quant_fp8.py
+ - tests/compile/distributed/test_fusions_e2e.py
commands:
- nvidia-smi
- - pytest -v -s tests/compile/test_fusion_attn.py
- - pytest -v -s tests/compile/test_silu_mul_quant_fusion.py
- # this runner has 2 GPUs available even though num_gpus=2 is not set
- - pytest -v -s tests/compile/test_fusion_all_reduce.py
- - pytest -v -s tests/compile/test_fusions_e2e.py
+ # Run all e2e fusion tests
+ - pytest -v -s tests/compile/distributed/test_fusions_e2e.py
- label: Blackwell GPT-OSS Eval
timeout_in_minutes: 60
@@ -1012,7 +1116,7 @@ steps:
- vllm/worker/worker_base.py
- vllm/v1/engine/
- vllm/v1/worker/
- - tests/compile/test_basic_correctness.py
+ - tests/compile/fullgraph/test_basic_correctness.py
- tests/compile/test_wrapper.py
- tests/distributed/
- tests/entrypoints/llm/test_collective_rpc.py
@@ -1024,10 +1128,11 @@ steps:
# https://github.com/NVIDIA/nccl/issues/1838
- export NCCL_CUMEM_HOST_ENABLE=0
- TP_SIZE=1 DP_SIZE=2 pytest -v -s v1/distributed/test_async_llm_dp.py
+ - TP_SIZE=1 DP_SIZE=2 pytest -v -s v1/distributed/test_eagle_dp.py
- TP_SIZE=1 DP_SIZE=2 pytest -v -s v1/distributed/test_external_lb_dp.py
- DP_SIZE=2 pytest -v -s v1/entrypoints/openai/test_multi_api_servers.py
- pytest -v -s entrypoints/llm/test_collective_rpc.py
- - pytest -v -s ./compile/test_basic_correctness.py
+ - pytest -v -s ./compile/fullgraph/test_basic_correctness.py
- pytest -v -s ./compile/test_wrapper.py
- VLLM_TEST_SAME_HOST=1 torchrun --nproc-per-node=4 distributed/test_same_node.py | grep 'Same node test passed'
- VLLM_TEST_SAME_HOST=1 VLLM_TEST_WITH_DEFAULT_DEVICE_SET=1 torchrun --nproc-per-node=4 distributed/test_same_node.py | grep 'Same node test passed'
@@ -1119,6 +1224,7 @@ steps:
- pytest -v -s -x lora/test_llama_tp.py
- pytest -v -s -x lora/test_llm_with_multi_loras.py
- pytest -v -s -x lora/test_olmoe_tp.py
+ - pytest -v -s -x lora/test_gptoss_tp.py
- label: Weight Loading Multiple GPU Test # 33min
@@ -1186,6 +1292,19 @@ steps:
- export VLLM_WORKER_MULTIPROC_METHOD=spawn
- pytest -s -v test_lm_eval_correctness.py --config-list-file=configs/models-large.txt --tp-size=4
+##### H100 test #####
+- label: LM Eval Large Models (H100) # optional
+ gpu: h100
+ optional: true
+ num_gpus: 4
+ working_dir: "/vllm-workspace/.buildkite/lm-eval-harness"
+ source_file_dependencies:
+ - csrc/
+ - vllm/model_executor/layers/quantization
+ commands:
+ - export VLLM_USE_DEEP_GEMM=0 # We found Triton is faster than DeepGEMM for H100
+ - pytest -s -v test_lm_eval_correctness.py --config-list-file=configs/models-large-hopper.txt --tp-size=4
+
##### H200 test #####
- label: Distributed Tests (H200) # optional
gpu: h200
@@ -1193,12 +1312,14 @@ steps:
working_dir: "/vllm-workspace/"
num_gpus: 2
commands:
- - pytest -v -s tests/compile/test_async_tp.py
- - pytest -v -s tests/compile/test_sequence_parallelism.py
- - pytest -v -s tests/compile/test_fusion_all_reduce.py
- - pytest -v -s tests/compile/test_fusions_e2e.py::test_tp2_attn_quant_allreduce_rmsnorm
+ - VLLM_TEST_CLEAN_GPU_MEMORY=1 pytest -v -s tests/compile/distributed/test_async_tp.py
+ - pytest -v -s tests/compile/distributed/test_sequence_parallelism.py
+ - pytest -v -s tests/compile/distributed/test_fusion_all_reduce.py
+ - "VLLM_TEST_CLEAN_GPU_MEMORY=1 pytest -v -s tests/compile/distributed/test_fusions_e2e.py -k 'not Llama-4'"
+ - VLLM_TEST_CLEAN_GPU_MEMORY=1 pytest -v -s tests/distributed/test_sequence_parallel.py
- pytest -v -s tests/distributed/test_context_parallel.py
- CUDA_VISIBLE_DEVICES=1,2 VLLM_ALL2ALL_BACKEND=deepep_high_throughput VLLM_USE_DEEP_GEMM=1 VLLM_LOGGING_LEVEL=DEBUG python3 examples/offline_inference/data_parallel.py --model Qwen/Qwen1.5-MoE-A2.7B --tp-size=1 --dp-size=2 --max-model-len 2048
+ - pytest -v -s tests/v1/distributed/test_dbo.py
##### B200 test #####
- label: Distributed Tests (B200) # optional
@@ -1209,6 +1330,7 @@ steps:
commands:
- pytest -v -s tests/distributed/test_context_parallel.py
- pytest -v -s tests/distributed/test_nccl_symm_mem_allreduce.py
+ - pytest -v -s tests/v1/distributed/test_dbo.py
##### RL Integration Tests #####
- label: Prime-RL Integration Test # 15min
@@ -1221,3 +1343,30 @@ steps:
- .buildkite/scripts/run-prime-rl-test.sh
commands:
- bash .buildkite/scripts/run-prime-rl-test.sh
+
+- label: DeepSeek V2-Lite Accuracy
+ timeout_in_minutes: 60
+ gpu: h100
+ optional: true
+ num_gpus: 4
+ working_dir: "/vllm-workspace"
+ commands:
+ - bash .buildkite/scripts/scheduled_integration_test/deepseek_v2_lite_ep_eplb.sh 0.25 200 8010
+
+- label: Qwen3-30B-A3B-FP8-block Accuracy (H100)
+ timeout_in_minutes: 60
+ gpu: h100
+ optional: true
+ num_gpus: 4
+ working_dir: "/vllm-workspace"
+ commands:
+ - bash .buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep_eplb.sh 0.8 200 8020
+
+- label: Qwen3-30B-A3B-FP8-block Accuracy (B200)
+ timeout_in_minutes: 60
+ gpu: b200
+ optional: true
+ num_gpus: 2
+ working_dir: "/vllm-workspace"
+ commands:
+ - bash .buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep_eplb.sh 0.8 200 8020 2 1
\ No newline at end of file
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index ba08a43352154..ecb10d1a450f3 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -3,13 +3,14 @@
# This lists cover the "core" components of vLLM that require careful review
/vllm/attention @LucasWilkinson
-/vllm/attention/backends/abstract.py @WoosukKwon @zhuohan123 @youkaichao @alexm-redhat @comaniac @njhill
-/vllm/executor/executor_base.py @zhuohan123 @youkaichao @alexm-redhat @comaniac @njhill @22quinn
+/vllm/attention/backends/abstract.py @WoosukKwon @zhuohan123 @youkaichao @alexm-redhat @njhill
+/vllm/executor/executor_base.py @zhuohan123 @youkaichao @alexm-redhat @njhill @22quinn
/vllm/model_executor/layers/fused_moe @mgoin @pavanimajety
/vllm/model_executor/layers/quantization @mgoin @robertgshaw2-redhat @tlrmchlsmth @yewentao256 @pavanimajety
/vllm/model_executor/layers/mamba @tdoublep
/vllm/model_executor/model_loader @22quinn
-/vllm/multimodal @DarkLight1337 @ywang96 @NickLucche
+/vllm/model_executor/layers/batch_invariant.py @yewentao256
+/vllm/multimodal @DarkLight1337 @ywang96 @NickLucche @tjtanaa
/vllm/vllm_flash_attn @LucasWilkinson
/vllm/lora @jeejeelee
/vllm/reasoning @aarnphm @chaunceyjiang
@@ -20,27 +21,30 @@ CMakeLists.txt @tlrmchlsmth @LucasWilkinson
# Any change to the VllmConfig changes can have a large user-facing impact,
# so spam a lot of people
-/vllm/config @simon-mo @WoosukKwon @youkaichao @robertgshaw2-redhat @mgoin @tlrmchlsmth @houseroad @hmellor @yewentao256 @ProExpertProg
-/vllm/config/cache.py @simon-mo @WoosukKwon @youkaichao @robertgshaw2-redhat @mgoin @tlrmchlsmth @houseroad @hmellor @yewentao256 @ProExpertProg @heheda12345
+/vllm/config @WoosukKwon @youkaichao @robertgshaw2-redhat @mgoin @tlrmchlsmth @houseroad @hmellor @yewentao256 @ProExpertProg
+/vllm/config/cache.py @WoosukKwon @youkaichao @robertgshaw2-redhat @mgoin @tlrmchlsmth @houseroad @hmellor @yewentao256 @ProExpertProg @heheda12345
# vLLM V1
/vllm/v1/attention @LucasWilkinson
/vllm/v1/attention/backends/mla @pavanimajety
/vllm/v1/attention/backends/flashinfer.py @mgoin @pavanimajety
/vllm/v1/attention/backends/triton_attn.py @tdoublep
-/vllm/v1/core @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @comaniac @alexm-redhat @heheda12345 @ApostaC
+/vllm/v1/core @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @alexm-redhat @heheda12345 @ApostaC
/vllm/v1/sample @22quinn @houseroad @njhill
/vllm/v1/spec_decode @benchislett @luccafong
/vllm/v1/structured_output @mgoin @russellb @aarnphm @benchislett
/vllm/v1/kv_cache_interface.py @heheda12345
/vllm/v1/offloading @ApostaC
+# Model runner V2
+/vllm/v1/worker/gpu @WoosukKwon
+
# Test ownership
-/.buildkite/lm-eval-harness @mgoin @simon-mo
+/.buildkite/lm-eval-harness @mgoin
/tests/distributed/test_multi_node_assignment.py @youkaichao
/tests/distributed/test_pipeline_parallel.py @youkaichao
/tests/distributed/test_same_node.py @youkaichao
-/tests/entrypoints @DarkLight1337 @robertgshaw2-redhat @simon-mo @aarnphm @NickLucche
+/tests/entrypoints @DarkLight1337 @robertgshaw2-redhat @aarnphm @NickLucche
/tests/evals @mgoin
/tests/kernels @mgoin @tlrmchlsmth @WoosukKwon @yewentao256
/tests/models @DarkLight1337 @ywang96
@@ -49,18 +53,29 @@ CMakeLists.txt @tlrmchlsmth @LucasWilkinson
/tests/test_inputs.py @DarkLight1337 @ywang96
/tests/v1/entrypoints/llm/test_struct_output_generate.py @mgoin @russellb @aarnphm
/tests/v1/structured_output @mgoin @russellb @aarnphm
-/tests/v1/core @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @comaniac @alexm-redhat @heheda12345 @ApostaC
+/tests/v1/core @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @alexm-redhat @heheda12345 @ApostaC
/tests/weight_loading @mgoin @youkaichao @yewentao256
/tests/lora @jeejeelee
/tests/models/language/generation/test_hybrid.py @tdoublep
/tests/v1/kv_connector/nixl_integration @NickLucche
/tests/v1/kv_connector @ApostaC
/tests/v1/offloading @ApostaC
+/tests/v1/determinism @yewentao256
-# Transformers backend
+# Transformers modeling backend
/vllm/model_executor/models/transformers @hmellor
/tests/models/test_transformers.py @hmellor
+# Observability
+/vllm/config/observability.py @markmc
+/vllm/v1/metrics @markmc
+/tests/v1/metrics @markmc
+/vllm/tracing.py @markmc
+/tests/v1/tracing/test_tracing.py @markmc
+/vllm/config/kv_events.py @markmc
+/vllm/distributed/kv_events.py @markmc
+/tests/distributed/test_events.py @markmc
+
# Docs
/docs/mkdocs @hmellor
/docs/**/*.yml @hmellor
@@ -105,11 +120,21 @@ mkdocs.yaml @hmellor
/vllm/attention/ops/triton_unified_attention.py @tdoublep
# ROCm related: specify owner with write access to notify AMD folks for careful code review
-/docker/Dockerfile.rocm* @gshtras
-/vllm/v1/attention/backends/rocm*.py @gshtras
-/vllm/v1/attention/backends/mla/rocm*.py @gshtras
-/vllm/attention/ops/rocm*.py @gshtras
-/vllm/model_executor/layers/fused_moe/rocm*.py @gshtras
+/vllm/**/*rocm* @tjtanaa
+/docker/Dockerfile.rocm* @gshtras @tjtanaa
+/vllm/v1/attention/backends/rocm*.py @gshtras @tjtanaa
+/vllm/v1/attention/backends/mla/rocm*.py @gshtras @tjtanaa
+/vllm/attention/ops/rocm*.py @gshtras @tjtanaa
+/vllm/model_executor/layers/fused_moe/rocm*.py @gshtras @tjtanaa
+/csrc/rocm @gshtras @tjtanaa
+/requirements/*rocm* @tjtanaa
+/tests/**/*rocm* @tjtanaa
+/docs/**/*rocm* @tjtanaa
+/vllm/**/*quark* @tjtanaa
+/tests/**/*quark* @tjtanaa
+/docs/**/*quark* @tjtanaa
+/vllm/**/*aiter* @tjtanaa
+/tests/**/*aiter* @tjtanaa
# TPU
/vllm/v1/worker/tpu* @NickLucche
@@ -124,6 +149,12 @@ mkdocs.yaml @hmellor
/examples/*/pooling/ @noooop
/tests/models/*/pooling* @noooop
/tests/entrypoints/pooling @noooop
+/vllm/entrypoints/pooling @aarnphm @chaunceyjiang @noooop
/vllm/config/pooler.py @noooop
/vllm/pooling_params.py @noooop
/vllm/model_executor/layers/pooler.py @noooop
+
+# Security guide and policies
+/docs/usage/security.md @russellb
+/SECURITY.md @russellb
+/docs/contributing/vulnerability_management.md @russellb
diff --git a/.github/mergify.yml b/.github/mergify.yml
index de1a8314a4ecd..997a40e18e588 100644
--- a/.github/mergify.yml
+++ b/.github/mergify.yml
@@ -108,7 +108,7 @@ pull_request_rules:
- files~=^benchmarks/
- files~=^vllm/benchmarks/
- files~=^tests/benchmarks/
- - files~=^\.buildkite/nightly-benchmarks/
+ - files~=^\.buildkite/performance-benchmarks/
actions:
label:
add:
@@ -151,6 +151,23 @@ pull_request_rules:
add:
- gpt-oss
+- name: label-nvidia
+ description: Automatically apply nvidia label
+ conditions:
+ - label != stale
+ - or:
+ - files~=cuda
+ - files~=cutlass
+ - files~=flashinfer
+ - files~=trtllm
+ - title~=(?i)NVIDIA
+ - title~=(?i)CUDA
+ - title~=(?i)CUTLASS
+ actions:
+ label:
+ add:
+ - nvidia
+
- name: label-rocm
description: Automatically apply rocm label
conditions:
diff --git a/.github/workflows/cleanup_pr_body.yml b/.github/workflows/cleanup_pr_body.yml
index c3e132a536a42..861290ea43c87 100644
--- a/.github/workflows/cleanup_pr_body.yml
+++ b/.github/workflows/cleanup_pr_body.yml
@@ -13,7 +13,7 @@ jobs:
steps:
- name: Checkout repository
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- name: Set up Python
uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
diff --git a/.github/workflows/issue_autolabel.yml b/.github/workflows/issue_autolabel.yml
index 7d565ef9f2e45..629966b959330 100644
--- a/.github/workflows/issue_autolabel.yml
+++ b/.github/workflows/issue_autolabel.yml
@@ -105,6 +105,31 @@ jobs:
}
],
},
+ cpu: {
+ // Keyword search - matches whole words only (with word boundaries)
+ keywords: [
+ {
+ term: "CPU Backend",
+ searchIn: "title"
+ },
+ {
+ term: "x86",
+ searchIn: "title"
+ },
+ {
+ term: "ARM",
+ searchIn: "title"
+ },
+ {
+ term: "Apple Silicon",
+ searchIn: "title"
+ },
+ {
+ term: "IBM Z",
+ searchIn: "title"
+ },
+ ],
+ },
// Add more label configurations here as needed
// example: {
// keywords: [...],
diff --git a/.github/workflows/macos-smoke-test.yml b/.github/workflows/macos-smoke-test.yml
new file mode 100644
index 0000000000000..3a12c4b3a8300
--- /dev/null
+++ b/.github/workflows/macos-smoke-test.yml
@@ -0,0 +1,80 @@
+name: macOS Apple Silicon Smoke Test
+
+on:
+ push:
+ branches:
+ - main
+ workflow_dispatch: # Manual trigger
+
+jobs:
+ macos-m1-smoke-test:
+ runs-on: macos-latest
+ timeout-minutes: 30
+
+ steps:
+ - uses: actions/checkout@v6
+
+ - uses: astral-sh/setup-uv@v7
+ with:
+ enable-cache: true
+ cache-dependency-glob: |
+ requirements/**/*.txt
+ pyproject.toml
+ python-version: '3.12'
+
+ - name: Create virtual environment
+ run: |
+ uv venv
+ echo "$GITHUB_WORKSPACE/.venv/bin" >> "$GITHUB_PATH"
+
+ - name: Install dependencies and build vLLM
+ run: |
+ uv pip install -r requirements/cpu.txt --index-strategy unsafe-best-match
+ uv pip install -e .
+ env:
+ CMAKE_BUILD_PARALLEL_LEVEL: 4
+
+ - name: Verify installation
+ run: |
+ python -c "import vllm; print(f'vLLM version: {vllm.__version__}')"
+
+ - name: Smoke test vllm serve
+ run: |
+ # Start server in background
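+ # Dummy (random) weights and a 2-layer override keep startup fast;
+ # no full model weights are downloaded for this smoke test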
+ vllm serve Qwen/Qwen3-0.6B \
+ --max-model-len=2K \
+ --load-format=dummy \
+ --hf-overrides '{"num_hidden_layers": 2}' \
+ --enforce-eager \
+ --port 8000 &
+
+ SERVER_PID=$!
+
+ # Wait for server to start
+ for i in {1..30}; do
+ if curl -s http://localhost:8000/health > /dev/null; then
+ echo "Server started successfully"
+ break
+ fi
+ if [ "$i" -eq 30 ]; then
+ echo "Server failed to start"
+ kill "$SERVER_PID"
+ exit 1
+ fi
+ sleep 2
+ done
+
+ # Test health endpoint
+ curl -f http://localhost:8000/health
+
+ # Test completion
+ curl -f http://localhost:8000/v1/completions \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": "Qwen/Qwen3-0.6B",
+ "prompt": "Hello",
+ "max_tokens": 5
+ }'
+
+ # Cleanup
+ kill "$SERVER_PID"
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index e21d13b8161f3..d5e70f30ef638 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -16,7 +16,7 @@ jobs:
pre-commit:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+ - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with:
python-version: "3.12"
diff --git a/.gitignore b/.gitignore
index ffa36dee1ab9d..7cda86478664f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,9 @@
# vllm-flash-attn built from source
vllm/vllm_flash_attn/*
+# OpenAI triton kernels copied from source
+vllm/third_party/triton_kernels/*
+
# triton jit
.triton
@@ -221,3 +224,6 @@ csrc/moe/marlin_moe_wna16/kernel_*
# Ignore ep_kernels_workspace folder
ep_kernels_workspace/
+
+# Allow tracked library source folders under submodules (e.g., benchmarks/lib)
+!vllm/benchmarks/lib/
diff --git a/.markdownlint.yaml b/.markdownlint.yaml
index cd9df57cd9803..937487f47364d 100644
--- a/.markdownlint.yaml
+++ b/.markdownlint.yaml
@@ -3,10 +3,9 @@ MD007:
MD013: false
MD024:
siblings_only: true
+MD031:
+ list_items: false
MD033: false
-MD045: false
MD046: false
-MD051: false
MD052: false
-MD053: false
MD059: false
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bcd40e7f8ab39..e034f75a9d322 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -38,7 +38,7 @@ repos:
rev: 0.9.1
hooks:
- id: pip-compile
- args: [requirements/test.in, -o, requirements/test.txt, --index-strategy, unsafe-best-match, --torch-backend, cu129, --python-platform, x86_64-manylinux_2_28]
+ args: [requirements/test.in, -o, requirements/test.txt, --index-strategy, unsafe-best-match, --torch-backend, cu129, --python-platform, x86_64-manylinux_2_28, --python-version, "3.12"]
files: ^requirements/test\.(in|txt)$
- repo: local
hooks:
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 7cb94f919f123..e09972fe71995 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -39,6 +39,13 @@ set(PYTHON_SUPPORTED_VERSIONS "3.10" "3.11" "3.12" "3.13")
# Supported AMD GPU architectures.
set(HIP_SUPPORTED_ARCHS "gfx906;gfx908;gfx90a;gfx942;gfx950;gfx1030;gfx1100;gfx1101;gfx1200;gfx1201;gfx1150;gfx1151")
+# ROCm installation prefix. Defaults to /opt/rocm but can be overridden via
+# -DROCM_PATH=/your/rocm/path when invoking cmake.
+if(NOT DEFINED ROCM_PATH)
+ set(ROCM_PATH "/opt/rocm" CACHE PATH "ROCm installation prefix")
+else()
+ set(ROCM_PATH ${ROCM_PATH} CACHE PATH "ROCm installation prefix" FORCE)
+endif()
#
# Supported/expected torch versions for CUDA/ROCm.
#
@@ -129,7 +136,7 @@ elseif(HIP_FOUND)
# ROCm 5.X and 6.X
if (ROCM_VERSION_DEV_MAJOR GREATER_EQUAL 5 AND
- NOT Torch_VERSION VERSION_EQUAL ${TORCH_SUPPORTED_VERSION_ROCM})
+ Torch_VERSION VERSION_LESS ${TORCH_SUPPORTED_VERSION_ROCM})
message(WARNING "Pytorch version >= ${TORCH_SUPPORTED_VERSION_ROCM} "
"expected for ROCm build, saw ${Torch_VERSION} instead.")
endif()
@@ -237,11 +244,28 @@ set_gencode_flags_for_srcs(
SRCS "${VLLM_CUMEM_EXT_SRC}"
CUDA_ARCHS "${CUDA_ARCHS}")
-if(VLLM_GPU_LANG STREQUAL "CUDA")
+if(VLLM_GPU_LANG STREQUAL "CUDA" OR VLLM_GPU_LANG STREQUAL "HIP")
message(STATUS "Enabling cumem allocator extension.")
- # link against cuda driver library
- list(APPEND CUMEM_LIBS CUDA::cuda_driver)
- define_gpu_extension_target(
+ if(VLLM_GPU_LANG STREQUAL "CUDA")
+ # link against cuda driver library
+ list(APPEND CUMEM_LIBS CUDA::cuda_driver)
+ else()
+ # Link against the ROCm driver library. Prefer an absolute path to
+ # libamdhip64.so inside ${ROCM_PATH}/lib if available; otherwise fall
+ # back to linking by name "amdhip64".
+ find_library(AMDHIP64_LIB
+ NAMES amdhip64 libamdhip64.so
+ PATHS ${ROCM_PATH}/lib
+ NO_DEFAULT_PATH)
+ if(AMDHIP64_LIB)
+ message(STATUS "Found libamdhip64 at ${AMDHIP64_LIB}")
+ list(APPEND CUMEM_LIBS ${AMDHIP64_LIB})
+ else()
+ message(WARNING "libamdhip64 not found in ${ROCM_PATH}/lib; falling back to linking 'amdhip64' by name")
+ list(APPEND CUMEM_LIBS amdhip64)
+ endif()
+ endif()
+ define_extension_target(
cumem_allocator
DESTINATION vllm
LANGUAGE CXX
@@ -265,6 +289,7 @@ set(VLLM_EXT_SRC
"csrc/pos_encoding_kernels.cu"
"csrc/activation_kernels.cu"
"csrc/layernorm_kernels.cu"
+ "csrc/fused_qknorm_rope_kernel.cu"
"csrc/layernorm_quant_kernels.cu"
"csrc/sampler.cu"
"csrc/cuda_view.cu"
@@ -282,7 +307,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
SET(CUTLASS_ENABLE_HEADERS_ONLY ON CACHE BOOL "Enable only the header library")
# Set CUTLASS_REVISION. Used for FetchContent. Also fixes some bogus messages when building.
- set(CUTLASS_REVISION "v4.2.1" CACHE STRING "CUTLASS revision to use")
+ set(CUTLASS_REVISION "v4.2.1")
# Use the specified CUTLASS source directory for compilation if VLLM_CUTLASS_SRC_DIR is provided
if (DEFINED ENV{VLLM_CUTLASS_SRC_DIR})
@@ -329,8 +354,17 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
# Only build Marlin kernels if we are building for at least some compatible archs.
# Keep building Marlin for 9.0 as there are some group sizes and shapes that
# are not supported by Machete yet.
- # 9.0 for latest bf16 atomicAdd PTX
- cuda_archs_loose_intersection(MARLIN_ARCHS "8.0;8.7;9.0+PTX" "${CUDA_ARCHS}")
+
+ # marlin arches for fp16 output
+ cuda_archs_loose_intersection(MARLIN_ARCHS "8.0+PTX" "${CUDA_ARCHS}")
+ # marlin arches for bf16 output (we need 9.0 for bf16 atomicAdd PTX)
+ cuda_archs_loose_intersection(MARLIN_BF16_ARCHS "8.0+PTX;9.0+PTX" "${CUDA_ARCHS}")
+ # marlin arches for fp8 input
+ # - sm80 doesn't support fp8 computation
+ # - sm90 and sm100 don't support the QMMA.16832.F32.E4M3.E4M3 SASS instruction
+ # so we only enable fp8 computation for SM89 (e.g. RTX 40x0) and SM120 (e.g. RTX 50x0)
+ cuda_archs_loose_intersection(MARLIN_FP8_ARCHS "8.9;12.0" "${CUDA_ARCHS}")
+
if (MARLIN_ARCHS)
#
@@ -340,16 +374,18 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
set(MARLIN_GEN_SCRIPT
${CMAKE_CURRENT_SOURCE_DIR}/csrc/quantization/gptq_marlin/generate_kernels.py)
file(MD5 ${MARLIN_GEN_SCRIPT} MARLIN_GEN_SCRIPT_HASH)
+ list(JOIN CUDA_ARCHS "," CUDA_ARCHS_STR)
+ set(MARLIN_GEN_SCRIPT_HASH_AND_ARCH "${MARLIN_GEN_SCRIPT_HASH}(ARCH:${CUDA_ARCHS_STR})")
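+ # Including the target arch list in the cache key forces kernel regeneration
+ # whenever CUDA_ARCHS changes, not only when the generator script itself changes.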
- message(STATUS "Marlin generation script hash: ${MARLIN_GEN_SCRIPT_HASH}")
- message(STATUS "Last run Marlin generate script hash: $CACHE{MARLIN_GEN_SCRIPT_HASH}")
+ message(STATUS "Marlin generation script hash: ${MARLIN_GEN_SCRIPT_HASH_AND_ARCH}")
+ message(STATUS "Last run Marlin generate script hash: $CACHE{MARLIN_GEN_SCRIPT_HASH_AND_ARCH}")
- if (NOT DEFINED CACHE{MARLIN_GEN_SCRIPT_HASH}
- OR NOT $CACHE{MARLIN_GEN_SCRIPT_HASH} STREQUAL ${MARLIN_GEN_SCRIPT_HASH})
+ if (NOT DEFINED CACHE{MARLIN_GEN_SCRIPT_HASH_AND_ARCH}
+ OR NOT $CACHE{MARLIN_GEN_SCRIPT_HASH_AND_ARCH} STREQUAL ${MARLIN_GEN_SCRIPT_HASH_AND_ARCH})
execute_process(
COMMAND ${CMAKE_COMMAND} -E env
PYTHONPATH=$PYTHONPATH
- ${Python_EXECUTABLE} ${MARLIN_GEN_SCRIPT}
+ ${Python_EXECUTABLE} ${MARLIN_GEN_SCRIPT} ${CUDA_ARCHS_STR}
RESULT_VARIABLE marlin_generation_result
OUTPUT_VARIABLE marlin_generation_result
OUTPUT_FILE ${CMAKE_CURRENT_BINARY_DIR}/marlin_generation.log
@@ -362,15 +398,15 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
"\nCheck the log for details: "
"${CMAKE_CURRENT_BINARY_DIR}/marlin_generation.log")
else()
- set(MARLIN_GEN_SCRIPT_HASH ${MARLIN_GEN_SCRIPT_HASH}
- CACHE STRING "Last run Marlin generate script hash" FORCE)
+ set(MARLIN_GEN_SCRIPT_HASH_AND_ARCH ${MARLIN_GEN_SCRIPT_HASH_AND_ARCH}
+ CACHE STRING "Last run Marlin generate script hash and arch" FORCE)
message(STATUS "Marlin generation completed successfully.")
endif()
else()
message(STATUS "Marlin generation script has not changed, skipping generation.")
endif()
- file(GLOB MARLIN_TEMPLATE_KERNEL_SRC "csrc/quantization/gptq_marlin/kernel_*.cu")
+ file(GLOB MARLIN_TEMPLATE_KERNEL_SRC "csrc/quantization/gptq_marlin/sm80_kernel_*_float16.cu")
set_gencode_flags_for_srcs(
SRCS "${MARLIN_TEMPLATE_KERNEL_SRC}"
CUDA_ARCHS "${MARLIN_ARCHS}")
@@ -378,12 +414,34 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
set_source_files_properties(${MARLIN_TEMPLATE_KERNEL_SRC}
PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false")
endif()
-
list(APPEND VLLM_EXT_SRC ${MARLIN_TEMPLATE_KERNEL_SRC})
+ file(GLOB MARLIN_TEMPLATE_BF16_KERNEL_SRC "csrc/quantization/gptq_marlin/sm80_kernel_*_bfloat16.cu")
+ set_gencode_flags_for_srcs(
+ SRCS "${MARLIN_TEMPLATE_BF16_KERNEL_SRC}"
+ CUDA_ARCHS "${MARLIN_BF16_ARCHS}")
+ if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8)
+ set_source_files_properties(${MARLIN_TEMPLATE_BF16_KERNEL_SRC}
+ PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false")
+ endif()
+ list(APPEND VLLM_EXT_SRC ${MARLIN_TEMPLATE_BF16_KERNEL_SRC})
+
+ if (MARLIN_FP8_ARCHS)
+ file(GLOB MARLIN_TEMPLATE_FP8_KERNEL_SRC "csrc/quantization/gptq_marlin/sm89_kernel_*.cu")
+ set_gencode_flags_for_srcs(
+ SRCS "${MARLIN_TEMPLATE_FP8_KERNEL_SRC}"
+ CUDA_ARCHS "${MARLIN_FP8_ARCHS}")
+ if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8)
+ set_source_files_properties(${MARLIN_TEMPLATE_FP8_KERNEL_SRC}
+ PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false")
+ endif()
+ list(APPEND VLLM_EXT_SRC ${MARLIN_TEMPLATE_FP8_KERNEL_SRC})
+ endif()
+
set(MARLIN_SRCS
"csrc/quantization/marlin/sparse/marlin_24_cuda_kernel.cu"
"csrc/quantization/gptq_marlin/gptq_marlin.cu"
+ "csrc/quantization/gptq_marlin/marlin_int4_fp8_preprocess.cu"
"csrc/quantization/gptq_marlin/gptq_marlin_repack.cu"
"csrc/quantization/gptq_marlin/awq_marlin_repack.cu")
set_gencode_flags_for_srcs(
@@ -487,9 +545,9 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
# The cutlass_scaled_mm kernels for Blackwell SM100 (c3x, i.e. CUTLASS 3.x)
# require CUDA 12.8 or later
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 13.0)
- cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0f;11.0f;12.0f" "${CUDA_ARCHS}")
+ cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0f;11.0f" "${CUDA_ARCHS}")
else()
- cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0a;10.1a;10.3a;12.0a;12.1a" "${CUDA_ARCHS}")
+ cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0a;10.1a;10.3a" "${CUDA_ARCHS}")
endif()
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8 AND SCALED_MM_ARCHS)
set(SRCS
@@ -579,12 +637,15 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
set(SRCS
"csrc/quantization/fp4/nvfp4_quant_kernels.cu"
"csrc/quantization/fp4/activation_nvfp4_quant_fusion_kernels.cu"
- "csrc/quantization/fp4/nvfp4_scaled_mm_sm120_kernels.cu")
+ "csrc/quantization/fp4/nvfp4_experts_quant.cu"
+ "csrc/quantization/fp4/nvfp4_scaled_mm_sm120_kernels.cu"
+ "csrc/quantization/fp4/nvfp4_blockwise_moe_kernel.cu")
set_gencode_flags_for_srcs(
SRCS "${SRCS}"
CUDA_ARCHS "${FP4_ARCHS}")
list(APPEND VLLM_EXT_SRC "${SRCS}")
list(APPEND VLLM_GPU_FLAGS "-DENABLE_NVFP4_SM120=1")
+ list(APPEND VLLM_GPU_FLAGS "-DENABLE_CUTLASS_MOE_SM120=1")
message(STATUS "Building NVFP4 for archs: ${FP4_ARCHS}")
else()
message(STATUS "Not building NVFP4 as no compatible archs were found.")
@@ -594,9 +655,9 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
# FP4 Archs and flags
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 13.0)
- cuda_archs_loose_intersection(FP4_ARCHS "10.0f;11.0f;12.0f" "${CUDA_ARCHS}")
+ cuda_archs_loose_intersection(FP4_ARCHS "10.0f;11.0f" "${CUDA_ARCHS}")
else()
- cuda_archs_loose_intersection(FP4_ARCHS "10.0a;10.1a;12.0a;12.1a" "${CUDA_ARCHS}")
+ cuda_archs_loose_intersection(FP4_ARCHS "10.0a;10.1a;10.3a" "${CUDA_ARCHS}")
endif()
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8 AND FP4_ARCHS)
set(SRCS
@@ -670,7 +731,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 13.0)
cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0f;11.0f" "${CUDA_ARCHS}")
else()
- cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0a" "${CUDA_ARCHS}")
+ cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0a;10.1a;10.3a" "${CUDA_ARCHS}")
endif()
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8 AND SCALED_MM_ARCHS)
set(SRCS "csrc/quantization/w8a8/cutlass/moe/grouped_mm_c3x_sm100.cu")
@@ -716,9 +777,9 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
endif()
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 13.0)
- cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0f;11.0f;12.0f" "${CUDA_ARCHS}")
+ cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0f;11.0f" "${CUDA_ARCHS}")
else()
- cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0a;10.1a;10.3a;12.0a;12.1a" "${CUDA_ARCHS}")
+ cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0a;10.1a;10.3a" "${CUDA_ARCHS}")
endif()
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8 AND SCALED_MM_ARCHS)
set(SRCS "csrc/quantization/w8a8/cutlass/moe/blockwise_scaled_group_mm_sm100.cu")
@@ -836,7 +897,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
endif()
# Hadacore kernels
- cuda_archs_loose_intersection(HADACORE_ARCHS "8.0;8.9;9.0" "${CUDA_ARCHS}")
+ cuda_archs_loose_intersection(HADACORE_ARCHS "8.0+PTX;9.0+PTX" "${CUDA_ARCHS}")
if(HADACORE_ARCHS)
set(SRCS "csrc/quantization/hadamard/hadacore/hadamard_transform_cuda.cu")
set_gencode_flags_for_srcs(
@@ -858,7 +919,7 @@ if (VLLM_GPU_LANG STREQUAL "HIP")
endif()
message(STATUS "Enabling C extension.")
-define_gpu_extension_target(
+define_extension_target(
_C
DESTINATION vllm
LANGUAGE ${VLLM_GPU_LANG}
@@ -913,8 +974,15 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
CUDA_ARCHS "${CUDA_ARCHS}")
list(APPEND VLLM_MOE_EXT_SRC "${VLLM_MOE_WNA16_SRC}")
- # 9.0 for latest bf16 atomicAdd PTX
- cuda_archs_loose_intersection(MARLIN_MOE_ARCHS "8.0;8.7;9.0+PTX" "${CUDA_ARCHS}")
+ # moe marlin arches
+ # note that we always set `use_atomic_add=False` for moe marlin now,
+ # so we don't need 9.0 for bf16 atomicAdd PTX
+ cuda_archs_loose_intersection(MARLIN_MOE_ARCHS "8.0+PTX" "${CUDA_ARCHS}")
+ # moe marlin arches for fp8 input
+ # - sm80 doesn't support fp8 computation
+ # - sm90 and sm100 don't support the QMMA.16832.F32.E4M3.E4M3 SASS instruction
+ # so we only enable fp8 computation for SM89 (e.g. RTX 40x0) and SM120 (e.g. RTX 50x0)
+ cuda_archs_loose_intersection(MARLIN_MOE_FP8_ARCHS "8.9;12.0" "${CUDA_ARCHS}")
if (MARLIN_MOE_ARCHS)
#
@@ -924,16 +992,18 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
set(MOE_MARLIN_GEN_SCRIPT
${CMAKE_CURRENT_SOURCE_DIR}/csrc/moe/marlin_moe_wna16/generate_kernels.py)
file(MD5 ${MOE_MARLIN_GEN_SCRIPT} MOE_MARLIN_GEN_SCRIPT_HASH)
+ list(JOIN CUDA_ARCHS "," CUDA_ARCHS_STR)
+ set(MOE_MARLIN_GEN_SCRIPT_HASH_AND_ARCH "${MOE_MARLIN_GEN_SCRIPT_HASH}(ARCH:${CUDA_ARCHS_STR})")
- message(STATUS "Marlin MOE generation script hash: ${MOE_MARLIN_GEN_SCRIPT_HASH}")
- message(STATUS "Last run Marlin MOE generate script hash: $CACHE{MOE_MARLIN_GEN_SCRIPT_HASH}")
+ message(STATUS "Marlin MOE generation script hash with arch: ${MOE_MARLIN_GEN_SCRIPT_HASH_AND_ARCH}")
+ message(STATUS "Last run Marlin MOE generate script hash with arch: $CACHE{MOE_MARLIN_GEN_SCRIPT_HASH_AND_ARCH}")
- if (NOT DEFINED CACHE{MOE_MARLIN_GEN_SCRIPT_HASH}
- OR NOT $CACHE{MOE_MARLIN_GEN_SCRIPT_HASH} STREQUAL ${MOE_MARLIN_GEN_SCRIPT_HASH})
+ if (NOT DEFINED CACHE{MOE_MARLIN_GEN_SCRIPT_HASH_AND_ARCH}
+ OR NOT $CACHE{MOE_MARLIN_GEN_SCRIPT_HASH_AND_ARCH} STREQUAL ${MOE_MARLIN_GEN_SCRIPT_HASH_AND_ARCH})
execute_process(
COMMAND ${CMAKE_COMMAND} -E env
PYTHONPATH=$PYTHONPATH
- ${Python_EXECUTABLE} ${MOE_MARLIN_GEN_SCRIPT}
+ ${Python_EXECUTABLE} ${MOE_MARLIN_GEN_SCRIPT} ${CUDA_ARCHS_STR}
RESULT_VARIABLE moe_marlin_generation_result
OUTPUT_VARIABLE moe_marlin_generation_output
OUTPUT_FILE ${CMAKE_CURRENT_BINARY_DIR}/moe_marlin_generation.log
@@ -946,7 +1016,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
"\nCheck the log for details: "
"${CMAKE_CURRENT_BINARY_DIR}/moe_marlin_generation.log")
else()
- set(MOE_MARLIN_GEN_SCRIPT_HASH ${MOE_MARLIN_GEN_SCRIPT_HASH}
+ set(MOE_MARLIN_GEN_SCRIPT_HASH_AND_ARCH ${MOE_MARLIN_GEN_SCRIPT_HASH_AND_ARCH}
CACHE STRING "Last run Marlin MOE generate script hash" FORCE)
message(STATUS "Marlin MOE generation completed successfully.")
endif()
@@ -954,16 +1024,28 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
message(STATUS "Marlin MOE generation script has not changed, skipping generation.")
endif()
- file(GLOB MOE_WNAA16_MARLIN_SRC "csrc/moe/marlin_moe_wna16/*.cu")
+ file(GLOB MARLIN_MOE_SRC "csrc/moe/marlin_moe_wna16/sm80_kernel_*.cu")
+ list(APPEND MARLIN_MOE_SRC "csrc/moe/marlin_moe_wna16/ops.cu")
set_gencode_flags_for_srcs(
- SRCS "${MOE_WNAA16_MARLIN_SRC}"
+ SRCS "${MARLIN_MOE_SRC}"
CUDA_ARCHS "${MARLIN_MOE_ARCHS}")
if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8)
- set_source_files_properties(${MOE_WNAA16_MARLIN_SRC}
+ set_source_files_properties(${MARLIN_MOE_SRC}
PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false")
endif()
+ list(APPEND VLLM_MOE_EXT_SRC ${MARLIN_MOE_SRC})
- list(APPEND VLLM_MOE_EXT_SRC ${MOE_WNAA16_MARLIN_SRC})
+ if (MARLIN_MOE_FP8_ARCHS)
+ file(GLOB MARLIN_MOE_FP8_SRC "csrc/moe/marlin_moe_wna16/sm89_kernel_*.cu")
+ set_gencode_flags_for_srcs(
+ SRCS "${MARLIN_MOE_FP8_SRC}"
+ CUDA_ARCHS "${MARLIN_MOE_FP8_ARCHS}")
+ if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.8)
+ set_source_files_properties(${MARLIN_MOE_FP8_SRC}
+ PROPERTIES COMPILE_FLAGS "-static-global-template-stub=false")
+ endif()
+ list(APPEND VLLM_MOE_EXT_SRC ${MARLIN_MOE_FP8_SRC})
+ endif()
message(STATUS "Building Marlin MOE kernels for archs: ${MARLIN_MOE_ARCHS}")
else()
@@ -973,7 +1055,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA")
endif()
message(STATUS "Enabling moe extension.")
-define_gpu_extension_target(
+define_extension_target(
_moe_C
DESTINATION vllm
LANGUAGE ${VLLM_GPU_LANG}
@@ -994,7 +1076,7 @@ if(VLLM_GPU_LANG STREQUAL "HIP")
"csrc/rocm/skinny_gemms.cu"
"csrc/rocm/attention.cu")
- define_gpu_extension_target(
+ define_extension_target(
_rocm_C
DESTINATION vllm
LANGUAGE ${VLLM_GPU_LANG}
@@ -1005,6 +1087,11 @@ if(VLLM_GPU_LANG STREQUAL "HIP")
WITH_SOABI)
endif()
+# For CUDA and HIP builds also build the triton_kernels external package.
+if(VLLM_GPU_LANG STREQUAL "CUDA" OR VLLM_GPU_LANG STREQUAL "HIP")
+ include(cmake/external_projects/triton_kernels.cmake)
+endif()
+
# For CUDA we also build and ship some external projects.
if (VLLM_GPU_LANG STREQUAL "CUDA")
include(cmake/external_projects/flashmla.cmake)
diff --git a/README.md b/README.md
index 3dcdd7dc00942..abbb63158f166 100644
--- a/README.md
+++ b/README.md
@@ -21,6 +21,10 @@ Join us at the [PyTorch Conference, October 22-23](https://events.linuxfoundatio
*Latest News* 🔥
+- [2025/11] We hosted [vLLM Bangkok Meetup](https://luma.com/v0f647nv). We explored vLLM and LMCache inference and low-resource language adaptation with speakers from Embedded LLM, AMD, and Red Hat. Please find the meetup slides [here](https://drive.google.com/drive/folders/1H0DS57F8HQ5q3kSOSoRmucPJWL3E0A_X?usp=sharing).
+- [2025/11] We hosted [the first vLLM Europe Meetup in Zurich](https://luma.com/0gls27kb) focused on quantization, distributed inference, and reinforcement learning at scale with speakers from Mistral, IBM, and Red Hat. Please find the meetup slides [here](https://docs.google.com/presentation/d/1UC9PTLCHYXQpOmJDSFg6Sljra3iVXzc09DeEI7dnxMc/edit?usp=sharing) and recording [here](https://www.youtube.com/watch?v=6m6ZE6yVEDI).
+- [2025/11] We hosted [vLLM Beijing Meetup](https://mp.weixin.qq.com/s/xSrYXjNgr1HbCP4ExYNG1w) focusing on distributed inference and diverse accelerator support with vLLM! Please find the meetup slides [here](https://drive.google.com/drive/folders/1nQJ8ZkLSjKxvu36sSHaceVXtttbLvvu-?usp=drive_link).
+- [2025/10] We hosted [vLLM Shanghai Meetup](https://mp.weixin.qq.com/s/__xb4OyOsImz-9eAVrdlcg) focused on hands-on vLLM inference optimization! Please find the meetup slides [here](https://drive.google.com/drive/folders/1KqwjsFJLfEsC8wlDugnrR61zsWHt94Q6).
- [2025/09] We hosted [vLLM Toronto Meetup](https://luma.com/e80e0ymm) focused on tackling inference at scale and speculative decoding with speakers from NVIDIA and Red Hat! Please find the meetup slides [here](https://docs.google.com/presentation/d/1IYJYmJcu9fLpID5N5RbW_vO0XLo0CGOR14IXOjB61V8/edit?usp=sharing).
- [2025/08] We hosted [vLLM Shenzhen Meetup](https://mp.weixin.qq.com/s/k8ZBO1u2_2odgiKWH_GVTQ) focusing on the ecosystem around vLLM! Please find the meetup slides [here](https://drive.google.com/drive/folders/1Ua2SVKVSu-wp5vou_6ElraDt2bnKhiEA).
- [2025/08] We hosted [vLLM Singapore Meetup](https://www.sginnovate.com/event/vllm-sg-meet). We shared V1 updates, disaggregated serving and MLLM speedups with speakers from Embedded LLM, AMD, WekaIO, and A*STAR. Please find the meetup slides [here](https://drive.google.com/drive/folders/1ncf3GyqLdqFaB6IeB834E5TZJPLAOiXZ?usp=sharing).
@@ -82,7 +86,7 @@ vLLM is flexible and easy to use with:
- Tensor, pipeline, data and expert parallelism support for distributed inference
- Streaming outputs
- OpenAI-compatible API server
-- Support for NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs and GPUs, PowerPC CPUs, and TPU. Additionally, support for diverse hardware plugins such as Intel Gaudi, IBM Spyre and Huawei Ascend.
+- Support for NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs and GPUs, PowerPC CPUs, Arm CPUs, and TPU. Additionally, support for diverse hardware plugins such as Intel Gaudi, IBM Spyre and Huawei Ascend.
- Prefix caching support
- Multi-LoRA support
diff --git a/benchmarks/auto_tune/README.md b/benchmarks/auto_tune/README.md
index d1bdb4c43f10b..9a9600e08dafe 100644
--- a/benchmarks/auto_tune/README.md
+++ b/benchmarks/auto_tune/README.md
@@ -83,7 +83,7 @@ MIN_CACHE_HIT_PCT=0
MAX_LATENCY_ALLOWED_MS=100000000000 # A very large number
```
-#### 2. Maximize Throughput with a Latency Requirement
+### 2. Maximize Throughput with a Latency Requirement
- **Goal**: Find the best server parameters when P99 end-to-end latency must be below 500ms.
- **Configuration**:
@@ -96,7 +96,7 @@ MIN_CACHE_HIT_PCT=0
MAX_LATENCY_ALLOWED_MS=500
```
-#### 3. Maximize Throughput with Prefix Caching and Latency Requirements
+### 3. Maximize Throughput with Prefix Caching and Latency Requirements
- **Goal**: Find the best server parameters assuming a 60% prefix cache hit rate and a latency requirement of 500ms.
- **Configuration**:
diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py
index 4021fede72153..d69d74ca61f54 100644
--- a/benchmarks/backend_request_func.py
+++ b/benchmarks/backend_request_func.py
@@ -620,7 +620,7 @@ def get_tokenizer(
kwargs["use_fast"] = False
if tokenizer_mode == "mistral":
try:
- from vllm.transformers_utils.tokenizer import MistralTokenizer
+ from vllm.tokenizers import MistralTokenizer
except ImportError as e:
raise ImportError(
"MistralTokenizer requires vllm package.\n"
diff --git a/benchmarks/benchmark_batch_invariance.py b/benchmarks/benchmark_batch_invariance.py
new file mode 100755
index 0000000000000..b5c16c42de467
--- /dev/null
+++ b/benchmarks/benchmark_batch_invariance.py
@@ -0,0 +1,380 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+"""
+Benchmark to measure the performance overhead of VLLM_BATCH_INVARIANT mode.
+
+This benchmark runs the same workload twice:
+1. With VLLM_BATCH_INVARIANT=0 (baseline)
+2. With VLLM_BATCH_INVARIANT=1 (batch invariant mode)
+
+It then reports timing and throughput metrics so the two runs can be compared.
+
+Environment variables:
+ VLLM_BENCH_MODEL: Model to benchmark (default: "Qwen/Qwen3-1.7B")
+ VLLM_BENCH_TP_SIZE: Tensor parallel size (default: 1, use 8 for deepseek)
+ VLLM_BENCH_BATCH_SIZE: Max batch size (default: 128)
+ VLLM_BENCH_NUM_TRIALS: Number of trials to run (default: 5)
+ VLLM_BENCH_MIN_PROMPT: Min prompt length in words (default: 1024)
+ VLLM_BENCH_MAX_PROMPT: Max prompt length in words (default: 2048)
+ VLLM_BENCH_MAX_TOKENS: Max tokens to generate (default: 128)
+ VLLM_BENCH_TEMPERATURE: Temperature for sampling (default: 0.0)
+ VLLM_BENCH_GPU_MEMORY_UTILIZATION: GPU memory utilization (default: 0.4)
+ VLLM_BENCH_MAX_MODEL_LEN: Max model length (default: 5120)
+ VLLM_BENCH_BACKEND: Attention backend (default: FLASH_ATTN)
+
+Example usage:
+ # Benchmark qwen3 (default)
+ python benchmarks/benchmark_batch_invariance.py
+
+ # Benchmark deepseek with 8 GPUs
+ VLLM_BENCH_MODEL="deepseek-ai/DeepSeek-V3" VLLM_BENCH_TP_SIZE=8 \\
+ python benchmarks/benchmark_batch_invariance.py
+
+ # Quick test with fewer trials
+ VLLM_BENCH_NUM_TRIALS=2 VLLM_BENCH_BATCH_SIZE=32 \\
+ python benchmarks/benchmark_batch_invariance.py
+"""
+
+import contextlib
+import os
+import random
+import time
+
+from vllm import LLM, SamplingParams
+from vllm.platforms import current_platform
+
+
+def _random_prompt(min_words: int = 1024, max_words: int = 1024 * 2) -> str:
+ """Generate a random prompt for benchmarking."""
+ prompt_templates = [
+ "Question: What is the capital of France?\nAnswer: The capital of France is",
+ "Q: How does photosynthesis work?\nA: Photosynthesis is the process by which",
+ "User: Can you explain quantum mechanics?\nAssistant: Quantum mechanics is",
+ "Once upon a time in a distant galaxy, there lived",
+ "The old man walked slowly down the street, remembering",
+ "In the year 2157, humanity finally discovered",
+ "To implement a binary search tree in Python, first we need to",
+ "The algorithm works by iterating through the array and",
+ "Here's how to optimize database queries using indexing:",
+ "The Renaissance was a period in European history that",
+ "Climate change is caused by several factors including",
+ "The human brain contains approximately 86 billion neurons which",
+ "I've been thinking about getting a new laptop because",
+ "Yesterday I went to the store and bought",
+ "My favorite thing about summer is definitely",
+ ]
+
+ base_prompt = random.choice(prompt_templates)
+
+ if max_words < min_words:
+ max_words = min_words
+ target_words = random.randint(min_words, max_words)
+
+ if target_words > 50:
+ padding_text = (
+ " This is an interesting topic that deserves more explanation. "
+ * (target_words // 50)
+ )
+ base_prompt = base_prompt + padding_text
+
+ return base_prompt
+
+
+def run_benchmark_with_batch_invariant(
+ model: str,
+ tp_size: int,
+ max_batch_size: int,
+ num_trials: int,
+ min_prompt: int,
+ max_prompt: int,
+ max_tokens: int,
+ temperature: float,
+ gpu_mem_util: float,
+ max_model_len: int,
+ backend: str,
+ batch_invariant: bool,
+ seed: int = 12345,
+) -> dict:
+ """
+ Run the benchmark with the specified configuration.
+
+ Returns a dict with timing and throughput metrics.
+ """
+ random.seed(seed)
+
+ # Set environment variables
+ os.environ["VLLM_ATTENTION_BACKEND"] = backend
+ if batch_invariant:
+ os.environ["VLLM_BATCH_INVARIANT"] = "1"
+ else:
+ os.environ["VLLM_BATCH_INVARIANT"] = "0"
+
+ print(f"\n{'=' * 80}")
+ print(f"BENCHMARK: VLLM_BATCH_INVARIANT={int(batch_invariant)}")
+ print(f" Model: {model}")
+ print(f" TP Size: {tp_size}")
+ print(f" Backend: {backend}")
+ print(f" Max Batch Size: {max_batch_size}")
+ print(f" Trials: {num_trials}")
+ print(f" Max Tokens: {max_tokens}")
+ print(f"{'=' * 80}\n")
+
+ sampling = SamplingParams(
+ temperature=temperature,
+ top_p=0.95,
+ max_tokens=max_tokens,
+ seed=20240919,
+ )
+
+ needle_prompt = "There once was a "
+
+ llm = None
+ try:
+ # Create LLM engine
+ start_init = time.perf_counter()
+ llm = LLM(
+ model=model,
+ max_num_seqs=max_batch_size,
+ gpu_memory_utilization=gpu_mem_util,
+ max_model_len=max_model_len,
+ dtype="bfloat16",
+ tensor_parallel_size=tp_size,
+ enable_prefix_caching=False,
+ )
+ init_time = time.perf_counter() - start_init
+ print(f"Engine initialization time: {init_time:.2f}s\n")
+
+ # Generate baseline
+ print("Generating baseline (warmup)...")
+ baseline_out = llm.generate([needle_prompt], sampling)
+ assert len(baseline_out) == 1
+ baseline_text = baseline_out[0].outputs[0].text
+ print(f"Baseline output: '{baseline_text[:50]}...'\n")
+
+ # Run trials and measure timing
+ trial_times: list[float] = []
+ total_tokens = 0
+ total_prompts = 0
+
+ for trial in range(num_trials):
+ # Create a batch
+ prompts: list[str] = []
+ batch_size = random.randint(max_batch_size // 2, max_batch_size)
+ needle_pos = random.randint(0, batch_size - 1)
+ for i in range(batch_size):
+ if i == needle_pos:
+ prompts.append(needle_prompt)
+ else:
+ prompts.append(_random_prompt(min_prompt, max_prompt))
+
+ # Measure time for this trial
+ start_time = time.perf_counter()
+ outputs = llm.generate(prompts, sampling)
+ trial_time = time.perf_counter() - start_time
+
+ trial_times.append(trial_time)
+ total_prompts += len(prompts)
+
+ # Count tokens
+ for output in outputs:
+ if output.outputs:
+ total_tokens += len(output.outputs[0].token_ids)
+
+ print(
+ f"Trial {trial + 1}/{num_trials}: "
+ f"batch_size={batch_size}, "
+ f"time={trial_time:.2f}s"
+ )
+
+ # Verify needle output still matches
+ needle_output = outputs[needle_pos]
+ assert needle_output.prompt == needle_prompt
+
+ # Compute statistics
+ avg_time = sum(trial_times) / len(trial_times)
+ min_time = min(trial_times)
+ max_time = max(trial_times)
+ throughput = total_tokens / sum(trial_times)
+ prompts_per_sec = total_prompts / sum(trial_times)
+
+ print(f"\n{'=' * 80}")
+ print("RESULTS:")
+ print(f" Average time per trial: {avg_time:.2f}s")
+ print(f" Min time: {min_time:.2f}s")
+ print(f" Max time: {max_time:.2f}s")
+ print(f" Total tokens generated: {total_tokens}")
+ print(f" Total prompts processed: {total_prompts}")
+ print(f" Throughput: {throughput:.2f} tokens/s")
+ print(f" Prompts/s: {prompts_per_sec:.2f}")
+ print(f"{'=' * 80}\n")
+
+ return {
+ "init_time": init_time,
+ "avg_time": avg_time,
+ "min_time": min_time,
+ "max_time": max_time,
+ "total_tokens": total_tokens,
+ "total_prompts": total_prompts,
+ "throughput": throughput,
+ "prompts_per_sec": prompts_per_sec,
+ "trial_times": trial_times,
+ }
+
+ finally:
+ # Cleanup
+ if llm is not None:
+ with contextlib.suppress(Exception):
+ llm.shutdown()
+
+
+def main():
+ # Check platform support
+ if not (current_platform.is_cuda() and current_platform.has_device_capability(90)):
+ print("ERROR: Requires CUDA and >= Hopper (SM90)")
+ print(f"Current platform: {current_platform.device_type}")
+ if current_platform.is_cuda():
+ print(f"Device capability: {current_platform.get_device_capability()}")
+ return 1
+
+ # Read configuration from environment
+ model = os.getenv("VLLM_BENCH_MODEL", "Qwen/Qwen3-1.7B")
+ tp_size = int(os.getenv("VLLM_BENCH_TP_SIZE", "1"))
+ max_batch_size = int(os.getenv("VLLM_BENCH_BATCH_SIZE", "128"))
+ num_trials = int(os.getenv("VLLM_BENCH_NUM_TRIALS", "5"))
+ min_prompt = int(os.getenv("VLLM_BENCH_MIN_PROMPT", "1024"))
+ max_prompt = int(os.getenv("VLLM_BENCH_MAX_PROMPT", "2048"))
+ max_tokens = int(os.getenv("VLLM_BENCH_MAX_TOKENS", "128"))
+ temperature = float(os.getenv("VLLM_BENCH_TEMPERATURE", "0.0"))
+ gpu_mem_util = float(os.getenv("VLLM_BENCH_GPU_MEMORY_UTILIZATION", "0.4"))
+ max_model_len = int(os.getenv("VLLM_BENCH_MAX_MODEL_LEN", "5120"))
+ backend = os.getenv("VLLM_BENCH_BACKEND", "FLASH_ATTN")
+
+ print("\n" + "=" * 80)
+ print("VLLM BATCH INVARIANCE BENCHMARK")
+ print("=" * 80)
+ print("\nConfiguration:")
+ print(f" Model: {model}")
+ print(f" Tensor Parallel Size: {tp_size}")
+ print(f" Attention Backend: {backend}")
+ print(f" Max Batch Size: {max_batch_size}")
+ print(f" Number of Trials: {num_trials}")
+ print(f" Prompt Length Range: {min_prompt}-{max_prompt} words")
+ print(f" Max Tokens to Generate: {max_tokens}")
+ print(f" Temperature: {temperature}")
+ print(f" GPU Memory Utilization: {gpu_mem_util}")
+ print(f" Max Model Length: {max_model_len}")
+ print("=" * 80)
+
+ # Run benchmark WITHOUT batch invariance (baseline)
+ print("\n" + "=" * 80)
+ print("PHASE 1: Running WITHOUT batch invariance (baseline)")
+ print("=" * 80)
+ baseline_results = run_benchmark_with_batch_invariant(
+ model=model,
+ tp_size=tp_size,
+ max_batch_size=max_batch_size,
+ num_trials=num_trials,
+ min_prompt=min_prompt,
+ max_prompt=max_prompt,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ gpu_mem_util=gpu_mem_util,
+ max_model_len=max_model_len,
+ backend=backend,
+ batch_invariant=False,
+ )
+
+ # Run benchmark WITH batch invariance
+ print("\n" + "=" * 80)
+ print("PHASE 2: Running WITH batch invariance")
+ print("=" * 80)
+ batch_inv_results = run_benchmark_with_batch_invariant(
+ model=model,
+ tp_size=tp_size,
+ max_batch_size=max_batch_size,
+ num_trials=num_trials,
+ min_prompt=min_prompt,
+ max_prompt=max_prompt,
+ max_tokens=max_tokens,
+ temperature=temperature,
+ gpu_mem_util=gpu_mem_util,
+ max_model_len=max_model_len,
+ backend=backend,
+ batch_invariant=True,
+ )
+
+ # Compare results
+ print("\n" + "=" * 80)
+ print("COMPARISON: Batch Invariance vs Baseline")
+ print("=" * 80)
+
+ init_overhead_pct = (
+ (batch_inv_results["init_time"] - baseline_results["init_time"])
+ / baseline_results["init_time"]
+ * 100
+ )
+ time_overhead_pct = (
+ (batch_inv_results["avg_time"] - baseline_results["avg_time"])
+ / baseline_results["avg_time"]
+ * 100
+ )
+ throughput_change_pct = (
+ (batch_inv_results["throughput"] - baseline_results["throughput"])
+ / baseline_results["throughput"]
+ * 100
+ )
+
+ print("\nInitialization Time:")
+ print(f" Baseline: {baseline_results['init_time']:.2f}s")
+ print(f" Batch Invariant: {batch_inv_results['init_time']:.2f}s")
+ print(f" Overhead: {init_overhead_pct:+.2f}%")
+
+ print("\nAverage Trial Time:")
+ print(f" Baseline: {baseline_results['avg_time']:.2f}s")
+ print(f" Batch Invariant: {batch_inv_results['avg_time']:.2f}s")
+ print(f" Overhead: {time_overhead_pct:+.2f}%")
+
+ print("\nThroughput (tokens/s):")
+ print(f" Baseline: {baseline_results['throughput']:.2f}")
+ print(f" Batch Invariant: {batch_inv_results['throughput']:.2f}")
+ print(f" Change: {throughput_change_pct:+.2f}%")
+
+ print("\nPrompts/s:")
+ print(f" Baseline: {baseline_results['prompts_per_sec']:.2f}")
+ print(f" Batch Invariant: {batch_inv_results['prompts_per_sec']:.2f}")
+
+ print("\n" + "=" * 80)
+ print("SUMMARY")
+ print("=" * 80)
+ if time_overhead_pct > 0:
+ print(
+ f"Batch invariance mode adds approximately {time_overhead_pct:.1f}% "
+ "overhead"
+ )
+ else:
+ print(
+ f"Batch invariance mode is approximately {-time_overhead_pct:.1f}% "
+ "faster (unexpected!)"
+ )
+
+ if abs(throughput_change_pct) < 1.0:
+ print("Throughput difference is negligible (< 1%)")
+ elif throughput_change_pct < 0:
+ print(
+ f"Throughput decreased by {-throughput_change_pct:.1f}% "
+ "with batch invariance"
+ )
+ else:
+ print(
+ f"Throughput increased by {throughput_change_pct:.1f}% "
+ "with batch invariance (unexpected!)"
+ )
+
+ print("=" * 80 + "\n")
+
+ return 0
+
+
+if __name__ == "__main__":
+ exit(main())
diff --git a/benchmarks/benchmark_prefix_caching.py b/benchmarks/benchmark_prefix_caching.py
index 146c268a6b7f2..28fc383a318dd 100644
--- a/benchmarks/benchmark_prefix_caching.py
+++ b/benchmarks/benchmark_prefix_caching.py
@@ -69,7 +69,7 @@ def sample_tokens(tokenizer: PreTrainedTokenizerBase, length: int) -> list[int]:
# Remove the special tokens.
return random.choices(
- [v for k, v in vocab.items() if k not in all_special_ids],
+ [v for v in vocab.values() if v not in all_special_ids],
k=length,
)
diff --git a/benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py b/benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py
index 904f805349148..d072c03c440b2 100644
--- a/benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py
+++ b/benchmarks/disagg_benchmarks/disagg_prefill_proxy_server.py
@@ -5,11 +5,12 @@ import argparse
import asyncio
import logging
import os
+import time
+import uuid
+from urllib.parse import urlparse
import aiohttp
from quart import Quart, Response, make_response, request
-from rate_limiter import RateLimiter
-from request_queue import RequestQueue
# Configure logging
logging.basicConfig(level=logging.INFO)
@@ -24,26 +25,8 @@ def parse_args():
parser.add_argument(
"--timeout",
type=float,
- default=300,
- help="Timeout for backend service requests in seconds (default: 300)",
- )
- parser.add_argument(
- "--max-concurrent",
- type=int,
- default=100,
- help="Maximum concurrent requests to backend services (default: 100)",
- )
- parser.add_argument(
- "--queue-size",
- type=int,
- default=500,
- help="Maximum number of requests in the queue (default: 500)",
- )
- parser.add_argument(
- "--rate-limit",
- type=int,
- default=40,
- help="Maximum requests per second (default: 40)",
+ default=6 * 60 * 60,
+ help="Timeout for backend service requests in seconds (default: 21600)",
)
parser.add_argument(
"--port",
@@ -54,14 +37,32 @@ def parse_args():
parser.add_argument(
"--prefill-url",
type=str,
- default="http://localhost:8100/v1/completions",
- help="Prefill service endpoint URL",
+ default="http://localhost:8100",
+ help="Prefill service base URL (protocol + host[:port])",
)
parser.add_argument(
"--decode-url",
type=str,
- default="http://localhost:8200/v1/completions",
- help="Decode service endpoint URL",
+ default="http://localhost:8200",
+ help="Decode service base URL (protocol + host[:port])",
+ )
+ parser.add_argument(
+ "--kv-host",
+ type=str,
+ default="localhost",
+ help="Hostname or IP used by KV transfer (default: localhost)",
+ )
+ parser.add_argument(
+ "--prefill-kv-port",
+ type=int,
+ default=14579,
+ help="Prefill KV port (default: 14579)",
+ )
+ parser.add_argument(
+ "--decode-kv-port",
+ type=int,
+ default=14580,
+ help="Decode KV port (default: 14580)",
)
return parser.parse_args()
@@ -73,70 +74,129 @@ def main():
# Initialize configuration using command line parameters
AIOHTTP_TIMEOUT = aiohttp.ClientTimeout(total=args.timeout)
- MAX_CONCURRENT_REQUESTS = args.max_concurrent
- REQUEST_QUEUE_SIZE = args.queue_size
- RATE_LIMIT = args.rate_limit
PREFILL_SERVICE_URL = args.prefill_url
DECODE_SERVICE_URL = args.decode_url
PORT = args.port
+ PREFILL_KV_ADDR = f"{args.kv_host}:{args.prefill_kv_port}"
+ DECODE_KV_ADDR = f"{args.kv_host}:{args.decode_kv_port}"
+
+ logger.info(
+ "Proxy resolved KV addresses -> prefill: %s, decode: %s",
+ PREFILL_KV_ADDR,
+ DECODE_KV_ADDR,
+ )
+
app = Quart(__name__)
- # Initialize the rate limiter and request queue
- rate_limiter = RateLimiter(RATE_LIMIT)
- request_queue = RequestQueue(MAX_CONCURRENT_REQUESTS, REQUEST_QUEUE_SIZE)
-
- # Attach the configuration object to the application instance
+ # Attach the configuration object to the application instance so helper
+ # coroutines can read the resolved backend URLs and timeouts without using
+ # globals.
app.config.update(
{
"AIOHTTP_TIMEOUT": AIOHTTP_TIMEOUT,
- "rate_limiter": rate_limiter,
- "request_queue": request_queue,
"PREFILL_SERVICE_URL": PREFILL_SERVICE_URL,
"DECODE_SERVICE_URL": DECODE_SERVICE_URL,
+ "PREFILL_KV_ADDR": PREFILL_KV_ADDR,
+ "DECODE_KV_ADDR": DECODE_KV_ADDR,
}
)
- # Start queue processing on app startup
- @app.before_serving
- async def startup():
- """Start request processing task when app starts serving"""
- asyncio.create_task(request_queue.process())
+ def _normalize_base_url(url: str) -> str:
+ """Remove any trailing slash so path joins behave predictably."""
+ return url.rstrip("/")
- async def forward_request(url, data):
- """Forward request to backend service with rate limiting and error handling"""
- headers = {"Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}"}
+ def _get_host_port(url: str) -> str:
+ """Return the hostname:port portion for logging and KV headers."""
+ parsed = urlparse(url)
+ host = parsed.hostname or "localhost"
+ port = parsed.port
+ if port is None:
+ port = 80 if parsed.scheme == "http" else 443
+ return f"{host}:{port}"
- # Use rate limiter as context manager
- async with (
- rate_limiter,
- aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session,
- ):
- try:
- async with session.post(
- url=url, json=data, headers=headers
- ) as response:
- if response.status == 200:
- # Stream response chunks
- async for chunk_bytes in response.content.iter_chunked(1024):
- yield chunk_bytes
- else:
- # Handle backend service errors
- error_text = await response.text()
- logger.error(
- "Backend service error: %s - %s",
- response.status,
- error_text,
- )
- yield b'{"error": "Backend service error"}'
- except aiohttp.ClientError as e:
- # Handle connection errors
- logger.error("Connection error to %s: %s", url, str(e))
- yield b'{"error": "Service unavailable"}'
- except asyncio.TimeoutError:
- # Handle timeout errors
- logger.error("Timeout connecting to %s", url)
- yield b'{"error": "Service timeout"}'
+ PREFILL_BASE = _normalize_base_url(PREFILL_SERVICE_URL)
+ DECODE_BASE = _normalize_base_url(DECODE_SERVICE_URL)
+ KV_TARGET = _get_host_port(DECODE_SERVICE_URL)
+
+ def _build_headers(request_id: str) -> dict[str, str]:
+ """Construct the headers expected by vLLM's P2P disagg connector."""
+ headers: dict[str, str] = {"X-Request-Id": request_id, "X-KV-Target": KV_TARGET}
+ api_key = os.environ.get("OPENAI_API_KEY")
+ if api_key:
+ headers["Authorization"] = f"Bearer {api_key}"
+ return headers
+
+ async def _run_prefill(
+ request_path: str,
+ payload: dict,
+ headers: dict[str, str],
+ request_id: str,
+ ):
+ url = f"{PREFILL_BASE}{request_path}"
+ start_ts = time.perf_counter()
+ logger.info("[prefill] start request_id=%s url=%s", request_id, url)
+ try:
+ async with (
+ aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session,
+ session.post(url=url, json=payload, headers=headers) as resp,
+ ):
+ if resp.status != 200:
+ error_text = await resp.text()
+ raise RuntimeError(
+ f"Prefill backend error {resp.status}: {error_text}"
+ )
+ await resp.read()
+ logger.info(
+ "[prefill] done request_id=%s status=%s elapsed=%.2fs",
+ request_id,
+ resp.status,
+ time.perf_counter() - start_ts,
+ )
+ except asyncio.TimeoutError as exc:
+ raise RuntimeError(f"Prefill service timeout at {url}") from exc
+ except aiohttp.ClientError as exc:
+ raise RuntimeError(f"Prefill service unavailable at {url}") from exc
+
+ async def _stream_decode(
+ request_path: str,
+ payload: dict,
+ headers: dict[str, str],
+ request_id: str,
+ ):
+ url = f"{DECODE_BASE}{request_path}"
+ # Stream tokens from the decode service once the prefill stage has
+ # materialized KV caches on the target workers.
+ logger.info("[decode] start request_id=%s url=%s", request_id, url)
+ try:
+ async with (
+ aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session,
+ session.post(url=url, json=payload, headers=headers) as resp,
+ ):
+ if resp.status != 200:
+ error_text = await resp.text()
+ logger.error(
+ "Decode backend error %s - %s", resp.status, error_text
+ )
+ err_msg = (
+ '{"error": "Decode backend error ' + str(resp.status) + '"}'
+ )
+ yield err_msg.encode()
+ return
+ logger.info(
+ "[decode] streaming response request_id=%s status=%s",
+ request_id,
+ resp.status,
+ )
+ async for chunk_bytes in resp.content.iter_chunked(1024):
+ yield chunk_bytes
+ logger.info("[decode] finished streaming request_id=%s", request_id)
+ except asyncio.TimeoutError:
+ logger.error("Decode service timeout at %s", url)
+ yield b'{"error": "Decode service timeout"}'
+ except aiohttp.ClientError as exc:
+ logger.error("Decode service error at %s: %s", url, exc)
+ yield b'{"error": "Decode service unavailable"}'
async def process_request():
"""Process a single request through prefill and decode stages"""
@@ -146,13 +206,27 @@ def main():
# Create prefill request (max_tokens=1)
prefill_request = original_request_data.copy()
prefill_request["max_tokens"] = 1
+ if "max_completion_tokens" in prefill_request:
+ prefill_request["max_completion_tokens"] = 1
# Execute prefill stage
- async for _ in forward_request(PREFILL_SERVICE_URL, prefill_request):
- continue
+ # The request id encodes both KV socket addresses so the backend can
+ # shuttle tensors directly via NCCL once the prefill response
+ # completes.
+ request_id = (
+ f"___prefill_addr_{PREFILL_KV_ADDR}___decode_addr_"
+ f"{DECODE_KV_ADDR}_{uuid.uuid4().hex}"
+ )
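+ # Illustrative example (actual values depend on --kv-host / --prefill-kv-port / --decode-kv-port):
+ # "___prefill_addr_localhost:14579___decode_addr_localhost:14580_<32-char-hex>"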
+
+ headers = _build_headers(request_id)
+ await _run_prefill(request.path, prefill_request, headers, request_id)
# Execute decode stage and stream response
- generator = forward_request(DECODE_SERVICE_URL, original_request_data)
+ # Pass the unmodified user request so the decode phase can continue
+ # sampling with the already-populated KV cache.
+ generator = _stream_decode(
+ request.path, original_request_data, headers, request_id
+ )
response = await make_response(generator)
response.timeout = None # Disable timeout for streaming response
return response
@@ -168,23 +242,10 @@ def main():
@app.route("/v1/completions", methods=["POST"])
async def handle_request():
"""Handle incoming API requests with concurrency and rate limiting"""
- # Create task for request processing
- task = asyncio.create_task(process_request())
-
- # Enqueue request or reject if queue is full
- if not await request_queue.enqueue(task):
- return Response(
- response=b'{"error": "Server busy, try again later"}',
- status=503,
- content_type="application/json",
- )
-
try:
- # Return the response from the processing task
- return await task
+ return await process_request()
except asyncio.CancelledError:
- # Handle task cancellation (timeout or queue full)
- logger.warning("Request cancelled due to timeout or queue full")
+ logger.warning("Request cancelled")
return Response(
response=b'{"error": "Request cancelled"}',
status=503,
diff --git a/benchmarks/kernels/bench_block_fp8_gemm.py b/benchmarks/kernels/bench_block_fp8_gemm.py
index f1e504499eaf6..11e3ac7f0c1fa 100644
--- a/benchmarks/kernels/bench_block_fp8_gemm.py
+++ b/benchmarks/kernels/bench_block_fp8_gemm.py
@@ -1,10 +1,18 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+import os
+
+# Disable DeepGEMM for this benchmark to use CUTLASS
+os.environ["VLLM_USE_DEEP_GEMM"] = "0"
+
import torch
from vllm.model_executor.layers.quantization.utils.fp8_utils import (
- apply_w8a8_block_fp8_linear,
+ W8A8BlockFp8LinearOp,
+)
+from vllm.model_executor.layers.quantization.utils.quant_utils import (
+ GroupShape,
)
from vllm.model_executor.layers.quantization.utils.w8a8_utils import (
CUTLASS_BLOCK_FP8_SUPPORTED,
@@ -39,13 +47,14 @@ def build_w8a8_block_fp8_runner(M, N, K, block_size, device, use_cutlass):
fp8_info = torch.finfo(torch.float8_e4m3fn)
fp8_max, fp8_min = fp8_info.max, fp8_info.min
- # Create random FP8 tensors
+ # Create random input tensor (bfloat16, will be quantized by W8A8BlockFp8LinearOp)
A_ref = (torch.rand(M, K, dtype=torch.bfloat16, device=device) - 0.5) * 2 * fp8_max
+ # Create quantized weight tensor
B_ref = (torch.rand(N, K, dtype=torch.bfloat16, device=device) - 0.5) * 2 * fp8_max
B = B_ref.clamp(min=fp8_min, max=fp8_max).to(torch.float8_e4m3fn)
- # Create scales
+ # Create weight scales
block_n, block_k = block_size[0], block_size[1]
n_tiles = (N + block_n - 1) // block_n
k_tiles = (K + block_k - 1) // block_k
@@ -55,19 +64,25 @@ def build_w8a8_block_fp8_runner(M, N, K, block_size, device, use_cutlass):
* factor_for_scale
)
- # SM90 CUTLASS requires row-major format for scales
- if use_cutlass and current_platform.is_device_capability(90):
- Bs = Bs.T.contiguous()
+ # Create W8A8BlockFp8LinearOp instance
+ weight_group_shape = GroupShape(block_n, block_k)
+ act_quant_group_shape = GroupShape(1, block_k) # Per-token, per-group quantization
+
+ linear_op = W8A8BlockFp8LinearOp(
+ weight_group_shape=weight_group_shape,
+ act_quant_group_shape=act_quant_group_shape,
+ cutlass_block_fp8_supported=use_cutlass,
+ use_aiter_and_is_supported=False,
+ )
def run():
- if use_cutlass:
- return apply_w8a8_block_fp8_linear(
- A_ref, B, block_size, Bs, cutlass_block_fp8_supported=True
- )
- else:
- return apply_w8a8_block_fp8_linear(
- A_ref, B, block_size, Bs, cutlass_block_fp8_supported=False
- )
+ return linear_op.apply(
+ input=A_ref,
+ weight=B,
+ weight_scale=Bs,
+ input_scale=None,
+ bias=None,
+ )
return run
diff --git a/benchmarks/kernels/benchmark_cutlass_moe_fp8.py b/benchmarks/kernels/benchmark_cutlass_moe_fp8.py
index 027f67ad4db69..e07d6c776bc00 100644
--- a/benchmarks/kernels/benchmark_cutlass_moe_fp8.py
+++ b/benchmarks/kernels/benchmark_cutlass_moe_fp8.py
@@ -255,8 +255,8 @@ def bench_run(
torch.cuda.synchronize()
# Timing
- start_event = torch.cuda.Event(enable_timing=True)
- end_event = torch.cuda.Event(enable_timing=True)
+ start_event = torch.Event(enable_timing=True)
+ end_event = torch.Event(enable_timing=True)
latencies = []
for _ in range(num_iters):
diff --git a/benchmarks/kernels/benchmark_fused_collective.py b/benchmarks/kernels/benchmark_fused_collective.py
new file mode 100644
index 0000000000000..38e7fdcf55426
--- /dev/null
+++ b/benchmarks/kernels/benchmark_fused_collective.py
@@ -0,0 +1,1129 @@
+# SPDX-License-Identifier: Apache-2.0
+# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+
+"""
+Benchmark for FlashInfer fused collective operations vs standard operations.
+
+This benchmark compares:
+1. FlashInfer's trtllm_allreduce_fusion (fused allreduce + rmsnorm + optional quant)
+2. Standard tensor_model_parallel_all_reduce + separate rmsnorm/quant operations
+
+Usage with torchrun:
+ torchrun --nproc_per_node=2 benchmark_fused_collective.py
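+
+ To skip the one-shot allreduce path for very large inputs, pass --disable-oneshot
+ (the flag is referenced in the _FI_MAX_SIZES note below):
+ torchrun --nproc_per_node=2 benchmark_fused_collective.py --disable-oneshot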
+
+"""
+
+import argparse
+import itertools
+import os
+import time
+
+import pandas as pd
+import torch # type: ignore
+import torch.distributed as dist # type: ignore
+
+from vllm.config.vllm import CompilationConfig, VllmConfig, set_current_vllm_config
+from vllm.distributed import (
+ get_tp_group,
+ tensor_model_parallel_all_reduce,
+)
+from vllm.distributed.parallel_state import (
+ graph_capture,
+ init_distributed_environment,
+ initialize_model_parallel,
+)
+from vllm.logger import init_logger
+from vllm.model_executor.layers.layernorm import RMSNorm # noqa
+from vllm.model_executor.layers.quantization.input_quant_fp8 import QuantFP8 # noqa
+from vllm.model_executor.layers.quantization.utils.quant_utils import GroupShape # noqa
+from vllm.platforms import current_platform # noqa
+
+RMS_NORM_OP = torch.ops._C.rms_norm
+FUSED_ADD_RMS_NORM_OP = torch.ops._C.fused_add_rms_norm
+RMS_NORM_STATIC_FP8_QUANT_OP = torch.ops._C.rms_norm_static_fp8_quant
+FUSED_ADD_RMS_NORM_STATIC_FP8_QUANT_OP = (
+ torch.ops._C.fused_add_rms_norm_static_fp8_quant
+)
+SCALED_FP4_QUANT_OP = torch.ops._C.scaled_fp4_quant
+
+logger = init_logger(__name__)
+
+# Try to import FlashInfer
+try:
+ import flashinfer.comm as flashinfer_comm # type: ignore
+
+ if not hasattr(flashinfer_comm, "trtllm_allreduce_fusion"):
+ flashinfer_comm = None
+ logger.warning(
+ "FlashInfer comm module found but missing trtllm_allreduce_fusion"
+ )
+except ImportError:
+ flashinfer_comm = None
+ logger.warning("FlashInfer not found, only benchmarking standard operations")
+
+# Constants
+FP8_DTYPE = current_platform.fp8_dtype()
+MiB = 1024 * 1024
+
+# FlashInfer max sizes per world size
+# Enable 64MB for 2, 4, 8 world sizes to verify large input sizes
+# use --disable-oneshot to disable oneshot mode for very large input sizes
+_FI_MAX_SIZES = {
+ 2: 64 * MiB, # 64MB
+ 4: 64 * MiB, # 64MB
+ 8: 64 * MiB, # 64MB
+}
+
+# Global workspace tensor for FlashInfer
+_FI_WORKSPACE_TENSOR = None
+
+
+def setup_flashinfer_workspace(
+ world_size: int,
+ rank: int,
+ hidden_dim: int,
+ max_token_num: int,
+ use_fp32_lamport: bool = False,
+):
+ """Setup FlashInfer workspace for fused allreduce operations."""
+ global _FI_WORKSPACE_TENSOR
+
+ if flashinfer_comm is None:
+ return None, None
+
+ if world_size not in _FI_MAX_SIZES:
+ logger.warning("FlashInfer not supported for world size %s", world_size)
+ return None, None
+
+ try:
+ # Create IPC workspace
+ ipc_handles, workspace_tensor = (
+ flashinfer_comm.trtllm_create_ipc_workspace_for_all_reduce_fusion(
+ tp_rank=rank,
+ tp_size=world_size,
+ max_token_num=max_token_num,
+ hidden_dim=hidden_dim,
+ group=get_tp_group().device_group,
+ use_fp32_lamport=use_fp32_lamport,
+ )
+ )
+
+ _FI_WORKSPACE_TENSOR = workspace_tensor
+ return ipc_handles, workspace_tensor
+ except Exception as e:
+ logger.error("Failed to setup FlashInfer workspace: %s", e)
+ return None, None
+
+
+def cleanup_flashinfer_workspace(ipc_handles):
+ """Cleanup FlashInfer workspace."""
+ if flashinfer_comm is None or ipc_handles is None:
+ return
+
+ try:
+ group = get_tp_group().device_group
+ flashinfer_comm.trtllm_destroy_ipc_workspace_for_all_reduce(ipc_handles, group)
+ except Exception as e:
+ logger.error("Failed to cleanup FlashInfer workspace: %s", e)
+
+
+class FlashInferFusedAllReduceParams:
+ """Parameters for FlashInfer fused allreduce operations."""
+
+ def __init__(
+ self,
+ rank: int,
+ world_size: int,
+ use_fp32_lamport: bool = False,
+ max_token_num: int = 1024,
+ ):
+ self.rank = rank
+ self.world_size = world_size
+ self.use_fp32_lamport = use_fp32_lamport
+ self.trigger_completion_at_end = True
+ self.launch_with_pdl = True
+ self.fp32_acc = True
+ self.max_token_num = max_token_num
+
+ def get_trtllm_fused_allreduce_kwargs(self):
+ return {
+ "world_rank": self.rank,
+ "world_size": self.world_size,
+ "launch_with_pdl": self.launch_with_pdl,
+ "trigger_completion_at_end": self.trigger_completion_at_end,
+ "fp32_acc": self.fp32_acc,
+ }
+
+
+def flashinfer_fused_allreduce_rmsnorm(
+ input_tensor: torch.Tensor,
+ residual: torch.Tensor | None,
+ rms_gamma: torch.Tensor,
+ rms_eps: float,
+ allreduce_params: "FlashInferFusedAllReduceParams",
+ use_oneshot: bool,
+ norm_out: torch.Tensor | None = None,
+):
+ """FlashInfer fused allreduce + rmsnorm operation."""
+ if flashinfer_comm is None or _FI_WORKSPACE_TENSOR is None:
+ raise RuntimeError("FlashInfer not available or workspace not initialized")
+
+ if norm_out is None:
+ norm_out = input_tensor
+ residual_out = residual
+ else:
+ residual_out = input_tensor
+
+ flashinfer_comm.trtllm_allreduce_fusion(
+ allreduce_in=input_tensor,
+ token_num=input_tensor.shape[0],
+ residual_in=residual,
+ residual_out=residual_out,
+ norm_out=norm_out,
+ rms_gamma=rms_gamma,
+ rms_eps=rms_eps,
+ hidden_dim=input_tensor.shape[-1],
+ workspace_ptrs=_FI_WORKSPACE_TENSOR,
+ pattern_code=flashinfer_comm.AllReduceFusionPattern.kARResidualRMSNorm,
+ allreduce_out=None,
+ quant_out=None,
+ scale_out=None,
+ layout_code=flashinfer_comm.QuantizationSFLayout.SWIZZLED_128x4,
+ scale_factor=None,
+ use_oneshot=use_oneshot,
+ **allreduce_params.get_trtllm_fused_allreduce_kwargs(),
+ )
+
+
+def flashinfer_fused_allreduce_rmsnorm_fp8_quant(
+ input_tensor: torch.Tensor,
+ residual: torch.Tensor | None,
+ rms_gamma: torch.Tensor,
+ rms_eps: float,
+ scale_factor: torch.Tensor,
+ allreduce_params: FlashInferFusedAllReduceParams,
+ use_oneshot: bool = True,
+ norm_out: torch.Tensor | None = None,
+ quant_out: torch.Tensor | None = None,
+):
+ """FlashInfer fused allreduce + rmsnorm + FP8 quantization."""
+ if flashinfer_comm is None or _FI_WORKSPACE_TENSOR is None:
+ raise RuntimeError("FlashInfer not available or workspace not initialized")
+
+ if norm_out is None:
+ norm_out = input_tensor
+ residual_out = residual
+ else:
+ residual_out = input_tensor
+
+ flashinfer_comm.trtllm_allreduce_fusion(
+ allreduce_in=input_tensor,
+ token_num=input_tensor.shape[0],
+ residual_in=residual,
+ residual_out=residual_out,
+ norm_out=norm_out,
+ rms_gamma=rms_gamma,
+ rms_eps=rms_eps,
+ hidden_dim=input_tensor.shape[-1],
+ workspace_ptrs=_FI_WORKSPACE_TENSOR,
+ pattern_code=flashinfer_comm.AllReduceFusionPattern.kARResidualRMSNormFP8Quant,
+ allreduce_out=None,
+ quant_out=quant_out,
+ scale_out=None,
+ layout_code=flashinfer_comm.QuantizationSFLayout.SWIZZLED_128x4,
+ scale_factor=scale_factor,
+ use_oneshot=use_oneshot,
+ **allreduce_params.get_trtllm_fused_allreduce_kwargs(),
+ )
+
+
+def flashinfer_fused_allreduce_rmsnorm_fp4_quant(
+ input_tensor: torch.Tensor,
+ residual: torch.Tensor | None,
+ rms_gamma: torch.Tensor,
+ rms_eps: float,
+ input_global_scale: torch.Tensor,
+ allreduce_params: FlashInferFusedAllReduceParams,
+ quant_out: torch.Tensor,
+ use_oneshot: bool,
+ output_scale: torch.Tensor,
+ norm_out: torch.Tensor | None = None,
+):
+ """FlashInfer fused allreduce + rmsnorm + FP4 quantization."""
+ if flashinfer_comm is None or _FI_WORKSPACE_TENSOR is None:
+ raise RuntimeError("FlashInfer not available or workspace not initialized")
+
+ if norm_out is None:
+ norm_out = input_tensor
+ residual_out = residual
+ else:
+ residual_out = input_tensor
+
+ flashinfer_comm.trtllm_allreduce_fusion(
+ allreduce_in=input_tensor,
+ token_num=input_tensor.shape[0],
+ residual_in=residual,
+ residual_out=residual_out,
+ norm_out=norm_out,
+ rms_gamma=rms_gamma,
+ rms_eps=rms_eps,
+ hidden_dim=input_tensor.shape[-1],
+ workspace_ptrs=_FI_WORKSPACE_TENSOR,
+ pattern_code=flashinfer_comm.AllReduceFusionPattern.kARResidualRMSNormFP4Quant,
+ allreduce_out=None,
+ quant_out=quant_out,
+ scale_out=output_scale,
+ layout_code=flashinfer_comm.QuantizationSFLayout.SWIZZLED_128x4,
+ scale_factor=input_global_scale,
+ use_oneshot=use_oneshot,
+ **allreduce_params.get_trtllm_fused_allreduce_kwargs(),
+ )
+
+
+class VllmFusedAllreduce:
+ def __init__(self, hidden_dim, dtype):
+ self.rms_eps = 1e-6
+ self.rms_norm = RMSNorm(hidden_dim, eps=self.rms_eps, dtype=dtype)
+ self.fp8_quant = QuantFP8(
+ static=True,
+ group_shape=GroupShape.PER_TENSOR,
+ )
+
+ def allreduce_rmsnorm(
+ self, input_tensor: torch.Tensor, residual: torch.Tensor | None
+ ):
+ allreduce_out = tensor_model_parallel_all_reduce(input_tensor)
+ return self.rms_norm(allreduce_out, residual)
+
+ def allreduce_rmsnorm_fp8_quant(
+ self,
+ input_tensor: torch.Tensor,
+ residual: torch.Tensor | None,
+ scale_factor: torch.Tensor,
+ ):
+ allreduce_out = tensor_model_parallel_all_reduce(input_tensor)
+ rms_out = self.rms_norm(allreduce_out, residual)
+ if residual is None:
+ quant_out = self.fp8_quant(rms_out, scale_factor)
+ return quant_out
+ else:
+ rms_out, residual_out = rms_out
+ quant_out = self.fp8_quant(rms_out, scale_factor)
+ return quant_out, residual_out
+
+ def allreduce_rmsnorm_fp4_quant(
+ self,
+ input_tensor: torch.Tensor,
+ residual: torch.Tensor | None,
+ input_global_scale: torch.Tensor,
+ quant_out: torch.Tensor,
+ output_scale: torch.Tensor,
+ ):
+ allreduce_out = tensor_model_parallel_all_reduce(input_tensor)
+ rms_out = self.rms_norm(allreduce_out, residual)
+ if residual is None:
+ SCALED_FP4_QUANT_OP(quant_out, rms_out, output_scale, input_global_scale)
+ return quant_out, output_scale
+ else:
+ rms_out, residual_out = rms_out
+ SCALED_FP4_QUANT_OP(quant_out, rms_out, output_scale, input_global_scale)
+ return quant_out, residual_out, output_scale
+
+
+def create_test_tensors(
+ num_tokens: int, hidden_dim: int, dtype: torch.dtype, use_residual: bool = True
+):
+ """Create test tensors for benchmarking."""
+ input_tensor = torch.randn(num_tokens, hidden_dim, dtype=dtype)
+ residual = (
+ torch.randn_like(input_tensor)
+ if use_residual
+ else torch.zeros_like(input_tensor)
+ )
+ rms_gamma = torch.ones(hidden_dim, dtype=dtype)
+ norm_out = None if use_residual else torch.empty_like(input_tensor)
+
+ # Quantization scales
+ scale_fp8 = torch.tensor(1.0, dtype=torch.float32)
+ scale_fp4 = torch.tensor(1.0, dtype=torch.float32)
+ quant_out_fp8 = torch.empty_like(input_tensor, dtype=FP8_DTYPE)
+ # Pre-allocate FP4 output tensors (to avoid allocation overhead in benchmarks)
+ fp4_quant_out = torch.empty((num_tokens, hidden_dim // 2), dtype=torch.uint8)
+ fp4_output_scale = torch.empty((128, 4), dtype=torch.int32)
+
+ return (
+ input_tensor,
+ norm_out,
+ residual,
+ rms_gamma,
+ scale_fp8,
+ quant_out_fp8,
+ scale_fp4,
+ fp4_quant_out,
+ fp4_output_scale,
+ )
+
+
+def benchmark_operation(
+ operation_func, *args, warmup: int = 5, trials: int = 20, **kwargs
+):
+ """Benchmark a single operation using CUDA graphs."""
+ # Warmup before graph capture
+ for _ in range(warmup):
+ operation_func(*args, **kwargs)
+ torch.cuda.synchronize()
+
+ # Create CUDA graph
+ graph = torch.cuda.CUDAGraph()
+ num_op_per_cudagraph = 10
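+ # Capturing several back-to-back ops in a single graph amortizes per-replay launch overhead.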
+
+ # Use vLLM's graph_capture to make tensor_model_parallel_all_reduce graph-safe
+ device = torch.device(f"cuda:{torch.cuda.current_device()}")
+ with graph_capture(device=device), torch.cuda.graph(graph):
+ for _ in range(num_op_per_cudagraph):
+ operation_func(*args, **kwargs)
+
+ # Graph warmup
+ torch.cuda.synchronize()
+ for _ in range(warmup):
+ graph.replay()
+
+ # Benchmark with CUDA graph
+ torch.cuda.synchronize()
+ start_time = time.perf_counter()
+
+ for _ in range(trials // num_op_per_cudagraph):
+ graph.replay()
+
+ torch.cuda.synchronize()
+ end_time = time.perf_counter()
+
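+ # Each replay executes num_op_per_cudagraph ops, so the loop above runs ~`trials` ops in
+ # total; dividing the elapsed time by `trials` therefore gives the average per-op latency.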
+ avg_time_ms = ((end_time - start_time) / trials) * 1000
+ return avg_time_ms
+
+
+def run_benchmarks(
+ num_tokens: int,
+ hidden_dim: int,
+ dtype: torch.dtype,
+ use_residual: bool,
+ allreduce_params: FlashInferFusedAllReduceParams | None,
+ quant_modes: set[str],
+ no_oneshot: bool,
+):
+ """Run all benchmarks for given configuration.
+
+ Args:
+ quant_modes: set containing any of "none", "fp8", "fp4"
+ """
+ (
+ input_tensor,
+ norm_out,
+ residual,
+ rms_gamma,
+ scale_fp8,
+ quant_out_fp8,
+ scale_fp4,
+ fp4_quant_out,
+ fp4_output_scale,
+ ) = create_test_tensors(num_tokens, hidden_dim, dtype, use_residual)
+
+ rms_eps = 1e-6
+ results = {}
+ # Create RMSNorm and QuantFP8 layers once for native benchmarks
+ vllm_fused_allreduce = VllmFusedAllreduce(hidden_dim, dtype)
+ use_oneshot_options = [False] if no_oneshot else [True, False]
+
+ if "none" in quant_modes:
+ # Standard AllReduce + RMSNorm
+ for custom_op in ["-rms_norm", "+rms_norm"]:
+ with set_current_vllm_config(
+ VllmConfig(compilation_config=CompilationConfig(custom_ops=[custom_op]))
+ ):
+ try:
+ suffix = (
+ "_custom_rms_norm" if "+" in custom_op else "_native_rms_norm"
+ )
+ time_ms = benchmark_operation(
+ vllm_fused_allreduce.allreduce_rmsnorm,
+ input_tensor,
+ residual=residual,
+ )
+ results[f"standard_allreduce_{suffix}"] = time_ms
+ except Exception as e:
+ logger.error("Standard AllReduce+RMSNorm failed: %s", e)
+ results[f"standard_allreduce_{suffix}"] = float("inf")
+
+ # Standard AllReduce + RMSNorm Native Compiled
+ with set_current_vllm_config(
+ VllmConfig(compilation_config=CompilationConfig(custom_ops=["-rms_norm"]))
+ ):
+ try:
+ standard_allreduce_rmsnorm_native_compiled = torch.compile(
+ vllm_fused_allreduce.allreduce_rmsnorm,
+ fullgraph=True,
+ dynamic=False,
+ )
+ time_ms = benchmark_operation(
+ standard_allreduce_rmsnorm_native_compiled,
+ input_tensor,
+ residual=residual,
+ )
+ results["standard_allreduce_rmsnorm_native_compiled"] = time_ms
+ except Exception as e:
+ logger.error("Standard AllReduce+RMSNorm Native Compiled failed: %s", e)
+ results["standard_allreduce_rmsnorm_native_compiled"] = float("inf")
+
+ # FlashInfer Fused AllReduce + RMSNorm Oneshot/Twoshot
+ if flashinfer_comm is not None and allreduce_params is not None:
+ for use_oneshot in use_oneshot_options:
+ suffix = "_oneshot" if use_oneshot else "_twoshot"
+ try:
+ time_ms = benchmark_operation(
+ flashinfer_fused_allreduce_rmsnorm,
+ input_tensor,
+ residual=residual,
+ norm_out=norm_out,
+ rms_gamma=rms_gamma,
+ rms_eps=rms_eps,
+ allreduce_params=allreduce_params,
+ use_oneshot=use_oneshot,
+ )
+ results[f"flashinfer_fused_allreduce_rmsnorm{suffix}"] = time_ms
+ except Exception as e:
+ logger.error("FlashInfer Fused AllReduce+RMSNorm failed: %s", e)
+ results[f"flashinfer_fused_allreduce_rmsnorm{suffix}"] = float(
+ "inf"
+ )
+
+ if "fp8" in quant_modes:
+ # Standard AllReduce + RMSNorm + FP8 Quant
+ for rms_norm_custom_op in ["-rms_norm", "+rms_norm"]:
+ rms_suffix = (
+ "_custom_rms_norm" if "+" in rms_norm_custom_op else "_native_rms_norm"
+ )
+ for quant_fp8_custom_op in ["-quant_fp8", "+quant_fp8"]:
+ # Rebuild the suffix on every iteration rather than accumulating onto it,
+ # so the second quant_fp8 variant does not inherit the first variant's key.
+ suffix = rms_suffix + (
+ "_custom_quant_fp8"
+ if "+" in quant_fp8_custom_op
+ else "_native_quant_fp8"
+ )
+ with set_current_vllm_config(
+ VllmConfig(
+ compilation_config=CompilationConfig(
+ custom_ops=[rms_norm_custom_op, quant_fp8_custom_op]
+ )
+ )
+ ):
+ try:
+ time_ms = benchmark_operation(
+ vllm_fused_allreduce.allreduce_rmsnorm_fp8_quant,
+ input_tensor,
+ residual=residual,
+ scale_factor=scale_fp8,
+ )
+ results[f"standard_allreduce{suffix}"] = time_ms
+ except Exception as e:
+ logger.error("Standard AllReduce+RMSNorm+FP8 failed: %s", e)
+ results[f"standard_allreduce{suffix}"] = float("inf")
+
+ # Standard AllReduce + RMSNorm + FP8 Quant Native Compiled
+ with set_current_vllm_config(
+ VllmConfig(
+ compilation_config=CompilationConfig(
+ custom_ops=["-rms_norm", "-quant_fp8"]
+ )
+ )
+ ):
+ try:
+ standard_allreduce_rmsnorm_fp8_quant_native_compiled = torch.compile(
+ vllm_fused_allreduce.allreduce_rmsnorm_fp8_quant,
+ fullgraph=True,
+ dynamic=False,
+ )
+ time_ms = benchmark_operation(
+ standard_allreduce_rmsnorm_fp8_quant_native_compiled,
+ input_tensor,
+ residual=residual,
+ scale_factor=scale_fp8,
+ )
+ results["standard_allreduce_rmsnorm_fp8_quant_native_compiled"] = (
+ time_ms
+ )
+ except Exception as e:
+ logger.error(
+ "Standard AllReduce+RMSNorm+FP8 Native Compiled failed: %s", e
+ )
+ results["standard_allreduce_rmsnorm_fp8_quant_native_compiled"] = float(
+ "inf"
+ )
+
+ # FlashInfer Fused AllReduce + RMSNorm + FP8 Quant Oneshot
+ if flashinfer_comm is not None and allreduce_params is not None:
+ for use_oneshot in use_oneshot_options:
+ suffix = "_oneshot" if use_oneshot else "_twoshot"
+ try:
+ time_ms = benchmark_operation(
+ flashinfer_fused_allreduce_rmsnorm_fp8_quant,
+ input_tensor,
+ norm_out=norm_out,
+ residual=residual,
+ rms_gamma=rms_gamma,
+ rms_eps=rms_eps,
+ scale_factor=scale_fp8,
+ quant_out=quant_out_fp8,
+ allreduce_params=allreduce_params,
+ use_oneshot=use_oneshot,
+ )
+ results[f"flashinfer_fused_allreduce_rmsnorm_fp8_quant{suffix}"] = (
+ time_ms
+ )
+ except Exception as e:
+ logger.error(
+ "FlashInfer Fused AllReduce+RMSNorm+FP8 Oneshot failed: %s",
+ e,
+ )
+ results[f"flashinfer_fused_allreduce_rmsnorm_fp8_quant{suffix}"] = (
+ float("inf")
+ )
+
+ if "fp4" in quant_modes and current_platform.has_device_capability(100):
+ # Standard AllReduce + RMSNorm + FP4 Quant
+ for rms_norm_custom_op in ["-rms_norm", "+rms_norm"]:
+ suffix = (
+ "_custom_rms_norm" if "+" in rms_norm_custom_op else "_native_rms_norm"
+ )
+ with set_current_vllm_config(
+ VllmConfig(
+ compilation_config=CompilationConfig(
+ custom_ops=[rms_norm_custom_op]
+ )
+ )
+ ):
+ try:
+ time_ms = benchmark_operation(
+ vllm_fused_allreduce.allreduce_rmsnorm_fp4_quant,
+ input_tensor,
+ residual=residual,
+ input_global_scale=scale_fp4,
+ quant_out=fp4_quant_out,
+ output_scale=fp4_output_scale,
+ )
+ results[f"standard_allreduce_{suffix}_fp4_quant"] = time_ms
+ except Exception as e:
+ logger.error("Standard AllReduce+RMSNorm+FP4 failed: %s", e)
+ results[f"standard_allreduce_{suffix}_fp4_quant"] = float("inf")
+
+ # Standard AllReduce + RMSNorm + FP4 Quant Native Compiled
+ with set_current_vllm_config(
+ VllmConfig(compilation_config=CompilationConfig(custom_ops=["-rms_norm"]))
+ ):
+ try:
+ standard_allreduce_rmsnorm_fp4_quant_native_compiled = torch.compile(
+ vllm_fused_allreduce.allreduce_rmsnorm_fp4_quant,
+ fullgraph=True,
+ dynamic=False,
+ )
+ time_ms = benchmark_operation(
+ standard_allreduce_rmsnorm_fp4_quant_native_compiled,
+ input_tensor,
+ residual=residual,
+ quant_out=fp4_quant_out,
+ input_global_scale=scale_fp4,
+ output_scale=fp4_output_scale,
+ )
+ results["standard_allreduce_rmsnorm_fp4_quant_native_compiled"] = (
+ time_ms
+ )
+ except Exception as e:
+ logger.error(
+ "Standard AllReduce+RMSNorm+FP4 Native Compiled failed: %s", e
+ )
+ results["standard_allreduce_rmsnorm_fp4_quant_native_compiled"] = float(
+ "inf"
+ )
+
+ # FlashInfer Fused AllReduce + RMSNorm + FP4 Quant Oneshot
+ if flashinfer_comm is not None and allreduce_params is not None:
+ for use_oneshot in use_oneshot_options:
+ suffix = "_oneshot" if use_oneshot else "_twoshot"
+ try:
+ time_ms = benchmark_operation(
+ flashinfer_fused_allreduce_rmsnorm_fp4_quant,
+ input_tensor,
+ residual=residual,
+ norm_out=norm_out,
+ rms_gamma=rms_gamma,
+ rms_eps=rms_eps,
+ input_global_scale=scale_fp4,
+ allreduce_params=allreduce_params,
+ quant_out=fp4_quant_out,
+ output_scale=fp4_output_scale,
+ use_oneshot=use_oneshot,
+ )
+ results[f"flashinfer_fused_allreduce_rmsnorm_fp4_quant{suffix}"] = (
+ time_ms
+ )
+ except Exception as e:
+ logger.error(
+ "FlashInfer Fused AllReduce+RMSNorm+FP4 Oneshot failed: %s",
+ e,
+ )
+ results[f"flashinfer_fused_allreduce_rmsnorm_fp4_quant{suffix}"] = (
+ float("inf")
+ )
+
+ return results
+
+
+def prepare_results_with_speedups(results_dict):
+ """Prepare results with speedup calculations based on dynamic baseline selection."""
+ prepared_results = []
+
+ # Determine the fastest baseline for each operation type
+ def get_fastest_baseline(op_name, results_dict):
+ """Get the fastest baseline between standard and native_compiled versions."""
+ if "fp8_quant" in op_name:
+ candidates = [
+ "standard_allreduce_rmsnorm_fp8_quant",
+ "standard_allreduce_rmsnorm_fp8_quant_native_compiled",
+ ]
+ elif "fp4_quant" in op_name:
+ candidates = [
+ "standard_allreduce_rmsnorm_fp4_quant",
+ "standard_allreduce_rmsnorm_fp4_quant_native_compiled",
+ ]
+ else:
+ candidates = [
+ "standard_allreduce_rmsnorm",
+ "standard_allreduce_rmsnorm_native_compiled",
+ ]
+
+ # Find the fastest among available candidates
+ fastest_time = float("inf")
+ fastest_baseline = None
+
+ for candidate in candidates:
+ if (
+ candidate in results_dict
+ and results_dict[candidate] != float("inf")
+ and results_dict[candidate] < fastest_time
+ ):
+ fastest_time = results_dict[candidate]
+ fastest_baseline = candidate
+
+ return fastest_baseline
+
+ # Create dynamic baseline mapping
+ dynamic_baseline_mapping = {}
+ for op_name in results_dict:
+ if (
+ op_name.startswith("flashinfer_")
+ or op_name.startswith("standard_")
+ and not op_name.endswith("_native_compiled")
+ ):
+ dynamic_baseline_mapping[op_name] = get_fastest_baseline(
+ op_name, results_dict
+ )
+
+ for op_name, time_ms in results_dict.items():
+ if time_ms == float("inf"):
+ speedup_str = "FAILED"
+ time_str = "FAILED"
+ else:
+ time_str = f"{time_ms:.3f}"
+ # Find the appropriate baseline for this operation
+ baseline_op = dynamic_baseline_mapping.get(op_name)
+ if baseline_op and baseline_op in results_dict:
+ baseline_time = results_dict[baseline_op]
+ if baseline_time != float("inf") and baseline_time > 0:
+ speedup = baseline_time / time_ms
+ speedup_str = f"{speedup:.2f}x"
+ else:
+ speedup_str = "N/A"
+ else:
+ # For baseline operations, determine if this is the fastest baseline
+                if op_name.endswith("_native_compiled") or op_name.startswith(
+                    "standard_"
+                ):
+ fastest_baseline = get_fastest_baseline(op_name, results_dict)
+ if fastest_baseline == op_name:
+ speedup_str = "baseline"
+ else:
+ if fastest_baseline and fastest_baseline in results_dict:
+ baseline_time = results_dict[fastest_baseline]
+ if baseline_time != float("inf") and baseline_time > 0:
+ speedup = baseline_time / time_ms
+ speedup_str = f"{speedup:.2f}x"
+ else:
+ speedup_str = "N/A"
+ else:
+ speedup_str = "N/A"
+ else:
+ speedup_str = "N/A"
+
+ prepared_results.append(
+ {
+ "operation": op_name,
+ "time_ms": time_ms,
+ "time_str": time_str,
+ "speedup_str": speedup_str,
+ }
+ )
+
+ return prepared_results
+
+
+def print_results(
+ results_dict,
+ num_tokens,
+ hidden_dim,
+ dtype,
+ use_residual,
+ quant_modes,
+ input_size_mb,
+):
+ """Print benchmark results in a formatted table."""
+ print(f"\n{'=' * 80}")
+ print(
+ f"Results: num_tokens={num_tokens}, hidden_dim={hidden_dim} "
+ f"(input size: {input_size_mb:.2f} MB)"
+ )
+ print(
+ f"dtype={dtype}, residual={'yes' if use_residual else 'no'}, "
+ f"quant_modes={','.join(sorted(list(quant_modes)))}"
+ )
+ print(f"{'=' * 80}")
+ print(f"{'Operation':<50} {'Time (ms)':<12} {'Speedup':<10}")
+ print(f"{'-' * 80}")
+
+ # Prepare results with speedup calculations
+ prepared_results = prepare_results_with_speedups(results_dict)
+
+ for result in prepared_results:
+ if result["time_ms"] == float("inf"):
+ time_display = result["time_str"]
+ else:
+ time_display = f"{result['time_ms']:.3f}"
+
+ print(
+ f"{result['operation']:<50} {time_display:<12} {result['speedup_str']:<10}"
+ )
+
+
+def format_results_markdown(
+ all_results: list[dict], world_size: int, args: argparse.Namespace
+) -> str:
+ """Format all benchmark results as markdown."""
+ lines: list[str] = []
+ lines.append("# FlashInfer Fused Collective Operations Benchmark Results")
+ lines.append("")
+ lines.append(f"**World Size:** {world_size} ")
+ lines.append(f"**Hidden Dimension:** {args.hidden_dim} ")
+ lines.append(f"**Warmup Iterations:** {args.warmup} ")
+ lines.append(f"**Benchmark Trials:** {args.trials} ")
+ modes = ",".join(all_results[0]["quant_modes"]) if all_results else "N/A"
+ lines.append(f"**Quantization Modes:** {modes} ")
+ lines.append("")
+ lines.append("---")
+ lines.append("")
+
+ for entry in all_results:
+ num_tokens = entry["num_tokens"]
+ dtype = entry["dtype"]
+ use_residual = entry["use_residual"]
+ results_dict = entry["results"]
+ input_size_mb = entry["input_size_mb"]
+ residual_str = "with residual" if use_residual else "no residual"
+
+ lines.append(
+ f"## Configuration: num_tokens={num_tokens}, dtype={dtype}, {residual_str}"
+ )
+ lines.append(f"**Input Size:** {input_size_mb:.2f} MB")
+ lines.append("")
+
+ prepared = prepare_results_with_speedups(results_dict)
+ # Build DataFrame for markdown export
+ rows = [
+ {
+ "Operation": r["operation"].replace("_", " ").title(),
+ "Time (ms)": r["time_str"],
+ "Speedup": r["speedup_str"],
+ }
+ for r in prepared
+ ]
+ df = pd.DataFrame(rows)
+ if df.empty:
+ lines.append("No results.")
+ else:
+ lines.append(df.to_markdown(index=False))
+ lines.append("")
+
+ return "\n".join(lines)
+
+
+def save_results_to_file(
+ all_results: list[dict], world_size: int, args: argparse.Namespace, rank: int
+):
+ """Save benchmark results to markdown file (only on rank 0)."""
+ if rank != 0:
+ return
+
+ if not all_results:
+ logger.warning("No results to save")
+ return
+
+ output_path = args.output_file
+
+ try:
+ markdown_content = format_results_markdown(all_results, world_size, args)
+
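+        # Open in append mode so repeated runs accumulate their tables in a
+        # single markdown report.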
+ with open(output_path, "a") as f:
+ f.write(markdown_content)
+
+ except Exception as e:
+ logger.error("Failed to save results to file: %s", e)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Benchmark fused collective operations"
+ )
+ parser.add_argument(
+ "--num-tokens",
+ type=int,
+ nargs="+",
+ default=[128, 512, 1024, 2048],
+ help="Numbers of tokens to test",
+ )
+ parser.add_argument(
+ "--hidden-dim", type=int, default=8192, help="Hidden dimension size"
+ )
+ parser.add_argument(
+ "--dtypes",
+ type=str,
+ nargs="+",
+ default=["bfloat16"],
+ choices=["float16", "bfloat16", "float32"],
+ help="Data types to test",
+ )
+ parser.add_argument(
+ "--no-residual",
+ action="store_true",
+ help="Skip residual connection tests",
+ )
+
+ parser.add_argument(
+ "--quant-modes",
+ type=str,
+ default="none,fp8,fp4",
+ help=(
+ "Comma-separated quantization modes to run: none, fp8, fp4. "
+ "Default: none,fp8,fp4"
+ ),
+ )
+
+ parser.add_argument(
+ "--warmup", type=int, default=5, help="Number of warmup iterations"
+ )
+ parser.add_argument(
+ "--trials", type=int, default=20, help="Number of benchmark trials"
+ )
+ parser.add_argument(
+ "--output-file",
+ type=str,
+ help="""Output file path for markdown results
+ (default: benchmark_results_.md)
+ """,
+ )
+
+ parser.add_argument(
+ "--no-oneshot",
+ action="store_true",
+ help="Skip oneshot benchmarks",
+ )
+
+ args = parser.parse_args()
+
+ # Check if running with torchrun (required for collective operations)
+ if "RANK" not in os.environ or "WORLD_SIZE" not in os.environ:
+ raise RuntimeError(
+ "Must run with torchrun for distributed benchmarking. "
+ "Example: torchrun --nproc_per_node=2 benchmark_fused_collective.py"
+ )
+
+ # Initialize distributed environment
+ rank = int(os.environ["RANK"])
+ world_size = int(os.environ["WORLD_SIZE"])
+
+ device = torch.device(f"cuda:{rank}")
+ torch.cuda.set_device(device)
+ torch.set_default_device(device)
+
+ init_distributed_environment()
+ initialize_model_parallel(tensor_model_parallel_size=world_size)
+
+ # Validate world size (must be > 1 for collective operations)
+ if world_size <= 1:
+ raise ValueError(
+ "World size must be > 1 for collective operations benchmarking. "
+ f"Current world size: {world_size}. Use torchrun with --nproc_per_node > 1."
+ )
+
+ # Parse quantization modes
+ valid_quant_modes = {"none", "fp8", "fp4"}
+ raw_modes = [
+ m.strip().lower() for m in (args.quant_modes or "").split(",") if m.strip()
+ ]
+ quant_modes = set(raw_modes) if raw_modes else {"none", "fp8", "fp4"}
+ invalid = sorted(list(quant_modes - valid_quant_modes))
+ if invalid:
+ raise ValueError(
+ f"Invalid --quant-modes entries: {','.join(invalid)}. "
+ f"Valid options are: {','.join(sorted(valid_quant_modes))}."
+ )
+
+ if rank == 0:
+ logger.info("Running benchmark with world_size=%s, rank=%s", world_size, rank)
+ logger.info("Quantization modes: %s", ",".join(sorted(list(quant_modes))))
+ if flashinfer_comm is not None:
+ logger.info(
+ "FlashInfer available - will benchmark fused operations",
+ )
+ else:
+ logger.info(
+ "FlashInfer not available - only benchmarking standard operations"
+ )
+
+ # Convert dtype strings to torch dtypes
+ dtype_map = {
+ "float16": torch.float16,
+ "bfloat16": torch.bfloat16,
+ "float32": torch.float32,
+ }
+ dtypes = [dtype_map[dt] for dt in args.dtypes]
+
+ # Test configurations
+ residual_options = [True] if not args.no_residual else [False]
+
+ configs = list(itertools.product(args.num_tokens, dtypes, residual_options))
+
+ # Setup FlashInfer workspace if available
+ ipc_handles = None
+ allreduce_params = None
+
+ if flashinfer_comm is not None:
+        # Size the FlashInfer workspace for the configured hidden dimension and
+        # bound the number of tokens it can hold.
+ max_num_token = _FI_MAX_SIZES.get(world_size) // (
+ args.hidden_dim * world_size * 2
+ )
+
+ ipc_handles, workspace_tensor = setup_flashinfer_workspace(
+ world_size, rank, args.hidden_dim, max_num_token
+ )
+
+ if workspace_tensor is not None:
+ allreduce_params = FlashInferFusedAllReduceParams(
+ rank=rank,
+ world_size=world_size,
+ max_token_num=max_num_token,
+ )
+
+ # Collect all results for markdown export
+ all_results = []
+
+ try:
+ # Run benchmarks
+ for num_tokens, dtype, use_residual in configs:
+ if rank == 0:
+ logger.info(
+ "\nTesting: num_tokens=%s, hidden_dim=%s, dtype=%s, residual=%s",
+ num_tokens,
+ args.hidden_dim,
+ dtype,
+ use_residual,
+ )
+
+ results = run_benchmarks(
+ num_tokens,
+ args.hidden_dim,
+ dtype,
+ use_residual,
+ allreduce_params,
+ quant_modes=quant_modes,
+ no_oneshot=args.no_oneshot,
+ )
+
+ # Store results for markdown export
+ if rank == 0:
+ # Calculate input size in MB
+ input_size_mb = (
+ num_tokens * args.hidden_dim * torch.finfo(dtype).bits
+ ) / (8 * 1024 * 1024)
+ all_results.append(
+ {
+ "num_tokens": num_tokens,
+ "hidden_dim": args.hidden_dim,
+ "dtype": str(dtype).replace("torch.", ""),
+ "use_residual": use_residual,
+ "quant_modes": sorted(list(quant_modes)),
+ "input_size_mb": input_size_mb,
+ "results": results,
+ }
+ )
+
+ print_results(
+ results,
+ num_tokens,
+ args.hidden_dim,
+ dtype,
+ use_residual,
+ quant_modes,
+ input_size_mb,
+ )
+
+ # Save results to markdown file
+ if args.output_file and rank == 0:
+ save_results_to_file(all_results, world_size, args, rank)
+
+ finally:
+ # Cleanup
+ if ipc_handles is not None:
+ cleanup_flashinfer_workspace(ipc_handles)
+
+ dist.barrier()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/benchmarks/kernels/benchmark_grouped_gemm_cutlass.py b/benchmarks/kernels/benchmark_grouped_gemm_cutlass.py
index d525bd5faacf6..9b426d8d5f778 100644
--- a/benchmarks/kernels/benchmark_grouped_gemm_cutlass.py
+++ b/benchmarks/kernels/benchmark_grouped_gemm_cutlass.py
@@ -16,8 +16,8 @@ from vllm.model_executor.layers.fused_moe.fused_moe import (
from vllm.utils.argparse_utils import FlexibleArgumentParser
DEFAULT_MODELS = [
- "nm-testing/Mixtral-8x7B-Instruct-v0.1",
- "nm-testing/deepseekv2-lite",
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ "deepseek-ai/DeepSeek-V2-Lite",
"ibm-granite/granite-3.0-1b-a400m",
"ibm-granite/granite-3.0-3b-a800m",
]
diff --git a/benchmarks/kernels/benchmark_lora.py b/benchmarks/kernels/benchmark_lora.py
index bf1512268fe0b..6715c9b548aa1 100644
--- a/benchmarks/kernels/benchmark_lora.py
+++ b/benchmarks/kernels/benchmark_lora.py
@@ -19,13 +19,24 @@ from torch.utils.benchmark import Measurement as TMeasurement
from utils import ArgPool, Bench, CudaGraphBenchParams
from weight_shapes import WEIGHT_SHAPES
-from vllm.triton_utils import HAS_TRITON
+from vllm.lora.ops.triton_ops.utils import get_lora_op_configs
+from vllm.triton_utils import HAS_TRITON, triton
if HAS_TRITON:
- from vllm.lora.ops.triton_ops import LoRAKernelMeta, lora_expand, lora_shrink
+ from vllm.lora.ops.triton_ops import ( ## added fused_moe_lora
+ LoRAKernelMeta,
+ fused_moe_lora_expand,
+ fused_moe_lora_shrink,
+ lora_expand,
+ lora_shrink,
+ )
+ from vllm.lora.ops.triton_ops.fused_moe_lora_op import (
+ _LORA_PTR_DICT, ## added _LORA_PTR_DICT for fused_moe_lora
+ )
from vllm.lora.ops.triton_ops.utils import _LORA_A_PTR_DICT, _LORA_B_PTR_DICT
-
+from vllm import _custom_ops as ops
from vllm.utils.argparse_utils import FlexibleArgumentParser
+from vllm.utils.math_utils import round_up
DEFAULT_MODELS = list(WEIGHT_SHAPES.keys())
DEFAULT_TP_SIZES = [1]
@@ -59,6 +70,8 @@ DEFAULT_NUM_LORAS = [1, 2, 3, 4]
DEFAULT_SORT_BY_LORA_IDS = [False, True]
DEFAULT_SEQ_LENGTHS = [1]
DEFAULT_EXPAND_FN_ADD_INPUTS = [True, False]
+DEFAULT_TOP_K_NUMS = [1] # Added for MoE LoRA top_k
+DEFAULT_NUM_EXPERTS = [8] # Added for MoE LoRA num_experts
# Utilities
@@ -191,6 +204,11 @@ class OpType(Enum):
LORA_SHRINK = auto()
LORA_EXPAND = auto()
+ ## Adding support for fused moe lora
+ FUSED_MOE_LORA_GATE_UP_SHRINK = auto() ## Gate/Up projection variant with shrink
+ FUSED_MOE_LORA_GATE_UP_EXPAND = auto() ## Gate/Up projection variant with expand
+ FUSED_MOE_LORA_DOWN_SHRINK = auto() ## Down projection variant with shrink
+ FUSED_MOE_LORA_DOWN_EXPAND = auto() ## Down projection variant with expand
@staticmethod
def from_str(s: str) -> "OpType":
@@ -198,6 +216,15 @@ class OpType(Enum):
return OpType.LORA_SHRINK
if s.lower() == "lora_expand":
return OpType.LORA_EXPAND
+ # Adding support for fused moe lora, both in gate_up and down
+ if s.lower() == "fused_moe_lora_gate_up_shrink": ## Gate/Up variant with shrink
+ return OpType.FUSED_MOE_LORA_GATE_UP_SHRINK
+ if s.lower() == "fused_moe_lora_gate_up_expand": ## Gate/Up variant with expand
+ return OpType.FUSED_MOE_LORA_GATE_UP_EXPAND
+ if s.lower() == "fused_moe_lora_down_shrink": ## Down variant with shrink
+ return OpType.FUSED_MOE_LORA_DOWN_SHRINK
+ if s.lower() == "fused_moe_lora_down_expand": ## Down variant with expand
+ return OpType.FUSED_MOE_LORA_DOWN_EXPAND
raise ValueError(f"Unrecognized str {s} to convert to OpType")
def is_shrink_fn(self) -> bool:
@@ -206,19 +233,56 @@ class OpType(Enum):
def is_expand_fn(self) -> bool:
return self in [OpType.LORA_EXPAND]
+ def is_fused_moe_lora_fn(self) -> bool: ## adding for fused MoE LoRA
+ return self in [
+ OpType.FUSED_MOE_LORA_GATE_UP_SHRINK,
+ OpType.FUSED_MOE_LORA_DOWN_SHRINK,
+ OpType.FUSED_MOE_LORA_GATE_UP_EXPAND,
+ OpType.FUSED_MOE_LORA_DOWN_EXPAND,
+ ]
+
+ def is_fused_moe_lora_gate_up_fn(
+ self,
+ ) -> bool: ## adding for fused MoE LoRA Gate/Up
+ return self in [
+ OpType.FUSED_MOE_LORA_GATE_UP_SHRINK,
+ OpType.FUSED_MOE_LORA_GATE_UP_EXPAND,
+ ]
+
+ def is_fused_moe_lora_down_fn(self) -> bool: ## adding for fused MoE LoRA Down
+ return self in [
+ OpType.FUSED_MOE_LORA_DOWN_SHRINK,
+ OpType.FUSED_MOE_LORA_DOWN_EXPAND,
+ ]
+
+ def is_fused_moe_lora_shrink_fn(self) -> bool:
+ return self in [
+ OpType.FUSED_MOE_LORA_GATE_UP_SHRINK,
+ OpType.FUSED_MOE_LORA_DOWN_SHRINK,
+ ]
+
+ def is_fused_moe_lora_expand_fn(self) -> bool:
+ return self in [
+ OpType.FUSED_MOE_LORA_GATE_UP_EXPAND,
+ OpType.FUSED_MOE_LORA_DOWN_EXPAND,
+ ]
+
def num_slices(self) -> list[int]:
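+        # Fused MoE LoRA Gate/Up has two projection slices (gate and up);
+        # the Down projection has a single slice.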
+ if self.is_fused_moe_lora_gate_up_fn():
+ return [2]
+ elif self.is_fused_moe_lora_down_fn():
+ return [1]
return [1, 2, 3]
def mkn(
self, batch_size: int, seq_length: int, hidden_size: int, lora_rank: int
) -> tuple[int, int, int]:
num_tokens = batch_size * seq_length
- if self.is_shrink_fn():
+ if self.is_shrink_fn() or self.is_fused_moe_lora_fn():
m = num_tokens
k = hidden_size
n = lora_rank
- else:
- assert self.is_expand_fn()
+ elif self.is_expand_fn():
m = num_tokens
k = lora_rank
n = hidden_size
@@ -232,9 +296,36 @@ class OpType(Enum):
"""
if self.is_shrink_fn():
return op_dtype, op_dtype, torch.float32
- else:
- assert self.is_expand_fn()
+ elif self.is_expand_fn():
return torch.float32, op_dtype, op_dtype
+ else:
+ assert self.is_fused_moe_lora_fn()
+ return op_dtype, op_dtype, op_dtype
+
+ def matmul_shapes_fused_moe_lora(
+ self,
+ m: int,
+ n: int,
+ k: int,
+ num_loras: int,
+ num_slices: int,
+ top_k_num: int,
+ num_experts: int,
+    ) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
+ if self.is_fused_moe_lora_shrink_fn():
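+            # Down-projection shrink consumes the top_k routed copies of each
+            # token, so its input has top_k_num times more rows than Gate/Up.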
+ input_shape = (
+ (m * top_k_num, n)
+ if self in [OpType.FUSED_MOE_LORA_DOWN_SHRINK]
+ else (m, n)
+ )
+ output_shape = (num_slices, m, top_k_num, k)
+ weight_shape = (num_loras, num_experts, k, n)
+ else:
+ assert self.is_fused_moe_lora_expand_fn()
+ input_shape = (num_slices, m, top_k_num, k)
+ output_shape = (m, top_k_num, n * num_slices)
+ weight_shape = (num_loras, num_experts, n, k)
+ return (input_shape, weight_shape, output_shape)
def matmul_shapes(
self,
@@ -244,6 +335,8 @@ class OpType(Enum):
lora_rank: int,
num_loras: int,
num_slices: int,
+ top_k_num: int | None = None,
+ num_experts: int | None = None,
) -> tuple[tuple[int, ...], tuple[int, ...], tuple[int, ...]]:
"""
Given num_slices, return the shapes of the A, B, and C matrices
@@ -258,6 +351,16 @@ class OpType(Enum):
if self in [OpType.LORA_EXPAND]:
# LoRA expand kernels support num_slices inherently in the kernel
return ((num_slices, m, k), b_shape, (m, n * num_slices))
+ if self.is_fused_moe_lora_fn():
+ return self.matmul_shapes_fused_moe_lora(
+ m,
+ k,
+ n,
+ num_loras,
+ num_slices,
+ top_k_num,
+ num_experts,
+ )
raise ValueError(f"Unrecognized op_type {self}")
def bench_fn(self) -> Callable:
@@ -265,6 +368,16 @@ class OpType(Enum):
return lora_shrink
if self == OpType.LORA_EXPAND:
return lora_expand
+ if self in [
+ OpType.FUSED_MOE_LORA_GATE_UP_SHRINK,
+ OpType.FUSED_MOE_LORA_DOWN_SHRINK,
+ ]:
+ return fused_moe_lora_shrink
+ if self in [
+ OpType.FUSED_MOE_LORA_GATE_UP_EXPAND,
+ OpType.FUSED_MOE_LORA_DOWN_EXPAND,
+ ]:
+ return fused_moe_lora_expand
raise ValueError(f"Unrecognized optype {self}")
@@ -318,6 +431,8 @@ class BenchmarkContext:
sort_by_lora_id: bool
dtype: torch.dtype
seq_length: int | None = None
+ num_experts: int | None = None # num_experts for MoE based ops
+ top_k_num: int | None = None # top_k for MoE based ops
num_slices: int | None = None # num_slices for slice based ops
def with_seq_length(self, seq_length: int) -> "BenchmarkContext":
@@ -373,6 +488,11 @@ class BenchmarkTensors:
f"{dtype_to_str(self.output.dtype)}"
)
+ def get_num_tokens(self, size: int, top_k_num: int, op_type: OpType):
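+        # For the Down-projection shrink, every token appears once per routed
+        # expert, so the effective token count scales by top_k_num.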
+ return (
+ size * top_k_num if op_type in [OpType.FUSED_MOE_LORA_DOWN_SHRINK] else size
+ )
+
@staticmethod
def make(
ctx: BenchmarkContext, op_type: OpType, device: str = "cuda"
@@ -385,6 +505,8 @@ class BenchmarkTensors:
ctx.lora_rank,
ctx.num_loras,
ctx.num_slices,
+ ctx.top_k_num,
+ ctx.num_experts,
)
a_type, b_type, c_type = op_type.matmul_dtypes(ctx.dtype)
input_tensor, lora_weights, output_tensor = make_rand_tensors(
@@ -432,17 +554,27 @@ class BenchmarkTensors:
prompt_lora_indices_tensor,
)
- def sanity_check(self) -> None:
+ def sanity_check(self, ctx: BenchmarkContext, op_type: OpType) -> None:
"""
Fails asserts when non-conformality is detected.
"""
- num_tokens = self.input.shape[-2]
+ num_tokens = (
+ self.input.shape[1]
+ if op_type.is_fused_moe_lora_expand_fn()
+ else self.input.shape[-2]
+ )
# check metadata tensors
- assert torch.sum(self.seq_lens) == num_tokens
+ ## In down shrink case, each token is repeated top_k_num times
+ assert num_tokens == self.get_num_tokens(
+ torch.sum(self.seq_lens), ctx.top_k_num, op_type
+ ), f"Expected {num_tokens} tokens, but got {torch.sum(self.seq_lens)}"
num_seqs = self.seq_lens.shape[0]
# assert self.seq_start_loc.shape[0] == num_seqs
+ ## In down shrink case, each prompt corresponds to top_k_num sequences
assert self.prompt_lora_mapping.shape[0] == num_seqs
- assert self.lora_kernel_meta.token_lora_mapping.shape[0] == num_tokens
+        assert num_tokens == self.get_num_tokens(
+            self.lora_kernel_meta.token_lora_mapping.shape[0], ctx.top_k_num, op_type
+        )
def to_device(self, device: str):
"""
@@ -471,21 +603,111 @@ class BenchmarkTensors:
to_device(field) if field_name != "no_lora_flag_cpu" else field,
)
- def metadata(self) -> tuple[int, int, int]:
+    def metadata(
+        self, ctx: BenchmarkContext, op_type: OpType
+    ) -> tuple[int, int, int, int]:
"""
Return num_seqs, num_tokens and max_seq_len
"""
num_seqs = self.seq_lens.shape[0]
- num_tokens = self.lora_kernel_meta.token_lora_mapping.shape[0]
+ num_tokens = self.get_num_tokens(
+ self.lora_kernel_meta.token_lora_mapping.shape[0], ctx.top_k_num, op_type
+ )
max_seq_len = torch.max(self.seq_lens).item()
num_slices = len(self.lora_weights_lst)
return num_seqs, num_tokens, max_seq_len, num_slices
- def as_lora_shrink_kwargs(self) -> dict[str, Any]:
- self.sanity_check()
+ def fused_moe_lora_data_prepare(
+ self,
+ block_size: int,
+ token_lora_mapping: torch.Tensor,
+ ctx: BenchmarkContext,
+ ):
+ def moe_lora_align_block_size(
+ topk_ids: torch.Tensor,
+ token_lora_mapping: torch.Tensor,
+ block_size: int,
+ num_experts: int,
+ max_loras: int,
+ expert_map: torch.Tensor | None = None,
+ pad_sorted_ids: bool = False,
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Aligns tokens and experts into block-sized chunks for LoRA-based
+ mixture-of-experts (MoE) execution.
+ """
+ max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1)
+ if pad_sorted_ids:
+ max_num_tokens_padded = round_up(max_num_tokens_padded, block_size)
+ sorted_ids = torch.empty(
+ (max_loras * max_num_tokens_padded,),
+ dtype=torch.int32,
+ device=topk_ids.device,
+ )
+ max_num_m_blocks = triton.cdiv(max_num_tokens_padded, block_size)
+            # Expert ids must default to -1 to prevent a blank block
+ expert_ids = torch.empty(
+ (max_loras * max_num_m_blocks,),
+ dtype=torch.int32,
+ device=topk_ids.device,
+ )
+ num_tokens_post_pad = torch.empty(
+ (max_loras), dtype=torch.int32, device=topk_ids.device
+ )
+
+ ops.moe_lora_align_block_size(
+ topk_ids,
+ token_lora_mapping,
+ num_experts,
+ block_size,
+ max_loras,
+ max_num_tokens_padded,
+ max_num_m_blocks,
+ sorted_ids,
+ expert_ids,
+ num_tokens_post_pad,
+ )
+ if expert_map is not None:
+ expert_ids = expert_map[expert_ids]
+
+ return sorted_ids, expert_ids, num_tokens_post_pad
+
+ num_tokens = ctx.batch_size
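+        # Random expert assignments are sufficient for timing purposes; these
+        # are placeholder values rather than real router outputs.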
+ curr_topk_ids = torch.randint(
+ 0,
+ ctx.num_experts,
+ (num_tokens, ctx.top_k_num),
+ device="cuda",
+ dtype=torch.int32,
+ )
+ topk_weights = torch.randint(
+ 0,
+ ctx.num_experts,
+ (num_tokens, ctx.top_k_num),
+ device="cuda",
+ dtype=torch.int32,
+ )
+
+ (sorted_token_ids_lora, expert_ids_lora, num_tokens_post_padded_lora) = (
+ moe_lora_align_block_size(
+ topk_ids=curr_topk_ids,
+ token_lora_mapping=token_lora_mapping,
+ block_size=block_size,
+ num_experts=ctx.num_experts,
+ max_loras=ctx.num_loras,
+ )
+ )
+
+ sorted_token_ids = sorted_token_ids_lora.view(ctx.num_loras, -1)
+ expert_ids = expert_ids_lora.view(ctx.num_loras, -1)
+ num_tokens_post_padded = num_tokens_post_padded_lora
+ return (topk_weights, sorted_token_ids, expert_ids, num_tokens_post_padded)
+
+ def as_lora_shrink_kwargs(
+ self, ctx: BenchmarkContext, op_type: OpType
+ ) -> dict[str, Any]:
+ self.sanity_check(ctx, op_type)
self.to_device(self.input.device)
- _, num_tokens, _, num_slices = self.metadata()
+ _, num_tokens, _, num_slices = self.metadata(ctx, op_type)
# Sanity check matrix shapes.
i_shape, lw_shape, o_shape = (
@@ -520,11 +742,13 @@ class BenchmarkTensors:
"no_lora_flag_cpu": self.lora_kernel_meta.no_lora_flag_cpu,
}
- def as_lora_expand_kwargs(self, add_inputs: bool) -> dict[str, Any]:
- self.sanity_check()
+ def as_lora_expand_kwargs(
+ self, ctx: BenchmarkContext, op_type: OpType, add_inputs: bool
+ ) -> dict[str, Any]:
+ self.sanity_check(ctx, op_type)
self.to_device(self.input.device)
- _, num_tokens, _, num_slices = self.metadata()
+ _, num_tokens, _, num_slices = self.metadata(ctx, op_type)
# Sanity check matrix shapes.
i_shape, lw_shape, o_shape = (
@@ -561,18 +785,173 @@ class BenchmarkTensors:
"no_lora_flag_cpu": self.lora_kernel_meta.no_lora_flag_cpu,
}
- def bench_fn_kwargs(
- self, op_type: OpType, add_inputs: bool | None = None
+ def as_fused_moe_lora_shrink_kwargs(
+ self, ctx: BenchmarkContext, op_type: OpType
) -> dict[str, Any]:
- if op_type.is_shrink_fn():
+ self.sanity_check(ctx, op_type)
+ self.to_device(self.input.device)
+
+ _, num_tokens, _, num_slices = self.metadata(ctx, op_type)
+
+ # Sanity check matrix shapes.
+ i_shape, lw_shape, o_shape = (
+ self.input.shape,
+ self.lora_weights_lst[0].shape,
+ self.output.shape,
+ )
+ # Expected input shape : [num_tokens, hidden_size] for gate_up
+ # Expected input shape : [top_k_num * num_tokens, hidden_size] for down
+ assert len(i_shape) == 2
+ assert i_shape[0] == num_tokens
+ hidden_size = i_shape[1]
+ # Expected lora weight shape [max_lora, num_experts, lora_rank, hidden_size]
+ assert len(lw_shape) == 4
+ assert lw_shape[-1] == hidden_size
+ lora_rank = lw_shape[-2]
+ # Expected output shape : [num_slices, num_tokens, top_k_num, lora_rank]
+ assert len(o_shape) == 4
+ assert (
+ o_shape
+ == (num_slices, num_tokens // ctx.top_k_num, ctx.top_k_num, lora_rank)
+ if op_type in [OpType.FUSED_MOE_LORA_DOWN_SHRINK]
+ else o_shape == (num_slices, num_tokens, ctx.top_k_num, lora_rank)
+ )
+ kernel_config = get_lora_op_configs(
+ op_type.name.lower(),
+ max_loras=lw_shape[0],
+ batch=num_tokens,
+ hidden_size=hidden_size,
+ rank=lora_rank,
+ num_slices=num_slices,
+ add_inputs=False,
+ )
+
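+        # Align tokens/experts with the same BLOCK_SIZE_M the shrink kernel is
+        # launched with, so the sorted ids match the kernel's block layout.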
+ (topk_weights, sorted_token_ids, expert_ids, num_tokens_post_padded) = (
+ self.fused_moe_lora_data_prepare(
+ block_size=kernel_config["BLOCK_SIZE_M"],
+ token_lora_mapping=self.lora_kernel_meta.token_lora_mapping,
+ ctx=ctx,
+ )
+ )
+
+ return {
+ "qcurr_hidden_states": self.input,
+ "lora_a_stacked": self.lora_weights_lst,
+ "a_intermediate_cache1": self.output,
+ "topk_weights": topk_weights,
+ "sorted_token_ids": sorted_token_ids,
+ "expert_ids": expert_ids,
+ "num_tokens_post_padded": num_tokens_post_padded,
+ "top_k_num": ctx.top_k_num,
+ "device": self.input.device,
+ "N": lora_rank,
+ "M": topk_weights.shape[0],
+ "EM": sorted_token_ids.shape[1],
+ "K": self.input.shape[1],
+ "num_tokens": num_tokens,
+ "num_experts": ctx.num_experts,
+ "num_slices": num_slices,
+ "shrink_block_size_m": kernel_config["BLOCK_SIZE_M"],
+ "shrink_block_size_n": kernel_config["BLOCK_SIZE_N"],
+ "shrink_block_size_k": kernel_config["BLOCK_SIZE_K"],
+ "shrink_group_size_m": kernel_config["GROUP_SIZE_M"],
+ "shrink_num_warps": kernel_config["NUM_WARPS"],
+ "shrink_num_stages": kernel_config["NUM_STAGES"],
+ "shrink_split_k": kernel_config.get("SPLIT_K", 1),
+ "mul_routed_weight": op_type.is_fused_moe_lora_down_fn(),
+ }
+
+ def as_fused_moe_lora_expand_kwargs(
+ self, ctx: BenchmarkContext, op_type: OpType
+ ) -> dict[str, Any]:
+ self.sanity_check(ctx, op_type)
+ self.to_device(self.input.device)
+
+ _, num_tokens, _, num_slices = self.metadata(ctx, op_type)
+
+ # Sanity check matrix shapes.
+ i_shape, lw_shape, o_shape = (
+ self.input.shape,
+ self.lora_weights_lst[0].shape,
+ self.output.shape,
+ )
+
+ # Expected input shape : [num_slices, num_tokens, top_k_num, lora_rank]
+ assert len(i_shape) == 4
+ assert i_shape[0] == num_slices
+ assert i_shape[1] == num_tokens
+ lora_rank = i_shape[-1]
+ # Expected lora weight shape : [num_loras, num_experts, hidden_size, lora_rank]
+ assert len(lw_shape) == 4
+ assert lw_shape[-1] == lora_rank
+ hidden_size = lw_shape[-2]
+ # Expected output shape : [num_tokens, top_k_num, hidden_size * num_slices]
+ assert len(o_shape) == 3
+ assert o_shape == (num_tokens, ctx.top_k_num, hidden_size * num_slices)
+
+ kernel_config = get_lora_op_configs(
+ op_type.name.lower(),
+ max_loras=lw_shape[0],
+ batch=num_tokens,
+ hidden_size=hidden_size,
+ rank=lora_rank,
+ num_slices=num_slices,
+ add_inputs=False,
+ )
+
+ (topk_weights, sorted_token_ids, expert_ids, num_tokens_post_padded) = (
+ self.fused_moe_lora_data_prepare(
+ block_size=kernel_config["BLOCK_SIZE_M"],
+ token_lora_mapping=self.lora_kernel_meta.token_lora_mapping,
+ ctx=ctx,
+ )
+ )
+
+ return {
+ "a_intermediate_cache1": self.input,
+ "lora_b_stacked": self.lora_weights_lst,
+ "output": self.output,
+ "topk_weights": topk_weights,
+ "sorted_token_ids": sorted_token_ids,
+ "expert_ids": expert_ids,
+ "num_tokens_post_padded": num_tokens_post_padded,
+ "top_k_num": ctx.top_k_num,
+ "device": self.input.device,
+ "N": lora_rank,
+ "M": topk_weights.shape[0],
+ "EM": sorted_token_ids.shape[1],
+ "K": self.input.shape[1],
+ "num_tokens": num_tokens,
+ "num_experts": ctx.num_experts,
+ "num_slices": num_slices,
+ "max_lora_rank": lora_rank,
+ "w1_output_dim_size": lw_shape[2],
+ "expand_block_size_m": kernel_config["BLOCK_SIZE_M"],
+ "expand_block_size_n": kernel_config["BLOCK_SIZE_N"],
+ "expand_block_size_k": kernel_config["BLOCK_SIZE_K"],
+ "expand_group_size_m": kernel_config["GROUP_SIZE_M"],
+ "expand_num_warps": kernel_config["NUM_WARPS"],
+ "expand_num_stages": kernel_config["NUM_STAGES"],
+ "expand_split_k": kernel_config.get("SPLIT_K", 1),
+ "mul_routed_weight": op_type.is_fused_moe_lora_down_fn(),
+ }
+
+ def bench_fn_kwargs(
+ self, ctx: BenchmarkContext, op_type: OpType, add_inputs: bool | None = None
+ ) -> dict[str, Any]:
+ if op_type.is_shrink_fn() or op_type.is_fused_moe_lora_fn():
assert add_inputs is None
else:
assert add_inputs is not None
if op_type == OpType.LORA_SHRINK:
- return self.as_lora_shrink_kwargs()
+ return self.as_lora_shrink_kwargs(ctx, op_type)
if op_type == OpType.LORA_EXPAND:
- return self.as_lora_expand_kwargs(add_inputs)
+ return self.as_lora_expand_kwargs(ctx, op_type, add_inputs)
+ if op_type.is_fused_moe_lora_shrink_fn():
+ return self.as_fused_moe_lora_shrink_kwargs(ctx, op_type)
+ if op_type.is_fused_moe_lora_expand_fn():
+ return self.as_fused_moe_lora_expand_kwargs(ctx, op_type)
raise ValueError(f"Unrecognized optype {self}")
def test_correctness(
@@ -617,7 +996,7 @@ def bench_optype(
test_correctness: bool = False,
) -> TMeasurement:
assert arg_pool_size >= 1
- if op_type.is_shrink_fn():
+ if op_type.is_shrink_fn() or op_type.is_fused_moe_lora_fn():
assert expand_fn_add_inputs is None
else:
assert expand_fn_add_inputs is not None
@@ -627,23 +1006,30 @@ def bench_optype(
BenchmarkTensors.make(ctx, op_type) for _ in range(arg_pool_size)
]
for bt in bench_tensors:
- bt.sanity_check()
+ bt.sanity_check(ctx, op_type)
# Test correctness of our implementation.
if test_correctness:
+ assert op_type in [OpType.LORA_SHRINK, OpType.LORA_EXPAND], (
+ f"Correctness testing is not supported for {op_type.name}."
+ )
assert all(
- [bt.test_correctness(op_type, expand_fn_add_inputs) for bt in bench_tensors]
+ [
+ bt.test_correctness(ctx, op_type, expand_fn_add_inputs)
+ for bt in bench_tensors
+ ]
)
# BenchmarkTensors -> dict (kwargs)
kwargs_list = [
- bt.bench_fn_kwargs(op_type, add_inputs=expand_fn_add_inputs)
+ bt.bench_fn_kwargs(ctx, op_type, add_inputs=expand_fn_add_inputs)
for bt in bench_tensors
]
# Clear LoRA optimization hash-maps.
_LORA_A_PTR_DICT.clear()
_LORA_B_PTR_DICT.clear()
+ _LORA_PTR_DICT.clear()
# Run bench function so that _LORA_A_PTR_DICT and _LORA_B_PTR_DICT are set up
for kwargs in kwargs_list:
op_type.bench_fn()(**kwargs)
@@ -793,7 +1179,9 @@ def run(args: argparse.Namespace, bench_ctxs: list[BenchmarkContext]):
# Benchmark bench_op
expand_fn_add_inputs = (
- [None] if bench_op.is_shrink_fn() else args.expand_fn_add_inputs
+ [None]
+ if bench_op.is_shrink_fn() or bench_op.is_fused_moe_lora_fn()
+ else args.expand_fn_add_inputs
)
for add_input_arg in expand_fn_add_inputs:
seq_len_timers.append(
@@ -831,12 +1219,22 @@ def as_benchmark_contexts(
hidden_sizes: list[int], lora_ranks: list[int], args: argparse.Namespace
) -> list[BenchmarkContext]:
ctxs: list[BenchmarkContext] = []
- for batch_size, hidden_size, lora_rank, num_loras, sort_by_lora_id in product( # noqa
+ for (
+ batch_size,
+ hidden_size,
+ lora_rank,
+ num_loras,
+ sort_by_lora_id,
+ top_k_num,
+ num_experts,
+ ) in product( # noqa
args.batch_sizes,
list(hidden_sizes),
lora_ranks,
args.num_loras,
args.sort_by_lora_id,
+ args.top_k_nums,
+ args.num_experts,
):
ctxs.append(
BenchmarkContext(
@@ -851,6 +1249,8 @@ def as_benchmark_contexts(
seq_length=None,
sort_by_lora_id=sort_by_lora_id,
dtype=args.dtype,
+ top_k_num=top_k_num,
+ num_experts=num_experts,
# To be filled based on the OpType to benchmark
num_slices=None,
)
@@ -1012,6 +1412,22 @@ if __name__ == "__main__":
),
)
+ p.add_argument(
+ "--top-k-nums",
+ nargs="+",
+ type=int,
+ default=DEFAULT_TOP_K_NUMS,
+ help="Top-K values for MoE LoRA operations",
+ )
+
+ p.add_argument(
+ "--num-experts",
+ nargs="+",
+ type=int,
+ default=DEFAULT_NUM_EXPERTS,
+ help="Number of experts for MoE LoRA operations",
+ )
+
parser = FlexibleArgumentParser(
description=f"""
Benchmark LoRA kernels:
diff --git a/benchmarks/kernels/benchmark_machete.py b/benchmarks/kernels/benchmark_machete.py
index 8787724d77cfb..ac78c019a59e5 100644
--- a/benchmarks/kernels/benchmark_machete.py
+++ b/benchmarks/kernels/benchmark_machete.py
@@ -237,6 +237,7 @@ def marlin_create_bench_fn(bt: BenchmarkTensors) -> Callable:
b_q_weight=w_q,
b_bias=None,
b_scales=w_s,
+ a_scales=None,
global_scale=None,
b_zeros=w_zp,
g_idx=g_idx,
diff --git a/benchmarks/kernels/benchmark_marlin.py b/benchmarks/kernels/benchmark_marlin.py
index 12ca9214b1f95..48d790aec9e07 100644
--- a/benchmarks/kernels/benchmark_marlin.py
+++ b/benchmarks/kernels/benchmark_marlin.py
@@ -263,7 +263,7 @@ def bench_run(
results.append(
benchmark.Timer(
- stmt="output = gptq_marlin_gemm(a, None, marlin_q_w, marlin_s, marlin_s2, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, False, False)", # noqa: E501
+ stmt="output = gptq_marlin_gemm(a, None, marlin_q_w, marlin_s, None, marlin_s2, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, False, False)", # noqa: E501
globals=globals,
label=label,
sub_label=sub_label,
@@ -273,7 +273,7 @@ def bench_run(
results.append(
benchmark.Timer(
- stmt="output = gptq_marlin_gemm(a, None, marlin_q_w, marlin_s, marlin_s2, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, True, False)", # noqa: E501
+ stmt="output = gptq_marlin_gemm(a, None, marlin_q_w, marlin_s, None, marlin_s2, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, True, False)", # noqa: E501
globals=globals,
label=label,
sub_label=sub_label,
diff --git a/benchmarks/kernels/benchmark_moe.py b/benchmarks/kernels/benchmark_moe.py
index bc6cf83bc21fd..a1af0b8aec3d0 100644
--- a/benchmarks/kernels/benchmark_moe.py
+++ b/benchmarks/kernels/benchmark_moe.py
@@ -185,8 +185,8 @@ def benchmark_config(
graph.replay()
torch.cuda.synchronize()
- start_event = torch.cuda.Event(enable_timing=True)
- end_event = torch.cuda.Event(enable_timing=True)
+ start_event = torch.Event(enable_timing=True)
+ end_event = torch.Event(enable_timing=True)
latencies: list[float] = []
for i in range(num_iters):
@@ -211,7 +211,7 @@ def get_rocm_tuning_space(use_fp16):
num_warps_range = [1, 2, 4, 8]
group_m_range = [1, 4, 8, 16, 32]
num_stage_range = [2]
- waves_per_eu_range = [0]
+ waves_per_eu_range = [0, 1, 2, 4]
matrix_instr_nonkdim_range = [16, 32] if use_fp16 else []
kpack_range = [1, 2] if use_fp16 else []
@@ -590,6 +590,7 @@ def main(args: argparse.Namespace):
"DeepseekV3ForCausalLM",
"DeepseekV32ForCausalLM",
"Glm4MoeForCausalLM",
+ "NemotronHForCausalLM",
):
E = config.n_routed_experts
topk = config.num_experts_per_tok
@@ -615,6 +616,11 @@ def main(args: argparse.Namespace):
topk = config.moe_topk[0]
intermediate_size = config.moe_intermediate_size[0]
hidden_size = config.hidden_size
+ elif config.architectures[0] in ["Qwen3OmniMoeForConditionalGeneration"]:
+ E = config.thinker_config.text_config.num_experts
+ topk = config.thinker_config.text_config.num_experts_per_tok
+ intermediate_size = config.thinker_config.text_config.moe_intermediate_size
+ hidden_size = config.thinker_config.text_config.hidden_size
else:
# Support for llama4
config = config.get_text_config()
diff --git a/benchmarks/kernels/benchmark_moe_permute_unpermute.py b/benchmarks/kernels/benchmark_moe_permute_unpermute.py
index efa5a7386027e..b8913a217c608 100644
--- a/benchmarks/kernels/benchmark_moe_permute_unpermute.py
+++ b/benchmarks/kernels/benchmark_moe_permute_unpermute.py
@@ -105,8 +105,8 @@ def benchmark_permute(
graph.replay()
torch.cuda.synchronize()
- start_event = torch.cuda.Event(enable_timing=True)
- end_event = torch.cuda.Event(enable_timing=True)
+ start_event = torch.Event(enable_timing=True)
+ end_event = torch.Event(enable_timing=True)
latencies: list[float] = []
for i in range(num_iters):
@@ -241,8 +241,8 @@ def benchmark_unpermute(
graph.replay()
torch.cuda.synchronize()
- start_event = torch.cuda.Event(enable_timing=True)
- end_event = torch.cuda.Event(enable_timing=True)
+ start_event = torch.Event(enable_timing=True)
+ end_event = torch.Event(enable_timing=True)
latencies: list[float] = []
for i in range(num_iters):
diff --git a/benchmarks/kernels/benchmark_mrope.py b/benchmarks/kernels/benchmark_mrope.py
index cb848d2bf579e..83bd91917508f 100644
--- a/benchmarks/kernels/benchmark_mrope.py
+++ b/benchmarks/kernels/benchmark_mrope.py
@@ -6,7 +6,7 @@
#
# The CSV file (named with current date/time) contains these columns:
# model_name, tp_size, num_tokens, num_heads, num_kv_heads, head_dim, max_position,
-# rope_theta, is_neox_style, rope_scaling, dtype, torch_mean, torch_median, torch_p99,
+# is_neox_style, rope_parameters, dtype, torch_mean, torch_median, torch_p99,
# torch_min, torch_max, triton_mean, triton_median, triton_p99, triton_min, triton_max,
# speedup
#
@@ -86,9 +86,8 @@ def benchmark_mrope(
num_heads: int,
num_kv_heads: int,
max_position: int = 8192,
- rope_theta: float = 10000,
is_neox_style: bool = True,
- rope_scaling: dict[str, Any] = None,
+ rope_parameters: dict[str, Any] | None = None,
dtype: torch.dtype = torch.bfloat16,
seed: int = 0,
warmup_iter: int = 10,
@@ -102,9 +101,8 @@ def benchmark_mrope(
head_size=head_dim,
rotary_dim=head_dim,
max_position=max_position,
- base=rope_theta,
is_neox_style=is_neox_style,
- rope_scaling=rope_scaling,
+ rope_parameters=rope_parameters,
dtype=dtype,
).to(device=device)
@@ -203,9 +201,8 @@ def benchmark_mrope(
num_kv_heads,
head_dim,
max_position,
- rope_theta,
is_neox_style,
- str(rope_scaling),
+ str(rope_parameters),
str(dtype).split(".")[-1],
torch_stats["mean"],
torch_stats["median"],
@@ -255,9 +252,8 @@ if __name__ == "__main__":
"num_kv_heads",
"head_dim",
"max_position",
- "rope_theta",
"is_neox_style",
- "rope_scaling",
+ "rope_parameters",
"dtype",
"torch_mean",
"torch_median",
@@ -303,7 +299,7 @@ if __name__ == "__main__":
q_size = num_heads * head_dim
kv_size = num_kv_heads * head_dim
is_neox_style = True
- rope_theta = config.rope_theta
+ rope_parameters = config.rope_parameters
max_position = config.max_position_embeddings
for num_tokens in num_tokens_list:
@@ -315,9 +311,8 @@ if __name__ == "__main__":
num_heads=num_heads,
num_kv_heads=num_kv_heads,
max_position=max_position,
- rope_theta=rope_theta,
is_neox_style=is_neox_style,
- rope_scaling=config.rope_scaling,
+ rope_parameters=rope_parameters,
dtype=getattr(torch, args.dtype),
seed=args.seed,
warmup_iter=args.warmup_iter,
diff --git a/benchmarks/kernels/benchmark_per_token_group_quant.py b/benchmarks/kernels/benchmark_per_token_group_quant.py
index bdc1eb733084e..eba4d510258b6 100644
--- a/benchmarks/kernels/benchmark_per_token_group_quant.py
+++ b/benchmarks/kernels/benchmark_per_token_group_quant.py
@@ -30,8 +30,8 @@ def _time_cuda(
fn()
torch.cuda.synchronize()
- start = torch.cuda.Event(enable_timing=True)
- end = torch.cuda.Event(enable_timing=True)
+ start = torch.Event(enable_timing=True)
+ end = torch.Event(enable_timing=True)
start.record()
for _ in range(bench_iters):
diff --git a/benchmarks/kernels/benchmark_rope.py b/benchmarks/kernels/benchmark_rope.py
index 29ef6409bb166..074b7a440b612 100644
--- a/benchmarks/kernels/benchmark_rope.py
+++ b/benchmarks/kernels/benchmark_rope.py
@@ -1,97 +1,76 @@
# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project
-from itertools import accumulate
+import itertools
-import nvtx
import torch
-from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding, get_rope
-from vllm.platforms import current_platform
+from vllm.model_executor.layers.rotary_embedding import get_rope
+from vllm.triton_utils import triton
from vllm.utils.argparse_utils import FlexibleArgumentParser
+batch_size_range = [2**i for i in range(0, 8, 2)]
+seq_len_range = [2**i for i in range(6, 10, 1)]
+num_heads_range = [32, 48]
+configs = list(itertools.product(batch_size_range, seq_len_range, num_heads_range))
-def benchmark_rope_kernels_multi_lora(
- is_neox_style: bool,
- batch_size: int,
- seq_len: int,
- num_heads: int,
- head_size: int,
- rotary_dim: int | None,
- dtype: torch.dtype,
- seed: int,
- device: str,
- max_position: int = 8192,
- base: float = 10000,
-) -> None:
- current_platform.seed_everything(seed)
- torch.set_default_device(device)
- if rotary_dim is None:
- rotary_dim = head_size
- # silulating serving 4 LoRAs
- scaling_factors = [1, 2, 4, 8]
- # batched RoPE can take multiple scaling factors
- batched_rope = get_rope(
- head_size,
- rotary_dim,
- max_position,
- base,
- is_neox_style,
- {"rope_type": "linear", "factor": tuple(scaling_factors)},
- )
- # non-batched RoPE takes only one scaling factor, we create multiple
- # instances to simulate the same behavior
- non_batched_ropes: list[RotaryEmbedding] = []
- for scaling_factor in scaling_factors:
- non_batched_ropes.append(
- get_rope(
- head_size,
- rotary_dim,
- max_position,
- base,
- is_neox_style,
- {"rope_type": "linear", "factor": (scaling_factor,)},
- )
- )
- positions = torch.randint(0, max_position, (batch_size, seq_len))
- query = torch.randn(batch_size, seq_len, num_heads * head_size, dtype=dtype)
- key = torch.randn_like(query)
-
- # create query offsets for batched RoPE, we concat multiple kv cache
- # together and each query needs to find the right kv cache of its type
- offset_map = torch.tensor(
- list(
- accumulate(
- [0]
- + [
- max_position * scaling_factor * 2
- for scaling_factor in scaling_factors[:-1]
- ]
- )
+def get_benchmark(head_size, rotary_dim, is_neox_style, device):
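+    # Sweep (batch_size, seq_len, num_heads) and compare the native PyTorch,
+    # FlashInfer, and vLLM CUDA RoPE paths using triton's benchmarking utilities.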
+ @triton.testing.perf_report(
+ triton.testing.Benchmark(
+ x_names=["batch_size", "seq_len", "num_heads"],
+ x_vals=[list(_) for _ in configs],
+ line_arg="provider",
+ line_vals=["torch", "flashinfer", "vllm"],
+ line_names=["PyTorch", "FlashInfer", "vLLM"],
+ styles=[("blue", "-"), ("green", "-"), ("red", "-")],
+ ylabel="us",
+ plot_name=f"rope-perf{'-neox-style' if is_neox_style else ''}",
+ args={},
)
)
- query_types = torch.randint(
- 0, len(scaling_factors), (batch_size, seq_len), device=device
- )
- # map query types to offsets
- query_offsets = offset_map[query_types]
- # the kernel takes flattened offsets
- flatten_offsets = query_offsets.flatten()
+ def benchmark(batch_size, seq_len, num_heads, provider):
+ dtype = torch.bfloat16
+ max_position = 8192
+ base = 10000
+ rope = get_rope(head_size, rotary_dim, max_position, base, is_neox_style)
+ rope = rope.to(dtype=dtype, device=device)
+ cos_sin_cache = rope.cos_sin_cache.to(dtype=torch.float, device=device)
- # batched queries of the same type together for non-batched RoPE
- queries = [query[query_types == i] for i in range(len(scaling_factors))]
- keys = [key[query_types == i] for i in range(len(scaling_factors))]
- packed_qkr = zip(queries, keys, non_batched_ropes)
- # synchronize before start timing
- torch.cuda.synchronize()
- with nvtx.annotate("non-batched", color="yellow"):
- for q, k, r in packed_qkr:
- r.forward(positions, q, k)
- torch.cuda.synchronize()
- with nvtx.annotate("batched", color="green"):
- batched_rope.forward(positions, query, key, flatten_offsets)
- torch.cuda.synchronize()
+ positions = torch.randint(0, max_position, (batch_size, seq_len), device=device)
+ query = torch.randn(
+ (batch_size, seq_len, num_heads * head_size), dtype=dtype, device=device
+ )
+ key = torch.randn_like(query)
+
+ quantiles = [0.5, 0.2, 0.8]
+
+ if provider == "torch":
+ ms, min_ms, max_ms = triton.testing.do_bench(
+ lambda: rope.forward_native(positions, query.clone(), key.clone()),
+ quantiles=quantiles,
+ )
+ elif provider == "flashinfer":
+ ms, min_ms, max_ms = triton.testing.do_bench(
+ lambda: torch.ops.vllm.flashinfer_rotary_embedding(
+ positions,
+ query.clone(),
+ key.clone(),
+ head_size,
+ cos_sin_cache,
+ is_neox_style,
+ ),
+ quantiles=quantiles,
+ )
+ else:
+ ms, min_ms, max_ms = triton.testing.do_bench(
+ lambda: rope.forward_cuda(positions, query.clone(), key.clone()),
+ quantiles=quantiles,
+ )
+
+ return 1000 * ms, 1000 * max_ms, 1000 * min_ms
+
+ return benchmark
if __name__ == "__main__":
@@ -116,17 +95,12 @@ if __name__ == "__main__":
parser.add_argument(
"--device", type=str, choices=["cuda:0", "cuda:1"], default="cuda:0"
)
+ parser.add_argument("--save-path", type=str, default="./configs/rope/")
args = parser.parse_args()
- print(args)
- benchmark_rope_kernels_multi_lora(
- is_neox_style=args.is_neox_style,
- batch_size=args.batch_size,
- seq_len=args.seq_len,
- num_heads=args.num_heads,
- head_size=args.head_size,
- rotary_dim=args.rotary_dim,
- dtype=getattr(torch, args.dtype),
- seed=args.seed,
- device=args.device,
+ # Get the benchmark function
+ benchmark = get_benchmark(
+ args.head_size, args.rotary_dim, args.is_neox_style, args.device
)
+ # Run performance benchmark
+ benchmark.run(print_data=True, save_path=args.save_path)
diff --git a/benchmarks/kernels/benchmark_shapes.py b/benchmarks/kernels/benchmark_shapes.py
index 18c459c31d3f8..3e23c4cac059c 100644
--- a/benchmarks/kernels/benchmark_shapes.py
+++ b/benchmarks/kernels/benchmark_shapes.py
@@ -78,11 +78,11 @@ WEIGHT_SHAPES = {
}
WEIGHT_SHAPES_MOE = {
- "nm-testing/Mixtral-8x7B-Instruct-v0.1": [
+ "mistralai/Mixtral-8x7B-Instruct-v0.1": [
[8, 2, 4096, 28672],
[8, 2, 14336, 4096],
],
- "nm-testing/deepseekv2-lite": [
+ "deepseek-ai/DeepSeek-V2-Lite": [
[64, 6, 2048, 1408],
],
"ibm-granite/granite-3.0-1b-a400m": [
diff --git a/benchmarks/kernels/benchmark_silu_mul_fp8_quant.py b/benchmarks/kernels/benchmark_silu_mul_fp8_quant.py
index a5887aafd30d6..de01ff197eab7 100644
--- a/benchmarks/kernels/benchmark_silu_mul_fp8_quant.py
+++ b/benchmarks/kernels/benchmark_silu_mul_fp8_quant.py
@@ -253,8 +253,8 @@ def benchmark(
)
torch.cuda.synchronize()
- start_event = torch.cuda.Event(enable_timing=True)
- end_event = torch.cuda.Event(enable_timing=True)
+ start_event = torch.Event(enable_timing=True)
+ end_event = torch.Event(enable_timing=True)
# Benchmark
latencies: list[float] = []
diff --git a/benchmarks/kernels/benchmark_trtllm_decode_attention.py b/benchmarks/kernels/benchmark_trtllm_decode_attention.py
index 29ce18234dfa0..1d0d6fbb9a470 100644
--- a/benchmarks/kernels/benchmark_trtllm_decode_attention.py
+++ b/benchmarks/kernels/benchmark_trtllm_decode_attention.py
@@ -127,8 +127,8 @@ def benchmark_decode(
def time_fn(fn, warmup=10, trials=20):
torch.cuda.synchronize()
- start = torch.cuda.Event(enable_timing=True)
- end = torch.cuda.Event(enable_timing=True)
+ start = torch.Event(enable_timing=True)
+ end = torch.Event(enable_timing=True)
times = []
for i in range(warmup):
fn()
diff --git a/benchmarks/kernels/benchmark_trtllm_prefill_attention.py b/benchmarks/kernels/benchmark_trtllm_prefill_attention.py
index 2a25d03748112..84bde723abf7f 100644
--- a/benchmarks/kernels/benchmark_trtllm_prefill_attention.py
+++ b/benchmarks/kernels/benchmark_trtllm_prefill_attention.py
@@ -139,8 +139,8 @@ def benchmark_prefill(
def time_fn(fn, warmup=10, trials=20):
torch.cuda.synchronize()
- start = torch.cuda.Event(enable_timing=True)
- end = torch.cuda.Event(enable_timing=True)
+ start = torch.Event(enable_timing=True)
+ end = torch.Event(enable_timing=True)
times = []
for i in range(warmup):
fn()
diff --git a/benchmarks/kernels/benchmark_w8a8_block_fp8.py b/benchmarks/kernels/benchmark_w8a8_block_fp8.py
index ab54f81985bc2..b52500c8c5217 100644
--- a/benchmarks/kernels/benchmark_w8a8_block_fp8.py
+++ b/benchmarks/kernels/benchmark_w8a8_block_fp8.py
@@ -183,8 +183,8 @@ def benchmark_config(
run()
torch.cuda.synchronize()
- start_event = torch.cuda.Event(enable_timing=True)
- end_event = torch.cuda.Event(enable_timing=True)
+ start_event = torch.Event(enable_timing=True)
+ end_event = torch.Event(enable_timing=True)
latencies: list[float] = []
for i in range(num_iters):
diff --git a/benchmarks/kernels/deepgemm/README.md b/benchmarks/kernels/deepgemm/README.md
index 41e68e047be82..a28c6956be0e9 100644
--- a/benchmarks/kernels/deepgemm/README.md
+++ b/benchmarks/kernels/deepgemm/README.md
@@ -2,7 +2,7 @@
This directory includes benchmarks between DeepSeek's DeepGEMM block fp8 kernels against vLLM's existing triton and CUTLASS-based kernels.
-Currently this just includes dense GEMMs and only works on Hopper GPUs.
+Currently, this just includes dense GEMMs and only works on Hopper GPUs.
## Setup
diff --git a/benchmarks/multi_turn/README.md b/benchmarks/multi_turn/README.md
index f5b5c6c97d484..b0be1e3a69a66 100644
--- a/benchmarks/multi_turn/README.md
+++ b/benchmarks/multi_turn/README.md
@@ -55,6 +55,10 @@ output_num_chunks 166.0 99.01 11.80 79.00 90.00 98.00 108.75
----------------------------------------------------------------------------------------------------
```
+If you run with `--warmup-step`, the summary will also include `warmup_runtime_sec`
+and `total_runtime_incl_warmup_sec` (while `runtime_sec` continues to reflect the
+benchmark-only runtime so the reported throughput stays comparable).
+
### JSON configuration file for synthetic conversations generation
The input flag `--input-file` is used to determine the input conversations for the benchmark.
diff --git a/benchmarks/multi_turn/bench_dataset.py b/benchmarks/multi_turn/bench_dataset.py
index 2674899d1cc56..8cb8a2f386a97 100644
--- a/benchmarks/multi_turn/bench_dataset.py
+++ b/benchmarks/multi_turn/bench_dataset.py
@@ -11,6 +11,7 @@ from bench_utils import (
Color,
logger,
)
+from tqdm import tqdm
from transformers import AutoTokenizer # type: ignore
# Conversation ID is a string (e.g: "UzTK34D")
@@ -417,6 +418,10 @@ def generate_conversations(
data = file.read()
tokens_in_file = tokenizer.encode(data, add_special_tokens=False)
list_of_tokens.extend(tokens_in_file)
+ logger.info(
+ f"Loaded {len(tokens_in_file)} tokens from file {filename}, "
+ f"total tokens so far: {len(list_of_tokens)}"
+ )
conversations: ConversationsMap = {}
conv_id = 0
@@ -449,18 +454,25 @@ def generate_conversations(
)
base_offset += common_prefix_tokens
- for conv_id in range(args.num_conversations):
+ for conv_id in tqdm(
+ range(args.num_conversations),
+ total=args.num_conversations,
+ desc="Generating conversations",
+ unit="conv",
+ ):
# Generate a single conversation
messages: MessagesList = []
nturns = turn_count[conv_id]
# User prompt token count per turn (with lower limit)
- input_token_count: np.ndarray = args.input_num_tokens.sample(nturns)
+ input_token_count: np.ndarray = args.input_num_tokens.sample(nturns).astype(int)
input_token_count = np.maximum(input_token_count, base_prompt_token_count)
# Assistant answer token count per turn (with lower limit)
- output_token_count: np.ndarray = args.output_num_tokens.sample(nturns)
+ output_token_count: np.ndarray = args.output_num_tokens.sample(nturns).astype(
+ int
+ )
output_token_count = np.maximum(output_token_count, 1)
user_turn = True
diff --git a/benchmarks/multi_turn/benchmark_serving_multi_turn.py b/benchmarks/multi_turn/benchmark_serving_multi_turn.py
index 67a085b40ed35..e23f6b923f1b9 100644
--- a/benchmarks/multi_turn/benchmark_serving_multi_turn.py
+++ b/benchmarks/multi_turn/benchmark_serving_multi_turn.py
@@ -55,6 +55,7 @@ class ClientArgs(NamedTuple):
verify_output: bool
conversation_sampling: ConversationSampling
request_rate: float
+ max_retries: int
class RequestArgs(NamedTuple):
@@ -63,6 +64,7 @@ class RequestArgs(NamedTuple):
stream: bool
limit_min_tokens: int # Use negative value for no limit
limit_max_tokens: int # Use negative value for no limit
+ timeout_sec: int
class BenchmarkArgs(NamedTuple):
@@ -214,6 +216,7 @@ async def send_request(
stream: bool = True,
min_tokens: int | None = None,
max_tokens: int | None = None,
+ timeout_sec: int = 120,
) -> ServerResponse:
payload = {
"model": model,
@@ -235,10 +238,16 @@ async def send_request(
headers = {"Content-Type": "application/json"}
# Calculate the timeout for the request
- timeout_sec = 120
if max_tokens is not None:
# Assume TPOT of 200ms and use max_tokens to determine timeout
- timeout_sec = max(timeout_sec, int(max_tokens * 0.2))
+ token_based_timeout = int(max_tokens * 0.2)
+ if token_based_timeout > timeout_sec:
+ timeout_sec = token_based_timeout
+ logger.info(
+ "Using timeout of %ds based on max_tokens %d",
+ timeout_sec,
+ max_tokens,
+ )
timeout = aiohttp.ClientTimeout(total=timeout_sec)
valid_response = True
@@ -409,6 +418,7 @@ async def send_turn(
req_args.stream,
min_tokens,
max_tokens,
+ req_args.timeout_sec,
)
if response.valid is False:
@@ -518,6 +528,25 @@ async def poisson_sleep(request_rate: float, verbose: bool = False) -> None:
await asyncio.sleep(interval)
+async def exponential_backoff_sleep(
+ attempt_cnt: int,
+ base_rate: float = 1.0,
+ backoff_factor: float = 2.0,
+ jitter_fraction: float = 0.10,
+ verbose: bool = False,
+) -> None:
+ # Sleep with exponential backoff and jitter after a failed request.
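+    # Random jitter spreads retries from different clients apart so they do
+    # not all hit the server at the same instant.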
+ backoff_delay = base_rate * (backoff_factor**attempt_cnt)
+ jittered_delay = backoff_delay * (
+ 1 + np.random.uniform(-jitter_fraction, jitter_fraction)
+ )
+
+ if verbose:
+ logger.info(f"Backoff for {jittered_delay:.3f} seconds...")
+
+ await asyncio.sleep(jittered_delay)
+
+
async def client_main(
args: ClientArgs,
req_args: RequestArgs,
@@ -532,8 +561,11 @@ async def client_main(
f"{Color.CYAN}Started client {client_id}: max_num_requests={args.max_num_requests}, max_active_conversations={args.max_active_conversations}{Color.RESET}" # noqa: E501
)
- random.seed(args.seed)
- np.random.seed(args.seed)
+ # Set unique seed per client (each client runs in its own process)
+ # Add 1 to ensure no client uses the same seed as the main process
+ client_seed = args.seed + client_id + 1
+ random.seed(client_seed)
+ np.random.seed(client_seed)
# Active conversations
active_convs: ConversationsMap = {}
@@ -646,49 +678,62 @@ async def client_main(
)
time_of_last_turn[conv_id] = curr_time_sec
- success = True
- try:
- result = await send_turn(
- session,
- client_id,
- conv_id,
- messages,
- current_turn,
- tokenizer,
- req_args,
- args.print_content,
- args.verify_output,
- )
- if result is not None:
- result_queue.put(result)
- else:
- # None means that the request failed,
- # and should not be added to the statistics.
- success = False
- num_failures += 1
-
- logger.warning(
- f"{Color.YELLOW}Client {client_id} - Request rejected during conversation ID {conv_id} (turn: {current_turn}){Color.RESET}" # noqa: E501
+ success = False
+ for attempt_cnt in range(args.max_retries + 1):
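+                # One initial attempt plus up to max_retries retries, with
+                # exponential backoff between failed attempts.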
+ try:
+ exception = False
+ result = await send_turn(
+ session,
+ client_id,
+ conv_id,
+ messages,
+ current_turn,
+ tokenizer,
+ req_args,
+ args.print_content,
+ args.verify_output,
+ )
+ if result is not None:
+ result_queue.put(result)
+ success = True
+ break
+ else:
+ logger.warning(
+ f"{Color.YELLOW}Client {client_id} - Request rejected during conversation ID {conv_id} (turn: {current_turn}){Color.RESET}" # noqa: E501
+ )
+ except asyncio.exceptions.TimeoutError:
+ exception = True
+ logger.error(
+ "%sClient %d - Timeout during conversation ID %s (turn: %d). "
+ "Base timeout is %ss (set with --request-timeout-sec), but the "
+ "effective timeout may be longer based on max_tokens. If this "
+ "is unexpected, consider increasing the timeout or checking "
+ "model performance.%s",
+ Color.RED,
+ client_id,
+ conv_id,
+ current_turn,
+ req_args.timeout_sec,
+ Color.RESET,
+ )
+ except Exception:
+ exception = True
+ logger.exception(
+ f"{Color.RED}Client {client_id} - Exception during conversation ID {conv_id} (turn: {current_turn}){Color.RESET}" # noqa: E501
)
- # Remove the conversation (should not be used again)
- active_convs.pop(conv_id)
+ # Sleep before retry if not last attempt
+ if not success and attempt_cnt < args.max_retries:
+ await exponential_backoff_sleep(attempt_cnt, verbose=args.verbose)
- except asyncio.exceptions.TimeoutError:
+ if not success:
num_failures += 1
- logger.exception(
- f"{Color.RED}Client {client_id} - Timeout during conversation ID {conv_id} (turn: {current_turn}){Color.RESET}" # noqa: E501
- )
- break # Exit gracefully instead of raising an error
+ # Remove the conversation (should not be used again)
+ active_convs.pop(conv_id)
+ if exception:
+ break # Exit gracefully instead of raising an error
- except Exception:
- num_failures += 1
- logger.exception(
- f"{Color.RED}Client {client_id} - Exception during conversation ID {conv_id} (turn: {current_turn}){Color.RESET}" # noqa: E501
- )
- break # Exit gracefully instead of raising an error
-
- if success:
+ else:
num_successes += 1
# Update the turns counter to include the LLM response
@@ -803,6 +848,7 @@ def get_client_config(
verify_output=args.verify_output,
conversation_sampling=args.conversation_sampling,
request_rate=args.request_rate,
+ max_retries=args.max_retries,
)
if args.limit_min_tokens > 0 or args.limit_max_tokens > 0:
@@ -815,6 +861,9 @@ def get_client_config(
"Invalid min/max tokens limits (min should not be larger than max)"
)
+ if args.request_timeout_sec <= 0:
+ raise ValueError("Request timeout must be a positive number")
+
# Arguments for API requests
chat_url = f"{args.url}/v1/chat/completions"
model_name = args.served_model_name if args.served_model_name else args.model
@@ -825,6 +874,7 @@ def get_client_config(
stream=not args.no_stream,
limit_min_tokens=args.limit_min_tokens,
limit_max_tokens=args.limit_max_tokens,
+ timeout_sec=args.request_timeout_sec,
)
return client_args, req_args
@@ -968,7 +1018,7 @@ async def main_mp(
f"(is alive: {client.is_alive()}){Color.RESET}"
)
- client.join(timeout=120)
+ client.join(timeout=req_args.timeout_sec + 1)
if client.is_alive():
logger.warning(
@@ -1026,6 +1076,7 @@ def process_statistics(
verbose: bool,
gen_conv_args: GenConvArgs | None = None,
excel_output: bool = False,
+ warmup_runtime_sec: float | None = None,
) -> None:
if len(client_metrics) == 0:
logger.info("No samples to process")
@@ -1119,8 +1170,13 @@ def process_statistics(
# Convert milliseconds to seconds
runtime_sec = runtime_sec / 1000.0
requests_per_sec = float(len(df)) / runtime_sec
-
- params = {"runtime_sec": runtime_sec, "requests_per_sec": requests_per_sec}
+ params = {
+ "runtime_sec": runtime_sec,
+ "requests_per_sec": requests_per_sec,
+ }
+ if warmup_runtime_sec is not None:
+ params["warmup_runtime_sec"] = warmup_runtime_sec
+ params["total_runtime_incl_warmup_sec"] = runtime_sec + warmup_runtime_sec
# Generate a summary of relevant metrics (and drop irrelevant data)
df = df.drop(columns=exclude).describe(percentiles=percentiles).transpose()
@@ -1334,6 +1390,16 @@ async def main() -> None:
help="Expected request rate (Poisson process) per client in requests/sec."
"Set to 0 for no delay between requests.",
)
+ parser.add_argument(
+ "--max-retries",
+ type=int,
+ default=int(os.environ.get("MULTITURN_BENCH_MAX_RETRIES", "0")),
+        help="Maximum number of retry attempts for failed requests "
+        "(timeouts, rejections, and other errors). "
+ "Default is 0 (no retries). "
+ "Set to higher values to retry failed requests and maintain "
+ "fair workload distribution. "
+ "Can also be set via MULTITURN_BENCH_MAX_RETRIES environment variable.",
+ )
parser.add_argument(
"--conversation-sampling",
type=ConversationSampling,
@@ -1351,6 +1417,13 @@ async def main() -> None:
action="store_true",
help="Verify the LLM output (compare to the answers in the input JSON file)",
)
+ parser.add_argument(
+ "--request-timeout-sec",
+ type=int,
+ default=120,
+ help="Timeout in seconds for each API request (default: 120). "
+ "Automatically increased if max tokens imply longer decoding.",
+ )
parser.add_argument(
"--no-stream",
@@ -1426,11 +1499,10 @@ async def main() -> None:
f"Invalid --warmup-percentage={args.warmup_percentage}"
) from None
+ # Set global seeds for main process
random.seed(args.seed)
np.random.seed(args.seed)
- if not os.path.exists(args.model):
- raise OSError(f"Path does not exist: {args.model}")
logger.info("Loading tokenizer")
tokenizer = AutoTokenizer.from_pretrained(args.model)
@@ -1486,6 +1558,8 @@ async def main() -> None:
url=args.url, num_clients=args.num_clients, early_stop=not args.no_early_stop
)
+ warmup_runtime_sec: float | None = None
+
# Warm-up step
if args.warmup_step:
# Only send a single user prompt from every conversation.
@@ -1500,26 +1574,56 @@ async def main() -> None:
# all clients should finish their work before exiting
warmup_bench_args = bench_args._replace(early_stop=False)
- logger.info(f"{Color.PURPLE}Warmup start{Color.RESET}")
+ logger.info("%sWarmup start%s", Color.PURPLE, Color.RESET)
+ warmup_start_ns = time.perf_counter_ns()
conversations, _ = await main_mp(
warmup_client_args, req_args, warmup_bench_args, tokenizer, conversations
)
- logger.info(f"{Color.PURPLE}Warmup done{Color.RESET}")
+ warmup_runtime_sec = nanosec_to_sec(time.perf_counter_ns() - warmup_start_ns)
+ logger.info(
+ "%sWarmup runtime: %.3f sec (%.3f ms)%s",
+ Color.PURPLE,
+ warmup_runtime_sec,
+ warmup_runtime_sec * 1000,
+ Color.RESET,
+ )
+ logger.info("%sWarmup done%s", Color.PURPLE, Color.RESET)
# Run the benchmark
- start_time = time.perf_counter_ns()
+ benchmark_start_ns = time.perf_counter_ns()
client_convs, client_metrics = await main_mp(
client_args, req_args, bench_args, tokenizer, conversations
)
- total_runtime_ms = nanosec_to_millisec(time.perf_counter_ns() - start_time)
+ benchmark_runtime_sec = nanosec_to_sec(time.perf_counter_ns() - benchmark_start_ns)
# Calculate requests per second
- total_runtime_sec = total_runtime_ms / 1000.0
- rps = len(client_metrics) / total_runtime_sec
+ requests_per_sec = len(client_metrics) / benchmark_runtime_sec
+ benchmark_runtime_ms = benchmark_runtime_sec * 1000.0
logger.info(
- f"{Color.GREEN}All clients finished, total runtime: {total_runtime_sec:.3f} sec"
- f" ({total_runtime_ms:.3f} ms), requests per second: {rps:.3f}{Color.RESET}"
+ "%sAll clients finished, benchmark runtime: %.3f sec (%.3f ms), "
+ "requests per second: %.3f%s",
+ Color.GREEN,
+ benchmark_runtime_sec,
+ benchmark_runtime_ms,
+ requests_per_sec,
+ Color.RESET,
)
+ if warmup_runtime_sec is not None:
+ total_runtime_sec = benchmark_runtime_sec + warmup_runtime_sec
+ logger.info(
+ "%sWarmup runtime: %.3f sec (%.3f ms)%s",
+ Color.GREEN,
+ warmup_runtime_sec,
+ warmup_runtime_sec * 1000,
+ Color.RESET,
+ )
+ logger.info(
+ "%sTotal runtime (including warmup): %.3f sec (%.3f ms)%s",
+ Color.GREEN,
+ total_runtime_sec,
+ total_runtime_sec * 1000,
+ Color.RESET,
+ )
# Benchmark parameters
params = {
@@ -1544,6 +1648,7 @@ async def main() -> None:
verbose=args.verbose,
gen_conv_args=gen_conv_args,
excel_output=args.excel_output,
+ warmup_runtime_sec=warmup_runtime_sec,
)
if args.output_file is not None:
diff --git a/benchmarks/multi_turn/requirements.txt b/benchmarks/multi_turn/requirements.txt
index f0e1935914a14..bae656a5c5c4b 100644
--- a/benchmarks/multi_turn/requirements.txt
+++ b/benchmarks/multi_turn/requirements.txt
@@ -2,4 +2,5 @@ numpy>=1.24
pandas>=2.0.0
aiohttp>=3.10
transformers>=4.46
-xlsxwriter>=3.2.1
\ No newline at end of file
+xlsxwriter>=3.2.1
+tqdm>=4.66
diff --git a/cmake/cpu_extension.cmake b/cmake/cpu_extension.cmake
index 192d349b30099..fbbb03c5ed465 100644
--- a/cmake/cpu_extension.cmake
+++ b/cmake/cpu_extension.cmake
@@ -15,6 +15,7 @@ endif()
#
set(ENABLE_AVX512BF16 $ENV{VLLM_CPU_AVX512BF16})
set(ENABLE_AVX512VNNI $ENV{VLLM_CPU_AVX512VNNI})
+set(ENABLE_AMXBF16 $ENV{VLLM_CPU_AMXBF16})
include_directories("${CMAKE_SOURCE_DIR}/csrc")
@@ -140,6 +141,22 @@ if (AVX512_FOUND AND NOT AVX512_DISABLED)
set(ENABLE_AVX512VNNI OFF)
message(WARNING "Disable AVX512-VNNI ISA support, no avx512_vnni found in local CPU flags." " If cross-compilation is required, please set env VLLM_CPU_AVX512VNNI=1.")
endif()
+
+ find_isa(${CPUINFO} "amx_bf16" AMXBF16_FOUND)
+ if (AMXBF16_FOUND OR ENABLE_AMXBF16)
+ if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND
+ CMAKE_CXX_COMPILER_VERSION VERSION_GREATER_EQUAL 12.3)
+ list(APPEND CXX_COMPILE_FLAGS "-mamx-bf16" "-mamx-tile")
+ set(ENABLE_AMXBF16 ON)
+ add_compile_definitions(-DCPU_CAPABILITY_AMXBF16)
+ else()
+ set(ENABLE_AMXBF16 OFF)
+ message(WARNING "Disable AMX_BF16 ISA support, requires gcc/g++ >= 12.3")
+ endif()
+ else()
+ set(ENABLE_AMXBF16 OFF)
+ message(WARNING "Disable AMX_BF16 ISA support, no amx_bf16 found in local CPU flags." " If cross-compilation is required, please set env VLLM_CPU_AMXBF16=1.")
+ endif()
elseif (AVX2_FOUND)
list(APPEND CXX_COMPILE_FLAGS "-mavx2")
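The new VLLM_CPU_AMXBF16 switch mirrors the existing AVX512 overrides: it force-enables AMX-BF16 code generation when cross-compiling on a host whose CPU flags lack amx_bf16 (gcc/g++ >= 12.3 required). A hedged example of driving it from Python, assuming the usual VLLM_TARGET_DEVICE=cpu editable-install flow:

    import os
    import subprocess

    # Force-enable AMX-BF16 codegen for a CPU cross-build; the compiler must
    # still be gcc/g++ >= 12.3, otherwise CMake falls back with a warning.
    env = dict(os.environ, VLLM_CPU_AMXBF16="1", VLLM_TARGET_DEVICE="cpu")
    subprocess.run(["pip", "install", "-e", "."], env=env, check=True)
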
@@ -193,7 +210,30 @@ endif()
if ((AVX512_FOUND AND NOT AVX512_DISABLED) OR (ASIMD_FOUND AND NOT APPLE_SILICON_FOUND) OR POWER9_FOUND OR POWER10_FOUND OR POWER11_FOUND)
# Fetch and build Arm Compute Library (ACL) as oneDNN's backend for AArch64
# TODO [fadara01]: remove this once ACL can be fetched and built automatically as a dependency of oneDNN
+ set(ONEDNN_AARCH64_USE_ACL OFF CACHE BOOL "")
if(ASIMD_FOUND)
+ # Set number of parallel build processes
+ include(ProcessorCount)
+ ProcessorCount(NPROC)
+ if(NOT NPROC)
+ set(NPROC 4)
+ endif()
+ # locate PyTorch's libgomp (e.g. site-packages/torch.libs/libgomp-947d5fa1.so.1.0.0)
+ # and create a local shim dir with it
+ vllm_prepare_torch_gomp_shim(VLLM_TORCH_GOMP_SHIM_DIR)
+
+ find_library(OPEN_MP
+ NAMES gomp
+ PATHS ${VLLM_TORCH_GOMP_SHIM_DIR}
+ NO_DEFAULT_PATH
+ REQUIRED
+ )
+ # Set LD_LIBRARY_PATH to include the shim dir at build time to use the same libgomp as PyTorch
+ if (OPEN_MP)
+ set(ENV{LD_LIBRARY_PATH} "${VLLM_TORCH_GOMP_SHIM_DIR}:$ENV{LD_LIBRARY_PATH}")
+ endif()
+
+ # Fetch and populate ACL
if(DEFINED ENV{ACL_ROOT_DIR} AND IS_DIRECTORY "$ENV{ACL_ROOT_DIR}")
message(STATUS "Using ACL from specified source directory: $ENV{ACL_ROOT_DIR}")
else()
@@ -202,43 +242,58 @@ if ((AVX512_FOUND AND NOT AVX512_DISABLED) OR (ASIMD_FOUND AND NOT APPLE_SILICON
SUBBUILD_DIR "${FETCHCONTENT_BASE_DIR}/arm_compute-subbuild"
SOURCE_DIR "${FETCHCONTENT_BASE_DIR}/arm_compute-src"
GIT_REPOSITORY https://github.com/ARM-software/ComputeLibrary.git
- GIT_TAG v52.2.0
+ GIT_TAG v52.6.0
GIT_SHALLOW TRUE
GIT_PROGRESS TRUE
)
set(ENV{ACL_ROOT_DIR} "${arm_compute_SOURCE_DIR}")
+ set(ACL_LIB_DIR "$ENV{ACL_ROOT_DIR}/build")
endif()
- # Build ACL with scons
- include(ProcessorCount)
- ProcessorCount(_NPROC)
- set(_scons_cmd
- scons -j${_NPROC}
- Werror=0 debug=0 neon=1 examples=0 embed_kernels=0 os=linux
- arch=armv8.2-a build=native benchmark_examples=0 fixed_format_kernels=1
- multi_isa=1 openmp=1 cppthreads=0
+ # Build ACL with CMake
+ set(ARM_COMPUTE_BUILD_SHARED_LIB "OFF")
+ set(CMAKE_BUILD_TYPE "Release")
+ set(ARM_COMPUTE_ARCH "armv8.2-a")
+ set(ARM_COMPUTE_ENABLE_ASSERTS "OFF")
+ set(ARM_COMPUTE_ENABLE_CPPTHREADS "OFF")
+ set(ONEDNN_ENABLE_PRIMITIVE "MATMUL;REORDER")
+ set(ARM_COMPUTE_ENABLE_OPENMP "ON")
+ set(ARM_COMPUTE_ENABLE_WERROR "OFF")
+ set(ARM_COMPUTE_BUILD_EXAMPLES "OFF")
+ set(ARM_COMPUTE_BUILD_TESTING "OFF")
+
+ set(_cmake_config_cmd
+ ${CMAKE_COMMAND} -G Ninja -B build
+ -DARM_COMPUTE_BUILD_SHARED_LIB=OFF
+ -DCMAKE_BUILD_TYPE=Release
+ -DARM_COMPUTE_ARCH=armv8.2-a
+ -DARM_COMPUTE_ENABLE_ASSERTS=OFF
+ -DARM_COMPUTE_ENABLE_CPPTHREADS=OFF
+ -DARM_COMPUTE_ENABLE_OPENMP=ON
+ -DARM_COMPUTE_ENABLE_WERROR=OFF
+ -DARM_COMPUTE_BUILD_EXAMPLES=OFF
+ -DARM_COMPUTE_BUILD_TESTING=OFF)
+ set(_cmake_build_cmd
+ ${CMAKE_COMMAND} --build build -- -j${NPROC}
)
- # locate PyTorch's libgomp (e.g. site-packages/torch.libs/libgomp-947d5fa1.so.1.0.0)
- # and create a local shim dir with it
- include("${CMAKE_CURRENT_LIST_DIR}/utils.cmake")
- vllm_prepare_torch_gomp_shim(VLLM_TORCH_GOMP_SHIM_DIR)
-
- if(NOT VLLM_TORCH_GOMP_SHIM_DIR STREQUAL "")
- list(APPEND _scons_cmd extra_link_flags=-L${VLLM_TORCH_GOMP_SHIM_DIR})
- endif()
-
execute_process(
- COMMAND ${_scons_cmd}
+ COMMAND ${_cmake_config_cmd}
+ WORKING_DIRECTORY "$ENV{ACL_ROOT_DIR}"
+ )
+ execute_process(
+ COMMAND ${_cmake_build_cmd}
WORKING_DIRECTORY "$ENV{ACL_ROOT_DIR}"
RESULT_VARIABLE _acl_rc
)
+
if(NOT _acl_rc EQUAL 0)
message(FATAL_ERROR "ACL SCons build failed (exit ${_acl_rc}).")
endif()
+ message(STATUS "Arm Compute Library (ACL) built successfully.")
- set(ONEDNN_AARCH64_USE_ACL "ON")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wl,-rpath,$ENV{ACL_ROOT_DIR}/build/")
+ # VLLM/oneDNN settings for ACL
+ set(ONEDNN_AARCH64_USE_ACL ON CACHE BOOL "" FORCE)
add_compile_definitions(VLLM_USE_ACL)
endif()
@@ -255,7 +310,7 @@ if ((AVX512_FOUND AND NOT AVX512_DISABLED) OR (ASIMD_FOUND AND NOT APPLE_SILICON
FetchContent_Declare(
oneDNN
GIT_REPOSITORY https://github.com/oneapi-src/oneDNN.git
- GIT_TAG v3.9
+ GIT_TAG v3.10
GIT_PROGRESS TRUE
GIT_SHALLOW TRUE
)
@@ -275,7 +330,10 @@ if ((AVX512_FOUND AND NOT AVX512_DISABLED) OR (ASIMD_FOUND AND NOT APPLE_SILICON
set(ONEDNN_VERBOSE "OFF")
set(CMAKE_POLICY_DEFAULT_CMP0077 NEW)
+ set(VLLM_BUILD_TYPE ${CMAKE_BUILD_TYPE})
+ set(CMAKE_BUILD_TYPE "Release") # remove oneDNN debug symbols to reduce size
FetchContent_MakeAvailable(oneDNN)
+ set(CMAKE_BUILD_TYPE ${VLLM_BUILD_TYPE})
add_library(dnnl_ext OBJECT "csrc/cpu/dnnl_helper.cpp")
target_include_directories(
dnnl_ext
@@ -305,18 +363,19 @@ endif()
#
set(VLLM_EXT_SRC
"csrc/cpu/activation.cpp"
- "csrc/cpu/attention.cpp"
- "csrc/cpu/cache.cpp"
"csrc/cpu/utils.cpp"
"csrc/cpu/layernorm.cpp"
"csrc/cpu/mla_decode.cpp"
"csrc/cpu/pos_encoding.cpp"
- "csrc/cpu/torch_bindings.cpp"
- "csrc/moe/dynamic_4bit_int_moe_cpu.cpp")
+ "csrc/moe/dynamic_4bit_int_moe_cpu.cpp"
+ "csrc/cpu/cpu_attn.cpp"
+ "csrc/cpu/scratchpad_manager.cpp"
+ "csrc/cpu/torch_bindings.cpp")
if (AVX512_FOUND AND NOT AVX512_DISABLED)
set(VLLM_EXT_SRC
"csrc/cpu/shm.cpp"
+ "csrc/cpu/cpu_wna16.cpp"
${VLLM_EXT_SRC})
if (ENABLE_AVX512BF16 AND ENABLE_AVX512VNNI)
set(VLLM_EXT_SRC
@@ -343,7 +402,7 @@ message(STATUS "CPU extension source files: ${VLLM_EXT_SRC}")
# Define extension targets
#
-define_gpu_extension_target(
+define_extension_target(
_C
DESTINATION vllm
LANGUAGE CXX
@@ -354,4 +413,4 @@ define_gpu_extension_target(
WITH_SOABI
)
-message(STATUS "Enabling C extension.")
\ No newline at end of file
+message(STATUS "Enabling C extension.")
diff --git a/cmake/external_projects/flashmla.cmake b/cmake/external_projects/flashmla.cmake
index f661084ec48ae..2cf3c1a755d3c 100644
--- a/cmake/external_projects/flashmla.cmake
+++ b/cmake/external_projects/flashmla.cmake
@@ -92,7 +92,7 @@ if(FLASH_MLA_ARCHS)
SRCS "${FlashMLA_Extension_SOURCES}"
CUDA_ARCHS "${FLASH_MLA_ARCHS}")
- define_gpu_extension_target(
+ define_extension_target(
_flashmla_C
DESTINATION vllm
LANGUAGE ${VLLM_GPU_LANG}
@@ -109,7 +109,7 @@ if(FLASH_MLA_ARCHS)
$<$:-UPy_LIMITED_API>
$<$:-UPy_LIMITED_API>)
- define_gpu_extension_target(
+ define_extension_target(
_flashmla_extension_C
DESTINATION vllm
LANGUAGE ${VLLM_GPU_LANG}
diff --git a/cmake/external_projects/triton_kernels.cmake b/cmake/external_projects/triton_kernels.cmake
new file mode 100644
index 0000000000000..d35ad123dd9de
--- /dev/null
+++ b/cmake/external_projects/triton_kernels.cmake
@@ -0,0 +1,53 @@
+# Install OpenAI triton_kernels from https://github.com/triton-lang/triton/tree/main/python/triton_kernels
+
+set(DEFAULT_TRITON_KERNELS_TAG "v3.5.0")
+
+# Set TRITON_KERNELS_SRC_DIR for use with local development with vLLM. We expect TRITON_KERNELS_SRC_DIR to
+# be directly set to the triton_kernels python directory.
+if (DEFINED ENV{TRITON_KERNELS_SRC_DIR})
+ message(STATUS "[triton_kernels] Fetch from $ENV{TRITON_KERNELS_SRC_DIR}")
+ FetchContent_Declare(
+ triton_kernels
+ SOURCE_DIR $ENV{TRITON_KERNELS_SRC_DIR}
+ )
+
+else()
+ set(TRITON_GIT "https://github.com/triton-lang/triton.git")
+ message (STATUS "[triton_kernels] Fetch from ${TRITON_GIT}:${DEFAULT_TRITON_KERNELS_TAG}")
+ FetchContent_Declare(
+ triton_kernels
+ # TODO (varun) : Fetch just the triton_kernels directory from Triton
+ GIT_REPOSITORY https://github.com/triton-lang/triton.git
+ GIT_TAG ${DEFAULT_TRITON_KERNELS_TAG}
+ GIT_PROGRESS TRUE
+ SOURCE_SUBDIR python/triton_kernels/triton_kernels
+ )
+endif()
+
+# Fetch content
+FetchContent_MakeAvailable(triton_kernels)
+
+if (NOT triton_kernels_SOURCE_DIR)
+ message (FATAL_ERROR "[triton_kernels] Cannot resolve triton_kernels_SOURCE_DIR")
+endif()
+
+if (DEFINED ENV{TRITON_KERNELS_SRC_DIR})
+ set(TRITON_KERNELS_PYTHON_DIR "${triton_kernels_SOURCE_DIR}/")
+else()
+ set(TRITON_KERNELS_PYTHON_DIR "${triton_kernels_SOURCE_DIR}/python/triton_kernels/triton_kernels/")
+endif()
+
+message (STATUS "[triton_kernels] triton_kernels is available at ${TRITON_KERNELS_PYTHON_DIR}")
+
+add_custom_target(triton_kernels)
+
+# Ensure the vllm/third_party directory exists before installation
+install(CODE "file(MAKE_DIRECTORY \"\${CMAKE_INSTALL_PREFIX}/vllm/third_party/triton_kernels\")")
+
+## Copy .py files to install directory.
+install(DIRECTORY
+ ${TRITON_KERNELS_PYTHON_DIR}
+ DESTINATION
+ vllm/third_party/triton_kernels/
+ COMPONENT triton_kernels
+ FILES_MATCHING PATTERN "*.py")
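For local development the file honors TRITON_KERNELS_SRC_DIR, which must point directly at the triton_kernels package directory inside a Triton checkout. A hedged sketch of wiring that up before a build (the path is a placeholder):

    import os
    import subprocess

    # Point the CMake fetch at a local Triton checkout instead of the pinned tag.
    os.environ["TRITON_KERNELS_SRC_DIR"] = (
        "/path/to/triton/python/triton_kernels/triton_kernels"
    )
    subprocess.run(["pip", "install", "-e", "."], check=True)
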
diff --git a/cmake/external_projects/vllm_flash_attn.cmake b/cmake/external_projects/vllm_flash_attn.cmake
index 931090db50e92..ff687e0af7b44 100644
--- a/cmake/external_projects/vllm_flash_attn.cmake
+++ b/cmake/external_projects/vllm_flash_attn.cmake
@@ -38,7 +38,7 @@ else()
FetchContent_Declare(
vllm-flash-attn
GIT_REPOSITORY https://github.com/vllm-project/flash-attention.git
- GIT_TAG a893712401d70362fbb299cd9c4b3476e8e9ed54
+ GIT_TAG 86f8f157cf82aa2342743752b97788922dd7de43
GIT_PROGRESS TRUE
# Don't share the vllm-flash-attn build between build types
BINARY_DIR ${CMAKE_BINARY_DIR}/vllm-flash-attn
diff --git a/cmake/utils.cmake b/cmake/utils.cmake
index c2181d4549236..5047c354ff7d2 100644
--- a/cmake/utils.cmake
+++ b/cmake/utils.cmake
@@ -453,21 +453,20 @@ macro(override_gpu_arches GPU_ARCHES GPU_LANG GPU_SUPPORTED_ARCHES)
endmacro()
#
-# Define a target named `GPU_MOD_NAME` for a single extension. The
+# Define a target named `MOD_NAME` for a single extension. The
# arguments are:
#
# DESTINATION - Module destination directory.
-# LANGUAGE - The GPU language for this module, e.g CUDA, HIP,
-# etc.
+# LANGUAGE - The language for this module, e.g. CUDA, HIP,
+# CXX, etc.
# SOURCES - List of source files relative to CMakeLists.txt
# directory.
#
# Optional arguments:
#
-# ARCHITECTURES - A list of target GPU architectures in cmake
-# format.
-# Refer `CMAKE_CUDA_ARCHITECTURES` documentation
-# and `CMAKE_HIP_ARCHITECTURES` for more info.
+# ARCHITECTURES - A list of target architectures in cmake format.
+# For GPU, refer to CMAKE_CUDA_ARCHITECTURES and
+# CMAKE_HIP_ARCHITECTURES for more info.
# ARCHITECTURES will use cmake's defaults if
# not provided.
# COMPILE_FLAGS - Extra compiler flags passed to NVCC/hip.
@@ -478,63 +477,67 @@ endmacro()
#
# Note: optimization level/debug info is set via cmake build type.
#
-function (define_gpu_extension_target GPU_MOD_NAME)
+function (define_extension_target MOD_NAME)
cmake_parse_arguments(PARSE_ARGV 1
- GPU
+ ARG
"WITH_SOABI"
"DESTINATION;LANGUAGE;USE_SABI"
"SOURCES;ARCHITECTURES;COMPILE_FLAGS;INCLUDE_DIRECTORIES;LIBRARIES")
# Add hipify preprocessing step when building with HIP/ROCm.
- if (GPU_LANGUAGE STREQUAL "HIP")
- hipify_sources_target(GPU_SOURCES ${GPU_MOD_NAME} "${GPU_SOURCES}")
+ if (ARG_LANGUAGE STREQUAL "HIP")
+ hipify_sources_target(ARG_SOURCES ${MOD_NAME} "${ARG_SOURCES}")
endif()
- if (GPU_WITH_SOABI)
- set(GPU_WITH_SOABI WITH_SOABI)
+ if (ARG_WITH_SOABI)
+ set(SOABI_KEYWORD WITH_SOABI)
else()
- set(GPU_WITH_SOABI)
+ set(SOABI_KEYWORD "")
endif()
- if (GPU_USE_SABI)
- Python_add_library(${GPU_MOD_NAME} MODULE USE_SABI ${GPU_USE_SABI} ${GPU_WITH_SOABI} "${GPU_SOURCES}")
+ run_python(IS_FREETHREADED_PYTHON
+ "import sysconfig; print(1 if sysconfig.get_config_var(\"Py_GIL_DISABLED\") else 0)"
+ "Failed to determine whether interpreter is free-threaded")
+
+ # Free-threaded Python doesn't yet support the stable ABI (see PEP 803/809),
+ # so avoid using the stable ABI under free-threading only.
+ if (ARG_USE_SABI AND NOT IS_FREETHREADED_PYTHON)
+ Python_add_library(${MOD_NAME} MODULE USE_SABI ${ARG_USE_SABI} ${SOABI_KEYWORD} "${ARG_SOURCES}")
else()
- Python_add_library(${GPU_MOD_NAME} MODULE ${GPU_WITH_SOABI} "${GPU_SOURCES}")
+ Python_add_library(${MOD_NAME} MODULE ${SOABI_KEYWORD} "${ARG_SOURCES}")
endif()
- if (GPU_LANGUAGE STREQUAL "HIP")
+ if (ARG_LANGUAGE STREQUAL "HIP")
# Make this target dependent on the hipify preprocessor step.
- add_dependencies(${GPU_MOD_NAME} hipify${GPU_MOD_NAME})
+ add_dependencies(${MOD_NAME} hipify${MOD_NAME})
# Make sure we include the hipified versions of the headers, and avoid conflicts with the ones in the original source folder
- target_include_directories(${GPU_MOD_NAME} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/csrc
- ${GPU_INCLUDE_DIRECTORIES})
+ target_include_directories(${MOD_NAME} PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/csrc
+ ${ARG_INCLUDE_DIRECTORIES})
else()
- target_include_directories(${GPU_MOD_NAME} PRIVATE csrc
- ${GPU_INCLUDE_DIRECTORIES})
+ target_include_directories(${MOD_NAME} PRIVATE csrc
+ ${ARG_INCLUDE_DIRECTORIES})
endif()
- if (GPU_ARCHITECTURES)
- set_target_properties(${GPU_MOD_NAME} PROPERTIES
- ${GPU_LANGUAGE}_ARCHITECTURES "${GPU_ARCHITECTURES}")
+ if (ARG_ARCHITECTURES)
+ set_target_properties(${MOD_NAME} PROPERTIES
+ ${ARG_LANGUAGE}_ARCHITECTURES "${ARG_ARCHITECTURES}")
endif()
+  target_compile_options(${MOD_NAME} PRIVATE
+    $<$<COMPILE_LANGUAGE:${ARG_LANGUAGE}>:${ARG_COMPILE_FLAGS}>)
-  target_compile_options(${GPU_MOD_NAME} PRIVATE
-    $<$<COMPILE_LANGUAGE:${GPU_LANGUAGE}>:${GPU_COMPILE_FLAGS}>)
+ target_compile_definitions(${MOD_NAME} PRIVATE
+ "-DTORCH_EXTENSION_NAME=${MOD_NAME}")
- target_compile_definitions(${GPU_MOD_NAME} PRIVATE
- "-DTORCH_EXTENSION_NAME=${GPU_MOD_NAME}")
-
-
- target_link_libraries(${GPU_MOD_NAME} PRIVATE torch ${GPU_LIBRARIES})
+ target_link_libraries(${MOD_NAME} PRIVATE torch ${ARG_LIBRARIES})
# Don't use `TORCH_LIBRARIES` for CUDA since it pulls in a bunch of
# dependencies that are not necessary and may not be installed.
- if (GPU_LANGUAGE STREQUAL "CUDA")
- target_link_libraries(${GPU_MOD_NAME} PRIVATE CUDA::cudart CUDA::cuda_driver)
+ if (ARG_LANGUAGE STREQUAL "CUDA")
+ target_link_libraries(${MOD_NAME} PRIVATE torch CUDA::cudart CUDA::cuda_driver ${ARG_LIBRARIES})
else()
- target_link_libraries(${GPU_MOD_NAME} PRIVATE ${TORCH_LIBRARIES})
+ target_link_libraries(${MOD_NAME} PRIVATE torch ${TORCH_LIBRARIES} ${ARG_LIBRARIES})
endif()
- install(TARGETS ${GPU_MOD_NAME} LIBRARY DESTINATION ${GPU_DESTINATION} COMPONENT ${GPU_MOD_NAME})
+ install(TARGETS ${MOD_NAME} LIBRARY DESTINATION ${ARG_DESTINATION} COMPONENT ${MOD_NAME})
endfunction()
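The run_python probe above asks the build interpreter whether it was compiled with the GIL disabled, and the stable ABI is skipped in that case. The same check, runnable directly in Python:

    import sysconfig

    # Prints 1 on a free-threaded (no-GIL) CPython build, 0 otherwise
    # (the config var is simply absent on builds that predate free-threading).
    print(1 if sysconfig.get_config_var("Py_GIL_DISABLED") else 0)
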
diff --git a/csrc/attention/merge_attn_states.cu b/csrc/attention/merge_attn_states.cu
index 6bee9e4ce1166..27d1e990c611e 100644
--- a/csrc/attention/merge_attn_states.cu
+++ b/csrc/attention/merge_attn_states.cu
@@ -16,7 +16,8 @@ __global__ void merge_attn_states_kernel(
scalar_t* output, float* output_lse, const scalar_t* prefix_output,
const float* prefix_lse, const scalar_t* suffix_output,
const float* suffix_lse, const uint num_tokens, const uint num_heads,
- const uint head_size) {
+ const uint head_size, const uint prefix_head_stride,
+ const uint output_head_stride) {
using pack_128b_t = uint4;
const uint pack_size = 16 / sizeof(scalar_t);
const uint threads_per_head = head_size / pack_size;
@@ -34,11 +35,13 @@ __global__ void merge_attn_states_kernel(
const uint head_idx = token_head_idx % num_heads;
const uint pack_offset = pack_idx * pack_size; // (0~15)*8, etc.
- const uint head_offset =
- token_idx * num_heads * head_size + head_idx * head_size;
- const scalar_t* prefix_head_ptr = prefix_output + head_offset;
- const scalar_t* suffix_head_ptr = suffix_output + head_offset;
- scalar_t* output_head_ptr = output + head_offset;
+ const uint src_head_offset = token_idx * num_heads * prefix_head_stride +
+ head_idx * prefix_head_stride;
+ const uint dst_head_offset = token_idx * num_heads * output_head_stride +
+ head_idx * output_head_stride;
+ const scalar_t* prefix_head_ptr = prefix_output + src_head_offset;
+ const scalar_t* suffix_head_ptr = suffix_output + src_head_offset;
+ scalar_t* output_head_ptr = output + dst_head_offset;
float p_lse = prefix_lse[head_idx * num_tokens + token_idx];
float s_lse = suffix_lse[head_idx * num_tokens + token_idx];
@@ -46,6 +49,32 @@ __global__ void merge_attn_states_kernel(
   s_lse = std::isinf(s_lse) ? -std::numeric_limits<float>::infinity() : s_lse;
const float max_lse = fmaxf(p_lse, s_lse);
+
+  /* In certain edge cases, MLA can produce p_lse = s_lse = -inf; letting the
+     pipeline continue then yields NaN. Root cause: with chunked prefill a
+     batch may be split into two chunks; if a request in that batch has no
+     prefix-cache hit, every LSE entry for that request's positions is -inf,
+     yet this cross-attention merge still runs first. For now we simply emit
+     prefix_output (expected to be all zeros) and prefix_lse (-inf) in that
+     case.
+ */
+ if (std::isinf(max_lse)) {
+ if (pack_offset < head_size) {
+ // Pack 128b load
+      pack_128b_t p_out_pack = reinterpret_cast<const pack_128b_t*>(
+          prefix_head_ptr)[pack_offset / pack_size];
+
+      // Pack 128b storage
+      reinterpret_cast<pack_128b_t*>(output_head_ptr)[pack_offset / pack_size] =
+ p_out_pack;
+ }
+ // We only need to write to output_lse once per head.
+ if (output_lse != nullptr && pack_idx == 0) {
+ output_lse[head_idx * num_tokens + token_idx] = max_lse;
+ }
+ return;
+ }
+
p_lse = p_lse - max_lse;
s_lse = s_lse - max_lse;
const float p_se = expf(p_lse);
@@ -114,7 +143,7 @@ __global__ void merge_attn_states_kernel(
        reinterpret_cast<float*>(prefix_lse.data_ptr()),                  \
        reinterpret_cast<scalar_t*>(suffix_output.data_ptr()),            \
        reinterpret_cast<float*>(suffix_lse.data_ptr()), num_tokens,      \
- num_heads, head_size); \
+ num_heads, head_size, prefix_head_stride, output_head_stride); \
}
/*@brief Merges the attention states from prefix and suffix
@@ -140,17 +169,11 @@ void merge_attn_states_launcher(torch::Tensor& output,
const uint num_tokens = output.size(0);
const uint num_heads = output.size(1);
const uint head_size = output.size(2);
+ const uint prefix_head_stride = prefix_output.stride(1);
+ const uint output_head_stride = output.stride(1);
const uint pack_size = 16 / sizeof(scalar_t);
TORCH_CHECK(head_size % pack_size == 0,
"headsize must be multiple of pack_size:", pack_size);
- TORCH_CHECK(output.stride(-2) == head_size && output.stride(-1) == 1,
- "output heads must be contiguous in memory");
- TORCH_CHECK(
- prefix_output.stride(-2) == head_size && prefix_output.stride(-1) == 1,
- "prefix_output heads must be contiguous in memory");
- TORCH_CHECK(
- suffix_output.stride(-2) == head_size && suffix_output.stride(-1) == 1,
- "suffix_output heads must be contiguous in memory");
float* output_lse_ptr = nullptr;
if (output_lse.has_value()) {
     output_lse_ptr = output_lse.value().data_ptr<float>();
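For context on the -inf guard added above: merging prefix and suffix partial attention uses their log-sum-exp (LSE) values as softmax weights, which degenerates when both LSEs are -inf. A NumPy sketch of the per-(token, head) merge, following the same formula as the kernel (reference only, not part of the patch):

    import numpy as np

    def merge_attn_states_ref(p_out, p_lse, s_out, s_lse):
        # p_out/s_out: [head_size] partial outputs; p_lse/s_lse: scalar LSEs.
        p_lse = -np.inf if np.isinf(p_lse) else p_lse
        s_lse = -np.inf if np.isinf(s_lse) else s_lse
        max_lse = max(p_lse, s_lse)
        if np.isinf(max_lse):
            # Both chunks empty (e.g. no prefix-cache hit under chunked prefill):
            # pass the prefix output (expected all zeros) through, keep lse=-inf.
            return p_out, max_lse
        p_se, s_se = np.exp(p_lse - max_lse), np.exp(s_lse - max_lse)
        out = (p_out * p_se + s_out * s_se) / (p_se + s_se)
        return out, max_lse + np.log(p_se + s_se)
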
diff --git a/csrc/cache.h b/csrc/cache.h
index b162a4a2bc31f..f2a5ec0acf5cd 100644
--- a/csrc/cache.h
+++ b/csrc/cache.h
@@ -41,11 +41,12 @@ void convert_fp8(torch::Tensor& dst_cache, torch::Tensor& src_cache,
const double scale, const std::string& kv_cache_dtype);
void gather_and_maybe_dequant_cache(
- torch::Tensor const& src_cache, // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...]
- torch::Tensor const& dst, // [TOT_TOKENS, ENTRIES...]
- torch::Tensor const& block_table, // [BATCH, BLOCK_INDICES]
- torch::Tensor const& cu_seq_lens, // [BATCH+1]
- int64_t batch_size, const std::string& kv_cache_dtype,
+ torch::Tensor const& src_cache, // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...]
+ torch::Tensor const& dst, // [TOT_TOKENS, ENTRIES...]
+ torch::Tensor const& block_table, // [BATCH, BLOCK_INDICES]
+ torch::Tensor const& cu_seq_lens, // [BATCH+1]
+ torch::Tensor const& token_to_seq, // [MAX_TOKEN_ACROSS_CHUNKS]
+ int64_t num_tokens, const std::string& kv_cache_dtype,
torch::Tensor const& scale,
    std::optional<torch::Tensor> seq_starts = std::nullopt);
diff --git a/csrc/cache_kernels.cu b/csrc/cache_kernels.cu
index 0aa0dc14c7480..8a5457206c706 100644
--- a/csrc/cache_kernels.cu
+++ b/csrc/cache_kernels.cu
@@ -552,7 +552,11 @@ __global__ void indexer_k_quant_and_cache_kernel(
#ifndef USE_ROCM
__syncwarp();
#endif
+#if defined(__gfx942__)
+ float scale = fmaxf(amax, 1e-4) / 224.0f;
+#else
float scale = fmaxf(amax, 1e-4) / 448.0f;
+#endif
if (use_ue8m0) {
scale = exp2f(ceilf(log2f(scale)));
}
@@ -901,87 +905,80 @@ void convert_fp8(torch::Tensor& dst_cache, torch::Tensor& src_cache,
namespace vllm {
// grid is launched with dimensions (batch, num_splits)
-template
+template
__global__ void gather_and_maybe_dequant_cache(
- const cache_t* __restrict__ src_cache, // [NUM_BLOCKS, BLOCK_SIZE,
- // ENTRIES...]
- scalar_t* __restrict__ dst, // [TOT_TOKENS, ENTRIES...]
- const int32_t* __restrict__ block_table, // [BATCH, BLOCK_INDICES]
- const int32_t* __restrict__ cu_seq_lens, // [BATCH+1]
- const int32_t block_size, const int32_t entry_size,
+ const cache_t* __restrict__ src_cache, // [NUM_BLOCKS, BLOCK_SIZE,
+ // ENTRIES...]
+ scalar_t* __restrict__ dst, // [TOT_TOKENS, ENTRIES...]
+ const int32_t* __restrict__ block_table, // [BATCH, BLOCK_INDICES]
+ const int32_t* __restrict__ cu_seq_lens, // [BATCH+1]
+ const int32_t* __restrict__ token_to_seq, // [MAX_TOKEN_ACROSS_CHUNK]
+ const int32_t num_tokens, const int32_t block_size,
const int64_t block_table_stride, const int64_t cache_block_stride,
const int64_t cache_entry_stride, const int64_t dst_entry_stride,
const float* __restrict__ scale,
const int32_t* __restrict__ seq_starts) { // Optional: starting offsets per
// batch
+ constexpr int vec_size = sizeof(float4) / sizeof(scalar_t);
+  using ltype = vllm::vec_n_t<cache_t, vec_size>;
+  using stype = vllm::vec_n_t<scalar_t, vec_size>;
+  // This assert is only for readability; it is compiled out in release builds.
+ assert(CTA_SIZE == blockDim.x);
- const int64_t bid = blockIdx.x; // Batch ID
- const int32_t num_splits = gridDim.y;
- const int32_t split = blockIdx.y;
- const int32_t seq_start = cu_seq_lens[bid];
- const int32_t seq_end = cu_seq_lens[bid + 1];
- const int32_t seq_len = seq_end - seq_start;
- const int32_t tot_blocks = cuda_utils::ceil_div(seq_len, block_size);
- const int32_t split_blocks = cuda_utils::ceil_div(tot_blocks, num_splits);
+#pragma unroll
+ for (int token_id = blockIdx.x; token_id < num_tokens;
+ token_id += gridDim.x) {
+ int64_t batch_id = token_to_seq[token_id];
+ int64_t batch_start = cu_seq_lens[batch_id];
+ int64_t batch_end = cu_seq_lens[batch_id + 1];
+ int32_t batch_offset = token_id - batch_start;
- const int32_t split_start = split * split_blocks;
- const int32_t split_end = min((split + 1) * split_blocks, tot_blocks);
+ if (token_id >= batch_end) return;
+ int32_t offset = 0;
+ if (seq_starts != nullptr) {
+ offset = seq_starts[batch_id];
+ }
+ batch_offset += offset;
+ int32_t block_table_id = batch_offset / block_size;
+ int32_t slot_id = batch_offset % block_size;
+ int32_t block_table_offset = batch_id * block_table_stride + block_table_id;
+ int32_t block_id = block_table[block_table_offset];
+ int64_t cache_offset =
+ block_id * cache_block_stride + slot_id * cache_entry_stride;
+ constexpr int32_t vec_iter_cnt = ENTRY_SIZE / vec_size;
+ scalar_t* dst_ = dst + token_id * dst_entry_stride;
+    cache_t* src_ = const_cast<cache_t*>(src_cache) + cache_offset;
- const bool is_active_split = (split_start < tot_blocks);
- const bool is_last_split = (split_end == tot_blocks);
-
- if (!is_active_split) return;
-
- int32_t full_blocks_end = split_end;
- int32_t partial_block_size = 0;
-
- // Adjust the pointer for the block_table for this batch.
- // If seq_starts is provided, compute an offset based on (seq_starts[bid] /
- // page_size)
- const int32_t batch_offset = bid * block_table_stride;
- int32_t offset = 0;
- if (seq_starts != nullptr) {
- offset = seq_starts[bid] / block_size;
- }
- const int32_t* batch_block_table = block_table + batch_offset + offset;
-
- // Adjust dst pointer based on the cumulative sequence lengths.
- dst += seq_start * dst_entry_stride;
-
- if (is_last_split) {
- partial_block_size = seq_len % block_size;
- if (partial_block_size) full_blocks_end -= 1;
- }
-
- auto copy_entry = [&](const cache_t* __restrict__ _src,
- scalar_t* __restrict__ _dst) {
- for (int i = threadIdx.x; i < entry_size; i += blockDim.x) {
+#pragma unroll
+ for (int idx = threadIdx.x; idx < vec_iter_cnt; idx += CTA_SIZE) {
if constexpr (kv_dt == Fp8KVCacheDataType::kAuto) {
-        _dst[i] = static_cast<scalar_t>(_src[i]);
+        reinterpret_cast<stype*>(dst_)[idx] =
+            static_cast<stype>(reinterpret_cast<ltype*>(src_)[idx]);
} else {
-        _dst[i] =
-            fp8::scaled_convert<scalar_t, cache_t, kv_dt>(_src[i], *scale);
+        ltype loaded_val = reinterpret_cast<ltype*>(src_)[idx];
+ stype store_val;
+#pragma unroll
+ for (int j = 0; j < vec_size; ++j) {
+          store_val.val[j] = fp8::scaled_convert<scalar_t, cache_t, kv_dt>(
+              loaded_val.val[j], *scale);
+ }
+        reinterpret_cast<stype*>(dst_)[idx] = store_val;
}
}
- };
-
- for (int pid = split_start; pid < full_blocks_end; ++pid) {
- auto block_id = batch_block_table[pid];
- auto block_start_ptr = src_cache + block_id * cache_block_stride;
- auto block_dst_ptr = dst + pid * block_size * dst_entry_stride;
- for (int eid = 0; eid < block_size; ++eid) {
- copy_entry(block_start_ptr + eid * cache_entry_stride,
- block_dst_ptr + eid * dst_entry_stride);
- }
- }
-
- if (partial_block_size) {
- auto block_id = batch_block_table[full_blocks_end];
- auto block_start_ptr = src_cache + block_id * cache_block_stride;
- auto block_dst_ptr = dst + full_blocks_end * block_size * dst_entry_stride;
- for (int eid = 0; eid < partial_block_size; ++eid) {
- copy_entry(block_start_ptr + eid * cache_entry_stride,
- block_dst_ptr + eid * dst_entry_stride);
+ // process tail
+ constexpr int32_t tail_cnt = ENTRY_SIZE % vec_size;
+ dst_ = dst_ + ENTRY_SIZE - tail_cnt;
+ src_ = src_ + ENTRY_SIZE - tail_cnt;
+#pragma unroll
+ for (int idx = threadIdx.x; idx < tail_cnt; idx += CTA_SIZE) {
+ if constexpr (kv_dt == Fp8KVCacheDataType::kAuto) {
+      dst_[idx] = static_cast<scalar_t>(src_[idx]);
+    } else {
+      dst_[idx] =
+          fp8::scaled_convert<scalar_t, cache_t, kv_dt>(src_[idx], *scale);
+ }
}
}
}
@@ -992,34 +989,38 @@ __global__ void gather_and_maybe_dequant_cache(
// SCALAR_T is the data type of the destination tensor.
// CACHE_T is the stored data type of kv-cache.
// KV_DTYPE is the real data type of kv-cache.
-#define CALL_GATHER_CACHE(SCALAR_T, CACHE_T, KV_DTYPE) \
-  vllm::gather_and_maybe_dequant_cache<SCALAR_T, CACHE_T, KV_DTYPE> \
-      <<<grid, block, 0, stream>>>( \
-          reinterpret_cast<CACHE_T*>(src_cache.data_ptr()), \
-          reinterpret_cast<SCALAR_T*>(dst.data_ptr()), \
-          block_table.data_ptr<int32_t>(), cu_seq_lens.data_ptr<int32_t>(), \
-          block_size, entry_size, block_table_stride, cache_block_stride, \
-          cache_entry_stride, dst_entry_stride, \
-          reinterpret_cast<const float*>(scale.data_ptr()), seq_starts_ptr);
+#define CALL_GATHER_CACHE(SCALAR_T, CACHE_T, KV_DTYPE) \
+ vllm::gather_and_maybe_dequant_cache \
+      <<<grid, block, 0, stream>>>( \
+          reinterpret_cast<CACHE_T*>(src_cache.data_ptr()), \
+          reinterpret_cast<SCALAR_T*>(dst.data_ptr()), \
+          block_table.data_ptr<int32_t>(), cu_seq_lens.data_ptr<int32_t>(), \
+          token_to_seq.data_ptr<int32_t>(), num_tokens, block_size, \
+          block_table_stride, cache_block_stride, cache_entry_stride, \
+          dst_entry_stride, reinterpret_cast<const float*>(scale.data_ptr()), \
+ seq_starts_ptr);
// Gather sequences from the cache into the destination tensor.
// - cu_seq_lens contains the cumulative sequence lengths for each batch
// - block_table contains the cache block indices for each sequence
+// - token_to_seq contains the back mapping from token_id to batch_id
// - Optionally, seq_starts (if provided) offsets the starting block index by
// (seq_starts[bid] / page_size)
void gather_and_maybe_dequant_cache(
- torch::Tensor const& src_cache, // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...]
- torch::Tensor const& dst, // [TOT_TOKENS, ENTRIES...]
- torch::Tensor const& block_table, // [BATCH, BLOCK_INDICES]
- torch::Tensor const& cu_seq_lens, // [BATCH+1]
- int64_t batch_size, const std::string& kv_cache_dtype,
+ torch::Tensor const& src_cache, // [NUM_BLOCKS, BLOCK_SIZE, ENTRIES...]
+ torch::Tensor const& dst, // [TOT_TOKENS, ENTRIES...]
+ torch::Tensor const& block_table, // [BATCH, BLOCK_INDICES]
+ torch::Tensor const& cu_seq_lens, // [BATCH+1]
+ torch::Tensor const& token_to_seq, // [MAX_TOKEN_ACROSS_CHUNKS]
+ int64_t num_tokens, const std::string& kv_cache_dtype,
torch::Tensor const& scale,
std::optional seq_starts = std::nullopt) {
at::cuda::OptionalCUDAGuard device_guard(src_cache.device());
const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
int32_t block_size = src_cache.size(1);
- int32_t entry_size = src_cache.flatten(2, -1).size(2);
+ int32_t head_dim = dst.size(-1);
TORCH_CHECK(block_table.dtype() == torch::kInt32,
"block_table must be int32");
@@ -1029,6 +1030,9 @@ void gather_and_maybe_dequant_cache(
TORCH_CHECK(seq_starts.value().dtype() == torch::kInt32,
"seq_starts must be int32");
}
+  TORCH_CHECK(head_dim == 576,
+              "gather_and_maybe_dequant_cache only supports head_dim == 576 "
+              "(for performance reasons)");
TORCH_CHECK(src_cache.device() == dst.device(),
"src_cache and dst must be on the same device");
@@ -1046,10 +1050,9 @@ void gather_and_maybe_dequant_cache(
int64_t cache_entry_stride = src_cache.stride(1);
int64_t dst_entry_stride = dst.stride(0);
- // Decide on the number of splits based on the batch size.
- int num_splits = batch_size > 128 ? 2 : batch_size > 64 ? 4 : 16;
- dim3 grid(batch_size, num_splits);
- dim3 block(1024);
+ constexpr int32_t thread_block_size = 64;
+ dim3 grid(num_tokens);
+ dim3 block(thread_block_size);
const int32_t* seq_starts_ptr =
      seq_starts.has_value() ? seq_starts.value().data_ptr<int32_t>() : nullptr;
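The rewritten gather kernel launches one thread block per token and therefore needs a token_to_seq back-mapping from token index to batch index in addition to cu_seq_lens. A hedged PyTorch sketch of how a caller could build that tensor (the helper name is illustrative; the actual construction lives on the Python side of the patch):

    import torch

    def build_token_to_seq(cu_seq_lens: torch.Tensor) -> torch.Tensor:
        # cu_seq_lens: [batch + 1] cumulative lengths, e.g. tensor([0, 3, 7]).
        seq_lens = (cu_seq_lens[1:] - cu_seq_lens[:-1]).to(torch.int64)
        batch_ids = torch.arange(seq_lens.numel(), dtype=torch.int32,
                                 device=cu_seq_lens.device)
        # -> tensor([0, 0, 0, 1, 1, 1, 1], dtype=torch.int32) for the example.
        return torch.repeat_interleave(batch_ids, seq_lens)
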
diff --git a/csrc/cpu/attention.cpp b/csrc/cpu/attention.cpp
deleted file mode 100644
index 82862fea7f2be..0000000000000
--- a/csrc/cpu/attention.cpp
+++ /dev/null
@@ -1,798 +0,0 @@
-#include "cpu_types.hpp"
-
-namespace {
-
-template
-struct KernelVecType {
- using q_load_vec_type = void;
- using q_vec_type = void;
- using k_load_vec_type = void;
- using k_vec_type = void;
- using qk_acc_vec_type = void;
- using v_load_vec_type = void;
-};
-
-template <>
-struct KernelVecType {
- using q_load_vec_type = vec_op::FP32Vec4;
- using q_vec_type = vec_op::FP32Vec16;
- using k_load_vec_type = vec_op::FP32Vec16;
- using k_vec_type = vec_op::FP32Vec16;
- using qk_acc_vec_type = vec_op::FP32Vec16;
- using v_load_vec_type = vec_op::FP32Vec16;
-};
-
-template <>
-struct KernelVecType {
-#if defined(__powerpc64__) || defined(__s390x__)
- // Power and s390x architecture-specific vector types
- using q_load_vec_type = vec_op::FP32Vec8;
- using k_load_vec_type = vec_op::FP32Vec16;
- using v_load_vec_type = vec_op::FP32Vec16;
-#else
- // Fallback for other architectures, including x86
- using q_load_vec_type = vec_op::FP16Vec8;
- using k_load_vec_type = vec_op::FP16Vec16;
- using v_load_vec_type = vec_op::FP16Vec16;
-#endif
- using q_vec_type = vec_op::FP32Vec16;
- using k_vec_type = vec_op::FP32Vec16;
- using qk_acc_vec_type = vec_op::FP32Vec16;
-};
-
-#ifdef __AVX512BF16__
-template <>
-struct KernelVecType {
- using q_load_vec_type = vec_op::BF16Vec8;
- using q_vec_type = vec_op::BF16Vec32;
- using k_load_vec_type = vec_op::BF16Vec32;
- using k_vec_type = vec_op::BF16Vec32;
- using qk_acc_vec_type = vec_op::FP32Vec16;
- using v_load_vec_type = vec_op::BF16Vec16;
-};
-#else
- #ifdef __aarch64__
- #ifndef ARM_BF16_SUPPORT
- // pass
- #else
-template <>
-struct KernelVecType {
- using q_load_vec_type = vec_op::BF16Vec8;
- using q_vec_type = vec_op::FP32Vec16;
- using k_load_vec_type = vec_op::BF16Vec16;
- using k_vec_type = vec_op::FP32Vec16;
- using qk_acc_vec_type = vec_op::FP32Vec16;
- using v_load_vec_type = vec_op::BF16Vec16;
-};
- #endif
- #else
-template <>
-struct KernelVecType {
- using q_load_vec_type = vec_op::BF16Vec8;
- using q_vec_type = vec_op::FP32Vec16;
- using k_load_vec_type = vec_op::BF16Vec16;
- using k_vec_type = vec_op::FP32Vec16;
- using qk_acc_vec_type = vec_op::FP32Vec16;
- using v_load_vec_type = vec_op::BF16Vec16;
-};
- #endif
-#endif
-
-template
-FORCE_INLINE std::pair reduceSoftmax(T* data, const int size,
- const int capacity) {
- T max = data[0];
- for (int i = 1; i < size; ++i) {
- max = max >= data[i] ? max : data[i];
- }
-
- T sum = 0;
- for (int i = 0; i < size; ++i) {
- data[i] = std::exp(data[i] - max);
- sum += data[i];
- }
-
- int i = 0;
- for (; i < size; ++i) {
- data[i] /= sum;
- }
-
- for (; i < capacity; ++i) {
- data[i] = 0;
- }
-
- return {max, sum};
-}
-
-template
-FORCE_INLINE std::pair reduceSoftmaxAlibi(T* data, const int size,
- const int capacity,
- const float alibi_slope,
- const int start_index,
- const int seq_len) {
- data[0] += alibi_slope * (start_index - seq_len + 1);
- T max = data[0];
- for (int i = 1; i < size; ++i) {
- T qk = data[i] + alibi_slope * (start_index + i - seq_len + 1);
- data[i] = qk;
- max = max >= qk ? max : qk;
- }
-
- T sum = 0;
- for (int i = 0; i < size; ++i) {
- data[i] = std::exp(data[i] - max);
- sum += data[i];
- }
-
- int i = 0;
- for (; i < size; ++i) {
- data[i] /= sum;
- }
-
- for (; i < capacity; ++i) {
- data[i] = 0;
- }
-
- return {max, sum};
-}
-
-template
-FORCE_INLINE void reducePartitionSoftmax(const T* max_data, T* sum_data,
- const int size) {
- T max = max_data[0];
- for (int i = 1; i < size; ++i) {
- max = max >= max_data[i] ? max : max_data[i];
- }
-
- T rescaled_sum = 0;
- for (int i = 0; i < size; ++i) {
- T rescale_factor = std::exp(max_data[i] - max);
- rescaled_sum += rescale_factor * sum_data[i];
- sum_data[i] *= rescale_factor;
- }
- for (int i = 0; i < size; ++i) {
- sum_data[i] /= rescaled_sum + 1e-8;
- }
-}
-
-template
-struct reduceQKBlockKernel {
- using q_load_vec_type = typename KernelVecType::q_load_vec_type;
- using q_vec_type = typename KernelVecType::q_vec_type;
- using k_load_vec_type = typename KernelVecType::k_load_vec_type;
- using k_vec_type = typename KernelVecType::k_vec_type;
- using qk_acc_vec_type = typename KernelVecType::qk_acc_vec_type;
-
- constexpr static int TOKEN_PER_GROUP = k_load_vec_type::get_elem_num() / x;
- constexpr static int MAX_GROUP_NUM = 16 / TOKEN_PER_GROUP;
- constexpr static int UNROLL_GROUP_NUM = MAX_GROUP_NUM / 4;
-
- static_assert(MAX_GROUP_NUM == 8 || MAX_GROUP_NUM == 4);
- static_assert(k_load_vec_type::get_elem_num() % x == 0);
- static_assert(q_load_vec_type::get_elem_num() * sizeof(scalar_t) == 16);
-
- FORCE_INLINE static void call(const scalar_t* __restrict__ q,
- const scalar_t* __restrict__ k_block,
- float* __restrict__ logits, float scale,
- const int token_num) {
- const int group_num = (token_num + TOKEN_PER_GROUP - 1) / TOKEN_PER_GROUP;
-
- qk_acc_vec_type group_accums[MAX_GROUP_NUM];
- if (token_num == BLOCK_SIZE) {
- for (int q_offset = 0; q_offset < HEAD_SIZE;
- q_offset += x, k_block += x * BLOCK_SIZE) {
- q_load_vec_type q_load_group_vec(q + q_offset);
- q_vec_type q_group_vec(q_load_group_vec);
-
- vec_op::unroll_loop(
- [k_block, &q_group_vec, &group_accums](int token_group_idx) {
- k_load_vec_type k_load_group_vec(k_block + token_group_idx * x *
- TOKEN_PER_GROUP);
- k_vec_type k_group_vec(k_load_group_vec);
- vec_op::fma(group_accums[token_group_idx], q_group_vec,
- k_group_vec);
- vec_op::prefetch(k_block + x * BLOCK_SIZE +
- token_group_idx * x * TOKEN_PER_GROUP);
- });
- }
- } else {
- for (int q_offset = 0; q_offset < HEAD_SIZE;
- q_offset += x, k_block += x * BLOCK_SIZE) {
- q_load_vec_type q_load_group_vec(q + q_offset);
- q_vec_type q_group_vec(q_load_group_vec);
- for (int token_group_start = 0; token_group_start < group_num;
- token_group_start += UNROLL_GROUP_NUM) {
- vec_op::unroll_loop(
- [token_group_start, k_block, &q_group_vec,
- &group_accums](int token_group_idx) {
- token_group_idx += token_group_start;
- k_load_vec_type k_load_group_vec(k_block + token_group_idx * x *
- TOKEN_PER_GROUP);
- k_vec_type k_group_vec(k_load_group_vec);
- vec_op::fma(group_accums[token_group_idx], q_group_vec,
- k_group_vec);
- vec_op::prefetch(k_block + x * BLOCK_SIZE +
- token_group_idx * x * TOKEN_PER_GROUP);
- });
- }
- }
- }
-
- for (int token_group_idx = 0; token_group_idx < group_num;
- ++token_group_idx) {
- vec_op::unroll_loop(
- [&group_accums, logits, scale, token_group_idx](int token_idx) {
- float dot_v =
- group_accums[token_group_idx]
- .template reduce_sub_sum(token_idx);
- logits[token_group_idx * TOKEN_PER_GROUP + token_idx] =
- dot_v * scale;
- });
- }
- }
-};
-
-template
-FORCE_INLINE void reduceValueBlock(const float* prob, const scalar_t* v_block,
- acc_t&& acc) {
- using v_load_vec_type = typename KernelVecType::v_load_vec_type;
- constexpr int ELEM_NUM = v_load_vec_type::get_elem_num();
- static_assert(BLOCK_SIZE == ELEM_NUM);
- vec_op::FP32Vec16 prob_vec(prob);
-
- vec_op::unroll_loop([&](int head_elem_idx) {
- v_load_vec_type v_vec(v_block + BLOCK_SIZE * head_elem_idx);
- vec_op::FP32Vec16 fp32_v_vec(v_vec);
- acc[head_elem_idx] = acc[head_elem_idx] + prob_vec * fp32_v_vec;
- });
-}
-}; // namespace
-
-// Paged attention v1
-namespace {
-template
-struct paged_attention_v1_impl {
- static void call(
- scalar_t* __restrict__ out, // [num_seqs, num_heads, head_size]
- const scalar_t* __restrict__ q, // [num_seqs, num_heads, head_size]
- const scalar_t* __restrict__ k_cache, // [num_blocks, num_kv_heads,
- // head_size/x, block_size, x]
- const scalar_t* __restrict__ v_cache, // [num_blocks, num_kv_heads,
- // head_size, block_size]
- const int num_kv_heads, const float scale,
- const int* __restrict__ block_tables, // [num_seqs,
- // max_num_blocks_per_seq]
- const int* __restrict__ seq_lens, // [num_seqs]
- const int max_num_blocks_per_seq,
- const float* __restrict__ alibi_slopes, // [num_heads]
- const int q_stride, const int kv_block_stride, const int kv_head_stride,
- const int num_seqs, const int num_heads) {
- constexpr int x = 16 / sizeof(scalar_t);
- const int num_queries_per_kv = num_heads / num_kv_heads;
-
- static_assert(BLOCK_SIZE == 16);
-
- int max_seq_len = max_num_blocks_per_seq * BLOCK_SIZE;
- int max_seq_len_padded = (max_seq_len + 15) & 0xFFFFFFF0;
- TORCH_CHECK((max_seq_len_padded * sizeof(float)) % 64 == 0);
-
- const int parallel_work_item_num = omp_get_max_threads();
-
- size_t logits_bytes =
- parallel_work_item_num * max_seq_len_padded * sizeof(float);
- float* logits = (float*)std::aligned_alloc(
- 64, logits_bytes); // Cacheline alignment for each context token.
- // [parallel_work_item_num, max_seq_len_padded]
-
-#pragma omp parallel for collapse(2) schedule(dynamic, 1)
- for (int seq_idx = 0; seq_idx < num_seqs; ++seq_idx) {
- for (int head_idx = 0; head_idx < num_heads; ++head_idx) {
- int seq_len = seq_lens[seq_idx];
- const int* seq_block_table =
- block_tables + max_num_blocks_per_seq * seq_idx;
- const int block_num = (seq_len + BLOCK_SIZE - 1) / BLOCK_SIZE;
- const int64_t kv_head_idx = head_idx / num_queries_per_kv;
- const scalar_t* __restrict__ q_vec_ptr =
- q + seq_idx * q_stride + head_idx * HEAD_SIZE;
- const int last_block_token_num = seq_len - (block_num - 1) * BLOCK_SIZE;
- float* __restrict__ thread_block_logits =
- logits + omp_get_thread_num() * max_seq_len_padded;
-
- // Compute logits
- for (int block_idx = 0; block_idx < block_num; ++block_idx) {
- const int64_t physical_block_idx = seq_block_table[block_idx];
- const scalar_t* __restrict__ k_block_cache_ptr =
- k_cache + physical_block_idx * kv_block_stride +
- kv_head_idx * kv_head_stride;
- float* __restrict__ head_block_logits =
- thread_block_logits + block_idx * BLOCK_SIZE;
-
- reduceQKBlockKernel::call(
- q_vec_ptr, k_block_cache_ptr, head_block_logits, scale,
- block_idx == block_num - 1 ? last_block_token_num : BLOCK_SIZE);
- }
-
- // Compute softmax
- if (alibi_slopes) {
- reduceSoftmaxAlibi(thread_block_logits, seq_len,
- block_num * BLOCK_SIZE, alibi_slopes[head_idx], 0,
- seq_len);
- } else {
- reduceSoftmax(thread_block_logits, seq_len, block_num * BLOCK_SIZE);
- }
-
- // Compute value
- constexpr int head_elem_num_per_partition = 16;
- constexpr int head_partition_num =
- HEAD_SIZE / head_elem_num_per_partition;
- for (int head_part_idx = 0; head_part_idx < head_partition_num;
- ++head_part_idx) {
- vec_op::FP32Vec16 accums[head_elem_num_per_partition];
- scalar_t* __restrict__ out_ptr =
- out + seq_idx * num_heads * HEAD_SIZE + head_idx * HEAD_SIZE +
- head_part_idx * head_elem_num_per_partition;
- for (int block_idx = 0; block_idx < block_num; ++block_idx) {
- const int64_t physical_block_idx = seq_block_table[block_idx];
- const float* __restrict__ prob_vec_ptr =
- thread_block_logits + block_idx * BLOCK_SIZE;
- const scalar_t* __restrict__ v_block_cache_ptr =
- v_cache + physical_block_idx * kv_block_stride +
- kv_head_idx * kv_head_stride +
- BLOCK_SIZE * head_part_idx * head_elem_num_per_partition;
- reduceValueBlock(
- prob_vec_ptr, v_block_cache_ptr, accums);
-
- if (block_idx != block_num - 1) {
- const int64_t next_physical_block_idx =
- seq_block_table[block_idx + 1];
- const scalar_t* __restrict__ next_v_block_cache_ptr =
- v_cache + next_physical_block_idx * kv_block_stride +
- kv_head_idx * kv_head_stride +
- BLOCK_SIZE * head_part_idx * head_elem_num_per_partition;
- vec_op::unroll_loop(
- [&](int head_elem_idx) {
- if (head_elem_idx % 2 == 0) {
- vec_op::prefetch(next_v_block_cache_ptr +
- BLOCK_SIZE * head_elem_idx);
- }
- });
- }
- }
-
- vec_op::unroll_loop(
- [&](int head_elem_idx) {
- float value = accums[head_elem_idx].reduce_sum();
- vec_op::storeFP32(value, out_ptr + head_elem_idx);
- });
- }
- }
- }
- std::free(logits);
- }
-};
-
-#define LAUNCH_V1_ATTENTION_KERNEL(T, HEAD_SIZE, BLOCK_SIZE) \
- paged_attention_v1_impl::call( \
- out_ptr, query_ptr, key_cache_ptr, value_cache_ptr, num_kv_heads, scale, \
- block_tables_ptr, seq_lens_ptr, max_num_blocks_per_seq, \
- alibi_slopes_ptr, q_stride, kv_block_stride, kv_head_stride, num_seqs, \
- num_heads);
-
-template
-void paged_attention_v1_impl_launcher(
- torch::Tensor& out, torch::Tensor& query, torch::Tensor& key_cache,
- torch::Tensor& value_cache, int num_kv_heads, float scale,
- torch::Tensor& block_tables, torch::Tensor& seq_lens, int max_seq_len,
- const std::optional& alibi_slopes) {
- int num_seqs = query.size(0);
- int num_heads = query.size(1);
- int head_size = query.size(2);
- int max_num_blocks_per_seq = block_tables.size(1);
- int q_stride = query.stride(0);
- int kv_block_stride = key_cache.stride(0);
- int kv_head_stride = key_cache.stride(1);
-
- // NOTE: alibi_slopes is optional.
- const float* alibi_slopes_ptr =
- alibi_slopes
- ? reinterpret_cast(alibi_slopes.value().data_ptr())
- : nullptr;
-
- T* out_ptr = reinterpret_cast(out.data_ptr());
- T* query_ptr = reinterpret_cast(query.data_ptr());
- T* key_cache_ptr = reinterpret_cast(key_cache.data_ptr());
- T* value_cache_ptr = reinterpret_cast(value_cache.data_ptr());
- int* block_tables_ptr = block_tables.data_ptr();
- int* seq_lens_ptr = seq_lens.data_ptr();
-
- switch (head_size) {
- case 32:
- LAUNCH_V1_ATTENTION_KERNEL(T, 32, BLOCK_SIZE);
- break;
- case 64:
- LAUNCH_V1_ATTENTION_KERNEL(T, 64, BLOCK_SIZE);
- break;
- case 80:
- LAUNCH_V1_ATTENTION_KERNEL(T, 80, BLOCK_SIZE);
- break;
- case 96:
- LAUNCH_V1_ATTENTION_KERNEL(T, 96, BLOCK_SIZE);
- break;
- case 112:
- LAUNCH_V1_ATTENTION_KERNEL(T, 112, BLOCK_SIZE);
- break;
- case 128:
- LAUNCH_V1_ATTENTION_KERNEL(T, 128, BLOCK_SIZE);
- break;
- case 192:
- LAUNCH_V1_ATTENTION_KERNEL(T, 192, BLOCK_SIZE);
- break;
- case 256:
- LAUNCH_V1_ATTENTION_KERNEL(T, 256, BLOCK_SIZE);
- break;
- default:
- TORCH_CHECK(false, "Unsupported head size: ", head_size);
- break;
- }
-}
-
-#define CALL_V1_KERNEL_LAUNCHER(T, BLOCK_SIZE) \
- paged_attention_v1_impl_launcher( \
- out, query, key_cache, value_cache, num_kv_heads, scale, block_tables, \
- seq_lens, max_seq_len, alibi_slopes);
-
-#define CALL_V1_KERNEL_LAUNCHER_BLOCK_SIZE(T) \
- switch (block_size) { \
- case 16: \
- CALL_V1_KERNEL_LAUNCHER(T, 16); \
- break; \
- default: \
- TORCH_CHECK(false, "Unsupported block size: ", block_size); \
- break; \
- }
-} // namespace
-
-void paged_attention_v1(
- torch::Tensor& out, torch::Tensor& query, torch::Tensor& key_cache,
- torch::Tensor& value_cache, int64_t num_kv_heads, double scale,
- torch::Tensor& block_tables, torch::Tensor& seq_lens, int64_t block_size,
- int64_t max_seq_len, const std::optional& alibi_slopes,
- const std::string& kv_cache_dtype, torch::Tensor& k_scale,
- torch::Tensor& v_scale, const int64_t tp_rank,
- const int64_t blocksparse_local_blocks,
- const int64_t blocksparse_vert_stride, const int64_t blocksparse_block_size,
- const int64_t blocksparse_head_sliding_step) {
- TORCH_CHECK(blocksparse_vert_stride <= 1,
- "CPU backend does not support blocksparse attention yet.");
- VLLM_DISPATCH_FLOATING_TYPES(query.scalar_type(), "paged_attention_v1_impl",
- [&] {
- CPU_KERNEL_GUARD_IN(paged_attention_v1_impl)
- CALL_V1_KERNEL_LAUNCHER_BLOCK_SIZE(scalar_t);
- CPU_KERNEL_GUARD_OUT(paged_attention_v1_impl)
- });
-}
-
-// Paged attention v2
-namespace {
-template
-struct paged_attention_v2_impl {
- static void call(
- scalar_t* __restrict__ out, // [num_seqs, num_heads, head_size]
- float* __restrict__ exp_sums, // [num_seqs, num_heads,
- // max_num_partitions]
- float* __restrict__ max_logits, // [num_seqs, num_heads,
- // max_num_partitions]
- scalar_t* __restrict__ tmp_out, // [num_seqs, num_heads,
- // max_num_partitions, head_size]
- const scalar_t* __restrict__ q, // [num_seqs, num_heads, head_size]
- const scalar_t* __restrict__ k_cache, // [num_blocks, num_kv_heads,
- // head_size/x, block_size, x]
- const scalar_t* __restrict__ v_cache, // [num_blocks, num_kv_heads,
- // head_size, block_size]
- const int num_kv_heads, const float scale,
- const int* __restrict__ block_tables, // [num_seqs,
- // max_num_blocks_per_seq]
- const int* __restrict__ seq_lens, // [num_seqs]
- const int max_num_blocks_per_seq,
- const float* __restrict__ alibi_slopes, // [num_heads]
- const int q_stride, const int kv_block_stride, const int kv_head_stride,
- const int num_seqs, const int num_heads, const int max_num_partitions) {
- constexpr int x = 16 / sizeof(scalar_t);
- const int num_queries_per_kv = num_heads / num_kv_heads;
-
- static_assert(BLOCK_SIZE == 16);
- static_assert(PARTITION_SIZE * sizeof(float) % 64 == 0);
- static_assert(PARTITION_SIZE % BLOCK_SIZE == 0);
-
-#pragma omp parallel for collapse(3) schedule(static, 1)
- for (int seq_idx = 0; seq_idx < num_seqs; ++seq_idx) {
- for (int partition_idx = 0; partition_idx < max_num_partitions;
- ++partition_idx) {
- for (int head_idx = 0; head_idx < num_heads; ++head_idx) {
- const int seq_len = seq_lens[seq_idx];
- const int start_token_idx = partition_idx * PARTITION_SIZE;
-
- if (start_token_idx >= seq_len) continue;
-
- const int partition_num =
- (seq_len + PARTITION_SIZE - 1) / PARTITION_SIZE;
- const bool no_reduce = (partition_num == 1);
- const int token_num =
- (std::min(seq_len, start_token_idx + PARTITION_SIZE) -
- start_token_idx);
- const int block_num = (token_num + BLOCK_SIZE - 1) / BLOCK_SIZE;
- const int last_block_token_num =
- token_num - (block_num - 1) * BLOCK_SIZE;
- const int* seq_block_table = block_tables +
- max_num_blocks_per_seq * seq_idx +
- start_token_idx / BLOCK_SIZE;
- const int64_t kv_head_idx = head_idx / num_queries_per_kv;
- const scalar_t* __restrict__ q_vec_ptr =
- q + seq_idx * q_stride + head_idx * HEAD_SIZE;
-
- float logits[PARTITION_SIZE] __attribute__((aligned(64))) = {0};
-
- // Compute logits
- for (int block_idx = 0; block_idx < block_num; ++block_idx) {
- const int64_t physical_block_idx = seq_block_table[block_idx];
- const scalar_t* __restrict__ k_block_cache_ptr =
- k_cache + physical_block_idx * kv_block_stride +
- kv_head_idx * kv_head_stride;
- float* __restrict__ head_block_logits =
- logits + block_idx * BLOCK_SIZE;
-
- reduceQKBlockKernel::call(
- q_vec_ptr, k_block_cache_ptr, head_block_logits, scale,
- block_idx == block_num - 1 ? last_block_token_num : BLOCK_SIZE);
- }
-
- std::pair max_and_sum;
- if (alibi_slopes) {
- max_and_sum = reduceSoftmaxAlibi(
- logits, token_num, block_num * BLOCK_SIZE,
- alibi_slopes[head_idx], start_token_idx, seq_len);
- } else {
- max_and_sum =
- reduceSoftmax(logits, token_num, block_num * BLOCK_SIZE);
- }
-
- auto&& [max_logit, exp_sum] = max_and_sum;
-
- scalar_t* __restrict__ output_buffer = nullptr;
- if (!no_reduce) {
- auto idx = seq_idx * num_heads * max_num_partitions +
- head_idx * max_num_partitions + partition_idx;
- max_logits[idx] = max_logit;
- exp_sums[idx] = exp_sum;
- output_buffer =
- tmp_out + seq_idx * num_heads * max_num_partitions * HEAD_SIZE +
- head_idx * max_num_partitions * HEAD_SIZE +
- partition_idx * HEAD_SIZE;
- } else {
- output_buffer =
- out + seq_idx * num_heads * HEAD_SIZE + head_idx * HEAD_SIZE;
- }
-
- // Compute value
- constexpr int head_elem_num_per_partition = 16;
- constexpr int head_partition_num =
- HEAD_SIZE / head_elem_num_per_partition;
- for (int head_part_idx = 0; head_part_idx < head_partition_num;
- ++head_part_idx) {
- vec_op::FP32Vec16 accums[head_elem_num_per_partition];
- scalar_t* __restrict__ out_ptr =
- output_buffer + head_part_idx * head_elem_num_per_partition;
- for (int block_idx = 0; block_idx < block_num; ++block_idx) {
- const int64_t physical_block_idx = seq_block_table[block_idx];
- const float* __restrict__ prob_vec_ptr =
- logits + block_idx * BLOCK_SIZE;
- const scalar_t* __restrict__ v_block_cache_ptr =
- v_cache + physical_block_idx * kv_block_stride +
- kv_head_idx * kv_head_stride +
- BLOCK_SIZE * head_part_idx * head_elem_num_per_partition;
- reduceValueBlock(
- prob_vec_ptr, v_block_cache_ptr, accums);
-
- if (block_idx != block_num - 1) {
- const int64_t next_physical_block_idx =
- seq_block_table[block_idx + 1];
- const scalar_t* __restrict__ next_v_block_cache_ptr =
- v_cache + next_physical_block_idx * kv_block_stride +
- kv_head_idx * kv_head_stride +
- BLOCK_SIZE * head_part_idx * head_elem_num_per_partition;
- vec_op::unroll_loop(
- [&](int head_elem_idx) {
- if (head_elem_idx % 2 == 0) {
- vec_op::prefetch(next_v_block_cache_ptr +
- BLOCK_SIZE * head_elem_idx);
- }
- });
- }
- }
-
- vec_op::unroll_loop(
- [&](int head_elem_idx) {
- float value = accums[head_elem_idx].reduce_sum();
- vec_op::storeFP32(value, out_ptr + head_elem_idx);
- });
- }
- }
- }
- }
-
- // Rescale partition softmax and store the factors to exp_sums
-#pragma omp parallel for collapse(2) schedule(static, 1)
- for (int seq_idx = 0; seq_idx < num_seqs; ++seq_idx) {
- for (int head_idx = 0; head_idx < num_heads; ++head_idx) {
- const int seq_len = seq_lens[seq_idx];
- const int partition_num =
- (seq_len + PARTITION_SIZE - 1) / PARTITION_SIZE;
-
- if (partition_num == 1) continue;
-
- reducePartitionSoftmax(
- max_logits + seq_idx * num_heads * max_num_partitions +
- head_idx * max_num_partitions,
- exp_sums + seq_idx * num_heads * max_num_partitions +
- head_idx * max_num_partitions,
- partition_num);
- }
- }
-
- // Reduce values
- using v_load_vec_type = typename KernelVecType<scalar_t>::v_load_vec_type;
- static_assert(v_load_vec_type::get_elem_num() == BLOCK_SIZE);
- constexpr int head_elem_num_per_group =
- 16; // Note: not aligned to the cache line size, since some HEAD_SIZE
- // values are not multiples of 64 bytes
- static_assert(HEAD_SIZE % head_elem_num_per_group == 0);
- constexpr int head_group_num = HEAD_SIZE / head_elem_num_per_group;
- const float* __restrict__ rescale_factors = exp_sums;
-#pragma omp parallel for collapse(3) schedule(static, 1)
- for (int seq_idx = 0; seq_idx < num_seqs; ++seq_idx) {
- for (int head_idx = 0; head_idx < num_heads; ++head_idx) {
- for (int group_idx = 0; group_idx < head_group_num; ++group_idx) {
- const int seq_len = seq_lens[seq_idx];
- const int partition_num =
- (seq_len + PARTITION_SIZE - 1) / PARTITION_SIZE;
-
- if (partition_num == 1) continue;
-
- const float* __restrict__ seq_head_rescale_factors =
- rescale_factors + seq_idx * num_heads * max_num_partitions +
- head_idx * max_num_partitions;
- const scalar_t* __restrict__ seq_head_tmp_out =
- tmp_out + seq_idx * num_heads * max_num_partitions * HEAD_SIZE +
- head_idx * max_num_partitions * HEAD_SIZE +
- group_idx * head_elem_num_per_group;
- scalar_t* __restrict__ seq_head_output =
- out + seq_idx * num_heads * HEAD_SIZE + head_idx * HEAD_SIZE +
- group_idx * head_elem_num_per_group;
-
- vec_op::FP32Vec16 acc;
- for (int i = 0; i < partition_num; ++i) {
- vec_op::FP32Vec16 rescale_factor(seq_head_rescale_factors[i]);
- v_load_vec_type value(seq_head_tmp_out + i * HEAD_SIZE);
- vec_op::FP32Vec16 fp32_value(value);
- acc = acc + fp32_value * rescale_factor;
- }
- v_load_vec_type cast_acc(acc);
- cast_acc.save(seq_head_output);
- }
- }
- }
- }
-};
-
-#define LAUNCH_V2_ATTENTION_KERNEL(T, HEAD_SIZE, BLOCK_SIZE) \
- paged_attention_v2_impl<T, HEAD_SIZE, BLOCK_SIZE>::call( \
- out_ptr, exp_sums_ptr, max_logits_ptr, tmp_out_ptr, query_ptr, \
- key_cache_ptr, value_cache_ptr, num_kv_heads, scale, block_tables_ptr, \
- seq_lens_ptr, max_num_blocks_per_seq, alibi_slopes_ptr, q_stride, \
- kv_block_stride, kv_head_stride, num_seqs, num_heads, \
- max_num_partitions);
-
-template <typename T, int BLOCK_SIZE>
-void paged_attention_v2_impl_launcher(
- torch::Tensor& out, torch::Tensor& exp_sums, torch::Tensor& max_logits,
- torch::Tensor& tmp_out, torch::Tensor& query, torch::Tensor& key_cache,
- torch::Tensor& value_cache, int num_kv_heads, float scale,
- torch::Tensor& block_tables, torch::Tensor& seq_lens, int block_size,
- int max_seq_len, const std::optional<torch::Tensor>& alibi_slopes) {
- int num_seqs = query.size(0);
- int num_heads = query.size(1);
- int head_size = query.size(2);
- int max_num_blocks_per_seq = block_tables.size(1);
- int q_stride = query.stride(0);
- int kv_block_stride = key_cache.stride(0);
- int kv_head_stride = key_cache.stride(1);
- int max_num_partitions = exp_sums.size(-1);
-
- // NOTE: alibi_slopes is optional.
- const float* alibi_slopes_ptr =
- alibi_slopes
- ? reinterpret_cast<const float*>(alibi_slopes.value().data_ptr())
- : nullptr;
-
- T* out_ptr = reinterpret_cast<T*>(out.data_ptr());
- float* exp_sums_ptr = reinterpret_cast<float*>(exp_sums.data_ptr());
- float* max_logits_ptr = reinterpret_cast<float*>(max_logits.data_ptr());
- T* tmp_out_ptr = reinterpret_cast<T*>(tmp_out.data_ptr());
- T* query_ptr = reinterpret_cast<T*>(query.data_ptr());
- T* key_cache_ptr = reinterpret_cast<T*>(key_cache.data_ptr());
- T* value_cache_ptr = reinterpret_cast<T*>(value_cache.data_ptr());
- int* block_tables_ptr = block_tables.data_ptr<int>();
- int* seq_lens_ptr = seq_lens.data_ptr<int>();
-
- switch (head_size) {
- case 32:
- LAUNCH_V2_ATTENTION_KERNEL(T, 32, BLOCK_SIZE);
- break;
- case 64:
- LAUNCH_V2_ATTENTION_KERNEL(T, 64, BLOCK_SIZE);
- break;
- case 80:
- LAUNCH_V2_ATTENTION_KERNEL(T, 80, BLOCK_SIZE);
- break;
- case 96:
- LAUNCH_V2_ATTENTION_KERNEL(T, 96, BLOCK_SIZE);
- break;
- case 112:
- LAUNCH_V2_ATTENTION_KERNEL(T, 112, BLOCK_SIZE);
- break;
- case 128:
- LAUNCH_V2_ATTENTION_KERNEL(T, 128, BLOCK_SIZE);
- break;
- case 192:
- LAUNCH_V2_ATTENTION_KERNEL(T, 192, BLOCK_SIZE);
- break;
- case 256:
- LAUNCH_V2_ATTENTION_KERNEL(T, 256, BLOCK_SIZE);
- break;
- default:
- TORCH_CHECK(false, "Unsupported head size: ", head_size);
- break;
- }
-}
-
-#define CALL_V2_KERNEL_LAUNCHER(T, BLOCK_SIZE) \
- paged_attention_v2_impl_launcher<T, BLOCK_SIZE>( \
- out, exp_sums, max_logits, tmp_out, query, key_cache, value_cache, \
- num_kv_heads, scale, block_tables, seq_lens, block_size, max_seq_len, \
- alibi_slopes);
-
-#define CALL_V2_KERNEL_LAUNCHER_BLOCK_SIZE(T) \
- switch (block_size) { \
- case 16: \
- CALL_V2_KERNEL_LAUNCHER(T, 16); \
- break; \
- default: \
- TORCH_CHECK(false, "Unsupported block size: ", block_size); \
- break; \
- }
-} // namespace
-
-void paged_attention_v2(
- torch::Tensor& out, torch::Tensor& exp_sums, torch::Tensor& max_logits,
- torch::Tensor& tmp_out, torch::Tensor& query, torch::Tensor& key_cache,
- torch::Tensor& value_cache, int64_t num_kv_heads, double scale,
- torch::Tensor& block_tables, torch::Tensor& seq_lens, int64_t block_size,
- int64_t max_seq_len, const std::optional<torch::Tensor>& alibi_slopes,
- const std::string& kv_cache_dtype, torch::Tensor& k_scale,
- torch::Tensor& v_scale, const int64_t tp_rank,
- const int64_t blocksparse_local_blocks,
- const int64_t blocksparse_vert_stride, const int64_t blocksparse_block_size,
- const int64_t blocksparse_head_sliding_step) {
- TORCH_CHECK(blocksparse_vert_stride <= 1,
- "CPU backend does not support blocksparse attention yet.");
- VLLM_DISPATCH_FLOATING_TYPES(query.scalar_type(), "paged_attention_v2_impl",
- [&] {
- CPU_KERNEL_GUARD_IN(paged_attention_v2_impl)
- CALL_V2_KERNEL_LAUNCHER_BLOCK_SIZE(scalar_t);
- CPU_KERNEL_GUARD_OUT(paged_attention_v2_impl)
- });
-}
\ No newline at end of file
diff --git a/csrc/cpu/cache.cpp b/csrc/cpu/cache.cpp
deleted file mode 100644
index 69f6d06e3c967..0000000000000
--- a/csrc/cpu/cache.cpp
+++ /dev/null
@@ -1,214 +0,0 @@
-#include <map>