From 5a1c2e15d847e30d9c72d60ba1e28dc4b89df23d Mon Sep 17 00:00:00 2001 From: CYJiang <86391540+googs1025@users.noreply.github.com> Date: Tue, 17 Jun 2025 23:17:38 +0800 Subject: [PATCH 001/211] [Mis] remove duplicate engine status checks (#19647) Signed-off-by: googs1025 --- vllm/v1/engine/core_client.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/vllm/v1/engine/core_client.py b/vllm/v1/engine/core_client.py index 7eff377b74b56..8058cd3127df7 100644 --- a/vllm/v1/engine/core_client.py +++ b/vllm/v1/engine/core_client.py @@ -794,7 +794,6 @@ class AsyncMPClient(MPClient): request_type: EngineCoreRequestType, request: Any, engine: Optional[CoreEngine] = None) -> Awaitable[Any]: - self.ensure_alive() if engine is None: engine = self.core_engine @@ -1059,7 +1058,7 @@ class DPAsyncMPClient(AsyncMPClient): self.reqs_in_flight.pop(req_id, None) async def abort_requests_async(self, request_ids: list[str]) -> None: - if not request_ids: + if not request_ids or self.resources.engine_dead: return if len(request_ids) == 1: @@ -1077,9 +1076,8 @@ class DPAsyncMPClient(AsyncMPClient): async def _abort_requests(self, request_ids: list[str], engine: CoreEngine) -> None: - if not self.resources.engine_dead: - await self._send_input(EngineCoreRequestType.ABORT, request_ids, - engine) + await self._send_input(EngineCoreRequestType.ABORT, request_ids, + engine) class RayDPClient(DPAsyncMPClient): From ca94d7fa007d113f2daf74e2f2278c03be676e93 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Tue, 17 Jun 2025 23:58:38 +0800 Subject: [PATCH 002/211] [Bugfix] Update multimodel models mapping to fit new checkpoint after Transformers v4.52 (#19151) Signed-off-by: Isotr0py <2037008807@qq.com> --- tests/models/multimodal/test_mapping.py | 86 ++++++++++ vllm/model_executor/models/aria.py | 5 + vllm/model_executor/models/aya_vision.py | 16 +- vllm/model_executor/models/fuyu.py | 9 +- vllm/model_executor/models/gemma3_mm.py | 16 +- vllm/model_executor/models/llava.py | 16 +- vllm/model_executor/models/llava_next.py | 16 +- .../model_executor/models/llava_next_video.py | 17 +- vllm/model_executor/models/llava_onevision.py | 17 +- vllm/model_executor/models/mistral3.py | 16 +- vllm/model_executor/models/mllama.py | 149 ++++++++++++------ vllm/model_executor/models/paligemma.py | 16 +- 12 files changed, 304 insertions(+), 75 deletions(-) create mode 100644 tests/models/multimodal/test_mapping.py diff --git a/tests/models/multimodal/test_mapping.py b/tests/models/multimodal/test_mapping.py new file mode 100644 index 0000000000000..5f20452aff3d8 --- /dev/null +++ b/tests/models/multimodal/test_mapping.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from collections.abc import Iterable + +import pytest +import torch +import transformers +from transformers import AutoConfig, PreTrainedModel + +from vllm.config import ModelConfig +from vllm.model_executor.models.utils import WeightsMapper +from vllm.multimodal import MULTIMODAL_REGISTRY +from vllm.transformers_utils.config import try_get_safetensors_metadata + +from ..registry import _MULTIMODAL_EXAMPLE_MODELS, HF_EXAMPLE_MODELS + + +def create_repo_dummy_weights(repo: str) -> Iterable[tuple[str, torch.Tensor]]: + """Create weights from safetensors checkpoint metadata""" + metadata = try_get_safetensors_metadata(repo) + weight_names = list(metadata.weight_map.keys()) + with torch.device('meta'): + return ((name, torch.empty(0)) for name in weight_names) + + +def create_model_dummy_weights( + repo: str, + model_arch: str, +) -> Iterable[tuple[str, torch.Tensor]]: + """ + Create weights from a dummy meta deserialized hf model with name conversion + """ + model_cls: PreTrainedModel = getattr(transformers, model_arch) + config = AutoConfig.from_pretrained(repo) + with torch.device("meta"): + model: PreTrainedModel = model_cls._from_config(config) + return model.named_parameters() + + +def model_architectures_for_test() -> list[str]: + arch_to_test = list[str]() + for model_arch, info in _MULTIMODAL_EXAMPLE_MODELS.items(): + if not info.trust_remote_code and hasattr(transformers, model_arch): + model_cls: PreTrainedModel = getattr(transformers, model_arch) + if getattr(model_cls, "_checkpoint_conversion_mapping", None): + arch_to_test.append(model_arch) + return arch_to_test + + +@pytest.mark.core_model +@pytest.mark.parametrize("model_arch", model_architectures_for_test()) +def test_hf_model_weights_mapper(model_arch: str): + model_info = HF_EXAMPLE_MODELS.get_hf_info(model_arch) + model_info.check_available_online(on_fail="skip") + model_info.check_transformers_version(on_fail="skip") + + model_id = model_info.default + + model_config = ModelConfig( + model_id, + task="auto", + tokenizer=model_info.tokenizer or model_id, + tokenizer_mode=model_info.tokenizer_mode, + trust_remote_code=model_info.trust_remote_code, + seed=0, + dtype="auto", + revision=None, + hf_overrides=model_info.hf_overrides, + ) + model_cls = MULTIMODAL_REGISTRY._get_model_cls(model_config) + + original_weights = create_repo_dummy_weights(model_id) + hf_converted_weights = create_model_dummy_weights(model_id, model_arch) + mapper: WeightsMapper = model_cls.hf_to_vllm_mapper + + mapped_original_weights = mapper.apply(original_weights) + mapped_hf_converted_weights = mapper.apply(hf_converted_weights) + + ref_weight_names = set(map(lambda x: x[0], mapped_original_weights)) + weight_names = set(map(lambda x: x[0], mapped_hf_converted_weights)) + + weights_missing = ref_weight_names - weight_names + weights_unmapped = weight_names - ref_weight_names + assert (not weights_missing and not weights_unmapped), ( + f"Following weights are not mapped correctly: {weights_unmapped}, " + f"Missing expected weights: {weights_missing}.") diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py index b69c7b6a9376d..4fe6a7b9e9383 100644 --- a/vllm/model_executor/models/aria.py +++ b/vllm/model_executor/models/aria.py @@ -486,6 +486,11 @@ class AriaForConditionalGeneration(nn.Module, SupportsMultiModal): """ hf_to_vllm_mapper = WeightsMapper( orig_to_new_prefix={ + # mapping for new names in checkpoint saved after transformers v4.52 + "model.language_model.": "language_model.model.", + "model.vision_tower.": "vision_tower.", + "model.multi_modal_projector.": "multi_modal_projector.", + # mapping for original checkpoint "language_model.model": "language_model", "language_model.lm_head": "lm_head", }, diff --git a/vllm/model_executor/models/aya_vision.py b/vllm/model_executor/models/aya_vision.py index 6a95ac089ff4a..7c02d245db8b7 100644 --- a/vllm/model_executor/models/aya_vision.py +++ b/vllm/model_executor/models/aya_vision.py @@ -32,8 +32,9 @@ from vllm.sequence import IntermediateTensors from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP from .siglip import SiglipVisionModel -from .utils import (AutoWeightsLoader, flatten_bn, init_vllm_registered_model, - maybe_prefix, merge_multimodal_embeddings) +from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, + init_vllm_registered_model, maybe_prefix, + merge_multimodal_embeddings) class AyaVisionImagePixelInputs(TypedDict): @@ -292,6 +293,15 @@ def _get_layer_index(feature_layer_index: int, num_hidden_layers: int) -> int: class AyaVisionForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + # mapping for new names in checkpoint saved after transformers v4.52 + "model.language_model.": "language_model.model.", + "model.vision_tower.": "vision_tower.", + "model.multi_modal_projector.": "multi_modal_projector.", + "lm_head.": "language_model.lm_head.", + }) + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() config: AyaVisionConfig = vllm_config.model_config.hf_config @@ -323,7 +333,7 @@ class AyaVisionForConditionalGeneration(nn.Module, SupportsMultiModal, def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: loader = AutoWeightsLoader(self) - return loader.load_weights(weights) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) def _image_pixels_to_features(self, vision_tower: SiglipVisionModel, pixel_values: torch.Tensor, diff --git a/vllm/model_executor/models/fuyu.py b/vllm/model_executor/models/fuyu.py index 462f85c3dd623..9692899f7b993 100644 --- a/vllm/model_executor/models/fuyu.py +++ b/vllm/model_executor/models/fuyu.py @@ -42,7 +42,7 @@ from vllm.multimodal.profiling import BaseDummyInputsBuilder from vllm.sequence import IntermediateTensors from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP -from .utils import (AutoWeightsLoader, flatten_bn, maybe_prefix, +from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, maybe_prefix, merge_multimodal_embeddings) # Cannot find the following 2 numbers from hf config. @@ -245,6 +245,13 @@ class FuyuMultiModalProcessor(BaseMultiModalProcessor[FuyuProcessingInfo]): dummy_inputs=FuyuDummyInputsBuilder) class FuyuForCausalLM(nn.Module, SupportsMultiModal, SupportsPP): + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + "model.vision_embed_tokens.": "vision_embed_tokens.", + "model.language_model.": "language_model.model.", + "lm_head.": "language_model.lm_head.", + }) + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() config = vllm_config.model_config.hf_config diff --git a/vllm/model_executor/models/gemma3_mm.py b/vllm/model_executor/models/gemma3_mm.py index b633c0003c637..415a8dbdcf87f 100644 --- a/vllm/model_executor/models/gemma3_mm.py +++ b/vllm/model_executor/models/gemma3_mm.py @@ -35,8 +35,9 @@ from vllm.sequence import IntermediateTensors from .interfaces import (MultiModalEmbeddings, SupportsLoRA, SupportsMultiModal, SupportsPP) from .siglip import SiglipVisionModel -from .utils import (AutoWeightsLoader, flatten_bn, init_vllm_registered_model, - maybe_prefix, merge_multimodal_embeddings) +from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, + init_vllm_registered_model, maybe_prefix, + merge_multimodal_embeddings) logger = init_logger(__name__) @@ -471,6 +472,15 @@ class Gemma3ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP, ], } + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + # mapping for new names in checkpoint saved after transformers v4.52 + "model.language_model.": "language_model.model.", + "model.vision_tower.": "vision_tower.", + "model.multi_modal_projector.": "multi_modal_projector.", + "lm_head.": "language_model.lm_head.", + }) + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() config = vllm_config.model_config.hf_config @@ -697,7 +707,7 @@ class Gemma3ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP, def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: loader = AutoWeightsLoader(self) - return loader.load_weights(weights) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) def get_mm_mapping(self) -> MultiModelKeys: """ diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index 7dea260a58e0d..f70ad37a3d3ac 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -40,8 +40,9 @@ from .clip import CLIPVisionModel from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP from .pixtral import PixtralHFEncoderInfo, PixtralHFVisionModel from .siglip import SiglipVisionModel -from .utils import (AutoWeightsLoader, flatten_bn, init_vllm_registered_model, - maybe_prefix, merge_multimodal_embeddings) +from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, + init_vllm_registered_model, maybe_prefix, + merge_multimodal_embeddings) from .vision import get_vision_encoder_info @@ -499,6 +500,15 @@ class LlavaForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): "gate_up_proj": ["gate_proj", "up_proj"] } + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + # mapping for new names in checkpoint saved after transformers v4.52 + "model.language_model.": "language_model.model.", + "model.vision_tower.": "vision_tower.", + "model.multi_modal_projector.": "multi_modal_projector.", + "lm_head.": "language_model.lm_head.", + }) + def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() @@ -754,7 +764,7 @@ class LlavaForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: loader = AutoWeightsLoader(self) - return loader.load_weights(weights) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) class MantisProcessingInfo(LlavaProcessingInfo): diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index 60ede454ff272..bc792be19dbf6 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -26,8 +26,8 @@ from .llava import (BaseLlavaMultiModalProcessor, BaseLlavaProcessingInfo, LlavaDummyInputsBuilder, LlavaLikeConfig, LlavaMultiModalProjector, init_vision_tower_for_llava) from .siglip import SiglipVisionModel -from .utils import (AutoWeightsLoader, embed_multimodal, flatten_bn, - init_vllm_registered_model, maybe_prefix) +from .utils import (AutoWeightsLoader, WeightsMapper, embed_multimodal, + flatten_bn, init_vllm_registered_model, maybe_prefix) class LlavaNextImagePixelInputs(TypedDict): @@ -205,6 +205,16 @@ class LlavaNextMultiModalProcessor( class LlavaNextForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + # mapping for new names in checkpoint saved after transformers v4.52 + "model.language_model.": "language_model.model.", + "model.vision_tower.": "vision_tower.", + "model.multi_modal_projector.": "multi_modal_projector.", + "model.image_newline": "image_newline", + "lm_head.": "language_model.lm_head.", + }) + def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() config = vllm_config.model_config.hf_config @@ -583,4 +593,4 @@ class LlavaNextForConditionalGeneration(nn.Module, SupportsMultiModal, def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: loader = AutoWeightsLoader(self) - return loader.load_weights(weights) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) diff --git a/vllm/model_executor/models/llava_next_video.py b/vllm/model_executor/models/llava_next_video.py index 78084465e7a27..c13e8e9b24140 100644 --- a/vllm/model_executor/models/llava_next_video.py +++ b/vllm/model_executor/models/llava_next_video.py @@ -29,8 +29,9 @@ from vllm.utils import is_list_of from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP from .llava import init_vision_tower_for_llava from .siglip import SiglipVisionModel -from .utils import (AutoWeightsLoader, init_vllm_registered_model, - maybe_prefix, merge_multimodal_embeddings) +from .utils import (AutoWeightsLoader, WeightsMapper, + init_vllm_registered_model, maybe_prefix, + merge_multimodal_embeddings) from .vision import get_vision_encoder_info @@ -270,6 +271,16 @@ class LlavaNextMultiModalProjector(nn.Module): class LlavaNextVideoForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + # mapping for new names in checkpoint saved after transformers v4.52 + "model.language_model.": "language_model.model.", + "model.vision_tower.": "vision_tower.", + "model.multi_modal_projector.": "multi_modal_projector.", + "model.image_newline": "image_newline", + "lm_head.": "language_model.lm_head.", + }) + def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() config = vllm_config.model_config.hf_config @@ -468,4 +479,4 @@ class LlavaNextVideoForConditionalGeneration(nn.Module, SupportsMultiModal, # This model doesn't support images for now ignore_unexpected_prefixes=["image_newline"], ) - return loader.load_weights(weights) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py index 265f63d7bd295..373b0a2a7d5e6 100644 --- a/vllm/model_executor/models/llava_onevision.py +++ b/vllm/model_executor/models/llava_onevision.py @@ -30,8 +30,9 @@ from .llava import LlavaDummyInputsBuilder, init_vision_tower_for_llava from .llava_next import (BaseLlavaNextMultiModalProcessor, LlavaNextLikeConfig, LlavaNextProcessingInfo) from .siglip import SiglipVisionModel -from .utils import (AutoWeightsLoader, flatten_bn, init_vllm_registered_model, - maybe_prefix, merge_multimodal_embeddings) +from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, + init_vllm_registered_model, maybe_prefix, + merge_multimodal_embeddings) # For profile run _MAX_FRAMES_PER_VIDEO = 16 @@ -428,6 +429,16 @@ class LlavaOnevisionMultiModalProjector(nn.Module): class LlavaOnevisionForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + # mapping for new names in checkpoint saved after transformers v4.52 + "model.language_model.": "language_model.model.", + "model.vision_tower.": "vision_tower.", + "model.multi_modal_projector.": "multi_modal_projector.", + "model.image_newline": "image_newline", + "lm_head.": "language_model.lm_head.", + }) + def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() config = vllm_config.model_config.hf_config @@ -954,4 +965,4 @@ class LlavaOnevisionForConditionalGeneration(nn.Module, SupportsMultiModal, def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: loader = AutoWeightsLoader(self) - return loader.load_weights(weights) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) diff --git a/vllm/model_executor/models/mistral3.py b/vllm/model_executor/models/mistral3.py index 59deacffd2851..ebc176e2c7242 100644 --- a/vllm/model_executor/models/mistral3.py +++ b/vllm/model_executor/models/mistral3.py @@ -36,8 +36,9 @@ from vllm.sequence import IntermediateTensors from .interfaces import (MultiModalEmbeddings, SupportsLoRA, SupportsMultiModal, SupportsPP) from .pixtral import PixtralHFEncoderInfo, PixtralHFVisionModel -from .utils import (AutoWeightsLoader, flatten_bn, init_vllm_registered_model, - maybe_prefix, merge_multimodal_embeddings) +from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, + init_vllm_registered_model, maybe_prefix, + merge_multimodal_embeddings) from .vision import get_vision_encoder_info @@ -389,6 +390,15 @@ class Mistral3ForConditionalGeneration(nn.Module, SupportsLoRA, "gate_up_proj": ["gate_proj", "up_proj"] } + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + # mapping for new names in checkpoint saved after transformers v4.52 + "model.language_model.": "language_model.model.", + "model.vision_tower.": "vision_tower.", + "model.multi_modal_projector.": "multi_modal_projector.", + "lm_head.": "language_model.lm_head.", + }) + def __init__(self, *, vllm_config: VllmConfig, prefix: str = "") -> None: super().__init__() @@ -592,7 +602,7 @@ class Mistral3ForConditionalGeneration(nn.Module, SupportsLoRA, def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: loader = AutoWeightsLoader(self) - return loader.load_weights(weights) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) def get_mm_mapping(self) -> MultiModelKeys: """ diff --git a/vllm/model_executor/models/mllama.py b/vllm/model_executor/models/mllama.py index e9f91feb3359d..1b7e93fafad93 100644 --- a/vllm/model_executor/models/mllama.py +++ b/vllm/model_executor/models/mllama.py @@ -67,7 +67,7 @@ from vllm.multimodal.profiling import BaseDummyInputsBuilder from .clip import CLIPMLP from .interfaces import SupportsMultiModal, SupportsV0Only from .llama import LlamaDecoderLayer, LlamaMLP -from .utils import maybe_prefix +from .utils import AutoWeightsLoader, WeightsMapper, maybe_prefix logger = init_logger(__name__) @@ -790,6 +790,36 @@ class MllamaVisionModel(nn.Module): dim=-1) return hidden_state + def load_weights(self, weights: Iterable[tuple[str, + torch.Tensor]]) -> set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + (".qkv_proj", ".q_proj", "q"), + (".qkv_proj", ".k_proj", "k"), + (".qkv_proj", ".v_proj", "v"), + ] + params_dict = dict(self.named_parameters()) + updated_params: set[str] = set() + for name, loaded_weight in weights: + if 'patch_embedding._linear.weight' in name: + loaded_weight = loaded_weight.view(loaded_weight.shape[0], -1) + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + param = params_dict[name] + updated_params.add(name) + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + param = params_dict.pop(name) + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + updated_params.add(name) + return updated_params + class MllamaTextRMSNorm(nn.Module): @@ -1132,6 +1162,7 @@ class MllamaForCausalLM(nn.Module): config = vllm_config.model_config.hf_config.text_config quant_config = vllm_config.quant_config + self.quant_config = quant_config self.vocab_size = config.vocab_size self.model = MllamaTextModel(vllm_config=vllm_config, @@ -1167,6 +1198,58 @@ class MllamaForCausalLM(nn.Module): ) return hidden_states + def load_weights(self, weights: Iterable[tuple[str, + torch.Tensor]]) -> set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + (".qkv_proj", ".q_proj", "q"), + (".qkv_proj", ".k_proj", "k"), + (".qkv_proj", ".v_proj", "v"), + (".gate_up_proj", ".gate_proj", 0), + (".gate_up_proj", ".up_proj", 1), + ] + params_dict = dict(self.named_parameters()) + updated_params: set[str] = set() + for name, loaded_weight in weights: + if 'patch_embedding.weight' in name: + name = name.replace('patch_embedding.weight', + 'patch_embedding._linear.weight') + loaded_weight = loaded_weight.view(loaded_weight.shape[0], -1) + if (self.quant_config is not None and + (scale_name := self.quant_config.get_cache_scale(name))): + # Loading kv cache quantization scales + param = params_dict[scale_name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + loaded_weight = (loaded_weight if loaded_weight.dim() == 0 else + loaded_weight[0]) + weight_loader(param, loaded_weight) + updated_params.add(scale_name) + continue + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + param = params_dict[name] + updated_params.add(name) + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + orig_name = name + name = maybe_remap_kv_scale_name(name, params_dict) + if name is None: + logger.debug("Missing name %s, orig name %s", name, + orig_name) + continue + + param = params_dict.pop(name) + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + updated_params.add(name) + return updated_params + @MULTIMODAL_REGISTRY.register_processor(MllamaMultiModalProcessor, info=MllamaProcessingInfo, @@ -1178,6 +1261,19 @@ class MllamaForConditionalGeneration(nn.Module, SupportsMultiModal, "gate_up_proj": ["gate_proj", "up_proj"] } + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + # mapping for new names in checkpoint saved after transformers v4.52 + "model.vision_model.": "vision_model.", + "model.multi_modal_projector.": "multi_modal_projector.", + "model.language_model.": "language_model.model.", + "lm_head.": "language_model.lm_head.", + }, + orig_to_new_suffix={ + "patch_embedding.weight": "patch_embedding._linear.weight", + }, + ) + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() config: MllamaConfig = vllm_config.model_config.hf_config @@ -1479,55 +1575,8 @@ class MllamaForConditionalGeneration(nn.Module, SupportsMultiModal, def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: - stacked_params_mapping = [ - # (param_name, shard_name, shard_id) - (".qkv_proj", ".q_proj", "q"), - (".qkv_proj", ".k_proj", "k"), - (".qkv_proj", ".v_proj", "v"), - (".gate_up_proj", ".gate_proj", 0), - (".gate_up_proj", ".up_proj", 1), - ] - params_dict = dict(self.named_parameters()) - updated_params: set[str] = set() - for name, loaded_weight in weights: - if 'patch_embedding.weight' in name: - name = name.replace('patch_embedding.weight', - 'patch_embedding._linear.weight') - loaded_weight = loaded_weight.view(loaded_weight.shape[0], -1) - if (self.quant_config is not None and - (scale_name := self.quant_config.get_cache_scale(name))): - # Loading kv cache quantization scales - param = params_dict[scale_name] - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - loaded_weight = (loaded_weight if loaded_weight.dim() == 0 else - loaded_weight[0]) - weight_loader(param, loaded_weight) - updated_params.add(scale_name) - continue - for (param_name, weight_name, shard_id) in stacked_params_mapping: - if weight_name not in name: - continue - name = name.replace(weight_name, param_name) - param = params_dict[name] - updated_params.add(name) - weight_loader = param.weight_loader - weight_loader(param, loaded_weight, shard_id) - break - else: - orig_name = name - name = maybe_remap_kv_scale_name(name, params_dict) - if name is None: - logger.debug("Missing name %s, orig name %s", name, - orig_name) - continue - - param = params_dict.pop(name) - weight_loader = getattr(param, "weight_loader", - default_weight_loader) - weight_loader(param, loaded_weight) - updated_params.add(name) - return updated_params + loader = AutoWeightsLoader(self) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) def get_mm_mapping(self) -> MultiModelKeys: """ diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py index cc2cebe4a4a37..103a267c41f59 100644 --- a/vllm/model_executor/models/paligemma.py +++ b/vllm/model_executor/models/paligemma.py @@ -24,8 +24,9 @@ from vllm.sequence import IntermediateTensors from .interfaces import MultiModalEmbeddings, SupportsMultiModal, SupportsPP from .siglip import SiglipVisionModel -from .utils import (AutoWeightsLoader, flatten_bn, init_vllm_registered_model, - maybe_prefix, merge_multimodal_embeddings) +from .utils import (AutoWeightsLoader, WeightsMapper, flatten_bn, + init_vllm_registered_model, maybe_prefix, + merge_multimodal_embeddings) from .vision import get_vision_encoder_info logger = init_logger(__name__) @@ -227,6 +228,15 @@ class PaliGemmaForConditionalGeneration(nn.Module, SupportsMultiModal, ], } + hf_to_vllm_mapper = WeightsMapper( + orig_to_new_prefix={ + # mapping for new names in checkpoint saved after transformers v4.52 + "model.language_model.": "language_model.model.", + "model.vision_tower.": "vision_tower.", + "model.multi_modal_projector.": "multi_modal_projector.", + "lm_head.": "language_model.lm_head.", + }) + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() config = vllm_config.model_config.hf_config @@ -395,4 +405,4 @@ class PaliGemmaForConditionalGeneration(nn.Module, SupportsMultiModal, def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: loader = AutoWeightsLoader(self) - return loader.load_weights(weights) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) From ffb2cd6b5441af28da22694a19838a19507a15e1 Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Tue, 17 Jun 2025 14:49:26 -0400 Subject: [PATCH 003/211] [Perf] Optimize `moe_align_block_size` CUDA kernel (#19572) Signed-off-by: yewentao256 Co-authored-by: mgoin --- .../kernels/benchmark_moe_align_block_size.py | 159 +++++++ csrc/moe/moe_align_sum_kernels.cu | 450 ++++++------------ csrc/moe/moe_ops.h | 6 - csrc/moe/torch_bindings.cpp | 9 - .../kernels/moe/test_moe_align_block_size.py | 90 ++++ vllm/_custom_ops.py | 9 - .../layers/fused_moe/moe_align_block_size.py | 28 +- 7 files changed, 386 insertions(+), 365 deletions(-) create mode 100644 benchmarks/kernels/benchmark_moe_align_block_size.py create mode 100644 tests/kernels/moe/test_moe_align_block_size.py diff --git a/benchmarks/kernels/benchmark_moe_align_block_size.py b/benchmarks/kernels/benchmark_moe_align_block_size.py new file mode 100644 index 0000000000000..024a5dcfc8b0b --- /dev/null +++ b/benchmarks/kernels/benchmark_moe_align_block_size.py @@ -0,0 +1,159 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import argparse +import itertools + +import torch +import triton + +from vllm import _custom_ops as ops +from vllm.model_executor.layers.fused_moe.moe_align_block_size import ( + moe_align_block_size_triton, +) + + +def get_topk_ids(num_tokens: int, num_experts: int, topk: int) -> torch.Tensor: + return torch.stack( + [ + torch.randperm(num_experts, dtype=torch.int32, device="cuda")[:topk] + for _ in range(num_tokens) + ] + ) + + +def check_correctness(num_tokens, num_experts=256, block_size=256, topk=8): + """ + Verifies vllm vs. Triton + """ + topk_ids = get_topk_ids(num_tokens, num_experts, topk) + + # 1. malloc space for triton and vllm + # malloc enough space (max_num_tokens_padded) for the sorted ids + max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1) + sorted_ids_triton = torch.empty( + (max_num_tokens_padded,), dtype=torch.int32, device="cuda" + ) + sorted_ids_triton.fill_(topk_ids.numel()) # fill with sentinel value + expert_ids_triton = torch.zeros( + (max_num_tokens_padded // block_size,), dtype=torch.int32, device="cuda" + ) + num_tokens_post_pad_triton = torch.empty((1,), dtype=torch.int32, device="cuda") + + sorted_ids_vllm = torch.empty_like(sorted_ids_triton) + sorted_ids_vllm.fill_(topk_ids.numel()) + expert_ids_vllm = torch.zeros_like(expert_ids_triton) + num_tokens_post_pad_vllm = torch.empty_like(num_tokens_post_pad_triton) + + # 2. run implementations + moe_align_block_size_triton( + topk_ids, + num_experts, + block_size, + sorted_ids_triton, + expert_ids_triton, + num_tokens_post_pad_triton, + ) + + ops.moe_align_block_size( + topk_ids, + num_experts, + block_size, + sorted_ids_vllm, + expert_ids_vllm, + num_tokens_post_pad_vllm, + ) + print(f"✅ VLLM implementation works with {num_experts} experts!") + + # 3. compare results + if torch.allclose(expert_ids_triton, expert_ids_vllm) and torch.allclose( + num_tokens_post_pad_triton, num_tokens_post_pad_vllm + ): + print("✅ Triton and VLLM implementations match.") + else: + print("❌ Triton and VLLM implementations DO NOT match.") + print("Triton expert_ids:", expert_ids_triton) + print("VLLM expert_ids:", expert_ids_vllm) + print("Triton num_tokens_post_pad:", num_tokens_post_pad_triton) + print("VLLM num_tokens_post_pad:", num_tokens_post_pad_vllm) + + +# test configurations +num_tokens_range = [1, 16, 256, 4096] +num_experts_range = [16, 64, 224, 256, 280, 512] +topk_range = [1, 2, 8] +configs = list(itertools.product(num_tokens_range, num_experts_range, topk_range)) + + +@triton.testing.perf_report( + triton.testing.Benchmark( + x_names=["num_tokens", "num_experts", "topk"], + x_vals=configs, + line_arg="provider", + line_vals=["vllm", "triton"], # "triton" + line_names=["VLLM", "Triton"], # "Triton" + plot_name="moe-align-block-size-performance", + args={}, + ) +) +def benchmark(num_tokens, num_experts, topk, provider): + """Benchmark function for Triton.""" + block_size = 256 + topk_ids = get_topk_ids(num_tokens, num_experts, topk) + + max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1) + sorted_ids = torch.empty((max_num_tokens_padded,), dtype=torch.int32, device="cuda") + sorted_ids.fill_(topk_ids.numel()) + max_num_m_blocks = max_num_tokens_padded // block_size + expert_ids = torch.empty((max_num_m_blocks,), dtype=torch.int32, device="cuda") + num_tokens_post_pad = torch.empty((1,), dtype=torch.int32, device="cuda") + + quantiles = [0.5, 0.2, 0.8] + + if provider == "vllm": + ms, min_ms, max_ms = triton.testing.do_bench( + lambda: ops.moe_align_block_size( + topk_ids, + num_experts, + block_size, + sorted_ids.clone(), + expert_ids.clone(), + num_tokens_post_pad.clone(), + ), + quantiles=quantiles, + ) + elif provider == "triton": + ms, min_ms, max_ms = triton.testing.do_bench( + lambda: moe_align_block_size_triton( + topk_ids, + num_experts, + block_size, + sorted_ids.clone(), + expert_ids.clone(), + num_tokens_post_pad.clone(), + ), + quantiles=quantiles, + ) + + return 1000 * ms, 1000 * max_ms, 1000 * min_ms + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--num_experts", + type=int, + default=64, + choices=[8, 16, 32, 64, 128, 256], + ) + parser.add_argument( + "--topk", + type=int, + default=8, + choices=[2, 4, 8], + help="Top-k value for correctness check.", + ) + args = parser.parse_args() + + print("Running correctness check...") + check_correctness(num_tokens=1024, num_experts=args.num_experts, topk=args.topk) + benchmark.run(print_data=True, show_plots=True) diff --git a/csrc/moe/moe_align_sum_kernels.cu b/csrc/moe/moe_align_sum_kernels.cu index 6b6a9d04a60f4..9335e2333b0d9 100644 --- a/csrc/moe/moe_align_sum_kernels.cu +++ b/csrc/moe/moe_align_sum_kernels.cu @@ -13,232 +13,45 @@ namespace vllm { namespace moe { -namespace { -__device__ __forceinline__ int32_t index(int32_t total_col, int32_t row, - int32_t col) { - // don't worry about overflow because num_experts is relatively small - return row * total_col + col; -} -} // namespace - -template -__global__ void moe_align_block_size_kernel(scalar_t* __restrict__ topk_ids, - int32_t* sorted_token_ids, - int32_t* expert_ids, - int32_t* total_tokens_post_pad, - int32_t num_experts, - int32_t block_size, size_t numel) { - const size_t tokens_per_thread = CEILDIV(numel, blockDim.x); - const size_t start_idx = threadIdx.x * tokens_per_thread; - - extern __shared__ int32_t shared_mem[]; - int32_t* cumsum = shared_mem; // 1d tensor with shape (num_experts + 1) - token_cnts_t* tokens_cnts = - (token_cnts_t*)(shared_mem + num_experts + - 1); // 2d tensor with shape (blockDim.x + 1, num_experts) - - for (int i = 0; i < num_experts; ++i) { - tokens_cnts[index(num_experts, threadIdx.x + 1, i)] = 0; - } - - /** - * In the first step we compute token_cnts[thread_index + 1][expert_index], - * which counts how many tokens in the token shard of thread_index are - * assigned to expert expert_index. - */ - for (int i = start_idx; i < numel && i < start_idx + tokens_per_thread; ++i) { - ++tokens_cnts[index(num_experts, threadIdx.x + 1, topk_ids[i])]; - } - - __syncthreads(); - - // For each expert we accumulate the token counts from the different threads. - if (threadIdx.x < num_experts) { - tokens_cnts[index(num_experts, 0, threadIdx.x)] = 0; - for (int i = 1; i <= blockDim.x; ++i) { - tokens_cnts[index(num_experts, i, threadIdx.x)] += - tokens_cnts[index(num_experts, i - 1, threadIdx.x)]; - } - } - - __syncthreads(); - - // We accumulate the token counts of all experts in thread 0. - if (threadIdx.x == 0) { - cumsum[0] = 0; - for (int i = 1; i <= num_experts; ++i) { - cumsum[i] = cumsum[i - 1] + - CEILDIV(tokens_cnts[index(num_experts, blockDim.x, i - 1)], - block_size) * - block_size; - } - *total_tokens_post_pad = static_cast(cumsum[num_experts]); - } - - __syncthreads(); - - /** - * For each expert, each thread processes the tokens of the corresponding - * blocks and stores the corresponding expert_id for each block. - */ - if (threadIdx.x < num_experts) { - for (int i = cumsum[threadIdx.x]; i < cumsum[threadIdx.x + 1]; - i += block_size) { - expert_ids[i / block_size] = threadIdx.x; - } - } - - /** - * Each thread processes a token shard, calculating the index of each token - * after sorting by expert number. Given the example topk_ids = - * [0,1,2,1,2,3,0,3,4] and block_size = 4, then the output would be [0, 6, *, - * *, 1, 3, *, *, 2, 4, *, *, 5, 7, *, *, 8, *, *, *], where * represents a - * padding value(preset in python). - */ - for (int i = start_idx; i < numel && i < start_idx + tokens_per_thread; ++i) { - int32_t expert_id = topk_ids[i]; - /** The cumsum[expert_id] stores the starting index of the tokens that the - * expert with expert_id needs to process, and - * tokens_cnts[threadIdx.x][expert_id] stores the indices of the tokens - * processed by the expert with expert_id within the current thread's token - * shard. - */ - int32_t rank_post_pad = - tokens_cnts[index(num_experts, threadIdx.x, expert_id)] + - cumsum[expert_id]; - sorted_token_ids[rank_post_pad] = i; - ++tokens_cnts[index(num_experts, threadIdx.x, expert_id)]; - } -} - -// TODO(simon): this is temporarily adapted from -// https://github.com/sgl-project/sglang/commit/31548116a8dc8c6df7e146e0587335a59fc5b9d7 -// we did this to unblock Deepseek V3 but there should be a better -// implementation to manage shared memory. template -__global__ void moe_align_block_size_global_mem_kernel( - scalar_t* __restrict__ topk_ids, int32_t* sorted_token_ids, - int32_t* expert_ids, int32_t* total_tokens_post_pad, int32_t num_experts, - int32_t block_size, size_t numel, int32_t* tokens_cnts, int32_t* cumsum) { - const size_t tokens_per_thread = CEILDIV(numel, blockDim.x); - const size_t start_idx = threadIdx.x * tokens_per_thread; +__global__ void moe_align_block_size_kernel( + const scalar_t* __restrict__ topk_ids, + int32_t* __restrict__ sorted_token_ids, int32_t* __restrict__ expert_ids, + int32_t* __restrict__ total_tokens_post_pad, int32_t num_experts, + int32_t padded_num_experts, int32_t experts_per_warp, int32_t block_size, + size_t numel, int32_t* __restrict__ cumsum) { + extern __shared__ int32_t shared_counts[]; - for (int i = 0; i < num_experts; ++i) { - tokens_cnts[index(num_experts, threadIdx.x + 1, i)] = 0; - } - - /** - * In the first step we compute token_cnts[thread_index + 1][expert_index], - * which counts how many tokens in the token shard of thread_index are - * assigned to expert expert_index. - */ - for (int i = start_idx; i < numel && i < start_idx + tokens_per_thread; ++i) { - ++tokens_cnts[index(num_experts, threadIdx.x + 1, topk_ids[i])]; - } - - __syncthreads(); - - // For each expert we accumulate the token counts from the different threads. - if (threadIdx.x < num_experts) { - tokens_cnts[index(num_experts, 0, threadIdx.x)] = 0; - for (int i = 1; i <= blockDim.x; ++i) { - tokens_cnts[index(num_experts, i, threadIdx.x)] += - tokens_cnts[index(num_experts, i - 1, threadIdx.x)]; - } - } - - __syncthreads(); - - // We accumulate the token counts of all experts in thread 0. - if (threadIdx.x == 0) { - cumsum[0] = 0; - for (int i = 1; i <= num_experts; ++i) { - cumsum[i] = cumsum[i - 1] + - CEILDIV(tokens_cnts[index(num_experts, blockDim.x, i - 1)], - block_size) * - block_size; - } - *total_tokens_post_pad = cumsum[num_experts]; - } - - __syncthreads(); - - /** - * For each expert, each thread processes the tokens of the corresponding - * blocks and stores the corresponding expert_id for each block. - */ - if (threadIdx.x < num_experts) { - for (int i = cumsum[threadIdx.x]; i < cumsum[threadIdx.x + 1]; - i += block_size) { - expert_ids[i / block_size] = threadIdx.x; - } - } - - /** - * Each thread processes a token shard, calculating the index of each token - * after sorting by expert number. Given the example topk_ids = - * [0,1,2,1,2,3,0,3,4] and block_size = 4, then the output would be [0, 6, *, - * *, 1, 3, *, *, 2, 4, *, *, 5, 7, *, *, 8, *, *, *], where * represents a - * padding value(preset in python). - */ - for (int i = start_idx; i < numel && i < start_idx + tokens_per_thread; ++i) { - int32_t expert_id = topk_ids[i]; - /** The cumsum[expert_id] stores the starting index of the tokens that the - * expert with expert_id needs to process, and - * tokens_cnts[threadIdx.x][expert_id] stores the indices of the tokens - * processed by the expert with expert_id within the current thread's token - * shard. - */ - int32_t rank_post_pad = - tokens_cnts[index(num_experts, threadIdx.x, expert_id)] + - cumsum[expert_id]; - sorted_token_ids[rank_post_pad] = i; - ++tokens_cnts[index(num_experts, threadIdx.x, expert_id)]; - } -} - -// taken from -// https://github.com/sgl-project/sglang/commit/cdae77b03dfc6fec3863630550b45bbfc789f957 -template -__global__ void sgl_moe_align_block_size_kernel( - scalar_t* __restrict__ topk_ids, int32_t* sorted_token_ids, - int32_t* expert_ids, int32_t* total_tokens_post_pad, int32_t num_experts, - int32_t block_size, size_t numel, int32_t* cumsum) { - __shared__ int32_t shared_counts[32][8]; - - const int warp_id = threadIdx.x / 32; - const int experts_per_warp = 8; + const int warp_id = threadIdx.x / WARP_SIZE; const int my_expert_start = warp_id * experts_per_warp; - // Initialize shared_counts for this warp's experts for (int i = 0; i < experts_per_warp; ++i) { - if (my_expert_start + i < num_experts) { - shared_counts[warp_id][i] = 0; + if (my_expert_start + i < padded_num_experts) { + shared_counts[warp_id * experts_per_warp + i] = 0; } } __syncthreads(); - const size_t tokens_per_thread = CEILDIV(numel, blockDim.x); - const size_t start_idx = threadIdx.x * tokens_per_thread; + const size_t tid = threadIdx.x; + const size_t stride = blockDim.x; - for (int i = start_idx; i < numel && i < start_idx + tokens_per_thread; ++i) { + for (size_t i = tid; i < numel; i += stride) { int expert_id = topk_ids[i]; int warp_idx = expert_id / experts_per_warp; int expert_offset = expert_id % experts_per_warp; - atomicAdd(&shared_counts[warp_idx][expert_offset], 1); + atomicAdd(&shared_counts[warp_idx * experts_per_warp + expert_offset], 1); } __syncthreads(); - // Single thread computes cumulative sum and total tokens if (threadIdx.x == 0) { cumsum[0] = 0; for (int i = 1; i <= num_experts; ++i) { int expert_count = 0; int warp_idx = (i - 1) / experts_per_warp; int expert_offset = (i - 1) % experts_per_warp; - expert_count = shared_counts[warp_idx][expert_offset]; + expert_count = shared_counts[warp_idx * experts_per_warp + expert_offset]; cumsum[i] = cumsum[i - 1] + CEILDIV(expert_count, block_size) * block_size; @@ -248,7 +61,6 @@ __global__ void sgl_moe_align_block_size_kernel( __syncthreads(); - // Assign expert IDs to blocks if (threadIdx.x < num_experts) { for (int i = cumsum[threadIdx.x]; i < cumsum[threadIdx.x + 1]; i += block_size) { @@ -257,13 +69,11 @@ __global__ void sgl_moe_align_block_size_kernel( } } -// taken from -// https://github.com/sgl-project/sglang/commit/cdae77b03dfc6fec3863630550b45bbfc789f957 template -__global__ void sgl_moe_token_sort_kernel(scalar_t* __restrict__ topk_ids, - int32_t* sorted_token_ids, - int32_t* cumsum_buffer, - size_t numel) { +__global__ void count_and_sort_expert_tokens_kernel( + const scalar_t* __restrict__ topk_ids, + int32_t* __restrict__ sorted_token_ids, int32_t* __restrict__ cumsum_buffer, + size_t numel) { const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; const size_t stride = blockDim.x * gridDim.x; @@ -290,132 +100,138 @@ __global__ void moe_sum_kernel( } } +template +__global__ void moe_align_block_size_small_batch_expert_kernel( + const scalar_t* __restrict__ topk_ids, + int32_t* __restrict__ sorted_token_ids, int32_t* __restrict__ expert_ids, + int32_t* __restrict__ total_tokens_post_pad, int32_t num_experts, + int32_t block_size, size_t numel) { + const size_t tid = threadIdx.x; + const size_t stride = blockDim.x; + + extern __shared__ int32_t shared_mem[]; + int32_t* cumsum = shared_mem; + int32_t* tokens_cnts = (int32_t*)(shared_mem + num_experts + 1); + + for (int i = 0; i < num_experts; ++i) { + tokens_cnts[(threadIdx.x + 1) * num_experts + i] = 0; + } + + for (size_t i = tid; i < numel; i += stride) { + ++tokens_cnts[(threadIdx.x + 1) * num_experts + topk_ids[i]]; + } + + __syncthreads(); + + if (threadIdx.x < num_experts) { + tokens_cnts[threadIdx.x] = 0; + for (int i = 1; i <= blockDim.x; ++i) { + tokens_cnts[i * num_experts + threadIdx.x] += + tokens_cnts[(i - 1) * num_experts + threadIdx.x]; + } + } + + __syncthreads(); + + if (threadIdx.x == 0) { + cumsum[0] = 0; + for (int i = 1; i <= num_experts; ++i) { + cumsum[i] = + cumsum[i - 1] + + CEILDIV(tokens_cnts[blockDim.x * num_experts + i - 1], block_size) * + block_size; + } + *total_tokens_post_pad = static_cast(cumsum[num_experts]); + } + + __syncthreads(); + + if (threadIdx.x < num_experts) { + for (int i = cumsum[threadIdx.x]; i < cumsum[threadIdx.x + 1]; + i += block_size) { + expert_ids[i / block_size] = threadIdx.x; + } + } + + for (size_t i = tid; i < numel; i += stride) { + int32_t expert_id = topk_ids[i]; + int32_t rank_post_pad = + tokens_cnts[threadIdx.x * num_experts + expert_id] + cumsum[expert_id]; + sorted_token_ids[rank_post_pad] = i; + ++tokens_cnts[threadIdx.x * num_experts + expert_id]; + } +} + } // namespace moe } // namespace vllm +// taken from +// https://github.com/sgl-project/sglang/blob/8b5f83ed3b7d2a49ad5c5cd5aa61c5d502f47dbc void moe_align_block_size(torch::Tensor topk_ids, int64_t num_experts, int64_t block_size, torch::Tensor sorted_token_ids, torch::Tensor experts_ids, torch::Tensor num_tokens_post_pad) { const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - int device_max_shared_mem; - auto dev = topk_ids.get_device(); - cudaDeviceGetAttribute(&device_max_shared_mem, - cudaDevAttrMaxSharedMemoryPerBlockOptin, dev); - - const int32_t num_thread = max((int32_t)num_experts, WARP_SIZE); - const int32_t shared_mem_i32 = - ((num_thread + 1) * num_experts + (num_experts + 1)) * sizeof(int32_t); - const int32_t shared_mem_i16 = - ((num_thread + 1) * num_experts) * sizeof(uint16_t) + - (num_experts + 1) * sizeof(int32_t); - - bool use_global_memory = false; - bool use_i16 = false; // Use uint16_t for shared memory token counts - if (shared_mem_i32 < device_max_shared_mem) { - // Do nothing in this case. We're all set to use int32_t token counts - } else if (shared_mem_i16 < device_max_shared_mem && - topk_ids.numel() <= 65535) { - // when nelements of topk_ids is smaller than 65535 (max value of uint16), - // element value of token_cnts would also smaller than 65535, - // so we can use uint16 as dtype of token_cnts - use_i16 = true; - } else { - use_global_memory = true; - } - - if (use_global_memory) { - VLLM_DISPATCH_INTEGRAL_AND_UNSIGNED_TYPES( - topk_ids.scalar_type(), "moe_align_block_size_global_mem_kernel", [&] { - // calc needed amount of shared mem for `tokens_cnts` and `cumsum` - // tensors - const int32_t num_thread = max((int32_t)num_experts, WARP_SIZE); - - auto options_int = torch::TensorOptions() - .dtype(torch::kInt) - .device(topk_ids.device()); - torch::Tensor token_cnts_buffer = - torch::empty({(num_experts + 1) * num_experts}, options_int); - torch::Tensor cumsum_buffer = - torch::empty({num_experts + 1}, options_int); - - auto kernel = - vllm::moe::moe_align_block_size_global_mem_kernel; - kernel<<<1, num_thread, 0, stream>>>( - topk_ids.data_ptr(), - sorted_token_ids.data_ptr(), - experts_ids.data_ptr(), - num_tokens_post_pad.data_ptr(), num_experts, block_size, - topk_ids.numel(), token_cnts_buffer.data_ptr(), - cumsum_buffer.data_ptr()); - }); - } else if (use_i16) { - VLLM_DISPATCH_INTEGRAL_AND_UNSIGNED_TYPES( - topk_ids.scalar_type(), "moe_align_block_size_kernel", [&] { - // set dynamic shared mem - auto kernel = - vllm::moe::moe_align_block_size_kernel; - AT_CUDA_CHECK(VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize( - (void*)kernel, shared_mem_i16)); - kernel<<<1, num_thread, shared_mem_i16, stream>>>( - topk_ids.data_ptr(), - sorted_token_ids.data_ptr(), - experts_ids.data_ptr(), - num_tokens_post_pad.data_ptr(), num_experts, block_size, - topk_ids.numel()); - }); - } else { - VLLM_DISPATCH_INTEGRAL_AND_UNSIGNED_TYPES( - topk_ids.scalar_type(), "moe_align_block_size_kernel", [&] { - auto kernel = - vllm::moe::moe_align_block_size_kernel; - AT_CUDA_CHECK(VLLM_DevFuncAttribute_SET_MaxDynamicSharedMemorySize( - (void*)kernel, shared_mem_i32)); - kernel<<<1, num_thread, shared_mem_i32, stream>>>( - topk_ids.data_ptr(), - sorted_token_ids.data_ptr(), - experts_ids.data_ptr(), - num_tokens_post_pad.data_ptr(), num_experts, block_size, - topk_ids.numel()); - }); - } -} - -void sgl_moe_align_block_size(torch::Tensor topk_ids, int64_t num_experts, - int64_t block_size, - torch::Tensor sorted_token_ids, - torch::Tensor experts_ids, - torch::Tensor num_tokens_post_pad) { - const cudaStream_t stream = at::cuda::getCurrentCUDAStream(); - TORCH_CHECK(num_experts == 256, - "sgl_moe_align_block_size kernel only supports deepseek v3."); + int64_t padded_num_experts = + ((num_experts + WARP_SIZE - 1) / WARP_SIZE) * WARP_SIZE; + int experts_per_warp = WARP_SIZE; + int threads = 1024; + threads = ((threads + WARP_SIZE - 1) / WARP_SIZE) * WARP_SIZE; VLLM_DISPATCH_INTEGRAL_AND_UNSIGNED_TYPES( - topk_ids.scalar_type(), "sgl_moe_align_block_size_kernel", [&] { + topk_ids.scalar_type(), "moe_align_block_size_kernel", [&] { // calc needed amount of shared mem for `cumsum` tensors auto options_int = torch::TensorOptions().dtype(torch::kInt).device(topk_ids.device()); torch::Tensor cumsum_buffer = torch::zeros({num_experts + 1}, options_int); + bool small_batch_expert_mode = + (topk_ids.numel() < 1024) && (num_experts <= 64); - auto align_kernel = - vllm::moe::sgl_moe_align_block_size_kernel; - align_kernel<<<1, 1024, 0, stream>>>( - topk_ids.data_ptr(), sorted_token_ids.data_ptr(), - experts_ids.data_ptr(), - num_tokens_post_pad.data_ptr(), num_experts, block_size, - topk_ids.numel(), cumsum_buffer.data_ptr()); + if (small_batch_expert_mode) { + const int32_t threads = max((int32_t)num_experts, WARP_SIZE); + const int32_t shared_mem_size = + ((threads + 1) * num_experts + (num_experts + 1)) * + sizeof(int32_t); - const int block_threads = 256; - const int num_blocks = - (topk_ids.numel() + block_threads - 1) / block_threads; - const int max_blocks = 65535; - const int actual_blocks = std::min(num_blocks, max_blocks); - auto sort_kernel = vllm::moe::sgl_moe_token_sort_kernel; - sort_kernel<<>>( - topk_ids.data_ptr(), sorted_token_ids.data_ptr(), - cumsum_buffer.data_ptr(), topk_ids.numel()); + auto small_batch_expert_kernel = + vllm::moe::moe_align_block_size_small_batch_expert_kernel< + scalar_t>; + small_batch_expert_kernel<<<1, threads, shared_mem_size, stream>>>( + topk_ids.data_ptr(), + sorted_token_ids.data_ptr(), + experts_ids.data_ptr(), + num_tokens_post_pad.data_ptr(), num_experts, block_size, + topk_ids.numel()); + } else { + auto align_kernel = vllm::moe::moe_align_block_size_kernel; + + size_t num_warps = CEILDIV(padded_num_experts, experts_per_warp); + size_t shared_mem_size = + num_warps * experts_per_warp * sizeof(int32_t); + + align_kernel<<<1, threads, shared_mem_size, stream>>>( + topk_ids.data_ptr(), + sorted_token_ids.data_ptr(), + experts_ids.data_ptr(), + num_tokens_post_pad.data_ptr(), num_experts, + padded_num_experts, experts_per_warp, block_size, + topk_ids.numel(), cumsum_buffer.data_ptr()); + + const int block_threads = std::min(256, (int)threads); + const int num_blocks = + (topk_ids.numel() + block_threads - 1) / block_threads; + const int max_blocks = 65535; + const int actual_blocks = std::min(num_blocks, max_blocks); + + auto sort_kernel = + vllm::moe::count_and_sort_expert_tokens_kernel; + sort_kernel<<>>( + topk_ids.data_ptr(), + sorted_token_ids.data_ptr(), + cumsum_buffer.data_ptr(), topk_ids.numel()); + } }); } diff --git a/csrc/moe/moe_ops.h b/csrc/moe/moe_ops.h index c4faef731060a..661730c96867e 100644 --- a/csrc/moe/moe_ops.h +++ b/csrc/moe/moe_ops.h @@ -12,12 +12,6 @@ void moe_align_block_size(torch::Tensor topk_ids, int64_t num_experts, int64_t block_size, torch::Tensor sorted_token_ids, torch::Tensor experts_ids, torch::Tensor num_tokens_post_pad); - -void sgl_moe_align_block_size(torch::Tensor topk_ids, int64_t num_experts, - int64_t block_size, - torch::Tensor sorted_token_ids, - torch::Tensor experts_ids, - torch::Tensor num_tokens_post_pad); #ifndef USE_ROCM torch::Tensor moe_wna16_gemm(torch::Tensor input, torch::Tensor output, torch::Tensor b_qweight, torch::Tensor b_scales, diff --git a/csrc/moe/torch_bindings.cpp b/csrc/moe/torch_bindings.cpp index d6ef4940b6c31..97df311d04409 100644 --- a/csrc/moe/torch_bindings.cpp +++ b/csrc/moe/torch_bindings.cpp @@ -22,15 +22,6 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, m) { " Tensor! num_tokens_post_pad) -> ()"); m.impl("moe_align_block_size", torch::kCUDA, &moe_align_block_size); - // temporarily adapted from - // https://github.com/sgl-project/sglang/commit/ded9fcd09a43d5e7d5bb31a2bc3e9fc21bf65d2a - m.def( - "sgl_moe_align_block_size(Tensor topk_ids, int num_experts," - " int block_size, Tensor! sorted_token_ids," - " Tensor! experts_ids," - " Tensor! num_tokens_post_pad) -> ()"); - m.impl("sgl_moe_align_block_size", torch::kCUDA, &sgl_moe_align_block_size); - #ifndef USE_ROCM m.def( "moe_wna16_gemm(Tensor input, Tensor! output, Tensor b_qweight, " diff --git a/tests/kernels/moe/test_moe_align_block_size.py b/tests/kernels/moe/test_moe_align_block_size.py new file mode 100644 index 0000000000000..e980422a7b97f --- /dev/null +++ b/tests/kernels/moe/test_moe_align_block_size.py @@ -0,0 +1,90 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import itertools + +import pytest +import torch + +from vllm import _custom_ops as ops +from vllm.model_executor.layers.fused_moe.moe_align_block_size import ( + moe_align_block_size_triton) + + +@pytest.mark.parametrize( + "block_size,num_tokens,topk,num_experts", + list( + itertools.product( + [32, 64, 128, 256], # block_size + [ + 1, + 3, + 7, + 16, + 256, + 2256, + 4096, + ], # num_tokens + [1, 4, 16, 64], # topk + [64, 160, 256, 257, 260, 264], # num_experts + )), +) +def test_moe_align_block_size_compare_implementations(block_size, num_tokens, + topk, num_experts): + topk_ids = torch.stack([ + torch.randperm(num_experts, dtype=torch.int32, device="cuda")[:topk] + for _ in range(num_tokens) + ]) + + max_num_tokens_padded = topk_ids.numel() + num_experts * (block_size - 1) + + sorted_ids_cuda = torch.empty((max_num_tokens_padded, ), + dtype=torch.int32, + device=topk_ids.device) + sorted_ids_cuda.fill_(topk_ids.numel()) + max_num_m_blocks = max_num_tokens_padded // block_size + expert_ids_cuda = torch.zeros((max_num_m_blocks, ), + dtype=torch.int32, + device=topk_ids.device) + num_tokens_post_pad_cuda = torch.empty((1), + dtype=torch.int32, + device=topk_ids.device) + + sorted_ids_triton = torch.empty_like(sorted_ids_cuda) + sorted_ids_triton.fill_(topk_ids.numel()) + expert_ids_triton = torch.zeros_like(expert_ids_cuda) + num_tokens_post_pad_triton = torch.empty_like(num_tokens_post_pad_cuda) + + ops.moe_align_block_size( + topk_ids, + num_experts, + block_size, + sorted_ids_cuda, + expert_ids_cuda, + num_tokens_post_pad_cuda, + ) + + moe_align_block_size_triton( + topk_ids, + num_experts, + block_size, + sorted_ids_triton, + expert_ids_triton, + num_tokens_post_pad_triton, + ) + + assert torch.allclose(expert_ids_cuda, expert_ids_triton), ( + f"Expert IDs mismatch for block_size={block_size}, " + f"num_tokens={num_tokens}, topk={topk}\n" + f"CUDA expert_ids: {expert_ids_cuda}\n" + f"Triton expert_ids: {expert_ids_triton}") + + assert torch.allclose( + num_tokens_post_pad_cuda, num_tokens_post_pad_triton), ( + f"Num tokens post pad mismatch for block_size={block_size}, " + f"num_tokens={num_tokens}, topk={topk}\n" + f"CUDA num_tokens_post_pad: {num_tokens_post_pad_cuda}\n" + f"Triton num_tokens_post_pad: {num_tokens_post_pad_triton}") + + +if __name__ == "__main__": + pytest.main([__file__]) diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index ff992c33b3092..b16fef8714193 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -1524,15 +1524,6 @@ def moe_align_block_size(topk_ids: torch.Tensor, num_experts: int, num_tokens_post_pad) -def sgl_moe_align_block_size(topk_ids: torch.Tensor, num_experts: int, - block_size: int, sorted_token_ids: torch.Tensor, - experts_ids: torch.Tensor, - num_tokens_post_pad: torch.Tensor) -> None: - torch.ops._moe_C.sgl_moe_align_block_size(topk_ids, num_experts, - block_size, sorted_token_ids, - experts_ids, num_tokens_post_pad) - - def moe_wna16_gemm(input: torch.Tensor, output: torch.Tensor, b_qweight: torch.Tensor, b_scales: torch.Tensor, b_qzeros: Optional[torch.Tensor], diff --git a/vllm/model_executor/layers/fused_moe/moe_align_block_size.py b/vllm/model_executor/layers/fused_moe/moe_align_block_size.py index 9d990959e01fa..f9451ca2fde45 100644 --- a/vllm/model_executor/layers/fused_moe/moe_align_block_size.py +++ b/vllm/model_executor/layers/fused_moe/moe_align_block_size.py @@ -4,7 +4,6 @@ from typing import Optional import torch -import vllm.envs as envs from vllm import _custom_ops as ops from vllm.triton_utils import tl, triton from vllm.utils import round_up @@ -99,6 +98,7 @@ def moe_align_block_size_stage4( # Triton implementation based on: # https://github.com/sgl-project/sglang/commit/ba5112ff691d791a9e38c6c71f59324a5fcb49d0 +# TODO(wentao): Deprecated this function in the future. def moe_align_block_size_triton( topk_ids: torch.Tensor, num_experts: int, @@ -220,29 +220,9 @@ def moe_align_block_size( num_tokens_post_pad = torch.empty((1), dtype=torch.int32, device=topk_ids.device) - if num_experts >= 224: - if envs.VLLM_ENABLE_MOE_ALIGN_BLOCK_SIZE_TRITON or num_experts != 256: - moe_align_block_size_triton( - topk_ids, - num_experts, - block_size, - sorted_ids, - expert_ids, - num_tokens_post_pad, - ) - else: - # Currently requires num_experts=256 - ops.sgl_moe_align_block_size( - topk_ids, - num_experts, - block_size, - sorted_ids, - expert_ids, - num_tokens_post_pad, - ) - else: - ops.moe_align_block_size(topk_ids, num_experts, block_size, sorted_ids, - expert_ids, num_tokens_post_pad) + + ops.moe_align_block_size(topk_ids, num_experts, block_size, sorted_ids, + expert_ids, num_tokens_post_pad) if expert_map is not None: expert_ids = expert_map[expert_ids] From bf57ccc5c2102a77d70431b2fe47e6fde4acc600 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Wed, 18 Jun 2025 03:49:39 +0900 Subject: [PATCH 004/211] Remove sm120 arch from sm100 cutlass kernel arch list (#19716) Signed-off-by: mgoin --- CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index d75f0d3212476..402131b7a1e7a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -420,9 +420,9 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") endif() endif() - # The cutlass_scaled_mm kernels for Blackwell (c3x, i.e. CUTLASS 3.x) require - # CUDA 12.8 or later - cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0a;10.1a;12.0a" "${CUDA_ARCHS}") + # The cutlass_scaled_mm kernels for Blackwell SM100 (c3x, i.e. CUTLASS 3.x) + # require CUDA 12.8 or later + cuda_archs_loose_intersection(SCALED_MM_ARCHS "10.0a;10.1a" "${CUDA_ARCHS}") if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER 12.8 AND SCALED_MM_ARCHS) set(SRCS "csrc/quantization/cutlass_w8a8/scaled_mm_c3x_sm100.cu" From cda92307c145e7722cdc33e6d26e105eeb22b882 Mon Sep 17 00:00:00 2001 From: Jiayi Yao <82156730+YaoJiayi@users.noreply.github.com> Date: Tue, 17 Jun 2025 12:57:54 -0700 Subject: [PATCH 005/211] [Misc] Update lmcache connector with the latest connector apis (#19441) Signed-off-by: YaoJiayi <120040070@link.cuhk.edu.cn> --- .../kv_connector/v1/lmcache_connector.py | 35 ++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py index cc1f4ba356428..e838ac2499c04 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/lmcache_connector.py @@ -1,6 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Optional import torch from lmcache.integration.vllm.vllm_v1_adapter import LMCacheConnectorV1Impl @@ -87,6 +87,22 @@ class LMCacheConnectorV1(KVConnectorBase_V1): """ self._lmcache_engine.wait_for_save() + def get_finished( + self, finished_req_ids: set[str] + ) -> tuple[Optional[set[str]], Optional[set[str]]]: + """ + Notifies worker-side connector ids of requests that have + finished generating tokens. + + Returns: + ids of requests that have finished asynchronous transfer + (requests that previously returned True from request_finished()), + tuple of (sending/saving ids, recving/loading ids). + The finished saves/sends req ids must belong to a set provided in a + call to this method (this call or a prior one). + """ + return self._lmcache_engine.get_finished(finished_req_ids) + # ============================== # Scheduler-side methods # ============================== @@ -132,3 +148,20 @@ class LMCacheConnectorV1(KVConnectorBase_V1): scheduler_output (SchedulerOutput): the scheduler output object. """ return self._lmcache_engine.build_connector_meta(scheduler_output) + + def request_finished( + self, + request: "Request", + block_ids: list[int], + ) -> tuple[bool, Optional[dict[str, Any]]]: + """ + Called when a request has finished, before its blocks are freed. + + Returns: + True if the request is being saved/sent asynchronously and blocks + should not be freed until the request_id is returned from + get_finished(). + Optional KVTransferParams to be included in the request outputs + returned by the engine. + """ + return self._lmcache_engine.request_finished(request, block_ids) From b447624ee399019740626f6217a05c00178ed17d Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Wed, 18 Jun 2025 05:59:29 +0900 Subject: [PATCH 006/211] [Bugfix] Fix faulty triton importing logic when using Ray for DP (#19734) Signed-off-by: mgoin --- vllm/triton_utils/importing.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/vllm/triton_utils/importing.py b/vllm/triton_utils/importing.py index a003e4eb02c07..21beb76f37cc7 100644 --- a/vllm/triton_utils/importing.py +++ b/vllm/triton_utils/importing.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import os import types from importlib.util import find_spec @@ -23,7 +24,22 @@ if HAS_TRITON: x.driver for x in backends.values() if x.driver and x.driver.is_active() ] - if len(active_drivers) != 1: + + # Check if we're in a distributed environment where CUDA_VISIBLE_DEVICES + # might be temporarily empty (e.g., Ray sets it to "" during actor init) + cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES") + is_distributed_env = (cuda_visible_devices is not None + and len(cuda_visible_devices.strip()) == 0) + + # Apply lenient driver check for distributed environments + if is_distributed_env and len(active_drivers) == 0: + # Allow 0 drivers in distributed environments - they may become + # active later when CUDA context is properly initialized + logger.debug( + "Triton found 0 active drivers in distributed environment. " + "This is expected during initialization.") + elif not is_distributed_env and len(active_drivers) != 1: + # Strict check for non-distributed environments logger.info( "Triton is installed but %d active driver(s) found " "(expected 1). Disabling Triton to prevent runtime errors.", From a44b1c951df919bd6a42cb7e13104106619723ff Mon Sep 17 00:00:00 2001 From: Charlie Fu Date: Tue, 17 Jun 2025 16:03:06 -0500 Subject: [PATCH 007/211] [Feature][ROCm] Add full graph capture support for TritonAttentionBackend (#19158) Signed-off-by: charlifu --- .../compile/piecewise/test_full_cudagraph.py | 1 + vllm/platforms/rocm.py | 5 +- vllm/v1/attention/backends/flash_attn.py | 172 +----------------- vllm/v1/attention/backends/triton_attn.py | 166 ++++++++++++++++- vllm/v1/attention/backends/utils.py | 168 +++++++++++++++++ 5 files changed, 334 insertions(+), 178 deletions(-) diff --git a/tests/compile/piecewise/test_full_cudagraph.py b/tests/compile/piecewise/test_full_cudagraph.py index c1f5d9658af16..efe9c843f144c 100644 --- a/tests/compile/piecewise/test_full_cudagraph.py +++ b/tests/compile/piecewise/test_full_cudagraph.py @@ -147,6 +147,7 @@ def test_lower_max_num_seqs(model, supported): llm.generate(["Hello, my name is"] * 10) +@pytest.mark.skipif(not current_platform.is_cuda(), reason="Skip if not cuda") def test_full_cudagraph_with_invalid_backend(): with temporary_environ({ "VLLM_USE_V1": "1", diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index a929366db49cc..445b24d72f078 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -141,7 +141,8 @@ def use_rocm_custom_paged_attention( and (head_size == 64 or head_size == 128) and (block_size == 16 or block_size == 32) and (gqa_ratio >= 1 and gqa_ratio <= 16) - and max_seq_len <= 32768 and (envs.VLLM_ROCM_CUSTOM_PAGED_ATTN) + and max_seq_len <= 128 * 1024 + and (envs.VLLM_ROCM_CUSTOM_PAGED_ATTN) and not (envs.VLLM_ROCM_USE_AITER_PAGED_ATTN and envs.VLLM_ROCM_USE_AITER)) @@ -151,7 +152,7 @@ def use_rocm_custom_paged_attention( and (qtype == torch.half or qtype == torch.bfloat16) and head_size == 128 and block_size == 16 and (gqa_ratio >= 3 and gqa_ratio <= 16) - and max_seq_len <= 32768 and alibi_slopes is None + and max_seq_len <= 128 * 1024 and alibi_slopes is None and kv_cache_dtype == "auto" and envs.VLLM_ROCM_CUSTOM_PAGED_ATTN) diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index 43a664476aaae..4ad7178374b1a 100755 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -19,9 +19,9 @@ from vllm.config import VllmConfig, get_layers_from_vllm_config from vllm.logger import init_logger from vllm.platforms import current_platform from vllm.utils import cdiv -from vllm.v1.attention.backends.utils import (AttentionMetadataBuilder, - CommonAttentionMetadata, - get_kv_cache_layout) +from vllm.v1.attention.backends.utils import ( + AttentionMetadataBuilder, CommonAttentionMetadata, get_kv_cache_layout, + make_local_attention_virtual_batches) from vllm.v1.kv_cache_interface import AttentionSpec from vllm.v1.worker.block_table import BlockTable @@ -126,172 +126,6 @@ class FlashAttentionMetadata: local_attn_metadata: Optional[LocalAttentionMetadata] = None -# -# Take in `query_start_loc_np` and `seq_lens_np` and break the sequences into -# local attention blocks, where each block is passed to the attention kernel -# as an independent local ("virtual") batch item. -# -# For example, if are performing a chunked prefill a batch of 3 sequences: -# q_seqlens = [4, 10, 5] -# kv_seqlens = [6, 17, 9] -# Then normally for regular attention we would compute with an attention mask -# for batch idx 0 (q_seqlens = 4, kv_seqlens = 6) like: -# batch idx: 0 (q_seqlens = 4, kv_seqlens = 6) -# k_toks > 0 1 2 3 4 5 -# q_toks v _____________ -# 0 | 1 1 1 -# 1 | 1 1 1 1 -# 2 | 1 1 1 1 1 -# 3 | 1 1 1 1 1 1 -# -# for local attention (with attn_chunk_size = 4) we would compute with an -# attention mask like: -# batch idx: 0 (q_seqlens = 4, kv_seqlens = 6, attn_chunk_size = 4) -# k_toks > 0 1 2 3 4 5 -# q_toks v _____________ -# 0 | 1 1 1 -# 1 | 1 1 1 1 -# 2 | 1 -# 3 | 1 1 -# -# We can simulate this mask using standard flash-attention by breaking the -# sequences into local ("virtual") batches, where each local batch item is a -# local attention block, so in this case batch idx 0 would be broken up into: -# -# local-batch idx: 0 (q_seqlens = 2, kv_seqlens = 4) (batch 0) -# k_toks > 0 1 2 3 -# q_toks v _____________ -# 0 | 1 1 1 -# 1 | 1 1 1 1 -# local-batch idx: 1 (q_seqlens = 2, kv_seqlens = 2) (batch 0) -# k_toks > 4 5 -# q_toks v _____________ -# 2 | 1 -# 3 | 1 1 -# -# e.g. if we have: -# attn_chunk_size = 4 -# query_start_loc_np = [0, 4, 14, 19] (q_seqlens = [4, 10, 5]) -# Then this function would return: -# __b0__ ______b1______ __b2__ < orig batch indices -# q_seqlens_local = [ 2, 2, 1, 4, 4, 1, 4, 1] -# cu_seqlens_q_local = [0, 4, 6, 10, 14, 18, 19, 23, 24] -# seqlens_k_local = [ 4, 2, 4, 4, 4, 1, 4, 1] -# block_table_local : shape[local_virtual_batches, pages_per_local_batch] -def make_local_attention_virtual_batches( - attn_chunk_size: int, - query_start_loc_np: np.ndarray, - seq_lens_np: np.ndarray, - block_table: torch.Tensor, - block_size: int = 0, -) -> tuple[np.ndarray, np.ndarray, np.ndarray, torch.Tensor]: - q_seqlens = query_start_loc_np[1:] - query_start_loc_np[:-1] - actual_batch_size = seq_lens_np.shape[0] - - # Handle if we are starting in the middle of a local attention block, - # we assume q_seqlens > 0 (for all elements), for each batch idx we compute - # the number of tokens that are not in the first local attention block and - # then we can simply use a cdiv for the rest. - # For example if we have: - # attn_chunk_size = 4 - # q_seqlens = [4, 10, 5] - # k_seqlens = [6, 17, 9] - # Then we would get: - # new_tokens_in_first_block = [2, 1, 4] - # local_blocks = [2, 4, 2] - q_tokens_in_first_block = np.minimum( - attn_chunk_size - ((seq_lens_np - q_seqlens) % attn_chunk_size), - q_seqlens).astype(np.int32) - tokens_in_last_block = attn_chunk_size + (seq_lens_np % -attn_chunk_size) - local_blocks = 1 + cdiv(q_seqlens - q_tokens_in_first_block, - attn_chunk_size) - - # Once we know the number of local blocks we can compute the request spans - # for each batch idx, we can figure out the number of "virtual" requests we - # have to make, - # For the above example we would get: - # seqlens_q_local = [2, 2, 1, 4, 4, 1, 4, 1] - # - # First Get batched arange. (E.g., [2, 4, 2] -> [0, 1, 0, 1, 2, 3, 0, 1]) - # (TODO: max a utility to share this code with _prepare_inputs) - # arange step 1. [2, 4, 2] -> [2, 6, 8] - cu_num_blocks = np.cumsum(local_blocks) - virtual_batches = cu_num_blocks[-1] - # arange step 2. [2, 6, 8] -> [0, 0, 2, 2, 2, 2, 6, 6] - block_offsets = np.repeat(cu_num_blocks - local_blocks, local_blocks) - # arange step 3. [0, 1, 0, 1, 2, 3, 0, 1] - arange = np.arange(virtual_batches, dtype=np.int32) - block_offsets - # also compute reverse arange (i.e. [1, 0, 3, 2, 1, 0, 1, 0]) - rarange = np.repeat(local_blocks, local_blocks) - arange - 1 - # Then we can compute the seqlens_q_local, handling the fact that the - # first and last blocks could be partial - seqlens_q_local = \ - np.repeat(q_seqlens - q_tokens_in_first_block, local_blocks) - # set the first block since this may be a partial block - seqlens_q_local[arange == 0] = q_tokens_in_first_block - # set the remaining blocks - seqlens_q_local[arange > 0] = np.minimum( - seqlens_q_local - attn_chunk_size * (arange - 1), - attn_chunk_size)[arange > 0] - - # convert from q_seqlens to cu_seqlens_q - cu_seqlens_q_local = np.pad(np.cumsum(seqlens_q_local), (1, 0))\ - .astype(np.int32) - - # compute the seqlens_k_local, - # basically a full local attention block for all but the last block in each - # batch - # For our example this will be: - # seqlens_k_local = [4, 2, 4, 4, 4, 1, 4, 1] - seqlens_k_local = np.full(cu_num_blocks[-1], - attn_chunk_size, - dtype=np.int32) - seqlens_k_local[cu_num_blocks - 1] = tokens_in_last_block - - k_seqstarts_absolute = np.repeat(seq_lens_np, local_blocks) - \ - (rarange * attn_chunk_size + \ - np.repeat(tokens_in_last_block, local_blocks)) - # For the example the local attention blocks start at: - # _b0_ _____b1_____ _b2_ - # k_seqstarts_absolute = [0, 4, 4, 8, 12, 16, 4, 8] - block_starts = k_seqstarts_absolute // block_size - assert attn_chunk_size % block_size == 0, \ - f"attn_chunk_size {attn_chunk_size} is not " \ - f"divisible by block_size {block_size}" - pages_per_local_batch = attn_chunk_size // block_size - - # Create a block_table for the local attention blocks - # For out example if we have a block-table like (assuming block_size=2): - # block_table = [ - # [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], < batch 0 - # [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], < batch 1 - # [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], < batch 2 - # ] - # Then for the local batches we would want a block-table like - # block_table_local = [ - # [ 0, 1 ], < local-batch 0, (batch 0, starting from k[0]) - # [ 2, 3 ], < local-batch 1, (batch 0, starting from k[4]) - # [ 12, 13 ], < local-batch 2, (batch 1, starting from k[4]) - # [ 14, 15 ], < local-batch 3, (batch 1, starting from k[8]) - # [ 16, 17 ], < local-batch 4, (batch 1, starting from k[12]) - # [ 18, 19 ], < local-batch 5, (batch 1, starting from k[16]) - # [ 22, 23 ], < local-batch 6, (batch 2, starting from k[4]) - # [ 24, 25 ], < local-batch 7, (batch 2, starting from k[8]) - # ] - block_indices= np.broadcast_to( - np.arange(pages_per_local_batch, dtype=np.int32), - (virtual_batches, pages_per_local_batch)) \ - + np.expand_dims(block_starts, axis=1) - block_indices = block_indices.flatten().clip(max=block_table.shape[1] - 1) - batch_indices = np.repeat(np.arange(actual_batch_size, dtype=np.int32), - local_blocks * pages_per_local_batch) - block_table_local = block_table[batch_indices, block_indices]\ - .view(virtual_batches, -1) - - return seqlens_q_local, cu_seqlens_q_local, seqlens_k_local, \ - block_table_local - - def _get_sliding_window_configs( vllm_config: VllmConfig) -> set[Optional[tuple[int, int]]]: """Get the set of all sliding window configs used in the model.""" diff --git a/vllm/v1/attention/backends/triton_attn.py b/vllm/v1/attention/backends/triton_attn.py index 9782ec087babb..ecb92bb1e4161 100644 --- a/vllm/v1/attention/backends/triton_attn.py +++ b/vllm/v1/attention/backends/triton_attn.py @@ -1,7 +1,8 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """Attention layer with PagedAttention and Triton prefix prefill.""" -from typing import TYPE_CHECKING, Any, Optional +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, ClassVar, Optional import torch @@ -15,8 +16,10 @@ from vllm.attention.ops.paged_attn import PagedAttention from vllm.attention.ops.triton_unified_attention import unified_attention from vllm.logger import init_logger from vllm.platforms import current_platform -from vllm.v1.attention.backends.flash_attn import ( - FlashAttentionMetadata, FlashAttentionMetadataBuilder) +from vllm.v1.attention.backends.flash_attn import FlashAttentionMetadata +from vllm.v1.attention.backends.utils import ( + AttentionMetadataBuilder, CommonAttentionMetadata, + make_local_attention_virtual_batches) from vllm.v1.kv_cache_interface import AttentionSpec from vllm.v1.worker.block_table import BlockTable @@ -26,12 +29,161 @@ if TYPE_CHECKING: logger = init_logger(__name__) -class TritonAttentionMetadataBuilder(FlashAttentionMetadataBuilder): +@dataclass +class TritonAttentionMetadata: + # NOTE(sang): Definition of context_len, query_len, and seq_len. + # |---------- N-1 iteration --------| + # |---------------- N iteration ---------------------| + # |- tokenA -|......................|-- newTokens ---| + # |---------- context_len ----------| + # |-------------------- seq_len ---------------------| + # |-- query_len ---| + + num_actual_tokens: int # Number of tokens excluding padding. + max_query_len: int + query_start_loc: torch.Tensor + max_seq_len: int + seq_lens: torch.Tensor + block_table: torch.Tensor + slot_mapping: torch.Tensor + + # For cascade attention. + use_cascade: bool + common_prefix_len: int + cu_prefix_query_lens: Optional[torch.Tensor] + prefix_kv_lens: Optional[torch.Tensor] + suffix_kv_lens: Optional[torch.Tensor] + + # Optional aot scheduling + scheduler_metadata: Optional[torch.Tensor] = None + prefix_scheduler_metadata: Optional[torch.Tensor] = None + + # for local attention + @dataclass + class LocalAttentionMetadata: + local_query_start_loc: torch.Tensor + local_seqused_k: torch.Tensor + local_block_table: torch.Tensor + local_max_query_len: int + local_max_seq_len: int + local_scheduler_metadata: Optional[torch.Tensor] + + local_attn_metadata: Optional[LocalAttentionMetadata] = None + + +class TritonAttentionMetadataBuilder( + AttentionMetadataBuilder[TritonAttentionMetadata]): + full_cudagraph_supported: ClassVar[bool] = True def __init__(self, runner: "GPUModelRunner", kv_cache_spec: AttentionSpec, block_table: BlockTable): - super().__init__(runner, kv_cache_spec, block_table) - self.aot_schedule = False + self.runner = runner + self.block_size = kv_cache_spec.block_size + self.kv_cache_spec = kv_cache_spec + self.block_table = block_table + + def build_for_cudagraph_capture( + self, common_attn_metadata: CommonAttentionMetadata + ) -> TritonAttentionMetadata: + attn_metadata = self.build(0, common_attn_metadata) + # When doing full graph capture, setting seq_lens to + # max_model_len will cause graph capture to be extremely + # slow, so here we set it to 1. + attn_metadata.seq_lens.fill_(1) + return attn_metadata + + def build( + self, common_prefix_len: int, + common_attn_metadata: CommonAttentionMetadata + ) -> TritonAttentionMetadata: + num_reqs = common_attn_metadata.num_reqs + num_actual_tokens = common_attn_metadata.num_actual_tokens + max_query_len = common_attn_metadata.max_query_len + + max_seq_len = int(self.runner.seq_lens_np[:num_reqs].max()) + query_start_loc = common_attn_metadata.query_start_loc + seq_lens = common_attn_metadata.seq_lens + block_table = self.block_table + block_table_tensor = block_table.get_device_tensor()[:num_reqs] + + block_table.slot_mapping[:num_actual_tokens].copy_( + block_table.slot_mapping_cpu[:num_actual_tokens], + non_blocking=True) + # Fill unused with -1. Needed for reshape_and_cache in full cuda graph + # mode. + block_table.slot_mapping[num_actual_tokens:].fill_(-1) + + slot_mapping = block_table.slot_mapping[:num_actual_tokens] + + # for local attention + local_attn_metadata = None + if self.runner.attention_chunk_size is not None: + seqlens_q_local_np, virt_q_cu_seqlens_np, virt_k_seqlens_np, \ + virt_block_table_tensor = make_local_attention_virtual_batches( + self.runner.attention_chunk_size, + self.runner.query_start_loc_np[:num_reqs + 1], + self.runner.seq_lens_np[:num_reqs], + block_table_tensor, + self.block_size, + ) + local_query_start_loc = torch.from_numpy(virt_q_cu_seqlens_np).to( + self.runner.device, non_blocking=True) + local_seqused_k = torch.from_numpy(virt_k_seqlens_np).to( + self.runner.device, non_blocking=True) + local_max_query_len = seqlens_q_local_np.max() + local_max_seq_len = virt_k_seqlens_np.max() + + local_attn_metadata = TritonAttentionMetadata \ + .LocalAttentionMetadata( + local_query_start_loc=local_query_start_loc, + local_seqused_k=local_seqused_k, + local_block_table=virt_block_table_tensor, + local_max_query_len=local_max_query_len, + local_max_seq_len=local_max_seq_len, + local_scheduler_metadata=None, + ) + + use_cascade = common_prefix_len > 0 + + if use_cascade: + cu_prefix_query_lens = torch.tensor([0, num_actual_tokens], + dtype=torch.int32, + device=self.runner.device) + prefix_kv_lens = torch.tensor([common_prefix_len], + dtype=torch.int32, + device=self.runner.device) + suffix_kv_lens = (self.runner.seq_lens_np[:num_reqs] - + common_prefix_len) + suffix_kv_lens = torch.from_numpy(suffix_kv_lens).to( + self.runner.device) + else: + cu_prefix_query_lens = None + prefix_kv_lens = None + suffix_kv_lens = None + prefix_scheduler_metadata = None + + attn_metadata = TritonAttentionMetadata( + num_actual_tokens=num_actual_tokens, + max_query_len=max_query_len, + query_start_loc=query_start_loc, + max_seq_len=max_seq_len, + seq_lens=seq_lens, + block_table=block_table_tensor, + slot_mapping=slot_mapping, + use_cascade=use_cascade, + common_prefix_len=common_prefix_len, + cu_prefix_query_lens=cu_prefix_query_lens, + prefix_kv_lens=prefix_kv_lens, + suffix_kv_lens=suffix_kv_lens, + local_attn_metadata=local_attn_metadata, + prefix_scheduler_metadata=prefix_scheduler_metadata, + ) + return attn_metadata + + def can_run_in_cudagraph( + self, common_attn_metadata: CommonAttentionMetadata) -> bool: + # Full CUDA Graph always supported + return True class TritonAttentionBackend(AttentionBackend): @@ -52,7 +204,7 @@ class TritonAttentionBackend(AttentionBackend): @staticmethod def get_metadata_cls() -> type["AttentionMetadata"]: - return FlashAttentionMetadata + return TritonAttentionMetadata @staticmethod def get_kv_cache_shape( diff --git a/vllm/v1/attention/backends/utils.py b/vllm/v1/attention/backends/utils.py index 82798afee32cb..8083f20026024 100644 --- a/vllm/v1/attention/backends/utils.py +++ b/vllm/v1/attention/backends/utils.py @@ -9,6 +9,8 @@ from typing import TYPE_CHECKING, ClassVar, Generic, TypeVar import numpy as np import torch +from vllm.utils import cdiv + if TYPE_CHECKING: from vllm.v1.core.sched.output import SchedulerOutput from vllm.v1.worker.gpu_input_batch import InputBatch @@ -140,3 +142,169 @@ def get_kv_cache_layout(): "detected. Setting KV cache layout to %s.", cache_layout) return cache_layout + + +# +# Take in `query_start_loc_np` and `seq_lens_np` and break the sequences into +# local attention blocks, where each block is passed to the attention kernel +# as an independent local ("virtual") batch item. +# +# For example, if are performing a chunked prefill a batch of 3 sequences: +# q_seqlens = [4, 10, 5] +# kv_seqlens = [6, 17, 9] +# Then normally for regular attention we would compute with an attention mask +# for batch idx 0 (q_seqlens = 4, kv_seqlens = 6) like: +# batch idx: 0 (q_seqlens = 4, kv_seqlens = 6) +# k_toks > 0 1 2 3 4 5 +# q_toks v _____________ +# 0 | 1 1 1 +# 1 | 1 1 1 1 +# 2 | 1 1 1 1 1 +# 3 | 1 1 1 1 1 1 +# +# for local attention (with attn_chunk_size = 4) we would compute with an +# attention mask like: +# batch idx: 0 (q_seqlens = 4, kv_seqlens = 6, attn_chunk_size = 4) +# k_toks > 0 1 2 3 4 5 +# q_toks v _____________ +# 0 | 1 1 1 +# 1 | 1 1 1 1 +# 2 | 1 +# 3 | 1 1 +# +# We can simulate this mask using standard flash-attention by breaking the +# sequences into local ("virtual") batches, where each local batch item is a +# local attention block, so in this case batch idx 0 would be broken up into: +# +# local-batch idx: 0 (q_seqlens = 2, kv_seqlens = 4) (batch 0) +# k_toks > 0 1 2 3 +# q_toks v _____________ +# 0 | 1 1 1 +# 1 | 1 1 1 1 +# local-batch idx: 1 (q_seqlens = 2, kv_seqlens = 2) (batch 0) +# k_toks > 4 5 +# q_toks v _____________ +# 2 | 1 +# 3 | 1 1 +# +# e.g. if we have: +# attn_chunk_size = 4 +# query_start_loc_np = [0, 4, 14, 19] (q_seqlens = [4, 10, 5]) +# Then this function would return: +# __b0__ ______b1______ __b2__ < orig batch indices +# q_seqlens_local = [ 2, 2, 1, 4, 4, 1, 4, 1] +# cu_seqlens_q_local = [0, 4, 6, 10, 14, 18, 19, 23, 24] +# seqlens_k_local = [ 4, 2, 4, 4, 4, 1, 4, 1] +# block_table_local : shape[local_virtual_batches, pages_per_local_batch] +def make_local_attention_virtual_batches( + attn_chunk_size: int, + query_start_loc_np: np.ndarray, + seq_lens_np: np.ndarray, + block_table: torch.Tensor, + block_size: int = 0, +) -> tuple[np.ndarray, np.ndarray, np.ndarray, torch.Tensor]: + q_seqlens = query_start_loc_np[1:] - query_start_loc_np[:-1] + actual_batch_size = seq_lens_np.shape[0] + + # Handle if we are starting in the middle of a local attention block, + # we assume q_seqlens > 0 (for all elements), for each batch idx we compute + # the number of tokens that are not in the first local attention block and + # then we can simply use a cdiv for the rest. + # For example if we have: + # attn_chunk_size = 4 + # q_seqlens = [4, 10, 5] + # k_seqlens = [6, 17, 9] + # Then we would get: + # new_tokens_in_first_block = [2, 1, 4] + # local_blocks = [2, 4, 2] + q_tokens_in_first_block = np.minimum( + attn_chunk_size - ((seq_lens_np - q_seqlens) % attn_chunk_size), + q_seqlens).astype(np.int32) + tokens_in_last_block = attn_chunk_size + (seq_lens_np % -attn_chunk_size) + local_blocks = 1 + cdiv(q_seqlens - q_tokens_in_first_block, + attn_chunk_size) + + # Once we know the number of local blocks we can compute the request spans + # for each batch idx, we can figure out the number of "virtual" requests we + # have to make, + # For the above example we would get: + # seqlens_q_local = [2, 2, 1, 4, 4, 1, 4, 1] + # + # First Get batched arange. (E.g., [2, 4, 2] -> [0, 1, 0, 1, 2, 3, 0, 1]) + # (TODO: max a utility to share this code with _prepare_inputs) + # arange step 1. [2, 4, 2] -> [2, 6, 8] + cu_num_blocks = np.cumsum(local_blocks) + virtual_batches = cu_num_blocks[-1] + # arange step 2. [2, 6, 8] -> [0, 0, 2, 2, 2, 2, 6, 6] + block_offsets = np.repeat(cu_num_blocks - local_blocks, local_blocks) + # arange step 3. [0, 1, 0, 1, 2, 3, 0, 1] + arange = np.arange(virtual_batches, dtype=np.int32) - block_offsets + # also compute reverse arange (i.e. [1, 0, 3, 2, 1, 0, 1, 0]) + rarange = np.repeat(local_blocks, local_blocks) - arange - 1 + # Then we can compute the seqlens_q_local, handling the fact that the + # first and last blocks could be partial + seqlens_q_local = \ + np.repeat(q_seqlens - q_tokens_in_first_block, local_blocks) + # set the first block since this may be a partial block + seqlens_q_local[arange == 0] = q_tokens_in_first_block + # set the remaining blocks + seqlens_q_local[arange > 0] = np.minimum( + seqlens_q_local - attn_chunk_size * (arange - 1), + attn_chunk_size)[arange > 0] + + # convert from q_seqlens to cu_seqlens_q + cu_seqlens_q_local = np.pad(np.cumsum(seqlens_q_local), (1, 0))\ + .astype(np.int32) + + # compute the seqlens_k_local, + # basically a full local attention block for all but the last block in each + # batch + # For our example this will be: + # seqlens_k_local = [4, 2, 4, 4, 4, 1, 4, 1] + seqlens_k_local = np.full(cu_num_blocks[-1], + attn_chunk_size, + dtype=np.int32) + seqlens_k_local[cu_num_blocks - 1] = tokens_in_last_block + + k_seqstarts_absolute = np.repeat(seq_lens_np, local_blocks) - \ + (rarange * attn_chunk_size + \ + np.repeat(tokens_in_last_block, local_blocks)) + # For the example the local attention blocks start at: + # _b0_ _____b1_____ _b2_ + # k_seqstarts_absolute = [0, 4, 4, 8, 12, 16, 4, 8] + block_starts = k_seqstarts_absolute // block_size + assert attn_chunk_size % block_size == 0, \ + f"attn_chunk_size {attn_chunk_size} is not " \ + f"divisible by block_size {block_size}" + pages_per_local_batch = attn_chunk_size // block_size + + # Create a block_table for the local attention blocks + # For out example if we have a block-table like (assuming block_size=2): + # block_table = [ + # [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], < batch 0 + # [10, 11, 12, 13, 14, 15, 16, 17, 18, 19], < batch 1 + # [20, 21, 22, 23, 24, 25, 26, 27, 28, 29], < batch 2 + # ] + # Then for the local batches we would want a block-table like + # block_table_local = [ + # [ 0, 1 ], < local-batch 0, (batch 0, starting from k[0]) + # [ 2, 3 ], < local-batch 1, (batch 0, starting from k[4]) + # [ 12, 13 ], < local-batch 2, (batch 1, starting from k[4]) + # [ 14, 15 ], < local-batch 3, (batch 1, starting from k[8]) + # [ 16, 17 ], < local-batch 4, (batch 1, starting from k[12]) + # [ 18, 19 ], < local-batch 5, (batch 1, starting from k[16]) + # [ 22, 23 ], < local-batch 6, (batch 2, starting from k[4]) + # [ 24, 25 ], < local-batch 7, (batch 2, starting from k[8]) + # ] + block_indices= np.broadcast_to( + np.arange(pages_per_local_batch, dtype=np.int32), + (virtual_batches, pages_per_local_batch)) \ + + np.expand_dims(block_starts, axis=1) + block_indices = block_indices.flatten().clip(max=block_table.shape[1] - 1) + batch_indices = np.repeat(np.arange(actual_batch_size, dtype=np.int32), + local_blocks * pages_per_local_batch) + block_table_local = block_table[batch_indices, block_indices]\ + .view(virtual_batches, -1) + + return seqlens_q_local, cu_seqlens_q_local, seqlens_k_local, \ + block_table_local From dac8cc49f43f7d2639d873532a408949169821a9 Mon Sep 17 00:00:00 2001 From: Chenyaaang <42742451+Chenyaaang@users.noreply.github.com> Date: Tue, 17 Jun 2025 15:24:49 -0700 Subject: [PATCH 008/211] [TPU] Update torch version to include paged attention kernel change (#19706) Signed-off-by: Chenyaaang --- requirements/tpu.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements/tpu.txt b/requirements/tpu.txt index a26dfd460d8ef..c6038b0443951 100644 --- a/requirements/tpu.txt +++ b/requirements/tpu.txt @@ -18,9 +18,9 @@ setuptools==78.1.0 --find-links https://storage.googleapis.com/libtpu-releases/index.html --find-links https://storage.googleapis.com/jax-releases/jax_nightly_releases.html --find-links https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html -torch==2.8.0.dev20250605 -torchvision==0.23.0.dev20250605 -torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250605-cp39-cp39-linux_x86_64.whl ; python_version == "3.9" -torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250605-cp310-cp310-linux_x86_64.whl ; python_version == "3.10" -torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250605-cp311-cp311-linux_x86_64.whl ; python_version == "3.11" +torch==2.8.0.dev20250617 +torchvision==0.23.0.dev20250617 +torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250617-cp39-cp39-linux_x86_64.whl ; python_version == "3.9" +torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250617-cp310-cp310-linux_x86_64.whl ; python_version == "3.10" +torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250617-cp311-cp311-linux_x86_64.whl ; python_version == "3.11" From c53711bd63dba1945c45f654b9f8e0776b02f7f2 Mon Sep 17 00:00:00 2001 From: Ning Xie Date: Wed, 18 Jun 2025 08:21:06 +0800 Subject: [PATCH 009/211] [MISC] correct copy_blocks src_to_dists param type (#19696) Signed-off-by: Andy Xie --- vllm/attention/ops/ipex_attn.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/attention/ops/ipex_attn.py b/vllm/attention/ops/ipex_attn.py index b7e4ba4d7416a..7207d0420a010 100644 --- a/vllm/attention/ops/ipex_attn.py +++ b/vllm/attention/ops/ipex_attn.py @@ -1,7 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -from typing import Dict, List, Optional, Tuple +from typing import List, Optional, Tuple try: import intel_extension_for_pytorch.llm.modules as ipex_modules @@ -120,7 +120,7 @@ class _PagedAttention: @staticmethod def copy_blocks( kv_caches: List[torch.Tensor], - src_to_dists: Dict[int, List[int]], + src_to_dists: torch.Tensor, *args, ) -> None: key_caches = [kv_cache[0] for kv_cache in kv_caches] From 6e9cc73f674d913113cc1fd5d50f53a2ee94aa8e Mon Sep 17 00:00:00 2001 From: Ning Xie Date: Wed, 18 Jun 2025 08:21:50 +0800 Subject: [PATCH 010/211] [MISC] correct DeviceConfig device field static type analysis (#19699) Signed-off-by: Andy Xie --- vllm/config.py | 7 +++++-- vllm/engine/arg_utils.py | 3 ++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index d986ab6b0edbd..7a9bc8a4f7afa 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2285,7 +2285,7 @@ Device = Literal["auto", "cuda", "neuron", "cpu", "tpu", "xpu", "hpu"] class DeviceConfig: """Configuration for the device to use for vLLM execution.""" - device: SkipValidation[Union[Device, torch.device]] = "auto" + device: SkipValidation[Optional[Union[Device, torch.device]]] = "auto" """Device type for vLLM execution. This parameter is deprecated and will be removed in a future release. @@ -2327,7 +2327,10 @@ class DeviceConfig: "to turn on verbose logging to help debug the issue.") else: # Device type is assigned explicitly - self.device_type = self.device + if isinstance(self.device, str): + self.device_type = self.device + elif isinstance(self.device, torch.device): + self.device_type = self.device.type # Some device types require processing inputs on CPU if self.device_type in ["neuron"]: diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index f599d7a3bb5ed..a0e099a191109 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1018,7 +1018,8 @@ class EngineArgs: from vllm.platforms import current_platform current_platform.pre_register_and_update() - device_config = DeviceConfig(device=current_platform.device_type) + device_config = DeviceConfig( + device=cast(Device, current_platform.device_type)) model_config = self.create_model_config() # * If VLLM_USE_V1 is unset, we enable V1 for "supported features" From d4629dc43f92c189e90a3e2f2f8a52648aed4d9d Mon Sep 17 00:00:00 2001 From: lkchen Date: Tue, 17 Jun 2025 20:03:01 -0700 Subject: [PATCH 011/211] [Misc] Add __str__ for RequestStatus (#19780) Signed-off-by: Linkun Chen --- tests/v1/test_request.py | 15 +++++++++++++++ vllm/v1/request.py | 3 +++ 2 files changed, 18 insertions(+) create mode 100644 tests/v1/test_request.py diff --git a/tests/v1/test_request.py b/tests/v1/test_request.py new file mode 100644 index 0000000000000..2dc90f83caba6 --- /dev/null +++ b/tests/v1/test_request.py @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: Apache-2.0 +from vllm.v1.request import RequestStatus + + +def test_request_status_fmt_str(): + """Test that the string representation of RequestStatus is correct.""" + assert f"{RequestStatus.WAITING}" == "WAITING" + assert f"{RequestStatus.WAITING_FOR_FSM}" == "WAITING_FOR_FSM" + assert f"{RequestStatus.WAITING_FOR_REMOTE_KVS}" == "WAITING_FOR_REMOTE_KVS" + assert f"{RequestStatus.RUNNING}" == "RUNNING" + assert f"{RequestStatus.PREEMPTED}" == "PREEMPTED" + assert f"{RequestStatus.FINISHED_STOPPED}" == "FINISHED_STOPPED" + assert f"{RequestStatus.FINISHED_LENGTH_CAPPED}" == "FINISHED_LENGTH_CAPPED" + assert f"{RequestStatus.FINISHED_ABORTED}" == "FINISHED_ABORTED" + assert f"{RequestStatus.FINISHED_IGNORED}" == "FINISHED_IGNORED" diff --git a/vllm/v1/request.py b/vllm/v1/request.py index 53fd70fabecf3..694e271e5ad74 100644 --- a/vllm/v1/request.py +++ b/vllm/v1/request.py @@ -171,6 +171,9 @@ class RequestStatus(enum.IntEnum): FINISHED_ABORTED = enum.auto() FINISHED_IGNORED = enum.auto() + def __str__(self): + return self.name + @staticmethod def is_finished(status: "RequestStatus") -> bool: return status > RequestStatus.PREEMPTED From 5f52a846850a3aed258e3a95bbea8fcaeb39d725 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Wed, 18 Jun 2025 01:37:01 -0400 Subject: [PATCH 012/211] [V1] Add API docs for EncoderCacheManager (#19294) Signed-off-by: Russell Bryant --- vllm/v1/core/encoder_cache_manager.py | 108 +++++++++++++++++++++++++- 1 file changed, 106 insertions(+), 2 deletions(-) diff --git a/vllm/v1/core/encoder_cache_manager.py b/vllm/v1/core/encoder_cache_manager.py index 16dc67b9b6f6a..67ea3b007ecee 100644 --- a/vllm/v1/core/encoder_cache_manager.py +++ b/vllm/v1/core/encoder_cache_manager.py @@ -14,6 +14,39 @@ logger = init_logger(__name__) class EncoderCacheManager: + """Manages caching of encoder outputs for multimodal models in vLLM V1. + + The EncoderCacheManager handles the lifecycle of multimodal encoder outputs + (such as vision embeddings from images) during request processing. It + provides memory-aware caching to avoid recomputing encoder outputs when the + same multimodal inputs appear in different stages of request processing. + + This manager is particularly important for: + - Vision-language models (e.g., LLaVA) where image encoder outputs are + cached + - Any multimodal model where encoder computation is expensive and + cacheable + + The cache operates at the granularity of individual multimodal input items + within requests, allowing for fine-grained memory management and enabling + chunked processing of multimodal inputs. + + Note that no caching is shared between requests at this time. If the same + input is used across multiple requests, it will be reprocessed for each + request. + + Args: + cache_size: Limit the size of the cache, measured by the number of + tokens from the input sequence. + + Attributes: + cache_size: Total cache capacity in encoder tokens + num_free_slots: Current available cache capacity in encoder tokens + cached: Mapping from request_id to set of cached input_ids for that + request + freed: List of (request_id, input_id) pairs that were recently freed. + This is cleared after every call to get_freed_ids(). + """ def __init__(self, cache_size: int): self.cache_size = cache_size @@ -24,14 +57,48 @@ class EncoderCacheManager: self.freed: list[tuple[str, int]] = [] def has_cache(self, request: Request, input_id: int) -> bool: + """Check if encoder output for a specific multimodal input is cached. + + Args: + request: The request containing the multimodal input + input_id: Index of the multimodal input within the request + + Returns: + True if the encoder output for this input is already cached + """ req_id = request.request_id return req_id in self.cached and input_id in self.cached[req_id] def can_allocate(self, request: Request, input_id: int) -> bool: + """Check if there's sufficient cache space for a multimodal input. + + Args: + request: The request containing the multimodal input + input_id: Index of the multimodal input within the request + + Returns: + True if there's enough free cache space to store the encoder output + for this multimodal input + """ num_tokens = request.get_num_encoder_tokens(input_id) return num_tokens <= self.num_free_slots def allocate(self, request: Request, input_id: int) -> None: + """Allocate cache space for a multimodal input's encoder output. + + This method reserves cache space for storing the encoder output of + the specified multimodal input. The actual encoder output storage + happens in the model runner, but this method ensures the cache + manager tracks the allocation. + + Args: + request: The request containing the multimodal input + input_id: Index of the multimodal input within the request + + Note: + This method assumes can_allocate() returned True for the same + request and input_id. It will reduce available cache space. + """ req_id = request.request_id if req_id not in self.cached: self.cached[req_id] = set() @@ -39,10 +106,30 @@ class EncoderCacheManager: self.num_free_slots -= request.get_num_encoder_tokens(input_id) def get_cached_input_ids(self, request: Request) -> set[int]: + """Get all cached multimodal input IDs for a request. + + Args: + request: The request to query + + Returns: + Set of input_ids that have cached encoder outputs for this request. + Returns empty set if no inputs are cached for this request. + """ return self.cached.get(request.request_id, set()) def free_encoder_input(self, request: Request, input_id: int) -> None: - """Free a single encoder input id for the request.""" + """Free cache space for a single multimodal input's encoder output. + + This method is called when: + - The encoder output has been fully consumed by the decoder and is + no longer needed (e.g., in vision-language models after image + tokens are processed) + - A request is being cancelled or aborted + + Args: + request: The request containing the multimodal input + input_id: Index of the multimodal input to free from cache + """ req_id = request.request_id if req_id not in self.cached: return @@ -54,12 +141,29 @@ class EncoderCacheManager: self.freed.append((req_id, input_id)) def free(self, request: Request) -> None: - """Free all cached input ids for the request.""" + """Free all cached encoder outputs for a request. + + This method is typically called when a request is finished, cancelled, + or aborted, and all its encoder outputs should be freed from cache. + + Args: + request: The request whose encoder outputs should be freed + """ input_ids = self.get_cached_input_ids(request).copy() for input_id in input_ids: self.free_encoder_input(request, input_id) def get_freed_ids(self) -> list[tuple[str, int]]: + """Get and clear the list of recently freed encoder cache entries. + + This method returns all encoder cache entries that were freed since + the last call to this method. It's used by the scheduler to notify + workers about which encoder outputs can be removed from their caches. + + Returns: + List of (request_id, input_id) tuples that were freed since the + last call. The internal freed list is cleared after this call. + """ freed = self.freed self.freed = [] return freed From eccdc8318c50ad3e2adfaaf99d714a8dccc6ab16 Mon Sep 17 00:00:00 2001 From: Zhonghua Deng Date: Wed, 18 Jun 2025 14:32:36 +0800 Subject: [PATCH 013/211] [V1][P/D] An native implementation of xPyD based on P2P NCCL (#18242) Signed-off-by: Abatom --- docs/design/v1/p2p_nccl_connector.md | 337 +++++++++++ .../disagg_xpyd/disagg_prefill_proxy_xpyd.py | 154 +++++ .../device_communicators/pynccl_wrapper.py | 8 + .../kv_transfer/kv_connector/factory.py | 5 + .../kv_connector/v1/p2p/__init__.py | 0 .../kv_connector/v1/p2p/p2p_nccl_connector.py | 481 ++++++++++++++++ .../kv_connector/v1/p2p/p2p_nccl_engine.py | 531 ++++++++++++++++++ .../kv_connector/v1/p2p/tensor_memory_pool.py | 264 +++++++++ 8 files changed, 1780 insertions(+) create mode 100644 docs/design/v1/p2p_nccl_connector.md create mode 100644 examples/online_serving/disagg_xpyd/disagg_prefill_proxy_xpyd.py create mode 100644 vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py create mode 100644 vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py create mode 100644 vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py create mode 100644 vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py diff --git a/docs/design/v1/p2p_nccl_connector.md b/docs/design/v1/p2p_nccl_connector.md new file mode 100644 index 0000000000000..c24b53763709c --- /dev/null +++ b/docs/design/v1/p2p_nccl_connector.md @@ -0,0 +1,337 @@ +An implementation of xPyD with dynamic scaling based on point-to-point communication, partly inspired by Dynamo. + +# Detailed Design + +## Overall Process +As shown in Figure 1, the overall process of this **PD disaggregation** solution is described through a request flow: + +1. The client sends an HTTP request to the Proxy/Router's `/v1/completions` interface. +2. The Proxy/Router selects a **1P1D (1 Prefill instance + 1 Decode instance)** through either through round-robin or random selection, generates a `request_id` (rules to be introduced later), modifies the `max_tokens` in the HTTP request message to **1**, and then forwards the request to the **P instance**. +3. Immediately afterward, the Proxy/Router forwards the **original HTTP request** to the **D instance**. +4. The **P instance** performs **Prefill** and then **actively sends the generated KV cache** to the D instance (using **PUT_ASYNC** mode). The D instance's `zmq_addr` can be resolved through the `request_id`. +5. The **D instance** has a **dedicated thread** for receiving the KV cache (to avoid blocking the main process). The received KV cache is saved into the **GPU memory buffer**, the size of which is determined by the vLLM startup parameter `kv_buffer_size`. When the GPU buffer is full, the KV cache is stored in the **local Tensor memory pool**. +6. During the **Decode**, the D instance's main process retrieves the KV cache (transmitted by the P instance) from either the **GPU buffer** or the **memory pool**, thereby **skipping Prefill**. +7. After completing **Decode**, the D instance returns the result to the **Proxy/Router**, which then forwards it to the **client**. + +![image1](https://github.com/user-attachments/assets/fb01bde6-755b-49f7-ad45-48a94b1e10a7) + +## Proxy/Router (Demo) + +A simple HTTP service acts as the entry point for client requests and starts a background thread to listen for P/D instances reporting their HTTP IP and PORT, as well as ZMQ IP and PORT. It maintains a dictionary of `http_addr -> zmq_addr`. The `http_addr` is the IP:PORT for the vLLM instance's request, while the `zmq_addr` is the address for KV cache handshake and metadata reception. + +The Proxy/Router is responsible for selecting 1P1D based on the characteristics of the client request, such as the prompt, and generating a corresponding `request_id`, for example: + +``` +cmpl-___prefill_addr_10.0.1.2:21001___decode_addr_10.0.1.3:22001_93923d63113b4b338973f24d19d4bf11-0 +``` + +Currently, to quickly verify whether xPyD can work, a round-robin selection of 1P1D is used. In the future, it is planned to use a trie combined with the load status of instances to select appropriate P and D. + +Each P/D instance periodically sends a heartbeat packet to the Proxy/Router (currently every 3 seconds) to register (i.e., report `http_addr -> zmq_addr`) and keep the connection alive. If an instance crashes and fails to send a ping for a certain period of time, the Proxy/Router will remove the timed-out instance (this feature has not yet been developed). + +## KV Cache Transfer Methods + +There are three methods for KVcache transfer: PUT, GET, and PUT_ASYNC. These methods can be specified using the `--kv-transfer-config` and `kv_connector_extra_config` parameters, specifically through the `send_type` field. Both PUT and PUT_ASYNC involve the P instance actively sending KVcache to the D instance. The difference is that PUT is a synchronous transfer method that blocks the main process, while PUT_ASYNC is an asynchronous transfer method. PUT_ASYNC uses a dedicated thread for sending KVcache, which means it does not block the main process. In contrast, the GET method involves the P instance saving the KVcache to the memory buffer after computing the prefill. The D instance then actively retrieves the computed KVcache from the P instance once it has allocated space for the KVcache. + +Experimental results have shown that the performance of these methods, from highest to lowest, is as follows: PUT_ASYNC → GET → PUT. + +## P2P Communication via ZMQ & NCCL + +As long as the address of the counterpart is known, point-to-point KV cache transfer (using NCCL) can be performed, without being constrained by rank and world size. To support dynamic scaling (expansion and contraction) of instances with PD disaggregation. This means that adding or removing P/D instances does not require a full system restart. + +Each P/D instance only needs to create a single `P2pNcclEngine` instance. This instance maintains a ZMQ Server, which runs a dedicated thread to listen on the `zmq_addr` address and receive control flow requests from other instances. These requests include requests to establish an NCCL connection and requests to send KVcache metadata (such as tensor shapes and data types). However, it does not actually transmit the KVcache data itself. + +When a P instance and a D instance transmit KVcache for the first time, they need to establish a ZMQ connection and an NCCL group. For subsequent KVcache transmissions, this ZMQ connection and NCCL group are reused. The NCCL group consists of only two ranks, meaning the world size is equal to 2. This design is intended to support dynamic scaling, which means that adding or removing P/D instances does not require a full system restart. As long as the address of the counterpart is known, point-to-point KVcache transmission can be performed, without being restricted by rank or world size. + +## NCCL Group Topology + +Currently, only symmetric TP (Tensor Parallelism) methods are supported for KVcache transmission. Asymmetric TP and PP (Pipeline Parallelism) methods will be supported in the future. Figure 2 illustrates the 1P2D setup, where each instance has a TP (Tensor Parallelism) degree of 2. There are a total of 7 NCCL groups: three vLLM instances each have one NCCL group with TP=2. Additionally, the 0th GPU card of the P instance establishes an NCCL group with the 0th GPU card of each D instance. Similarly, the 1st GPU card of the P instance establishes an NCCL group with the 1st GPU card of each D instance. + +![image2](https://github.com/user-attachments/assets/837e61d6-365e-4cbf-8640-6dd7ab295b36) + +Each NCCL group occupies a certain amount of GPU memory buffer for communication, the size of which is primarily influenced by the `NCCL_MAX_NCHANNELS` environment variable. When `NCCL_MAX_NCHANNELS=16`, an NCCL group typically occupies 100MB, while when `NCCL_MAX_NCHANNELS=8`, it usually takes up 52MB. For large-scale xPyD configurations—such as DeepSeek's 96P144D—this implementation is currently not feasible. Moving forward, we are considering using RDMA for point-to-point communication and are also keeping an eye on UCCL. + +## GPU Memory Buffer and Tensor Memory Pool + +The trade-off in the size of the memory buffer is as follows: For P instances, the memory buffer is not required in PUT and PUT_ASYNC modes, but it is necessary in GET mode. For D instances, a memory buffer is needed in all three modes. The memory buffer for D instances should not be too large. Similarly, for P instances in GET mode, the memory buffer should also not be too large. The memory buffer of D instances is used to temporarily store KVcache sent by P instances. If it is too large, it will reduce the KVcache space available for normal inference by D instances, thereby decreasing the inference batch size and ultimately leading to a reduction in output throughput. The size of the memory buffer is configured by the parameter `kv_buffer_size`, measured in bytes, and is typically set to 5%~10% of the memory size. + +If the `--max-num-seqs` parameter for P instances is set to a large value, due to the large batch size, P instances will generate a large amount of KVcache simultaneously. This may exceed the capacity of the memory buffer of D instances, resulting in KVcache loss. Once KVcache is lost, D instances need to recompute Prefill, which is equivalent to performing Prefill twice. Consequently, the time-to-first-token (TTFT) will significantly increase, leading to degraded performance. + +To address the above issues, I have designed and developed a local Tensor memory pool for storing KVcache, inspired by the buddy system used in Linux memory modules. Since the memory is sufficiently large, typically in the TB range on servers, there is no need to consider prefix caching or using block-based designs to reuse memory, thereby saving space. When the memory buffer is insufficient, KVcache can be directly stored in the Tensor memory pool, and D instances can subsequently retrieve KVcache from it. The read and write speed is that of PCIe, with PCIe 4.0 having a speed of approximately 21 GB/s, which is usually faster than the Prefill speed. Otherwise, solutions like Mooncake and lmcache would not be necessary. The Tensor memory pool acts as a flood diversion area, typically unused except during sudden traffic surges. In the worst-case scenario, my solution performs no worse than the normal situation with a Cache store. + +# Install vLLM + +```shell +# Enter the home directory or your working directory. +cd /home + +# Download the installation package, and I will update the commit-id in time. You can directly copy the command. +wget https://vllm-wheels.s3.us-west-2.amazonaws.com/9112b443a042d8d815880b8780633882ad32b183/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl + +# Download the code repository. +git clone -b xpyd-v1 https://github.com/Abatom/vllm.git +cd vllm + +# Set the installation package path. +export VLLM_PRECOMPILED_WHEEL_LOCATION=/home/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl + +# installation +pip install -e . -v +``` + +# Run xPyD + +## Instructions +- The following examples are run on an A800 (80GB) device, using the Meta-Llama-3.1-8B-Instruct model. +- Pay attention to the setting of the `kv_buffer_size` (in bytes). The empirical value is 10% of the GPU memory size. This is related to the kvcache size. If it is too small, the GPU memory buffer for temporarily storing the received kvcache will overflow, causing the kvcache to be stored in the tensor memory pool, which increases latency. If it is too large, the kvcache available for inference will be reduced, leading to a smaller batch size and decreased throughput. +- For Prefill instances, when using non-GET mode, the `kv_buffer_size` can be set to 1, as Prefill currently does not need to receive kvcache. However, when using GET mode, a larger `kv_buffer_size` is required because it needs to store the kvcache sent to the D instance. +- You may need to modify the `kv_buffer_size` and `port` in the following commands (if there is a conflict). +- `PUT_ASYNC` offers the best performance and should be prioritized. +- The `--port` must be consistent with the `http_port` in the `--kv-transfer-config`. +- The `disagg_prefill_proxy_xpyd.py` script will use port 10001 (for receiving client requests) and port 30001 (for receiving service discovery from P and D instances). +- The node running the proxy must have `quart` installed. +- Supports multiple nodes; you just need to modify the `proxy_ip` and `proxy_port` in `--kv-transfer-config`. +- In the following examples, it is assumed that **the proxy's IP is 10.0.1.1**. + +## Run 1P3D + +### Proxy (e.g. 10.0.1.1) + +```shell +cd {your vllm directory}/examples/online_serving/disagg_xpyd/ +python3 disagg_prefill_proxy_xpyd.py & +``` + +### Prefill1 (e.g. 10.0.1.2 or 10.0.1.1) + +```shell +VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=0 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20005 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.9 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"21001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20005","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & +``` + +### Decode1 (e.g. 10.0.1.3 or 10.0.1.1) + +```shell +VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=1 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20009 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.7 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"22001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20009","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & +``` + +### Decode2 (e.g. 10.0.1.4 or 10.0.1.1) + +```shell +VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=2 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20003 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.7 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"23001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20003","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & +``` + +### Decode3 (e.g. 10.0.1.5 or 10.0.1.1) + +```shell +VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=3 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20008 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.7 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"24001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20008","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & +``` + +## Run 3P1D + +### Proxy (e.g. 10.0.1.1) + +```shell +cd {your vllm directory}/examples/online_serving/disagg_xpyd/ +python3 disagg_prefill_proxy_xpyd.py & +``` + +### Prefill1 (e.g. 10.0.1.2 or 10.0.1.1) + +```shell +VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=0 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20005 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.9 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"21001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20005","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & +``` + +### Prefill2 (e.g. 10.0.1.3 or 10.0.1.1) + +```shell +VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=1 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20009 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.9 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"22001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20009","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & +``` + +### Prefill3 (e.g. 10.0.1.4 or 10.0.1.1) + +```shell +VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=2 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20003 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.9 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"23001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20003","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & +``` + +### Decode1 (e.g. 10.0.1.5 or 10.0.1.1) + +```shell +VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=3 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20008 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.7 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"24001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20008","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & +``` + +# Single request + +```shell +curl -X POST -s http://10.0.1.1:10001/v1/completions \ +-H "Content-Type: application/json" \ +-d '{ + "model": "base_model", + "prompt": "San Francisco is a", + "max_tokens": 10, + "temperature": 0 +}' +``` + +# Benchmark + +```shell +python3 benchmark_serving.py \ + --backend vllm \ + --model base_model \ + --tokenizer meta-llama/Llama-3.1-8B-Instruct \ + --dataset-name "random" \ + --host 10.0.1.1 \ + --port 10001 \ + --random-input-len 1024 \ + --random-output-len 1024 \ + --ignore-eos \ + --burstiness 100 \ + --percentile-metrics "ttft,tpot,itl,e2el" \ + --metric-percentiles "90,95,99" \ + --seed $(date +%s) \ + --trust-remote-code \ + --request-rate 3 \ + --num-prompts 1000 +``` + +# Shut down + +```shell +pgrep python | xargs kill -9 && pkill -f python +``` + +# Test data + +## **Scenario 1**: 1K input & 1K output tokens, E2E P99 latency ~20s +- **1P5D (6×A800) vs vLLM (1×A800)**: + - Throughput ↑7.2% (1085 → 6979/6) + - ITL (P99) ↓81.3% (120ms → 22.9ms) + - TTFT (P99) ↑26.8% (175ms → 222ms) + - TPOT: No change + +- **1P6D (7×A800) vs vLLM (1×A800)**: + - Throughput ↑9.6% (1085 → 8329/7) + - ITL (P99) ↓81.0% (120ms → 22.7ms) + - TTFT (P99) ↑210% (175ms →543ms) + - TPOT: No change + +## **Scenario 2**: 1K input & 200 output tokens, E2E P99 latency ~4s +- **1P1D (2×A800) vs vLLM (1×A800)**: + - Throughput ↑37.4% (537 → 1476/2) + - ITL (P99) ↓81.8% (127ms → 23.1ms) + - TTFT (P99) ↑41.8% (160ms → 227ms) + - TPOT: No change + +![testdata](https://github.com/user-attachments/assets/f791bfc7-9f3d-4e5c-9171-a42f9f4da627) diff --git a/examples/online_serving/disagg_xpyd/disagg_prefill_proxy_xpyd.py b/examples/online_serving/disagg_xpyd/disagg_prefill_proxy_xpyd.py new file mode 100644 index 0000000000000..73f2caaa0dbde --- /dev/null +++ b/examples/online_serving/disagg_xpyd/disagg_prefill_proxy_xpyd.py @@ -0,0 +1,154 @@ +# SPDX-License-Identifier: Apache-2.0 + +import os +import socket +import threading +import uuid + +import aiohttp +import msgpack +import zmq +from quart import Quart, make_response, request + +count = 0 +prefill_instances: dict[str, str] = {} # http_address: zmq_address +decode_instances: dict[str, str] = {} # http_address: zmq_address + +prefill_cv = threading.Condition() +decode_cv = threading.Condition() + + +def _listen_for_register(poller, router_socket): + while True: + socks = dict(poller.poll()) + if router_socket in socks: + remote_address, message = router_socket.recv_multipart() + # data: {"type": "P", "http_address": "ip:port", + # "zmq_address": "ip:port"} + data = msgpack.loads(message) + if data["type"] == "P": + global prefill_instances + global prefill_cv + with prefill_cv: + prefill_instances[data["http_address"]] = data["zmq_address"] + elif data["type"] == "D": + global decode_instances + global decode_cv + with decode_cv: + decode_instances[data["http_address"]] = data["zmq_address"] + else: + print( + "Unexpected, Received message from %s, data: %s", + remote_address, + data, + ) + + +def start_service_discovery(hostname, port): + if not hostname: + hostname = socket.gethostname() + if port == 0: + raise ValueError("Port cannot be 0") + + context = zmq.Context() + router_socket = context.socket(zmq.ROUTER) + router_socket.bind(f"tcp://{hostname}:{port}") + + poller = zmq.Poller() + poller.register(router_socket, zmq.POLLIN) + + _listener_thread = threading.Thread( + target=_listen_for_register, args=[poller, router_socket], daemon=True + ) + _listener_thread.start() + return _listener_thread + + +AIOHTTP_TIMEOUT = aiohttp.ClientTimeout(total=6 * 60 * 60) + +app = Quart(__name__) + + +def random_uuid() -> str: + return str(uuid.uuid4().hex) + + +async def forward_request(url, data, request_id): + async with aiohttp.ClientSession(timeout=AIOHTTP_TIMEOUT) as session: + headers = { + "Authorization": f"Bearer {os.environ.get('OPENAI_API_KEY')}", + "X-Request-Id": request_id, + } + async with session.post(url=url, json=data, headers=headers) as response: + if response.status == 200: + if True: + async for chunk_bytes in response.content.iter_chunked(1024): + yield chunk_bytes + else: + content = await response.read() + yield content + + +@app.route("/v1/completions", methods=["POST"]) +async def handle_request(): + try: + original_request_data = await request.get_json() + + prefill_request = original_request_data.copy() + # change max_tokens = 1 to let it only do prefill + prefill_request["max_tokens"] = 1 + + global count + global prefill_instances + global prefill_cv + with prefill_cv: + prefill_list = list(prefill_instances.items()) + prefill_addr, prefill_zmq_addr = prefill_list[count % len(prefill_list)] + + global decode_instances + global decode_cv + with decode_cv: + decode_list = list(decode_instances.items()) + decode_addr, decode_zmq_addr = decode_list[count % len(decode_list)] + + print( + f"handle_request count: {count}, [HTTP:{prefill_addr}, " + f"ZMQ:{prefill_zmq_addr}] 👉 [HTTP:{decode_addr}, " + f"ZMQ:{decode_zmq_addr}]" + ) + count += 1 + + request_id = ( + f"___prefill_addr_{prefill_zmq_addr}___decode_addr_" + f"{decode_zmq_addr}_{random_uuid()}" + ) + + # finish prefill + async for _ in forward_request( + f"http://{prefill_addr}/v1/completions", prefill_request, request_id + ): + continue + + # return decode + generator = forward_request( + f"http://{decode_addr}/v1/completions", original_request_data, request_id + ) + response = await make_response(generator) + response.timeout = None + + return response + + except Exception as e: + import sys + import traceback + + exc_info = sys.exc_info() + print("Error occurred in disagg prefill proxy server") + print(e) + print("".join(traceback.format_exception(*exc_info))) + + +if __name__ == "__main__": + t = start_service_discovery("0.0.0.0", 30001) + app.run(host="0.0.0.0", port=10001) + t.join() diff --git a/vllm/distributed/device_communicators/pynccl_wrapper.py b/vllm/distributed/device_communicators/pynccl_wrapper.py index 04a4d0147f5d8..3018a92da07c1 100644 --- a/vllm/distributed/device_communicators/pynccl_wrapper.py +++ b/vllm/distributed/device_communicators/pynccl_wrapper.py @@ -272,6 +272,14 @@ class NCCLLibrary: ctypes.byref(unique_id))) return unique_id + def unique_id_from_bytes(self, data: bytes) -> ncclUniqueId: + if len(data) != 128: + raise ValueError( + f"Expected 128 bytes for ncclUniqueId, got {len(data)} bytes") + unique_id = ncclUniqueId() + ctypes.memmove(ctypes.addressof(unique_id.internal), data, 128) + return unique_id + def ncclCommInitRank(self, world_size: int, unique_id: ncclUniqueId, rank: int) -> ncclComm_t: comm = ncclComm_t() diff --git a/vllm/distributed/kv_transfer/kv_connector/factory.py b/vllm/distributed/kv_transfer/kv_connector/factory.py index 58dfa251c735d..be9ce72dea67a 100644 --- a/vllm/distributed/kv_transfer/kv_connector/factory.py +++ b/vllm/distributed/kv_transfer/kv_connector/factory.py @@ -112,6 +112,11 @@ KVConnectorFactory.register_connector( "vllm.distributed.kv_transfer.kv_connector.v1.shared_storage_connector", "SharedStorageConnector") +KVConnectorFactory.register_connector( + "P2pNcclConnector", + "vllm.distributed.kv_transfer.kv_connector.v1.p2p.p2p_nccl_connector", + "P2pNcclConnector") + KVConnectorFactory.register_connector( "LMCacheConnectorV1", "vllm.distributed.kv_transfer.kv_connector.v1.lmcache_connector", diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py b/vllm/distributed/kv_transfer/kv_connector/v1/p2p/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py new file mode 100644 index 0000000000000..a47deaf91272e --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py @@ -0,0 +1,481 @@ +# SPDX-License-Identifier: Apache-2.0 + +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Optional + +import regex as re +import torch + +from vllm.config import VllmConfig +from vllm.distributed.kv_transfer.kv_connector.v1.base import ( + KVConnectorBase_V1, KVConnectorMetadata, KVConnectorRole) +from vllm.distributed.kv_transfer.kv_connector.v1.p2p.p2p_nccl_engine import ( + P2pNcclEngine) +from vllm.distributed.parallel_state import get_world_group +from vllm.forward_context import get_forward_context +from vllm.logger import init_logger +from vllm.v1.attention.backends.mla.common import MLACommonMetadata +from vllm.v1.core.sched.output import SchedulerOutput + +if TYPE_CHECKING: + from vllm.attention.backends.abstract import AttentionMetadata + from vllm.forward_context import ForwardContext + from vllm.v1.core.kv_cache_manager import KVCacheBlocks + from vllm.v1.request import Request + +logger = init_logger(__name__) + + +@dataclass +class ReqMeta: + # Request Id + request_id: str + # Request tokens + token_ids: torch.Tensor + # Slot mappings, should have the same length as token_ids + slot_mapping: torch.Tensor + + @staticmethod + def make_meta(request_id: str, token_ids: list[int], block_ids: list[int], + block_size: int) -> "ReqMeta": + valid_num_tokens = len(token_ids) + token_ids_tensor = torch.tensor(token_ids) + block_ids_tensor = torch.tensor(block_ids) + num_blocks = block_ids_tensor.shape[0] + block_offsets = torch.arange(0, block_size) + slot_mapping = block_offsets.reshape((1, block_size)) + \ + block_ids_tensor.reshape((num_blocks, 1)) * block_size + slot_mapping = slot_mapping.flatten()[:valid_num_tokens] + + return ReqMeta( + request_id=request_id, + token_ids=token_ids_tensor, + slot_mapping=slot_mapping, + ) + + +@dataclass +class P2pNcclConnectorMetadata(KVConnectorMetadata): + requests: list[ReqMeta] + + def __init__(self): + self.requests = [] + + def add_request( + self, + request_id: str, + token_ids: list[int], + block_ids: list[int], + block_size: int, + ) -> None: + self.requests.append( + ReqMeta.make_meta(request_id, token_ids, block_ids, block_size)) + + +class P2pNcclConnector(KVConnectorBase_V1): + + def __init__(self, vllm_config: "VllmConfig", role: KVConnectorRole): + super().__init__(vllm_config=vllm_config, role=role) + self._block_size = vllm_config.cache_config.block_size + self._requests_need_load: dict[str, Any] = {} + self.config = vllm_config.kv_transfer_config + self.is_producer = self.config.is_kv_producer + self.chunked_prefill: dict[str, Any] = {} + + self._rank = get_world_group().rank \ + if role == KVConnectorRole.WORKER else 0 + self._local_rank = get_world_group().local_rank \ + if role == KVConnectorRole.WORKER else 0 + + self.p2p_nccl_engine = P2pNcclEngine( + local_rank=self._local_rank, + config=self.config, + hostname="", + port_offset=self._rank, + ) if role == KVConnectorRole.WORKER else None + + # ============================== + # Worker-side methods + # ============================== + + def start_load_kv(self, forward_context: "ForwardContext", + **kwargs) -> None: + """Start loading the KV cache from the connector buffer to vLLM's + paged KV buffer. + + Args: + forward_context (ForwardContext): the forward context. + **kwargs: additional arguments for the load operation + + Note: + The number of elements in kv_caches and layer_names should be + the same. + """ + + # Only consumer/decode loads KV Cache + if self.is_producer: + return + + assert self.p2p_nccl_engine is not None + + attn_metadata = forward_context.attn_metadata + if attn_metadata is None: + return + + def inject_kv_into_layer( + dst_kv_cache_layer: torch.Tensor, + src_kv_cache: torch.Tensor, + slot_mapping: torch.Tensor, + request_id: str, + ) -> None: + """Inject the KV cache into the layer. + + Args: + dst_kv_cache_layer (torch.Tensor): the destination KV cache + layer. In shape [2, num_pages, page_size, xxx] if not + using MLA, [num_pages, page_size, xxx] otherwise. + src_kv_cache (torch.Tensor): the source KV cache. In shape + [2, num_tokens, xxx] if not using MLA, [num_tokens, xxx] + otherwise. + slot_mapping (torch.Tensor): the slot mapping. In shape + [num_tokens]. + request_id (str): request id for log + """ + dst_kv_cache_layer_shape = dst_kv_cache_layer.shape + if isinstance(attn_metadata, MLACommonMetadata): + num_pages = dst_kv_cache_layer_shape[0] + page_size = dst_kv_cache_layer_shape[1] + dst_kv_cache_layer = dst_kv_cache_layer.reshape( + num_pages * page_size, -1) + self.check_tensors_except_dim(dst_kv_cache_layer, src_kv_cache, + 0) + num_token = src_kv_cache.shape[0] + if len(slot_mapping) == num_token: + dst_kv_cache_layer[slot_mapping, ...] = src_kv_cache + else: + dst_kv_cache_layer[slot_mapping[:num_token], + ...] = src_kv_cache + logger.warning( + "🚧src_kv_cache does not match, num_slot:%d, " + "num_token:%d, request_id:%s", len(slot_mapping), + num_token, request_id) + + dst_kv_cache_layer.reshape(dst_kv_cache_layer_shape) + else: + num_pages = dst_kv_cache_layer_shape[1] + page_size = dst_kv_cache_layer_shape[2] + dst_kv_cache_layer = dst_kv_cache_layer.reshape( + 2, num_pages * page_size, -1) + self.check_tensors_except_dim(dst_kv_cache_layer, src_kv_cache, + 1) + num_token = src_kv_cache.shape[1] + if len(slot_mapping) == num_token: + dst_kv_cache_layer[:, slot_mapping, ...] = src_kv_cache + else: + dst_kv_cache_layer[:, slot_mapping[:num_token], + ...] = src_kv_cache + logger.warning( + "🚧src_kv_cache does not match, num_slot:%d, " + "num_token:%d, request_id:%s", len(slot_mapping), + num_token, request_id) + + dst_kv_cache_layer.reshape(dst_kv_cache_layer_shape) + + # Get the metadata + metadata: KVConnectorMetadata = \ + self._get_connector_metadata() + assert isinstance(metadata, P2pNcclConnectorMetadata) + + if metadata is None: + return + + # Load the KV for each request each layer + for request in metadata.requests: + for layer_name in forward_context.no_compile_layers: + attn_layer = forward_context.no_compile_layers[layer_name] + kv_cache_layer = attn_layer.kv_cache[ \ + forward_context.virtual_engine] + + kv_cache = self.p2p_nccl_engine.recv_tensor( + request.request_id + "#" + layer_name) + + if kv_cache is None: + logger.warning("🚧src_kv_cache is None, %s", + request.request_id) + continue + + inject_kv_into_layer(kv_cache_layer, kv_cache, + request.slot_mapping, request.request_id) + + def wait_for_layer_load(self, layer_name: str) -> None: + """Blocking until the KV for a specific layer is loaded into vLLM's + paged buffer. + + This interface will be useful for layer-by-layer pipelining. + + Args: + layer_name: the name of that layer + """ + return + + def save_kv_layer(self, layer_name: str, kv_layer: torch.Tensor, + attn_metadata: "AttentionMetadata", **kwargs) -> None: + """Start saving the KV cache of the layer from vLLM's paged buffer + to the connector. + + Args: + layer_name (str): the name of the layer. + kv_layer (torch.Tensor): the paged KV buffer of the current + layer in vLLM. + attn_metadata (AttentionMetadata): the attention metadata. + **kwargs: additional arguments for the save operation. + """ + + # Only producer/prefill saves KV Cache + if not self.is_producer: + return + + assert self.p2p_nccl_engine is not None + + def extract_kv_from_layer( + layer: torch.Tensor, + slot_mapping: torch.Tensor, + ) -> torch.Tensor: + """Extract the KV cache from the layer. + + Assume the shape of the layer is (2, num_pages, page_size, xxx) + if MLA is not used, and (num_pages, page_size, xxx) otherwise. + """ + if isinstance(attn_metadata, MLACommonMetadata): + num_pages, page_size = layer.shape[0], layer.shape[1] + return layer.reshape(num_pages * page_size, -1)[slot_mapping, + ...] + num_pages, page_size = layer.shape[1], layer.shape[2] + return layer.reshape(2, num_pages * page_size, -1)[:, slot_mapping, + ...] + + connector_metadata = self._get_connector_metadata() + assert isinstance(connector_metadata, P2pNcclConnectorMetadata) + for request in connector_metadata.requests: + request_id = request.request_id + ip, port = self.parse_request_id(request_id, True) + remote_address = ip + ":" + str(port + self._rank) + kv_cache = extract_kv_from_layer(kv_layer, request.slot_mapping) + self.p2p_nccl_engine.send_tensor(request_id + "#" + layer_name, + kv_cache, remote_address) + + def wait_for_save(self): + if self.is_producer: + assert self.p2p_nccl_engine is not None + self.p2p_nccl_engine.wait_for_sent() + + def get_finished( + self, finished_req_ids: set[str], + **kwargs) -> tuple[Optional[set[str]], Optional[set[str]]]: + """ + Notifies worker-side connector ids of requests that have + finished generating tokens. + + Returns: + ids of requests that have finished asynchronous transfer, + tuple of (sending/saving ids, recving/loading ids). + The finished saves/sends req ids must belong to a set provided in a + call to this method (this call or a prior one). + """ + + assert self.p2p_nccl_engine is not None + + forward_context: ForwardContext = get_forward_context() + return self.p2p_nccl_engine.get_finished(finished_req_ids, + forward_context) + + # ============================== + # Scheduler-side methods + # ============================== + + def get_num_new_matched_tokens( + self, + request: "Request", + num_computed_tokens: int, + ) -> tuple[int, bool]: + """ + Get number of new tokens that can be loaded from the + external KV cache beyond the num_computed_tokens. + + Args: + request (Request): the request object. + num_computed_tokens (int): the number of locally + computed tokens for this request + + Returns: + the number of tokens that can be loaded from the + external KV cache beyond what is already computed. + """ + if self.is_producer: + return 0, False + + num_external_tokens = (len(request.prompt_token_ids) - 1 - + num_computed_tokens) + + if num_external_tokens < 0: + num_external_tokens = 0 + + return num_external_tokens, False + + def update_state_after_alloc(self, request: "Request", + blocks: "KVCacheBlocks", + num_external_tokens: int): + """ + Update KVConnector state after block allocation. + """ + if not self.is_producer and num_external_tokens > 0: + self._requests_need_load[request.request_id] = ( + request, blocks.get_block_ids()[0]) + + def build_connector_meta( + self, + scheduler_output: SchedulerOutput, + ) -> KVConnectorMetadata: + """Build the connector metadata for this step. + + This function should NOT modify any fields in the scheduler_output. + Also, calling this function will reset the state of the connector. + + Args: + scheduler_output (SchedulerOutput): the scheduler output object. + """ + + meta = P2pNcclConnectorMetadata() + + for new_req in scheduler_output.scheduled_new_reqs: + if self.is_producer: + num_scheduled_tokens = ( + scheduler_output.num_scheduled_tokens)[new_req.req_id] + num_tokens = num_scheduled_tokens + new_req.num_computed_tokens + # the request's prompt is chunked prefill + if num_tokens < len(new_req.prompt_token_ids): + # 'CachedRequestData' has no attribute 'prompt_token_ids' + self.chunked_prefill[new_req.req_id] = ( + new_req.block_ids[0], new_req.prompt_token_ids) + continue + # the request's prompt is not chunked prefill + meta.add_request(request_id=new_req.req_id, + token_ids=new_req.prompt_token_ids, + block_ids=new_req.block_ids[0], + block_size=self._block_size) + continue + if new_req.req_id in self._requests_need_load: + meta.add_request(request_id=new_req.req_id, + token_ids=new_req.prompt_token_ids, + block_ids=new_req.block_ids[0], + block_size=self._block_size) + self._requests_need_load.pop(new_req.req_id) + + for cached_req in scheduler_output.scheduled_cached_reqs: + if self.is_producer: + num_scheduled_tokens = ( + scheduler_output.num_scheduled_tokens)[cached_req.req_id] + num_tokens = (num_scheduled_tokens + + cached_req.num_computed_tokens) + assert cached_req.req_id in self.chunked_prefill + block_ids = cached_req.new_block_ids[0] + if not cached_req.resumed_from_preemption: + block_ids = (self.chunked_prefill[cached_req.req_id][0] + + block_ids) + prompt_token_ids = self.chunked_prefill[cached_req.req_id][1] + # the request's prompt is chunked prefill again + if num_tokens < len(prompt_token_ids): + self.chunked_prefill[cached_req.req_id] = ( + block_ids, prompt_token_ids) + continue + # the request's prompt is all prefilled finally + meta.add_request(request_id=cached_req.req_id, + token_ids=prompt_token_ids, + block_ids=block_ids, + block_size=self._block_size) + self.chunked_prefill.pop(cached_req.req_id, None) + continue + + # NOTE(rob): here we rely on the resumed requests being + # the first N requests in the list scheduled_cache_reqs. + if not cached_req.resumed_from_preemption: + break + if cached_req.req_id in self._requests_need_load: + request, _ = self._requests_need_load.pop(cached_req.req_id) + total_tokens = cached_req.num_computed_tokens + 1 + token_ids = request.all_token_ids[:total_tokens] + + # NOTE(rob): For resumed req, new_block_ids is all + # of the block_ids for the request. + block_ids = cached_req.new_block_ids[0] + + meta.add_request(request_id=cached_req.req_id, + token_ids=token_ids, + block_ids=block_ids, + block_size=self._block_size) + + # Requests loaded asynchronously are not in the scheduler_output. + # for request_id in self._requests_need_load: + # request, block_ids = self._requests_need_load[request_id] + # meta.add_request(request_id=request.request_id, + # token_ids=request.prompt_token_ids, + # block_ids=block_ids, + # block_size=self._block_size) + + self._requests_need_load.clear() + return meta + + def request_finished( + self, + request: "Request", + block_ids: list[int], + ) -> tuple[bool, Optional[dict[str, Any]]]: + """ + Called when a request has finished, before its blocks are freed. + + Returns: + True if the request is being saved/sent asynchronously and blocks + should not be freed until the request_id is returned from + get_finished(). + Optional KVTransferParams to be included in the request outputs + returned by the engine. + """ + + self.chunked_prefill.pop(request.request_id, None) + + return False, None + + # ============================== + # Static methods + # ============================== + + @staticmethod + def parse_request_id(request_id: str, is_prefill=True) -> tuple[str, int]: + # Regular expression to match the string hostname and integer port + if is_prefill: + pattern = r"___decode_addr_(.*):(\d+)" + else: + pattern = r"___prefill_addr_(.*):(\d+)___" + + # Use re.search to find the pattern in the request_id + match = re.search(pattern, request_id) + if match: + # Extract the ranks + ip = match.group(1) + port = int(match.group(2)) + + return ip, port + raise ValueError( + f"Request id {request_id} does not contain hostname and port") + + @staticmethod + def check_tensors_except_dim(tensor1, tensor2, dim): + shape1 = tensor1.size() + shape2 = tensor2.size() + + if len(shape1) != len(shape2) or not all( + s1 == s2 + for i, (s1, s2) in enumerate(zip(shape1, shape2)) if i != dim): + raise NotImplementedError( + "Currently, only symmetric TP is supported. Asymmetric TP, PP," + "and others will be supported in future PRs.") diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py b/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py new file mode 100644 index 0000000000000..81f7a2525896e --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py @@ -0,0 +1,531 @@ +# SPDX-License-Identifier: Apache-2.0 + +import logging +import os +import threading +import time +import typing +from collections import deque +from contextlib import contextmanager +from typing import TYPE_CHECKING, Any, Optional + +import msgpack +import torch +import zmq + +from vllm.config import KVTransferConfig +from vllm.distributed.device_communicators.pynccl_wrapper import ( + NCCLLibrary, buffer_type, cudaStream_t, ncclComm_t, ncclDataTypeEnum) +from vllm.distributed.kv_transfer.kv_connector.v1.p2p.tensor_memory_pool import ( # noqa: E501 + TensorMemoryPool) +from vllm.utils import current_stream, get_ip + +if TYPE_CHECKING: + from vllm.forward_context import ForwardContext + +logger = logging.getLogger(__name__) + +DEFAULT_MEM_POOL_SIZE_GB = 32 + + +@contextmanager +def set_p2p_nccl_context(num_channels: str): + original_values: dict[str, Any] = {} + env_vars = [ + 'NCCL_MAX_NCHANNELS', + 'NCCL_MIN_NCHANNELS', + 'NCCL_CUMEM_ENABLE', + 'NCCL_BUFFSIZE', + 'NCCL_PROTO', # LL,LL128,SIMPLE + 'NCCL_ALGO', # RING,TREE + ] + + for var in env_vars: + original_values[var] = os.environ.get(var) + + logger.info("set_p2p_nccl_context, original_values: %s", original_values) + + try: + os.environ['NCCL_MAX_NCHANNELS'] = num_channels + os.environ['NCCL_MIN_NCHANNELS'] = num_channels + os.environ['NCCL_CUMEM_ENABLE'] = '1' + yield + finally: + for var in env_vars: + if original_values[var] is not None: + os.environ[var] = original_values[var] + else: + os.environ.pop(var, None) + + +class P2pNcclEngine: + + def __init__(self, + local_rank: int, + config: KVTransferConfig, + hostname: str = "", + port_offset: int = 0, + library_path: Optional[str] = None) -> None: + self.config = config + self.rank = port_offset + self.local_rank = local_rank + self.device = torch.device(f"cuda:{self.local_rank}") + self.nccl = NCCLLibrary(library_path) + + if not hostname: + hostname = get_ip() + port = int(self.config.kv_port) + port_offset + if port == 0: + raise ValueError("Port cannot be 0") + self._hostname = hostname + self._port = port + + # Each card corresponds to a ZMQ address. + self.zmq_address = f"{self._hostname}:{self._port}" + + # The `http_port` must be consistent with the port of OpenAI. + self.http_address = ( + f"{self._hostname}:" + f"{self.config.kv_connector_extra_config['http_port']}") + + # If `proxy_ip` or `proxy_port` is `""`, + # then the ping thread will not be enabled. + proxy_ip = self.config.get_from_extra_config("proxy_ip", "") + proxy_port = self.config.get_from_extra_config("proxy_port", "") + if proxy_ip == "" or proxy_port == "": + self.proxy_address = "" + else: + self.proxy_address = proxy_ip + ":" + proxy_port + + self.context = zmq.Context() + self.router_socket = self.context.socket(zmq.ROUTER) + self.router_socket.bind(f"tcp://{self.zmq_address}") + + self.poller = zmq.Poller() + self.poller.register(self.router_socket, zmq.POLLIN) + + self.send_store_cv = threading.Condition() + self.send_queue_cv = threading.Condition() + self.recv_store_cv = threading.Condition() + + self.send_stream = torch.cuda.Stream() + self.recv_stream = torch.cuda.Stream() + + mem_pool_size_gb = self.config.get_from_extra_config( + "mem_pool_size_gb", DEFAULT_MEM_POOL_SIZE_GB) + self.pool = TensorMemoryPool(max_block_size=int(mem_pool_size_gb) * + 1024**3) # GB + + # The sending type includes tree mutually exclusive options: + # PUT, GET, PUT_ASYNC. + self.send_type = self.config.get_from_extra_config("send_type", "PUT") + if self.send_type == "GET": + # tensor_id: torch.Tensor + self.send_store: dict[str, torch.Tensor] = {} + else: + # PUT or PUT_ASYNC + # tensor_id: torch.Tensor + self.send_queue: deque[list[Any]] = deque() + self.send_request_id_to_tensor_ids: dict[str, set[str]] = {} + if self.send_type == "PUT_ASYNC": + self._send_thread = threading.Thread(target=self._send_async, + daemon=True) + self._send_thread.start() + + # tensor_id: torch.Tensor/(addr, dtype, shape) + self.recv_store: dict[str, Any] = {} + self.recv_request_id_to_tensor_ids: dict[str, set[str]] = {} + self.socks: dict[str, Any] = {} # remote_address: client socket + self.comms: dict[str, Any] = {} # remote_address: (ncclComm_t, rank) + + self.buffer_size = 0 + self.buffer_size_threshold = float(self.config.kv_buffer_size) + + self.nccl_num_channels = self.config.get_from_extra_config( + "nccl_num_channels", "8") + + self._listener_thread = threading.Thread( + target=self._listen_for_requests, daemon=True) + self._listener_thread.start() + + self._ping_thread = None + if port_offset == 0 and self.proxy_address != "": + self._ping_thread = threading.Thread(target=self._ping, + daemon=True) + self._ping_thread.start() + + logger.info( + "💯P2pNcclEngine init, rank:%d, local_rank:%d, http_address:%s, " + "zmq_address:%s, proxy_address:%s, send_type:%s, buffer_size_" + "threshold:%.2f, nccl_num_channels:%s", self.rank, self.local_rank, + self.http_address, self.zmq_address, self.proxy_address, + self.send_type, self.buffer_size_threshold, self.nccl_num_channels) + + def _create_connect(self, remote_address: typing.Optional[str] = None): + assert remote_address is not None + if remote_address not in self.socks: + sock = self.context.socket(zmq.DEALER) + sock.setsockopt_string(zmq.IDENTITY, self.zmq_address) + sock.connect(f"tcp://{remote_address}") + self.socks[remote_address] = sock + if remote_address in self.comms: + logger.info("👋comm exists, remote_address:%s, comms:%s", + remote_address, self.comms) + return sock, self.comms[remote_address] + + unique_id = self.nccl.ncclGetUniqueId() + data = {"cmd": "NEW", "unique_id": bytes(unique_id.internal)} + sock.send(msgpack.dumps(data)) + + with torch.cuda.device(self.device): + rank = 0 + with set_p2p_nccl_context(self.nccl_num_channels): + comm: ncclComm_t = self.nccl.ncclCommInitRank( + 2, unique_id, rank) + self.comms[remote_address] = (comm, rank) + logger.info("🤝ncclCommInitRank Success, %s👉%s, MyRank: %s", + self.zmq_address, remote_address, rank) + + return self.socks[remote_address], self.comms[remote_address] + + def send_tensor( + self, + tensor_id: str, + tensor: torch.Tensor, + remote_address: typing.Optional[str] = None, + ) -> bool: + if remote_address is None: + with self.recv_store_cv: + self.recv_store[tensor_id] = tensor + self.recv_store_cv.notify() + return True + else: + if self.send_type == "PUT": + return self._send_sync(tensor_id, tensor, remote_address) + elif self.send_type == "PUT_ASYNC": + with self.send_queue_cv: + self.send_queue.append([tensor_id, remote_address, tensor]) + self.send_queue_cv.notify() + else: # GET + with self.send_store_cv: + tensor_size = tensor.element_size() * tensor.numel() + while (self.buffer_size + tensor_size + > self.buffer_size_threshold): + oldest_tenser_id = next(iter(self.send_store)) + oldest_tenser = self.send_store.pop(oldest_tenser_id) + oldest_tenser_size = oldest_tenser.element_size( + ) * oldest_tenser.numel() + self.buffer_size -= oldest_tenser_size + logger.info( + "⛔[GET]Send to %s, tensor_id:%s, tensor_size:%d," + " buffer_size:%d, oldest_tenser_size:%d, rank:%d", + remote_address, tensor_id, tensor_size, + self.buffer_size, oldest_tenser_size, self.rank) + + self.send_store[tensor_id] = tensor + self.buffer_size += tensor_size + logger.debug( + "🔵[GET]Send to %s, tensor_id:%s, tensor_size:%d, " + "shape:%s, rank:%d, buffer_size:%d(%.2f%%)", + remote_address, tensor_id, tensor_size, tensor.shape, + self.rank, self.buffer_size, + self.buffer_size / self.buffer_size_threshold * 100) + + return True + + def recv_tensor( + self, + tensor_id: str, + remote_address: typing.Optional[str] = None, + ) -> torch.Tensor: + if self.send_type == "PUT" or self.send_type == "PUT_ASYNC": + start_time = time.time() + with self.recv_store_cv: + while tensor_id not in self.recv_store: + self.recv_store_cv.wait() + tensor = self.recv_store[tensor_id] + + if tensor is not None: + if isinstance(tensor, tuple): + addr, dtype, shape = tensor + tensor = self.pool.load_tensor(addr, dtype, shape, + self.device) + else: + self.buffer_size -= (tensor.element_size() * + tensor.numel()) + else: + duration = time.time() - start_time + logger.warning( + "🔴[PUT]Recv From %s, tensor_id:%s, duration:%.3fms, " + "rank:%d", remote_address, tensor_id, duration * 1000, + self.rank) + return tensor + + # GET + if remote_address is None: + return None + + if remote_address not in self.socks: + self._create_connect(remote_address) + + sock = self.socks[remote_address] + comm, rank = self.comms[remote_address] + + data = {"cmd": "GET", "tensor_id": tensor_id} + sock.send(msgpack.dumps(data)) + + message = sock.recv() + data = msgpack.loads(message) + if data["ret"] != 0: + logger.warning("🔴[GET]Recv From %s, tensor_id: %s, ret: %d", + remote_address, tensor_id, data["ret"]) + return None + + tensor = torch.empty(data["shape"], + dtype=getattr(torch, data["dtype"]), + device=self.device) + + self._recv(comm, tensor, rank ^ 1, self.recv_stream) + + return tensor + + def _listen_for_requests(self): + while True: + socks = dict(self.poller.poll()) + if self.router_socket in socks: + remote_address, message = self.router_socket.recv_multipart() + data = msgpack.loads(message) + if data["cmd"] == "NEW": + unique_id = self.nccl.unique_id_from_bytes( + bytes(data["unique_id"])) + with torch.cuda.device(self.device): + rank = 1 + with set_p2p_nccl_context(self.nccl_num_channels): + comm: ncclComm_t = self.nccl.ncclCommInitRank( + 2, unique_id, rank) + self.comms[remote_address.decode()] = (comm, rank) + logger.info( + "🤝ncclCommInitRank Success, %s👈%s, MyRank:%s", + self.zmq_address, remote_address.decode(), rank) + elif data["cmd"] == "PUT": + tensor_id = data["tensor_id"] + try: + tensor = torch.empty(data["shape"], + dtype=getattr( + torch, data["dtype"]), + device=self.device) + self.router_socket.send_multipart( + [remote_address, b"0"]) + comm, rank = self.comms[remote_address.decode()] + self._recv(comm, tensor, rank ^ 1, self.recv_stream) + tensor_size = tensor.element_size() * tensor.numel() + if (self.buffer_size + tensor_size + > self.buffer_size_threshold): + # Store Tensor in memory pool + addr = self.pool.store_tensor(tensor) + tensor = (addr, tensor.dtype, tensor.shape) + logger.warning( + "🔴[PUT]Recv Tensor, Out Of Threshold, " + "%s👈%s, data:%s, addr:%d", self.zmq_address, + remote_address.decode(), data, addr) + else: + self.buffer_size += tensor_size + + except torch.cuda.OutOfMemoryError: + self.router_socket.send_multipart( + [remote_address, b"1"]) + tensor = None + logger.warning( + "🔴[PUT]Recv Tensor, Out Of Memory, %s👈%s, " + "data:%s", self.zmq_address, + remote_address.decode(), data) + + with self.recv_store_cv: + self.recv_store[tensor_id] = tensor + self._have_received_tensor_id(tensor_id) + self.recv_store_cv.notify() + + elif data["cmd"] == "GET": + tensor_id = data["tensor_id"] + with self.send_store_cv: + tensor = self.send_store.pop(tensor_id, None) + if tensor is not None: + data = { + "ret": 0, + "shape": tensor.shape, + "dtype": + str(tensor.dtype).replace("torch.", "") + } + # LRU + self.send_store[tensor_id] = tensor + self._have_sent_tensor_id(tensor_id) + else: + data = {"ret": 1} + + self.router_socket.send_multipart( + [remote_address, msgpack.dumps(data)]) + + if data["ret"] == 0: + comm, rank = self.comms[remote_address.decode()] + self._send(comm, tensor.to(self.device), rank ^ 1, + self.send_stream) + else: + logger.warning( + "🚧Unexpected, Received message from %s, data:%s", + remote_address, data) + + def _have_sent_tensor_id(self, tensor_id: str): + request_id = tensor_id.split('#')[0] + if request_id not in self.send_request_id_to_tensor_ids: + self.send_request_id_to_tensor_ids[request_id] = set() + self.send_request_id_to_tensor_ids[request_id].add(tensor_id) + + def _have_received_tensor_id(self, tensor_id: str): + request_id = tensor_id.split('#')[0] + if request_id not in self.recv_request_id_to_tensor_ids: + self.recv_request_id_to_tensor_ids[request_id] = set() + self.recv_request_id_to_tensor_ids[request_id].add(tensor_id) + + def _send_async(self): + while True: + with self.send_queue_cv: + while not self.send_queue: + self.send_queue_cv.wait() + tensor_id, remote_address, tensor = self.send_queue.popleft() + if not self.send_queue: + self.send_queue_cv.notify() + self._send_sync(tensor_id, tensor, remote_address) + + def wait_for_sent(self): + if self.send_type == "PUT_ASYNC": + start_time = time.time() + with self.send_queue_cv: + while self.send_queue: + self.send_queue_cv.wait() + duration = time.time() - start_time + logger.debug( + "🚧[PUT_ASYNC]It took %.3fms to wait for the send_queue" + " to be empty, rank:%d", duration * 1000, self.rank) + + def _send_sync( + self, + tensor_id: str, + tensor: torch.Tensor, + remote_address: typing.Optional[str] = None, + ) -> bool: + if remote_address is None: + return False + if remote_address not in self.socks: + self._create_connect(remote_address) + + sock = self.socks[remote_address] + comm, rank = self.comms[remote_address] + data = { + "cmd": "PUT", + "tensor_id": tensor_id, + "shape": tensor.shape, + "dtype": str(tensor.dtype).replace("torch.", "") + } + sock.send(msgpack.dumps(data)) + + response = sock.recv() + if response != b"0": + logger.error( + "🔴Send Tensor, Peer Out Of Memory/Threshold, %s 👉 %s, " + "MyRank:%s, data:%s, tensor:%s, size:%fGB, response:%s", + self.zmq_address, remote_address, rank, data, tensor.shape, + tensor.element_size() * tensor.numel() / 1024**3, + response.decode()) + return False + + self._send(comm, tensor.to(self.device), rank ^ 1, self.send_stream) + + if self.send_type == "PUT_ASYNC": + self._have_sent_tensor_id(tensor_id) + + return True + + def get_finished( + self, finished_req_ids: set[str], forward_context: "ForwardContext" + ) -> tuple[Optional[set[str]], Optional[set[str]]]: + """ + Notifies worker-side connector ids of requests that have + finished generating tokens. + + Returns: + ids of requests that have finished asynchronous transfer, + tuple of (sending/saving ids, recving/loading ids). + The finished saves/sends req ids must belong to a set provided in a + call to this method (this call or a prior one). + """ + + # Clear the buffer upon request completion. + for request_id in finished_req_ids: + for layer_name in forward_context.no_compile_layers: + tensor_id = request_id + "#" + layer_name + if tensor_id in self.recv_store: + with self.recv_store_cv: + tensor = self.recv_store.pop(tensor_id, None) + self.send_request_id_to_tensor_ids.pop( + request_id, None) + self.recv_request_id_to_tensor_ids.pop( + request_id, None) + addr = 0 + if isinstance(tensor, tuple): + addr, _, _ = tensor + self.pool.free(addr) + + # TODO:Retrieve requests that have already sent the KV cache. + finished_sending: set[str] = set() + + # TODO:Retrieve requests that have already received the KV cache. + finished_recving: set[str] = set() + + return finished_sending or None, finished_recving or None + + def _ping(self): + sock = self.context.socket(zmq.DEALER) + sock.setsockopt_string(zmq.IDENTITY, self.zmq_address) + logger.debug("ping start, zmq_address:%s", self.zmq_address) + sock.connect(f"tcp://{self.proxy_address}") + data = { + "type": "P" if self.config.is_kv_producer else "D", + "http_address": self.http_address, + "zmq_address": self.zmq_address + } + while True: + sock.send(msgpack.dumps(data)) + time.sleep(3) + + def _send(self, comm, tensor: torch.Tensor, dst: int, stream=None): + assert tensor.device == self.device, ( + f"this nccl communicator is created to work on {self.device}, " + f"but the input tensor is on {tensor.device}") + if stream is None: + stream = current_stream() + + with torch.cuda.stream(stream): + self.nccl.ncclSend(buffer_type(tensor.data_ptr()), tensor.numel(), + ncclDataTypeEnum.from_torch(tensor.dtype), dst, + comm, cudaStream_t(stream.cuda_stream)) + stream.synchronize() + + def _recv(self, comm, tensor: torch.Tensor, src: int, stream=None): + assert tensor.device == self.device, ( + f"this nccl communicator is created to work on {self.device}, " + f"but the input tensor is on {tensor.device}") + if stream is None: + stream = current_stream() + + with torch.cuda.stream(stream): + self.nccl.ncclRecv(buffer_type(tensor.data_ptr()), tensor.numel(), + ncclDataTypeEnum.from_torch(tensor.dtype), src, + comm, cudaStream_t(stream.cuda_stream)) + stream.synchronize() + + def close(self) -> None: + self._listener_thread.join() + if self.send_type == "PUT_ASYNC": + self._send_thread.join() + if self._ping_thread is not None: + self._ping_thread.join() diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py b/vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py new file mode 100644 index 0000000000000..303619a3fdd0f --- /dev/null +++ b/vllm/distributed/kv_transfer/kv_connector/v1/p2p/tensor_memory_pool.py @@ -0,0 +1,264 @@ +# SPDX-License-Identifier: Apache-2.0 + +import atexit +import ctypes +import math +from dataclasses import dataclass + +import torch + +from vllm.logger import init_logger + +logger = init_logger(__name__) + + +@dataclass +class MemoryBlock: + size: int + addr: int + + +"""A memory pool for managing pinned host memory allocations for tensors. + +This class implements a buddy allocation system to efficiently manage pinned +host memory for tensor storage. It supports allocation, deallocation, and +tensor storage/retrieval operations. + +Key Features: +- Uses power-of-two block sizes for efficient buddy allocation +- Supports splitting and merging of memory blocks +- Provides methods to store CUDA tensors in pinned host memory +- Allows loading tensors from pinned memory back to device +- Automatically cleans up memory on destruction + +Attributes: + max_block_size (int): Maximum block size (rounded to nearest power of two) + min_block_size (int): Minimum block size (rounded to nearest power of two) + free_lists (dict): Dictionary of free memory blocks by size + allocated_blocks (dict): Dictionary of currently allocated blocks + base_tensor (torch.Tensor): Base pinned memory tensor + base_address (int): Base memory address of the pinned memory region + +Example: + >>> pool = TensorMemoryPool(max_block_size=1024*1024) + >>> tensor = torch.randn(100, device='cuda') + >>> addr = pool.store_tensor(tensor) + >>> loaded_tensor = pool.load_tensor(addr, tensor.dtype, + ... tensor.shape, 'cuda') + >>> pool.free(addr) +""" + + +class TensorMemoryPool: + """Initializes the memory pool with given size constraints. + + Args: + max_block_size (int): Maximum size of memory blocks to manage + min_block_size (int, optional): Minimum size of memory blocks + to manage. Defaults to 512. + + Raises: + ValueError: If block sizes are invalid or max_block_size is less + than min_block_size + """ + + def __init__(self, max_block_size: int, min_block_size: int = 512): + if max_block_size <= 0 or min_block_size <= 0: + raise ValueError("Block sizes must be positive") + if max_block_size < min_block_size: + raise ValueError( + "Max block size must be greater than min block size") + + self.max_block_size = self._round_to_power_of_two(max_block_size) + self.min_block_size = self._round_to_power_of_two(min_block_size) + + self.free_lists: dict[int, dict[int, MemoryBlock]] = {} + self.allocated_blocks: dict[int, MemoryBlock] = {} + + self._initialize_free_lists() + self._allocate_pinned_memory() + + atexit.register(self.cleanup) + + def _round_to_power_of_two(self, size: int) -> int: + return 1 << (size - 1).bit_length() + + def _initialize_free_lists(self): + size = self.max_block_size + while size >= self.min_block_size: + self.free_lists[size] = {} + size //= 2 + + def _allocate_pinned_memory(self): + self.base_tensor = torch.empty(self.max_block_size // 4, + dtype=torch.float32, + pin_memory=True) + self.base_address = self.base_tensor.data_ptr() + initial_block = MemoryBlock(size=self.max_block_size, + addr=self.base_address) + self.free_lists[self.max_block_size][ + initial_block.addr] = initial_block + logger.debug("TensorMemoryPool, base_address:", self.base_address, + self.base_address % self.max_block_size) + + def allocate(self, size: int) -> int: + """Allocates a memory block of at least the requested size. + + Args: + size (int): Minimum size of memory to allocate + + Returns: + int: Address of the allocated memory block + + Raises: + ValueError: If size is invalid or insufficient memory is available + """ + if size <= 0: + raise ValueError("Allocation size must be positive") + + required_size = self._round_to_power_of_two( + max(size, self.min_block_size)) + if required_size > self.max_block_size: + raise ValueError("Requested size exceeds maximum block size") + + current_size = required_size + while current_size <= self.max_block_size: + if self.free_lists[current_size]: + _, block = self.free_lists[current_size].popitem() + self._split_block(block, required_size) + self.allocated_blocks[block.addr] = block + return block.addr + current_size *= 2 + + raise ValueError("Insufficient memory") + + def _split_block(self, block: MemoryBlock, required_size: int): + while (block.size > required_size + and block.size // 2 >= self.min_block_size): + buddy_size = block.size // 2 + buddy_addr = block.addr + buddy_size + + buddy = MemoryBlock(size=buddy_size, addr=buddy_addr) + block.size = buddy_size + + self.free_lists[buddy_size][buddy.addr] = buddy + + def free(self, addr: int): + """Frees an allocated memory block. + + Args: + addr (int): Address of the block to free + + Raises: + ValueError: If address is invalid or not allocated + """ + if addr not in self.allocated_blocks: + raise ValueError("Invalid address to free") + + block = self.allocated_blocks.pop(addr) + self._merge_buddies(block) + + def _merge_buddies(self, block: MemoryBlock): + MAX_MERGE_DEPTH = 30 + depth = 0 + + while depth < MAX_MERGE_DEPTH: + buddy_offset = block.size if (block.addr - self.base_address) % ( + 2 * block.size) == 0 else -block.size + buddy_addr = block.addr + buddy_offset + buddy = self.free_lists[block.size].get(buddy_addr) + if buddy: + del self.free_lists[buddy.size][buddy.addr] + merged_addr = min(block.addr, buddy.addr) + merged_size = block.size * 2 + block = MemoryBlock(size=merged_size, addr=merged_addr) + depth += 1 + else: + break + self.free_lists[block.size][block.addr] = block + + def store_tensor(self, tensor: torch.Tensor) -> int: + """Stores a CUDA tensor in pinned host memory. + + Args: + tensor (torch.Tensor): CUDA tensor to store + + Returns: + int: Address where the tensor is stored + + Raises: + ValueError: If tensor is not on CUDA or allocation fails + """ + if not tensor.is_cuda: + raise ValueError("Only CUDA tensors can be stored") + + size = tensor.element_size() * tensor.numel() + addr = self.allocate(size) + block = self.allocated_blocks[addr] + + if block.size < size: + self.free(addr) + raise ValueError( + f"Allocated block size {block.size} is smaller than " + f"required size {size}") + + try: + buffer = (ctypes.c_byte * block.size).from_address(block.addr) + cpu_tensor = torch.frombuffer(buffer, + dtype=tensor.dtype, + count=tensor.numel()).reshape( + tensor.shape) + except ValueError as err: + self.free(addr) + raise ValueError(f"Failed to create tensor view: {err}") from err + + cpu_tensor.copy_(tensor) + + return addr + + def load_tensor(self, addr: int, dtype: torch.dtype, + shape: tuple[int, ...], device) -> torch.Tensor: + """Loads a tensor from pinned host memory to the specified device. + + Args: + addr (int): Address where tensor is stored + dtype (torch.dtype): Data type of the tensor + shape (tuple[int, ...]): Shape of the tensor + device: Target device for the loaded tensor + + Returns: + torch.Tensor: The loaded tensor on the specified device + + Raises: + ValueError: If address is invalid or sizes don't match + """ + if addr not in self.allocated_blocks: + raise ValueError("Invalid address to load") + + block = self.allocated_blocks[addr] + num_elements = math.prod(shape) + dtype_size = torch.tensor([], dtype=dtype).element_size() + required_size = num_elements * dtype_size + + if required_size > block.size: + raise ValueError("Requested tensor size exceeds block size") + + buffer = (ctypes.c_byte * block.size).from_address(block.addr) + cpu_tensor = torch.frombuffer(buffer, dtype=dtype, + count=num_elements).reshape(shape) + + cuda_tensor = torch.empty(shape, dtype=dtype, device=device) + + cuda_tensor.copy_(cpu_tensor) + + return cuda_tensor + + def cleanup(self): + """Cleans up all memory resources and resets the pool state.""" + self.free_lists.clear() + self.allocated_blocks.clear() + if hasattr(self, 'base_tensor'): + del self.base_tensor + + def __del__(self): + self.cleanup() From 19a53b27833d767632c9eaff6ddf5ef08ba3af2f Mon Sep 17 00:00:00 2001 From: afeldman-nm <156691304+afeldman-nm@users.noreply.github.com> Date: Wed, 18 Jun 2025 02:38:13 -0400 Subject: [PATCH 014/211] [V1] Decouple GPU and TPU `InputBatch` (#19778) Signed-off-by: Andrew Feldman --- vllm/v1/sample/tpu/metadata.py | 2 +- vllm/v1/worker/gpu_input_batch.py | 7 +- vllm/v1/worker/lora_model_runner_mixin.py | 6 +- vllm/v1/worker/tpu_input_batch.py | 584 ++++++++++++++++++++++ vllm/v1/worker/tpu_model_runner.py | 2 +- 5 files changed, 597 insertions(+), 4 deletions(-) create mode 100644 vllm/v1/worker/tpu_input_batch.py diff --git a/vllm/v1/sample/tpu/metadata.py b/vllm/v1/sample/tpu/metadata.py index 4c1ac4895197c..6491c84f60762 100644 --- a/vllm/v1/sample/tpu/metadata.py +++ b/vllm/v1/sample/tpu/metadata.py @@ -5,7 +5,7 @@ from typing import Optional import torch -from vllm.v1.worker.gpu_input_batch import InputBatch +from vllm.v1.worker.tpu_input_batch import InputBatch DEFAULT_SAMPLING_PARAMS = dict( temperature=-1.0, diff --git a/vllm/v1/worker/gpu_input_batch.py b/vllm/v1/worker/gpu_input_batch.py index ebb770a7ddb29..e76293f98a51f 100644 --- a/vllm/v1/worker/gpu_input_batch.py +++ b/vllm/v1/worker/gpu_input_batch.py @@ -1,6 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -# Datastructures defining an input batch +# Datastructures defining a GPU input batch from dataclasses import dataclass from typing import Optional, cast @@ -453,6 +453,11 @@ class InputBatch: self.block_table.swap_row(i1, i2) def condense(self, empty_req_indices: list[int]) -> None: + """Move non-empty requests down into lower, empty indices. + + Args: + empty_req_indices: empty batch indices, sorted descending. + """ num_reqs = self.num_reqs if num_reqs == 0: # The batched states are empty. diff --git a/vllm/v1/worker/lora_model_runner_mixin.py b/vllm/v1/worker/lora_model_runner_mixin.py index afa41a37eeb34..2fbdee4724e35 100644 --- a/vllm/v1/worker/lora_model_runner_mixin.py +++ b/vllm/v1/worker/lora_model_runner_mixin.py @@ -5,6 +5,7 @@ Define LoRA functionality mixin for model runners. """ from contextlib import contextmanager +from typing import Union import numpy as np import torch.nn as nn @@ -15,7 +16,10 @@ from vllm.lora.layers import LoRAMapping from vllm.lora.request import LoRARequest from vllm.lora.worker_manager import LRUCacheWorkerLoRAManager from vllm.model_executor.models import supports_lora, supports_multimodal -from vllm.v1.worker.gpu_input_batch import InputBatch +from vllm.v1.worker.gpu_input_batch import InputBatch as GPUInputBatch +from vllm.v1.worker.tpu_input_batch import InputBatch as TPUInputBatch + +InputBatch = Union[TPUInputBatch, GPUInputBatch] logger = init_logger(__name__) diff --git a/vllm/v1/worker/tpu_input_batch.py b/vllm/v1/worker/tpu_input_batch.py new file mode 100644 index 0000000000000..3f105ccc5d92d --- /dev/null +++ b/vllm/v1/worker/tpu_input_batch.py @@ -0,0 +1,584 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +# Datastructures defining a TPU input batch + +from typing import Optional, cast + +import numpy as np +import torch + +from vllm.lora.request import LoRARequest +from vllm.sampling_params import SamplingType +from vllm.utils import swap_dict_values +from vllm.v1.outputs import LogprobsTensors +from vllm.v1.worker.block_table import MultiGroupBlockTable +from vllm.v1.worker.gpu_input_batch import CachedRequestState + +_SAMPLING_EPS = 1e-5 + + +class InputBatch: + + def __init__( + self, + max_num_reqs: int, + max_model_len: int, + max_num_batched_tokens: int, + device: torch.device, + pin_memory: bool, + vocab_size: int, + block_sizes: list[int], # The block_size of each kv cache group + ): + self.max_num_reqs = max_num_reqs + self.max_model_len = max_model_len + self.max_num_batched_tokens = max_num_batched_tokens + self.device = device + self.pin_memory = pin_memory + self.vocab_size = vocab_size + + self._req_ids: list[Optional[str]] = [] + self.req_id_to_index: dict[str, int] = {} + + # TODO(woosuk): This buffer could be too large if max_model_len is big. + # Find a way to reduce the CPU memory usage. + # This buffer is not directly transferred to the GPU, so it does not + # need to be pinned. + self.token_ids_cpu_tensor = torch.zeros( + (max_num_reqs, max_model_len), + device="cpu", + dtype=torch.int32, + pin_memory=False, + ) + self.token_ids_cpu = self.token_ids_cpu_tensor.numpy() + self.num_tokens = np.zeros(max_num_reqs, dtype=np.int32) + self.num_tokens_no_spec = np.zeros(max_num_reqs, dtype=np.int32) + self.num_prompt_tokens = np.zeros(max_num_reqs, dtype=np.int32) + self.num_computed_tokens_cpu_tensor = torch.zeros( + (max_num_reqs, ), + device="cpu", + dtype=torch.int32, + pin_memory=pin_memory, + ) + self.num_computed_tokens_cpu = \ + self.num_computed_tokens_cpu_tensor.numpy() + + # Block table. + self.block_table = MultiGroupBlockTable( + max_num_reqs=max_num_reqs, + max_model_len=max_model_len, + max_num_batched_tokens=max_num_batched_tokens, + pin_memory=pin_memory, + device=device, + block_sizes=block_sizes, + ) + + # Sampling-related. + self.temperature = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device=device) + self.temperature_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device="cpu", + pin_memory=pin_memory) + self.temperature_cpu = self.temperature_cpu_tensor.numpy() + self.greedy_reqs: set[str] = set() + self.random_reqs: set[str] = set() + + self.top_p = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device=device) + self.top_p_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device="cpu", + pin_memory=pin_memory) + self.top_p_cpu = self.top_p_cpu_tensor.numpy() + self.top_p_reqs: set[str] = set() + + self.top_k = torch.empty((max_num_reqs, ), + dtype=torch.int32, + device=device) + self.top_k_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.int32, + device="cpu", + pin_memory=pin_memory) + self.top_k_cpu = self.top_k_cpu_tensor.numpy() + self.top_k_reqs: set[str] = set() + + self.min_p = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device=device) + self.min_p_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.float32, + device="cpu", + pin_memory=pin_memory) + self.min_p_cpu = self.min_p_cpu_tensor.numpy() + self.min_p_reqs: set[str] = set() + + # Frequency penalty related data structures + self.frequency_penalties = torch.empty((max_num_reqs, ), + dtype=torch.float, + device=device) + self.frequency_penalties_cpu_tensor = torch.empty( + (max_num_reqs, ), + dtype=torch.float, + device="cpu", + pin_memory=pin_memory) + self.frequency_penalties_cpu = \ + self.frequency_penalties_cpu_tensor.numpy() + self.frequency_penalties_reqs: set[str] = set() + + # Presence penalty related data structures + self.presence_penalties = torch.empty((max_num_reqs, ), + dtype=torch.float, + device=device) + self.presence_penalties_cpu_tensor = torch.empty((max_num_reqs, ), + dtype=torch.float, + device="cpu", + pin_memory=pin_memory) + self.presence_penalties_cpu = self.presence_penalties_cpu_tensor.numpy( + ) + self.presence_penalties_reqs: set[str] = set() + + # Repetition penalty related data structures + self.repetition_penalties = torch.empty((max_num_reqs, ), + dtype=torch.float, + device=device) + self.repetition_penalties_cpu_tensor = torch.empty( + (max_num_reqs, ), + dtype=torch.float, + device="cpu", + pin_memory=pin_memory) + self.repetition_penalties_cpu = \ + self.repetition_penalties_cpu_tensor.numpy() + self.repetition_penalties_reqs: set[str] = set() + + # req_index -> (min_tokens, stop_token_ids) + self.min_tokens: dict[int, tuple[int, set[int]]] = {} + + # lora related + self.request_lora_mapping = np.zeros((self.max_num_reqs, ), + dtype=np.int32) + self.lora_id_to_request_ids: dict[int, set[str]] = {} + self.lora_id_to_lora_request: dict[int, LoRARequest] = {} + + # req_index -> generator + # NOTE(woosuk): The indices of the requests that do not have their own + # generator should not be included in the dictionary. + self.generators: dict[int, torch.Generator] = {} + + self.num_logprobs: dict[str, int] = {} + # NOTE(rob): num_prompt_logprobs only includes reqs + # that are currently in the prefill phase. + self.num_prompt_logprobs: dict[str, int] = {} + + # To accumulate prompt logprobs tensor chunks across prefill steps. + self.in_progress_prompt_logprobs_cpu: dict[str, LogprobsTensors] = {} + + self.logit_bias: list[Optional[dict[int, + float]]] = [None] * max_num_reqs + self.has_allowed_token_ids: set[str] = set() + # NOTE(lufang): In the mask tensor, if the corresponding token allowed, + # the value is False. Since we use masked_fill_ to set -inf. + self.allowed_token_ids_mask: Optional[torch.Tensor] = None + self.allowed_token_ids_mask_cpu_tensor: Optional[torch.Tensor] = None + + # req_index -> bad_words_token_ids + self.bad_words_token_ids: dict[int, list[list[int]]] = {} + + self.req_output_token_ids: list[Optional[list[int]]] = [] + + @property + def req_ids(self) -> list[str]: + # None elements should only be present transiently + # while performing state updates to the batch. + return cast(list[str], self._req_ids) + + def add_request( + self, + request: "CachedRequestState", + req_index: Optional[int] = None, + ) -> None: + if req_index is None: + req_index = self.num_reqs + assert req_index < self.max_num_reqs + + req_id = request.req_id + if req_index == len(self._req_ids): + self._req_ids.append(req_id) + self.req_output_token_ids.append(request.output_token_ids) + else: + self._req_ids[req_index] = req_id + self.req_output_token_ids[req_index] = request.output_token_ids + + self.req_id_to_index[req_id] = req_index + + # Copy the prompt token ids and output token ids. + num_prompt_tokens = len(request.prompt_token_ids) + self.num_prompt_tokens[req_index] = num_prompt_tokens + self.token_ids_cpu[ + req_index, :num_prompt_tokens] = request.prompt_token_ids + start_idx = num_prompt_tokens + end_idx = start_idx + len(request.output_token_ids) + self.token_ids_cpu[req_index, + start_idx:end_idx] = request.output_token_ids + # Number of token ids in token_ids_cpu. + # NOTE(woosuk): This may include spec decode tokens. + self.num_tokens[req_index] = request.num_tokens + # Number of tokens without spec decode tokens. + self.num_tokens_no_spec[req_index] = request.num_tokens + + self.num_computed_tokens_cpu[req_index] = request.num_computed_tokens + self.block_table.add_row(request.block_ids, req_index) + + sampling_params = request.sampling_params + if sampling_params.sampling_type == SamplingType.GREEDY: + # Avoid later division by zero. + self.temperature_cpu[req_index] = -1.0 + self.greedy_reqs.add(req_id) + else: + self.temperature_cpu[req_index] = sampling_params.temperature + self.random_reqs.add(req_id) + + self.top_p_cpu[req_index] = sampling_params.top_p + if sampling_params.top_p < 1: + self.top_p_reqs.add(req_id) + top_k = sampling_params.top_k + if 0 < top_k < self.vocab_size: + self.top_k_reqs.add(req_id) + else: + top_k = self.vocab_size + self.top_k_cpu[req_index] = top_k + self.min_p_cpu[req_index] = sampling_params.min_p + self.frequency_penalties_cpu[ + req_index] = sampling_params.frequency_penalty + if sampling_params.min_p > _SAMPLING_EPS: + self.min_p_reqs.add(req_id) + if sampling_params.frequency_penalty != 0.0: + self.frequency_penalties_reqs.add(req_id) + self.presence_penalties_cpu[ + req_index] = sampling_params.presence_penalty + if sampling_params.presence_penalty != 0.0: + self.presence_penalties_reqs.add(req_id) + self.repetition_penalties_cpu[ + req_index] = sampling_params.repetition_penalty + if sampling_params.repetition_penalty != 1.0: + self.repetition_penalties_reqs.add(req_id) + if sampling_params.min_tokens: + self.min_tokens[req_index] = (sampling_params.min_tokens, + sampling_params.all_stop_token_ids) + + # NOTE(woosuk): self.generators should not include the requests that + # do not have their own generator. + if request.generator is not None: + self.generators[req_index] = request.generator + + if sampling_params.logprobs is not None: + self.num_logprobs[req_id] = sampling_params.logprobs + if sampling_params.prompt_logprobs is not None: + self.num_prompt_logprobs[req_id] = sampling_params.prompt_logprobs + if sampling_params.logit_bias is not None: + self.logit_bias[req_index] = sampling_params.logit_bias + + if sampling_params.allowed_token_ids: + self.has_allowed_token_ids.add(req_id) + if self.allowed_token_ids_mask_cpu_tensor is None: + # Lazy allocation for this tensor, which can be large. + # False means we don't fill with -inf. + self.allowed_token_ids_mask = torch.zeros(self.max_num_reqs, + self.vocab_size, + dtype=torch.bool, + device=self.device) + self.allowed_token_ids_mask_cpu_tensor = torch.zeros( + self.max_num_reqs, + self.vocab_size, + dtype=torch.bool, + device="cpu") + self.allowed_token_ids_mask_cpu_tensor[req_index] = True + # False means we don't fill with -inf. + self.allowed_token_ids_mask_cpu_tensor[req_index][ + sampling_params.allowed_token_ids] = False + + if sampling_params.bad_words_token_ids: + self.bad_words_token_ids[ + req_index] = sampling_params.bad_words_token_ids + + # Add request lora ID + if request.lora_request: + lora_id = request.lora_request.lora_int_id + if lora_id not in self.lora_id_to_request_ids: + self.lora_id_to_request_ids[lora_id] = set() + + self.request_lora_mapping[req_index] = lora_id + self.lora_id_to_request_ids[lora_id].add(request.req_id) + self.lora_id_to_lora_request[lora_id] = request.lora_request + else: + # No LoRA + self.request_lora_mapping[req_index] = 0 + + def remove_request(self, req_id: str) -> Optional[int]: + """This method must always be followed by a call to condense().""" + + req_index = self.req_id_to_index.pop(req_id, None) + if req_index is None: + return None + self._req_ids[req_index] = None + self.req_output_token_ids[req_index] = None + + self.greedy_reqs.discard(req_id) + self.random_reqs.discard(req_id) + self.top_p_reqs.discard(req_id) + self.top_k_reqs.discard(req_id) + self.min_p_reqs.discard(req_id) + self.min_tokens.pop(req_index, None) + self.frequency_penalties_reqs.discard(req_id) + self.presence_penalties_reqs.discard(req_id) + self.repetition_penalties_reqs.discard(req_id) + self.generators.pop(req_index, None) + self.num_logprobs.pop(req_id, None) + self.num_prompt_logprobs.pop(req_id, None) + self.in_progress_prompt_logprobs_cpu.pop(req_id, None) + + # LoRA + lora_id = self.request_lora_mapping[req_index] + if lora_id != 0: + self.lora_id_to_request_ids[lora_id].discard(req_id) + if len(self.lora_id_to_request_ids[lora_id]) == 0: + self.lora_id_to_request_ids.pop(lora_id) + self.lora_id_to_lora_request.pop(lora_id) + self.request_lora_mapping[req_index] = 0 + + self.logit_bias[req_index] = None + self.has_allowed_token_ids.discard(req_id) + if self.allowed_token_ids_mask_cpu_tensor is not None: + # False means we don't fill with -inf. + self.allowed_token_ids_mask_cpu_tensor[req_index].fill_(False) + self.bad_words_token_ids.pop(req_index, None) + return req_index + + def swap_states(self, i1: int, i2: int) -> None: + old_id_i1 = self._req_ids[i1] + old_id_i2 = self._req_ids[i2] + self._req_ids[i1], self._req_ids[i2] =\ + self._req_ids[i2], self._req_ids[i1] # noqa + self.req_output_token_ids[i1], self.req_output_token_ids[i2] =\ + self.req_output_token_ids[i2], self.req_output_token_ids[i1] + assert old_id_i1 is not None and old_id_i2 is not None + self.req_id_to_index[old_id_i1], self.req_id_to_index[old_id_i2] =\ + self.req_id_to_index[old_id_i2], self.req_id_to_index[old_id_i1] + self.num_tokens[i1], self.num_tokens[i2] =\ + self.num_tokens[i2], self.num_tokens[i1] + self.num_tokens_no_spec[i1], self.num_tokens_no_spec[i2] =\ + self.num_tokens_no_spec[i2], self.num_tokens_no_spec[i1] + self.num_prompt_tokens[i1], self.num_prompt_tokens[i2] =\ + self.num_prompt_tokens[i2], self.num_prompt_tokens[i1] + self.num_computed_tokens_cpu[i1], self.num_computed_tokens_cpu[i2] =\ + self.num_computed_tokens_cpu[i2], self.num_computed_tokens_cpu[i1] + self.temperature_cpu[i1], self.temperature_cpu[i2] =\ + self.temperature_cpu[i2], self.temperature_cpu[i1] + self.top_p_cpu[i1], self.top_p_cpu[i2] =\ + self.top_p_cpu[i2], self.top_p_cpu[i1] + self.top_k_cpu[i1], self.top_k_cpu[i2] =\ + self.top_k_cpu[i2], self.top_k_cpu[i1] + self.frequency_penalties_cpu[i1], self.frequency_penalties_cpu[i2] =\ + self.frequency_penalties_cpu[i2], self.frequency_penalties_cpu[i1] + self.presence_penalties_cpu[i1], self.presence_penalties_cpu[i2] =\ + self.presence_penalties_cpu[i2], self.presence_penalties_cpu[i1] + self.repetition_penalties_cpu[i1], self.repetition_penalties_cpu[i2] =\ + self.repetition_penalties_cpu[i2], self.repetition_penalties_cpu[i1] + self.min_p_cpu[i1], self.min_p_cpu[i2] =\ + self.min_p_cpu[i2], self.min_p_cpu[i1] + + # NOTE: the following is unsafe + # self.token_ids_cpu[i1, ...], self.token_ids_cpu[i2, ...], =\ + # self.token_ids_cpu[i2, ...], self.token_ids_cpu[i1, ...] + # instead, we need to temporiarily copy the data for one of the indices + # TODO(lucas): optimize this by only copying valid indices + tmp = self.token_ids_cpu[i1, ...].copy() + self.token_ids_cpu[i1, ...] = self.token_ids_cpu[i2, ...] + self.token_ids_cpu[i2, ...] = tmp + + swap_dict_values(self.generators, i1, i2) + swap_dict_values(self.min_tokens, i1, i2) + swap_dict_values(self.bad_words_token_ids, i1, i2) + + self.request_lora_mapping[i1], self.request_lora_mapping[i2] =\ + self.request_lora_mapping[i2], self.request_lora_mapping[i1] + self.logit_bias[i1], self.logit_bias[i2] =\ + self.logit_bias[i2], self.logit_bias[i1] + + if self.allowed_token_ids_mask_cpu_tensor is not None: + self.allowed_token_ids_mask_cpu_tensor[i1], \ + self.allowed_token_ids_mask_cpu_tensor[i2] =\ + self.allowed_token_ids_mask_cpu_tensor[i2], \ + self.allowed_token_ids_mask_cpu_tensor[i1] + self.block_table.swap_row(i1, i2) + + def condense(self, empty_req_indices: list[int]) -> None: + """Move non-empty requests down into lower, empty indices. + + Args: + empty_req_indices: empty batch indices, sorted descending. + """ + num_reqs = self.num_reqs + if num_reqs == 0: + # The batched states are empty. + self._req_ids.clear() + self.req_output_token_ids.clear() + return + + # NOTE(woosuk): This function assumes that the empty_req_indices + # is sorted in descending order. + last_req_index = num_reqs + len(empty_req_indices) - 1 + while empty_req_indices: + # Find the largest non-empty index. + while last_req_index in empty_req_indices: + last_req_index -= 1 + + # Find the smallest empty index. + empty_index = empty_req_indices.pop() + if empty_index >= last_req_index: + break + + # Swap the states. + req_id = self._req_ids[last_req_index] + output_token_ids = self.req_output_token_ids[last_req_index] + assert req_id is not None + self._req_ids[empty_index] = req_id + self._req_ids[last_req_index] = None + self.req_output_token_ids[empty_index] = output_token_ids + self.req_output_token_ids[last_req_index] = None + self.req_id_to_index[req_id] = empty_index + + num_tokens = self.num_tokens[last_req_index] + self.token_ids_cpu[empty_index, :num_tokens] = self.token_ids_cpu[ + last_req_index, :num_tokens] + self.num_tokens[empty_index] = num_tokens + self.num_tokens_no_spec[empty_index] = self.num_tokens_no_spec[ + last_req_index] + self.num_prompt_tokens[empty_index] = self.num_prompt_tokens[ + last_req_index] + self.num_computed_tokens_cpu[ + empty_index] = self.num_computed_tokens_cpu[last_req_index] + self.block_table.move_row(last_req_index, empty_index) + self.temperature_cpu[empty_index] = self.temperature_cpu[ + last_req_index] + self.top_p_cpu[empty_index] = self.top_p_cpu[last_req_index] + self.top_k_cpu[empty_index] = self.top_k_cpu[last_req_index] + self.frequency_penalties_cpu[ + empty_index] = self.frequency_penalties_cpu[last_req_index] + self.presence_penalties_cpu[ + empty_index] = self.presence_penalties_cpu[last_req_index] + self.repetition_penalties_cpu[ + empty_index] = self.repetition_penalties_cpu[last_req_index] + self.min_p_cpu[empty_index] = self.min_p_cpu[last_req_index] + generator = self.generators.pop(last_req_index, None) + if generator is not None: + self.generators[empty_index] = generator + + min_token = self.min_tokens.pop(last_req_index, None) + if min_token is not None: + self.min_tokens[empty_index] = min_token + + self.request_lora_mapping[empty_index] = self.request_lora_mapping[ + last_req_index] + + self.logit_bias[empty_index] = self.logit_bias[last_req_index] + + if self.allowed_token_ids_mask_cpu_tensor is not None: + self.allowed_token_ids_mask_cpu_tensor[ + empty_index] = self.allowed_token_ids_mask_cpu_tensor[ + last_req_index] + + bad_words_token_ids = self.bad_words_token_ids.pop( + last_req_index, None) + if bad_words_token_ids is not None: + self.bad_words_token_ids[empty_index] = bad_words_token_ids + # Decrement last_req_index since it is now empty. + last_req_index -= 1 + + # Trim lists to the batch size. + del self._req_ids[self.num_reqs:] + del self.req_output_token_ids[self.num_reqs:] + + def _make_prompt_token_ids_tensor(self) -> torch.Tensor: + max_prompt_len = self.num_prompt_tokens[:self.num_reqs].max() + prompt_token_ids_cpu_tensor = torch.empty( + (self.num_reqs, max_prompt_len), + device="cpu", + dtype=torch.int64, + pin_memory=self.pin_memory, + ) + prompt_token_ids = prompt_token_ids_cpu_tensor.numpy() + prompt_token_ids[:] = self.token_ids_cpu[:self. + num_reqs, :max_prompt_len] + # Use the value of vocab_size as a pad since we don't have a + # token_id of this value. + for i in range(self.num_reqs): + prompt_token_ids[i, self.num_prompt_tokens[i]:] = self.vocab_size + return prompt_token_ids_cpu_tensor.to(device=self.device, + non_blocking=True) + + def make_lora_inputs( + self, num_scheduled_tokens: np.ndarray + ) -> tuple[tuple[int, ...], tuple[int, ...], set[LoRARequest]]: + """ + Given the num_scheduled_tokens for each request in the batch, return + datastructures used to activate the current LoRAs. + Returns: + 1. prompt_lora_mapping: A tuple of size self.num_reqs where, + prompt_lora_mapping[i] is the LoRA id to use for the ith prompt. + 2. token_lora_mapping: A tuple of size np.sum(num_scheduled_tokens) + where, token_lora_mapping[i] is the LoRA id to use for ith token. + 3. lora_requests: Set of relevant LoRA requests. + """ + + req_lora_mapping = self.request_lora_mapping[:self.num_reqs] + prompt_lora_mapping = tuple(req_lora_mapping) + token_lora_mapping = tuple( + req_lora_mapping.repeat(num_scheduled_tokens)) + active_lora_requests: set[LoRARequest] = set( + self.lora_id_to_lora_request.values()) + + return prompt_lora_mapping, token_lora_mapping, active_lora_requests + + @property + def num_reqs(self) -> int: + return len(self.req_id_to_index) + + @property + def all_greedy(self) -> bool: + return len(self.random_reqs) == 0 + + @property + def all_random(self) -> bool: + return len(self.greedy_reqs) == 0 + + @property + def no_top_p(self) -> bool: + return len(self.top_p_reqs) == 0 + + @property + def no_top_k(self) -> bool: + return len(self.top_k_reqs) == 0 + + @property + def no_min_p(self) -> bool: + return len(self.min_p_reqs) == 0 + + @property + def no_penalties(self) -> bool: + return (len(self.presence_penalties_reqs) == 0 + and len(self.frequency_penalties_reqs) == 0 + and len(self.repetition_penalties_reqs) == 0) + + @property + def max_num_logprobs(self) -> Optional[int]: + return max(self.num_logprobs.values()) if self.num_logprobs else None + + @property + def no_prompt_logprob(self) -> bool: + return not self.num_prompt_logprobs + + @property + def no_allowed_token_ids(self) -> bool: + return len(self.has_allowed_token_ids) == 0 diff --git a/vllm/v1/worker/tpu_model_runner.py b/vllm/v1/worker/tpu_model_runner.py index 89c6373b37730..de5a0a1f55971 100644 --- a/vllm/v1/worker/tpu_model_runner.py +++ b/vllm/v1/worker/tpu_model_runner.py @@ -42,8 +42,8 @@ from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsTensors, from vllm.v1.sample.tpu.metadata import TPUSupportedSamplingMetadata from vllm.v1.sample.tpu.sampler import Sampler as TPUSampler from vllm.v1.utils import bind_kv_cache -from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch from vllm.v1.worker.lora_model_runner_mixin import LoRAModelRunnerMixin +from vllm.v1.worker.tpu_input_batch import CachedRequestState, InputBatch from .utils import (initialize_kv_cache_for_kv_sharing, sanity_check_mm_encoder_outputs) From f04d604567924652e7b4533bbcd5e0c358d578e5 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Tue, 17 Jun 2025 23:59:27 -0700 Subject: [PATCH 015/211] [Minor] Zero-initialize attn output buffer (#19784) Signed-off-by: Woosuk Kwon --- vllm/attention/layer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index 6d9c6f51b34df..f7d230c5d7d6f 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -209,7 +209,7 @@ class Attention(nn.Module): if self.use_output: output_shape = (output_shape if output_shape is not None else query.shape) - output = torch.empty(output_shape, + output = torch.zeros(output_shape, dtype=query.dtype, device=query.device) hidden_size = output_shape[-1] From cca91a7a10c8227984311444e4ba901e7c54cf34 Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Wed, 18 Jun 2025 18:30:58 +0800 Subject: [PATCH 016/211] [doc] fix the incorrect label (#19787) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- docs/getting_started/installation/gpu.md | 2 +- docs/getting_started/installation/gpu/cuda.inc.md | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/getting_started/installation/gpu.md b/docs/getting_started/installation/gpu.md index f8a3acef784fc..1be7557b79e5f 100644 --- a/docs/getting_started/installation/gpu.md +++ b/docs/getting_started/installation/gpu.md @@ -42,7 +42,7 @@ vLLM is a Python library that supports the following GPU variants. Select your G === "NVIDIA CUDA" - --8<-- "docs/getting_started/installation/gpu/cuda.inc.md:create-a-new-python-environment" + --8<-- "docs/getting_started/installation/gpu/cuda.inc.md:set-up-using-python" === "AMD ROCm" diff --git a/docs/getting_started/installation/gpu/cuda.inc.md b/docs/getting_started/installation/gpu/cuda.inc.md index 409efece30888..4503bb443188d 100644 --- a/docs/getting_started/installation/gpu/cuda.inc.md +++ b/docs/getting_started/installation/gpu/cuda.inc.md @@ -10,8 +10,6 @@ vLLM contains pre-compiled C++ and CUDA (12.8) binaries. # --8<-- [end:requirements] # --8<-- [start:set-up-using-python] -### Create a new Python environment - !!! note PyTorch installed via `conda` will statically link `NCCL` library, which can cause issues when vLLM tries to use `NCCL`. See for more details. From 257ab9543973d3df3ef7254725a3372318698ed6 Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Wed, 18 Jun 2025 21:03:36 +0800 Subject: [PATCH 017/211] [Platform] Allow platform use V1 Engine by default (#19792) Signed-off-by: wangxiyuan --- vllm/engine/arg_utils.py | 14 +++++--------- vllm/platforms/cpu.py | 8 ++++++++ vllm/platforms/interface.py | 7 +++++++ 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index a0e099a191109..91b748b54c28f 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1303,7 +1303,7 @@ class EngineArgs: # Skip this check if we are running on a non-GPU platform, # or if the device capability is not available # (e.g. in a Ray actor without GPUs). - from vllm.platforms import CpuArchEnum, current_platform + from vllm.platforms import current_platform if (current_platform.is_cuda() and current_platform.get_device_capability() and current_platform.get_device_capability().major < 8): @@ -1445,14 +1445,10 @@ class EngineArgs: _raise_or_fallback(feature_name=name, recommend_to_remove=False) return False - # Non-[CUDA, TPU, x86 CPU] may be supported on V1, - # but off by default for now. - v0_hardware = not any( - (current_platform.is_cuda_alike(), current_platform.is_tpu(), - (current_platform.is_cpu() - and current_platform.get_cpu_architecture() == CpuArchEnum.X86))) - if v0_hardware and _warn_or_fallback( # noqa: SIM103 - current_platform.device_name): + # The platform may be supported on V1, but off by default for now. + if not current_platform.default_v1( # noqa: SIM103 + model_config=model_config) and _warn_or_fallback( + current_platform.device_name): return False ############################################################# diff --git a/vllm/platforms/cpu.py b/vllm/platforms/cpu.py index 1dfd394db608d..106bce162003f 100644 --- a/vllm/platforms/cpu.py +++ b/vllm/platforms/cpu.py @@ -269,3 +269,11 @@ class CpuPlatform(Platform): model configuration. """ return True + + @classmethod + def default_v1(cls, model_config) -> bool: + """Returns whether the current platform can use v1 by default for the + supplied model configuration. + """ + return cls.supports_v1( + model_config) and cls.get_cpu_architecture() == CpuArchEnum.X86 diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index f91f222b25e58..3ff173dcd8c85 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -479,6 +479,13 @@ class Platform: """ return False + @classmethod + def default_v1(cls, model_config: ModelConfig) -> bool: + """ + Returns whether the current platform supports v1 by default. + """ + return cls.supports_v1(model_config) + @classmethod def use_custom_allreduce(cls) -> bool: """ From 735a9de71f375e6879ceee692e3cdca14d88ddfd Mon Sep 17 00:00:00 2001 From: Lu Fang <30275821+houseroad@users.noreply.github.com> Date: Wed, 18 Jun 2025 22:26:43 +0800 Subject: [PATCH 018/211] [Qwen] Add tagging rule for Qwen related PRs (#19799) Signed-off-by: Lu Fang --- .github/mergify.yml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/.github/mergify.yml b/.github/mergify.yml index 20b4a8fc2dbc3..ce8fb2ee2d534 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -65,6 +65,21 @@ pull_request_rules: add: - multi-modality +- name: label-qwen + description: Automatically apply qwen label + conditions: + - or: + - files~=^examples/.*qwen.*\.py + - files~=^tests/.*qwen.*\.py + - files~=^vllm/model_executor/models/.*qwen.*\.py + - files~=^vllm/reasoning/.*qwen.*\.py + - title~=(?i)Qwen + - body~=(?i)Qwen + actions: + label: + add: + - qwen + - name: label-rocm description: Automatically apply rocm label conditions: From 8b6e1d639c66d5828d03a7df2c3a500030a5c5cd Mon Sep 17 00:00:00 2001 From: Zzz9990 Date: Wed, 18 Jun 2025 23:46:51 +0800 Subject: [PATCH 019/211] [Hardware][AMD] integrate aiter chunked prefill into vllm (#18596) Signed-off-by: fsx950223 Signed-off-by: charlifu Co-authored-by: fsx950223 Co-authored-by: charlifu --- vllm/envs.py | 8 + vllm/platforms/rocm.py | 12 +- vllm/v1/attention/backends/rocm_aiter_fa.py | 585 ++++++++++++++++++++ 3 files changed, 602 insertions(+), 3 deletions(-) create mode 100644 vllm/v1/attention/backends/rocm_aiter_fa.py diff --git a/vllm/envs.py b/vllm/envs.py index a4a1784f97f90..c7604d6dfeb84 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -87,6 +87,7 @@ if TYPE_CHECKING: VLLM_ROCM_USE_AITER_MOE: bool = True VLLM_ROCM_USE_AITER_RMSNORM: bool = True VLLM_ROCM_USE_AITER_MLA: bool = True + VLLM_ROCM_USE_AITER_MHA: bool = True VLLM_ROCM_USE_SKINNY_GEMM: bool = True VLLM_ROCM_FP8_PADDING: bool = True VLLM_ROCM_MOE_PADDING: bool = True @@ -653,6 +654,13 @@ environment_variables: dict[str, Callable[[], Any]] = { "VLLM_ROCM_USE_AITER_MLA": lambda: (os.getenv("VLLM_ROCM_USE_AITER_MLA", "True").lower() in ("true", "1")), + + # Whether to use aiter mha ops. + # By default is enabled. + "VLLM_ROCM_USE_AITER_MHA": + lambda: (os.getenv("VLLM_ROCM_USE_AITER_MHA", "True").lower() in + ("true", "1")), + # use rocm skinny gemms "VLLM_ROCM_USE_SKINNY_GEMM": lambda: (os.getenv("VLLM_ROCM_USE_SKINNY_GEMM", "True").lower() in diff --git a/vllm/platforms/rocm.py b/vllm/platforms/rocm.py index 445b24d72f078..08d471d5a983c 100644 --- a/vllm/platforms/rocm.py +++ b/vllm/platforms/rocm.py @@ -215,9 +215,15 @@ class RocmPlatform(Platform): selected_backend = _Backend.ROCM_FLASH if envs.VLLM_USE_V1: - logger.info("Using Triton Attention backend on V1 engine.") - return ("vllm.v1.attention.backends." - "triton_attn.TritonAttentionBackend") + if envs.VLLM_ROCM_USE_AITER and envs.VLLM_ROCM_USE_AITER_MHA \ + and on_gfx9(): + logger.info("Using Flash Attention backend on V1 engine.") + return ("vllm.v1.attention.backends." + "rocm_aiter_fa.AiterFlashAttentionBackend") + else: + logger.info("Using Triton Attention backend on V1 engine.") + return ("vllm.v1.attention.backends." + "triton_attn.TritonAttentionBackend") if selected_backend == _Backend.ROCM_FLASH: if not cls.has_device_capability(90): # not Instinct series GPUs. diff --git a/vllm/v1/attention/backends/rocm_aiter_fa.py b/vllm/v1/attention/backends/rocm_aiter_fa.py new file mode 100644 index 0000000000000..e011e95efd41b --- /dev/null +++ b/vllm/v1/attention/backends/rocm_aiter_fa.py @@ -0,0 +1,585 @@ +# SPDX-License-Identifier: Apache-2.0 +"""Attention layer with AiterFlashAttention.""" +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Optional + +import torch + +from vllm import _custom_ops as ops +from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, + AttentionMetadata, AttentionType, + is_quantized_kv_cache) +from vllm.logger import init_logger +from vllm.platforms import current_platform +from vllm.v1.attention.backends.flash_attn import ( + make_local_attention_virtual_batches) +from vllm.v1.attention.backends.utils import CommonAttentionMetadata +from vllm.v1.kv_cache_interface import AttentionSpec +from vllm.v1.worker.block_table import BlockTable + +if TYPE_CHECKING: + from vllm.v1.core.sched.output import SchedulerOutput + from vllm.v1.worker.gpu_input_batch import InputBatch + from vllm.v1.worker.gpu_model_runner import GPUModelRunner + +if current_platform.is_rocm(): + import aiter + + from vllm.triton_utils import tl, triton + from vllm.utils import direct_register_custom_op + + @triton.jit + def _vllm_layout_trans_kernel( + k_buffer_ptr, + v_buffer_ptr, + k_values_ptr, + v_values_ptr, + b_query_lens_loc, + b_seq_lens_loc, + block_table, + block_table_stride_0, + E_DIM: tl.constexpr, + BLOCK_SIZE: tl.constexpr, + ): + batch_idx = tl.program_id(0) + block_idx = tl.program_id(1) + batch_token_indexes = tl.load(b_seq_lens_loc + batch_idx + + tl.arange(0, 2)) + batch_token_start, batch_token_end = tl.split(batch_token_indexes) + seq_len = batch_token_end - batch_token_start + + batch_query_indexes = tl.load(b_query_lens_loc + batch_idx + + tl.arange(0, 2)) + batch_query_start, batch_query_end = tl.split(batch_query_indexes) + query_len = batch_query_end - batch_query_start + if query_len <= 1: + return + if block_idx * BLOCK_SIZE < seq_len: + block_mask = (block_idx * BLOCK_SIZE + + tl.arange(0, BLOCK_SIZE)[:, None]) < seq_len + + kv_idx = tl.load(block_table + batch_idx * block_table_stride_0 + + block_idx) + + kv_buffer_off = kv_idx * BLOCK_SIZE * E_DIM + tl.arange( + 0, BLOCK_SIZE)[:, None] * E_DIM + tl.arange(0, E_DIM)[None, :] + k_vals = tl.load(k_buffer_ptr + kv_buffer_off, + mask=block_mask, + other=0.0) + v_vals = tl.load(v_buffer_ptr + kv_buffer_off, + mask=block_mask, + other=0.0) + + kv_values_off = batch_token_start * E_DIM + \ + block_idx * BLOCK_SIZE * E_DIM + \ + tl.arange(0, BLOCK_SIZE)[:, None] * E_DIM + \ + tl.arange(0, E_DIM)[None, :] + tl.store(k_values_ptr + kv_values_off, k_vals, mask=block_mask) + tl.store(v_values_ptr + kv_values_off, v_vals, mask=block_mask) + + def vllm_layout_trans(b_query_lens_loc, b_seq_lens_loc, block_table, + k_buffer, v_buffer, max_seq_len, total_tokens): + H_KV = v_buffer.shape[2] + D = v_buffer.shape[3] + BLOCK_SIZE = v_buffer.shape[1] + dtype = k_buffer.dtype + k_values = torch.empty((total_tokens, H_KV, D), + dtype=dtype, + device="cuda") + v_values = torch.empty((total_tokens, H_KV, D), + dtype=dtype, + device="cuda") + + grid = (block_table.shape[0], + (max_seq_len + BLOCK_SIZE - 1) // BLOCK_SIZE) + + _vllm_layout_trans_kernel[grid](k_buffer, + v_buffer, + k_values, + v_values, + b_query_lens_loc, + b_seq_lens_loc, + block_table, + block_table.stride(0), + E_DIM=H_KV * D, + BLOCK_SIZE=BLOCK_SIZE) + + return k_values, v_values + + def flash_attn_varlen_func_impl( + q: torch.Tensor, + k_cache: torch.Tensor, + v_cache: torch.Tensor, + out: torch.Tensor, + cu_seqlens_q: torch.Tensor, + cu_seqlens_k: torch.Tensor, + total_tokens: int, + max_seqlen_q: int, + max_seqlen_k: int, + softmax_scale: float, + window_size: Optional[list[int]], # -1 means infinite context window + alibi_slopes: Optional[list[float]], + block_table: torch.Tensor, + ) -> torch.Tensor: + k, v = vllm_layout_trans(cu_seqlens_q, cu_seqlens_k, block_table, + k_cache, v_cache, max_seqlen_k, total_tokens) + output = aiter.flash_attn_varlen_func( + q=q, + k=k, + v=v, + cu_seqlens_q=cu_seqlens_q, + max_seqlen_q=max_seqlen_q, + min_seqlen_q=1, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_k=max_seqlen_k, + softmax_scale=softmax_scale, + causal=True, + alibi_slopes=alibi_slopes, + window_size=window_size, + out=out, + ) + return output + + def flash_attn_varlen_func_fake( + q: torch.Tensor, + k_cache: torch.Tensor, + v_cache: torch.Tensor, + out: torch.Tensor, + cu_seqlens_q: torch.Tensor, + cu_seqlens_k: torch.Tensor, + total_tokens: int, + max_seqlen_q: int, + max_seqlen_k: int, + softmax_scale: float, + window_size: Optional[list[int]], # -1 means infinite context window + alibi_slopes: Optional[list[float]], + block_table: torch.Tensor, + ) -> torch.Tensor: + return torch.empty(q.shape[0], + q.shape[1], + v_cache.shape[-2], + dtype=torch.float8_e4m3fnuz, + device="cuda") + + direct_register_custom_op("flash_attn_varlen_func", + flash_attn_varlen_func_impl, ["out"], + flash_attn_varlen_func_fake, + dispatch_key=current_platform.dispatch_key) + +logger = init_logger(__name__) + + +class AiterFlashAttentionMetadataBuilder: + + def __init__(self, runner: "GPUModelRunner", kv_cache_spec: AttentionSpec, + block_table: BlockTable): + model_config = runner.model_config + + self.runner = runner + self.num_heads_q = model_config.get_num_attention_heads( + runner.parallel_config) + self.num_heads_kv = model_config.get_num_kv_heads( + runner.parallel_config) + self.headdim = model_config.get_head_size() + self.block_size = kv_cache_spec.block_size + self.kv_cache_spec = kv_cache_spec + self.block_table = block_table + + # Sliding window size to be used with the AOT scheduler will be + # populated on first build() call. + self.aot_sliding_window: Optional[tuple[int, int]] = None + + def reorder_batch(self, input_batch: "InputBatch", + scheduler_output: "SchedulerOutput") -> bool: + return False + + def build(self, common_prefix_len: int, + common_attn_metadata: CommonAttentionMetadata): + + num_reqs = common_attn_metadata.num_reqs + num_actual_tokens = common_attn_metadata.num_actual_tokens + max_query_len = common_attn_metadata.max_query_len + + max_seq_len = int(self.runner.seq_lens_np[:num_reqs].max()) + total_tokens = int(self.runner.seq_lens_np[:num_reqs].sum()) + query_start_loc = common_attn_metadata.query_start_loc + seq_lens = common_attn_metadata.seq_lens + block_table = self.block_table + block_table_tensor = block_table.get_device_tensor()[:num_reqs] + + block_table.slot_mapping[:num_actual_tokens].copy_( + block_table.slot_mapping_cpu[:num_actual_tokens], + non_blocking=True) + # Fill unused with -1. Needed for reshape_and_cache in full cuda graph + # mode. + block_table.slot_mapping[num_actual_tokens:].fill_(-1) + + slot_mapping = block_table.slot_mapping[:num_actual_tokens] + + cu_seq_lens = torch.zeros(seq_lens.shape[0] + 1, + dtype=torch.int32, + device="cuda") + torch.cumsum(seq_lens, + dim=0, + dtype=cu_seq_lens.dtype, + out=cu_seq_lens[1:]) + + def schedule(batch_size, cu_query_lens, max_query_len, seqlens, + max_seq_len, causal): + return None + + # for local attention + local_attn_metadata = None + if self.runner.attention_chunk_size is not None: + seqlens_q_local_np, virt_q_cu_seqlens_np, virt_k_seqlens_np, \ + virt_block_table_tensor = make_local_attention_virtual_batches( + self.runner.attention_chunk_size, + self.runner.query_start_loc_np[:num_reqs + 1], + self.runner.seq_lens_np[:num_reqs], + block_table_tensor, + self.block_size, + ) + local_query_start_loc = torch.from_numpy(virt_q_cu_seqlens_np).to( + self.runner.device, non_blocking=True) + local_seqused_k = torch.from_numpy(virt_k_seqlens_np).to( + self.runner.device, non_blocking=True) + local_max_query_len = seqlens_q_local_np.max() + local_max_seq_len = virt_k_seqlens_np.max() + local_scheduler_metadata = schedule( + batch_size=local_query_start_loc.shape[0] - 1, + cu_query_lens=local_query_start_loc, + max_query_len=local_max_query_len, + seqlens=local_seqused_k, + max_seq_len=local_max_seq_len, + causal=True) + + local_attn_metadata = \ + AiterFlashAttentionMetadata.LocalAttentionMetadata( + local_query_start_loc=local_query_start_loc, + local_seqused_k=local_seqused_k, + local_block_table=virt_block_table_tensor, + local_max_query_len=local_max_query_len, + local_max_seq_len=local_max_seq_len, + local_scheduler_metadata=local_scheduler_metadata, + ) + + use_cascade = common_prefix_len > 0 + + cu_prefix_query_lens = None + prefix_kv_lens = None + suffix_kv_lens = None + + attn_metadata = AiterFlashAttentionMetadata( + num_actual_tokens=num_actual_tokens, + max_query_len=max_query_len, + query_start_loc=query_start_loc, + max_seq_len=max_seq_len, + seq_lens=seq_lens, + cu_seq_lens=cu_seq_lens, + total_tokens=total_tokens, + block_table=block_table_tensor, + slot_mapping=slot_mapping, + use_cascade=use_cascade, + common_prefix_len=common_prefix_len, + cu_prefix_query_lens=cu_prefix_query_lens, + prefix_kv_lens=prefix_kv_lens, + suffix_kv_lens=suffix_kv_lens, + local_attn_metadata=local_attn_metadata, + ) + return attn_metadata + + def can_run_in_cudagraph( + self, common_attn_metadata: CommonAttentionMetadata) -> bool: + # Full CUDA Graph always supported (FA2 support checked separately) + return True + + def use_cascade_attention(self, *args, **kwargs) -> bool: + return False + + +class AiterFlashAttentionBackend(AttentionBackend): + + accept_output_buffer: bool = True + + @staticmethod + def get_supported_head_sizes() -> list[int]: + return [32, 64, 96, 128, 160, 192, 224, 256] + + @staticmethod + def get_name() -> str: + return "FLASH_ATTN_VLLM_V1" + + @staticmethod + def get_impl_cls() -> type["AiterFlashAttentionImpl"]: + return AiterFlashAttentionImpl + + @staticmethod + def get_metadata_cls() -> type["AttentionMetadata"]: + return AiterFlashAttentionMetadata + + @staticmethod + def get_builder_cls() -> type["AiterFlashAttentionMetadataBuilder"]: + return AiterFlashAttentionMetadataBuilder + + @staticmethod + def get_kv_cache_shape( + num_blocks: int, + block_size: int, + num_kv_heads: int, + head_size: int, + ) -> tuple[int, ...]: + if block_size % 16 != 0: + raise ValueError("Block size must be a multiple of 16.") + return (2, num_blocks, block_size, num_kv_heads, head_size) + + +@dataclass +class AiterFlashAttentionMetadata: + # NOTE(sang): Definition of context_len, query_len, and seq_len. + # |---------- N-1 iteration --------| + # |---------------- N iteration ---------------------| + # |- tokenA -|......................|-- newTokens ---| + # |---------- context_len ----------| + # |-------------------- seq_len ---------------------| + # |-- query_len ---| + + num_actual_tokens: int # Number of tokens excluding padding. + max_query_len: int + query_start_loc: torch.Tensor + max_seq_len: int + seq_lens: torch.Tensor + cu_seq_lens: torch.Tensor + total_tokens: int + block_table: torch.Tensor + slot_mapping: torch.Tensor + + # For cascade attention. + use_cascade: bool + common_prefix_len: int + cu_prefix_query_lens: Optional[torch.Tensor] + prefix_kv_lens: Optional[torch.Tensor] + suffix_kv_lens: Optional[torch.Tensor] + + # for local attention + @dataclass + class LocalAttentionMetadata: + local_query_start_loc: torch.Tensor + local_seqused_k: torch.Tensor + local_block_table: torch.Tensor + local_max_query_len: int + local_max_seq_len: int + local_scheduler_metadata: Optional[torch.Tensor] + + local_attn_metadata: Optional[LocalAttentionMetadata] = None + + +class AiterFlashAttentionImpl(AttentionImpl): + + def __init__( + self, + num_heads: int, + head_size: int, + scale: float, + num_kv_heads: int, + alibi_slopes: Optional[list[float]], + sliding_window: Optional[int], + kv_cache_dtype: str, + blocksparse_params: Optional[dict[str, Any]] = None, + logits_soft_cap: Optional[float] = None, + attn_type: AttentionType = AttentionType.DECODER, + use_irope: bool = False, + ) -> None: + if blocksparse_params is not None: + raise ValueError( + "AiterFlashAttention does not support block-sparse attention.") + self.num_heads = num_heads + self.head_size = head_size + self.scale = float(scale) + self.num_kv_heads = num_kv_heads + if alibi_slopes is not None: + alibi_slopes = torch.tensor(alibi_slopes, dtype=torch.float32) + self.alibi_slopes = alibi_slopes + if sliding_window is None: + self.sliding_window = [-1, -1] + else: + self.sliding_window = [sliding_window - 1, 0] + self.kv_cache_dtype = kv_cache_dtype + if logits_soft_cap is None: + # In flash-attn, setting logits_soft_cap as 0 means no soft cap. + logits_soft_cap = 0. + self.logits_soft_cap = logits_soft_cap + + assert self.num_heads % self.num_kv_heads == 0 + self.num_queries_per_kv = self.num_heads // self.num_kv_heads + + support_head_sizes = \ + AiterFlashAttentionBackend.get_supported_head_sizes() + if head_size not in support_head_sizes: + raise ValueError( + f"Head size {head_size} is not supported by " + "AiterFlashAttention. " + f"Supported head sizes are: {support_head_sizes}. " + "Set VLLM_USE_V1=0 to use another attention backend.") + + if attn_type != AttentionType.DECODER: + raise NotImplementedError("Encoder self-attention and " + "encoder/decoder cross-attention " + "are not implemented for " + "FlashAttentionImpl") + self.use_irope = use_irope + if is_quantized_kv_cache(self.kv_cache_dtype): + raise NotImplementedError( + "AiterFlashAttention does not support fp8 kv-cache on this " + "device.") + + def forward( + self, + layer: torch.nn.Module, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + kv_cache: torch.Tensor, + attn_metadata: AiterFlashAttentionMetadata, + output: Optional[torch.Tensor] = None, + output_scale: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + """Forward pass with AiterFlashAttention. + + Args: + query: shape = [num_tokens, num_heads, head_size] + key: shape = [num_tokens, num_kv_heads, head_size] + value: shape = [num_tokens, num_kv_heads, head_size] + kv_cache = [2, num_blocks, block_size, num_kv_heads, head_size] + attn_metadata: Metadata for attention. + Returns: + shape = [num_tokens, num_heads * head_size] + NOTE: FP8 quantization, flash-attn expect the size of + {q,k,v}_descale to be (num_sequences, num_kv_heads). + We use torch's .expand() to avoid duplicating values + """ + assert output is not None, "Output tensor must be provided." + + if output_scale is not None: + raise NotImplementedError( + "fused output quantization is not yet supported" + " for FlashAttentionImpl") + + if attn_metadata is None: + # Profiling run. + return output + + # IMPORTANT! + # NOTE(woosuk): With piece-wise CUDA graphs, this method is executed in + # eager-mode PyTorch. Thus, we need to be careful about any CPU overhead + # in this method. For example, `view` and `slice` (or `[:n]`) operations + # are surprisingly slow even in the case they do not invoke any GPU ops. + # Minimize the PyTorch ops in this method as much as possible. + # Whenever making a change in this method, please benchmark the + # performance to make sure it does not introduce any overhead. + + num_actual_tokens = attn_metadata.num_actual_tokens + # Reshape the input keys and values and store them in the cache. + # NOTE(woosuk): Here, key and value are padded while slot_mapping is + # not padded. However, we don't need to do key[:num_actual_tokens] and + # value[:num_actual_tokens] because the reshape_and_cache_flash op uses + # the slot_mapping's shape to determine the number of actual tokens. + key_cache, value_cache = kv_cache.unbind(0) + torch.ops._C_cache_ops.reshape_and_cache_flash( + key, + value, + key_cache, + value_cache, + attn_metadata.slot_mapping, + self.kv_cache_dtype, + layer._k_scale, + layer._v_scale, + ) + + if self.kv_cache_dtype.startswith("fp8"): + key_cache = key_cache.view(torch.float8_e4m3fnuz) + value_cache = value_cache.view(torch.float8_e4m3fnuz) + num_tokens, num_heads, head_size = query.shape + query, _ = ops.scaled_fp8_quant( + query.reshape( + (num_tokens, num_heads * head_size)).contiguous(), + layer._q_scale) + query = query.reshape((num_tokens, num_heads, head_size)) + + # Compute attention and update output up to `num_actual_tokens`. + use_local_attn = \ + (self.use_irope and attn_metadata.local_attn_metadata is not None) + + if not attn_metadata.use_cascade or use_local_attn: + if use_local_attn: + assert attn_metadata.local_attn_metadata is not None + local_metadata = attn_metadata.local_attn_metadata + cu_seqlens_q = local_metadata.local_query_start_loc + seqused_k = local_metadata.local_seqused_k + max_seqlen_q = local_metadata.local_max_query_len + max_seqlen_k = local_metadata.local_max_seq_len + block_table = local_metadata.local_block_table + else: + cu_seqlens_q = attn_metadata.query_start_loc + seqused_k = attn_metadata.seq_lens + max_seqlen_q = attn_metadata.max_query_len + max_seqlen_k = attn_metadata.max_seq_len + block_table = attn_metadata.block_table + + if max_seqlen_q > 1: + cu_seq_lens = attn_metadata.cu_seq_lens + total_tokens = attn_metadata.total_tokens + torch.ops.vllm.flash_attn_varlen_func( + query[:num_actual_tokens], + key_cache, + value_cache, + out=output[:num_actual_tokens], + cu_seqlens_q=cu_seqlens_q, + max_seqlen_q=max_seqlen_q, + max_seqlen_k=max_seqlen_k, + total_tokens=total_tokens, + softmax_scale=self.scale, + alibi_slopes=self.alibi_slopes, + window_size=self.sliding_window, + block_table=block_table, + cu_seqlens_k=cu_seq_lens, + ) + + _, num_heads, head_size = query.shape + _PARTITION_SIZE_ROCM = 256 + num_seqs = seqused_k.shape[0] + nbyes_per_qo_elem = torch.finfo(output.dtype).bits // 8 + max_num_partitions = (max_seqlen_k + _PARTITION_SIZE_ROCM - + 1) // _PARTITION_SIZE_ROCM + + workspace_buffer = torch.empty( + (num_seqs * num_heads * max_num_partitions * head_size) * + nbyes_per_qo_elem + 2 * + (num_seqs * num_heads * max_num_partitions) * 4, + dtype=torch.uint8, + device=output.device, + ) + + aiter.paged_attention_v1( + output[:num_actual_tokens], + workspace_buffer, + query[:num_actual_tokens], + key_cache, + value_cache, + self.scale, + block_table, + cu_seqlens_q, + seqused_k, + max_seqlen_k, + self.alibi_slopes, + self.kv_cache_dtype, + "NHD", + self.logits_soft_cap, + layer._k_scale, + layer._v_scale, + None, + _PARTITION_SIZE_ROCM, + ) + return output + else: + raise NotImplementedError( + "Cascade attention is not implemented for ROCM AITER") From 12575cfa7aa176e017735dd2883513b12be54c32 Mon Sep 17 00:00:00 2001 From: Chauncey Date: Thu, 19 Jun 2025 01:26:16 +0800 Subject: [PATCH 020/211] [Bugfix] fix RAY_CGRAPH_get_timeout is not set successfully (#19725) Signed-off-by: chaunceyjiang --- vllm/executor/ray_distributed_executor.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/vllm/executor/ray_distributed_executor.py b/vllm/executor/ray_distributed_executor.py index bdc2b1f4c27cd..a3f05ec5ea3f2 100644 --- a/vllm/executor/ray_distributed_executor.py +++ b/vllm/executor/ray_distributed_executor.py @@ -557,8 +557,17 @@ class RayDistributedExecutor(DistributedExecutorBase): def _compiled_ray_dag(self, enable_asyncio: bool): assert self.parallel_config.use_ray self._check_ray_cgraph_installation() + # Enlarge the default value of "RAY_CGRAPH_get_timeout" to 300 seconds + # (it is 10 seconds by default). This is a Ray environment variable to + # control the timeout of getting result from a compiled graph execution, + # i.e., the distributed execution that includes model forward runs and + # intermediate tensor communications, in the case of vllm. + # Note: we should set this env var before importing + # ray.dag, otherwise it will not take effect. + os.environ.setdefault("RAY_CGRAPH_get_timeout", "300") # noqa: SIM112 from ray.dag import InputNode, MultiOutputNode - + logger.info("RAY_CGRAPH_get_timeout is set to %s", + os.environ["RAY_CGRAPH_get_timeout"]) # noqa: SIM112 logger.info("VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE = %s", envs.VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE) logger.info("VLLM_USE_RAY_COMPILED_DAG_OVERLAP_COMM = %s", @@ -570,15 +579,6 @@ class RayDistributedExecutor(DistributedExecutorBase): "Invalid value for VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE: " f"{channel_type}. Valid values are: 'auto', 'nccl', or 'shm'.") - # Enlarge the default value of "RAY_CGRAPH_get_timeout" to 300 seconds - # (it is 10 seconds by default). This is a Ray environment variable to - # control the timeout of getting result from a compiled graph execution, - # i.e., the distributed execution that includes model forward runs and - # intermediate tensor communications, in the case of vllm. - os.environ.setdefault("RAY_CGRAPH_get_timeout", "300") # noqa: SIM112 - logger.info("RAY_CGRAPH_get_timeout is set to %s", - os.environ["RAY_CGRAPH_get_timeout"]) # noqa: SIM112 - with InputNode() as input_data: # Example DAG: PP=2, TP=4 # From ffacb222cbc3c1ac6ef05270bf8a2536a786ae06 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Wed, 18 Jun 2025 16:22:28 -0400 Subject: [PATCH 021/211] [Docs] Add Huzaifa Sidhpurwala to vuln mgmt team doc (#19808) Signed-off-by: Russell Bryant --- docs/contributing/vulnerability_management.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/contributing/vulnerability_management.md b/docs/contributing/vulnerability_management.md index 1842b3010c496..e20b10f8f7b32 100644 --- a/docs/contributing/vulnerability_management.md +++ b/docs/contributing/vulnerability_management.md @@ -34,6 +34,7 @@ you may contact the following individuals: - Simon Mo - simon.mo@hey.com - Russell Bryant - rbryant@redhat.com +- Huzaifa Sidhpurwala - huzaifas@redhat.com ## Slack Discussion From a89209b78de0aa3d47a684a34861336d3ba7c0fe Mon Sep 17 00:00:00 2001 From: Chen Zhang Date: Thu, 19 Jun 2025 04:34:15 +0800 Subject: [PATCH 022/211] [v1] Support mamba2 (#19327) Signed-off-by: Chen Zhang --- .../models/language/generation/test_hybrid.py | 53 +++- tests/v1/test_oracle.py | 2 +- vllm/engine/arg_utils.py | 7 +- .../layers/mamba/mamba_mixer2.py | 235 +++++++++++++----- vllm/model_executor/models/mamba2.py | 52 ++-- vllm/v1/attention/backends/mamba_attn.py | 192 ++++++++++++++ vllm/v1/core/single_type_kv_cache_manager.py | 43 +++- vllm/v1/kv_cache_interface.py | 24 ++ vllm/v1/worker/gpu_model_runner.py | 94 +++++-- 9 files changed, 582 insertions(+), 120 deletions(-) create mode 100644 vllm/v1/attention/backends/mamba_attn.py diff --git a/tests/models/language/generation/test_hybrid.py b/tests/models/language/generation/test_hybrid.py index 3eaadcb45fe12..90c4cd968e7a2 100644 --- a/tests/models/language/generation/test_hybrid.py +++ b/tests/models/language/generation/test_hybrid.py @@ -17,9 +17,10 @@ SSM_MODELS = [ "state-spaces/mamba-130m-hf", "tiiuae/falcon-mamba-tiny-dev", # TODO: Compare to a Mamba2 model. The HF transformers implementation of - # Mamba2 is buggy for Codestral as it doesn't handle n_groups. + # Mamba2 is buggy for Codestral as it doesn't handle n_groups, so the test + # doesn't compare vLLM output with HF output. # See https://github.com/huggingface/transformers/pull/35943 - # "mistralai/Mamba-Codestral-7B-v0.1", + "mistralai/Mamba-Codestral-7B-v0.1", ] HYBRID_MODELS = [ @@ -35,6 +36,10 @@ HYBRID_MODELS = [ "hmellor/tiny-random-BambaForCausalLM", ] +V1_SUPPORTED_MODELS = [ + "mistralai/Mamba-Codestral-7B-v0.1", +] + # Avoid OOM MAX_NUM_SEQS = 4 @@ -46,24 +51,50 @@ def test_models( hf_runner, vllm_runner, example_prompts, + monkeypatch, model: str, max_tokens: int, num_logprobs: int, ) -> None: with hf_runner(model) as hf_model: - hf_outputs = hf_model.generate_greedy_logprobs_limit( - example_prompts, max_tokens, num_logprobs) + if model != "mistralai/Mamba-Codestral-7B-v0.1": + hf_outputs = hf_model.generate_greedy_logprobs_limit( + example_prompts, max_tokens, num_logprobs) + else: + hf_outputs = None with vllm_runner(model, max_num_seqs=MAX_NUM_SEQS) as vllm_model: - vllm_outputs = vllm_model.generate_greedy_logprobs( + vllm_v0_outputs = vllm_model.generate_greedy_logprobs( example_prompts, max_tokens, num_logprobs) - check_logprobs_close( - outputs_0_lst=hf_outputs, - outputs_1_lst=vllm_outputs, - name_0="hf", - name_1="vllm", - ) + if model in V1_SUPPORTED_MODELS: + with monkeypatch.context() as m: + m.setenv("VLLM_USE_V1", "1") + with vllm_runner(model, + max_num_seqs=MAX_NUM_SEQS, + enforce_eager=True, + enable_prefix_caching=False) as vllm_model: + vllm_v1_outputs = vllm_model.generate_greedy_logprobs( + example_prompts, max_tokens, num_logprobs) + else: + vllm_v1_outputs = None + + if hf_outputs is not None: + check_logprobs_close( + outputs_0_lst=hf_outputs, + outputs_1_lst=vllm_v0_outputs, + name_0="hf", + name_1="vllm-v0", + ) + + if model in V1_SUPPORTED_MODELS: + ref_outputs = hf_outputs if hf_outputs is not None else vllm_v0_outputs + check_logprobs_close( + outputs_0_lst=ref_outputs, + outputs_1_lst=vllm_v1_outputs, + name_0="hf" if hf_outputs is not None else "vllm-v0", + name_1="vllm-v1", + ) @pytest.mark.parametrize("model", SSM_MODELS + HYBRID_MODELS) diff --git a/tests/v1/test_oracle.py b/tests/v1/test_oracle.py index e5eadfd4e9dad..1787b9a0b4695 100644 --- a/tests/v1/test_oracle.py +++ b/tests/v1/test_oracle.py @@ -12,7 +12,7 @@ from vllm.engine.async_llm_engine import AsyncLLMEngine UNSUPPORTED_MODELS_V1 = [ "openai/whisper-large-v3", # transcription "facebook/bart-large-cnn", # encoder decoder - "mistralai/Mamba-Codestral-7B-v0.1", # mamba + "state-spaces/mamba-130m-hf", # mamba1 "hmellor/tiny-random-BambaForCausalLM", # hybrid "BAAI/bge-m3", # embedding ] diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 91b748b54c28f..4ca645b91b33a 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1355,12 +1355,17 @@ class EngineArgs: recommend_to_remove=False) return False - # No Mamba or Encoder-Decoder so far. + # No Encoder-Decoder, not all Mamba so far. if not model_config.is_v1_compatible: _raise_or_fallback(feature_name=model_config.architectures, recommend_to_remove=False) return False + # V1 mamba models are unoptimized. + if model_config.has_inner_state and _warn_or_fallback( + feature_name="Mamba"): + return False + # No Concurrent Partial Prefills so far. if (self.max_num_partial_prefills != SchedulerConfig.max_num_partial_prefills diff --git a/vllm/model_executor/layers/mamba/mamba_mixer2.py b/vllm/model_executor/layers/mamba/mamba_mixer2.py index cd3b0b3907d77..9dcbcb2e6f2be 100644 --- a/vllm/model_executor/layers/mamba/mamba_mixer2.py +++ b/vllm/model_executor/layers/mamba/mamba_mixer2.py @@ -6,7 +6,9 @@ from typing import Optional, Union import torch from torch import nn +from vllm import envs from vllm.attention.backends.abstract import AttentionMetadata +from vllm.config import get_current_vllm_config from vllm.distributed import (divide, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, tensor_model_parallel_all_gather, @@ -27,6 +29,7 @@ from vllm.model_executor.model_loader.weight_utils import ( LoaderFunction, composed_weight_loader, sharded_weight_loader) from vllm.model_executor.models.mamba_cache import MambaCacheParams from vllm.model_executor.utils import set_weight_attrs +from vllm.v1.attention.backends.mamba_attn import Mamba2AttentionMetadata # Added by the IBM Team, 2024 @@ -227,20 +230,22 @@ class MambaMixer2(CustomOp): """ def __init__( - self, - hidden_size: int, - ssm_state_size: int, - conv_kernel_size: int, - intermediate_size: int, - use_conv_bias: bool, - use_bias: bool, - n_groups: int = 1, - num_heads: int = 128, - head_dim: int = 64, - rms_norm_eps: float = 1e-5, - activation: str = "silu", - use_rms_norm: bool = True, - quant_config: Optional[QuantizationConfig] = None, + self, + hidden_size: int, + ssm_state_size: int, + conv_kernel_size: int, + intermediate_size: int, + use_conv_bias: bool, + use_bias: bool, + n_groups: int = 1, + num_heads: int = 128, + head_dim: int = 64, + rms_norm_eps: float = 1e-5, + activation: str = "silu", + use_rms_norm: bool = True, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + chunk_size: int = -1, # the chunk size used by v1 ): super().__init__() @@ -273,6 +278,7 @@ class MambaMixer2(CustomOp): ), "Tensor parallel currently not supported for quantized models." self.ssm_state_size = ssm_state_size + self.conv_kernel_size = conv_kernel_size self.activation = activation self.intermediate_size = intermediate_size @@ -411,6 +417,22 @@ class MambaMixer2(CustomOp): self.use_rms_norm, eps=rms_norm_eps) + if envs.VLLM_USE_V1: + compilation_config = get_current_vllm_config().compilation_config + if prefix in compilation_config.static_forward_context: + raise ValueError(f"Duplicate layer name: {prefix}") + compilation_config.static_forward_context[prefix] = self + # The outer list is for v0 PP virtual engine. Though this code path + # only runs for v1, we have to do this to unify with the interface + # of Attention + v0 PP. + # The inner tuple is (conv_state, ssm_state) + self.kv_cache = [(torch.tensor([]), torch.tensor([]))] + assert chunk_size != -1, "chunk_size must be set for v1" + + # NOTE: chunk_size may be -1 for models without v1 support + self.chunk_size = chunk_size + self.prefix = prefix + def forward_native( self, hidden_states: torch.Tensor, @@ -426,17 +448,37 @@ class MambaMixer2(CustomOp): mamba2_metadata: Mamba2Metadata, mup_vector: Optional[torch.Tensor] = None, ): + forward_context = get_forward_context() # mamba2_metadata contains metadata necessary for the mamba2 triton # kernels to operate in continuous batching and in chunked prefill # modes; they are computed at top-level model forward since they # stay the same and reused for all mamba layers in the same iteration - attn_metadata: AttentionMetadata = get_forward_context().attn_metadata - - num_prefills = attn_metadata.num_prefills # request count - num_decodes = attn_metadata.num_decode_tokens # token count (=request) - num_prefill_tokens = attn_metadata.num_prefill_tokens # token count - has_prefill = num_prefills > 0 - has_decode = num_decodes > 0 + attn_metadata: AttentionMetadata = forward_context.attn_metadata + if envs.VLLM_USE_V1: + if attn_metadata is not None: + assert isinstance(attn_metadata, dict) + attn_metadata = attn_metadata[self.prefix] + assert isinstance(attn_metadata, Mamba2AttentionMetadata) + self_kv_cache = self.kv_cache[forward_context.virtual_engine] + conv_state = self_kv_cache[0] + ssm_state = self_kv_cache[1] + state_indices_tensor = attn_metadata.state_indices_tensor + has_initial_states_p = attn_metadata.has_initial_states + prep_initial_states = attn_metadata.prep_initial_states + chunk_size = attn_metadata.chunk_size + seq_idx_p = attn_metadata.seq_idx + chunk_indices_p = attn_metadata.chunk_indices + chunk_offsets_p = attn_metadata.chunk_offsets + else: + conv_state = mamba_cache_params.conv_state + ssm_state = mamba_cache_params.ssm_state + state_indices_tensor = mamba_cache_params.state_indices_tensor + has_initial_states_p = mamba2_metadata.has_initial_states + prep_initial_states = mamba2_metadata.prep_initial_states + chunk_size = mamba2_metadata.chunk_size + seq_idx_p = mamba2_metadata.seq_idx + chunk_indices_p = mamba2_metadata.chunk_indices + chunk_offsets_p = mamba2_metadata.chunk_offsets groups_time_state_size = self.n_groups * self.ssm_state_size @@ -459,27 +501,6 @@ class MambaMixer2(CustomOp): conv_weights = self.conv1d.weight.view(self.conv1d.weight.size(0), self.conv1d.weight.size(2)) - # Separate prefill and decode by splitting varlen input - # Split along token dimension - hidden_states_B_C_p, hidden_states_B_C_d = torch.split( - hidden_states_B_C, - [num_prefill_tokens, num_decodes], - dim=0, - ) - dt_p, dt_d = torch.split( - dt, - [num_prefill_tokens, num_decodes], - dim=0, - ) - # Split along batch dimension - state_indices_tensor_p, state_indices_tensor_d = torch.split( - mamba_cache_params.state_indices_tensor, - [num_prefills, num_decodes], - dim=0, - ) - query_start_loc_p = (attn_metadata.query_start_loc[:num_prefills + 1] - if has_prefill else None) - # - get hidden_states, B and C after depthwise convolution. split_hidden_states_B_C_fn = lambda hidden_states_B_C: torch.split( hidden_states_B_C, @@ -491,20 +512,80 @@ class MambaMixer2(CustomOp): dim=-1, ) + if envs.VLLM_USE_V1 and attn_metadata is None: + # V1 profile run + hidden_states_B_C = (hidden_states_B_C.transpose( + 0, 1).clone().transpose(0, 1)).contiguous() + hidden_states, _B, _C = split_hidden_states_B_C_fn( + hidden_states_B_C) + hidden_states = self.norm(hidden_states, gate) + out, _ = self.out_proj(hidden_states) + return out + + num_prefills = attn_metadata.num_prefills # request count + num_decodes = attn_metadata.num_decode_tokens # token count (=request) + num_prefill_tokens = attn_metadata.num_prefill_tokens # token count + has_prefill = num_prefills > 0 + has_decode = num_decodes > 0 + + # NOTE: V0 put prefill before decode, v1 puts decode before prefill + # Separate prefill and decode by splitting varlen input + # Split along token dimension + if envs.VLLM_USE_V1: + hidden_states_B_C_d, hidden_states_B_C_p = torch.split( + hidden_states_B_C, + [num_decodes, num_prefill_tokens], + dim=0, + ) + dt_d, dt_p = torch.split( + dt, + [num_decodes, num_prefill_tokens], + dim=0, + ) + # Split along batch dimension + state_indices_tensor_d, state_indices_tensor_p = torch.split( + state_indices_tensor, + [num_decodes, num_prefills], + dim=0, + ) + query_start_loc_p = ( + attn_metadata.query_start_loc[-num_prefills - 1:] - + num_decodes if has_prefill else None) + else: + hidden_states_B_C_p, hidden_states_B_C_d = torch.split( + hidden_states_B_C, + [num_prefill_tokens, num_decodes], + dim=0, + ) + dt_p, dt_d = torch.split( + dt, + [num_prefill_tokens, num_decodes], + dim=0, + ) + # Split along batch dimension + state_indices_tensor_p, state_indices_tensor_d = torch.split( + state_indices_tensor, + [num_prefills, num_decodes], + dim=0, + ) + query_start_loc_p = (attn_metadata.query_start_loc[:num_prefills + + 1] + if has_prefill else None) + ssd_output_list = [] # Process prefill requests if has_prefill: # 2. Convolution sequence transformation # - "cache_indices" updates the conv_state cache in positions - # pointed to by "mamba_cache_params.state_indices_tensor" + # pointed to by "state_indices_tensor" hidden_states_B_C_p = causal_conv1d_fn( hidden_states_B_C_p.transpose(0, 1), conv_weights, self.conv1d.bias, activation=self.activation, - conv_states=mamba_cache_params.conv_state, - has_initial_state=mamba2_metadata.has_initial_states, + conv_states=conv_state, + has_initial_state=has_initial_states_p, cache_indices=state_indices_tensor_p, query_start_loc=query_start_loc_p).transpose( 0, 1)[:num_prefill_tokens] @@ -516,12 +597,11 @@ class MambaMixer2(CustomOp): # 3. State Space Model sequence transformation initial_states = None - if (mamba2_metadata.has_initial_states is not None - and mamba2_metadata.prep_initial_states): + if (has_initial_states_p is not None and prep_initial_states): # making a copy of the states initial_states = torch.where( - mamba2_metadata.has_initial_states[:, None, None, None], - mamba_cache_params.ssm_state[state_indices_tensor_p], 0) + has_initial_states_p[:, None, None, None], + ssm_state[state_indices_tensor_p], 0) scan_output, varlen_state = mamba_chunk_scan_combined( hidden_states_p.view(1, num_prefill_tokens, @@ -533,14 +613,14 @@ class MambaMixer2(CustomOp): -1), C_p.view(1, num_prefill_tokens, self.n_groups // self.tp_size, -1), - chunk_size=mamba2_metadata.chunk_size, + chunk_size=chunk_size, D=self.D, z=None, dt_bias=self.dt_bias, - seq_idx=mamba2_metadata.seq_idx, - chunk_indices=mamba2_metadata.chunk_indices, - chunk_offsets=mamba2_metadata.chunk_offsets, - cu_seqlens=attn_metadata.query_start_loc[:num_prefills + 1], + seq_idx=seq_idx_p, + chunk_indices=chunk_indices_p, + chunk_offsets=chunk_offsets_p, + cu_seqlens=query_start_loc_p, initial_states=initial_states, return_varlen_states=True, return_final_states=False, @@ -550,7 +630,7 @@ class MambaMixer2(CustomOp): # update ssm states # - varlen state is a (num_prefills, nheads, headdim, dstate) tensor - mamba_cache_params.ssm_state[state_indices_tensor_p] = varlen_state + ssm_state[state_indices_tensor_p] = varlen_state # - reshape ssd_output_list.append(scan_output.view(num_prefill_tokens, -1)) @@ -560,7 +640,7 @@ class MambaMixer2(CustomOp): # 2. Convolution sequence transformation hidden_states_B_C_d = causal_conv1d_update( hidden_states_B_C_d, - mamba_cache_params.conv_state, + conv_state, conv_weights, self.conv1d.bias, self.activation, @@ -586,7 +666,7 @@ class MambaMixer2(CustomOp): # using state_indices_tensor_d hidden_states_d = selective_state_update( - mamba_cache_params.ssm_state, + ssm_state, hidden_states_d, dt_d, A_d, @@ -598,9 +678,16 @@ class MambaMixer2(CustomOp): dt_softplus=True, state_batch_indices=state_indices_tensor_d, ) - ssd_output_list.append( - hidden_states_d.view(-1, (self.num_heads // self.tp_size) * - self.head_dim)) + + if envs.VLLM_USE_V1: + ssd_output_list.insert( + 0, + hidden_states_d.view(-1, (self.num_heads // self.tp_size) * + self.head_dim)) + else: + ssd_output_list.append( + hidden_states_d.view(-1, (self.num_heads // self.tp_size) * + self.head_dim)) # Merge prefill and decode outputs before passing to gated MLP hidden_states = torch.vstack(ssd_output_list) @@ -614,3 +701,31 @@ class MambaMixer2(CustomOp): # 5. Final linear projection out, _ = self.out_proj(hidden_states) return out + + def get_state_shape(self) -> tuple[tuple[int, ...], tuple[int, ...]]: + world_size = get_tensor_model_parallel_world_size() + + conv_state_shape, temporal_state_shape = None, None + + # if n_groups is not divisible by world_size, need to extend the shards + # to ensure all groups needed by a head is sharded along with it + n_groups = (self.n_groups + + extra_groups_for_head_shards(self.n_groups, world_size)) + + # - heads and n_groups are TP-ed + conv_dim = (self.intermediate_size + + 2 * n_groups * self.ssm_state_size) + conv_state_shape = ( + divide(conv_dim, world_size), + self.conv_kernel_size - 1, + ) + + # These are not TP-ed as they depend on A, dt_bias, D + # - they are typically small + # e.g., (h_heads, d_head, d_state) = (128, 64, 128) + temporal_state_shape = ( + divide(self.num_heads, world_size), + self.head_dim, + self.ssm_state_size, + ) + return conv_state_shape, temporal_state_shape diff --git a/vllm/model_executor/models/mamba2.py b/vllm/model_executor/models/mamba2.py index cf9e1bd03e986..d2403ccbb9724 100644 --- a/vllm/model_executor/models/mamba2.py +++ b/vllm/model_executor/models/mamba2.py @@ -8,6 +8,7 @@ import torch from torch import nn from transformers import MambaConfig +from vllm import envs from vllm.attention.backends.abstract import AttentionMetadata from vllm.config import VllmConfig from vllm.distributed import divide, get_tensor_model_parallel_world_size @@ -25,8 +26,7 @@ from vllm.model_executor.layers.vocab_parallel_embedding import ( DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.models.interfaces import (HasInnerState, - IsAttentionFree, - SupportsV0Only) + IsAttentionFree) from vllm.model_executor.models.mamba_cache import (MambaCacheManager, MambaCacheParams) from vllm.model_executor.sampling_metadata import SamplingMetadata @@ -44,7 +44,8 @@ class Mamba2DecoderLayer(nn.Module): def __init__(self, config: MambaConfig, - quant_config: Optional[QuantizationConfig] = None) -> None: + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "") -> None: super().__init__() self.config = config self.mixer = MambaMixer2(hidden_size=config.hidden_size, @@ -60,7 +61,9 @@ class Mamba2DecoderLayer(nn.Module): head_dim=config.head_dim, rms_norm_eps=config.layer_norm_epsilon, activation=config.hidden_act, - quant_config=quant_config) + quant_config=quant_config, + prefix=f"{prefix}.mixer", + chunk_size=config.chunk_size) self.norm = RMSNorm(config.hidden_size, eps=config.layer_norm_epsilon) @@ -108,8 +111,8 @@ class Mamba2Model(nn.Module): self.start_layer, self.end_layer, self.layers = make_layers( config.num_hidden_layers, - lambda prefix: Mamba2DecoderLayer(config, - quant_config=quant_config), + lambda prefix: Mamba2DecoderLayer( + config, quant_config=quant_config, prefix=prefix), prefix=f"{prefix}.layers") self.norm_f = RMSNorm(config.hidden_size, @@ -142,10 +145,14 @@ class Mamba2Model(nn.Module): attn_metadata: AttentionMetadata = get_forward_context().attn_metadata - mamba2_metadata = prepare_mamba2_metadata( - chunk_size=self.config.chunk_size, - attn_metadata=attn_metadata, - ) + if not envs.VLLM_USE_V1: + mamba2_metadata = prepare_mamba2_metadata( + chunk_size=self.config.chunk_size, + attn_metadata=attn_metadata, + ) + else: + # v1 get mamba2_metadata from forward_context + mamba2_metadata = None for i in range(len(self.layers)): layer = self.layers[i] @@ -155,7 +162,7 @@ class Mamba2Model(nn.Module): hidden_states=hidden_states, residual=residual, mamba_cache_params=mamba_cache_params.at_layer_idx( - i - self.start_layer), + i - self.start_layer) if mamba_cache_params else None, mamba2_metadata=mamba2_metadata) if not get_pp_group().is_last_rank: @@ -190,8 +197,7 @@ class Mamba2Model(nn.Module): return loaded_params -class Mamba2ForCausalLM(nn.Module, HasInnerState, IsAttentionFree, - SupportsV0Only): +class Mamba2ForCausalLM(nn.Module, HasInnerState, IsAttentionFree): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): config = vllm_config.model_config.hf_config @@ -242,14 +248,20 @@ class Mamba2ForCausalLM(nn.Module, HasInnerState, IsAttentionFree, intermediate_tensors: Optional[IntermediateTensors] = None, inputs_embeds: Optional[torch.Tensor] = None, **kwargs): - if self.mamba_cache is None: - num_mamba_layers = self.model_config.get_num_layers_by_block_type( - self.vllm_config.parallel_config, LayerBlockType.mamba) - self.mamba_cache = MambaCacheManager( - self.vllm_config, self.lm_head.weight.dtype, num_mamba_layers, - *self._get_mamba_cache_shape()) + if not envs.VLLM_USE_V1: + if self.mamba_cache is None: + num_mamba_layers = ( + self.model_config.get_num_layers_by_block_type( + self.vllm_config.parallel_config, + LayerBlockType.mamba)) + self.mamba_cache = MambaCacheManager( + self.vllm_config, self.lm_head.weight.dtype, + num_mamba_layers, *self._get_mamba_cache_shape()) - mamba_cache_params = self.mamba_cache.current_run_tensors(**kwargs) + mamba_cache_params = self.mamba_cache.current_run_tensors(**kwargs) + else: + # NOTE: mamba_cache_params is not needed for v1 + mamba_cache_params = None hidden_states = self.backbone(input_ids, positions, mamba_cache_params, intermediate_tensors, inputs_embeds) diff --git a/vllm/v1/attention/backends/mamba_attn.py b/vllm/v1/attention/backends/mamba_attn.py new file mode 100644 index 0000000000000..74d619aadbdc7 --- /dev/null +++ b/vllm/v1/attention/backends/mamba_attn.py @@ -0,0 +1,192 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from dataclasses import dataclass +from typing import TYPE_CHECKING + +import torch + +from vllm.attention.backends.abstract import AttentionBackend +from vllm.config import VllmConfig, get_layers_from_vllm_config +from vllm.model_executor.layers.mamba.mamba2_metadata import ( + _query_start_loc_to_chunk_indices_offsets) +from vllm.v1.attention.backends.utils import (AttentionMetadataBuilder, + CommonAttentionMetadata) +from vllm.v1.kv_cache_interface import MambaSpec +from vllm.v1.worker.block_table import BlockTable + +if TYPE_CHECKING: + from vllm.v1.core.sched.output import SchedulerOutput + from vllm.v1.worker.gpu_input_batch import InputBatch + from vllm.v1.worker.gpu_model_runner import GPUModelRunner + + +def get_mamba2_chunk_size(vllm_config: VllmConfig) -> int: + from vllm.model_executor.layers.mamba.mamba_mixer2 import MambaMixer2 + layers = get_layers_from_vllm_config(vllm_config, MambaMixer2) + chunk_sizes = set(layer.chunk_size for layer in layers.values()) + assert len( + chunk_sizes) == 1, "All Mamba2 layers must have the same chunk size" + return chunk_sizes.pop() + + +class Mamba2AttentionBackend(AttentionBackend): + + @staticmethod + def get_builder_cls() -> type["Mamba2AttentionMetadataBuilder"]: + return Mamba2AttentionMetadataBuilder + + +@dataclass +class Mamba2AttentionMetadata: + num_prefills: int + num_prefill_tokens: int + num_decodes: int + num_decode_tokens: int + query_start_loc: torch.Tensor + seq_lens: torch.Tensor + + has_initial_states: torch.Tensor + prep_initial_states: bool + chunk_size: int + seq_idx: torch.Tensor + chunk_indices: torch.Tensor + chunk_offsets: torch.Tensor + + state_indices_tensor: torch.Tensor # shape: [batch,] + + +class Mamba2AttentionMetadataBuilder( + AttentionMetadataBuilder[Mamba2AttentionMetadata]): + + def __init__(self, runner: "GPUModelRunner", kv_cache_spec: MambaSpec, + block_table: BlockTable): + self.runner = runner + self.kv_cache_spec = kv_cache_spec + self.block_table = block_table + self.chunk_size = get_mamba2_chunk_size(runner.vllm_config) + + def reorder_batch(self, input_batch: "InputBatch", + scheduler_output: "SchedulerOutput") -> bool: + # NOTE (Chen): Copied from MLACommonMetadataBuilder and + # FlashInferMetadataBuilder. Should be refactored later to avoid code + # duplication of these 3 functions. + # We now want to reorder the batch so that the "decode" requests are and + # the front and the "prefill" requests are at the using the least amount + # swaps possible. (NOTE for now we loosely use "decode" to mean requests + # where attention is likely memory-bound and "prefill" to mean requests + # where attention is likely compute-bound, TODO(lucas): figure out a + # better naming here) + decodes = [] + prefills = [] + num_decode_tokens = 0 + num_prefill_tokens = 0 + + for i, req_id in enumerate(input_batch.req_ids): + num_tokens = scheduler_output.num_scheduled_tokens[req_id] + # for now treat 1 scheduled token as "decode" even if its not, + # we should update this to something like < 8 in the future but + # currently the decode run only supports num_tokens = 1 + if num_tokens == 1: + decodes.append(i) + num_decode_tokens += num_tokens + else: + prefills.append(i) + num_prefill_tokens += num_tokens + + # We hope that this is fairly minimal since decodes + # should be around for a number of iterations so hopefully they are + # relatively stationary (and new request are generally appended to the + # persistent batch so already should be at the back) + # To achieve this we loop over the decodes in descending order and + # the prefills in ascending order. We swap decodes from the "back" + # i.e. past where the last decode should be in the reodorered with + # prefills from the front of the batch. + # `decodes` and `prefills` are already in ascending order just based on + # the above loop + num_decodes = len(decodes) + num_prefills = len(prefills) + modified_batch = False + + for i in range(1, min(num_decodes, num_prefills) + 1): + # If the decode is at the "back" of the batch, i, we can swap it + # with the prefill closest to the front of the batch + decode_idx = decodes[num_decodes - i] + if decode_idx < num_decodes: + break + + input_batch.swap_states(prefills[i - 1], decode_idx) + modified_batch = True + + # Save for next `build` call + # TODO(lucas): this is a bit of a hack, we should probably have a + # better way of doing this + self._num_decodes = num_decodes + self._num_prefills = num_prefills + self._num_decode_tokens = num_decode_tokens + self._num_prefill_tokens = num_prefill_tokens + + return modified_batch + + def build(self, common_prefix_len: int, + common_attn_metadata: CommonAttentionMetadata): + num_reqs = common_attn_metadata.num_reqs + query_start_loc = common_attn_metadata.query_start_loc + seq_lens = common_attn_metadata.seq_lens + + seq_idx = None + chunk_indices, chunk_offsets = None, None + # Need flags to indicate if there are initial states + # currently we really only support the FlashAttention backend + has_initial_states = None + prep_initial_states = False + + state_indices_tensor = self.block_table.block_table[:num_reqs, 0] + + # Compute seq_idx, chunk_indices and chunk_offsets for prefill only + if self._num_prefills > 0: + #[batch,] + has_initial_states_cpu = ( + self.runner.input_batch. + num_computed_tokens_cpu_tensor[num_reqs - + self._num_prefills:num_reqs] + > 0) + prep_initial_states = torch.any(has_initial_states_cpu).item() + has_initial_states = has_initial_states_cpu.to( + query_start_loc.device) + + query_start_loc_p = common_attn_metadata.query_start_loc[ + -self._num_prefills - 1:] - self._num_decode_tokens + + seq_idx = torch.repeat_interleave( + torch.arange(self._num_prefills, + dtype=torch.int32, + device=query_start_loc_p.device), + query_start_loc_p.diff(), + output_size=self._num_prefill_tokens) + seq_idx.unsqueeze_(0) + + # We compute metadata for chunked prefill once at the top level + # model forward and reuse them in mamba layers. If not needed, + # they will be ignored inside mamba kernels. + if prep_initial_states: + chunk_indices, chunk_offsets = ( + _query_start_loc_to_chunk_indices_offsets( + query_start_loc_p, self.chunk_size, + self._num_prefill_tokens)) + + attn_metadata = Mamba2AttentionMetadata( + num_prefills=self._num_prefills, + num_prefill_tokens=self._num_prefill_tokens, + num_decodes=self._num_decodes, + num_decode_tokens=self._num_decode_tokens, + query_start_loc=query_start_loc, + seq_lens=seq_lens, + has_initial_states=has_initial_states, + prep_initial_states=prep_initial_states, + chunk_size=self.chunk_size, + seq_idx=seq_idx, + chunk_indices=chunk_indices, + chunk_offsets=chunk_offsets, + state_indices_tensor=state_indices_tensor, + ) + return attn_metadata diff --git a/vllm/v1/core/single_type_kv_cache_manager.py b/vllm/v1/core/single_type_kv_cache_manager.py index 95222779c3afb..5b47180380765 100644 --- a/vllm/v1/core/single_type_kv_cache_manager.py +++ b/vllm/v1/core/single_type_kv_cache_manager.py @@ -8,7 +8,7 @@ from vllm.utils import cdiv from vllm.v1.core.block_pool import BlockPool from vllm.v1.core.kv_cache_utils import BlockHash, KVCacheBlock from vllm.v1.kv_cache_interface import (FullAttentionSpec, KVCacheSpec, - SlidingWindowSpec) + MambaSpec, SlidingWindowSpec) from vllm.v1.request import Request @@ -52,6 +52,7 @@ class SingleTypeKVCacheManager(ABC): self.caching_hash_fn = caching_hash_fn self.kv_cache_group_id = kv_cache_group_id + self._null_block = block_pool.null_block def get_num_blocks_to_allocate( self, request_id: str, num_tokens: int, @@ -390,9 +391,49 @@ class SlidingWindowManager(SingleTypeKVCacheManager): return 0 +class MambaManager(SingleTypeKVCacheManager): + + @classmethod + def find_longest_cache_hit( + cls, + block_hashes: list[BlockHash], + max_length: int, + kv_cache_group_ids: list[int], + block_pool: BlockPool, + kv_cache_spec: KVCacheSpec, + use_eagle: bool, + ) -> tuple[list[KVCacheBlock], ...]: + assert isinstance( + kv_cache_spec, + MambaSpec), ("MambaManager can only be used for mamba groups") + # Prefix caching is not supported for mamba now. Always return empty + # list. + computed_blocks: tuple[list[KVCacheBlock], ...] = tuple( + [] for _ in range(len(kv_cache_group_ids))) + return computed_blocks + + def remove_skipped_blocks(self, request_id: str, + num_computed_tokens: int) -> None: + # Each request will always have 1 block at this moment, so no need to + # remove blocks. + pass + + def get_num_common_prefix_blocks(self, request_id: str, + num_running_requests: int) -> int: + return 0 + + def allocate_new_blocks(self, request_id: str, + num_tokens: int) -> list[KVCacheBlock]: + new_blocks = super().allocate_new_blocks(request_id, num_tokens) + assert len(self.req_to_blocks[request_id]) == 1, ( + "MambaManager should only allocate 1 block for each request.") + return new_blocks + + spec_manager_map: dict[type[KVCacheSpec], type[SingleTypeKVCacheManager]] = { FullAttentionSpec: FullAttentionManager, SlidingWindowSpec: SlidingWindowManager, + MambaSpec: MambaManager, } diff --git a/vllm/v1/kv_cache_interface.py b/vllm/v1/kv_cache_interface.py index e938f3bfc6714..c48775adc9b86 100644 --- a/vllm/v1/kv_cache_interface.py +++ b/vllm/v1/kv_cache_interface.py @@ -3,6 +3,7 @@ import copy from dataclasses import dataclass +from math import prod from typing import Optional import torch @@ -154,6 +155,29 @@ class SlidingWindowSpec(AttentionSpec): return (cdiv(num_tokens, self.block_size) + 1) * self.page_size_bytes +@dataclass +class MambaSpec(KVCacheSpec): + shapes: tuple[tuple[int, ...], ...] + dtype: torch.dtype + + def __post_init__(self): + self.num_elements = sum(prod(shape) for shape in self.shapes) + + @property + def type_id(self) -> str: + return f"mamba_{self.shapes}_{self.dtype}" + + @property + def page_size_bytes(self) -> int: + return self.num_elements * get_dtype_size(self.dtype) + + def max_memory_usage_bytes(self, vllm_config: VllmConfig) -> int: + # We allocate 1 block for each request now, so max_memory_usage_bytes is + # the same as page_size_bytes. + # Need to update this when supporting prefix caching. + return self.page_size_bytes + + @dataclass class KVCacheTensor: """ diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 558325fa0347e..516a90e481ed4 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -29,6 +29,7 @@ from vllm.distributed.parallel_state import ( from vllm.forward_context import (DPMetadata, get_forward_context, set_forward_context) from vllm.logger import init_logger +from vllm.model_executor.layers.mamba.mamba_mixer2 import MambaMixer2 from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding from vllm.model_executor.model_loader import TensorizerLoader, get_model_loader from vllm.multimodal import MULTIMODAL_REGISTRY @@ -38,12 +39,14 @@ from vllm.sampling_params import SamplingType from vllm.sequence import IntermediateTensors from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, GiB_bytes, LazyLoader, async_tensor_h2d, cdiv, - check_use_alibi, is_pin_memory_available) + check_use_alibi, get_dtype_size, + is_pin_memory_available) +from vllm.v1.attention.backends.mamba_attn import Mamba2AttentionBackend from vllm.v1.attention.backends.utils import (AttentionMetadataBuilder, CommonAttentionMetadata) from vllm.v1.core.encoder_cache_manager import compute_encoder_budget from vllm.v1.kv_cache_interface import (AttentionSpec, FullAttentionSpec, - KVCacheConfig, KVCacheSpec, + KVCacheConfig, KVCacheSpec, MambaSpec, SlidingWindowSpec) from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsTensors, ModelRunnerOutput) @@ -2093,28 +2096,31 @@ class GPUModelRunner(LoRAModelRunnerMixin): for i, kv_cache_group_spec in enumerate( kv_cache_config.kv_cache_groups): kv_cache_spec = kv_cache_group_spec.kv_cache_spec - if not isinstance(kv_cache_spec, AttentionSpec): - raise NotImplementedError( - "Only AttentionSpec is supported for now.") - attn_backend_i = get_attn_backend( - kv_cache_spec.head_size, - self.dtype, - kv_cache_spec.dtype, - kv_cache_spec.block_size, - self.model_config.is_attention_free, - use_mla=kv_cache_spec.use_mla, - ) - if attn_backend_i is None: - error_msg = ( - f"Error with get_attn_backend: {kv_cache_spec.head_size=}, " - f"{self.dtype=}, {kv_cache_spec.dtype=}, " - f"{kv_cache_spec.block_size=}, " - f"{self.model_config.is_attention_free=}, " - f"{kv_cache_spec.use_mla=}") - logger.error(error_msg) - raise NotImplementedError( - "Non-Attention backend is not supported by V1 " - "GPUModelRunner.") + if isinstance(kv_cache_spec, AttentionSpec): + attn_backend_i = get_attn_backend( + kv_cache_spec.head_size, + self.dtype, + kv_cache_spec.dtype, + kv_cache_spec.block_size, + self.model_config.is_attention_free, + use_mla=kv_cache_spec.use_mla, + ) + if attn_backend_i is None: + error_msg = (f"Error with get_attn_backend: " + f"{kv_cache_spec.head_size=}, " + f"{self.dtype=}, {kv_cache_spec.dtype=}, " + f"{kv_cache_spec.block_size=}, " + f"{self.model_config.is_attention_free=}, " + f"{kv_cache_spec.use_mla=}") + logger.error(error_msg) + raise NotImplementedError( + "Non-Attention backend is not supported by V1 " + "GPUModelRunner.") + elif isinstance(kv_cache_spec, MambaSpec): + attn_backend_i = Mamba2AttentionBackend + else: + raise ValueError( + f"Unknown KV cache spec type: {type(kv_cache_spec)}") block_table_i = self.input_batch.block_table[i] attn_metadata_builder_i = attn_backend_i.get_builder_cls()( @@ -2242,6 +2248,22 @@ class GPUModelRunner(LoRAModelRunnerMixin): kv_caches[layer_name] = kv_cache_raw_tensors[ layer_name].view(dtype).view(kv_cache_shape).permute( *inv_order) + elif isinstance(kv_cache_spec, MambaSpec): + raw_tensor = kv_cache_raw_tensors[layer_name] + dtype = kv_cache_spec.dtype + state_tensors = [] + start_pos = 0 + for shape in kv_cache_spec.shapes: + target_shape = (num_blocks, *shape) + size_in_bytes = np.prod(shape) * get_dtype_size( + dtype) * num_blocks + tensor = raw_tensor[start_pos:start_pos + + size_in_bytes] + tensor = tensor.view(dtype).view(target_shape) + state_tensors.append(tensor) + start_pos += size_in_bytes + assert start_pos == raw_tensor.numel() + kv_caches[layer_name] = tuple(state_tensors) else: raise NotImplementedError return kv_caches @@ -2307,11 +2329,11 @@ class GPUModelRunner(LoRAModelRunnerMixin): format. Layers that do not need KV cache are not included. """ - layers = get_layers_from_vllm_config(self.vllm_config, Attention) block_size = self.vllm_config.cache_config.block_size use_mla = self.vllm_config.model_config.use_mla kv_cache_spec: dict[str, KVCacheSpec] = {} - for layer_name, attn_module in layers.items(): + attn_layers = get_layers_from_vllm_config(self.vllm_config, Attention) + for layer_name, attn_module in attn_layers.items(): if (kv_tgt_layer := attn_module.kv_sharing_target_layer_name) is not None: # The layer doesn't need its own KV cache and will use that of @@ -2351,4 +2373,24 @@ class GPUModelRunner(LoRAModelRunnerMixin): raise ValueError( f"Unknown attention type: {attn_module.attn_type}") + mamba_layers = get_layers_from_vllm_config(self.vllm_config, + MambaMixer2) + if len(mamba_layers) > 0: + if self.vllm_config.speculative_config is not None: + raise NotImplementedError( + "Mamba with speculative decoding is not supported yet.") + if not self.vllm_config.model_config.enforce_eager: + raise NotImplementedError( + "Mamba with cuda graph is not supported yet.") + if self.vllm_config.cache_config.enable_prefix_caching: + raise NotImplementedError( + "Prefix caching is not supported for Mamba yet.") + max_model_len = self.vllm_config.model_config.max_model_len + # Set block_size to max_model_len, so that mamba model will always + # have only one block in the KV cache. + for layer_name, mamba_module in mamba_layers.items(): + kv_cache_spec[layer_name] = MambaSpec( + shapes=mamba_module.get_state_shape(), + dtype=self.kv_cache_dtype, + block_size=max_model_len) return kv_cache_spec From 9206d0ff019e84763113459075ddd14da0b28b6c Mon Sep 17 00:00:00 2001 From: Nathan Weinberg <31703736+nathan-weinberg@users.noreply.github.com> Date: Wed, 18 Jun 2025 16:47:08 -0400 Subject: [PATCH 023/211] docs: fix Slack bulletpoint in README (#19811) Signed-off-by: Nathan Weinberg --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index ec16d758327d4..d312716a84283 100644 --- a/README.md +++ b/README.md @@ -156,7 +156,7 @@ If you use vLLM for your research, please cite our [paper](https://arxiv.org/abs - For technical questions and feature requests, please use GitHub [Issues](https://github.com/vllm-project/vllm/issues) or [Discussions](https://github.com/vllm-project/vllm/discussions) - For discussing with fellow users, please use the [vLLM Forum](https://discuss.vllm.ai) -- coordinating contributions and development, please use [Slack](https://slack.vllm.ai) +- For coordinating contributions and development, please use [Slack](https://slack.vllm.ai) - For security disclosures, please use GitHub's [Security Advisories](https://github.com/vllm-project/vllm/security/advisories) feature - For collaborations and partnerships, please contact us at [vllm-questions@lists.berkeley.edu](mailto:vllm-questions@lists.berkeley.edu) From 16c16301c8218a28eefcdec07757a782e5ddc643 Mon Sep 17 00:00:00 2001 From: afeldman-nm <156691304+afeldman-nm@users.noreply.github.com> Date: Wed, 18 Jun 2025 18:08:00 -0400 Subject: [PATCH 024/211] Disable "Forbid direct 'import triton'" check for `vllm/triton_utils/importing.py` in an extensible way (#19783) Signed-off-by: Andrew Feldman --- tools/check_triton_import.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/check_triton_import.py b/tools/check_triton_import.py index 77b2dfc391889..c01d9d4ab079a 100644 --- a/tools/check_triton_import.py +++ b/tools/check_triton_import.py @@ -14,6 +14,12 @@ ALLOWED_LINES = { "from vllm.triton_utils import tl, triton", } +ALLOWED_FILES = {"vllm/triton_utils/importing.py"} + + +def is_allowed_file(current_file: str) -> bool: + return current_file in ALLOWED_FILES + def is_forbidden_import(line: str) -> bool: stripped = line.strip() @@ -25,10 +31,14 @@ def parse_diff(diff: str) -> list[str]: violations = [] current_file = None current_lineno = None + skip_allowed_file = False for line in diff.splitlines(): if line.startswith("+++ b/"): current_file = line[6:] + skip_allowed_file = is_allowed_file(current_file) + elif skip_allowed_file: + continue elif line.startswith("@@"): match = re.search(r"\+(\d+)", line) if match: From 3b523e38d96ab5a29e9d157f10891ca953881964 Mon Sep 17 00:00:00 2001 From: Lukas Geiger Date: Wed, 18 Jun 2025 23:36:55 +0100 Subject: [PATCH 025/211] [Core] Do not copy array during hashing (#19484) Signed-off-by: Lukas Geiger --- tests/multimodal/test_hasher.py | 12 ++++++++++++ vllm/multimodal/hasher.py | 22 ++++++++++++---------- vllm/v1/serial_utils.py | 2 +- 3 files changed, 25 insertions(+), 11 deletions(-) diff --git a/tests/multimodal/test_hasher.py b/tests/multimodal/test_hasher.py index b5048c8cc3ad8..42cb40739dcc3 100644 --- a/tests/multimodal/test_hasher.py +++ b/tests/multimodal/test_hasher.py @@ -60,3 +60,15 @@ def test_hash_collision_array_shape(): hasher = MultiModalHasher assert hasher.hash_kwargs(data=arr1) != hasher.hash_kwargs(data=arr2) + + +def test_hash_non_contiguous_array(): + arr = np.arange(24).reshape(4, 6).T + assert not arr.flags.c_contiguous + + arr_c = np.ascontiguousarray(arr) + assert arr_c.flags.c_contiguous + + hasher = MultiModalHasher + # Both should be hashable and produce the same hashes + assert hasher.hash_kwargs(data=arr) == hasher.hash_kwargs(data=arr_c) diff --git a/vllm/multimodal/hasher.py b/vllm/multimodal/hasher.py index db8b2e2b39592..ac27bb66f7b51 100644 --- a/vllm/multimodal/hasher.py +++ b/vllm/multimodal/hasher.py @@ -3,6 +3,7 @@ import pickle from collections.abc import Iterable, Mapping +from typing import Union import numpy as np import torch @@ -23,11 +24,11 @@ A dictionary containing hashes for items in each modality. class MultiModalHasher: @classmethod - def serialize_item(cls, obj: object) -> bytes: + def serialize_item(cls, obj: object) -> Union[bytes, memoryview]: # Simple cases if isinstance(obj, str): return obj.encode("utf-8") - if isinstance(obj, bytes): + if isinstance(obj, (bytes, memoryview)): return obj if isinstance(obj, (int, float)): return np.array(obj).tobytes() @@ -38,12 +39,13 @@ class MultiModalHasher: if isinstance(obj, torch.Tensor): return cls.item_to_bytes("tensor", obj.numpy()) if isinstance(obj, np.ndarray): - return cls.item_to_bytes( - "ndarray", { - "dtype": obj.dtype.str, - "shape": obj.shape, - "data": obj.tobytes(), - }) + # If the array is non-contiguous, we need to copy it first + arr_data = obj.data if obj.flags.c_contiguous else obj.tobytes() + return cls.item_to_bytes("ndarray", { + "dtype": obj.dtype.str, + "shape": obj.shape, + "data": arr_data, + }) logger.warning( "No serialization method found for %s. " @@ -64,7 +66,7 @@ class MultiModalHasher: cls, key: str, obj: object, - ) -> Iterable[tuple[bytes, bytes]]: + ) -> Iterable[tuple[bytes, Union[bytes, memoryview]]]: # Recursive cases if isinstance(obj, (list, tuple)): for i, elem in enumerate(obj): @@ -73,7 +75,7 @@ class MultiModalHasher: for k, v in obj.items(): yield from cls.iter_item_to_bytes(f"{key}.{k}", v) else: - key_bytes = cls.serialize_item(key) + key_bytes = key.encode("utf-8") value_bytes = cls.serialize_item(obj) yield key_bytes, value_bytes diff --git a/vllm/v1/serial_utils.py b/vllm/v1/serial_utils.py index ab6653a786ffe..03200c2c2f8ec 100644 --- a/vllm/v1/serial_utils.py +++ b/vllm/v1/serial_utils.py @@ -140,7 +140,7 @@ class MsgpackEncoder: ) -> tuple[str, tuple[int, ...], Union[int, memoryview]]: assert self.aux_buffers is not None # If the array is non-contiguous, we need to copy it first - arr_data = obj.data if obj.data.c_contiguous else obj.tobytes() + arr_data = obj.data if obj.flags.c_contiguous else obj.tobytes() if not obj.shape or obj.nbytes < self.size_threshold: # Encode small arrays and scalars inline. Using this extension type # ensures we can avoid copying when decoding. From 04fefe7c9a7947390885bac4f31f8c83a8aefd73 Mon Sep 17 00:00:00 2001 From: QiliangCui Date: Wed, 18 Jun 2025 15:41:13 -0700 Subject: [PATCH 026/211] [TPU] Update torch-xla version to include paged attention tuned block change (#19813) Signed-off-by: Qiliang Cui --- requirements/tpu.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/requirements/tpu.txt b/requirements/tpu.txt index c6038b0443951..2b5fd8941647f 100644 --- a/requirements/tpu.txt +++ b/requirements/tpu.txt @@ -18,9 +18,9 @@ setuptools==78.1.0 --find-links https://storage.googleapis.com/libtpu-releases/index.html --find-links https://storage.googleapis.com/jax-releases/jax_nightly_releases.html --find-links https://storage.googleapis.com/jax-releases/jaxlib_nightly_releases.html -torch==2.8.0.dev20250617 -torchvision==0.23.0.dev20250617 -torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250617-cp39-cp39-linux_x86_64.whl ; python_version == "3.9" -torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250617-cp310-cp310-linux_x86_64.whl ; python_version == "3.10" -torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250617-cp311-cp311-linux_x86_64.whl ; python_version == "3.11" +torch==2.8.0.dev20250618 +torchvision==0.23.0.dev20250618 +torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250618-cp39-cp39-linux_x86_64.whl ; python_version == "3.9" +torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250618-cp310-cp310-linux_x86_64.whl ; python_version == "3.10" +torch_xla[tpu, pallas] @ https://storage.googleapis.com/pytorch-xla-releases/wheels/tpuvm/torch_xla-2.8.0.dev20250618-cp311-cp311-linux_x86_64.whl ; python_version == "3.11" From 14fdd21d39b634b4c13f7aadbe60e83080b59ebd Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Wed, 18 Jun 2025 18:48:29 -0400 Subject: [PATCH 027/211] [Core] More fixes to MultiModalEmbeddings type handling (#19715) Signed-off-by: Russell Bryant --- vllm/model_executor/models/aria.py | 3 ++- vllm/model_executor/models/aya_vision.py | 3 ++- vllm/model_executor/models/blip2.py | 3 ++- vllm/model_executor/models/chameleon.py | 3 ++- vllm/model_executor/models/deepseek_vl2.py | 3 ++- vllm/model_executor/models/florence2.py | 3 ++- vllm/model_executor/models/fuyu.py | 3 ++- vllm/model_executor/models/gemma3_mm.py | 3 ++- vllm/model_executor/models/glm4v.py | 3 ++- vllm/model_executor/models/granite_speech.py | 3 ++- vllm/model_executor/models/idefics3.py | 3 ++- vllm/model_executor/models/internvl.py | 3 ++- vllm/model_executor/models/kimi_vl.py | 3 ++- vllm/model_executor/models/llava.py | 3 ++- vllm/model_executor/models/llava_next.py | 3 ++- vllm/model_executor/models/llava_next_video.py | 3 ++- vllm/model_executor/models/llava_onevision.py | 3 ++- vllm/model_executor/models/minicpmv.py | 3 ++- vllm/model_executor/models/minimax_vl_01.py | 3 ++- vllm/model_executor/models/mistral3.py | 3 ++- vllm/model_executor/models/mllama4.py | 3 ++- vllm/model_executor/models/molmo.py | 3 ++- vllm/model_executor/models/ovis.py | 3 ++- vllm/model_executor/models/paligemma.py | 3 ++- vllm/model_executor/models/phi3v.py | 3 ++- vllm/model_executor/models/phi4mm.py | 3 ++- vllm/model_executor/models/pixtral.py | 3 ++- vllm/model_executor/models/qwen2_5_omni_thinker.py | 5 +++-- vllm/model_executor/models/qwen2_5_vl.py | 3 ++- vllm/model_executor/models/qwen2_audio.py | 3 ++- vllm/model_executor/models/qwen2_vl.py | 3 ++- vllm/model_executor/models/qwen_vl.py | 3 ++- vllm/model_executor/models/skyworkr1v.py | 3 ++- vllm/model_executor/models/tarsier.py | 3 ++- vllm/model_executor/models/ultravox.py | 3 ++- 35 files changed, 71 insertions(+), 36 deletions(-) diff --git a/vllm/model_executor/models/aria.py b/vllm/model_executor/models/aria.py index 4fe6a7b9e9383..eb7435d6e1d8b 100644 --- a/vllm/model_executor/models/aria.py +++ b/vllm/model_executor/models/aria.py @@ -620,7 +620,8 @@ class AriaForConditionalGeneration(nn.Module, SupportsMultiModal): multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, self.config.image_token_index) diff --git a/vllm/model_executor/models/aya_vision.py b/vllm/model_executor/models/aya_vision.py index 7c02d245db8b7..a48631ad709f7 100644 --- a/vllm/model_executor/models/aya_vision.py +++ b/vllm/model_executor/models/aya_vision.py @@ -430,7 +430,8 @@ class AyaVisionForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids=input_ids, inputs_embeds=inputs_embeds, diff --git a/vllm/model_executor/models/blip2.py b/vllm/model_executor/models/blip2.py index 87fc6b5b02405..3c3955161daaa 100644 --- a/vllm/model_executor/models/blip2.py +++ b/vllm/model_executor/models/blip2.py @@ -641,7 +641,8 @@ class Blip2ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, _IMAGE_TOKEN_ID) diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py index 21f29dc43c268..d538ba09c65cf 100644 --- a/vllm/model_executor/models/chameleon.py +++ b/vllm/model_executor/models/chameleon.py @@ -1005,7 +1005,8 @@ class ChameleonForConditionalGeneration(nn.Module, SupportsMultiModal, ) -> torch.Tensor: inputs_embeds = self.model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, self.model.vocabulary_mapping.image_token_id) diff --git a/vllm/model_executor/models/deepseek_vl2.py b/vllm/model_executor/models/deepseek_vl2.py index 6341c65a5d4cf..da5452409d2f9 100644 --- a/vllm/model_executor/models/deepseek_vl2.py +++ b/vllm/model_executor/models/deepseek_vl2.py @@ -600,7 +600,8 @@ class DeepseekVLV2ForCausalLM(nn.Module, SupportsMultiModal, SupportsPP): multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, self.image_token_id) diff --git a/vllm/model_executor/models/florence2.py b/vllm/model_executor/models/florence2.py index 4b220ea483e81..425407c19ab5d 100644 --- a/vllm/model_executor/models/florence2.py +++ b/vllm/model_executor/models/florence2.py @@ -1046,7 +1046,8 @@ class Florence2ForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, self.pad_token_id) diff --git a/vllm/model_executor/models/fuyu.py b/vllm/model_executor/models/fuyu.py index 9692899f7b993..7e03982e78e69 100644 --- a/vllm/model_executor/models/fuyu.py +++ b/vllm/model_executor/models/fuyu.py @@ -345,7 +345,8 @@ class FuyuForCausalLM(nn.Module, SupportsMultiModal, SupportsPP): multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, diff --git a/vllm/model_executor/models/gemma3_mm.py b/vllm/model_executor/models/gemma3_mm.py index 415a8dbdcf87f..3a1c14978b45b 100644 --- a/vllm/model_executor/models/gemma3_mm.py +++ b/vllm/model_executor/models/gemma3_mm.py @@ -592,7 +592,8 @@ class Gemma3ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, diff --git a/vllm/model_executor/models/glm4v.py b/vllm/model_executor/models/glm4v.py index e9271367a472b..70916c45c0e09 100644 --- a/vllm/model_executor/models/glm4v.py +++ b/vllm/model_executor/models/glm4v.py @@ -609,7 +609,8 @@ class GLM4VForCausalLM(ChatGLMBaseModel, SupportsLoRA, SupportsPP, ) -> torch.Tensor: inputs_embeds = self.transformer.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids=input_ids, inputs_embeds=inputs_embeds, diff --git a/vllm/model_executor/models/granite_speech.py b/vllm/model_executor/models/granite_speech.py index 137aad926cb90..f2dc5708028ba 100644 --- a/vllm/model_executor/models/granite_speech.py +++ b/vllm/model_executor/models/granite_speech.py @@ -721,7 +721,8 @@ class GraniteSpeechForConditionalGeneration( multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: """Compute the merged LLM / audio embeddings.""" - if multimodal_embeddings is None: + if multimodal_embeddings is None \ + or len(multimodal_embeddings) == 0: return self.language_model.get_input_embeddings(input_ids) inputs_embeds = embed_multimodal( diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py index be04ad0422df8..b1d0626217a0a 100644 --- a/vllm/model_executor/models/idefics3.py +++ b/vllm/model_executor/models/idefics3.py @@ -720,7 +720,8 @@ class Idefics3ForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index 9d5cceccff2ff..bb71177ecad8e 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -1336,7 +1336,8 @@ class InternVLChatModel(nn.Module, SupportsMultiModal, SupportsPP, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: context_token_ids = [ token_id for token_id in (self.img_context_token_id, self.video_context_token_id) diff --git a/vllm/model_executor/models/kimi_vl.py b/vllm/model_executor/models/kimi_vl.py index 351d1fbdc7444..f32c2075f6a85 100644 --- a/vllm/model_executor/models/kimi_vl.py +++ b/vllm/model_executor/models/kimi_vl.py @@ -393,7 +393,8 @@ class KimiVLForConditionalGeneration(nn.Module, SupportsMultiModal): # model as one of the requirements of basic vLLM model implementation. inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None and len( + multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids=input_ids, inputs_embeds=inputs_embeds, diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index f70ad37a3d3ac..1c35bf5206db7 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -683,7 +683,8 @@ class LlavaForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP): multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, diff --git a/vllm/model_executor/models/llava_next.py b/vllm/model_executor/models/llava_next.py index bc792be19dbf6..142d5740f077f 100644 --- a/vllm/model_executor/models/llava_next.py +++ b/vllm/model_executor/models/llava_next.py @@ -502,7 +502,8 @@ class LlavaNextForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: - if not multimodal_embeddings: + if multimodal_embeddings is None \ + or len(multimodal_embeddings) == 0: return self.language_model.get_input_embeddings(input_ids) inputs_embeds = embed_multimodal( diff --git a/vllm/model_executor/models/llava_next_video.py b/vllm/model_executor/models/llava_next_video.py index c13e8e9b24140..f930f3ce8a16f 100644 --- a/vllm/model_executor/models/llava_next_video.py +++ b/vllm/model_executor/models/llava_next_video.py @@ -426,7 +426,8 @@ class LlavaNextVideoForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, self.config.video_token_index) diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py index 373b0a2a7d5e6..c5403762f5390 100644 --- a/vllm/model_executor/models/llava_onevision.py +++ b/vllm/model_executor/models/llava_onevision.py @@ -881,7 +881,8 @@ class LlavaOnevisionForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, [self.config.image_token_index, self.config.video_token_index]) diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py index b923287dca3e0..9dc03c8001824 100644 --- a/vllm/model_executor/models/minicpmv.py +++ b/vllm/model_executor/models/minicpmv.py @@ -892,7 +892,8 @@ class MiniCPMVBaseModel(nn.Module, SupportsMultiModal, SupportsPP): multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.llm.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: assert len(self.mm_token_ids) > 0 inputs_embeds = merge_multimodal_embeddings( input_ids, diff --git a/vllm/model_executor/models/minimax_vl_01.py b/vllm/model_executor/models/minimax_vl_01.py index bc00af2ec6b9e..8ce94540e87fe 100644 --- a/vllm/model_executor/models/minimax_vl_01.py +++ b/vllm/model_executor/models/minimax_vl_01.py @@ -201,7 +201,8 @@ class MiniMaxVL01ForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, diff --git a/vllm/model_executor/models/mistral3.py b/vllm/model_executor/models/mistral3.py index ebc176e2c7242..04d6d347cb84f 100644 --- a/vllm/model_executor/models/mistral3.py +++ b/vllm/model_executor/models/mistral3.py @@ -521,7 +521,8 @@ class Mistral3ForConditionalGeneration(nn.Module, SupportsLoRA, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, diff --git a/vllm/model_executor/models/mllama4.py b/vllm/model_executor/models/mllama4.py index bf4bd309eea27..a420e757e2194 100644 --- a/vllm/model_executor/models/mllama4.py +++ b/vllm/model_executor/models/mllama4.py @@ -808,7 +808,8 @@ class Llama4ForConditionalGeneration(nn.Module, SupportsMultiModal, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None and len( + multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, diff --git a/vllm/model_executor/models/molmo.py b/vllm/model_executor/models/molmo.py index 70c60c6d528bf..bb08cd59f6fcf 100644 --- a/vllm/model_executor/models/molmo.py +++ b/vllm/model_executor/models/molmo.py @@ -1487,7 +1487,8 @@ class MolmoForCausalLM(nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: assert self.img_patch_id is not None inputs_embeds = merge_multimodal_embeddings( diff --git a/vllm/model_executor/models/ovis.py b/vllm/model_executor/models/ovis.py index 900a1f5de4581..6eecd4499fb96 100644 --- a/vllm/model_executor/models/ovis.py +++ b/vllm/model_executor/models/ovis.py @@ -515,7 +515,8 @@ class Ovis(nn.Module, SupportsMultiModal, SupportsPP): multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.llm.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, self.image_pad_token_id) diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py index 103a267c41f59..e1de8cf458780 100644 --- a/vllm/model_executor/models/paligemma.py +++ b/vllm/model_executor/models/paligemma.py @@ -364,7 +364,8 @@ class PaliGemmaForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, self.config.image_token_index) diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index 9cec7831ae0cf..0a7adf91e488f 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -669,7 +669,8 @@ class Phi3VForCausalLM(nn.Module, SupportsMultiModal, SupportsPP, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.embed_tokens(input_ids) - if multimodal_embeddings: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, self.image_token_id) diff --git a/vllm/model_executor/models/phi4mm.py b/vllm/model_executor/models/phi4mm.py index a3ca72d1f5cf9..5d1f0775b07fb 100644 --- a/vllm/model_executor/models/phi4mm.py +++ b/vllm/model_executor/models/phi4mm.py @@ -1148,7 +1148,8 @@ class Phi4MMForCausalLM(nn.Module, SupportsLoRA, SupportsMultiModal): multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.model.embed_tokens(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None and len( + multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, [_IMAGE_PLACEHOLDER_TOKEN_ID, _AUDIO_PLACEHOLDER_TOKEN_ID]) diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index 320c0e10d06a1..709ac1d9df945 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -423,7 +423,8 @@ class PixtralForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, diff --git a/vllm/model_executor/models/qwen2_5_omni_thinker.py b/vllm/model_executor/models/qwen2_5_omni_thinker.py index ad1e8fcb39d54..9344bf8e03a76 100644 --- a/vllm/model_executor/models/qwen2_5_omni_thinker.py +++ b/vllm/model_executor/models/qwen2_5_omni_thinker.py @@ -805,7 +805,8 @@ class Qwen2_5OmniThinkerForConditionalGeneration( multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: # TODO (ywang96): support overlapping modalitiy embeddings so that # `use_audio_in_video` will work on V1. @@ -845,7 +846,7 @@ class Qwen2_5OmniThinkerForConditionalGeneration( multimodal_embeddings: Optional[NestedTensors] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is None: + if multimodal_embeddings is None or len(multimodal_embeddings) == 0: return inputs_embeds for embeddings, modality in multimodal_embeddings: diff --git a/vllm/model_executor/models/qwen2_5_vl.py b/vllm/model_executor/models/qwen2_5_vl.py index 202cd5e860d18..01e85ae805777 100644 --- a/vllm/model_executor/models/qwen2_5_vl.py +++ b/vllm/model_executor/models/qwen2_5_vl.py @@ -1046,7 +1046,8 @@ class Qwen2_5_VLForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, [self.config.image_token_id, self.config.video_token_id]) diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py index e77a8e05d2001..aefa1db24628d 100644 --- a/vllm/model_executor/models/qwen2_audio.py +++ b/vllm/model_executor/models/qwen2_audio.py @@ -364,7 +364,8 @@ class Qwen2AudioForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, self.config.audio_token_index) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 49b709069cd23..d5e297ea66da4 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -1289,7 +1289,8 @@ class Qwen2VLForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, [self.config.image_token_id, self.config.video_token_id]) diff --git a/vllm/model_executor/models/qwen_vl.py b/vllm/model_executor/models/qwen_vl.py index 546737621a7c1..fc29785af95a0 100644 --- a/vllm/model_executor/models/qwen_vl.py +++ b/vllm/model_executor/models/qwen_vl.py @@ -754,7 +754,8 @@ class QwenVLForConditionalGeneration(QWenBaseModel, SupportsPP, SupportsLoRA, ) -> torch.Tensor: inputs_embeds = self.transformer.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, multimodal_embeddings, self.transformer.visual.image_pad_id) diff --git a/vllm/model_executor/models/skyworkr1v.py b/vllm/model_executor/models/skyworkr1v.py index 9fba24ac5cecb..28f181dde2154 100644 --- a/vllm/model_executor/models/skyworkr1v.py +++ b/vllm/model_executor/models/skyworkr1v.py @@ -883,7 +883,8 @@ class SkyworkR1VChatModel(nn.Module, SupportsMultiModal, SupportsPP): multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: assert self.img_context_token_id is not None self._set_visual_token_mask(input_ids) inputs_embeds = merge_multimodal_embeddings( diff --git a/vllm/model_executor/models/tarsier.py b/vllm/model_executor/models/tarsier.py index 2645e700fcda1..a5736f124f259 100644 --- a/vllm/model_executor/models/tarsier.py +++ b/vllm/model_executor/models/tarsier.py @@ -598,7 +598,8 @@ class TarsierForConditionalGeneration(nn.Module, SupportsMultiModal, multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: inputs_embeds = merge_multimodal_embeddings( input_ids, inputs_embeds, diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py index f6b9d19694efa..94f5e03fd446e 100644 --- a/vllm/model_executor/models/ultravox.py +++ b/vllm/model_executor/models/ultravox.py @@ -560,7 +560,8 @@ class UltravoxModel(nn.Module, SupportsMultiModal, SupportsPP, SupportsLoRA): multimodal_embeddings: Optional[MultiModalEmbeddings] = None, ) -> torch.Tensor: inputs_embeds = self.language_model.get_input_embeddings(input_ids) - if multimodal_embeddings is not None: + if multimodal_embeddings is not None \ + and len(multimodal_embeddings) != 0: # TODO(ywang96): remove this block after v0 is deprecated. if not envs.VLLM_USE_V1: From d49adea1f9f10eec0250a9ff34624b51fb7e3ee9 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Wed, 18 Jun 2025 15:49:40 -0700 Subject: [PATCH 028/211] [Multimodal] Use fast processor for Qwen2/2.5-VL (#19789) --- vllm/model_executor/models/qwen2_5_omni_thinker.py | 2 +- vllm/model_executor/models/qwen2_5_vl.py | 2 +- vllm/model_executor/models/qwen2_vl.py | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/vllm/model_executor/models/qwen2_5_omni_thinker.py b/vllm/model_executor/models/qwen2_5_omni_thinker.py index 9344bf8e03a76..c0ed473103abe 100644 --- a/vllm/model_executor/models/qwen2_5_omni_thinker.py +++ b/vllm/model_executor/models/qwen2_5_omni_thinker.py @@ -150,7 +150,7 @@ class Qwen2_5OmniThinkerProcessingInfo(Qwen2AudioProcessingInfo, min_pixels=min_pixels, max_pixels=max_pixels, size=size, - use_fast=kwargs.get("use_fast")), + use_fast=kwargs.get("use_fast", True)), **kwargs, ) if not hasattr(processor, "audio_token"): diff --git a/vllm/model_executor/models/qwen2_5_vl.py b/vllm/model_executor/models/qwen2_5_vl.py index 01e85ae805777..4faa0d2c366ed 100644 --- a/vllm/model_executor/models/qwen2_5_vl.py +++ b/vllm/model_executor/models/qwen2_5_vl.py @@ -798,7 +798,7 @@ class Qwen2_5_VLProcessingInfo(Qwen2VLProcessingInfo): min_pixels=min_pixels, max_pixels=max_pixels, size=size, - use_fast=kwargs.get("use_fast")), + use_fast=kwargs.get("use_fast", True)), **kwargs, ) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index d5e297ea66da4..3b939a43e9246 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -763,7 +763,7 @@ class Qwen2VLProcessingInfo(BaseProcessingInfo): min_pixels=min_pixels, max_pixels=max_pixels, size=size, - use_fast=kwargs.get("use_fast")), + use_fast=kwargs.get("use_fast", True)), **kwargs, ) @@ -808,6 +808,7 @@ class Qwen2VLProcessingInfo(BaseProcessingInfo): size: Optional[dict[str, int]] = None, **kwargs: object, ) -> Qwen2VLImageProcessor: + kwargs["use_fast"] = kwargs.get("use_fast", True) return cached_image_processor_from_config( self.ctx.model_config, **self._get_image_processor_kwargs(min_pixels=min_pixels, From ed33349738a5ebc83826ee85ee4c2898afb58504 Mon Sep 17 00:00:00 2001 From: Richard Zou Date: Wed, 18 Jun 2025 20:23:12 -0400 Subject: [PATCH 029/211] [BugFix] Fix use_cudagraph=False (#19612) Signed-off-by: Richard Zou --- tests/compile/test_config.py | 41 ++++++++++++++---------------- vllm/compilation/counter.py | 3 +++ vllm/v1/worker/gpu_model_runner.py | 16 ++++++++---- 3 files changed, 33 insertions(+), 27 deletions(-) diff --git a/tests/compile/test_config.py b/tests/compile/test_config.py index 52e0fcc2881fb..37d8ae0c08bf4 100644 --- a/tests/compile/test_config.py +++ b/tests/compile/test_config.py @@ -1,14 +1,10 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import pytest -import torch import vllm from vllm.compilation.counter import compilation_counter -from vllm.config import (CompilationConfig, CompilationLevel, VllmConfig, - set_current_vllm_config) - -from .piecewise.test_simple import SillyModel +from vllm.config import VllmConfig def test_use_cudagraphs_dynamic(monkeypatch): @@ -22,23 +18,24 @@ def test_use_cudagraphs_dynamic(monkeypatch): @pytest.mark.parametrize("enabled", [True, False]) -def test_use_cudagraphs(enabled): +def test_use_cudagraphs(vllm_runner, monkeypatch, enabled): assert vllm.envs.VLLM_USE_V1 - vllm_config = VllmConfig(compilation_config=CompilationConfig( - level=CompilationLevel.PIECEWISE, - use_cudagraph=enabled, - cudagraph_capture_sizes=[100], - )) - with set_current_vllm_config(vllm_config): - model = SillyModel(vllm_config=vllm_config, prefix='') - inputs = torch.randn(100, device="cuda") + # Disable multiprocessing so that the counter is in the same process + monkeypatch.setenv('VLLM_ENABLE_V1_MULTIPROCESSING', '0') - with compilation_counter.expect( - num_graphs_seen=1, # one graph for the model - num_cudagraph_captured=1 if enabled else 0, - ): - # first run is warmup - model(inputs) - # second run does CUDAGraphs recording (if enabled) - model(inputs) + compilation_config = { + "cudagraph_capture_sizes": [100], + "use_cudagraph": enabled, + } + with ( + compilation_counter.expect( + num_graphs_seen=1, + num_gpu_runner_capture_triggers=1 if enabled else 0, + num_cudagraph_captured=13 if enabled else 0, + ), + # loading the model causes compilation (if enabled) to happen + vllm_runner('facebook/opt-125m', + compilation_config=compilation_config, + gpu_memory_utilization=0.4) as _): + pass diff --git a/vllm/compilation/counter.py b/vllm/compilation/counter.py index 165347cfccef7..9d7a25689b560 100644 --- a/vllm/compilation/counter.py +++ b/vllm/compilation/counter.py @@ -15,6 +15,9 @@ class CompilationCounter: # not including the splitting ops num_piecewise_capturable_graphs_seen: int = 0 num_backend_compilations: int = 0 + # Number of gpu_model_runner attempts to trigger CUDAGraphs capture + num_gpu_runner_capture_triggers: int = 0 + # Number of CUDAGraphs captured num_cudagraph_captured: int = 0 # InductorAdapter.compile calls num_inductor_compiles: int = 0 diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 516a90e481ed4..c4163eb2b8f59 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -18,6 +18,7 @@ import vllm.envs as envs from vllm.attention import AttentionType, get_attn_backend from vllm.attention.backends.abstract import AttentionBackend from vllm.attention.layer import Attention +from vllm.compilation.counter import compilation_counter from vllm.config import (CompilationLevel, VllmConfig, get_layers_from_vllm_config) from vllm.distributed.kv_transfer import (get_kv_transfer_group, @@ -200,9 +201,11 @@ class GPUModelRunner(LoRAModelRunnerMixin): block_sizes=[self.cache_config.block_size], ) - self.use_cuda_graph = (self.compilation_config.level - == CompilationLevel.PIECEWISE - and not self.model_config.enforce_eager) + self.use_cuda_graph = ( + self.vllm_config.compilation_config.level + == CompilationLevel.PIECEWISE + and self.vllm_config.compilation_config.use_cudagraph + and not self.model_config.enforce_eager) # TODO(woosuk): Provide an option to tune the max cudagraph batch size. # The convention is different. # self.cudagraph_batch_sizes sorts in ascending order. @@ -2058,10 +2061,13 @@ class GPUModelRunner(LoRAModelRunnerMixin): def capture_model(self) -> None: if not self.use_cuda_graph: logger.warning( - "Skipping CUDA graph capture. Please add " - "-O %s to use CUDA graphs.", CompilationLevel.PIECEWISE) + "Skipping CUDA graph capture. To turn on CUDA graph capture, " + "set -O %s and ensure `use_cudagraph` was not manually set to " + "False", CompilationLevel.PIECEWISE) return + compilation_counter.num_gpu_runner_capture_triggers += 1 + start_time = time.perf_counter() start_free_gpu_memory = torch.cuda.mem_get_info()[0] From dfada85eee0a24b982525cef3723498c016f1122 Mon Sep 17 00:00:00 2001 From: afeldman-nm <156691304+afeldman-nm@users.noreply.github.com> Date: Wed, 18 Jun 2025 20:41:11 -0400 Subject: [PATCH 030/211] [Frontend] Expose custom args in OpenAI APIs (#16862) Signed-off-by: Andrew Feldman Signed-off-by: Andrew Feldman Co-authored-by: Nick Hill --- .../kernels/benchmark_moe_align_block_size.py | 2 +- vllm/entrypoints/openai/protocol.py | 52 +++++++++++++++---- vllm/sampling_params.py | 4 +- 3 files changed, 44 insertions(+), 14 deletions(-) diff --git a/benchmarks/kernels/benchmark_moe_align_block_size.py b/benchmarks/kernels/benchmark_moe_align_block_size.py index 024a5dcfc8b0b..5170ac09dc42a 100644 --- a/benchmarks/kernels/benchmark_moe_align_block_size.py +++ b/benchmarks/kernels/benchmark_moe_align_block_size.py @@ -4,12 +4,12 @@ import argparse import itertools import torch -import triton from vllm import _custom_ops as ops from vllm.model_executor.layers.fused_moe.moe_align_block_size import ( moe_align_block_size_triton, ) +from vllm.triton_utils import triton def get_topk_ids(num_tokens: int, num_experts: int, topk: int) -> torch.Tensor: diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index 5f2d07e677bbf..b278d0d005867 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -326,8 +326,9 @@ class ChatCompletionRequest(OpenAIBaseModel): ) chat_template_kwargs: Optional[dict[str, Any]] = Field( default=None, - description=("Additional kwargs to pass to the template renderer. " - "Will be accessible by the chat template."), + description=( + "Additional keyword args to pass to the template renderer. " + "Will be accessible by the chat template."), ) mm_processor_kwargs: Optional[dict[str, Any]] = Field( default=None, @@ -414,6 +415,12 @@ class ChatCompletionRequest(OpenAIBaseModel): default=None, description="KVTransfer parameters used for disaggregated serving.") + vllm_xargs: Optional[dict[str, Union[str, int, float]]] = Field( + default=None, + description=("Additional request parameters with string or " + "numeric values, used by custom extensions."), + ) + # --8<-- [end:chat-completion-extra-params] # Default sampling parameters for chat completion requests @@ -523,6 +530,10 @@ class ChatCompletionRequest(OpenAIBaseModel): structural_tag=self.structural_tag, ) + extra_args: dict[str, Any] = self.vllm_xargs if self.vllm_xargs else {} + if self.kv_transfer_params: + # Pass in kv_transfer_params via extra_args + extra_args["kv_transfer_params"] = self.kv_transfer_params return SamplingParams.from_optional( n=self.n, best_of=self.best_of, @@ -553,8 +564,8 @@ class ChatCompletionRequest(OpenAIBaseModel): logit_bias=self.logit_bias, bad_words= self.bad_words, allowed_token_ids=self.allowed_token_ids, - extra_args=({"kv_transfer_params": self.kv_transfer_params} - if self.kv_transfer_params else None)) + extra_args=extra_args or None, + ) def _get_guided_json_from_tool( self) -> Optional[Union[str, dict, BaseModel]]: @@ -871,6 +882,12 @@ class CompletionRequest(OpenAIBaseModel): default=None, description="KVTransfer parameters used for disaggregated serving.") + vllm_xargs: Optional[dict[str, Union[str, int, float]]] = Field( + default=None, + description=("Additional request parameters with string or " + "numeric values, used by custom extensions."), + ) + # --8<-- [end:completion-extra-params] # Default sampling parameters for completion requests @@ -968,6 +985,10 @@ class CompletionRequest(OpenAIBaseModel): whitespace_pattern=self.guided_whitespace_pattern, ) + extra_args: dict[str, Any] = self.vllm_xargs if self.vllm_xargs else {} + if self.kv_transfer_params: + # Pass in kv_transfer_params via extra_args + extra_args["kv_transfer_params"] = self.kv_transfer_params return SamplingParams.from_optional( n=self.n, best_of=self.best_of, @@ -997,8 +1018,8 @@ class CompletionRequest(OpenAIBaseModel): guided_decoding=guided_decoding, logit_bias=self.logit_bias, allowed_token_ids=self.allowed_token_ids, - extra_args=({"kv_transfer_params": self.kv_transfer_params} - if self.kv_transfer_params else None)) + extra_args=extra_args or None, + ) @model_validator(mode="before") @classmethod @@ -1117,8 +1138,9 @@ class EmbeddingChatRequest(OpenAIBaseModel): ) chat_template_kwargs: Optional[dict[str, Any]] = Field( default=None, - description=("Additional kwargs to pass to the template renderer. " - "Will be accessible by the chat template."), + description=( + "Additional keyword args to pass to the template renderer. " + "Will be accessible by the chat template."), ) mm_processor_kwargs: Optional[dict[str, Any]] = Field( default=None, @@ -1623,8 +1645,9 @@ class TokenizeChatRequest(OpenAIBaseModel): ) chat_template_kwargs: Optional[dict[str, Any]] = Field( default=None, - description=("Additional kwargs to pass to the template renderer. " - "Will be accessible by the chat template."), + description=( + "Additional keyword args to pass to the template renderer. " + "Will be accessible by the chat template."), ) mm_processor_kwargs: Optional[dict[str, Any]] = Field( default=None, @@ -1736,6 +1759,12 @@ class TranscriptionRequest(OpenAIBaseModel): # Flattened stream option to simplify form data. stream_include_usage: Optional[bool] = False stream_continuous_usage_stats: Optional[bool] = False + + vllm_xargs: Optional[dict[str, Union[str, int, float]]] = Field( + default=None, + description=("Additional request parameters with string or " + "numeric values, used by custom extensions."), + ) # --8<-- [end:transcription-extra-params] # --8<-- [start:transcription-sampling-params] @@ -1823,7 +1852,8 @@ class TranscriptionRequest(OpenAIBaseModel): presence_penalty=self.presence_penalty, output_kind=RequestOutputKind.DELTA if self.stream \ - else RequestOutputKind.FINAL_ONLY) + else RequestOutputKind.FINAL_ONLY, + extra_args=self.vllm_xargs) @model_validator(mode="before") @classmethod diff --git a/vllm/sampling_params.py b/vllm/sampling_params.py index 7abdcecca4746..a9a862384d117 100644 --- a/vllm/sampling_params.py +++ b/vllm/sampling_params.py @@ -198,8 +198,8 @@ class SamplingParams( processor which only retains scores for the given token ids. Defaults to None. extra_args: Arbitrary additional args, that can be used by custom - sampling implementations. Not used by any in-tree sampling - implementations. + sampling implementations, plugins, etc. Not used by any in-tree + sampling implementations. """ n: int = 1 From 36239f79dd356a8284ed4cf4d261312eac2495e1 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Thu, 19 Jun 2025 10:53:55 +0900 Subject: [PATCH 031/211] Fix FA2 fallback for Blackwell V1 (#19781) Signed-off-by: mgoin --- vllm/platforms/cuda.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 2d07ddc36613a..54719a3e79dd8 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -255,7 +255,7 @@ class CudaPlatformBase(Platform): "install FlashInfer for better performance.") pass # FlashAttention is the default for SM 8.0+ GPUs - elif cls.has_device_capability(80): + if cls.has_device_capability(80): logger.info_once("Using Flash Attention backend on V1 engine.") return ("vllm.v1.attention.backends." "flash_attn.FlashAttentionBackend") From 8d1e89d946e8f72580233b4541834110b62a257b Mon Sep 17 00:00:00 2001 From: Lu Fang <30275821+houseroad@users.noreply.github.com> Date: Thu, 19 Jun 2025 11:25:15 +0800 Subject: [PATCH 032/211] [Misc][ROCm] Enforce no unused variable in ROCm C++ files (#19796) Signed-off-by: Lu Fang --- cmake/utils.cmake | 1 + 1 file changed, 1 insertion(+) diff --git a/cmake/utils.cmake b/cmake/utils.cmake index 6d90555f29678..59c78950a1093 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -122,6 +122,7 @@ function (get_torch_gpu_compiler_flags OUT_GPU_FLAGS GPU_LANG) "-DENABLE_FP8" "-U__HIP_NO_HALF_CONVERSIONS__" "-U__HIP_NO_HALF_OPERATORS__" + "-Werror=unused-variable" "-fno-gpu-rdc") endif() From 4959915089f1bcf011f082136464e48b76c7e3d9 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Thu, 19 Jun 2025 11:52:09 +0800 Subject: [PATCH 033/211] [Quantization] Modify the logic of BNB double quantization (#19742) Signed-off-by: Jee Jee Li --- .../model_loader/bitsandbytes_loader.py | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/vllm/model_executor/model_loader/bitsandbytes_loader.py b/vllm/model_executor/model_loader/bitsandbytes_loader.py index 3146c35a4e6fa..a0a5372600f3d 100644 --- a/vllm/model_executor/model_loader/bitsandbytes_loader.py +++ b/vllm/model_executor/model_loader/bitsandbytes_loader.py @@ -492,8 +492,6 @@ class BitsAndBytesModelLoader(BaseModelLoader): raise ValueError("Following weights were not initialized from " f"checkpoint: {weights_not_loaded}") - torch.cuda.empty_cache() - param_dict = dict(model.named_parameters()) stacked_quant_state_dict: dict[str, dict[int, Any]] = {} # TODO: Change this lazy import to normal import @@ -545,6 +543,8 @@ class BitsAndBytesModelLoader(BaseModelLoader): for param_name, param in param_dict.items(): if param_name in stacked_quant_state_dict: quant_states = stacked_quant_state_dict[param_name] + # Dequantize double quantized values during weight loading. + dequantize_dq(quant_states) set_weight_attrs(param, {"bnb_quant_state": quant_states}) pack_ratio = getattr(param, "pack_factor", -1) @@ -565,6 +565,28 @@ class BitsAndBytesModelLoader(BaseModelLoader): if load_8bit: set_weight_attrs( param, {"matmul_state": [None] * len(quant_states)}) - + torch.cuda.empty_cache() def download_model(self, model_config: ModelConfig) -> None: self._prepare_weights(model_config.model, model_config.revision) + + +def dequantize_dq(quant_states: dict) -> None: + """ + When BNB employs Double Quantization, we perform the dequantization of + these constants during weight loading rather than at inference time, + thereby avoiding this computational overhead during inference. This comes + at the cost of increased memory usage. + """ + from bitsandbytes.functional import dequantize_blockwise + for _, quant_state in quant_states.items(): + # Copied from: https://github.com/bitsandbytes-foundation/bitsandbytes/blob/0.45.3/bitsandbytes/functional.py#L1352-#L1356 + if quant_state.nested: + absmax = dequantize_blockwise(quant_state.absmax, + quant_state.state2) + absmax += quant_state.offset + if absmax.dtype != torch.float32: + absmax = absmax.float() + quant_state.absmax = absmax + quant_state.nested = False + quant_state.offset = None + quant_state.state2 = None From 799397ee4f57b90ee1b5f12f88b12f4de0de0d1d Mon Sep 17 00:00:00 2001 From: Maximilien de Bayser Date: Thu, 19 Jun 2025 01:36:33 -0300 Subject: [PATCH 034/211] Support embedding models in V1 (#16188) Signed-off-by: Max de Bayser Signed-off-by: Max de Bayser Signed-off-by: 22quinn <33176974+22quinn@users.noreply.github.com> Co-authored-by: 22quinn <33176974+22quinn@users.noreply.github.com> --- examples/offline_inference/basic/embed.py | 5 +- .../vision_language_embedding.py | 1 + tests/compile/test_basic_correctness.py | 32 ++-- tests/conftest.py | 3 + tests/entrypoints/llm/test_encode.py | 24 ++- tests/entrypoints/openai/test_embedding.py | 8 + tests/entrypoints/openai/test_pooling.py | 15 +- tests/entrypoints/openai/test_rerank.py | 8 + tests/entrypoints/openai/test_score.py | 9 + .../language/pooling/test_classification.py | 10 +- .../models/language/pooling/test_embedding.py | 34 +++- tests/models/registry.py | 22 +-- tests/tokenization/test_detokenize.py | 1 + tests/v1/core/test_kv_cache_utils.py | 1 + tests/v1/core/test_prefix_caching.py | 1 + tests/v1/core/test_scheduler.py | 26 ++- tests/v1/engine/test_engine_core.py | 1 + tests/v1/engine/test_engine_core_client.py | 1 + .../v1/engine/test_fast_incdec_prefix_err.py | 1 + tests/v1/engine/test_output_processor.py | 13 +- tests/v1/kv_connector/unit/utils.py | 2 + tests/v1/worker/test_gpu_input_batch.py | 4 +- tests/v1/worker/test_gpu_model_runner.py | 1 + vllm/config.py | 26 ++- vllm/engine/arg_utils.py | 45 +++-- vllm/entrypoints/llm.py | 2 +- vllm/entrypoints/openai/serving_pooling.py | 4 +- vllm/model_executor/layers/pooler.py | 149 ++++++++++++---- vllm/model_executor/models/bert.py | 4 +- vllm/model_executor/models/modernbert.py | 5 +- vllm/model_executor/models/qwen3.py | 7 +- vllm/pooling_params.py | 7 + vllm/v1/core/kv_cache_manager.py | 3 +- vllm/v1/core/sched/output.py | 5 +- vllm/v1/core/sched/scheduler.py | 28 ++- vllm/v1/core/sched/utils.py | 15 +- vllm/v1/engine/__init__.py | 7 +- vllm/v1/engine/async_llm.py | 80 ++++++++- vllm/v1/engine/core.py | 1 - vllm/v1/engine/detokenizer.py | 10 +- vllm/v1/engine/llm_engine.py | 4 +- vllm/v1/engine/logprobs.py | 1 + vllm/v1/engine/output_processor.py | 133 +++++++++----- vllm/v1/engine/processor.py | 33 ++-- vllm/v1/metrics/loggers.py | 5 +- vllm/v1/metrics/stats.py | 1 - vllm/v1/outputs.py | 4 + vllm/v1/pool/__init__.py | 0 vllm/v1/pool/metadata.py | 16 ++ vllm/v1/request.py | 43 +++-- vllm/v1/structured_output/__init__.py | 4 +- vllm/v1/worker/gpu_input_batch.py | 163 +++++++++++------- vllm/v1/worker/gpu_model_runner.py | 126 ++++++++++++-- vllm/v1/worker/gpu_worker.py | 11 +- vllm/v1/worker/tpu_input_batch.py | 1 + vllm/v1/worker/tpu_model_runner.py | 4 + 56 files changed, 889 insertions(+), 281 deletions(-) create mode 100644 vllm/v1/pool/__init__.py create mode 100644 vllm/v1/pool/metadata.py diff --git a/examples/offline_inference/basic/embed.py b/examples/offline_inference/basic/embed.py index fc5ca23787be1..1114033d5cea4 100644 --- a/examples/offline_inference/basic/embed.py +++ b/examples/offline_inference/basic/embed.py @@ -12,7 +12,10 @@ def parse_args(): parser = EngineArgs.add_cli_args(parser) # Set example specific arguments parser.set_defaults( - model="intfloat/e5-mistral-7b-instruct", task="embed", enforce_eager=True + model="intfloat/e5-mistral-7b-instruct", + task="embed", + enforce_eager=True, + max_model_len=1024, ) return parser.parse_args() diff --git a/examples/offline_inference/vision_language_embedding.py b/examples/offline_inference/vision_language_embedding.py index 1f5bd4ad72b05..9451825f0b734 100644 --- a/examples/offline_inference/vision_language_embedding.py +++ b/examples/offline_inference/vision_language_embedding.py @@ -94,6 +94,7 @@ def run_vlm2vec(query: Query) -> ModelRequestData: engine_args = EngineArgs( model="TIGER-Lab/VLM2Vec-Full", task="embed", + max_model_len=4096, trust_remote_code=True, mm_processor_kwargs={"num_crops": 4}, limit_mm_per_prompt={"image": 1}, diff --git a/tests/compile/test_basic_correctness.py b/tests/compile/test_basic_correctness.py index dc6cfe9daccdc..1ee9b234d9f4c 100644 --- a/tests/compile/test_basic_correctness.py +++ b/tests/compile/test_basic_correctness.py @@ -31,7 +31,7 @@ class TestSetting: # basic llama model TestSetting( model="meta-llama/Llama-3.2-1B-Instruct", - model_args=[], + model_args=["--max-model-len", "2048"], pp_size=2, tp_size=2, attn_backend="FLASHINFER", @@ -41,7 +41,7 @@ class TestSetting: # llama model with quantization TestSetting( model="TheBloke/TinyLlama-1.1B-Chat-v0.3-GPTQ", - model_args=["--quantization", "gptq"], + model_args=["--quantization", "gptq", "--max-model-len", "2048"], pp_size=1, tp_size=1, attn_backend="FLASH_ATTN", @@ -51,7 +51,7 @@ class TestSetting: # MoE model TestSetting( model="ibm/PowerMoE-3b", - model_args=[], + model_args=["--max-model-len", "2048"], pp_size=1, tp_size=2, attn_backend="FLASH_ATTN", @@ -61,23 +61,27 @@ class TestSetting: # embedding model TestSetting( model="BAAI/bge-multilingual-gemma2", - model_args=["--task", "embed", "--dtype", "bfloat16"], + model_args=[ + "--task", "embed", "--dtype", "bfloat16", "--max-model-len", + "2048" + ], pp_size=1, tp_size=1, attn_backend="FLASH_ATTN", method="encode", fullgraph=True, ), - # encoder-based embedding model (BERT) - TestSetting( - model="BAAI/bge-base-en-v1.5", - model_args=["--task", "embed"], - pp_size=1, - tp_size=1, - attn_backend="XFORMERS", - method="encode", - fullgraph=True, - ), + # TODO: bert models are not supported in V1 yet + # # encoder-based embedding model (BERT) + # TestSetting( + # model="BAAI/bge-base-en-v1.5", + # model_args=["--task", "embed"], + # pp_size=1, + # tp_size=1, + # attn_backend="XFORMERS", + # method="encode", + # fullgraph=True, + # ), # vision language model TestSetting( model="microsoft/Phi-3.5-vision-instruct", diff --git a/tests/conftest.py b/tests/conftest.py index 294805a8164f8..ff564b2b8ed51 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -145,6 +145,7 @@ def run_with_both_engines(request, monkeypatch): # Automatically runs tests twice, once with V1 and once without use_v1 = request.param # Tests decorated with `@skip_v1` are only run without v1 + skip_v0 = request.node.get_closest_marker("skip_v0") skip_v1 = request.node.get_closest_marker("skip_v1") if use_v1: @@ -152,6 +153,8 @@ def run_with_both_engines(request, monkeypatch): pytest.skip("Skipping test on vllm V1") monkeypatch.setenv('VLLM_USE_V1', '1') else: + if skip_v0: + pytest.skip("Skipping test on vllm V0") monkeypatch.setenv('VLLM_USE_V1', '0') yield diff --git a/tests/entrypoints/llm/test_encode.py b/tests/entrypoints/llm/test_encode.py index f0fa54aa3131c..b930f05bebd0f 100644 --- a/tests/entrypoints/llm/test_encode.py +++ b/tests/entrypoints/llm/test_encode.py @@ -8,6 +8,8 @@ import pytest from vllm import LLM, PoolingParams, PoolingRequestOutput from vllm.distributed import cleanup_dist_env_and_memory +from ...models.utils import check_embeddings_close + MODEL_NAME = "intfloat/multilingual-e5-small" PROMPTS = [ @@ -27,6 +29,14 @@ TOKEN_IDS = [ ] +@pytest.fixture(autouse=True) +def v1(run_with_both_engines): + # Simple autouse wrapper to run both engines for each test + # This can be promoted up to conftest.py to run for every + # test in a package + pass + + @pytest.fixture(scope="module") def llm(): # pytest caches the fixture so we use weakref.proxy to @@ -46,9 +56,15 @@ def llm(): cleanup_dist_env_and_memory() -def assert_outputs_equal(o1: list[PoolingRequestOutput], +def assert_outputs_match(o1: list[PoolingRequestOutput], o2: list[PoolingRequestOutput]): - assert [o.outputs for o in o1] == [o.outputs for o in o2] + check_embeddings_close( + embeddings_0_lst=[o.outputs.data for o in o1], + embeddings_1_lst=[o.outputs.data for o in o2], + name_0="hf", + name_1="vllm", + tol=1e-2, + ) @pytest.mark.skip_global_cleanup @@ -63,7 +79,7 @@ def test_v1_v2_api_consistency_single_prompt_tokens(llm: LLM, v2_output = llm.encode({"prompt_token_ids": prompt_token_ids}, pooling_params=pooling_params) - assert_outputs_equal(v1_output, v2_output) + assert_outputs_match(v1_output, v2_output) @pytest.mark.skip_global_cleanup @@ -80,7 +96,7 @@ def test_v1_v2_api_consistency_multi_prompt_tokens(llm: LLM): } for p in TOKEN_IDS], pooling_params=pooling_params, ) - assert_outputs_equal(v1_output, v2_output) + assert_outputs_match(v1_output, v2_output) @pytest.mark.skip_global_cleanup diff --git a/tests/entrypoints/openai/test_embedding.py b/tests/entrypoints/openai/test_embedding.py index 80640a2e1a8bc..adb094127e400 100644 --- a/tests/entrypoints/openai/test_embedding.py +++ b/tests/entrypoints/openai/test_embedding.py @@ -21,6 +21,14 @@ DUMMY_CHAT_TEMPLATE = """{% for message in messages %}{{message['role'] + ': ' + DTYPE = "bfloat16" +@pytest.fixture(autouse=True) +def v1(run_with_both_engines): + # Simple autouse wrapper to run both engines for each test + # This can be promoted up to conftest.py to run for every + # test in a package + pass + + @pytest.fixture(scope="module") def server(): args = [ diff --git a/tests/entrypoints/openai/test_pooling.py b/tests/entrypoints/openai/test_pooling.py index cf16ace6537ac..41c30e71684bb 100644 --- a/tests/entrypoints/openai/test_pooling.py +++ b/tests/entrypoints/openai/test_pooling.py @@ -7,6 +7,7 @@ import numpy as np import pytest import requests +from tests.models.utils import check_embeddings_close from vllm.entrypoints.openai.protocol import PoolingResponse from vllm.transformers_utils.tokenizer import get_tokenizer @@ -223,8 +224,11 @@ async def test_batch_base64_pooling(server: RemoteOpenAIServer, np.frombuffer(base64.b64decode(data.data), dtype="float32").tolist()) - assert responses_float.data[0].data == decoded_responses_base64_data[0] - assert responses_float.data[1].data == decoded_responses_base64_data[1] + check_embeddings_close( + embeddings_0_lst=[d.data for d in responses_float.data], + embeddings_1_lst=decoded_responses_base64_data, + name_0="float32", + name_1="base64") # Default response is float32 decoded from base64 by OpenAI Client default_response = requests.post( @@ -237,5 +241,8 @@ async def test_batch_base64_pooling(server: RemoteOpenAIServer, default_response.raise_for_status() responses_default = PoolingResponse.model_validate(default_response.json()) - assert responses_float.data[0].data == responses_default.data[0].data - assert responses_float.data[1].data == responses_default.data[1].data + check_embeddings_close( + embeddings_0_lst=[d.data for d in responses_default.data], + embeddings_1_lst=[d.data for d in responses_default.data], + name_0="float32", + name_1="base64") diff --git a/tests/entrypoints/openai/test_rerank.py b/tests/entrypoints/openai/test_rerank.py index 19eba320c2795..e40bbca9a8ad5 100644 --- a/tests/entrypoints/openai/test_rerank.py +++ b/tests/entrypoints/openai/test_rerank.py @@ -12,6 +12,14 @@ MODEL_NAME = "BAAI/bge-reranker-base" DTYPE = "bfloat16" +@pytest.fixture(autouse=True) +def v1(run_with_both_engines): + # Simple autouse wrapper to run both engines for each test + # This can be promoted up to conftest.py to run for every + # test in a package + pass + + @pytest.fixture(scope="module") def server(): args = ["--enforce-eager", "--max-model-len", "100", "--dtype", DTYPE] diff --git a/tests/entrypoints/openai/test_score.py b/tests/entrypoints/openai/test_score.py index af51a0a3eeebf..8927fe7718093 100644 --- a/tests/entrypoints/openai/test_score.py +++ b/tests/entrypoints/openai/test_score.py @@ -11,6 +11,15 @@ from vllm.entrypoints.openai.protocol import ScoreResponse from ...utils import RemoteOpenAIServer + +@pytest.fixture(autouse=True) +def v1(run_with_both_engines): + # Simple autouse wrapper to run both engines for each test + # This can be promoted up to conftest.py to run for every + # test in a package + pass + + MODELS = [ { "name": "BAAI/bge-reranker-v2-m3", diff --git a/tests/models/language/pooling/test_classification.py b/tests/models/language/pooling/test_classification.py index 4a6d781ce6f09..77df6d16a3673 100644 --- a/tests/models/language/pooling/test_classification.py +++ b/tests/models/language/pooling/test_classification.py @@ -6,6 +6,14 @@ from transformers import AutoModelForSequenceClassification from vllm.platforms import current_platform +# TODO: enable when float32 is supported by V1 +# @pytest.fixture(autouse=True) +# def v1(run_with_both_engines): +# # Simple autouse wrapper to run both engines for each test +# # This can be promoted up to conftest.py to run for every +# # test in a package +# pass + @pytest.mark.parametrize( "model", @@ -29,7 +37,7 @@ def test_models( # switch to use ROCm CK FA backend monkeypatch.setenv("VLLM_USE_TRITON_FLASH_ATTN", "False") - with vllm_runner(model, dtype=dtype) as vllm_model: + with vllm_runner(model, max_model_len=512, dtype=dtype) as vllm_model: vllm_outputs = vllm_model.classify(example_prompts) with hf_runner(model, diff --git a/tests/models/language/pooling/test_embedding.py b/tests/models/language/pooling/test_embedding.py index 9516a01421cbb..e29b4f6e8bec9 100644 --- a/tests/models/language/pooling/test_embedding.py +++ b/tests/models/language/pooling/test_embedding.py @@ -8,6 +8,14 @@ from vllm.platforms import current_platform from ...utils import check_embeddings_close +@pytest.fixture(autouse=True) +def v1(run_with_both_engines): + # Simple autouse wrapper to run both engines for each test + # This can be promoted up to conftest.py to run for every + # test in a package + pass + + @pytest.mark.parametrize( "model", [ @@ -20,15 +28,27 @@ from ...utils import check_embeddings_close marks=[pytest.mark.core_model]), pytest.param("intfloat/e5-mistral-7b-instruct", marks=[pytest.mark.core_model, pytest.mark.cpu_model]), - pytest.param("ssmits/Qwen2-7B-Instruct-embed-base"), + # the qwen models interfere with each other (see PR + # https://github.com/vllm-project/vllm/pull/18720). + # To avoid this problem, for now we skip v0 since it will be + # deprecated anyway. + pytest.param("ssmits/Qwen2-7B-Instruct-embed-base", + marks=[pytest.mark.skip_v0]), # [Encoder-only] pytest.param("BAAI/bge-base-en-v1.5", - marks=[pytest.mark.core_model, pytest.mark.cpu_model]), - pytest.param("sentence-transformers/all-MiniLM-L12-v2"), - pytest.param("intfloat/multilingual-e5-small"), - pytest.param("Alibaba-NLP/gte-Qwen2-1.5B-instruct"), + marks=[ + pytest.mark.core_model, pytest.mark.cpu_model, + pytest.mark.skip_v1 + ]), + pytest.param("sentence-transformers/all-MiniLM-L12-v2", + marks=[pytest.mark.skip_v1]), + pytest.param("intfloat/multilingual-e5-small", + marks=[pytest.mark.skip_v1]), + pytest.param("Alibaba-NLP/gte-Qwen2-1.5B-instruct", + marks=[pytest.mark.skip_v1]), # [Cross-Encoder] - pytest.param("sentence-transformers/stsb-roberta-base-v2"), + pytest.param("sentence-transformers/stsb-roberta-base-v2", + marks=[pytest.mark.skip_v1]), ], ) def test_models( @@ -62,7 +82,7 @@ def test_models( with vllm_runner(model, task="embed", - max_model_len=None, + max_model_len=512, **vllm_extra_kwargs) as vllm_model: vllm_outputs = vllm_model.encode(example_prompts) diff --git a/tests/models/registry.py b/tests/models/registry.py index fb93ba60c2e8d..82253a1c94b39 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -265,8 +265,8 @@ _TEXT_GENERATION_EXAMPLE_MODELS = { _EMBEDDING_EXAMPLE_MODELS = { # [Text-only] - "BertModel": _HfExamplesInfo("BAAI/bge-base-en-v1.5"), - "Gemma2Model": _HfExamplesInfo("BAAI/bge-multilingual-gemma2"), + "BertModel": _HfExamplesInfo("BAAI/bge-base-en-v1.5", v0_only=True), + "Gemma2Model": _HfExamplesInfo("BAAI/bge-multilingual-gemma2", v0_only=True), # noqa: E501 "GritLM": _HfExamplesInfo("parasail-ai/GritLM-7B-vllm"), "GteModel": _HfExamplesInfo("Snowflake/snowflake-arctic-embed-m-v2.0", trust_remote_code=True), @@ -279,16 +279,16 @@ _EMBEDDING_EXAMPLE_MODELS = { "LlamaModel": _HfExamplesInfo("llama", is_available_online=False), "MistralModel": _HfExamplesInfo("intfloat/e5-mistral-7b-instruct"), "ModernBertModel": _HfExamplesInfo("Alibaba-NLP/gte-modernbert-base", - trust_remote_code=True), + trust_remote_code=True, v0_only=True), "NomicBertModel": _HfExamplesInfo("nomic-ai/nomic-embed-text-v2-moe", - trust_remote_code=True), + trust_remote_code=True, v0_only=True), # noqa: E501 "Qwen2Model": _HfExamplesInfo("ssmits/Qwen2-7B-Instruct-embed-base"), "Qwen2ForRewardModel": _HfExamplesInfo("Qwen/Qwen2.5-Math-RM-72B"), "Qwen2ForProcessRewardModel": _HfExamplesInfo("Qwen/Qwen2.5-Math-PRM-7B"), "Qwen2ForSequenceClassification": _HfExamplesInfo("jason9693/Qwen2.5-1.5B-apeach"), # noqa: E501 - "RobertaModel": _HfExamplesInfo("sentence-transformers/stsb-roberta-base-v2"), # noqa: E501 - "RobertaForMaskedLM": _HfExamplesInfo("sentence-transformers/all-roberta-large-v1"), # noqa: E501 - "XLMRobertaModel": _HfExamplesInfo("intfloat/multilingual-e5-small"), + "RobertaModel": _HfExamplesInfo("sentence-transformers/stsb-roberta-base-v2", v0_only=True), # noqa: E501 + "RobertaForMaskedLM": _HfExamplesInfo("sentence-transformers/all-roberta-large-v1", v0_only=True), # noqa: E501 + "XLMRobertaModel": _HfExamplesInfo("intfloat/multilingual-e5-small", v0_only=True), # noqa: E501 # [Multimodal] "LlavaNextForConditionalGeneration": _HfExamplesInfo("royokong/e5-v"), "Phi3VForCausalLM": _HfExamplesInfo("TIGER-Lab/VLM2Vec-Full", @@ -300,10 +300,10 @@ _EMBEDDING_EXAMPLE_MODELS = { _CROSS_ENCODER_EXAMPLE_MODELS = { # [Text-only] - "BertForSequenceClassification": _HfExamplesInfo("cross-encoder/ms-marco-MiniLM-L-6-v2"), # noqa: E501 - "RobertaForSequenceClassification": _HfExamplesInfo("cross-encoder/quora-roberta-base"), # noqa: E501 - "XLMRobertaForSequenceClassification": _HfExamplesInfo("BAAI/bge-reranker-v2-m3"), # noqa: E501 - "ModernBertForSequenceClassification": _HfExamplesInfo("Alibaba-NLP/gte-reranker-modernbert-base"), # noqa: E501 + "BertForSequenceClassification": _HfExamplesInfo("cross-encoder/ms-marco-MiniLM-L-6-v2", v0_only=True), # noqa: E501 + "RobertaForSequenceClassification": _HfExamplesInfo("cross-encoder/quora-roberta-base", v0_only=True), # noqa: E501 + "XLMRobertaForSequenceClassification": _HfExamplesInfo("BAAI/bge-reranker-v2-m3", v0_only=True), # noqa: E501 + "ModernBertForSequenceClassification": _HfExamplesInfo("Alibaba-NLP/gte-reranker-modernbert-base", v0_only=True), # noqa: E501 } _MULTIMODAL_EXAMPLE_MODELS = { diff --git a/tests/tokenization/test_detokenize.py b/tests/tokenization/test_detokenize.py index 9f2414eca24f3..f8aeba8301b12 100644 --- a/tests/tokenization/test_detokenize.py +++ b/tests/tokenization/test_detokenize.py @@ -68,6 +68,7 @@ def _run_incremental_decode(tokenizer, None, params, None, + None, 0.0, None, cache_salt=None, diff --git a/tests/v1/core/test_kv_cache_utils.py b/tests/v1/core/test_kv_cache_utils.py index 347f98c772ffe..e80ad8a68151d 100644 --- a/tests/v1/core/test_kv_cache_utils.py +++ b/tests/v1/core/test_kv_cache_utils.py @@ -43,6 +43,7 @@ def make_request(request_id, multi_modal_hashes=mm_hashes, multi_modal_placeholders=mm_positions, sampling_params=SamplingParams(max_tokens=17), + pooling_params=None, eos_token_id=100, lora_request=None, cache_salt=cache_salt, diff --git a/tests/v1/core/test_prefix_caching.py b/tests/v1/core/test_prefix_caching.py index 394336624aca8..7a42778831c5d 100644 --- a/tests/v1/core/test_prefix_caching.py +++ b/tests/v1/core/test_prefix_caching.py @@ -39,6 +39,7 @@ def make_request(request_id, multi_modal_placeholders=mm_positions, sampling_params=SamplingParams(max_tokens=17, prompt_logprobs=prompt_logprobs), + pooling_params=None, eos_token_id=100, lora_request=None, cache_salt=cache_salt, diff --git a/tests/v1/core/test_scheduler.py b/tests/v1/core/test_scheduler.py index d348956aa1773..b0b1116eb5363 100644 --- a/tests/v1/core/test_scheduler.py +++ b/tests/v1/core/test_scheduler.py @@ -135,6 +135,7 @@ def create_requests(num_requests: int, request_id=f"{i}", prompt_token_ids=[i] * num_tokens, sampling_params=sampling_params, + pooling_params=None, multi_modal_inputs=mm_inputs, multi_modal_placeholders=mm_position, multi_modal_hashes=None, @@ -283,6 +284,7 @@ def test_schedule_partial_requests(): spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) scheduler.update_from_output(output, model_runner_output) @@ -333,6 +335,7 @@ def test_no_mm_input_chunking(): spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) scheduler.update_from_output(output, model_runner_output) @@ -396,6 +399,7 @@ def test_schedule_concurrent_partial_requests(enable_prefix_caching: bool): spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) scheduler.update_from_output(output, model_runner_output) @@ -420,6 +424,7 @@ def test_schedule_concurrent_partial_requests(enable_prefix_caching: bool): spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) scheduler.update_from_output(output1, model_runner_output) output2 = scheduler.schedule() @@ -473,7 +478,8 @@ def test_stop_via_update_from_output(): 11]], # First request hits EOS, second continues spec_token_ids=None, logprobs=None, - prompt_logprobs_dict={}) + prompt_logprobs_dict={}, + pooler_output=[]) scheduler.update_from_output(scheduler_output, model_output) @@ -523,7 +529,8 @@ def test_stop_via_update_from_output(): [13, 14]], # First request hits stop token spec_token_ids=None, logprobs=None, - prompt_logprobs_dict={}) + prompt_logprobs_dict={}, + pooler_output=[]) scheduler.update_from_output(scheduler_output, model_output) @@ -572,7 +579,8 @@ def test_stop_via_update_from_output(): [13]], # First request exceeds max_tokens spec_token_ids=None, logprobs=None, - prompt_logprobs_dict={}) + prompt_logprobs_dict={}, + pooler_output=[]) scheduler.update_from_output(scheduler_output, model_output) @@ -614,7 +622,8 @@ def test_stop_via_update_from_output(): sampled_token_ids=[[EOS_TOKEN_ID, 10, 11]], spec_token_ids=None, logprobs=None, - prompt_logprobs_dict={}) + prompt_logprobs_dict={}, + pooler_output=[]) scheduler.update_from_output(scheduler_output, model_output) @@ -663,6 +672,7 @@ def test_schedule_concurrent_batches(enable_prefix_caching: Optional[bool], spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) scheduler.update_from_output(scheduler_output0, model_runner_output) @@ -680,6 +690,7 @@ def test_schedule_concurrent_batches(enable_prefix_caching: Optional[bool], spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) scheduler.update_from_output(scheduler_output1, model_runner_output) @@ -730,6 +741,7 @@ def test_schedule_spec_decoding_stats(spec_tokens, output_tokens, expected): spec_token_ids=spec_tokens, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) engine_core_outputs = scheduler.update_from_output(output, model_runner_output) @@ -769,6 +781,7 @@ def test_schedule_spec_decoding_stats(spec_tokens, output_tokens, expected): spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) engine_core_outputs = scheduler.update_from_output(output, model_runner_output) @@ -896,6 +909,7 @@ def test_kv_connector_basic(): spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) # Ensure ScheduleOutput is correct. @@ -941,6 +955,7 @@ def test_kv_connector_basic(): spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) # We should get a local cache hit of NUM_TOKENS_PREFIX and @@ -1007,6 +1022,7 @@ def test_kv_connector_unable_to_allocate(): spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) # Just one request should be running. @@ -1087,6 +1103,7 @@ def test_kv_connector_handles_preemption(): spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) # All can be scheduled - 1st token. @@ -1181,6 +1198,7 @@ def make_output(scheduler: Scheduler): spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], ) diff --git a/tests/v1/engine/test_engine_core.py b/tests/v1/engine/test_engine_core.py index bc7894e92814e..bbdc73e9608a1 100644 --- a/tests/v1/engine/test_engine_core.py +++ b/tests/v1/engine/test_engine_core.py @@ -39,6 +39,7 @@ def make_request() -> EngineCoreRequest: mm_hashes=None, mm_placeholders=None, sampling_params=SamplingParams(), + pooling_params=None, eos_token_id=None, arrival_time=time.time(), lora_request=None, diff --git a/tests/v1/engine/test_engine_core_client.py b/tests/v1/engine/test_engine_core_client.py index d4db16fe86fab..16c36cd5c6b9b 100644 --- a/tests/v1/engine/test_engine_core_client.py +++ b/tests/v1/engine/test_engine_core_client.py @@ -53,6 +53,7 @@ def make_request( mm_hashes=None, mm_placeholders=None, sampling_params=params, + pooling_params=None, eos_token_id=None, arrival_time=time.time(), lora_request=None, diff --git a/tests/v1/engine/test_fast_incdec_prefix_err.py b/tests/v1/engine/test_fast_incdec_prefix_err.py index 5c844e0e7095e..f028b4ab1d73f 100644 --- a/tests/v1/engine/test_fast_incdec_prefix_err.py +++ b/tests/v1/engine/test_fast_incdec_prefix_err.py @@ -33,6 +33,7 @@ def test_fast_inc_detok_invalid_utf8_err_case(): None, params, None, + None, 0.0, None, cache_salt=None, diff --git a/tests/v1/engine/test_output_processor.py b/tests/v1/engine/test_output_processor.py index 6b88b0cf17e32..1c8c5f25e29be 100644 --- a/tests/v1/engine/test_output_processor.py +++ b/tests/v1/engine/test_output_processor.py @@ -66,7 +66,8 @@ def test_incremental_detokenization(request_output_kind: RequestOutputKind, output_kind=request_output_kind, stop=[], include_stop_str_in_output=False, - )) + ), + pooling_params=None) for idx, prompt_tokens in enumerate(dummy_test_vectors.prompt_tokens) ] @@ -416,7 +417,8 @@ def test_logprobs_processor(request_output_kind: RequestOutputKind, include_stop_str_in_output=False, logprobs=num_sample_logprobs, prompt_logprobs=num_prompt_logprobs, - )) + ), + pooling_params=None) for idx, prompt_tokens in enumerate(dummy_test_vectors.prompt_tokens) ] @@ -582,7 +584,8 @@ def test_stop_token(include_stop_str_in_output: bool, logprobs=num_sample_logprobs, prompt_logprobs=None, ignore_eos=ignore_eos, - )) + ), + pooling_params=None) # Add request to the detokenizer. output_processor.add_request(request, prompt_string) @@ -678,7 +681,8 @@ def test_stop_string(include_stop_str_in_output: bool, include_stop_str_in_output=include_stop_str_in_output, logprobs=num_sample_logprobs, prompt_logprobs=None, - )) + ), + pooling_params=None) for idx, prompt_tokens in enumerate(dummy_test_vectors.prompt_tokens) ] @@ -786,6 +790,7 @@ def test_iteration_stats(dummy_test_vectors): cache_salt=None, data_parallel_rank=None, sampling_params=SamplingParams(), + pooling_params=None, ) for idx, prompt_tokens in enumerate(dummy_test_vectors.prompt_tokens) ] diff --git a/tests/v1/kv_connector/unit/utils.py b/tests/v1/kv_connector/unit/utils.py index 4a9e3a7ad807a..61f59f35f75b9 100644 --- a/tests/v1/kv_connector/unit/utils.py +++ b/tests/v1/kv_connector/unit/utils.py @@ -150,6 +150,7 @@ def create_request( request_id=f"id-{request_id}", prompt_token_ids=prompt_token_ids, sampling_params=sampling_params, + pooling_params=None, multi_modal_inputs=None, multi_modal_placeholders=None, multi_modal_hashes=None, @@ -183,6 +184,7 @@ def create_model_runner_output( spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=None, finished_sending=finished_sending, finished_recving=finished_recving, ) diff --git a/tests/v1/worker/test_gpu_input_batch.py b/tests/v1/worker/test_gpu_input_batch.py index de6ebe4f6716b..9e5e06cdc1f51 100644 --- a/tests/v1/worker/test_gpu_input_batch.py +++ b/tests/v1/worker/test_gpu_input_batch.py @@ -10,6 +10,7 @@ import torch from vllm.sampling_params import SamplingParams from vllm.utils import is_pin_memory_available, make_tensor_with_pad +from vllm.v1.pool.metadata import PoolingMetadata from vllm.v1.sample.metadata import SamplingMetadata from vllm.v1.worker.block_table import BlockTable, MultiGroupBlockTable from vllm.v1.worker.gpu_input_batch import CachedRequestState, InputBatch @@ -46,7 +47,7 @@ def _compare_objs(obj1, obj2): for a_i, b_i in zip(a.block_tables, b.block_tables): _compare_objs(a_i, b_i) is_same = True - elif isinstance(a, (BlockTable, SamplingMetadata)): + elif isinstance(a, (BlockTable, SamplingMetadata, PoolingMetadata)): _compare_objs(a, b) is_same = True # if we make it here must be same elif a == b: @@ -201,6 +202,7 @@ def _construct_cached_request_state(req_id_suffix: int): req_id=f"req_id_{req_id_suffix}", prompt_token_ids=prompt_token_ids, sampling_params=_create_sampling_params(), + pooling_params=None, mm_inputs=[], mm_positions=[], block_ids=([], ), diff --git a/tests/v1/worker/test_gpu_model_runner.py b/tests/v1/worker/test_gpu_model_runner.py index 994432dfd593b..abf14a8fb6250 100644 --- a/tests/v1/worker/test_gpu_model_runner.py +++ b/tests/v1/worker/test_gpu_model_runner.py @@ -122,6 +122,7 @@ def _schedule_new_request(*req_ids: str) -> SchedulerOutput: mm_hashes=[], mm_positions=[], sampling_params=SamplingParams(), + pooling_params=None, block_ids=([0], ), num_computed_tokens=0, lora_request=None, diff --git a/vllm/config.py b/vllm/config.py index 7a9bc8a4f7afa..54c7a497b261b 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -4496,11 +4496,31 @@ class VllmConfig: if self.compilation_config.full_cuda_graph and \ not self.model_config.disable_cascade_attn: - logger.warning_once( - "full_cuda_graph is not supported with " - "cascade attention. Disabling cascade attention.") + logger.info("full_cuda_graph is not supported with " + "cascade attention. Disabling cascade attention.") self.model_config.disable_cascade_attn = True + disable_chunked_prefill_reasons: list[str] = [] + + if self.model_config and self.model_config.pooler_config: + pooling_type = self.model_config.pooler_config.pooling_type + if pooling_type is None or pooling_type.lower() != "last": + disable_chunked_prefill_reasons.append( + "Only \"last\" pooling supports chunked " + "prefill and prefix caching; disabling both.") + + if disable_chunked_prefill_reasons: + for reason in disable_chunked_prefill_reasons: + logger.info(reason) + self.scheduler_config.chunked_prefill_enabled = False + self.scheduler_config.long_prefill_token_threshold = 0 + self.scheduler_config.max_num_batched_tokens = max( + self.scheduler_config.max_model_len, + DEFAULT_MAX_NUM_BATCHED_TOKENS) + + if self.cache_config is not None: + self.cache_config.enable_prefix_caching = False + if (self.kv_events_config is not None and self.kv_events_config.enable_kv_cache_events and not self.cache_config.enable_prefix_caching): diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 4ca645b91b33a..7a88e3269a5ed 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1041,7 +1041,7 @@ class EngineArgs: # Set default arguments for V0 or V1 Engine. if use_v1: - self._set_default_args_v1(usage_context) + self._set_default_args_v1(usage_context, model_config) else: self._set_default_args_v0(model_config) @@ -1349,13 +1349,7 @@ class EngineArgs: recommend_to_remove=False) return False - # No Embedding Models so far. - if model_config.task not in ["generate"]: - _raise_or_fallback(feature_name=f"--task {model_config.task}", - recommend_to_remove=False) - return False - - # No Encoder-Decoder, not all Mamba so far. + # No Mamba or Encoder-Decoder so far. if not model_config.is_v1_compatible: _raise_or_fallback(feature_name=model_config.architectures, recommend_to_remove=False) @@ -1523,15 +1517,38 @@ class EngineArgs: if self.max_num_seqs is None: self.max_num_seqs = 256 - def _set_default_args_v1(self, usage_context: UsageContext) -> None: + def _set_default_args_v1(self, usage_context: UsageContext, + model_config: ModelConfig) -> None: """Set Default Arguments for V1 Engine.""" - # V1 always uses chunked prefills. - self.enable_chunked_prefill = True + # V1 always uses chunked prefills and prefix caching + # for non-pooling tasks. + # For pooling tasks the default is False + if model_config.runner_type != "pooling": + self.enable_chunked_prefill = True + if self.enable_prefix_caching is None: + self.enable_prefix_caching = True + else: - # V1 enables prefix caching by default. - if self.enable_prefix_caching is None: - self.enable_prefix_caching = True + pooling_type = model_config.pooler_config.pooling_type + + # TODO: when encoder models are supported we'll have to + # check for causal attention here. + incremental_prefill_supported = (pooling_type is not None and + pooling_type.lower() == "last") + + action = "Enabling" if \ + incremental_prefill_supported else "Disabling" + + if self.enable_chunked_prefill is None: + self.enable_chunked_prefill = incremental_prefill_supported + logger.info("(%s) chunked prefill by default", action) + if self.enable_prefix_caching is None: + self.enable_prefix_caching = incremental_prefill_supported + logger.info("(%s) prefix caching by default", action) + + if not self.enable_chunked_prefill: + self.max_num_batched_tokens = model_config.max_model_len # V1 should use the new scheduler by default. # Swap it only if this arg is set to the original V0 default diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index c11e627ee2361..f3170fa30fce1 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -1266,7 +1266,7 @@ class LLM: # the tokenizer for models such as # "cross-encoder/ms-marco-MiniLM-L-6-v2" doesn't support passing # lists of tokens to the `text` and `text_pair` kwargs - tokenizer = self.llm_engine.get_tokenizer() + tokenizer = self.get_tokenizer() def ensure_str(prompt: SingletonPrompt): if isinstance(prompt, dict): diff --git a/vllm/entrypoints/openai/serving_pooling.py b/vllm/entrypoints/openai/serving_pooling.py index b896cc46b9d08..c2ed50d04d124 100644 --- a/vllm/entrypoints/openai/serving_pooling.py +++ b/vllm/entrypoints/openai/serving_pooling.py @@ -9,6 +9,7 @@ from typing import Final, Literal, Optional, Union, cast import jinja2 import numpy as np +import torch from fastapi import Request from typing_extensions import assert_never @@ -39,7 +40,8 @@ def _get_data( elif encoding_format == "base64": # Force to use float32 for base64 encoding # to match the OpenAI python client behavior - pooling_bytes = np.array(output.data, dtype="float32").tobytes() + pt_float32 = output.data.to(dtype=torch.float32) + pooling_bytes = np.array(pt_float32, dtype="float32").tobytes() return base64.b64encode(pooling_bytes).decode("utf-8") assert_never(encoding_format) diff --git a/vllm/model_executor/layers/pooler.py b/vllm/model_executor/layers/pooler.py index 6829d93d2d6c7..eb2148d76452e 100644 --- a/vllm/model_executor/layers/pooler.py +++ b/vllm/model_executor/layers/pooler.py @@ -10,11 +10,15 @@ import torch.nn.functional as F from typing_extensions import assert_never from vllm.config import ModelConfig, PoolerConfig -from vllm.model_executor.pooling_metadata import (PoolingMetadata, - PoolingTensors) +from vllm.model_executor.pooling_metadata import ( # noqa: E501 + PoolingMetadata as V0PoolingMetadata) +from vllm.model_executor.pooling_metadata import PoolingTensors from vllm.sequence import PoolerOutput, PoolingSequenceGroupOutput from vllm.transformers_utils.config import ( get_cross_encoder_activation_function) +from vllm.v1.pool.metadata import PoolingMetadata as V1PoolingMetadata + +PoolingMetadata = Union[V0PoolingMetadata, V1PoolingMetadata] class PoolingType(IntEnum): @@ -75,15 +79,18 @@ class SimplePooler(nn.Module): def get_prompt_lens( self, - hidden_states: torch.Tensor, + hidden_states: Union[torch.Tensor, list[torch.Tensor]], pooling_metadata: PoolingMetadata, ) -> torch.Tensor: + if isinstance(pooling_metadata, V1PoolingMetadata): + return pooling_metadata.prompt_lens + assert isinstance(hidden_states, torch.Tensor) return PoolingTensors.from_pooling_metadata( pooling_metadata, hidden_states.device).prompt_lens def extract_states( self, - hidden_states: torch.Tensor, + hidden_states: Union[torch.Tensor, list[torch.Tensor]], pooling_metadata: PoolingMetadata, ) -> Union[list[torch.Tensor], torch.Tensor]: raise NotImplementedError @@ -93,7 +100,7 @@ class SimplePooler(nn.Module): def forward( self, - hidden_states: torch.Tensor, + hidden_states: Union[torch.Tensor, list[torch.Tensor]], pooling_metadata: PoolingMetadata, ) -> PoolerOutput: pooled_data = self.extract_states(hidden_states, pooling_metadata) @@ -106,11 +113,19 @@ class CLSPool(SimplePooler): def extract_states( self, - hidden_states: torch.Tensor, + hidden_states: Union[torch.Tensor, list[torch.Tensor]], pooling_metadata: PoolingMetadata, ) -> Union[list[torch.Tensor], torch.Tensor]: prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) + if isinstance(hidden_states, list): + result = [] + for req_state, prompt_len in zip(hidden_states, prompt_lens): + assert prompt_len == req_state.shape[0], \ + "partial prefill not supported with CLS pooling" + result.append(req_state[0]) + return result + first_token_flat_indices = torch.zeros_like(prompt_lens) first_token_flat_indices[1:] += torch.cumsum(prompt_lens, dim=0)[:-1] return hidden_states[first_token_flat_indices] @@ -120,9 +135,12 @@ class LastPool(SimplePooler): def extract_states( self, - hidden_states: torch.Tensor, + hidden_states: Union[torch.Tensor, list[torch.Tensor]], pooling_metadata: PoolingMetadata, ) -> Union[list[torch.Tensor], torch.Tensor]: + if isinstance(hidden_states, list): + return [h[-1] for h in hidden_states] + prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) last_token_flat_indices = torch.cumsum(prompt_lens, dim=0) - 1 @@ -133,11 +151,17 @@ class AllPool(SimplePooler): def extract_states( self, - hidden_states: torch.Tensor, + hidden_states: Union[torch.Tensor, list[torch.Tensor]], pooling_metadata: PoolingMetadata, ) -> Union[list[torch.Tensor], torch.Tensor]: prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) + if isinstance(hidden_states, list): + for req_state, prompt_len in zip(hidden_states, prompt_lens): + assert prompt_len == req_state.shape[0], \ + "partial prefill not supported with ALL pooling" + return hidden_states + offset = 0 pooled_data = list[torch.Tensor]() for prompt_len in prompt_lens: @@ -151,11 +175,20 @@ class MeanPool(SimplePooler): def extract_states( self, - hidden_states: torch.Tensor, + hidden_states: Union[torch.Tensor, list[torch.Tensor]], pooling_metadata: PoolingMetadata, ) -> Union[list[torch.Tensor], torch.Tensor]: prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) + if isinstance(hidden_states, list): + result = [] + for req_state, prompt_len in zip(hidden_states, prompt_lens): + assert prompt_len == req_state.shape[0], \ + "partial prefill not supported with mean pooling" + result.append(torch.mean(req_state, dim=0, + dtype=torch.float32)) + return result + # Use float32 for torch.cumsum in MeanPool, # otherwise precision will be lost significantly. cumsum = torch.cumsum(hidden_states, dim=0, dtype=torch.float32) @@ -184,30 +217,53 @@ class StepPool(SimplePooler): self.step_tag_id = step_tag_id self.returned_token_ids = returned_token_ids + def get_prompt_token_ids( + self, + pooling_metadata: PoolingMetadata, + ) -> list[torch.Tensor]: + if isinstance(pooling_metadata, V1PoolingMetadata): + return [ + pooling_metadata.prompt_token_ids[i, :num] + for i, num in enumerate(pooling_metadata.prompt_lens) + ] + return [ + torch.tensor(seq_data_i.prompt_token_ids) + for seq_data_i in pooling_metadata.seq_data.values() + ] + def extract_states( self, - hidden_states: torch.Tensor, + hidden_states: Union[torch.Tensor, list[torch.Tensor]], pooling_metadata: PoolingMetadata, ) -> Union[list[torch.Tensor], torch.Tensor]: prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) + prompt_token_ids = self.get_prompt_token_ids(pooling_metadata) + pooled_data: list[torch.Tensor] = [] + + if isinstance(hidden_states, list): + for req_state, prompt_len in zip(hidden_states, prompt_lens): + assert prompt_len == req_state.shape[0], \ + "partial prefill not supported with mean pooling" + pooled_data = hidden_states + else: + offset = 0 + for prompt_len in prompt_lens: + pooled_data_i = hidden_states[offset:offset + prompt_len] + offset += prompt_len + pooled_data.append(pooled_data_i) + + pooled_data = [] returned_token_ids = self.returned_token_ids - if returned_token_ids is not None and len(returned_token_ids) > 0: - hidden_states = hidden_states[:, returned_token_ids] - step_tag_id = self.step_tag_id - offset = 0 - pooled_data = list[torch.Tensor]() - for prompt_len, seq_data_i in zip(prompt_lens, - pooling_metadata.seq_data.values()): - pooled_data_i = hidden_states[offset:offset + prompt_len] - if step_tag_id is not None: - token_ids = torch.tensor(seq_data_i.prompt_token_ids) - pooled_data_i = pooled_data_i[token_ids == step_tag_id] + for data, token_id in zip(pooled_data, prompt_token_ids): + if returned_token_ids is not None and len(returned_token_ids) > 0: + data = data[:, returned_token_ids] - offset += prompt_len - pooled_data.append(pooled_data_i) + if step_tag_id is not None: + data = data[token_id == step_tag_id] + pooled_data.append(data) return pooled_data @@ -230,10 +286,17 @@ class PoolerHead(nn.Module): else: pooled_data = pooled_data.to(torch.float32) - dimensions_list = [ - pooling_param.dimensions - for _, pooling_param in pooling_metadata.seq_groups - ] + if isinstance(pooling_metadata, V0PoolingMetadata): + dimensions_list = [ + pooling_param.dimensions + for _, pooling_param in pooling_metadata.seq_groups + ] + else: + assert isinstance(pooled_data, list) + dimensions_list = [ + pooling_param.dimensions + for pooling_param in pooling_metadata.pooling_params + ] if any(d is not None for d in dimensions_list): # change the output dimension assert len(pooled_data) == len(dimensions_list) @@ -325,20 +388,41 @@ class ClassifierPooler(nn.Module): raise NotImplementedError(f"task={config.task!r} is not supported" " with the classification pooler") + def get_prompt_lens( + self, + hidden_states: Union[torch.Tensor, list[torch.Tensor]], + pooling_metadata: PoolingMetadata, + ) -> torch.Tensor: + if isinstance(pooling_metadata, V1PoolingMetadata): + return pooling_metadata.prompt_lens + assert isinstance(hidden_states, torch.Tensor) + return PoolingTensors.from_pooling_metadata( + pooling_metadata, hidden_states.device).prompt_lens + def forward( self, - hidden_states: torch.Tensor, + hidden_states: Union[torch.Tensor, list[torch.Tensor]], pooling_metadata: PoolingMetadata, ) -> PoolerOutput: """Pools sentence pair scores from the hidden_states.""" + prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) - prompt_lens = PoolingTensors.from_pooling_metadata( - pooling_metadata, hidden_states.device).prompt_lens + pooled_data = list[torch.Tensor]() + if isinstance(hidden_states, list): + for req_state, prompt_len in zip(hidden_states, prompt_lens): + assert prompt_len == req_state.shape[0], \ + "partial prefill not supported with classifier" + pooled_data = hidden_states + else: + offset = 0 + for prompt_len in prompt_lens: + pooled_data_i = hidden_states[offset:offset + prompt_len] + offset += prompt_len + pooled_data.append(pooled_data_i) offset = 0 pooled_data_lst = [] - for prompt_len in prompt_lens: - pooled_data_i = hidden_states[offset:offset + prompt_len] + for pooled_data_i in pooled_data: if self.pooler is not None: final_shape_tensor = self.pooler(pooled_data_i) @@ -346,7 +430,6 @@ class ClassifierPooler(nn.Module): final_shape_tensor = self.classifier(pooled_data_i) pooled_data_lst.append(final_shape_tensor) - offset += prompt_len pooled_output = torch.stack(pooled_data_lst) diff --git a/vllm/model_executor/models/bert.py b/vllm/model_executor/models/bert.py index 389393987c811..d6f6d9d1fb59a 100644 --- a/vllm/model_executor/models/bert.py +++ b/vllm/model_executor/models/bert.py @@ -446,8 +446,8 @@ class BertEmbeddingModel(nn.Module, SupportsV0Only, SupportsQuant): softmax=False) -class BertForSequenceClassification(nn.Module, SupportsCrossEncoding, - SupportsQuant): +class BertForSequenceClassification(nn.Module, SupportsV0Only, + SupportsCrossEncoding, SupportsQuant): """A model that uses Bert to provide embedding functionalities. This class encapsulates the BertModel and provides an interface for diff --git a/vllm/model_executor/models/modernbert.py b/vllm/model_executor/models/modernbert.py index 35f416a6e21e8..7c1f889e8f385 100644 --- a/vllm/model_executor/models/modernbert.py +++ b/vllm/model_executor/models/modernbert.py @@ -21,7 +21,7 @@ from vllm.model_executor.model_loader.weight_utils import default_weight_loader from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.sequence import IntermediateTensors, PoolerOutput -from .interfaces import SupportsCrossEncoding +from .interfaces import SupportsCrossEncoding, SupportsV0Only from .utils import WeightsMapper, maybe_prefix @@ -270,7 +270,8 @@ class ModernBertPooler(nn.Module): return pooled_output -class ModernBertForSequenceClassification(nn.Module, SupportsCrossEncoding): +class ModernBertForSequenceClassification(nn.Module, SupportsV0Only, + SupportsCrossEncoding): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() diff --git a/vllm/model_executor/models/qwen3.py b/vllm/model_executor/models/qwen3.py index bad0f6b1ffb73..216c1f1c7ff74 100644 --- a/vllm/model_executor/models/qwen3.py +++ b/vllm/model_executor/models/qwen3.py @@ -375,7 +375,12 @@ class Qwen3ForSequenceClassification(nn.Module, SupportsLoRA, ) -> Optional[PoolerOutput]: hidden_states = self._pooler.extract_states(hidden_states, pooling_metadata) - logits, _ = self.score(hidden_states) + + if isinstance(hidden_states, list): + logits = [self.score(state)[0] for state in hidden_states] + else: + logits, _ = self.score(hidden_states) + pooled_data = self._pooler.head(logits, pooling_metadata) pooled_outputs = [ self._pooler.build_output(data.squeeze(-1)) for data in pooled_data diff --git a/vllm/pooling_params.py b/vllm/pooling_params.py index 322f9ed3efa9f..b5c327bdd256b 100644 --- a/vllm/pooling_params.py +++ b/vllm/pooling_params.py @@ -5,6 +5,8 @@ from typing import TYPE_CHECKING, Any, Optional import msgspec +from vllm.sampling_params import RequestOutputKind + if TYPE_CHECKING: from vllm.config import ModelConfig @@ -23,6 +25,7 @@ class PoolingParams( dimensions: Optional[int] = None additional_data: Optional[Any] = None + output_kind: RequestOutputKind = RequestOutputKind.FINAL_ONLY def clone(self) -> "PoolingParams": """Returns a deep copy of the PoolingParams instance.""" @@ -52,3 +55,7 @@ class PoolingParams( return (f"PoolingParams(" f"dimensions={self.dimensions}, " f"additional_metadata={self.additional_data})") + + def __post_init__(self) -> None: + assert self.output_kind == RequestOutputKind.FINAL_ONLY,\ + "For pooling output_kind has to be FINAL_ONLY" diff --git a/vllm/v1/core/kv_cache_manager.py b/vllm/v1/core/kv_cache_manager.py index 99531e7d213dd..08bb0efb2f3dd 100644 --- a/vllm/v1/core/kv_cache_manager.py +++ b/vllm/v1/core/kv_cache_manager.py @@ -146,7 +146,8 @@ class KVCacheManager: # Prefix caching is disabled or # When the request requires prompt logprobs, we skip prefix caching. if (not self.enable_caching - or request.sampling_params.prompt_logprobs is not None): + or (request.sampling_params is not None + and request.sampling_params.prompt_logprobs is not None)): return self.create_empty_block_list(), 0 # The block hashes for the request may already be computed diff --git a/vllm/v1/core/sched/output.py b/vllm/v1/core/sched/output.py index 9b0a439fe7dcd..6f31031a1086e 100644 --- a/vllm/v1/core/sched/output.py +++ b/vllm/v1/core/sched/output.py @@ -14,6 +14,7 @@ if TYPE_CHECKING: KVConnectorMetadata) from vllm.lora.request import LoRARequest from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange + from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams from vllm.v1.request import Request @@ -26,7 +27,8 @@ class NewRequestData: mm_inputs: list[MultiModalKwargs] mm_hashes: list[str] mm_positions: list[PlaceholderRange] - sampling_params: SamplingParams + sampling_params: Optional[SamplingParams] + pooling_params: Optional[PoolingParams] block_ids: tuple[list[int], ...] num_computed_tokens: int lora_request: Optional[LoRARequest] @@ -44,6 +46,7 @@ class NewRequestData: mm_hashes=request.mm_hashes, mm_positions=request.mm_positions, sampling_params=request.sampling_params, + pooling_params=request.pooling_params, block_ids=block_ids, num_computed_tokens=request.num_computed_tokens, lora_request=request.lora_request, diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index 2d2274ab6a4d5..16e76defdf721 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -402,6 +402,15 @@ class Scheduler(SchedulerInterface): < num_new_tokens): num_new_tokens = ( self.scheduler_config.long_prefill_token_threshold) + + # chunked prefill has to be enabled explicitly to allow + # pooling requests to be chunked + if not self.scheduler_config.chunked_prefill_enabled and \ + num_new_tokens > token_budget: + self.waiting.popleft() + skipped_waiting_requests.appendleft(request) + continue + num_new_tokens = min(num_new_tokens, token_budget) assert num_new_tokens > 0 @@ -707,6 +716,7 @@ class Scheduler(SchedulerInterface): logprobs = model_runner_output.logprobs prompt_logprobs_dict = model_runner_output.prompt_logprobs_dict num_scheduled_tokens = scheduler_output.num_scheduled_tokens + pooler_outputs = model_runner_output.pooler_output new_running: list[Request] = [] outputs: dict[int, list[EngineCoreOutput]] = defaultdict(list) @@ -724,7 +734,8 @@ class Scheduler(SchedulerInterface): continue req_index = model_runner_output.req_id_to_index[req_id] - generated_token_ids = sampled_token_ids[req_index] + generated_token_ids = sampled_token_ids[ + req_index] if sampled_token_ids else [] scheduled_spec_token_ids = ( scheduler_output.scheduled_spec_decode_tokens.get(req_id)) @@ -776,8 +787,17 @@ class Scheduler(SchedulerInterface): del new_token_ids[num_new:] # Trim new tokens if needed. break + pooler_output = None + if pooler_outputs: + pooler_output = pooler_outputs[req_index] + stopped = check_stop(request, self.max_model_len, + pooler_output) + if stopped: + kv_transfer_params = self._free_request(request) + # Extract sample logprobs if needed. - if request.sampling_params.logprobs is not None and logprobs: + if request.sampling_params is not None \ + and request.sampling_params.logprobs is not None and logprobs: # NOTE: once we support N tokens per step (spec decode), # the outer lists can be of length > 1. new_logprobs = logprobs.slice(req_index, req_index + 1) @@ -802,7 +822,8 @@ class Scheduler(SchedulerInterface): # Get prompt logprobs for this request. prompt_logprobs_tensors = prompt_logprobs_dict.get(req_id) - if new_token_ids or kv_transfer_params: + if new_token_ids or pooler_output is not None \ + or kv_transfer_params: # Add EngineCoreOutput for this Request. outputs[request.client_index].append( @@ -812,6 +833,7 @@ class Scheduler(SchedulerInterface): finish_reason=request.get_finished_reason(), new_logprobs=new_logprobs, new_prompt_logprobs_tensors=prompt_logprobs_tensors, + pooling_output=pooler_output, stop_reason=request.stop_reason, events=request.take_events(), kv_transfer_params=kv_transfer_params, diff --git a/vllm/v1/core/sched/utils.py b/vllm/v1/core/sched/utils.py index 1397c5f4c9a6d..42ec95091f962 100644 --- a/vllm/v1/core/sched/utils.py +++ b/vllm/v1/core/sched/utils.py @@ -1,15 +1,28 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from typing import Optional + +import torch + from vllm.v1.request import Request, RequestStatus -def check_stop(request: Request, max_model_len: int) -> bool: +def check_stop(request: Request, + max_model_len: int, + pooler_output: Optional[torch.Tensor] = None) -> bool: if (request.num_tokens >= max_model_len or request.num_output_tokens >= request.max_tokens): request.status = RequestStatus.FINISHED_LENGTH_CAPPED return True + if request.pooling_params: + if pooler_output is not None: + request.status = RequestStatus.FINISHED_STOPPED + return True + return False + sampling_params = request.sampling_params + assert sampling_params is not None last_token_id = request.output_token_ids[-1] if (not sampling_params.ignore_eos and last_token_id == request.eos_token_id): diff --git a/vllm/v1/engine/__init__.py b/vllm/v1/engine/__init__.py index 59463f1ba99f5..4d1696a9b43a5 100644 --- a/vllm/v1/engine/__init__.py +++ b/vllm/v1/engine/__init__.py @@ -7,10 +7,12 @@ from collections.abc import Sequence from typing import Any, Optional, Union import msgspec +import torch from vllm.lora.request import LoRARequest from vllm.multimodal import MultiModalKwargs from vllm.multimodal.inputs import PlaceholderRange +from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams from vllm.v1.metrics.stats import SchedulerStats from vllm.v1.outputs import LogprobsLists, LogprobsTensors @@ -50,7 +52,8 @@ class EngineCoreRequest( mm_inputs: Optional[Sequence[Optional[MultiModalKwargs]]] mm_hashes: Optional[list[str]] mm_placeholders: Optional[list[PlaceholderRange]] - sampling_params: SamplingParams + sampling_params: Optional[SamplingParams] + pooling_params: Optional[PoolingParams] eos_token_id: Optional[int] arrival_time: float lora_request: Optional[LoRARequest] @@ -104,6 +107,8 @@ class EngineCoreOutput( new_logprobs: Optional[LogprobsLists] = None new_prompt_logprobs_tensors: Optional[LogprobsTensors] = None + pooling_output: Optional[torch.Tensor] = None + finish_reason: Optional[FinishReason] = None stop_reason: Union[int, str, None] = None events: Optional[list[EngineCoreEvent]] = None diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index 7fb36cf5941e8..998c4c5ea3cf7 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -17,7 +17,7 @@ from vllm.inputs.preprocess import InputPreprocessor from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry -from vllm.outputs import RequestOutput +from vllm.outputs import PoolingRequestOutput, RequestOutput from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams @@ -228,8 +228,7 @@ class AsyncLLM(EngineClient): if self.errored: raise EngineDeadError() - assert isinstance(params, SamplingParams), \ - "Pooling is not supported in V1" + is_pooling = isinstance(params, PoolingParams) # Create a new output collector for the request. queue = RequestOutputCollector(output_kind=params.output_kind) @@ -240,7 +239,7 @@ class AsyncLLM(EngineClient): tokenization_kwargs, trace_headers, prompt_adapter_request, priority, data_parallel_rank) - if params.n == 1: + if is_pooling or params.n == 1: await self._add_request(request, prompt_str, None, 0, queue) return queue @@ -443,7 +442,7 @@ class AsyncLLM(EngineClient): stat_logger.record(scheduler_stats=scheduler_stats, iteration_stats=iteration_stats) - def encode( + async def encode( self, prompt: PromptType, pooling_params: PoolingParams, @@ -451,8 +450,75 @@ class AsyncLLM(EngineClient): lora_request: Optional[LoRARequest] = None, trace_headers: Optional[Mapping[str, str]] = None, priority: int = 0, - ): - raise ValueError("Not Supported on V1 yet.") + ) -> AsyncGenerator[PoolingRequestOutput, None]: + """ + Main function called by the API server to kick off a request + * 1) Making an AsyncStream corresponding to the Request. + * 2) Processing the Input. + * 3) Adding the Request to the EngineCore (separate process). + + A separate output_handler loop runs in a background AsyncIO task, + pulling outputs from EngineCore and putting them into the + per-request AsyncStream. + + The caller of generate() iterates the returned AsyncGenerator, + returning the RequestOutput back to the caller. + """ + + try: + # We start the output_handler on the first call to generate() so + # we can call __init__ before the event loop, which enables us + # to handle startup failure gracefully in the OpenAI server. + self._run_output_handler() + + q = await self.add_request( + request_id, + prompt, + pooling_params, + lora_request=lora_request, + trace_headers=trace_headers, + priority=priority, + ) + + # The output_handler task pushes items into the queue. + # This task pulls from the queue and yields to caller. + finished = False + while not finished: + # Note: drain queue without await if possible (avoids + # task switching under load which helps performance). + out = q.get_nowait() or await q.get() + assert isinstance(out, PoolingRequestOutput) + # Note: both OutputProcessor and EngineCore handle their + # own request cleanup based on finished. + finished = out.finished + yield out + + # If the request is disconnected by the client, generate() + # is cancelled. So, we abort the request if we end up here. + except asyncio.CancelledError: + await self.abort(request_id) + if self.log_requests: + logger.info("Request %s aborted.", request_id) + raise + + # Engine is dead. Do not abort since we shut down. + except EngineDeadError: + if self.log_requests: + logger.info("Request %s failed (engine dead).", request_id) + raise + + # Request validation error. + except ValueError: + if self.log_requests: + logger.info("Request %s failed (bad request).", request_id) + raise + + # Unexpected error in the generate() task (possibly recoverable). + except Exception as e: + await self.abort(request_id) + if self.log_requests: + logger.info("Request %s failed.", request_id) + raise EngineGenerateError() from e async def get_vllm_config(self) -> VllmConfig: return self.vllm_config diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index 57fcf8daa5a1b..da65550354d09 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -60,7 +60,6 @@ class EngineCore: executor_class: type[Executor], log_stats: bool, executor_fail_callback: Optional[Callable] = None): - assert vllm_config.model_config.runner_type != "pooling" # plugins need to be loaded at the engine/scheduler level too from vllm.plugins import load_general_plugins diff --git a/vllm/v1/engine/detokenizer.py b/vllm/v1/engine/detokenizer.py index 35aceba0fe766..2f5504ea14b41 100644 --- a/vllm/v1/engine/detokenizer.py +++ b/vllm/v1/engine/detokenizer.py @@ -50,6 +50,8 @@ class IncrementalDetokenizer: request: EngineCoreRequest, ) -> "IncrementalDetokenizer": + assert request.sampling_params is not None + if tokenizer is None: # No tokenizer => skipping detokenization. return IncrementalDetokenizer() @@ -70,6 +72,7 @@ class BaseIncrementalDetokenizer(IncrementalDetokenizer, ABC): # Stop strings params = request.sampling_params + assert params is not None self.stop = stop = params.stop self.include_stop_str_in_output = params.include_stop_str_in_output @@ -164,6 +167,7 @@ class FastIncrementalDetokenizer(BaseIncrementalDetokenizer): super().__init__(request) sampling_params = request.sampling_params + assert sampling_params is not None self.request_id = request.request_id self.skip_special_tokens = sampling_params.skip_special_tokens @@ -245,20 +249,20 @@ class SlowIncrementalDetokenizer(BaseIncrementalDetokenizer): super().__init__(request) self.tokenizer = tokenizer + params = request.sampling_params + assert params is not None # Metadata for incremental detokenization. self.tokens, self.prefix_offset, self.read_offset = ( convert_prompt_ids_to_tokens( tokenizer=tokenizer, prompt_ids=request.prompt_token_ids, - skip_special_tokens=request.sampling_params. - skip_special_tokens, + skip_special_tokens=params.skip_special_tokens, )) self.token_ids.extend(request.prompt_token_ids) self.prompt_len = len(request.prompt_token_ids) - params = request.sampling_params self.skip_special_tokens = params.skip_special_tokens self.spaces_between_special_tokens = ( params.spaces_between_special_tokens) diff --git a/vllm/v1/engine/llm_engine.py b/vllm/v1/engine/llm_engine.py index 736ffd8b40f00..1932cd10bb1bc 100644 --- a/vllm/v1/engine/llm_engine.py +++ b/vllm/v1/engine/llm_engine.py @@ -15,7 +15,7 @@ from vllm.inputs import PromptType from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.multimodal import MULTIMODAL_REGISTRY, MultiModalRegistry -from vllm.outputs import RequestOutput +from vllm.outputs import PoolingRequestOutput, RequestOutput from vllm.pooling_params import PoolingParams from vllm.prompt_adapter.request import PromptAdapterRequest from vllm.sampling_params import SamplingParams @@ -221,7 +221,7 @@ class LLMEngine: # Add the request to EngineCore. self.engine_core.add_request(child_request) - def step(self) -> list[RequestOutput]: + def step(self) -> Union[list[RequestOutput], list[PoolingRequestOutput]]: if self.should_execute_dummy_batch: self.should_execute_dummy_batch = False diff --git a/vllm/v1/engine/logprobs.py b/vllm/v1/engine/logprobs.py index edc3be5b0120e..e95da0a5e5aaf 100644 --- a/vllm/v1/engine/logprobs.py +++ b/vllm/v1/engine/logprobs.py @@ -38,6 +38,7 @@ class LogprobsProcessor: tokenizer: Optional[AnyTokenizer], request: EngineCoreRequest, ) -> "LogprobsProcessor": + assert request.sampling_params is not None num_logprobs = request.sampling_params.logprobs num_prompt_logprobs = request.sampling_params.prompt_logprobs return cls( diff --git a/vllm/v1/engine/output_processor.py b/vllm/v1/engine/output_processor.py index 1dcfbab30cfb3..2bcd61d1f0aa1 100644 --- a/vllm/v1/engine/output_processor.py +++ b/vllm/v1/engine/output_processor.py @@ -4,9 +4,12 @@ import asyncio from collections.abc import Iterable from dataclasses import dataclass -from typing import Any, Optional, Union +from typing import Any, Optional, Union, cast -from vllm.outputs import CompletionOutput, RequestOutput +import torch + +from vllm.outputs import (CompletionOutput, PoolingOutput, + PoolingRequestOutput, RequestOutput) from vllm.sampling_params import RequestOutputKind from vllm.transformers_utils.tokenizer import AnyTokenizer from vllm.transformers_utils.tokenizer_group import TokenizerGroup @@ -29,20 +32,22 @@ class RequestOutputCollector: def __init__(self, output_kind: RequestOutputKind): self.aggregate = output_kind == RequestOutputKind.DELTA - self.output: Optional[Union[RequestOutput, Exception]] = None + self.output: Optional[Union[RequestOutput, PoolingRequestOutput, + Exception]] = None self.ready = asyncio.Event() - def put(self, output: Union[RequestOutput, Exception]) -> None: + def put(self, output: Union[RequestOutput, PoolingRequestOutput, + Exception]) -> None: """Non-blocking put operation.""" if self.output is None or isinstance(output, Exception): self.output = output self.ready.set() - elif isinstance(self.output, RequestOutput): + elif isinstance(self.output, (RequestOutput, PoolingRequestOutput)): # This ensures that request outputs with different request indexes # (if n > 1) do not override each other. self.output.add(output, aggregate=self.aggregate) - async def get(self) -> RequestOutput: + async def get(self) -> Union[RequestOutput, PoolingRequestOutput]: """Get operation blocks on put event.""" while (output := self.output) is None: await self.ready.wait() @@ -52,7 +57,8 @@ class RequestOutputCollector: raise output return output - def get_nowait(self) -> Optional[RequestOutput]: + def get_nowait( + self) -> Optional[Union[RequestOutput, PoolingRequestOutput]]: """Non-blocking get operation.""" output = self.output if output is not None: @@ -66,7 +72,7 @@ class RequestOutputCollector: @dataclass class OutputProcessorOutput: - request_outputs: list[RequestOutput] + request_outputs: list[Union[RequestOutput, PoolingRequestOutput]] reqs_to_abort: list[str] @@ -81,8 +87,8 @@ class RequestState: output_kind: RequestOutputKind, prompt: Optional[str], prompt_token_ids: list[int], - logprobs_processor: LogprobsProcessor, - detokenizer: IncrementalDetokenizer, + logprobs_processor: Optional[LogprobsProcessor], + detokenizer: Optional[IncrementalDetokenizer], max_tokens_param: Optional[int], arrival_time: float, queue: Optional[RequestOutputCollector], @@ -116,27 +122,39 @@ class RequestState: queue: Optional[RequestOutputCollector], log_stats: bool, ) -> "RequestState": - if not request.sampling_params.detokenize: - tokenizer = None + + if sampling_params := request.sampling_params: + if not sampling_params.detokenize: + tokenizer = None + output_kind = sampling_params.output_kind + logprobs_processor = LogprobsProcessor.from_new_request( + tokenizer=tokenizer, + request=request, + ) + detokenizer = IncrementalDetokenizer.from_new_request( + tokenizer=tokenizer, + request=request, + ) + max_tokens_param = sampling_params.max_tokens + else: + logprobs_processor = None + detokenizer = None + max_tokens_param = None + assert request.pooling_params is not None + output_kind = request.pooling_params.output_kind + return cls( request_id=request.request_id, parent_req=parent_req, request_index=request_index, lora_name=(request.lora_request.name if request.lora_request is not None else None), - output_kind=request.sampling_params.output_kind, + output_kind=output_kind, prompt=prompt, prompt_token_ids=request.prompt_token_ids, - logprobs_processor=LogprobsProcessor.from_new_request( - tokenizer=tokenizer, - request=request, - ), - detokenizer=IncrementalDetokenizer.from_new_request( - tokenizer=tokenizer, - request=request, - ), - max_tokens_param=(request.sampling_params.max_tokens if - request.sampling_params is not None else None), + logprobs_processor=logprobs_processor, + detokenizer=detokenizer, + max_tokens_param=max_tokens_param, arrival_time=request.arrival_time, queue=queue, log_stats=log_stats, @@ -145,11 +163,12 @@ class RequestState: def make_request_output( self, new_token_ids: list[int], + pooling_output: Optional[torch.Tensor], finish_reason: Optional[FinishReason], stop_reason: Union[int, str, None], kv_transfer_params: Optional[dict[str, Any]] = None, num_cached_tokens: int = 0, - ) -> Optional[RequestOutput]: + ) -> Optional[Union[RequestOutput, PoolingRequestOutput]]: finished = finish_reason is not None final_only = self.output_kind == RequestOutputKind.FINAL_ONLY @@ -158,15 +177,20 @@ class RequestState: # Only the final output is required in FINAL_ONLY mode. return None - completion_output = self._new_completion_output( - new_token_ids, finish_reason, stop_reason) - request_id = self.request_id + if pooling_output is not None: + return self._new_request_output( + request_id, [self._new_pooling_output(pooling_output)], + finished) + + output = self._new_completion_output(new_token_ids, finish_reason, + stop_reason) + if self.parent_req is None: - outputs = [completion_output] + outputs = [output] else: request_id, outputs, finished = self.parent_req.get_outputs( - request_id, completion_output) + request_id, output) if not outputs: return None @@ -176,12 +200,21 @@ class RequestState: def _new_request_output( self, request_id: str, - outputs: list[CompletionOutput], + outputs: Union[list[CompletionOutput], list[PoolingOutput]], finished: bool, kv_transfer_params: Optional[dict[str, Any]] = None, num_cached_tokens: int = 0, - ) -> RequestOutput: + ) -> Union[RequestOutput, PoolingRequestOutput]: + if isinstance(outputs[0], PoolingOutput): + assert len(outputs) == 1 + return PoolingRequestOutput( + request_id=request_id, + outputs=outputs[0], + prompt_token_ids=self.prompt_token_ids, + finished=finished, + ) + assert self.logprobs_processor is not None if self.output_kind == RequestOutputKind.DELTA: # Side effect: logprobs processor forgets prompt logprobs prompt_logprobs = self.logprobs_processor.pop_prompt_logprobs() @@ -193,7 +226,7 @@ class RequestState: prompt=self.prompt, prompt_token_ids=self.prompt_token_ids, prompt_logprobs=prompt_logprobs, - outputs=outputs, + outputs=cast(list[CompletionOutput], outputs), finished=finished, kv_transfer_params=kv_transfer_params, num_cached_tokens=num_cached_tokens, @@ -206,6 +239,8 @@ class RequestState: stop_reason: Union[int, str, None], ) -> CompletionOutput: + assert self.detokenizer is not None + assert self.logprobs_processor is not None finished = finish_reason is not None delta = self.output_kind == RequestOutputKind.DELTA @@ -228,6 +263,13 @@ class RequestState: finish_reason=str(finish_reason) if finished else None, stop_reason=stop_reason if finished else None) + def _new_pooling_output( + self, + pooling_output: torch.Tensor, + ) -> PoolingOutput: + + return PoolingOutput(data=pooling_output) + class OutputProcessor: """Process EngineCoreOutputs into RequestOutputs.""" @@ -326,7 +368,8 @@ class OutputProcessor: within the loop below. """ - request_outputs: list[RequestOutput] = [] + request_outputs: Union[list[RequestOutput], + list[PoolingRequestOutput]] = [] reqs_to_abort: list[str] = [] for engine_core_output in engine_core_outputs: req_id = engine_core_output.request_id @@ -341,25 +384,31 @@ class OutputProcessor: iteration_stats) new_token_ids = engine_core_output.new_token_ids + pooling_output = engine_core_output.pooling_output finish_reason = engine_core_output.finish_reason stop_reason = engine_core_output.stop_reason kv_transfer_params = engine_core_output.kv_transfer_params num_cached_tokens = engine_core_output.num_cached_tokens req_state.is_prefilling = False - # 2) Detokenize the token ids into text and perform stop checks. - stop_string = req_state.detokenizer.update( - new_token_ids, finish_reason == FinishReason.STOP) - if stop_string: - finish_reason = FinishReason.STOP - stop_reason = stop_string + if pooling_output is None: + assert req_state.detokenizer is not None + assert req_state.logprobs_processor is not None + # 2) Detokenize the token ids into text and perform stop checks. + stop_string = req_state.detokenizer.update( + new_token_ids, finish_reason == FinishReason.STOP) + if stop_string: + finish_reason = FinishReason.STOP + stop_reason = stop_string - # 3) Compute sample and prompt logprobs for request, if required. - req_state.logprobs_processor.update_from_output(engine_core_output) + # 3) Compute sample and prompt logprobs for request, + # if required. + req_state.logprobs_processor.update_from_output( + engine_core_output) # 4) Create and handle RequestOutput objects. if request_output := req_state.make_request_output( - new_token_ids, finish_reason, stop_reason, + new_token_ids, pooling_output, finish_reason, stop_reason, kv_transfer_params, num_cached_tokens): if req_state.queue is not None: # AsyncLLM: put into queue for handling by generate(). diff --git a/vllm/v1/engine/processor.py b/vllm/v1/engine/processor.py index e28879d40460e..b00f1444c7b3c 100644 --- a/vllm/v1/engine/processor.py +++ b/vllm/v1/engine/processor.py @@ -136,8 +136,8 @@ class Processor: Should raise ValueError if unsupported for API Server. """ - if not isinstance(params, SamplingParams): - raise ValueError("V1 does not yet support Pooling models.") + if isinstance(params, PoolingParams): + return self._validate_logprobs(params) self._validate_sampling_params(params, lora_request) @@ -263,18 +263,22 @@ class Processor: if encoder_inputs is not None: raise NotImplementedError - assert isinstance(params, SamplingParams) - # TODO: can we avoid cloning here in multiproc case? - sampling_params = params.clone() - # If unset max tokens, then generate up to the max_model_len. - if sampling_params.max_tokens is None: - sampling_params.max_tokens = ( - self.model_config.max_model_len - - len(decoder_inputs["prompt_token_ids"])) - sampling_params.update_from_generation_config( - self.generation_config_fields, eos_token_id) - sampling_params.update_from_tokenizer( - self.tokenizer.get_lora_tokenizer(lora_request)) + sampling_params = None + pooling_params = None + if isinstance(params, SamplingParams): + # TODO: can we avoid cloning here in multiproc case? + sampling_params = params.clone() + # If unset max tokens, then generate up to the max_model_len. + if sampling_params.max_tokens is None: + sampling_params.max_tokens = ( + self.model_config.max_model_len - + len(decoder_inputs["prompt_token_ids"])) + sampling_params.update_from_generation_config( + self.generation_config_fields, eos_token_id) + sampling_params.update_from_tokenizer( + self.tokenizer.get_lora_tokenizer(lora_request)) + else: + pooling_params = params.clone() # Multimodal related. sorted_mm_inputs: Optional[Sequence[Optional[MultiModalKwargs]]] = None @@ -331,6 +335,7 @@ class Processor: mm_hashes=sorted_mm_hashes, mm_placeholders=sorted_mm_positions, sampling_params=sampling_params, + pooling_params=pooling_params, eos_token_id=eos_token_id, arrival_time=arrival_time, lora_request=lora_request, diff --git a/vllm/v1/metrics/loggers.py b/vllm/v1/metrics/loggers.py index 11865a0fd1f27..c720ca13e51b2 100644 --- a/vllm/v1/metrics/loggers.py +++ b/vllm/v1/metrics/loggers.py @@ -481,8 +481,9 @@ class PrometheusStatLogger(StatLoggerBase): finished_request.num_prompt_tokens) self.histogram_num_generation_tokens_request.observe( finished_request.num_generation_tokens) - self.histogram_max_tokens_request.observe( - finished_request.max_tokens_param) + if finished_request.max_tokens_param: + self.histogram_max_tokens_request.observe( + finished_request.max_tokens_param) if self.gauge_lora_info is not None: running_lora_adapters = \ diff --git a/vllm/v1/metrics/stats.py b/vllm/v1/metrics/stats.py index 4a5d5fac49d1d..716f40fffb282 100644 --- a/vllm/v1/metrics/stats.py +++ b/vllm/v1/metrics/stats.py @@ -106,7 +106,6 @@ class IterationStats: self.num_generation_tokens += num_new_generation_tokens if is_prefilling: - assert num_new_generation_tokens > 0 self.num_prompt_tokens += prompt_len first_token_latency = self._time_since(req_stats.arrival_time) diff --git a/vllm/v1/outputs.py b/vllm/v1/outputs.py index 17a299d57cbaa..2234843293cc6 100644 --- a/vllm/v1/outputs.py +++ b/vllm/v1/outputs.py @@ -101,6 +101,9 @@ class ModelRunnerOutput: # [prompt_len] prompt_logprobs_dict: dict[str, Optional[LogprobsTensors]] + # [num_reqs, hidden_size] + pooler_output: list[Optional[torch.Tensor]] + # [req_ids] finished_sending: Optional[set[str]] = None finished_recving: Optional[set[str]] = None @@ -112,5 +115,6 @@ EMPTY_MODEL_RUNNER_OUTPUT = ModelRunnerOutput(req_ids=[], spec_token_ids=None, logprobs=None, prompt_logprobs_dict={}, + pooler_output=[], finished_sending=None, finished_recving=None) diff --git a/vllm/v1/pool/__init__.py b/vllm/v1/pool/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/vllm/v1/pool/metadata.py b/vllm/v1/pool/metadata.py new file mode 100644 index 0000000000000..d70a0d0446617 --- /dev/null +++ b/vllm/v1/pool/metadata.py @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: Apache-2.0 +from dataclasses import dataclass +from typing import Optional + +import torch + +from vllm.pooling_params import PoolingParams + + +@dataclass +class PoolingMetadata: + """Tensors for pooling.""" + + prompt_lens: torch.Tensor + prompt_token_ids: Optional[torch.Tensor] + pooling_params: list[PoolingParams] diff --git a/vllm/v1/request.py b/vllm/v1/request.py index 694e271e5ad74..e3f3a418755c3 100644 --- a/vllm/v1/request.py +++ b/vllm/v1/request.py @@ -5,6 +5,7 @@ import enum from typing import TYPE_CHECKING, Any, Optional, Union from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange +from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams from vllm.utils import is_list_of from vllm.v1.engine import (EngineCoreEvent, EngineCoreEventType, @@ -25,7 +26,8 @@ class Request: multi_modal_inputs: Optional[list[MultiModalKwargs]], multi_modal_hashes: Optional[list[str]], multi_modal_placeholders: Optional[list[PlaceholderRange]], - sampling_params: SamplingParams, + sampling_params: Optional[SamplingParams], + pooling_params: Optional[PoolingParams], eos_token_id: Optional[int], client_index: int = 0, lora_request: Optional["LoRARequest"] = None, @@ -35,18 +37,35 @@ class Request: self.request_id = request_id self.client_index = client_index self.sampling_params = sampling_params + self.pooling_params = pooling_params # Because of LoRA, the eos token id can be different for each request. self.eos_token_id = eos_token_id self.lora_request = lora_request self.structured_output_request = structured_output_request - self.status = (RequestStatus.WAITING_FOR_FSM - if sampling_params.guided_decoding is not None else - RequestStatus.WAITING) + self.status = RequestStatus.WAITING + if sampling_params and sampling_params.guided_decoding is not None: + self.status = RequestStatus.WAITING_FOR_FSM self.events: list[EngineCoreEvent] = [] self.stop_reason: Union[int, str, None] = None - assert sampling_params.max_tokens is not None - self.max_tokens = sampling_params.max_tokens + + # P/D: Connector-specific KV transfer parameters. + self.kv_transfer_params: Optional[dict[str, Any]] = None + + if pooling_params is not None: + self.max_tokens = 1 + elif sampling_params is not None: + assert sampling_params.max_tokens is not None + self.max_tokens = sampling_params.max_tokens + if sampling_params.guided_decoding is not None: + self.status = RequestStatus.WAITING_FOR_FSM + + if sampling_params.extra_args is not None: + self.kv_transfer_params = \ + sampling_params.extra_args.get("kv_transfer_params") + else: + raise ValueError( + "sampling_params and pooling_params can't both be unset") self.prompt_token_ids = prompt_token_ids self.num_prompt_tokens = len(self.prompt_token_ids) @@ -63,11 +82,6 @@ class Request: self.num_encoder_inputs = len(self.mm_inputs) self.has_encoder_inputs = self.num_encoder_inputs > 0 - # P/D: Connector-specific KV transfer parameters. - kv_params = (None if sampling_params.extra_args is None else - sampling_params.extra_args.get("kv_transfer_params")) - self.kv_transfer_params: Optional[dict[str, Any]] = kv_params - # Sanity check assert len(self.mm_inputs) == len(self.mm_positions) if self.mm_hashes: @@ -98,10 +112,12 @@ class Request: multi_modal_hashes=request.mm_hashes, multi_modal_placeholders=request.mm_placeholders, sampling_params=request.sampling_params, + pooling_params=request.pooling_params, eos_token_id=request.eos_token_id, lora_request=request.lora_request, structured_output_request=StructuredOutputRequest( - sampling_params=request.sampling_params), + sampling_params=request.sampling_params) \ + if request.sampling_params else None, cache_salt=request.cache_salt, ) @@ -141,7 +157,8 @@ class Request: @property def use_structured_output(self) -> bool: - return self.sampling_params.guided_decoding is not None + return self.sampling_params is not None and \ + self.sampling_params.guided_decoding is not None def record_event( self, diff --git a/vllm/v1/structured_output/__init__.py b/vllm/v1/structured_output/__init__.py index b2b0ee7969543..c5500b9a384d0 100644 --- a/vllm/v1/structured_output/__init__.py +++ b/vllm/v1/structured_output/__init__.py @@ -62,13 +62,15 @@ class StructuredOutputManager: return if TYPE_CHECKING: - assert request.sampling_params.guided_decoding is not None + assert request.sampling_params is not None and \ + request.sampling_params.guided_decoding is not None # Initialize the backend the first time it is needed. # # NOTE: We only support a single backend. We do NOT support different # backends on a per-request basis in V1 (for now, anyway...). if self.backend is None: + assert request.sampling_params is not None backend = request.sampling_params.guided_decoding.backend vocab_size = self.vllm_config.model_config.get_vocab_size() if backend == "xgrammar": diff --git a/vllm/v1/worker/gpu_input_batch.py b/vllm/v1/worker/gpu_input_batch.py index e76293f98a51f..3a2c9ef7dface 100644 --- a/vllm/v1/worker/gpu_input_batch.py +++ b/vllm/v1/worker/gpu_input_batch.py @@ -10,9 +10,11 @@ import torch from vllm.lora.request import LoRARequest from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange +from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams, SamplingType from vllm.utils import swap_dict_values from vllm.v1.outputs import LogprobsTensors +from vllm.v1.pool.metadata import PoolingMetadata from vllm.v1.sample.metadata import SamplingMetadata from vllm.v1.utils import copy_slice from vllm.v1.worker.block_table import MultiGroupBlockTable @@ -27,7 +29,8 @@ class CachedRequestState: prompt_token_ids: list[int] mm_inputs: list[MultiModalKwargs] mm_positions: list[PlaceholderRange] - sampling_params: SamplingParams + sampling_params: Optional[SamplingParams] + pooling_params: Optional[PoolingParams] generator: Optional[torch.Generator] block_ids: tuple[list[int], ...] @@ -226,6 +229,8 @@ class InputBatch: # This is updated each time the batch constituents change. self.sampling_metadata = self._make_sampling_metadata() + self.pooling_params: dict[str, PoolingParams] = {} + @property def req_ids(self) -> list[str]: # None elements should only be present transiently @@ -269,77 +274,83 @@ class InputBatch: self.num_computed_tokens_cpu[req_index] = request.num_computed_tokens self.block_table.add_row(request.block_ids, req_index) - sampling_params = request.sampling_params - if sampling_params.sampling_type == SamplingType.GREEDY: - # Avoid later division by zero. - self.temperature_cpu[req_index] = -1.0 - self.greedy_reqs.add(req_id) - else: - self.temperature_cpu[req_index] = sampling_params.temperature - self.random_reqs.add(req_id) + if sampling_params := request.sampling_params: + if sampling_params.sampling_type == SamplingType.GREEDY: + # Avoid later division by zero. + self.temperature_cpu[req_index] = -1.0 + self.greedy_reqs.add(req_id) + else: + self.temperature_cpu[req_index] = sampling_params.temperature + self.random_reqs.add(req_id) - self.top_p_cpu[req_index] = sampling_params.top_p - if sampling_params.top_p < 1: - self.top_p_reqs.add(req_id) - top_k = sampling_params.top_k - if 0 < top_k < self.vocab_size: - self.top_k_reqs.add(req_id) - else: - top_k = self.vocab_size - self.top_k_cpu[req_index] = top_k - self.min_p_cpu[req_index] = sampling_params.min_p - self.frequency_penalties_cpu[ - req_index] = sampling_params.frequency_penalty - if sampling_params.min_p > _SAMPLING_EPS: - self.min_p_reqs.add(req_id) - if sampling_params.frequency_penalty != 0.0: - self.frequency_penalties_reqs.add(req_id) - self.presence_penalties_cpu[ - req_index] = sampling_params.presence_penalty - if sampling_params.presence_penalty != 0.0: - self.presence_penalties_reqs.add(req_id) - self.repetition_penalties_cpu[ - req_index] = sampling_params.repetition_penalty - if sampling_params.repetition_penalty != 1.0: - self.repetition_penalties_reqs.add(req_id) - if sampling_params.min_tokens: - self.min_tokens[req_index] = (sampling_params.min_tokens, - sampling_params.all_stop_token_ids) + self.top_p_cpu[req_index] = sampling_params.top_p + if sampling_params.top_p < 1: + self.top_p_reqs.add(req_id) + top_k = sampling_params.top_k + if 0 < top_k < self.vocab_size: + self.top_k_reqs.add(req_id) + else: + top_k = self.vocab_size + self.top_k_cpu[req_index] = top_k + self.min_p_cpu[req_index] = sampling_params.min_p + self.frequency_penalties_cpu[ + req_index] = sampling_params.frequency_penalty + if sampling_params.min_p > _SAMPLING_EPS: + self.min_p_reqs.add(req_id) + if sampling_params.frequency_penalty != 0.0: + self.frequency_penalties_reqs.add(req_id) + self.presence_penalties_cpu[ + req_index] = sampling_params.presence_penalty + if sampling_params.presence_penalty != 0.0: + self.presence_penalties_reqs.add(req_id) + self.repetition_penalties_cpu[ + req_index] = sampling_params.repetition_penalty + if sampling_params.repetition_penalty != 1.0: + self.repetition_penalties_reqs.add(req_id) + if sampling_params.min_tokens: + self.min_tokens[req_index] = ( + sampling_params.min_tokens, + sampling_params.all_stop_token_ids) - # NOTE(woosuk): self.generators should not include the requests that - # do not have their own generator. - if request.generator is not None: - self.generators[req_index] = request.generator + # NOTE(woosuk): self.generators should not include the requests that + # do not have their own generator. + if request.generator is not None: + self.generators[req_index] = request.generator - if sampling_params.logprobs is not None: - self.num_logprobs[req_id] = sampling_params.logprobs - if sampling_params.prompt_logprobs is not None: - self.num_prompt_logprobs[req_id] = sampling_params.prompt_logprobs - if sampling_params.logit_bias is not None: - self.logit_bias[req_index] = sampling_params.logit_bias + if sampling_params.logprobs is not None: + self.num_logprobs[req_id] = sampling_params.logprobs + if sampling_params.prompt_logprobs is not None: + self.num_prompt_logprobs[ + req_id] = sampling_params.prompt_logprobs + if sampling_params.logit_bias is not None: + self.logit_bias[req_index] = sampling_params.logit_bias - if sampling_params.allowed_token_ids: - self.has_allowed_token_ids.add(req_id) - if self.allowed_token_ids_mask_cpu_tensor is None: - # Lazy allocation for this tensor, which can be large. + if sampling_params.allowed_token_ids: + self.has_allowed_token_ids.add(req_id) + if self.allowed_token_ids_mask_cpu_tensor is None: + # Lazy allocation for this tensor, which can be large. + # False means we don't fill with -inf. + self.allowed_token_ids_mask = torch.zeros( + self.max_num_reqs, + self.vocab_size, + dtype=torch.bool, + device=self.device) + self.allowed_token_ids_mask_cpu_tensor = torch.zeros( + self.max_num_reqs, + self.vocab_size, + dtype=torch.bool, + device="cpu") + self.allowed_token_ids_mask_cpu_tensor[req_index] = True # False means we don't fill with -inf. - self.allowed_token_ids_mask = torch.zeros(self.max_num_reqs, - self.vocab_size, - dtype=torch.bool, - device=self.device) - self.allowed_token_ids_mask_cpu_tensor = torch.zeros( - self.max_num_reqs, - self.vocab_size, - dtype=torch.bool, - device="cpu") - self.allowed_token_ids_mask_cpu_tensor[req_index] = True - # False means we don't fill with -inf. - self.allowed_token_ids_mask_cpu_tensor[req_index][ - sampling_params.allowed_token_ids] = False + self.allowed_token_ids_mask_cpu_tensor[req_index][ + sampling_params.allowed_token_ids] = False - if sampling_params.bad_words_token_ids: - self.bad_words_token_ids[ - req_index] = sampling_params.bad_words_token_ids + if sampling_params.bad_words_token_ids: + self.bad_words_token_ids[ + req_index] = sampling_params.bad_words_token_ids + else: + assert request.pooling_params is not None + self.pooling_params[req_id] = request.pooling_params # Add request lora ID if request.lora_request: @@ -392,6 +403,7 @@ class InputBatch: # False means we don't fill with -inf. self.allowed_token_ids_mask_cpu_tensor[req_index].fill_(False) self.bad_words_token_ids.pop(req_index, None) + self.pooling_params.pop(req_id, None) return req_index def swap_states(self, i1: int, i2: int) -> None: @@ -602,6 +614,25 @@ class InputBatch: bad_words_token_ids=self.bad_words_token_ids, ) + @property + def pooling_metadata(self) -> PoolingMetadata: + if len(self.pooling_params) == 0: + pooling_params = [] + else: + # Note, for now this assumes that all request in the batch + # are either sampling or pooling requests + assert len(self.req_ids) == len(self.pooling_params) + pooling_params = [ + self.pooling_params[req_id] for req_id in self.req_ids + ] + + return PoolingMetadata( + prompt_lens=torch.from_numpy( + self.num_prompt_tokens[:self.num_reqs]).to(self.device), + prompt_token_ids=self.sampling_metadata.prompt_token_ids, + pooling_params=pooling_params, + ) + def _make_prompt_token_ids_tensor(self) -> torch.Tensor: max_prompt_len = self.num_prompt_tokens[:self.num_reqs].max() prompt_token_ids_cpu_tensor = torch.empty( diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index c4163eb2b8f59..f96fb64342c9f 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -36,6 +36,7 @@ from vllm.model_executor.model_loader import TensorizerLoader, get_model_loader from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange from vllm.multimodal.utils import group_mm_inputs_by_modality +from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingType from vllm.sequence import IntermediateTensors from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, @@ -51,6 +52,7 @@ from vllm.v1.kv_cache_interface import (AttentionSpec, FullAttentionSpec, SlidingWindowSpec) from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsTensors, ModelRunnerOutput) +from vllm.v1.pool.metadata import PoolingMetadata from vllm.v1.sample.metadata import SamplingMetadata from vllm.v1.sample.rejection_sampler import RejectionSampler from vllm.v1.sample.sampler import Sampler @@ -119,6 +121,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): cache_config.cache_dtype] self.is_multimodal_model = model_config.is_multimodal_model + self.is_pooling_model = model_config.pooler_config is not None self.max_model_len = model_config.max_model_len self.max_num_tokens = scheduler_config.max_num_batched_tokens self.max_num_reqs = scheduler_config.max_num_seqs @@ -394,7 +397,9 @@ class GPUModelRunner(LoRAModelRunnerMixin): for new_req_data in scheduler_output.scheduled_new_reqs: req_id = new_req_data.req_id sampling_params = new_req_data.sampling_params - if sampling_params.sampling_type == SamplingType.RANDOM_SEED: + pooling_params = new_req_data.pooling_params + if sampling_params and \ + sampling_params.sampling_type == SamplingType.RANDOM_SEED: generator = torch.Generator(device=self.device) generator.manual_seed(sampling_params.seed) else: @@ -406,6 +411,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): mm_inputs=new_req_data.mm_inputs, mm_positions=new_req_data.mm_positions, sampling_params=sampling_params, + pooling_params=pooling_params, generator=generator, block_ids=new_req_data.block_ids, num_computed_tokens=new_req_data.num_computed_tokens, @@ -563,7 +569,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): self, scheduler_output: "SchedulerOutput", ) -> tuple[dict[str, Any], bool, torch.Tensor, - Optional[SpecDecodeMetadata]]: + Optional[SpecDecodeMetadata], np.ndarray]: """ :return: tuple[ attn_metadata: layer-to-attention_metadata mapping, @@ -750,7 +756,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): self.set_active_loras(self.input_batch, num_scheduled_tokens) return (attn_metadata, attention_cuda_graphs, logits_indices, - spec_decode_metadata) + spec_decode_metadata, num_scheduled_tokens) def _compute_cascade_attn_prefix_len( self, @@ -1197,6 +1203,51 @@ class GPUModelRunner(LoRAModelRunnerMixin): dtype=torch.int32) return max_tokens_across_dp_cpu - num_tokens, num_tokens_after_padding + def _pool( + self, + hidden_states: torch.Tensor, + num_scheduled_tokens: int, + num_scheduled_tokens_np: np.ndarray, + finished_sending: Optional[set[str]], + finished_recving: Optional[set[str]], + ) -> ModelRunnerOutput: + assert self.input_batch.num_reqs ==\ + len(self.input_batch.pooling_params), \ + "Either all or none of the requests in" \ + " a batch must be pooling request" + + extracted_hidden_states = list( + torch.split(hidden_states[:num_scheduled_tokens], + num_scheduled_tokens_np.tolist())) + + pooling_metadata = self.input_batch.pooling_metadata + + raw_pooler_output = self.model.pooler( + hidden_states=extracted_hidden_states, + pooling_metadata=pooling_metadata) + + pooler_output: list[Optional[torch.Tensor]] = [] + seq_lens = self.seq_lens[:self.input_batch.num_reqs] + for raw_output, seq_len, prompt_len in zip( + raw_pooler_output, seq_lens, pooling_metadata.prompt_lens): + + if seq_len == prompt_len: + pooler_output.append(raw_output.data.cpu()) + else: + pooler_output.append(None) + + return ModelRunnerOutput( + req_ids=self.input_batch.req_ids, + req_id_to_index=self.input_batch.req_id_to_index, + sampled_token_ids=[], + spec_token_ids=None, + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=pooler_output, + finished_sending=finished_sending, + finished_recving=finished_recving, + ) + @torch.inference_mode() def execute_model( self, @@ -1214,7 +1265,8 @@ class GPUModelRunner(LoRAModelRunnerMixin): # Prepare the decoder inputs. (attn_metadata, attention_cuda_graphs, logits_indices, - spec_decode_metadata) = (self._prepare_inputs(scheduler_output)) + spec_decode_metadata, + num_scheduled_tokens_np) = (self._prepare_inputs(scheduler_output)) num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens if (self.use_cuda_graph and num_scheduled_tokens <= self.cudagraph_batch_sizes[-1]): @@ -1284,7 +1336,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): # compiled with full CUDA graphs, we have to skip them entirely. skip_cuda_graphs = self.full_cuda_graph and not attention_cuda_graphs - # Run the decoder. + # Run the model. # Use persistent buffers for CUDA graphs. with set_forward_context( attn_metadata, @@ -1326,6 +1378,11 @@ class GPUModelRunner(LoRAModelRunnerMixin): all_gather_group=get_tp_group()) logits = None else: + if self.input_batch.pooling_params: + return self._pool(hidden_states, num_scheduled_tokens, + num_scheduled_tokens_np, finished_sending, + finished_recving) + sample_hidden_states = hidden_states[logits_indices] logits = self.model.compute_logits(sample_hidden_states, None) if broadcast_pp_output: @@ -1541,6 +1598,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): spec_token_ids=spec_token_ids, logprobs=logprobs_lists, prompt_logprobs_dict=prompt_logprobs_dict, + pooler_output=[], finished_sending=finished_sending, finished_recving=finished_recving, ) @@ -1802,7 +1860,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): self, num_tokens: int, capture_attn_cudagraph: bool = False, - ) -> torch.Tensor: + ) -> tuple[torch.Tensor, torch.Tensor]: # Padding for DP num_pad, num_tokens_across_dp = self.get_dp_padding(num_tokens) @@ -1899,7 +1957,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): self.drafter.dummy_run(num_tokens) logit_indices = np.cumsum(num_scheduled_tokens) - 1 - return hidden_states[logit_indices] + return hidden_states, hidden_states[logit_indices] @torch.inference_mode() def _dummy_sampler_run( @@ -1978,6 +2036,48 @@ class GPUModelRunner(LoRAModelRunnerMixin): ) return sampler_output + @torch.inference_mode() + def _dummy_pooler_run( + self, + hidden_states: torch.Tensor, + ) -> torch.Tensor: + + num_tokens = hidden_states.shape[0] + max_num_reqs = self.scheduler_config.max_num_seqs + num_reqs = min(num_tokens, max_num_reqs) + min_tokens_per_req = num_tokens // num_reqs + num_scheduled_tokens_list = [min_tokens_per_req] * num_reqs + num_scheduled_tokens_list[-1] += num_tokens % num_reqs + assert sum(num_scheduled_tokens_list) == num_tokens + assert len(num_scheduled_tokens_list) == num_reqs + + hidden_states_list = list( + torch.split(hidden_states, num_scheduled_tokens_list)) + + req_num_tokens = num_tokens // num_reqs + + dummy_metadata = PoolingMetadata( + prompt_lens=torch.tensor([h.shape[0] for h in hidden_states_list], + device=self.device), + prompt_token_ids=torch.zeros((num_reqs, req_num_tokens), + dtype=torch.int32, + device=self.device), + pooling_params=[PoolingParams()] * num_reqs) + + try: + pooler_output = self.model.pooler(hidden_states=hidden_states_list, + pooling_metadata=dummy_metadata) + except RuntimeError as e: + if 'out of memory' in str(e): + raise RuntimeError( + "CUDA out of memory occurred when warming up pooler with " + f"{num_reqs} dummy requests. Please try lowering " + "`max_num_seqs` or `gpu_memory_utilization` when " + "initializing the engine.") from e + else: + raise e + return pooler_output + def profile_run(self) -> None: # Profile with multimodal encoder & encoder cache. # TODO: handle encoder-decoder models once we support them. @@ -2048,13 +2148,17 @@ class GPUModelRunner(LoRAModelRunnerMixin): # Cache the dummy encoder outputs. self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs)) - hidden_states = self._dummy_run(self.max_num_tokens) + hidden_states, last_hidden_states \ + = self._dummy_run(self.max_num_tokens) if get_pp_group().is_last_rank: - sampler_output = self._dummy_sampler_run(hidden_states) + if self.is_pooling_model: + output = self._dummy_pooler_run(hidden_states) + else: + output = self._dummy_sampler_run(last_hidden_states) else: - sampler_output = None + output = None self._sync_device() - del hidden_states, sampler_output + del hidden_states, output self.encoder_cache.clear() gc.collect() diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index 58795e3fe2927..b0f80c701325f 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -273,9 +273,14 @@ class Worker(WorkerBase): if get_pp_group().is_last_rank: max_num_reqs = min(self.scheduler_config.max_num_seqs, self.scheduler_config.max_num_batched_tokens) - self.model_runner._dummy_sampler_run( - hidden_states=self.model_runner._dummy_run( - num_tokens=max_num_reqs)) + + hidden_states, last_hidden_states = \ + self.model_runner._dummy_run(num_tokens=max_num_reqs) + if self.model_runner.is_pooling_model: + self.model_runner._dummy_pooler_run(hidden_states) + else: + self.model_runner._dummy_sampler_run( + hidden_states=last_hidden_states) # Reset the seed to ensure that the random state is not affected by # the model initialization and profiling. diff --git a/vllm/v1/worker/tpu_input_batch.py b/vllm/v1/worker/tpu_input_batch.py index 3f105ccc5d92d..81c798685cb3a 100644 --- a/vllm/v1/worker/tpu_input_batch.py +++ b/vllm/v1/worker/tpu_input_batch.py @@ -231,6 +231,7 @@ class InputBatch: self.block_table.add_row(request.block_ids, req_index) sampling_params = request.sampling_params + assert sampling_params is not None, "pooling requests not supported yet" if sampling_params.sampling_type == SamplingType.GREEDY: # Avoid later division by zero. self.temperature_cpu[req_index] = -1.0 diff --git a/vllm/v1/worker/tpu_model_runner.py b/vllm/v1/worker/tpu_model_runner.py index de5a0a1f55971..774caa1a3d98f 100644 --- a/vllm/v1/worker/tpu_model_runner.py +++ b/vllm/v1/worker/tpu_model_runner.py @@ -386,6 +386,8 @@ class TPUModelRunner(LoRAModelRunnerMixin): req_ids_to_add: list[str] = [] # Add new requests to the cached states. for new_req_data in scheduler_output.scheduled_new_reqs: + assert new_req_data.sampling_params is not None,\ + "Pooling is not supported in TPU yet" req_id = new_req_data.req_id sampling_params = new_req_data.sampling_params @@ -395,6 +397,7 @@ class TPUModelRunner(LoRAModelRunnerMixin): mm_inputs=new_req_data.mm_inputs, mm_positions=new_req_data.mm_positions, sampling_params=sampling_params, + pooling_params=None, generator=None, block_ids=new_req_data.block_ids, num_computed_tokens=new_req_data.num_computed_tokens, @@ -956,6 +959,7 @@ class TPUModelRunner(LoRAModelRunnerMixin): spec_token_ids=None, logprobs=logprobs_lists, prompt_logprobs_dict=prompt_logprobs_dict, + pooler_output=[], ) # Check there are no new graphs compiled - all the graphs should be From b1098b40723fd3cd02a6e30ca766bde083f61552 Mon Sep 17 00:00:00 2001 From: Lu Fang <30275821+houseroad@users.noreply.github.com> Date: Thu, 19 Jun 2025 12:44:41 +0800 Subject: [PATCH 035/211] [Bugfix] Fix the linter (#19826) Signed-off-by: Lu Fang --- vllm/model_executor/models/qwen2_5_omni_thinker.py | 10 +++++----- vllm/model_executor/models/qwen2_5_vl.py | 10 +++++----- vllm/model_executor/models/qwen2_vl.py | 10 +++++----- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/vllm/model_executor/models/qwen2_5_omni_thinker.py b/vllm/model_executor/models/qwen2_5_omni_thinker.py index c0ed473103abe..9497f15984b75 100644 --- a/vllm/model_executor/models/qwen2_5_omni_thinker.py +++ b/vllm/model_executor/models/qwen2_5_omni_thinker.py @@ -146,11 +146,11 @@ class Qwen2_5OmniThinkerProcessingInfo(Qwen2AudioProcessingInfo, kwargs["fps"] = fps processor = self.ctx.get_hf_processor( Qwen2_5OmniProcessor, - image_processor=self.get_image_processor( - min_pixels=min_pixels, - max_pixels=max_pixels, - size=size, - use_fast=kwargs.get("use_fast", True)), + image_processor=self.get_image_processor(min_pixels=min_pixels, + max_pixels=max_pixels, + size=size, + use_fast=kwargs.get( + "use_fast", True)), **kwargs, ) if not hasattr(processor, "audio_token"): diff --git a/vllm/model_executor/models/qwen2_5_vl.py b/vllm/model_executor/models/qwen2_5_vl.py index 4faa0d2c366ed..ff53a2775e3d4 100644 --- a/vllm/model_executor/models/qwen2_5_vl.py +++ b/vllm/model_executor/models/qwen2_5_vl.py @@ -794,11 +794,11 @@ class Qwen2_5_VLProcessingInfo(Qwen2VLProcessingInfo): return self.ctx.get_hf_processor( Qwen2_5_VLProcessor, - image_processor=self.get_image_processor( - min_pixels=min_pixels, - max_pixels=max_pixels, - size=size, - use_fast=kwargs.get("use_fast", True)), + image_processor=self.get_image_processor(min_pixels=min_pixels, + max_pixels=max_pixels, + size=size, + use_fast=kwargs.get( + "use_fast", True)), **kwargs, ) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 3b939a43e9246..690b8e02c2fde 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -759,11 +759,11 @@ class Qwen2VLProcessingInfo(BaseProcessingInfo): ) -> Qwen2VLProcessor: return self.ctx.get_hf_processor( Qwen2VLProcessor, - image_processor=self.get_image_processor( - min_pixels=min_pixels, - max_pixels=max_pixels, - size=size, - use_fast=kwargs.get("use_fast", True)), + image_processor=self.get_image_processor(min_pixels=min_pixels, + max_pixels=max_pixels, + size=size, + use_fast=kwargs.get( + "use_fast", True)), **kwargs, ) From e2148dc5eaec959a18a73f9f288bd5589efec2c1 Mon Sep 17 00:00:00 2001 From: kourosh hakhamaneshi <31483498+kouroshHakha@users.noreply.github.com> Date: Wed, 18 Jun 2025 21:47:01 -0700 Subject: [PATCH 036/211] [Bugfix] Add check_health to v1 async client. (#19821) Signed-off-by: Kourosh Hakhamaneshi --- tests/v1/engine/test_async_llm.py | 29 +++++++++++++++++++++++++++++ vllm/v1/engine/async_llm.py | 2 ++ 2 files changed, 31 insertions(+) diff --git a/tests/v1/engine/test_async_llm.py b/tests/v1/engine/test_async_llm.py index 3ae6293972682..33a9d672024b6 100644 --- a/tests/v1/engine/test_async_llm.py +++ b/tests/v1/engine/test_async_llm.py @@ -369,3 +369,32 @@ async def test_dp_rank_argument(monkeypatch: pytest.MonkeyPatch): sampling_params=sampling_params, data_parallel_rank=1): pass + + +@pytest.mark.asyncio +async def test_check_health(monkeypatch: pytest.MonkeyPatch): + """Test that check_health returns normally for healthy engine + and raises EngineDeadError when the engine is dead. + """ + from unittest.mock import patch + + from vllm.v1.engine.exceptions import EngineDeadError + + with monkeypatch.context() as m, ExitStack() as after: + m.setenv("VLLM_USE_V1", "1") + + engine = AsyncLLM.from_engine_args(TEXT_ENGINE_ARGS) + after.callback(engine.shutdown) + + # Test 1: Healthy engine should not raise any exception + await engine.check_health() + + # Test 2: Mock the errored property to simulate a dead engine + with patch.object(type(engine), + 'errored', + new_callable=lambda: property(lambda self: True) + ), pytest.raises(EngineDeadError): + await engine.check_health() + + # Test 3: Verify healthy engine still works after mock + await engine.check_health() diff --git a/vllm/v1/engine/async_llm.py b/vllm/v1/engine/async_llm.py index 998c4c5ea3cf7..3754570dfaaae 100644 --- a/vllm/v1/engine/async_llm.py +++ b/vllm/v1/engine/async_llm.py @@ -552,6 +552,8 @@ class AsyncLLM(EngineClient): async def check_health(self) -> None: logger.debug("Called check_health.") + if self.errored: + raise self.dead_error async def start_profile(self) -> None: await self.engine_core.profile_async(True) From 83ca9ae47b4476c900738b6db0ff5fcdfce13a7b Mon Sep 17 00:00:00 2001 From: "Yu-Hang \"Maxin\" Tang" Date: Wed, 18 Jun 2025 22:56:03 -0700 Subject: [PATCH 037/211] Mark invariant normalizer in Gemma as non-persistent (#19788) Signed-off-by: Yu-Hang Tang --- .../models/language/generation/test_gemma.py | 20 +++++++++++++++++++ vllm/model_executor/models/gemma.py | 4 +++- vllm/model_executor/models/gemma2.py | 4 +++- vllm/model_executor/models/gemma3.py | 4 +++- 4 files changed, 29 insertions(+), 3 deletions(-) create mode 100644 tests/models/language/generation/test_gemma.py diff --git a/tests/models/language/generation/test_gemma.py b/tests/models/language/generation/test_gemma.py new file mode 100644 index 0000000000000..ed0f0c19a0411 --- /dev/null +++ b/tests/models/language/generation/test_gemma.py @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import numpy as np +import pytest + +MODELS = ["google/gemma-2b", "google/gemma-2-2b", "google/gemma-3-4b-it"] + + +@pytest.mark.parametrize("model", MODELS) +def test_dummy_loader(vllm_runner, model: str) -> None: + with vllm_runner( + model, + load_format="dummy", + ) as llm: + normalizers = llm.collective_rpc(lambda self: self.worker.model_runner. + model.model.normalizer.cpu().item()) + assert np.allclose( + normalizers, + llm.llm_engine.model_config.hf_config.hidden_size**0.5, + rtol=1e-3) diff --git a/vllm/model_executor/models/gemma.py b/vllm/model_executor/models/gemma.py index 99ed51f8e70af..59c3102add4c7 100644 --- a/vllm/model_executor/models/gemma.py +++ b/vllm/model_executor/models/gemma.py @@ -281,7 +281,9 @@ class GemmaModel(nn.Module): # data type such as bfloat16, not float32. # See https://github.com/huggingface/transformers/pull/29402 normalizer = self.config.hidden_size**0.5 - self.register_buffer("normalizer", torch.tensor(normalizer)) + self.register_buffer("normalizer", + torch.tensor(normalizer), + persistent=False) self.make_empty_intermediate_tensors = ( make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) diff --git a/vllm/model_executor/models/gemma2.py b/vllm/model_executor/models/gemma2.py index ce405041b3d4a..8beefb2cd0bd8 100644 --- a/vllm/model_executor/models/gemma2.py +++ b/vllm/model_executor/models/gemma2.py @@ -267,7 +267,9 @@ class Gemma2Model(nn.Module): # data type such as bfloat16, not float32. # See https://github.com/huggingface/transformers/pull/29402 normalizer = self.config.hidden_size**0.5 - self.register_buffer("normalizer", torch.tensor(normalizer)) + self.register_buffer("normalizer", + torch.tensor(normalizer), + persistent=False) self.make_empty_intermediate_tensors = ( make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) diff --git a/vllm/model_executor/models/gemma3.py b/vllm/model_executor/models/gemma3.py index e19e0026b3f99..954e48d25f67d 100644 --- a/vllm/model_executor/models/gemma3.py +++ b/vllm/model_executor/models/gemma3.py @@ -371,7 +371,9 @@ class Gemma3Model(nn.Module): # data type such as bfloat16, not float32. # See https://github.com/huggingface/transformers/pull/29402 normalizer = self.config.hidden_size**0.5 - self.register_buffer("normalizer", torch.tensor(normalizer)) + self.register_buffer("normalizer", + torch.tensor(normalizer), + persistent=False) self.make_empty_intermediate_tensors = ( make_empty_intermediate_tensors_factory( ["hidden_states", "residual"], config.hidden_size)) From 2de12be428a71bbdd83cc2cb79f328285aa6224f Mon Sep 17 00:00:00 2001 From: TJian Date: Wed, 18 Jun 2025 22:56:31 -0700 Subject: [PATCH 038/211] [ROCm] [AITER] [Bugfix] Patch for AITER commit `648764942e552a8bb5fe16026703716a81f05374` (#18990) Signed-off-by: tjtanaa --- docker/Dockerfile.rocm_base | 2 +- vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docker/Dockerfile.rocm_base b/docker/Dockerfile.rocm_base index 45efcbde698b2..dc8ec5f1a15e5 100644 --- a/docker/Dockerfile.rocm_base +++ b/docker/Dockerfile.rocm_base @@ -12,7 +12,7 @@ ARG PYTORCH_REPO="https://github.com/pytorch/pytorch.git" ARG PYTORCH_VISION_REPO="https://github.com/pytorch/vision.git" ARG FA_BRANCH="1a7f4dfa" ARG FA_REPO="https://github.com/Dao-AILab/flash-attention.git" -ARG AITER_BRANCH="c1debd8" +ARG AITER_BRANCH="6487649" ARG AITER_REPO="https://github.com/ROCm/aiter.git" FROM ${BASE_IMAGE} AS base diff --git a/vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py b/vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py index d44989cce724a..00f1b1f6b911f 100644 --- a/vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py +++ b/vllm/model_executor/layers/fused_moe/rocm_aiter_fused_moe.py @@ -22,8 +22,9 @@ class QuantMethod(IntEnum): NO = 0 # a16w16 PER_TENSOR = 1 # w8a8 (pre_Tensor) PER_TOKEN = 2 # w8a8/w8a4 (per_Token) - BLOCK_1X128 = 3 # block quantized w8a8 (per_1x128) - BLOCK_128x128 = 4 # block quantized w8a8 (per_128x128) + BLOCK_1X32 = 3 # fp4x2 + BLOCK_1X128 = 4 # block quantized w8a8 (per_1x128) + BLOCK_128x128 = 5 # block quantized w8a8 (per_128x128) class ActivationMethod(IntEnum): From aa20d10a9182677ffc419e2d823e00237f56eb0d Mon Sep 17 00:00:00 2001 From: zsolt-borbely-htec Date: Thu, 19 Jun 2025 07:57:16 +0200 Subject: [PATCH 039/211] [Misc] [ROCm] Prevent surplus tensor reshape (#19803) Signed-off-by: Zsolt Borbely --- vllm/v1/attention/backends/triton_attn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/v1/attention/backends/triton_attn.py b/vllm/v1/attention/backends/triton_attn.py index ecb92bb1e4161..4c5a1a755c1a6 100644 --- a/vllm/v1/attention/backends/triton_attn.py +++ b/vllm/v1/attention/backends/triton_attn.py @@ -376,7 +376,7 @@ class TritonAttentionImpl(AttentionImpl): query.reshape( (num_tokens, num_heads * head_size)).contiguous(), layer._q_scale) - query = query.reshape((num_tokens, num_heads, head_size)) + query = query.reshape((num_tokens, num_heads, head_size)) use_local_attn = \ (self.use_irope and attn_metadata.local_attn_metadata is not None) From c7b370c603daf287e27d852ac36a7347536b01c6 Mon Sep 17 00:00:00 2001 From: Ning Xie Date: Thu, 19 Jun 2025 13:57:35 +0800 Subject: [PATCH 040/211] raise exception for pin_lora (#19809) Signed-off-by: Andy Xie --- vllm/worker/worker_base.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/vllm/worker/worker_base.py b/vllm/worker/worker_base.py index 0b37caa71669c..200026dc7282f 100644 --- a/vllm/worker/worker_base.py +++ b/vllm/worker/worker_base.py @@ -202,8 +202,7 @@ class LoRANotSupportedWorkerBase(WorkerBase): raise ValueError(f"{type(self)} does not support LoRA") def pin_lora(self, lora_id: int) -> bool: - return ValueError( - f"{type(self)} does not support LoRA") # type: ignore + raise ValueError(f"{type(self)} does not support LoRA") def list_loras(self) -> Set[int]: raise ValueError(f"{type(self)} does not support LoRA") @@ -398,7 +397,7 @@ class LocalOrDistributedWorkerBase(WorkerBase): model_input, worker_input, kwargs = inputs num_steps = worker_input.num_steps - if (execute_model_req is not None and execute_model_req.spec_step_idx): + if execute_model_req is not None and execute_model_req.spec_step_idx: kwargs["spec_step_idx"] = execute_model_req.spec_step_idx self.execute_worker(worker_input) From 6021999573910709e25e7ca838ec8b647959a07d Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Thu, 19 Jun 2025 14:04:10 +0800 Subject: [PATCH 041/211] [Minor] Allow redirecting model path for HfRunner in test (#19795) Signed-off-by: Isotr0py <2037008807@qq.com> --- tests/conftest.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index ff564b2b8ed51..f50e611a471bb 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -33,6 +33,7 @@ from vllm.inputs import (ExplicitEncoderDecoderPrompt, TextPrompt, from vllm.logger import init_logger from vllm.outputs import RequestOutput from vllm.sampling_params import BeamSearchParams +from vllm.transformers_utils.utils import maybe_model_redirect from vllm.utils import cuda_device_count_stateless logger = init_logger(__name__) @@ -321,6 +322,7 @@ class HfRunner: skip_tokenizer_init: bool = False, auto_cls: type[_BaseAutoModelClass] = AutoModelForCausalLM, ) -> None: + model_name = maybe_model_redirect(model_name) self.model_name = model_name self.config = AutoConfig.from_pretrained( From 1d0ae26c8544fd5a62e171e30c2dcc2973a23bc8 Mon Sep 17 00:00:00 2001 From: Zuxin <159079591+zuxin666@users.noreply.github.com> Date: Wed, 18 Jun 2025 23:26:41 -0700 Subject: [PATCH 042/211] Add xLAM tool parser support (#17148) --- docs/features/tool_calling.md | 19 + ..._chat_completion_client_with_tools_xlam.py | 244 +++++++++ ...letion_client_with_tools_xlam_streaming.py | 272 ++++++++++ examples/tool_chat_template_xlam_llama.jinja | 77 +++ examples/tool_chat_template_xlam_qwen.jinja | 66 +++ tests/tool_use/test_xlam_tool_parser.py | 246 ++++++++++ .../openai/tool_parsers/__init__.py | 3 +- .../openai/tool_parsers/xlam_tool_parser.py | 463 ++++++++++++++++++ 8 files changed, 1389 insertions(+), 1 deletion(-) create mode 100644 examples/online_serving/openai_chat_completion_client_with_tools_xlam.py create mode 100644 examples/online_serving/openai_chat_completion_client_with_tools_xlam_streaming.py create mode 100644 examples/tool_chat_template_xlam_llama.jinja create mode 100644 examples/tool_chat_template_xlam_qwen.jinja create mode 100644 tests/tool_use/test_xlam_tool_parser.py create mode 100644 vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py diff --git a/docs/features/tool_calling.md b/docs/features/tool_calling.md index 3547069f724dc..93ea164881ce5 100644 --- a/docs/features/tool_calling.md +++ b/docs/features/tool_calling.md @@ -226,6 +226,25 @@ AI21's Jamba-1.5 models are supported. Flags: `--tool-call-parser jamba` +### xLAM Models (`xlam`) + +The xLAM tool parser is designed to support models that generate tool calls in various JSON formats. It detects function calls in several different output styles: + +1. Direct JSON arrays: Output strings that are JSON arrays starting with `[` and ending with `]` +2. Thinking tags: Using `...` tags containing JSON arrays +3. Code blocks: JSON in code blocks (```json ...```) +4. Tool calls tags: Using `[TOOL_CALLS]` or `...` tags + +Parallel function calls are supported, and the parser can effectively separate text content from tool calls. + +Supported models: +* Salesforce Llama-xLAM models: `Salesforce/Llama-xLAM-2-8B-fc-r`, `Salesforce/Llama-xLAM-2-70B-fc-r` +* Qwen-xLAM models: `Salesforce/xLAM-1B-fc-r`, `Salesforce/xLAM-3B-fc-r`, `Salesforce/Qwen-xLAM-32B-fc-r` + +Flags: +* For Llama-based xLAM models: `--tool-call-parser xlam --chat-template examples/tool_chat_template_xlam_llama.jinja` +* For Qwen-based xLAM models: `--tool-call-parser xlam --chat-template examples/tool_chat_template_xlam_qwen.jinja` + ### Qwen Models For Qwen2.5, the chat template in tokenizer_config.json has already included support for the Hermes-style tool use. Therefore, you can use the `hermes` parser to enable tool calls for Qwen models. For more detailed information, please refer to the official [Qwen documentation](https://qwen.readthedocs.io/en/latest/framework/function_call.html#vllm) diff --git a/examples/online_serving/openai_chat_completion_client_with_tools_xlam.py b/examples/online_serving/openai_chat_completion_client_with_tools_xlam.py new file mode 100644 index 0000000000000..3de5e2b544c88 --- /dev/null +++ b/examples/online_serving/openai_chat_completion_client_with_tools_xlam.py @@ -0,0 +1,244 @@ +# SPDX-License-Identifier: Apache-2.0 +# ruff: noqa: E501 +""" +Set up this example by starting a vLLM OpenAI-compatible server with tool call +options enabled for xLAM-2 models: + +vllm serve --model Salesforce/Llama-xLAM-2-8b-fc-r --enable-auto-tool-choice --tool-call-parser xlam + +OR + +vllm serve --model Salesforce/xLAM-2-3b-fc-r --enable-auto-tool-choice --tool-call-parser xlam +""" + +import json +import time + +from openai import OpenAI + +# Modify OpenAI's API key and API base to use vLLM's API server. +openai_api_key = "empty" +openai_api_base = "http://localhost:8000/v1" + + +# Define tool functions +def get_weather(location: str, unit: str): + return f"Weather in {location} is 22 degrees {unit}." + + +def calculate_expression(expression: str): + try: + result = eval(expression) + return f"The result of {expression} is {result}" + except Exception as e: + return f"Could not calculate {expression}: {e}" + + +def translate_text(text: str, target_language: str): + return f"Translation of '{text}' to {target_language}: [translated content]" + + +# Define tools +tools = [ + { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "City and state, e.g., 'San Francisco, CA'", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location", "unit"], + }, + }, + }, + { + "type": "function", + "function": { + "name": "calculate_expression", + "description": "Calculate a mathematical expression", + "parameters": { + "type": "object", + "properties": { + "expression": { + "type": "string", + "description": "Mathematical expression to evaluate, needs to be a valid python expression", + } + }, + "required": ["expression"], + }, + }, + }, + { + "type": "function", + "function": { + "name": "translate_text", + "description": "Translate text to another language", + "parameters": { + "type": "object", + "properties": { + "text": {"type": "string", "description": "Text to translate"}, + "target_language": { + "type": "string", + "description": "Target language for translation", + }, + }, + "required": ["text", "target_language"], + }, + }, + }, +] + +# Map of function names to implementations +tool_functions = { + "get_weather": get_weather, + "calculate_expression": calculate_expression, + "translate_text": translate_text, +} + + +def process_response(response, tool_functions, original_query): + """Process a non-streaming response with possible tool calls""" + + print("\n--- Response Output ---") + + # Check if the response has content + if response.choices[0].message.content: + print(f"Content: {response.choices[0].message.content}") + + # Check if the response has tool calls + if response.choices[0].message.tool_calls: + print("--------------------------------") + print(f"Tool calls: {response.choices[0].message.tool_calls}") + print("--------------------------------") + + # Collect all tool calls and results before making follow-up request + tool_results = [] + assistant_message = {"role": "assistant"} + + if response.choices[0].message.content: + assistant_message["content"] = response.choices[0].message.content + + assistant_tool_calls = [] + + # Process each tool call + for tool_call in response.choices[0].message.tool_calls: + function_name = tool_call.function.name + function_args = tool_call.function.arguments + function_id = tool_call.id + + print(f"Function called: {function_name}") + print(f"Arguments: {function_args}") + print(f"Function ID: {function_id}") + + # Execute the function + try: + # Parse the JSON arguments + args = json.loads(function_args) + + # Call the function with the arguments + function_result = tool_functions[function_name](**args) + print(f"\n--- Function Result ---\n{function_result}\n") + + # Add tool call to assistant message + assistant_tool_calls.append( + { + "id": function_id, + "type": "function", + "function": {"name": function_name, "arguments": function_args}, + } + ) + + # Add tool result to tool_results + tool_results.append( + { + "role": "tool", + "tool_call_id": function_id, + "content": function_result, + } + ) + + except Exception as e: + print(f"Error executing function: {e}") + + # Add tool_calls to assistant message + assistant_message["tool_calls"] = assistant_tool_calls + + # Create a follow-up message with all function results + follow_up_messages = [ + {"role": "user", "content": original_query}, + assistant_message, + ] + + # Add all tool results to the messages + follow_up_messages.extend(tool_results) + + # Get completion with all tool results in a single follow-up + follow_up_response = client.chat.completions.create( + model=client.models.list().data[0].id, + messages=follow_up_messages, + stream=False, + ) + + print("\n--- Follow-up Response ---") + print(follow_up_response.choices[0].message.content) + print("--- End Follow-up ---\n") + + print("--- End Response ---\n") + + +def run_test_case(query, test_name): + """Run a single test case with the given query""" + print(f"\n{'=' * 50}\nTEST CASE: {test_name}\n{'=' * 50}") + print(f"Query: '{query}'") + + start_time = time.time() + + # Create non-streaming chat completion request + response = client.chat.completions.create( + model=client.models.list().data[0].id, + messages=[{"role": "user", "content": query}], + tools=tools, + tool_choice="auto", + stream=False, + ) + + # Process the non-streaming response, passing the original query + process_response(response, tool_functions, query) + + end_time = time.time() + print(f"Test completed in {end_time - start_time:.2f} seconds") + + +def main(): + # Initialize OpenAI client + global client + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) + + # Run test cases + test_cases = [ + ("I want to know the weather in San Francisco", "Weather Information"), + ("Calculate 25 * 17 + 31", "Math Calculation"), + ("Translate 'Hello world' to Spanish", "Text Translation"), + ("What is the weather in Tokyo and New York in celsius", "Multiple Tool Usage"), + ] + + # Execute all test cases + for query, test_name in test_cases: + run_test_case(query, test_name) + time.sleep(1) # Small delay between tests + + print("\nAll tests completed.") + + +if __name__ == "__main__": + main() diff --git a/examples/online_serving/openai_chat_completion_client_with_tools_xlam_streaming.py b/examples/online_serving/openai_chat_completion_client_with_tools_xlam_streaming.py new file mode 100644 index 0000000000000..5847414b1171a --- /dev/null +++ b/examples/online_serving/openai_chat_completion_client_with_tools_xlam_streaming.py @@ -0,0 +1,272 @@ +# SPDX-License-Identifier: Apache-2.0 +# ruff: noqa: E501 +""" +Set up this example by starting a vLLM OpenAI-compatible server with tool call +options enabled for xLAM-2 models: + +vllm serve --model Salesforce/Llama-xLAM-2-8b-fc-r --enable-auto-tool-choice --tool-call-parser xlam + +OR + +vllm serve --model Salesforce/xLAM-2-3b-fc-r --enable-auto-tool-choice --tool-call-parser xlam + +This example demonstrates streaming tool calls with xLAM models. +""" + +import json +import time + +from openai import OpenAI + +# Modify OpenAI's API key and API base to use vLLM's API server. +openai_api_key = "empty" +openai_api_base = "http://localhost:8000/v1" + + +# Define tool functions +def get_weather(location: str, unit: str): + return f"Weather in {location} is 22 degrees {unit}." + + +def calculate_expression(expression: str): + try: + result = eval(expression) + return f"The result of {expression} is {result}" + except Exception as e: + return f"Could not calculate {expression}: {e}" + + +def translate_text(text: str, target_language: str): + return f"Translation of '{text}' to {target_language}: [translated content]" + + +# Define tools +tools = [ + { + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "City and state, e.g., 'San Francisco, CA'", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location", "unit"], + }, + }, + }, + { + "type": "function", + "function": { + "name": "calculate_expression", + "description": "Calculate a mathematical expression", + "parameters": { + "type": "object", + "properties": { + "expression": { + "type": "string", + "description": "Mathematical expression to evaluate, needs to be a valid Python expression", + } + }, + "required": ["expression"], + }, + }, + }, + { + "type": "function", + "function": { + "name": "translate_text", + "description": "Translate text to another language", + "parameters": { + "type": "object", + "properties": { + "text": {"type": "string", "description": "Text to translate"}, + "target_language": { + "type": "string", + "description": "Target language for translation", + }, + }, + "required": ["text", "target_language"], + }, + }, + }, +] + +# Map of function names to implementations +tool_functions = { + "get_weather": get_weather, + "calculate_expression": calculate_expression, + "translate_text": translate_text, +} + + +def process_stream(response, tool_functions, original_query): + """Process a streaming response with possible tool calls""" + # Track multiple tool calls + tool_calls = {} # Dictionary to store tool calls by ID + + current_id = None + + print("\n--- Stream Output ---") + for chunk in response: + # Handle tool calls in the stream + if chunk.choices[0].delta.tool_calls: + for tool_call_chunk in chunk.choices[0].delta.tool_calls: + # Get the tool call ID + if hasattr(tool_call_chunk, "id") and tool_call_chunk.id: + current_id = tool_call_chunk.id + if current_id not in tool_calls: + tool_calls[current_id] = { + "function_name": None, + "function_args": "", + "function_id": current_id, + } + + # Extract function information as it comes in chunks + if ( + hasattr(tool_call_chunk, "function") + and current_id + and current_id in tool_calls + ): + if ( + hasattr(tool_call_chunk.function, "name") + and tool_call_chunk.function.name + ): + tool_calls[current_id]["function_name"] = ( + tool_call_chunk.function.name + ) + print(f"Function called: {tool_call_chunk.function.name}") + + if ( + hasattr(tool_call_chunk.function, "arguments") + and tool_call_chunk.function.arguments + ): + tool_calls[current_id]["function_args"] += ( + tool_call_chunk.function.arguments + ) + print(f"Arguments chunk: {tool_call_chunk.function.arguments}") + + # Handle regular content in the stream + elif chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content, end="") + + print("\n--- End Stream ---\n") + + # Execute each function call and build messages for follow-up + follow_up_messages = [{"role": "user", "content": original_query}] + + for tool_id, tool_data in tool_calls.items(): + function_name = tool_data["function_name"] + function_args = tool_data["function_args"] + function_id = tool_data["function_id"] + + if function_name and function_args: + try: + # Parse the JSON arguments + args = json.loads(function_args) + + # Call the function with the arguments + function_result = tool_functions[function_name](**args) + print( + f"\n--- Function Result ({function_name}) ---\n{function_result}\n" + ) + + # Add the assistant message with tool call + follow_up_messages.append( + { + "role": "assistant", + "tool_calls": [ + { + "id": function_id, + "type": "function", + "function": { + "name": function_name, + "arguments": function_args, + }, + } + ], + } + ) + + # Add the tool message with function result + follow_up_messages.append( + { + "role": "tool", + "tool_call_id": function_id, + "content": function_result, + } + ) + + except Exception as e: + print(f"Error executing function: {e}") + + # Only send follow-up if we have results to process + if len(follow_up_messages) > 1: + # Create a follow-up message with all the function results + follow_up_response = client.chat.completions.create( + model=client.models.list().data[0].id, + messages=follow_up_messages, + stream=True, + ) + + print("\n--- Follow-up Response ---") + for chunk in follow_up_response: + if chunk.choices[0].delta.content: + print(chunk.choices[0].delta.content, end="") + print("\n--- End Follow-up ---\n") + + +def run_test_case(query, test_name): + """Run a single test case with the given query""" + print(f"\n{'=' * 50}\nTEST CASE: {test_name}\n{'=' * 50}") + print(f"Query: '{query}'") + + start_time = time.time() + + # Create streaming chat completion request + response = client.chat.completions.create( + model=client.models.list().data[0].id, + messages=[{"role": "user", "content": query}], + tools=tools, + tool_choice="auto", + stream=True, + ) + + # Process the streaming response + process_stream(response, tool_functions, query) + + end_time = time.time() + print(f"Test completed in {end_time - start_time:.2f} seconds") + + +def main(): + # Initialize OpenAI client + global client + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) + + # Run test cases + test_cases = [ + ("I want to know the weather in San Francisco", "Weather Information"), + ("Calculate 25 * 17 + 31", "Math Calculation"), + ("Translate 'Hello world' to Spanish", "Text Translation"), + ("What is the weather in Tokyo and New York in celsius", "Multiple Tool Usage"), + ] + + # Execute all test cases + for query, test_name in test_cases: + run_test_case(query, test_name) + time.sleep(1) # Small delay between tests + + print("\nAll tests completed.") + + +if __name__ == "__main__": + main() diff --git a/examples/tool_chat_template_xlam_llama.jinja b/examples/tool_chat_template_xlam_llama.jinja new file mode 100644 index 0000000000000..f97de4004f1cf --- /dev/null +++ b/examples/tool_chat_template_xlam_llama.jinja @@ -0,0 +1,77 @@ +{{- bos_token }} +{%- if custom_tools is defined %} + {%- set tools = custom_tools %} +{%- endif %} +{%- if not tools_in_user_message is defined %} + {%- set tools_in_user_message = true %} +{%- endif %} +{%- if not tools is defined %} + {%- set tools = none %} +{%- endif %} + +{#- Extract system message #} +{{- "<|start_header_id|>system<|end_header_id|>\n\n" }} +{%- if messages[0]['role'] == 'system' %} + {%- set system_message = messages[0]['content'] | trim %} + {%- set messages = messages[1:] %} + {{- system_message + "\n" }} +{%- else %} + {%- set system_message = "You are a helpful assistant. You are developed by Salesforce xLAM team." %} + {% set format_instruction %}You have access to a set of tools. When using tools, make calls in a single JSON array: + +[{"name": "tool_call_name", "arguments": {"arg1": "value1", "arg2": "value2"}}, ... (additional parallel tool calls as needed)] + +If no tool is suitable, state that explicitly. If the user's input lacks required parameters, ask for clarification. Do not interpret or respond until tool results are returned. Once they are available, process them or make additional calls if needed. For tasks that don't require tools, such as casual conversation or general advice, respond directly in plain text. The available tools are:{% endset %} + {{- system_message + "\n" }} + {%- if tools is not none %} + {{- format_instruction + "\n\n" }} + {%- endif %} +{%- endif %} + + +{%- if tools is not none %} + {%- for t in tools %} + {{- t | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} +{%- endif %} +{{- "<|eot_id|>" }} + +{%- for message in messages %} + {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %} + {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' }} + {%- elif 'tool_calls' in message %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' -}} + {%- if message['tool_calls'] %} + {{- "[" }} + {%- for tool_call_function in message.tool_calls %} + {%- set tool_call = tool_call_function.function %} + {{- '{"name": "' + tool_call.name + '", ' }} + {{- '"arguments": ' }} + {{- tool_call.arguments | tojson }} + {{- "}" }} + {%- if not loop.last %} + {{- ", " }} + {%- endif %} + {%- endfor %} + {{- "]" }} + {{- "<|eot_id|>" }} + {%- elif message['content'] %} + {{- message['content'] | trim + '<|eot_id|>' }} + {%- else %} + {{- "[]\n" + '<|eot_id|>' }} + {%- endif %} + {%- elif message.role == "tool" or message.role == "ipython" %} + {{- "<|start_header_id|>" + "ipython" + "<|end_header_id|>\n\n" }} + {%- set content = message["content"] %} + {%- if content is mapping or (content is iterable and content is not string) %} + {{- content | tojson }} + {%- else %} + {{- content }} + {%- endif %} + {{- "<|eot_id|>" }} + {%- endif %} +{%- endfor %} +{%- if add_generation_prompt %} + {{- '<|start_header_id|>assistant<|end_header_id|>\n\n' }} +{%- endif %} \ No newline at end of file diff --git a/examples/tool_chat_template_xlam_qwen.jinja b/examples/tool_chat_template_xlam_qwen.jinja new file mode 100644 index 0000000000000..acf57cc4b2c18 --- /dev/null +++ b/examples/tool_chat_template_xlam_qwen.jinja @@ -0,0 +1,66 @@ +{# System message #} +{{- "<|im_start|>system\n" }} +{%- if messages[0]['role'] == 'system' %} + {%- set system_message = messages[0]['content'] | trim %} + {%- set messages = messages[1:] %} + {{- system_message + "\n" }} +{%- else %} + {%- set system_message = "You are a helpful assistant. You are developed by Salesforce xLAM team." %} + {% set format_instruction %}You have access to a set of tools. When using tools, make calls in a single JSON array: + +[{"name": "tool_call_name", "arguments": {"arg1": "value1", "arg2": "value2"}}, ... (additional parallel tool calls as needed)] + +If no tool is suitable, state that explicitly. If the user's input lacks required parameters, ask for clarification. Do not interpret or respond until tool results are returned. Once they are available, process them or make additional calls if needed. For tasks that don't require tools, such as casual conversation or general advice, respond directly in plain text. The available tools are:{% endset %} + {{- system_message + "\n" }} + {%- if tools is not none %} + {{- format_instruction + "\n\n" }} + {%- endif %} +{%- endif %} + +{%- if tools is not none %} + {%- for func in tools %} + {{- func | tojson(indent=4) }} + {{- "\n\n" }} + {%- endfor %} +{%- endif %} +{{- "<|im_end|>\n" }} +{%- for message in messages %} + {%- if message['role'] == 'tool' %} + {{- "<|im_start|>tool\n" }} + {%- if message.content is defined and message.content.content is defined %} + {%- set content = message.content.content %} + {%- else %} + {%- set content = message.content %} + {%- endif %} + {%- if content is mapping or content is iterable and content is not string %} + {{- content | tojson }} + {%- else %} + {{- content }} + {%- endif %} + {{- "<|im_end|>\n" }} + {%- elif 'tool_calls' in message %} + {{- "<|im_start|>assistant\n" }} + {%- if message['tool_calls'] %} + {{- "[" }} + {%- for tool_call in message.tool_calls %} + {%- set out = tool_call.function | tojson %} + {{- out }} + {%- if not loop.last %} + {{- ", " }} + {%- endif %} + {%- endfor %} + {{- "]"}} + {%- elif message['content'] %} + {{- message['content'] | trim }} + {%- else %} + {{- "[]\n" }} + {%- endif %} + {{- "<|im_end|>\n" }} + {%- else %} + {{- "<|im_start|>" + message['role'] + "\n" + message['content'] | trim + "<|im_end|>\n" }} + {%- endif %} +{%- endfor %} + +{%- if add_generation_prompt %} + {{- "<|im_start|>assistant\n" }} +{%- endif %} diff --git a/tests/tool_use/test_xlam_tool_parser.py b/tests/tool_use/test_xlam_tool_parser.py new file mode 100644 index 0000000000000..dd154177bc8b4 --- /dev/null +++ b/tests/tool_use/test_xlam_tool_parser.py @@ -0,0 +1,246 @@ +# SPDX-License-Identifier: Apache-2.0 + +import json + +import pytest + +from vllm.entrypoints.openai.protocol import FunctionCall, ToolCall +from vllm.entrypoints.openai.tool_parsers import xLAMToolParser +from vllm.transformers_utils.tokenizer import get_tokenizer + +# Use a common model that is likely to be available +MODEL = "Salesforce/Llama-xLAM-2-8B-fc-r" + + +@pytest.fixture(scope="module") +def xlam_tokenizer(): + return get_tokenizer(tokenizer_name=MODEL) + + +@pytest.fixture +def xlam_tool_parser(xlam_tokenizer): + return xLAMToolParser(xlam_tokenizer) + + +def assert_tool_calls(actual_tool_calls: list[ToolCall], + expected_tool_calls: list[ToolCall]): + assert len(actual_tool_calls) == len(expected_tool_calls) + + for actual_tool_call, expected_tool_call in zip(actual_tool_calls, + expected_tool_calls): + assert isinstance(actual_tool_call.id, str) + assert len(actual_tool_call.id) > 16 + + assert actual_tool_call.type == "function" + assert actual_tool_call.function == expected_tool_call.function + + +def test_extract_tool_calls_no_tools(xlam_tool_parser): + model_output = "This is a test" + extracted_tool_calls = xlam_tool_parser.extract_tool_calls( + model_output, request=None) # type: ignore[arg-type] + assert not extracted_tool_calls.tools_called + assert extracted_tool_calls.tool_calls == [] + assert extracted_tool_calls.content == model_output + + +@pytest.mark.parametrize( + ids=[ + "parallel_tool_calls", + "single_tool_with_think_tag", + "single_tool_with_json_code_block", + "single_tool_with_tool_calls_tag", + ], + argnames=["model_output", "expected_tool_calls", "expected_content"], + argvalues=[ + ( + """[{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}, {"name": "get_current_weather", "arguments": {"city": "Orlando", "state": "FL", "unit": "fahrenheit"}}]""", # noqa: E501 + [ + ToolCall(function=FunctionCall( + name="get_current_weather", + arguments=json.dumps({ + "city": "Dallas", + "state": "TX", + "unit": "fahrenheit", + }), + )), + ToolCall(function=FunctionCall( + name="get_current_weather", + arguments=json.dumps({ + "city": "Orlando", + "state": "FL", + "unit": "fahrenheit", + }), + )), + ], + None, + ), + ( + """I'll help you with that.[{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]""", # noqa: E501 + [ + ToolCall(function=FunctionCall( + name="get_current_weather", + arguments=json.dumps({ + "city": "Dallas", + "state": "TX", + "unit": "fahrenheit", + }), + )) + ], + "I'll help you with that.", + ), + ( + """I'll help you with that.\n```json\n[{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]\n```""", # noqa: E501 + [ + ToolCall(function=FunctionCall( + name="get_current_weather", + arguments=json.dumps({ + "city": "Dallas", + "state": "TX", + "unit": "fahrenheit", + }), + )) + ], + "I'll help you with that.", + ), + ( + """I'll check the weather for you.[TOOL_CALLS][{"name": "get_current_weather", "arguments": {"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]""", # noqa: E501 + [ + ToolCall(function=FunctionCall( + name="get_current_weather", + arguments=json.dumps({ + "city": "Dallas", + "state": "TX", + "unit": "fahrenheit", + }), + )) + ], + "I'll check the weather for you.", + ), + ], +) +def test_extract_tool_calls(xlam_tool_parser, model_output, + expected_tool_calls, expected_content): + extracted_tool_calls = xlam_tool_parser.extract_tool_calls( + model_output, request=None) # type: ignore[arg-type] + assert extracted_tool_calls.tools_called + + assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) + + assert extracted_tool_calls.content == expected_content + + +@pytest.mark.parametrize( + ids=["list_structured_tool_call"], + argnames=["model_output", "expected_tool_calls", "expected_content"], + argvalues=[ + ( + """[{"name": "get_current_weather", "arguments": {"city": "Seattle", "state": "WA", "unit": "celsius"}}]""", # noqa: E501 + [ + ToolCall(function=FunctionCall( + name="get_current_weather", + arguments=json.dumps({ + "city": "Seattle", + "state": "WA", + "unit": "celsius", + }), + )) + ], + None, + ), + ], +) +def test_extract_tool_calls_list_structure(xlam_tool_parser, model_output, + expected_tool_calls, + expected_content): + """Test extraction of tool calls when the model outputs a list-structured tool call.""" # noqa: E501 + extracted_tool_calls = xlam_tool_parser.extract_tool_calls( + model_output, request=None) # type: ignore[arg-type] + assert extracted_tool_calls.tools_called + + assert_tool_calls(extracted_tool_calls.tool_calls, expected_tool_calls) + + assert extracted_tool_calls.content == expected_content + + +# Test for preprocess_model_output method +def test_preprocess_model_output(xlam_tool_parser): + # Test with list structure + model_output = """[{"name": "get_current_weather", "arguments": {"city": "Seattle"}}]""" # noqa: E501 + content, potential_tool_calls = xlam_tool_parser.preprocess_model_output( + model_output) + assert content is None + assert potential_tool_calls == model_output + + # Test with thinking tag + model_output = """I'll help you with that.[{"name": "get_current_weather", "arguments": {"city": "Seattle"}}]""" # noqa: E501 + content, potential_tool_calls = xlam_tool_parser.preprocess_model_output( + model_output) + assert content == "I'll help you with that." + assert ( + potential_tool_calls == + '[{"name": "get_current_weather", "arguments": {"city": "Seattle"}}]') + + # Test with JSON code block + model_output = """I'll help you with that. +```json +[{"name": "get_current_weather", "arguments": {"city": "Seattle"}}] +```""" + content, potential_tool_calls = xlam_tool_parser.preprocess_model_output( + model_output) + assert content == "I'll help you with that." + assert "get_current_weather" in potential_tool_calls + + # Test with no tool calls + model_output = """I'll help you with that.""" + content, potential_tool_calls = xlam_tool_parser.preprocess_model_output( + model_output) + assert content == model_output + assert potential_tool_calls is None + + +# Simulate streaming to test extract_tool_calls_streaming +def test_streaming_with_list_structure(xlam_tool_parser): + # Reset streaming state + xlam_tool_parser.prev_tool_calls = [] + xlam_tool_parser.current_tools_sent = [] + xlam_tool_parser.streamed_args = [] + xlam_tool_parser.current_tool_id = -1 + + # Simulate receiving a message with list structure + current_text = """[{"name": "get_current_weather", "arguments": {"city": "Seattle"}}]""" # noqa: E501 + + # First call to set up the tool + xlam_tool_parser.extract_tool_calls_streaming( + previous_text="", + current_text=current_text, + delta_text="]", + previous_token_ids=[], + current_token_ids=[], + delta_token_ids=[], + request=None, + ) + + # Make sure the tool is set up correctly + assert (xlam_tool_parser.current_tool_id + >= 0), "Tool index should be initialized" + + # Manually set up the state for sending the tool name + xlam_tool_parser.current_tools_sent = [False] + + # Call to send the function name + result = xlam_tool_parser.extract_tool_calls_streaming( + previous_text=current_text, + current_text=current_text, + delta_text="", + previous_token_ids=[], + current_token_ids=[], + delta_token_ids=[], + request=None, + ) + + # Check that we get a result with the proper tool call + if result is not None: + assert hasattr(result, "tool_calls") + assert len(result.tool_calls) == 1 + assert result.tool_calls[0].function.name == "get_current_weather" diff --git a/vllm/entrypoints/openai/tool_parsers/__init__.py b/vllm/entrypoints/openai/tool_parsers/__init__.py index 3e4f4e149c9f4..46bd665e767dd 100644 --- a/vllm/entrypoints/openai/tool_parsers/__init__.py +++ b/vllm/entrypoints/openai/tool_parsers/__init__.py @@ -13,11 +13,12 @@ from .llama_tool_parser import Llama3JsonToolParser from .mistral_tool_parser import MistralToolParser from .phi4mini_tool_parser import Phi4MiniJsonToolParser from .pythonic_tool_parser import PythonicToolParser +from .xlam_tool_parser import xLAMToolParser __all__ = [ "ToolParser", "ToolParserManager", "Granite20bFCToolParser", "GraniteToolParser", "Hermes2ProToolParser", "MistralToolParser", "Internlm2ToolParser", "Llama3JsonToolParser", "JambaToolParser", "Llama4PythonicToolParser", "PythonicToolParser", "Phi4MiniJsonToolParser", - "DeepSeekV3ToolParser" + "DeepSeekV3ToolParser", "xLAMToolParser" ] diff --git a/vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py new file mode 100644 index 0000000000000..742e7bfdb3aa9 --- /dev/null +++ b/vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py @@ -0,0 +1,463 @@ +# SPDX-License-Identifier: Apache-2.0 +# ruff: noqa +import json +import re +from collections.abc import Sequence +from typing import Any, Dict, List, Optional, Union + +from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, + DeltaFunctionCall, DeltaMessage, + DeltaToolCall, + ExtractedToolCallInformation, + FunctionCall, ToolCall) +from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import ( + ToolParser, ToolParserManager) +from vllm.logger import init_logger +from vllm.transformers_utils.tokenizer import AnyTokenizer +from vllm.utils import random_uuid + +logger = init_logger(__name__) + + +@ToolParserManager.register_module("xlam") +class xLAMToolParser(ToolParser): + + def __init__(self, tokenizer: AnyTokenizer): + super().__init__(tokenizer) + + # Initialize state for streaming mode + self.prev_tool_calls: list[dict] = [] + self.current_tool_id = -1 + self.current_tool_name_sent = False + self.streamed_args: list[str] = [ + ] # Track arguments sent for each tool + + # For backward compatibility with tests + self.current_tools_sent: list[bool] = [] + + # For backward compatibility with serving code + self.prev_tool_call_arr = [] + + # Regex patterns for preprocessing + self.json_code_block_patterns = [ + r"```(?:json)?\s*([\s\S]*?)```", + r"\[TOOL_CALLS\]([\s\S]*?)(?=\n|$)", + r"([\s\S]*?)", + ] + self.thinking_tag_pattern = r"([\s\S]*)" + + # Define streaming state type to be initialized later + self.streaming_state: dict[str, Any] = { + "current_tool_index": -1, + "tool_ids": [], + "sent_tools": [], + } + + def preprocess_model_output( + self, model_output: str) -> tuple[Optional[str], Optional[str]]: + """ + Preprocess the model output to extract content and potential tool calls. + Returns: + Tuple of (content, potential_tool_calls_json) + """ + # Check for thinking tag + thinking_match = re.search(self.thinking_tag_pattern, model_output) + if thinking_match: + content = model_output[:thinking_match.start() + + len("")].strip() + thinking_content = thinking_match.group(1).strip() + + # Try to parse the thinking content as JSON + try: + json.loads(thinking_content) + return content, thinking_content + except json.JSONDecodeError: + # If can't parse as JSON, look for JSON code blocks + for json_pattern in self.json_code_block_patterns: + json_matches = re.findall(json_pattern, thinking_content) + if json_matches: + for json_str in json_matches: + try: + json.loads(json_str) + return content, json_str + except json.JSONDecodeError: + continue + + # Check for JSON code blocks in the entire output + for json_pattern in self.json_code_block_patterns: + json_matches = re.findall(json_pattern, model_output) + if json_matches: + for json_str in json_matches: + try: + json.loads(json_str) + # Extract content by removing the JSON code block + content = re.sub(json_pattern, "", + model_output).strip() + return content, json_str + except json.JSONDecodeError: + continue + + # If the entire output is a valid JSON array or looks like one, treat it as tool calls + if model_output.strip().startswith("["): + try: + json.loads(model_output) + return None, model_output + except json.JSONDecodeError: + # Even if it's not valid JSON yet, it might be a tool call in progress + if ("{" in model_output and "name" in model_output + and "arguments" in model_output): + return None, model_output + + # If no tool calls found, return the original output as content + return model_output, None + + def extract_tool_calls( + self, model_output: str, + request: ChatCompletionRequest) -> ExtractedToolCallInformation: + """ + Extract tool calls from a complete model output. + """ + try: + # Preprocess the model output + content, potential_tool_calls = self.preprocess_model_output( + model_output) + + if not potential_tool_calls: + return ExtractedToolCallInformation(tools_called=False, + tool_calls=[], + content=content) + + # Parse the potential tool calls as JSON + tool_calls_data = json.loads(potential_tool_calls) + + # Ensure it's an array + if not isinstance(tool_calls_data, list): + logger.debug("Tool calls data is not an array") + return ExtractedToolCallInformation( + tools_called=False, + tool_calls=[], + content=content or model_output, + ) + + tool_calls: list[ToolCall] = [] + + for idx, call in enumerate(tool_calls_data): + if (not isinstance(call, dict) or "name" not in call + or "arguments" not in call): + logger.debug("Invalid tool call format at index %d", idx) + continue + + tool_call = ToolCall( + id=f"call_{idx}_{random_uuid()}", + type="function", + function=FunctionCall( + name=call["name"], + arguments=(json.dumps(call["arguments"]) if isinstance( + call["arguments"], dict) else call["arguments"]), + ), + ) + tool_calls.append(tool_call) + + return ExtractedToolCallInformation( + tools_called=len(tool_calls) > 0, + tool_calls=tool_calls, + content=content, + ) + + except Exception as e: + logger.exception("Error extracting tool calls: %s", str(e)) + return ExtractedToolCallInformation(tools_called=False, + tool_calls=[], + content=model_output) + + def extract_tool_calls_streaming( + self, + previous_text: str, + current_text: str, + delta_text: str, + previous_token_ids: Sequence[int], + current_token_ids: Sequence[int], + delta_token_ids: Sequence[int], + request: ChatCompletionRequest, + ) -> Union[DeltaMessage, None]: + """ + Extract tool calls for streaming mode. + """ + # Simplify detection: if it begins with "[" treat it as a function call + is_function_call = (current_text.strip().startswith("[")) + + # If not a function call, return normal content + if not is_function_call: + return DeltaMessage(content=delta_text) + + try: + # Initialize streaming state if not exists + if not hasattr(self, "streaming_state"): + self.streaming_state = { + "current_tool_index": -1, + "tool_ids": [], + "sent_tools": [], # Track complete state of each tool + } + + # Try parsing as JSON to check for complete tool calls + try: + parsed_tools = json.loads(current_text) + if isinstance(parsed_tools, list): + # Update our tool array for next time + self.prev_tool_call_arr = parsed_tools + except json.JSONDecodeError: + # Not complete JSON yet, use regex for partial parsing + pass + + # Check for test-specific state setup (current_tools_sent) + # This handles the case where tests manually set current_tools_sent + if (hasattr(self, "current_tools_sent") # type: ignore + and len(self.current_tools_sent) > 0): + # If current_tools_sent is set to [False], it means the test wants us to send the name + if (len(self.current_tools_sent) == 1 + and self.current_tools_sent[0] is False): + # Extract the function name using regex + name_pattern = r'"name"\s*:\s*"([^"]+)"' + name_match = re.search(name_pattern, current_text) + if name_match: + function_name = name_match.group(1) + + # The test expects us to send just the name first + tool_id = f"chatcmpl-tool-{random_uuid()}" + delta = DeltaMessage(tool_calls=[ + DeltaToolCall( + index=0, + type="function", + id=tool_id, + function=DeltaFunctionCall( + name=function_name).model_dump( + exclude_none=True), # type: ignore + ) + ]) + # Update state to reflect that we've sent the name + self.current_tools_sent = [True] + self.current_tool_id = 0 + self.streaming_state["current_tool_index"] = 0 + if len(self.streaming_state["sent_tools"]) == 0: + self.streaming_state["sent_tools"].append({ + "sent_name": + True, + "sent_arguments_prefix": + False, + "sent_arguments": + "", + }) + else: + self.streaming_state["sent_tools"][0][ + "sent_name"] = True + self.current_tool_name_sent = True + return delta + + # Use regex to identify tool calls in the output + name_pattern = r'"name"\s*:\s*"([^"]+)"' + name_matches = list(re.finditer(name_pattern, current_text)) + tool_count = len(name_matches) + + # If no tools found yet, return + if tool_count == 0: + return None + + # Ensure our state arrays are large enough + while len(self.streaming_state["sent_tools"]) < tool_count: + self.streaming_state["sent_tools"].append({ + "sent_name": + False, + "sent_arguments_prefix": + False, + "sent_arguments": + "", + }) + + while len(self.streaming_state["tool_ids"]) < tool_count: + self.streaming_state["tool_ids"].append(None) + + # Determine if we need to move to a new tool + current_idx = self.streaming_state["current_tool_index"] + + # If we haven't processed any tool yet or current tool is complete, move to next + if current_idx == -1 or current_idx < tool_count - 1: + next_idx = current_idx + 1 + + # If tool at next_idx has not been sent yet + if (next_idx < tool_count + and not self.streaming_state["sent_tools"][next_idx] + ["sent_name"]): + # Update indexes + self.streaming_state["current_tool_index"] = next_idx + self.current_tool_id = ( + next_idx # For backward compatibility + ) + current_idx = next_idx + + # Extract the tool name + tool_name = name_matches[current_idx].group(1) + + # Generate ID and send tool name + tool_id = f"call_{current_idx}_{random_uuid()}" + self.streaming_state["tool_ids"][current_idx] = tool_id + + delta = DeltaMessage(tool_calls=[ + DeltaToolCall( + index=current_idx, + type="function", + id=tool_id, + function=DeltaFunctionCall( + name=tool_name).model_dump( + exclude_none=True), # type: ignore + ) + ]) + self.streaming_state["sent_tools"][current_idx][ + "sent_name"] = True + self.current_tool_name_sent = ( + True # For backward compatibility + ) + + # Keep track of streamed args for backward compatibility + while len(self.streamed_args) <= current_idx: + self.streamed_args.append("") + + return delta + + # Process arguments for the current tool + if current_idx >= 0 and current_idx < tool_count: + # Support both regular and empty argument objects + # First, check for the empty arguments case: "arguments": {} + empty_args_pattern = ( + r'"name"\s*:\s*"[^"]+"\s*,\s*"arguments"\s*:\s*\{\s*\}') + empty_args_match = re.search(empty_args_pattern, current_text) + + # Check if this tool has empty arguments + if empty_args_match and empty_args_match.start() > 0: + # Find which tool this empty arguments belongs to + empty_args_tool_idx = 0 + for i in range(tool_count): + if i == current_idx: + # If this is our current tool and it has empty arguments + if not self.streaming_state["sent_tools"][ + current_idx]["sent_arguments_prefix"]: + # Send empty object + self.streaming_state["sent_tools"][ + current_idx][ + "sent_arguments_prefix"] = True + self.streaming_state["sent_tools"][ + current_idx]["sent_arguments"] = "{}" + + # Update streamed_args for backward compatibility + while len(self.streamed_args) <= current_idx: + self.streamed_args.append("") + self.streamed_args[current_idx] += "{}" + + delta = DeltaMessage(tool_calls=[ + DeltaToolCall( + index=current_idx, + function=DeltaFunctionCall( + arguments="{}"). + model_dump( + exclude_none=True), # type: ignore + ) + ]) + + # Move to next tool if available + if current_idx < tool_count - 1: + self.streaming_state[ + "current_tool_index"] += 1 + self.current_tool_id = self.streaming_state[ + "current_tool_index"] + + return delta + + # Extract arguments for current tool using regex for non-empty arguments + args_pattern = r'"name"\s*:\s*"[^"]+"\s*,\s*"arguments"\s*:\s*(\{(?:[^{}]|(?:\{[^{}]*\}))*\})' + args_matches = list(re.finditer(args_pattern, current_text)) + + if current_idx < len(args_matches): + args_text = args_matches[current_idx].group(1) + + # Handle transition between tools + is_last_tool = current_idx == tool_count - 1 + + # Find where the arguments for our current tool end + if not is_last_tool: + # If we have more tools after this one, try to find the complete argument block + next_tool_pos = current_text.find( + "},{", args_matches[current_idx].start()) + if next_tool_pos != -1: + args_end_pos = (next_tool_pos + 1 + ) # +1 to include the '}' + args_text = (current_text[args_matches[current_idx] + .start():args_end_pos]. + split('"arguments":')[1].strip()) + + # If arguments haven't been sent yet + sent_args = self.streaming_state["sent_tools"][ + current_idx]["sent_arguments"] + + # If we haven't sent the opening bracket yet + if not self.streaming_state["sent_tools"][current_idx][ + "sent_arguments_prefix"] and args_text.startswith( + "{"): + self.streaming_state["sent_tools"][current_idx][ + "sent_arguments_prefix"] = True + self.streaming_state["sent_tools"][current_idx][ + "sent_arguments"] = "{" + + # Update streamed_args for backward compatibility + while len(self.streamed_args) <= current_idx: + self.streamed_args.append("") + self.streamed_args[current_idx] += "{" + + delta = DeltaMessage(tool_calls=[ + DeltaToolCall( + index=current_idx, + function=DeltaFunctionCall( + arguments="{").model_dump( + exclude_none=True), # type: ignore + ) + ]) + return delta + + # If we need to send more arguments + if args_text.startswith(sent_args): + # Calculate what part of arguments we need to send + args_diff = args_text[len(sent_args):] + + if args_diff: + # Update our state + self.streaming_state["sent_tools"][current_idx][ + "sent_arguments"] = args_text + + # Update streamed_args for backward compatibility + while len(self.streamed_args) <= current_idx: + self.streamed_args.append("") + self.streamed_args[current_idx] += args_diff + + delta = DeltaMessage(tool_calls=[ + DeltaToolCall( + index=current_idx, + function=DeltaFunctionCall( + arguments=args_diff).model_dump( + exclude_none=True), # type: ignore + ) + ]) + return delta + + # If the tool's arguments are complete, check if we need to move to the next tool + if args_text.endswith("}") and args_text == sent_args: + # This tool is complete, move to the next one in the next iteration + if current_idx < tool_count - 1: + self.streaming_state["current_tool_index"] += 1 + self.current_tool_id = self.streaming_state[ + "current_tool_index"] # For compatibility + + # If we got here, we couldn't determine what to stream next + return None + + except Exception as e: + logger.exception(f"Error in streaming tool calls: {e}") + # If we encounter an error, just return the delta text as regular content + return DeltaMessage(content=delta_text) From 466166dcfdc40f85f4043a94b9a53099af4a0850 Mon Sep 17 00:00:00 2001 From: NekoMimiUnagi Date: Thu, 19 Jun 2025 02:21:41 -0500 Subject: [PATCH 043/211] [Frontend] Add optional token-level progress bar to `LLM.beam_search` (#19301) Signed-off-by: Ruosen Li Signed-off-by: Aaron Pham Signed-off-by: Ubuntu Co-authored-by: 22quinn <33176974+22quinn@users.noreply.github.com> --- vllm/entrypoints/llm.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index f3170fa30fce1..87810772fc2e2 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -552,6 +552,7 @@ class LLM: prompts: list[Union[TokensPrompt, TextPrompt]], params: BeamSearchParams, lora_request: Optional[Union[list[LoRARequest], LoRARequest]] = None, + use_tqdm: bool = False, ) -> list[BeamSearchOutput]: """ Generate sequences using beam search. @@ -561,6 +562,7 @@ class LLM: of token IDs. params: The beam search parameters. lora_request: LoRA request to use for generation, if any. + use_tqdm: Whether to use tqdm to display the progress bar. """ # TODO: how does beam search work together with length penalty, # frequency, penalty, and stopping criteria, etc.? @@ -623,7 +625,18 @@ class LLM: **mm_kwargs, ), ) - for _ in range(max_tokens): + token_iter = range(max_tokens) + if use_tqdm: + token_iter = tqdm(token_iter, + desc="Beam search", + unit="token", + unit_scale=False) + logger.warning( + "The progress bar shows the upper bound on token steps and " + "may finish early due to stopping conditions. It does not " + "reflect instance-level progress.") + + for _ in token_iter: all_beams: list[BeamSearchSequence] = list( sum((instance.beams for instance in instances), [])) pos = [0] + list( From 4719460644b4629db2b6dbf12be331d0b34b4b6f Mon Sep 17 00:00:00 2001 From: Alexei-V-Ivanov-AMD <156011006+Alexei-V-Ivanov-AMD@users.noreply.github.com> Date: Thu, 19 Jun 2025 03:36:16 -0500 Subject: [PATCH 044/211] Fixing Chunked Prefill Test. (#19762) Signed-off-by: Alexei V. Ivanov --- .buildkite/test-pipeline.yaml | 2 +- .../basic_correctness/test_chunked_prefill.py | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 8f39862708689..34ff145e6ed44 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -89,7 +89,7 @@ steps: - VLLM_TEST_ENABLE_ARTIFICIAL_PREEMPT=1 pytest -v -s basic_correctness/test_preemption.py - label: Chunked Prefill Test - mirror_hardwares: [amdexperimental] + mirror_hardwares: [amdexperimental, amdproduction] source_file_dependencies: - vllm/ - tests/basic_correctness/test_chunked_prefill diff --git a/tests/basic_correctness/test_chunked_prefill.py b/tests/basic_correctness/test_chunked_prefill.py index eb5b09ff74f60..4a422e8555da7 100644 --- a/tests/basic_correctness/test_chunked_prefill.py +++ b/tests/basic_correctness/test_chunked_prefill.py @@ -49,7 +49,13 @@ def use_v0_only(monkeypatch: pytest.MonkeyPatch): # NOTE: Increasing this in this suite will fail CI because we currently cannot # reset distributed env properly. Use a value > 1 just when you test. @pytest.mark.parametrize("tensor_parallel_size", [1]) -@pytest.mark.parametrize("attention_backend", ["FLASHINFER", "FLASH_ATTN"]) +@pytest.mark.parametrize("attention_backend", [ + pytest.param("FLASHINFER", + marks=pytest.mark.skipif( + current_platform.is_rocm(), + reason="FLASHINFER isn't supported on ROCm")), + "FLASH_ATTN" +]) def test_models( hf_runner: HfRunner, vllm_runner: VllmRunner, @@ -99,7 +105,13 @@ def test_models( @multi_gpu_test(num_gpus=2) @pytest.mark.parametrize("distributed_executor_backend", ["ray", "mp"]) @pytest.mark.parametrize("model", MODELS) -@pytest.mark.parametrize("attention_backend", ["FLASHINFER", "FLASH_ATTN"]) +@pytest.mark.parametrize("attention_backend", [ + pytest.param("FLASHINFER", + marks=pytest.mark.skipif( + current_platform.is_rocm(), + reason="FLASHINFER isn't supported on ROCm")), + "FLASH_ATTN" +]) def test_models_distributed( hf_runner: HfRunner, vllm_runner: VllmRunner, @@ -172,6 +184,8 @@ def test_models_distributed( # Due to low-precision numerical divergence, this test is too sensitive to # the async postprocessor @pytest.mark.parametrize("disable_async_output_proc", [True]) +@pytest.mark.skipif(current_platform.is_rocm(), + reason="machete_prepack_B isn't supported on ROCm") def test_models_with_fp8_kv_cache( vllm_runner: VllmRunner, example_prompts, From 6f68c492204946b7de33d91fcd7c5439ef4aa9a3 Mon Sep 17 00:00:00 2001 From: 22quinn <33176974+22quinn@users.noreply.github.com> Date: Thu, 19 Jun 2025 02:43:27 -0700 Subject: [PATCH 045/211] [Doc] Update V1 user guide for embedding models (#19842) Signed-off-by: 22quinn <33176974+22quinn@users.noreply.github.com> --- docs/usage/v1_guide.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/usage/v1_guide.md b/docs/usage/v1_guide.md index 28c501439325e..1ec3e72a4f56f 100644 --- a/docs/usage/v1_guide.md +++ b/docs/usage/v1_guide.md @@ -39,9 +39,9 @@ This living user guide outlines a few known **important changes and limitations* For each item, our progress towards V1 support falls into one of the following states: - **🚀 Optimized**: Nearly fully optimized, with no further work currently planned. -- **🟢 Functional**: Fully operational, with ongoing optimizations. -- **🚧 WIP**: Under active development. -- **🟡 Planned**: Scheduled for future implementation (some may have open PRs/RFCs). +- **🟢 Functional**: Fully operational, with ongoing optimizations. +- **🚧 WIP**: Under active development. +- **🟡 Planned**: Scheduled for future implementation (some may have open PRs/RFCs). - **🟠 Delayed**: Temporarily dropped in V1 but planned to be re-introduced later. - **🔴 Deprecated**: Not planned for V1 unless there is strong demand. @@ -70,7 +70,7 @@ For each item, our progress towards V1 support falls into one of the following s |-----------------------------|------------------------------------------------------------------------------------| | **Decoder-only Models** | 🚀 Optimized | | **Encoder-Decoder Models** | 🟠 Delayed | -| **Embedding Models** | 🚧 WIP ([PR #16188](https://github.com/vllm-project/vllm/pull/16188)) | +| **Embedding Models** | 🟢 Functional | | **Mamba Models** | 🚧 WIP ([PR #19327](https://github.com/vllm-project/vllm/pull/19327)) | | **Multimodal Models** | 🟢 Functional | @@ -80,11 +80,11 @@ vLLM V1 currently excludes model architectures with the `SupportsV0Only` protoco This corresponds to the V1 column in our [list of supported models][supported-models]. -See below for the status of models that are still not yet supported in V1. +See below for the status of models that are not yet supported or have more features planned in V1. #### Embedding Models -The initial support will be provided by [PR #16188](https://github.com/vllm-project/vllm/pull/16188). +The initial basic support is now functional. Later, we will consider using [hidden states processor](https://github.com/vllm-project/vllm/issues/12249), which is based on [global logits processor](https://github.com/vllm-project/vllm/pull/13360) From 01220ce89a332a4105c0031933c5079036ceefa0 Mon Sep 17 00:00:00 2001 From: "Li, Jiang" Date: Thu, 19 Jun 2025 23:46:09 +0800 Subject: [PATCH 046/211] [CI][CPU] Improve dummy Triton interfaces and fix the CPU CI (#19838) Signed-off-by: jiang1.li --- requirements/cpu.txt | 3 --- vllm/triton_utils/importing.py | 2 ++ 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/requirements/cpu.txt b/requirements/cpu.txt index d7b0fc6d80a74..8742898cff00f 100644 --- a/requirements/cpu.txt +++ b/requirements/cpu.txt @@ -21,9 +21,6 @@ torchvision; platform_machine != "ppc64le" and platform_machine != "s390x" torchvision==0.22.0; platform_machine == "ppc64le" datasets # for benchmark scripts -# cpu cannot use triton 3.3.0 -triton==3.2.0; platform_machine == "x86_64" - # Intel Extension for PyTorch, only for x86_64 CPUs intel-openmp==2024.2.1; platform_machine == "x86_64" intel_extension_for_pytorch==2.7.0; platform_machine == "x86_64" diff --git a/vllm/triton_utils/importing.py b/vllm/triton_utils/importing.py index 21beb76f37cc7..dd30b2bc5f075 100644 --- a/vllm/triton_utils/importing.py +++ b/vllm/triton_utils/importing.py @@ -68,9 +68,11 @@ class TritonPlaceholder(types.ModuleType): def __init__(self): super().__init__("triton") + self.__version__ = "3.3.0" self.jit = self._dummy_decorator("jit") self.autotune = self._dummy_decorator("autotune") self.heuristics = self._dummy_decorator("heuristics") + self.Config = self._dummy_decorator("Config") self.language = TritonLanguagePlaceholder() logger.warning_once( "Triton is not installed. Using dummy decorators. " From ead2110297a65d8df8a28da3953eb449a0b6449c Mon Sep 17 00:00:00 2001 From: Alex Brooks Date: Thu, 19 Jun 2025 11:18:07 -0600 Subject: [PATCH 047/211] [Core][Bugfix] Fix Online MM Beam Search (#19688) Signed-off-by: Alex-Brooks --- tests/entrypoints/openai/test_vision.py | 31 +++++++++++++++++++++---- vllm/engine/protocol.py | 13 +++++++++-- vllm/entrypoints/llm.py | 13 ++++++----- 3 files changed, 45 insertions(+), 12 deletions(-) diff --git a/tests/entrypoints/openai/test_vision.py b/tests/entrypoints/openai/test_vision.py index 4513d8b3420f4..fd613842f9866 100644 --- a/tests/entrypoints/openai/test_vision.py +++ b/tests/entrypoints/openai/test_vision.py @@ -25,6 +25,25 @@ TEST_IMAGE_URLS = [ "https://upload.wikimedia.org/wikipedia/commons/0/0b/RGBA_comp.png", ] +EXPECTED_MM_BEAM_SEARCH_RES = [ + [ + "The image shows a wooden boardwalk leading through a", + "The image shows a wooden boardwalk extending into a", + ], + [ + "The image shows two parrots perched on", + "The image shows two birds perched on a cur", + ], + [ + "The image shows a Venn diagram with three over", + "This image shows a Venn diagram with three over", + ], + [ + "This image displays a gradient of colors ranging from", + "This image displays a gradient of colors transitioning from", + ], +] + @pytest.fixture(scope="module") def server(): @@ -270,10 +289,13 @@ async def test_single_chat_session_image_base64encoded( @pytest.mark.asyncio @pytest.mark.parametrize("model_name", [MODEL_NAME]) -@pytest.mark.parametrize("image_url", TEST_IMAGE_URLS) +@pytest.mark.parametrize("image_idx", list(range(len(TEST_IMAGE_URLS)))) async def test_single_chat_session_image_base64encoded_beamsearch( - client: openai.AsyncOpenAI, model_name: str, image_url: str, + client: openai.AsyncOpenAI, model_name: str, image_idx: int, base64_encoded_image: dict[str, str]): + # NOTE: This test also validates that we pass MM data through beam search + image_url = TEST_IMAGE_URLS[image_idx] + expected_res = EXPECTED_MM_BEAM_SEARCH_RES[image_idx] messages = [{ "role": @@ -297,10 +319,11 @@ async def test_single_chat_session_image_base64encoded_beamsearch( messages=messages, n=2, max_completion_tokens=10, + temperature=0.0, extra_body=dict(use_beam_search=True)) assert len(chat_completion.choices) == 2 - assert chat_completion.choices[ - 0].message.content != chat_completion.choices[1].message.content + for actual, expected_str in zip(chat_completion.choices, expected_res): + assert actual.message.content == expected_str @pytest.mark.asyncio diff --git a/vllm/engine/protocol.py b/vllm/engine/protocol.py index 727d59283643c..8688fcc82cd9b 100644 --- a/vllm/engine/protocol.py +++ b/vllm/engine/protocol.py @@ -88,9 +88,18 @@ class EngineClient(ABC): if processed_inputs["type"] == "embeds": raise NotImplementedError - prompt_token_ids = processed_inputs["prompt_token_ids"] + # This is a workaround to fix multimodal beam search; this is a + # bandaid fix for 2 small problems: + # 1. Multi_modal_data on the processed_inputs currently resolves to + # `None`. + # 2. preprocessing above expands the multimodal placeholders. However, + # this happens again in generation, so the double expansion causes + # a mismatch. + # TODO - would be ideal to handle this more gracefully. + prompt_token_ids = prompt.get("prompt_token_ids") + multi_modal_data = prompt.get("multi_modal_data") + prompt_text = processed_inputs.get("prompt") - multi_modal_data = processed_inputs.get("multi_modal_data") mm_processor_kwargs = processed_inputs.get("mm_processor_kwargs") tokenized_length = len(prompt_token_ids) diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 87810772fc2e2..d479d4c89f125 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -15,7 +15,8 @@ from tqdm.auto import tqdm from typing_extensions import TypeVar, deprecated from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput, - BeamSearchSequence, get_beam_search_score) + BeamSearchSequence, + create_sort_beams_key_function) from vllm.config import (CompilationConfig, ModelDType, TokenizerMode, is_init_field) from vllm.engine.arg_utils import (EngineArgs, HfOverrides, PoolerConfig, @@ -575,10 +576,11 @@ class LLM: lora_requests = self._get_beam_search_lora_requests( lora_request, prompts) - def sort_beams_key(x: BeamSearchSequence) -> float: - return get_beam_search_score(x.tokens, x.cum_logprob, - tokenizer.eos_token_id, - length_penalty) + tokenizer = self.get_tokenizer() + sort_beams_key = create_sort_beams_key_function( + tokenizer.eos_token_id, + length_penalty, + ) def create_tokens_prompt_from_beam( beam: BeamSearchSequence) -> TokensPrompt: @@ -593,7 +595,6 @@ class LLM: "mm_processor_kwargs"] = beam.mm_processor_kwargs return TokensPrompt(**token_prompt_kwargs) - tokenizer = self.get_tokenizer() # generate 2 * beam_width candidates at each step # following the huggingface transformers implementation # at https://github.com/huggingface/transformers/blob/e15687fffe5c9d20598a19aeab721ae0a7580f8a/src/transformers/generation/beam_search.py#L534 # noqa From ea10dd9d9e00a88705a6203ad3318a367f6c372e Mon Sep 17 00:00:00 2001 From: xzbdmw <97848247+xzbdmw@users.noreply.github.com> Date: Fri, 20 Jun 2025 02:49:59 +0800 Subject: [PATCH 048/211] [Frontend] early return chat format resolution when specified (#19735) --- vllm/entrypoints/chat_utils.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py index 95c806c228b82..7951c49f5da05 100644 --- a/vllm/entrypoints/chat_utils.py +++ b/vllm/entrypoints/chat_utils.py @@ -448,6 +448,9 @@ def resolve_chat_template_content_format( model_config: ModelConfig, trust_remote_code: Optional[bool] = None, ) -> _ChatTemplateContentFormat: + if given_format != "auto": + return given_format + detected_format = _resolve_chat_template_content_format( chat_template, tools, @@ -461,7 +464,7 @@ def resolve_chat_template_content_format( detected_format=detected_format, ) - return detected_format if given_format == "auto" else given_format + return detected_format From 10d82f9ac5bd83861f0265e3f30e90ca8ff2cf63 Mon Sep 17 00:00:00 2001 From: Robert Shaw <114415538+robertgshaw2-redhat@users.noreply.github.com> Date: Thu, 19 Jun 2025 21:30:41 -0400 Subject: [PATCH 049/211] [Benchmark][Bugfix] Fix Dataset Length Calculation (#19868) Signed-off-by: Robert Shaw Co-authored-by: Robert Shaw --- benchmarks/benchmark_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/benchmark_dataset.py b/benchmarks/benchmark_dataset.py index 5d2a26cd443c0..8671719bce72f 100644 --- a/benchmarks/benchmark_dataset.py +++ b/benchmarks/benchmark_dataset.py @@ -353,7 +353,7 @@ class RandomDataset(BenchmarkDataset): : input_lens[i] ] prompt = tokenizer.decode(re_encoded_sequence) - total_input_len = prefix_len + int(input_lens[i]) + total_input_len = len(re_encoded_sequence) requests.append( SampleRequest( prompt=prompt, From ee9a1531aad9bc5cb585a230b467927bfaa5fc39 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Fri, 20 Jun 2025 09:51:07 +0800 Subject: [PATCH 050/211] [CI/Build][Bugfix] Fix deadlock on v1 engine test CI (#19872) Signed-off-by: Isotr0py <2037008807@qq.com> --- tests/v1/engine/test_async_llm.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/v1/engine/test_async_llm.py b/tests/v1/engine/test_async_llm.py index 33a9d672024b6..e137452f2625f 100644 --- a/tests/v1/engine/test_async_llm.py +++ b/tests/v1/engine/test_async_llm.py @@ -383,7 +383,8 @@ async def test_check_health(monkeypatch: pytest.MonkeyPatch): with monkeypatch.context() as m, ExitStack() as after: m.setenv("VLLM_USE_V1", "1") - engine = AsyncLLM.from_engine_args(TEXT_ENGINE_ARGS) + with set_default_torch_num_threads(1): + engine = AsyncLLM.from_engine_args(TEXT_ENGINE_ARGS) after.callback(engine.shutdown) # Test 1: Healthy engine should not raise any exception From b6bad3d1865238faf304be5963f1a523304e1f43 Mon Sep 17 00:00:00 2001 From: Elaine Zhao Date: Thu, 19 Jun 2025 21:27:51 -0700 Subject: [PATCH 051/211] [CI][Neuron] Fail and exit on first error (#19622) Signed-off-by: Elaine Zhao Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- .buildkite/scripts/hardware_ci/run-neuron-test.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.buildkite/scripts/hardware_ci/run-neuron-test.sh b/.buildkite/scripts/hardware_ci/run-neuron-test.sh index 3d294ea5f8a75..a397457c83261 100644 --- a/.buildkite/scripts/hardware_ci/run-neuron-test.sh +++ b/.buildkite/scripts/hardware_ci/run-neuron-test.sh @@ -54,10 +54,11 @@ docker run --rm -it --device=/dev/neuron0 --network bridge \ --name "${container_name}" \ ${image_name} \ /bin/bash -c " + set -e; # Exit on first error python3 /workspace/vllm/examples/offline_inference/neuron.py; python3 -m pytest /workspace/vllm/tests/neuron/1_core/ -v --capture=tee-sys; for f in /workspace/vllm/tests/neuron/2_core/*.py; do - echo 'Running test file: '$f; + echo \"Running test file: \$f\"; python3 -m pytest \$f -v --capture=tee-sys; done " \ No newline at end of file From 5aa4a015ce4c85ad292a2f7d61df60a57ffc75b2 Mon Sep 17 00:00:00 2001 From: Brayden Zhong Date: Fri, 20 Jun 2025 00:28:55 -0400 Subject: [PATCH 052/211] [Benchmark] Fix `Value of type "SampleRequest" is not indexable` (#18032) Signed-off-by: Brayden Zhong --- benchmarks/benchmark_throughput.py | 2 +- vllm/benchmarks/throughput.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmarks/benchmark_throughput.py b/benchmarks/benchmark_throughput.py index 401ebe0bdb265..0ded34c70badd 100644 --- a/benchmarks/benchmark_throughput.py +++ b/benchmarks/benchmark_throughput.py @@ -97,7 +97,7 @@ def run_vllm( assert lora_requests is None, "BeamSearch API does not support LoRA" prompts = [request.prompt for request in requests] # output_len should be the same for all requests. - output_len = requests[0][2] + output_len = requests[0].expected_output_len for request in requests: assert request.expected_output_len == output_len start = time.perf_counter() diff --git a/vllm/benchmarks/throughput.py b/vllm/benchmarks/throughput.py index be9ea39f0c38e..af2ca96571286 100644 --- a/vllm/benchmarks/throughput.py +++ b/vllm/benchmarks/throughput.py @@ -84,7 +84,7 @@ def run_vllm( assert lora_requests is None, "BeamSearch API does not support LoRA" prompts = [request.prompt for request in requests] # output_len should be the same for all requests. - output_len = requests[0][2] + output_len = requests[0].expected_output_len for request in requests: assert request.expected_output_len == output_len start = time.perf_counter() From e41bf15cd04e6681249ab7d382cef6450a2115f5 Mon Sep 17 00:00:00 2001 From: Xerxes <58462889+Xerxes-cn@users.noreply.github.com> Date: Fri, 20 Jun 2025 12:43:07 +0800 Subject: [PATCH 053/211] [Chore]: qwen3-moe-type-hints-mistake (#19860) Co-authored-by: xinnan.hou --- vllm/model_executor/models/qwen3_moe.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/model_executor/models/qwen3_moe.py b/vllm/model_executor/models/qwen3_moe.py index 823197fc93503..417d7b22088bf 100644 --- a/vllm/model_executor/models/qwen3_moe.py +++ b/vllm/model_executor/models/qwen3_moe.py @@ -294,7 +294,7 @@ class Qwen3MoeDecoderLayer(nn.Module): positions: torch.Tensor, hidden_states: torch.Tensor, residual: Optional[torch.Tensor], - ) -> torch.Tensor: + ) -> tuple[torch.Tensor, torch.Tensor]: # Self Attention if residual is None: residual = hidden_states From e3a3e4db463d5fc45def4d39d256ccf42fb70044 Mon Sep 17 00:00:00 2001 From: qli88 Date: Thu, 19 Jun 2025 23:43:20 -0500 Subject: [PATCH 054/211] [Bugfix] Enable PP with AITER+V1 (#19822) Signed-off-by: Qiang Li --- vllm/model_executor/layers/layernorm.py | 1 - vllm/v1/attention/backends/mla/rocm_aiter_mla.py | 13 +++---------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/vllm/model_executor/layers/layernorm.py b/vllm/model_executor/layers/layernorm.py index b3c65e34178ad..e8d1fd6355059 100644 --- a/vllm/model_executor/layers/layernorm.py +++ b/vllm/model_executor/layers/layernorm.py @@ -45,7 +45,6 @@ def fused_add_rms_norm( def rocm_aiter_rms_norm(x: torch.Tensor, weight: torch.Tensor, variance_epsilon: float) -> torch.Tensor: - import aiter as rocm_aiter if x.dim() > 2: x_original_shape = x.shape diff --git a/vllm/v1/attention/backends/mla/rocm_aiter_mla.py b/vllm/v1/attention/backends/mla/rocm_aiter_mla.py index 9fbca2e955e72..8ad4e542b45b6 100644 --- a/vllm/v1/attention/backends/mla/rocm_aiter_mla.py +++ b/vllm/v1/attention/backends/mla/rocm_aiter_mla.py @@ -201,16 +201,9 @@ class AiterMLAImpl(MLACommonImpl[AiterMLAMetadata]): kv_buffer = kv_c_and_k_pe_cache.unsqueeze(2) - if self.num_heads == 16: - # AITER MLA decode kernel only supports - # max_seqlen_q=1 when using 16 heads. - max_seqlen_qo = 1 - else: - # AITER MLA decode Kernel handles arbitrary - # max_seqlen_q values when using 128 heads. - assert attn_metadata.prefill is not None - max_seqlen_qo = attn_metadata.prefill.max_query_len - + # max_seqlen_qo must be 1 except for MTP + # TODO: Find the best value for MTP + max_seqlen_qo = 1 aiter_mla_decode_fwd(q, kv_buffer, o, self.scale, attn_metadata.decode.qo_indptr, max_seqlen_qo, attn_metadata.decode.paged_kv_indptr, From 5e666f72cdb1aa7dade649d92a45e93983937fd2 Mon Sep 17 00:00:00 2001 From: kourosh hakhamaneshi <31483498+kouroshHakha@users.noreply.github.com> Date: Thu, 19 Jun 2025 22:01:16 -0700 Subject: [PATCH 055/211] [Bugfix][Ray] Set the cuda context eagerly in the ray worker (#19583) --- .buildkite/test-pipeline.yaml | 9 ++++ tests/cuda/test_cuda_context.py | 80 +++++++++++++++++++++++++++++++++ vllm/platforms/cuda.py | 11 +++++ vllm/platforms/interface.py | 7 +++ 4 files changed, 107 insertions(+) create mode 100644 tests/cuda/test_cuda_context.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 34ff145e6ed44..fe775bb370f29 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -271,6 +271,15 @@ steps: commands: - pytest -v -s prefix_caching + +- label: Platform Tests (CUDA) + mirror_hardwares: [amdexperimental] + source_file_dependencies: + - vllm/ + - tests/cuda + commands: + - pytest -v -s cuda/test_cuda_context.py + - label: Samplers Test # 36min mirror_hardwares: [amdexperimental] source_file_dependencies: diff --git a/tests/cuda/test_cuda_context.py b/tests/cuda/test_cuda_context.py new file mode 100644 index 0000000000000..f973b284b87e1 --- /dev/null +++ b/tests/cuda/test_cuda_context.py @@ -0,0 +1,80 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import ctypes +from concurrent.futures import ThreadPoolExecutor + +import pytest +import torch + +from vllm.platforms import current_platform + + +def check_cuda_context(): + """Check CUDA driver context status""" + try: + cuda = ctypes.CDLL('libcuda.so') + device = ctypes.c_int() + result = cuda.cuCtxGetDevice(ctypes.byref(device)) + return (True, device.value) if result == 0 else (False, None) + except Exception: + return False, None + + +def run_cuda_test_in_thread(device_input, expected_device_id): + """Run CUDA context test in separate thread for isolation""" + try: + # New thread should have no CUDA context initially + valid_before, device_before = check_cuda_context() + if valid_before: + return False, \ + "CUDA context should not exist in new thread, " \ + f"got device {device_before}" + + # Test setting CUDA context + current_platform.set_device(device_input) + + # Verify context is created correctly + valid_after, device_id = check_cuda_context() + if not valid_after: + return False, "CUDA context should be valid after set_cuda_context" + if device_id != expected_device_id: + return False, \ + f"Expected device {expected_device_id}, got {device_id}" + + return True, "Success" + except Exception as e: + return False, f"Exception in thread: {str(e)}" + + +class TestSetCudaContext: + """Test suite for the set_cuda_context function.""" + + @pytest.mark.skipif(not current_platform.is_cuda(), + reason="CUDA not available") + @pytest.mark.parametrize(argnames="device_input,expected_device_id", + argvalues=[ + (0, 0), + (torch.device('cuda:0'), 0), + ('cuda:0', 0), + ], + ids=["int", "torch_device", "string"]) + def test_set_cuda_context_parametrized(self, device_input, + expected_device_id): + """Test setting CUDA context in isolated threads.""" + with ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(run_cuda_test_in_thread, device_input, + expected_device_id) + success, message = future.result(timeout=30) + assert success, message + + @pytest.mark.skipif(not current_platform.is_cuda(), + reason="CUDA not available") + def test_set_cuda_context_invalid_device_type(self): + """Test error handling for invalid device type.""" + with pytest.raises(ValueError, match="Expected a cuda device"): + current_platform.set_device(torch.device('cpu')) + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/vllm/platforms/cuda.py b/vllm/platforms/cuda.py index 54719a3e79dd8..879d094f65788 100644 --- a/vllm/platforms/cuda.py +++ b/vllm/platforms/cuda.py @@ -71,6 +71,17 @@ class CudaPlatformBase(Platform): # though vLLM doesn't support these GPUs. return [torch.float32] + @classmethod + def set_device(cls, device: torch.device) -> None: + """ + Set the device for the current platform. + """ + super().set_device(device) + # With this trick we can force the device to be set eagerly + # see https://github.com/pytorch/pytorch/issues/155668 + # for why and when it is needed + _ = torch.zeros(1, device=device) + @classmethod def get_device_capability(cls, device_id: int = 0 diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index 3ff173dcd8c85..f962fafabf502 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -298,6 +298,13 @@ class Platform: np.random.seed(seed) torch.manual_seed(seed) + @classmethod + def set_device(cls, device: torch.device) -> None: + """ + Set the device for the current platform. + """ + torch.cuda.set_device(device) + @classmethod def pre_register_and_update(cls, parser: Optional[FlexibleArgumentParser] = None From 089a306f197dcc1152f2802ba1c56fbdeb86ac27 Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Fri, 20 Jun 2025 15:25:15 +0800 Subject: [PATCH 056/211] [Misc] update cuda version (#19526) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- examples/others/lmcache/cpu_offload_lmcache.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/examples/others/lmcache/cpu_offload_lmcache.py b/examples/others/lmcache/cpu_offload_lmcache.py index 9138b53679b3f..e10ee4e2a9a9a 100644 --- a/examples/others/lmcache/cpu_offload_lmcache.py +++ b/examples/others/lmcache/cpu_offload_lmcache.py @@ -17,7 +17,8 @@ Usage: (Without enable_chunked_prefill) Note that `lmcache` is needed to run this example. -Requirements: Linux, Python: 3.10 or higher, CUDA: 12.1 +Requirements: +https://docs.lmcache.ai/getting_started/installation.html#prerequisites Learn more about LMCache environment setup, please refer to: https://docs.lmcache.ai/getting_started/installation.html """ From e384f2f10824df7789c6da35256cf957788c0208 Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Fri, 20 Jun 2025 16:02:21 +0800 Subject: [PATCH 057/211] [Misc] refactor example - openai_transcription_client (#19851) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- .../openai_transcription_client.py | 39 ++++++++++++++++--- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/examples/online_serving/openai_transcription_client.py b/examples/online_serving/openai_transcription_client.py index 12d45de3c81b0..ae43cb5da7905 100644 --- a/examples/online_serving/openai_transcription_client.py +++ b/examples/online_serving/openai_transcription_client.py @@ -1,5 +1,23 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +""" +This script demonstrates how to use the vLLM API server to perform audio +transcription with the `openai/whisper-large-v3` model. + +Before running this script, you must start the vLLM server with the following command: + + vllm serve openai/whisper-large-v3 + +Requirements: +- vLLM with audio support +- openai Python SDK +- httpx for streaming support + +The script performs: +1. Synchronous transcription using OpenAI-compatible API. +2. Streaming transcription using raw HTTP request to the vLLM server. +""" + import asyncio import json @@ -21,6 +39,9 @@ client = OpenAI( def sync_openai(): + """ + Perform synchronous transcription using OpenAI-compatible API. + """ with open(str(mary_had_lamb), "rb") as f: transcription = client.audio.transcriptions.create( file=f, @@ -37,11 +58,11 @@ def sync_openai(): print("transcription result:", transcription.text) -sync_openai() - - # OpenAI Transcription API client does not support streaming. async def stream_openai_response(): + """ + Perform streaming transcription using vLLM's raw HTTP streaming API. + """ data = { "language": "en", "stream": True, @@ -68,7 +89,15 @@ async def stream_openai_response(): # Extract and print the content content = chunk["choices"][0].get("delta", {}).get("content") print(content, end="") + print() # Final newline after stream ends -# Run the asynchronous function -asyncio.run(stream_openai_response()) +def main(): + sync_openai() + + # Run the asynchronous function + asyncio.run(stream_openai_response()) + + +if __name__ == "__main__": + main() From 71d1219545b5139bf8f00fc72bdd3682cce62775 Mon Sep 17 00:00:00 2001 From: Ning Xie Date: Fri, 20 Jun 2025 18:50:13 +0800 Subject: [PATCH 058/211] [Kernel] correct cpu worker function parameter type (#19745) Signed-off-by: Andy Xie --- vllm/attention/ops/ipex_attn.py | 2 +- vllm/worker/cpu_worker.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/vllm/attention/ops/ipex_attn.py b/vllm/attention/ops/ipex_attn.py index 7207d0420a010..8919754989165 100644 --- a/vllm/attention/ops/ipex_attn.py +++ b/vllm/attention/ops/ipex_attn.py @@ -29,7 +29,7 @@ class _PagedAttention: head_size: int, *args, ) -> Tuple[int, ...]: - return (2, num_blocks, block_size * num_kv_heads * head_size) + return 2, num_blocks, block_size * num_kv_heads * head_size @staticmethod def split_kv_cache( diff --git a/vllm/worker/cpu_worker.py b/vllm/worker/cpu_worker.py index 9e834befd68ab..ff110e050bb6f 100644 --- a/vllm/worker/cpu_worker.py +++ b/vllm/worker/cpu_worker.py @@ -3,7 +3,7 @@ """A CPU worker class.""" import os from importlib import util -from typing import Dict, List, Optional, Set, Tuple, Type +from typing import List, Optional, Set, Tuple, Type import torch import torch.distributed @@ -88,13 +88,13 @@ class CPUCacheEngine: torch.empty(kv_cache_shape, dtype=self.dtype, device="cpu")) return kv_cache - def swap_in(self, src_to_dst: Dict[int, int]) -> None: + def swap_in(self, src_to_dst: torch.Tensor) -> None: raise NotImplementedError("Swap is not supported in CPUCacheEngine.") - def swap_out(self, src_to_dst: Dict[int, int]) -> None: + def swap_out(self, src_to_dst: torch.Tensor) -> None: raise NotImplementedError("Swap is not supported in CPUCacheEngine.") - def copy(self, src_to_dsts: Dict[int, List[int]]) -> None: + def copy(self, src_to_dsts: torch.Tensor) -> None: self.attn_backend.copy_blocks(self.cpu_cache, src_to_dsts) @staticmethod From 7771d1de882f53863f04d609723b8c29646ee5da Mon Sep 17 00:00:00 2001 From: Thomas Parnell Date: Fri, 20 Jun 2025 13:16:48 +0200 Subject: [PATCH 059/211] [Fix] import regex instead of re (#19875) Signed-off-by: Thomas Parnell --- vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py index 742e7bfdb3aa9..6dd8336e52de9 100644 --- a/vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py +++ b/vllm/entrypoints/openai/tool_parsers/xlam_tool_parser.py @@ -1,10 +1,11 @@ # SPDX-License-Identifier: Apache-2.0 # ruff: noqa import json -import re from collections.abc import Sequence from typing import Any, Dict, List, Optional, Union +import regex as re + from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, DeltaFunctionCall, DeltaMessage, DeltaToolCall, From f1e840e8429614d5bb2f928bcbec0d0469c70415 Mon Sep 17 00:00:00 2001 From: Adrian <69011980+nie3e@users.noreply.github.com> Date: Fri, 20 Jun 2025 14:07:41 +0200 Subject: [PATCH 060/211] [Model] GPT2ForSequenceClassification model (#19663) Signed-off-by: nie3e Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- tests/models/registry.py | 1 + vllm/model_executor/models/gpt2.py | 56 +++++++++++++++++++++++++- vllm/model_executor/models/registry.py | 1 + 3 files changed, 57 insertions(+), 1 deletion(-) diff --git a/tests/models/registry.py b/tests/models/registry.py index 82253a1c94b39..17dc901e28cab 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -267,6 +267,7 @@ _EMBEDDING_EXAMPLE_MODELS = { # [Text-only] "BertModel": _HfExamplesInfo("BAAI/bge-base-en-v1.5", v0_only=True), "Gemma2Model": _HfExamplesInfo("BAAI/bge-multilingual-gemma2", v0_only=True), # noqa: E501 + "GPT2ForSequenceClassification": _HfExamplesInfo("nie3e/sentiment-polish-gpt2-small"), # noqa: E501 "GritLM": _HfExamplesInfo("parasail-ai/GritLM-7B-vllm"), "GteModel": _HfExamplesInfo("Snowflake/snowflake-arctic-embed-m-v2.0", trust_remote_code=True), diff --git a/vllm/model_executor/models/gpt2.py b/vllm/model_executor/models/gpt2.py index fd3decbaebec4..27021550f9987 100644 --- a/vllm/model_executor/models/gpt2.py +++ b/vllm/model_executor/models/gpt2.py @@ -40,9 +40,11 @@ from vllm.model_executor.layers.quantization import QuantizationConfig from vllm.model_executor.layers.vocab_parallel_embedding import ( ParallelLMHead, VocabParallelEmbedding) from vllm.model_executor.model_loader.weight_utils import default_weight_loader +from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.model_executor.sampling_metadata import SamplingMetadata -from vllm.sequence import IntermediateTensors +from vllm.sequence import IntermediateTensors, PoolerOutput +from ..layers.pooler import Pooler, PoolingType from .interfaces import SupportsPP from .utils import (AutoWeightsLoader, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, @@ -318,6 +320,58 @@ class GPT2LMHeadModel(nn.Module, SupportsPP): return loader.load_weights(weights) +class GPT2ForSequenceClassification(nn.Module): + """GPT2 Model for sequence classification. + + This class expands GPT2Model with pooling and score functions - last token + is being used for classification. + + Attributes: + transformer: An instance of GPT2Model used for forward operations. + score: A layer for calculating logits. + _pooler: An instance of Pooler used for pooling operations. + """ + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + self.transformer = GPT2Model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "gpt2")) + self.score = nn.Linear(config.n_embd, config.num_labels, bias=False) + pooler_config = vllm_config.model_config.pooler_config + self._pooler = Pooler.from_config_with_defaults( + pooler_config, + pooling_type=PoolingType.LAST, + normalize=False, + softmax=True) + + def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]): + loader = AutoWeightsLoader(self) + return loader.load_weights(weights) + + def pooler( + self, + hidden_states: torch.Tensor, + pooling_metadata: PoolingMetadata, + ) -> Optional[PoolerOutput]: + return self._pooler(hidden_states, pooling_metadata) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + hidden_states = self.transformer( + input_ids=input_ids, + position_ids=positions, + inputs_embeds=inputs_embeds, + intermediate_tensors=intermediate_tensors) + logits = self.score(hidden_states) + return logits + + def _add_transformer_prefix( weights: Iterable[tuple[str, torch.Tensor]] ) -> Iterable[tuple[str, torch.Tensor]]: diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 83f7cc6eee0fd..95cb25e8f3c95 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -130,6 +130,7 @@ _EMBEDDING_MODELS = { "DeciLMForCausalLM": ("nemotron_nas", "DeciLMForCausalLM"), "Gemma2Model": ("gemma2", "Gemma2ForCausalLM"), "GlmForCausalLM": ("glm", "GlmForCausalLM"), + "GPT2ForSequenceClassification": ("gpt2", "GPT2ForSequenceClassification"), "GritLM": ("gritlm", "GritLM"), "GteModel": ("bert_with_rope", "SnowflakeGteNewModel"), "GteNewModel": ("bert_with_rope", "GteNewModel"), From 7e8977fcd4e9c3bf6b114c7dc715b28a61b5cdb0 Mon Sep 17 00:00:00 2001 From: "Chendi.Xue" Date: Fri, 20 Jun 2025 09:44:56 -0500 Subject: [PATCH 061/211] [custom_op][vllm-plugin] update custom_op class to use op_registry (#19164) Signed-off-by: Chendi.Xue --- .../plugins/vllm_add_dummy_platform/setup.py | 4 +- .../vllm_add_dummy_platform/__init__.py | 4 ++ .../dummy_attention_backend.py | 5 +- .../dummy_custom_ops.py | 20 +++++++ .../vllm_add_dummy_platform/dummy_platform.py | 23 +++++++- tests/plugins_tests/test_platform_plugins.py | 14 +++++ vllm/model_executor/custom_op.py | 56 +++++++++++++++++++ 7 files changed, 120 insertions(+), 6 deletions(-) create mode 100644 tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_custom_ops.py diff --git a/tests/plugins/vllm_add_dummy_platform/setup.py b/tests/plugins/vllm_add_dummy_platform/setup.py index e40f62f7749be..a531826628cda 100644 --- a/tests/plugins/vllm_add_dummy_platform/setup.py +++ b/tests/plugins/vllm_add_dummy_platform/setup.py @@ -10,5 +10,7 @@ setup( entry_points={ 'vllm.platform_plugins': [ "dummy_platform_plugin = vllm_add_dummy_platform:dummy_platform_plugin" # noqa - ] + ], + "vllm.general_plugins": + ["dummy_custom_ops = vllm_add_dummy_platform:register_ops"], }) diff --git a/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/__init__.py b/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/__init__.py index 1b28342eb1791..c4fe6ed197f6b 100644 --- a/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/__init__.py +++ b/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/__init__.py @@ -6,3 +6,7 @@ from typing import Optional def dummy_platform_plugin() -> Optional[str]: return "vllm_add_dummy_platform.dummy_platform.DummyPlatform" + + +def register_ops(): + import vllm_add_dummy_platform.dummy_custom_ops # noqa diff --git a/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_attention_backend.py b/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_attention_backend.py index f30a36f35f5d5..e38fb2fbf934e 100644 --- a/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_attention_backend.py +++ b/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_attention_backend.py @@ -1,10 +1,11 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -from vllm.attention.backends.flash_attn import FlashAttentionBackend +from vllm.attention.backends.placeholder_attn import ( + PlaceholderAttentionBackend) -class DummyAttentionBackend(FlashAttentionBackend): +class DummyAttentionBackend(PlaceholderAttentionBackend): @staticmethod def get_name() -> str: diff --git a/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_custom_ops.py b/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_custom_ops.py new file mode 100644 index 0000000000000..1fcc3fc666173 --- /dev/null +++ b/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_custom_ops.py @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import torch + +from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding + + +# Register CustomRotaryEmbedding to CustomOP. +@RotaryEmbedding.register_oot +class DummyRotaryEmbedding(RotaryEmbedding): + """Original rotary positional embedding.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.addition_config = True + + def forward_oot(self, *args, + **kwargs) -> tuple[torch.Tensor, torch.Tensor]: + return super().forward_oot(*args, **kwargs) diff --git a/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_platform.py b/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_platform.py index 67cd5ed3b73df..e67825f89d815 100644 --- a/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_platform.py +++ b/tests/plugins/vllm_add_dummy_platform/vllm_add_dummy_platform/dummy_platform.py @@ -1,12 +1,29 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from typing import TYPE_CHECKING -from vllm.platforms.cuda import CudaPlatform +from vllm.platforms.interface import Platform, PlatformEnum + +if TYPE_CHECKING: + from vllm.config import VllmConfig +else: + VllmConfig = None +from vllm import envs -class DummyPlatform(CudaPlatform): +class DummyPlatform(Platform): + _enum = PlatformEnum.OOT device_name = "DummyDevice" + device_type: str = "privateuseone" + dispatch_key: str = "PrivateUse1" + + @classmethod + def check_and_update_config(cls, vllm_config: VllmConfig) -> None: + if envs.VLLM_USE_V1: + compilation_config = vllm_config.compilation_config + # Activate custom ops for v1. + compilation_config.custom_ops = ["all"] def get_attn_backend_cls(self, backend_name, head_size, dtype, kv_cache_dtype, block_size, use_v1, use_mla): - return "vllm_add_dummy_platform.dummy_attention_backend.DummyAttentionBackend" # noqa E501 + return "vllm_add_dummy_platform.dummy_attention_backend.DummyAttentionBackend" # noqa E501 \ No newline at end of file diff --git a/tests/plugins_tests/test_platform_plugins.py b/tests/plugins_tests/test_platform_plugins.py index 685a8cd2c8b82..ef99c3dadd32c 100644 --- a/tests/plugins_tests/test_platform_plugins.py +++ b/tests/plugins_tests/test_platform_plugins.py @@ -5,6 +5,7 @@ import pytest import torch from vllm.attention.selector import get_attn_backend +from vllm.plugins import load_general_plugins from vllm.utils import STR_BACKEND_ENV_VAR, STR_INVALID_VAL @@ -32,3 +33,16 @@ def test_oot_attention_backend(monkeypatch: pytest.MonkeyPatch): m.setenv(STR_BACKEND_ENV_VAR, STR_INVALID_VAL) backend = get_attn_backend(16, torch.float16, "auto", 16, False) assert backend.get_name() == "Dummy_Backend" + + +def test_oot_custom_op(monkeypatch: pytest.MonkeyPatch): + # simulate workload by running an example + load_general_plugins() + from vllm.model_executor.layers.rotary_embedding import RotaryEmbedding + layer = RotaryEmbedding(16, 16, 16, 16, True, torch.float16) + assert layer.__class__.__name__ == "DummyRotaryEmbedding", ( + f"Expected DummyRotaryEmbedding, got {layer.__class__.__name__}, " + "possibly because the custom op is not registered correctly.") + assert hasattr(layer, "addition_config"), ( + "Expected DummyRotaryEmbedding to have an 'addition_config' attribute, " + "which is set by the custom op.") diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py index 7e6cdd9875106..1680b723d6a29 100644 --- a/vllm/model_executor/custom_op.py +++ b/vllm/model_executor/custom_op.py @@ -1,6 +1,8 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from typing import Optional + import torch.nn as nn from vllm.config import get_current_vllm_config @@ -16,6 +18,24 @@ class CustomOp(nn.Module): Dispatches the forward method to the appropriate backend. """ + def __new__(cls, *args, **kwargs): + try: + op_name = cls.__name__ + except AttributeError: + raise TypeError( + f"Cannot instantiate '{cls.__name__}': its 'name' attribute " + f"was not set, possibly because it was not decorated with " + f"@CustomOp.register, or it's the CustomOp base class itself." + ) from None + + if op_name not in cls.op_registry_oot: + op_cls_to_instantiate = cls + else: + op_cls_to_instantiate = cls.op_registry_oot[op_name] + logger.debug("Instantiating custom op: %s using %s", op_name, + str(op_cls_to_instantiate)) + return super().__new__(op_cls_to_instantiate) + def __init__(self): super().__init__() self._forward_method = self.dispatch_forward() @@ -138,6 +158,7 @@ class CustomOp(nn.Module): # - MyOp.enabled() # - op_registry["my_op"].enabled() op_registry: dict[str, type['CustomOp']] = {} + op_registry_oot: dict[str, type['CustomOp']] = {} # Decorator to register custom ops. @classmethod @@ -150,3 +171,38 @@ class CustomOp(nn.Module): return op_cls return decorator + + # Decorator to register out-of-tree(oot) custom ops. + # For OOT custom ops: + # if in-tree layer class is registered with an oot_custom_op layer, + # the oot_custom_op layer will be used instead. + # Example: + # - @UnquantizedFusedMoEMethod.register_oot + # class HPUUnquantizedFusedMoEMethod(UnquantizedFusedMoEMethod) + # or + # - @CustomOP.register_oot(name="UnquantizedFusedMoEMethod") + @classmethod + def register_oot(cls, _decorated_op_cls=None, name: Optional[str] = None): + + def decorator(op_cls): + reg_name = name if name is not None else cls.__name__ + assert reg_name not in cls.op_registry_oot, \ + f"Duplicate op name: {reg_name}" + op_cls.name = reg_name + cls.op_registry_oot[reg_name] = op_cls + return op_cls + + if _decorated_op_cls is None: + # Called with parentheses: @CustomOP.register_oot() + # or @CustomOP.register_oot(name="...") + # So, _decorated_op_cls is None. + # We return the actual decorator function. + return decorator + elif isinstance(_decorated_op_cls, type): # Check if it's a class + # Called without parentheses: @CustomOP.register_oot + # The first argument is the class itself. + # We call the 'decorator' function immediately with the class. + return decorator(_decorated_op_cls) + else: + # Handle other unexpected cases if necessary + raise TypeError("Decorator can only be applied to classes.") From 2e3e3c86dc5d14d0ee8f782f5caedc9b999a63c7 Mon Sep 17 00:00:00 2001 From: Vlad Tiberiu Mihailescu Date: Fri, 20 Jun 2025 07:47:16 -0700 Subject: [PATCH 062/211] Export NaNs in logits to scheduler_stats if output is corrupted (#18777) Signed-off-by: Vlad Mihailescu --- tests/v1/worker/test_gpu_model_runner.py | 49 ++++++++++++++++++++++++ vllm/envs.py | 9 ++++- vllm/v1/core/sched/scheduler.py | 7 ++++ vllm/v1/metrics/stats.py | 2 + vllm/v1/outputs.py | 6 ++- vllm/v1/request.py | 8 ++++ vllm/v1/worker/gpu_model_runner.py | 25 ++++++++++++ 7 files changed, 104 insertions(+), 2 deletions(-) diff --git a/tests/v1/worker/test_gpu_model_runner.py b/tests/v1/worker/test_gpu_model_runner.py index abf14a8fb6250..583a88d8e6ec6 100644 --- a/tests/v1/worker/test_gpu_model_runner.py +++ b/tests/v1/worker/test_gpu_model_runner.py @@ -4,6 +4,7 @@ import random import pytest +import torch from vllm.attention import Attention from vllm.config import (CacheConfig, ModelConfig, ParallelConfig, @@ -277,6 +278,54 @@ def test_update_states_request_resumed(model_runner): assert _is_req_state_block_table_match(model_runner, req_id) +def test_get_nans_in_logits(model_runner): + req_ids = ("req_0", "req_1") + + scheduler_output = _schedule_new_request(*req_ids) + model_runner._update_states(scheduler_output) + + logits = torch.tensor([ + [1.0, 2.0, 3.0], + [3.0, 2.0, 1.0], + ], device=DEVICE) + result = model_runner._get_nans_in_logits(logits) + assert result == {"req_0": 0, "req_1": 0} + + logits = torch.tensor([ + [1.0, float('nan'), 3.0], + [4.0, float('nan'), float('nan')], + ], + device=DEVICE) + result = model_runner._get_nans_in_logits(logits) + assert result == {"req_0": 1, "req_1": 2} + + logits = torch.tensor([ + [1.0, 2.0, 3.0], + [4.0, float('nan'), float('nan')], + ], + device=DEVICE) + result = model_runner._get_nans_in_logits(logits) + assert result == {"req_0": 0, "req_1": 2} + + result = model_runner._get_nans_in_logits(logits=None) + assert result == {"req_0": 0, "req_1": 0} + + logits = torch.tensor([ + [1.0, float('nan'), 3.0], + ], device=DEVICE) + result = model_runner._get_nans_in_logits(logits) + assert result == {'req_0': 1, 'req_1': 0} + + logits = torch.tensor([ + [float('nan'), float('nan'), 2.0], + [1.0, 2.0, 3.0], + [float('nan'), 2.0, 3.0], + ], + device=DEVICE) + result = model_runner._get_nans_in_logits(logits) + assert result == {'req_0': 2, 'req_1': 0} + + def test_update_states_no_changes(model_runner): req_id = "req_0" diff --git a/vllm/envs.py b/vllm/envs.py index c7604d6dfeb84..b1030997f25ad 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -130,6 +130,7 @@ if TYPE_CHECKING: VLLM_SLEEP_WHEN_IDLE: bool = False VLLM_MQ_MAX_CHUNK_BYTES_MB: int = 16 VLLM_KV_CACHE_LAYOUT: Optional[str] = None + VLLM_COMPUTE_NANS_IN_LOGITS: bool = False def get_default_cache_root(): @@ -897,7 +898,13 @@ environment_variables: dict[str, Callable[[], Any]] = { # leave the layout choice to the backend. Mind that backends may only # implement and support a subset of all possible layouts. "VLLM_KV_CACHE_LAYOUT": - lambda: os.getenv("VLLM_KV_CACHE_LAYOUT", None) + lambda: os.getenv("VLLM_KV_CACHE_LAYOUT", None), + + # Enable checking whether the generated logits contain NaNs, + # indicating corrupted output. Useful for debugging low level bugs + # or bad hardware but it may add compute overhead. + "VLLM_COMPUTE_NANS_IN_LOGITS": + lambda: bool(int(os.getenv("VLLM_COMPUTE_NANS_IN_LOGITS", "0"))), } # --8<-- [end:env-vars-definition] diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index 16e76defdf721..0958366e0aca7 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -717,6 +717,7 @@ class Scheduler(SchedulerInterface): prompt_logprobs_dict = model_runner_output.prompt_logprobs_dict num_scheduled_tokens = scheduler_output.num_scheduled_tokens pooler_outputs = model_runner_output.pooler_output + num_nans_in_logits = model_runner_output.num_nans_in_logits new_running: list[Request] = [] outputs: dict[int, list[EngineCoreOutput]] = defaultdict(list) @@ -810,6 +811,10 @@ class Scheduler(SchedulerInterface): request.structured_output_request.grammar.accept_tokens( # type: ignore[union-attr] req_id, new_token_ids) + # spec_token_ids comes from the model runner output + if num_nans_in_logits is not None and req_id in num_nans_in_logits: + request.num_nans_in_logits = num_nans_in_logits[req_id] + # Add newly generated spec token ids to the request. if spec_token_ids is not None: if self.structured_output_manager.should_advance(request): @@ -972,6 +977,8 @@ class Scheduler(SchedulerInterface): kv_cache_usage=self.kv_cache_manager.usage, prefix_cache_stats=prefix_cache_stats, spec_decoding_stats=spec_decoding_stats, + num_corrupted_reqs=sum(req.is_output_corrupted + for req in self.running), ) def make_spec_decoding_stats( diff --git a/vllm/v1/metrics/stats.py b/vllm/v1/metrics/stats.py index 716f40fffb282..1eb10ccb6c493 100644 --- a/vllm/v1/metrics/stats.py +++ b/vllm/v1/metrics/stats.py @@ -40,6 +40,8 @@ class SchedulerStats: spec_decoding_stats: Optional[SpecDecodingStats] = None + num_corrupted_reqs: int = 0 + @dataclass class LoRAStats: diff --git a/vllm/v1/outputs.py b/vllm/v1/outputs.py index 2234843293cc6..f78623f571b2d 100644 --- a/vllm/v1/outputs.py +++ b/vllm/v1/outputs.py @@ -108,6 +108,9 @@ class ModelRunnerOutput: finished_sending: Optional[set[str]] = None finished_recving: Optional[set[str]] = None + # req_id -> num_nans_in_logits + num_nans_in_logits: Optional[dict[str, int]] = None + EMPTY_MODEL_RUNNER_OUTPUT = ModelRunnerOutput(req_ids=[], req_id_to_index={}, @@ -117,4 +120,5 @@ EMPTY_MODEL_RUNNER_OUTPUT = ModelRunnerOutput(req_ids=[], prompt_logprobs_dict={}, pooler_output=[], finished_sending=None, - finished_recving=None) + finished_recving=None, + num_nans_in_logits=None) diff --git a/vllm/v1/request.py b/vllm/v1/request.py index e3f3a418755c3..4632884419aed 100644 --- a/vllm/v1/request.py +++ b/vllm/v1/request.py @@ -97,6 +97,10 @@ class Request: # The number of tokens with prefix cache hits. self.num_cached_tokens = -1 + # The number of NaNs in logits. A value greater than 0 + # indicates that the output is corrupted + self.num_nans_in_logits = 0 + @classmethod def from_engine_core_request(cls, request: EngineCoreRequest) -> "Request": if request.mm_inputs is not None: @@ -132,6 +136,10 @@ class Request: self._output_token_ids.extend(token_ids) self._all_token_ids.extend(token_ids) + @property + def is_output_corrupted(self) -> bool: + return self.num_nans_in_logits > 0 + @property def num_tokens(self) -> int: return len(self._all_token_ids) diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index f96fb64342c9f..3303660061183 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -1431,6 +1431,10 @@ class GPUModelRunner(LoRAModelRunnerMixin): ) sampler_output.sampled_token_ids = output_token_ids + num_nans_in_logits = {} + if envs.VLLM_COMPUTE_NANS_IN_LOGITS: + num_nans_in_logits = self._get_nans_in_logits(logits) + # TODO(woosuk): The following loop can be slow since it iterates over # the requests one by one. Optimize. discard_sampled_tokens_req_indices = [] @@ -1601,6 +1605,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): pooler_output=[], finished_sending=finished_sending, finished_recving=finished_recving, + num_nans_in_logits=num_nans_in_logits, ) def kv_connector_no_forward( @@ -1826,6 +1831,26 @@ class GPUModelRunner(LoRAModelRunnerMixin): return prompt_logprobs_dict + def _get_nans_in_logits( + self, + logits: Optional[torch.Tensor], + ) -> dict[str, int]: + try: + if logits is None: + return {req_id: 0 for req_id in self.input_batch.req_ids} + + num_nans_in_logits = {} + num_nans_for_index = logits.isnan().sum(dim=-1).cpu().numpy() + for req_id in self.input_batch.req_ids: + req_index = self.input_batch.req_id_to_index[req_id] + num_nans_in_logits[req_id] = ( + int(num_nans_for_index[req_index]) + if num_nans_for_index is not None + and req_index < logits.shape[0] else 0) + return num_nans_in_logits + except IndexError: + return {} + @contextmanager def maybe_randomize_inputs(self, input_ids: torch.Tensor): """ From 79f2f1c2a1999d1e7a5202062bad4e115fd9d775 Mon Sep 17 00:00:00 2001 From: "Li, Jiang" Date: Fri, 20 Jun 2025 23:30:36 +0800 Subject: [PATCH 063/211] [CPU][CI] Fallback sliding window to v0 and fix CPU pooling model tests (#19901) Signed-off-by: jiang1.li --- tests/models/language/pooling/test_embedding.py | 7 ++++++- vllm/engine/arg_utils.py | 7 +++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/tests/models/language/pooling/test_embedding.py b/tests/models/language/pooling/test_embedding.py index e29b4f6e8bec9..5ef9f768c5744 100644 --- a/tests/models/language/pooling/test_embedding.py +++ b/tests/models/language/pooling/test_embedding.py @@ -1,5 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import os + import pytest from vllm.config import PoolerConfig @@ -33,7 +35,7 @@ def v1(run_with_both_engines): # To avoid this problem, for now we skip v0 since it will be # deprecated anyway. pytest.param("ssmits/Qwen2-7B-Instruct-embed-base", - marks=[pytest.mark.skip_v0]), + marks=[pytest.mark.skip_v0, pytest.mark.cpu_model]), # [Encoder-only] pytest.param("BAAI/bge-base-en-v1.5", marks=[ @@ -58,6 +60,9 @@ def test_models( model, monkeypatch, ) -> None: + if model == "intfloat/e5-mistral-7b-instruct" and current_platform.is_cpu( + ) and os.environ.get("VLLM_USE_V1", "0") == "1": + pytest.skip("CPU V1 doesn't support sliding window") if model == "BAAI/bge-multilingual-gemma2" and current_platform.is_rocm(): # ROCm Triton FA does not currently support sliding window attention diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 7a88e3269a5ed..bffc8ba8c907f 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1449,6 +1449,13 @@ class EngineArgs: model_config=model_config) and _warn_or_fallback( current_platform.device_name): return False + + if (current_platform.is_cpu() + and model_config.get_sliding_window() is not None): + _raise_or_fallback(feature_name="sliding window (CPU backend)", + recommend_to_remove=False) + return False + ############################################################# return True From 71baf85ae11be24d4ea32d30cb5b8dfb0912a6cc Mon Sep 17 00:00:00 2001 From: Ning Xie Date: Sat, 21 Jun 2025 02:18:11 +0800 Subject: [PATCH 064/211] [Kernel] mark TorchSDPABackend swap_blocks NotImplementedError (#19749) --- vllm/attention/backends/torch_sdpa.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/attention/backends/torch_sdpa.py b/vllm/attention/backends/torch_sdpa.py index 3e1336a5ac3b2..af5fe81dc883c 100644 --- a/vllm/attention/backends/torch_sdpa.py +++ b/vllm/attention/backends/torch_sdpa.py @@ -65,7 +65,7 @@ class TorchSDPABackend(AttentionBackend): dst_kv_cache: torch.Tensor, src_to_dst: torch.Tensor, ) -> None: - PagedAttention.swap_blocks(src_kv_cache, dst_kv_cache, src_to_dst) + raise NotImplementedError("Swap is not supported in TorchSDPABackend.") @staticmethod def copy_blocks( From e773a9e1c2c175e193b383ed497ad6fcb73cdfe5 Mon Sep 17 00:00:00 2001 From: wangxiyuan Date: Sat, 21 Jun 2025 05:09:09 +0800 Subject: [PATCH 065/211] [Misc] Clean up useless code (#19889) Signed-off-by: wangxiyuan --- vllm/config.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 54c7a497b261b..508cdfaec1c46 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1900,17 +1900,6 @@ class ParallelConfig: os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0" logger.info("Disabling V1 multiprocessing for external launcher.") - ray_only_devices: list[str] = [] - from vllm.platforms import current_platform - if (current_platform.device_type in ray_only_devices - and self.world_size > 1): - if self.distributed_executor_backend is None: - self.distributed_executor_backend = "ray" - if self.distributed_executor_backend != "ray": - raise ValueError( - f"{current_platform.device_type.upper()} backend only " - "supports Ray for distributed inference.") - if self.distributed_executor_backend is None and self.world_size > 1: # We use multiprocessing by default if world_size fits on the # current node and we aren't in a ray placement group. From 8ca81bb0691bf8909ecb6eb4dd43f4af6dcaaa66 Mon Sep 17 00:00:00 2001 From: Rabin Adhikari Date: Sat, 21 Jun 2025 01:03:17 +0200 Subject: [PATCH 066/211] Fix: Check the type of params to be a Sequence not list. (#19910) Signed-off-by: Rabin Adhikari --- vllm/entrypoints/llm.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index d479d4c89f125..05e0be61adad9 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -1450,15 +1450,15 @@ class LLM: prompts = [prompts] num_requests = len(prompts) - if isinstance(params, list) and len(params) != num_requests: + if isinstance(params, Sequence) and len(params) != num_requests: raise ValueError("The lengths of prompts and params " "must be the same.") if isinstance(lora_request, - list) and len(lora_request) != num_requests: + Sequence) and len(lora_request) != num_requests: raise ValueError("The lengths of prompts and lora_request " "must be the same.") - for sp in params if isinstance(params, list) else (params, ): + for sp in params if isinstance(params, Sequence) else (params, ): if isinstance(sp, SamplingParams): self._add_guided_params(sp, guided_options) From 6f170f11dddcfafa061785d4fb4993f7bcb16107 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Sat, 21 Jun 2025 11:29:09 +0800 Subject: [PATCH 067/211] [Bugfix] Fix bnb 8bit model weights loading (#19917) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/model_executor/model_loader/bitsandbytes_loader.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vllm/model_executor/model_loader/bitsandbytes_loader.py b/vllm/model_executor/model_loader/bitsandbytes_loader.py index a0a5372600f3d..09857ef297f0a 100644 --- a/vllm/model_executor/model_loader/bitsandbytes_loader.py +++ b/vllm/model_executor/model_loader/bitsandbytes_loader.py @@ -577,10 +577,10 @@ def dequantize_dq(quant_states: dict) -> None: thereby avoiding this computational overhead during inference. This comes at the cost of increased memory usage. """ - from bitsandbytes.functional import dequantize_blockwise + from bitsandbytes.functional import QuantState, dequantize_blockwise for _, quant_state in quant_states.items(): # Copied from: https://github.com/bitsandbytes-foundation/bitsandbytes/blob/0.45.3/bitsandbytes/functional.py#L1352-#L1356 - if quant_state.nested: + if isinstance(quant_state, QuantState) and quant_state.nested: absmax = dequantize_blockwise(quant_state.absmax, quant_state.state2) absmax += quant_state.offset From c3bf9bad11193ee684ed6083b6692d0b5bf2bac7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B1=AA=E5=BF=97=E9=B9=8F?= Date: Sat, 21 Jun 2025 12:01:51 +0800 Subject: [PATCH 068/211] [New model support]Support Tarsier2 (#19887) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: 汪志鹏 --- docs/models/supported_models.md | 1 + examples/offline_inference/vision_language.py | 32 +++++++ .../vision_language_multi_image.py | 27 ++++++ .../multimodal/processing/test_common.py | 1 + tests/models/registry.py | 2 + vllm/model_executor/models/qwen2_vl.py | 89 ++++++++++++++++++- vllm/model_executor/models/registry.py | 1 + 7 files changed, 152 insertions(+), 1 deletion(-) diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md index 60f7dacebfa21..803d2938d2b1e 100644 --- a/docs/models/supported_models.md +++ b/docs/models/supported_models.md @@ -562,6 +562,7 @@ Specified using `--task generate`. | `SkyworkR1VChatModel` | Skywork-R1V-38B | T + I | `Skywork/Skywork-R1V-38B` | | ✅︎ | ✅︎ | | `SmolVLMForConditionalGeneration` | SmolVLM2 | T + I | `SmolVLM2-2.2B-Instruct` | ✅︎ | | ✅︎ | | `TarsierForConditionalGeneration` | Tarsier | T + IE+ | `omni-search/Tarsier-7b`,`omni-search/Tarsier-34b` | | ✅︎ | ✅︎ | +| `Tarsier2ForConditionalGeneration`^ | Tarsier2 | T + IE+ + VE+ | `omni-research/Tarsier2-Recap-7b`,`omni-research/Tarsier2-7b-0115` | | ✅︎ | ✅︎ | ^ You need to set the architecture name via `--hf-overrides` to match the one in vLLM.     • For example, to use DeepSeek-VL2 series models: diff --git a/examples/offline_inference/vision_language.py b/examples/offline_inference/vision_language.py index 15dbd9f44128a..57b042ed013b1 100644 --- a/examples/offline_inference/vision_language.py +++ b/examples/offline_inference/vision_language.py @@ -1040,6 +1040,37 @@ def run_qwen2_5_omni(questions: list[str], modality: str): ) +def run_tarsier2(questions: list[str], modality: str) -> ModelRequestData: + model_name = "omni-research/Tarsier2-Recap-7b" + + engine_args = EngineArgs( + model=model_name, + max_model_len=4096, + hf_overrides={"architectures": ["Tarsier2ForConditionalGeneration"]}, + limit_mm_per_prompt={modality: 1}, + ) + + if modality == "image": + placeholder = "<|image_pad|>" + elif modality == "video": + placeholder = "<|video_pad|>" + + prompts = [ + ( + "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" + f"<|im_start|>user\n<|vision_start|>{placeholder}<|vision_end|>" + f"{question}<|im_end|>\n" + "<|im_start|>assistant\n" + ) + for question in questions + ] + + return ModelRequestData( + engine_args=engine_args, + prompts=prompts, + ) + + # SkyworkR1V def run_skyworkr1v(questions: list[str], modality: str) -> ModelRequestData: assert modality == "image" @@ -1112,6 +1143,7 @@ model_example_map = { "skywork_chat": run_skyworkr1v, "smolvlm": run_smolvlm, "tarsier": run_tarsier, + "tarsier2": run_tarsier2, } diff --git a/examples/offline_inference/vision_language_multi_image.py b/examples/offline_inference/vision_language_multi_image.py index e55181e4f490f..edddd429364dc 100644 --- a/examples/offline_inference/vision_language_multi_image.py +++ b/examples/offline_inference/vision_language_multi_image.py @@ -828,6 +828,32 @@ def load_tarsier(question: str, image_urls: list[str]) -> ModelRequestData: ) +def load_tarsier2(question: str, image_urls: list[str]) -> ModelRequestData: + model_name = "omni-research/Tarsier2-Recap-7b" + + engine_args = EngineArgs( + model=model_name, + trust_remote_code=True, + max_model_len=32768, + limit_mm_per_prompt={"image": len(image_urls)}, + hf_overrides={"architectures": ["Tarsier2ForConditionalGeneration"]}, + ) + + prompt = ( + "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n" + f"<|im_start|>user\n<|vision_start|>{'<|image_pad|>' * len(image_urls)}" + f"<|vision_end|>{question}<|im_end|>\n" + "<|im_start|>assistant\n" + ) + image_data = [fetch_image(url) for url in image_urls] + + return ModelRequestData( + engine_args=engine_args, + prompt=prompt, + image_data=image_data, + ) + + model_example_map = { "aria": load_aria, "aya_vision": load_aya_vision, @@ -853,6 +879,7 @@ model_example_map = { "qwen2_5_vl": load_qwen2_5_vl, "smolvlm": load_smolvlm, "tarsier": load_tarsier, + "tarsier2": load_tarsier2, } diff --git a/tests/models/multimodal/processing/test_common.py b/tests/models/multimodal/processing/test_common.py index 1e6608955b31b..1ba60178c13db 100644 --- a/tests/models/multimodal/processing/test_common.py +++ b/tests/models/multimodal/processing/test_common.py @@ -284,6 +284,7 @@ def _test_processing_correctness_one( "fixie-ai/ultravox-v0_5-llama-3_2-1b", "openai/whisper-large-v3", "omni-research/Tarsier-7b", + "omni-research/Tarsier2-Recap-7b" ]) @pytest.mark.parametrize("hit_rate", [0.3, 0.5, 1.0]) @pytest.mark.parametrize("num_batches", [32]) diff --git a/tests/models/registry.py b/tests/models/registry.py index 17dc901e28cab..49510af880cf5 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -398,6 +398,8 @@ _MULTIMODAL_EXAMPLE_MODELS = { trust_remote_code=True), "TarsierForConditionalGeneration": _HfExamplesInfo("omni-research/Tarsier-7b", # noqa: E501 hf_overrides={"architectures": ["TarsierForConditionalGeneration"]}), # noqa: E501 + "Tarsier2ForConditionalGeneration": _HfExamplesInfo("omni-research/Tarsier2-Recap-7b", # noqa: E501 + hf_overrides={"architectures": ["Tarsier2ForConditionalGeneration"]}), # noqa: E501 # [Encoder-decoder] # Florence-2 uses BartFastTokenizer which can't be loaded from AutoTokenizer # Therefore, we borrow the BartTokenizer from the original Bart model diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 690b8e02c2fde..7a6ebe10c516b 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -32,12 +32,14 @@ import torch import torch.nn as nn import torch.nn.functional as F from einops import rearrange, repeat -from transformers import BatchFeature +from transformers import AutoConfig, BatchFeature from transformers.models.qwen2_vl import (Qwen2VLImageProcessor, Qwen2VLProcessor) from transformers.models.qwen2_vl.configuration_qwen2_vl import ( Qwen2VLConfig, Qwen2VLVisionConfig) from transformers.models.qwen2_vl.image_processing_qwen2_vl import smart_resize +from transformers.models.qwen2_vl.video_processing_qwen2_vl import ( + Qwen2VLVideoProcessor) from vllm.config import VllmConfig from vllm.distributed import parallel_state, tensor_model_parallel_all_gather @@ -69,6 +71,7 @@ from vllm.sequence import IntermediateTensors from vllm.transformers_utils.config import uses_mrope from vllm.transformers_utils.processor import ( cached_image_processor_from_config) +from vllm.transformers_utils.tokenizer import AnyTokenizer from .interfaces import (MultiModalEmbeddings, SupportsLoRA, SupportsMultiModal, SupportsPP) @@ -1405,3 +1408,87 @@ class Qwen2VLForConditionalGeneration(nn.Module, SupportsMultiModal, connector="visual.merger.", tower_model="visual.", ) + + +class Tarsier2MultiModalProcessor(Qwen2VLMultiModalProcessor): + pass + + +class Tarsier2ImageProcessor(Qwen2VLImageProcessor): + + def __init__( + self, + size: Optional[dict[str, int]] = None, + **kwargs, + ) -> None: + if size is not None and "min_pixels" in size and "max_pixels" in size: + # Remap if Tarsier2-specific format is provided + remapped_size = { + "shortest_edge": size["min_pixels"], + "longest_edge": size["max_pixels"] + } + super().__init__(size=remapped_size, **kwargs) + else: + super().__init__(size=size, **kwargs) + + +class Tarsier2Processor(Qwen2VLProcessor): + + def __init__( + self, + vision_config: dict, + tokenizer: AnyTokenizer, + **kwargs, + ): + self.image_processor = Tarsier2ImageProcessor(**vision_config) + super().__init__(image_processor=self.image_processor, + tokenizer=tokenizer, + video_processor=Qwen2VLVideoProcessor(), + chat_template=None, + **kwargs) + + +class Tarsier2ProcessingInfo(Qwen2VLProcessingInfo): + + def get_hf_config(self) -> Qwen2VLConfig: + model_path = self.ctx.model_config.model + original_config = AutoConfig.from_pretrained(model_path) + config_dict = original_config.to_dict() + correct_config = Qwen2VLConfig.from_dict(config_dict) + + return correct_config + + def get_hf_processor(self, **kwargs: object) -> Tarsier2Processor: + return Tarsier2Processor( + vision_config=self.ctx.get_hf_image_processor_config(), + tokenizer=self.get_tokenizer(), + **kwargs, + ) + + def get_image_processor(self) -> Tarsier2ImageProcessor: + return Tarsier2ImageProcessor( + **self.ctx.get_hf_image_processor_config()) + + +@MULTIMODAL_REGISTRY.register_processor(Tarsier2MultiModalProcessor, + info=Tarsier2ProcessingInfo, + dummy_inputs=Qwen2VLDummyInputsBuilder) +class Tarsier2ForConditionalGeneration(Qwen2VLForConditionalGeneration): + hf_to_vllm_mapper = WeightsMapper(orig_to_new_prefix={ + "vision_tower.": "visual.", + }) + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + # Tarsier2 uses llava as model_type, which will create a Qwen2VLConfig + # as text_config, we need to reconstruct Qwen2VLConfig from LlavaConfig. + config = vllm_config.model_config.hf_config + qwen2vl_config = config.text_config + qwen2vl_config.architectures = config.architectures + vllm_config.model_config.hf_config = qwen2vl_config + super().__init__(vllm_config=vllm_config, prefix=prefix) + + def load_weights(self, weights: Iterable[tuple[str, + torch.Tensor]]) -> set[str]: + + loader = AutoWeightsLoader(self) + return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index 95cb25e8f3c95..faeaf6ef68ccc 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -217,6 +217,7 @@ _MULTIMODAL_MODELS = { "UltravoxModel": ("ultravox", "UltravoxModel"), "Phi4MMForCausalLM": ("phi4mm", "Phi4MMForCausalLM"), "TarsierForConditionalGeneration": ("tarsier", "TarsierForConditionalGeneration"), # noqa: E501 + "Tarsier2ForConditionalGeneration": ("qwen2_vl", "Tarsier2ForConditionalGeneration"), # noqa: E501 # [Encoder-decoder] "Florence2ForConditionalGeneration": ("florence2", "Florence2ForConditionalGeneration"), # noqa: E501 "MllamaForConditionalGeneration": ("mllama", "MllamaForConditionalGeneration"), # noqa: E501 From caa680fd2e70cc947911f4185750aaa3bcbdd122 Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Sun, 22 Jun 2025 01:29:06 +0800 Subject: [PATCH 069/211] [doc] add contact us in community (#19922) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- README.md | 1 + docs/community/contact_us.md | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 docs/community/contact_us.md diff --git a/README.md b/README.md index d312716a84283..971e4195b4d94 100644 --- a/README.md +++ b/README.md @@ -159,6 +159,7 @@ If you use vLLM for your research, please cite our [paper](https://arxiv.org/abs - For coordinating contributions and development, please use [Slack](https://slack.vllm.ai) - For security disclosures, please use GitHub's [Security Advisories](https://github.com/vllm-project/vllm/security/advisories) feature - For collaborations and partnerships, please contact us at [vllm-questions@lists.berkeley.edu](mailto:vllm-questions@lists.berkeley.edu) + ## Media Kit diff --git a/docs/community/contact_us.md b/docs/community/contact_us.md new file mode 100644 index 0000000000000..4d87a7e6140d2 --- /dev/null +++ b/docs/community/contact_us.md @@ -0,0 +1,11 @@ +--- +title: Contact Us +--- +[](){ #contactus } + +- For technical questions and feature requests, please use GitHub [Issues](https://github.com/vllm-project/vllm/issues) or [Discussions](https://github.com/vllm-project/vllm/discussions) +- For discussing with fellow users, please use the [vLLM Forum](https://discuss.vllm.ai) +- For coordinating contributions and development, please use [Slack](https://slack.vllm.ai) +- For security disclosures, please use GitHub's [Security Advisories](https://github.com/vllm-project/vllm/security/advisories) feature +- For collaborations and partnerships, please contact us at [vllm-questions@lists.berkeley.edu](mailto:vllm-questions@lists.berkeley.edu) + From 2c5302fadd81c06f61e5a3973ed4c0e6a4a2be40 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Sat, 21 Jun 2025 13:01:07 -0700 Subject: [PATCH 070/211] [Multimodal] Optimize Qwen2/2.5-VL startup time (#19756) Signed-off-by: Woosuk Kwon Signed-off-by: Roger Wang Co-authored-by: Roger Wang --- vllm/model_executor/models/qwen2_vl.py | 8 ++++++++ vllm/multimodal/processing.py | 21 +++++++++++++++++++++ vllm/multimodal/profiling.py | 22 +++++++++++++++++++++- 3 files changed, 50 insertions(+), 1 deletion(-) diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 7a6ebe10c516b..899fc57c7a0e5 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -823,6 +823,14 @@ class Qwen2VLProcessingInfo(BaseProcessingInfo): def get_supported_mm_limits(self) -> Mapping[str, Optional[int]]: return {"image": None, "video": None} + def get_max_tokens_per_item( + self, seq_len: int, + mm_counts: Mapping[str, int]) -> Optional[Mapping[str, int]]: + + max_image_tokens = self.get_max_image_tokens() + max_video_tokens = self.get_max_video_tokens(seq_len, mm_counts) + return {"image": max_image_tokens, "video": max_video_tokens} + def _get_vision_info( self, *, diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index 5cfca57bffeec..38f3a7cb932f4 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -1100,6 +1100,27 @@ class BaseProcessingInfo: return allowed_limits + def get_max_tokens_per_item( + self, seq_len: int, + mm_counts: Optional[Mapping[str, + int]]) -> Optional[Mapping[str, int]]: + """Return the maximum number of tokens per item of for each modality. + By default, returns `None`. When `None` is returned, vLLM will generate + dummy inputs (images/videos) at maximum possible sizes and process them + to determine the maximum token count per modality. + This approach works but can be very slow for certain models (e.g., + Qwen2.5-VL), leading to very long startup time. For better performance, + each model can override this method to return pre-computed maximum token + counts, avoiding the need for dummy input generation and processing. + + NOTE: The maximum number of tokens per item of each modality returned + from this function should respect to the model maximum sequence length + and the maximum number of items of each modality allowed, and agrees + with dummy inputs (images/videos) at maximum possible sizes. + + """ + return None + _I = TypeVar("_I", bound=BaseProcessingInfo) diff --git a/vllm/multimodal/profiling.py b/vllm/multimodal/profiling.py index 1faecb7bd24a8..67bcb31f23f70 100644 --- a/vllm/multimodal/profiling.py +++ b/vllm/multimodal/profiling.py @@ -253,6 +253,26 @@ class MultiModalProfiler(Generic[_I]): seq_len: int, mm_counts: Optional[Mapping[str, int]] = None, ) -> Mapping[str, int]: - mm_inputs = self._get_dummy_mm_inputs(seq_len, mm_counts) + max_tokens_per_item = self.processing_info.get_max_tokens_per_item( + seq_len=seq_len, mm_counts=mm_counts) + if max_tokens_per_item is not None: + if mm_counts is None: + total_mm_tokens = sum(max_tokens_per_item.values()) + else: + total_mm_tokens = sum(max_tokens_per_item[k] * mm_counts[k] + for k in max_tokens_per_item.keys() + & mm_counts.keys()) + if total_mm_tokens > seq_len: + logger.warning_once( + "The sequence length (%d) is smaller than the pre-defined" + " wosrt-case total number of multimodal tokens (%d). " + "This may cause certain multi-modal inputs to fail during " + "inference. To avoid this, you should increase " + "`max_model_len` or reduce `mm_counts`.", + seq_len, + total_mm_tokens, + ) + return max_tokens_per_item + mm_inputs = self._get_dummy_mm_inputs(seq_len, mm_counts) return self._get_mm_num_tokens(mm_inputs) From 3b1e4c6a23af94b76f7e1290ec48d671f1420d8d Mon Sep 17 00:00:00 2001 From: Adrian <69011980+nie3e@users.noreply.github.com> Date: Sat, 21 Jun 2025 22:57:19 +0200 Subject: [PATCH 071/211] [Docs] Add GPT2ForSequenceClassification to supported models in docs (#19932) Signed-off-by: nie3e --- docs/models/supported_models.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md index 803d2938d2b1e..92557eb662843 100644 --- a/docs/models/supported_models.md +++ b/docs/models/supported_models.md @@ -445,7 +445,7 @@ Specified using `--task classify`. | Architecture | Models | Example HF Models | [LoRA][lora-adapter] | [PP][distributed-serving] | [V1](gh-issue:8779) | |----------------------------------|----------|----------------------------------------|------------------------|-----------------------------|-----------------------| | `JambaForSequenceClassification` | Jamba | `ai21labs/Jamba-tiny-reward-dev`, etc. | ✅︎ | ✅︎ | | - +| `GPT2ForSequenceClassification` | GPT2 | `nie3e/sentiment-polish-gpt2-small` | | | | If your model is not in the above list, we will try to automatically convert the model using [as_classification_model][vllm.model_executor.models.adapters.as_classification_model]. By default, the class probabilities are extracted from the softmaxed hidden state corresponding to the last token. From 4c409cabc2c1c432ba670029990bd59e6bbf1479 Mon Sep 17 00:00:00 2001 From: Ning Xie Date: Sun, 22 Jun 2025 11:10:46 +0800 Subject: [PATCH 072/211] [Misc] add vllm_config in __init__ (#19866) Signed-off-by: Andy Xie --- vllm/worker/worker_base.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm/worker/worker_base.py b/vllm/worker/worker_base.py index 200026dc7282f..c382b29ad1990 100644 --- a/vllm/worker/worker_base.py +++ b/vllm/worker/worker_base.py @@ -509,6 +509,7 @@ class WorkerWrapperBase: """ self.rpc_rank = rpc_rank self.worker: Optional[WorkerBase] = None + self.vllm_config: Optional[VllmConfig] = None # do not store this `vllm_config`, `init_worker` will set the final # one. TODO: investigate if we can remove this field in # `WorkerWrapperBase`, `init_cached_hf_modules` should be From 2bb246b8f7b8dd220008ff7bd735249b362c799a Mon Sep 17 00:00:00 2001 From: Ning Xie Date: Sun, 22 Jun 2025 13:39:09 +0800 Subject: [PATCH 073/211] [MISC] add cpu_kvcache_space_bytes to CacheConfig (#19812) Signed-off-by: Andy Xie --- vllm/config.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vllm/config.py b/vllm/config.py index 508cdfaec1c46..ce7e2a2929cf5 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1506,6 +1506,8 @@ class CacheConfig: """This enables dynamic calculation of `k_scale` and `v_scale` when kv_cache_dtype is fp8. If `False`, the scales will be loaded from the model checkpoint if available. Otherwise, the scales will default to 1.0.""" + cpu_kvcache_space_bytes: Optional[int] = None + """(CPU backend only) CPU key-value cache space.""" # Will be set after profiling. num_gpu_blocks: Optional[int] = field(default=None, init=False) From 202c5df9357e7c52b51e19abc70e8444f3f85ada Mon Sep 17 00:00:00 2001 From: "Wang, Yi" Date: Sun, 22 Jun 2025 15:21:04 +0800 Subject: [PATCH 074/211] [Benchmark] fix request loss if "ping" is returned (#19535) Signed-off-by: Wang, Yi A Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- benchmarks/backend_request_func.py | 8 +++++++- vllm/benchmarks/endpoint_request_func.py | 20 ++++++++++++++++---- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/benchmarks/backend_request_func.py b/benchmarks/backend_request_func.py index ddb38e304cd65..c7229dbb8e90d 100644 --- a/benchmarks/backend_request_func.py +++ b/benchmarks/backend_request_func.py @@ -404,8 +404,14 @@ async def async_request_openai_chat_completions( chunk_bytes = chunk_bytes.strip() if not chunk_bytes: continue + chunk_bytes = chunk_bytes.decode("utf-8") + # NOTE: SSE comments (often used as pings) start with a colon. + # These are not JSON data payload and should be skipped. + if chunk_bytes.startswith(":"): + continue + + chunk = chunk_bytes.removeprefix("data: ") - chunk = chunk_bytes.decode("utf-8").removeprefix("data: ") if chunk != "[DONE]": timestamp = time.perf_counter() data = json.loads(chunk) diff --git a/vllm/benchmarks/endpoint_request_func.py b/vllm/benchmarks/endpoint_request_func.py index aba60edc58cbf..60ae520db3862 100644 --- a/vllm/benchmarks/endpoint_request_func.py +++ b/vllm/benchmarks/endpoint_request_func.py @@ -104,9 +104,15 @@ async def async_request_openai_completions( chunk_bytes = chunk_bytes.strip() if not chunk_bytes: continue + chunk_bytes = chunk_bytes.decode("utf-8") + # NOTE: SSE comments (often used as pings) start with + # a colon. These are not JSON data payload and should + # be skipped. + if chunk_bytes.startswith(":"): + continue + + chunk = chunk_bytes.removeprefix("data: ") - chunk = chunk_bytes.decode("utf-8").removeprefix( - "data: ") if chunk != "[DONE]": data = json.loads(chunk) @@ -213,9 +219,15 @@ async def async_request_openai_chat_completions( chunk_bytes = chunk_bytes.strip() if not chunk_bytes: continue + chunk_bytes = chunk_bytes.decode("utf-8") + # NOTE: SSE comments (often used as pings) start with + # a colon. These are not JSON data payload and should + # be skipped. + if chunk_bytes.startswith(":"): + continue + + chunk = chunk_bytes.removeprefix("data: ") - chunk = chunk_bytes.decode("utf-8").removeprefix( - "data: ") if chunk != "[DONE]": timestamp = time.perf_counter() data = json.loads(chunk) From c305a2109d72cdd37d36595e45c5c0a9cb2ea6d4 Mon Sep 17 00:00:00 2001 From: 22quinn <33176974+22quinn@users.noreply.github.com> Date: Sun, 22 Jun 2025 01:46:21 -0700 Subject: [PATCH 075/211] [CI/Build] Auto tag perf benchmarks related PRs (#19943) Signed-off-by: 22quinn <33176974+22quinn@users.noreply.github.com> --- .github/mergify.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/mergify.yml b/.github/mergify.yml index ce8fb2ee2d534..9c61ae198db8b 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -65,6 +65,19 @@ pull_request_rules: add: - multi-modality +- name: label-performance + description: Automatically apply performance label + conditions: + - or: + - files~=^benchmarks/ + - files~=^vllm/benchmarks/ + - files~=^tests/benchmarks/ + - files~=^\.buildkite/nightly-benchmarks/ + actions: + label: + add: + - performance + - name: label-qwen description: Automatically apply qwen label conditions: From ec0db6f51c4ed8caf21c2dc4a2f47e89b307f59a Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Sun, 22 Jun 2025 18:26:13 +0800 Subject: [PATCH 076/211] [doc] use snippets for contact us (#19944) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- README.md | 3 ++- docs/community/contact_us.md | 7 +------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 971e4195b4d94..3e6ae2acab2a9 100644 --- a/README.md +++ b/README.md @@ -154,12 +154,13 @@ If you use vLLM for your research, please cite our [paper](https://arxiv.org/abs ## Contact Us + - For technical questions and feature requests, please use GitHub [Issues](https://github.com/vllm-project/vllm/issues) or [Discussions](https://github.com/vllm-project/vllm/discussions) - For discussing with fellow users, please use the [vLLM Forum](https://discuss.vllm.ai) - For coordinating contributions and development, please use [Slack](https://slack.vllm.ai) - For security disclosures, please use GitHub's [Security Advisories](https://github.com/vllm-project/vllm/security/advisories) feature - For collaborations and partnerships, please contact us at [vllm-questions@lists.berkeley.edu](mailto:vllm-questions@lists.berkeley.edu) - + ## Media Kit diff --git a/docs/community/contact_us.md b/docs/community/contact_us.md index 4d87a7e6140d2..a10e6bfc9b0a4 100644 --- a/docs/community/contact_us.md +++ b/docs/community/contact_us.md @@ -3,9 +3,4 @@ title: Contact Us --- [](){ #contactus } -- For technical questions and feature requests, please use GitHub [Issues](https://github.com/vllm-project/vllm/issues) or [Discussions](https://github.com/vllm-project/vllm/discussions) -- For discussing with fellow users, please use the [vLLM Forum](https://discuss.vllm.ai) -- For coordinating contributions and development, please use [Slack](https://slack.vllm.ai) -- For security disclosures, please use GitHub's [Security Advisories](https://github.com/vllm-project/vllm/security/advisories) feature -- For collaborations and partnerships, please contact us at [vllm-questions@lists.berkeley.edu](mailto:vllm-questions@lists.berkeley.edu) - +--8<-- "README.md:contact-us" From c76a506bd60f56d364da0de415c48798870e1312 Mon Sep 17 00:00:00 2001 From: Roger Wang Date: Sun, 22 Jun 2025 05:16:08 -0700 Subject: [PATCH 077/211] [Misc] Update model-specific PR tagging (#19949) Signed-off-by: Roger Wang --- .github/mergify.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/mergify.yml b/.github/mergify.yml index 9c61ae198db8b..9c047bcaf95dc 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -45,6 +45,7 @@ pull_request_rules: - files~=^vllm/entrypoints/openai/tool_parsers/llama.*\.py - files~=^vllm/model_executor/models/.*llama.*\.py - files~=^vllm/transformers_utils/configs/.*llama.*\.py + - title~=(?i)llama actions: label: add: @@ -87,7 +88,6 @@ pull_request_rules: - files~=^vllm/model_executor/models/.*qwen.*\.py - files~=^vllm/reasoning/.*qwen.*\.py - title~=(?i)Qwen - - body~=(?i)Qwen actions: label: add: From 2c11a29f0b474d3607227bbc895867bff9d8f8f4 Mon Sep 17 00:00:00 2001 From: "Ye (Charlotte) Qi" Date: Sun, 22 Jun 2025 09:34:48 -0700 Subject: [PATCH 078/211] [Misc] Simplify vllm bench cli subcommand implementation (#19948) --- vllm/entrypoints/cli/__init__.py | 12 ++++++ vllm/entrypoints/cli/benchmark/base.py | 20 ++-------- vllm/entrypoints/cli/benchmark/latency.py | 17 ++------ vllm/entrypoints/cli/benchmark/main.py | 41 ++++++++------------ vllm/entrypoints/cli/benchmark/serve.py | 17 ++------ vllm/entrypoints/cli/benchmark/throughput.py | 17 ++------ 6 files changed, 44 insertions(+), 80 deletions(-) diff --git a/vllm/entrypoints/cli/__init__.py b/vllm/entrypoints/cli/__init__.py index e69de29bb2d1d..41671b5b98abb 100644 --- a/vllm/entrypoints/cli/__init__.py +++ b/vllm/entrypoints/cli/__init__.py @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from vllm.entrypoints.cli.benchmark.latency import BenchmarkLatencySubcommand +from vllm.entrypoints.cli.benchmark.serve import BenchmarkServingSubcommand +from vllm.entrypoints.cli.benchmark.throughput import ( + BenchmarkThroughputSubcommand) + +__all__: list[str] = [ + "BenchmarkLatencySubcommand", + "BenchmarkServingSubcommand", + "BenchmarkThroughputSubcommand", +] \ No newline at end of file diff --git a/vllm/entrypoints/cli/benchmark/base.py b/vllm/entrypoints/cli/benchmark/base.py index 30a8844108002..0c22bc75105e6 100644 --- a/vllm/entrypoints/cli/benchmark/base.py +++ b/vllm/entrypoints/cli/benchmark/base.py @@ -3,18 +3,15 @@ import argparse from vllm.entrypoints.cli.types import CLISubcommand -from vllm.utils import FlexibleArgumentParser class BenchmarkSubcommandBase(CLISubcommand): """ The base class of subcommands for vllm bench. """ - @property - def help(self) -> str: - """The help message of the subcommand.""" - raise NotImplementedError + help: str - def add_cli_args(self, parser: argparse.ArgumentParser) -> None: + @classmethod + def add_cli_args(cls, parser: argparse.ArgumentParser) -> None: """Add the CLI arguments to the parser.""" raise NotImplementedError @@ -26,14 +23,3 @@ class BenchmarkSubcommandBase(CLISubcommand): args: The arguments to the command. """ raise NotImplementedError - - def subparser_init( - self, - subparsers: argparse._SubParsersAction) -> FlexibleArgumentParser: - parser = subparsers.add_parser( - self.name, - help=self.help, - description=self.help, - usage=f"vllm bench {self.name} [options]") - self.add_cli_args(parser) - return parser diff --git a/vllm/entrypoints/cli/benchmark/latency.py b/vllm/entrypoints/cli/benchmark/latency.py index e0358a262dcdc..3e68963cfd44e 100644 --- a/vllm/entrypoints/cli/benchmark/latency.py +++ b/vllm/entrypoints/cli/benchmark/latency.py @@ -4,27 +4,18 @@ import argparse from vllm.benchmarks.latency import add_cli_args, main from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase -from vllm.entrypoints.cli.types import CLISubcommand class BenchmarkLatencySubcommand(BenchmarkSubcommandBase): """ The `latency` subcommand for vllm bench. """ - def __init__(self): - self.name = "latency" - super().__init__() + name = "latency" + help = "Benchmark the latency of a single batch of requests." - @property - def help(self) -> str: - return "Benchmark the latency of a single batch of requests." - - def add_cli_args(self, parser: argparse.ArgumentParser) -> None: + @classmethod + def add_cli_args(cls, parser: argparse.ArgumentParser) -> None: add_cli_args(parser) @staticmethod def cmd(args: argparse.Namespace) -> None: main(args) - - -def cmd_init() -> list[CLISubcommand]: - return [BenchmarkLatencySubcommand()] diff --git a/vllm/entrypoints/cli/benchmark/main.py b/vllm/entrypoints/cli/benchmark/main.py index 717da630ab4f0..fdc5a047f6927 100644 --- a/vllm/entrypoints/cli/benchmark/main.py +++ b/vllm/entrypoints/cli/benchmark/main.py @@ -2,51 +2,44 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import argparse -import vllm.entrypoints.cli.benchmark.latency -import vllm.entrypoints.cli.benchmark.serve -import vllm.entrypoints.cli.benchmark.throughput +from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase from vllm.entrypoints.cli.types import CLISubcommand from vllm.utils import FlexibleArgumentParser -BENCHMARK_CMD_MODULES = [ - vllm.entrypoints.cli.benchmark.latency, - vllm.entrypoints.cli.benchmark.serve, - vllm.entrypoints.cli.benchmark.throughput, -] - class BenchmarkSubcommand(CLISubcommand): """ The `bench` subcommand for the vLLM CLI. """ - def __init__(self): - self.name = "bench" - super().__init__() + name = "bench" + help = "vLLM bench subcommand." @staticmethod def cmd(args: argparse.Namespace) -> None: args.dispatch_function(args) def validate(self, args: argparse.Namespace) -> None: - if args.bench_type in self.cmds: - self.cmds[args.bench_type].validate(args) + pass def subparser_init( self, subparsers: argparse._SubParsersAction) -> FlexibleArgumentParser: + bench_parser = subparsers.add_parser( - "bench", - help="vLLM bench subcommand.", - description="vLLM bench subcommand.", + self.name, + help=self.help, + description=self.help, usage="vllm bench [options]") bench_subparsers = bench_parser.add_subparsers(required=True, dest="bench_type") - self.cmds = {} - for cmd_module in BENCHMARK_CMD_MODULES: - new_cmds = cmd_module.cmd_init() - for cmd in new_cmds: - cmd.subparser_init(bench_subparsers).set_defaults( - dispatch_function=cmd.cmd) - self.cmds[cmd.name] = cmd + + for cmd_cls in BenchmarkSubcommandBase.__subclasses__(): + cmd_subparser = bench_subparsers.add_parser( + cmd_cls.name, + help=cmd_cls.help, + description=cmd_cls.help, + ) + cmd_subparser.set_defaults(dispatch_function=cmd_cls.cmd) + cmd_cls.add_cli_args(cmd_subparser) return bench_parser diff --git a/vllm/entrypoints/cli/benchmark/serve.py b/vllm/entrypoints/cli/benchmark/serve.py index 3043701570230..3dd7a46d6284b 100644 --- a/vllm/entrypoints/cli/benchmark/serve.py +++ b/vllm/entrypoints/cli/benchmark/serve.py @@ -4,27 +4,18 @@ import argparse from vllm.benchmarks.serve import add_cli_args, main from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase -from vllm.entrypoints.cli.types import CLISubcommand class BenchmarkServingSubcommand(BenchmarkSubcommandBase): """ The `serve` subcommand for vllm bench. """ - def __init__(self): - self.name = "serve" - super().__init__() + name = "serve" + help = "Benchmark the online serving throughput." - @property - def help(self) -> str: - return "Benchmark the online serving throughput." - - def add_cli_args(self, parser: argparse.ArgumentParser) -> None: + @classmethod + def add_cli_args(cls, parser: argparse.ArgumentParser) -> None: add_cli_args(parser) @staticmethod def cmd(args: argparse.Namespace) -> None: main(args) - - -def cmd_init() -> list[CLISubcommand]: - return [BenchmarkServingSubcommand()] diff --git a/vllm/entrypoints/cli/benchmark/throughput.py b/vllm/entrypoints/cli/benchmark/throughput.py index 20431cd3d8702..d5d43ad4a3591 100644 --- a/vllm/entrypoints/cli/benchmark/throughput.py +++ b/vllm/entrypoints/cli/benchmark/throughput.py @@ -4,27 +4,18 @@ import argparse from vllm.benchmarks.throughput import add_cli_args, main from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase -from vllm.entrypoints.cli.types import CLISubcommand class BenchmarkThroughputSubcommand(BenchmarkSubcommandBase): """ The `throughput` subcommand for vllm bench. """ - def __init__(self): - self.name = "throughput" - super().__init__() + name = "throughput" + help = "Benchmark offline inference throughput." - @property - def help(self) -> str: - return "Benchmark offline inference throughput." - - def add_cli_args(self, parser: argparse.ArgumentParser) -> None: + @classmethod + def add_cli_args(cls, parser: argparse.ArgumentParser) -> None: add_cli_args(parser) @staticmethod def cmd(args: argparse.Namespace) -> None: main(args) - - -def cmd_init() -> list[CLISubcommand]: - return [BenchmarkThroughputSubcommand()] From e91386cde110e690d754684a2b03707925605f90 Mon Sep 17 00:00:00 2001 From: Aaron Pham Date: Sun, 22 Jun 2025 15:43:07 -0400 Subject: [PATCH 079/211] [Chore] dedup logs (#19955) --- vllm/config.py | 2 +- vllm/triton_utils/importing.py | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index ce7e2a2929cf5..b8232aae70837 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1970,7 +1970,7 @@ class ParallelConfig: if not current_platform.use_custom_allreduce(): self.disable_custom_all_reduce = True - logger.info( + logger.debug( "Disabled the custom all-reduce kernel because it is not " "supported on current platform.") if self.ray_workers_use_nsight and not self.use_ray: diff --git a/vllm/triton_utils/importing.py b/vllm/triton_utils/importing.py index dd30b2bc5f075..6cc8429d76c31 100644 --- a/vllm/triton_utils/importing.py +++ b/vllm/triton_utils/importing.py @@ -74,10 +74,6 @@ class TritonPlaceholder(types.ModuleType): self.heuristics = self._dummy_decorator("heuristics") self.Config = self._dummy_decorator("Config") self.language = TritonLanguagePlaceholder() - logger.warning_once( - "Triton is not installed. Using dummy decorators. " - "Install it via `pip install triton` to enable kernel" - " compilation.") def _dummy_decorator(self, name): From 33d51f599e414c3639bdea7e5e8fb0c7bb61cc3d Mon Sep 17 00:00:00 2001 From: "Ye (Charlotte) Qi" Date: Sun, 22 Jun 2025 15:17:49 -0700 Subject: [PATCH 080/211] [BugFix] Add an env to disable moe chunking to work around compile incompatibility (#19642) Signed-off-by: Ye (Charlotte) Qi --- vllm/envs.py | 7 +++++++ vllm/model_executor/layers/fused_moe/modular_kernel.py | 6 +++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/vllm/envs.py b/vllm/envs.py index b1030997f25ad..93a7c8069c2de 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -49,6 +49,7 @@ if TYPE_CHECKING: VLLM_XLA_CACHE_PATH: str = os.path.join(VLLM_CACHE_ROOT, "xla_cache") VLLM_XLA_CHECK_RECOMPILATION: bool = False VLLM_FUSED_MOE_CHUNK_SIZE: int = 64 * 1024 + VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING: bool = True VLLM_USE_RAY_SPMD_WORKER: bool = False VLLM_USE_RAY_COMPILED_DAG: bool = False VLLM_USE_RAY_COMPILED_DAG_CHANNEL_TYPE: str = "auto" @@ -535,6 +536,12 @@ environment_variables: dict[str, Callable[[], Any]] = { lambda: bool(int(os.getenv("VLLM_XLA_USE_SPMD", "0"))), "VLLM_FUSED_MOE_CHUNK_SIZE": lambda: int(os.getenv("VLLM_FUSED_MOE_CHUNK_SIZE", "32768")), + # Control whether to use fused MoE activation chunking. Current chunking + # logic is incompatible with torch.compile and causes IMA. See issue + # https://github.com/vllm-project/vllm/issues/19631. + "VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING": + lambda: bool( + int(os.getenv("VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING", "1"))), # If set, vllm will skip the deprecation warnings. "VLLM_NO_DEPRECATION_WARNING": diff --git a/vllm/model_executor/layers/fused_moe/modular_kernel.py b/vllm/model_executor/layers/fused_moe/modular_kernel.py index ed3b6b8a1af42..d25d70d3eff15 100644 --- a/vllm/model_executor/layers/fused_moe/modular_kernel.py +++ b/vllm/model_executor/layers/fused_moe/modular_kernel.py @@ -225,6 +225,10 @@ class FusedMoEPermuteExpertsUnpermute(ABC): else: raise ValueError(f"Unsupported FusedMoe activation: {activation}") + def enable_chunking(self): + return envs.VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING and \ + self.supports_chunking() + @abstractmethod def apply( self, @@ -400,7 +404,7 @@ class FusedMoEModularKernel(torch.nn.Module): else: _, M, N, K, top_k = _moe_problem_size(a1q, w1, w2, topk_ids) - if self.fused_experts.supports_chunking(): + if self.fused_experts.enable_chunking(): CHUNK_SIZE = envs.VLLM_FUSED_MOE_CHUNK_SIZE num_chunks = cdiv(M, CHUNK_SIZE) else: From c4cf26067755624eb94afda36ae1f679dec0d542 Mon Sep 17 00:00:00 2001 From: Aaron Pham Date: Sun, 22 Jun 2025 19:11:22 -0400 Subject: [PATCH 081/211] [Perf][CLI] Improve overall startup time (#19941) --- .pre-commit-config.yaml | 5 ++ tools/check_init_lazy_imports.py | 108 +++++++++++++++++++++++++ vllm/__init__.py | 73 +++++++++++++---- vllm/config.py | 30 +++---- vllm/engine/arg_utils.py | 15 +++- vllm/entrypoints/cli/benchmark/main.py | 9 ++- vllm/entrypoints/cli/collect_env.py | 15 ++-- vllm/entrypoints/cli/main.py | 50 +++++++----- vllm/entrypoints/cli/openai.py | 22 +++-- vllm/entrypoints/cli/run_batch.py | 31 ++++--- vllm/entrypoints/cli/serve.py | 11 +-- vllm/entrypoints/cli/types.py | 8 +- vllm/entrypoints/openai/run_batch.py | 16 ++-- vllm/utils.py | 3 - 14 files changed, 293 insertions(+), 103 deletions(-) create mode 100644 tools/check_init_lazy_imports.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7534ae55907e6..e62b623b4e114 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -115,6 +115,11 @@ repos: entry: python tools/check_spdx_header.py language: python types: [python] + - id: check-root-lazy-imports + name: Check root lazy imports + entry: python tools/check_init_lazy_imports.py + language: python + types: [python] - id: check-filenames name: Check for spaces in all filenames entry: bash diff --git a/tools/check_init_lazy_imports.py b/tools/check_init_lazy_imports.py new file mode 100644 index 0000000000000..e8e6f07cc33fc --- /dev/null +++ b/tools/check_init_lazy_imports.py @@ -0,0 +1,108 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +"""Ensure we perform lazy loading in vllm/__init__.py. +i.e: appears only within the ``if typing.TYPE_CHECKING:`` guard, +**except** for a short whitelist. +""" + +from __future__ import annotations + +import ast +import pathlib +import sys +from collections.abc import Iterable +from typing import Final + +REPO_ROOT: Final = pathlib.Path(__file__).resolve().parent.parent +INIT_PATH: Final = REPO_ROOT / "vllm" / "__init__.py" + +# If you need to add items to whitelist, do it here. +ALLOWED_IMPORTS: Final[frozenset[str]] = frozenset({ + "vllm.env_override", +}) +ALLOWED_FROM_MODULES: Final[frozenset[str]] = frozenset({ + ".version", +}) + + +def _is_internal(name: str | None, *, level: int = 0) -> bool: + if level > 0: + return True + if name is None: + return False + return name.startswith("vllm.") or name == "vllm" + + +def _fail(violations: Iterable[tuple[int, str]]) -> None: + print("ERROR: Disallowed eager imports in vllm/__init__.py:\n", + file=sys.stderr) + for lineno, msg in violations: + print(f" Line {lineno}: {msg}", file=sys.stderr) + sys.exit(1) + + +def main() -> None: + source = INIT_PATH.read_text(encoding="utf-8") + tree = ast.parse(source, filename=str(INIT_PATH)) + + violations: list[tuple[int, str]] = [] + + class Visitor(ast.NodeVisitor): + + def __init__(self) -> None: + super().__init__() + self._in_type_checking = False + + def visit_If(self, node: ast.If) -> None: + guard_is_type_checking = False + test = node.test + if isinstance(test, ast.Attribute) and isinstance( + test.value, ast.Name): + guard_is_type_checking = (test.value.id == "typing" + and test.attr == "TYPE_CHECKING") + elif isinstance(test, ast.Name): + guard_is_type_checking = test.id == "TYPE_CHECKING" + + if guard_is_type_checking: + prev = self._in_type_checking + self._in_type_checking = True + for child in node.body: + self.visit(child) + self._in_type_checking = prev + for child in node.orelse: + self.visit(child) + else: + self.generic_visit(node) + + def visit_Import(self, node: ast.Import) -> None: + if self._in_type_checking: + return + for alias in node.names: + module_name = alias.name + if _is_internal( + module_name) and module_name not in ALLOWED_IMPORTS: + violations.append(( + node.lineno, + f"import '{module_name}' must be inside typing.TYPE_CHECKING", # noqa: E501 + )) + + def visit_ImportFrom(self, node: ast.ImportFrom) -> None: + if self._in_type_checking: + return + module_as_written = ("." * node.level) + (node.module or "") + if _is_internal( + node.module, level=node.level + ) and module_as_written not in ALLOWED_FROM_MODULES: + violations.append(( + node.lineno, + f"from '{module_as_written}' import ... must be inside typing.TYPE_CHECKING", # noqa: E501 + )) + + Visitor().visit(tree) + + if violations: + _fail(violations) + + +if __name__ == "__main__": + main() diff --git a/vllm/__init__.py b/vllm/__init__.py index 6232b657e8284..7b90fd3a241bd 100644 --- a/vllm/__init__.py +++ b/vllm/__init__.py @@ -1,29 +1,72 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project """vLLM: a high-throughput and memory-efficient inference engine for LLMs""" + # The version.py should be independent library, and we always import the # version library first. Such assumption is critical for some customization. from .version import __version__, __version_tuple__ # isort:skip +import typing + # The environment variables override should be imported before any other # modules to ensure that the environment variables are set before any # other modules are imported. -import vllm.env_override # isort:skip # noqa: F401 +import vllm.env_override # noqa: F401 + +MODULE_ATTRS = { + "AsyncEngineArgs": ".engine.arg_utils:AsyncEngineArgs", + "EngineArgs": ".engine.arg_utils:EngineArgs", + "AsyncLLMEngine": ".engine.async_llm_engine:AsyncLLMEngine", + "LLMEngine": ".engine.llm_engine:LLMEngine", + "LLM": ".entrypoints.llm:LLM", + "initialize_ray_cluster": ".executor.ray_utils:initialize_ray_cluster", + "PromptType": ".inputs:PromptType", + "TextPrompt": ".inputs:TextPrompt", + "TokensPrompt": ".inputs:TokensPrompt", + "ModelRegistry": ".model_executor.models:ModelRegistry", + "SamplingParams": ".sampling_params:SamplingParams", + "PoolingParams": ".pooling_params:PoolingParams", + "ClassificationOutput": ".outputs:ClassificationOutput", + "ClassificationRequestOutput": ".outputs:ClassificationRequestOutput", + "CompletionOutput": ".outputs:CompletionOutput", + "EmbeddingOutput": ".outputs:EmbeddingOutput", + "EmbeddingRequestOutput": ".outputs:EmbeddingRequestOutput", + "PoolingOutput": ".outputs:PoolingOutput", + "PoolingRequestOutput": ".outputs:PoolingRequestOutput", + "RequestOutput": ".outputs:RequestOutput", + "ScoringOutput": ".outputs:ScoringOutput", + "ScoringRequestOutput": ".outputs:ScoringRequestOutput", +} + +if typing.TYPE_CHECKING: + from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs + from vllm.engine.async_llm_engine import AsyncLLMEngine + from vllm.engine.llm_engine import LLMEngine + from vllm.entrypoints.llm import LLM + from vllm.executor.ray_utils import initialize_ray_cluster + from vllm.inputs import PromptType, TextPrompt, TokensPrompt + from vllm.model_executor.models import ModelRegistry + from vllm.outputs import (ClassificationOutput, + ClassificationRequestOutput, CompletionOutput, + EmbeddingOutput, EmbeddingRequestOutput, + PoolingOutput, PoolingRequestOutput, + RequestOutput, ScoringOutput, + ScoringRequestOutput) + from vllm.pooling_params import PoolingParams + from vllm.sampling_params import SamplingParams +else: + + def __getattr__(name: str) -> typing.Any: + from importlib import import_module + + if name in MODULE_ATTRS: + module_name, attr_name = MODULE_ATTRS[name].split(":") + module = import_module(module_name, __package__) + return getattr(module, attr_name) + else: + raise AttributeError( + f'module {__package__} has no attribute {name}') -from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs -from vllm.engine.async_llm_engine import AsyncLLMEngine -from vllm.engine.llm_engine import LLMEngine -from vllm.entrypoints.llm import LLM -from vllm.executor.ray_utils import initialize_ray_cluster -from vllm.inputs import PromptType, TextPrompt, TokensPrompt -from vllm.model_executor.models import ModelRegistry -from vllm.outputs import (ClassificationOutput, ClassificationRequestOutput, - CompletionOutput, EmbeddingOutput, - EmbeddingRequestOutput, PoolingOutput, - PoolingRequestOutput, RequestOutput, ScoringOutput, - ScoringRequestOutput) -from vllm.pooling_params import PoolingParams -from vllm.sampling_params import SamplingParams __all__ = [ "__version__", diff --git a/vllm/config.py b/vllm/config.py index b8232aae70837..7549c97b4fec4 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -28,7 +28,7 @@ from pydantic.dataclasses import dataclass from safetensors.torch import _TYPES as _SAFETENSORS_TO_TORCH_DTYPE from torch.distributed import ProcessGroup, ReduceOp from transformers import PretrainedConfig -from typing_extensions import deprecated, runtime_checkable +from typing_extensions import Self, deprecated, runtime_checkable import vllm.envs as envs from vllm import version @@ -1537,7 +1537,6 @@ class CacheConfig: def __post_init__(self) -> None: self.swap_space_bytes = self.swap_space * GiB_bytes - self._verify_args() self._verify_cache_dtype() self._verify_prefix_caching() @@ -1546,7 +1545,8 @@ class CacheConfig: # metrics info return {key: str(value) for key, value in self.__dict__.items()} - def _verify_args(self) -> None: + @model_validator(mode='after') + def _verify_args(self) -> Self: if self.cpu_offload_gb < 0: raise ValueError("CPU offload space must be non-negative" f", but got {self.cpu_offload_gb}") @@ -1556,6 +1556,8 @@ class CacheConfig: "GPU memory utilization must be less than 1.0. Got " f"{self.gpu_memory_utilization}.") + return self + def _verify_cache_dtype(self) -> None: if self.cache_dtype == "auto": pass @@ -1942,15 +1944,14 @@ class ParallelConfig: if self.distributed_executor_backend is None and self.world_size == 1: self.distributed_executor_backend = "uni" - self._verify_args() - @property def use_ray(self) -> bool: return self.distributed_executor_backend == "ray" or ( isinstance(self.distributed_executor_backend, type) and self.distributed_executor_backend.uses_ray) - def _verify_args(self) -> None: + @model_validator(mode='after') + def _verify_args(self) -> Self: # Lazy import to avoid circular import from vllm.executor.executor_base import ExecutorBase from vllm.platforms import current_platform @@ -1977,8 +1978,7 @@ class ParallelConfig: raise ValueError("Unable to use nsight profiling unless workers " "run with Ray.") - assert isinstance(self.worker_extension_cls, str), ( - "worker_extension_cls must be a string (qualified class name).") + return self PreemptionMode = Literal["swap", "recompute"] @@ -2202,9 +2202,8 @@ class SchedulerConfig: self.max_num_partial_prefills, self.max_long_partial_prefills, self.long_prefill_token_threshold) - self._verify_args() - - def _verify_args(self) -> None: + @model_validator(mode='after') + def _verify_args(self) -> Self: if (self.max_num_batched_tokens < self.max_model_len and not self.chunked_prefill_enabled): raise ValueError( @@ -2263,6 +2262,8 @@ class SchedulerConfig: "must be greater than or equal to 1 and less than or equal to " f"max_num_partial_prefills ({self.max_num_partial_prefills}).") + return self + @property def is_multi_step(self) -> bool: return self.num_scheduler_steps > 1 @@ -2669,8 +2670,6 @@ class SpeculativeConfig: if self.posterior_alpha is None: self.posterior_alpha = 0.3 - self._verify_args() - @staticmethod def _maybe_override_draft_max_model_len( speculative_max_model_len: Optional[int], @@ -2761,7 +2760,8 @@ class SpeculativeConfig: return draft_parallel_config - def _verify_args(self) -> None: + @model_validator(mode='after') + def _verify_args(self) -> Self: if self.num_speculative_tokens is None: raise ValueError( "num_speculative_tokens must be provided with " @@ -2812,6 +2812,8 @@ class SpeculativeConfig: "Eagle3 is only supported for Llama models. " f"Got {self.target_model_config.hf_text_config.model_type=}") + return self + @property def num_lookahead_slots(self) -> int: """The number of additional slots the scheduler should allocate per diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index bffc8ba8c907f..dd09f514906df 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -3,7 +3,9 @@ # yapf: disable import argparse +import copy import dataclasses +import functools import json import sys import threading @@ -168,7 +170,8 @@ def get_type_hints(type_hint: TypeHint) -> set[TypeHint]: return type_hints -def get_kwargs(cls: ConfigType) -> dict[str, Any]: +@functools.lru_cache(maxsize=30) +def _compute_kwargs(cls: ConfigType) -> dict[str, Any]: cls_docs = get_attr_docs(cls) kwargs = {} for field in fields(cls): @@ -269,6 +272,16 @@ def get_kwargs(cls: ConfigType) -> dict[str, Any]: return kwargs +def get_kwargs(cls: ConfigType) -> dict[str, Any]: + """Return argparse kwargs for the given Config dataclass. + + The heavy computation is cached via functools.lru_cache, and a deep copy + is returned so callers can mutate the dictionary without affecting the + cached version. + """ + return copy.deepcopy(_compute_kwargs(cls)) + + @dataclass class EngineArgs: """Arguments for vLLM engine.""" diff --git a/vllm/entrypoints/cli/benchmark/main.py b/vllm/entrypoints/cli/benchmark/main.py index fdc5a047f6927..8904a2468b3ce 100644 --- a/vllm/entrypoints/cli/benchmark/main.py +++ b/vllm/entrypoints/cli/benchmark/main.py @@ -1,10 +1,16 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +from __future__ import annotations + import argparse +import typing from vllm.entrypoints.cli.benchmark.base import BenchmarkSubcommandBase from vllm.entrypoints.cli.types import CLISubcommand -from vllm.utils import FlexibleArgumentParser + +if typing.TYPE_CHECKING: + from vllm.utils import FlexibleArgumentParser class BenchmarkSubcommand(CLISubcommand): @@ -23,7 +29,6 @@ class BenchmarkSubcommand(CLISubcommand): def subparser_init( self, subparsers: argparse._SubParsersAction) -> FlexibleArgumentParser: - bench_parser = subparsers.add_parser( self.name, help=self.help, diff --git a/vllm/entrypoints/cli/collect_env.py b/vllm/entrypoints/cli/collect_env.py index 141aafdb1a618..785c18812adb7 100644 --- a/vllm/entrypoints/cli/collect_env.py +++ b/vllm/entrypoints/cli/collect_env.py @@ -1,19 +1,21 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from __future__ import annotations + import argparse +import typing from vllm.collect_env import main as collect_env_main from vllm.entrypoints.cli.types import CLISubcommand -from vllm.utils import FlexibleArgumentParser + +if typing.TYPE_CHECKING: + from vllm.utils import FlexibleArgumentParser class CollectEnvSubcommand(CLISubcommand): """The `collect-env` subcommand for the vLLM CLI. """ - - def __init__(self): - self.name = "collect-env" - super().__init__() + name = "collect-env" @staticmethod def cmd(args: argparse.Namespace) -> None: @@ -23,12 +25,11 @@ class CollectEnvSubcommand(CLISubcommand): def subparser_init( self, subparsers: argparse._SubParsersAction) -> FlexibleArgumentParser: - collect_env_parser = subparsers.add_parser( + return subparsers.add_parser( "collect-env", help="Start collecting environment information.", description="Start collecting environment information.", usage="vllm collect-env") - return collect_env_parser def cmd_init() -> list[CLISubcommand]: diff --git a/vllm/entrypoints/cli/main.py b/vllm/entrypoints/cli/main.py index 9bb1162e38d82..3e09d45b2ed71 100644 --- a/vllm/entrypoints/cli/main.py +++ b/vllm/entrypoints/cli/main.py @@ -1,27 +1,15 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +'''The CLI entrypoints of vLLM -# The CLI entrypoint to vLLM. +Note that all future modules must be lazily loaded within main +to avoid certain eager import breakage.''' +from __future__ import annotations + +import importlib.metadata import signal import sys -import vllm.entrypoints.cli.benchmark.main -import vllm.entrypoints.cli.collect_env -import vllm.entrypoints.cli.openai -import vllm.entrypoints.cli.run_batch -import vllm.entrypoints.cli.serve -import vllm.version -from vllm.entrypoints.utils import VLLM_SUBCMD_PARSER_EPILOG, cli_env_setup -from vllm.utils import FlexibleArgumentParser - -CMD_MODULES = [ - vllm.entrypoints.cli.openai, - vllm.entrypoints.cli.serve, - vllm.entrypoints.cli.benchmark.main, - vllm.entrypoints.cli.collect_env, - vllm.entrypoints.cli.run_batch, -] - def register_signal_handlers(): @@ -33,16 +21,34 @@ def register_signal_handlers(): def main(): + import vllm.entrypoints.cli.benchmark.main + import vllm.entrypoints.cli.collect_env + import vllm.entrypoints.cli.openai + import vllm.entrypoints.cli.run_batch + import vllm.entrypoints.cli.serve + from vllm.entrypoints.utils import VLLM_SUBCMD_PARSER_EPILOG, cli_env_setup + from vllm.utils import FlexibleArgumentParser + + CMD_MODULES = [ + vllm.entrypoints.cli.openai, + vllm.entrypoints.cli.serve, + vllm.entrypoints.cli.benchmark.main, + vllm.entrypoints.cli.collect_env, + vllm.entrypoints.cli.run_batch, + ] + cli_env_setup() parser = FlexibleArgumentParser( description="vLLM CLI", epilog=VLLM_SUBCMD_PARSER_EPILOG, ) - parser.add_argument('-v', - '--version', - action='version', - version=vllm.version.__version__) + parser.add_argument( + '-v', + '--version', + action='version', + version=importlib.metadata.version('vllm'), + ) subparsers = parser.add_subparsers(required=False, dest="subparser") cmds = {} for cmd_module in CMD_MODULES: diff --git a/vllm/entrypoints/cli/openai.py b/vllm/entrypoints/cli/openai.py index 58dcdfe217fd5..5ddaee5b52af1 100644 --- a/vllm/entrypoints/cli/openai.py +++ b/vllm/entrypoints/cli/openai.py @@ -1,18 +1,21 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -# Commands that act as an interactive OpenAI API client + +from __future__ import annotations import argparse import os import signal import sys -from typing import Optional +from typing import TYPE_CHECKING from openai import OpenAI from openai.types.chat import ChatCompletionMessageParam from vllm.entrypoints.cli.types import CLISubcommand -from vllm.utils import FlexibleArgumentParser + +if TYPE_CHECKING: + from vllm.utils import FlexibleArgumentParser def _register_signal_handlers(): @@ -42,8 +45,7 @@ def _interactive_cli(args: argparse.Namespace) -> tuple[str, OpenAI]: return model_name, openai_client -def chat(system_prompt: Optional[str], model_name: str, - client: OpenAI) -> None: +def chat(system_prompt: str | None, model_name: str, client: OpenAI) -> None: conversation: list[ChatCompletionMessageParam] = [] if system_prompt is not None: conversation.append({"role": "system", "content": system_prompt}) @@ -92,10 +94,7 @@ def _add_query_options( class ChatCommand(CLISubcommand): """The `chat` subcommand for the vLLM CLI. """ - - def __init__(self): - self.name = "chat" - super().__init__() + name = "chat" @staticmethod def cmd(args: argparse.Namespace) -> None: @@ -157,10 +156,7 @@ class ChatCommand(CLISubcommand): class CompleteCommand(CLISubcommand): """The `complete` subcommand for the vLLM CLI. """ - - def __init__(self): - self.name = "complete" - super().__init__() + name = 'complete' @staticmethod def cmd(args: argparse.Namespace) -> None: diff --git a/vllm/entrypoints/cli/run_batch.py b/vllm/entrypoints/cli/run_batch.py index 6bdd3b63c26d2..61a34cbc3959f 100644 --- a/vllm/entrypoints/cli/run_batch.py +++ b/vllm/entrypoints/cli/run_batch.py @@ -1,37 +1,42 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from __future__ import annotations + import argparse import asyncio - -from prometheus_client import start_http_server +import importlib.metadata +import typing from vllm.entrypoints.cli.types import CLISubcommand -from vllm.entrypoints.logger import logger -from vllm.entrypoints.openai.run_batch import main as run_batch_main -from vllm.entrypoints.openai.run_batch import make_arg_parser from vllm.entrypoints.utils import (VLLM_SUBCMD_PARSER_EPILOG, show_filtered_argument_or_group_from_help) -from vllm.utils import FlexibleArgumentParser -from vllm.version import __version__ as VLLM_VERSION +from vllm.logger import init_logger + +if typing.TYPE_CHECKING: + from vllm.utils import FlexibleArgumentParser + +logger = init_logger(__name__) class RunBatchSubcommand(CLISubcommand): """The `run-batch` subcommand for vLLM CLI.""" - - def __init__(self): - self.name = "run-batch" - super().__init__() + name = "run-batch" @staticmethod def cmd(args: argparse.Namespace) -> None: - logger.info("vLLM batch processing API version %s", VLLM_VERSION) + from vllm.entrypoints.openai.run_batch import main as run_batch_main + + logger.info("vLLM batch processing API version %s", + importlib.metadata.version("vllm")) logger.info("args: %s", args) # Start the Prometheus metrics server. # LLMEngine uses the Prometheus client # to publish metrics at the /metrics endpoint. if args.enable_metrics: + from prometheus_client import start_http_server + logger.info("Prometheus metrics enabled") start_http_server(port=args.port, addr=args.url) else: @@ -42,6 +47,8 @@ class RunBatchSubcommand(CLISubcommand): def subparser_init( self, subparsers: argparse._SubParsersAction) -> FlexibleArgumentParser: + from vllm.entrypoints.openai.run_batch import make_arg_parser + run_batch_parser = subparsers.add_parser( "run-batch", help="Run batch prompts and write results to file.", diff --git a/vllm/entrypoints/cli/serve.py b/vllm/entrypoints/cli/serve.py index 9040877a422ff..897c222a3ff58 100644 --- a/vllm/entrypoints/cli/serve.py +++ b/vllm/entrypoints/cli/serve.py @@ -9,8 +9,8 @@ import sys import uvloop import zmq +import vllm import vllm.envs as envs -from vllm import AsyncEngineArgs from vllm.entrypoints.cli.types import CLISubcommand from vllm.entrypoints.openai.api_server import (run_server, run_server_worker, setup_server) @@ -38,10 +38,7 @@ logger = init_logger(__name__) class ServeSubcommand(CLISubcommand): """The `serve` subcommand for the vLLM CLI. """ - - def __init__(self): - self.name = "serve" - super().__init__() + name = "serve" @staticmethod def cmd(args: argparse.Namespace) -> None: @@ -115,7 +112,7 @@ def run_headless(args: argparse.Namespace): raise ValueError("api_server_count can't be set in headless mode") # Create the EngineConfig. - engine_args = AsyncEngineArgs.from_cli_args(args) + engine_args = vllm.AsyncEngineArgs.from_cli_args(args) usage_context = UsageContext.OPENAI_API_SERVER vllm_config = engine_args.create_engine_config(usage_context=usage_context) @@ -175,7 +172,7 @@ def run_multi_api_server(args: argparse.Namespace): listen_address, sock = setup_server(args) - engine_args = AsyncEngineArgs.from_cli_args(args) + engine_args = vllm.AsyncEngineArgs.from_cli_args(args) usage_context = UsageContext.OPENAI_API_SERVER vllm_config = engine_args.create_engine_config(usage_context=usage_context) model_config = vllm_config.model_config diff --git a/vllm/entrypoints/cli/types.py b/vllm/entrypoints/cli/types.py index 0a72443129758..b88f094b302ad 100644 --- a/vllm/entrypoints/cli/types.py +++ b/vllm/entrypoints/cli/types.py @@ -1,9 +1,13 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -import argparse +from __future__ import annotations -from vllm.utils import FlexibleArgumentParser +import argparse +import typing + +if typing.TYPE_CHECKING: + from vllm.utils import FlexibleArgumentParser class CLISubcommand: diff --git a/vllm/entrypoints/openai/run_batch.py b/vllm/entrypoints/openai/run_batch.py index 9994b3cae8888..29740fc7e602c 100644 --- a/vllm/entrypoints/openai/run_batch.py +++ b/vllm/entrypoints/openai/run_batch.py @@ -15,7 +15,7 @@ from tqdm import tqdm from vllm.engine.arg_utils import AsyncEngineArgs, optional_type from vllm.engine.async_llm_engine import AsyncLLMEngine -from vllm.entrypoints.logger import RequestLogger, logger +from vllm.entrypoints.logger import RequestLogger # yapf: disable from vllm.entrypoints.openai.protocol import (BatchRequestInput, BatchRequestOutput, @@ -29,10 +29,13 @@ from vllm.entrypoints.openai.serving_embedding import OpenAIServingEmbedding from vllm.entrypoints.openai.serving_models import (BaseModelPath, OpenAIServingModels) from vllm.entrypoints.openai.serving_score import ServingScores +from vllm.logger import init_logger from vllm.usage.usage_lib import UsageContext from vllm.utils import FlexibleArgumentParser, random_uuid from vllm.version import __version__ as VLLM_VERSION +logger = init_logger(__name__) + def make_arg_parser(parser: FlexibleArgumentParser): parser.add_argument( @@ -201,13 +204,16 @@ async def upload_data(output_url: str, data_or_file: str, except Exception as e: if attempt < max_retries: logger.error( - f"Failed to upload data (attempt {attempt}). " - f"Error message: {str(e)}.\nRetrying in {delay} seconds..." + "Failed to upload data (attempt %d). Error message: %s.\nRetrying in %d seconds...", # noqa: E501 + attempt, + e, + delay, ) await asyncio.sleep(delay) else: - raise Exception(f"Failed to upload data (attempt {attempt}). " - f"Error message: {str(e)}.") from e + raise Exception( + f"Failed to upload data (attempt {attempt}). Error message: {str(e)}." # noqa: E501 + ) from e async def write_file(path_or_url: str, batch_outputs: list[BatchRequestOutput], diff --git a/vllm/utils.py b/vllm/utils.py index dc408e1676f1c..34be4d52c4836 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -67,9 +67,6 @@ from torch.library import Library from typing_extensions import Never, ParamSpec, TypeIs, assert_never import vllm.envs as envs -# NOTE: import triton_utils to make TritonPlaceholderModule work -# if triton is unavailable -import vllm.triton_utils # noqa: F401 from vllm.logger import enable_trace_function_call, init_logger if TYPE_CHECKING: From 4a0f7888a3f0de1d460d90f43b8f3e1826b68dbb Mon Sep 17 00:00:00 2001 From: amit Date: Mon, 23 Jun 2025 06:18:08 +0300 Subject: [PATCH 082/211] [Core] feat: Implement Priority Scheduling in V1 Engine (#19057) Signed-off-by: amit Co-authored-by: Roger Wang --- docs/usage/v1_guide.md | 12 + tests/v1/core/test_scheduler.py | 590 +++++++++++++++++++++++++++- vllm/v1/core/sched/request_queue.py | 224 +++++++++++ vllm/v1/core/sched/scheduler.py | 88 +++-- vllm/v1/engine/__init__.py | 1 + vllm/v1/engine/processor.py | 3 +- vllm/v1/request.py | 8 + 7 files changed, 896 insertions(+), 30 deletions(-) create mode 100644 vllm/v1/core/sched/request_queue.py diff --git a/docs/usage/v1_guide.md b/docs/usage/v1_guide.md index 1ec3e72a4f56f..82a2710d895ca 100644 --- a/docs/usage/v1_guide.md +++ b/docs/usage/v1_guide.md @@ -45,6 +45,18 @@ For each item, our progress towards V1 support falls into one of the following s - **🟠 Delayed**: Temporarily dropped in V1 but planned to be re-introduced later. - **🔴 Deprecated**: Not planned for V1 unless there is strong demand. +!!! note + vLLM V1’s unified scheduler treats both prompt and output tokens the same + way by using a simple dictionary (e.g., `{request_id: num_tokens}`) to dynamically + allocate a fixed token budget per request, enabling features like chunked prefills, + prefix caching, and speculative decoding without a strict separation between prefill + and decode phases. + +The V1 scheduler supports multiple scheduling policies, including First-Come, +First-Served (FCFS) and priority-based scheduling (where requests are processed +based on assigned priority, with FCFS as a tie-breaker), configurable via the +`--scheduling-policy` argument. + ### Hardware | Hardware | Status | diff --git a/tests/v1/core/test_scheduler.py b/tests/v1/core/test_scheduler.py index b0b1116eb5363..8994816a3017c 100644 --- a/tests/v1/core/test_scheduler.py +++ b/tests/v1/core/test_scheduler.py @@ -1150,7 +1150,6 @@ def test_kv_connector_handles_preemption(): assert len(scheduler.running) == 1 _ = scheduler.update_from_output(output, MODEL_RUNNER_OUTPUT) assert len(scheduler.running) == 0 - assert len(scheduler.waiting) == 1 # All memory should be freed since nothing is running. assert scheduler.kv_cache_manager.block_pool.get_num_free_blocks() \ == NUM_BLOCKS - 1 @@ -1265,3 +1264,592 @@ def test_memory_leak(): # Confirm no memory leak. assert_scheduler_empty(scheduler) + + +def create_scheduler_with_priority( + model: str = "facebook/opt-125m", + max_num_seqs: int = 16, + max_num_batched_tokens: int = 8192, + enable_prefix_caching: Optional[bool] = None, + long_prefill_token_threshold: int = 0, + disable_chunked_mm_input: bool = False, + use_kv_connector: bool = False, + num_blocks: int = 10000, + block_size: int = 16, + max_model_len: Optional[int] = None, + num_speculative_tokens: Optional[int] = None, +) -> Scheduler: + '''Create scheduler with priority policy enabled. + + Args: + model: model under test + max_num_seqs: max sequences to schedule + max_num_batch_tokens: max num tokens to batch + enable_prefix_caching: optionally force APC config + (True/False) or use default + (None) + + Returns: + {class}`Scheduler` instance with priority scheduling + ''' + if max_model_len is None: + max_model_len = max_num_batched_tokens + scheduler_config = SchedulerConfig( + max_num_seqs=max_num_seqs, + max_num_batched_tokens=max_num_batched_tokens, + max_model_len=max_model_len, + long_prefill_token_threshold=long_prefill_token_threshold, + disable_chunked_mm_input=disable_chunked_mm_input, + enable_chunked_prefill=True, + policy="priority", # Enable priority scheduling + ) + model_config = ModelConfig( + model=model, + task="auto", + tokenizer=model, + tokenizer_mode="auto", + trust_remote_code=True, + dtype="float16", + seed=42, + ) + # Cache config, optionally force APC + kwargs_cache = ({} if enable_prefix_caching is None else { + 'enable_prefix_caching': enable_prefix_caching + }) + cache_config = CacheConfig( + block_size=block_size, + gpu_memory_utilization=0.9, + swap_space=0, + cache_dtype="auto", + **kwargs_cache, + ) + kv_transfer_config = KVTransferConfig( + kv_connector="SharedStorageConnector", + kv_role="kv_both", + kv_connector_extra_config={"shared_storage_path": "local_storage"}, + ) if use_kv_connector else None + + speculative_config: Optional[SpeculativeConfig] = None + if num_speculative_tokens is not None: + speculative_config = SpeculativeConfig( + model="ngram", num_speculative_tokens=num_speculative_tokens) + + vllm_config = VllmConfig( + scheduler_config=scheduler_config, + model_config=model_config, + cache_config=cache_config, + kv_transfer_config=kv_transfer_config, + speculative_config=speculative_config, + ) + kv_cache_config = KVCacheConfig( + num_blocks=num_blocks, # A large number of blocks to hold all requests + kv_cache_tensors=[], + kv_cache_groups=[ + KVCacheGroupSpec(['layer'], + FullAttentionSpec(block_size, 1, 1, torch.float32, + False)) + ], + ) + cache_config.num_gpu_blocks = num_blocks + return Scheduler( + vllm_config=vllm_config, + kv_cache_config=kv_cache_config, + log_stats=True, + structured_output_manager=StructuredOutputManager(vllm_config), + ) + + +def create_requests_with_priority( + num_requests: int, + priorities: list[int], + arrival_times: Optional[list[float]] = None, + num_tokens: int = 10, + mm_positions: Optional[list[PlaceholderRange]] = None, + max_tokens: int = 16, + stop_token_ids: Optional[list[int]] = None, + prompt_logprobs: Optional[int] = None): + """Create requests with specified priorities and arrival times.""" + assert len(priorities) == num_requests + if arrival_times is not None: + assert len(arrival_times) == num_requests + else: + arrival_times = [float(i) for i in range(num_requests)] + + sampling_params = SamplingParams(ignore_eos=False, + max_tokens=max_tokens, + stop_token_ids=stop_token_ids, + prompt_logprobs=prompt_logprobs) + requests = [] + for i in range(num_requests): + if mm_positions is not None: + mm_position = mm_positions[i] + mm_inputs = [MultiModalKwargs({})] * len(mm_position) + else: + mm_position = None + mm_inputs = None + request = Request( + request_id=f"{i}", + prompt_token_ids=[i] * num_tokens, + sampling_params=sampling_params, + pooling_params=None, + multi_modal_inputs=mm_inputs, + multi_modal_placeholders=mm_position, + multi_modal_hashes=None, + eos_token_id=EOS_TOKEN_ID, + arrival_time=arrival_times[i], + priority=priorities[i], + ) + requests.append(request) + return requests + + +def test_priority_scheduling_basic_ordering(): + """Test that requests are scheduled in priority order + (lower value = higher priority).""" + scheduler = create_scheduler_with_priority() + + # Create requests with different priorities + # Priority 0 (highest), 1, 2 (lowest) + priorities = [2, 0, 1] # Add in non-priority order + arrival_times = [1.0, 2.0, 3.0] # All different arrival times + requests = create_requests_with_priority(num_requests=3, + priorities=priorities, + arrival_times=arrival_times) + + # Add requests in non-priority order + for request in requests: + scheduler.add_request(request) + + # Schedule and verify priority order + output = scheduler.schedule() + + # Should schedule all requests since they fit in budget + assert len(output.scheduled_new_reqs) == 3 + + # Verify they are scheduled in priority order: + # req_1 (priority 0), req_2 (priority 1), req_0 (priority 2) + scheduled_req_ids = [req.req_id for req in output.scheduled_new_reqs] + assert scheduled_req_ids == ["1", "2", "0"] + + +def test_priority_scheduling_arrival_time_tiebreaker(): + """Test that arrival time is used + as tiebreaker when priorities are equal.""" + scheduler = create_scheduler_with_priority() + + # Create requests with same priority but different arrival times + priorities = [1, 1, 1] # All same priority + arrival_times = [3.0, 1.0, 2.0] # Different arrival times + requests = create_requests_with_priority(num_requests=3, + priorities=priorities, + arrival_times=arrival_times) + + # Add requests in non-arrival order + for request in requests: + scheduler.add_request(request) + + # Schedule and verify arrival time order + output = scheduler.schedule() + + # Should schedule all requests since they fit in budget + assert len(output.scheduled_new_reqs) == 3 + + # Verify they are scheduled in arrival time order: + # req_1 (1.0), req_2 (2.0), req_0 (3.0) + scheduled_req_ids = [req.req_id for req in output.scheduled_new_reqs] + assert scheduled_req_ids == ["1", "2", "0"] + + +def test_priority_scheduling_mixed_priority_and_arrival(): + """Test priority scheduling with mixed priorities and arrival times.""" + scheduler = create_scheduler_with_priority() + + # Create requests with mixed priorities and arrival times + priorities = [2, 1, 1, 0] # Mixed priorities + arrival_times = [1.0, 3.0, 2.0, 4.0] # Mixed arrival times + requests = create_requests_with_priority(num_requests=4, + priorities=priorities, + arrival_times=arrival_times) + + # Add requests + for request in requests: + scheduler.add_request(request) + + # Schedule and verify order + output = scheduler.schedule() + + # Should schedule all requests since they fit in budget + assert len(output.scheduled_new_reqs) == 4 + + # Expected order: + # 1. req_3 (priority 0, arrival 4.0) + # 2. req_2 (priority 1, arrival 2.0) - earlier arrival than req_1 + # 3. req_1 (priority 1, arrival 3.0) + # 4. req_0 (priority 2, arrival 1.0) + scheduled_req_ids = [req.req_id for req in output.scheduled_new_reqs] + assert scheduled_req_ids == ["3", "2", "1", "0"] + + +def test_priority_scheduling_preemption(): + """Test that priority scheduling preempts + lower priority requests when memory is constrained.""" + # Create scheduler with very limited memory to force preemption + scheduler = create_scheduler_with_priority( + max_num_seqs=3, # Allow multiple requests + max_num_batched_tokens=200, + num_blocks=6, # Very limited blocks to force memory pressure + block_size=16, # Standard block size + ) + + # Create initial low-priority requests that will consume most memory + low_priority_requests = create_requests_with_priority( + num_requests=2, + priorities=[5, 5], # Low priority + arrival_times=[1.0, 2.0], + num_tokens=30 # Large enough to consume significant memory + ) + + # Add and schedule low priority requests + for request in low_priority_requests: + scheduler.add_request(request) + + output = scheduler.schedule() + assert len(output.scheduled_new_reqs) == 2 + + # Simulate model execution to move requests to running state + model_output = ModelRunnerOutput( + req_ids=[req.request_id for req in low_priority_requests], + req_id_to_index={ + req.request_id: i + for i, req in enumerate(low_priority_requests) + }, + sampled_token_ids=[[100] for _ in low_priority_requests], + spec_token_ids=None, + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[], + ) + scheduler.update_from_output(output, model_output) + + # Verify both requests are running + assert len(scheduler.running) == 2 + + # Now add a high-priority request that requires memory allocation + # This should trigger preemption due to memory constraints + high_priority_request = create_requests_with_priority( + num_requests=1, + priorities=[0], # High priority + arrival_times=[3.0], + num_tokens=30 # Large enough to require significant memory + )[0] + + scheduler.add_request(high_priority_request) + + # Schedule again - this should trigger + # preemption when trying to allocate memory + output = scheduler.schedule() + + # Due to the scheduler's design, if preemption happens + # during running request scheduling, + # waiting requests won't be scheduled in the same step + # Let's check if preemption occurred by looking at the waiting queue + + # If preemption happened, we should see requests in the + # waiting queue + if len(scheduler.waiting) > 1: # high priority + preempted request + # Preemption occurred - verify the high priority request + # gets scheduled next + output2 = scheduler.schedule() + assert len(output2.scheduled_new_reqs) == 1 + # High priority request + assert output2.scheduled_new_reqs[0].req_id == "0" + else: + # No preemption needed - all requests fit + # This is also valid behavior if memory allows + assert len(output.scheduled_new_reqs) == 1 + # High priority request + assert output.scheduled_new_reqs[0].req_id == "0" + + +def test_priority_scheduling_no_preemption_when_space_available(): + """Test that preemption doesn't happen + when there's space for new requests.""" + scheduler = create_scheduler_with_priority( + max_num_seqs=3, # Allow 3 concurrent requests + max_num_batched_tokens=200, # Sufficient token budget + ) + + # Add two low-priority running requests + low_priority_requests = create_requests_with_priority( + num_requests=2, + priorities=[5, 5], + arrival_times=[1.0, 2.0], + num_tokens=30) + + for request in low_priority_requests: + scheduler.add_request(request) + + output = scheduler.schedule() + model_output = ModelRunnerOutput( + req_ids=[req.request_id for req in low_priority_requests], + req_id_to_index={ + req.request_id: i + for i, req in enumerate(low_priority_requests) + }, + sampled_token_ids=[[100] for _ in low_priority_requests], + spec_token_ids=None, + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[], + ) + scheduler.update_from_output(output, model_output) + + # Add high-priority request + high_priority_request = create_requests_with_priority(num_requests=1, + priorities=[0], + arrival_times=[3.0], + num_tokens=30)[0] + + scheduler.add_request(high_priority_request) + + # Schedule - should not preempt since there's space + output = scheduler.schedule() + + # Should schedule the new request without preemption + assert len(output.scheduled_new_reqs) == 1 + assert len(scheduler.running) == 3 # All three requests running + assert len(scheduler.waiting) == 0 # No requests waiting + + +def test_priority_scheduling_preemption_victim_selection(): + """Test that the correct victim is selected for + preemption based on priority and arrival time.""" + # This test verifies the priority-based victim selection logic + # by checking the waiting queue order after adding requests with different + # priorities + scheduler = create_scheduler_with_priority( + max_num_seqs=1, # Force sequential processing to test priority order + ) + + # Create requests with different priorities + requests = create_requests_with_priority( + num_requests=3, + priorities=[3, 2, 0], # Different priorities: low, medium, high + arrival_times=[1.0, 2.0, 3.0], + num_tokens=10) + + # Add all requests + for request in requests: + scheduler.add_request(request) + + # Schedule - should only schedule the highest priority request + # (req_2, priority 0) + output = scheduler.schedule() + assert len(output.scheduled_new_reqs) == 1 + assert output.scheduled_new_reqs[0].req_id == "2" # Highest priority + + # Verify the waiting queue has the remaining requests in priority order + assert len(scheduler.waiting) == 2 + + # Extract waiting requests and verify priority order + waiting_requests = list(scheduler.waiting) + + waiting_priorities = [req.priority for req in waiting_requests] + waiting_req_ids = [req.request_id for req in waiting_requests] + + # Should be req_1 (priority 2) then req_0 (priority 3) + assert waiting_priorities == [2, 3] + assert waiting_req_ids == ["1", "0"] + + +def test_priority_scheduling_equal_priority_preemption(): + """Test arrival time tiebreaker when requests have equal priority.""" + # This test verifies that arrival time is used as a tiebreaker for equal + # priorities + scheduler = create_scheduler_with_priority( + max_num_seqs=1, # Force sequential processing + ) + + # Create requests with same priority but different arrival times + requests = create_requests_with_priority( + num_requests=3, + priorities=[2, 2, 2], # Same priority + arrival_times=[3.0, 1.0, 2.0], # Different arrival times + num_tokens=10) + + # Add all requests + for request in requests: + scheduler.add_request(request) + + # Schedule - should schedule the request with earliest arrival time + output = scheduler.schedule() + assert len(output.scheduled_new_reqs) == 1 + assert output.scheduled_new_reqs[0].req_id == "1" # Earliest arrival (1.0) + + # Verify the waiting queue has remaining requests in arrival time order + assert len(scheduler.waiting) == 2 + + # Extract waiting requests and verify arrival time order + waiting_requests = list(scheduler.waiting) + + waiting_arrival_times = [req.arrival_time for req in waiting_requests] + waiting_req_ids = [req.request_id for req in waiting_requests] + + # Should be req_2 (arrival 2.0) then req_0 (arrival 3.0) + assert waiting_arrival_times == [2.0, 3.0] + assert waiting_req_ids == ["2", "0"] + + +def test_priority_scheduling_waiting_queue_order(): + """Test that the waiting queue maintains priority order.""" + scheduler = create_scheduler_with_priority( + max_num_seqs=1, # Only one request can run at a time + ) + + # Create multiple requests with different priorities + requests = create_requests_with_priority( + num_requests=4, + priorities=[3, 1, 2, 0], # Mixed priorities + arrival_times=[1.0, 2.0, 3.0, 4.0], + num_tokens=10) + + # Add all requests + for request in requests: + scheduler.add_request(request) + + # Schedule - should only schedule the highest priority request + # (req_3, priority 0) + output = scheduler.schedule() + assert len(output.scheduled_new_reqs) == 1 + assert output.scheduled_new_reqs[0].req_id == "3" + + # Verify waiting queue has remaining requests in priority order + assert len(scheduler.waiting) == 3 + + # Extract requests from waiting queue + # (it's a heap, so we need to pop to see order) + waiting_requests = list(scheduler.waiting) + + waiting_priorities = [req.priority for req in waiting_requests] + waiting_req_ids = [req.request_id for req in waiting_requests] + + # Should be ordered by priority: req_1 (1), req_2 (2), req_0 (3) + assert waiting_req_ids == ["1", "2", "0"] + assert waiting_priorities == [1, 2, 3] + + +def test_priority_scheduling_fcfs_fallback(): + """Test that FCFS behavior is maintained when all + requests have same priority.""" + scheduler = create_scheduler_with_priority() + + # Create requests with same priority but different arrival times + priorities = [1, 1, 1, 1] # All same priority + arrival_times = [4.0, 1.0, 3.0, 2.0] # Different arrival times + requests = create_requests_with_priority(num_requests=4, + priorities=priorities, + arrival_times=arrival_times) + + # Add requests + for request in requests: + scheduler.add_request(request) + + # Schedule + output = scheduler.schedule() + + # Should schedule all requests in arrival time order + assert len(output.scheduled_new_reqs) == 4 + scheduled_req_ids = [req.req_id for req in output.scheduled_new_reqs] + + # Expected order by arrival time: + # req_1 (1.0), req_3 (2.0), req_2 (3.0), req_0 (4.0) + assert scheduled_req_ids == ["1", "3", "2", "0"] + + +def test_priority_scheduling_with_limited_slots(): + """Test priority scheduling when max_num_seqs limits concurrent requests.""" + scheduler = create_scheduler_with_priority( + max_num_seqs=2, # Only allow 2 concurrent requests + max_num_batched_tokens=1000, # Plenty of token budget + ) + + # Create requests with different priorities + requests = create_requests_with_priority( + num_requests=4, + priorities=[3, 1, 2, 0], # Mixed priorities + arrival_times=[1.0, 2.0, 3.0, 4.0], + num_tokens=10) + + # Add all requests + for request in requests: + scheduler.add_request(request) + + # Schedule - should only schedule the 2 highest priority requests + output = scheduler.schedule() + assert len(output.scheduled_new_reqs) == 2 + + # Should schedule req_3 (priority 0) and req_1 (priority 1) + scheduled_req_ids = [req.req_id for req in output.scheduled_new_reqs] + assert "3" in scheduled_req_ids # Priority 0 + assert "1" in scheduled_req_ids # Priority 1 + + # Remaining requests should be in waiting queue in priority order + assert len(scheduler.waiting) == 2 + + # Extract waiting requests and verify order + waiting_requests = list(scheduler.waiting) + waiting_priorities = [req.priority for req in waiting_requests] + waiting_req_ids = [req.request_id for req in waiting_requests] + + # Should be req_2 (priority 2) then req_0 (priority 3) + assert waiting_priorities == [2, 3] + assert waiting_req_ids == ["2", "0"] + + +def test_priority_scheduling_heap_property(): + """Test that the waiting queue maintains heap + property for priority scheduling.""" + scheduler = create_scheduler_with_priority( + max_num_seqs=1, # Only one request can run at a time + ) + + # Add requests in random priority order + priorities = [5, 1, 8, 3, 2, 7, 4, 6] + arrival_times = [float(i) for i in range(len(priorities))] + requests = create_requests_with_priority(num_requests=len(priorities), + priorities=priorities, + arrival_times=arrival_times, + num_tokens=10) + + # Add all requests + for request in requests: + scheduler.add_request(request) + + # Schedule one request at a time and verify priority order + scheduled_priorities = [] + + while scheduler.waiting: + output = scheduler.schedule() + if output.scheduled_new_reqs: + req = output.scheduled_new_reqs[0] + scheduled_priorities.append(requests[int(req.req_id)].priority) + + # Simulate completion to make room for next request + model_output = ModelRunnerOutput( + req_ids=[req.req_id], + req_id_to_index={req.req_id: 0}, + sampled_token_ids=[[100]], + spec_token_ids=None, + logprobs=None, + prompt_logprobs_dict={}, + pooler_output=[], + ) + scheduler.update_from_output(output, model_output) + + # Finish the request to make room for the next one + scheduler.finish_requests(req.req_id, + RequestStatus.FINISHED_STOPPED) + + # Verify requests were scheduled in priority order (lowest value first) + expected_priorities = sorted(priorities) + assert scheduled_priorities == expected_priorities diff --git a/vllm/v1/core/sched/request_queue.py b/vllm/v1/core/sched/request_queue.py new file mode 100644 index 0000000000000..fc2bc30b9a5fd --- /dev/null +++ b/vllm/v1/core/sched/request_queue.py @@ -0,0 +1,224 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +from __future__ import annotations + +import heapq +from abc import ABC, abstractmethod +from collections import deque +from collections.abc import Iterable, Iterator +from enum import Enum + +from vllm.v1.request import Request + + +class SchedulingPolicy(Enum): + """Enum for scheduling policies.""" + FCFS = "fcfs" + PRIORITY = "priority" + + +class RequestQueue(ABC): + """Abstract base class for request queues.""" + + @abstractmethod + def add_request(self, request: Request) -> None: + """Add a request to the queue according to the policy.""" + pass + + @abstractmethod + def pop_request(self) -> Request: + """Pop a request from the queue according to the policy.""" + pass + + @abstractmethod + def peek_request(self) -> Request: + """Peek at the request at the front of the queue without removing it.""" + pass + + @abstractmethod + def prepend_request(self, request: Request) -> None: + """Prepend a request to the front of the queue.""" + pass + + @abstractmethod + def prepend_requests(self, requests: RequestQueue) -> None: + """Prepend all requests from another queue to the front of this + queue.""" + pass + + @abstractmethod + def remove_request(self, request: Request) -> None: + """Remove a specific request from the queue.""" + pass + + @abstractmethod + def remove_requests(self, requests: Iterable[Request]) -> None: + """Remove multiple specific requests from the queue.""" + pass + + @abstractmethod + def __bool__(self) -> bool: + """Check if queue has any requests.""" + pass + + @abstractmethod + def __len__(self) -> int: + """Get number of requests in queue.""" + pass + + @abstractmethod + def __iter__(self) -> Iterator[Request]: + """Iterate over the queue according to the policy.""" + pass + + @abstractmethod + def __reversed__(self) -> Iterator[Request]: + """Iterate over the queue in reverse order.""" + pass + + +class FCFSRequestQueue(deque[Request], RequestQueue): + """A first-come-first-served queue that supports deque operations.""" + + def add_request(self, request: Request) -> None: + """Add a request to the queue according to FCFS policy.""" + self.append(request) + + def pop_request(self) -> Request: + """Pop a request from the queue according to FCFS policy.""" + return self.popleft() + + def peek_request(self) -> Request: + """Peek at the next request in the queue without removing it.""" + if not self: + raise IndexError("peek from an empty queue") + return self[0] + + def prepend_request(self, request: Request) -> None: + """Prepend a request to the front of the queue.""" + self.appendleft(request) + + def prepend_requests(self, requests: RequestQueue) -> None: + """Prepend all requests from another queue to the front of this + queue.""" + self.extendleft(reversed(requests)) + + def remove_request(self, request: Request) -> None: + """Remove a specific request from the queue.""" + self.remove(request) + + def remove_requests(self, requests: Iterable[Request]) -> None: + """Remove multiple specific requests from the queue.""" + requests_to_remove = set(requests) + filtered_requests = [ + req for req in self if req not in requests_to_remove + ] + # deque does not support in-place filtering, so we need to clear + # and extend + self.clear() + self.extend(filtered_requests) + + def __bool__(self) -> bool: + """Check if queue has any requests.""" + return len(self) > 0 + + def __len__(self) -> int: + """Get number of requests in queue.""" + return super().__len__() + + def __iter__(self) -> Iterator[Request]: + """Iterate over the queue according to FCFS policy.""" + return super().__iter__() + + def __reversed__(self) -> Iterator[Request]: + """Iterate over the queue in reverse order.""" + return super().__reversed__() + + +class PriorityRequestQueue(RequestQueue): + """ + A priority queue that supports heap operations. + + Requests with a smaller value of `priority` are processed first. + If multiple requests have the same priority, the one with the earlier + `arrival_time` is processed first. + """ + + def __init__(self) -> None: + self._heap: list[tuple[int, float, Request]] = [] + + def add_request(self, request: Request) -> None: + """Add a request to the queue according to priority policy.""" + heapq.heappush(self._heap, + (request.priority, request.arrival_time, request)) + + def pop_request(self) -> Request: + """Pop a request from the queue according to priority policy.""" + if not self._heap: + raise IndexError("pop from empty heap") + _, _, request = heapq.heappop(self._heap) + return request + + def peek_request(self) -> Request: + """Peek at the next request in the queue without removing it.""" + if not self._heap: + raise IndexError("peek from empty heap") + _, _, request = self._heap[0] + return request + + def prepend_request(self, request: Request) -> None: + """Add a request to the queue according to priority policy. + + Note: In a priority queue, there is no concept of prepending to the + front. Requests are ordered by (priority, arrival_time).""" + self.add_request(request) + + def prepend_requests(self, requests: RequestQueue) -> None: + """Add all requests from another queue according to priority policy. + + Note: In a priority queue, there is no concept of prepending to the + front. Requests are ordered by (priority, arrival_time).""" + for request in requests: + self.add_request(request) + + def remove_request(self, request: Request) -> None: + """Remove a specific request from the queue.""" + self._heap = [(p, t, r) for p, t, r in self._heap if r != request] + heapq.heapify(self._heap) + + def remove_requests(self, requests: Iterable[Request]) -> None: + """Remove multiple specific requests from the queue.""" + requests_to_remove = set(requests) + self._heap = [(p, t, r) for p, t, r in self._heap + if r not in requests_to_remove] + heapq.heapify(self._heap) + + def __bool__(self) -> bool: + """Check if queue has any requests.""" + return bool(self._heap) + + def __len__(self) -> int: + """Get number of requests in queue.""" + return len(self._heap) + + def __iter__(self) -> Iterator[Request]: + """Iterate over the queue according to priority policy.""" + heap_copy = self._heap[:] + while heap_copy: + _, _, request = heapq.heappop(heap_copy) + yield request + + def __reversed__(self) -> Iterator[Request]: + """Iterate over the queue in reverse priority order.""" + return reversed(list(self)) + + +def create_request_queue(policy: SchedulingPolicy) -> RequestQueue: + """Create request queue based on scheduling policy.""" + if policy == SchedulingPolicy.PRIORITY: + return PriorityRequestQueue() + elif policy == SchedulingPolicy.FCFS: + return FCFSRequestQueue() + else: + raise ValueError(f"Unknown scheduling policy: {policy}") diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index 0958366e0aca7..00b0844a5660b 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -22,6 +22,8 @@ from vllm.v1.core.kv_cache_manager import KVCacheManager from vllm.v1.core.sched.interface import SchedulerInterface from vllm.v1.core.sched.output import (CachedRequestData, NewRequestData, SchedulerOutput) +from vllm.v1.core.sched.request_queue import (SchedulingPolicy, + create_request_queue) from vllm.v1.core.sched.utils import check_stop from vllm.v1.engine import (EngineCoreEventType, EngineCoreOutput, EngineCoreOutputs) @@ -94,8 +96,16 @@ class Scheduler(SchedulerInterface): # req_id -> Request self.requests: dict[str, Request] = {} + # Scheduling policy + if self.scheduler_config.policy == "priority": + self.policy = SchedulingPolicy.PRIORITY + elif self.scheduler_config.policy == "fcfs": + self.policy = SchedulingPolicy.FCFS + else: + raise ValueError( + f"Unknown scheduling policy: {self.scheduler_config.policy}") # Priority queues for requests. - self.waiting: deque[Request] = deque() + self.waiting = create_request_queue(self.policy) self.running: list[Request] = [] # The request IDs that are finished in between the previous and the @@ -247,7 +257,15 @@ class Scheduler(SchedulerInterface): if new_blocks is None: # The request cannot be scheduled. # Preempt the lowest-priority request. - preempted_req = self.running.pop() + if self.policy == SchedulingPolicy.PRIORITY: + preempted_req = max( + self.running, + key=lambda r: (r.priority, r.arrival_time), + ) + self.running.remove(preempted_req) + else: + preempted_req = self.running.pop() + self.kv_cache_manager.free(preempted_req) preempted_req.status = RequestStatus.PREEMPTED preempted_req.num_computed_tokens = 0 @@ -255,7 +273,7 @@ class Scheduler(SchedulerInterface): preempted_req.record_event( EngineCoreEventType.PREEMPTED, scheduled_timestamp) - self.waiting.appendleft(preempted_req) + self.waiting.prepend_request(preempted_req) preempted_reqs.append(preempted_req) if preempted_req == request: # No more request to preempt. @@ -311,9 +329,9 @@ class Scheduler(SchedulerInterface): if req.lora_request and req.lora_request.lora_int_id > 0) assert len(scheduled_loras) <= self.lora_config.max_loras - # Use a temporary deque to collect requests that need to be skipped - # and put back at the head of the waiting queue later - skipped_waiting_requests: deque[Request] = deque() + # Use a temporary RequestQueue to collect requests that need to be + # skipped and put back at the head of the waiting queue later + skipped_waiting_requests = create_request_queue(self.policy) # Next, schedule the WAITING requests. if not preempted_reqs: @@ -321,7 +339,7 @@ class Scheduler(SchedulerInterface): if len(self.running) == self.max_num_running_reqs: break - request = self.waiting[0] + request = self.waiting.peek_request() # KVTransfer: skip request if still waiting for remote kvs. if request.status == RequestStatus.WAITING_FOR_REMOTE_KVS: @@ -332,8 +350,8 @@ class Scheduler(SchedulerInterface): logger.debug( "%s is still in WAITING_FOR_REMOTE_KVS state.", request.request_id) - self.waiting.popleft() - skipped_waiting_requests.appendleft(request) + self.waiting.pop_request() + skipped_waiting_requests.prepend_request(request) continue # Skip request if the structured output request is still waiting @@ -343,19 +361,18 @@ class Scheduler(SchedulerInterface): if structured_output_req and structured_output_req.grammar: request.status = RequestStatus.WAITING else: - self.waiting.popleft() - skipped_waiting_requests.appendleft(request) + self.waiting.pop_request() + skipped_waiting_requests.prepend_request(request) continue # Check that adding the request still respects the max_loras # constraint. - if self.lora_config and request.lora_request and ( - len(scheduled_loras) == self.lora_config.max_loras - and request.lora_request.lora_int_id - not in scheduled_loras): + if (self.lora_config and request.lora_request and + (len(scheduled_loras) == self.lora_config.max_loras and + request.lora_request.lora_int_id not in scheduled_loras)): # Scheduling would exceed max_loras, skip. - self.waiting.popleft() - skipped_waiting_requests.appendleft(request) + self.waiting.pop_request() + skipped_waiting_requests.prepend_request(request) continue num_external_computed_tokens = 0 @@ -407,8 +424,8 @@ class Scheduler(SchedulerInterface): # pooling requests to be chunked if not self.scheduler_config.chunked_prefill_enabled and \ num_new_tokens > token_budget: - self.waiting.popleft() - skipped_waiting_requests.appendleft(request) + self.waiting.pop_request() + skipped_waiting_requests.prepend_request(request) continue num_new_tokens = min(num_new_tokens, token_budget) @@ -448,17 +465,19 @@ class Scheduler(SchedulerInterface): num_external_computed_tokens, ) - self.waiting.popleft() + # Request was already popped from self.waiting + # unless it was re-added above due to new_blocks being None. + request = self.waiting.pop_request() if load_kv_async: # If loading async, allocate memory and put request # into the WAITING_FOR_REMOTE_KV state. - skipped_waiting_requests.appendleft(request) + skipped_waiting_requests.prepend_request(request) request.status = RequestStatus.WAITING_FOR_REMOTE_KVS continue if request.use_structured_output: - structured_output_request_ids[ - request.request_id] = req_index + structured_output_request_ids[request.request_id] = ( + req_index) req_index += 1 self.running.append(request) if self.log_stats: @@ -494,7 +513,7 @@ class Scheduler(SchedulerInterface): # Put back any skipped requests at the head of the waiting queue if skipped_waiting_requests: - self.waiting.extendleft(skipped_waiting_requests) + self.waiting.prepend_requests(skipped_waiting_requests) # Check if the scheduling constraints are satisfied. total_num_scheduled_tokens = sum(num_scheduled_tokens.values()) @@ -896,7 +915,7 @@ class Scheduler(SchedulerInterface): return len(self.running), len(self.waiting) def add_request(self, request: Request) -> None: - self.waiting.append(request) + self.waiting.add_request(request) self.requests[request.request_id] = request if self.log_stats: request.record_event(EngineCoreEventType.QUEUED) @@ -917,16 +936,31 @@ class Scheduler(SchedulerInterface): else: request_ids = set(request_ids) + running_requests_to_remove = [] + waiting_requests_to_remove = [] + valid_requests = [] + + # First pass: collect requests to remove from queues for req_id in request_ids: request = self.requests.get(req_id) if request is None: # Invalid request ID. continue + valid_requests.append(request) if request.status == RequestStatus.RUNNING: - self.running.remove(request) + running_requests_to_remove.append(request) else: - self.waiting.remove(request) + waiting_requests_to_remove.append(request) + + # Remove all requests from queues at once for better efficiency + for request in running_requests_to_remove: + self.running.remove(request) + if waiting_requests_to_remove: + self.waiting.remove_requests(waiting_requests_to_remove) + + # Second pass: set status and free requests + for request in valid_requests: request.status = finished_status self._free_request(request) diff --git a/vllm/v1/engine/__init__.py b/vllm/v1/engine/__init__.py index 4d1696a9b43a5..921ccd708cdd6 100644 --- a/vllm/v1/engine/__init__.py +++ b/vllm/v1/engine/__init__.py @@ -68,6 +68,7 @@ class EngineCoreRequest( # belong to, to cover a race condition where the request is sent before # a wave finished notification is received. current_wave: int = 0 + priority: int = 0 class EngineCoreEventType(enum.IntEnum): diff --git a/vllm/v1/engine/processor.py b/vllm/v1/engine/processor.py index b00f1444c7b3c..a0b170ba55ad7 100644 --- a/vllm/v1/engine/processor.py +++ b/vllm/v1/engine/processor.py @@ -219,8 +219,6 @@ class Processor: # TODO(woosuk): Support encoder-decoder models. self._validate_lora(lora_request) self._validate_params(params, lora_request) - if priority != 0: - raise ValueError("V1 does not support priority yet.") if trace_headers is not None: raise ValueError("V1 does not support tracing yet.") if prompt_adapter_request is not None: @@ -340,6 +338,7 @@ class Processor: arrival_time=arrival_time, lora_request=lora_request, cache_salt=decoder_inputs.get("cache_salt"), + priority=priority, data_parallel_rank=data_parallel_rank, ) diff --git a/vllm/v1/request.py b/vllm/v1/request.py index 4632884419aed..9b96f4599f924 100644 --- a/vllm/v1/request.py +++ b/vllm/v1/request.py @@ -2,6 +2,7 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import enum +import time from typing import TYPE_CHECKING, Any, Optional, Union from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange @@ -30,18 +31,23 @@ class Request: pooling_params: Optional[PoolingParams], eos_token_id: Optional[int], client_index: int = 0, + arrival_time: Optional[float] = None, lora_request: Optional["LoRARequest"] = None, structured_output_request: Optional["StructuredOutputRequest"] = None, cache_salt: Optional[str] = None, + priority: int = 0, ) -> None: self.request_id = request_id self.client_index = client_index + self.priority = priority self.sampling_params = sampling_params self.pooling_params = pooling_params # Because of LoRA, the eos token id can be different for each request. self.eos_token_id = eos_token_id self.lora_request = lora_request self.structured_output_request = structured_output_request + self.arrival_time = arrival_time if arrival_time is not None else \ + time.time() self.status = RequestStatus.WAITING if sampling_params and sampling_params.guided_decoding is not None: @@ -118,11 +124,13 @@ class Request: sampling_params=request.sampling_params, pooling_params=request.pooling_params, eos_token_id=request.eos_token_id, + arrival_time=request.arrival_time, lora_request=request.lora_request, structured_output_request=StructuredOutputRequest( sampling_params=request.sampling_params) \ if request.sampling_params else None, cache_salt=request.cache_salt, + priority=request.priority, ) def append_output_token_ids( From f39ab2d4bde85e169b85ea3555dc4b74224b3929 Mon Sep 17 00:00:00 2001 From: jinqinn Date: Mon, 23 Jun 2025 11:36:26 +0800 Subject: [PATCH 083/211] [Misc] Configurable timeout for execute_model RPC calls via env var (#19544) Signed-off-by: jinqinn --- vllm/envs.py | 6 ++++++ vllm/v1/executor/multiproc_executor.py | 17 ++++++----------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/vllm/envs.py b/vllm/envs.py index 93a7c8069c2de..01d8d8a2d2e0c 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -130,6 +130,7 @@ if TYPE_CHECKING: VLLM_TOOL_PARSE_REGEX_TIMEOUT_SECONDS: int = 1 VLLM_SLEEP_WHEN_IDLE: bool = False VLLM_MQ_MAX_CHUNK_BYTES_MB: int = 16 + VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS: int = 300 VLLM_KV_CACHE_LAYOUT: Optional[str] = None VLLM_COMPUTE_NANS_IN_LOGITS: bool = False @@ -897,6 +898,11 @@ environment_variables: dict[str, Callable[[], Any]] = { "VLLM_MQ_MAX_CHUNK_BYTES_MB": lambda: int(os.getenv("VLLM_MQ_MAX_CHUNK_BYTES_MB", "16")), + # Timeout in seconds for execute_model RPC calls in multiprocessing + # executor (only applies when TP > 1). + "VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS": + lambda: int(os.getenv("VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS", "300")), + # KV Cache layout used throughout vllm. # Some common values are: # - NHD diff --git a/vllm/v1/executor/multiproc_executor.py b/vllm/v1/executor/multiproc_executor.py index 2148680d5f565..b06b7cc804d5a 100644 --- a/vllm/v1/executor/multiproc_executor.py +++ b/vllm/v1/executor/multiproc_executor.py @@ -37,11 +37,6 @@ from vllm.worker.worker_base import WorkerWrapperBase logger = init_logger(__name__) -POLLING_TIMEOUT_MS = 5000 -POLLING_TIMEOUT_S = POLLING_TIMEOUT_MS // 1000 - -EXECUTE_MODEL_TIMEOUT_S = 300 - class MultiprocExecutor(Executor): @@ -160,12 +155,12 @@ class MultiprocExecutor(Executor): self, scheduler_output, ) -> Union[ModelRunnerOutput, Future[ModelRunnerOutput]]: - (output, ) = self.collective_rpc("execute_model", - args=(scheduler_output, ), - unique_reply_rank=self.output_rank, - non_block=self.max_concurrent_batches - > 1, - timeout=EXECUTE_MODEL_TIMEOUT_S) + (output, ) = self.collective_rpc( + "execute_model", + args=(scheduler_output, ), + unique_reply_rank=self.output_rank, + non_block=self.max_concurrent_batches > 1, + timeout=envs.VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS) return output def collective_rpc(self, From 493c275352bddf7d4877602e19b8bd29d662de63 Mon Sep 17 00:00:00 2001 From: Vensen Date: Mon, 23 Jun 2025 11:40:28 +0800 Subject: [PATCH 084/211] Fix(models/siglip): Add compatibility for Gemma models quantized by llm-compressor (#19643) Signed-off-by: Vensenmu --- vllm/model_executor/models/gemma3_mm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm/model_executor/models/gemma3_mm.py b/vllm/model_executor/models/gemma3_mm.py index 3a1c14978b45b..619d2aa674919 100644 --- a/vllm/model_executor/models/gemma3_mm.py +++ b/vllm/model_executor/models/gemma3_mm.py @@ -479,6 +479,7 @@ class Gemma3ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP, "model.vision_tower.": "vision_tower.", "model.multi_modal_projector.": "multi_modal_projector.", "lm_head.": "language_model.lm_head.", + "vision_tower.vision_model.": "vision_model.", }) def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): From f17aec0d6350303b46ee58d27a6fc83ddf9583b2 Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Mon, 23 Jun 2025 13:24:23 +0800 Subject: [PATCH 085/211] [doc] Fold long code blocks to improve readability (#19926) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- docs/ci/update_pytorch_version.md | 6 +- docs/cli/README.md | 49 +- docs/configuration/conserving_memory.md | 54 +- docs/configuration/env_vars.md | 8 +- docs/contributing/README.md | 30 +- docs/contributing/model/basic.md | 48 +- docs/contributing/model/multimodal.md | 779 +++++++++--------- docs/contributing/profiling.md | 36 +- docs/deployment/docker.md | 28 +- docs/deployment/frameworks/autogen.md | 76 +- docs/deployment/frameworks/cerebrium.md | 112 +-- docs/deployment/frameworks/dstack.md | 118 +-- docs/deployment/frameworks/haystack.md | 38 +- docs/deployment/frameworks/litellm.md | 26 +- docs/deployment/frameworks/lws.md | 230 +++--- docs/deployment/frameworks/skypilot.md | 362 ++++---- .../integrations/production-stack.md | 84 +- docs/deployment/k8s.md | 172 ++-- docs/deployment/nginx.md | 84 +- docs/design/arch_overview.md | 92 ++- docs/design/kernel/paged_attention.md | 38 +- docs/design/plugin_system.md | 40 +- docs/features/lora.md | 186 +++-- docs/features/multimodal_inputs.md | 644 ++++++++------- docs/features/quantization/auto_awq.md | 84 +- docs/features/quantization/bitblas.md | 28 +- docs/features/quantization/fp8.md | 28 +- docs/features/quantization/gguf.md | 72 +- docs/features/quantization/gptqmodel.md | 82 +- docs/features/quantization/int4.md | 126 +-- docs/features/quantization/int8.md | 78 +- docs/features/quantization/modelopt.md | 74 +- .../quantization/quantized_kvcache.md | 136 +-- docs/features/quantization/quark.md | 212 ++--- docs/features/quantization/torchao.md | 40 +- docs/features/reasoning_outputs.md | 366 ++++---- docs/features/spec_decode.md | 228 ++--- docs/features/structured_outputs.md | 342 ++++---- docs/features/tool_calling.md | 142 ++-- docs/getting_started/installation/cpu.md | 80 +- .../installation/gpu/rocm.inc.md | 60 +- .../installation/intel_gaudi.md | 96 +-- docs/getting_started/quickstart.md | 66 +- docs/models/generative_models.md | 56 +- docs/models/supported_models.md | 59 +- docs/serving/integrations/langchain.md | 26 +- docs/serving/openai_compatible_server.md | 552 +++++++------ docs/usage/metrics.md | 40 +- docs/usage/troubleshooting.md | 160 ++-- docs/usage/usage_stats.md | 62 +- 50 files changed, 3455 insertions(+), 3180 deletions(-) diff --git a/docs/ci/update_pytorch_version.md b/docs/ci/update_pytorch_version.md index 2ad3430a4de85..69fdc82ef9711 100644 --- a/docs/ci/update_pytorch_version.md +++ b/docs/ci/update_pytorch_version.md @@ -91,7 +91,7 @@ source to unblock the update process. ### FlashInfer Here is how to build and install it from source with torch2.7.0+cu128 in vLLM [Dockerfile](https://github.com/vllm-project/vllm/blob/27bebcd89792d5c4b08af7a65095759526f2f9e1/docker/Dockerfile#L259-L271): -``` +```bash export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0 10.0+PTX' export FLASHINFER_ENABLE_SM90=1 uv pip install --system --no-build-isolation "git+https://github.com/flashinfer-ai/flashinfer@v0.2.6.post1" @@ -105,14 +105,14 @@ team if you want to get the package published there. ### xFormers Similar to FlashInfer, here is how to build and install xFormers from source: -``` +```bash export TORCH_CUDA_ARCH_LIST='7.0 7.5 8.0 8.9 9.0 10.0+PTX' MAX_JOBS=16 uv pip install --system --no-build-isolation "git+https://github.com/facebookresearch/xformers@v0.0.30" ``` ### Mamba -``` +```bash uv pip install --system --no-build-isolation "git+https://github.com/state-spaces/mamba@v2.2.4" ``` diff --git a/docs/cli/README.md b/docs/cli/README.md index df700fb743c06..b2587a5e7cd2b 100644 --- a/docs/cli/README.md +++ b/docs/cli/README.md @@ -16,35 +16,33 @@ vllm {chat,complete,serve,bench,collect-env,run-batch} Start the vLLM OpenAI Compatible API server. -Examples: +??? Examples -```bash -# Start with a model -vllm serve meta-llama/Llama-2-7b-hf + ```bash + # Start with a model + vllm serve meta-llama/Llama-2-7b-hf -# Specify the port -vllm serve meta-llama/Llama-2-7b-hf --port 8100 + # Specify the port + vllm serve meta-llama/Llama-2-7b-hf --port 8100 -# Check with --help for more options -# To list all groups -vllm serve --help=listgroup + # Check with --help for more options + # To list all groups + vllm serve --help=listgroup -# To view a argument group -vllm serve --help=ModelConfig + # To view a argument group + vllm serve --help=ModelConfig -# To view a single argument -vllm serve --help=max-num-seqs + # To view a single argument + vllm serve --help=max-num-seqs -# To search by keyword -vllm serve --help=max -``` + # To search by keyword + vllm serve --help=max + ``` ## chat Generate chat completions via the running API server. -Examples: - ```bash # Directly connect to localhost API without arguments vllm chat @@ -60,8 +58,6 @@ vllm chat --quick "hi" Generate text completions based on the given prompt via the running API server. -Examples: - ```bash # Directly connect to localhost API without arguments vllm complete @@ -73,6 +69,8 @@ vllm complete --url http://{vllm-serve-host}:{vllm-serve-port}/v1 vllm complete --quick "The future of AI is" ``` + + ## bench Run benchmark tests for latency online serving throughput and offline inference throughput. @@ -89,8 +87,6 @@ vllm bench {latency, serve, throughput} Benchmark the latency of a single batch of requests. -Example: - ```bash vllm bench latency \ --model meta-llama/Llama-3.2-1B-Instruct \ @@ -104,8 +100,6 @@ vllm bench latency \ Benchmark the online serving throughput. -Example: - ```bash vllm bench serve \ --model meta-llama/Llama-3.2-1B-Instruct \ @@ -120,8 +114,6 @@ vllm bench serve \ Benchmark offline inference throughput. -Example: - ```bash vllm bench throughput \ --model meta-llama/Llama-3.2-1B-Instruct \ @@ -143,7 +135,8 @@ vllm collect-env Run batch prompts and write results to file. -Examples: +
+Examples ```bash # Running with a local file @@ -159,6 +152,8 @@ vllm run-batch \ --model meta-llama/Meta-Llama-3-8B-Instruct ``` +
+ ## More Help For detailed options of any subcommand, use: diff --git a/docs/configuration/conserving_memory.md b/docs/configuration/conserving_memory.md index a1283a503a6df..e2303067e3ee8 100644 --- a/docs/configuration/conserving_memory.md +++ b/docs/configuration/conserving_memory.md @@ -57,19 +57,21 @@ By default, we optimize model inference using CUDA graphs which take up extra me You can adjust `compilation_config` to achieve a better balance between inference speed and memory usage: -```python -from vllm import LLM -from vllm.config import CompilationConfig, CompilationLevel +??? Code -llm = LLM( - model="meta-llama/Llama-3.1-8B-Instruct", - compilation_config=CompilationConfig( - level=CompilationLevel.PIECEWISE, - # By default, it goes up to max_num_seqs - cudagraph_capture_sizes=[1, 2, 4, 8, 16], - ), -) -``` + ```python + from vllm import LLM + from vllm.config import CompilationConfig, CompilationLevel + + llm = LLM( + model="meta-llama/Llama-3.1-8B-Instruct", + compilation_config=CompilationConfig( + level=CompilationLevel.PIECEWISE, + # By default, it goes up to max_num_seqs + cudagraph_capture_sizes=[1, 2, 4, 8, 16], + ), + ) + ``` You can disable graph capturing completely via the `enforce_eager` flag: @@ -127,18 +129,20 @@ reduce the size of the processed multi-modal inputs, which in turn saves memory. Here are some examples: -```python -from vllm import LLM +??? Code -# Available for Qwen2-VL series models -llm = LLM(model="Qwen/Qwen2.5-VL-3B-Instruct", - mm_processor_kwargs={ - "max_pixels": 768 * 768, # Default is 1280 * 28 * 28 - }) + ```python + from vllm import LLM -# Available for InternVL series models -llm = LLM(model="OpenGVLab/InternVL2-2B", - mm_processor_kwargs={ - "max_dynamic_patch": 4, # Default is 12 - }) -``` + # Available for Qwen2-VL series models + llm = LLM(model="Qwen/Qwen2.5-VL-3B-Instruct", + mm_processor_kwargs={ + "max_pixels": 768 * 768, # Default is 1280 * 28 * 28 + }) + + # Available for InternVL series models + llm = LLM(model="OpenGVLab/InternVL2-2B", + mm_processor_kwargs={ + "max_dynamic_patch": 4, # Default is 12 + }) + ``` diff --git a/docs/configuration/env_vars.md b/docs/configuration/env_vars.md index f6d548a19d91f..c875931c305b6 100644 --- a/docs/configuration/env_vars.md +++ b/docs/configuration/env_vars.md @@ -7,6 +7,8 @@ vLLM uses the following environment variables to configure the system: All environment variables used by vLLM are prefixed with `VLLM_`. **Special care should be taken for Kubernetes users**: please do not name the service as `vllm`, otherwise environment variables set by Kubernetes might conflict with vLLM's environment variables, because [Kubernetes sets environment variables for each service with the capitalized service name as the prefix](https://kubernetes.io/docs/concepts/services-networking/service/#environment-variables). -```python ---8<-- "vllm/envs.py:env-vars-definition" -``` +??? Code + + ```python + --8<-- "vllm/envs.py:env-vars-definition" + ``` diff --git a/docs/contributing/README.md b/docs/contributing/README.md index 10c50e0072434..e977ec3d2f71a 100644 --- a/docs/contributing/README.md +++ b/docs/contributing/README.md @@ -93,25 +93,27 @@ For additional features and advanced configurations, refer to the official [MkDo ## Testing -```bash -pip install -r requirements/dev.txt +??? note "Commands" -# Linting, formatting and static type checking -pre-commit install --hook-type pre-commit --hook-type commit-msg + ```bash + pip install -r requirements/dev.txt -# You can manually run pre-commit with -pre-commit run --all-files + # Linting, formatting and static type checking + pre-commit install --hook-type pre-commit --hook-type commit-msg -# To manually run something from CI that does not run -# locally by default, you can run: -pre-commit run mypy-3.9 --hook-stage manual --all-files + # You can manually run pre-commit with + pre-commit run --all-files -# Unit tests -pytest tests/ + # To manually run something from CI that does not run + # locally by default, you can run: + pre-commit run mypy-3.9 --hook-stage manual --all-files -# Run tests for a single test file with detailed output -pytest -s -v tests/test_logger.py -``` + # Unit tests + pytest tests/ + + # Run tests for a single test file with detailed output + pytest -s -v tests/test_logger.py + ``` !!! tip Since the ships with Python 3.12, all tests in CI (except `mypy`) are run with Python 3.12. diff --git a/docs/contributing/model/basic.md b/docs/contributing/model/basic.md index 0c0ba33792578..644d21482ef6f 100644 --- a/docs/contributing/model/basic.md +++ b/docs/contributing/model/basic.md @@ -27,33 +27,35 @@ All vLLM modules within the model must include a `prefix` argument in their cons The initialization code should look like this: -```python -from torch import nn -from vllm.config import VllmConfig -from vllm.attention import Attention +??? Code -class MyAttention(nn.Module): - def __init__(self, vllm_config: VllmConfig, prefix: str): - super().__init__() - self.attn = Attention(prefix=f"{prefix}.attn") + ```python + from torch import nn + from vllm.config import VllmConfig + from vllm.attention import Attention -class MyDecoderLayer(nn.Module): - def __init__(self, vllm_config: VllmConfig, prefix: str): - super().__init__() - self.self_attn = MyAttention(prefix=f"{prefix}.self_attn") + class MyAttention(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str): + super().__init__() + self.attn = Attention(prefix=f"{prefix}.attn") -class MyModel(nn.Module): - def __init__(self, vllm_config: VllmConfig, prefix: str): - super().__init__() - self.layers = nn.ModuleList( - [MyDecoderLayer(vllm_config, prefix=f"{prefix}.layers.{i}") for i in range(vllm_config.model_config.hf_config.num_hidden_layers)] - ) + class MyDecoderLayer(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str): + super().__init__() + self.self_attn = MyAttention(prefix=f"{prefix}.self_attn") -class MyModelForCausalLM(nn.Module): - def __init__(self, vllm_config: VllmConfig, prefix: str = ""): - super().__init__() - self.model = MyModel(vllm_config, prefix=f"{prefix}.model") -``` + class MyModel(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str): + super().__init__() + self.layers = nn.ModuleList( + [MyDecoderLayer(vllm_config, prefix=f"{prefix}.layers.{i}") for i in range(vllm_config.model_config.hf_config.num_hidden_layers)] + ) + + class MyModelForCausalLM(nn.Module): + def __init__(self, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + self.model = MyModel(vllm_config, prefix=f"{prefix}.model") + ``` ### Computation Code diff --git a/docs/contributing/model/multimodal.md b/docs/contributing/model/multimodal.md index bed6d4e653d65..6ff2abbae6329 100644 --- a/docs/contributing/model/multimodal.md +++ b/docs/contributing/model/multimodal.md @@ -25,59 +25,63 @@ Further update the model as follows: - Implement [get_multimodal_embeddings][vllm.model_executor.models.interfaces.SupportsMultiModal.get_multimodal_embeddings] that returns the embeddings from running the multimodal inputs through the multimodal tokenizer of the model. Below we provide a boilerplate of a typical implementation pattern, but feel free to adjust it to your own needs. - ```python - class YourModelForImage2Seq(nn.Module): - ... + ??? Code - def _process_image_input(self, image_input: YourModelImageInputs) -> torch.Tensor: + ```python + class YourModelForImage2Seq(nn.Module): + ... - assert self.vision_encoder is not None - image_features = self.vision_encoder(image_input) - return self.multi_modal_projector(image_features) + def _process_image_input(self, image_input: YourModelImageInputs) -> torch.Tensor: - def get_multimodal_embeddings( - self, **kwargs: object) -> Optional[MultiModalEmbeddings]: + assert self.vision_encoder is not None + image_features = self.vision_encoder(image_input) + return self.multi_modal_projector(image_features) - # Validate the multimodal input keyword arguments - image_input = self._parse_and_validate_image_input(**kwargs) - if image_input is None: - return None + def get_multimodal_embeddings( + self, **kwargs: object) -> Optional[MultiModalEmbeddings]: - # Run multimodal inputs through encoder and projector - vision_embeddings = self._process_image_input(image_input) - return vision_embeddings - ``` + # Validate the multimodal input keyword arguments + image_input = self._parse_and_validate_image_input(**kwargs) + if image_input is None: + return None + + # Run multimodal inputs through encoder and projector + vision_embeddings = self._process_image_input(image_input) + return vision_embeddings + ``` !!! important The returned `multimodal_embeddings` must be either a **3D [torch.Tensor][]** of shape `(num_items, feature_size, hidden_size)`, or a **list / tuple of 2D [torch.Tensor][]'s** of shape `(feature_size, hidden_size)`, so that `multimodal_embeddings[i]` retrieves the embeddings generated from the `i`-th multimodal data item (e.g, image) of the request. - Implement [get_input_embeddings][vllm.model_executor.models.interfaces.SupportsMultiModal.get_input_embeddings] to merge `multimodal_embeddings` with text embeddings from the `input_ids`. If input processing for the model is implemented correctly (see sections below), then you can leverage the utility function we provide to easily merge the embeddings. - ```python - from .utils import merge_multimodal_embeddings + ??? Code - class YourModelForImage2Seq(nn.Module): - ... + ```python + from .utils import merge_multimodal_embeddings - def get_input_embeddings( - self, - input_ids: torch.Tensor, - multimodal_embeddings: Optional[MultiModalEmbeddings] = None, - ) -> torch.Tensor: + class YourModelForImage2Seq(nn.Module): + ... - # `get_input_embeddings` should already be implemented for the language - # model as one of the requirements of basic vLLM model implementation. - inputs_embeds = self.language_model.get_input_embeddings(input_ids) + def get_input_embeddings( + self, + input_ids: torch.Tensor, + multimodal_embeddings: Optional[MultiModalEmbeddings] = None, + ) -> torch.Tensor: - if multimodal_embeddings is not None: - inputs_embeds = merge_multimodal_embeddings( - input_ids=input_ids, - inputs_embeds=inputs_embeds, - multimodal_embeddings=multimodal_embeddings, - placeholder_token_id=self.config.image_token_index) + # `get_input_embeddings` should already be implemented for the language + # model as one of the requirements of basic vLLM model implementation. + inputs_embeds = self.language_model.get_input_embeddings(input_ids) - return inputs_embeds - ``` + if multimodal_embeddings is not None: + inputs_embeds = merge_multimodal_embeddings( + input_ids=input_ids, + inputs_embeds=inputs_embeds, + multimodal_embeddings=multimodal_embeddings, + placeholder_token_id=self.config.image_token_index) + + return inputs_embeds + ``` - Implement [get_language_model][vllm.model_executor.models.interfaces.SupportsMultiModal.get_language_model] getter to provide stable access to the underlying language model. @@ -135,42 +139,46 @@ Assuming that the memory usage increases with the number of tokens, the dummy in Looking at the code of HF's `LlavaForConditionalGeneration`: - ```python - # https://github.com/huggingface/transformers/blob/v4.47.1/src/transformers/models/llava/modeling_llava.py#L530-L544 - n_image_tokens = (input_ids == self.config.image_token_index).sum().item() - n_image_features = image_features.shape[0] * image_features.shape[1] + ??? Code - if n_image_tokens != n_image_features: - raise ValueError( - f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}" + ```python + # https://github.com/huggingface/transformers/blob/v4.47.1/src/transformers/models/llava/modeling_llava.py#L530-L544 + n_image_tokens = (input_ids == self.config.image_token_index).sum().item() + n_image_features = image_features.shape[0] * image_features.shape[1] + + if n_image_tokens != n_image_features: + raise ValueError( + f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}" + ) + special_image_mask = ( + (input_ids == self.config.image_token_index) + .unsqueeze(-1) + .expand_as(inputs_embeds) + .to(inputs_embeds.device) ) - special_image_mask = ( - (input_ids == self.config.image_token_index) - .unsqueeze(-1) - .expand_as(inputs_embeds) - .to(inputs_embeds.device) - ) - image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) - inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) - ``` + image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype) + inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) + ``` The number of placeholder feature tokens per image is `image_features.shape[1]`. `image_features` is calculated inside the `get_image_features` method: - ```python - # https://github.com/huggingface/transformers/blob/v4.47.1/src/transformers/models/llava/modeling_llava.py#L290-L300 - image_outputs = self.vision_tower(pixel_values, output_hidden_states=True) + ??? Code - selected_image_feature = image_outputs.hidden_states[vision_feature_layer] - if vision_feature_select_strategy == "default": - selected_image_feature = selected_image_feature[:, 1:] - elif vision_feature_select_strategy == "full": - selected_image_feature = selected_image_feature - else: - raise ValueError(f"Unexpected select feature strategy: {self.config.vision_feature_select_strategy}") - image_features = self.multi_modal_projector(selected_image_feature) - return image_features - ``` + ```python + # https://github.com/huggingface/transformers/blob/v4.47.1/src/transformers/models/llava/modeling_llava.py#L290-L300 + image_outputs = self.vision_tower(pixel_values, output_hidden_states=True) + + selected_image_feature = image_outputs.hidden_states[vision_feature_layer] + if vision_feature_select_strategy == "default": + selected_image_feature = selected_image_feature[:, 1:] + elif vision_feature_select_strategy == "full": + selected_image_feature = selected_image_feature + else: + raise ValueError(f"Unexpected select feature strategy: {self.config.vision_feature_select_strategy}") + image_features = self.multi_modal_projector(selected_image_feature) + return image_features + ``` We can infer that `image_features.shape[1]` is based on `image_outputs.hidden_states.shape[1]` from the vision tower (`CLIPVisionModel` for the [`llava-hf/llava-1.5-7b-hf`](https://huggingface.co/llava-hf/llava-1.5-7b-hf) model). @@ -193,20 +201,22 @@ Assuming that the memory usage increases with the number of tokens, the dummy in To find the sequence length, we turn to the code of `CLIPVisionEmbeddings`: - ```python - # https://github.com/huggingface/transformers/blob/v4.47.1/src/transformers/models/clip/modeling_clip.py#L247-L257 - target_dtype = self.patch_embedding.weight.dtype - patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] - patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + ??? Code - class_embeds = self.class_embedding.expand(batch_size, 1, -1) - embeddings = torch.cat([class_embeds, patch_embeds], dim=1) - if interpolate_pos_encoding: - embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) - else: - embeddings = embeddings + self.position_embedding(self.position_ids) - return embeddings - ``` + ```python + # https://github.com/huggingface/transformers/blob/v4.47.1/src/transformers/models/clip/modeling_clip.py#L247-L257 + target_dtype = self.patch_embedding.weight.dtype + patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] + patch_embeds = patch_embeds.flatten(2).transpose(1, 2) + + class_embeds = self.class_embedding.expand(batch_size, 1, -1) + embeddings = torch.cat([class_embeds, patch_embeds], dim=1) + if interpolate_pos_encoding: + embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) + else: + embeddings = embeddings + self.position_embedding(self.position_ids) + return embeddings + ``` We can infer that `embeddings.shape[1] == self.num_positions`, where @@ -218,55 +228,59 @@ Assuming that the memory usage increases with the number of tokens, the dummy in Overall, the number of placeholder feature tokens for an image can be calculated as: - ```python - def get_num_image_tokens( - self, - *, - image_width: int, - image_height: int, - ) -> int: - hf_config = self.get_hf_config() - hf_processor = self.get_hf_processor() + ??? Code - image_size = hf_config.vision_config.image_size - patch_size = hf_config.vision_config.patch_size + ```python + def get_num_image_tokens( + self, + *, + image_width: int, + image_height: int, + ) -> int: + hf_config = self.get_hf_config() + hf_processor = self.get_hf_processor() - num_image_tokens = (image_size // patch_size) ** 2 + 1 - if hf_processor.vision_feature_select_strategy == "default": - num_image_tokens -= 1 + image_size = hf_config.vision_config.image_size + patch_size = hf_config.vision_config.patch_size - return num_image_tokens - ``` + num_image_tokens = (image_size // patch_size) ** 2 + 1 + if hf_processor.vision_feature_select_strategy == "default": + num_image_tokens -= 1 + + return num_image_tokens + ``` Notice that the number of image tokens doesn't depend on the image width and height. We can simply use a dummy `image_size` to calculate the multimodal profiling data: - ```python - # NOTE: In actuality, this is usually implemented as part of the - # model's subclass of `BaseProcessingInfo`, but we show it as is - # here for simplicity. - def get_image_size_with_most_features(self) -> ImageSize: - hf_config = self.get_hf_config() - width = height = hf_config.image_size - return ImageSize(width=width, height=height) + ??? Code - def get_dummy_mm_data( - self, - seq_len: int, - mm_counts: Mapping[str, int], - ) -> MultiModalDataDict: - num_images = mm_counts.get("image", 0) + ```python + # NOTE: In actuality, this is usually implemented as part of the + # model's subclass of `BaseProcessingInfo`, but we show it as is + # here for simplicity. + def get_image_size_with_most_features(self) -> ImageSize: + hf_config = self.get_hf_config() + width = height = hf_config.image_size + return ImageSize(width=width, height=height) - target_width, target_height = \ - self.info.get_image_size_with_most_features() + def get_dummy_mm_data( + self, + seq_len: int, + mm_counts: Mapping[str, int], + ) -> MultiModalDataDict: + num_images = mm_counts.get("image", 0) - return { - "image": - self._get_dummy_images(width=target_width, - height=target_height, - num_images=num_images) - } - ``` + target_width, target_height = \ + self.info.get_image_size_with_most_features() + + return { + "image": + self._get_dummy_images(width=target_width, + height=target_height, + num_images=num_images) + } + ``` For the text, we simply expand the multimodal image token from the model config to match the desired number of images. @@ -284,21 +298,23 @@ Assuming that the memory usage increases with the number of tokens, the dummy in Looking at the code of HF's `FuyuForCausalLM`: - ```python - # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/modeling_fuyu.py#L311-L322 - if image_patches is not None and past_key_values is None: - patch_embeddings = [ - self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype)) - .squeeze(0) - .to(inputs_embeds.device) - for patch in image_patches - ] - inputs_embeds = self.gather_continuous_embeddings( - word_embeddings=inputs_embeds, - continuous_embeddings=patch_embeddings, - image_patch_input_indices=image_patches_indices, - ) - ``` + ??? Code + + ```python + # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/modeling_fuyu.py#L311-L322 + if image_patches is not None and past_key_values is None: + patch_embeddings = [ + self.vision_embed_tokens(patch.to(self.vision_embed_tokens.weight.dtype)) + .squeeze(0) + .to(inputs_embeds.device) + for patch in image_patches + ] + inputs_embeds = self.gather_continuous_embeddings( + word_embeddings=inputs_embeds, + continuous_embeddings=patch_embeddings, + image_patch_input_indices=image_patches_indices, + ) + ``` The number of placeholder feature tokens for the `i`th item in the batch is `patch_embeddings[i].shape[0]`, which is the same as `image_patches[i].shape[0]`, i.e. `num_total_patches`. @@ -312,92 +328,98 @@ Assuming that the memory usage increases with the number of tokens, the dummy in In `FuyuImageProcessor.preprocess`, the images are resized and padded to the target `FuyuImageProcessor.size`, returning the dimensions after resizing (but before padding) as metadata. - ```python - # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/processing_fuyu.py#L541-L544 - image_encoding = self.image_processor.preprocess(images, **output_kwargs["images_kwargs"]) - batch_images = image_encoding["images"] - image_unpadded_heights = image_encoding["image_unpadded_heights"] - image_unpadded_widths = image_encoding["image_unpadded_widths"] + ??? Code - # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/image_processing_fuyu.py#L480-L - if do_resize: - batch_images = [ - [self.resize(image, size=size, input_data_format=input_data_format) for image in images] - for images in batch_images - ] + ```python + # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/processing_fuyu.py#L541-L544 + image_encoding = self.image_processor.preprocess(images, **output_kwargs["images_kwargs"]) + batch_images = image_encoding["images"] + image_unpadded_heights = image_encoding["image_unpadded_heights"] + image_unpadded_widths = image_encoding["image_unpadded_widths"] - image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images] - image_unpadded_heights = [[image_size[0]] for image_size in image_sizes] - image_unpadded_widths = [[image_size[1]] for image_size in image_sizes] - - if do_pad: - batch_images = [ - [ - self.pad_image( - image, - size=size, - mode=padding_mode, - constant_values=padding_value, - input_data_format=input_data_format, - ) - for image in images + # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/image_processing_fuyu.py#L480-L + if do_resize: + batch_images = [ + [self.resize(image, size=size, input_data_format=input_data_format) for image in images] + for images in batch_images ] - for images in batch_images - ] - ``` + + image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images] + image_unpadded_heights = [[image_size[0]] for image_size in image_sizes] + image_unpadded_widths = [[image_size[1]] for image_size in image_sizes] + + if do_pad: + batch_images = [ + [ + self.pad_image( + image, + size=size, + mode=padding_mode, + constant_values=padding_value, + input_data_format=input_data_format, + ) + for image in images + ] + for images in batch_images + ] + ``` In `FuyuImageProcessor.preprocess_with_tokenizer_info`, the images are split into patches based on this metadata: - ```python - # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/processing_fuyu.py#L417-L425 - model_image_input = self.image_processor.preprocess_with_tokenizer_info( - image_input=tensor_batch_images, - image_present=image_present, - image_unpadded_h=image_unpadded_heights, - image_unpadded_w=image_unpadded_widths, - image_placeholder_id=image_placeholder_id, - image_newline_id=image_newline_id, - variable_sized=True, - ) + ??? Code - # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/image_processing_fuyu.py#L638-L658 - image_height, image_width = image.shape[1], image.shape[2] - if variable_sized: # variable_sized=True - new_h = min( - image_height, - math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height, + ```python + # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/processing_fuyu.py#L417-L425 + model_image_input = self.image_processor.preprocess_with_tokenizer_info( + image_input=tensor_batch_images, + image_present=image_present, + image_unpadded_h=image_unpadded_heights, + image_unpadded_w=image_unpadded_widths, + image_placeholder_id=image_placeholder_id, + image_newline_id=image_newline_id, + variable_sized=True, ) - new_w = min( - image_width, - math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width, - ) - image = image[:, :new_h, :new_w] - image_height, image_width = new_h, new_w - num_patches = self.get_num_patches(image_height=image_height, image_width=image_width) - tensor_of_image_ids = torch.full( - [num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device - ) - patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0) - assert num_patches == patches.shape[0] - ``` + # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/image_processing_fuyu.py#L638-L658 + image_height, image_width = image.shape[1], image.shape[2] + if variable_sized: # variable_sized=True + new_h = min( + image_height, + math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height, + ) + new_w = min( + image_width, + math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width, + ) + image = image[:, :new_h, :new_w] + image_height, image_width = new_h, new_w + + num_patches = self.get_num_patches(image_height=image_height, image_width=image_width) + tensor_of_image_ids = torch.full( + [num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device + ) + patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0) + assert num_patches == patches.shape[0] + ``` The number of patches is in turn defined by `FuyuImageProcessor.get_num_patches`: - ```python - # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/image_processing_fuyu.py#L552-L562 - patch_size = patch_size if patch_size is not None else self.patch_size - patch_height, patch_width = self.patch_size["height"], self.patch_size["width"] + ??? Code - if image_height % patch_height != 0: - raise ValueError(f"{image_height=} must be divisible by {patch_height}") - if image_width % patch_width != 0: - raise ValueError(f"{image_width=} must be divisible by {patch_width}") + ```python + # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/image_processing_fuyu.py#L552-L562 + patch_size = patch_size if patch_size is not None else self.patch_size + patch_height, patch_width = self.patch_size["height"], self.patch_size["width"] - num_patches_per_dim_h = image_height // patch_height - num_patches_per_dim_w = image_width // patch_width - num_patches = num_patches_per_dim_h * num_patches_per_dim_w - ``` + if image_height % patch_height != 0: + raise ValueError(f"{image_height=} must be divisible by {patch_height}") + if image_width % patch_width != 0: + raise ValueError(f"{image_width=} must be divisible by {patch_width}") + + num_patches_per_dim_h = image_height // patch_height + num_patches_per_dim_w = image_width // patch_width + num_patches = num_patches_per_dim_h * num_patches_per_dim_w + ``` These image patches correspond to placeholder tokens (`|SPEAKER|`). So, we just need to maximize the number of image patches. Since input images are first resized to fit within `image_processor.size`, we can maximize the number of image patches by inputting an image with size equal to `image_processor.size`. @@ -419,23 +441,25 @@ Assuming that the memory usage increases with the number of tokens, the dummy in For the multimodal image profiling data, the logic is very similar to LLaVA: - ```python - def get_dummy_mm_data( - self, - seq_len: int, - mm_counts: Mapping[str, int], - ) -> MultiModalDataDict: - target_width, target_height = \ - self.info.get_image_size_with_most_features() - num_images = mm_counts.get("image", 0) + ??? Code - return { - "image": - self._get_dummy_images(width=target_width, - height=target_height, - num_images=num_images) - } - ``` + ```python + def get_dummy_mm_data( + self, + seq_len: int, + mm_counts: Mapping[str, int], + ) -> MultiModalDataDict: + target_width, target_height = \ + self.info.get_image_size_with_most_features() + num_images = mm_counts.get("image", 0) + + return { + "image": + self._get_dummy_images(width=target_width, + height=target_height, + num_images=num_images) + } + ``` ## 4. Specify processing details @@ -455,6 +479,7 @@ return a schema of the tensors outputted by the HF processor that are related to The output of `CLIPImageProcessor` is a simple tensor with shape `(num_images, num_channels, image_height, image_width)`: + ```python # https://github.com/huggingface/transformers/blob/v4.47.1/src/transformers/models/clip/image_processing_clip.py#L339-L345 images = [ @@ -505,35 +530,37 @@ return a schema of the tensors outputted by the HF processor that are related to In order to support the use of [MultiModalFieldConfig.batched][] like in LLaVA, we remove the extra batch dimension by overriding [BaseMultiModalProcessor._call_hf_processor][]: - ```python - def _call_hf_processor( - self, - prompt: str, - mm_data: Mapping[str, object], - mm_kwargs: Mapping[str, object], - ) -> BatchFeature: - processed_outputs = super()._call_hf_processor( - prompt=prompt, - mm_data=mm_data, - mm_kwargs=mm_kwargs, - ) + ??? Code - image_patches = processed_outputs.get("image_patches") - if image_patches is not None: - images = mm_data["images"] - assert isinstance(images, list) + ```python + def _call_hf_processor( + self, + prompt: str, + mm_data: Mapping[str, object], + mm_kwargs: Mapping[str, object], + ) -> BatchFeature: + processed_outputs = super()._call_hf_processor( + prompt=prompt, + mm_data=mm_data, + mm_kwargs=mm_kwargs, + ) - # Original output: (1, num_images, Pn, Px * Py * C) - # New output: (num_images, Pn, Px * Py * C) - assert (isinstance(image_patches, list) - and len(image_patches) == 1) - assert (isinstance(image_patches[0], torch.Tensor) - and len(image_patches[0]) == len(images)) + image_patches = processed_outputs.get("image_patches") + if image_patches is not None: + images = mm_data["images"] + assert isinstance(images, list) - processed_outputs["image_patches"] = image_patches[0] + # Original output: (1, num_images, Pn, Px * Py * C) + # New output: (num_images, Pn, Px * Py * C) + assert (isinstance(image_patches, list) + and len(image_patches) == 1) + assert (isinstance(image_patches[0], torch.Tensor) + and len(image_patches[0]) == len(images)) - return processed_outputs - ``` + processed_outputs["image_patches"] = image_patches[0] + + return processed_outputs + ``` !!! note Our [actual code](gh-file:vllm/model_executor/models/fuyu.py) has special handling @@ -573,35 +600,37 @@ Each [PromptUpdate][vllm.multimodal.processing.PromptUpdate] instance specifies It simply repeats each input `image_token` a number of times equal to the number of placeholder feature tokens (`num_image_tokens`). Based on this, we override [_get_prompt_updates][vllm.multimodal.processing.BaseMultiModalProcessor._get_prompt_updates] as follows: - ```python - def _get_prompt_updates( - self, - mm_items: MultiModalDataItems, - hf_processor_mm_kwargs: Mapping[str, object], - out_mm_kwargs: MultiModalKwargs, - ) -> Sequence[PromptUpdate]: - hf_config = self.info.get_hf_config() - image_token_id = hf_config.image_token_index + ??? Code - def get_replacement(item_idx: int): - images = mm_items.get_items("image", ImageProcessorItems) + ```python + def _get_prompt_updates( + self, + mm_items: MultiModalDataItems, + hf_processor_mm_kwargs: Mapping[str, object], + out_mm_kwargs: MultiModalKwargs, + ) -> Sequence[PromptUpdate]: + hf_config = self.info.get_hf_config() + image_token_id = hf_config.image_token_index - image_size = images.get_image_size(item_idx) - num_image_tokens = self.info.get_num_image_tokens( - image_width=image_size.width, - image_height=image_size.height, - ) + def get_replacement(item_idx: int): + images = mm_items.get_items("image", ImageProcessorItems) - return [image_token_id] * num_image_tokens + image_size = images.get_image_size(item_idx) + num_image_tokens = self.info.get_num_image_tokens( + image_width=image_size.width, + image_height=image_size.height, + ) - return [ - PromptReplacement( - modality="image", - target=[image_token_id], - replacement=get_replacement, - ), - ] - ``` + return [image_token_id] * num_image_tokens + + return [ + PromptReplacement( + modality="image", + target=[image_token_id], + replacement=get_replacement, + ), + ] + ``` === "Handling additional tokens: Fuyu" @@ -616,117 +645,90 @@ Each [PromptUpdate][vllm.multimodal.processing.PromptUpdate] instance specifies We define a helper function to return `ncols` and `nrows` directly: - ```python - def get_image_feature_grid_size( - self, - *, - image_width: int, - image_height: int, - ) -> tuple[int, int]: - image_processor = self.get_image_processor() - target_width = image_processor.size["width"] - target_height = image_processor.size["height"] - patch_width = image_processor.patch_size["width"] - patch_height = image_processor.patch_size["height"] + ??? Code - if not (image_width <= target_width and image_height <= target_height): - height_scale_factor = target_height / image_height - width_scale_factor = target_width / image_width - optimal_scale_factor = min(height_scale_factor, width_scale_factor) + ```python + def get_image_feature_grid_size( + self, + *, + image_width: int, + image_height: int, + ) -> tuple[int, int]: + image_processor = self.get_image_processor() + target_width = image_processor.size["width"] + target_height = image_processor.size["height"] + patch_width = image_processor.patch_size["width"] + patch_height = image_processor.patch_size["height"] - image_height = int(image_height * optimal_scale_factor) - image_width = int(image_width * optimal_scale_factor) + if not (image_width <= target_width and image_height <= target_height): + height_scale_factor = target_height / image_height + width_scale_factor = target_width / image_width + optimal_scale_factor = min(height_scale_factor, width_scale_factor) - ncols = math.ceil(image_width / patch_width) - nrows = math.ceil(image_height / patch_height) - return ncols, nrows - ``` + image_height = int(image_height * optimal_scale_factor) + image_width = int(image_width * optimal_scale_factor) + + ncols = math.ceil(image_width / patch_width) + nrows = math.ceil(image_height / patch_height) + return ncols, nrows + ``` Based on this, we can initially define our replacement tokens as: - ```python - def get_replacement(item_idx: int): - images = mm_items.get_items("image", ImageProcessorItems) - image_size = images.get_image_size(item_idx) + ??? Code - ncols, nrows = self.info.get_image_feature_grid_size( - image_width=image_size.width, - image_height=image_size.height, - ) + ```python + def get_replacement(item_idx: int): + images = mm_items.get_items("image", ImageProcessorItems) + image_size = images.get_image_size(item_idx) - # `_IMAGE_TOKEN_ID` corresponds to `|SPEAKER|` - # `_NEWLINE_TOKEN_ID` corresponds to `|NEWLINE|` - return ([_IMAGE_TOKEN_ID] * ncols + [_NEWLINE_TOKEN_ID]) * nrows - ``` + ncols, nrows = self.info.get_image_feature_grid_size( + image_width=image_size.width, + image_height=image_size.height, + ) + + # `_IMAGE_TOKEN_ID` corresponds to `|SPEAKER|` + # `_NEWLINE_TOKEN_ID` corresponds to `|NEWLINE|` + return ([_IMAGE_TOKEN_ID] * ncols + [_NEWLINE_TOKEN_ID]) * nrows + ``` However, this is not entirely correct. After `FuyuImageProcessor.preprocess_with_tokenizer_info` is called, a BOS token (``) is also added to the promopt: - ```python - # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/processing_fuyu.py#L417-L435 - model_image_input = self.image_processor.preprocess_with_tokenizer_info( - image_input=tensor_batch_images, - image_present=image_present, - image_unpadded_h=image_unpadded_heights, - image_unpadded_w=image_unpadded_widths, - image_placeholder_id=image_placeholder_id, - image_newline_id=image_newline_id, - variable_sized=True, - ) - prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch( - tokenizer=self.tokenizer, - prompts=prompts, - scale_factors=scale_factors, - max_tokens_to_generate=self.max_tokens_to_generate, - max_position_embeddings=self.max_position_embeddings, - add_BOS=True, - add_beginning_of_answer_token=True, - ) - ``` + ??? Code + + ```python + # https://github.com/huggingface/transformers/blob/v4.48.3/src/transformers/models/fuyu/processing_fuyu.py#L417-L435 + model_image_input = self.image_processor.preprocess_with_tokenizer_info( + image_input=tensor_batch_images, + image_present=image_present, + image_unpadded_h=image_unpadded_heights, + image_unpadded_w=image_unpadded_widths, + image_placeholder_id=image_placeholder_id, + image_newline_id=image_newline_id, + variable_sized=True, + ) + prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch( + tokenizer=self.tokenizer, + prompts=prompts, + scale_factors=scale_factors, + max_tokens_to_generate=self.max_tokens_to_generate, + max_position_embeddings=self.max_position_embeddings, + add_BOS=True, + add_beginning_of_answer_token=True, + ) + ``` To assign the vision embeddings to only the image tokens, instead of a string you can return an instance of [PromptUpdateDetails][vllm.multimodal.processing.PromptUpdateDetails]: - ```python - hf_config = self.info.get_hf_config() - bos_token_id = hf_config.bos_token_id # `` - assert isinstance(bos_token_id, int) + ??? Code - def get_replacement_fuyu(item_idx: int): - images = mm_items.get_items("image", ImageProcessorItems) - image_size = images.get_image_size(item_idx) - - ncols, nrows = self.info.get_image_feature_grid_size( - image_width=image_size.width, - image_height=image_size.height, - ) - image_tokens = ([_IMAGE_TOKEN_ID] * ncols + - [_NEWLINE_TOKEN_ID]) * nrows - - return PromptUpdateDetails.select_token_id( - image_tokens + [bos_token_id], - embed_token_id=_IMAGE_TOKEN_ID, - ) - ``` - - Finally, noticing that the HF processor removes the `|ENDOFTEXT|` token from the tokenized prompt, - we can search for it to conduct the replacement at the start of the string: - - ```python - def _get_prompt_updates( - self, - mm_items: MultiModalDataItems, - hf_processor_mm_kwargs: Mapping[str, object], - out_mm_kwargs: MultiModalKwargs, - ) -> Sequence[PromptUpdate]: + ```python hf_config = self.info.get_hf_config() - bos_token_id = hf_config.bos_token_id + bos_token_id = hf_config.bos_token_id # `` assert isinstance(bos_token_id, int) - tokenizer = self.info.get_tokenizer() - eot_token_id = tokenizer.bos_token_id - assert isinstance(eot_token_id, int) - def get_replacement_fuyu(item_idx: int): images = mm_items.get_items("image", ImageProcessorItems) image_size = images.get_image_size(item_idx) @@ -742,15 +744,52 @@ Each [PromptUpdate][vllm.multimodal.processing.PromptUpdate] instance specifies image_tokens + [bos_token_id], embed_token_id=_IMAGE_TOKEN_ID, ) + ``` - return [ - PromptReplacement( - modality="image", - target=[eot_token_id], - replacement=get_replacement_fuyu, - ) - ] - ``` + Finally, noticing that the HF processor removes the `|ENDOFTEXT|` token from the tokenized prompt, + we can search for it to conduct the replacement at the start of the string: + + ??? Code + + ```python + def _get_prompt_updates( + self, + mm_items: MultiModalDataItems, + hf_processor_mm_kwargs: Mapping[str, object], + out_mm_kwargs: MultiModalKwargs, + ) -> Sequence[PromptUpdate]: + hf_config = self.info.get_hf_config() + bos_token_id = hf_config.bos_token_id + assert isinstance(bos_token_id, int) + + tokenizer = self.info.get_tokenizer() + eot_token_id = tokenizer.bos_token_id + assert isinstance(eot_token_id, int) + + def get_replacement_fuyu(item_idx: int): + images = mm_items.get_items("image", ImageProcessorItems) + image_size = images.get_image_size(item_idx) + + ncols, nrows = self.info.get_image_feature_grid_size( + image_width=image_size.width, + image_height=image_size.height, + ) + image_tokens = ([_IMAGE_TOKEN_ID] * ncols + + [_NEWLINE_TOKEN_ID]) * nrows + + return PromptUpdateDetails.select_token_id( + image_tokens + [bos_token_id], + embed_token_id=_IMAGE_TOKEN_ID, + ) + + return [ + PromptReplacement( + modality="image", + target=[eot_token_id], + replacement=get_replacement_fuyu, + ) + ] + ``` ## 5. Register processor-related classes diff --git a/docs/contributing/profiling.md b/docs/contributing/profiling.md index be01b9b65f65c..6d6366741aaed 100644 --- a/docs/contributing/profiling.md +++ b/docs/contributing/profiling.md @@ -97,26 +97,26 @@ to manually kill the profiler and generate your `nsys-rep` report. You can view these profiles either as summaries in the CLI, using `nsys stats [profile-file]`, or in the GUI by installing Nsight [locally following the directions here](https://developer.nvidia.com/nsight-systems/get-started). -CLI example: +??? CLI example -```bash -nsys stats report1.nsys-rep -... - ** CUDA GPU Kernel Summary (cuda_gpu_kern_sum): + ```bash + nsys stats report1.nsys-rep + ... + ** CUDA GPU Kernel Summary (cuda_gpu_kern_sum): - Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name - -------- --------------- --------- ----------- ----------- -------- --------- ----------- ---------------------------------------------------------------------------------------------------- - 46.3 10,327,352,338 17,505 589,965.9 144,383.0 27,040 3,126,460 944,263.8 sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_of… - 14.8 3,305,114,764 5,152 641,520.7 293,408.0 287,296 2,822,716 867,124.9 sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_of… - 12.1 2,692,284,876 14,280 188,535.4 83,904.0 19,328 2,862,237 497,999.9 sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off… - 9.5 2,116,600,578 33,920 62,399.8 21,504.0 15,326 2,532,285 290,954.1 sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_… - 5.0 1,119,749,165 18,912 59,208.4 9,056.0 6,784 2,578,366 271,581.7 void vllm::act_and_mul_kernel, (bool)1>(T1 *, cons… - 4.1 916,662,515 21,312 43,011.6 19,776.0 8,928 2,586,205 199,790.1 void cutlass::device_kernel(int)0&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kern… - 1.9 418,362,605 18,912 22,121.5 3,871.0 3,328 2,523,870 175,248.2 void vllm::rotary_embedding_kernel(const long *, T1 *, T1 *, const T1 *, in… - 0.7 167,083,069 18,880 8,849.7 2,240.0 1,471 2,499,996 101,436.1 void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0… -... -``` + Time (%) Total Time (ns) Instances Avg (ns) Med (ns) Min (ns) Max (ns) StdDev (ns) Name + -------- --------------- --------- ----------- ----------- -------- --------- ----------- ---------------------------------------------------------------------------------------------------- + 46.3 10,327,352,338 17,505 589,965.9 144,383.0 27,040 3,126,460 944,263.8 sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize128x128x64_warpgroupsize1x1x1_execute_segment_k_of… + 14.8 3,305,114,764 5,152 641,520.7 293,408.0 287,296 2,822,716 867,124.9 sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize256x128x64_warpgroupsize2x1x1_execute_segment_k_of… + 12.1 2,692,284,876 14,280 188,535.4 83,904.0 19,328 2,862,237 497,999.9 sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x128x64_warpgroupsize1x1x1_execute_segment_k_off… + 9.5 2,116,600,578 33,920 62,399.8 21,504.0 15,326 2,532,285 290,954.1 sm90_xmma_gemm_bf16bf16_bf16f32_f32_tn_n_tilesize64x64x64_warpgroupsize1x1x1_execute_segment_k_off_… + 5.0 1,119,749,165 18,912 59,208.4 9,056.0 6,784 2,578,366 271,581.7 void vllm::act_and_mul_kernel, (bool)1>(T1 *, cons… + 4.1 916,662,515 21,312 43,011.6 19,776.0 8,928 2,586,205 199,790.1 void cutlass::device_kernel(int)0&&vllm::_typeConvert::exists, void>::type vllm::fused_add_rms_norm_kern… + 1.9 418,362,605 18,912 22,121.5 3,871.0 3,328 2,523,870 175,248.2 void vllm::rotary_embedding_kernel(const long *, T1 *, T1 *, const T1 *, in… + 0.7 167,083,069 18,880 8,849.7 2,240.0 1,471 2,499,996 101,436.1 void vllm::reshape_and_cache_flash_kernel<__nv_bfloat16, __nv_bfloat16, (vllm::Fp8KVCacheDataType)0… + ... + ``` GUI example: diff --git a/docs/deployment/docker.md b/docs/deployment/docker.md index 93d9e80f5b012..eb84db7871e4a 100644 --- a/docs/deployment/docker.md +++ b/docs/deployment/docker.md @@ -97,19 +97,21 @@ of PyTorch Nightly and should be considered **experimental**. Using the flag `-- flags to speed up build process. However, ensure your `max_jobs` is substantially larger than `nvcc_threads` to get the most benefits. Keep an eye on memory usage with parallel jobs as it can be substantial (see example below). -```console -# Example of building on Nvidia GH200 server. (Memory usage: ~15GB, Build time: ~1475s / ~25 min, Image size: 6.93GB) -python3 use_existing_torch.py -DOCKER_BUILDKIT=1 docker build . \ - --file docker/Dockerfile \ - --target vllm-openai \ - --platform "linux/arm64" \ - -t vllm/vllm-gh200-openai:latest \ - --build-arg max_jobs=66 \ - --build-arg nvcc_threads=2 \ - --build-arg torch_cuda_arch_list="9.0 10.0+PTX" \ - --build-arg vllm_fa_cmake_gpu_arches="90-real" -``` +??? Command + + ```console + # Example of building on Nvidia GH200 server. (Memory usage: ~15GB, Build time: ~1475s / ~25 min, Image size: 6.93GB) + python3 use_existing_torch.py + DOCKER_BUILDKIT=1 docker build . \ + --file docker/Dockerfile \ + --target vllm-openai \ + --platform "linux/arm64" \ + -t vllm/vllm-gh200-openai:latest \ + --build-arg max_jobs=66 \ + --build-arg nvcc_threads=2 \ + --build-arg torch_cuda_arch_list="9.0 10.0+PTX" \ + --build-arg vllm_fa_cmake_gpu_arches="90-real" + ``` !!! note If you are building the `linux/arm64` image on a non-ARM host (e.g., an x86_64 machine), you need to ensure your system is set up for cross-compilation using QEMU. This allows your host machine to emulate ARM64 execution. diff --git a/docs/deployment/frameworks/autogen.md b/docs/deployment/frameworks/autogen.md index ad8c167659efa..295664daeadb7 100644 --- a/docs/deployment/frameworks/autogen.md +++ b/docs/deployment/frameworks/autogen.md @@ -30,51 +30,53 @@ python -m vllm.entrypoints.openai.api_server \ - Call it with AutoGen: -```python -import asyncio -from autogen_core.models import UserMessage -from autogen_ext.models.openai import OpenAIChatCompletionClient -from autogen_core.models import ModelFamily +??? Code + + ```python + import asyncio + from autogen_core.models import UserMessage + from autogen_ext.models.openai import OpenAIChatCompletionClient + from autogen_core.models import ModelFamily -async def main() -> None: - # Create a model client - model_client = OpenAIChatCompletionClient( - model="mistralai/Mistral-7B-Instruct-v0.2", - base_url="http://{your-vllm-host-ip}:{your-vllm-host-port}/v1", - api_key="EMPTY", - model_info={ - "vision": False, - "function_calling": False, - "json_output": False, - "family": ModelFamily.MISTRAL, - "structured_output": True, - }, - ) + async def main() -> None: + # Create a model client + model_client = OpenAIChatCompletionClient( + model="mistralai/Mistral-7B-Instruct-v0.2", + base_url="http://{your-vllm-host-ip}:{your-vllm-host-port}/v1", + api_key="EMPTY", + model_info={ + "vision": False, + "function_calling": False, + "json_output": False, + "family": ModelFamily.MISTRAL, + "structured_output": True, + }, + ) - messages = [UserMessage(content="Write a very short story about a dragon.", source="user")] + messages = [UserMessage(content="Write a very short story about a dragon.", source="user")] - # Create a stream. - stream = model_client.create_stream(messages=messages) + # Create a stream. + stream = model_client.create_stream(messages=messages) - # Iterate over the stream and print the responses. - print("Streamed responses:") - async for response in stream: - if isinstance(response, str): - # A partial response is a string. - print(response, flush=True, end="") - else: - # The last response is a CreateResult object with the complete message. - print("\n\n------------\n") - print("The complete response:", flush=True) - print(response.content, flush=True) + # Iterate over the stream and print the responses. + print("Streamed responses:") + async for response in stream: + if isinstance(response, str): + # A partial response is a string. + print(response, flush=True, end="") + else: + # The last response is a CreateResult object with the complete message. + print("\n\n------------\n") + print("The complete response:", flush=True) + print(response.content, flush=True) - # Close the client when done. - await model_client.close() + # Close the client when done. + await model_client.close() -asyncio.run(main()) -``` + asyncio.run(main()) + ``` For details, see the tutorial: diff --git a/docs/deployment/frameworks/cerebrium.md b/docs/deployment/frameworks/cerebrium.md index 84cb2304fac20..8e096f26db71d 100644 --- a/docs/deployment/frameworks/cerebrium.md +++ b/docs/deployment/frameworks/cerebrium.md @@ -34,25 +34,27 @@ vllm = "latest" Next, let us add our code to handle inference for the LLM of your choice (`mistralai/Mistral-7B-Instruct-v0.1` for this example), add the following code to your `main.py`: -```python -from vllm import LLM, SamplingParams +??? Code -llm = LLM(model="mistralai/Mistral-7B-Instruct-v0.1") + ```python + from vllm import LLM, SamplingParams -def run(prompts: list[str], temperature: float = 0.8, top_p: float = 0.95): + llm = LLM(model="mistralai/Mistral-7B-Instruct-v0.1") - sampling_params = SamplingParams(temperature=temperature, top_p=top_p) - outputs = llm.generate(prompts, sampling_params) + def run(prompts: list[str], temperature: float = 0.8, top_p: float = 0.95): - # Print the outputs. - results = [] - for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - results.append({"prompt": prompt, "generated_text": generated_text}) + sampling_params = SamplingParams(temperature=temperature, top_p=top_p) + outputs = llm.generate(prompts, sampling_params) - return {"results": results} -``` + # Print the outputs. + results = [] + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + results.append({"prompt": prompt, "generated_text": generated_text}) + + return {"results": results} + ``` Then, run the following code to deploy it to the cloud: @@ -62,47 +64,51 @@ cerebrium deploy If successful, you should be returned a CURL command that you can call inference against. Just remember to end the url with the function name you are calling (in our case`/run`) -```python -curl -X POST https://api.cortex.cerebrium.ai/v4/p-xxxxxx/vllm/run \ - -H 'Content-Type: application/json' \ - -H 'Authorization: ' \ - --data '{ - "prompts": [ - "Hello, my name is", - "The president of the United States is", - "The capital of France is", - "The future of AI is" - ] - }' -``` +??? Command + + ```python + curl -X POST https://api.cortex.cerebrium.ai/v4/p-xxxxxx/vllm/run \ + -H 'Content-Type: application/json' \ + -H 'Authorization: ' \ + --data '{ + "prompts": [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is" + ] + }' + ``` You should get a response like: -```python -{ - "run_id": "52911756-3066-9ae8-bcc9-d9129d1bd262", - "result": { - "result": [ - { - "prompt": "Hello, my name is", - "generated_text": " Sarah, and I'm a teacher. I teach elementary school students. One of" - }, - { - "prompt": "The president of the United States is", - "generated_text": " elected every four years. This is a democratic system.\n\n5. What" - }, - { - "prompt": "The capital of France is", - "generated_text": " Paris.\n" - }, - { - "prompt": "The future of AI is", - "generated_text": " bright, but it's important to approach it with a balanced and nuanced perspective." - } - ] - }, - "run_time_ms": 152.53663063049316 -} -``` +??? Response + + ```python + { + "run_id": "52911756-3066-9ae8-bcc9-d9129d1bd262", + "result": { + "result": [ + { + "prompt": "Hello, my name is", + "generated_text": " Sarah, and I'm a teacher. I teach elementary school students. One of" + }, + { + "prompt": "The president of the United States is", + "generated_text": " elected every four years. This is a democratic system.\n\n5. What" + }, + { + "prompt": "The capital of France is", + "generated_text": " Paris.\n" + }, + { + "prompt": "The future of AI is", + "generated_text": " bright, but it's important to approach it with a balanced and nuanced perspective." + } + ] + }, + "run_time_ms": 152.53663063049316 + } + ``` You now have an autoscaling endpoint where you only pay for the compute you use! diff --git a/docs/deployment/frameworks/dstack.md b/docs/deployment/frameworks/dstack.md index 7de92855745b0..0b91fc88ce3fc 100644 --- a/docs/deployment/frameworks/dstack.md +++ b/docs/deployment/frameworks/dstack.md @@ -26,75 +26,81 @@ dstack init Next, to provision a VM instance with LLM of your choice (`NousResearch/Llama-2-7b-chat-hf` for this example), create the following `serve.dstack.yml` file for the dstack `Service`: -```yaml -type: service +??? Config -python: "3.11" -env: - - MODEL=NousResearch/Llama-2-7b-chat-hf -port: 8000 -resources: - gpu: 24GB -commands: - - pip install vllm - - vllm serve $MODEL --port 8000 -model: - format: openai - type: chat - name: NousResearch/Llama-2-7b-chat-hf -``` + ```yaml + type: service + + python: "3.11" + env: + - MODEL=NousResearch/Llama-2-7b-chat-hf + port: 8000 + resources: + gpu: 24GB + commands: + - pip install vllm + - vllm serve $MODEL --port 8000 + model: + format: openai + type: chat + name: NousResearch/Llama-2-7b-chat-hf + ``` Then, run the following CLI for provisioning: -```console -$ dstack run . -f serve.dstack.yml +??? Command -⠸ Getting run plan... - Configuration serve.dstack.yml - Project deep-diver-main - User deep-diver - Min resources 2..xCPU, 8GB.., 1xGPU (24GB) - Max price - - Max duration - - Spot policy auto - Retry policy no + ```console + $ dstack run . -f serve.dstack.yml - # BACKEND REGION INSTANCE RESOURCES SPOT PRICE - 1 gcp us-central1 g2-standard-4 4xCPU, 16GB, 1xL4 (24GB), 100GB (disk) yes $0.223804 - 2 gcp us-east1 g2-standard-4 4xCPU, 16GB, 1xL4 (24GB), 100GB (disk) yes $0.223804 - 3 gcp us-west1 g2-standard-4 4xCPU, 16GB, 1xL4 (24GB), 100GB (disk) yes $0.223804 - ... - Shown 3 of 193 offers, $5.876 max + ⠸ Getting run plan... + Configuration serve.dstack.yml + Project deep-diver-main + User deep-diver + Min resources 2..xCPU, 8GB.., 1xGPU (24GB) + Max price - + Max duration - + Spot policy auto + Retry policy no -Continue? [y/n]: y -⠙ Submitting run... -⠏ Launching spicy-treefrog-1 (pulling) -spicy-treefrog-1 provisioning completed (running) -Service is published at ... -``` + # BACKEND REGION INSTANCE RESOURCES SPOT PRICE + 1 gcp us-central1 g2-standard-4 4xCPU, 16GB, 1xL4 (24GB), 100GB (disk) yes $0.223804 + 2 gcp us-east1 g2-standard-4 4xCPU, 16GB, 1xL4 (24GB), 100GB (disk) yes $0.223804 + 3 gcp us-west1 g2-standard-4 4xCPU, 16GB, 1xL4 (24GB), 100GB (disk) yes $0.223804 + ... + Shown 3 of 193 offers, $5.876 max + + Continue? [y/n]: y + ⠙ Submitting run... + ⠏ Launching spicy-treefrog-1 (pulling) + spicy-treefrog-1 provisioning completed (running) + Service is published at ... + ``` After the provisioning, you can interact with the model by using the OpenAI SDK: -```python -from openai import OpenAI +??? Code -client = OpenAI( - base_url="https://gateway.", - api_key="" -) + ```python + from openai import OpenAI -completion = client.chat.completions.create( - model="NousResearch/Llama-2-7b-chat-hf", - messages=[ - { - "role": "user", - "content": "Compose a poem that explains the concept of recursion in programming.", - } - ] -) + client = OpenAI( + base_url="https://gateway.", + api_key="" + ) -print(completion.choices[0].message.content) -``` + completion = client.chat.completions.create( + model="NousResearch/Llama-2-7b-chat-hf", + messages=[ + { + "role": "user", + "content": "Compose a poem that explains the concept of recursion in programming.", + } + ] + ) + + print(completion.choices[0].message.content) + ``` !!! note dstack automatically handles authentication on the gateway using dstack's tokens. Meanwhile, if you don't want to configure a gateway, you can provision dstack `Task` instead of `Service`. The `Task` is for development purpose only. If you want to know more about hands-on materials how to serve vLLM using dstack, check out [this repository](https://github.com/dstackai/dstack-examples/tree/main/deployment/vllm) diff --git a/docs/deployment/frameworks/haystack.md b/docs/deployment/frameworks/haystack.md index 2eac4a5279fd6..04d9eba3065c0 100644 --- a/docs/deployment/frameworks/haystack.md +++ b/docs/deployment/frameworks/haystack.md @@ -27,29 +27,29 @@ vllm serve mistralai/Mistral-7B-Instruct-v0.1 - Use the `OpenAIGenerator` and `OpenAIChatGenerator` components in Haystack to query the vLLM server. -```python -from haystack.components.generators.chat import OpenAIChatGenerator -from haystack.dataclasses import ChatMessage -from haystack.utils import Secret +??? Code -generator = OpenAIChatGenerator( - # for compatibility with the OpenAI API, a placeholder api_key is needed - api_key=Secret.from_token("VLLM-PLACEHOLDER-API-KEY"), - model="mistralai/Mistral-7B-Instruct-v0.1", - api_base_url="http://{your-vLLM-host-ip}:{your-vLLM-host-port}/v1", - generation_kwargs = {"max_tokens": 512} -) + ```python + from haystack.components.generators.chat import OpenAIChatGenerator + from haystack.dataclasses import ChatMessage + from haystack.utils import Secret -response = generator.run( - messages=[ChatMessage.from_user("Hi. Can you help me plan my next trip to Italy?")] -) + generator = OpenAIChatGenerator( + # for compatibility with the OpenAI API, a placeholder api_key is needed + api_key=Secret.from_token("VLLM-PLACEHOLDER-API-KEY"), + model="mistralai/Mistral-7B-Instruct-v0.1", + api_base_url="http://{your-vLLM-host-ip}:{your-vLLM-host-port}/v1", + generation_kwargs = {"max_tokens": 512} + ) -print("-"*30) -print(response) -print("-"*30) -``` + response = generator.run( + messages=[ChatMessage.from_user("Hi. Can you help me plan my next trip to Italy?")] + ) -Output e.g.: + print("-"*30) + print(response) + print("-"*30) + ``` ```console ------------------------------ diff --git a/docs/deployment/frameworks/litellm.md b/docs/deployment/frameworks/litellm.md index 3011cde830180..8498feaa2972e 100644 --- a/docs/deployment/frameworks/litellm.md +++ b/docs/deployment/frameworks/litellm.md @@ -34,21 +34,23 @@ vllm serve qwen/Qwen1.5-0.5B-Chat - Call it with litellm: -```python -import litellm +??? Code -messages = [{ "content": "Hello, how are you?","role": "user"}] + ```python + import litellm -# hosted_vllm is prefix key word and necessary -response = litellm.completion( - model="hosted_vllm/qwen/Qwen1.5-0.5B-Chat", # pass the vllm model name - messages=messages, - api_base="http://{your-vllm-server-host}:{your-vllm-server-port}/v1", - temperature=0.2, - max_tokens=80) + messages = [{ "content": "Hello, how are you?","role": "user"}] -print(response) -``` + # hosted_vllm is prefix key word and necessary + response = litellm.completion( + model="hosted_vllm/qwen/Qwen1.5-0.5B-Chat", # pass the vllm model name + messages=messages, + api_base="http://{your-vllm-server-host}:{your-vllm-server-port}/v1", + temperature=0.2, + max_tokens=80) + + print(response) + ``` ### Embeddings diff --git a/docs/deployment/frameworks/lws.md b/docs/deployment/frameworks/lws.md index 18282a89ddfff..9df9528769064 100644 --- a/docs/deployment/frameworks/lws.md +++ b/docs/deployment/frameworks/lws.md @@ -17,99 +17,101 @@ vLLM can be deployed with [LWS](https://github.com/kubernetes-sigs/lws) on Kuber Deploy the following yaml file `lws.yaml` -```yaml -apiVersion: leaderworkerset.x-k8s.io/v1 -kind: LeaderWorkerSet -metadata: - name: vllm -spec: - replicas: 2 - leaderWorkerTemplate: - size: 2 - restartPolicy: RecreateGroupOnPodRestart - leaderTemplate: - metadata: - labels: - role: leader - spec: - containers: - - name: vllm-leader - image: docker.io/vllm/vllm-openai:latest - env: - - name: HUGGING_FACE_HUB_TOKEN - value: - command: - - sh - - -c - - "bash /vllm-workspace/examples/online_serving/multi-node-serving.sh leader --ray_cluster_size=$(LWS_GROUP_SIZE); - python3 -m vllm.entrypoints.openai.api_server --port 8080 --model meta-llama/Meta-Llama-3.1-405B-Instruct --tensor-parallel-size 8 --pipeline_parallel_size 2" - resources: - limits: - nvidia.com/gpu: "8" - memory: 1124Gi - ephemeral-storage: 800Gi - requests: - ephemeral-storage: 800Gi - cpu: 125 - ports: - - containerPort: 8080 - readinessProbe: - tcpSocket: - port: 8080 - initialDelaySeconds: 15 - periodSeconds: 10 - volumeMounts: - - mountPath: /dev/shm - name: dshm - volumes: - - name: dshm - emptyDir: - medium: Memory - sizeLimit: 15Gi - workerTemplate: - spec: - containers: - - name: vllm-worker - image: docker.io/vllm/vllm-openai:latest - command: - - sh - - -c - - "bash /vllm-workspace/examples/online_serving/multi-node-serving.sh worker --ray_address=$(LWS_LEADER_ADDRESS)" - resources: - limits: - nvidia.com/gpu: "8" - memory: 1124Gi - ephemeral-storage: 800Gi - requests: - ephemeral-storage: 800Gi - cpu: 125 - env: - - name: HUGGING_FACE_HUB_TOKEN - value: - volumeMounts: - - mountPath: /dev/shm - name: dshm - volumes: - - name: dshm - emptyDir: - medium: Memory - sizeLimit: 15Gi ---- -apiVersion: v1 -kind: Service -metadata: - name: vllm-leader -spec: - ports: - - name: http - port: 8080 - protocol: TCP - targetPort: 8080 - selector: - leaderworkerset.sigs.k8s.io/name: vllm - role: leader - type: ClusterIP -``` +??? Yaml + + ```yaml + apiVersion: leaderworkerset.x-k8s.io/v1 + kind: LeaderWorkerSet + metadata: + name: vllm + spec: + replicas: 2 + leaderWorkerTemplate: + size: 2 + restartPolicy: RecreateGroupOnPodRestart + leaderTemplate: + metadata: + labels: + role: leader + spec: + containers: + - name: vllm-leader + image: docker.io/vllm/vllm-openai:latest + env: + - name: HUGGING_FACE_HUB_TOKEN + value: + command: + - sh + - -c + - "bash /vllm-workspace/examples/online_serving/multi-node-serving.sh leader --ray_cluster_size=$(LWS_GROUP_SIZE); + python3 -m vllm.entrypoints.openai.api_server --port 8080 --model meta-llama/Meta-Llama-3.1-405B-Instruct --tensor-parallel-size 8 --pipeline_parallel_size 2" + resources: + limits: + nvidia.com/gpu: "8" + memory: 1124Gi + ephemeral-storage: 800Gi + requests: + ephemeral-storage: 800Gi + cpu: 125 + ports: + - containerPort: 8080 + readinessProbe: + tcpSocket: + port: 8080 + initialDelaySeconds: 15 + periodSeconds: 10 + volumeMounts: + - mountPath: /dev/shm + name: dshm + volumes: + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 15Gi + workerTemplate: + spec: + containers: + - name: vllm-worker + image: docker.io/vllm/vllm-openai:latest + command: + - sh + - -c + - "bash /vllm-workspace/examples/online_serving/multi-node-serving.sh worker --ray_address=$(LWS_LEADER_ADDRESS)" + resources: + limits: + nvidia.com/gpu: "8" + memory: 1124Gi + ephemeral-storage: 800Gi + requests: + ephemeral-storage: 800Gi + cpu: 125 + env: + - name: HUGGING_FACE_HUB_TOKEN + value: + volumeMounts: + - mountPath: /dev/shm + name: dshm + volumes: + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 15Gi + --- + apiVersion: v1 + kind: Service + metadata: + name: vllm-leader + spec: + ports: + - name: http + port: 8080 + protocol: TCP + targetPort: 8080 + selector: + leaderworkerset.sigs.k8s.io/name: vllm + role: leader + type: ClusterIP + ``` ```bash kubectl apply -f lws.yaml @@ -175,25 +177,27 @@ curl http://localhost:8080/v1/completions \ The output should be similar to the following -```text -{ - "id": "cmpl-1bb34faba88b43f9862cfbfb2200949d", - "object": "text_completion", - "created": 1715138766, - "model": "meta-llama/Meta-Llama-3.1-405B-Instruct", - "choices": [ +??? Output + + ```text { - "index": 0, - "text": " top destination for foodies, with", - "logprobs": null, - "finish_reason": "length", - "stop_reason": null + "id": "cmpl-1bb34faba88b43f9862cfbfb2200949d", + "object": "text_completion", + "created": 1715138766, + "model": "meta-llama/Meta-Llama-3.1-405B-Instruct", + "choices": [ + { + "index": 0, + "text": " top destination for foodies, with", + "logprobs": null, + "finish_reason": "length", + "stop_reason": null + } + ], + "usage": { + "prompt_tokens": 5, + "total_tokens": 12, + "completion_tokens": 7 + } } - ], - "usage": { - "prompt_tokens": 5, - "total_tokens": 12, - "completion_tokens": 7 - } -} -``` + ``` diff --git a/docs/deployment/frameworks/skypilot.md b/docs/deployment/frameworks/skypilot.md index 9763745f23787..b649312971b5c 100644 --- a/docs/deployment/frameworks/skypilot.md +++ b/docs/deployment/frameworks/skypilot.md @@ -24,48 +24,50 @@ sky check See the vLLM SkyPilot YAML for serving, [serving.yaml](https://github.com/skypilot-org/skypilot/blob/master/llm/vllm/serve.yaml). -```yaml -resources: - accelerators: {L4, A10g, A10, L40, A40, A100, A100-80GB} # We can use cheaper accelerators for 8B model. - use_spot: True - disk_size: 512 # Ensure model checkpoints can fit. - disk_tier: best - ports: 8081 # Expose to internet traffic. +??? Yaml -envs: - MODEL_NAME: meta-llama/Meta-Llama-3-8B-Instruct - HF_TOKEN: # Change to your own huggingface token, or use --env to pass. + ```yaml + resources: + accelerators: {L4, A10g, A10, L40, A40, A100, A100-80GB} # We can use cheaper accelerators for 8B model. + use_spot: True + disk_size: 512 # Ensure model checkpoints can fit. + disk_tier: best + ports: 8081 # Expose to internet traffic. -setup: | - conda create -n vllm python=3.10 -y - conda activate vllm + envs: + MODEL_NAME: meta-llama/Meta-Llama-3-8B-Instruct + HF_TOKEN: # Change to your own huggingface token, or use --env to pass. - pip install vllm==0.4.0.post1 - # Install Gradio for web UI. - pip install gradio openai - pip install flash-attn==2.5.7 + setup: | + conda create -n vllm python=3.10 -y + conda activate vllm -run: | - conda activate vllm - echo 'Starting vllm api server...' - python -u -m vllm.entrypoints.openai.api_server \ - --port 8081 \ - --model $MODEL_NAME \ - --trust-remote-code \ - --tensor-parallel-size $SKYPILOT_NUM_GPUS_PER_NODE \ - 2>&1 | tee api_server.log & + pip install vllm==0.4.0.post1 + # Install Gradio for web UI. + pip install gradio openai + pip install flash-attn==2.5.7 - echo 'Waiting for vllm api server to start...' - while ! `cat api_server.log | grep -q 'Uvicorn running on'`; do sleep 1; done + run: | + conda activate vllm + echo 'Starting vllm api server...' + python -u -m vllm.entrypoints.openai.api_server \ + --port 8081 \ + --model $MODEL_NAME \ + --trust-remote-code \ + --tensor-parallel-size $SKYPILOT_NUM_GPUS_PER_NODE \ + 2>&1 | tee api_server.log & - echo 'Starting gradio server...' - git clone https://github.com/vllm-project/vllm.git || true - python vllm/examples/online_serving/gradio_openai_chatbot_webserver.py \ - -m $MODEL_NAME \ - --port 8811 \ - --model-url http://localhost:8081/v1 \ - --stop-token-ids 128009,128001 -``` + echo 'Waiting for vllm api server to start...' + while ! `cat api_server.log | grep -q 'Uvicorn running on'`; do sleep 1; done + + echo 'Starting gradio server...' + git clone https://github.com/vllm-project/vllm.git || true + python vllm/examples/online_serving/gradio_openai_chatbot_webserver.py \ + -m $MODEL_NAME \ + --port 8811 \ + --model-url http://localhost:8081/v1 \ + --stop-token-ids 128009,128001 + ``` Start the serving the Llama-3 8B model on any of the candidate GPUs listed (L4, A10g, ...): @@ -93,68 +95,67 @@ HF_TOKEN="your-huggingface-token" \ SkyPilot can scale up the service to multiple service replicas with built-in autoscaling, load-balancing and fault-tolerance. You can do it by adding a services section to the YAML file. -```yaml -service: - replicas: 2 - # An actual request for readiness probe. - readiness_probe: - path: /v1/chat/completions - post_data: - model: $MODEL_NAME - messages: - - role: user - content: Hello! What is your name? - max_completion_tokens: 1 -``` +??? Yaml -
-Click to see the full recipe YAML - -```yaml -service: - replicas: 2 - # An actual request for readiness probe. - readiness_probe: - path: /v1/chat/completions - post_data: - model: $MODEL_NAME - messages: - - role: user - content: Hello! What is your name? + ```yaml + service: + replicas: 2 + # An actual request for readiness probe. + readiness_probe: + path: /v1/chat/completions + post_data: + model: $MODEL_NAME + messages: + - role: user + content: Hello! What is your name? max_completion_tokens: 1 + ``` -resources: - accelerators: {L4, A10g, A10, L40, A40, A100, A100-80GB} # We can use cheaper accelerators for 8B model. - use_spot: True - disk_size: 512 # Ensure model checkpoints can fit. - disk_tier: best - ports: 8081 # Expose to internet traffic. +??? Yaml -envs: - MODEL_NAME: meta-llama/Meta-Llama-3-8B-Instruct - HF_TOKEN: # Change to your own huggingface token, or use --env to pass. + ```yaml + service: + replicas: 2 + # An actual request for readiness probe. + readiness_probe: + path: /v1/chat/completions + post_data: + model: $MODEL_NAME + messages: + - role: user + content: Hello! What is your name? + max_completion_tokens: 1 -setup: | - conda create -n vllm python=3.10 -y - conda activate vllm + resources: + accelerators: {L4, A10g, A10, L40, A40, A100, A100-80GB} # We can use cheaper accelerators for 8B model. + use_spot: True + disk_size: 512 # Ensure model checkpoints can fit. + disk_tier: best + ports: 8081 # Expose to internet traffic. - pip install vllm==0.4.0.post1 - # Install Gradio for web UI. - pip install gradio openai - pip install flash-attn==2.5.7 + envs: + MODEL_NAME: meta-llama/Meta-Llama-3-8B-Instruct + HF_TOKEN: # Change to your own huggingface token, or use --env to pass. -run: | - conda activate vllm - echo 'Starting vllm api server...' - python -u -m vllm.entrypoints.openai.api_server \ - --port 8081 \ - --model $MODEL_NAME \ - --trust-remote-code \ - --tensor-parallel-size $SKYPILOT_NUM_GPUS_PER_NODE \ - 2>&1 | tee api_server.log -``` + setup: | + conda create -n vllm python=3.10 -y + conda activate vllm -
+ pip install vllm==0.4.0.post1 + # Install Gradio for web UI. + pip install gradio openai + pip install flash-attn==2.5.7 + + run: | + conda activate vllm + echo 'Starting vllm api server...' + python -u -m vllm.entrypoints.openai.api_server \ + --port 8081 \ + --model $MODEL_NAME \ + --trust-remote-code \ + --tensor-parallel-size $SKYPILOT_NUM_GPUS_PER_NODE \ + 2>&1 | tee api_server.log + ``` Start the serving the Llama-3 8B model on multiple replicas: @@ -170,8 +171,7 @@ Wait until the service is ready: watch -n10 sky serve status vllm ``` -
-Example outputs: +Example outputs: ```console Services @@ -184,29 +184,29 @@ vllm 1 1 xx.yy.zz.121 18 mins ago 1x GCP([Spot]{'L4': 1}) R vllm 2 1 xx.yy.zz.245 18 mins ago 1x GCP([Spot]{'L4': 1}) READY us-east4 ``` -
- After the service is READY, you can find a single endpoint for the service and access the service with the endpoint: -```console -ENDPOINT=$(sky serve status --endpoint 8081 vllm) -curl -L http://$ENDPOINT/v1/chat/completions \ - -H "Content-Type: application/json" \ - -d '{ - "model": "meta-llama/Meta-Llama-3-8B-Instruct", - "messages": [ - { - "role": "system", - "content": "You are a helpful assistant." - }, - { - "role": "user", - "content": "Who are you?" - } - ], - "stop_token_ids": [128009, 128001] - }' -``` +??? Commands + + ```bash + ENDPOINT=$(sky serve status --endpoint 8081 vllm) + curl -L http://$ENDPOINT/v1/chat/completions \ + -H "Content-Type: application/json" \ + -d '{ + "model": "meta-llama/Meta-Llama-3-8B-Instruct", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Who are you?" + } + ], + "stop_token_ids": [128009, 128001] + }' + ``` To enable autoscaling, you could replace the `replicas` with the following configs in `service`: @@ -220,57 +220,54 @@ service: This will scale the service up to when the QPS exceeds 2 for each replica. -
-Click to see the full recipe YAML +??? Yaml -```yaml -service: - replica_policy: - min_replicas: 2 - max_replicas: 4 - target_qps_per_replica: 2 - # An actual request for readiness probe. - readiness_probe: - path: /v1/chat/completions - post_data: - model: $MODEL_NAME - messages: - - role: user - content: Hello! What is your name? - max_completion_tokens: 1 + ```yaml + service: + replica_policy: + min_replicas: 2 + max_replicas: 4 + target_qps_per_replica: 2 + # An actual request for readiness probe. + readiness_probe: + path: /v1/chat/completions + post_data: + model: $MODEL_NAME + messages: + - role: user + content: Hello! What is your name? + max_completion_tokens: 1 -resources: - accelerators: {L4, A10g, A10, L40, A40, A100, A100-80GB} # We can use cheaper accelerators for 8B model. - use_spot: True - disk_size: 512 # Ensure model checkpoints can fit. - disk_tier: best - ports: 8081 # Expose to internet traffic. + resources: + accelerators: {L4, A10g, A10, L40, A40, A100, A100-80GB} # We can use cheaper accelerators for 8B model. + use_spot: True + disk_size: 512 # Ensure model checkpoints can fit. + disk_tier: best + ports: 8081 # Expose to internet traffic. -envs: - MODEL_NAME: meta-llama/Meta-Llama-3-8B-Instruct - HF_TOKEN: # Change to your own huggingface token, or use --env to pass. + envs: + MODEL_NAME: meta-llama/Meta-Llama-3-8B-Instruct + HF_TOKEN: # Change to your own huggingface token, or use --env to pass. -setup: | - conda create -n vllm python=3.10 -y - conda activate vllm + setup: | + conda create -n vllm python=3.10 -y + conda activate vllm - pip install vllm==0.4.0.post1 - # Install Gradio for web UI. - pip install gradio openai - pip install flash-attn==2.5.7 + pip install vllm==0.4.0.post1 + # Install Gradio for web UI. + pip install gradio openai + pip install flash-attn==2.5.7 -run: | - conda activate vllm - echo 'Starting vllm api server...' - python -u -m vllm.entrypoints.openai.api_server \ - --port 8081 \ - --model $MODEL_NAME \ - --trust-remote-code \ - --tensor-parallel-size $SKYPILOT_NUM_GPUS_PER_NODE \ - 2>&1 | tee api_server.log -``` - -
+ run: | + conda activate vllm + echo 'Starting vllm api server...' + python -u -m vllm.entrypoints.openai.api_server \ + --port 8081 \ + --model $MODEL_NAME \ + --trust-remote-code \ + --tensor-parallel-size $SKYPILOT_NUM_GPUS_PER_NODE \ + 2>&1 | tee api_server.log + ``` To update the service with the new config: @@ -288,38 +285,35 @@ sky serve down vllm It is also possible to access the Llama-3 service with a separate GUI frontend, so the user requests send to the GUI will be load-balanced across replicas. -
-Click to see the full GUI YAML +??? Yaml -```yaml -envs: - MODEL_NAME: meta-llama/Meta-Llama-3-8B-Instruct - ENDPOINT: x.x.x.x:3031 # Address of the API server running vllm. + ```yaml + envs: + MODEL_NAME: meta-llama/Meta-Llama-3-8B-Instruct + ENDPOINT: x.x.x.x:3031 # Address of the API server running vllm. -resources: - cpus: 2 + resources: + cpus: 2 -setup: | - conda create -n vllm python=3.10 -y - conda activate vllm + setup: | + conda create -n vllm python=3.10 -y + conda activate vllm - # Install Gradio for web UI. - pip install gradio openai + # Install Gradio for web UI. + pip install gradio openai -run: | - conda activate vllm - export PATH=$PATH:/sbin + run: | + conda activate vllm + export PATH=$PATH:/sbin - echo 'Starting gradio server...' - git clone https://github.com/vllm-project/vllm.git || true - python vllm/examples/online_serving/gradio_openai_chatbot_webserver.py \ - -m $MODEL_NAME \ - --port 8811 \ - --model-url http://$ENDPOINT/v1 \ - --stop-token-ids 128009,128001 | tee ~/gradio.log -``` - -
+ echo 'Starting gradio server...' + git clone https://github.com/vllm-project/vllm.git || true + python vllm/examples/online_serving/gradio_openai_chatbot_webserver.py \ + -m $MODEL_NAME \ + --port 8811 \ + --model-url http://$ENDPOINT/v1 \ + --stop-token-ids 128009,128001 | tee ~/gradio.log + ``` 1. Start the chat web UI: diff --git a/docs/deployment/integrations/production-stack.md b/docs/deployment/integrations/production-stack.md index 8288a4b6e6be3..2b1cc6f6fee18 100644 --- a/docs/deployment/integrations/production-stack.md +++ b/docs/deployment/integrations/production-stack.md @@ -60,22 +60,22 @@ And then you can send out a query to the OpenAI-compatible API to check the avai curl -o- http://localhost:30080/models ``` -Expected output: +??? Output -```json -{ - "object": "list", - "data": [ + ```json { - "id": "facebook/opt-125m", - "object": "model", - "created": 1737428424, - "owned_by": "vllm", - "root": null + "object": "list", + "data": [ + { + "id": "facebook/opt-125m", + "object": "model", + "created": 1737428424, + "owned_by": "vllm", + "root": null + } + ] } - ] -} -``` + ``` To send an actual chatting request, you can issue a curl request to the OpenAI `/completion` endpoint: @@ -89,23 +89,23 @@ curl -X POST http://localhost:30080/completions \ }' ``` -Expected output: +??? Output -```json -{ - "id": "completion-id", - "object": "text_completion", - "created": 1737428424, - "model": "facebook/opt-125m", - "choices": [ + ```json { - "text": " there was a brave knight who...", - "index": 0, - "finish_reason": "length" + "id": "completion-id", + "object": "text_completion", + "created": 1737428424, + "model": "facebook/opt-125m", + "choices": [ + { + "text": " there was a brave knight who...", + "index": 0, + "finish_reason": "length" + } + ] } - ] -} -``` + ``` ### Uninstall @@ -121,23 +121,25 @@ sudo helm uninstall vllm The core vLLM production stack configuration is managed with YAML. Here is the example configuration used in the installation above: -```yaml -servingEngineSpec: - runtimeClassName: "" - modelSpec: - - name: "opt125m" - repository: "vllm/vllm-openai" - tag: "latest" - modelURL: "facebook/opt-125m" +??? Yaml - replicaCount: 1 + ```yaml + servingEngineSpec: + runtimeClassName: "" + modelSpec: + - name: "opt125m" + repository: "vllm/vllm-openai" + tag: "latest" + modelURL: "facebook/opt-125m" - requestCPU: 6 - requestMemory: "16Gi" - requestGPU: 1 + replicaCount: 1 - pvcStorage: "10Gi" -``` + requestCPU: 6 + requestMemory: "16Gi" + requestGPU: 1 + + pvcStorage: "10Gi" + ``` In this YAML configuration: * **`modelSpec`** includes: diff --git a/docs/deployment/k8s.md b/docs/deployment/k8s.md index 7430f99a5396c..13225ba208fde 100644 --- a/docs/deployment/k8s.md +++ b/docs/deployment/k8s.md @@ -29,85 +29,89 @@ Alternatively, you can deploy vLLM to Kubernetes using any of the following: First, create a Kubernetes PVC and Secret for downloading and storing Hugging Face model: -```bash -cat < + Yaml + ```yaml apiVersion: v1 kind: PersistentVolumeClaim @@ -144,6 +151,8 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit) volumeMode: Filesystem ``` + + Secret is optional and only required for accessing gated models, you can skip this step if you are not using gated models ```yaml @@ -156,13 +165,16 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit) stringData: token: "REPLACE_WITH_TOKEN" ``` - + Next to create the deployment file for vLLM to run the model server. The following example deploys the `Mistral-7B-Instruct-v0.3` model. Here are two examples for using NVIDIA GPU and AMD GPU. NVIDIA GPU: +
+ Yaml + ```yaml apiVersion: apps/v1 kind: Deployment @@ -233,10 +245,15 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit) periodSeconds: 5 ``` +
+ AMD GPU: You can refer to the `deployment.yaml` below if using AMD ROCm GPU like MI300X. +
+ Yaml + ```yaml apiVersion: apps/v1 kind: Deployment @@ -305,12 +322,17 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit) mountPath: /dev/shm ``` +
+ You can get the full example with steps and sample yaml files from . 2. Create a Kubernetes Service for vLLM Next, create a Kubernetes Service file to expose the `mistral-7b` deployment: +
+ Yaml + ```yaml apiVersion: v1 kind: Service @@ -330,6 +352,8 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit) type: ClusterIP ``` +
+ 3. Deploy and Test Apply the deployment and service configurations using `kubectl apply -f `: diff --git a/docs/deployment/nginx.md b/docs/deployment/nginx.md index f0ff5c1d0e76d..752be76b3864f 100644 --- a/docs/deployment/nginx.md +++ b/docs/deployment/nginx.md @@ -36,23 +36,25 @@ docker build . -f Dockerfile.nginx --tag nginx-lb Create a file named `nginx_conf/nginx.conf`. Note that you can add as many servers as you'd like. In the below example we'll start with two. To add more, add another `server vllmN:8000 max_fails=3 fail_timeout=10000s;` entry to `upstream backend`. -```console -upstream backend { - least_conn; - server vllm0:8000 max_fails=3 fail_timeout=10000s; - server vllm1:8000 max_fails=3 fail_timeout=10000s; -} -server { - listen 80; - location / { - proxy_pass http://backend; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; +??? Config + + ```console + upstream backend { + least_conn; + server vllm0:8000 max_fails=3 fail_timeout=10000s; + server vllm1:8000 max_fails=3 fail_timeout=10000s; } -} -``` + server { + listen 80; + location / { + proxy_pass http://backend; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } + } + ``` [](){ #nginxloadbalancer-nginx-vllm-container } @@ -93,30 +95,32 @@ Notes: - The below example assumes GPU backend used. If you are using CPU backend, remove `--gpus device=ID`, add `VLLM_CPU_KVCACHE_SPACE` and `VLLM_CPU_OMP_THREADS_BIND` environment variables to the docker run command. - Adjust the model name that you want to use in your vLLM servers if you don't want to use `Llama-2-7b-chat-hf`. -```console -mkdir -p ~/.cache/huggingface/hub/ -hf_cache_dir=~/.cache/huggingface/ -docker run \ - -itd \ - --ipc host \ - --network vllm_nginx \ - --gpus device=0 \ - --shm-size=10.24gb \ - -v $hf_cache_dir:/root/.cache/huggingface/ \ - -p 8081:8000 \ - --name vllm0 vllm \ - --model meta-llama/Llama-2-7b-chat-hf -docker run \ - -itd \ - --ipc host \ - --network vllm_nginx \ - --gpus device=1 \ - --shm-size=10.24gb \ - -v $hf_cache_dir:/root/.cache/huggingface/ \ - -p 8082:8000 \ - --name vllm1 vllm \ - --model meta-llama/Llama-2-7b-chat-hf -``` +??? Commands + + ```console + mkdir -p ~/.cache/huggingface/hub/ + hf_cache_dir=~/.cache/huggingface/ + docker run \ + -itd \ + --ipc host \ + --network vllm_nginx \ + --gpus device=0 \ + --shm-size=10.24gb \ + -v $hf_cache_dir:/root/.cache/huggingface/ \ + -p 8081:8000 \ + --name vllm0 vllm \ + --model meta-llama/Llama-2-7b-chat-hf + docker run \ + -itd \ + --ipc host \ + --network vllm_nginx \ + --gpus device=1 \ + --shm-size=10.24gb \ + -v $hf_cache_dir:/root/.cache/huggingface/ \ + -p 8082:8000 \ + --name vllm1 vllm \ + --model meta-llama/Llama-2-7b-chat-hf + ``` !!! note If you are behind proxy, you can pass the proxy settings to the docker run command via `-e http_proxy=$http_proxy -e https_proxy=$https_proxy`. diff --git a/docs/design/arch_overview.md b/docs/design/arch_overview.md index 14720a392aafb..9bfdab17007e3 100644 --- a/docs/design/arch_overview.md +++ b/docs/design/arch_overview.md @@ -22,31 +22,33 @@ server. Here is a sample of `LLM` class usage: -```python -from vllm import LLM, SamplingParams +??? Code -# Define a list of input prompts -prompts = [ - "Hello, my name is", - "The capital of France is", - "The largest ocean is", -] + ```python + from vllm import LLM, SamplingParams -# Define sampling parameters -sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + # Define a list of input prompts + prompts = [ + "Hello, my name is", + "The capital of France is", + "The largest ocean is", + ] -# Initialize the LLM engine with the OPT-125M model -llm = LLM(model="facebook/opt-125m") + # Define sampling parameters + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) -# Generate outputs for the input prompts -outputs = llm.generate(prompts, sampling_params) + # Initialize the LLM engine with the OPT-125M model + llm = LLM(model="facebook/opt-125m") -# Print the generated outputs -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") -``` + # Generate outputs for the input prompts + outputs = llm.generate(prompts, sampling_params) + + # Print the generated outputs + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + ``` More API details can be found in the [Offline Inference](#offline-inference-api) section of the API docs. @@ -178,32 +180,34 @@ vision-language model. To avoid accidentally passing incorrect arguments, the constructor is now keyword-only. This ensures that the constructor will raise an error if old configurations are passed. vLLM developers have already made this change for all models within vLLM. For out-of-tree registered models, developers need to update their models, for example by adding shim code to adapt the old constructor signature to the new one: - ```python - class MyOldModel(nn.Module): - def __init__( - self, - config, - cache_config: Optional[CacheConfig] = None, - quant_config: Optional[QuantizationConfig] = None, - lora_config: Optional[LoRAConfig] = None, - prefix: str = "", - ) -> None: - ... + ??? Code - from vllm.config import VllmConfig - class MyNewModel(MyOldModel): - def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): - config = vllm_config.model_config.hf_config - cache_config = vllm_config.cache_config - quant_config = vllm_config.quant_config - lora_config = vllm_config.lora_config - super().__init__(config, cache_config, quant_config, lora_config, prefix) + ```python + class MyOldModel(nn.Module): + def __init__( + self, + config, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + lora_config: Optional[LoRAConfig] = None, + prefix: str = "", + ) -> None: + ... - if __version__ >= "0.6.4": - MyModel = MyNewModel - else: - MyModel = MyOldModel - ``` + from vllm.config import VllmConfig + class MyNewModel(MyOldModel): + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + config = vllm_config.model_config.hf_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config + lora_config = vllm_config.lora_config + super().__init__(config, cache_config, quant_config, lora_config, prefix) + + if __version__ >= "0.6.4": + MyModel = MyNewModel + else: + MyModel = MyOldModel + ``` This way, the model can work with both old and new versions of vLLM. diff --git a/docs/design/kernel/paged_attention.md b/docs/design/kernel/paged_attention.md index 6ebe1ee48acf1..ff135a7319603 100644 --- a/docs/design/kernel/paged_attention.md +++ b/docs/design/kernel/paged_attention.md @@ -448,27 +448,29 @@ elements of the entire head for all context tokens. However, overall, all results for output have been calculated but are just stored in different thread register memory. -```cpp -float* out_smem = reinterpret_cast(shared_mem); -for (int i = NUM_WARPS; i > 1; i /= 2) { - // Upper warps write to shared memory. - ... - float* dst = &out_smem[(warp_idx - mid) * HEAD_SIZE]; - for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { - ... - dst[row_idx] = accs[i]; - } +??? Code - // Lower warps update the output. - const float* src = &out_smem[warp_idx * HEAD_SIZE]; - for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + ```cpp + float* out_smem = reinterpret_cast(shared_mem); + for (int i = NUM_WARPS; i > 1; i /= 2) { + // Upper warps write to shared memory. ... - accs[i] += src[row_idx]; - } + float* dst = &out_smem[(warp_idx - mid) * HEAD_SIZE]; + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + ... + dst[row_idx] = accs[i]; + } - // Write out the accs. -} -``` + // Lower warps update the output. + const float* src = &out_smem[warp_idx * HEAD_SIZE]; + for (int i = 0; i < NUM_ROWS_PER_THREAD; i++) { + ... + accs[i] += src[row_idx]; + } + + // Write out the accs. + } + ``` ## Output diff --git a/docs/design/plugin_system.md b/docs/design/plugin_system.md index 0764dfb6501bc..944f0e680de4d 100644 --- a/docs/design/plugin_system.md +++ b/docs/design/plugin_system.md @@ -13,28 +13,30 @@ Plugins are user-registered code that vLLM executes. Given vLLM's architecture ( vLLM's plugin system uses the standard Python `entry_points` mechanism. This mechanism allows developers to register functions in their Python packages for use by other packages. An example of a plugin: -```python -# inside `setup.py` file -from setuptools import setup +??? Code -setup(name='vllm_add_dummy_model', - version='0.1', - packages=['vllm_add_dummy_model'], - entry_points={ - 'vllm.general_plugins': - ["register_dummy_model = vllm_add_dummy_model:register"] - }) + ```python + # inside `setup.py` file + from setuptools import setup -# inside `vllm_add_dummy_model.py` file -def register(): - from vllm import ModelRegistry + setup(name='vllm_add_dummy_model', + version='0.1', + packages=['vllm_add_dummy_model'], + entry_points={ + 'vllm.general_plugins': + ["register_dummy_model = vllm_add_dummy_model:register"] + }) - if "MyLlava" not in ModelRegistry.get_supported_archs(): - ModelRegistry.register_model( - "MyLlava", - "vllm_add_dummy_model.my_llava:MyLlava", - ) -``` + # inside `vllm_add_dummy_model.py` file + def register(): + from vllm import ModelRegistry + + if "MyLlava" not in ModelRegistry.get_supported_archs(): + ModelRegistry.register_model( + "MyLlava", + "vllm_add_dummy_model.my_llava:MyLlava", + ) + ``` For more information on adding entry points to your package, please check the [official documentation](https://setuptools.pypa.io/en/latest/userguide/entry_point.html). diff --git a/docs/features/lora.md b/docs/features/lora.md index 04e92dbc45924..4ccc3290e56a2 100644 --- a/docs/features/lora.md +++ b/docs/features/lora.md @@ -29,24 +29,26 @@ We can now submit the prompts and call `llm.generate` with the `lora_request` pa of `LoRARequest` is a human identifiable name, the second parameter is a globally unique ID for the adapter and the third parameter is the path to the LoRA adapter. -```python -sampling_params = SamplingParams( - temperature=0, - max_tokens=256, - stop=["[/assistant]"] -) +??? Code -prompts = [ - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", - "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", -] + ```python + sampling_params = SamplingParams( + temperature=0, + max_tokens=256, + stop=["[/assistant]"] + ) -outputs = llm.generate( - prompts, - sampling_params, - lora_request=LoRARequest("sql_adapter", 1, sql_lora_path) -) -``` + prompts = [ + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_74 (icao VARCHAR, airport VARCHAR)\n\n question: Name the ICAO for lilongwe international airport [/user] [assistant]", + "[user] Write a SQL query to answer the question based on the table schema.\n\n context: CREATE TABLE table_name_11 (nationality VARCHAR, elector VARCHAR)\n\n question: When Anchero Pantaleone was the elector what is under nationality? [/user] [assistant]", + ] + + outputs = llm.generate( + prompts, + sampling_params, + lora_request=LoRARequest("sql_adapter", 1, sql_lora_path) + ) + ``` Check out for an example of how to use LoRA adapters with the async engine and how to use more advanced configuration options. @@ -68,24 +70,26 @@ The server entrypoint accepts all other LoRA configuration parameters (`max_lora etc.), which will apply to all forthcoming requests. Upon querying the `/models` endpoint, we should see our LoRA along with its base model (if `jq` is not installed, you can follow [this guide](https://jqlang.org/download/) to install it.): -```bash -curl localhost:8000/v1/models | jq . -{ - "object": "list", - "data": [ - { - "id": "meta-llama/Llama-2-7b-hf", - "object": "model", - ... - }, - { - "id": "sql-lora", - "object": "model", - ... - } - ] -} -``` +??? Command + + ```bash + curl localhost:8000/v1/models | jq . + { + "object": "list", + "data": [ + { + "id": "meta-llama/Llama-2-7b-hf", + "object": "model", + ... + }, + { + "id": "sql-lora", + "object": "model", + ... + } + ] + } + ``` Requests can specify the LoRA adapter as if it were any other model via the `model` request parameter. The requests will be processed according to the server-wide LoRA configuration (i.e. in parallel with base model requests, and potentially other @@ -168,36 +172,36 @@ Alternatively, follow these example steps to implement your own plugin: 1. Implement the LoRAResolver interface. - Example of a simple S3 LoRAResolver implementation: + ??? Example of a simple S3 LoRAResolver implementation - ```python - import os - import s3fs - from vllm.lora.request import LoRARequest - from vllm.lora.resolver import LoRAResolver + ```python + import os + import s3fs + from vllm.lora.request import LoRARequest + from vllm.lora.resolver import LoRAResolver - class S3LoRAResolver(LoRAResolver): - def __init__(self): - self.s3 = s3fs.S3FileSystem() - self.s3_path_format = os.getenv("S3_PATH_TEMPLATE") - self.local_path_format = os.getenv("LOCAL_PATH_TEMPLATE") + class S3LoRAResolver(LoRAResolver): + def __init__(self): + self.s3 = s3fs.S3FileSystem() + self.s3_path_format = os.getenv("S3_PATH_TEMPLATE") + self.local_path_format = os.getenv("LOCAL_PATH_TEMPLATE") - async def resolve_lora(self, base_model_name, lora_name): - s3_path = self.s3_path_format.format(base_model_name=base_model_name, lora_name=lora_name) - local_path = self.local_path_format.format(base_model_name=base_model_name, lora_name=lora_name) + async def resolve_lora(self, base_model_name, lora_name): + s3_path = self.s3_path_format.format(base_model_name=base_model_name, lora_name=lora_name) + local_path = self.local_path_format.format(base_model_name=base_model_name, lora_name=lora_name) - # Download the LoRA from S3 to the local path - await self.s3._get( - s3_path, local_path, recursive=True, maxdepth=1 - ) + # Download the LoRA from S3 to the local path + await self.s3._get( + s3_path, local_path, recursive=True, maxdepth=1 + ) - lora_request = LoRARequest( - lora_name=lora_name, - lora_path=local_path, - lora_int_id=abs(hash(lora_name)) - ) - return lora_request - ``` + lora_request = LoRARequest( + lora_name=lora_name, + lora_path=local_path, + lora_int_id=abs(hash(lora_name)) + ) + return lora_request + ``` 2. Register `LoRAResolver` plugin. @@ -234,38 +238,40 @@ The new format of `--lora-modules` is mainly to support the display of parent mo - The `parent` field of LoRA model `sql-lora` now links to its base model `meta-llama/Llama-2-7b-hf`. This correctly reflects the hierarchical relationship between the base model and the LoRA adapter. - The `root` field points to the artifact location of the lora adapter. -```bash -$ curl http://localhost:8000/v1/models +??? Command output -{ - "object": "list", - "data": [ - { - "id": "meta-llama/Llama-2-7b-hf", - "object": "model", - "created": 1715644056, - "owned_by": "vllm", - "root": "~/.cache/huggingface/hub/models--meta-llama--Llama-2-7b-hf/snapshots/01c7f73d771dfac7d292323805ebc428287df4f9/", - "parent": null, - "permission": [ + ```bash + $ curl http://localhost:8000/v1/models + + { + "object": "list", + "data": [ { - ..... + "id": "meta-llama/Llama-2-7b-hf", + "object": "model", + "created": 1715644056, + "owned_by": "vllm", + "root": "~/.cache/huggingface/hub/models--meta-llama--Llama-2-7b-hf/snapshots/01c7f73d771dfac7d292323805ebc428287df4f9/", + "parent": null, + "permission": [ + { + ..... + } + ] + }, + { + "id": "sql-lora", + "object": "model", + "created": 1715644056, + "owned_by": "vllm", + "root": "~/.cache/huggingface/hub/models--yard1--llama-2-7b-sql-lora-test/snapshots/0dfa347e8877a4d4ed19ee56c140fa518470028c/", + "parent": meta-llama/Llama-2-7b-hf, + "permission": [ + { + .... + } + ] } ] - }, - { - "id": "sql-lora", - "object": "model", - "created": 1715644056, - "owned_by": "vllm", - "root": "~/.cache/huggingface/hub/models--yard1--llama-2-7b-sql-lora-test/snapshots/0dfa347e8877a4d4ed19ee56c140fa518470028c/", - "parent": meta-llama/Llama-2-7b-hf, - "permission": [ - { - .... - } - ] - } - ] -} -``` + } + ``` diff --git a/docs/features/multimodal_inputs.md b/docs/features/multimodal_inputs.md index afb9a6d4df9ae..d4465beb8593e 100644 --- a/docs/features/multimodal_inputs.md +++ b/docs/features/multimodal_inputs.md @@ -20,111 +20,117 @@ To input multi-modal data, follow this schema in [vllm.inputs.PromptType][]: You can pass a single image to the `'image'` field of the multi-modal dictionary, as shown in the following examples: -```python -from vllm import LLM +??? Code -llm = LLM(model="llava-hf/llava-1.5-7b-hf") + ```python + from vllm import LLM -# Refer to the HuggingFace repo for the correct format to use -prompt = "USER: \nWhat is the content of this image?\nASSISTANT:" + llm = LLM(model="llava-hf/llava-1.5-7b-hf") -# Load the image using PIL.Image -image = PIL.Image.open(...) + # Refer to the HuggingFace repo for the correct format to use + prompt = "USER: \nWhat is the content of this image?\nASSISTANT:" -# Single prompt inference -outputs = llm.generate({ - "prompt": prompt, - "multi_modal_data": {"image": image}, -}) + # Load the image using PIL.Image + image = PIL.Image.open(...) -for o in outputs: - generated_text = o.outputs[0].text - print(generated_text) + # Single prompt inference + outputs = llm.generate({ + "prompt": prompt, + "multi_modal_data": {"image": image}, + }) -# Batch inference -image_1 = PIL.Image.open(...) -image_2 = PIL.Image.open(...) -outputs = llm.generate( - [ - { - "prompt": "USER: \nWhat is the content of this image?\nASSISTANT:", - "multi_modal_data": {"image": image_1}, - }, - { - "prompt": "USER: \nWhat's the color of this image?\nASSISTANT:", - "multi_modal_data": {"image": image_2}, - } - ] -) + for o in outputs: + generated_text = o.outputs[0].text + print(generated_text) -for o in outputs: - generated_text = o.outputs[0].text - print(generated_text) -``` + # Batch inference + image_1 = PIL.Image.open(...) + image_2 = PIL.Image.open(...) + outputs = llm.generate( + [ + { + "prompt": "USER: \nWhat is the content of this image?\nASSISTANT:", + "multi_modal_data": {"image": image_1}, + }, + { + "prompt": "USER: \nWhat's the color of this image?\nASSISTANT:", + "multi_modal_data": {"image": image_2}, + } + ] + ) + + for o in outputs: + generated_text = o.outputs[0].text + print(generated_text) + ``` Full example: To substitute multiple images inside the same text prompt, you can pass in a list of images instead: -```python -from vllm import LLM +??? Code -llm = LLM( - model="microsoft/Phi-3.5-vision-instruct", - trust_remote_code=True, # Required to load Phi-3.5-vision - max_model_len=4096, # Otherwise, it may not fit in smaller GPUs - limit_mm_per_prompt={"image": 2}, # The maximum number to accept -) + ```python + from vllm import LLM -# Refer to the HuggingFace repo for the correct format to use -prompt = "<|user|>\n<|image_1|>\n<|image_2|>\nWhat is the content of each image?<|end|>\n<|assistant|>\n" + llm = LLM( + model="microsoft/Phi-3.5-vision-instruct", + trust_remote_code=True, # Required to load Phi-3.5-vision + max_model_len=4096, # Otherwise, it may not fit in smaller GPUs + limit_mm_per_prompt={"image": 2}, # The maximum number to accept + ) -# Load the images using PIL.Image -image1 = PIL.Image.open(...) -image2 = PIL.Image.open(...) + # Refer to the HuggingFace repo for the correct format to use + prompt = "<|user|>\n<|image_1|>\n<|image_2|>\nWhat is the content of each image?<|end|>\n<|assistant|>\n" -outputs = llm.generate({ - "prompt": prompt, - "multi_modal_data": { - "image": [image1, image2] - }, -}) + # Load the images using PIL.Image + image1 = PIL.Image.open(...) + image2 = PIL.Image.open(...) -for o in outputs: - generated_text = o.outputs[0].text - print(generated_text) -``` + outputs = llm.generate({ + "prompt": prompt, + "multi_modal_data": { + "image": [image1, image2] + }, + }) + + for o in outputs: + generated_text = o.outputs[0].text + print(generated_text) + ``` Full example: Multi-image input can be extended to perform video captioning. We show this with [Qwen2-VL](https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct) as it supports videos: -```python -from vllm import LLM +??? Code -# Specify the maximum number of frames per video to be 4. This can be changed. -llm = LLM("Qwen/Qwen2-VL-2B-Instruct", limit_mm_per_prompt={"image": 4}) + ```python + from vllm import LLM -# Create the request payload. -video_frames = ... # load your video making sure it only has the number of frames specified earlier. -message = { - "role": "user", - "content": [ - {"type": "text", "text": "Describe this set of frames. Consider the frames to be a part of the same video."}, - ], -} -for i in range(len(video_frames)): - base64_image = encode_image(video_frames[i]) # base64 encoding. - new_image = {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}} - message["content"].append(new_image) + # Specify the maximum number of frames per video to be 4. This can be changed. + llm = LLM("Qwen/Qwen2-VL-2B-Instruct", limit_mm_per_prompt={"image": 4}) -# Perform inference and log output. -outputs = llm.chat([message]) + # Create the request payload. + video_frames = ... # load your video making sure it only has the number of frames specified earlier. + message = { + "role": "user", + "content": [ + {"type": "text", "text": "Describe this set of frames. Consider the frames to be a part of the same video."}, + ], + } + for i in range(len(video_frames)): + base64_image = encode_image(video_frames[i]) # base64 encoding. + new_image = {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}} + message["content"].append(new_image) -for o in outputs: - generated_text = o.outputs[0].text - print(generated_text) -``` + # Perform inference and log output. + outputs = llm.chat([message]) + + for o in outputs: + generated_text = o.outputs[0].text + print(generated_text) + ``` ### Video Inputs @@ -144,68 +150,72 @@ Full example: To input pre-computed embeddings belonging to a data type (i.e. image, video, or audio) directly to the language model, pass a tensor of shape `(num_items, feature_size, hidden_size of LM)` to the corresponding field of the multi-modal dictionary. -```python -from vllm import LLM +??? Code -# Inference with image embeddings as input -llm = LLM(model="llava-hf/llava-1.5-7b-hf") + ```python + from vllm import LLM -# Refer to the HuggingFace repo for the correct format to use -prompt = "USER: \nWhat is the content of this image?\nASSISTANT:" + # Inference with image embeddings as input + llm = LLM(model="llava-hf/llava-1.5-7b-hf") -# Embeddings for single image -# torch.Tensor of shape (1, image_feature_size, hidden_size of LM) -image_embeds = torch.load(...) + # Refer to the HuggingFace repo for the correct format to use + prompt = "USER: \nWhat is the content of this image?\nASSISTANT:" -outputs = llm.generate({ - "prompt": prompt, - "multi_modal_data": {"image": image_embeds}, -}) + # Embeddings for single image + # torch.Tensor of shape (1, image_feature_size, hidden_size of LM) + image_embeds = torch.load(...) -for o in outputs: - generated_text = o.outputs[0].text - print(generated_text) -``` + outputs = llm.generate({ + "prompt": prompt, + "multi_modal_data": {"image": image_embeds}, + }) + + for o in outputs: + generated_text = o.outputs[0].text + print(generated_text) + ``` For Qwen2-VL and MiniCPM-V, we accept additional parameters alongside the embeddings: -```python -# Construct the prompt based on your model -prompt = ... +??? Code -# Embeddings for multiple images -# torch.Tensor of shape (num_images, image_feature_size, hidden_size of LM) -image_embeds = torch.load(...) + ```python + # Construct the prompt based on your model + prompt = ... -# Qwen2-VL -llm = LLM("Qwen/Qwen2-VL-2B-Instruct", limit_mm_per_prompt={"image": 4}) -mm_data = { - "image": { - "image_embeds": image_embeds, - # image_grid_thw is needed to calculate positional encoding. - "image_grid_thw": torch.load(...), # torch.Tensor of shape (1, 3), + # Embeddings for multiple images + # torch.Tensor of shape (num_images, image_feature_size, hidden_size of LM) + image_embeds = torch.load(...) + + # Qwen2-VL + llm = LLM("Qwen/Qwen2-VL-2B-Instruct", limit_mm_per_prompt={"image": 4}) + mm_data = { + "image": { + "image_embeds": image_embeds, + # image_grid_thw is needed to calculate positional encoding. + "image_grid_thw": torch.load(...), # torch.Tensor of shape (1, 3), + } } -} -# MiniCPM-V -llm = LLM("openbmb/MiniCPM-V-2_6", trust_remote_code=True, limit_mm_per_prompt={"image": 4}) -mm_data = { - "image": { - "image_embeds": image_embeds, - # image_sizes is needed to calculate details of the sliced image. - "image_sizes": [image.size for image in images], # list of image sizes + # MiniCPM-V + llm = LLM("openbmb/MiniCPM-V-2_6", trust_remote_code=True, limit_mm_per_prompt={"image": 4}) + mm_data = { + "image": { + "image_embeds": image_embeds, + # image_sizes is needed to calculate details of the sliced image. + "image_sizes": [image.size for image in images], # list of image sizes + } } -} -outputs = llm.generate({ - "prompt": prompt, - "multi_modal_data": mm_data, -}) + outputs = llm.generate({ + "prompt": prompt, + "multi_modal_data": mm_data, + }) -for o in outputs: - generated_text = o.outputs[0].text - print(generated_text) -``` + for o in outputs: + generated_text = o.outputs[0].text + print(generated_text) + ``` ## Online Serving @@ -235,51 +245,53 @@ vllm serve microsoft/Phi-3.5-vision-instruct --task generate \ Then, you can use the OpenAI client as follows: -```python -from openai import OpenAI +??? Code -openai_api_key = "EMPTY" -openai_api_base = "http://localhost:8000/v1" + ```python + from openai import OpenAI -client = OpenAI( - api_key=openai_api_key, - base_url=openai_api_base, -) + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" -# Single-image input inference -image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) -chat_response = client.chat.completions.create( - model="microsoft/Phi-3.5-vision-instruct", - messages=[{ - "role": "user", - "content": [ - # NOTE: The prompt formatting with the image token `` is not needed - # since the prompt will be processed automatically by the API server. - {"type": "text", "text": "What’s in this image?"}, - {"type": "image_url", "image_url": {"url": image_url}}, - ], - }], -) -print("Chat completion output:", chat_response.choices[0].message.content) + # Single-image input inference + image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" -# Multi-image input inference -image_url_duck = "https://upload.wikimedia.org/wikipedia/commons/d/da/2015_Kaczka_krzy%C5%BCowka_w_wodzie_%28samiec%29.jpg" -image_url_lion = "https://upload.wikimedia.org/wikipedia/commons/7/77/002_The_lion_king_Snyggve_in_the_Serengeti_National_Park_Photo_by_Giles_Laurent.jpg" + chat_response = client.chat.completions.create( + model="microsoft/Phi-3.5-vision-instruct", + messages=[{ + "role": "user", + "content": [ + # NOTE: The prompt formatting with the image token `` is not needed + # since the prompt will be processed automatically by the API server. + {"type": "text", "text": "What’s in this image?"}, + {"type": "image_url", "image_url": {"url": image_url}}, + ], + }], + ) + print("Chat completion output:", chat_response.choices[0].message.content) -chat_response = client.chat.completions.create( - model="microsoft/Phi-3.5-vision-instruct", - messages=[{ - "role": "user", - "content": [ - {"type": "text", "text": "What are the animals in these images?"}, - {"type": "image_url", "image_url": {"url": image_url_duck}}, - {"type": "image_url", "image_url": {"url": image_url_lion}}, - ], - }], -) -print("Chat completion output:", chat_response.choices[0].message.content) -``` + # Multi-image input inference + image_url_duck = "https://upload.wikimedia.org/wikipedia/commons/d/da/2015_Kaczka_krzy%C5%BCowka_w_wodzie_%28samiec%29.jpg" + image_url_lion = "https://upload.wikimedia.org/wikipedia/commons/7/77/002_The_lion_king_Snyggve_in_the_Serengeti_National_Park_Photo_by_Giles_Laurent.jpg" + + chat_response = client.chat.completions.create( + model="microsoft/Phi-3.5-vision-instruct", + messages=[{ + "role": "user", + "content": [ + {"type": "text", "text": "What are the animals in these images?"}, + {"type": "image_url", "image_url": {"url": image_url_duck}}, + {"type": "image_url", "image_url": {"url": image_url_lion}}, + ], + }], + ) + print("Chat completion output:", chat_response.choices[0].message.content) + ``` Full example: @@ -311,44 +323,46 @@ vllm serve llava-hf/llava-onevision-qwen2-0.5b-ov-hf --task generate --max-model Then, you can use the OpenAI client as follows: -```python -from openai import OpenAI +??? Code -openai_api_key = "EMPTY" -openai_api_base = "http://localhost:8000/v1" + ```python + from openai import OpenAI -client = OpenAI( - api_key=openai_api_key, - base_url=openai_api_base, -) + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" -video_url = "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4" + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) -## Use video url in the payload -chat_completion_from_url = client.chat.completions.create( - messages=[{ - "role": - "user", - "content": [ - { - "type": "text", - "text": "What's in this video?" - }, - { - "type": "video_url", - "video_url": { - "url": video_url + video_url = "http://commondatastorage.googleapis.com/gtv-videos-bucket/sample/ForBiggerFun.mp4" + + ## Use video url in the payload + chat_completion_from_url = client.chat.completions.create( + messages=[{ + "role": + "user", + "content": [ + { + "type": "text", + "text": "What's in this video?" }, - }, - ], - }], - model=model, - max_completion_tokens=64, -) + { + "type": "video_url", + "video_url": { + "url": video_url + }, + }, + ], + }], + model=model, + max_completion_tokens=64, + ) -result = chat_completion_from_url.choices[0].message.content -print("Chat completion output from image url:", result) -``` + result = chat_completion_from_url.choices[0].message.content + print("Chat completion output from image url:", result) + ``` Full example: @@ -373,84 +387,88 @@ vllm serve fixie-ai/ultravox-v0_5-llama-3_2-1b Then, you can use the OpenAI client as follows: -```python -import base64 -import requests -from openai import OpenAI -from vllm.assets.audio import AudioAsset +??? Code -def encode_base64_content_from_url(content_url: str) -> str: - """Encode a content retrieved from a remote url to base64 format.""" + ```python + import base64 + import requests + from openai import OpenAI + from vllm.assets.audio import AudioAsset - with requests.get(content_url) as response: - response.raise_for_status() - result = base64.b64encode(response.content).decode('utf-8') + def encode_base64_content_from_url(content_url: str) -> str: + """Encode a content retrieved from a remote url to base64 format.""" - return result + with requests.get(content_url) as response: + response.raise_for_status() + result = base64.b64encode(response.content).decode('utf-8') -openai_api_key = "EMPTY" -openai_api_base = "http://localhost:8000/v1" + return result -client = OpenAI( - api_key=openai_api_key, - base_url=openai_api_base, -) + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" -# Any format supported by librosa is supported -audio_url = AudioAsset("winning_call").url -audio_base64 = encode_base64_content_from_url(audio_url) + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) -chat_completion_from_base64 = client.chat.completions.create( - messages=[{ - "role": "user", - "content": [ - { - "type": "text", - "text": "What's in this audio?" - }, - { - "type": "input_audio", - "input_audio": { - "data": audio_base64, - "format": "wav" + # Any format supported by librosa is supported + audio_url = AudioAsset("winning_call").url + audio_base64 = encode_base64_content_from_url(audio_url) + + chat_completion_from_base64 = client.chat.completions.create( + messages=[{ + "role": "user", + "content": [ + { + "type": "text", + "text": "What's in this audio?" }, - }, - ], - }], - model=model, - max_completion_tokens=64, -) + { + "type": "input_audio", + "input_audio": { + "data": audio_base64, + "format": "wav" + }, + }, + ], + }], + model=model, + max_completion_tokens=64, + ) -result = chat_completion_from_base64.choices[0].message.content -print("Chat completion output from input audio:", result) -``` + result = chat_completion_from_base64.choices[0].message.content + print("Chat completion output from input audio:", result) + ``` Alternatively, you can pass `audio_url`, which is the audio counterpart of `image_url` for image input: -```python -chat_completion_from_url = client.chat.completions.create( - messages=[{ - "role": "user", - "content": [ - { - "type": "text", - "text": "What's in this audio?" - }, - { - "type": "audio_url", - "audio_url": { - "url": audio_url - }, - }, - ], - }], - model=model, - max_completion_tokens=64, -) +??? Code -result = chat_completion_from_url.choices[0].message.content -print("Chat completion output from audio url:", result) -``` + ```python + chat_completion_from_url = client.chat.completions.create( + messages=[{ + "role": "user", + "content": [ + { + "type": "text", + "text": "What's in this audio?" + }, + { + "type": "audio_url", + "audio_url": { + "url": audio_url + }, + }, + ], + }], + model=model, + max_completion_tokens=64, + ) + + result = chat_completion_from_url.choices[0].message.content + print("Chat completion output from audio url:", result) + ``` Full example: @@ -470,61 +488,63 @@ pass a tensor of shape to the corresponding field of the multi-modal dictionary. For image embeddings, you can pass the base64-encoded tensor to the `image_embeds` field. The following example demonstrates how to pass image embeddings to the OpenAI server: -```python -image_embedding = torch.load(...) -grid_thw = torch.load(...) # Required by Qwen/Qwen2-VL-2B-Instruct +??? Code -buffer = io.BytesIO() -torch.save(image_embedding, buffer) -buffer.seek(0) -binary_data = buffer.read() -base64_image_embedding = base64.b64encode(binary_data).decode('utf-8') + ```python + image_embedding = torch.load(...) + grid_thw = torch.load(...) # Required by Qwen/Qwen2-VL-2B-Instruct -client = OpenAI( - # defaults to os.environ.get("OPENAI_API_KEY") - api_key=openai_api_key, - base_url=openai_api_base, -) + buffer = io.BytesIO() + torch.save(image_embedding, buffer) + buffer.seek(0) + binary_data = buffer.read() + base64_image_embedding = base64.b64encode(binary_data).decode('utf-8') -# Basic usage - this is equivalent to the LLaVA example for offline inference -model = "llava-hf/llava-1.5-7b-hf" -embeds = { - "type": "image_embeds", - "image_embeds": f"{base64_image_embedding}" -} + client = OpenAI( + # defaults to os.environ.get("OPENAI_API_KEY") + api_key=openai_api_key, + base_url=openai_api_base, + ) -# Pass additional parameters (available to Qwen2-VL and MiniCPM-V) -model = "Qwen/Qwen2-VL-2B-Instruct" -embeds = { - "type": "image_embeds", - "image_embeds": { - "image_embeds": f"{base64_image_embedding}" , # Required - "image_grid_thw": f"{base64_image_grid_thw}" # Required by Qwen/Qwen2-VL-2B-Instruct - }, -} -model = "openbmb/MiniCPM-V-2_6" -embeds = { - "type": "image_embeds", - "image_embeds": { - "image_embeds": f"{base64_image_embedding}" , # Required - "image_sizes": f"{base64_image_sizes}" # Required by openbmb/MiniCPM-V-2_6 - }, -} -chat_completion = client.chat.completions.create( - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": [ - { - "type": "text", - "text": "What's in this image?", + # Basic usage - this is equivalent to the LLaVA example for offline inference + model = "llava-hf/llava-1.5-7b-hf" + embeds = { + "type": "image_embeds", + "image_embeds": f"{base64_image_embedding}" + } + + # Pass additional parameters (available to Qwen2-VL and MiniCPM-V) + model = "Qwen/Qwen2-VL-2B-Instruct" + embeds = { + "type": "image_embeds", + "image_embeds": { + "image_embeds": f"{base64_image_embedding}" , # Required + "image_grid_thw": f"{base64_image_grid_thw}" # Required by Qwen/Qwen2-VL-2B-Instruct }, - embeds, - ], - }, -], - model=model, -) -``` + } + model = "openbmb/MiniCPM-V-2_6" + embeds = { + "type": "image_embeds", + "image_embeds": { + "image_embeds": f"{base64_image_embedding}" , # Required + "image_sizes": f"{base64_image_sizes}" # Required by openbmb/MiniCPM-V-2_6 + }, + } + chat_completion = client.chat.completions.create( + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": [ + { + "type": "text", + "text": "What's in this image?", + }, + embeds, + ], + }, + ], + model=model, + ) + ``` !!! note Only one message can contain `{"type": "image_embeds"}`. diff --git a/docs/features/quantization/auto_awq.md b/docs/features/quantization/auto_awq.md index 4366a080f52cf..8362672f40b36 100644 --- a/docs/features/quantization/auto_awq.md +++ b/docs/features/quantization/auto_awq.md @@ -15,29 +15,31 @@ pip install autoawq After installing AutoAWQ, you are ready to quantize a model. Please refer to the [AutoAWQ documentation](https://casper-hansen.github.io/AutoAWQ/examples/#basic-quantization) for further details. Here is an example of how to quantize `mistralai/Mistral-7B-Instruct-v0.2`: -```python -from awq import AutoAWQForCausalLM -from transformers import AutoTokenizer +??? Code -model_path = 'mistralai/Mistral-7B-Instruct-v0.2' -quant_path = 'mistral-instruct-v0.2-awq' -quant_config = { "zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM" } + ```python + from awq import AutoAWQForCausalLM + from transformers import AutoTokenizer -# Load model -model = AutoAWQForCausalLM.from_pretrained( - model_path, **{"low_cpu_mem_usage": True, "use_cache": False} -) -tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) + model_path = 'mistralai/Mistral-7B-Instruct-v0.2' + quant_path = 'mistral-instruct-v0.2-awq' + quant_config = { "zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM" } -# Quantize -model.quantize(tokenizer, quant_config=quant_config) + # Load model + model = AutoAWQForCausalLM.from_pretrained( + model_path, **{"low_cpu_mem_usage": True, "use_cache": False} + ) + tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) -# Save quantized model -model.save_quantized(quant_path) -tokenizer.save_pretrained(quant_path) + # Quantize + model.quantize(tokenizer, quant_config=quant_config) -print(f'Model is quantized and saved at "{quant_path}"') -``` + # Save quantized model + model.save_quantized(quant_path) + tokenizer.save_pretrained(quant_path) + + print(f'Model is quantized and saved at "{quant_path}"') + ``` To run an AWQ model with vLLM, you can use [TheBloke/Llama-2-7b-Chat-AWQ](https://huggingface.co/TheBloke/Llama-2-7b-Chat-AWQ) with the following command: @@ -49,27 +51,29 @@ python examples/offline_inference/llm_engine_example.py \ AWQ models are also supported directly through the LLM entrypoint: -```python -from vllm import LLM, SamplingParams +??? Code -# Sample prompts. -prompts = [ - "Hello, my name is", - "The president of the United States is", - "The capital of France is", - "The future of AI is", -] -# Create a sampling params object. -sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + ```python + from vllm import LLM, SamplingParams -# Create an LLM. -llm = LLM(model="TheBloke/Llama-2-7b-Chat-AWQ", quantization="AWQ") -# Generate texts from the prompts. The output is a list of RequestOutput objects -# that contain the prompt, generated text, and other information. -outputs = llm.generate(prompts, sampling_params) -# Print the outputs. -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") -``` + # Sample prompts. + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + + # Create an LLM. + llm = LLM(model="TheBloke/Llama-2-7b-Chat-AWQ", quantization="AWQ") + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + # Print the outputs. + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + ``` diff --git a/docs/features/quantization/bitblas.md b/docs/features/quantization/bitblas.md index 9001725d9c02d..3f8ae7a959cd5 100644 --- a/docs/features/quantization/bitblas.md +++ b/docs/features/quantization/bitblas.md @@ -43,17 +43,19 @@ llm = LLM( ## Read gptq format checkpoint -```python -from vllm import LLM -import torch +??? Code -# "hxbgsyxh/llama-13b-4bit-g-1" is a pre-quantized checkpoint. -model_id = "hxbgsyxh/llama-13b-4bit-g-1" -llm = LLM( - model=model_id, - dtype=torch.float16, - trust_remote_code=True, - quantization="bitblas", - max_model_len=1024 -) -``` + ```python + from vllm import LLM + import torch + + # "hxbgsyxh/llama-13b-4bit-g-1" is a pre-quantized checkpoint. + model_id = "hxbgsyxh/llama-13b-4bit-g-1" + llm = LLM( + model=model_id, + dtype=torch.float16, + trust_remote_code=True, + quantization="bitblas", + max_model_len=1024 + ) + ``` diff --git a/docs/features/quantization/fp8.md b/docs/features/quantization/fp8.md index 01d5d9da046de..ec7639af805b9 100644 --- a/docs/features/quantization/fp8.md +++ b/docs/features/quantization/fp8.md @@ -58,22 +58,24 @@ For FP8 quantization, we can recover accuracy with simple RTN quantization. We r Since simple RTN does not require data for weight quantization and the activations are quantized dynamically, we do not need any calibration data for this quantization flow. -```python -from llmcompressor.transformers import oneshot -from llmcompressor.modifiers.quantization import QuantizationModifier +??? Code -# Configure the simple PTQ quantization -recipe = QuantizationModifier( - targets="Linear", scheme="FP8_DYNAMIC", ignore=["lm_head"]) + ```python + from llmcompressor.transformers import oneshot + from llmcompressor.modifiers.quantization import QuantizationModifier -# Apply the quantization algorithm. -oneshot(model=model, recipe=recipe) + # Configure the simple PTQ quantization + recipe = QuantizationModifier( + targets="Linear", scheme="FP8_DYNAMIC", ignore=["lm_head"]) -# Save the model: Meta-Llama-3-8B-Instruct-FP8-Dynamic -SAVE_DIR = MODEL_ID.split("/")[1] + "-FP8-Dynamic" -model.save_pretrained(SAVE_DIR) -tokenizer.save_pretrained(SAVE_DIR) -``` + # Apply the quantization algorithm. + oneshot(model=model, recipe=recipe) + + # Save the model: Meta-Llama-3-8B-Instruct-FP8-Dynamic + SAVE_DIR = MODEL_ID.split("/")[1] + "-FP8-Dynamic" + model.save_pretrained(SAVE_DIR) + tokenizer.save_pretrained(SAVE_DIR) + ``` ### 3. Evaluating Accuracy diff --git a/docs/features/quantization/gguf.md b/docs/features/quantization/gguf.md index 72f758f653a8f..014b513eeda75 100644 --- a/docs/features/quantization/gguf.md +++ b/docs/features/quantization/gguf.md @@ -41,42 +41,44 @@ vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \ You can also use the GGUF model directly through the LLM entrypoint: -```python -from vllm import LLM, SamplingParams +??? Code -# In this script, we demonstrate how to pass input to the chat method: -conversation = [ - { - "role": "system", - "content": "You are a helpful assistant" - }, - { - "role": "user", - "content": "Hello" - }, - { - "role": "assistant", - "content": "Hello! How can I assist you today?" - }, - { - "role": "user", - "content": "Write an essay about the importance of higher education.", - }, -] + ```python + from vllm import LLM, SamplingParams -# Create a sampling params object. -sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + # In this script, we demonstrate how to pass input to the chat method: + conversation = [ + { + "role": "system", + "content": "You are a helpful assistant" + }, + { + "role": "user", + "content": "Hello" + }, + { + "role": "assistant", + "content": "Hello! How can I assist you today?" + }, + { + "role": "user", + "content": "Write an essay about the importance of higher education.", + }, + ] -# Create an LLM. -llm = LLM(model="./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf", - tokenizer="TinyLlama/TinyLlama-1.1B-Chat-v1.0") -# Generate texts from the prompts. The output is a list of RequestOutput objects -# that contain the prompt, generated text, and other information. -outputs = llm.chat(conversation, sampling_params) + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) -# Print the outputs. -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") -``` + # Create an LLM. + llm = LLM(model="./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf", + tokenizer="TinyLlama/TinyLlama-1.1B-Chat-v1.0") + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.chat(conversation, sampling_params) + + # Print the outputs. + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + ``` diff --git a/docs/features/quantization/gptqmodel.md b/docs/features/quantization/gptqmodel.md index 53e938d2cbd7d..2f088f474f199 100644 --- a/docs/features/quantization/gptqmodel.md +++ b/docs/features/quantization/gptqmodel.md @@ -31,28 +31,30 @@ After installing GPTQModel, you are ready to quantize a model. Please refer to t Here is an example of how to quantize `meta-llama/Llama-3.2-1B-Instruct`: -```python -from datasets import load_dataset -from gptqmodel import GPTQModel, QuantizeConfig +??? Code -model_id = "meta-llama/Llama-3.2-1B-Instruct" -quant_path = "Llama-3.2-1B-Instruct-gptqmodel-4bit" + ```python + from datasets import load_dataset + from gptqmodel import GPTQModel, QuantizeConfig -calibration_dataset = load_dataset( - "allenai/c4", - data_files="en/c4-train.00001-of-01024.json.gz", - split="train" - ).select(range(1024))["text"] + model_id = "meta-llama/Llama-3.2-1B-Instruct" + quant_path = "Llama-3.2-1B-Instruct-gptqmodel-4bit" -quant_config = QuantizeConfig(bits=4, group_size=128) + calibration_dataset = load_dataset( + "allenai/c4", + data_files="en/c4-train.00001-of-01024.json.gz", + split="train" + ).select(range(1024))["text"] -model = GPTQModel.load(model_id, quant_config) + quant_config = QuantizeConfig(bits=4, group_size=128) -# increase `batch_size` to match gpu/vram specs to speed up quantization -model.quantize(calibration_dataset, batch_size=2) + model = GPTQModel.load(model_id, quant_config) -model.save(quant_path) -``` + # increase `batch_size` to match gpu/vram specs to speed up quantization + model.quantize(calibration_dataset, batch_size=2) + + model.save(quant_path) + ``` ## Running a quantized model with vLLM @@ -67,32 +69,34 @@ python examples/offline_inference/llm_engine_example.py \ GPTQModel quantized models are also supported directly through the LLM entrypoint: -```python -from vllm import LLM, SamplingParams +??? Code -# Sample prompts. -prompts = [ - "Hello, my name is", - "The president of the United States is", - "The capital of France is", - "The future of AI is", -] + ```python + from vllm import LLM, SamplingParams -# Create a sampling params object. -sampling_params = SamplingParams(temperature=0.6, top_p=0.9) + # Sample prompts. + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] -# Create an LLM. -llm = LLM(model="ModelCloud/DeepSeek-R1-Distill-Qwen-7B-gptqmodel-4bit-vortex-v2") + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.6, top_p=0.9) -# Generate texts from the prompts. The output is a list of RequestOutput objects -# that contain the prompt, generated text, and other information. -outputs = llm.generate(prompts, sampling_params) + # Create an LLM. + llm = LLM(model="ModelCloud/DeepSeek-R1-Distill-Qwen-7B-gptqmodel-4bit-vortex-v2") -# Print the outputs. -print("-"*50) -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + + # Print the outputs. print("-"*50) -``` + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}\nGenerated text: {generated_text!r}") + print("-"*50) + ``` diff --git a/docs/features/quantization/int4.md b/docs/features/quantization/int4.md index b7d09206365ff..185e13649f48a 100644 --- a/docs/features/quantization/int4.md +++ b/docs/features/quantization/int4.md @@ -53,51 +53,55 @@ When quantizing weights to INT4, you need sample data to estimate the weight upd It's best to use calibration data that closely matches your deployment data. For a general-purpose instruction-tuned model, you can use a dataset like `ultrachat`: -```python -from datasets import load_dataset +??? Code -NUM_CALIBRATION_SAMPLES = 512 -MAX_SEQUENCE_LENGTH = 2048 + ```python + from datasets import load_dataset -# Load and preprocess the dataset -ds = load_dataset("HuggingFaceH4/ultrachat_200k", split="train_sft") -ds = ds.shuffle(seed=42).select(range(NUM_CALIBRATION_SAMPLES)) + NUM_CALIBRATION_SAMPLES = 512 + MAX_SEQUENCE_LENGTH = 2048 -def preprocess(example): - return {"text": tokenizer.apply_chat_template(example["messages"], tokenize=False)} -ds = ds.map(preprocess) + # Load and preprocess the dataset + ds = load_dataset("HuggingFaceH4/ultrachat_200k", split="train_sft") + ds = ds.shuffle(seed=42).select(range(NUM_CALIBRATION_SAMPLES)) -def tokenize(sample): - return tokenizer(sample["text"], padding=False, max_length=MAX_SEQUENCE_LENGTH, truncation=True, add_special_tokens=False) -ds = ds.map(tokenize, remove_columns=ds.column_names) -``` + def preprocess(example): + return {"text": tokenizer.apply_chat_template(example["messages"], tokenize=False)} + ds = ds.map(preprocess) + + def tokenize(sample): + return tokenizer(sample["text"], padding=False, max_length=MAX_SEQUENCE_LENGTH, truncation=True, add_special_tokens=False) + ds = ds.map(tokenize, remove_columns=ds.column_names) + ``` ### 3. Applying Quantization Now, apply the quantization algorithms: -```python -from llmcompressor.transformers import oneshot -from llmcompressor.modifiers.quantization import GPTQModifier -from llmcompressor.modifiers.smoothquant import SmoothQuantModifier +??? Code -# Configure the quantization algorithms -recipe = GPTQModifier(targets="Linear", scheme="W4A16", ignore=["lm_head"]) + ```python + from llmcompressor.transformers import oneshot + from llmcompressor.modifiers.quantization import GPTQModifier + from llmcompressor.modifiers.smoothquant import SmoothQuantModifier -# Apply quantization -oneshot( - model=model, - dataset=ds, - recipe=recipe, - max_seq_length=MAX_SEQUENCE_LENGTH, - num_calibration_samples=NUM_CALIBRATION_SAMPLES, -) + # Configure the quantization algorithms + recipe = GPTQModifier(targets="Linear", scheme="W4A16", ignore=["lm_head"]) -# Save the compressed model: Meta-Llama-3-8B-Instruct-W4A16-G128 -SAVE_DIR = MODEL_ID.split("/")[1] + "-W4A16-G128" -model.save_pretrained(SAVE_DIR, save_compressed=True) -tokenizer.save_pretrained(SAVE_DIR) -``` + # Apply quantization + oneshot( + model=model, + dataset=ds, + recipe=recipe, + max_seq_length=MAX_SEQUENCE_LENGTH, + num_calibration_samples=NUM_CALIBRATION_SAMPLES, + ) + + # Save the compressed model: Meta-Llama-3-8B-Instruct-W4A16-G128 + SAVE_DIR = MODEL_ID.split("/")[1] + "-W4A16-G128" + model.save_pretrained(SAVE_DIR, save_compressed=True) + tokenizer.save_pretrained(SAVE_DIR) + ``` This process creates a W4A16 model with weights quantized to 4-bit integers. @@ -137,34 +141,36 @@ $ lm_eval --model vllm \ The following is an example of an expanded quantization recipe you can tune to your own use case: -```python -from compressed_tensors.quantization import ( - QuantizationArgs, - QuantizationScheme, - QuantizationStrategy, - QuantizationType, -) -recipe = GPTQModifier( - targets="Linear", - config_groups={ - "config_group": QuantizationScheme( - targets=["Linear"], - weights=QuantizationArgs( - num_bits=4, - type=QuantizationType.INT, - strategy=QuantizationStrategy.GROUP, - group_size=128, - symmetric=True, - dynamic=False, - actorder="weight", +??? Code + + ```python + from compressed_tensors.quantization import ( + QuantizationArgs, + QuantizationScheme, + QuantizationStrategy, + QuantizationType, + ) + recipe = GPTQModifier( + targets="Linear", + config_groups={ + "config_group": QuantizationScheme( + targets=["Linear"], + weights=QuantizationArgs( + num_bits=4, + type=QuantizationType.INT, + strategy=QuantizationStrategy.GROUP, + group_size=128, + symmetric=True, + dynamic=False, + actorder="weight", + ), ), - ), - }, - ignore=["lm_head"], - update_size=NUM_CALIBRATION_SAMPLES, - dampening_frac=0.01 -) -``` + }, + ignore=["lm_head"], + update_size=NUM_CALIBRATION_SAMPLES, + dampening_frac=0.01 + ) + ``` ## Troubleshooting and Support diff --git a/docs/features/quantization/int8.md b/docs/features/quantization/int8.md index 1d9fba9dc87f1..de5ae5c044010 100644 --- a/docs/features/quantization/int8.md +++ b/docs/features/quantization/int8.md @@ -54,54 +54,60 @@ When quantizing activations to INT8, you need sample data to estimate the activa It's best to use calibration data that closely matches your deployment data. For a general-purpose instruction-tuned model, you can use a dataset like `ultrachat`: -```python -from datasets import load_dataset +??? Code -NUM_CALIBRATION_SAMPLES = 512 -MAX_SEQUENCE_LENGTH = 2048 + ```python + from datasets import load_dataset -# Load and preprocess the dataset -ds = load_dataset("HuggingFaceH4/ultrachat_200k", split="train_sft") -ds = ds.shuffle(seed=42).select(range(NUM_CALIBRATION_SAMPLES)) + NUM_CALIBRATION_SAMPLES = 512 + MAX_SEQUENCE_LENGTH = 2048 -def preprocess(example): - return {"text": tokenizer.apply_chat_template(example["messages"], tokenize=False)} -ds = ds.map(preprocess) + # Load and preprocess the dataset + ds = load_dataset("HuggingFaceH4/ultrachat_200k", split="train_sft") + ds = ds.shuffle(seed=42).select(range(NUM_CALIBRATION_SAMPLES)) -def tokenize(sample): - return tokenizer(sample["text"], padding=False, max_length=MAX_SEQUENCE_LENGTH, truncation=True, add_special_tokens=False) -ds = ds.map(tokenize, remove_columns=ds.column_names) -``` + def preprocess(example): + return {"text": tokenizer.apply_chat_template(example["messages"], tokenize=False)} + ds = ds.map(preprocess) + + def tokenize(sample): + return tokenizer(sample["text"], padding=False, max_length=MAX_SEQUENCE_LENGTH, truncation=True, add_special_tokens=False) + ds = ds.map(tokenize, remove_columns=ds.column_names) + ``` + + ### 3. Applying Quantization Now, apply the quantization algorithms: -```python -from llmcompressor.transformers import oneshot -from llmcompressor.modifiers.quantization import GPTQModifier -from llmcompressor.modifiers.smoothquant import SmoothQuantModifier +??? Code -# Configure the quantization algorithms -recipe = [ - SmoothQuantModifier(smoothing_strength=0.8), - GPTQModifier(targets="Linear", scheme="W8A8", ignore=["lm_head"]), -] + ```python + from llmcompressor.transformers import oneshot + from llmcompressor.modifiers.quantization import GPTQModifier + from llmcompressor.modifiers.smoothquant import SmoothQuantModifier -# Apply quantization -oneshot( - model=model, - dataset=ds, - recipe=recipe, - max_seq_length=MAX_SEQUENCE_LENGTH, - num_calibration_samples=NUM_CALIBRATION_SAMPLES, -) + # Configure the quantization algorithms + recipe = [ + SmoothQuantModifier(smoothing_strength=0.8), + GPTQModifier(targets="Linear", scheme="W8A8", ignore=["lm_head"]), + ] -# Save the compressed model: Meta-Llama-3-8B-Instruct-W8A8-Dynamic-Per-Token -SAVE_DIR = MODEL_ID.split("/")[1] + "-W8A8-Dynamic-Per-Token" -model.save_pretrained(SAVE_DIR, save_compressed=True) -tokenizer.save_pretrained(SAVE_DIR) -``` + # Apply quantization + oneshot( + model=model, + dataset=ds, + recipe=recipe, + max_seq_length=MAX_SEQUENCE_LENGTH, + num_calibration_samples=NUM_CALIBRATION_SAMPLES, + ) + + # Save the compressed model: Meta-Llama-3-8B-Instruct-W8A8-Dynamic-Per-Token + SAVE_DIR = MODEL_ID.split("/")[1] + "-W8A8-Dynamic-Per-Token" + model.save_pretrained(SAVE_DIR, save_compressed=True) + tokenizer.save_pretrained(SAVE_DIR) + ``` This process creates a W8A8 model with weights and activations quantized to 8-bit integers. diff --git a/docs/features/quantization/modelopt.md b/docs/features/quantization/modelopt.md index 001d18657dad0..0bb6003832ba3 100644 --- a/docs/features/quantization/modelopt.md +++ b/docs/features/quantization/modelopt.md @@ -14,24 +14,26 @@ You can quantize HuggingFace models using the example scripts provided in the Te Below is an example showing how to quantize a model using modelopt's PTQ API: -```python -import modelopt.torch.quantization as mtq -from transformers import AutoModelForCausalLM +??? Code -# Load the model from HuggingFace -model = AutoModelForCausalLM.from_pretrained("") + ```python + import modelopt.torch.quantization as mtq + from transformers import AutoModelForCausalLM -# Select the quantization config, for example, FP8 -config = mtq.FP8_DEFAULT_CFG + # Load the model from HuggingFace + model = AutoModelForCausalLM.from_pretrained("") -# Define a forward loop function for calibration -def forward_loop(model): - for data in calib_set: - model(data) + # Select the quantization config, for example, FP8 + config = mtq.FP8_DEFAULT_CFG -# PTQ with in-place replacement of quantized modules -model = mtq.quantize(model, config, forward_loop) -``` + # Define a forward loop function for calibration + def forward_loop(model): + for data in calib_set: + model(data) + + # PTQ with in-place replacement of quantized modules + model = mtq.quantize(model, config, forward_loop) + ``` After the model is quantized, you can export it to a quantized checkpoint using the export API: @@ -48,31 +50,33 @@ with torch.inference_mode(): The quantized checkpoint can then be deployed with vLLM. As an example, the following code shows how to deploy `nvidia/Llama-3.1-8B-Instruct-FP8`, which is the FP8 quantized checkpoint derived from `meta-llama/Llama-3.1-8B-Instruct`, using vLLM: -```python -from vllm import LLM, SamplingParams +??? Code -def main(): + ```python + from vllm import LLM, SamplingParams - model_id = "nvidia/Llama-3.1-8B-Instruct-FP8" - # Ensure you specify quantization='modelopt' when loading the modelopt checkpoint - llm = LLM(model=model_id, quantization="modelopt", trust_remote_code=True) + def main(): - sampling_params = SamplingParams(temperature=0.8, top_p=0.9) + model_id = "nvidia/Llama-3.1-8B-Instruct-FP8" + # Ensure you specify quantization='modelopt' when loading the modelopt checkpoint + llm = LLM(model=model_id, quantization="modelopt", trust_remote_code=True) - prompts = [ - "Hello, my name is", - "The president of the United States is", - "The capital of France is", - "The future of AI is", - ] + sampling_params = SamplingParams(temperature=0.8, top_p=0.9) - outputs = llm.generate(prompts, sampling_params) + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] - for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + outputs = llm.generate(prompts, sampling_params) -if __name__ == "__main__": - main() -``` + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + + if __name__ == "__main__": + main() + ``` diff --git a/docs/features/quantization/quantized_kvcache.md b/docs/features/quantization/quantized_kvcache.md index e3ebd024bab3c..52b8d38ace1dd 100644 --- a/docs/features/quantization/quantized_kvcache.md +++ b/docs/features/quantization/quantized_kvcache.md @@ -35,20 +35,22 @@ Studies have shown that FP8 E4M3 quantization typically only minimally degrades Here is an example of how to enable FP8 quantization: -```python -# To calculate kv cache scales on the fly enable the calculate_kv_scales -# parameter +??? Code -from vllm import LLM, SamplingParams + ```python + # To calculate kv cache scales on the fly enable the calculate_kv_scales + # parameter -sampling_params = SamplingParams(temperature=0.7, top_p=0.8) -llm = LLM(model="meta-llama/Llama-2-7b-chat-hf", - kv_cache_dtype="fp8", - calculate_kv_scales=True) -prompt = "London is the capital of" -out = llm.generate(prompt, sampling_params)[0].outputs[0].text -print(out) -``` + from vllm import LLM, SamplingParams + + sampling_params = SamplingParams(temperature=0.7, top_p=0.8) + llm = LLM(model="meta-llama/Llama-2-7b-chat-hf", + kv_cache_dtype="fp8", + calculate_kv_scales=True) + prompt = "London is the capital of" + out = llm.generate(prompt, sampling_params)[0].outputs[0].text + print(out) + ``` The `kv_cache_dtype` argument specifies the data type for KV cache storage: - `"auto"`: Uses the model's default "unquantized" data type @@ -71,67 +73,69 @@ pip install llmcompressor Here's a complete example using `meta-llama/Llama-3.1-8B-Instruct` (most models can use this same pattern): -```python -from datasets import load_dataset -from transformers import AutoModelForCausalLM, AutoTokenizer -from llmcompressor.transformers import oneshot +??? Code -# Select model and load it -MODEL_ID = "meta-llama/Llama-3.1-8B-Instruct" -model = AutoModelForCausalLM.from_pretrained(MODEL_ID, device_map="auto", torch_dtype="auto") -tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) + ```python + from datasets import load_dataset + from transformers import AutoModelForCausalLM, AutoTokenizer + from llmcompressor.transformers import oneshot -# Select calibration dataset -DATASET_ID = "HuggingFaceH4/ultrachat_200k" -DATASET_SPLIT = "train_sft" + # Select model and load it + MODEL_ID = "meta-llama/Llama-3.1-8B-Instruct" + model = AutoModelForCausalLM.from_pretrained(MODEL_ID, device_map="auto", torch_dtype="auto") + tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) -# Configure calibration parameters -NUM_CALIBRATION_SAMPLES = 512 # 512 samples is a good starting point -MAX_SEQUENCE_LENGTH = 2048 + # Select calibration dataset + DATASET_ID = "HuggingFaceH4/ultrachat_200k" + DATASET_SPLIT = "train_sft" -# Load and preprocess dataset -ds = load_dataset(DATASET_ID, split=DATASET_SPLIT) -ds = ds.shuffle(seed=42).select(range(NUM_CALIBRATION_SAMPLES)) + # Configure calibration parameters + NUM_CALIBRATION_SAMPLES = 512 # 512 samples is a good starting point + MAX_SEQUENCE_LENGTH = 2048 -def process_and_tokenize(example): - text = tokenizer.apply_chat_template(example["messages"], tokenize=False) - return tokenizer( - text, - padding=False, - max_length=MAX_SEQUENCE_LENGTH, - truncation=True, - add_special_tokens=False, + # Load and preprocess dataset + ds = load_dataset(DATASET_ID, split=DATASET_SPLIT) + ds = ds.shuffle(seed=42).select(range(NUM_CALIBRATION_SAMPLES)) + + def process_and_tokenize(example): + text = tokenizer.apply_chat_template(example["messages"], tokenize=False) + return tokenizer( + text, + padding=False, + max_length=MAX_SEQUENCE_LENGTH, + truncation=True, + add_special_tokens=False, + ) + + ds = ds.map(process_and_tokenize, remove_columns=ds.column_names) + + # Configure quantization settings + recipe = """ + quant_stage: + quant_modifiers: + QuantizationModifier: + kv_cache_scheme: + num_bits: 8 + type: float + strategy: tensor + dynamic: false + symmetric: true + """ + + # Apply quantization + oneshot( + model=model, + dataset=ds, + recipe=recipe, + max_seq_length=MAX_SEQUENCE_LENGTH, + num_calibration_samples=NUM_CALIBRATION_SAMPLES, ) -ds = ds.map(process_and_tokenize, remove_columns=ds.column_names) - -# Configure quantization settings -recipe = """ -quant_stage: - quant_modifiers: - QuantizationModifier: - kv_cache_scheme: - num_bits: 8 - type: float - strategy: tensor - dynamic: false - symmetric: true -""" - -# Apply quantization -oneshot( - model=model, - dataset=ds, - recipe=recipe, - max_seq_length=MAX_SEQUENCE_LENGTH, - num_calibration_samples=NUM_CALIBRATION_SAMPLES, -) - -# Save quantized model: Llama-3.1-8B-Instruct-FP8-KV -SAVE_DIR = MODEL_ID.split("/")[1] + "-FP8-KV" -model.save_pretrained(SAVE_DIR, save_compressed=True) -tokenizer.save_pretrained(SAVE_DIR) -``` + # Save quantized model: Llama-3.1-8B-Instruct-FP8-KV + SAVE_DIR = MODEL_ID.split("/")[1] + "-FP8-KV" + model.save_pretrained(SAVE_DIR, save_compressed=True) + tokenizer.save_pretrained(SAVE_DIR) + ``` The above script will create a folder in your current directory containing your quantized model (e.g., `Llama-3.1-8B-Instruct-FP8-KV`) with calibrated scales. diff --git a/docs/features/quantization/quark.md b/docs/features/quantization/quark.md index 35e9dbe2609be..6e77584da2325 100644 --- a/docs/features/quantization/quark.md +++ b/docs/features/quantization/quark.md @@ -42,20 +42,22 @@ The Quark quantization process can be listed for 5 steps as below: Quark uses [Transformers](https://huggingface.co/docs/transformers/en/index) to fetch model and tokenizer. -```python -from transformers import AutoTokenizer, AutoModelForCausalLM +??? Code -MODEL_ID = "meta-llama/Llama-2-70b-chat-hf" -MAX_SEQ_LEN = 512 + ```python + from transformers import AutoTokenizer, AutoModelForCausalLM -model = AutoModelForCausalLM.from_pretrained( - MODEL_ID, device_map="auto", torch_dtype="auto", -) -model.eval() + MODEL_ID = "meta-llama/Llama-2-70b-chat-hf" + MAX_SEQ_LEN = 512 -tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, model_max_length=MAX_SEQ_LEN) -tokenizer.pad_token = tokenizer.eos_token -``` + model = AutoModelForCausalLM.from_pretrained( + MODEL_ID, device_map="auto", torch_dtype="auto", + ) + model.eval() + + tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, model_max_length=MAX_SEQ_LEN) + tokenizer.pad_token = tokenizer.eos_token + ``` ### 2. Prepare the Calibration Dataloader @@ -63,22 +65,24 @@ Quark uses the [PyTorch Dataloader](https://pytorch.org/tutorials/beginner/basic to load calibration data. For more details about how to use calibration datasets efficiently, please refer to [Adding Calibration Datasets](https://quark.docs.amd.com/latest/pytorch/calibration_datasets.html). -```python -from datasets import load_dataset -from torch.utils.data import DataLoader +??? Code -BATCH_SIZE = 1 -NUM_CALIBRATION_DATA = 512 + ```python + from datasets import load_dataset + from torch.utils.data import DataLoader -# Load the dataset and get calibration data. -dataset = load_dataset("mit-han-lab/pile-val-backup", split="validation") -text_data = dataset["text"][:NUM_CALIBRATION_DATA] + BATCH_SIZE = 1 + NUM_CALIBRATION_DATA = 512 -tokenized_outputs = tokenizer(text_data, return_tensors="pt", - padding=True, truncation=True, max_length=MAX_SEQ_LEN) -calib_dataloader = DataLoader(tokenized_outputs['input_ids'], - batch_size=BATCH_SIZE, drop_last=True) -``` + # Load the dataset and get calibration data. + dataset = load_dataset("mit-han-lab/pile-val-backup", split="validation") + text_data = dataset["text"][:NUM_CALIBRATION_DATA] + + tokenized_outputs = tokenizer(text_data, return_tensors="pt", + padding=True, truncation=True, max_length=MAX_SEQ_LEN) + calib_dataloader = DataLoader(tokenized_outputs['input_ids'], + batch_size=BATCH_SIZE, drop_last=True) + ``` ### 3. Set the Quantization Configuration @@ -94,42 +98,44 @@ kv-cache and the quantization algorithm is AutoSmoothQuant. AutoSmoothQuant config file for Llama is `examples/torch/language_modeling/llm_ptq/models/llama/autosmoothquant_config.json`. -```python -from quark.torch.quantization import (Config, QuantizationConfig, - FP8E4M3PerTensorSpec, - load_quant_algo_config_from_file) +??? Code -# Define fp8/per-tensor/static spec. -FP8_PER_TENSOR_SPEC = FP8E4M3PerTensorSpec(observer_method="min_max", - is_dynamic=False).to_quantization_spec() + ```python + from quark.torch.quantization import (Config, QuantizationConfig, + FP8E4M3PerTensorSpec, + load_quant_algo_config_from_file) -# Define global quantization config, input tensors and weight apply FP8_PER_TENSOR_SPEC. -global_quant_config = QuantizationConfig(input_tensors=FP8_PER_TENSOR_SPEC, - weight=FP8_PER_TENSOR_SPEC) + # Define fp8/per-tensor/static spec. + FP8_PER_TENSOR_SPEC = FP8E4M3PerTensorSpec(observer_method="min_max", + is_dynamic=False).to_quantization_spec() -# Define quantization config for kv-cache layers, output tensors apply FP8_PER_TENSOR_SPEC. -KV_CACHE_SPEC = FP8_PER_TENSOR_SPEC -kv_cache_layer_names_for_llama = ["*k_proj", "*v_proj"] -kv_cache_quant_config = {name : - QuantizationConfig(input_tensors=global_quant_config.input_tensors, - weight=global_quant_config.weight, - output_tensors=KV_CACHE_SPEC) - for name in kv_cache_layer_names_for_llama} -layer_quant_config = kv_cache_quant_config.copy() + # Define global quantization config, input tensors and weight apply FP8_PER_TENSOR_SPEC. + global_quant_config = QuantizationConfig(input_tensors=FP8_PER_TENSOR_SPEC, + weight=FP8_PER_TENSOR_SPEC) -# Define algorithm config by config file. -LLAMA_AUTOSMOOTHQUANT_CONFIG_FILE = - 'examples/torch/language_modeling/llm_ptq/models/llama/autosmoothquant_config.json' -algo_config = load_quant_algo_config_from_file(LLAMA_AUTOSMOOTHQUANT_CONFIG_FILE) + # Define quantization config for kv-cache layers, output tensors apply FP8_PER_TENSOR_SPEC. + KV_CACHE_SPEC = FP8_PER_TENSOR_SPEC + kv_cache_layer_names_for_llama = ["*k_proj", "*v_proj"] + kv_cache_quant_config = {name : + QuantizationConfig(input_tensors=global_quant_config.input_tensors, + weight=global_quant_config.weight, + output_tensors=KV_CACHE_SPEC) + for name in kv_cache_layer_names_for_llama} + layer_quant_config = kv_cache_quant_config.copy() -EXCLUDE_LAYERS = ["lm_head"] -quant_config = Config( - global_quant_config=global_quant_config, - layer_quant_config=layer_quant_config, - kv_cache_quant_config=kv_cache_quant_config, - exclude=EXCLUDE_LAYERS, - algo_config=algo_config) -``` + # Define algorithm config by config file. + LLAMA_AUTOSMOOTHQUANT_CONFIG_FILE = + 'examples/torch/language_modeling/llm_ptq/models/llama/autosmoothquant_config.json' + algo_config = load_quant_algo_config_from_file(LLAMA_AUTOSMOOTHQUANT_CONFIG_FILE) + + EXCLUDE_LAYERS = ["lm_head"] + quant_config = Config( + global_quant_config=global_quant_config, + layer_quant_config=layer_quant_config, + kv_cache_quant_config=kv_cache_quant_config, + exclude=EXCLUDE_LAYERS, + algo_config=algo_config) + ``` ### 4. Quantize the Model and Export @@ -139,63 +145,67 @@ HuggingFace `safetensors`, you can refer to [HuggingFace format exporting](https://quark.docs.amd.com/latest/pytorch/export/quark_export_hf.html) for more exporting format details. -```python -import torch -from quark.torch import ModelQuantizer, ModelExporter -from quark.torch.export import ExporterConfig, JsonExporterConfig +??? Code -# Apply quantization. -quantizer = ModelQuantizer(quant_config) -quant_model = quantizer.quantize_model(model, calib_dataloader) + ```python + import torch + from quark.torch import ModelQuantizer, ModelExporter + from quark.torch.export import ExporterConfig, JsonExporterConfig -# Freeze quantized model to export. -freezed_model = quantizer.freeze(model) + # Apply quantization. + quantizer = ModelQuantizer(quant_config) + quant_model = quantizer.quantize_model(model, calib_dataloader) -# Define export config. -LLAMA_KV_CACHE_GROUP = ["*k_proj", "*v_proj"] -export_config = ExporterConfig(json_export_config=JsonExporterConfig()) -export_config.json_export_config.kv_cache_group = LLAMA_KV_CACHE_GROUP + # Freeze quantized model to export. + freezed_model = quantizer.freeze(model) -# Model: Llama-2-70b-chat-hf-w-fp8-a-fp8-kvcache-fp8-pertensor-autosmoothquant -EXPORT_DIR = MODEL_ID.split("/")[1] + "-w-fp8-a-fp8-kvcache-fp8-pertensor-autosmoothquant" -exporter = ModelExporter(config=export_config, export_dir=EXPORT_DIR) -with torch.no_grad(): - exporter.export_safetensors_model(freezed_model, - quant_config=quant_config, tokenizer=tokenizer) -``` + # Define export config. + LLAMA_KV_CACHE_GROUP = ["*k_proj", "*v_proj"] + export_config = ExporterConfig(json_export_config=JsonExporterConfig()) + export_config.json_export_config.kv_cache_group = LLAMA_KV_CACHE_GROUP + + # Model: Llama-2-70b-chat-hf-w-fp8-a-fp8-kvcache-fp8-pertensor-autosmoothquant + EXPORT_DIR = MODEL_ID.split("/")[1] + "-w-fp8-a-fp8-kvcache-fp8-pertensor-autosmoothquant" + exporter = ModelExporter(config=export_config, export_dir=EXPORT_DIR) + with torch.no_grad(): + exporter.export_safetensors_model(freezed_model, + quant_config=quant_config, tokenizer=tokenizer) + ``` ### 5. Evaluation in vLLM Now, you can load and run the Quark quantized model directly through the LLM entrypoint: -```python -from vllm import LLM, SamplingParams +??? Code -# Sample prompts. -prompts = [ - "Hello, my name is", - "The president of the United States is", - "The capital of France is", - "The future of AI is", -] -# Create a sampling params object. -sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + ```python + from vllm import LLM, SamplingParams -# Create an LLM. -llm = LLM(model="Llama-2-70b-chat-hf-w-fp8-a-fp8-kvcache-fp8-pertensor-autosmoothquant", - kv_cache_dtype='fp8',quantization='quark') -# Generate texts from the prompts. The output is a list of RequestOutput objects -# that contain the prompt, generated text, and other information. -outputs = llm.generate(prompts, sampling_params) -# Print the outputs. -print("\nGenerated Outputs:\n" + "-" * 60) -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}") - print(f"Output: {generated_text!r}") - print("-" * 60) -``` + # Sample prompts. + prompts = [ + "Hello, my name is", + "The president of the United States is", + "The capital of France is", + "The future of AI is", + ] + # Create a sampling params object. + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + + # Create an LLM. + llm = LLM(model="Llama-2-70b-chat-hf-w-fp8-a-fp8-kvcache-fp8-pertensor-autosmoothquant", + kv_cache_dtype='fp8',quantization='quark') + # Generate texts from the prompts. The output is a list of RequestOutput objects + # that contain the prompt, generated text, and other information. + outputs = llm.generate(prompts, sampling_params) + # Print the outputs. + print("\nGenerated Outputs:\n" + "-" * 60) + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}") + print(f"Output: {generated_text!r}") + print("-" * 60) + ``` Or, you can use `lm_eval` to evaluate accuracy: diff --git a/docs/features/quantization/torchao.md b/docs/features/quantization/torchao.md index a7a517af85aa9..c45979a361172 100644 --- a/docs/features/quantization/torchao.md +++ b/docs/features/quantization/torchao.md @@ -15,26 +15,28 @@ pip install \ ## Quantizing HuggingFace Models You can quantize your own huggingface model with torchao, e.g. [transformers](https://huggingface.co/docs/transformers/main/en/quantization/torchao) and [diffusers](https://huggingface.co/docs/diffusers/en/quantization/torchao), and save the checkpoint to huggingface hub like [this](https://huggingface.co/jerryzh168/llama3-8b-int8wo) with the following example code: -```Python -import torch -from transformers import TorchAoConfig, AutoModelForCausalLM, AutoTokenizer -from torchao.quantization import Int8WeightOnlyConfig +??? Code -model_name = "meta-llama/Meta-Llama-3-8B" -quantization_config = TorchAoConfig(Int8WeightOnlyConfig()) -quantized_model = AutoModelForCausalLM.from_pretrained( - model_name, - torch_dtype="auto", - device_map="auto", - quantization_config=quantization_config -) -tokenizer = AutoTokenizer.from_pretrained(model_name) -input_text = "What are we having for dinner?" -input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") + ```Python + import torch + from transformers import TorchAoConfig, AutoModelForCausalLM, AutoTokenizer + from torchao.quantization import Int8WeightOnlyConfig -hub_repo = # YOUR HUB REPO ID -tokenizer.push_to_hub(hub_repo) -quantized_model.push_to_hub(hub_repo, safe_serialization=False) -``` + model_name = "meta-llama/Meta-Llama-3-8B" + quantization_config = TorchAoConfig(Int8WeightOnlyConfig()) + quantized_model = AutoModelForCausalLM.from_pretrained( + model_name, + torch_dtype="auto", + device_map="auto", + quantization_config=quantization_config + ) + tokenizer = AutoTokenizer.from_pretrained(model_name) + input_text = "What are we having for dinner?" + input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") + + hub_repo = # YOUR HUB REPO ID + tokenizer.push_to_hub(hub_repo) + quantized_model.push_to_hub(hub_repo, safe_serialization=False) + ``` Alternatively, you can use the [TorchAO Quantization space](https://huggingface.co/spaces/medmekk/TorchAO_Quantization) for quantizing models with a simple UI. diff --git a/docs/features/reasoning_outputs.md b/docs/features/reasoning_outputs.md index 59ef10d9c963b..2e6afe61663cb 100644 --- a/docs/features/reasoning_outputs.md +++ b/docs/features/reasoning_outputs.md @@ -33,34 +33,36 @@ vllm serve deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \ Next, make a request to the model that should return the reasoning content in the response. -```python -from openai import OpenAI +??? Code -# Modify OpenAI's API key and API base to use vLLM's API server. -openai_api_key = "EMPTY" -openai_api_base = "http://localhost:8000/v1" + ```python + from openai import OpenAI -client = OpenAI( - api_key=openai_api_key, - base_url=openai_api_base, -) + # Modify OpenAI's API key and API base to use vLLM's API server. + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" -models = client.models.list() -model = models.data[0].id + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) -# Round 1 -messages = [{"role": "user", "content": "9.11 and 9.8, which is greater?"}] -# For granite, add: `extra_body={"chat_template_kwargs": {"thinking": True}}` -# For Qwen3 series, if you want to disable thinking in reasoning mode, add: -# extra_body={"chat_template_kwargs": {"enable_thinking": False}} -response = client.chat.completions.create(model=model, messages=messages) + models = client.models.list() + model = models.data[0].id -reasoning_content = response.choices[0].message.reasoning_content -content = response.choices[0].message.content + # Round 1 + messages = [{"role": "user", "content": "9.11 and 9.8, which is greater?"}] + # For granite, add: `extra_body={"chat_template_kwargs": {"thinking": True}}` + # For Qwen3 series, if you want to disable thinking in reasoning mode, add: + # extra_body={"chat_template_kwargs": {"enable_thinking": False}} + response = client.chat.completions.create(model=model, messages=messages) -print("reasoning_content:", reasoning_content) -print("content:", content) -``` + reasoning_content = response.choices[0].message.reasoning_content + content = response.choices[0].message.content + + print("reasoning_content:", reasoning_content) + print("content:", content) + ``` The `reasoning_content` field contains the reasoning steps that led to the final conclusion, while the `content` field contains the final conclusion. @@ -68,77 +70,81 @@ The `reasoning_content` field contains the reasoning steps that led to the final Streaming chat completions are also supported for reasoning models. The `reasoning_content` field is available in the `delta` field in [chat completion response chunks](https://platform.openai.com/docs/api-reference/chat/streaming). -```json -{ - "id": "chatcmpl-123", - "object": "chat.completion.chunk", - "created": 1694268190, - "model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", - "system_fingerprint": "fp_44709d6fcb", - "choices": [ - { - "index": 0, - "delta": { - "role": "assistant", - "reasoning_content": "is", - }, - "logprobs": null, - "finish_reason": null - } - ] -} -``` +??? Json + + ```json + { + "id": "chatcmpl-123", + "object": "chat.completion.chunk", + "created": 1694268190, + "model": "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B", + "system_fingerprint": "fp_44709d6fcb", + "choices": [ + { + "index": 0, + "delta": { + "role": "assistant", + "reasoning_content": "is", + }, + "logprobs": null, + "finish_reason": null + } + ] + } + ``` OpenAI Python client library does not officially support `reasoning_content` attribute for streaming output. But the client supports extra attributes in the response. You can use `hasattr` to check if the `reasoning_content` attribute is present in the response. For example: -```python -from openai import OpenAI +??? Code -# Modify OpenAI's API key and API base to use vLLM's API server. -openai_api_key = "EMPTY" -openai_api_base = "http://localhost:8000/v1" + ```python + from openai import OpenAI -client = OpenAI( - api_key=openai_api_key, - base_url=openai_api_base, -) + # Modify OpenAI's API key and API base to use vLLM's API server. + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" -models = client.models.list() -model = models.data[0].id + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) -messages = [{"role": "user", "content": "9.11 and 9.8, which is greater?"}] -# For granite, add: `extra_body={"chat_template_kwargs": {"thinking": True}}` -# For Qwen3 series, if you want to disable thinking in reasoning mode, add: -# extra_body={"chat_template_kwargs": {"enable_thinking": False}} -stream = client.chat.completions.create(model=model, - messages=messages, - stream=True) + models = client.models.list() + model = models.data[0].id -print("client: Start streaming chat completions...") -printed_reasoning_content = False -printed_content = False + messages = [{"role": "user", "content": "9.11 and 9.8, which is greater?"}] + # For granite, add: `extra_body={"chat_template_kwargs": {"thinking": True}}` + # For Qwen3 series, if you want to disable thinking in reasoning mode, add: + # extra_body={"chat_template_kwargs": {"enable_thinking": False}} + stream = client.chat.completions.create(model=model, + messages=messages, + stream=True) -for chunk in stream: - reasoning_content = None - content = None - # Check the content is reasoning_content or content - if hasattr(chunk.choices[0].delta, "reasoning_content"): - reasoning_content = chunk.choices[0].delta.reasoning_content - elif hasattr(chunk.choices[0].delta, "content"): - content = chunk.choices[0].delta.content + print("client: Start streaming chat completions...") + printed_reasoning_content = False + printed_content = False - if reasoning_content is not None: - if not printed_reasoning_content: - printed_reasoning_content = True - print("reasoning_content:", end="", flush=True) - print(reasoning_content, end="", flush=True) - elif content is not None: - if not printed_content: - printed_content = True - print("\ncontent:", end="", flush=True) - # Extract and print the content - print(content, end="", flush=True) -``` + for chunk in stream: + reasoning_content = None + content = None + # Check the content is reasoning_content or content + if hasattr(chunk.choices[0].delta, "reasoning_content"): + reasoning_content = chunk.choices[0].delta.reasoning_content + elif hasattr(chunk.choices[0].delta, "content"): + content = chunk.choices[0].delta.content + + if reasoning_content is not None: + if not printed_reasoning_content: + printed_reasoning_content = True + print("reasoning_content:", end="", flush=True) + print(reasoning_content, end="", flush=True) + elif content is not None: + if not printed_content: + printed_content = True + print("\ncontent:", end="", flush=True) + # Extract and print the content + print(content, end="", flush=True) + ``` Remember to check whether the `reasoning_content` exists in the response before accessing it. You could checkout the [example](https://github.com/vllm-project/vllm/blob/main/examples/online_serving/openai_chat_completion_with_reasoning_streaming.py). @@ -146,41 +152,43 @@ Remember to check whether the `reasoning_content` exists in the response before The reasoning content is also available when both tool calling and the reasoning parser are enabled. Additionally, tool calling only parses functions from the `content` field, not from the `reasoning_content`. -```python -from openai import OpenAI +??? Code -client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy") + ```python + from openai import OpenAI -tools = [{ - "type": "function", - "function": { - "name": "get_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"}, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]} - }, - "required": ["location", "unit"] + client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy") + + tools = [{ + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"}, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]} + }, + "required": ["location", "unit"] + } } - } -}] + }] -response = client.chat.completions.create( - model=client.models.list().data[0].id, - messages=[{"role": "user", "content": "What's the weather like in San Francisco?"}], - tools=tools, - tool_choice="auto" -) + response = client.chat.completions.create( + model=client.models.list().data[0].id, + messages=[{"role": "user", "content": "What's the weather like in San Francisco?"}], + tools=tools, + tool_choice="auto" + ) -print(response) -tool_call = response.choices[0].message.tool_calls[0].function + print(response) + tool_call = response.choices[0].message.tool_calls[0].function -print(f"reasoning_content: {response.choices[0].message.reasoning_content}") -print(f"Function called: {tool_call.name}") -print(f"Arguments: {tool_call.arguments}") -``` + print(f"reasoning_content: {response.choices[0].message.reasoning_content}") + print(f"Function called: {tool_call.name}") + print(f"Arguments: {tool_call.arguments}") + ``` For more examples, please refer to . @@ -192,85 +200,89 @@ For more examples, please refer to . -```python -# import the required packages +??? Code -from vllm.reasoning import ReasoningParser, ReasoningParserManager -from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, - DeltaMessage) + ```python + # import the required packages -# define a reasoning parser and register it to vllm -# the name list in register_module can be used -# in --reasoning-parser. -@ReasoningParserManager.register_module(["example"]) -class ExampleParser(ReasoningParser): - def __init__(self, tokenizer: AnyTokenizer): - super().__init__(tokenizer) + from vllm.reasoning import ReasoningParser, ReasoningParserManager + from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, + DeltaMessage) - def extract_reasoning_content_streaming( - self, - previous_text: str, - current_text: str, - delta_text: str, - previous_token_ids: Sequence[int], - current_token_ids: Sequence[int], - delta_token_ids: Sequence[int], - ) -> Union[DeltaMessage, None]: - """ - Instance method that should be implemented for extracting reasoning - from an incomplete response; for use when handling reasoning calls and - streaming. Has to be an instance method because it requires state - - the current tokens/diffs, but also the information about what has - previously been parsed and extracted (see constructor) - """ + # define a reasoning parser and register it to vllm + # the name list in register_module can be used + # in --reasoning-parser. + @ReasoningParserManager.register_module(["example"]) + class ExampleParser(ReasoningParser): + def __init__(self, tokenizer: AnyTokenizer): + super().__init__(tokenizer) - def extract_reasoning_content( - self, model_output: str, request: ChatCompletionRequest - ) -> tuple[Optional[str], Optional[str]]: - """ - Extract reasoning content from a complete model-generated string. + def extract_reasoning_content_streaming( + self, + previous_text: str, + current_text: str, + delta_text: str, + previous_token_ids: Sequence[int], + current_token_ids: Sequence[int], + delta_token_ids: Sequence[int], + ) -> Union[DeltaMessage, None]: + """ + Instance method that should be implemented for extracting reasoning + from an incomplete response; for use when handling reasoning calls and + streaming. Has to be an instance method because it requires state - + the current tokens/diffs, but also the information about what has + previously been parsed and extracted (see constructor) + """ - Used for non-streaming responses where we have the entire model response - available before sending to the client. + def extract_reasoning_content( + self, model_output: str, request: ChatCompletionRequest + ) -> tuple[Optional[str], Optional[str]]: + """ + Extract reasoning content from a complete model-generated string. - Parameters: - model_output: str - The model-generated string to extract reasoning content from. + Used for non-streaming responses where we have the entire model response + available before sending to the client. - request: ChatCompletionRequest - The request object that was used to generate the model_output. + Parameters: + model_output: str + The model-generated string to extract reasoning content from. - Returns: - tuple[Optional[str], Optional[str]] - A tuple containing the reasoning content and the content. - """ -``` + request: ChatCompletionRequest + The request object that was used to generate the model_output. + + Returns: + tuple[Optional[str], Optional[str]] + A tuple containing the reasoning content and the content. + """ + ``` Additionally, to enable structured output, you'll need to create a new `Reasoner` similar to the one in . -```python -@dataclass -class DeepSeekReasoner(Reasoner): - """ - Reasoner for DeepSeek R series models. - """ - start_token_id: int - end_token_id: int +??? Code - start_token: str = "" - end_token: str = "" + ```python + @dataclass + class DeepSeekReasoner(Reasoner): + """ + Reasoner for DeepSeek R series models. + """ + start_token_id: int + end_token_id: int - @classmethod - def from_tokenizer(cls, tokenizer: PreTrainedTokenizer) -> Reasoner: - return cls(start_token_id=tokenizer.encode( - "", add_special_tokens=False)[0], - end_token_id=tokenizer.encode("", - add_special_tokens=False)[0]) + start_token: str = "" + end_token: str = "" - def is_reasoning_end(self, input_ids: list[int]) -> bool: - return self.end_token_id in input_ids - ... -``` + @classmethod + def from_tokenizer(cls, tokenizer: PreTrainedTokenizer) -> Reasoner: + return cls(start_token_id=tokenizer.encode( + "", add_special_tokens=False)[0], + end_token_id=tokenizer.encode("", + add_special_tokens=False)[0]) + + def is_reasoning_end(self, input_ids: list[int]) -> bool: + return self.end_token_id in input_ids + ... + ``` The structured output engine like [xgrammar](https://github.com/mlc-ai/xgrammar) will use `end_token_id` to check if the reasoning content is present in the model output and skip the structured output if it is the case. diff --git a/docs/features/spec_decode.md b/docs/features/spec_decode.md index 5080960f72ddb..7055cde1e9930 100644 --- a/docs/features/spec_decode.md +++ b/docs/features/spec_decode.md @@ -18,29 +18,31 @@ Speculative decoding is a technique which improves inter-token latency in memory The following code configures vLLM in an offline mode to use speculative decoding with a draft model, speculating 5 tokens at a time. -```python -from vllm import LLM, SamplingParams +??? Code -prompts = [ - "The future of AI is", -] -sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + ```python + from vllm import LLM, SamplingParams -llm = LLM( - model="facebook/opt-6.7b", - tensor_parallel_size=1, - speculative_config={ - "model": "facebook/opt-125m", - "num_speculative_tokens": 5, - }, -) -outputs = llm.generate(prompts, sampling_params) + prompts = [ + "The future of AI is", + ] + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") -``` + llm = LLM( + model="facebook/opt-6.7b", + tensor_parallel_size=1, + speculative_config={ + "model": "facebook/opt-125m", + "num_speculative_tokens": 5, + }, + ) + outputs = llm.generate(prompts, sampling_params) + + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + ``` To perform the same with an online mode launch the server: @@ -60,69 +62,73 @@ python -m vllm.entrypoints.openai.api_server \ Then use a client: -```python -from openai import OpenAI +??? Code -# Modify OpenAI's API key and API base to use vLLM's API server. -openai_api_key = "EMPTY" -openai_api_base = "http://localhost:8000/v1" + ```python + from openai import OpenAI -client = OpenAI( - # defaults to os.environ.get("OPENAI_API_KEY") - api_key=openai_api_key, - base_url=openai_api_base, -) + # Modify OpenAI's API key and API base to use vLLM's API server. + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" -models = client.models.list() -model = models.data[0].id + client = OpenAI( + # defaults to os.environ.get("OPENAI_API_KEY") + api_key=openai_api_key, + base_url=openai_api_base, + ) -# Completion API -stream = False -completion = client.completions.create( - model=model, - prompt="The future of AI is", - echo=False, - n=1, - stream=stream, -) + models = client.models.list() + model = models.data[0].id -print("Completion results:") -if stream: - for c in completion: - print(c) -else: - print(completion) -``` + # Completion API + stream = False + completion = client.completions.create( + model=model, + prompt="The future of AI is", + echo=False, + n=1, + stream=stream, + ) + + print("Completion results:") + if stream: + for c in completion: + print(c) + else: + print(completion) + ``` ## Speculating by matching n-grams in the prompt The following code configures vLLM to use speculative decoding where proposals are generated by matching n-grams in the prompt. For more information read [this thread.](https://x.com/joao_gante/status/1747322413006643259) -```python -from vllm import LLM, SamplingParams +??? Code -prompts = [ - "The future of AI is", -] -sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + ```python + from vllm import LLM, SamplingParams -llm = LLM( - model="facebook/opt-6.7b", - tensor_parallel_size=1, - speculative_config={ - "method": "ngram", - "num_speculative_tokens": 5, - "prompt_lookup_max": 4, - }, -) -outputs = llm.generate(prompts, sampling_params) + prompts = [ + "The future of AI is", + ] + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") -``` + llm = LLM( + model="facebook/opt-6.7b", + tensor_parallel_size=1, + speculative_config={ + "method": "ngram", + "num_speculative_tokens": 5, + "prompt_lookup_max": 4, + }, + ) + outputs = llm.generate(prompts, sampling_params) + + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + ``` ## Speculating using MLP speculators @@ -131,29 +137,31 @@ draft models that conditioning draft predictions on both context vectors and sam For more information see [this blog](https://pytorch.org/blog/hitchhikers-guide-speculative-decoding/) or [this technical report](https://arxiv.org/abs/2404.19124). -```python -from vllm import LLM, SamplingParams +??? Code -prompts = [ - "The future of AI is", -] -sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + ```python + from vllm import LLM, SamplingParams -llm = LLM( - model="meta-llama/Meta-Llama-3.1-70B-Instruct", - tensor_parallel_size=4, - speculative_config={ - "model": "ibm-ai-platform/llama3-70b-accelerator", - "draft_tensor_parallel_size": 1, - }, -) -outputs = llm.generate(prompts, sampling_params) + prompts = [ + "The future of AI is", + ] + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") -``` + llm = LLM( + model="meta-llama/Meta-Llama-3.1-70B-Instruct", + tensor_parallel_size=4, + speculative_config={ + "model": "ibm-ai-platform/llama3-70b-accelerator", + "draft_tensor_parallel_size": 1, + }, + ) + outputs = llm.generate(prompts, sampling_params) + + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + ``` Note that these speculative models currently need to be run without tensor parallelism, although it is possible to run the main model using tensor parallelism (see example above). Since the @@ -177,31 +185,33 @@ A variety of speculative models of this type are available on HF hub: The following code configures vLLM to use speculative decoding where proposals are generated by an [EAGLE (Extrapolation Algorithm for Greater Language-model Efficiency)](https://arxiv.org/pdf/2401.15077) based draft model. A more detailed example for offline mode, including how to extract request level acceptance rate, can be found [here](gh-file:examples/offline_inference/eagle.py). -```python -from vllm import LLM, SamplingParams +??? Code -prompts = [ - "The future of AI is", -] -sampling_params = SamplingParams(temperature=0.8, top_p=0.95) + ```python + from vllm import LLM, SamplingParams -llm = LLM( - model="meta-llama/Meta-Llama-3-8B-Instruct", - tensor_parallel_size=4, - speculative_config={ - "model": "yuhuili/EAGLE-LLaMA3-Instruct-8B", - "draft_tensor_parallel_size": 1, - }, -) + prompts = [ + "The future of AI is", + ] + sampling_params = SamplingParams(temperature=0.8, top_p=0.95) -outputs = llm.generate(prompts, sampling_params) + llm = LLM( + model="meta-llama/Meta-Llama-3-8B-Instruct", + tensor_parallel_size=4, + speculative_config={ + "model": "yuhuili/EAGLE-LLaMA3-Instruct-8B", + "draft_tensor_parallel_size": 1, + }, + ) -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + outputs = llm.generate(prompts, sampling_params) -``` + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + + ``` A few important things to consider when using the EAGLE based draft models: diff --git a/docs/features/structured_outputs.md b/docs/features/structured_outputs.md index 044c796609923..b63f344ebd5a5 100644 --- a/docs/features/structured_outputs.md +++ b/docs/features/structured_outputs.md @@ -33,39 +33,43 @@ text. Now let´s see an example for each of the cases, starting with the `guided_choice`, as it´s the easiest one: -```python -from openai import OpenAI -client = OpenAI( - base_url="http://localhost:8000/v1", - api_key="-", -) -model = client.models.list().data[0].id +??? Code -completion = client.chat.completions.create( - model=model, - messages=[ - {"role": "user", "content": "Classify this sentiment: vLLM is wonderful!"} - ], - extra_body={"guided_choice": ["positive", "negative"]}, -) -print(completion.choices[0].message.content) -``` + ```python + from openai import OpenAI + client = OpenAI( + base_url="http://localhost:8000/v1", + api_key="-", + ) + model = client.models.list().data[0].id + + completion = client.chat.completions.create( + model=model, + messages=[ + {"role": "user", "content": "Classify this sentiment: vLLM is wonderful!"} + ], + extra_body={"guided_choice": ["positive", "negative"]}, + ) + print(completion.choices[0].message.content) + ``` The next example shows how to use the `guided_regex`. The idea is to generate an email address, given a simple regex template: -```python -completion = client.chat.completions.create( - model=model, - messages=[ - { - "role": "user", - "content": "Generate an example email address for Alan Turing, who works in Enigma. End in .com and new line. Example result: alan.turing@enigma.com\n", - } - ], - extra_body={"guided_regex": r"\w+@\w+\.com\n", "stop": ["\n"]}, -) -print(completion.choices[0].message.content) -``` +??? Code + + ```python + completion = client.chat.completions.create( + model=model, + messages=[ + { + "role": "user", + "content": "Generate an example email address for Alan Turing, who works in Enigma. End in .com and new line. Example result: alan.turing@enigma.com\n", + } + ], + extra_body={"guided_regex": r"\w+@\w+\.com\n", "stop": ["\n"]}, + ) + print(completion.choices[0].message.content) + ``` One of the most relevant features in structured text generation is the option to generate a valid JSON with pre-defined fields and formats. For this we can use the `guided_json` parameter in two different ways: @@ -75,41 +79,43 @@ For this we can use the `guided_json` parameter in two different ways: The next example shows how to use the `guided_json` parameter with a Pydantic model: -```python -from pydantic import BaseModel -from enum import Enum +??? Code -class CarType(str, Enum): - sedan = "sedan" - suv = "SUV" - truck = "Truck" - coupe = "Coupe" + ```python + from pydantic import BaseModel + from enum import Enum -class CarDescription(BaseModel): - brand: str - model: str - car_type: CarType + class CarType(str, Enum): + sedan = "sedan" + suv = "SUV" + truck = "Truck" + coupe = "Coupe" -json_schema = CarDescription.model_json_schema() + class CarDescription(BaseModel): + brand: str + model: str + car_type: CarType -completion = client.chat.completions.create( - model=model, - messages=[ - { - "role": "user", - "content": "Generate a JSON with the brand, model and car_type of the most iconic car from the 90's", - } - ], - "response_format": { - "type": "json_schema", - "json_schema": { - "name": "car-description", - "schema": CarDescription.model_json_schema() + json_schema = CarDescription.model_json_schema() + + completion = client.chat.completions.create( + model=model, + messages=[ + { + "role": "user", + "content": "Generate a JSON with the brand, model and car_type of the most iconic car from the 90's", + } + ], + "response_format": { + "type": "json_schema", + "json_schema": { + "name": "car-description", + "schema": CarDescription.model_json_schema() + }, }, - }, -) -print(completion.choices[0].message.content) -``` + ) + print(completion.choices[0].message.content) + ``` !!! tip While not strictly necessary, normally it´s better to indicate in the prompt the @@ -121,33 +127,35 @@ difficult to use, but it´s really powerful. It allows us to define complete languages like SQL queries. It works by using a context free EBNF grammar. As an example, we can use to define a specific format of simplified SQL queries: -```python -simplified_sql_grammar = """ - root ::= select_statement +??? Code - select_statement ::= "SELECT " column " from " table " where " condition + ```python + simplified_sql_grammar = """ + root ::= select_statement - column ::= "col_1 " | "col_2 " + select_statement ::= "SELECT " column " from " table " where " condition - table ::= "table_1 " | "table_2 " + column ::= "col_1 " | "col_2 " - condition ::= column "= " number + table ::= "table_1 " | "table_2 " - number ::= "1 " | "2 " -""" + condition ::= column "= " number -completion = client.chat.completions.create( - model=model, - messages=[ - { - "role": "user", - "content": "Generate an SQL query to show the 'username' and 'email' from the 'users' table.", - } - ], - extra_body={"guided_grammar": simplified_sql_grammar}, -) -print(completion.choices[0].message.content) -``` + number ::= "1 " | "2 " + """ + + completion = client.chat.completions.create( + model=model, + messages=[ + { + "role": "user", + "content": "Generate an SQL query to show the 'username' and 'email' from the 'users' table.", + } + ], + extra_body={"guided_grammar": simplified_sql_grammar}, + ) + print(completion.choices[0].message.content) + ``` See also: [full example](https://docs.vllm.ai/en/latest/examples/online_serving/structured_outputs.html) @@ -161,34 +169,36 @@ vllm serve deepseek-ai/DeepSeek-R1-Distill-Qwen-7B --reasoning-parser deepseek_r Note that you can use reasoning with any provided structured outputs feature. The following uses one with JSON schema: -```python -from pydantic import BaseModel +??? Code + + ```python + from pydantic import BaseModel -class People(BaseModel): - name: str - age: int + class People(BaseModel): + name: str + age: int -completion = client.chat.completions.create( - model=model, - messages=[ - { - "role": "user", - "content": "Generate a JSON with the name and age of one random person.", - } - ], - response_format={ - "type": "json_schema", - "json_schema": { - "name": "people", - "schema": People.model_json_schema() - } - }, -) -print("reasoning_content: ", completion.choices[0].message.reasoning_content) -print("content: ", completion.choices[0].message.content) -``` + completion = client.chat.completions.create( + model=model, + messages=[ + { + "role": "user", + "content": "Generate a JSON with the name and age of one random person.", + } + ], + response_format={ + "type": "json_schema", + "json_schema": { + "name": "people", + "schema": People.model_json_schema() + } + }, + ) + print("reasoning_content: ", completion.choices[0].message.reasoning_content) + print("content: ", completion.choices[0].message.content) + ``` See also: [full example](https://docs.vllm.ai/en/latest/examples/online_serving/structured_outputs.html) @@ -202,33 +212,33 @@ For the following examples, vLLM was setup using `vllm serve meta-llama/Llama-3. Here is a simple example demonstrating how to get structured output using Pydantic models: -```python -from pydantic import BaseModel -from openai import OpenAI +??? Code -class Info(BaseModel): - name: str - age: int + ```python + from pydantic import BaseModel + from openai import OpenAI -client = OpenAI(base_url="http://0.0.0.0:8000/v1", api_key="dummy") -model = client.models.list().data[0].id -completion = client.beta.chat.completions.parse( - model=model, - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "My name is Cameron, I'm 28. What's my name and age?"}, - ], - response_format=Info, -) + class Info(BaseModel): + name: str + age: int -message = completion.choices[0].message -print(message) -assert message.parsed -print("Name:", message.parsed.name) -print("Age:", message.parsed.age) -``` + client = OpenAI(base_url="http://0.0.0.0:8000/v1", api_key="dummy") + model = client.models.list().data[0].id + completion = client.beta.chat.completions.parse( + model=model, + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "My name is Cameron, I'm 28. What's my name and age?"}, + ], + response_format=Info, + ) -Output: + message = completion.choices[0].message + print(message) + assert message.parsed + print("Name:", message.parsed.name) + print("Age:", message.parsed.age) + ``` ```console ParsedChatCompletionMessage[Testing](content='{"name": "Cameron", "age": 28}', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=[], parsed=Testing(name='Cameron', age=28)) @@ -238,35 +248,37 @@ Age: 28 Here is a more complex example using nested Pydantic models to handle a step-by-step math solution: -```python -from typing import List -from pydantic import BaseModel -from openai import OpenAI +??? Code -class Step(BaseModel): - explanation: str - output: str + ```python + from typing import List + from pydantic import BaseModel + from openai import OpenAI -class MathResponse(BaseModel): - steps: list[Step] - final_answer: str + class Step(BaseModel): + explanation: str + output: str -completion = client.beta.chat.completions.parse( - model=model, - messages=[ - {"role": "system", "content": "You are a helpful expert math tutor."}, - {"role": "user", "content": "Solve 8x + 31 = 2."}, - ], - response_format=MathResponse, -) + class MathResponse(BaseModel): + steps: list[Step] + final_answer: str -message = completion.choices[0].message -print(message) -assert message.parsed -for i, step in enumerate(message.parsed.steps): - print(f"Step #{i}:", step) -print("Answer:", message.parsed.final_answer) -``` + completion = client.beta.chat.completions.parse( + model=model, + messages=[ + {"role": "system", "content": "You are a helpful expert math tutor."}, + {"role": "user", "content": "Solve 8x + 31 = 2."}, + ], + response_format=MathResponse, + ) + + message = completion.choices[0].message + print(message) + assert message.parsed + for i, step in enumerate(message.parsed.steps): + print(f"Step #{i}:", step) + print("Answer:", message.parsed.final_answer) + ``` Output: @@ -296,19 +308,21 @@ These parameters can be used in the same way as the parameters from the Online Serving examples above. One example for the usage of the `choice` parameter is shown below: -```python -from vllm import LLM, SamplingParams -from vllm.sampling_params import GuidedDecodingParams +??? Code -llm = LLM(model="HuggingFaceTB/SmolLM2-1.7B-Instruct") + ```python + from vllm import LLM, SamplingParams + from vllm.sampling_params import GuidedDecodingParams -guided_decoding_params = GuidedDecodingParams(choice=["Positive", "Negative"]) -sampling_params = SamplingParams(guided_decoding=guided_decoding_params) -outputs = llm.generate( - prompts="Classify this sentiment: vLLM is wonderful!", - sampling_params=sampling_params, -) -print(outputs[0].outputs[0].text) -``` + llm = LLM(model="HuggingFaceTB/SmolLM2-1.7B-Instruct") + + guided_decoding_params = GuidedDecodingParams(choice=["Positive", "Negative"]) + sampling_params = SamplingParams(guided_decoding=guided_decoding_params) + outputs = llm.generate( + prompts="Classify this sentiment: vLLM is wonderful!", + sampling_params=sampling_params, + ) + print(outputs[0].outputs[0].text) + ``` See also: [full example](https://docs.vllm.ai/en/latest/examples/online_serving/structured_outputs.html) diff --git a/docs/features/tool_calling.md b/docs/features/tool_calling.md index 93ea164881ce5..9fb878777a48b 100644 --- a/docs/features/tool_calling.md +++ b/docs/features/tool_calling.md @@ -15,44 +15,46 @@ vllm serve meta-llama/Llama-3.1-8B-Instruct \ Next, make a request to the model that should result in it using the available tools: -```python -from openai import OpenAI -import json +??? Code -client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy") + ```python + from openai import OpenAI + import json -def get_weather(location: str, unit: str): - return f"Getting the weather for {location} in {unit}..." -tool_functions = {"get_weather": get_weather} + client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy") -tools = [{ - "type": "function", - "function": { - "name": "get_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"}, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]} - }, - "required": ["location", "unit"] + def get_weather(location: str, unit: str): + return f"Getting the weather for {location} in {unit}..." + tool_functions = {"get_weather": get_weather} + + tools = [{ + "type": "function", + "function": { + "name": "get_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"}, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]} + }, + "required": ["location", "unit"] + } } - } -}] + }] -response = client.chat.completions.create( - model=client.models.list().data[0].id, - messages=[{"role": "user", "content": "What's the weather like in San Francisco?"}], - tools=tools, - tool_choice="auto" -) + response = client.chat.completions.create( + model=client.models.list().data[0].id, + messages=[{"role": "user", "content": "What's the weather like in San Francisco?"}], + tools=tools, + tool_choice="auto" + ) -tool_call = response.choices[0].message.tool_calls[0].function -print(f"Function called: {tool_call.name}") -print(f"Arguments: {tool_call.arguments}") -print(f"Result: {get_weather(**json.loads(tool_call.arguments))}") -``` + tool_call = response.choices[0].message.tool_calls[0].function + print(f"Function called: {tool_call.name}") + print(f"Arguments: {tool_call.arguments}") + print(f"Result: {get_weather(**json.loads(tool_call.arguments))}") + ``` Example output: @@ -301,49 +303,51 @@ A tool parser plugin is a Python file containing one or more ToolParser implemen Here is a summary of a plugin file: -```python +??? Code -# import the required packages + ```python -# define a tool parser and register it to vllm -# the name list in register_module can be used -# in --tool-call-parser. you can define as many -# tool parsers as you want here. -@ToolParserManager.register_module(["example"]) -class ExampleToolParser(ToolParser): - def __init__(self, tokenizer: AnyTokenizer): - super().__init__(tokenizer) + # import the required packages - # adjust request. e.g.: set skip special tokens - # to False for tool call output. - def adjust_request( - self, request: ChatCompletionRequest) -> ChatCompletionRequest: - return request + # define a tool parser and register it to vllm + # the name list in register_module can be used + # in --tool-call-parser. you can define as many + # tool parsers as you want here. + @ToolParserManager.register_module(["example"]) + class ExampleToolParser(ToolParser): + def __init__(self, tokenizer: AnyTokenizer): + super().__init__(tokenizer) - # implement the tool call parse for stream call - def extract_tool_calls_streaming( - self, - previous_text: str, - current_text: str, - delta_text: str, - previous_token_ids: Sequence[int], - current_token_ids: Sequence[int], - delta_token_ids: Sequence[int], - request: ChatCompletionRequest, - ) -> Union[DeltaMessage, None]: - return delta + # adjust request. e.g.: set skip special tokens + # to False for tool call output. + def adjust_request( + self, request: ChatCompletionRequest) -> ChatCompletionRequest: + return request - # implement the tool parse for non-stream call - def extract_tool_calls( - self, - model_output: str, - request: ChatCompletionRequest, - ) -> ExtractedToolCallInformation: - return ExtractedToolCallInformation(tools_called=False, - tool_calls=[], - content=text) + # implement the tool call parse for stream call + def extract_tool_calls_streaming( + self, + previous_text: str, + current_text: str, + delta_text: str, + previous_token_ids: Sequence[int], + current_token_ids: Sequence[int], + delta_token_ids: Sequence[int], + request: ChatCompletionRequest, + ) -> Union[DeltaMessage, None]: + return delta -``` + # implement the tool parse for non-stream call + def extract_tool_calls( + self, + model_output: str, + request: ChatCompletionRequest, + ) -> ExtractedToolCallInformation: + return ExtractedToolCallInformation(tools_called=False, + tool_calls=[], + content=text) + + ``` Then you can use this plugin in the command line like this. diff --git a/docs/getting_started/installation/cpu.md b/docs/getting_started/installation/cpu.md index 00bb5cae43f00..3f75d1aef300b 100644 --- a/docs/getting_started/installation/cpu.md +++ b/docs/getting_started/installation/cpu.md @@ -76,21 +76,23 @@ Currently, there are no pre-built CPU wheels. ### Build image from source -```console -$ docker build -f docker/Dockerfile.cpu --tag vllm-cpu-env --target vllm-openai . +??? Commands -# Launching OpenAI server -$ docker run --rm \ - --privileged=true \ - --shm-size=4g \ - -p 8000:8000 \ - -e VLLM_CPU_KVCACHE_SPACE= \ - -e VLLM_CPU_OMP_THREADS_BIND= \ - vllm-cpu-env \ - --model=meta-llama/Llama-3.2-1B-Instruct \ - --dtype=bfloat16 \ - other vLLM OpenAI server arguments -``` + ```console + $ docker build -f docker/Dockerfile.cpu --tag vllm-cpu-env --target vllm-openai . + + # Launching OpenAI server + $ docker run --rm \ + --privileged=true \ + --shm-size=4g \ + -p 8000:8000 \ + -e VLLM_CPU_KVCACHE_SPACE= \ + -e VLLM_CPU_OMP_THREADS_BIND= \ + vllm-cpu-env \ + --model=meta-llama/Llama-3.2-1B-Instruct \ + --dtype=bfloat16 \ + other vLLM OpenAI server arguments + ``` !!! tip For ARM or Apple silicon, use `docker/Dockerfile.arm` @@ -144,32 +146,34 @@ vllm serve facebook/opt-125m - If using vLLM CPU backend on a machine with hyper-threading, it is recommended to bind only one OpenMP thread on each physical CPU core using `VLLM_CPU_OMP_THREADS_BIND` or using auto thread binding feature by default. On a hyper-threading enabled platform with 16 logical CPU cores / 8 physical CPU cores: -```console -$ lscpu -e # check the mapping between logical CPU cores and physical CPU cores +??? Commands -# The "CPU" column means the logical CPU core IDs, and the "CORE" column means the physical core IDs. On this platform, two logical cores are sharing one physical core. -CPU NODE SOCKET CORE L1d:L1i:L2:L3 ONLINE MAXMHZ MINMHZ MHZ -0 0 0 0 0:0:0:0 yes 2401.0000 800.0000 800.000 -1 0 0 1 1:1:1:0 yes 2401.0000 800.0000 800.000 -2 0 0 2 2:2:2:0 yes 2401.0000 800.0000 800.000 -3 0 0 3 3:3:3:0 yes 2401.0000 800.0000 800.000 -4 0 0 4 4:4:4:0 yes 2401.0000 800.0000 800.000 -5 0 0 5 5:5:5:0 yes 2401.0000 800.0000 800.000 -6 0 0 6 6:6:6:0 yes 2401.0000 800.0000 800.000 -7 0 0 7 7:7:7:0 yes 2401.0000 800.0000 800.000 -8 0 0 0 0:0:0:0 yes 2401.0000 800.0000 800.000 -9 0 0 1 1:1:1:0 yes 2401.0000 800.0000 800.000 -10 0 0 2 2:2:2:0 yes 2401.0000 800.0000 800.000 -11 0 0 3 3:3:3:0 yes 2401.0000 800.0000 800.000 -12 0 0 4 4:4:4:0 yes 2401.0000 800.0000 800.000 -13 0 0 5 5:5:5:0 yes 2401.0000 800.0000 800.000 -14 0 0 6 6:6:6:0 yes 2401.0000 800.0000 800.000 -15 0 0 7 7:7:7:0 yes 2401.0000 800.0000 800.000 + ```console + $ lscpu -e # check the mapping between logical CPU cores and physical CPU cores -# On this platform, it is recommend to only bind openMP threads on logical CPU cores 0-7 or 8-15 -$ export VLLM_CPU_OMP_THREADS_BIND=0-7 -$ python examples/offline_inference/basic/basic.py -``` + # The "CPU" column means the logical CPU core IDs, and the "CORE" column means the physical core IDs. On this platform, two logical cores are sharing one physical core. + CPU NODE SOCKET CORE L1d:L1i:L2:L3 ONLINE MAXMHZ MINMHZ MHZ + 0 0 0 0 0:0:0:0 yes 2401.0000 800.0000 800.000 + 1 0 0 1 1:1:1:0 yes 2401.0000 800.0000 800.000 + 2 0 0 2 2:2:2:0 yes 2401.0000 800.0000 800.000 + 3 0 0 3 3:3:3:0 yes 2401.0000 800.0000 800.000 + 4 0 0 4 4:4:4:0 yes 2401.0000 800.0000 800.000 + 5 0 0 5 5:5:5:0 yes 2401.0000 800.0000 800.000 + 6 0 0 6 6:6:6:0 yes 2401.0000 800.0000 800.000 + 7 0 0 7 7:7:7:0 yes 2401.0000 800.0000 800.000 + 8 0 0 0 0:0:0:0 yes 2401.0000 800.0000 800.000 + 9 0 0 1 1:1:1:0 yes 2401.0000 800.0000 800.000 + 10 0 0 2 2:2:2:0 yes 2401.0000 800.0000 800.000 + 11 0 0 3 3:3:3:0 yes 2401.0000 800.0000 800.000 + 12 0 0 4 4:4:4:0 yes 2401.0000 800.0000 800.000 + 13 0 0 5 5:5:5:0 yes 2401.0000 800.0000 800.000 + 14 0 0 6 6:6:6:0 yes 2401.0000 800.0000 800.000 + 15 0 0 7 7:7:7:0 yes 2401.0000 800.0000 800.000 + + # On this platform, it is recommend to only bind openMP threads on logical CPU cores 0-7 or 8-15 + $ export VLLM_CPU_OMP_THREADS_BIND=0-7 + $ python examples/offline_inference/basic/basic.py + ``` - If using vLLM CPU backend on a multi-socket machine with NUMA, be aware to set CPU cores using `VLLM_CPU_OMP_THREADS_BIND` to avoid cross NUMA node memory access. diff --git a/docs/getting_started/installation/gpu/rocm.inc.md b/docs/getting_started/installation/gpu/rocm.inc.md index 8019fb50f4ddd..6bc714fe6e8b4 100644 --- a/docs/getting_started/installation/gpu/rocm.inc.md +++ b/docs/getting_started/installation/gpu/rocm.inc.md @@ -90,24 +90,26 @@ Currently, there are no pre-built ROCm wheels. 4. Build vLLM. For example, vLLM on ROCM 6.3 can be built with the following steps: - ```bash - pip install --upgrade pip + ??? Commands - # Build & install AMD SMI - pip install /opt/rocm/share/amd_smi + ```bash + pip install --upgrade pip - # Install dependencies - pip install --upgrade numba \ - scipy \ - huggingface-hub[cli,hf_transfer] \ - setuptools_scm - pip install "numpy<2" - pip install -r requirements/rocm.txt + # Build & install AMD SMI + pip install /opt/rocm/share/amd_smi - # Build vLLM for MI210/MI250/MI300. - export PYTORCH_ROCM_ARCH="gfx90a;gfx942" - python3 setup.py develop - ``` + # Install dependencies + pip install --upgrade numba \ + scipy \ + huggingface-hub[cli,hf_transfer] \ + setuptools_scm + pip install "numpy<2" + pip install -r requirements/rocm.txt + + # Build vLLM for MI210/MI250/MI300. + export PYTORCH_ROCM_ARCH="gfx90a;gfx942" + python3 setup.py develop + ``` This may take 5-10 minutes. Currently, `pip install .` does not work for ROCm installation. @@ -201,19 +203,21 @@ DOCKER_BUILDKIT=1 docker build \ To run the above docker image `vllm-rocm`, use the below command: -```console -docker run -it \ - --network=host \ - --group-add=video \ - --ipc=host \ - --cap-add=SYS_PTRACE \ - --security-opt seccomp=unconfined \ - --device /dev/kfd \ - --device /dev/dri \ - -v :/app/model \ - vllm-rocm \ - bash -``` +??? Command + + ```console + docker run -it \ + --network=host \ + --group-add=video \ + --ipc=host \ + --cap-add=SYS_PTRACE \ + --security-opt seccomp=unconfined \ + --device /dev/kfd \ + --device /dev/dri \ + -v :/app/model \ + vllm-rocm \ + bash + ``` Where the `` is the location where the model is stored, for example, the weights for llama2 or llama3 models. diff --git a/docs/getting_started/installation/intel_gaudi.md b/docs/getting_started/installation/intel_gaudi.md index f5970850aae71..056caa7081474 100644 --- a/docs/getting_started/installation/intel_gaudi.md +++ b/docs/getting_started/installation/intel_gaudi.md @@ -200,7 +200,7 @@ INFO 08-01 21:37:59 hpu_model_runner.py:509] Generated 48 decode buckets: [(1, 1 `min` determines the lowest value of the bucket. `step` determines the interval between buckets, and `max` determines the upper bound of the bucket. Furthermore, interval between `min` and `step` has special handling -- `min` gets multiplied by consecutive powers of two, until `step` gets reached. We call this the ramp-up phase and it is used for handling lower batch sizes with minimum wastage, while allowing larger padding on larger batch sizes. -Example (with ramp-up) +Example (with ramp-up): ```text min = 2, step = 32, max = 64 @@ -209,7 +209,7 @@ min = 2, step = 32, max = 64 => buckets = ramp_up + stable => (2, 4, 8, 16, 32, 64) ``` -Example (without ramp-up) +Example (without ramp-up): ```text min = 128, step = 128, max = 512 @@ -232,19 +232,21 @@ As an example, if a request of 3 sequences, with max sequence length of 412 come Warmup is an optional, but highly recommended step occurring before vLLM server starts listening. It executes a forward pass for each bucket with dummy data. The goal is to pre-compile all graphs and not incur any graph compilation overheads within bucket boundaries during server runtime. Each warmup step is logged during vLLM startup: -```text -INFO 08-01 22:26:47 hpu_model_runner.py:1066] [Warmup][Prompt][1/24] batch_size:4 seq_len:1024 free_mem:79.16 GiB -INFO 08-01 22:26:47 hpu_model_runner.py:1066] [Warmup][Prompt][2/24] batch_size:4 seq_len:896 free_mem:55.43 GiB -INFO 08-01 22:26:48 hpu_model_runner.py:1066] [Warmup][Prompt][3/24] batch_size:4 seq_len:768 free_mem:55.43 GiB -... -INFO 08-01 22:26:59 hpu_model_runner.py:1066] [Warmup][Prompt][24/24] batch_size:1 seq_len:128 free_mem:55.43 GiB -INFO 08-01 22:27:00 hpu_model_runner.py:1066] [Warmup][Decode][1/48] batch_size:4 seq_len:2048 free_mem:55.43 GiB -INFO 08-01 22:27:00 hpu_model_runner.py:1066] [Warmup][Decode][2/48] batch_size:4 seq_len:1920 free_mem:55.43 GiB -INFO 08-01 22:27:01 hpu_model_runner.py:1066] [Warmup][Decode][3/48] batch_size:4 seq_len:1792 free_mem:55.43 GiB -... -INFO 08-01 22:27:16 hpu_model_runner.py:1066] [Warmup][Decode][47/48] batch_size:2 seq_len:128 free_mem:55.43 GiB -INFO 08-01 22:27:16 hpu_model_runner.py:1066] [Warmup][Decode][48/48] batch_size:1 seq_len:128 free_mem:55.43 GiB -``` +??? Logs + + ```text + INFO 08-01 22:26:47 hpu_model_runner.py:1066] [Warmup][Prompt][1/24] batch_size:4 seq_len:1024 free_mem:79.16 GiB + INFO 08-01 22:26:47 hpu_model_runner.py:1066] [Warmup][Prompt][2/24] batch_size:4 seq_len:896 free_mem:55.43 GiB + INFO 08-01 22:26:48 hpu_model_runner.py:1066] [Warmup][Prompt][3/24] batch_size:4 seq_len:768 free_mem:55.43 GiB + ... + INFO 08-01 22:26:59 hpu_model_runner.py:1066] [Warmup][Prompt][24/24] batch_size:1 seq_len:128 free_mem:55.43 GiB + INFO 08-01 22:27:00 hpu_model_runner.py:1066] [Warmup][Decode][1/48] batch_size:4 seq_len:2048 free_mem:55.43 GiB + INFO 08-01 22:27:00 hpu_model_runner.py:1066] [Warmup][Decode][2/48] batch_size:4 seq_len:1920 free_mem:55.43 GiB + INFO 08-01 22:27:01 hpu_model_runner.py:1066] [Warmup][Decode][3/48] batch_size:4 seq_len:1792 free_mem:55.43 GiB + ... + INFO 08-01 22:27:16 hpu_model_runner.py:1066] [Warmup][Decode][47/48] batch_size:2 seq_len:128 free_mem:55.43 GiB + INFO 08-01 22:27:16 hpu_model_runner.py:1066] [Warmup][Decode][48/48] batch_size:1 seq_len:128 free_mem:55.43 GiB + ``` This example uses the same buckets as in the [Bucketing Mechanism][gaudi-bucketing-mechanism] section. Each output line corresponds to execution of a single bucket. When bucket is executed for the first time, its graph is compiled and can be reused later on, skipping further graph compilations. @@ -279,37 +281,39 @@ When there's large amount of requests pending, vLLM scheduler will attempt to fi Each described step is logged by vLLM server, as follows (negative values correspond to memory being released): -```text -INFO 08-02 17:37:44 hpu_model_runner.py:493] Prompt bucket config (min, step, max_warmup) bs:[1, 32, 4], seq:[128, 128, 1024] -INFO 08-02 17:37:44 hpu_model_runner.py:499] Generated 24 prompt buckets: [(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024)] -INFO 08-02 17:37:44 hpu_model_runner.py:504] Decode bucket config (min, step, max_warmup) bs:[1, 128, 4], seq:[128, 128, 2048] -INFO 08-02 17:37:44 hpu_model_runner.py:509] Generated 48 decode buckets: [(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (1, 1152), (1, 1280), (1, 1408), (1, 1536), (1, 1664), (1, 1792), (1, 1920), (1, 2048), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (2, 1152), (2, 1280), (2, 1408), (2, 1536), (2, 1664), (2, 1792), (2, 1920), (2, 2048), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024), (4, 1152), (4, 1280), (4, 1408), (4, 1536), (4, 1664), (4, 1792), (4, 1920), (4, 2048)] -INFO 08-02 17:37:52 hpu_model_runner.py:430] Pre-loading model weights on hpu:0 took 14.97 GiB of device memory (14.97 GiB/94.62 GiB used) and 2.95 GiB of host memory (475.2 GiB/1007 GiB used) -INFO 08-02 17:37:52 hpu_model_runner.py:438] Wrapping in HPU Graph took 0 B of device memory (14.97 GiB/94.62 GiB used) and -252 KiB of host memory (475.2 GiB/1007 GiB used) -INFO 08-02 17:37:52 hpu_model_runner.py:442] Loading model weights took in total 14.97 GiB of device memory (14.97 GiB/94.62 GiB used) and 2.95 GiB of host memory (475.2 GiB/1007 GiB used) -INFO 08-02 17:37:54 hpu_worker.py:134] Model profiling run took 504 MiB of device memory (15.46 GiB/94.62 GiB used) and 180.9 MiB of host memory (475.4 GiB/1007 GiB used) -INFO 08-02 17:37:54 hpu_worker.py:158] Free device memory: 79.16 GiB, 39.58 GiB usable (gpu_memory_utilization=0.5), 15.83 GiB reserved for HPUGraphs (VLLM_GRAPH_RESERVED_MEM=0.4), 23.75 GiB reserved for KV cache -INFO 08-02 17:37:54 hpu_executor.py:85] # HPU blocks: 1519, # CPU blocks: 0 -INFO 08-02 17:37:54 hpu_worker.py:190] Initializing cache engine took 23.73 GiB of device memory (39.2 GiB/94.62 GiB used) and -1.238 MiB of host memory (475.4 GiB/1007 GiB used) -INFO 08-02 17:37:54 hpu_model_runner.py:1066] [Warmup][Prompt][1/24] batch_size:4 seq_len:1024 free_mem:55.43 GiB -... -INFO 08-02 17:38:22 hpu_model_runner.py:1066] [Warmup][Decode][48/48] batch_size:1 seq_len:128 free_mem:55.43 GiB -INFO 08-02 17:38:22 hpu_model_runner.py:1159] Using 15.85 GiB/55.43 GiB of free device memory for HPUGraphs, 7.923 GiB for prompt and 7.923 GiB for decode (VLLM_GRAPH_PROMPT_RATIO=0.3) -INFO 08-02 17:38:22 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][1/24] batch_size:1 seq_len:128 free_mem:55.43 GiB -... -INFO 08-02 17:38:26 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][11/24] batch_size:1 seq_len:896 free_mem:48.77 GiB -INFO 08-02 17:38:27 hpu_model_runner.py:1066] [Warmup][Graph/Decode][1/48] batch_size:4 seq_len:128 free_mem:47.51 GiB -... -INFO 08-02 17:38:41 hpu_model_runner.py:1066] [Warmup][Graph/Decode][48/48] batch_size:1 seq_len:2048 free_mem:47.35 GiB -INFO 08-02 17:38:41 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][12/24] batch_size:4 seq_len:256 free_mem:47.35 GiB -INFO 08-02 17:38:42 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][13/24] batch_size:2 seq_len:512 free_mem:45.91 GiB -INFO 08-02 17:38:42 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][14/24] batch_size:1 seq_len:1024 free_mem:44.48 GiB -INFO 08-02 17:38:43 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][15/24] batch_size:2 seq_len:640 free_mem:43.03 GiB -INFO 08-02 17:38:43 hpu_model_runner.py:1128] Graph/Prompt captured:15 (62.5%) used_mem:14.03 GiB buckets:[(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (4, 128), (4, 256)] -INFO 08-02 17:38:43 hpu_model_runner.py:1128] Graph/Decode captured:48 (100.0%) used_mem:161.9 MiB buckets:[(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (1, 1152), (1, 1280), (1, 1408), (1, 1536), (1, 1664), (1, 1792), (1, 1920), (1, 2048), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (2, 1152), (2, 1280), (2, 1408), (2, 1536), (2, 1664), (2, 1792), (2, 1920), (2, 2048), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024), (4, 1152), (4, 1280), (4, 1408), (4, 1536), (4, 1664), (4, 1792), (4, 1920), (4, 2048)] -INFO 08-02 17:38:43 hpu_model_runner.py:1206] Warmup finished in 49 secs, allocated 14.19 GiB of device memory -INFO 08-02 17:38:43 hpu_executor.py:91] init_cache_engine took 37.92 GiB of device memory (53.39 GiB/94.62 GiB used) and 57.86 MiB of host memory (475.4 GiB/1007 GiB used) -``` +??? Logs + + ```text + INFO 08-02 17:37:44 hpu_model_runner.py:493] Prompt bucket config (min, step, max_warmup) bs:[1, 32, 4], seq:[128, 128, 1024] + INFO 08-02 17:37:44 hpu_model_runner.py:499] Generated 24 prompt buckets: [(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024)] + INFO 08-02 17:37:44 hpu_model_runner.py:504] Decode bucket config (min, step, max_warmup) bs:[1, 128, 4], seq:[128, 128, 2048] + INFO 08-02 17:37:44 hpu_model_runner.py:509] Generated 48 decode buckets: [(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (1, 1152), (1, 1280), (1, 1408), (1, 1536), (1, 1664), (1, 1792), (1, 1920), (1, 2048), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (2, 1152), (2, 1280), (2, 1408), (2, 1536), (2, 1664), (2, 1792), (2, 1920), (2, 2048), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024), (4, 1152), (4, 1280), (4, 1408), (4, 1536), (4, 1664), (4, 1792), (4, 1920), (4, 2048)] + INFO 08-02 17:37:52 hpu_model_runner.py:430] Pre-loading model weights on hpu:0 took 14.97 GiB of device memory (14.97 GiB/94.62 GiB used) and 2.95 GiB of host memory (475.2 GiB/1007 GiB used) + INFO 08-02 17:37:52 hpu_model_runner.py:438] Wrapping in HPU Graph took 0 B of device memory (14.97 GiB/94.62 GiB used) and -252 KiB of host memory (475.2 GiB/1007 GiB used) + INFO 08-02 17:37:52 hpu_model_runner.py:442] Loading model weights took in total 14.97 GiB of device memory (14.97 GiB/94.62 GiB used) and 2.95 GiB of host memory (475.2 GiB/1007 GiB used) + INFO 08-02 17:37:54 hpu_worker.py:134] Model profiling run took 504 MiB of device memory (15.46 GiB/94.62 GiB used) and 180.9 MiB of host memory (475.4 GiB/1007 GiB used) + INFO 08-02 17:37:54 hpu_worker.py:158] Free device memory: 79.16 GiB, 39.58 GiB usable (gpu_memory_utilization=0.5), 15.83 GiB reserved for HPUGraphs (VLLM_GRAPH_RESERVED_MEM=0.4), 23.75 GiB reserved for KV cache + INFO 08-02 17:37:54 hpu_executor.py:85] # HPU blocks: 1519, # CPU blocks: 0 + INFO 08-02 17:37:54 hpu_worker.py:190] Initializing cache engine took 23.73 GiB of device memory (39.2 GiB/94.62 GiB used) and -1.238 MiB of host memory (475.4 GiB/1007 GiB used) + INFO 08-02 17:37:54 hpu_model_runner.py:1066] [Warmup][Prompt][1/24] batch_size:4 seq_len:1024 free_mem:55.43 GiB + ... + INFO 08-02 17:38:22 hpu_model_runner.py:1066] [Warmup][Decode][48/48] batch_size:1 seq_len:128 free_mem:55.43 GiB + INFO 08-02 17:38:22 hpu_model_runner.py:1159] Using 15.85 GiB/55.43 GiB of free device memory for HPUGraphs, 7.923 GiB for prompt and 7.923 GiB for decode (VLLM_GRAPH_PROMPT_RATIO=0.3) + INFO 08-02 17:38:22 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][1/24] batch_size:1 seq_len:128 free_mem:55.43 GiB + ... + INFO 08-02 17:38:26 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][11/24] batch_size:1 seq_len:896 free_mem:48.77 GiB + INFO 08-02 17:38:27 hpu_model_runner.py:1066] [Warmup][Graph/Decode][1/48] batch_size:4 seq_len:128 free_mem:47.51 GiB + ... + INFO 08-02 17:38:41 hpu_model_runner.py:1066] [Warmup][Graph/Decode][48/48] batch_size:1 seq_len:2048 free_mem:47.35 GiB + INFO 08-02 17:38:41 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][12/24] batch_size:4 seq_len:256 free_mem:47.35 GiB + INFO 08-02 17:38:42 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][13/24] batch_size:2 seq_len:512 free_mem:45.91 GiB + INFO 08-02 17:38:42 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][14/24] batch_size:1 seq_len:1024 free_mem:44.48 GiB + INFO 08-02 17:38:43 hpu_model_runner.py:1066] [Warmup][Graph/Prompt][15/24] batch_size:2 seq_len:640 free_mem:43.03 GiB + INFO 08-02 17:38:43 hpu_model_runner.py:1128] Graph/Prompt captured:15 (62.5%) used_mem:14.03 GiB buckets:[(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (4, 128), (4, 256)] + INFO 08-02 17:38:43 hpu_model_runner.py:1128] Graph/Decode captured:48 (100.0%) used_mem:161.9 MiB buckets:[(1, 128), (1, 256), (1, 384), (1, 512), (1, 640), (1, 768), (1, 896), (1, 1024), (1, 1152), (1, 1280), (1, 1408), (1, 1536), (1, 1664), (1, 1792), (1, 1920), (1, 2048), (2, 128), (2, 256), (2, 384), (2, 512), (2, 640), (2, 768), (2, 896), (2, 1024), (2, 1152), (2, 1280), (2, 1408), (2, 1536), (2, 1664), (2, 1792), (2, 1920), (2, 2048), (4, 128), (4, 256), (4, 384), (4, 512), (4, 640), (4, 768), (4, 896), (4, 1024), (4, 1152), (4, 1280), (4, 1408), (4, 1536), (4, 1664), (4, 1792), (4, 1920), (4, 2048)] + INFO 08-02 17:38:43 hpu_model_runner.py:1206] Warmup finished in 49 secs, allocated 14.19 GiB of device memory + INFO 08-02 17:38:43 hpu_executor.py:91] init_cache_engine took 37.92 GiB of device memory (53.39 GiB/94.62 GiB used) and 57.86 MiB of host memory (475.4 GiB/1007 GiB used) + ``` ### Recommended vLLM Parameters diff --git a/docs/getting_started/quickstart.md b/docs/getting_started/quickstart.md index 38fc9925eb51c..d02cb18bcb940 100644 --- a/docs/getting_started/quickstart.md +++ b/docs/getting_started/quickstart.md @@ -147,20 +147,22 @@ curl http://localhost:8000/v1/completions \ Since this server is compatible with OpenAI API, you can use it as a drop-in replacement for any applications using OpenAI API. For example, another way to query the server is via the `openai` Python package: -```python -from openai import OpenAI +??? Code -# Modify OpenAI's API key and API base to use vLLM's API server. -openai_api_key = "EMPTY" -openai_api_base = "http://localhost:8000/v1" -client = OpenAI( - api_key=openai_api_key, - base_url=openai_api_base, -) -completion = client.completions.create(model="Qwen/Qwen2.5-1.5B-Instruct", - prompt="San Francisco is a") -print("Completion result:", completion) -``` + ```python + from openai import OpenAI + + # Modify OpenAI's API key and API base to use vLLM's API server. + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) + completion = client.completions.create(model="Qwen/Qwen2.5-1.5B-Instruct", + prompt="San Francisco is a") + print("Completion result:", completion) + ``` A more detailed client example can be found here: @@ -184,26 +186,28 @@ curl http://localhost:8000/v1/chat/completions \ Alternatively, you can use the `openai` Python package: -```python -from openai import OpenAI -# Set OpenAI's API key and API base to use vLLM's API server. -openai_api_key = "EMPTY" -openai_api_base = "http://localhost:8000/v1" +??? Code -client = OpenAI( - api_key=openai_api_key, - base_url=openai_api_base, -) + ```python + from openai import OpenAI + # Set OpenAI's API key and API base to use vLLM's API server. + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" -chat_response = client.chat.completions.create( - model="Qwen/Qwen2.5-1.5B-Instruct", - messages=[ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Tell me a joke."}, - ] -) -print("Chat response:", chat_response) -``` + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) + + chat_response = client.chat.completions.create( + model="Qwen/Qwen2.5-1.5B-Instruct", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Tell me a joke."}, + ] + ) + print("Chat response:", chat_response) + ``` ## On Attention Backends diff --git a/docs/models/generative_models.md b/docs/models/generative_models.md index e52c5ae01cb8a..355ed506e5dfd 100644 --- a/docs/models/generative_models.md +++ b/docs/models/generative_models.md @@ -85,35 +85,37 @@ and automatically applies the model's [chat template](https://huggingface.co/doc In general, only instruction-tuned models have a chat template. Base models may perform poorly as they are not trained to respond to the chat conversation. -```python -from vllm import LLM +??? Code -llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct") -conversation = [ - { - "role": "system", - "content": "You are a helpful assistant" - }, - { - "role": "user", - "content": "Hello" - }, - { - "role": "assistant", - "content": "Hello! How can I assist you today?" - }, - { - "role": "user", - "content": "Write an essay about the importance of higher education.", - }, -] -outputs = llm.chat(conversation) + ```python + from vllm import LLM -for output in outputs: - prompt = output.prompt - generated_text = output.outputs[0].text - print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") -``` + llm = LLM(model="meta-llama/Meta-Llama-3-8B-Instruct") + conversation = [ + { + "role": "system", + "content": "You are a helpful assistant" + }, + { + "role": "user", + "content": "Hello" + }, + { + "role": "assistant", + "content": "Hello! How can I assist you today?" + }, + { + "role": "user", + "content": "Write an essay about the importance of higher education.", + }, + ] + outputs = llm.chat(conversation) + + for output in outputs: + prompt = output.prompt + generated_text = output.outputs[0].text + print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") + ``` A code example can be found here: diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md index 92557eb662843..c022980fe6e61 100644 --- a/docs/models/supported_models.md +++ b/docs/models/supported_models.md @@ -70,7 +70,10 @@ To make your model compatible with the Transformers backend, it needs: 2. `MyAttention` must use `ALL_ATTENTION_FUNCTIONS` to call attention. 3. `MyModel` must contain `_supports_attention_backend = True`. -```python title="modeling_my_model.py" +
+modeling_my_model.py + +```python from transformers import PreTrainedModel from torch import nn @@ -93,6 +96,8 @@ class MyModel(PreTrainedModel): _supports_attention_backend = True ``` +
+ Here is what happens in the background when this model is loaded: 1. The config is loaded. @@ -103,7 +108,10 @@ That's it! For your model to be compatible with vLLM's tensor parallel and/or pipeline parallel features, you must add `base_model_tp_plan` and/or `base_model_pp_plan` to your model's config class: -```python title="configuration_my_model.py" +
+configuration_my_model.py + +```python from transformers import PretrainedConfig @@ -123,6 +131,8 @@ class MyConfig(PretrainedConfig): } ``` +
+ - `base_model_tp_plan` is a `dict` that maps fully qualified layer name patterns to tensor parallel styles (currently only `"colwise"` and `"rowwise"` are supported). - `base_model_pp_plan` is a `dict` that maps direct child layer names to `tuple`s of `list`s of `str`s: * You only need to do this for layers which are not present on all pipeline stages @@ -198,6 +208,9 @@ huggingface-cli scan-cache --dir ~/.cache/huggingface/hub Use the Hugging Face CLI to interactively [delete downloaded model](https://huggingface.co/docs/huggingface_hub/guides/manage-cache#clean-your-cache) from the cache: +
+Commands + ```console # The `delete-cache` command requires extra dependencies to work with the TUI. # Please run `pip install huggingface_hub[cli]` to install them. @@ -224,6 +237,8 @@ Start deletion. Done. Deleted 1 repo(s) and 0 revision(s) for a total of 438.9M. ``` +
+ #### Using a proxy Here are some tips for loading/downloading models from Hugging Face using a proxy: @@ -601,27 +616,29 @@ Specified using `--task generate`. For the best results, we recommend using the following dependency versions (tested on A10 and L40): - ```text - # Core vLLM-compatible dependencies with Molmo accuracy setup (tested on L40) - torch==2.5.1 - torchvision==0.20.1 - transformers==4.48.1 - tokenizers==0.21.0 - tiktoken==0.7.0 - vllm==0.7.0 + ??? Dependency versions - # Optional but recommended for improved performance and stability - triton==3.1.0 - xformers==0.0.28.post3 - uvloop==0.21.0 - protobuf==5.29.3 - openai==1.60.2 - opencv-python-headless==4.11.0.86 - pillow==10.4.0 + ```text + # Core vLLM-compatible dependencies with Molmo accuracy setup (tested on L40) + torch==2.5.1 + torchvision==0.20.1 + transformers==4.48.1 + tokenizers==0.21.0 + tiktoken==0.7.0 + vllm==0.7.0 - # Installed FlashAttention (for float16 only) - flash-attn>=2.5.6 # Not used in float32, but should be documented - ``` + # Optional but recommended for improved performance and stability + triton==3.1.0 + xformers==0.0.28.post3 + uvloop==0.21.0 + protobuf==5.29.3 + openai==1.60.2 + opencv-python-headless==4.11.0.86 + pillow==10.4.0 + + # Installed FlashAttention (for float16 only) + flash-attn>=2.5.6 # Not used in float32, but should be documented + ``` **Note:** Make sure you understand the security implications of using outdated packages. diff --git a/docs/serving/integrations/langchain.md b/docs/serving/integrations/langchain.md index 14ea6a0443415..d7e2b41651c4d 100644 --- a/docs/serving/integrations/langchain.md +++ b/docs/serving/integrations/langchain.md @@ -13,19 +13,21 @@ pip install langchain langchain_community -q To run inference on a single or multiple GPUs, use `VLLM` class from `langchain`. -```python -from langchain_community.llms import VLLM +??? Code -llm = VLLM(model="mosaicml/mpt-7b", - trust_remote_code=True, # mandatory for hf models - max_new_tokens=128, - top_k=10, - top_p=0.95, - temperature=0.8, - # tensor_parallel_size=... # for distributed inference -) + ```python + from langchain_community.llms import VLLM -print(llm("What is the capital of France ?")) -``` + llm = VLLM(model="mosaicml/mpt-7b", + trust_remote_code=True, # mandatory for hf models + max_new_tokens=128, + top_k=10, + top_p=0.95, + temperature=0.8, + # tensor_parallel_size=... # for distributed inference + ) + + print(llm("What is the capital of France ?")) + ``` Please refer to this [Tutorial](https://python.langchain.com/docs/integrations/llms/vllm) for more details. diff --git a/docs/serving/openai_compatible_server.md b/docs/serving/openai_compatible_server.md index 3002b2f92e4d5..7862778464dd3 100644 --- a/docs/serving/openai_compatible_server.md +++ b/docs/serving/openai_compatible_server.md @@ -15,22 +15,24 @@ vllm serve NousResearch/Meta-Llama-3-8B-Instruct \ To call the server, in your preferred text editor, create a script that uses an HTTP client. Include any messages that you want to send to the model. Then run that script. Below is an example script using the [official OpenAI Python client](https://github.com/openai/openai-python). -```python -from openai import OpenAI -client = OpenAI( - base_url="http://localhost:8000/v1", - api_key="token-abc123", -) +??? Code -completion = client.chat.completions.create( - model="NousResearch/Meta-Llama-3-8B-Instruct", - messages=[ - {"role": "user", "content": "Hello!"} - ] -) + ```python + from openai import OpenAI + client = OpenAI( + base_url="http://localhost:8000/v1", + api_key="token-abc123", + ) -print(completion.choices[0].message) -``` + completion = client.chat.completions.create( + model="NousResearch/Meta-Llama-3-8B-Instruct", + messages=[ + {"role": "user", "content": "Hello!"} + ] + ) + + print(completion.choices[0].message) + ``` !!! tip vLLM supports some parameters that are not supported by OpenAI, `top_k` for example. @@ -147,27 +149,29 @@ with `--enable-request-id-headers`. > rather than within the vLLM layer for this reason. > See [this PR](https://github.com/vllm-project/vllm/pull/11529) for more details. -```python -completion = client.chat.completions.create( - model="NousResearch/Meta-Llama-3-8B-Instruct", - messages=[ - {"role": "user", "content": "Classify this sentiment: vLLM is wonderful!"} - ], - extra_headers={ - "x-request-id": "sentiment-classification-00001", - } -) -print(completion._request_id) +??? Code -completion = client.completions.create( - model="NousResearch/Meta-Llama-3-8B-Instruct", - prompt="A robot may not injure a human being", - extra_headers={ - "x-request-id": "completion-test", - } -) -print(completion._request_id) -``` + ```python + completion = client.chat.completions.create( + model="NousResearch/Meta-Llama-3-8B-Instruct", + messages=[ + {"role": "user", "content": "Classify this sentiment: vLLM is wonderful!"} + ], + extra_headers={ + "x-request-id": "sentiment-classification-00001", + } + ) + print(completion._request_id) + + completion = client.completions.create( + model="NousResearch/Meta-Llama-3-8B-Instruct", + prompt="A robot may not injure a human being", + extra_headers={ + "x-request-id": "completion-test", + } + ) + print(completion._request_id) + ``` ## API Reference @@ -184,15 +188,19 @@ Code example: The following [sampling parameters][sampling-params] are supported. -```python ---8<-- "vllm/entrypoints/openai/protocol.py:completion-sampling-params" -``` +??? Code + + ```python + --8<-- "vllm/entrypoints/openai/protocol.py:completion-sampling-params" + ``` The following extra parameters are supported: -```python ---8<-- "vllm/entrypoints/openai/protocol.py:completion-extra-params" -``` +??? Code + + ```python + --8<-- "vllm/entrypoints/openai/protocol.py:completion-extra-params" + ``` [](){ #chat-api } @@ -212,15 +220,19 @@ Code example: The following [sampling parameters][sampling-params] are supported. -```python ---8<-- "vllm/entrypoints/openai/protocol.py:chat-completion-sampling-params" -``` +??? Code + + ```python + --8<-- "vllm/entrypoints/openai/protocol.py:chat-completion-sampling-params" + ``` The following extra parameters are supported: -```python ---8<-- "vllm/entrypoints/openai/protocol.py:chat-completion-extra-params" -``` +??? Code + + ```python + --8<-- "vllm/entrypoints/openai/protocol.py:chat-completion-extra-params" + ``` [](){ #embeddings-api } @@ -259,29 +271,31 @@ and passing a list of `messages` in the request. Refer to the examples below for Since the request schema is not defined by OpenAI client, we post a request to the server using the lower-level `requests` library: - ```python - import requests + ??? Code - image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + ```python + import requests - response = requests.post( - "http://localhost:8000/v1/embeddings", - json={ - "model": "TIGER-Lab/VLM2Vec-Full", - "messages": [{ - "role": "user", - "content": [ - {"type": "image_url", "image_url": {"url": image_url}}, - {"type": "text", "text": "Represent the given image."}, - ], - }], - "encoding_format": "float", - }, - ) - response.raise_for_status() - response_json = response.json() - print("Embedding output:", response_json["data"][0]["embedding"]) - ``` + image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + + response = requests.post( + "http://localhost:8000/v1/embeddings", + json={ + "model": "TIGER-Lab/VLM2Vec-Full", + "messages": [{ + "role": "user", + "content": [ + {"type": "image_url", "image_url": {"url": image_url}}, + {"type": "text", "text": "Represent the given image."}, + ], + }], + "encoding_format": "float", + }, + ) + response.raise_for_status() + response_json = response.json() + print("Embedding output:", response_json["data"][0]["embedding"]) + ``` === "DSE-Qwen2-MRL" @@ -316,15 +330,19 @@ The following [pooling parameters][pooling-params] are supported. The following extra parameters are supported by default: -```python ---8<-- "vllm/entrypoints/openai/protocol.py:embedding-extra-params" -``` +??? Code + + ```python + --8<-- "vllm/entrypoints/openai/protocol.py:embedding-extra-params" + ``` For chat-like input (i.e. if `messages` is passed), these extra parameters are supported instead: -```python ---8<-- "vllm/entrypoints/openai/protocol.py:chat-embedding-extra-params" -``` +??? Code + + ```python + --8<-- "vllm/entrypoints/openai/protocol.py:chat-embedding-extra-params" + ``` [](){ #transcriptions-api } @@ -343,15 +361,19 @@ Code example: The following [sampling parameters][sampling-params] are supported. -```python ---8<-- "vllm/entrypoints/openai/protocol.py:transcription-sampling-params" -``` +??? Code + + ```python + --8<-- "vllm/entrypoints/openai/protocol.py:transcription-sampling-params" + ``` The following extra parameters are supported: -```python ---8<-- "vllm/entrypoints/openai/protocol.py:transcription-extra-params" -``` +??? Code + + ```python + --8<-- "vllm/entrypoints/openai/protocol.py:transcription-extra-params" + ``` [](){ #tokenizer-api } @@ -387,8 +409,6 @@ Code example: You can classify multiple texts by passing an array of strings: -Request: - ```bash curl -v "http://127.0.0.1:8000/classify" \ -H "Content-Type: application/json" \ @@ -401,47 +421,45 @@ curl -v "http://127.0.0.1:8000/classify" \ }' ``` -Response: +??? Response -```bash -{ - "id": "classify-7c87cac407b749a6935d8c7ce2a8fba2", - "object": "list", - "created": 1745383065, - "model": "jason9693/Qwen2.5-1.5B-apeach", - "data": [ + ```bash { - "index": 0, - "label": "Default", - "probs": [ - 0.565970778465271, - 0.4340292513370514 + "id": "classify-7c87cac407b749a6935d8c7ce2a8fba2", + "object": "list", + "created": 1745383065, + "model": "jason9693/Qwen2.5-1.5B-apeach", + "data": [ + { + "index": 0, + "label": "Default", + "probs": [ + 0.565970778465271, + 0.4340292513370514 + ], + "num_classes": 2 + }, + { + "index": 1, + "label": "Spoiled", + "probs": [ + 0.26448777318000793, + 0.7355121970176697 + ], + "num_classes": 2 + } ], - "num_classes": 2 - }, - { - "index": 1, - "label": "Spoiled", - "probs": [ - 0.26448777318000793, - 0.7355121970176697 - ], - "num_classes": 2 + "usage": { + "prompt_tokens": 20, + "total_tokens": 20, + "completion_tokens": 0, + "prompt_tokens_details": null + } } - ], - "usage": { - "prompt_tokens": 20, - "total_tokens": 20, - "completion_tokens": 0, - "prompt_tokens_details": null - } -} -``` + ``` You can also pass a string directly to the `input` field: -Request: - ```bash curl -v "http://127.0.0.1:8000/classify" \ -H "Content-Type: application/json" \ @@ -451,33 +469,33 @@ curl -v "http://127.0.0.1:8000/classify" \ }' ``` -Response: +??? Response -```bash -{ - "id": "classify-9bf17f2847b046c7b2d5495f4b4f9682", - "object": "list", - "created": 1745383213, - "model": "jason9693/Qwen2.5-1.5B-apeach", - "data": [ + ```bash { - "index": 0, - "label": "Default", - "probs": [ - 0.565970778465271, - 0.4340292513370514 + "id": "classify-9bf17f2847b046c7b2d5495f4b4f9682", + "object": "list", + "created": 1745383213, + "model": "jason9693/Qwen2.5-1.5B-apeach", + "data": [ + { + "index": 0, + "label": "Default", + "probs": [ + 0.565970778465271, + 0.4340292513370514 + ], + "num_classes": 2 + } ], - "num_classes": 2 + "usage": { + "prompt_tokens": 10, + "total_tokens": 10, + "completion_tokens": 0, + "prompt_tokens_details": null + } } - ], - "usage": { - "prompt_tokens": 10, - "total_tokens": 10, - "completion_tokens": 0, - "prompt_tokens_details": null - } -} -``` + ``` #### Extra parameters @@ -508,8 +526,6 @@ Code example: You can pass a string to both `text_1` and `text_2`, forming a single sentence pair. -Request: - ```bash curl -X 'POST' \ 'http://127.0.0.1:8000/score' \ @@ -523,24 +539,24 @@ curl -X 'POST' \ }' ``` -Response: +??? Response -```bash -{ - "id": "score-request-id", - "object": "list", - "created": 693447, - "model": "BAAI/bge-reranker-v2-m3", - "data": [ + ```bash { - "index": 0, - "object": "score", - "score": 1 + "id": "score-request-id", + "object": "list", + "created": 693447, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": 1 + } + ], + "usage": {} } - ], - "usage": {} -} -``` + ``` #### Batch inference @@ -548,95 +564,95 @@ You can pass a string to `text_1` and a list to `text_2`, forming multiple sente where each pair is built from `text_1` and a string in `text_2`. The total number of pairs is `len(text_2)`. -Request: +??? Request -```bash -curl -X 'POST' \ - 'http://127.0.0.1:8000/score' \ - -H 'accept: application/json' \ - -H 'Content-Type: application/json' \ - -d '{ - "model": "BAAI/bge-reranker-v2-m3", - "text_1": "What is the capital of France?", - "text_2": [ - "The capital of Brazil is Brasilia.", - "The capital of France is Paris." - ] -}' -``` + ```bash + curl -X 'POST' \ + 'http://127.0.0.1:8000/score' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "model": "BAAI/bge-reranker-v2-m3", + "text_1": "What is the capital of France?", + "text_2": [ + "The capital of Brazil is Brasilia.", + "The capital of France is Paris." + ] + }' + ``` -Response: +??? Response -```bash -{ - "id": "score-request-id", - "object": "list", - "created": 693570, - "model": "BAAI/bge-reranker-v2-m3", - "data": [ + ```bash { - "index": 0, - "object": "score", - "score": 0.001094818115234375 - }, - { - "index": 1, - "object": "score", - "score": 1 + "id": "score-request-id", + "object": "list", + "created": 693570, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": 0.001094818115234375 + }, + { + "index": 1, + "object": "score", + "score": 1 + } + ], + "usage": {} } - ], - "usage": {} -} -``` + ``` You can pass a list to both `text_1` and `text_2`, forming multiple sentence pairs where each pair is built from a string in `text_1` and the corresponding string in `text_2` (similar to `zip()`). The total number of pairs is `len(text_2)`. -Request: +??? Request -```bash -curl -X 'POST' \ - 'http://127.0.0.1:8000/score' \ - -H 'accept: application/json' \ - -H 'Content-Type: application/json' \ - -d '{ - "model": "BAAI/bge-reranker-v2-m3", - "encoding_format": "float", - "text_1": [ - "What is the capital of Brazil?", - "What is the capital of France?" - ], - "text_2": [ - "The capital of Brazil is Brasilia.", - "The capital of France is Paris." - ] -}' -``` + ```bash + curl -X 'POST' \ + 'http://127.0.0.1:8000/score' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "model": "BAAI/bge-reranker-v2-m3", + "encoding_format": "float", + "text_1": [ + "What is the capital of Brazil?", + "What is the capital of France?" + ], + "text_2": [ + "The capital of Brazil is Brasilia.", + "The capital of France is Paris." + ] + }' + ``` -Response: +??? Response -```bash -{ - "id": "score-request-id", - "object": "list", - "created": 693447, - "model": "BAAI/bge-reranker-v2-m3", - "data": [ + ```bash { - "index": 0, - "object": "score", - "score": 1 - }, - { - "index": 1, - "object": "score", - "score": 1 + "id": "score-request-id", + "object": "list", + "created": 693447, + "model": "BAAI/bge-reranker-v2-m3", + "data": [ + { + "index": 0, + "object": "score", + "score": 1 + }, + { + "index": 1, + "object": "score", + "score": 1 + } + ], + "usage": {} } - ], - "usage": {} -} -``` + ``` #### Extra parameters @@ -675,51 +691,51 @@ Code example: Note that the `top_n` request parameter is optional and will default to the length of the `documents` field. Result documents will be sorted by relevance, and the `index` property can be used to determine original order. -Request: +??? Request -```bash -curl -X 'POST' \ - 'http://127.0.0.1:8000/v1/rerank' \ - -H 'accept: application/json' \ - -H 'Content-Type: application/json' \ - -d '{ - "model": "BAAI/bge-reranker-base", - "query": "What is the capital of France?", - "documents": [ - "The capital of Brazil is Brasilia.", - "The capital of France is Paris.", - "Horses and cows are both animals" - ] -}' -``` + ```bash + curl -X 'POST' \ + 'http://127.0.0.1:8000/v1/rerank' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "model": "BAAI/bge-reranker-base", + "query": "What is the capital of France?", + "documents": [ + "The capital of Brazil is Brasilia.", + "The capital of France is Paris.", + "Horses and cows are both animals" + ] + }' + ``` -Response: +??? Response -```bash -{ - "id": "rerank-fae51b2b664d4ed38f5969b612edff77", - "model": "BAAI/bge-reranker-base", - "usage": { - "total_tokens": 56 - }, - "results": [ + ```bash { - "index": 1, - "document": { - "text": "The capital of France is Paris." + "id": "rerank-fae51b2b664d4ed38f5969b612edff77", + "model": "BAAI/bge-reranker-base", + "usage": { + "total_tokens": 56 }, - "relevance_score": 0.99853515625 - }, - { - "index": 0, - "document": { - "text": "The capital of Brazil is Brasilia." - }, - "relevance_score": 0.0005860328674316406 + "results": [ + { + "index": 1, + "document": { + "text": "The capital of France is Paris." + }, + "relevance_score": 0.99853515625 + }, + { + "index": 0, + "document": { + "text": "The capital of Brazil is Brasilia." + }, + "relevance_score": 0.0005860328674316406 + } + ] } - ] -} -``` + ``` #### Extra parameters diff --git a/docs/usage/metrics.md b/docs/usage/metrics.md index 6603aa83b4af7..988b9a5517250 100644 --- a/docs/usage/metrics.md +++ b/docs/usage/metrics.md @@ -12,28 +12,32 @@ vllm serve unsloth/Llama-3.2-1B-Instruct Then query the endpoint to get the latest metrics from the server: -```console -$ curl http://0.0.0.0:8000/metrics +??? Output -# HELP vllm:iteration_tokens_total Histogram of number of tokens per engine_step. -# TYPE vllm:iteration_tokens_total histogram -vllm:iteration_tokens_total_sum{model_name="unsloth/Llama-3.2-1B-Instruct"} 0.0 -vllm:iteration_tokens_total_bucket{le="1.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 -vllm:iteration_tokens_total_bucket{le="8.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 -vllm:iteration_tokens_total_bucket{le="16.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 -vllm:iteration_tokens_total_bucket{le="32.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 -vllm:iteration_tokens_total_bucket{le="64.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 -vllm:iteration_tokens_total_bucket{le="128.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 -vllm:iteration_tokens_total_bucket{le="256.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 -vllm:iteration_tokens_total_bucket{le="512.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 -... -``` + ```console + $ curl http://0.0.0.0:8000/metrics + + # HELP vllm:iteration_tokens_total Histogram of number of tokens per engine_step. + # TYPE vllm:iteration_tokens_total histogram + vllm:iteration_tokens_total_sum{model_name="unsloth/Llama-3.2-1B-Instruct"} 0.0 + vllm:iteration_tokens_total_bucket{le="1.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="8.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="16.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="32.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="64.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="128.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="256.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + vllm:iteration_tokens_total_bucket{le="512.0",model_name="unsloth/Llama-3.2-1B-Instruct"} 3.0 + ... + ``` The following metrics are exposed: -```python ---8<-- "vllm/engine/metrics.py:metrics-definitions" -``` +??? Code + + ```python + --8<-- "vllm/engine/metrics.py:metrics-definitions" + ``` Note: when metrics are deprecated in version `X.Y`, they are hidden in version `X.Y+1` but can be re-enabled using the `--show-hidden-metrics-for-version=X.Y` escape hatch, diff --git a/docs/usage/troubleshooting.md b/docs/usage/troubleshooting.md index e9ab425a1d063..9403abfad85fb 100644 --- a/docs/usage/troubleshooting.md +++ b/docs/usage/troubleshooting.md @@ -60,68 +60,70 @@ To identify the particular CUDA operation that causes the error, you can add `-- If GPU/CPU communication cannot be established, you can use the following Python script and follow the instructions below to confirm whether the GPU/CPU communication is working correctly. -```python -# Test PyTorch NCCL -import torch -import torch.distributed as dist -dist.init_process_group(backend="nccl") -local_rank = dist.get_rank() % torch.cuda.device_count() -torch.cuda.set_device(local_rank) -data = torch.FloatTensor([1,] * 128).to("cuda") -dist.all_reduce(data, op=dist.ReduceOp.SUM) -torch.cuda.synchronize() -value = data.mean().item() -world_size = dist.get_world_size() -assert value == world_size, f"Expected {world_size}, got {value}" +??? Code -print("PyTorch NCCL is successful!") + ```python + # Test PyTorch NCCL + import torch + import torch.distributed as dist + dist.init_process_group(backend="nccl") + local_rank = dist.get_rank() % torch.cuda.device_count() + torch.cuda.set_device(local_rank) + data = torch.FloatTensor([1,] * 128).to("cuda") + dist.all_reduce(data, op=dist.ReduceOp.SUM) + torch.cuda.synchronize() + value = data.mean().item() + world_size = dist.get_world_size() + assert value == world_size, f"Expected {world_size}, got {value}" -# Test PyTorch GLOO -gloo_group = dist.new_group(ranks=list(range(world_size)), backend="gloo") -cpu_data = torch.FloatTensor([1,] * 128) -dist.all_reduce(cpu_data, op=dist.ReduceOp.SUM, group=gloo_group) -value = cpu_data.mean().item() -assert value == world_size, f"Expected {world_size}, got {value}" + print("PyTorch NCCL is successful!") -print("PyTorch GLOO is successful!") + # Test PyTorch GLOO + gloo_group = dist.new_group(ranks=list(range(world_size)), backend="gloo") + cpu_data = torch.FloatTensor([1,] * 128) + dist.all_reduce(cpu_data, op=dist.ReduceOp.SUM, group=gloo_group) + value = cpu_data.mean().item() + assert value == world_size, f"Expected {world_size}, got {value}" -if world_size <= 1: - exit() + print("PyTorch GLOO is successful!") -# Test vLLM NCCL, with cuda graph -from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator + if world_size <= 1: + exit() -pynccl = PyNcclCommunicator(group=gloo_group, device=local_rank) -# pynccl is enabled by default for 0.6.5+, -# but for 0.6.4 and below, we need to enable it manually. -# keep the code for backward compatibility when because people -# prefer to read the latest documentation. -pynccl.disabled = False + # Test vLLM NCCL, with cuda graph + from vllm.distributed.device_communicators.pynccl import PyNcclCommunicator + + pynccl = PyNcclCommunicator(group=gloo_group, device=local_rank) + # pynccl is enabled by default for 0.6.5+, + # but for 0.6.4 and below, we need to enable it manually. + # keep the code for backward compatibility when because people + # prefer to read the latest documentation. + pynccl.disabled = False + + s = torch.cuda.Stream() + with torch.cuda.stream(s): + data.fill_(1) + out = pynccl.all_reduce(data, stream=s) + value = out.mean().item() + assert value == world_size, f"Expected {world_size}, got {value}" + + print("vLLM NCCL is successful!") + + g = torch.cuda.CUDAGraph() + with torch.cuda.graph(cuda_graph=g, stream=s): + out = pynccl.all_reduce(data, stream=torch.cuda.current_stream()) -s = torch.cuda.Stream() -with torch.cuda.stream(s): data.fill_(1) - out = pynccl.all_reduce(data, stream=s) + g.replay() + torch.cuda.current_stream().synchronize() value = out.mean().item() assert value == world_size, f"Expected {world_size}, got {value}" -print("vLLM NCCL is successful!") + print("vLLM NCCL with cuda graph is successful!") -g = torch.cuda.CUDAGraph() -with torch.cuda.graph(cuda_graph=g, stream=s): - out = pynccl.all_reduce(data, stream=torch.cuda.current_stream()) - -data.fill_(1) -g.replay() -torch.cuda.current_stream().synchronize() -value = out.mean().item() -assert value == world_size, f"Expected {world_size}, got {value}" - -print("vLLM NCCL with cuda graph is successful!") - -dist.destroy_process_group(gloo_group) -dist.destroy_process_group() -``` + dist.destroy_process_group(gloo_group) + dist.destroy_process_group() + ``` If you are testing with a single node, adjust `--nproc-per-node` to the number of GPUs you want to use: @@ -165,25 +167,27 @@ WARNING 12-11 14:50:37 multiproc_worker_utils.py:281] CUDA was previously or an error from Python that looks like this: -```console -RuntimeError: - An attempt has been made to start a new process before the - current process has finished its bootstrapping phase. +??? Logs - This probably means that you are not using fork to start your - child processes and you have forgotten to use the proper idiom - in the main module: + ```console + RuntimeError: + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. - if __name__ == '__main__': - freeze_support() - ... + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: - The "freeze_support()" line can be omitted if the program - is not going to be frozen to produce an executable. + if __name__ == '__main__': + freeze_support() + ... - To fix this issue, refer to the "Safe importing of main module" - section in https://docs.python.org/3/library/multiprocessing.html -``` + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable. + + To fix this issue, refer to the "Safe importing of main module" + section in https://docs.python.org/3/library/multiprocessing.html + ``` then you must update your Python code to guard usage of `vllm` behind a `if __name__ == '__main__':` block. For example, instead of this: @@ -207,20 +211,22 @@ if __name__ == '__main__': vLLM heavily depends on `torch.compile` to optimize the model for better performance, which introduces the dependency on the `torch.compile` functionality and the `triton` library. By default, we use `torch.compile` to [optimize some functions](https://github.com/vllm-project/vllm/pull/10406) in the model. Before running vLLM, you can check if `torch.compile` is working as expected by running the following script: -```python -import torch +??? Code -@torch.compile -def f(x): - # a simple function to test torch.compile - x = x + 1 - x = x * 2 - x = x.sin() - return x + ```python + import torch -x = torch.randn(4, 4).cuda() -print(f(x)) -``` + @torch.compile + def f(x): + # a simple function to test torch.compile + x = x + 1 + x = x * 2 + x = x.sin() + return x + + x = torch.randn(4, 4).cuda() + print(f(x)) + ``` If it raises errors from `torch/_inductor` directory, usually it means you have a custom `triton` library that is not compatible with the version of PyTorch you are using. See [this issue](https://github.com/vllm-project/vllm/issues/12219) for example. diff --git a/docs/usage/usage_stats.md b/docs/usage/usage_stats.md index 750cba7ed9ce2..78d2a6784bc5a 100644 --- a/docs/usage/usage_stats.md +++ b/docs/usage/usage_stats.md @@ -10,36 +10,38 @@ The list of data collected by the latest version of vLLM can be found here: Date: Mon, 23 Jun 2025 07:41:50 +0200 Subject: [PATCH 086/211] [P/D][NixlConnector] Support `tp_size > num_kv_heads` deployments (#19691) Signed-off-by: NickLucche Co-authored-by: Nick Hill --- .../kv_transfer/kv_connector/v1/nixl_connector.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index bdab4850d4c19..94f757e007af7 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -22,6 +22,7 @@ from vllm.distributed.kv_transfer.kv_connector.v1.base import ( from vllm.distributed.parallel_state import ( get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, get_tp_group) +from vllm.distributed.utils import divide from vllm.logger import init_logger from vllm.platforms import _Backend from vllm.utils import make_zmq_path, make_zmq_socket, round_down @@ -679,11 +680,15 @@ class NixlConnectorWorker: # Number of D TP workers reading from a single P TP worker. This is # 1 when P and D `--tensor-parallel-size` match. - assert self._tp_size[self.engine_id] % self._tp_size[engine_id] == 0, ( - "Local TP size must be divisible by remote TP size.") - tp_ratio = self._tp_size[self.engine_id] // self._tp_size[engine_id] + tp_ratio = divide(self._tp_size[self.engine_id], + self._tp_size[engine_id]) assert tp_ratio > 0, "Decode TP cannot be smaller than prefill TP" - if self.use_mla: + + # Handle tp_size>num_kv_heads: replicate KV cache. + total_num_kv_heads = self.model_config.get_total_num_kv_heads() + is_kv_replicated = self._tp_size[engine_id] // total_num_kv_heads >= 1 + + if self.use_mla or is_kv_replicated: # With MLA the only difference is in the number of blocks. remote_block_size = nixl_agent_meta.block_len // ( self.slot_size_bytes) @@ -720,7 +725,7 @@ class NixlConnectorWorker: self.kv_caches_base_addr[ engine_id] = nixl_agent_meta.kv_caches_base_addr rank_offset = self.tp_rank % tp_ratio * self.block_len \ - if not self.use_mla else 0 + if not (self.use_mla or is_kv_replicated) else 0 # Register all remote blocks, but only the corresponding kv heads. for base_addr in nixl_agent_meta.kv_caches_base_addr: for block_id in range(nixl_agent_meta.num_blocks): From 1bcd15edc71422e4eb4525f5e07903d73187da17 Mon Sep 17 00:00:00 2001 From: lkchen Date: Sun, 22 Jun 2025 22:41:53 -0700 Subject: [PATCH 087/211] [BugFix][P/D] Fix for cases where _recving_transfers can be cleaned up when *all* transfer done (#19874) Signed-off-by: Linkun Chen --- .../kv_connector/unit/test_nixl_connector.py | 174 +++++++++++++++++- .../kv_connector/v1/nixl_connector.py | 9 +- 2 files changed, 179 insertions(+), 4 deletions(-) diff --git a/tests/v1/kv_connector/unit/test_nixl_connector.py b/tests/v1/kv_connector/unit/test_nixl_connector.py index a0bcb8f602e11..b00be7b83e12b 100644 --- a/tests/v1/kv_connector/unit/test_nixl_connector.py +++ b/tests/v1/kv_connector/unit/test_nixl_connector.py @@ -1,8 +1,23 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import time +import uuid +from collections import defaultdict +from typing import Optional +from unittest.mock import patch + +import pytest + +try: + from nixl._api import nixl_agent as NixlWrapper +except ImportError: + NixlWrapper = None + from vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector import ( - NixlConnectorMetadata) + KVConnectorRole, NixlAgentMetadata, NixlConnector, NixlConnectorMetadata, + NixlConnectorWorker) +from vllm.forward_context import ForwardContext from .utils import create_request, create_scheduler, create_vllm_config @@ -72,3 +87,160 @@ def test_prompt_less_than_block_size(): # This request should be scheduled regularly. assert len(scheduler_output.scheduled_new_reqs) == 1 + + +class FakeNixlWrapper: + """Mock implementation of NixlWrapper for testing. + + We don't inherit from NixlWrapper because NixlWrapper could be None. + """ + + AGENT_METADATA = b"fake_agent_metadata" + REMOTE_AGENT_NAME = "remote_agent" + + def __init__(self, agent_name: str, *args, **kwargs): + self._cycles_before_xfer_done = 0 + self._check_xfer_state_cycles: defaultdict[int, int] = defaultdict( + lambda: 0) + + def get_reg_descs(self, caches_data, memory_type: str) -> list: + return [str(uuid.uuid4()) for _ in caches_data] + + def register_memory(self, descs) -> None: + pass + + def get_xfer_descs(self, blocks_data, memory_type: str) -> list: + return [str(uuid.uuid4()) for _ in blocks_data] + + def prep_xfer_dlist(self, agent_name: str, descs: list) -> int: + return uuid.uuid4().int + + def get_agent_metadata(self) -> bytes: + return self.AGENT_METADATA + + def add_remote_agent(self, agent_metadata: bytes) -> str: + return self.REMOTE_AGENT_NAME + + def get_new_notifs(self) -> dict[str, list[bytes]]: + # Used to collect done_sending, which we don't test yet. + return {} + + def check_xfer_state(self, handle: int) -> str: + if self._check_xfer_state_cycles[ + handle] >= self._cycles_before_xfer_done: + return "DONE" + self._check_xfer_state_cycles[handle] += 1 + return "PROC" + + def release_xfer_handle(self, handle: int) -> None: + pass + + def send_notif(self, agent_name: str, notif_msg: bytes) -> None: + pass + + def make_prepped_xfer(self, + xfer_type: str, + local_xfer_side_handle: int, + local_block_descs_ids: list[int], + remote_xfer_side_handle: int, + remote_block_descs_ids: list[int], + notif_msg: Optional[bytes] = None) -> int: + return uuid.uuid4().int + + def transfer(self, handle: int) -> str: + return "PROC" + + ############################################################ + # Follow are for changing the behavior during testing. + ############################################################ + + def set_cycles_before_xfer_done(self, cycles: int): + """Set the number of cycles before a transfer is considered done.""" + self._cycles_before_xfer_done = cycles + + +class FakeNixlConnectorWorker(NixlConnectorWorker): + + REMOTE_ENGINE_ID = "remote_engine" + + def __init__(self, *args, hand_shake_latency: float = 1.8, **kwargs): + super().__init__(*args, **kwargs) + self._hand_shake_latency = hand_shake_latency + + def _nixl_handshake(self, host: str, port: int): + # Mimic slow _nixl_handshake, as well as bypass zmq communication. + time.sleep(self._hand_shake_latency) + # These should've been done in register_kv_caches(), called by + # gpu_model_runner. Here we just hardcode some dummy values. + self.slot_size_bytes = 4096 + self.block_len = self.slot_size_bytes * self.block_size + self.num_blocks = 1 + self.dst_num_blocks[self.engine_id] = self.num_blocks + + self.add_remote_agent( + NixlAgentMetadata( + engine_id=self.REMOTE_ENGINE_ID, + agent_metadata=FakeNixlWrapper.AGENT_METADATA, + kv_caches_base_addr=[0], + num_blocks=1, + tp_size=1, + block_len=self.block_len, + attn_backend_name=self.backend_name, + )) + + +@pytest.mark.skipif(NixlWrapper is None, reason="nixl not installed") +@patch( + "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper", + FakeNixlWrapper) +def test_multi_xfer_one_engine( + # dist_init is a fixture that initializes the distributed environment. + dist_init): + """Test case where multiple xfers are initiated to the same engine. + + This test triggers the connector to load remote KV for the same + `request_id`. The transfer is not done immediately due to + `set_cycles_before_xfer_done`, so there is a state where there are multiple + transfer states for the same `request_id`, and `get_finished` should handle + it correctly (wait for all transfers to be done). + """ + vllm_config = create_vllm_config() + + request_id = "req_id" + + # Test worker role in decode server. + connector = NixlConnector(vllm_config, KVConnectorRole.WORKER) + connector.connector_worker = FakeNixlConnectorWorker(vllm_config, + connector.engine_id, + hand_shake_latency=0) + assert isinstance(connector.connector_worker.nixl_wrapper, FakeNixlWrapper) + connector.connector_worker.nixl_wrapper.set_cycles_before_xfer_done(3) + for i in range(4): + metadata = NixlConnectorMetadata() + metadata.add_new_req(request_id=request_id, + local_block_ids=[i + 1, i + 2, i + 3], + kv_transfer_params={ + "remote_block_ids": [i + 4, i + 5, i + 6], + "remote_engine_id": + FakeNixlConnectorWorker.REMOTE_ENGINE_ID, + "remote_host": "localhost", + "remote_port": 1234, + }) + connector.bind_connector_metadata(metadata) + + dummy_ctx = ForwardContext( + no_compile_layers={}, + attn_metadata={}, + virtual_engine=0, + ) + _before_load = time.perf_counter() + connector.start_load_kv(dummy_ctx) + _after_load = time.perf_counter() + assert _after_load - _before_load < 0.1, "start_load_kv took " \ + f"{_after_load - _before_load} seconds" + + while True: + _, done_recving = connector.get_finished(finished_req_ids=set()) + if len(done_recving) > 0: + assert request_id in done_recving + break diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index 94f757e007af7..2d80cbf2b24f6 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -841,17 +841,20 @@ class NixlConnectorWorker: """ done_req_ids: set[str] = set() for req_id, handles in list(transfers.items()): - for handle, xfer_stime in handles: + in_progress = False + for handle, _xfer_stime in handles: xfer_state = self.nixl_wrapper.check_xfer_state(handle) if xfer_state == "DONE": self.nixl_wrapper.release_xfer_handle(handle) - done_req_ids.add(req_id) - del transfers[req_id] elif xfer_state == "PROC": + in_progress = True continue else: raise RuntimeError("Transfer failed with state %s", xfer_state) + if not in_progress: + done_req_ids.add(req_id) + del transfers[req_id] return done_req_ids def start_load_kv(self, metadata: NixlConnectorMetadata): From 5111642a6fc944f69dca9e890c29619ad3405f3b Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Mon, 23 Jun 2025 17:31:06 +0800 Subject: [PATCH 088/211] [Doc] Update V1 status for decoder-only embedding models (#19952) Signed-off-by: Isotr0py <2037008807@qq.com> --- docs/models/supported_models.md | 19 ++++++++++--------- vllm/model_executor/models/qwen2_rm.py | 26 ++++++++------------------ 2 files changed, 18 insertions(+), 27 deletions(-) diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md index c022980fe6e61..bcd0ead0c34ad 100644 --- a/docs/models/supported_models.md +++ b/docs/models/supported_models.md @@ -407,15 +407,15 @@ Specified using `--task embed`. | Architecture | Models | Example HF Models | [LoRA][lora-adapter] | [PP][distributed-serving] | [V1](gh-issue:8779) | |--------------------------------------------------------|---------------------|---------------------------------------------------------------------------------------------------------------------|----------------------|---------------------------|-----------------------| | `BertModel` | BERT-based | `BAAI/bge-base-en-v1.5`, `Snowflake/snowflake-arctic-embed-xs`, etc. | | | | -| `Gemma2Model` | Gemma 2-based | `BAAI/bge-multilingual-gemma2`, etc. | ✅︎ | | | +| `Gemma2Model` | Gemma 2-based | `BAAI/bge-multilingual-gemma2`, etc. | ✅︎ | | ✅︎ | | `GritLM` | GritLM | `parasail-ai/GritLM-7B-vllm`. | ✅︎ | ✅︎ | | | `GteModel` | Arctic-Embed-2.0-M | `Snowflake/snowflake-arctic-embed-m-v2.0`. | ︎ | | | | `GteNewModel` | mGTE-TRM (see note) | `Alibaba-NLP/gte-multilingual-base`, etc. | ︎ | ︎ | | | `ModernBertModel` | ModernBERT-based | `Alibaba-NLP/gte-modernbert-base`, etc. | ︎ | ︎ | | | `NomicBertModel` | Nomic BERT | `nomic-ai/nomic-embed-text-v1`, `nomic-ai/nomic-embed-text-v2-moe`, `Snowflake/snowflake-arctic-embed-m-long`, etc. | ︎ | ︎ | | -| `LlamaModel`, `LlamaForCausalLM`, `MistralModel`, etc. | Llama-based | `intfloat/e5-mistral-7b-instruct`, etc. | ✅︎ | ✅︎ | | -| `Qwen2Model`, `Qwen2ForCausalLM` | Qwen2-based | `ssmits/Qwen2-7B-Instruct-embed-base` (see note), `Alibaba-NLP/gte-Qwen2-7B-instruct` (see note), etc. | ✅︎ | ✅︎ | | -| `Qwen3Model`, `Qwen3ForCausalLM` | Qwen3-based | `Qwen/Qwen3-Embedding-0.6B`, etc. | ✅︎ | ✅︎ | | +| `LlamaModel`, `LlamaForCausalLM`, `MistralModel`, etc. | Llama-based | `intfloat/e5-mistral-7b-instruct`, etc. | ✅︎ | ✅︎ | ✅︎ | +| `Qwen2Model`, `Qwen2ForCausalLM` | Qwen2-based | `ssmits/Qwen2-7B-Instruct-embed-base` (see note), `Alibaba-NLP/gte-Qwen2-7B-instruct` (see note), etc. | ✅︎ | ✅︎ | ✅︎ | +| `Qwen3Model`, `Qwen3ForCausalLM` | Qwen3-based | `Qwen/Qwen3-Embedding-0.6B`, etc. | ✅︎ | ✅︎ | ✅︎ | | `RobertaModel`, `RobertaForMaskedLM` | RoBERTa-based | `sentence-transformers/all-roberta-large-v1`, etc. | | | | !!! note @@ -442,9 +442,10 @@ Specified using `--task reward`. | Architecture | Models | Example HF Models | [LoRA][lora-adapter] | [PP][distributed-serving] | [V1](gh-issue:8779) | |---------------------------|-----------------|------------------------------------------------------------------------|------------------------|-----------------------------|-----------------------| -| `InternLM2ForRewardModel` | InternLM2-based | `internlm/internlm2-1_8b-reward`, `internlm/internlm2-7b-reward`, etc. | ✅︎ | ✅︎ | | -| `LlamaForCausalLM` | Llama-based | `peiyi9979/math-shepherd-mistral-7b-prm`, etc. | ✅︎ | ✅︎ | | -| `Qwen2ForRewardModel` | Qwen2-based | `Qwen/Qwen2.5-Math-RM-72B`, etc. | ✅︎ | ✅︎ | | +| `InternLM2ForRewardModel` | InternLM2-based | `internlm/internlm2-1_8b-reward`, `internlm/internlm2-7b-reward`, etc. | ✅︎ | ✅︎ | ✅︎ | +| `LlamaForCausalLM` | Llama-based | `peiyi9979/math-shepherd-mistral-7b-prm`, etc. | ✅︎ | ✅︎ | ✅︎ | +| `Qwen2ForRewardModel` | Qwen2-based | `Qwen/Qwen2.5-Math-RM-72B`, etc. | ✅︎ | ✅︎ | ✅︎ | +| `Qwen2ForProcessRewardModel` | Qwen2-based | `Qwen/Qwen2.5-Math-PRM-7B`, etc. | ✅︎ | ✅︎ | ✅︎ | If your model is not in the above list, we will try to automatically convert the model using [as_reward_model][vllm.model_executor.models.adapters.as_reward_model]. By default, we return the hidden states of each token directly. @@ -460,7 +461,7 @@ Specified using `--task classify`. | Architecture | Models | Example HF Models | [LoRA][lora-adapter] | [PP][distributed-serving] | [V1](gh-issue:8779) | |----------------------------------|----------|----------------------------------------|------------------------|-----------------------------|-----------------------| | `JambaForSequenceClassification` | Jamba | `ai21labs/Jamba-tiny-reward-dev`, etc. | ✅︎ | ✅︎ | | -| `GPT2ForSequenceClassification` | GPT2 | `nie3e/sentiment-polish-gpt2-small` | | | | +| `GPT2ForSequenceClassification` | GPT2 | `nie3e/sentiment-polish-gpt2-small` | | | ✅︎ | If your model is not in the above list, we will try to automatically convert the model using [as_classification_model][vllm.model_executor.models.adapters.as_classification_model]. By default, the class probabilities are extracted from the softmaxed hidden state corresponding to the last token. @@ -471,7 +472,7 @@ Specified using `--task score`. | Architecture | Models | Example HF Models | [V1](gh-issue:8779) | |---------------------------------------|-------------------|--------------------------------------------------------------------------------------|-----------------------| | `BertForSequenceClassification` | BERT-based | `cross-encoder/ms-marco-MiniLM-L-6-v2`, etc. | | -| `Qwen3ForSequenceClassification` | Qwen3-based | `tomaarsen/Qwen3-Reranker-0.6B-seq-cls`, `Qwen/Qwen3-Reranker-0.6B` (see note), etc. | | +| `Qwen3ForSequenceClassification` | Qwen3-based | `tomaarsen/Qwen3-Reranker-0.6B-seq-cls`, `Qwen/Qwen3-Reranker-0.6B` (see note), etc. | ✅︎ | | `RobertaForSequenceClassification` | RoBERTa-based | `cross-encoder/quora-roberta-base`, etc. | | | `XLMRobertaForSequenceClassification` | XLM-RoBERTa-based | `BAAI/bge-reranker-v2-m3`, etc. | | diff --git a/vllm/model_executor/models/qwen2_rm.py b/vllm/model_executor/models/qwen2_rm.py index 76d7ecdd1272b..9a85080816783 100644 --- a/vllm/model_executor/models/qwen2_rm.py +++ b/vllm/model_executor/models/qwen2_rm.py @@ -19,24 +19,12 @@ from vllm.model_executor.layers.pooler import Pooler, PoolingType, SimplePooler from vllm.model_executor.pooling_metadata import PoolingMetadata from vllm.sequence import IntermediateTensors, PoolerOutput -from .interfaces import SupportsLoRA, SupportsPP, SupportsV0Only +from .interfaces import SupportsLoRA, SupportsPP from .qwen2 import Qwen2Model from .utils import AutoWeightsLoader, maybe_prefix -class ReLU(nn.Module): - - def __init__(self): - super().__init__() - self.activation = nn.ReLU() - - def forward(self, input): - input, _ = input - return self.activation(input) - - -class Qwen2RewardBaseModel(nn.Module, SupportsLoRA, SupportsPP, - SupportsV0Only): +class Qwen2RewardBaseModel(nn.Module, SupportsLoRA, SupportsPP): packed_modules_mapping = { "qkv_proj": [ "q_proj", @@ -65,11 +53,13 @@ class Qwen2RewardBaseModel(nn.Module, SupportsLoRA, SupportsPP, self.score = nn.Sequential( ColumnParallelLinear(config.hidden_size, config.hidden_size, - quant_config=quant_config), - ReLU(), + quant_config=quant_config, + return_bias=False), + nn.ReLU(), RowParallelLinear(config.hidden_size, config.num_labels, - quant_config=quant_config), + quant_config=quant_config, + return_bias=False), ) self._pooler: SimplePooler self.make_empty_intermediate_tensors = ( @@ -87,7 +77,7 @@ class Qwen2RewardBaseModel(nn.Module, SupportsLoRA, SupportsPP, ) -> Union[torch.Tensor, IntermediateTensors]: hidden_states = self.model(input_ids, positions, intermediate_tensors, inputs_embeds) - logits, _ = self.score(hidden_states) + logits = self.score(hidden_states) return logits def pooler( From b82e0f82cb24bc2cfccbd816a46f535a8ff64eda Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Mon, 23 Jun 2025 18:54:16 +0800 Subject: [PATCH 089/211] [doc] use MkDocs collapsible blocks - supplement (#19973) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- docs/design/v1/p2p_nccl_connector.md | 356 ++++++++++++----------- docs/design/v1/torch_compile.md | 80 ++--- examples/others/logging_configuration.md | 110 +++---- 3 files changed, 286 insertions(+), 260 deletions(-) diff --git a/docs/design/v1/p2p_nccl_connector.md b/docs/design/v1/p2p_nccl_connector.md index c24b53763709c..32cdaacf058ae 100644 --- a/docs/design/v1/p2p_nccl_connector.md +++ b/docs/design/v1/p2p_nccl_connector.md @@ -61,23 +61,25 @@ To address the above issues, I have designed and developed a local Tensor memory # Install vLLM -```shell -# Enter the home directory or your working directory. -cd /home +??? Commands -# Download the installation package, and I will update the commit-id in time. You can directly copy the command. -wget https://vllm-wheels.s3.us-west-2.amazonaws.com/9112b443a042d8d815880b8780633882ad32b183/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl + ```shell + # Enter the home directory or your working directory. + cd /home -# Download the code repository. -git clone -b xpyd-v1 https://github.com/Abatom/vllm.git -cd vllm + # Download the installation package, and I will update the commit-id in time. You can directly copy the command. + wget https://vllm-wheels.s3.us-west-2.amazonaws.com/9112b443a042d8d815880b8780633882ad32b183/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl -# Set the installation package path. -export VLLM_PRECOMPILED_WHEEL_LOCATION=/home/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl + # Download the code repository. + git clone -b xpyd-v1 https://github.com/Abatom/vllm.git + cd vllm -# installation -pip install -e . -v -``` + # Set the installation package path. + export VLLM_PRECOMPILED_WHEEL_LOCATION=/home/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl + + # installation + pip install -e . -v + ``` # Run xPyD @@ -104,83 +106,91 @@ python3 disagg_prefill_proxy_xpyd.py & ### Prefill1 (e.g. 10.0.1.2 or 10.0.1.1) -```shell -VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=0 vllm serve {your model directory} \ - --host 0.0.0.0 \ - --port 20005 \ - --tensor-parallel-size 1 \ - --seed 1024 \ - --served-model-name base_model \ - --dtype float16 \ - --max-model-len 10000 \ - --max-num-batched-tokens 10000 \ - --max-num-seqs 256 \ - --trust-remote-code \ - --gpu-memory-utilization 0.9 \ - --disable-log-request \ - --kv-transfer-config \ - '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"21001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20005","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & -``` +??? Command + + ```shell + VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=0 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20005 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.9 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"21001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20005","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & + ``` ### Decode1 (e.g. 10.0.1.3 or 10.0.1.1) -```shell -VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=1 vllm serve {your model directory} \ - --host 0.0.0.0 \ - --port 20009 \ - --tensor-parallel-size 1 \ - --seed 1024 \ - --served-model-name base_model \ - --dtype float16 \ - --max-model-len 10000 \ - --max-num-batched-tokens 10000 \ - --max-num-seqs 256 \ - --trust-remote-code \ - --gpu-memory-utilization 0.7 \ - --disable-log-request \ - --kv-transfer-config \ - '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"22001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20009","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & -``` +??? Command + + ```shell + VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=1 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20009 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.7 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"22001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20009","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & + ``` ### Decode2 (e.g. 10.0.1.4 or 10.0.1.1) -```shell -VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=2 vllm serve {your model directory} \ - --host 0.0.0.0 \ - --port 20003 \ - --tensor-parallel-size 1 \ - --seed 1024 \ - --served-model-name base_model \ - --dtype float16 \ - --max-model-len 10000 \ - --max-num-batched-tokens 10000 \ - --max-num-seqs 256 \ - --trust-remote-code \ - --gpu-memory-utilization 0.7 \ - --disable-log-request \ - --kv-transfer-config \ - '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"23001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20003","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & -``` +??? Command + + ```shell + VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=2 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20003 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.7 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"23001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20003","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & + ``` ### Decode3 (e.g. 10.0.1.5 or 10.0.1.1) -```shell -VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=3 vllm serve {your model directory} \ - --host 0.0.0.0 \ - --port 20008 \ - --tensor-parallel-size 1 \ - --seed 1024 \ - --served-model-name base_model \ - --dtype float16 \ - --max-model-len 10000 \ - --max-num-batched-tokens 10000 \ - --max-num-seqs 256 \ - --trust-remote-code \ - --gpu-memory-utilization 0.7 \ - --disable-log-request \ - --kv-transfer-config \ - '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"24001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20008","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & -``` +??? Command + + ```shell + VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=3 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20008 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.7 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"24001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20008","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & + ``` ## Run 3P1D @@ -193,83 +203,91 @@ python3 disagg_prefill_proxy_xpyd.py & ### Prefill1 (e.g. 10.0.1.2 or 10.0.1.1) -```shell -VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=0 vllm serve {your model directory} \ - --host 0.0.0.0 \ - --port 20005 \ - --tensor-parallel-size 1 \ - --seed 1024 \ - --served-model-name base_model \ - --dtype float16 \ - --max-model-len 10000 \ - --max-num-batched-tokens 10000 \ - --max-num-seqs 256 \ - --trust-remote-code \ - --gpu-memory-utilization 0.9 \ - --disable-log-request \ - --kv-transfer-config \ - '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"21001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20005","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & -``` +??? Command + + ```shell + VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=0 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20005 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.9 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"21001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20005","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & + ``` ### Prefill2 (e.g. 10.0.1.3 or 10.0.1.1) -```shell -VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=1 vllm serve {your model directory} \ - --host 0.0.0.0 \ - --port 20009 \ - --tensor-parallel-size 1 \ - --seed 1024 \ - --served-model-name base_model \ - --dtype float16 \ - --max-model-len 10000 \ - --max-num-batched-tokens 10000 \ - --max-num-seqs 256 \ - --trust-remote-code \ - --gpu-memory-utilization 0.9 \ - --disable-log-request \ - --kv-transfer-config \ - '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"22001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20009","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & -``` +??? Command + + ```shell + VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=1 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20009 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.9 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"22001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20009","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & + ``` ### Prefill3 (e.g. 10.0.1.4 or 10.0.1.1) -```shell -VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=2 vllm serve {your model directory} \ - --host 0.0.0.0 \ - --port 20003 \ - --tensor-parallel-size 1 \ - --seed 1024 \ - --served-model-name base_model \ - --dtype float16 \ - --max-model-len 10000 \ - --max-num-batched-tokens 10000 \ - --max-num-seqs 256 \ - --trust-remote-code \ - --gpu-memory-utilization 0.9 \ - --disable-log-request \ - --kv-transfer-config \ - '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"23001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20003","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & -``` +??? Command + + ```shell + VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=2 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20003 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.9 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_producer","kv_buffer_size":"1e1","kv_port":"23001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20003","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & + ``` ### Decode1 (e.g. 10.0.1.5 or 10.0.1.1) -```shell -VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=3 vllm serve {your model directory} \ - --host 0.0.0.0 \ - --port 20008 \ - --tensor-parallel-size 1 \ - --seed 1024 \ - --served-model-name base_model \ - --dtype float16 \ - --max-model-len 10000 \ - --max-num-batched-tokens 10000 \ - --max-num-seqs 256 \ - --trust-remote-code \ - --gpu-memory-utilization 0.7 \ - --disable-log-request \ - --kv-transfer-config \ - '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"24001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20008","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & -``` +??? Command + + ```shell + VLLM_USE_V1=1 CUDA_VISIBLE_DEVICES=3 vllm serve {your model directory} \ + --host 0.0.0.0 \ + --port 20008 \ + --tensor-parallel-size 1 \ + --seed 1024 \ + --served-model-name base_model \ + --dtype float16 \ + --max-model-len 10000 \ + --max-num-batched-tokens 10000 \ + --max-num-seqs 256 \ + --trust-remote-code \ + --gpu-memory-utilization 0.7 \ + --disable-log-request \ + --kv-transfer-config \ + '{"kv_connector":"P2pNcclConnector","kv_role":"kv_consumer","kv_buffer_size":"8e9","kv_port":"24001","kv_connector_extra_config":{"proxy_ip":"10.0.1.1","proxy_port":"30001","http_port":"20008","send_type":"PUT_ASYNC","nccl_num_channels":"16"}}' > /var/vllm.log 2>&1 & + ``` # Single request @@ -286,25 +304,27 @@ curl -X POST -s http://10.0.1.1:10001/v1/completions \ # Benchmark -```shell -python3 benchmark_serving.py \ - --backend vllm \ - --model base_model \ - --tokenizer meta-llama/Llama-3.1-8B-Instruct \ - --dataset-name "random" \ - --host 10.0.1.1 \ - --port 10001 \ - --random-input-len 1024 \ - --random-output-len 1024 \ - --ignore-eos \ - --burstiness 100 \ - --percentile-metrics "ttft,tpot,itl,e2el" \ - --metric-percentiles "90,95,99" \ - --seed $(date +%s) \ - --trust-remote-code \ - --request-rate 3 \ - --num-prompts 1000 -``` +??? Command + + ```shell + python3 benchmark_serving.py \ + --backend vllm \ + --model base_model \ + --tokenizer meta-llama/Llama-3.1-8B-Instruct \ + --dataset-name "random" \ + --host 10.0.1.1 \ + --port 10001 \ + --random-input-len 1024 \ + --random-output-len 1024 \ + --ignore-eos \ + --burstiness 100 \ + --percentile-metrics "ttft,tpot,itl,e2el" \ + --metric-percentiles "90,95,99" \ + --seed $(date +%s) \ + --trust-remote-code \ + --request-rate 3 \ + --num-prompts 1000 + ``` # Shut down diff --git a/docs/design/v1/torch_compile.md b/docs/design/v1/torch_compile.md index 64b6f0cc0a9b6..b65099bd62a25 100644 --- a/docs/design/v1/torch_compile.md +++ b/docs/design/v1/torch_compile.md @@ -28,27 +28,29 @@ A unique aspect of vLLM's `torch.compile` integration, is that we guarantee all In the very verbose logs, we can see: -``` -DEBUG 03-07 03:06:52 [decorators.py:203] Start compiling function +??? Logs -DEBUG 03-07 03:06:54 [backends.py:370] Traced files (to be considered for compilation cache): -DEBUG 03-07 03:06:54 [backends.py:370] xxx/torch/_dynamo/polyfills/builtins.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/torch/nn/modules/container.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/torch/nn/modules/module.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/attention/layer.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/distributed/communication_op.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/distributed/parallel_state.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/custom_op.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/layers/activation.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/layers/layernorm.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/layers/linear.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/layers/rotary_embedding.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/layers/vocab_parallel_embedding.py -DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/models/llama.py + ```text + DEBUG 03-07 03:06:52 [decorators.py:203] Start compiling function -DEBUG 03-07 03:07:07 [backends.py:462] Computation graph saved to ~/.cache/vllm/torch_compile_cache/1517964802/rank_0_0/computation_graph.py -DEBUG 03-07 03:07:07 [wrapper.py:105] Dynamo transformed code saved to ~/.cache/vllm/torch_compile_cache/1517964802/rank_0_0/transformed_code.py -``` + DEBUG 03-07 03:06:54 [backends.py:370] Traced files (to be considered for compilation cache): + DEBUG 03-07 03:06:54 [backends.py:370] xxx/torch/_dynamo/polyfills/builtins.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/torch/nn/modules/container.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/torch/nn/modules/module.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/attention/layer.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/distributed/communication_op.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/distributed/parallel_state.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/custom_op.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/layers/activation.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/layers/layernorm.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/layers/linear.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/layers/rotary_embedding.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/layers/vocab_parallel_embedding.py + DEBUG 03-07 03:06:54 [backends.py:370] xxx/vllm/model_executor/models/llama.py + + DEBUG 03-07 03:07:07 [backends.py:462] Computation graph saved to ~/.cache/vllm/torch_compile_cache/1517964802/rank_0_0/computation_graph.py + DEBUG 03-07 03:07:07 [wrapper.py:105] Dynamo transformed code saved to ~/.cache/vllm/torch_compile_cache/1517964802/rank_0_0/transformed_code.py + ``` This is about the Python code compilation, i.e. graph capture by Dynamo. It tries to trace the function with code `xxx/vllm/model_executor/models/llama.py:339`, which is the `forward` function of the model we compile. During the forward pass, there are also other functions called and inlined by Dynamo, as shown by the logs, including some PyTorch functions from `xxx/torch/nn/modules/module.py` (used by PyTorch `nn.Module`, because module attribute access will trigger a function call), some communication / attention / activation functions from vLLM. All the traced files will be considered when we decide the cache directory to use. This way, any code change in the above files will trigger compilation cache miss, and therefore recompilation. @@ -99,28 +101,31 @@ This time, Inductor compilation is completely bypassed, and we will load from di The above example just uses Inductor to compile for a general shape (i.e. symbolic shape). We can also use Inductor to compile for some of the specific shapes, for example: -``` -vllm serve meta-llama/Llama-3.2-1B --compilation_config '{"compile_sizes": [1, 2, 4, 8]}' +```bash +vllm serve meta-llama/Llama-3.2-1B \ + --compilation_config '{"compile_sizes": [1, 2, 4, 8]}' ``` Then it will also compile a specific kernel just for batch size `1, 2, 4, 8`. At this time, all of the shapes in the computation graph are static and known, and we will turn on auto-tuning to tune for max performance. This can be slow when you run it for the first time, but the next time you run it, we can directly bypass the tuning and run the tuned kernel. When all the shapes are known, `torch.compile` can compare different configs, and often find some better configs to run the kernel. For example, we can see the following log: -``` -AUTOTUNE mm(8x2048, 2048x3072) - triton_mm_4 0.0130 ms 100.0% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=128, BLOCK_M=16, BLOCK_N=32, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=5, num_warps=2 - triton_mm_8 0.0134 ms 97.4% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=128, BLOCK_M=16, BLOCK_N=64, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=5, num_warps=4 - triton_mm_12 0.0148 ms 87.7% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=128, BLOCK_M=16, BLOCK_N=128, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=4, num_warps=4 - mm 0.0160 ms 81.6% - triton_mm_16 0.0165 ms 78.7% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=64, BLOCK_M=16, BLOCK_N=128, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=5, num_warps=8 - triton_mm_3 0.0199 ms 65.4% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=32, BLOCK_M=16, BLOCK_N=32, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=5, num_warps=2 - triton_mm_1 0.0203 ms 64.2% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=128, BLOCK_M=16, BLOCK_N=32, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=2, num_warps=2 - triton_mm_7 0.0203 ms 64.1% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=64, BLOCK_M=16, BLOCK_N=64, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=3, num_warps=4 - triton_mm_2 0.0208 ms 62.5% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=32, BLOCK_M=16, BLOCK_N=64, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=5, num_warps=4 - triton_mm_11 0.0215 ms 60.5% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=64, BLOCK_M=16, BLOCK_N=128, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=3, num_warps=4 -SingleProcess AUTOTUNE benchmarking takes 2.0428 seconds and 7.5727 seconds precompiling -``` +??? Logs + + ``` + AUTOTUNE mm(8x2048, 2048x3072) + triton_mm_4 0.0130 ms 100.0% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=128, BLOCK_M=16, BLOCK_N=32, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=5, num_warps=2 + triton_mm_8 0.0134 ms 97.4% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=128, BLOCK_M=16, BLOCK_N=64, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=5, num_warps=4 + triton_mm_12 0.0148 ms 87.7% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=128, BLOCK_M=16, BLOCK_N=128, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=4, num_warps=4 + mm 0.0160 ms 81.6% + triton_mm_16 0.0165 ms 78.7% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=64, BLOCK_M=16, BLOCK_N=128, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=5, num_warps=8 + triton_mm_3 0.0199 ms 65.4% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=32, BLOCK_M=16, BLOCK_N=32, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=5, num_warps=2 + triton_mm_1 0.0203 ms 64.2% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=128, BLOCK_M=16, BLOCK_N=32, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=2, num_warps=2 + triton_mm_7 0.0203 ms 64.1% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=64, BLOCK_M=16, BLOCK_N=64, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=3, num_warps=4 + triton_mm_2 0.0208 ms 62.5% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=32, BLOCK_M=16, BLOCK_N=64, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=5, num_warps=4 + triton_mm_11 0.0215 ms 60.5% ACC_TYPE='tl.float32', ALLOW_TF32=False, BLOCK_K=64, BLOCK_M=16, BLOCK_N=128, B_PROLOGUE_CAST_TYPE=None, EVEN_K=True, GROUP_M=8, num_stages=3, num_warps=4 + SingleProcess AUTOTUNE benchmarking takes 2.0428 seconds and 7.5727 seconds precompiling + ``` It means, for a matrix multiplication with shape `8x2048x3072`, `torch.compile` tries triton template with various configs, and it is much faster than the default code (which dispatches to cublas library). @@ -136,8 +141,9 @@ The cudagraphs are captured and managed by the compiler backend, and replayed wh By default, vLLM will try to determine a set of sizes to capture cudagraph. You can also override it using the config `cudagraph_capture_sizes`: -``` -vllm serve meta-llama/Llama-3.2-1B --compilation-config '{"cudagraph_capture_sizes": [1, 2, 4, 8]}' +```bash +vllm serve meta-llama/Llama-3.2-1B \ + --compilation-config '{"cudagraph_capture_sizes": [1, 2, 4, 8]}' ``` Then it will only capture cudagraph for the specified sizes. It can be useful to have fine-grained control over the cudagraph capture. diff --git a/examples/others/logging_configuration.md b/examples/others/logging_configuration.md index fbdbce6a4612a..916ab5fd1c871 100644 --- a/examples/others/logging_configuration.md +++ b/examples/others/logging_configuration.md @@ -55,33 +55,33 @@ STDOUT of the console in JSON format with a log level of `INFO`. To begin, first, create an appropriate JSON logging configuration file: -**/path/to/logging_config.json:** +??? note "/path/to/logging_config.json" -```json -{ - "formatters": { - "json": { - "class": "pythonjsonlogger.jsonlogger.JsonFormatter" + ```json + { + "formatters": { + "json": { + "class": "pythonjsonlogger.jsonlogger.JsonFormatter" + } + }, + "handlers": { + "console": { + "class" : "logging.StreamHandler", + "formatter": "json", + "level": "INFO", + "stream": "ext://sys.stdout" + } + }, + "loggers": { + "vllm": { + "handlers": ["console"], + "level": "INFO", + "propagate": false + } + }, + "version": 1 } - }, - "handlers": { - "console": { - "class" : "logging.StreamHandler", - "formatter": "json", - "level": "INFO", - "stream": "ext://sys.stdout" - } - }, - "loggers": { - "vllm": { - "handlers": ["console"], - "level": "INFO", - "propagate": false - } - }, - "version": 1 -} -``` + ``` Finally, run vLLM with the `VLLM_LOGGING_CONFIG_PATH` environment variable set to the path of the custom logging configuration JSON file: @@ -104,38 +104,38 @@ configuration overrides the built-in default logging configuration used by vLLM. First, create an appropriate JSON logging configuration file that includes configuration for the root vLLM logger and for the logger you wish to silence: -**/path/to/logging_config.json:** +??? note "/path/to/logging_config.json" -```json -{ - "formatters": { - "vllm": { - "class": "vllm.logging_utils.NewLineFormatter", - "datefmt": "%m-%d %H:%M:%S", - "format": "%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s" + ```json + { + "formatters": { + "vllm": { + "class": "vllm.logging_utils.NewLineFormatter", + "datefmt": "%m-%d %H:%M:%S", + "format": "%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s" + } + }, + "handlers": { + "vllm": { + "class" : "logging.StreamHandler", + "formatter": "vllm", + "level": "INFO", + "stream": "ext://sys.stdout" + } + }, + "loggers": { + "vllm": { + "handlers": ["vllm"], + "level": "DEBUG", + "propagate": false + }, + "vllm.example_noisy_logger": { + "propagate": false + } + }, + "version": 1 } - }, - "handlers": { - "vllm": { - "class" : "logging.StreamHandler", - "formatter": "vllm", - "level": "INFO", - "stream": "ext://sys.stdout" - } - }, - "loggers": { - "vllm": { - "handlers": ["vllm"], - "level": "DEBUG", - "propagate": false - }, - "vllm.example_noisy_logger": { - "propagate": false - } - }, - "version": 1 -} -``` + ``` Finally, run vLLM with the `VLLM_LOGGING_CONFIG_PATH` environment variable set to the path of the custom logging configuration JSON file: From a6e6604d32ead92a20a615312b7a83668d0f9d7f Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Mon, 23 Jun 2025 21:30:55 +0800 Subject: [PATCH 090/211] [Bugfix] Fix CI bitsandbytes failure (#19969) Signed-off-by: Jee Jee Li --- tests/quantization/test_bitsandbytes.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/quantization/test_bitsandbytes.py b/tests/quantization/test_bitsandbytes.py index 325a902b31112..8e39ed2fff876 100644 --- a/tests/quantization/test_bitsandbytes.py +++ b/tests/quantization/test_bitsandbytes.py @@ -159,6 +159,7 @@ def test_4bit_bnb_embedding_model( with vllm_runner(model_name, task="embed", dtype=dtype, + gpu_memory_utilization=0.5, quantization="bitsandbytes") as vllm_model: vllm_outputs = vllm_model.encode(example_prompts) check_embeddings_close( From 53243e5c42b006f56e239c4f946b7ca33b7db5cc Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Mon, 23 Jun 2025 22:27:07 +0800 Subject: [PATCH 091/211] [doc] improve readability for long commands (#19920) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- docs/contributing/profiling.md | 38 ++++++++++++++++++++---- docs/getting_started/installation/cpu.md | 14 +++++++-- docs/usage/troubleshooting.md | 5 +++- 3 files changed, 48 insertions(+), 9 deletions(-) diff --git a/docs/contributing/profiling.md b/docs/contributing/profiling.md index 6d6366741aaed..20f4867057d3e 100644 --- a/docs/contributing/profiling.md +++ b/docs/contributing/profiling.md @@ -30,13 +30,21 @@ Refer to for an example #### OpenAI Server ```bash -VLLM_TORCH_PROFILER_DIR=./vllm_profile python -m vllm.entrypoints.openai.api_server --model meta-llama/Meta-Llama-3-70B +VLLM_TORCH_PROFILER_DIR=./vllm_profile \ + python -m vllm.entrypoints.openai.api_server \ + --model meta-llama/Meta-Llama-3-70B ``` benchmark_serving.py: ```bash -python benchmarks/benchmark_serving.py --backend vllm --model meta-llama/Meta-Llama-3-70B --dataset-name sharegpt --dataset-path sharegpt.json --profile --num-prompts 2 +python benchmarks/benchmark_serving.py \ + --backend vllm \ + --model meta-llama/Meta-Llama-3-70B \ + --dataset-name sharegpt \ + --dataset-path sharegpt.json \ + --profile \ + --num-prompts 2 ``` ## Profile with NVIDIA Nsight Systems @@ -64,7 +72,16 @@ For basic usage, you can just append `nsys profile -o report.nsys-rep --trace-fo The following is an example using the `benchmarks/benchmark_latency.py` script: ```bash -nsys profile -o report.nsys-rep --trace-fork-before-exec=true --cuda-graph-trace=node python benchmarks/benchmark_latency.py --model meta-llama/Llama-3.1-8B-Instruct --num-iters-warmup 5 --num-iters 1 --batch-size 16 --input-len 512 --output-len 8 +nsys profile -o report.nsys-rep \ + --trace-fork-before-exec=true \ + --cuda-graph-trace=node \ + python benchmarks/benchmark_latency.py \ + --model meta-llama/Llama-3.1-8B-Instruct \ + --num-iters-warmup 5 \ + --num-iters 1 \ + --batch-size 16 \ + --input-len 512 \ + --output-len 8 ``` #### OpenAI Server @@ -73,10 +90,21 @@ To profile the server, you will want to prepend your `vllm serve` command with ` ```bash # server -nsys profile -o report.nsys-rep --trace-fork-before-exec=true --cuda-graph-trace=node --delay 30 --duration 60 vllm serve meta-llama/Llama-3.1-8B-Instruct +nsys profile -o report.nsys-rep \ + --trace-fork-before-exec=true \ + --cuda-graph-trace=node \ + --delay 30 \ + --duration 60 \ + vllm serve meta-llama/Llama-3.1-8B-Instruct # client -python benchmarks/benchmark_serving.py --backend vllm --model meta-llama/Llama-3.1-8B-Instruct --num-prompts 1 --dataset-name random --random-input 1024 --random-output 512 +python benchmarks/benchmark_serving.py \ + --backend vllm \ + --model meta-llama/Llama-3.1-8B-Instruct \ + --num-prompts 1 \ + --dataset-name random \ + --random-input 1024 \ + --random-output 512 ``` In practice, you should set the `--duration` argument to a large value. Whenever you want the server to stop profiling, run: diff --git a/docs/getting_started/installation/cpu.md b/docs/getting_started/installation/cpu.md index 3f75d1aef300b..5d7019e5a8676 100644 --- a/docs/getting_started/installation/cpu.md +++ b/docs/getting_started/installation/cpu.md @@ -79,7 +79,9 @@ Currently, there are no pre-built CPU wheels. ??? Commands ```console - $ docker build -f docker/Dockerfile.cpu --tag vllm-cpu-env --target vllm-openai . + $ docker build -f docker/Dockerfile.cpu \ + --tag vllm-cpu-env \ + --target vllm-openai . # Launching OpenAI server $ docker run --rm \ @@ -188,13 +190,19 @@ vllm serve facebook/opt-125m - Tensor Parallel is supported for serving and offline inferencing. In general each NUMA node is treated as one GPU card. Below is the example script to enable Tensor Parallel = 2 for serving: ```console - VLLM_CPU_KVCACHE_SPACE=40 VLLM_CPU_OMP_THREADS_BIND="0-31|32-63" vllm serve meta-llama/Llama-2-7b-chat-hf -tp=2 --distributed-executor-backend mp + VLLM_CPU_KVCACHE_SPACE=40 VLLM_CPU_OMP_THREADS_BIND="0-31|32-63" \ + vllm serve meta-llama/Llama-2-7b-chat-hf \ + -tp=2 \ + --distributed-executor-backend mp ``` or using default auto thread binding: ```console - VLLM_CPU_KVCACHE_SPACE=40 vllm serve meta-llama/Llama-2-7b-chat-hf -tp=2 --distributed-executor-backend mp + VLLM_CPU_KVCACHE_SPACE=40 \ + vllm serve meta-llama/Llama-2-7b-chat-hf \ + -tp=2 \ + --distributed-executor-backend mp ``` - For each thread id list in `VLLM_CPU_OMP_THREADS_BIND`, users should guarantee threads in the list belong to a same NUMA node. diff --git a/docs/usage/troubleshooting.md b/docs/usage/troubleshooting.md index 9403abfad85fb..631c8c40cfec0 100644 --- a/docs/usage/troubleshooting.md +++ b/docs/usage/troubleshooting.md @@ -134,7 +134,10 @@ NCCL_DEBUG=TRACE torchrun --nproc-per-node= test.py If you are testing with multi-nodes, adjust `--nproc-per-node` and `--nnodes` according to your setup and set `MASTER_ADDR` to the correct IP address of the master node, reachable from all nodes. Then, run: ```console -NCCL_DEBUG=TRACE torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=$MASTER_ADDR test.py +NCCL_DEBUG=TRACE torchrun --nnodes 2 \ + --nproc-per-node=2 \ + --rdzv_backend=c10d \ + --rdzv_endpoint=$MASTER_ADDR test.py ``` If the script runs successfully, you should see the message `sanity check is successful!`. From c3649e4feeed30594f2de8f5183bd24b50b80f1c Mon Sep 17 00:00:00 2001 From: Lukas Geiger Date: Mon, 23 Jun 2025 18:59:09 +0100 Subject: [PATCH 092/211] [Docs] Fix syntax highlighting of shell commands (#19870) Signed-off-by: Lukas Geiger --- .../nightly-benchmarks/nightly-annotation.md | 2 +- docs/deployment/docker.md | 12 +++--- docs/deployment/frameworks/anything-llm.md | 2 +- docs/deployment/frameworks/autogen.md | 4 +- docs/deployment/frameworks/cerebrium.md | 6 +-- docs/deployment/frameworks/chatbox.md | 2 +- docs/deployment/frameworks/dify.md | 4 +- docs/deployment/frameworks/dstack.md | 4 +- docs/deployment/frameworks/haystack.md | 4 +- docs/deployment/frameworks/helm.md | 4 +- docs/deployment/frameworks/litellm.md | 6 +-- docs/deployment/frameworks/open-webui.md | 4 +- .../retrieval_augmented_generation.md | 12 +++--- docs/deployment/frameworks/skypilot.md | 16 ++++---- docs/deployment/frameworks/streamlit.md | 6 +-- docs/deployment/integrations/llamastack.md | 2 +- docs/deployment/k8s.md | 6 +-- docs/deployment/nginx.md | 16 ++++---- docs/features/multimodal_inputs.md | 6 +-- docs/features/quantization/auto_awq.md | 4 +- docs/features/quantization/bitblas.md | 2 +- docs/features/quantization/bnb.md | 4 +- docs/features/quantization/fp8.md | 10 ++--- docs/features/quantization/gguf.md | 6 +-- docs/features/quantization/gptqmodel.md | 4 +- docs/features/quantization/int4.md | 8 ++-- docs/features/quantization/int8.md | 8 ++-- docs/features/quantization/modelopt.md | 2 +- .../quantization/quantized_kvcache.md | 2 +- docs/features/quantization/quark.md | 12 +++--- docs/features/quantization/torchao.md | 2 +- docs/features/tool_calling.md | 2 +- .../installation/aws_neuron.md | 10 ++--- docs/getting_started/installation/cpu.md | 18 ++++----- .../installation/cpu/apple.inc.md | 4 +- .../installation/cpu/build.inc.md | 10 ++--- .../installation/cpu/s390x.inc.md | 6 +-- .../installation/google_tpu.md | 8 ++-- .../installation/gpu/cuda.inc.md | 34 ++++++++-------- .../installation/gpu/rocm.inc.md | 24 +++++------ .../installation/gpu/xpu.inc.md | 12 +++--- .../installation/intel_gaudi.md | 10 ++--- .../installation/python_env_setup.inc.md | 2 +- docs/getting_started/quickstart.md | 14 +++---- .../models/extensions/runai_model_streamer.md | 18 ++++----- docs/models/supported_models.md | 4 +- docs/serving/distributed_serving.md | 16 ++++---- docs/serving/integrations/langchain.md | 2 +- docs/serving/integrations/llamaindex.md | 2 +- docs/usage/metrics.md | 2 +- docs/usage/troubleshooting.md | 4 +- .../offline_inference/openai_batch/README.md | 40 +++++++++---------- .../online_serving/opentelemetry/README.md | 16 ++++---- 53 files changed, 220 insertions(+), 220 deletions(-) diff --git a/.buildkite/nightly-benchmarks/nightly-annotation.md b/.buildkite/nightly-benchmarks/nightly-annotation.md index e43ea765f1556..ef11c040057c8 100644 --- a/.buildkite/nightly-benchmarks/nightly-annotation.md +++ b/.buildkite/nightly-benchmarks/nightly-annotation.md @@ -16,7 +16,7 @@ Please download the visualization scripts in the post - Download `nightly-benchmarks.zip`. - In the same folder, run the following code: - ```console + ```bash export HF_TOKEN= apt update apt install -y git diff --git a/docs/deployment/docker.md b/docs/deployment/docker.md index eb84db7871e4a..5f6a22c28c28e 100644 --- a/docs/deployment/docker.md +++ b/docs/deployment/docker.md @@ -10,7 +10,7 @@ title: Using Docker vLLM offers an official Docker image for deployment. The image can be used to run OpenAI compatible server and is available on Docker Hub as [vllm/vllm-openai](https://hub.docker.com/r/vllm/vllm-openai/tags). -```console +```bash docker run --runtime nvidia --gpus all \ -v ~/.cache/huggingface:/root/.cache/huggingface \ --env "HUGGING_FACE_HUB_TOKEN=" \ @@ -22,7 +22,7 @@ docker run --runtime nvidia --gpus all \ This image can also be used with other container engines such as [Podman](https://podman.io/). -```console +```bash podman run --gpus all \ -v ~/.cache/huggingface:/root/.cache/huggingface \ --env "HUGGING_FACE_HUB_TOKEN=$HF_TOKEN" \ @@ -71,7 +71,7 @@ You can add any other [engine-args][engine-args] you need after the image tag (` You can build and run vLLM from source via the provided . To build vLLM: -```console +```bash # optionally specifies: --build-arg max_jobs=8 --build-arg nvcc_threads=2 DOCKER_BUILDKIT=1 docker build . \ --target vllm-openai \ @@ -99,7 +99,7 @@ of PyTorch Nightly and should be considered **experimental**. Using the flag `-- ??? Command - ```console + ```bash # Example of building on Nvidia GH200 server. (Memory usage: ~15GB, Build time: ~1475s / ~25 min, Image size: 6.93GB) python3 use_existing_torch.py DOCKER_BUILDKIT=1 docker build . \ @@ -118,7 +118,7 @@ of PyTorch Nightly and should be considered **experimental**. Using the flag `-- Run the following command on your host machine to register QEMU user static handlers: - ```console + ```bash docker run --rm --privileged multiarch/qemu-user-static --reset -p yes ``` @@ -128,7 +128,7 @@ of PyTorch Nightly and should be considered **experimental**. Using the flag `-- To run vLLM with the custom-built Docker image: -```console +```bash docker run --runtime nvidia --gpus all \ -v ~/.cache/huggingface:/root/.cache/huggingface \ -p 8000:8000 \ diff --git a/docs/deployment/frameworks/anything-llm.md b/docs/deployment/frameworks/anything-llm.md index a89e633c086ea..4633c2946cde8 100644 --- a/docs/deployment/frameworks/anything-llm.md +++ b/docs/deployment/frameworks/anything-llm.md @@ -15,7 +15,7 @@ It allows you to deploy a large language model (LLM) server with vLLM as the bac - Start the vLLM server with the supported chat completion model, e.g. -```console +```bash vllm serve Qwen/Qwen1.5-32B-Chat-AWQ --max-model-len 4096 ``` diff --git a/docs/deployment/frameworks/autogen.md b/docs/deployment/frameworks/autogen.md index 295664daeadb7..13930e67ab2f5 100644 --- a/docs/deployment/frameworks/autogen.md +++ b/docs/deployment/frameworks/autogen.md @@ -11,7 +11,7 @@ title: AutoGen - Setup [AutoGen](https://microsoft.github.io/autogen/0.2/docs/installation/) environment -```console +```bash pip install vllm # Install AgentChat and OpenAI client from Extensions @@ -23,7 +23,7 @@ pip install -U "autogen-agentchat" "autogen-ext[openai]" - Start the vLLM server with the supported chat completion model, e.g. -```console +```bash python -m vllm.entrypoints.openai.api_server \ --model mistralai/Mistral-7B-Instruct-v0.2 ``` diff --git a/docs/deployment/frameworks/cerebrium.md b/docs/deployment/frameworks/cerebrium.md index 8e096f26db71d..5c5f2f48d50b7 100644 --- a/docs/deployment/frameworks/cerebrium.md +++ b/docs/deployment/frameworks/cerebrium.md @@ -11,14 +11,14 @@ vLLM can be run on a cloud based GPU machine with [Cerebrium](https://www.cerebr To install the Cerebrium client, run: -```console +```bash pip install cerebrium cerebrium login ``` Next, create your Cerebrium project, run: -```console +```bash cerebrium init vllm-project ``` @@ -58,7 +58,7 @@ Next, let us add our code to handle inference for the LLM of your choice (`mistr Then, run the following code to deploy it to the cloud: -```console +```bash cerebrium deploy ``` diff --git a/docs/deployment/frameworks/chatbox.md b/docs/deployment/frameworks/chatbox.md index 10da2fc710027..b1b50b55146ca 100644 --- a/docs/deployment/frameworks/chatbox.md +++ b/docs/deployment/frameworks/chatbox.md @@ -15,7 +15,7 @@ It allows you to deploy a large language model (LLM) server with vLLM as the bac - Start the vLLM server with the supported chat completion model, e.g. -```console +```bash vllm serve qwen/Qwen1.5-0.5B-Chat ``` diff --git a/docs/deployment/frameworks/dify.md b/docs/deployment/frameworks/dify.md index 886484b543475..a0e40784f0ea4 100644 --- a/docs/deployment/frameworks/dify.md +++ b/docs/deployment/frameworks/dify.md @@ -18,13 +18,13 @@ This guide walks you through deploying Dify using a vLLM backend. - Start the vLLM server with the supported chat completion model, e.g. -```console +```bash vllm serve Qwen/Qwen1.5-7B-Chat ``` - Start the Dify server with docker compose ([details](https://github.com/langgenius/dify?tab=readme-ov-file#quick-start)): -```console +```bash git clone https://github.com/langgenius/dify.git cd dify cd docker diff --git a/docs/deployment/frameworks/dstack.md b/docs/deployment/frameworks/dstack.md index 0b91fc88ce3fc..8b4bc459683b0 100644 --- a/docs/deployment/frameworks/dstack.md +++ b/docs/deployment/frameworks/dstack.md @@ -11,14 +11,14 @@ vLLM can be run on a cloud based GPU machine with [dstack](https://dstack.ai/), To install dstack client, run: -```console +```bash pip install "dstack[all] dstack server ``` Next, to configure your dstack project, run: -```console +```bash mkdir -p vllm-dstack cd vllm-dstack dstack init diff --git a/docs/deployment/frameworks/haystack.md b/docs/deployment/frameworks/haystack.md index 04d9eba3065c0..7a4cab4c2ee35 100644 --- a/docs/deployment/frameworks/haystack.md +++ b/docs/deployment/frameworks/haystack.md @@ -13,7 +13,7 @@ It allows you to deploy a large language model (LLM) server with vLLM as the bac - Setup vLLM and Haystack environment -```console +```bash pip install vllm haystack-ai ``` @@ -21,7 +21,7 @@ pip install vllm haystack-ai - Start the vLLM server with the supported chat completion model, e.g. -```console +```bash vllm serve mistralai/Mistral-7B-Instruct-v0.1 ``` diff --git a/docs/deployment/frameworks/helm.md b/docs/deployment/frameworks/helm.md index 192b90438acf0..cff8af2c09d29 100644 --- a/docs/deployment/frameworks/helm.md +++ b/docs/deployment/frameworks/helm.md @@ -22,7 +22,7 @@ Before you begin, ensure that you have the following: To install the chart with the release name `test-vllm`: -```console +```bash helm upgrade --install --create-namespace --namespace=ns-vllm test-vllm . -f values.yaml --set secrets.s3endpoint=$ACCESS_POINT --set secrets.s3bucketname=$BUCKET --set secrets.s3accesskeyid=$ACCESS_KEY --set secrets.s3accesskey=$SECRET_KEY ``` @@ -30,7 +30,7 @@ helm upgrade --install --create-namespace --namespace=ns-vllm test-vllm . -f val To uninstall the `test-vllm` deployment: -```console +```bash helm uninstall test-vllm --namespace=ns-vllm ``` diff --git a/docs/deployment/frameworks/litellm.md b/docs/deployment/frameworks/litellm.md index 8498feaa2972e..8279613b1a273 100644 --- a/docs/deployment/frameworks/litellm.md +++ b/docs/deployment/frameworks/litellm.md @@ -18,7 +18,7 @@ And LiteLLM supports all models on VLLM. - Setup vLLM and litellm environment -```console +```bash pip install vllm litellm ``` @@ -28,7 +28,7 @@ pip install vllm litellm - Start the vLLM server with the supported chat completion model, e.g. -```console +```bash vllm serve qwen/Qwen1.5-0.5B-Chat ``` @@ -56,7 +56,7 @@ vllm serve qwen/Qwen1.5-0.5B-Chat - Start the vLLM server with the supported embedding model, e.g. -```console +```bash vllm serve BAAI/bge-base-en-v1.5 ``` diff --git a/docs/deployment/frameworks/open-webui.md b/docs/deployment/frameworks/open-webui.md index 1ab1931068fae..676a0f58b54f8 100644 --- a/docs/deployment/frameworks/open-webui.md +++ b/docs/deployment/frameworks/open-webui.md @@ -7,13 +7,13 @@ title: Open WebUI 2. Start the vLLM server with the supported chat completion model, e.g. -```console +```bash vllm serve qwen/Qwen1.5-0.5B-Chat ``` 1. Start the [Open WebUI](https://github.com/open-webui/open-webui) docker container (replace the vllm serve host and vllm serve port): -```console +```bash docker run -d -p 3000:8080 \ --name open-webui \ -v open-webui:/app/backend/data \ diff --git a/docs/deployment/frameworks/retrieval_augmented_generation.md b/docs/deployment/frameworks/retrieval_augmented_generation.md index cb26c8378deec..851c31db32f27 100644 --- a/docs/deployment/frameworks/retrieval_augmented_generation.md +++ b/docs/deployment/frameworks/retrieval_augmented_generation.md @@ -15,7 +15,7 @@ Here are the integrations: - Setup vLLM and langchain environment -```console +```bash pip install -U vllm \ langchain_milvus langchain_openai \ langchain_community beautifulsoup4 \ @@ -26,14 +26,14 @@ pip install -U vllm \ - Start the vLLM server with the supported embedding model, e.g. -```console +```bash # Start embedding service (port 8000) vllm serve ssmits/Qwen2-7B-Instruct-embed-base ``` - Start the vLLM server with the supported chat completion model, e.g. -```console +```bash # Start chat service (port 8001) vllm serve qwen/Qwen1.5-0.5B-Chat --port 8001 ``` @@ -52,7 +52,7 @@ python retrieval_augmented_generation_with_langchain.py - Setup vLLM and llamaindex environment -```console +```bash pip install vllm \ llama-index llama-index-readers-web \ llama-index-llms-openai-like \ @@ -64,14 +64,14 @@ pip install vllm \ - Start the vLLM server with the supported embedding model, e.g. -```console +```bash # Start embedding service (port 8000) vllm serve ssmits/Qwen2-7B-Instruct-embed-base ``` - Start the vLLM server with the supported chat completion model, e.g. -```console +```bash # Start chat service (port 8001) vllm serve qwen/Qwen1.5-0.5B-Chat --port 8001 ``` diff --git a/docs/deployment/frameworks/skypilot.md b/docs/deployment/frameworks/skypilot.md index b649312971b5c..ecf987539ced4 100644 --- a/docs/deployment/frameworks/skypilot.md +++ b/docs/deployment/frameworks/skypilot.md @@ -15,7 +15,7 @@ vLLM can be **run and scaled to multiple service replicas on clouds and Kubernet - Check that you have installed SkyPilot ([docs](https://skypilot.readthedocs.io/en/latest/getting-started/installation.html)). - Check that `sky check` shows clouds or Kubernetes are enabled. -```console +```bash pip install skypilot-nightly sky check ``` @@ -71,7 +71,7 @@ See the vLLM SkyPilot YAML for serving, [serving.yaml](https://github.com/skypil Start the serving the Llama-3 8B model on any of the candidate GPUs listed (L4, A10g, ...): -```console +```bash HF_TOKEN="your-huggingface-token" sky launch serving.yaml --env HF_TOKEN ``` @@ -83,7 +83,7 @@ Check the output of the command. There will be a shareable gradio link (like the **Optional**: Serve the 70B model instead of the default 8B and use more GPU: -```console +```bash HF_TOKEN="your-huggingface-token" \ sky launch serving.yaml \ --gpus A100:8 \ @@ -159,7 +159,7 @@ SkyPilot can scale up the service to multiple service replicas with built-in aut Start the serving the Llama-3 8B model on multiple replicas: -```console +```bash HF_TOKEN="your-huggingface-token" \ sky serve up -n vllm serving.yaml \ --env HF_TOKEN @@ -167,7 +167,7 @@ HF_TOKEN="your-huggingface-token" \ Wait until the service is ready: -```console +```bash watch -n10 sky serve status vllm ``` @@ -271,13 +271,13 @@ This will scale the service up to when the QPS exceeds 2 for each replica. To update the service with the new config: -```console +```bash HF_TOKEN="your-huggingface-token" sky serve update vllm serving.yaml --env HF_TOKEN ``` To stop the service: -```console +```bash sky serve down vllm ``` @@ -317,7 +317,7 @@ It is also possible to access the Llama-3 service with a separate GUI frontend, 1. Start the chat web UI: - ```console + ```bash sky launch \ -c gui ./gui.yaml \ --env ENDPOINT=$(sky serve status --endpoint vllm) diff --git a/docs/deployment/frameworks/streamlit.md b/docs/deployment/frameworks/streamlit.md index 33ed8c5f5b54d..5e998e3cca6e4 100644 --- a/docs/deployment/frameworks/streamlit.md +++ b/docs/deployment/frameworks/streamlit.md @@ -15,13 +15,13 @@ It can be quickly integrated with vLLM as a backend API server, enabling powerfu - Start the vLLM server with the supported chat completion model, e.g. -```console +```bash vllm serve qwen/Qwen1.5-0.5B-Chat ``` - Install streamlit and openai: -```console +```bash pip install streamlit openai ``` @@ -29,7 +29,7 @@ pip install streamlit openai - Start the streamlit web UI and start to chat: -```console +```bash streamlit run streamlit_openai_chatbot_webserver.py # or specify the VLLM_API_BASE or VLLM_API_KEY diff --git a/docs/deployment/integrations/llamastack.md b/docs/deployment/integrations/llamastack.md index 2ae600a423ff9..9bbc6b5b296c9 100644 --- a/docs/deployment/integrations/llamastack.md +++ b/docs/deployment/integrations/llamastack.md @@ -7,7 +7,7 @@ vLLM is also available via [Llama Stack](https://github.com/meta-llama/llama-sta To install Llama Stack, run -```console +```bash pip install llama-stack -q ``` diff --git a/docs/deployment/k8s.md b/docs/deployment/k8s.md index 13225ba208fde..f01e3d2fae0eb 100644 --- a/docs/deployment/k8s.md +++ b/docs/deployment/k8s.md @@ -115,7 +115,7 @@ Next, start the vLLM server as a Kubernetes Deployment and Service: We can verify that the vLLM server has started successfully via the logs (this might take a couple of minutes to download the model): -```console +```bash kubectl logs -l app.kubernetes.io/name=vllm ... INFO: Started server process [1] @@ -358,14 +358,14 @@ INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit) Apply the deployment and service configurations using `kubectl apply -f `: - ```console + ```bash kubectl apply -f deployment.yaml kubectl apply -f service.yaml ``` To test the deployment, run the following `curl` command: - ```console + ```bash curl http://mistral-7b.default.svc.cluster.local/v1/completions \ -H "Content-Type: application/json" \ -d '{ diff --git a/docs/deployment/nginx.md b/docs/deployment/nginx.md index 752be76b3864f..7f09453be0c42 100644 --- a/docs/deployment/nginx.md +++ b/docs/deployment/nginx.md @@ -11,13 +11,13 @@ This document shows how to launch multiple vLLM serving containers and use Nginx This guide assumes that you have just cloned the vLLM project and you're currently in the vllm root directory. -```console +```bash export vllm_root=`pwd` ``` Create a file named `Dockerfile.nginx`: -```console +```dockerfile FROM nginx:latest RUN rm /etc/nginx/conf.d/default.conf EXPOSE 80 @@ -26,7 +26,7 @@ CMD ["nginx", "-g", "daemon off;"] Build the container: -```console +```bash docker build . -f Dockerfile.nginx --tag nginx-lb ``` @@ -60,14 +60,14 @@ Create a file named `nginx_conf/nginx.conf`. Note that you can add as many serve ## Build vLLM Container -```console +```bash cd $vllm_root docker build -f docker/Dockerfile . --tag vllm ``` If you are behind proxy, you can pass the proxy settings to the docker build command as shown below: -```console +```bash cd $vllm_root docker build \ -f docker/Dockerfile . \ @@ -80,7 +80,7 @@ docker build \ ## Create Docker Network -```console +```bash docker network create vllm_nginx ``` @@ -129,7 +129,7 @@ Notes: ## Launch Nginx -```console +```bash docker run \ -itd \ -p 8000:80 \ @@ -142,7 +142,7 @@ docker run \ ## Verify That vLLM Servers Are Ready -```console +```bash docker logs vllm0 | grep Uvicorn docker logs vllm1 | grep Uvicorn ``` diff --git a/docs/features/multimodal_inputs.md b/docs/features/multimodal_inputs.md index d4465beb8593e..e3a77afb02f1c 100644 --- a/docs/features/multimodal_inputs.md +++ b/docs/features/multimodal_inputs.md @@ -307,7 +307,7 @@ Full example: ``` @@ -370,7 +370,7 @@ Full example: ``` @@ -476,7 +476,7 @@ Full example: ``` diff --git a/docs/features/quantization/auto_awq.md b/docs/features/quantization/auto_awq.md index 8362672f40b36..9f97ea406e25f 100644 --- a/docs/features/quantization/auto_awq.md +++ b/docs/features/quantization/auto_awq.md @@ -9,7 +9,7 @@ The main benefits are lower latency and memory usage. You can quantize your own models by installing AutoAWQ or picking one of the [6500+ models on Huggingface](https://huggingface.co/models?search=awq). -```console +```bash pip install autoawq ``` @@ -43,7 +43,7 @@ After installing AutoAWQ, you are ready to quantize a model. Please refer to the To run an AWQ model with vLLM, you can use [TheBloke/Llama-2-7b-Chat-AWQ](https://huggingface.co/TheBloke/Llama-2-7b-Chat-AWQ) with the following command: -```console +```bash python examples/offline_inference/llm_engine_example.py \ --model TheBloke/Llama-2-7b-Chat-AWQ \ --quantization awq diff --git a/docs/features/quantization/bitblas.md b/docs/features/quantization/bitblas.md index 3f8ae7a959cd5..c8f874ff84147 100644 --- a/docs/features/quantization/bitblas.md +++ b/docs/features/quantization/bitblas.md @@ -12,7 +12,7 @@ vLLM now supports [BitBLAS](https://github.com/microsoft/BitBLAS) for more effic Below are the steps to utilize BitBLAS with vLLM. -```console +```bash pip install bitblas>=0.1.0 ``` diff --git a/docs/features/quantization/bnb.md b/docs/features/quantization/bnb.md index a8dc2476f30aa..5756fdb288374 100644 --- a/docs/features/quantization/bnb.md +++ b/docs/features/quantization/bnb.md @@ -9,7 +9,7 @@ Compared to other quantization methods, BitsAndBytes eliminates the need for cal Below are the steps to utilize BitsAndBytes with vLLM. -```console +```bash pip install bitsandbytes>=0.45.3 ``` @@ -54,6 +54,6 @@ llm = LLM( Append the following to your model arguments for 4bit inflight quantization: -```console +```bash --quantization bitsandbytes ``` diff --git a/docs/features/quantization/fp8.md b/docs/features/quantization/fp8.md index ec7639af805b9..b9ed668b2ef31 100644 --- a/docs/features/quantization/fp8.md +++ b/docs/features/quantization/fp8.md @@ -23,7 +23,7 @@ The FP8 types typically supported in hardware have two distinct representations, To produce performant FP8 quantized models with vLLM, you'll need to install the [llm-compressor](https://github.com/vllm-project/llm-compressor/) library: -```console +```bash pip install llmcompressor ``` @@ -81,7 +81,7 @@ Since simple RTN does not require data for weight quantization and the activatio Install `vllm` and `lm-evaluation-harness` for evaluation: -```console +```bash pip install vllm lm-eval==0.4.4 ``` @@ -99,9 +99,9 @@ Evaluate accuracy with `lm_eval` (for example on 250 samples of `gsm8k`): !!! note Quantized models can be sensitive to the presence of the `bos` token. `lm_eval` does not add a `bos` token by default, so make sure to include the `add_bos_token=True` argument when running your evaluations. -```console -$ MODEL=$PWD/Meta-Llama-3-8B-Instruct-FP8-Dynamic -$ lm_eval \ +```bash +MODEL=$PWD/Meta-Llama-3-8B-Instruct-FP8-Dynamic +lm_eval \ --model vllm \ --model_args pretrained=$MODEL,add_bos_token=True \ --tasks gsm8k --num_fewshot 5 --batch_size auto --limit 250 diff --git a/docs/features/quantization/gguf.md b/docs/features/quantization/gguf.md index 014b513eeda75..102a3ee1ccccb 100644 --- a/docs/features/quantization/gguf.md +++ b/docs/features/quantization/gguf.md @@ -11,7 +11,7 @@ title: GGUF To run a GGUF model with vLLM, you can download and use the local GGUF model from [TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF](https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF) with the following command: -```console +```bash wget https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf # We recommend using the tokenizer from base model to avoid long-time and buggy tokenizer conversion. vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \ @@ -20,7 +20,7 @@ vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \ You can also add `--tensor-parallel-size 2` to enable tensor parallelism inference with 2 GPUs: -```console +```bash # We recommend using the tokenizer from base model to avoid long-time and buggy tokenizer conversion. vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \ --tokenizer TinyLlama/TinyLlama-1.1B-Chat-v1.0 \ @@ -32,7 +32,7 @@ vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \ GGUF assumes that huggingface can convert the metadata to a config file. In case huggingface doesn't support your model you can manually create a config and pass it as hf-config-path -```console +```bash # If you model is not supported by huggingface you can manually provide a huggingface compatible config path vllm serve ./tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf \ --tokenizer TinyLlama/TinyLlama-1.1B-Chat-v1.0 \ diff --git a/docs/features/quantization/gptqmodel.md b/docs/features/quantization/gptqmodel.md index 2f088f474f199..37bb02d4fb5bf 100644 --- a/docs/features/quantization/gptqmodel.md +++ b/docs/features/quantization/gptqmodel.md @@ -21,7 +21,7 @@ for more details on this and other advanced features. You can quantize your own models by installing [GPTQModel](https://github.com/ModelCloud/GPTQModel) or picking one of the [5000+ models on Huggingface](https://huggingface.co/models?search=gptq). -```console +```bash pip install -U gptqmodel --no-build-isolation -v ``` @@ -60,7 +60,7 @@ Here is an example of how to quantize `meta-llama/Llama-3.2-1B-Instruct`: To run an GPTQModel quantized model with vLLM, you can use [DeepSeek-R1-Distill-Qwen-7B-gptqmodel-4bit-vortex-v2](https://huggingface.co/ModelCloud/DeepSeek-R1-Distill-Qwen-7B-gptqmodel-4bit-vortex-v2) with the following command: -```console +```bash python examples/offline_inference/llm_engine_example.py \ --model ModelCloud/DeepSeek-R1-Distill-Qwen-7B-gptqmodel-4bit-vortex-v2 ``` diff --git a/docs/features/quantization/int4.md b/docs/features/quantization/int4.md index 185e13649f48a..2008bef5c8a25 100644 --- a/docs/features/quantization/int4.md +++ b/docs/features/quantization/int4.md @@ -14,13 +14,13 @@ Please visit the HF collection of [quantized INT4 checkpoints of popular LLMs re To use INT4 quantization with vLLM, you'll need to install the [llm-compressor](https://github.com/vllm-project/llm-compressor/) library: -```console +```bash pip install llmcompressor ``` Additionally, install `vllm` and `lm-evaluation-harness` for evaluation: -```console +```bash pip install vllm lm-eval==0.4.4 ``` @@ -116,8 +116,8 @@ model = LLM("./Meta-Llama-3-8B-Instruct-W4A16-G128") To evaluate accuracy, you can use `lm_eval`: -```console -$ lm_eval --model vllm \ +```bash +lm_eval --model vllm \ --model_args pretrained="./Meta-Llama-3-8B-Instruct-W4A16-G128",add_bos_token=true \ --tasks gsm8k \ --num_fewshot 5 \ diff --git a/docs/features/quantization/int8.md b/docs/features/quantization/int8.md index de5ae5c044010..3a8f855aa0577 100644 --- a/docs/features/quantization/int8.md +++ b/docs/features/quantization/int8.md @@ -15,13 +15,13 @@ Please visit the HF collection of [quantized INT8 checkpoints of popular LLMs re To use INT8 quantization with vLLM, you'll need to install the [llm-compressor](https://github.com/vllm-project/llm-compressor/) library: -```console +```bash pip install llmcompressor ``` Additionally, install `vllm` and `lm-evaluation-harness` for evaluation: -```console +```bash pip install vllm lm-eval==0.4.4 ``` @@ -122,8 +122,8 @@ model = LLM("./Meta-Llama-3-8B-Instruct-W8A8-Dynamic-Per-Token") To evaluate accuracy, you can use `lm_eval`: -```console -$ lm_eval --model vllm \ +```bash +lm_eval --model vllm \ --model_args pretrained="./Meta-Llama-3-8B-Instruct-W8A8-Dynamic-Per-Token",add_bos_token=true \ --tasks gsm8k \ --num_fewshot 5 \ diff --git a/docs/features/quantization/modelopt.md b/docs/features/quantization/modelopt.md index 0bb6003832ba3..39f2a78e705fc 100644 --- a/docs/features/quantization/modelopt.md +++ b/docs/features/quantization/modelopt.md @@ -4,7 +4,7 @@ The [NVIDIA TensorRT Model Optimizer](https://github.com/NVIDIA/TensorRT-Model-O We recommend installing the library with: -```console +```bash pip install nvidia-modelopt ``` diff --git a/docs/features/quantization/quantized_kvcache.md b/docs/features/quantization/quantized_kvcache.md index 52b8d38ace1dd..323dcb7d052d0 100644 --- a/docs/features/quantization/quantized_kvcache.md +++ b/docs/features/quantization/quantized_kvcache.md @@ -65,7 +65,7 @@ For optimal model quality when using FP8 KV Cache, we recommend using calibrated First, install the required dependencies: -```console +```bash pip install llmcompressor ``` diff --git a/docs/features/quantization/quark.md b/docs/features/quantization/quark.md index 6e77584da2325..77e3834954062 100644 --- a/docs/features/quantization/quark.md +++ b/docs/features/quantization/quark.md @@ -13,7 +13,7 @@ AWQ, GPTQ, Rotation and SmoothQuant. Before quantizing models, you need to install Quark. The latest release of Quark can be installed with pip: -```console +```bash pip install amd-quark ``` @@ -22,13 +22,13 @@ for more installation details. Additionally, install `vllm` and `lm-evaluation-harness` for evaluation: -```console +```bash pip install vllm lm-eval==0.4.4 ``` ## Quantization Process -After installing Quark, we will use an example to illustrate how to use Quark. +After installing Quark, we will use an example to illustrate how to use Quark. The Quark quantization process can be listed for 5 steps as below: 1. Load the model @@ -209,8 +209,8 @@ Now, you can load and run the Quark quantized model directly through the LLM ent Or, you can use `lm_eval` to evaluate accuracy: -```console -$ lm_eval --model vllm \ +```bash +lm_eval --model vllm \ --model_args pretrained=Llama-2-70b-chat-hf-w-fp8-a-fp8-kvcache-fp8-pertensor-autosmoothquant,kv_cache_dtype='fp8',quantization='quark' \ --tasks gsm8k ``` @@ -222,7 +222,7 @@ to quantize large language models more conveniently. It supports quantizing mode of different quantization schemes and optimization algorithms. It can export the quantized model and run evaluation tasks on the fly. With the script, the example above can be: -```console +```bash python3 quantize_quark.py --model_dir meta-llama/Llama-2-70b-chat-hf \ --output_dir /path/to/output \ --quant_scheme w_fp8_a_fp8 \ diff --git a/docs/features/quantization/torchao.md b/docs/features/quantization/torchao.md index c45979a361172..f8df3c4b08096 100644 --- a/docs/features/quantization/torchao.md +++ b/docs/features/quantization/torchao.md @@ -4,7 +4,7 @@ TorchAO is an architecture optimization library for PyTorch, it provides high pe We recommend installing the latest torchao nightly with -```console +```bash # Install the latest TorchAO nightly build # Choose the CUDA version that matches your system (cu126, cu128, etc.) pip install \ diff --git a/docs/features/tool_calling.md b/docs/features/tool_calling.md index 9fb878777a48b..41a024ba632e8 100644 --- a/docs/features/tool_calling.md +++ b/docs/features/tool_calling.md @@ -351,7 +351,7 @@ Here is a summary of a plugin file: Then you can use this plugin in the command line like this. -```console +```bash --enable-auto-tool-choice \ --tool-parser-plugin --tool-call-parser example \ diff --git a/docs/getting_started/installation/aws_neuron.md b/docs/getting_started/installation/aws_neuron.md index 6b2efd85f06b1..b8bd76bd5bcbe 100644 --- a/docs/getting_started/installation/aws_neuron.md +++ b/docs/getting_started/installation/aws_neuron.md @@ -26,7 +26,7 @@ The easiest way to launch a Trainium or Inferentia instance with pre-installed N - After launching the instance, follow the instructions in [Connect to your instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AccessingInstancesLinux.html) to connect to the instance - Once inside your instance, activate the pre-installed virtual environment for inference by running -```console +```bash source /opt/aws_neuronx_venv_pytorch_2_6_nxd_inference/bin/activate ``` @@ -47,7 +47,7 @@ Currently, there are no pre-built Neuron wheels. To build and install vLLM from source, run: -```console +```bash git clone https://github.com/vllm-project/vllm.git cd vllm pip install -U -r requirements/neuron.txt @@ -66,7 +66,7 @@ Refer to [vLLM User Guide for NxD Inference](https://awsdocs-neuron.readthedocs- To install the AWS Neuron fork, run the following: -```console +```bash git clone -b neuron-2.23-vllm-v0.7.2 https://github.com/aws-neuron/upstreaming-to-vllm.git cd upstreaming-to-vllm pip install -r requirements/neuron.txt @@ -100,7 +100,7 @@ to perform most of the heavy lifting which includes PyTorch model initialization To configure NxD Inference features through the vLLM entrypoint, use the `override_neuron_config` setting. Provide the configs you want to override as a dictionary (or JSON object when starting vLLM from the CLI). For example, to disable auto bucketing, include -```console +```python override_neuron_config={ "enable_bucketing":False, } @@ -108,7 +108,7 @@ override_neuron_config={ or when launching vLLM from the CLI, pass -```console +```bash --override-neuron-config "{\"enable_bucketing\":false}" ``` diff --git a/docs/getting_started/installation/cpu.md b/docs/getting_started/installation/cpu.md index 5d7019e5a8676..370b854def0f3 100644 --- a/docs/getting_started/installation/cpu.md +++ b/docs/getting_started/installation/cpu.md @@ -78,13 +78,13 @@ Currently, there are no pre-built CPU wheels. ??? Commands - ```console - $ docker build -f docker/Dockerfile.cpu \ + ```bash + docker build -f docker/Dockerfile.cpu \ --tag vllm-cpu-env \ --target vllm-openai . - # Launching OpenAI server - $ docker run --rm \ + # Launching OpenAI server + docker run --rm \ --privileged=true \ --shm-size=4g \ -p 8000:8000 \ @@ -123,7 +123,7 @@ vLLM CPU backend supports the following vLLM features: - We highly recommend to use TCMalloc for high performance memory allocation and better cache locality. For example, on Ubuntu 22.4, you can run: -```console +```bash sudo apt-get install libtcmalloc-minimal4 # install TCMalloc library find / -name *libtcmalloc* # find the dynamic link library path export LD_PRELOAD=/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:$LD_PRELOAD # prepend the library to LD_PRELOAD @@ -132,7 +132,7 @@ python examples/offline_inference/basic/basic.py # run vLLM - When using the online serving, it is recommended to reserve 1-2 CPU cores for the serving framework to avoid CPU oversubscription. For example, on a platform with 32 physical CPU cores, reserving CPU 30 and 31 for the framework and using CPU 0-29 for OpenMP: -```console +```bash export VLLM_CPU_KVCACHE_SPACE=40 export VLLM_CPU_OMP_THREADS_BIND=0-29 vllm serve facebook/opt-125m @@ -140,7 +140,7 @@ vllm serve facebook/opt-125m or using default auto thread binding: -```console +```bash export VLLM_CPU_KVCACHE_SPACE=40 export VLLM_CPU_NUM_OF_RESERVED_CPU=2 vllm serve facebook/opt-125m @@ -189,7 +189,7 @@ vllm serve facebook/opt-125m - Tensor Parallel is supported for serving and offline inferencing. In general each NUMA node is treated as one GPU card. Below is the example script to enable Tensor Parallel = 2 for serving: - ```console + ```bash VLLM_CPU_KVCACHE_SPACE=40 VLLM_CPU_OMP_THREADS_BIND="0-31|32-63" \ vllm serve meta-llama/Llama-2-7b-chat-hf \ -tp=2 \ @@ -198,7 +198,7 @@ vllm serve facebook/opt-125m or using default auto thread binding: - ```console + ```bash VLLM_CPU_KVCACHE_SPACE=40 \ vllm serve meta-llama/Llama-2-7b-chat-hf \ -tp=2 \ diff --git a/docs/getting_started/installation/cpu/apple.inc.md b/docs/getting_started/installation/cpu/apple.inc.md index 7a91e3ce5e5bc..1771213f5591d 100644 --- a/docs/getting_started/installation/cpu/apple.inc.md +++ b/docs/getting_started/installation/cpu/apple.inc.md @@ -25,11 +25,11 @@ Currently the CPU implementation for macOS supports FP32 and FP16 datatypes. After installation of XCode and the Command Line Tools, which include Apple Clang, execute the following commands to build and install vLLM from the source. -```console +```bash git clone https://github.com/vllm-project/vllm.git cd vllm pip install -r requirements/cpu.txt -pip install -e . +pip install -e . ``` !!! note diff --git a/docs/getting_started/installation/cpu/build.inc.md b/docs/getting_started/installation/cpu/build.inc.md index 7ddadccb1b4f1..d9ca04edee025 100644 --- a/docs/getting_started/installation/cpu/build.inc.md +++ b/docs/getting_started/installation/cpu/build.inc.md @@ -1,6 +1,6 @@ First, install recommended compiler. We recommend to use `gcc/g++ >= 12.3.0` as the default compiler to avoid potential problems. For example, on Ubuntu 22.4, you can run: -```console +```bash sudo apt-get update -y sudo apt-get install -y gcc-12 g++-12 libnuma-dev python3-dev sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12 @@ -8,14 +8,14 @@ sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave / Second, clone vLLM project: -```console +```bash git clone https://github.com/vllm-project/vllm.git vllm_source cd vllm_source ``` Third, install Python packages for vLLM CPU backend building: -```console +```bash pip install --upgrade pip pip install "cmake>=3.26.1" wheel packaging ninja "setuptools-scm>=8" numpy pip install -v -r requirements/cpu.txt --extra-index-url https://download.pytorch.org/whl/cpu @@ -23,13 +23,13 @@ pip install -v -r requirements/cpu.txt --extra-index-url https://download.pytorc Finally, build and install vLLM CPU backend: -```console +```bash VLLM_TARGET_DEVICE=cpu python setup.py install ``` If you want to develop vllm, install it in editable mode instead. -```console +```bash VLLM_TARGET_DEVICE=cpu python setup.py develop ``` diff --git a/docs/getting_started/installation/cpu/s390x.inc.md b/docs/getting_started/installation/cpu/s390x.inc.md index 670485feefb65..6c6c40baececd 100644 --- a/docs/getting_started/installation/cpu/s390x.inc.md +++ b/docs/getting_started/installation/cpu/s390x.inc.md @@ -26,7 +26,7 @@ Currently the CPU implementation for s390x architecture supports FP32 datatype o Install the following packages from the package manager before building the vLLM. For example on RHEL 9.4: -```console +```bash dnf install -y \ which procps findutils tar vim git gcc g++ make patch make cython zlib-devel \ libjpeg-turbo-devel libtiff-devel libpng-devel libwebp-devel freetype-devel harfbuzz-devel \ @@ -35,7 +35,7 @@ dnf install -y \ Install rust>=1.80 which is needed for `outlines-core` and `uvloop` python packages installation. -```console +```bash curl https://sh.rustup.rs -sSf | sh -s -- -y && \ . "$HOME/.cargo/env" ``` @@ -45,7 +45,7 @@ Execute the following commands to build and install vLLM from the source. !!! tip Please build the following dependencies, `torchvision`, `pyarrow` from the source before building vLLM. -```console +```bash sed -i '/^torch/d' requirements-build.txt # remove torch from requirements-build.txt since we use nightly builds pip install -v \ --extra-index-url https://download.pytorch.org/whl/nightly/cpu \ diff --git a/docs/getting_started/installation/google_tpu.md b/docs/getting_started/installation/google_tpu.md index 0cb10b8de835e..a81a19df38b09 100644 --- a/docs/getting_started/installation/google_tpu.md +++ b/docs/getting_started/installation/google_tpu.md @@ -68,7 +68,7 @@ For more information about using TPUs with GKE, see: Create a TPU v5e with 4 TPU chips: -```console +```bash gcloud alpha compute tpus queued-resources create QUEUED_RESOURCE_ID \ --node-id TPU_NAME \ --project PROJECT_ID \ @@ -156,13 +156,13 @@ See [deployment-docker-pre-built-image][deployment-docker-pre-built-image] for i You can use to build a Docker image with TPU support. -```console +```bash docker build -f docker/Dockerfile.tpu -t vllm-tpu . ``` Run the Docker image with the following command: -```console +```bash # Make sure to add `--privileged --net host --shm-size=16G`. docker run --privileged --net host --shm-size=16G -it vllm-tpu ``` @@ -185,6 +185,6 @@ docker run --privileged --net host --shm-size=16G -it vllm-tpu Install OpenBLAS with the following command: - ```console + ```bash sudo apt-get install --no-install-recommends --yes libopenblas-base libopenmpi-dev libomp-dev ``` diff --git a/docs/getting_started/installation/gpu/cuda.inc.md b/docs/getting_started/installation/gpu/cuda.inc.md index 4503bb443188d..89f3772d09ce3 100644 --- a/docs/getting_started/installation/gpu/cuda.inc.md +++ b/docs/getting_started/installation/gpu/cuda.inc.md @@ -22,7 +22,7 @@ Therefore, it is recommended to install vLLM with a **fresh new** environment. I You can install vLLM using either `pip` or `uv pip`: -```console +```bash # Install vLLM with CUDA 12.8. # If you are using pip. pip install vllm --extra-index-url https://download.pytorch.org/whl/cu128 @@ -37,7 +37,7 @@ We recommend leveraging `uv` to [automatically select the appropriate PyTorch in As of now, vLLM's binaries are compiled with CUDA 12.8 and public PyTorch release versions by default. We also provide vLLM binaries compiled with CUDA 12.6, 11.8, and public PyTorch release versions: -```console +```bash # Install vLLM with CUDA 11.8. export VLLM_VERSION=0.6.1.post1 export PYTHON_VERSION=312 @@ -52,7 +52,7 @@ LLM inference is a fast-evolving field, and the latest code may contain bug fixe ##### Install the latest code using `pip` -```console +```bash pip install -U vllm \ --pre \ --extra-index-url https://wheels.vllm.ai/nightly @@ -62,7 +62,7 @@ pip install -U vllm \ Another way to install the latest code is to use `uv`: -```console +```bash uv pip install -U vllm \ --torch-backend=auto \ --extra-index-url https://wheels.vllm.ai/nightly @@ -72,7 +72,7 @@ uv pip install -U vllm \ If you want to access the wheels for previous commits (e.g. to bisect the behavior change, performance regression), due to the limitation of `pip`, you have to specify the full URL of the wheel file by embedding the commit hash in the URL: -```console +```bash export VLLM_COMMIT=33f460b17a54acb3b6cc0b03f4a17876cff5eafd # use full commit hash from the main branch pip install https://wheels.vllm.ai/${VLLM_COMMIT}/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl ``` @@ -83,7 +83,7 @@ Note that the wheels are built with Python 3.8 ABI (see [PEP 425](https://peps.p If you want to access the wheels for previous commits (e.g. to bisect the behavior change, performance regression), you can specify the commit hash in the URL: -```console +```bash export VLLM_COMMIT=72d9c316d3f6ede485146fe5aabd4e61dbc59069 # use full commit hash from the main branch uv pip install vllm \ --torch-backend=auto \ @@ -99,7 +99,7 @@ The `uv` approach works for vLLM `v0.6.6` and later and offers an easy-to-rememb If you only need to change Python code, you can build and install vLLM without compilation. Using `pip`'s [`--editable` flag](https://pip.pypa.io/en/stable/topics/local-project-installs/#editable-installs), changes you make to the code will be reflected when you run vLLM: -```console +```bash git clone https://github.com/vllm-project/vllm.git cd vllm VLLM_USE_PRECOMPILED=1 pip install --editable . @@ -118,7 +118,7 @@ This command will do the following: In case you see an error about wheel not found when running the above command, it might be because the commit you based on in the main branch was just merged and the wheel is being built. In this case, you can wait for around an hour to try again, or manually assign the previous commit in the installation using the `VLLM_PRECOMPILED_WHEEL_LOCATION` environment variable. -```console +```bash export VLLM_COMMIT=72d9c316d3f6ede485146fe5aabd4e61dbc59069 # use full commit hash from the main branch export VLLM_PRECOMPILED_WHEEL_LOCATION=https://wheels.vllm.ai/${VLLM_COMMIT}/vllm-1.0.0.dev-cp38-abi3-manylinux1_x86_64.whl pip install --editable . @@ -134,7 +134,7 @@ You can find more information about vLLM's wheels in [install-the-latest-code][i If you want to modify C++ or CUDA code, you'll need to build vLLM from source. This can take several minutes: -```console +```bash git clone https://github.com/vllm-project/vllm.git cd vllm pip install -e . @@ -160,7 +160,7 @@ There are scenarios where the PyTorch dependency cannot be easily installed via To build vLLM using an existing PyTorch installation: -```console +```bash git clone https://github.com/vllm-project/vllm.git cd vllm python use_existing_torch.py @@ -173,7 +173,7 @@ pip install --no-build-isolation -e . Currently, before starting the build process, vLLM fetches cutlass code from GitHub. However, there may be scenarios where you want to use a local version of cutlass instead. To achieve this, you can set the environment variable VLLM_CUTLASS_SRC_DIR to point to your local cutlass directory. -```console +```bash git clone https://github.com/vllm-project/vllm.git cd vllm VLLM_CUTLASS_SRC_DIR=/path/to/cutlass pip install -e . @@ -184,7 +184,7 @@ VLLM_CUTLASS_SRC_DIR=/path/to/cutlass pip install -e . To avoid your system being overloaded, you can limit the number of compilation jobs to be run simultaneously, via the environment variable `MAX_JOBS`. For example: -```console +```bash export MAX_JOBS=6 pip install -e . ``` @@ -194,7 +194,7 @@ A side effect is a much slower build process. Additionally, if you have trouble building vLLM, we recommend using the NVIDIA PyTorch Docker image. -```console +```bash # Use `--ipc=host` to make sure the shared memory is large enough. docker run \ --gpus all \ @@ -205,14 +205,14 @@ docker run \ If you don't want to use docker, it is recommended to have a full installation of CUDA Toolkit. You can download and install it from [the official website](https://developer.nvidia.com/cuda-toolkit-archive). After installation, set the environment variable `CUDA_HOME` to the installation path of CUDA Toolkit, and make sure that the `nvcc` compiler is in your `PATH`, e.g.: -```console +```bash export CUDA_HOME=/usr/local/cuda export PATH="${CUDA_HOME}/bin:$PATH" ``` Here is a sanity check to verify that the CUDA Toolkit is correctly installed: -```console +```bash nvcc --version # verify that nvcc is in your PATH ${CUDA_HOME}/bin/nvcc --version # verify that nvcc is in your CUDA_HOME ``` @@ -223,7 +223,7 @@ vLLM can fully run only on Linux but for development purposes, you can still bui Simply disable the `VLLM_TARGET_DEVICE` environment variable before installing: -```console +```bash export VLLM_TARGET_DEVICE=empty pip install -e . ``` @@ -238,7 +238,7 @@ See [deployment-docker-pre-built-image][deployment-docker-pre-built-image] for i Another way to access the latest code is to use the docker images: -```console +```bash export VLLM_COMMIT=33f460b17a54acb3b6cc0b03f4a17876cff5eafd # use full commit hash from the main branch docker pull public.ecr.aws/q9t5s3a7/vllm-ci-postmerge-repo:${VLLM_COMMIT} ``` diff --git a/docs/getting_started/installation/gpu/rocm.inc.md b/docs/getting_started/installation/gpu/rocm.inc.md index 6bc714fe6e8b4..aa4cacaf1aedd 100644 --- a/docs/getting_started/installation/gpu/rocm.inc.md +++ b/docs/getting_started/installation/gpu/rocm.inc.md @@ -31,17 +31,17 @@ Currently, there are no pre-built ROCm wheels. Alternatively, you can install PyTorch using PyTorch wheels. You can check PyTorch installation guide in PyTorch [Getting Started](https://pytorch.org/get-started/locally/). Example: - ```console + ```bash # Install PyTorch - $ pip uninstall torch -y - $ pip install --no-cache-dir --pre torch --index-url https://download.pytorch.org/whl/nightly/rocm6.3 + pip uninstall torch -y + pip install --no-cache-dir --pre torch --index-url https://download.pytorch.org/whl/nightly/rocm6.3 ``` 1. Install [Triton flash attention for ROCm](https://github.com/ROCm/triton) Install ROCm's Triton flash attention (the default triton-mlir branch) following the instructions from [ROCm/triton](https://github.com/ROCm/triton/blob/triton-mlir/README.md) - ```console + ```bash python3 -m pip install ninja cmake wheel pybind11 pip uninstall -y triton git clone https://github.com/OpenAI/triton.git @@ -62,7 +62,7 @@ Currently, there are no pre-built ROCm wheels. For example, for ROCm 6.3, suppose your gfx arch is `gfx90a`. To get your gfx architecture, run `rocminfo |grep gfx`. - ```console + ```bash git clone https://github.com/ROCm/flash-attention.git cd flash-attention git checkout b7d29fb @@ -76,7 +76,7 @@ Currently, there are no pre-built ROCm wheels. 3. If you choose to build AITER yourself to use a certain branch or commit, you can build AITER using the following steps: - ```console + ```bash python3 -m pip uninstall -y aiter git clone --recursive https://github.com/ROCm/aiter.git cd aiter @@ -148,7 +148,7 @@ If you choose to build this rocm_base image yourself, the steps are as follows. It is important that the user kicks off the docker build using buildkit. Either the user put DOCKER_BUILDKIT=1 as environment variable when calling docker build command, or the user needs to setup buildkit in the docker daemon configuration /etc/docker/daemon.json as follows and restart the daemon: -```console +```json { "features": { "buildkit": true @@ -158,7 +158,7 @@ It is important that the user kicks off the docker build using buildkit. Either To build vllm on ROCm 6.3 for MI200 and MI300 series, you can use the default: -```console +```bash DOCKER_BUILDKIT=1 docker build \ -f docker/Dockerfile.rocm_base \ -t rocm/vllm-dev:base . @@ -169,7 +169,7 @@ DOCKER_BUILDKIT=1 docker build \ First, build a docker image from and launch a docker container from the image. It is important that the user kicks off the docker build using buildkit. Either the user put `DOCKER_BUILDKIT=1` as environment variable when calling docker build command, or the user needs to setup buildkit in the docker daemon configuration /etc/docker/daemon.json as follows and restart the daemon: -```console +```bash { "features": { "buildkit": true @@ -187,13 +187,13 @@ Their values can be passed in when running `docker build` with `--build-arg` opt To build vllm on ROCm 6.3 for MI200 and MI300 series, you can use the default: -```console +```bash DOCKER_BUILDKIT=1 docker build -f docker/Dockerfile.rocm -t vllm-rocm . ``` To build vllm on ROCm 6.3 for Radeon RX7900 series (gfx1100), you should pick the alternative base image: -```console +```bash DOCKER_BUILDKIT=1 docker build \ --build-arg BASE_IMAGE="rocm/vllm-dev:navi_base" \ -f docker/Dockerfile.rocm \ @@ -205,7 +205,7 @@ To run the above docker image `vllm-rocm`, use the below command: ??? Command - ```console + ```bash docker run -it \ --network=host \ --group-add=video \ diff --git a/docs/getting_started/installation/gpu/xpu.inc.md b/docs/getting_started/installation/gpu/xpu.inc.md index 128fff164c3aa..ab84dc09834cf 100644 --- a/docs/getting_started/installation/gpu/xpu.inc.md +++ b/docs/getting_started/installation/gpu/xpu.inc.md @@ -25,7 +25,7 @@ Currently, there are no pre-built XPU wheels. - First, install required driver and Intel OneAPI 2025.0 or later. - Second, install Python packages for vLLM XPU backend building: -```console +```bash git clone https://github.com/vllm-project/vllm.git cd vllm pip install --upgrade pip @@ -34,7 +34,7 @@ pip install -v -r requirements/xpu.txt - Then, build and install vLLM XPU backend: -```console +```bash VLLM_TARGET_DEVICE=xpu python setup.py install ``` @@ -53,9 +53,9 @@ Currently, there are no pre-built XPU images. # --8<-- [end:pre-built-images] # --8<-- [start:build-image-from-source] -```console -$ docker build -f docker/Dockerfile.xpu -t vllm-xpu-env --shm-size=4g . -$ docker run -it \ +```bash +docker build -f docker/Dockerfile.xpu -t vllm-xpu-env --shm-size=4g . +docker run -it \ --rm \ --network=host \ --device /dev/dri \ @@ -68,7 +68,7 @@ $ docker run -it \ XPU platform supports **tensor parallel** inference/serving and also supports **pipeline parallel** as a beta feature for online serving. We require Ray as the distributed runtime backend. For example, a reference execution like following: -```console +```bash python -m vllm.entrypoints.openai.api_server \ --model=facebook/opt-13b \ --dtype=bfloat16 \ diff --git a/docs/getting_started/installation/intel_gaudi.md b/docs/getting_started/installation/intel_gaudi.md index 056caa7081474..a4f13dca4bf48 100644 --- a/docs/getting_started/installation/intel_gaudi.md +++ b/docs/getting_started/installation/intel_gaudi.md @@ -24,7 +24,7 @@ please follow the methods outlined in the To verify that the Intel Gaudi software was correctly installed, run: -```console +```bash hl-smi # verify that hl-smi is in your PATH and each Gaudi accelerator is visible apt list --installed | grep habana # verify that habanalabs-firmware-tools, habanalabs-graph, habanalabs-rdma-core, habanalabs-thunk and habanalabs-container-runtime are installed pip list | grep habana # verify that habana-torch-plugin, habana-torch-dataloader, habana-pyhlml and habana-media-loader are installed @@ -42,7 +42,7 @@ for more details. Use the following commands to run a Docker image: -```console +```bash docker pull vault.habana.ai/gaudi-docker/1.18.0/ubuntu22.04/habanalabs/pytorch-installer-2.4.0:latest docker run \ -it \ @@ -65,7 +65,7 @@ Currently, there are no pre-built Intel Gaudi wheels. To build and install vLLM from source, run: -```console +```bash git clone https://github.com/vllm-project/vllm.git cd vllm pip install -r requirements/hpu.txt @@ -74,7 +74,7 @@ python setup.py develop Currently, the latest features and performance optimizations are developed in Gaudi's [vLLM-fork](https://github.com/HabanaAI/vllm-fork) and we periodically upstream them to vLLM main repo. To install latest [HabanaAI/vLLM-fork](https://github.com/HabanaAI/vllm-fork), run the following: -```console +```bash git clone https://github.com/HabanaAI/vllm-fork.git cd vllm-fork git checkout habana_main @@ -90,7 +90,7 @@ Currently, there are no pre-built Intel Gaudi images. ### Build image from source -```console +```bash docker build -f docker/Dockerfile.hpu -t vllm-hpu-env . docker run \ -it \ diff --git a/docs/getting_started/installation/python_env_setup.inc.md b/docs/getting_started/installation/python_env_setup.inc.md index 911301d683359..423bf9b00d07f 100644 --- a/docs/getting_started/installation/python_env_setup.inc.md +++ b/docs/getting_started/installation/python_env_setup.inc.md @@ -1,6 +1,6 @@ It's recommended to use [uv](https://docs.astral.sh/uv/), a very fast Python environment manager, to create and manage Python environments. Please follow the [documentation](https://docs.astral.sh/uv/#getting-started) to install `uv`. After installing `uv`, you can create a new Python environment and install vLLM using the following commands: -```console +```bash uv venv --python 3.12 --seed source .venv/bin/activate ``` diff --git a/docs/getting_started/quickstart.md b/docs/getting_started/quickstart.md index d02cb18bcb940..39100e4ca5405 100644 --- a/docs/getting_started/quickstart.md +++ b/docs/getting_started/quickstart.md @@ -19,7 +19,7 @@ If you are using NVIDIA GPUs, you can install vLLM using [pip](https://pypi.org/ It's recommended to use [uv](https://docs.astral.sh/uv/), a very fast Python environment manager, to create and manage Python environments. Please follow the [documentation](https://docs.astral.sh/uv/#getting-started) to install `uv`. After installing `uv`, you can create a new Python environment and install vLLM using the following commands: -```console +```bash uv venv --python 3.12 --seed source .venv/bin/activate uv pip install vllm --torch-backend=auto @@ -29,13 +29,13 @@ uv pip install vllm --torch-backend=auto Another delightful way is to use `uv run` with `--with [dependency]` option, which allows you to run commands such as `vllm serve` without creating any permanent environment: -```console +```bash uv run --with vllm vllm --help ``` You can also use [conda](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html) to create and manage Python environments. You can install `uv` to the conda environment through `pip` if you want to manage it within the environment. -```console +```bash conda create -n myenv python=3.12 -y conda activate myenv pip install --upgrade uv @@ -110,7 +110,7 @@ By default, it starts the server at `http://localhost:8000`. You can specify the Run the following command to start the vLLM server with the [Qwen2.5-1.5B-Instruct](https://huggingface.co/Qwen/Qwen2.5-1.5B-Instruct) model: -```console +```bash vllm serve Qwen/Qwen2.5-1.5B-Instruct ``` @@ -124,7 +124,7 @@ vllm serve Qwen/Qwen2.5-1.5B-Instruct This server can be queried in the same format as OpenAI API. For example, to list the models: -```console +```bash curl http://localhost:8000/v1/models ``` @@ -134,7 +134,7 @@ You can pass in the argument `--api-key` or environment variable `VLLM_API_KEY` Once your server is started, you can query the model with input prompts: -```console +```bash curl http://localhost:8000/v1/completions \ -H "Content-Type: application/json" \ -d '{ @@ -172,7 +172,7 @@ vLLM is designed to also support the OpenAI Chat Completions API. The chat inter You can use the [create chat completion](https://platform.openai.com/docs/api-reference/chat/completions/create) endpoint to interact with the model: -```console +```bash curl http://localhost:8000/v1/chat/completions \ -H "Content-Type: application/json" \ -d '{ diff --git a/docs/models/extensions/runai_model_streamer.md b/docs/models/extensions/runai_model_streamer.md index 6755b574ea67b..60b43d21d9f68 100644 --- a/docs/models/extensions/runai_model_streamer.md +++ b/docs/models/extensions/runai_model_streamer.md @@ -9,27 +9,27 @@ Further reading can be found in [Run:ai Model Streamer Documentation](https://gi vLLM supports loading weights in Safetensors format using the Run:ai Model Streamer. You first need to install vLLM RunAI optional dependency: -```console +```bash pip3 install vllm[runai] ``` To run it as an OpenAI-compatible server, add the `--load-format runai_streamer` flag: -```console +```bash vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \ --load-format runai_streamer ``` To run model from AWS S3 object store run: -```console +```bash vllm serve s3://core-llm/Llama-3-8b \ --load-format runai_streamer ``` To run model from a S3 compatible object store run: -```console +```bash RUNAI_STREAMER_S3_USE_VIRTUAL_ADDRESSING=0 \ AWS_EC2_METADATA_DISABLED=true \ AWS_ENDPOINT_URL=https://storage.googleapis.com \ @@ -44,7 +44,7 @@ You can tune parameters using `--model-loader-extra-config`: You can tune `concurrency` that controls the level of concurrency and number of OS threads reading tensors from the file to the CPU buffer. For reading from S3, it will be the number of client instances the host is opening to the S3 server. -```console +```bash vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \ --load-format runai_streamer \ --model-loader-extra-config '{"concurrency":16}' @@ -53,7 +53,7 @@ vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \ You can control the size of the CPU Memory buffer to which tensors are read from the file, and limit this size. You can read further about CPU buffer memory limiting [here](https://github.com/run-ai/runai-model-streamer/blob/master/docs/src/env-vars.md#runai_streamer_memory_limit). -```console +```bash vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \ --load-format runai_streamer \ --model-loader-extra-config '{"memory_limit":5368709120}' @@ -66,13 +66,13 @@ vllm serve /home/meta-llama/Llama-3.2-3B-Instruct \ vLLM also supports loading sharded models using Run:ai Model Streamer. This is particularly useful for large models that are split across multiple files. To use this feature, use the `--load-format runai_streamer_sharded` flag: -```console +```bash vllm serve /path/to/sharded/model --load-format runai_streamer_sharded ``` The sharded loader expects model files to follow the same naming pattern as the regular sharded state loader: `model-rank-{rank}-part-{part}.safetensors`. You can customize this pattern using the `pattern` parameter in `--model-loader-extra-config`: -```console +```bash vllm serve /path/to/sharded/model \ --load-format runai_streamer_sharded \ --model-loader-extra-config '{"pattern":"custom-model-rank-{rank}-part-{part}.safetensors"}' @@ -82,7 +82,7 @@ To create sharded model files, you can use the script provided in test.py ``` If you are testing with multi-nodes, adjust `--nproc-per-node` and `--nnodes` according to your setup and set `MASTER_ADDR` to the correct IP address of the master node, reachable from all nodes. Then, run: -```console +```bash NCCL_DEBUG=TRACE torchrun --nnodes 2 \ --nproc-per-node=2 \ --rdzv_backend=c10d \ diff --git a/examples/offline_inference/openai_batch/README.md b/examples/offline_inference/openai_batch/README.md index ce75297821221..631fde91fcd08 100644 --- a/examples/offline_inference/openai_batch/README.md +++ b/examples/offline_inference/openai_batch/README.md @@ -29,14 +29,14 @@ We currently support `/v1/chat/completions`, `/v1/embeddings`, and `/v1/score` e To follow along with this example, you can download the example batch, or create your own batch file in your working directory. -```console +```bash wget https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl ``` Once you've created your batch file it should look like this -```console -$ cat offline_inference/openai_batch/openai_example_batch.jsonl +```bash +cat offline_inference/openai_batch/openai_example_batch.jsonl {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}} {"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are an unhelpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}} ``` @@ -47,7 +47,7 @@ The batch running tool is designed to be used from the command line. You can run the batch with the following command, which will write its results to a file called `results.jsonl` -```console +```bash python -m vllm.entrypoints.openai.run_batch \ -i offline_inference/openai_batch/openai_example_batch.jsonl \ -o results.jsonl \ @@ -56,7 +56,7 @@ python -m vllm.entrypoints.openai.run_batch \ or use command-line: -```console +```bash vllm run-batch \ -i offline_inference/openai_batch/openai_example_batch.jsonl \ -o results.jsonl \ @@ -67,8 +67,8 @@ vllm run-batch \ You should now have your results at `results.jsonl`. You can check your results by running `cat results.jsonl` -```console -$ cat results.jsonl +```bash +cat results.jsonl {"id":"vllm-383d1c59835645aeb2e07d004d62a826","custom_id":"request-1","response":{"id":"cmpl-61c020e54b964d5a98fa7527bfcdd378","object":"chat.completion","created":1715633336,"model":"meta-llama/Meta-Llama-3-8B-Instruct","choices":[{"index":0,"message":{"role":"assistant","content":"Hello! It's great to meet you! I'm here to help with any questions or tasks you may have. What's on your mind today?"},"logprobs":null,"finish_reason":"stop","stop_reason":null}],"usage":{"prompt_tokens":25,"total_tokens":56,"completion_tokens":31}},"error":null} {"id":"vllm-42e3d09b14b04568afa3f1797751a267","custom_id":"request-2","response":{"id":"cmpl-f44d049f6b3a42d4b2d7850bb1e31bcc","object":"chat.completion","created":1715633336,"model":"meta-llama/Meta-Llama-3-8B-Instruct","choices":[{"index":0,"message":{"role":"assistant","content":"*silence*"},"logprobs":null,"finish_reason":"stop","stop_reason":null}],"usage":{"prompt_tokens":27,"total_tokens":32,"completion_tokens":5}},"error":null} ``` @@ -79,7 +79,7 @@ The batch runner supports remote input and output urls that are accessible via h For example, to run against our example input file located at `https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl`, you can run -```console +```bash python -m vllm.entrypoints.openai.run_batch \ -i https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl \ -o results.jsonl \ @@ -88,7 +88,7 @@ python -m vllm.entrypoints.openai.run_batch \ or use command-line: -```console +```bash vllm run-batch \ -i https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl \ -o results.jsonl \ @@ -112,21 +112,21 @@ To integrate with cloud blob storage, we recommend using presigned urls. To follow along with this example, you can download the example batch, or create your own batch file in your working directory. -```console +```bash wget https://raw.githubusercontent.com/vllm-project/vllm/main/examples/offline_inference/openai_batch/openai_example_batch.jsonl ``` Once you've created your batch file it should look like this -```console -$ cat offline_inference/openai_batch/openai_example_batch.jsonl +```bash +cat offline_inference/openai_batch/openai_example_batch.jsonl {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}} {"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "meta-llama/Meta-Llama-3-8B-Instruct", "messages": [{"role": "system", "content": "You are an unhelpful assistant."},{"role": "user", "content": "Hello world!"}],"max_completion_tokens": 1000}} ``` Now upload your batch file to your S3 bucket. -```console +```bash aws s3 cp offline_inference/openai_batch/openai_example_batch.jsonl s3://MY_BUCKET/MY_INPUT_FILE.jsonl ``` @@ -181,7 +181,7 @@ output_url='https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_OUTPUT_FILE.jsonl?AW You can now run the batch runner, using the urls generated in the previous section. -```console +```bash python -m vllm.entrypoints.openai.run_batch \ -i "https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_INPUT_FILE.jsonl?AWSAccessKeyId=ABCDEFGHIJKLMNOPQRST&Signature=abcdefghijklmnopqrstuvwxyz12345&Expires=1715800091" \ -o "https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_OUTPUT_FILE.jsonl?AWSAccessKeyId=ABCDEFGHIJKLMNOPQRST&Signature=abcdefghijklmnopqrstuvwxyz12345&Expires=1715800091" \ @@ -190,7 +190,7 @@ python -m vllm.entrypoints.openai.run_batch \ or use command-line: -```console +```bash vllm run-batch \ -i "https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_INPUT_FILE.jsonl?AWSAccessKeyId=ABCDEFGHIJKLMNOPQRST&Signature=abcdefghijklmnopqrstuvwxyz12345&Expires=1715800091" \ -o "https://s3.us-west-2.amazonaws.com/MY_BUCKET/MY_OUTPUT_FILE.jsonl?AWSAccessKeyId=ABCDEFGHIJKLMNOPQRST&Signature=abcdefghijklmnopqrstuvwxyz12345&Expires=1715800091" \ @@ -201,7 +201,7 @@ vllm run-batch \ Your results are now on S3. You can view them in your terminal by running -```console +```bash aws s3 cp s3://MY_BUCKET/MY_OUTPUT_FILE.jsonl - ``` @@ -230,8 +230,8 @@ You can run the batch using the same command as in earlier examples. You can check your results by running `cat results.jsonl` -```console -$ cat results.jsonl +```bash +cat results.jsonl {"id":"vllm-db0f71f7dec244e6bce530e0b4ef908b","custom_id":"request-1","response":{"status_code":200,"request_id":"vllm-batch-3580bf4d4ae54d52b67eee266a6eab20","body":{"id":"embd-33ac2efa7996430184461f2e38529746","object":"list","created":444647,"model":"intfloat/e5-mistral-7b-instruct","data":[{"index":0,"object":"embedding","embedding":[0.016204833984375,0.0092010498046875,0.0018358230590820312,-0.0028228759765625,0.001422882080078125,-0.0031147003173828125,...]}],"usage":{"prompt_tokens":8,"total_tokens":8,"completion_tokens":0}}},"error":null} ... ``` @@ -261,8 +261,8 @@ You can run the batch using the same command as in earlier examples. You can check your results by running `cat results.jsonl` -```console -$ cat results.jsonl +```bash +cat results.jsonl {"id":"vllm-f87c5c4539184f618e555744a2965987","custom_id":"request-1","response":{"status_code":200,"request_id":"vllm-batch-806ab64512e44071b37d3f7ccd291413","body":{"id":"score-4ee45236897b4d29907d49b01298cdb1","object":"list","created":1737847944,"model":"BAAI/bge-reranker-v2-m3","data":[{"index":0,"object":"score","score":0.0010900497436523438},{"index":1,"object":"score","score":1.0}],"usage":{"prompt_tokens":37,"total_tokens":37,"completion_tokens":0,"prompt_tokens_details":null}}},"error":null} {"id":"vllm-41990c51a26d4fac8419077f12871099","custom_id":"request-2","response":{"status_code":200,"request_id":"vllm-batch-73ce66379026482699f81974e14e1e99","body":{"id":"score-13f2ffe6ba40460fbf9f7f00ad667d75","object":"list","created":1737847944,"model":"BAAI/bge-reranker-v2-m3","data":[{"index":0,"object":"score","score":0.001094818115234375},{"index":1,"object":"score","score":1.0}],"usage":{"prompt_tokens":37,"total_tokens":37,"completion_tokens":0,"prompt_tokens_details":null}}},"error":null} ``` diff --git a/examples/online_serving/opentelemetry/README.md b/examples/online_serving/opentelemetry/README.md index af00340079745..ae5d84d8ef198 100644 --- a/examples/online_serving/opentelemetry/README.md +++ b/examples/online_serving/opentelemetry/README.md @@ -2,7 +2,7 @@ 1. Install OpenTelemetry packages: - ```console + ```bash pip install \ 'opentelemetry-sdk>=1.26.0,<1.27.0' \ 'opentelemetry-api>=1.26.0,<1.27.0' \ @@ -12,7 +12,7 @@ 1. Start Jaeger in a docker container: - ```console + ```bash # From: https://www.jaegertracing.io/docs/1.57/getting-started/ docker run --rm --name jaeger \ -e COLLECTOR_ZIPKIN_HOST_PORT=:9411 \ @@ -31,14 +31,14 @@ 1. In a new shell, export Jaeger IP: - ```console + ```bash export JAEGER_IP=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' jaeger) export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317 ``` Then set vLLM's service name for OpenTelemetry, enable insecure connections to Jaeger and run vLLM: - ```console + ```bash export OTEL_SERVICE_NAME="vllm-server" export OTEL_EXPORTER_OTLP_TRACES_INSECURE=true vllm serve facebook/opt-125m --otlp-traces-endpoint="$OTEL_EXPORTER_OTLP_TRACES_ENDPOINT" @@ -46,7 +46,7 @@ 1. In a new shell, send requests with trace context from a dummy client - ```console + ```bash export JAEGER_IP=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' jaeger) export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=grpc://$JAEGER_IP:4317 export OTEL_EXPORTER_OTLP_TRACES_INSECURE=true @@ -67,7 +67,7 @@ OpenTelemetry supports either `grpc` or `http/protobuf` as the transport protocol for trace data in the exporter. By default, `grpc` is used. To set `http/protobuf` as the protocol, configure the `OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` environment variable as follows: -```console +```bash export OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=http/protobuf export OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://$JAEGER_IP:4318/v1/traces vllm serve facebook/opt-125m --otlp-traces-endpoint="$OTEL_EXPORTER_OTLP_TRACES_ENDPOINT" @@ -79,13 +79,13 @@ OpenTelemetry allows automatic instrumentation of FastAPI. 1. Install the instrumentation library - ```console + ```bash pip install opentelemetry-instrumentation-fastapi ``` 1. Run vLLM with `opentelemetry-instrument` - ```console + ```bash opentelemetry-instrument vllm serve facebook/opt-125m ``` From 68aaeb3749eac62d723f2a75c31b65e04ab9fbe6 Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Mon, 23 Jun 2025 14:07:47 -0400 Subject: [PATCH 093/211] [EP+DP] Optimize the little operations in the DeepGEMM + DeepEP low latency case (#19885) Signed-off-by: Varun Sundar Rabindranath Signed-off-by: Tyler Michael Smith Co-authored-by: Varun Sundar Rabindranath --- .../moe/test_silu_mul_fp8_quant_deep_gemm.py | 83 ++++++++ .../layers/fused_moe/batched_deep_gemm_moe.py | 186 ++++++++++++++++-- vllm/model_executor/layers/fused_moe/layer.py | 12 +- 3 files changed, 263 insertions(+), 18 deletions(-) create mode 100644 tests/kernels/moe/test_silu_mul_fp8_quant_deep_gemm.py diff --git a/tests/kernels/moe/test_silu_mul_fp8_quant_deep_gemm.py b/tests/kernels/moe/test_silu_mul_fp8_quant_deep_gemm.py new file mode 100644 index 0000000000000..673a0aa367948 --- /dev/null +++ b/tests/kernels/moe/test_silu_mul_fp8_quant_deep_gemm.py @@ -0,0 +1,83 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import pytest +import torch + +from vllm.model_executor.layers.fused_moe.batched_deep_gemm_moe import ( + silu_mul_fp8_quant_deep_gemm) +from vllm.platforms import current_platform + +# (E, T, H, group_size, seed) +CASES = [ + (1, 1, 128, 64, 0), + (1, 4, 128, 128, 0), + (2, 4, 256, 128, 0), + (32, 64, 256, 128, 0), + (17, 31, 768, 128, 0), +] + + +@pytest.mark.parametrize("E,T,H,group_size,seed", CASES) +@torch.inference_mode() +def test_silu_mul_fp8_quant_deep_gemm(E, T, H, group_size, seed): + current_platform.seed_everything(seed) + + # Input tensor of shape (E, T, 2*H) + y = torch.randn((E, T, 2 * H), dtype=torch.float32, device="cuda") + tokens_per_expert = torch.randint( + low=0, + high=T, + size=(E, ), + dtype=torch.int32, + device="cuda", + ) + + # Run the Triton kernel + y_q, y_s = silu_mul_fp8_quant_deep_gemm(y, + tokens_per_expert, + group_size=group_size, + eps=1e-10) + + # Reference implementation + fp8_info = torch.finfo(torch.float8_e4m3fn) + fp8_max = fp8_info.max + fp8_min = fp8_info.min + eps = 1e-10 + + # Compute silu activation and elementwise multiplication + y1 = y[..., :H] + y2 = y[..., H:] + silu_x = y1 * torch.sigmoid(y1) + merged = silu_x * y2 + + # Compute reference scales and quantized output, skipping padded tokens + for e in range(E): + nt = tokens_per_expert[e].item() + ref_s = torch.empty((T, H // group_size), + dtype=torch.float32, + device="cuda") + ref_q = torch.empty((T, H), dtype=torch.float8_e4m3fn, device="cuda") + for t in range(nt): + data = merged[e, t] + data_grp = data.view(H // group_size, group_size) + amax = data_grp.abs().amax(dim=1).clamp(min=eps) + scale = amax / fp8_max + + scaled = data / scale.repeat_interleave(group_size) + clamped = scaled.clamp(fp8_min, fp8_max) + q = clamped.to(torch.float8_e4m3fn) + + ref_s[t] = scale + ref_q[t] = q + + y_se = y_s[e] + y_qe = y_q[e] + + torch.testing.assert_close(y_se[:nt], ref_s[:nt]) + torch.testing.assert_close( + y_qe[:nt].to(torch.float32), + ref_q[:nt].to(torch.float32), + atol=2, + rtol=2e-1, + ) diff --git a/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py b/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py index 5492399efdf86..70836879d17c0 100644 --- a/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +++ b/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py @@ -6,14 +6,179 @@ import torch import vllm.model_executor.layers.fused_moe.modular_kernel as mk from vllm.logger import init_logger -from vllm.model_executor.layers.fused_moe.utils import ( - _resize_cache, per_token_group_quant_fp8) +from vllm.model_executor.layers.fused_moe.utils import _resize_cache +from vllm.triton_utils import tl, triton logger = init_logger(__name__) has_deep_gemm = importlib.util.find_spec("deep_gemm") is not None +@triton.jit +def _silu_mul_fp8_quant_deep_gemm( + # Pointers ------------------------------------------------------------ + input_ptr, # 16-bit activations (E, T, 2*H) + y_q_ptr, # fp8 quantized activations (E, T, H) + y_s_ptr, # 16-bit scales (E, T, G) + counts_ptr, # int32 num tokens per expert (E) + + # Sizes --------------------------------------------------------------- + H: tl.constexpr, # hidden dimension (per output) + GROUP_SIZE: tl.constexpr, # elements per group (usually 128) + + # Strides for input (elements) --------------------------------------- + stride_i_e, + stride_i_t, + stride_i_h, + + # Strides for y_q (elements) ----------------------------------------- + stride_yq_e, + stride_yq_t, + stride_yq_h, + + # Strides for y_s (elements) ----------------------------------------- + stride_ys_e, + stride_ys_t, + stride_ys_g, + + # Stride for counts (elements) + stride_counts_e, + + # Numeric params ------------------------------------------------------ + eps: tl.constexpr, + fp8_min: tl.constexpr, + fp8_max: tl.constexpr, + + # Meta --------------------------------------------------------------- + BLOCK: tl.constexpr, +): + G = H // GROUP_SIZE + + # map program id -> (e, g) + pid = tl.program_id(0) + e = pid // G + g = pid % G + + e = e.to(tl.int64) + g = g.to(tl.int64) + + # number of valid tokens for this expert + n_tokens = tl.load(counts_ptr + e * stride_counts_e).to(tl.int64) + + cols = tl.arange(0, BLOCK) + cols = cols.to(tl.int64) + mask_h = cols < BLOCK + + t = tl.zeros([], tl.int64) + while t < n_tokens: + base_i_offset = (e * stride_i_e + t * stride_i_t + + g * GROUP_SIZE * stride_i_h) + base_yq_offset = (e * stride_yq_e + t * stride_yq_t + + g * GROUP_SIZE * stride_yq_h) + base_ys_offset = e * stride_ys_e + t * stride_ys_t + g * stride_ys_g + + mask = mask_h + x = tl.load(input_ptr + base_i_offset + cols * stride_i_h, + mask=mask, + other=0.0).to(tl.float32) + y2 = tl.load(input_ptr + base_i_offset + H * stride_i_h + + cols * stride_i_h, + mask=mask, + other=0.0).to(tl.float32) + + x = x * (1.0 / (1.0 + tl.exp(-x))) + y = x * y2 + + _absmax = tl.maximum(tl.max(tl.abs(y)), eps) + y_s = _absmax / fp8_max + y_q = tl.clamp(y / y_s, fp8_min, fp8_max).to(y_q_ptr.dtype.element_ty) + + tl.store(y_q_ptr + base_yq_offset + cols * stride_yq_h, y_q, mask=mask) + tl.store(y_s_ptr + base_ys_offset, y_s) + + t += 1 + + +def silu_mul_fp8_quant_deep_gemm( + y: torch.Tensor, # (E, T, 2*H) float32 + tokens_per_expert: torch.Tensor, # (E,) number of valid tokens per expert + group_size: int = 128, + eps: float = 1e-10, +): + """Quantize silu(y[..., :H]) * y[..., H:] to FP8 with group per-token scales + + y has shape (E, T, 2*H). The first half of the last dimension is + silu-activated, multiplied by the second half, then quantized into FP8. + + Returns `(y_q, y_s)` where + * `y_q` is the FP8 tensor of shape `(E, T, H)`, same layout as `y[..., :H]`. + * `y_s` has shape `(E, T, H // group_size)` and strides `(T*G, 1, T)` + """ + assert y.ndim == 3, "y must be (E, T, 2*H)" + E, T, H2 = y.shape + assert H2 % 2 == 0, "last dim of y must be even (2*H)" + H = H2 // 2 + G = H // group_size + assert H % group_size == 0, "H must be divisible by group_size" + assert tokens_per_expert.ndim == 1 and tokens_per_expert.shape[0] == E, \ + "tokens_per_expert must be shape (E,)" + tokens_per_expert = tokens_per_expert.to(device=y.device, + dtype=torch.int32) + + # allocate outputs + fp8_dtype = torch.float8_e4m3fn + y_q = torch.empty((E, T, H), dtype=fp8_dtype, device=y.device) + + # strides (elements) + stride_i_e, stride_i_t, stride_i_h = y.stride() + stride_yq_e, stride_yq_t, stride_yq_h = y_q.stride() + + # desired scale strides (elements): (T*G, 1, T) + stride_ys_e = T * G + stride_ys_t = 1 + stride_ys_g = T + y_s = torch.empty_strided((E, T, G), + (stride_ys_e, stride_ys_t, stride_ys_g), + dtype=torch.float32, + device=y.device) + + stride_cnt_e = tokens_per_expert.stride()[0] + + # static grid over experts and H-groups. + # A loop inside the kernel handles the token dim + grid = (E * G, ) + + f_info = torch.finfo(fp8_dtype) + fp8_max = f_info.max + fp8_min = f_info.min + + _silu_mul_fp8_quant_deep_gemm[grid]( + y, + y_q, + y_s, + tokens_per_expert, + H, + group_size, + stride_i_e, + stride_i_t, + stride_i_h, + stride_yq_e, + stride_yq_t, + stride_yq_h, + stride_ys_e, + stride_ys_t, + stride_ys_g, + stride_cnt_e, + eps, + fp8_min, + fp8_max, + BLOCK=group_size, + num_warps=4, + ) + + return y_q, y_s + + class BatchedDeepGemmExperts(mk.FusedMoEPermuteExpertsUnpermute): # The Deep Gemm kernels only support block size of 128 @@ -96,7 +261,6 @@ class BatchedDeepGemmExperts(mk.FusedMoEPermuteExpertsUnpermute): hidden_states, w1, w2, topk_ids) workspace1 = _resize_cache(workspace13, (E, max_num_tokens, N)) - workspace2 = _resize_cache(workspace2, (E, max_num_tokens, N // 2)) # (from deepgemm docs) : A value hint (which is a value on CPU) # for the M expectation of each batch, correctly setting this value @@ -109,19 +273,9 @@ class BatchedDeepGemmExperts(mk.FusedMoEPermuteExpertsUnpermute): masked_m=expert_num_tokens, expected_m=expected_m) - # TODO (varun) [Optimization]: Use a batched version of activation. - # Similarly for the quant below. - self.activation(activation, workspace2, workspace1.view(-1, N)) - - w2_hidden_size = workspace2.size(-1) - workspace2 = workspace2.view(-1, w2_hidden_size) - - a2q_scale: Optional[torch.Tensor] = None - a2q, a2q_scale = per_token_group_quant_fp8(workspace2, - self.block_shape[1], - column_major_scales=False) - a2q = a2q.view(E, max_num_tokens, -1) - a2q_scale = a2q_scale.view(E, max_num_tokens, -1) + assert expert_num_tokens is not None + a2q, a2q_scale = silu_mul_fp8_quant_deep_gemm(workspace1, + expert_num_tokens) dg.m_grouped_gemm_fp8_fp8_bf16_nt_masked((a2q, a2q_scale), (w2, w2_scale), diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index 1fd8f2175886a..4ed10e60b13ac 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -45,7 +45,8 @@ if current_platform.is_cuda_alike(): from .pplx_prepare_finalize import PplxPrepareAndFinalize if has_deepep: from .deepep_ht_prepare_finalize import DeepEPHTPrepareAndFinalize - from .deepep_ll_prepare_finalize import DeepEPLLPrepareAndFinalize + from .deepep_ll_prepare_finalize import (DEEPEP_QUANT_BLOCK_SIZE, + DeepEPLLPrepareAndFinalize) else: fused_experts = None # type: ignore FusedMoEPermuteExpertsUnpermute = None # type: ignore @@ -377,6 +378,13 @@ class FusedMoEMethodBase(QuantizeMethodBase): all2all_manager.world_size) handle = all2all_manager.get_handle(all_to_all_args) + # Note : We may want to use FP8 dispatch even otherwise just to + # reduce datamovement + assert act_quant_block_size is not None + use_fp8_dispatch = (quant_dtype == current_platform.fp8_dtype() + and act_quant_block_size[1] + == DEEPEP_QUANT_BLOCK_SIZE) + # Note (varun): Whether to use FP8 dispatch or not needs some # profiling. Turning it off for now. prepare_finalize = DeepEPLLPrepareAndFinalize( @@ -386,7 +394,7 @@ class FusedMoEMethodBase(QuantizeMethodBase): max_tokens_per_rank=moe.max_num_tokens, quant_dtype=quant_dtype, block_shape=act_quant_block_size, - use_fp8_dispatch=False, + use_fp8_dispatch=use_fp8_dispatch, ) self.topk_indices_dtype = None From 61f4fc5dc6f165a2d2c38c277458d04fdd5c8bf5 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Tue, 24 Jun 2025 02:38:06 +0800 Subject: [PATCH 094/211] [Bugfix][v1] Fix step pooler implementation and step pooling usage in v1 (#19956) Signed-off-by: Isotr0py <2037008807@qq.com> --- tests/conftest.py | 18 +-- .../test_model_load_with_params.py | 12 +- tests/models/language/pooling/embed_utils.py | 2 +- .../models/language/pooling/test_embedding.py | 2 +- tests/models/language/pooling/test_jina.py | 4 +- tests/models/language/pooling/test_reward.py | 104 ++++++++++++++++++ .../multimodal/pooling/test_dse_qwen2_vl.py | 4 +- .../multimodal/pooling/test_llava_next.py | 2 +- tests/models/multimodal/pooling/test_phi3v.py | 2 +- tests/quantization/test_bitsandbytes.py | 2 +- vllm/model_executor/layers/pooler.py | 13 +-- vllm/model_executor/models/interfaces.py | 6 + vllm/v1/worker/gpu_input_batch.py | 30 +++-- vllm/v1/worker/gpu_model_runner.py | 3 + 14 files changed, 164 insertions(+), 40 deletions(-) create mode 100644 tests/models/language/pooling/test_reward.py diff --git a/tests/conftest.py b/tests/conftest.py index f50e611a471bb..feb52e26300a0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1027,13 +1027,13 @@ class VllmRunner: req_outputs = self.model.classify(prompts) return [req_output.outputs.probs for req_output in req_outputs] - def encode(self, - prompts: list[str], - images: Optional[PromptImageInput] = None, - videos: Optional[PromptVideoInput] = None, - audios: Optional[PromptAudioInput] = None, - *args, - **kwargs) -> list[list[float]]: + def embed(self, + prompts: list[str], + images: Optional[PromptImageInput] = None, + videos: Optional[PromptVideoInput] = None, + audios: Optional[PromptAudioInput] = None, + *args, + **kwargs) -> list[list[float]]: inputs = self.get_inputs(prompts, images=images, videos=videos, @@ -1042,6 +1042,10 @@ class VllmRunner: req_outputs = self.model.embed(inputs, *args, **kwargs) return [req_output.outputs.embedding for req_output in req_outputs] + def encode(self, prompts: list[str]) -> list[list[float]]: + req_outputs = self.model.encode(prompts) + return [req_output.outputs.data for req_output in req_outputs] + def score( self, text_1: Union[str, list[str]], diff --git a/tests/model_executor/test_model_load_with_params.py b/tests/model_executor/test_model_load_with_params.py index 94a14bd24bcb6..4bdb651e51705 100644 --- a/tests/model_executor/test_model_load_with_params.py +++ b/tests/model_executor/test_model_load_with_params.py @@ -29,8 +29,8 @@ def test_model_loading_with_params(vllm_runner): revision=REVISION, dtype="float16", max_model_len=MAX_MODEL_LEN) as vllm_model: - output = vllm_model.encode("Write a short story about a robot that" - " dreams for the first time.\n") + output = vllm_model.embed("Write a short story about a robot that" + " dreams for the first time.\n") model_config = vllm_model.model.llm_engine.model_config model_tokenizer = vllm_model.model.llm_engine.tokenizer @@ -67,8 +67,8 @@ def test_roberta_model_loading_with_params(vllm_runner): revision=REVISION_ROBERTA, dtype="float16", max_model_len=MAX_MODEL_LEN) as vllm_model: - output = vllm_model.encode("Write a short story about a robot that" - " dreams for the first time.\n") + output = vllm_model.embed("Write a short story about a robot that" + " dreams for the first time.\n") model_config = vllm_model.model.llm_engine.model_config model_tokenizer = vllm_model.model.llm_engine.tokenizer @@ -105,8 +105,8 @@ def test_facebook_roberta_model_loading_with_params(vllm_runner): with vllm_runner(model_name=model_name, dtype="float16", max_model_len=MAX_MODEL_LEN) as vllm_model: - output = vllm_model.encode("Write a short story about a robot that" - " dreams for the first time.\n") + output = vllm_model.embed("Write a short story about a robot that" + " dreams for the first time.\n") model_tokenizer = vllm_model.model.llm_engine.tokenizer assert model_tokenizer.tokenizer_id == model_name diff --git a/tests/models/language/pooling/embed_utils.py b/tests/models/language/pooling/embed_utils.py index dabd7bee7f393..a663679a9c7cb 100644 --- a/tests/models/language/pooling/embed_utils.py +++ b/tests/models/language/pooling/embed_utils.py @@ -55,7 +55,7 @@ def correctness_test_embed_models(hf_runner, task="embed", max_model_len=None, **vllm_extra_kwargs) as vllm_model: - vllm_outputs = vllm_model.encode(example_prompts) + vllm_outputs = vllm_model.embed(example_prompts) with hf_runner( model_info.name, diff --git a/tests/models/language/pooling/test_embedding.py b/tests/models/language/pooling/test_embedding.py index 5ef9f768c5744..b8b17524cf07f 100644 --- a/tests/models/language/pooling/test_embedding.py +++ b/tests/models/language/pooling/test_embedding.py @@ -89,7 +89,7 @@ def test_models( task="embed", max_model_len=512, **vllm_extra_kwargs) as vllm_model: - vllm_outputs = vllm_model.encode(example_prompts) + vllm_outputs = vllm_model.embed(example_prompts) check_embeddings_close( embeddings_0_lst=hf_outputs, diff --git a/tests/models/language/pooling/test_jina.py b/tests/models/language/pooling/test_jina.py index 0c44683e7486d..0bc189d82b8a3 100644 --- a/tests/models/language/pooling/test_jina.py +++ b/tests/models/language/pooling/test_jina.py @@ -98,11 +98,11 @@ def test_matryoshka( if dimensions not in matryoshka_dimensions: with pytest.raises(ValueError): - vllm_model.encode( + vllm_model.embed( example_prompts, pooling_params=PoolingParams(dimensions=dimensions)) else: - vllm_outputs = vllm_model.encode( + vllm_outputs = vllm_model.embed( example_prompts, pooling_params=PoolingParams(dimensions=dimensions)) diff --git a/tests/models/language/pooling/test_reward.py b/tests/models/language/pooling/test_reward.py new file mode 100644 index 0000000000000..085cdca9f1f31 --- /dev/null +++ b/tests/models/language/pooling/test_reward.py @@ -0,0 +1,104 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import pytest +import torch +import torch.nn.functional as F +from transformers import AutoModel + +from vllm.platforms import current_platform + +from ....conftest import HfRunner + + +@pytest.fixture(autouse=True) +def v1(run_with_both_engines): + # Simple autouse wrapper to run both engines for each test + # This can be promoted up to conftest.py to run for every + # test in a package + pass + + +@pytest.fixture +def math_step_prompts(): + # ruff: noqa: E501 + data = { + "system": + "Please reason step by step, and put your final answer within \\boxed{}. ", + "query": + "Sue lives in a fun neighborhood. One weekend, the neighbors decided to play a prank on Sue. On Friday morning, the neighbors placed 18 pink plastic flamingos out on Sue's front yard. On Saturday morning, the neighbors took back one third of the flamingos, painted them white, and put these newly painted white flamingos back out on Sue's front yard. Then, on Sunday morning, they added another 18 pink plastic flamingos to the collection. At noon on Sunday, how many more pink plastic flamingos were out than white plastic flamingos?", + "response": [ + "To find out how many more pink plastic flamingos were out than white plastic flamingos at noon on Sunday, we can break down the problem into steps. First, on Friday, the neighbors start with 18 pink plastic flamingos.", + "On Saturday, they take back one third of the flamingos. Since there were 18 flamingos, (1/3 \\times 18 = 6) flamingos are taken back. So, they have (18 - 6 = 12) flamingos left in their possession. Then, they paint these 6 flamingos white and put them back out on Sue's front yard. Now, Sue has the original 12 pink flamingos plus the 6 new white ones. Thus, by the end of Saturday, Sue has (12 + 6 = 18) pink flamingos and 6 white flamingos.", + "On Sunday, the neighbors add another 18 pink plastic flamingos to Sue's front yard. By the end of Sunday morning, Sue has (18 + 18 = 36) pink flamingos and still 6 white flamingos.", + "To find the difference, subtract the number of white flamingos from the number of pink flamingos: (36 - 6 = 30). Therefore, at noon on Sunday, there were 30 more pink plastic flamingos out than white plastic flamingos. The answer is (\\boxed{30}).", + ], + } + answer = "".join(data['response']) + "" + prompt = f"system\n{data['system']}\nuser\n{data['query']}\nassistant\n{answer}<|endoftext|>" + return [prompt] + + +def step_reward_patch_hf_model(hf_model: HfRunner): + + # Patch the hf_runner to use the step reward function + def make_step_rewards(logits: torch.Tensor, + token_masks: torch.Tensor) -> list[list[float]]: + probabilities = F.softmax(logits, dim=-1) + probabilities = probabilities * token_masks.unsqueeze(-1) + + all_scores_res: list[list[float]] = [] + for i in range(probabilities.size(0)): + sample = probabilities[i] # seq_len, num_labels + positive_probs = sample[sample != 0].view(-1, 2) + non_zero_elements_list = positive_probs.cpu().tolist() + all_scores_res.append(non_zero_elements_list) + return all_scores_res + + def reward(prompts: list[str]) -> list[list[float]]: + input_ids = hf_model.tokenizer(prompts, return_tensors="pt").input_ids + input_ids = hf_model.wrap_device(input_ids) + outputs = hf_model.model(input_ids=input_ids) + + step_sep_id = hf_model.tokenizer.encode("")[0] + token_masks = (input_ids == step_sep_id) + return make_step_rewards(outputs[0], token_masks) + + hf_model.reward = reward # type: ignore[attr-defined] + + return hf_model + + +@pytest.mark.parametrize( + "model", + [ + pytest.param("Qwen/Qwen2.5-Math-PRM-7B", + marks=[pytest.mark.core_model, pytest.mark.cpu_model]), + ], +) +@pytest.mark.parametrize("dtype", ["half"]) +def test_prm_models( + hf_runner, + vllm_runner, + math_step_prompts, + model: str, + dtype: str, + monkeypatch, +) -> None: + if current_platform.is_rocm(): + # ROCm Triton FA does not currently support sliding window attention + # switch to use ROCm CK FA backend + monkeypatch.setenv("VLLM_USE_TRITON_FLASH_ATTN", "False") + + with vllm_runner(model, max_model_len=1024, dtype=dtype) as vllm_model: + vllm_outputs = vllm_model.encode(math_step_prompts) + + with hf_runner(model, dtype=dtype, auto_cls=AutoModel) as hf_model: + hf_model = step_reward_patch_hf_model(hf_model) + hf_outputs = hf_model.reward(math_step_prompts) + + # check logits difference + for hf_output, vllm_output in zip(hf_outputs, vllm_outputs): + hf_output = torch.tensor(hf_output) + vllm_output = torch.tensor(vllm_output) + + assert torch.allclose(hf_output, vllm_output, 1e-2) diff --git a/tests/models/multimodal/pooling/test_dse_qwen2_vl.py b/tests/models/multimodal/pooling/test_dse_qwen2_vl.py index 3734d87b7962e..f889eea5e8393 100644 --- a/tests/models/multimodal/pooling/test_dse_qwen2_vl.py +++ b/tests/models/multimodal/pooling/test_dse_qwen2_vl.py @@ -98,7 +98,7 @@ def _run_test( max_model_len=8192) as vllm_model: tokenizer = vllm_model.model.get_tokenizer() texts = [ - # this is necessary because vllm_model.encode will not apply any + # this is necessary because vllm_model.embed will not apply any # templating to the prompt, and therefore lacks an image_pad # token unless one is inserted beforehand (the (28,28) image # above is converted to an image pad token by the chat template). @@ -109,7 +109,7 @@ def _run_test( # vllm will replace the pad token with the actual image, # which may be a placeholder image, later. ] - vllm_outputs = vllm_model.encode(texts, images=input_images) + vllm_outputs = vllm_model.embed(texts, images=input_images) hf_outputs = [] with hf_runner(model, diff --git a/tests/models/multimodal/pooling/test_llava_next.py b/tests/models/multimodal/pooling/test_llava_next.py index b6d90d2b0abed..4a8f5cafbe485 100644 --- a/tests/models/multimodal/pooling/test_llava_next.py +++ b/tests/models/multimodal/pooling/test_llava_next.py @@ -68,7 +68,7 @@ def _run_test( dtype=dtype, max_model_len=4096, enforce_eager=True) as vllm_model: - vllm_outputs = vllm_model.encode(input_texts, images=input_images) + vllm_outputs = vllm_model.embed(input_texts, images=input_images) with hf_runner(model, dtype=dtype, auto_cls=AutoModelForImageTextToText) as hf_model: diff --git a/tests/models/multimodal/pooling/test_phi3v.py b/tests/models/multimodal/pooling/test_phi3v.py index b42ac6fb21edd..9a4b6d3ff8a81 100644 --- a/tests/models/multimodal/pooling/test_phi3v.py +++ b/tests/models/multimodal/pooling/test_phi3v.py @@ -46,7 +46,7 @@ def _run_test( # will hurt multiprocessing backend with fork method (the default method). with vllm_runner(model, task="embed", dtype=dtype, enforce_eager=True) as vllm_model: - vllm_outputs = vllm_model.encode(input_texts, images=input_images) + vllm_outputs = vllm_model.embed(input_texts, images=input_images) # use eager mode for hf runner, since phi3_v didn't work with flash_attn hf_model_kwargs = {"_attn_implementation": "eager"} diff --git a/tests/quantization/test_bitsandbytes.py b/tests/quantization/test_bitsandbytes.py index 8e39ed2fff876..363daa6d27eff 100644 --- a/tests/quantization/test_bitsandbytes.py +++ b/tests/quantization/test_bitsandbytes.py @@ -161,7 +161,7 @@ def test_4bit_bnb_embedding_model( dtype=dtype, gpu_memory_utilization=0.5, quantization="bitsandbytes") as vllm_model: - vllm_outputs = vllm_model.encode(example_prompts) + vllm_outputs = vllm_model.embed(example_prompts) check_embeddings_close( embeddings_0_lst=hf_outputs, embeddings_1_lst=vllm_outputs, diff --git a/vllm/model_executor/layers/pooler.py b/vllm/model_executor/layers/pooler.py index eb2148d76452e..8a33cd6be4054 100644 --- a/vllm/model_executor/layers/pooler.py +++ b/vllm/model_executor/layers/pooler.py @@ -239,25 +239,24 @@ class StepPool(SimplePooler): prompt_lens = self.get_prompt_lens(hidden_states, pooling_metadata) prompt_token_ids = self.get_prompt_token_ids(pooling_metadata) - pooled_data: list[torch.Tensor] = [] - + pooled_data_lst = list[torch.Tensor]() if isinstance(hidden_states, list): for req_state, prompt_len in zip(hidden_states, prompt_lens): assert prompt_len == req_state.shape[0], \ - "partial prefill not supported with mean pooling" - pooled_data = hidden_states + "partial prefill not supported with step pooling" + pooled_data_lst = hidden_states else: offset = 0 for prompt_len in prompt_lens: pooled_data_i = hidden_states[offset:offset + prompt_len] offset += prompt_len - pooled_data.append(pooled_data_i) + pooled_data_lst.append(pooled_data_i) - pooled_data = [] + pooled_data = list[torch.Tensor]() returned_token_ids = self.returned_token_ids step_tag_id = self.step_tag_id - for data, token_id in zip(pooled_data, prompt_token_ids): + for data, token_id in zip(pooled_data_lst, prompt_token_ids): if returned_token_ids is not None and len(returned_token_ids) > 0: data = data[:, returned_token_ids] diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index 0e7e4e73eca98..f759f8f1f2731 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -489,6 +489,12 @@ def supports_cross_encoding( return is_pooling_model(model) and _supports_cross_encoding(model) +def has_step_pooler(model: Union[type[object], object]) -> bool: + """Check if the model uses step pooler.""" + return is_pooling_model(model) and any( + type(module).__name__ == "StepPool" for module in model.modules()) + + class SupportsQuant: """The interface required for all models that support quantization.""" diff --git a/vllm/v1/worker/gpu_input_batch.py b/vllm/v1/worker/gpu_input_batch.py index 3a2c9ef7dface..ca2bfe8317468 100644 --- a/vllm/v1/worker/gpu_input_batch.py +++ b/vllm/v1/worker/gpu_input_batch.py @@ -59,14 +59,15 @@ class CachedRequestState: class InputBatch: def __init__( - self, - max_num_reqs: int, - max_model_len: int, - max_num_batched_tokens: int, - device: torch.device, - pin_memory: bool, - vocab_size: int, - block_sizes: list[int], # The block_size of each kv cache group + self, + max_num_reqs: int, + max_model_len: int, + max_num_batched_tokens: int, + device: torch.device, + pin_memory: bool, + vocab_size: int, + block_sizes: list[int], # The block_size of each kv cache group + logits_processing_needs_token_ids: bool = False, ): self.max_num_reqs = max_num_reqs self.max_model_len = max_model_len @@ -74,6 +75,8 @@ class InputBatch: self.device = device self.pin_memory = pin_memory self.vocab_size = vocab_size + self.logits_processing_needs_token_ids = ( + logits_processing_needs_token_ids) self._req_ids: list[Optional[str]] = [] self.req_id_to_index: dict[str, int] = {} @@ -579,9 +582,14 @@ class InputBatch: copy_slice(self.repetition_penalties_cpu_tensor, self.repetition_penalties, num_reqs) - # The prompt tokens are used only for applying penalties during - # the sampling process. Hence copy these tensors only when - # there are requests which need penalties to be applied. + needs_prompt_token_ids = (not self.no_penalties or + (self.num_reqs > 0 + and self.logits_processing_needs_token_ids)) + if needs_prompt_token_ids: + # The prompt tokens are used only for applying penalties or + # step pooling during the sampling/pooling process. + # Hence copy these tensors only when there are requests which + # need penalties/step_pooler to be applied. prompt_token_ids = self._make_prompt_token_ids_tensor() else: prompt_token_ids = None diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 3303660061183..520d8fb186f4f 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -33,6 +33,7 @@ from vllm.logger import init_logger from vllm.model_executor.layers.mamba.mamba_mixer2 import MambaMixer2 from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding from vllm.model_executor.model_loader import TensorizerLoader, get_model_loader +from vllm.model_executor.models.interfaces import has_step_pooler from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange from vllm.multimodal.utils import group_mm_inputs_by_modality @@ -1708,6 +1709,8 @@ class GPUModelRunner(LoRAModelRunnerMixin): ) model_loader.load_weights(self.model, model_config=self.model_config) + if has_step_pooler(self.model): + self.input_batch.logits_processing_needs_token_ids = True if self.lora_config: self.model = self.load_lora_model(self.model, self.model_config, From d0132f025d630971b2b69d22d13ea3d21613e7a9 Mon Sep 17 00:00:00 2001 From: lkchen Date: Mon, 23 Jun 2025 12:57:57 -0700 Subject: [PATCH 095/211] [Misc] Add type alias `ReqId` and `EngineId` for better readability (#19880) Signed-off-by: Linkun Chen --- .../kv_connector/v1/nixl_connector.py | 37 ++++++++++--------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index 2d80cbf2b24f6..65bdd7ae29d53 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -36,6 +36,8 @@ if TYPE_CHECKING: from vllm.v1.request import Request Transfer = tuple[int, float] # (xfer_handle, start_time) +EngineId = str +ReqId = str GET_META_MSG = b"get_meta_msg" logger = init_logger(__name__) @@ -75,7 +77,7 @@ class ReqMeta: class NixlConnectorMetadata(KVConnectorMetadata): def __init__(self): - self.requests: dict[str, ReqMeta] = {} + self.requests: dict[ReqId, ReqMeta] = {} def add_new_req( self, @@ -96,16 +98,17 @@ class NixlConnector(KVConnectorBase_V1): def __init__(self, vllm_config: VllmConfig, role: KVConnectorRole): assert vllm_config.kv_transfer_config is not None - self.engine_id = vllm_config.kv_transfer_config.engine_id + assert vllm_config.kv_transfer_config.engine_id is not None + self.engine_id: EngineId = vllm_config.kv_transfer_config.engine_id if role == KVConnectorRole.SCHEDULER: self.connector_scheduler : Optional[NixlConnectorScheduler] = \ - NixlConnectorScheduler(vllm_config, str(self.engine_id)) + NixlConnectorScheduler(vllm_config, self.engine_id) self.connector_worker: Optional[NixlConnectorWorker] = None elif role == KVConnectorRole.WORKER: self.connector_scheduler = None self.connector_worker = NixlConnectorWorker( - vllm_config, str(self.engine_id)) + vllm_config, self.engine_id) ############################################################ # Scheduler Side Methods @@ -179,7 +182,7 @@ class NixlConnectorScheduler: def __init__(self, vllm_config: VllmConfig, engine_id: str): self.vllm_config = vllm_config self.block_size = vllm_config.cache_config.block_size - self.engine_id = engine_id + self.engine_id: EngineId = engine_id self.side_channel_host = envs.VLLM_NIXL_SIDE_CHANNEL_HOST self.side_channel_port = ( envs.VLLM_NIXL_SIDE_CHANNEL_PORT + @@ -190,7 +193,7 @@ class NixlConnectorScheduler: # Requests that need to start recv. # New requests are added by update_state_after_alloc in # the scheduler. Used to make metadata passed to Worker. - self._reqs_need_recv: dict[str, tuple[Request, list[int]]] = {} + self._reqs_need_recv: dict[ReqId, tuple[Request, list[int]]] = {} def get_num_new_matched_tokens( self, request: "Request", @@ -332,19 +335,19 @@ class NixlConnectorWorker: # Agent. self.nixl_wrapper = NixlWrapper(str(uuid.uuid4()), None) # Map of engine_id -> {rank0: agent_name0, rank1: agent_name1..}. - self._remote_agents: dict[str, dict[int, str]] = defaultdict(dict) + self._remote_agents: dict[EngineId, dict[int, str]] = defaultdict(dict) # NIXL handshake port. # NOTE(rob): Within a DP group, each DP rank gets its own # base port (which is sent in the KVTransferParams). # Each TP rank listens/queries on the base_port + tp_rank. - self.side_channel_port = ( + self.side_channel_port: int = ( envs.VLLM_NIXL_SIDE_CHANNEL_PORT + vllm_config.parallel_config.data_parallel_rank_local * vllm_config.parallel_config.tensor_parallel_size) # Metadata. - self.engine_id = engine_id + self.engine_id: EngineId = engine_id self.tp_rank = get_tensor_model_parallel_rank() self.world_size = get_tensor_model_parallel_world_size() self.tp_group = get_tp_group() @@ -354,7 +357,7 @@ class NixlConnectorWorker: # Map of engine_id -> kv_caches_base_addr. For TP case, each local # rank will still only pull from a single remote TP worker. - self.kv_caches_base_addr: dict[str, list[int]] = {} + self.kv_caches_base_addr: dict[EngineId, list[int]] = {} # Number of NIXL regions. Currently one region per cache # (so 1 per layer for MLA, otherwise 2 per layer) @@ -364,23 +367,23 @@ class NixlConnectorWorker: # nixl_prepped_dlist_handle. self.src_xfer_side_handle: int = 0 # Map of engine_id -> nixl_prepped_dlist_handle (int)]. - self.dst_xfer_side_handles: dict[str, int] = {} + self.dst_xfer_side_handles: dict[EngineId, int] = {} # Map of engine_id -> num_blocks. All ranks in the same deployment will # have the same number of blocks. - self.dst_num_blocks: dict[str, int] = {} + self.dst_num_blocks: dict[EngineId, int] = {} self._registered_descs: list[Any] = [] # In progress transfers. # [req_id -> list[handle]] - self._recving_transfers = defaultdict[str, list[Transfer]](list) + self._recving_transfers = defaultdict[ReqId, list[Transfer]](list) # Complete transfer tracker. Used by the rank 0 to track finished # transactions on ranks 1 to N-1. # [req_id -> count] - self._done_recving_count: defaultdict[str, + self._done_recving_count: defaultdict[ReqId, int] = defaultdict(lambda: 0) - self._done_sending_count: defaultdict[str, + self._done_sending_count: defaultdict[ReqId, int] = defaultdict(lambda: 0) # Background thread for establishing new connections. @@ -408,10 +411,10 @@ class NixlConnectorWorker: self._use_flashinfer = attn_backend == _Backend.FLASHINFER_VLLM_V1 logger.debug("Detected attention backend %s", self.backend_name) - self._tp_size: dict[str, int] = {self.engine_id: self.world_size} + self._tp_size: dict[EngineId, int] = {self.engine_id: self.world_size} # With heterogeneous TP, P must wait for all assigned D TP workers to # finish reading before safely freeing the blocks. - self.consumer_notification_counts_by_req = defaultdict[str, int](int) + self.consumer_notification_counts_by_req = defaultdict[ReqId, int](int) @staticmethod def _nixl_handshake_listener(metadata: NixlAgentMetadata, From e6327c9b3eb2111305d6b19bb3e5d1fd852eea55 Mon Sep 17 00:00:00 2001 From: cascade Date: Mon, 23 Jun 2025 13:09:02 -0700 Subject: [PATCH 096/211] [Feature] Support sequence parallelism for static fp8 quantization (#19181) Signed-off-by: cascade812 --- tests/compile/test_sequence_parallelism.py | 161 +++++++- tests/distributed/test_sequence_parallel.py | 108 +++-- tests/models/registry.py | 3 +- vllm/compilation/fusion.py | 4 +- vllm/compilation/pass_manager.py | 8 +- vllm/compilation/sequence_parallelism.py | 436 +++++++++++++++----- vllm/config.py | 6 +- 7 files changed, 531 insertions(+), 195 deletions(-) diff --git a/tests/compile/test_sequence_parallelism.py b/tests/compile/test_sequence_parallelism.py index c689befdf2da6..b56edfc906126 100644 --- a/tests/compile/test_sequence_parallelism.py +++ b/tests/compile/test_sequence_parallelism.py @@ -6,7 +6,9 @@ import torch import vllm.envs as envs from vllm.compilation.fix_functionalization import FixFunctionalizationPass +from vllm.compilation.fusion import FusionPass from vllm.compilation.fx_utils import find_auto_fn, find_auto_fn_maybe, is_func +from vllm.compilation.noop_elimination import NoOpEliminationPass from vllm.compilation.sequence_parallelism import SequenceParallelismPass from vllm.config import (CompilationConfig, DeviceConfig, ModelConfig, PassConfig, VllmConfig) @@ -14,12 +16,15 @@ from vllm.distributed import tensor_model_parallel_all_reduce from vllm.distributed.parallel_state import (init_distributed_environment, initialize_model_parallel) from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.quantization.utils.w8a8_utils import ( + Fp8LinearOp) from vllm.platforms import current_platform from vllm.utils import update_environment_variables from ..utils import multi_gpu_test from .backend import TestBackend +FP8_DTYPE = current_platform.fp8_dtype() prompts = [ "Hello, my name is", "The president of the United States is", @@ -30,13 +35,16 @@ prompts = [ class TestModel(torch.nn.Module): - def __init__(self, hidden_size=16, intermediate_size=32): + def __init__(self, + hidden_size=16, + intermediate_size=32, + vllm_config: VllmConfig = None): super().__init__() self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.gate_proj = torch.nn.Parameter( torch.empty((intermediate_size, hidden_size))) - self.norm = RMSNorm(hidden_size, 1e-05) + self.norm = RMSNorm(intermediate_size, 1e-05) # Initialize weights torch.nn.init.normal_(self.gate_proj, std=0.02) @@ -79,32 +87,138 @@ class TestModel(torch.nn.Module): return [torch.ops._C.fused_add_rms_norm.default] +class TestQuantModel(torch.nn.Module): + + def __init__(self, + hidden_size=16, + intermediate_size=32, + vllm_config: VllmConfig = None): + super().__init__() + self.hidden_size = hidden_size + self.intermediate_size = intermediate_size + self.vllm_config = vllm_config + self.gate_proj = torch.nn.Parameter(torch.empty( + (intermediate_size, hidden_size)), + requires_grad=False) + self.norm = RMSNorm(intermediate_size, 1e-05) + # Initialize weights + torch.nn.init.normal_(self.gate_proj, std=0.02) + + self.fp8_linear = Fp8LinearOp(cutlass_fp8_supported=True, + use_per_token_if_dynamic=False) + + self.scale = torch.rand(1, dtype=torch.float32) + # Create a weight that is compatible with torch._scaled_mm, + # which expects a column-major layout. + self.w = torch.rand(hidden_size, + intermediate_size).to(dtype=FP8_DTYPE).t() + self.wscale = torch.rand(1, dtype=torch.float32) + + def forward(self, hidden_states, residual): + """ + Forward pass implementing the operations in the FX graph + + Args: + hidden_states: Input tensor + residual: Residual tensor from previous layer + + Returns: + Tuple containing the output tensor + """ + # Reshape input + view = hidden_states.reshape(-1, self.hidden_size) + + #matrix multiplication + permute = self.gate_proj.permute(1, 0) + mm = torch.mm(view, permute) + + # Tensor parallel all-reduce + all_reduce = tensor_model_parallel_all_reduce(mm) + + # layer normalization + norm_output, residual_output = self.norm(all_reduce, residual) + + # for static input quantization + # self.fp8_linear is initialized with use_per_token_if_dynamic=False + fp8_linear_result = self.fp8_linear.apply(norm_output, + self.w, + self.wscale, + input_scale=self.scale.to( + norm_output.device)) + + return fp8_linear_result, residual_output + + def ops_in_model_before(self): + ops_to_remove = [torch.ops.vllm.all_reduce.default + ] # Always removed by SP + # The following are only removed if fusion happens + if self.vllm_config and self.vllm_config.compilation_config \ + .pass_config.enable_fusion: + ops_to_remove.extend([ + torch.ops._C.fused_add_rms_norm.default, + torch.ops._C.static_scaled_fp8_quant.default, + ]) + return ops_to_remove + + def ops_in_model_after(self): + ops_to_add = [ + torch.ops.vllm.reduce_scatter.default, + torch.ops.vllm.all_gather.default + ] + # The following is only added if fusion happens + if self.vllm_config and self.vllm_config.compilation_config \ + .pass_config.enable_fusion: + ops_to_add.append( + torch.ops._C.fused_add_rms_norm_static_fp8_quant.default) + return ops_to_add + + def ops_in_model(self): + if self.vllm_config and self.vllm_config.compilation_config \ + .pass_config.enable_fusion: + # If fusion happens, the fused op is the one + # we check for (de)functionalization + return [torch.ops._C.fused_add_rms_norm_static_fp8_quant.default + ] # noqa: E501 + else: + # If no fusion, the original ops are checked + return [ + torch.ops._C.fused_add_rms_norm.default, + # TODO functionalization pass does not handle this yet + # torch.ops._C.static_scaled_fp8_quant.default, + ] + + @multi_gpu_test(num_gpus=2) +@pytest.mark.parametrize("test_model_cls", [TestModel, TestQuantModel]) @pytest.mark.parametrize("batch_size", [8]) @pytest.mark.parametrize("seq_len", [16]) @pytest.mark.parametrize("hidden_size", [16]) @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16]) +@pytest.mark.parametrize("enable_fusion", [True, False]) @pytest.mark.skipif(envs.VLLM_TARGET_DEVICE not in ["cuda"], reason="Only test on CUDA") -def test_sequence_parallelism_pass(batch_size: int, seq_len: int, - hidden_size: int, dtype: torch.dtype): +def test_sequence_parallelism_pass(test_model_cls: type[torch.nn.Module], + batch_size: int, seq_len: int, + hidden_size: int, dtype: torch.dtype, + enable_fusion: bool): num_processes = 2 def run_torch_spawn(fn, nprocs): # need to use torch.mp.spawn otherwise will have problems with # torch.distributed and cuda torch.multiprocessing.spawn(fn, - args=(num_processes, batch_size, seq_len, - hidden_size, dtype), + args=(num_processes, test_model_cls, + batch_size, seq_len, hidden_size, + dtype, enable_fusion), nprocs=nprocs) run_torch_spawn(sequence_parallelism_pass_on_test_model, num_processes) -def sequence_parallelism_pass_on_test_model(local_rank: int, world_size: int, - batch_size: int, seq_len: int, - hidden_size: int, - dtype: torch.dtype): +def sequence_parallelism_pass_on_test_model( + local_rank: int, world_size: int, + test_model_cls: type[torch.nn.Module], batch_size: int, seq_len: int, + hidden_size: int, dtype: torch.dtype, enable_fusion: bool): current_platform.seed_everything(0) device = torch.device(f"cuda:{local_rank}") @@ -127,26 +241,39 @@ def sequence_parallelism_pass_on_test_model(local_rank: int, world_size: int, # configure vllm config for SequenceParallelismPass vllm_config = VllmConfig() vllm_config.compilation_config = CompilationConfig(pass_config=PassConfig( - enable_sequence_parallelism=True)) + enable_sequence_parallelism=True, + enable_fusion=enable_fusion, + enable_noop=True)) # NoOp needed for fusion vllm_config.device_config = DeviceConfig(device=torch.device("cuda")) # this is a fake model name to construct the model config # in the vllm_config, it's not really used. - model = "nm-testing/TinyLlama-1.1B-Chat-v1.0-FP8-e2e" - vllm_config.model_config = ModelConfig(model=model, + model_name = "nm-testing/TinyLlama-1.1B-Chat-v1.0-FP8-e2e" + vllm_config.model_config = ModelConfig(model=model_name, task="auto", - tokenizer=model, + tokenizer=model_name, tokenizer_mode="auto", trust_remote_code=True, dtype=dtype, seed=42) sequence_parallelism_pass = SequenceParallelismPass(vllm_config) - backend_no_func = TestBackend(sequence_parallelism_pass) + noop_pass = NoOpEliminationPass(vllm_config) func_pass = FixFunctionalizationPass(vllm_config) - backend_func = TestBackend(sequence_parallelism_pass, func_pass) - model = TestModel(hidden_size, hidden_size * 2) + passes_for_backend = [noop_pass, sequence_parallelism_pass] + + if enable_fusion: + fusion_pass = FusionPass.instance(vllm_config) + passes_for_backend.append(fusion_pass) + + backend_no_func = TestBackend(*passes_for_backend) + backend_func = TestBackend(*passes_for_backend, func_pass) + + model = test_model_cls(hidden_size, + hidden_size * 2, + vllm_config=vllm_config) + hidden_states = torch.randn((batch_size * seq_len, hidden_size), dtype=dtype) residual = torch.randn((batch_size * seq_len, hidden_size), dtype=dtype) diff --git a/tests/distributed/test_sequence_parallel.py b/tests/distributed/test_sequence_parallel.py index 91a594eac5c42..b2f6a8ab9dd31 100644 --- a/tests/distributed/test_sequence_parallel.py +++ b/tests/distributed/test_sequence_parallel.py @@ -28,7 +28,7 @@ VLLM_MULTI_NODE = os.getenv("VLLM_MULTI_NODE", "0") == "1" class ParallelSetup(NamedTuple): tp_size: int pp_size: int - sp_enabled: bool + enable_fusion: bool eager_mode: bool chunked_prefill: bool @@ -67,49 +67,18 @@ class SPTestSettings: task: TaskOption = "auto", load_format: Optional[str] = None, ): + parallel_setups = [] + for eager_mode_val in [False, True]: + for pp_multiplier in [1, 2]: + for chunked_prefill_val in [False, True]: + parallel_setups.append( + ParallelSetup(tp_size=tp_base, + pp_size=pp_multiplier * pp_base, + enable_fusion=False, + eager_mode=eager_mode_val, + chunked_prefill=chunked_prefill_val)) return SPTestSettings( - parallel_setups=[ - ParallelSetup(tp_size=tp_base, - pp_size=pp_base, - sp_enabled=True, - eager_mode=False, - chunked_prefill=False), - ParallelSetup(tp_size=tp_base, - pp_size=pp_base, - sp_enabled=True, - eager_mode=False, - chunked_prefill=True), - ParallelSetup(tp_size=tp_base, - pp_size=pp_base, - sp_enabled=True, - eager_mode=True, - chunked_prefill=False), - ParallelSetup(tp_size=tp_base, - pp_size=pp_base, - sp_enabled=True, - eager_mode=True, - chunked_prefill=True), - ParallelSetup(tp_size=tp_base, - pp_size=2 * pp_base, - sp_enabled=True, - eager_mode=False, - chunked_prefill=False), - ParallelSetup(tp_size=tp_base, - pp_size=2 * pp_base, - sp_enabled=True, - eager_mode=False, - chunked_prefill=True), - ParallelSetup(tp_size=tp_base, - pp_size=2 * pp_base, - sp_enabled=True, - eager_mode=True, - chunked_prefill=False), - ParallelSetup(tp_size=tp_base, - pp_size=2 * pp_base, - sp_enabled=True, - eager_mode=True, - chunked_prefill=True) - ], + parallel_setups=parallel_setups, distributed_backends=["mp", "ray"], vllm_major_versions=["1", "1"], task=task, @@ -126,19 +95,44 @@ class SPTestSettings: multi_node_only: bool = False, load_format: Optional[str] = None, ): + parallel_setups = [] + for eager_mode_val in [False, True]: + for pp_multiplier in [1, 2]: + for chunked_prefill_val in [False, True]: + parallel_setups.append( + ParallelSetup(tp_size=tp_base, + pp_size=pp_multiplier * pp_base, + enable_fusion=False, + eager_mode=eager_mode_val, + chunked_prefill=chunked_prefill_val)) return SPTestSettings( - parallel_setups=[ + parallel_setups=parallel_setups, + distributed_backends=["mp", "ray"], + vllm_major_versions=["1", "1"], + task=task, + test_options=SPTestOptions(multi_node_only=multi_node_only, + load_format=load_format), + ) + + @staticmethod + def fp8_quant( + *, + tp_base: int = 2, + pp_base: int = 1, + task: TaskOption = "auto", + multi_node_only: bool = False, + load_format: Optional[str] = None, + ): + parallel_setups = [] + for fusion_val in [False, True]: + parallel_setups.append( ParallelSetup(tp_size=tp_base, pp_size=pp_base, - sp_enabled=True, - eager_mode=False, - chunked_prefill=False), - ParallelSetup(tp_size=tp_base, - pp_size=2 * pp_base, - sp_enabled=True, - eager_mode=False, - chunked_prefill=False), - ], + enable_fusion=fusion_val, + eager_mode=True, + chunked_prefill=False)) + return SPTestSettings( + parallel_setups=parallel_setups, distributed_backends=["mp", "ray"], vllm_major_versions=["1", "1"], task=task, @@ -171,7 +165,7 @@ def _compare_sp( ( tp_size, pp_size, - sp_enabled, + enable_fusion, eager_mode, chunked_prefill, ) = parallel_setup @@ -240,9 +234,9 @@ def _compare_sp( 'compile_sizes': [4, 8], 'splitting_ops': [], 'pass_config': { - 'enable_sequence_parallelism': sp_enabled, + 'enable_sequence_parallelism': True, + 'enable_fusion': enable_fusion, 'enable_noop': True, - 'enable_fusion': True, }, } @@ -291,12 +285,14 @@ def _compare_sp( SP_TEXT_GENERATION_MODELS = { # [Decoder-only] "meta-llama/Llama-3.2-1B-Instruct": SPTestSettings.fast(), + "RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8": SPTestSettings.fp8_quant(), } SP_TEST_MODELS = [ # TODO support other models # [LANGUAGE GENERATION] "meta-llama/Llama-3.2-1B-Instruct", + "RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8" ] diff --git a/tests/models/registry.py b/tests/models/registry.py index 49510af880cf5..4a587e39ad4cd 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -193,7 +193,8 @@ _TEXT_GENERATION_EXAMPLE_MODELS = { extras={"tiny": "ai21labs/Jamba-tiny-dev"}), # noqa: E501 "LlamaForCausalLM": _HfExamplesInfo("meta-llama/Llama-3.2-1B-Instruct", extras={"guard": "meta-llama/Llama-Guard-3-1B", # noqa: E501 - "hermes": "NousResearch/Hermes-3-Llama-3.1-8B"}), # noqa: E501 + "hermes": "NousResearch/Hermes-3-Llama-3.1-8B", # noqa: E501 + "fp8": "RedHatAI/Meta-Llama-3.1-8B-Instruct-FP8"}), # noqa: E501 "LLaMAForCausalLM": _HfExamplesInfo("decapoda-research/llama-7b-hf", is_available_online=False), "MambaForCausalLM": _HfExamplesInfo("state-spaces/mamba-130m-hf"), diff --git a/vllm/compilation/fusion.py b/vllm/compilation/fusion.py index 9d908fcae3dfd..951a2861e3a40 100644 --- a/vllm/compilation/fusion.py +++ b/vllm/compilation/fusion.py @@ -345,8 +345,8 @@ class FusedAddRMSNormStaticQuantPattern(RMSNormQuantPattern): # 0 is always None fused_return_mapping = {1: (quant_node, 1), 2: (rms_node, 2)} self.insert_fused_node(fused_return_mapping, - epsilon=rms_node.kwargs["epsilon"], - **kwargs) + **kwargs, + epsilon=rms_node.kwargs["epsilon"]) class RMSNormDynamicQuantPattern(RMSNormQuantPattern): diff --git a/vllm/compilation/pass_manager.py b/vllm/compilation/pass_manager.py index 28a59905ecf86..3ce00e3610c56 100644 --- a/vllm/compilation/pass_manager.py +++ b/vllm/compilation/pass_manager.py @@ -51,15 +51,15 @@ class PostGradPassManager(CustomGraphPass): if self.pass_config.enable_noop: self.passes += [NoOpEliminationPass(config)] - if self.pass_config.enable_fusion: - self.passes += [FusionPass.instance(config)] - self.passes += [ActivationQuantFusionPass(config)] - if self.pass_config.enable_sequence_parallelism: self.passes += [SequenceParallelismPass(config)] if self.pass_config.enable_async_tp: self.passes += [AsyncTPPass(config)] + if self.pass_config.enable_fusion: + self.passes += [FusionPass.instance(config)] + self.passes += [ActivationQuantFusionPass(config)] + if self.pass_config.enable_attn_fusion: self.passes += [AttnFusionPass(config)] diff --git a/vllm/compilation/sequence_parallelism.py b/vllm/compilation/sequence_parallelism.py index d41093903480b..6107046e40dcd 100644 --- a/vllm/compilation/sequence_parallelism.py +++ b/vllm/compilation/sequence_parallelism.py @@ -12,91 +12,142 @@ from vllm.distributed import get_tp_group, tensor_model_parallel_all_reduce from vllm.distributed.parallel_state import ( get_tensor_model_parallel_world_size) from vllm.logger import init_logger +from vllm.platforms import current_platform from .vllm_inductor_pass import VllmInductorPass logger = init_logger(__name__) -class AllReduceRMSNormPattern: +class _RMSNormAndQuantOpHelper: + """Base helper for RMSNorm and RMSNorm + Quantization functionalization.""" - def __init__(self, epsilon: float, dtype: torch.dtype, device: str): + def __init__(self, + epsilon: float, + dtype: torch.dtype, + device: str, + quant_op: Optional[torch._ops.OpOverload] = None, + **kwargs): self.epsilon = epsilon self.dtype = dtype self.device = device + self.quant_op = quant_op + + def _functional_rmsnorm(self, result_buffer, input_tensor, weight_tensor): + return torch.ops.higher_order.auto_functionalized( + torch.ops._C.rms_norm.default, + result=result_buffer, + input=input_tensor, + weight=weight_tensor, + epsilon=self.epsilon) + + def _functional_fused_add_rmsnorm(self, input_tensor, residual_tensor, + weight_tensor): + return torch.ops.higher_order.auto_functionalized( + torch.ops._C.fused_add_rms_norm.default, + input=input_tensor, + residual=residual_tensor, + weight=weight_tensor, + epsilon=self.epsilon) + + def _functional_rmsnorm_then_quant(self, rmsnorm_result_buffer, + quant_result_buffer, input_tensor, + weight_tensor, scale_tensor): + if self.quant_op is None: + raise RuntimeError( + "_RMSNormAndQuantOpHelper was not initialized with a quant_op." + ) + rmsnorm_out_tuple = self._functional_rmsnorm(rmsnorm_result_buffer, + input_tensor, + weight_tensor) + quant_out_tuple = torch.ops.higher_order.auto_functionalized( + self.quant_op, + result=quant_result_buffer, + input=rmsnorm_out_tuple[1], + scale=scale_tensor) + return quant_out_tuple + + def _functional_fused_add_rmsnorm_then_quant(self, quant_result_buffer, + input_tensor, residual_tensor, + weight_tensor, scale_tensor): + if self.quant_op is None: + raise RuntimeError( + "_RMSNormAndQuantOpHelper was not initialized with a quant_op." + ) + fused_add_rmsnorm_out_tuple = self._functional_fused_add_rmsnorm( + input_tensor, residual_tensor, weight_tensor) + quant_out_tuple = torch.ops.higher_order.auto_functionalized( + self.quant_op, + result=quant_result_buffer, + input=fused_add_rmsnorm_out_tuple[1], + scale=scale_tensor) + return quant_out_tuple, fused_add_rmsnorm_out_tuple[2] -class EmbeddingAllReduceRMSNormPattern(AllReduceRMSNormPattern): +class _SequenceParallelPatternHelper(_RMSNormAndQuantOpHelper): + """Helper for sequence parallelism patterns.""" + + def __init__(self, + epsilon: float, + dtype: torch.dtype, + device: str, + quant_op: Optional[torch._ops.OpOverload] = None, + **kwargs): + super().__init__(epsilon, dtype, device, quant_op=quant_op, **kwargs) + self.tp_group = get_tp_group() + self.tp_size = get_tensor_model_parallel_world_size() + + def _all_reduce(self, x: torch.Tensor) -> torch.Tensor: + return tensor_model_parallel_all_reduce(x) + + def _reduce_scatter(self, x: torch.Tensor) -> torch.Tensor: + return torch.ops.vllm.reduce_scatter.default( + x, + dim=0, + world_size=self.tp_size, + group_name=self.tp_group.unique_name) + + def _all_gather(self, x: torch.Tensor) -> torch.Tensor: + return torch.ops.vllm.all_gather.default( + x, + dim=0, + world_size=self.tp_size, + group_name=self.tp_group.unique_name) + + +class FirstAllReduceRMSNormPattern(_SequenceParallelPatternHelper): def get_inputs(self): - arg2_1 = torch.empty([16, 4], device=self.device, dtype=self.dtype) - mul_6 = torch.tensor([[3, 7, 1, 4, 9, 2, 5, 0]], - device=self.device, - dtype=torch.long) - unsqueeze = torch.rand([1, 8, 1], device=self.device, \ - dtype=self.dtype) > 0.5 - full_default = torch.zeros([1, 8, 4], device=self.device, \ - dtype=self.dtype) + input = torch.empty([1, 8, 4], device=self.device, dtype=self.dtype) permute = torch.empty([1, 8, 4], device=self.device, dtype=self.dtype) arg3_1 = torch.empty([4], device=self.device, dtype=self.dtype) - return [arg2_1, mul_6, unsqueeze, full_default, permute, arg3_1] + return [input, permute, arg3_1] def register(self, pm_pass: PatternMatcherPass): def pattern( - arg2_1: torch.Tensor, - mul_6: torch.Tensor, - unsqueeze: torch.Tensor, - full_default: torch.Tensor, + input: torch.Tensor, permute: torch.Tensor, arg3_1: torch.Tensor, ): - embedding = torch.ops.aten.embedding.default(arg2_1, mul_6) - where = torch.ops.aten.where.self(unsqueeze, full_default, - embedding) - all_reduce = tensor_model_parallel_all_reduce(where) - rmsnorm = torch.ops.higher_order.auto_functionalized( - torch.ops._C.rms_norm.default, - result=permute, - input=all_reduce, - weight=arg3_1, - epsilon=self.epsilon, - ) + all_reduce = self._all_reduce(input) + rmsnorm = self._functional_rmsnorm(permute, all_reduce, arg3_1) return rmsnorm[1], all_reduce def replacement( - arg2_1: torch.Tensor, - mul_6: torch.Tensor, - unsqueeze: torch.Tensor, - full_default: torch.Tensor, + input: torch.Tensor, permute: torch.Tensor, arg3_1: torch.Tensor, ): - embedding = torch.ops.aten.embedding.default(arg2_1, mul_6) - where = torch.ops.aten.where.self(unsqueeze, full_default, - embedding) - - tp = get_tp_group() - tp_size = get_tensor_model_parallel_world_size() - reduce_scatter = torch.ops.vllm.reduce_scatter.default( - where, dim=0, world_size=tp_size, group_name=tp.unique_name) + reduce_scatter = self._reduce_scatter(input) rmsnorm_result = torch.empty_like(reduce_scatter) - rmsnorm = torch.ops.higher_order.auto_functionalized( - torch.ops._C.rms_norm.default, - result=rmsnorm_result, - input=reduce_scatter, - weight=arg3_1, - epsilon=self.epsilon, - ) + rmsnorm = self._functional_rmsnorm(rmsnorm_result, reduce_scatter, + arg3_1) - all_gather = torch.ops.vllm.all_gather.default( - rmsnorm[1], - dim=0, - world_size=tp_size, - group_name=tp.unique_name) + all_gather = self._all_gather(rmsnorm[1]) return all_gather, reduce_scatter @@ -104,7 +155,7 @@ class EmbeddingAllReduceRMSNormPattern(AllReduceRMSNormPattern): pm.fwd_only, pm_pass) -class MiddleAllReduceRMSNormPattern(AllReduceRMSNormPattern): +class MiddleAllReduceRMSNormPattern(_SequenceParallelPatternHelper): def get_inputs(self): mm_1 = torch.empty([4, 4], device=self.device, dtype=self.dtype) @@ -127,16 +178,9 @@ class MiddleAllReduceRMSNormPattern(AllReduceRMSNormPattern): mm_1: torch.Tensor, rms_norm_weights: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: - all_reduce = tensor_model_parallel_all_reduce(mm_1) - - rmsnorm = torch.ops.higher_order.auto_functionalized( - torch.ops._C.fused_add_rms_norm.default, - input=all_reduce, - residual=residual, - weight=rms_norm_weights, - epsilon=self.epsilon, - ) - + all_reduce = self._all_reduce(mm_1) + rmsnorm = self._functional_fused_add_rmsnorm( + all_reduce, residual, rms_norm_weights) return rmsnorm[1], rmsnorm[2] def replacement( @@ -144,32 +188,17 @@ class MiddleAllReduceRMSNormPattern(AllReduceRMSNormPattern): mm_1: torch.Tensor, rms_norm_weights: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: - tp = get_tp_group() - tp_size = get_tensor_model_parallel_world_size() - reduce_scatter = torch.ops.vllm.reduce_scatter.default( - mm_1, dim=0, world_size=tp_size, group_name=tp.unique_name) - - # TODO is it possible to extract epsilon from somewhere - rmsnorm = torch.ops.higher_order.auto_functionalized( - torch.ops._C.fused_add_rms_norm.default, - input=reduce_scatter, - residual=residual, - weight=rms_norm_weights, - epsilon=self.epsilon, - ) - - all_gather = torch.ops.vllm.all_gather.default( - rmsnorm[1], - dim=0, - world_size=tp_size, - group_name=tp.unique_name) + reduce_scatter = self._reduce_scatter(mm_1) + rmsnorm = self._functional_fused_add_rmsnorm( + reduce_scatter, residual, rms_norm_weights) + all_gather = self._all_gather(rmsnorm[1]) return all_gather, rmsnorm[2] pm.register_replacement(pattern, replacement, self.get_inputs(), pm.fwd_only, pm_pass) -class LastAllReduceRMSNormPattern(AllReduceRMSNormPattern): +class LastAllReduceRMSNormPattern(_SequenceParallelPatternHelper): def get_inputs(self): mm_1 = torch.empty([4, 4], device=self.device, dtype=self.dtype) @@ -192,16 +221,9 @@ class LastAllReduceRMSNormPattern(AllReduceRMSNormPattern): mm_1: torch.Tensor, rms_norm_weights: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: - all_reduce = tensor_model_parallel_all_reduce(mm_1) - - rmsnorm = torch.ops.higher_order.auto_functionalized( - torch.ops._C.fused_add_rms_norm.default, - input=all_reduce, - residual=residual, - weight=rms_norm_weights, - epsilon=self.epsilon, - ) - + all_reduce = self._all_reduce(mm_1) + rmsnorm = self._functional_fused_add_rmsnorm( + all_reduce, residual, rms_norm_weights) return rmsnorm[1] def replacement( @@ -209,26 +231,185 @@ class LastAllReduceRMSNormPattern(AllReduceRMSNormPattern): mm_1: torch.Tensor, rms_norm_weights: torch.Tensor, ) -> tuple[torch.Tensor, torch.Tensor]: - tp = get_tp_group() - tp_size = get_tensor_model_parallel_world_size() - reduce_scatter = torch.ops.vllm.reduce_scatter.default( - mm_1, dim=0, world_size=tp_size, group_name=tp.unique_name) + reduce_scatter = self._reduce_scatter(mm_1) + rmsnorm = self._functional_fused_add_rmsnorm( + reduce_scatter, residual, rms_norm_weights) + normalized = self._all_gather(rmsnorm[1]) + return normalized - # TODO is it possible to extract epsilon from somewhere - rmsnorm = torch.ops.higher_order.auto_functionalized( - torch.ops._C.fused_add_rms_norm.default, - input=reduce_scatter, - residual=residual, - weight=rms_norm_weights, - epsilon=self.epsilon, - ) + pm.register_replacement(pattern, replacement, self.get_inputs(), + pm.fwd_only, pm_pass) - normalized = torch.ops.vllm.all_gather.default( - rmsnorm[1], - dim=0, - world_size=tp_size, - group_name=tp.unique_name) +FP8_DTYPE = current_platform.fp8_dtype() + + +class FirstAllReduceRMSNormStaticFP8Pattern(_SequenceParallelPatternHelper): + + def __init__(self, epsilon: float, dtype: torch.dtype, device: str, + op: torch._ops.OpOverload): + super().__init__(epsilon, dtype, device, quant_op=op) + + def get_inputs(self): + input = torch.zeros([1, 8, 4], device=self.device, dtype=self.dtype) + rmsnorm_result = torch.empty([1, 8, 4], + device=self.device, + dtype=self.dtype) + quant_result = torch.empty([1, 8, 4], + device=self.device, + dtype=FP8_DTYPE) + weight = torch.empty([4], device=self.device, dtype=self.dtype) + scale = torch.tensor(1.0, device=self.device, dtype=torch.float32) + return [input, rmsnorm_result, quant_result, weight, scale] + + def register(self, pm_pass: PatternMatcherPass): + + def pattern( + input: torch.Tensor, + rmsnorm_result: torch.Tensor, + quant_result: torch.Tensor, + weight: torch.Tensor, + scale: torch.Tensor, + ): + all_reduce = self._all_reduce(input) + static_fp8 = self._functional_rmsnorm_then_quant( + rmsnorm_result, quant_result, all_reduce, weight, scale) + return static_fp8[1], all_reduce + + def replacement( + input: torch.Tensor, + rmsnorm_result: torch.Tensor, + quant_result: torch.Tensor, + weight: torch.Tensor, + scale: torch.Tensor, + ): + reduce_scatter = self._reduce_scatter(input) + + rmsnorm_result = torch.empty_like(reduce_scatter, + dtype=rmsnorm_result.dtype) + quant_result = torch.empty_like( + rmsnorm_result, # Output of RMSNorm + dtype=quant_result.dtype) + static_fp8 = self._functional_rmsnorm_then_quant( + rmsnorm_result, quant_result, reduce_scatter, weight, scale) + all_gather = self._all_gather(static_fp8[1]) + + return all_gather, reduce_scatter + + pm.register_replacement(pattern, replacement, self.get_inputs(), + pm.fwd_only, pm_pass) + + +class MiddleAllReduceRMSNormStaticFP8Pattern(_SequenceParallelPatternHelper): + + def __init__(self, epsilon: float, dtype: torch.dtype, device: str, + op: torch._ops.OpOverload): + super().__init__(epsilon, dtype, device, quant_op=op) + + def get_inputs(self): + mm_1 = torch.empty([4, 4], device=self.device, dtype=self.dtype) + + residual = torch.empty([4, 4], device=self.device, dtype=self.dtype) + rms_norm_weights = torch.empty([4, 4], + device=self.device, + dtype=self.dtype) + result = torch.empty([4, 4], device=self.device, dtype=FP8_DTYPE) + scale = torch.empty([1, 1], device=self.device, dtype=torch.float32) + + return [ + result, + residual, + mm_1, + rms_norm_weights, + scale, + ] + + def register(self, pm_pass: PatternMatcherPass): + + def pattern( + result: torch.Tensor, + residual: torch.Tensor, + mm_1: torch.Tensor, + rms_norm_weights: torch.Tensor, + scale: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor]: + all_reduce = self._all_reduce(mm_1) + static_fp8, rmsnorm_residual_out = self._functional_fused_add_rmsnorm_then_quant( # noqa: E501 + result, all_reduce, residual, rms_norm_weights, scale) + return static_fp8[1], rmsnorm_residual_out + + def replacement( + result: torch.Tensor, + residual: torch.Tensor, + mm_1: torch.Tensor, + rms_norm_weights: torch.Tensor, + scale: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor]: + reduce_scatter = self._reduce_scatter(mm_1) + quant_result_buf = torch.empty_like(reduce_scatter, + dtype=result.dtype) + static_fp8, rmsnorm_residual_out = self._functional_fused_add_rmsnorm_then_quant( # noqa: E501 + quant_result_buf, reduce_scatter, residual, rms_norm_weights, + scale) + all_gather = self._all_gather(static_fp8[1]) + return all_gather, rmsnorm_residual_out + + pm.register_replacement(pattern, replacement, self.get_inputs(), + pm.fwd_only, pm_pass) + + +class LastAllReduceRMSNormStaticFP8Pattern(_SequenceParallelPatternHelper): + + def __init__(self, epsilon: float, dtype: torch.dtype, device: str, + op: torch._ops.OpOverload): + super().__init__(epsilon, dtype, device, quant_op=op) + + def get_inputs(self): + mm_1 = torch.empty([4, 4], device=self.device, dtype=self.dtype) + + residual = torch.empty([4, 4], device=self.device, dtype=self.dtype) + rms_norm_weights = torch.empty([4, 4], + device=self.device, + dtype=self.dtype) + result = torch.empty([4, 4], device=self.device, dtype=FP8_DTYPE) + scale = torch.empty([1, 1], device=self.device, dtype=torch.float32) + + return [ + result, + residual, + mm_1, + rms_norm_weights, + scale, + ] + + def register(self, pm_pass: PatternMatcherPass): + + def pattern( + result: torch.Tensor, + residual: torch.Tensor, + mm_1: torch.Tensor, + rms_norm_weights: torch.Tensor, + scale: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor]: + all_reduce = self._all_reduce(mm_1) + static_fp8, _ = self._functional_fused_add_rmsnorm_then_quant( + result, all_reduce, residual, rms_norm_weights, scale) + return static_fp8[1] + + def replacement( + result: torch.Tensor, + residual: torch.Tensor, + mm_1: torch.Tensor, + rms_norm_weights: torch.Tensor, + scale: torch.Tensor, + ) -> tuple[torch.Tensor, torch.Tensor]: + reduce_scatter = self._reduce_scatter(mm_1) + quant_result_buf = torch.empty_like(reduce_scatter, + dtype=result.dtype) + static_fp8, _ = self._functional_fused_add_rmsnorm_then_quant( + quant_result_buf, reduce_scatter, residual, rms_norm_weights, + scale) + normalized = self._all_gather(static_fp8[1]) return normalized pm.register_replacement(pattern, replacement, self.get_inputs(), @@ -236,21 +417,54 @@ class LastAllReduceRMSNormPattern(AllReduceRMSNormPattern): class SequenceParallelismPass(VllmInductorPass): + """ + This pass enables sequence parallelism for models. + It identifies patterns where an AllReduce operation is followed by + an RMSNorm (or RMSNorm and then Quantization) operation. + These patterns are replaced with a ReduceScatter operation, followed by + a local RMSNorm/Quantization, and then an AllGather operation. + + The general transformation is: + Input -> AllReduce -> RMSNorm -> Output + becomes + Input -> ReduceScatter -> RMSNorm -> AllGather -> Output + + While this pass itself does not directly yield performance improvements, + it lays the groundwork for subsequent fusion passes, such as + GEMM + ReduceScatter and AllGather + GEMM fusions. These fusions can + significantly reduce communication overhead and improve overall model + performance. + """ def __init__(self, config: VllmConfig): super().__init__(config) self.patterns: PatternMatcherPass = PatternMatcherPass( pass_name="sequence_parallelism_pass") + for epsilon in [1e-5, 1e-6]: - EmbeddingAllReduceRMSNormPattern( - epsilon, self.model_dtype, self.device).register(self.patterns) + # RMSNorm + Static FP8 quantization patterns + fp8_quant_op = torch.ops._C.static_scaled_fp8_quant.default + FirstAllReduceRMSNormStaticFP8Pattern( + epsilon, self.model_dtype, self.device, + fp8_quant_op).register(self.patterns) + MiddleAllReduceRMSNormStaticFP8Pattern( + epsilon, self.model_dtype, self.device, + fp8_quant_op).register(self.patterns) + LastAllReduceRMSNormStaticFP8Pattern( + epsilon, self.model_dtype, self.device, + fp8_quant_op).register(self.patterns) + + # Normal RMSNorm patterns + FirstAllReduceRMSNormPattern(epsilon, self.model_dtype, + self.device).register(self.patterns) MiddleAllReduceRMSNormPattern(epsilon, self.model_dtype, self.device).register(self.patterns) LastAllReduceRMSNormPattern(epsilon, self.model_dtype, self.device).register(self.patterns) + # WARNING: This is a hack to clear the pattern matcher cache # and allow multiple values of epsilon. torch._inductor.pattern_matcher._seen_patterns.clear() diff --git a/vllm/config.py b/vllm/config.py index 7549c97b4fec4..4333dcd3b8af9 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -3802,11 +3802,11 @@ class PassConfig: its own stages (before, after, maybe in-between).""" dump_graph_dir: Path = Path(".") """Directory to dump the graphs.""" - enable_fusion: bool = True + enable_fusion: bool = field(default_factory=lambda: not envs.VLLM_USE_V1) """Whether to enable the custom fusion (RMSNorm/SiluMul+quant) pass.""" enable_attn_fusion: bool = False """Whether to enable the custom attention+quant fusion pass.""" - enable_noop: bool = True + enable_noop: bool = field(default_factory=lambda: not envs.VLLM_USE_V1) """Whether to enable the custom no-op elimination pass.""" enable_sequence_parallelism: bool = False """Whether to enable sequence parallelism.""" @@ -4451,8 +4451,6 @@ class VllmConfig: # By default, V1 uses piecewise CUDA graphs. If full_cuda_graph # is set to True, full CUDA graphs will be used. self.compilation_config.cudagraph_num_of_warmups = 1 - self.compilation_config.pass_config.enable_fusion = False - self.compilation_config.pass_config.enable_noop = False self.compilation_config.level = CompilationLevel.PIECEWISE self.compilation_config.set_splitting_ops_for_v1() From a3bc76e4b565fd14f13710c696fe47787b008f1f Mon Sep 17 00:00:00 2001 From: 22quinn <33176974+22quinn@users.noreply.github.com> Date: Mon, 23 Jun 2025 14:15:37 -0700 Subject: [PATCH 097/211] [CI/Build] Push latest tag for cpu and neuron docker image (#19897) Signed-off-by: 22quinn <33176974+22quinn@users.noreply.github.com> --- .buildkite/release-pipeline.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.buildkite/release-pipeline.yaml b/.buildkite/release-pipeline.yaml index 16b5ad0297fe7..55678b8936e04 100644 --- a/.buildkite/release-pipeline.yaml +++ b/.buildkite/release-pipeline.yaml @@ -102,6 +102,7 @@ steps: commands: - "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7" - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:$(buildkite-agent meta-data get release-version) --tag public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:latest --progress plain --target vllm-openai -f docker/Dockerfile.cpu ." + - "docker push public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:latest" - "docker push public.ecr.aws/q9t5s3a7/vllm-cpu-release-repo:$(buildkite-agent meta-data get release-version)" env: DOCKER_BUILDKIT: "1" @@ -117,6 +118,7 @@ steps: commands: - "aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws/q9t5s3a7" - "DOCKER_BUILDKIT=1 docker build --build-arg max_jobs=16 --build-arg GIT_REPO_CHECK=1 --tag public.ecr.aws/q9t5s3a7/vllm-neuron-release-repo:$(buildkite-agent meta-data get release-version) --tag public.ecr.aws/q9t5s3a7/vllm-neuron-release-repo:latest --progress plain -f docker/Dockerfile.neuron ." + - "docker push public.ecr.aws/q9t5s3a7/vllm-neuron-release-repo:latest" - "docker push public.ecr.aws/q9t5s3a7/vllm-neuron-release-repo:$(buildkite-agent meta-data get release-version)" env: DOCKER_BUILDKIT: "1" From dd2ccf8ddead5cdb46613b2adeb2edcc77f426c0 Mon Sep 17 00:00:00 2001 From: Jun-Howie <62869005+Jun-Howie@users.noreply.github.com> Date: Tue, 24 Jun 2025 06:23:28 +0800 Subject: [PATCH 098/211] Feat Dynamic Quantization for MoE Layers in GPTQ Marlin Backend (#19395) --- .../layers/quantization/gptq_marlin.py | 32 +++++++++++++++++-- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/vllm/model_executor/layers/quantization/gptq_marlin.py b/vllm/model_executor/layers/quantization/gptq_marlin.py index f92ebdea986da..e9b8dc3266b4a 100644 --- a/vllm/model_executor/layers/quantization/gptq_marlin.py +++ b/vllm/model_executor/layers/quantization/gptq_marlin.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from copy import deepcopy from typing import Any, Callable, Optional, Union import torch @@ -9,7 +10,8 @@ import vllm.model_executor.layers.fused_moe # noqa from vllm import _custom_ops as ops from vllm.logger import init_logger from vllm.model_executor.layers.fused_moe.layer import ( - FusedMoE, FusedMoEMethodBase, FusedMoeWeightScaleSupported) + FusedMoE, FusedMoEMethodBase, FusedMoeWeightScaleSupported, + UnquantizedFusedMoEMethod) from vllm.model_executor.layers.linear import (LinearMethodBase, set_weight_attrs) from vllm.model_executor.layers.quantization import QuantizationMethods @@ -19,7 +21,7 @@ from vllm.model_executor.layers.quantization.kernels.mixed_precision import ( MPLinearLayerConfig, choose_mp_linear_kernel) from vllm.model_executor.layers.quantization.utils import replace_parameter from vllm.model_executor.layers.quantization.utils.gptq_utils import ( - get_linear_quant_method) + get_dynamic_override, get_linear_quant_method, override_config) from vllm.model_executor.layers.quantization.utils.marlin_utils import ( check_marlin_supported, check_moe_marlin_supports_layer, marlin_make_workspace_new, marlin_moe_permute_scales, @@ -35,6 +37,29 @@ from vllm.scalar_type import scalar_types logger = init_logger(__name__) +def get_moe_quant_method( + config: QuantizationConfig, + layer: torch.nn.Module, + prefix: str, + moe_method_cls: type, +): + cloned_config = deepcopy(config) + + if isinstance(layer, FusedMoE): + # False = skip module, None = no override, else = Positive match + if get_dynamic_override( # noqa: E712 + cloned_config, # noqa: E712 + layer_name=prefix) == False: # noqa: E712 + return UnquantizedFusedMoEMethod(layer.moe_config) + + if prefix: + # Dynamic per module/layer rules may override base config + override_config(cloned_config, prefix=prefix) + + return moe_method_cls(cloned_config) + return None + + class GPTQMarlinConfig(QuantizationConfig): """Config class for GPTQ Marlin""" @@ -163,7 +188,8 @@ class GPTQMarlinConfig(QuantizationConfig): "Falling back to Moe WNA16 kernels.") return MoeWNA16Config.from_config( self.full_config).get_quant_method(layer, prefix) - return GPTQMarlinMoEMethod(self) + return get_moe_quant_method(self, layer, prefix, + GPTQMarlinMoEMethod) return get_linear_quant_method(self, layer, prefix, GPTQMarlinLinearMethod) From 4671ac6e2ae9975ac305f767fc21e47d6ffde448 Mon Sep 17 00:00:00 2001 From: 22quinn <33176974+22quinn@users.noreply.github.com> Date: Mon, 23 Jun 2025 15:25:12 -0700 Subject: [PATCH 099/211] [Bugfix][Benchmark] Fix Marlin benchmark (#19929) --- benchmarks/kernels/benchmark_marlin.py | 245 ++++++++++++++++--------- 1 file changed, 158 insertions(+), 87 deletions(-) diff --git a/benchmarks/kernels/benchmark_marlin.py b/benchmarks/kernels/benchmark_marlin.py index 9ea1fddae2a3b..34cc45e94d76d 100644 --- a/benchmarks/kernels/benchmark_marlin.py +++ b/benchmarks/kernels/benchmark_marlin.py @@ -22,8 +22,16 @@ from vllm.model_executor.layers.quantization.utils.marlin_utils import ( MARLIN_SUPPORTED_GROUP_SIZES, query_marlin_supported_quant_types, ) +from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import ( + FP4_MARLIN_SUPPORTED_GROUP_SIZES, + rand_marlin_weight_fp4_like, +) +from vllm.model_executor.layers.quantization.utils.marlin_utils_fp8 import ( + marlin_quant_fp8_torch, +) from vllm.model_executor.layers.quantization.utils.marlin_utils_test import ( MarlinWorkspace, + awq_marlin_quantize, marlin_quantize, ) from vllm.model_executor.layers.quantization.utils.marlin_utils_test_24 import ( @@ -35,7 +43,7 @@ from vllm.model_executor.layers.quantization.utils.quant_utils import ( quantize_weights, sort_weights, ) -from vllm.scalar_type import ScalarType +from vllm.scalar_type import ScalarType, scalar_types from vllm.utils import FlexibleArgumentParser DEFAULT_MODELS = ["meta-llama/Llama-2-7b-hf/TP1"] @@ -57,80 +65,144 @@ def bench_run( size_n: int, ): label = "Quant Matmul" - sub_label = "{}, act={} k_full={}, q={}, g={}, MKN=({}x{}x{})".format( model, act_order, is_k_full, str(quant_type), group_size, size_m, size_k, size_n ) - print(f"Testing: {sub_label}") a = torch.randn(size_m, size_k).to(torch.half).cuda() b = torch.rand(size_k, size_n).to(torch.half).cuda() + has_zp = quant_type in [scalar_types.uint4, scalar_types.uint8] + if act_order and (group_size == -1 or group_size == size_k or has_zp): + return + if size_k % group_size != 0: + return - a_tmp = torch.zeros(size_m, size_k).to(torch.half).cuda() - - # Marlin quant - ( - marlin_w_ref, - marlin_q_w, - marlin_s, - marlin_g_idx, - marlin_sort_indices, - marlin_rand_perm, - ) = marlin_quantize(b, quant_type, group_size, act_order) - - # Marlin_24 quant - (marlin_24_w_ref, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s) = ( - marlin_24_quantize(b, quant_type, group_size) + marlin_24_supported = ( + quant_type in GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES + and group_size in GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES ) - - marlin_zp = torch.empty(0, dtype=torch.int, device=b.device) - - # GPTQ quant - (w_ref, q_w, s, g_idx, rand_perm) = gptq_quantize_weights( - b, quant_type, group_size, act_order + repack_supported = ( + quant_type in GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES + and group_size in MARLIN_SUPPORTED_GROUP_SIZES ) - q_w_gptq = gptq_pack(q_w, quant_type.size_bits, size_k, size_n) - - # For act_order, sort the "weights" and "g_idx" - # so that group ids are increasing - repack_sort_indices = torch.empty(0, dtype=torch.int, device=b.device) - if act_order: - (q_w, g_idx, repack_sort_indices) = sort_weights(q_w, g_idx) - - # Prepare - marlin_workspace = MarlinWorkspace( - size_n, GPTQ_MARLIN_MIN_THREAD_N, GPTQ_MARLIN_MAX_PARALLEL - ) - - marlin_24_workspace = MarlinWorkspace( - size_n, GPTQ_MARLIN_24_MIN_THREAD_N, GPTQ_MARLIN_24_MAX_PARALLEL - ) - marlin_zp = torch.zeros_like(marlin_s, dtype=torch.int) - - # AllSpark W8A16 quant - as_supported_case = ( + allspark_supported = ( quant_type in ALLSPARK_SUPPORTED_QUANT_TYPES and group_size == -1 and not act_order and is_k_full ) - if as_supported_case: - properties = torch.cuda.get_device_properties(b.device.index) - sm_count = properties.multi_processor_count - sm_version = properties.major * 10 + properties.minor - supported_arch = sm_version >= 80 and sm_version < 90 - as_supported_case = as_supported_case and supported_arch - if supported_arch: - has_zp = False - w_ref, qw, s, zp = quantize_weights(b, quant_type, group_size, has_zp) - qw = qw.to(torch.uint8) - - qw_reorder, s_reorder, zp_reorder = ops.allspark_repack_weight( - qw, s, zp, has_zp + def gen_marlin_params(): + # Marlin quant + marlin_g_idx = marlin_sort_indices = marlin_zp = marlin_s2 = None + if quant_type == scalar_types.float4_e2m1f: + if group_size != 16 or act_order: + return + marlin_w_ref, marlin_q_w, marlin_s, marlin_s2 = rand_marlin_weight_fp4_like( + b.T, group_size ) - CUBLAS_M_THRESHOLD = ALLSPARK_AMPERE_M_CUBLAS_THRESHOLD + elif quant_type == scalar_types.float8_e4m3fn: + if group_size not in [-1, 128] or act_order: + return + marlin_w_ref, marlin_q_w, marlin_s = marlin_quant_fp8_torch(b.T, group_size) + elif group_size == 16: + return + elif has_zp: + marlin_w_ref, marlin_q_w, marlin_s, marlin_zp = awq_marlin_quantize( + b, quant_type, group_size + ) + else: + marlin_w_ref, marlin_q_w, marlin_s, marlin_g_idx, marlin_sort_indices, _ = ( + marlin_quantize(b, quant_type, group_size, act_order) + ) + return ( + marlin_w_ref, + marlin_q_w, + marlin_s, + marlin_s2, + marlin_zp, + marlin_g_idx, + marlin_sort_indices, + ) + + def gen_marlin_24_params(): + marlin_24_w_ref = marlin_24_q_w_comp = marlin_24_meta = marlin_24_s = None + if marlin_24_supported: + (marlin_24_w_ref, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s) = ( + marlin_24_quantize(b, quant_type, group_size) + ) + return (marlin_24_w_ref, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s) + + def gen_repack_params(): + q_w_gptq = None + repack_sort_indices = None + if repack_supported: + (w_ref, q_w, s, g_idx, rand_perm) = gptq_quantize_weights( + b, quant_type, group_size, act_order + ) + q_w_gptq = gptq_pack(q_w, quant_type.size_bits, size_k, size_n) + + # For act_order, sort the "weights" and "g_idx" + # so that group ids are increasing + repack_sort_indices = torch.empty(0, dtype=torch.int, device=b.device) + if act_order: + (q_w, g_idx, repack_sort_indices) = sort_weights(q_w, g_idx) + return q_w_gptq, repack_sort_indices + + def gen_allspark_params(): + qw_reorder = s_reorder = zp_reorder = sm_count = sm_version = ( + CUBLAS_M_THRESHOLD + ) = None + nonlocal allspark_supported + if allspark_supported: + properties = torch.cuda.get_device_properties(b.device.index) + sm_count = properties.multi_processor_count + sm_version = properties.major * 10 + properties.minor + + supported_arch = sm_version >= 80 and sm_version < 90 + allspark_supported = allspark_supported and supported_arch + if supported_arch: + w_ref, qw, s, zp = quantize_weights(b, quant_type, group_size, has_zp) + qw = qw.to(torch.uint8) + + qw_reorder, s_reorder, zp_reorder = ops.allspark_repack_weight( + qw, s, zp, has_zp + ) + CUBLAS_M_THRESHOLD = ALLSPARK_AMPERE_M_CUBLAS_THRESHOLD + return ( + qw_reorder, + s_reorder, + zp_reorder, + sm_count, + sm_version, + CUBLAS_M_THRESHOLD, + ) + + ( + marlin_w_ref, + marlin_q_w, + marlin_s, + marlin_s2, + marlin_zp, + marlin_g_idx, + marlin_sort_indices, + ) = gen_marlin_params() + marlin_24_w_ref, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s = ( + gen_marlin_24_params() + ) + q_w_gptq, repack_sort_indices = gen_repack_params() + qw_reorder, s_reorder, zp_reorder, sm_count, sm_version, CUBLAS_M_THRESHOLD = ( + gen_allspark_params() + ) + + # Prepare + marlin_workspace = MarlinWorkspace( + size_n, GPTQ_MARLIN_MIN_THREAD_N, GPTQ_MARLIN_MAX_PARALLEL + ) + marlin_24_workspace = MarlinWorkspace( + size_n, GPTQ_MARLIN_24_MIN_THREAD_N, GPTQ_MARLIN_24_MAX_PARALLEL + ) globals = { # Gen params @@ -140,15 +212,14 @@ def bench_run( "size_n": size_n, "size_k": size_k, "a": a, - "a_tmp": a_tmp, # Marlin params "marlin_w_ref": marlin_w_ref, "marlin_q_w": marlin_q_w, "marlin_s": marlin_s, + "marlin_s2": marlin_s2, "marlin_zp": marlin_zp, "marlin_g_idx": marlin_g_idx, "marlin_sort_indices": marlin_sort_indices, - "marlin_rand_perm": marlin_rand_perm, "marlin_workspace": marlin_workspace, "is_k_full": is_k_full, # Marlin_24 params @@ -161,12 +232,12 @@ def bench_run( "q_w_gptq": q_w_gptq, "repack_sort_indices": repack_sort_indices, # AllSpark W8A16 params - "qw_reorder": qw_reorder if as_supported_case else None, - "s_reorder": s_reorder if as_supported_case else None, - "zp_reorder": zp_reorder if as_supported_case else None, - "sm_count": sm_count if as_supported_case else None, - "sm_version": sm_version if as_supported_case else None, - "CUBLAS_M_THRESHOLD": CUBLAS_M_THRESHOLD if as_supported_case else None, + "qw_reorder": qw_reorder, + "s_reorder": s_reorder, + "zp_reorder": zp_reorder, + "sm_count": sm_count, + "sm_version": sm_version, + "CUBLAS_M_THRESHOLD": CUBLAS_M_THRESHOLD, # Kernels "gptq_marlin_gemm": ops.gptq_marlin_gemm, "gptq_marlin_24_gemm": ops.gptq_marlin_24_gemm, @@ -177,7 +248,7 @@ def bench_run( min_run_time = 1 # Warmup pytorch - for i in range(5): + for _ in range(5): torch.matmul(a, marlin_w_ref) results.append( @@ -192,17 +263,17 @@ def bench_run( results.append( benchmark.Timer( - stmt="output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, False, False)", # noqa: E501 + stmt="output = gptq_marlin_gemm(a, None, marlin_q_w, marlin_s, marlin_s2, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, False, False)", # noqa: E501 globals=globals, label=label, sub_label=sub_label, - description="gptq_marlin_gemm_fp16", + description="gptq_marlin_gemm", ).blocked_autorange(min_run_time=min_run_time) ) results.append( benchmark.Timer( - stmt="output = gptq_marlin_gemm(a, marlin_q_w, marlin_s, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, True, False)", # noqa: E501 + stmt="output = gptq_marlin_gemm(a, None, marlin_q_w, marlin_s, marlin_s2, marlin_zp, marlin_g_idx, marlin_sort_indices, marlin_workspace.scratch, quant_type, size_m, size_n, size_k, is_k_full, False, True, False)", # noqa: E501 globals=globals, label=label, sub_label=sub_label, @@ -210,10 +281,7 @@ def bench_run( ).blocked_autorange(min_run_time=min_run_time) ) - if ( - quant_type in GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES - and group_size in GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES - ): + if marlin_24_supported: results.append( benchmark.Timer( stmt="output = gptq_marlin_24_gemm(a, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s, marlin_24_workspace.scratch, quant_type, size_m, size_n, size_k)", # noqa: E501 @@ -224,17 +292,18 @@ def bench_run( ).blocked_autorange(min_run_time=min_run_time) ) - results.append( - benchmark.Timer( - stmt="q_res = gptq_marlin_repack(q_w_gptq, repack_sort_indices, size_k, size_n, quant_type.size_bits)", # noqa: E501 - globals=globals, - label=label, - sub_label=sub_label, - description="gptq_marlin_repack", - ).blocked_autorange(min_run_time=min_run_time) - ) + if repack_supported: + results.append( + benchmark.Timer( + stmt="q_res = gptq_marlin_repack(q_w_gptq, repack_sort_indices, size_k, size_n, quant_type.size_bits)", # noqa: E501 + globals=globals, + label=label, + sub_label=sub_label, + description="gptq_marlin_repack", + ).blocked_autorange(min_run_time=min_run_time) + ) - if as_supported_case: + if allspark_supported: results.append( benchmark.Timer( stmt="output = allspark_w8a16_gemm(a, qw_reorder, s_reorder, zp_reorder, size_n, group_size, sm_count, sm_version, CUBLAS_M_THRESHOLD, False, True)", # noqa: E501 @@ -250,7 +319,6 @@ def main(args): print("Benchmarking models:") for i, model in enumerate(args.models): print(f"[{i}] {model}") - results: list[benchmark.Measurement] = [] for model in args.models: @@ -278,14 +346,17 @@ def main(args): ): continue - for quant_type in query_marlin_supported_quant_types(False): + for quant_type in query_marlin_supported_quant_types(): if ( len(args.limit_num_bits) > 0 and quant_type.size_bits not in args.limit_num_bits ): continue - for group_size in MARLIN_SUPPORTED_GROUP_SIZES: + for group_size in ( + MARLIN_SUPPORTED_GROUP_SIZES + + FP4_MARLIN_SUPPORTED_GROUP_SIZES + ): if ( len(args.limit_group_size) > 0 and group_size not in args.limit_group_size From 33d5e29be90834fbac91051f97f853696b2f2ec5 Mon Sep 17 00:00:00 2001 From: Chenyaaang <42742451+Chenyaaang@users.noreply.github.com> Date: Mon, 23 Jun 2025 16:04:28 -0700 Subject: [PATCH 100/211] [TPU] Fix tpu model runner test (#19995) Signed-off-by: Chenyaaang --- tests/v1/tpu/worker/test_tpu_model_runner.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/v1/tpu/worker/test_tpu_model_runner.py b/tests/v1/tpu/worker/test_tpu_model_runner.py index 0e7d305fef9ed..d22ddf5c7e581 100644 --- a/tests/v1/tpu/worker/test_tpu_model_runner.py +++ b/tests/v1/tpu/worker/test_tpu_model_runner.py @@ -6,6 +6,7 @@ import pytest from vllm.attention.layer import Attention from vllm.config import (CacheConfig, ModelConfig, SchedulerConfig, VllmConfig, set_current_vllm_config) +from vllm.pooling_params import PoolingParams from vllm.sampling_params import SamplingParams from vllm.utils import GiB_bytes from vllm.v1.core.kv_cache_utils import (estimate_max_model_len, @@ -71,6 +72,7 @@ def _schedule_new_request(*req_ids: str) -> SchedulerOutput: mm_hashes=[], mm_positions=[], sampling_params=SamplingParams(), + pooling_params=PoolingParams(), block_ids=([0], ), # block_ids should be tuple[list[int]] num_computed_tokens=0, lora_request=None, From a738dbb2a1238a5e4aafb92c629af3bce4213024 Mon Sep 17 00:00:00 2001 From: QiliangCui Date: Mon, 23 Jun 2025 17:18:10 -0700 Subject: [PATCH 101/211] Update test case parameter to have the throughput above 8.0 (#19994) Signed-off-by: Qiliang Cui --- .buildkite/scripts/tpu/config_v6e_1.env | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.buildkite/scripts/tpu/config_v6e_1.env b/.buildkite/scripts/tpu/config_v6e_1.env index 4417586473474..03ec116f698d2 100644 --- a/.buildkite/scripts/tpu/config_v6e_1.env +++ b/.buildkite/scripts/tpu/config_v6e_1.env @@ -4,8 +4,8 @@ CONTAINER_NAME=vllm-tpu # vllm config MODEL=meta-llama/Llama-3.1-8B-Instruct -MAX_NUM_SEQS=512 -MAX_NUM_BATCHED_TOKENS=512 +MAX_NUM_SEQS=256 +MAX_NUM_BATCHED_TOKENS=1024 TENSOR_PARALLEL_SIZE=1 MAX_MODEL_LEN=2048 DOWNLOAD_DIR=/mnt/disks/persist From ee5ad8d2c5f7126c344319da15526248f7b515d7 Mon Sep 17 00:00:00 2001 From: Chenyaaang <42742451+Chenyaaang@users.noreply.github.com> Date: Mon, 23 Jun 2025 17:59:41 -0700 Subject: [PATCH 102/211] [Misc][Tools][Benchmark] Add profile to autotune script (#19711) Signed-off-by: Chenyaaang --- benchmarks/auto_tune.sh | 42 ++++++++++++++++++++++++++++++++++++----- 1 file changed, 37 insertions(+), 5 deletions(-) diff --git a/benchmarks/auto_tune.sh b/benchmarks/auto_tune.sh index 1b01bbd61b628..b257b57ce06f5 100644 --- a/benchmarks/auto_tune.sh +++ b/benchmarks/auto_tune.sh @@ -10,6 +10,7 @@ # 3. Set variables (ALL REQUIRED) # BASE: your directory for vllm repo # MODEL: the model served by vllm +# SYSTEM: the hardware, choice TPU or GPU, for other systems, "get best profile" might not support. # TP: ways of tensor parallelism # DOWNLOAD_DIR: directory to download and load model weights. # INPUT_LEN: request input len @@ -34,6 +35,7 @@ TAG=$(date +"%Y_%m_%d_%H_%M") BASE="" MODEL="meta-llama/Llama-3.1-8B-Instruct" +SYSTEM="TPU" TP=1 DOWNLOAD_DIR="" INPUT_LEN=4000 @@ -45,12 +47,15 @@ NUM_BATCHED_TOKENS_LIST="512 1024 2048 4096" LOG_FOLDER="$BASE/auto-benchmark/$TAG" RESULT="$LOG_FOLDER/result.txt" +PROFILE_PATH="$LOG_FOLDER/profile" echo "result file: $RESULT" echo "model: $MODEL" rm -rf $LOG_FOLDER +rm -rf $PROFILE_PATH mkdir -p $LOG_FOLDER +mkdir -p $PROFILE_PATH cd "$BASE/vllm" @@ -70,10 +75,11 @@ start_server() { local max_num_seqs=$2 local max_num_batched_tokens=$3 local vllm_log=$4 + local profile_dir=$5 pkill -f vllm - VLLM_USE_V1=1 VLLM_SERVER_DEV_MODE=1 vllm serve $MODEL \ + VLLM_USE_V1=1 VLLM_SERVER_DEV_MODE=1 VLLM_TORCH_PROFILER_DIR=$profile_dir vllm serve $MODEL \ --disable-log-requests \ --port 8004 \ --gpu-memory-utilization $gpu_memory_utilization \ @@ -105,19 +111,37 @@ start_server() { fi } +update_best_profile() { + local profile_dir=$1 + local profile_index=$2 + sorted_paths=($(find "$profile_dir" -maxdepth 1 -not -path "$profile_dir" | sort)) + selected_profile_file= + if [[ "$SYSTEM" == "TPU" ]]; then + selected_profile_file="${sorted_paths[$profile_index]}/*.xplane.pb" + fi + if [[ "$SYSTEM" == "GPU" ]]; then + selected_profile_file="${sorted_paths[$profile_index]}" + fi + rm -f $PROFILE_PATH/* + cp $selected_profile_file $PROFILE_PATH +} + run_benchmark() { local max_num_seqs=$1 local max_num_batched_tokens=$2 local gpu_memory_utilization=$3 echo "max_num_seq: $max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens" local vllm_log="$LOG_FOLDER/vllm_log_${max_num_seqs}_${max_num_batched_tokens}.txt" + local profile_dir="$LOG_FOLDER/profile_${max_num_seqs}_${max_num_batched_tokens}" echo "vllm_log: $vllm_log" echo rm -f $vllm_log + mkdir -p $profile_dir pkill -f vllm + local profile_index=0 echo "starting server..." - start_server $gpu_memory_utilization $max_num_seqs $max_num_batched_tokens $vllm_log + start_server $gpu_memory_utilization $max_num_seqs $max_num_batched_tokens $vllm_log $profile_dir result=$? if [[ "$result" -eq 1 ]]; then echo "server failed to start. gpu_memory_utilization:$gpu_memory_utilization, max_num_seqs:$max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens" @@ -144,7 +168,8 @@ run_benchmark() { --goodput e2el:$MAX_LATENCY_ALLOWED_MS \ --num-prompts 1000 \ --random-prefix-len $prefix_len \ - --port 8004 &> "$bm_log" + --port 8004 \ + --profile &> "$bm_log" throughput=$(grep "Request throughput (req/s):" "$bm_log" | sed 's/[^0-9.]//g') e2el=$(grep "P99 E2EL (ms):" "$bm_log" | awk '{print $NF}') goodput=$(grep "Request goodput (req/s):" "$bm_log" | sed 's/[^0-9.]//g') @@ -158,6 +183,7 @@ run_benchmark() { # start from request-rate as int(throughput) + 1 request_rate=$((${throughput%.*} + 1)) while ((request_rate > 0)); do + profile_index=$((profile_index+1)) # clear prefix cache curl -X POST http://0.0.0.0:8004/reset_prefix_cache sleep 5 @@ -195,6 +221,12 @@ run_benchmark() { best_max_num_seqs=$max_num_seqs best_num_batched_tokens=$max_num_batched_tokens best_goodput=$goodput + if [[ "$SYSTEM" == "TPU" ]]; then + update_best_profile "$profile_dir/plugins/profile" $profile_index + fi + if [[ "$SYSTEM" == "GPU" ]]; then + update_best_profile "$profile_dir" $profile_index + fi fi else echo "max_num_seqs: $max_num_seqs, max_num_batched_tokens: $max_num_batched_tokens does not meet latency requirement ${MAX_LATENCY_ALLOWED_MS}" @@ -239,6 +271,6 @@ for num_seqs in "${num_seqs_list[@]}"; do done done echo "finish permutations" -echo "best_max_num_seqs: $best_max_num_seqs, best_num_batched_tokens: $best_num_batched_tokens, best_throughput: $best_throughput" -echo "best_max_num_seqs: $best_max_num_seqs, best_num_batched_tokens: $best_num_batched_tokens, best_throughput: $best_throughput" >> "$RESULT" +echo "best_max_num_seqs: $best_max_num_seqs, best_num_batched_tokens: $best_num_batched_tokens, best_throughput: $best_throughput, profile saved in: $PROFILE_PATH" +echo "best_max_num_seqs: $best_max_num_seqs, best_num_batched_tokens: $best_num_batched_tokens, best_throughput: $best_throughput, profile saved in: $PROFILE_PATH" >> "$RESULT" From 0eed516951e0c5b834d0e59b86fd6b555fa90f65 Mon Sep 17 00:00:00 2001 From: Kay Yan Date: Tue, 24 Jun 2025 12:04:11 +0800 Subject: [PATCH 103/211] [doc] Fix broken link in the installation for CPU (#19980) Signed-off-by: Kay Yan --- docs/getting_started/installation/cpu/arm.inc.md | 2 +- docs/getting_started/installation/cpu/x86.inc.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/getting_started/installation/cpu/arm.inc.md b/docs/getting_started/installation/cpu/arm.inc.md index 59b71dcaf911a..6c05900cf45c1 100644 --- a/docs/getting_started/installation/cpu/arm.inc.md +++ b/docs/getting_started/installation/cpu/arm.inc.md @@ -23,7 +23,7 @@ ARM CPU backend currently supports Float32, FP16 and BFloat16 datatypes. # --8<-- [end:pre-built-wheels] # --8<-- [start:build-wheel-from-source] ---8<-- "docs/getting_started/installation/cpu/cpu/build.inc.md" +--8<-- "docs/getting_started/installation/cpu/build.inc.md" Testing has been conducted on AWS Graviton3 instances for compatibility. diff --git a/docs/getting_started/installation/cpu/x86.inc.md b/docs/getting_started/installation/cpu/x86.inc.md index 9434eeea8b4a1..0412d4ccef00b 100644 --- a/docs/getting_started/installation/cpu/x86.inc.md +++ b/docs/getting_started/installation/cpu/x86.inc.md @@ -24,7 +24,7 @@ vLLM initially supports basic model inferencing and serving on x86 CPU platform, # --8<-- [end:pre-built-wheels] # --8<-- [start:build-wheel-from-source] ---8<-- "docs/getting_started/installation/cpu/cpu/build.inc.md" +--8<-- "docs/getting_started/installation/cpu/build.inc.md" !!! note - AVX512_BF16 is an extension ISA provides native BF16 data type conversion and vector product instructions, which brings some performance improvement compared with pure AVX512. The CPU backend build script will check the host CPU flags to determine whether to enable AVX512_BF16. From 3014c920dae5a2360b9b4141395522cc52b59193 Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Tue, 24 Jun 2025 13:57:46 +0800 Subject: [PATCH 104/211] add some examples for other benchmark scripts (#19893) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- benchmarks/README.md | 175 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 175 insertions(+) diff --git a/benchmarks/README.md b/benchmarks/README.md index 6f9fbb91cbd91..7f6c83b8e2fa9 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -387,3 +387,178 @@ python3 vllm/benchmarks/benchmark_throughput.py \ --enable-lora \ --lora-path yard1/llama-2-7b-sql-lora-test ``` + +--- +## Example - Structured Output Benchmark + +Benchmark the performance of structured output generation (JSON, grammar, regex). + +### Server Setup + +```bash +vllm serve NousResearch/Hermes-3-Llama-3.1-8B --disable-log-requests +``` + +### JSON Schema Benchmark + +```bash +python3 benchmarks/benchmark_serving_structured_output.py \ + --backend vllm \ + --model NousResearch/Hermes-3-Llama-3.1-8B \ + --dataset json \ + --structured-output-ratio 1.0 \ + --request-rate 10 \ + --num-prompts 1000 +``` + +### Grammar-based Generation Benchmark + +```bash +python3 benchmarks/benchmark_serving_structured_output.py \ + --backend vllm \ + --model NousResearch/Hermes-3-Llama-3.1-8B \ + --dataset grammar \ + --structure-type grammar \ + --request-rate 10 \ + --num-prompts 1000 +``` + +### Regex-based Generation Benchmark + +```bash +python3 benchmarks/benchmark_serving_structured_output.py \ + --backend vllm \ + --model NousResearch/Hermes-3-Llama-3.1-8B \ + --dataset regex \ + --request-rate 10 \ + --num-prompts 1000 +``` + +### Choice-based Generation Benchmark + +```bash +python3 benchmarks/benchmark_serving_structured_output.py \ + --backend vllm \ + --model NousResearch/Hermes-3-Llama-3.1-8B \ + --dataset choice \ + --request-rate 10 \ + --num-prompts 1000 +``` + +### XGrammar Benchmark Dataset + +```bash +python3 benchmarks/benchmark_serving_structured_output.py \ + --backend vllm \ + --model NousResearch/Hermes-3-Llama-3.1-8B \ + --dataset xgrammar_bench \ + --request-rate 10 \ + --num-prompts 1000 +``` + +--- +## Example - Long Document QA Throughput Benchmark + +Benchmark the performance of long document question-answering with prefix caching. + +### Basic Long Document QA Test + +```bash +python3 benchmarks/benchmark_long_document_qa_throughput.py \ + --model meta-llama/Llama-2-7b-chat-hf \ + --enable-prefix-caching \ + --num-documents 16 \ + --document-length 2000 \ + --output-len 50 \ + --repeat-count 5 +``` + +### Different Repeat Modes + +```bash +# Random mode (default) - shuffle prompts randomly +python3 benchmarks/benchmark_long_document_qa_throughput.py \ + --model meta-llama/Llama-2-7b-chat-hf \ + --enable-prefix-caching \ + --num-documents 8 \ + --document-length 3000 \ + --repeat-count 3 \ + --repeat-mode random + +# Tile mode - repeat entire prompt list in sequence +python3 benchmarks/benchmark_long_document_qa_throughput.py \ + --model meta-llama/Llama-2-7b-chat-hf \ + --enable-prefix-caching \ + --num-documents 8 \ + --document-length 3000 \ + --repeat-count 3 \ + --repeat-mode tile + +# Interleave mode - repeat each prompt consecutively +python3 benchmarks/benchmark_long_document_qa_throughput.py \ + --model meta-llama/Llama-2-7b-chat-hf \ + --enable-prefix-caching \ + --num-documents 8 \ + --document-length 3000 \ + --repeat-count 3 \ + --repeat-mode interleave +``` + +--- +## Example - Prefix Caching Benchmark + +Benchmark the efficiency of automatic prefix caching. + +### Fixed Prompt with Prefix Caching + +```bash +python3 benchmarks/benchmark_prefix_caching.py \ + --model meta-llama/Llama-2-7b-chat-hf \ + --enable-prefix-caching \ + --num-prompts 1 \ + --repeat-count 100 \ + --input-length-range 128:256 +``` + +### ShareGPT Dataset with Prefix Caching + +```bash +# download dataset +# wget https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered/resolve/main/ShareGPT_V3_unfiltered_cleaned_split.json + +python3 benchmarks/benchmark_prefix_caching.py \ + --model meta-llama/Llama-2-7b-chat-hf \ + --dataset-path /path/ShareGPT_V3_unfiltered_cleaned_split.json \ + --enable-prefix-caching \ + --num-prompts 20 \ + --repeat-count 5 \ + --input-length-range 128:256 +``` + +--- +## Example - Request Prioritization Benchmark + +Benchmark the performance of request prioritization in vLLM. + +### Basic Prioritization Test + +```bash +python3 benchmarks/benchmark_prioritization.py \ + --model meta-llama/Llama-2-7b-chat-hf \ + --input-len 128 \ + --output-len 64 \ + --num-prompts 100 \ + --scheduling-policy priority +``` + +### Multiple Sequences per Prompt + +```bash +python3 benchmarks/benchmark_prioritization.py \ + --model meta-llama/Llama-2-7b-chat-hf \ + --input-len 128 \ + --output-len 64 \ + --num-prompts 100 \ + --scheduling-policy priority \ + --n 2 +``` From 9a3b88328f7e434cac35b90ee463de6689f9a833 Mon Sep 17 00:00:00 2001 From: Vadim Gimpelson <156319763+vadiklyutiy@users.noreply.github.com> Date: Tue, 24 Jun 2025 10:01:26 +0400 Subject: [PATCH 105/211] [PERF] Speedup of MRoPE prepare inputs (#19939) Signed-off-by: Vadim Gimpelson --- vllm/model_executor/layers/rotary_embedding.py | 18 +++++++++--------- vllm/v1/worker/gpu_model_runner.py | 17 ++++++++--------- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/vllm/model_executor/layers/rotary_embedding.py b/vllm/model_executor/layers/rotary_embedding.py index 9de2338968a1c..b7bb2affc4fab 100644 --- a/vllm/model_executor/layers/rotary_embedding.py +++ b/vllm/model_executor/layers/rotary_embedding.py @@ -26,6 +26,7 @@ import math from typing import Any, Optional, Union +import numpy as np import torch import torch.nn as nn from transformers import PretrainedConfig @@ -1458,15 +1459,14 @@ class MRotaryEmbedding(RotaryEmbedding): ] @staticmethod - def get_next_input_positions_tensor( - mrope_position_delta: int, - context_len: int, - seq_len: int, - ) -> torch.Tensor: - return torch.arange( - mrope_position_delta + context_len, - mrope_position_delta + seq_len, - ).expand(3, -1) + def get_next_input_positions_tensor(out: np.ndarray, out_offset: int, + mrope_position_delta: int, + context_len: int, num_new_tokens: int): + + values = np.arange(mrope_position_delta + context_len, + mrope_position_delta + context_len + num_new_tokens, + dtype=out.dtype) + out[:, out_offset:out_offset + num_new_tokens] = values @classmethod def omni_get_updates_use_audio_in_video( diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 520d8fb186f4f..40639fdf24338 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -262,6 +262,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): dtype=torch.int64, device="cpu", pin_memory=self.pin_memory) + self.mrope_positions_np = self.mrope_positions_cpu.numpy() # Only relevant for models using ALiBi (e.g, MPT) self.use_alibi = check_use_alibi(model_config) @@ -889,15 +890,13 @@ class GPUModelRunner(LoRAModelRunnerMixin): dst_start = mrope_pos_ptr dst_end = mrope_pos_ptr + completion_part_len - self.mrope_positions_cpu[:, dst_start:dst_end] = \ - MRotaryEmbedding.get_next_input_positions_tensor( - req.mrope_position_delta, - context_len=num_computed_tokens + - prompt_part_len, - seq_len=num_computed_tokens + - prompt_part_len + - completion_part_len, - ) + MRotaryEmbedding.get_next_input_positions_tensor( + out=self.mrope_positions_np, + out_offset=dst_start, + mrope_position_delta=req.mrope_position_delta, + context_len=num_computed_tokens + prompt_part_len, + num_new_tokens=completion_part_len, + ) mrope_pos_ptr += completion_part_len From 53da4cd397821195e1b8bc481ada9f83a2007d05 Mon Sep 17 00:00:00 2001 From: "Li, Jiang" Date: Tue, 24 Jun 2025 21:20:04 +0800 Subject: [PATCH 106/211] [Bugfix][CPU] Fix InputBatch for pooling models in the CPU v1 (#20014) Signed-off-by: jiang1.li --- tests/models/language/pooling/test_reward.py | 2 +- vllm/v1/worker/cpu_model_runner.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/models/language/pooling/test_reward.py b/tests/models/language/pooling/test_reward.py index 085cdca9f1f31..ec3d25ee22a9e 100644 --- a/tests/models/language/pooling/test_reward.py +++ b/tests/models/language/pooling/test_reward.py @@ -101,4 +101,4 @@ def test_prm_models( hf_output = torch.tensor(hf_output) vllm_output = torch.tensor(vllm_output) - assert torch.allclose(hf_output, vllm_output, 1e-2) + assert torch.allclose(hf_output, vllm_output, 1.5e-2) diff --git a/vllm/v1/worker/cpu_model_runner.py b/vllm/v1/worker/cpu_model_runner.py index 6631c9636eacd..370de9f115990 100644 --- a/vllm/v1/worker/cpu_model_runner.py +++ b/vllm/v1/worker/cpu_model_runner.py @@ -7,6 +7,7 @@ import torch from vllm.config import VllmConfig from vllm.logger import init_logger from vllm.model_executor.model_loader import get_model +from vllm.model_executor.models.interfaces import has_step_pooler from vllm.v1.worker.gpu_model_runner import GPUModelRunner logger = init_logger(__name__) @@ -52,6 +53,9 @@ class CPUModelRunner(GPUModelRunner): logger.info("Starting to load model %s...", self.model_config.model) self.model = get_model(vllm_config=self.vllm_config) + if has_step_pooler(self.model): + self.input_batch.logits_processing_needs_token_ids = True + if self.lora_config: self.model = self.load_lora_model(self.model, self.model_config, self.scheduler_config, From 26d34eb67e68dabf2cd9828cbc0b442283dd0c53 Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Tue, 24 Jun 2025 22:03:20 +0800 Subject: [PATCH 107/211] refactor example - qwen3_reranker (#19847) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- examples/offline_inference/qwen3_reranker.py | 32 ++++++++++++++------ 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/examples/offline_inference/qwen3_reranker.py b/examples/offline_inference/qwen3_reranker.py index 27c4071bf094e..fe3cebc348f16 100644 --- a/examples/offline_inference/qwen3_reranker.py +++ b/examples/offline_inference/qwen3_reranker.py @@ -22,15 +22,19 @@ model_name = "Qwen/Qwen3-Reranker-0.6B" # If you want to load the official original version, the init parameters are # as follows. -model = LLM( - model=model_name, - task="score", - hf_overrides={ - "architectures": ["Qwen3ForSequenceClassification"], - "classifier_from_token": ["no", "yes"], - "is_original_qwen3_reranker": True, - }, -) + +def get_model() -> LLM: + """Initializes and returns the LLM model for Qwen3-Reranker.""" + return LLM( + model=model_name, + task="score", + hf_overrides={ + "architectures": ["Qwen3ForSequenceClassification"], + "classifier_from_token": ["no", "yes"], + "is_original_qwen3_reranker": True, + }, + ) + # Why do we need hf_overrides for the official original version: # vllm converts it to Qwen3ForSequenceClassification when loaded for @@ -51,7 +55,8 @@ suffix = "<|im_end|>\n<|im_start|>assistant\n\n\n\n\n" query_template = "{prefix}: {instruction}\n: {query}\n" document_template = ": {doc}{suffix}" -if __name__ == "__main__": + +def main() -> None: instruction = ( "Given a web search query, retrieve relevant passages that answer the query" ) @@ -72,6 +77,13 @@ if __name__ == "__main__": ] documents = [document_template.format(doc=doc, suffix=suffix) for doc in documents] + model = get_model() outputs = model.score(queries, documents) + print("-" * 30) print([output.outputs.score for output in outputs]) + print("-" * 30) + + +if __name__ == "__main__": + main() From 981eeca41aed8202074f36baa1e46eb9c2c82b16 Mon Sep 17 00:00:00 2001 From: amit Date: Tue, 24 Jun 2025 19:52:15 +0300 Subject: [PATCH 108/211] [Fix][V1] Remove --scheduling-policy oracle (#20010) Signed-off-by: amit --- tests/v1/test_oracle.py | 6 ------ vllm/engine/arg_utils.py | 5 ----- 2 files changed, 11 deletions(-) diff --git a/tests/v1/test_oracle.py b/tests/v1/test_oracle.py index 1787b9a0b4695..d640d7dc49d1d 100644 --- a/tests/v1/test_oracle.py +++ b/tests/v1/test_oracle.py @@ -74,12 +74,6 @@ def test_unsupported_configs(monkeypatch): disable_async_output_proc=True, ).create_engine_config() - with pytest.raises(NotImplementedError): - AsyncEngineArgs( - model=MODEL, - scheduling_policy="priority", - ).create_engine_config() - with pytest.raises(NotImplementedError): AsyncEngineArgs( model=MODEL, diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index dd09f514906df..9d1008b6b350a 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -1289,11 +1289,6 @@ class EngineArgs: recommend_to_remove=True) return False - if self.scheduling_policy != SchedulerConfig.policy: - _raise_or_fallback(feature_name="--scheduling-policy", - recommend_to_remove=False) - return False - if self.num_scheduler_steps != SchedulerConfig.num_scheduler_steps: _raise_or_fallback(feature_name="--num-scheduler-steps", recommend_to_remove=True) From a045b7e89a2424ec3b152ee57be2931eedb2abd2 Mon Sep 17 00:00:00 2001 From: Lucas Wilkinson Date: Tue, 24 Jun 2025 13:09:01 -0400 Subject: [PATCH 109/211] [Perf] Improve/Fix-regression for FA3 in High QPS regimes (#19463) Signed-off-by: Lucas Wilkinson --- cmake/external_projects/vllm_flash_attn.cmake | 2 +- test-qwen | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 160000 test-qwen diff --git a/cmake/external_projects/vllm_flash_attn.cmake b/cmake/external_projects/vllm_flash_attn.cmake index dba5baa362b83..ebaffe0821854 100644 --- a/cmake/external_projects/vllm_flash_attn.cmake +++ b/cmake/external_projects/vllm_flash_attn.cmake @@ -38,7 +38,7 @@ else() FetchContent_Declare( vllm-flash-attn GIT_REPOSITORY https://github.com/vllm-project/flash-attention.git - GIT_TAG 763ad155a1c826f71ff318f41edb1e4e5e376ddb + GIT_TAG 2c6bcfc0feb3d9d4a57b243fc159a68aa9933f5b GIT_PROGRESS TRUE # Don't share the vllm-flash-attn build between build types BINARY_DIR ${CMAKE_BINARY_DIR}/vllm-flash-attn diff --git a/test-qwen b/test-qwen new file mode 160000 index 0000000000000..34c31c0af8fc9 --- /dev/null +++ b/test-qwen @@ -0,0 +1 @@ +Subproject commit 34c31c0af8fc975140b8c85548fefa1eb7f523e4 From c635c5f7448f7ee5593c2ad7bb447b3aa1bc6d47 Mon Sep 17 00:00:00 2001 From: "d.transposed" Date: Tue, 24 Jun 2025 20:41:49 +0200 Subject: [PATCH 110/211] [Misc][Benchmarking] Add variable request-rate ("ramp-up") to the benchmarking client. (#19423) Signed-off-by: dtransposed Co-authored-by: dtransposed Co-authored-by: Roger Wang --- benchmarks/README.md | 15 +++ benchmarks/benchmark_serving.py | 178 +++++++++++++++++++++++++++++--- vllm/benchmarks/serve.py | 171 +++++++++++++++++++++++++++--- 3 files changed, 330 insertions(+), 34 deletions(-) diff --git a/benchmarks/README.md b/benchmarks/README.md index 7f6c83b8e2fa9..2714b8b49821c 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -269,6 +269,21 @@ python3 vllm/benchmarks/benchmark_serving.py \ --num-prompts 10 ``` +### Running With Ramp-Up Request Rate + +The benchmark tool also supports ramping up the request rate over the +duration of the benchmark run. This can be useful for stress testing the +server or finding the maximum throughput that it can handle, given some latency budget. + +Two ramp-up strategies are supported: +- `linear`: Increases the request rate linearly from a start value to an end value. +- `exponential`: Increases the request rate exponentially. + +The following arguments can be used to control the ramp-up: +- `--ramp-up-strategy`: The ramp-up strategy to use (`linear` or `exponential`). +- `--ramp-up-start-rps`: The request rate at the beginning of the benchmark. +- `--ramp-up-end-rps`: The request rate at the end of the benchmark. + --- ## Example - Offline Throughput Benchmark diff --git a/benchmarks/benchmark_serving.py b/benchmarks/benchmark_serving.py index f38e45b261138..886a51e1cbd9a 100644 --- a/benchmarks/benchmark_serving.py +++ b/benchmarks/benchmark_serving.py @@ -33,7 +33,7 @@ import warnings from collections.abc import AsyncGenerator, Iterable from dataclasses import dataclass from datetime import datetime -from typing import Any, Optional +from typing import Any, Literal, Optional import numpy as np from tqdm.asyncio import tqdm @@ -107,14 +107,42 @@ class BenchmarkMetrics: percentiles_e2el_ms: list[tuple[float, float]] +def _get_current_request_rate( + ramp_up_strategy: Optional[Literal["linear", "exponential"]], + ramp_up_start_rps: Optional[int], + ramp_up_end_rps: Optional[int], + request_index: int, + total_requests: int, + request_rate: float, +) -> float: + if ( + ramp_up_strategy + and ramp_up_start_rps is not None + and ramp_up_end_rps is not None + ): + progress = request_index / max(total_requests - 1, 1) + if ramp_up_strategy == "linear": + increase = (ramp_up_end_rps - ramp_up_start_rps) * progress + return ramp_up_start_rps + increase + elif ramp_up_strategy == "exponential": + ratio = ramp_up_end_rps / ramp_up_start_rps + return ramp_up_start_rps * (ratio**progress) + else: + raise ValueError(f"Unknown ramp-up strategy: {ramp_up_strategy}") + return request_rate + + async def get_request( input_requests: list[SampleRequest], request_rate: float, burstiness: float = 1.0, -) -> AsyncGenerator[SampleRequest, None]: + ramp_up_strategy: Optional[Literal["linear", "exponential"]] = None, + ramp_up_start_rps: Optional[int] = None, + ramp_up_end_rps: Optional[int] = None, +) -> AsyncGenerator[tuple[SampleRequest, float], None]: """ Asynchronously generates requests at a specified rate - with OPTIONAL burstiness. + with OPTIONAL burstiness and OPTIONAL ramp-up strategy. Args: input_requests: @@ -129,22 +157,44 @@ async def get_request( A lower burstiness value (0 < burstiness < 1) results in more bursty requests, while a higher burstiness value (burstiness > 1) results in a more uniform arrival of requests. + ramp_up_strategy (optional): + The ramp-up strategy. Can be "linear" or "exponential". + If None, uses constant request rate (specified by request_rate). + ramp_up_start_rps (optional): + The starting request rate for ramp-up. + ramp_up_end_rps (optional): + The ending request rate for ramp-up. """ - input_requests: Iterable[SampleRequest] = iter(input_requests) - - # Calculate scale parameter theta to maintain the desired request_rate. assert burstiness > 0, ( f"A positive burstiness factor is expected, but given {burstiness}." ) - theta = 1.0 / (request_rate * burstiness) + # Convert to list to get length for ramp-up calculations + if isinstance(input_requests, Iterable) and not isinstance(input_requests, list): + input_requests = list(input_requests) + + total_requests = len(input_requests) + request_index = 0 for request in input_requests: - yield request + current_request_rate = _get_current_request_rate( + ramp_up_strategy, + ramp_up_start_rps, + ramp_up_end_rps, + request_index, + total_requests, + request_rate, + ) - if request_rate == float("inf"): + yield request, current_request_rate + + request_index += 1 + + if current_request_rate == float("inf"): # If the request rate is infinity, then we don't need to wait. continue + theta = 1.0 / (current_request_rate * burstiness) + # Sample the request interval from the gamma distribution. # If burstiness is 1, it follows exponential distribution. interval = np.random.gamma(shape=burstiness, scale=theta) @@ -290,6 +340,9 @@ async def benchmark( max_concurrency: Optional[int], lora_modules: Optional[Iterable[str]], extra_body: Optional[dict], + ramp_up_strategy: Optional[Literal["linear", "exponential"]] = None, + ramp_up_start_rps: Optional[int] = None, + ramp_up_end_rps: Optional[int] = None, ): if backend in ASYNC_REQUEST_FUNCS: request_func = ASYNC_REQUEST_FUNCS[backend] @@ -353,7 +406,15 @@ async def benchmark( distribution = "Poisson process" if burstiness == 1.0 else "Gamma distribution" - print(f"Traffic request rate: {request_rate}") + if ramp_up_strategy is not None: + print( + f"Traffic ramp-up strategy: {ramp_up_strategy}. Will increase " + f"RPS from {ramp_up_start_rps} to {ramp_up_end_rps} RPS over " + "the duration of the benchmark." + ) + else: + print(f"Traffic request rate: {request_rate} RPS.") + print(f"Burstiness factor: {burstiness} ({distribution})") print(f"Maximum request concurrency: {max_concurrency}") @@ -373,7 +434,34 @@ async def benchmark( benchmark_start_time = time.perf_counter() tasks: list[asyncio.Task] = [] - async for request in get_request(input_requests, request_rate, burstiness): + + rps_change_events = [] + last_int_rps = -1 + if ramp_up_strategy is not None and ramp_up_start_rps is not None: + last_int_rps = ramp_up_start_rps + rps_change_events.append( + { + "rps": last_int_rps, + "timestamp": datetime.now().isoformat(), + } + ) + + async for request, current_request_rate in get_request( + input_requests, + request_rate, + burstiness, + ramp_up_strategy, + ramp_up_start_rps, + ramp_up_end_rps, + ): + if ramp_up_strategy is not None: + current_int_rps = int(current_request_rate) + if current_int_rps > last_int_rps: + timestamp = datetime.now().isoformat() + for rps_val in range(last_int_rps + 1, current_int_rps + 1): + rps_change_events.append({"rps": rps_val, "timestamp": timestamp}) + last_int_rps = current_int_rps + prompt, prompt_len, output_len, mm_content = ( request.prompt, request.prompt_len, @@ -397,11 +485,8 @@ async def benchmark( ignore_eos=ignore_eos, extra_body=extra_body, ) - tasks.append( - asyncio.create_task( - limited_request_func(request_func_input=request_func_input, pbar=pbar) - ) - ) + task = limited_request_func(request_func_input=request_func_input, pbar=pbar) + tasks.append(asyncio.create_task(task)) outputs: list[RequestFuncOutput] = await asyncio.gather(*tasks) if profile: @@ -477,6 +562,9 @@ async def benchmark( "errors": [output.error for output in outputs], } + if rps_change_events: + result["rps_change_events"] = rps_change_events + def process_one_metric( # E.g., "ttft" metric_attribute_name: str, @@ -610,6 +698,26 @@ def main(args: argparse.Namespace): tokenizer_id = args.tokenizer if args.tokenizer is not None else args.model tokenizer_mode = args.tokenizer_mode + # Validate ramp-up arguments + if args.ramp_up_strategy is not None: + if args.request_rate != float("inf"): + raise ValueError( + "When using ramp-up, do not specify --request-rate. " + "The request rate will be controlled by ramp-up parameters. " + "Please remove the --request-rate argument." + ) + if args.ramp_up_start_rps is None or args.ramp_up_end_rps is None: + raise ValueError( + "When using --ramp-up-strategy, both --ramp-up-start-rps and " + "--ramp-up-end-rps must be specified" + ) + if args.ramp_up_start_rps < 0 or args.ramp_up_end_rps < 0: + raise ValueError("Ramp-up start and end RPS must be non-negative") + if args.ramp_up_start_rps > args.ramp_up_end_rps: + raise ValueError("Ramp-up start RPS must be less than end RPS") + if args.ramp_up_strategy == "exponential" and args.ramp_up_start_rps == 0: + raise ValueError("For exponential ramp-up, the start RPS cannot be 0.") + if args.base_url is not None: api_url = f"{args.base_url}{args.endpoint}" base_url = f"{args.base_url}" @@ -802,6 +910,9 @@ def main(args: argparse.Namespace): max_concurrency=args.max_concurrency, lora_modules=args.lora_modules, extra_body=sampling_params, + ramp_up_strategy=args.ramp_up_strategy, + ramp_up_start_rps=args.ramp_up_start_rps, + ramp_up_end_rps=args.ramp_up_end_rps, ) ) @@ -834,6 +945,11 @@ def main(args: argparse.Namespace): result_json["burstiness"] = args.burstiness result_json["max_concurrency"] = args.max_concurrency + if args.ramp_up_strategy is not None: + result_json["ramp_up_strategy"] = args.ramp_up_strategy + result_json["ramp_up_start_rps"] = args.ramp_up_start_rps + result_json["ramp_up_end_rps"] = args.ramp_up_end_rps + # Merge with benchmark result result_json = {**result_json, **benchmark_result} @@ -859,7 +975,10 @@ def main(args: argparse.Namespace): if args.max_concurrency is not None else "" ) - file_name = f"{backend}-{args.request_rate}qps{max_concurrency_str}-{base_model_id}-{current_dt}.json" # noqa + if args.ramp_up_strategy is not None: + file_name = f"{backend}-ramp-up-{args.ramp_up_strategy}-{args.ramp_up_start_rps}qps-{args.ramp_up_end_rps}qps{max_concurrency_str}-{base_model_id}-{current_dt}.json" # noqa + else: + file_name = f"{backend}-{args.request_rate}qps{max_concurrency_str}-{base_model_id}-{current_dt}.json" # noqa if args.result_filename: file_name = args.result_filename if args.result_dir: @@ -1225,6 +1344,31 @@ def create_argument_parser(): "script chooses a LoRA module at random.", ) + parser.add_argument( + "--ramp-up-strategy", + type=str, + default=None, + choices=["linear", "exponential"], + help="The ramp-up strategy. This would be used to " + "ramp up the request rate from initial RPS to final " + "RPS rate (specified by --ramp-up-start-rps and --ramp-up-end-rps). " + "over the duration of the benchmark.", + ) + parser.add_argument( + "--ramp-up-start-rps", + type=int, + default=None, + help="The starting request rate for ramp-up (RPS). " + "Needs to be specified when --ramp-up-strategy is used.", + ) + parser.add_argument( + "--ramp-up-end-rps", + type=int, + default=None, + help="The ending request rate for ramp-up (RPS). " + "Needs to be specified when --ramp-up-strategy is used.", + ) + return parser diff --git a/vllm/benchmarks/serve.py b/vllm/benchmarks/serve.py index 4487d2d6841a1..302f655f424a3 100644 --- a/vllm/benchmarks/serve.py +++ b/vllm/benchmarks/serve.py @@ -26,7 +26,7 @@ import warnings from collections.abc import AsyncGenerator, Iterable from dataclasses import dataclass from datetime import datetime -from typing import Any, Optional +from typing import Any, Literal, Optional import numpy as np from tqdm.asyncio import tqdm @@ -75,14 +75,39 @@ class BenchmarkMetrics: percentiles_e2el_ms: list[tuple[float, float]] +def _get_current_request_rate( + ramp_up_strategy: Optional[Literal["linear", "exponential"]], + ramp_up_start_rps: Optional[int], + ramp_up_end_rps: Optional[int], + request_index: int, + total_requests: int, + request_rate: float, +) -> float: + if (ramp_up_strategy and ramp_up_start_rps is not None + and ramp_up_end_rps is not None): + progress = request_index / max(total_requests - 1, 1) + if ramp_up_strategy == "linear": + increase = (ramp_up_end_rps - ramp_up_start_rps) * progress + return ramp_up_start_rps + increase + elif ramp_up_strategy == "exponential": + ratio = ramp_up_end_rps / ramp_up_start_rps + return ramp_up_start_rps * (ratio**progress) + else: + raise ValueError(f"Unknown ramp-up strategy: {ramp_up_strategy}") + return request_rate + + async def get_request( input_requests: list[SampleRequest], request_rate: float, burstiness: float = 1.0, -) -> AsyncGenerator[SampleRequest, None]: + ramp_up_strategy: Optional[Literal["linear", "exponential"]] = None, + ramp_up_start_rps: Optional[int] = None, + ramp_up_end_rps: Optional[int] = None, +) -> AsyncGenerator[tuple[SampleRequest, float], None]: """ Asynchronously generates requests at a specified rate - with OPTIONAL burstiness. + with OPTIONAL burstiness and OPTIONAL ramp-up strategy. Args: input_requests: @@ -97,21 +122,42 @@ async def get_request( A lower burstiness value (0 < burstiness < 1) results in more bursty requests, while a higher burstiness value (burstiness > 1) results in a more uniform arrival of requests. + ramp_up_strategy (optional): + The ramp-up strategy. Can be "linear" or "exponential". + If None, uses constant request rate (specified by request_rate). + ramp_up_start_rps (optional): + The starting request rate for ramp-up. + ramp_up_end_rps (optional): + The ending request rate for ramp-up. """ - input_requests: Iterable[SampleRequest] = iter(input_requests) - - # Calculate scale parameter theta to maintain the desired request_rate. assert burstiness > 0, ( f"A positive burstiness factor is expected, but given {burstiness}.") - theta = 1.0 / (request_rate * burstiness) + # Convert to list to get length for ramp-up calculations + if isinstance(input_requests, Iterable) and not isinstance( + input_requests, list): + input_requests = list(input_requests) + + total_requests = len(input_requests) + request_index = 0 for request in input_requests: - yield request + current_request_rate = _get_current_request_rate(ramp_up_strategy, + ramp_up_start_rps, + ramp_up_end_rps, + request_index, + total_requests, + request_rate) - if request_rate == float("inf"): + yield request, current_request_rate + + request_index += 1 + + if current_request_rate == float("inf"): # If the request rate is infinity, then we don't need to wait. continue + theta = 1.0 / (current_request_rate * burstiness) + # Sample the request interval from the gamma distribution. # If burstiness is 1, it follows exponential distribution. interval = np.random.gamma(shape=burstiness, scale=theta) @@ -259,6 +305,9 @@ async def benchmark( max_concurrency: Optional[int], lora_modules: Optional[Iterable[str]], extra_body: Optional[dict], + ramp_up_strategy: Optional[Literal["linear", "exponential"]] = None, + ramp_up_start_rps: Optional[int] = None, + ramp_up_end_rps: Optional[int] = None, ): if endpoint_type in ASYNC_REQUEST_FUNCS: request_func = ASYNC_REQUEST_FUNCS[endpoint_type] @@ -316,12 +365,16 @@ async def benchmark( if profile_output.success: print("Profiler started") - if burstiness == 1.0: - distribution = "Poisson process" - else: - distribution = "Gamma distribution" + distribution = ("Poisson process" if burstiness == 1.0 + else "Gamma distribution") + + if ramp_up_strategy is not None: + print(f"Traffic ramp-up strategy: {ramp_up_strategy}.") + print(f"Will increase RPS from {ramp_up_start_rps} to " + f"{ramp_up_end_rps} RPS over the duration of the benchmark.") + else: + print(f"Traffic request rate: {request_rate}") - print(f"Traffic request rate: {request_rate}") print(f"Burstiness factor: {burstiness} ({distribution})") print(f"Maximum request concurrency: {max_concurrency}") @@ -344,7 +397,29 @@ async def benchmark( benchmark_start_time = time.perf_counter() tasks: list[asyncio.Task] = [] - async for request in get_request(input_requests, request_rate, burstiness): + + rps_change_events = [] + last_int_rps = -1 + if ramp_up_strategy is not None and ramp_up_start_rps is not None: + last_int_rps = ramp_up_start_rps + rps_change_events.append({ + "rps": last_int_rps, + "timestamp": datetime.now().isoformat(), + }) + + async for request, current_request_rate in get_request( + input_requests, request_rate, burstiness, ramp_up_strategy, + ramp_up_start_rps, ramp_up_end_rps): + if ramp_up_strategy is not None: + current_int_rps = int(current_request_rate) + if current_int_rps > last_int_rps: + timestamp = datetime.now().isoformat() + for rps_val in range(last_int_rps + 1, current_int_rps + 1): + rps_change_events.append({ + "rps": rps_val, + "timestamp": timestamp + }) + last_int_rps = current_int_rps prompt, prompt_len, output_len, mm_content = ( request.prompt, request.prompt_len, @@ -435,6 +510,9 @@ async def benchmark( "errors": [output.error for output in outputs], } + if rps_change_events: + result["rps_change_events"] = rps_change_events + def process_one_metric( # E.g., "ttft" metric_attribute_name: str, @@ -771,12 +849,60 @@ def add_cli_args(parser: argparse.ArgumentParser): "launching the server. For each request, the " "script chooses a LoRA module at random.") + parser.add_argument( + "--ramp-up-strategy", + type=str, + default=None, + choices=["linear", "exponential"], + help="The ramp-up strategy. This would be used to " + "ramp up the request rate from initial RPS to final " + "RPS rate (specified by --ramp-up-start-rps and " + "--ramp-up-end-rps.) over the duration of the benchmark." + ) + parser.add_argument( + "--ramp-up-start-rps", + type=int, + default=None, + help="The starting request rate for ramp-up (RPS). " + "Needs to be specified when --ramp-up-strategy is used.", + ) + parser.add_argument( + "--ramp-up-end-rps", + type=int, + default=None, + help="The ending request rate for ramp-up (RPS). " + "Needs to be specified when --ramp-up-strategy is used.", + ) + def main(args: argparse.Namespace): print(args) random.seed(args.seed) np.random.seed(args.seed) + # Validate ramp-up arguments + if args.ramp_up_strategy is not None: + if args.request_rate != float("inf"): + raise ValueError( + "When using ramp-up, do not specify --request-rate. " + "The request rate will be controlled by ramp-up parameters. " + "Please remove the --request-rate argument." + ) + if args.ramp_up_start_rps is None or args.ramp_up_end_rps is None: + raise ValueError( + "When using --ramp-up-strategy, both --ramp-up-start-rps and " + "--ramp-up-end-rps must be specified" + ) + if args.ramp_up_start_rps < 0 or args.ramp_up_end_rps < 0: + raise ValueError("Ramp-up start and end RPS must be non-negative") + if args.ramp_up_start_rps > args.ramp_up_end_rps: + raise ValueError("Ramp-up start RPS must be less than end RPS") + if (args.ramp_up_strategy == "exponential" + and args.ramp_up_start_rps == 0): + raise ValueError( + "For exponential ramp-up, the start RPS cannot be 0.") + + endpoint_type = args.endpoint_type label = args.label model_id = args.model model_name = args.served_model_name @@ -849,6 +975,9 @@ def main(args: argparse.Namespace): max_concurrency=args.max_concurrency, lora_modules=args.lora_modules, extra_body=sampling_params, + ramp_up_strategy=args.ramp_up_strategy, + ramp_up_start_rps=args.ramp_up_start_rps, + ramp_up_end_rps=args.ramp_up_end_rps, )) # Save config and results to json @@ -881,6 +1010,11 @@ def main(args: argparse.Namespace): result_json["burstiness"] = args.burstiness result_json["max_concurrency"] = args.max_concurrency + if args.ramp_up_strategy is not None: + result_json["ramp_up_strategy"] = args.ramp_up_strategy + result_json["ramp_up_start_rps"] = args.ramp_up_start_rps + result_json["ramp_up_end_rps"] = args.ramp_up_end_rps + # Merge with benchmark result result_json = {**result_json, **benchmark_result} @@ -903,8 +1037,11 @@ def main(args: argparse.Namespace): base_model_id = model_id.split("/")[-1] max_concurrency_str = (f"-concurrency{args.max_concurrency}" if args.max_concurrency is not None else "") - label = label or args.endpoint_type - file_name = f"{label}-{args.request_rate}qps{max_concurrency_str}-{base_model_id}-{current_dt}.json" #noqa + label = label or endpoint_type + if args.ramp_up_strategy is not None: + file_name = f"{label}-ramp-up-{args.ramp_up_strategy}-{args.ramp_up_start_rps}qps-{args.ramp_up_end_rps}qps{max_concurrency_str}-{base_model_id}-{current_dt}.json" # noqa + else: + file_name = f"{label}-{args.request_rate}qps{max_concurrency_str}-{base_model_id}-{current_dt}.json" # noqa if args.result_filename: file_name = args.result_filename if args.result_dir: From 8619e7158cc6d95534d3985257999323445c36e0 Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Tue, 24 Jun 2025 12:45:20 -0700 Subject: [PATCH 111/211] [BugFix] Fix multi-node offline data parallel (#19937) Signed-off-by: Nick Hill --- .buildkite/test-pipeline.yaml | 3 +++ vllm/entrypoints/llm.py | 2 ++ vllm/v1/engine/core.py | 8 ++++++-- vllm/v1/engine/core_client.py | 20 +++++++++++++++++++- vllm/v1/engine/llm_engine.py | 2 +- 5 files changed, 31 insertions(+), 4 deletions(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index fe775bb370f29..d6c9ee680abf8 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -615,13 +615,16 @@ steps: - vllm/executor/ - vllm/model_executor/models/ - tests/distributed/ + - tests/examples/offline_inference/data_parallel.py commands: - # the following commands are for the first node, with ip 192.168.10.10 (ray environment already set up) - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed' + - python3 ../examples/offline_inference/data_parallel.py --dp-size=2 --tp-size=1 --node-size=2 --node-rank=0 --master-addr=192.168.10.10 --master-port=12345 --enforce-eager --trust-remote-code - VLLM_MULTI_NODE=1 pytest -v -s distributed/test_multi_node_assignment.py - VLLM_MULTI_NODE=1 pytest -v -s distributed/test_pipeline_parallel.py - # the following commands are for the second node, with ip 192.168.10.11 (ray environment already set up) - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed' + - python3 ../examples/offline_inference/data_parallel.py --dp-size=2 --tp-size=1 --node-size=2 --node-rank=1 --master-addr=192.168.10.10 --master-port=12345 --enforce-eager --trust-remote-code - label: Distributed Tests (2 GPUs) # 40min mirror_hardwares: [amdexperimental] diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 05e0be61adad9..63967e4d2d4bc 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -1568,6 +1568,8 @@ class LLM: pbar.update(n) else: pbar.update(1) + if pbar.n == num_requests: + pbar.refresh() if use_tqdm: pbar.close() diff --git a/vllm/v1/engine/core.py b/vllm/v1/engine/core.py index da65550354d09..453ed364dc81b 100644 --- a/vllm/v1/engine/core.py +++ b/vllm/v1/engine/core.py @@ -877,12 +877,16 @@ class DPEngineCoreProc(EngineCoreProc): local_unfinished_reqs) if not self.engines_running: - if self.dp_rank == 0: + if self.dp_rank == 0 or not self.has_coordinator: # Notify client that we are pausing the loop. logger.debug("Wave %d finished, pausing engine loop.", self.current_wave) + # In the coordinator case, dp rank 0 sends updates to the + # coordinator. Otherwise (offline spmd case), each rank + # sends the update to its colocated front-end process. + client_index = -1 if self.has_coordinator else 0 self.output_queue.put_nowait( - (-1, + (client_index, EngineCoreOutputs(wave_complete=self.current_wave))) self.current_wave += 1 diff --git a/vllm/v1/engine/core_client.py b/vllm/v1/engine/core_client.py index 8058cd3127df7..856310df58884 100644 --- a/vllm/v1/engine/core_client.py +++ b/vllm/v1/engine/core_client.py @@ -155,6 +155,11 @@ class EngineCoreClient(ABC): kwargs: Optional[dict[str, Any]] = None) -> list[_R]: raise NotImplementedError + def dp_engines_running(self) -> bool: + """Returns True id data parallel engines are collectively in a + running state.""" + raise NotImplementedError + async def get_output_async(self) -> EngineCoreOutputs: raise NotImplementedError @@ -282,6 +287,9 @@ class InprocClient(EngineCoreClient): kwargs: Optional[dict[str, Any]] = None) -> list[_R]: return self.engine_core.collective_rpc(method, timeout, args, kwargs) + def dp_engines_running(self) -> bool: + return False + @dataclass class BackgroundResources: @@ -384,6 +392,9 @@ class MPClient(EngineCoreClient): dp_size = parallel_config.data_parallel_size dp_rank = parallel_config.data_parallel_rank + # State used for data parallel. + self.engines_running = False + # SPMD mode is where there is an LLM instance per DP rank and # one core engine per LLM, see # examples/offline_inference/data_parallel.py. @@ -539,6 +550,9 @@ class MPClient(EngineCoreClient): while self.pending_messages and self.pending_messages[-1][0].done: self.pending_messages.pop() + def dp_engines_running(self) -> bool: + return self.engines_running + def _process_utility_output(output: UtilityOutput, utility_results: dict[int, AnyFuture]): @@ -562,6 +576,7 @@ class SyncMPClient(MPClient): log_stats=log_stats, ) + self.is_dp = self.vllm_config.parallel_config.data_parallel_size > 1 self.outputs_queue = queue.Queue[Union[EngineCoreOutputs, Exception]]() # Ensure that the outputs socket processing thread does not have @@ -623,6 +638,8 @@ class SyncMPClient(MPClient): outputs = self.outputs_queue.get() if isinstance(outputs, Exception): raise self._format_exception(outputs) from None + if outputs.wave_complete is not None: + self.engines_running = False return outputs def _send_input(self, request_type: EngineCoreRequestType, request: Any): @@ -650,6 +667,8 @@ class SyncMPClient(MPClient): return future.result() def add_request(self, request: EngineCoreRequest) -> None: + if self.is_dp: + self.engines_running = True self._send_input(EngineCoreRequestType.ADD, request) def abort_requests(self, request_ids: list[str]) -> None: @@ -911,7 +930,6 @@ class DPAsyncMPClient(AsyncMPClient): client_addresses: Optional[dict[str, str]] = None, client_index: int = 0): self.current_wave = 0 - self.engines_running = False # To route aborts to the correct engine. self.reqs_in_flight: dict[str, CoreEngine] = {} diff --git a/vllm/v1/engine/llm_engine.py b/vllm/v1/engine/llm_engine.py index 1932cd10bb1bc..25fab27131142 100644 --- a/vllm/v1/engine/llm_engine.py +++ b/vllm/v1/engine/llm_engine.py @@ -160,7 +160,7 @@ class LLMEngine: def has_unfinished_requests(self) -> bool: has_unfinished = self.output_processor.has_unfinished_requests() if self.dp_group is None: - return has_unfinished + return has_unfinished or self.engine_core.dp_engines_running() return self.has_unfinished_requests_dp(has_unfinished) def has_unfinished_requests_dp(self, has_unfinished: bool) -> bool: From 91f7d9d0b698a3488d62717246abb1d1e2354a68 Mon Sep 17 00:00:00 2001 From: lkchen Date: Tue, 24 Jun 2025 12:46:10 -0700 Subject: [PATCH 112/211] [P/D] Asynchronously do _nixl_handshake (#19836) Signed-off-by: Linkun Chen Signed-off-by: Nick Hill Co-authored-by: Nick Hill --- .../kv_connector/unit/test_nixl_connector.py | 216 +++++++++++++----- .../kv_connector/v1/nixl_connector.py | 134 ++++++++--- 2 files changed, 259 insertions(+), 91 deletions(-) diff --git a/tests/v1/kv_connector/unit/test_nixl_connector.py b/tests/v1/kv_connector/unit/test_nixl_connector.py index b00be7b83e12b..ab9729aae2e9f 100644 --- a/tests/v1/kv_connector/unit/test_nixl_connector.py +++ b/tests/v1/kv_connector/unit/test_nixl_connector.py @@ -7,13 +7,6 @@ from collections import defaultdict from typing import Optional from unittest.mock import patch -import pytest - -try: - from nixl._api import nixl_agent as NixlWrapper -except ImportError: - NixlWrapper = None - from vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector import ( KVConnectorRole, NixlAgentMetadata, NixlConnector, NixlConnectorMetadata, NixlConnectorWorker) @@ -92,7 +85,8 @@ def test_prompt_less_than_block_size(): class FakeNixlWrapper: """Mock implementation of NixlWrapper for testing. - We don't inherit from NixlWrapper because NixlWrapper could be None. + We don't inherit from nixl._api.nixl_agent because nixl may not be + installed. """ AGENT_METADATA = b"fake_agent_metadata" @@ -167,7 +161,7 @@ class FakeNixlConnectorWorker(NixlConnectorWorker): super().__init__(*args, **kwargs) self._hand_shake_latency = hand_shake_latency - def _nixl_handshake(self, host: str, port: int): + def _nixl_handshake(self, host: str, port: int) -> dict[int, str]: # Mimic slow _nixl_handshake, as well as bypass zmq communication. time.sleep(self._hand_shake_latency) # These should've been done in register_kv_caches(), called by @@ -177,7 +171,7 @@ class FakeNixlConnectorWorker(NixlConnectorWorker): self.num_blocks = 1 self.dst_num_blocks[self.engine_id] = self.num_blocks - self.add_remote_agent( + remote_agent_name = self.add_remote_agent( NixlAgentMetadata( engine_id=self.REMOTE_ENGINE_ID, agent_metadata=FakeNixlWrapper.AGENT_METADATA, @@ -187,40 +181,101 @@ class FakeNixlConnectorWorker(NixlConnectorWorker): block_len=self.block_len, attn_backend_name=self.backend_name, )) + return {0: remote_agent_name} -@pytest.mark.skipif(NixlWrapper is None, reason="nixl not installed") -@patch( - "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper", - FakeNixlWrapper) -def test_multi_xfer_one_engine( - # dist_init is a fixture that initializes the distributed environment. - dist_init): - """Test case where multiple xfers are initiated to the same engine. - - This test triggers the connector to load remote KV for the same - `request_id`. The transfer is not done immediately due to - `set_cycles_before_xfer_done`, so there is a state where there are multiple - transfer states for the same `request_id`, and `get_finished` should handle - it correctly (wait for all transfers to be done). - """ - vllm_config = create_vllm_config() +class TestNixlHandshake: - request_id = "req_id" + @patch( + "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper", + FakeNixlWrapper) + def test_multi_xfer_one_engine( + self, + # dist_init is a fixture that initializes the distributed environment. + dist_init): + """Test case where multiple xfers are initiated to the same engine. + + This test triggers the connector to load remote KV for the same + `request_id`. The transfer is not done immediately due to + `set_cycles_before_xfer_done`, so there is a state where there are + multiple transfer states for the same `request_id`, and `get_finished` + should handle it correctly (wait for all transfers to be done). + """ + vllm_config = create_vllm_config() - # Test worker role in decode server. - connector = NixlConnector(vllm_config, KVConnectorRole.WORKER) - connector.connector_worker = FakeNixlConnectorWorker(vllm_config, - connector.engine_id, - hand_shake_latency=0) - assert isinstance(connector.connector_worker.nixl_wrapper, FakeNixlWrapper) - connector.connector_worker.nixl_wrapper.set_cycles_before_xfer_done(3) - for i in range(4): + request_id = "req_id" + + # Test worker role in decode server. + connector = NixlConnector(vllm_config, KVConnectorRole.WORKER) + connector.connector_worker = FakeNixlConnectorWorker( + vllm_config, connector.engine_id, hand_shake_latency=0) + assert isinstance(connector.connector_worker.nixl_wrapper, + FakeNixlWrapper) + connector.connector_worker.nixl_wrapper.set_cycles_before_xfer_done(3) + num_xfers = 4 + while True: + # For the same request_id, initiate multiple xfers across different + # round of `execute_model` calls. + metadata = NixlConnectorMetadata() + if num_xfers > 0: + num_xfers -= 1 + metadata.add_new_req( + request_id=request_id, + local_block_ids=[ + num_xfers + 1, num_xfers + 2, num_xfers + 3 + ], + kv_transfer_params={ + "remote_block_ids": + [num_xfers + 4, num_xfers + 5, num_xfers + 6], + "remote_engine_id": + FakeNixlConnectorWorker.REMOTE_ENGINE_ID, + "remote_host": + "localhost", + "remote_port": + 1234, + }) + connector.bind_connector_metadata(metadata) + + # Mimic maybe_setup_kv_connector in gpu_model_runner. + dummy_ctx = ForwardContext( + no_compile_layers={}, + attn_metadata={}, + virtual_engine=0, + ) + _before_load = time.perf_counter() + connector.start_load_kv(dummy_ctx) + _after_load = time.perf_counter() + assert _after_load - _before_load < 0.1, "start_load_kv took " \ + f"{_after_load - _before_load} seconds" + + # Mimic get_finished_kv_transfers in gpu_model_runner. + _, done_recving = connector.get_finished(finished_req_ids=set()) + if len(done_recving) > 0: + assert request_id in done_recving + break + + connector.clear_connector_metadata() + + @patch( + "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper", + FakeNixlWrapper) + def test_async_load_kv( + self, + # dist_init is a fixture that initializes the distributed environment. + dist_init): + """Test that NixlConnector's start_load_kv should be non-blocking.""" + + vllm_config = create_vllm_config() + + # Test worker role in decode server. + connector = NixlConnector(vllm_config, KVConnectorRole.WORKER) + connector.connector_worker = FakeNixlConnectorWorker( + vllm_config, connector.engine_id) metadata = NixlConnectorMetadata() - metadata.add_new_req(request_id=request_id, - local_block_ids=[i + 1, i + 2, i + 3], + metadata.add_new_req(request_id="id", + local_block_ids=[1, 2, 3], kv_transfer_params={ - "remote_block_ids": [i + 4, i + 5, i + 6], + "remote_block_ids": [4, 5, 6], "remote_engine_id": FakeNixlConnectorWorker.REMOTE_ENGINE_ID, "remote_host": "localhost", @@ -228,19 +283,74 @@ def test_multi_xfer_one_engine( }) connector.bind_connector_metadata(metadata) - dummy_ctx = ForwardContext( - no_compile_layers={}, - attn_metadata={}, - virtual_engine=0, - ) - _before_load = time.perf_counter() - connector.start_load_kv(dummy_ctx) - _after_load = time.perf_counter() - assert _after_load - _before_load < 0.1, "start_load_kv took " \ - f"{_after_load - _before_load} seconds" + timeout = 2.5 + start = time.perf_counter() + while time.perf_counter() - start < timeout: + dummy_ctx = ForwardContext( + no_compile_layers={}, + attn_metadata={}, + virtual_engine=0, + ) + _before_load = time.perf_counter() + connector.start_load_kv(dummy_ctx) + _after_load = time.perf_counter() + assert _after_load - _before_load < 0.1, "start_load_kv took " \ + f"{_after_load - _before_load} seconds" + time.sleep(0.5) # backoff for the async handshake to complete. + connector.bind_connector_metadata(NixlConnectorMetadata()) + _, done_recving = connector.get_finished(finished_req_ids=set()) + if len(done_recving) > 0: + return + raise TimeoutError("Took too long to complete async handshake.") - while True: - _, done_recving = connector.get_finished(finished_req_ids=set()) - if len(done_recving) > 0: - assert request_id in done_recving - break + @patch( + "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper", + FakeNixlWrapper) + def test_concurrent_load_kv( + self, + # dist_init is a fixture that initializes the distributed environment. + dist_init): + """Test that multiple start_load_kv calls should occur concurrently.""" + + vllm_config = create_vllm_config() + + # Test worker role in decode server. + connector = NixlConnector(vllm_config, KVConnectorRole.WORKER) + connector.connector_worker = FakeNixlConnectorWorker( + vllm_config, connector.engine_id) + metadata = NixlConnectorMetadata() + total_reqs = 5 + for i in range(total_reqs): + metadata.add_new_req(request_id=f"id_{i}", + local_block_ids=[1, 2, 3], + kv_transfer_params={ + "remote_block_ids": [4, 5, 6], + "remote_engine_id": + FakeNixlConnectorWorker.REMOTE_ENGINE_ID, + "remote_host": "localhost", + "remote_port": 1234, + }) + connector.bind_connector_metadata(metadata) + + timeout = 2.5 * total_reqs + cnt_finished_reqs = 0 + start = time.perf_counter() + while time.perf_counter() - start < timeout: + dummy_ctx = ForwardContext( + no_compile_layers={}, + attn_metadata={}, + virtual_engine=0, + ) + _before_load = time.perf_counter() + connector.start_load_kv(dummy_ctx) + _after_load = time.perf_counter() + assert _after_load - _before_load < 0.1, "start_load_kv took " \ + f"{_after_load - _before_load} seconds" + time.sleep(0.5) # backoff for the async handshake to complete. + connector.bind_connector_metadata(NixlConnectorMetadata()) + _, done_recving = connector.get_finished(finished_req_ids=set()) + if len(done_recving) > 0: + cnt_finished_reqs += len(done_recving) + if cnt_finished_reqs == total_reqs: + return + raise TimeoutError("Took too long to complete async handshake.") diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index 65bdd7ae29d53..a962a9241d73e 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -2,11 +2,13 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import contextlib import math +import queue import threading import time import uuid from collections import defaultdict from collections.abc import Iterator +from concurrent.futures import Future, ThreadPoolExecutor from dataclasses import dataclass from typing import TYPE_CHECKING, Any, Optional @@ -23,6 +25,7 @@ from vllm.distributed.parallel_state import ( get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, get_tp_group) from vllm.distributed.utils import divide +from vllm.forward_context import ForwardContext from vllm.logger import init_logger from vllm.platforms import _Backend from vllm.utils import make_zmq_path, make_zmq_socket, round_down @@ -31,7 +34,6 @@ from vllm.v1.request import RequestStatus if TYPE_CHECKING: from vllm.attention.backends.abstract import AttentionMetadata - from vllm.forward_context import ForwardContext from vllm.v1.core.kv_cache_manager import KVCacheBlocks from vllm.v1.request import Request @@ -71,7 +73,7 @@ class ReqMeta: remote_block_ids: list[int] remote_host: str remote_port: int - remote_engine_id: str + remote_engine_id: EngineId class NixlConnectorMetadata(KVConnectorMetadata): @@ -81,7 +83,7 @@ class NixlConnectorMetadata(KVConnectorMetadata): def add_new_req( self, - request_id: str, + request_id: ReqId, local_block_ids: list[int], kv_transfer_params: dict[str, Any], ): @@ -102,7 +104,7 @@ class NixlConnector(KVConnectorBase_V1): self.engine_id: EngineId = vllm_config.kv_transfer_config.engine_id if role == KVConnectorRole.SCHEDULER: - self.connector_scheduler : Optional[NixlConnectorScheduler] = \ + self.connector_scheduler: Optional[NixlConnectorScheduler] = \ NixlConnectorScheduler(vllm_config, self.engine_id) self.connector_worker: Optional[NixlConnectorWorker] = None elif role == KVConnectorRole.WORKER: @@ -186,7 +188,7 @@ class NixlConnectorScheduler: self.side_channel_host = envs.VLLM_NIXL_SIDE_CHANNEL_HOST self.side_channel_port = ( envs.VLLM_NIXL_SIDE_CHANNEL_PORT + - vllm_config.parallel_config.data_parallel_rank_local * + vllm_config.parallel_config.data_parallel_rank * vllm_config.parallel_config.tensor_parallel_size) logger.info("Initializing NIXL Scheduler %s", engine_id) @@ -343,7 +345,7 @@ class NixlConnectorWorker: # Each TP rank listens/queries on the base_port + tp_rank. self.side_channel_port: int = ( envs.VLLM_NIXL_SIDE_CHANNEL_PORT + - vllm_config.parallel_config.data_parallel_rank_local * + vllm_config.parallel_config.data_parallel_rank * vllm_config.parallel_config.tensor_parallel_size) # Metadata. @@ -386,8 +388,17 @@ class NixlConnectorWorker: self._done_sending_count: defaultdict[ReqId, int] = defaultdict(lambda: 0) - # Background thread for establishing new connections. + # Background thread for handling new handshake requests. self._nixl_handshake_listener_t: Optional[threading.Thread] = None + # Background thread for initializing new NIXL handshakes. + self._handshake_initiation_executor = ThreadPoolExecutor( + # NIXL is not guaranteed to be thread-safe, limit 1 worker. + max_workers=1, + thread_name_prefix="vllm-nixl-handshake-initiator") + self._ready_requests = queue.Queue[tuple[ReqId, ReqMeta]]() + self._handshake_futures: dict[EngineId, Future[dict[int, str]]] = {} + # Protects _handshake_futures and _remote_agents. + self._handshake_lock = threading.RLock() self.vllm_config = vllm_config self.block_size = vllm_config.cache_config.block_size @@ -416,6 +427,12 @@ class NixlConnectorWorker: # finish reading before safely freeing the blocks. self.consumer_notification_counts_by_req = defaultdict[ReqId, int](int) + def __del__(self): + """Cleanup background threads on destruction.""" + self._handshake_initiation_executor.shutdown(wait=False) + if self._nixl_handshake_listener_t: + self._nixl_handshake_listener_t.join(timeout=0) + @staticmethod def _nixl_handshake_listener(metadata: NixlAgentMetadata, ready_event: threading.Event, base_port: int, @@ -443,7 +460,7 @@ class NixlConnectorWorker: "Connection listener got unexpected message %s", msg) sock.send_multipart((identity, b"", encoded_data)) - def _nixl_handshake(self, host: str, port: int): + def _nixl_handshake(self, host: str, port: int) -> dict[int, str]: """Do a NIXL handshake with a remote instance.""" start_time = time.perf_counter() @@ -452,7 +469,7 @@ class NixlConnectorWorker: # a hack to keep us moving. We will switch when moving to etcd # or where we have a single ZMQ socket in the scheduler. - def handshake(path: str, rank: int) -> NixlAgentMetadata: + def handshake(path: str, rank: int) -> tuple[NixlAgentMetadata, str]: # Send query for the request. with zmq_ctx(zmq.REQ, path) as sock: sock.send(GET_META_MSG) @@ -462,19 +479,20 @@ class NixlConnectorWorker: got_metadata_time = time.perf_counter() # Register Remote agent. - self.add_remote_agent(metadata, rank) + remote_agent_name = self.add_remote_agent(metadata, rank) setup_agent_time = time.perf_counter() logger.debug("NIXL handshake: get metadata took: %s", got_metadata_time - start_time) logger.debug("NIXL handshake: add agent took: %s", setup_agent_time - got_metadata_time) - return metadata + return metadata, remote_agent_name # Handshake with remote agent-rank0 first to get the tp_size of remote path = make_zmq_path("tcp", host, port) logger.debug("Querying master rank metadata on path: %s", path) - metadata = handshake(path, 0) + rank_to_agent_name: dict[int, str] = {} + metadata, rank_to_agent_name[0] = handshake(path, 0) # Handshake only with the other TP remote the current local rank will # pull from. With homogeneous TP it happens to be the same rank_i. @@ -484,7 +502,10 @@ class NixlConnectorWorker: path = make_zmq_path("tcp", host, port + p_remote_rank) logger.debug("Querying metadata on path: %s at remote rank %s", path, p_remote_rank) - _ = handshake(path, p_remote_rank) + _, rank_to_agent_name[p_remote_rank] = handshake( + path, p_remote_rank) + + return rank_to_agent_name def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): """Register the KV Cache data in nixl.""" @@ -621,11 +642,11 @@ class NixlConnectorWorker: daemon=True, name="nixl_handshake_listener") self._nixl_handshake_listener_t.start() - ready_event.wait() + ready_event.wait() # Wait for listener ZMQ socket to be ready. def add_remote_agent(self, nixl_agent_meta: NixlAgentMetadata, - remote_tp_rank: int = 0): + remote_tp_rank: int = 0) -> str: """ Add the remote NIXL agent and prepare the descriptors for reading cache blocks from remote. @@ -666,8 +687,8 @@ class NixlConnectorWorker: """ # noqa: E501 engine_id = nixl_agent_meta.engine_id # TODO re-evaluate refreshing for scaling/recovery - if remote_tp_rank in self._remote_agents.get(engine_id, ()): - return + if remote_tp_rank in self._remote_agents.get(engine_id, {}): + return self._remote_agents[engine_id][remote_tp_rank] if engine_id in self._tp_size: assert self._tp_size[engine_id] == nixl_agent_meta.tp_size @@ -677,9 +698,8 @@ class NixlConnectorWorker: # layout and close outputs. assert nixl_agent_meta.attn_backend_name == self.backend_name - self._remote_agents[engine_id][ - remote_tp_rank] = self.nixl_wrapper.add_remote_agent( - nixl_agent_meta.agent_metadata) + remote_agent_name = self.nixl_wrapper.add_remote_agent( + nixl_agent_meta.agent_metadata) # Number of D TP workers reading from a single P TP worker. This is # 1 when P and D `--tensor-parallel-size` match. @@ -708,8 +728,9 @@ class NixlConnectorWorker: "local_kv_heads*tp_ratio, block_size, head_dim] and same dtype." ) - assert self.block_size == remote_block_size, "Remote P worker with " \ - "different block size is not supported" + assert self.block_size == remote_block_size, ( + "Remote P worker with different block size is not supported " + f"{self.block_size=} {remote_block_size=}") # Create dst descs and xfer side handles. TP workers have same #blocks. if engine_id in self.dst_num_blocks: @@ -748,7 +769,9 @@ class NixlConnectorWorker: descs = self.nixl_wrapper.get_xfer_descs(blocks_data, "VRAM") self.dst_xfer_side_handles[ engine_id] = self.nixl_wrapper.prep_xfer_dlist( - self._remote_agents[engine_id][remote_tp_rank], descs) + remote_agent_name, descs) + + return remote_agent_name def get_finished(self) -> tuple[set[str], set[str]]: """ @@ -866,33 +889,68 @@ class NixlConnectorWorker: We check for these trnxs to complete in each step(). """ for req_id, meta in metadata.requests.items(): + remote_engine_id = meta.remote_engine_id logger.debug( "start_load_kv for request %s from remote engine %s. " "Num local_block_ids: %s. Num remote_block_ids: %s. ", req_id, - meta.remote_engine_id, len(meta.local_block_ids), + remote_engine_id, len(meta.local_block_ids), len(meta.remote_block_ids)) - self._read_blocks( - request_id=req_id, - dst_engine_id=meta.remote_engine_id, - local_block_ids=meta.local_block_ids, - remote_block_ids=meta.remote_block_ids, - remote_host=meta.remote_host, - remote_port=meta.remote_port, - ) + if remote_engine_id not in self._remote_agents: + # Being optimistic to assume engine is usually ready, apply + # lock only when the optimistic check fails. + with self._handshake_lock: + if remote_engine_id not in self._remote_agents: + fut = self._handshake_futures.get(remote_engine_id) + if fut is None: + fut = self._handshake_initiation_executor.submit( + self._nixl_handshake, meta.remote_host, + meta.remote_port) + self._handshake_futures[remote_engine_id] = fut + + def done_callback(f: Future[dict[int, str]], + eid=remote_engine_id): + with self._handshake_lock: + del self._handshake_futures[eid] + try: + self._remote_agents[eid] = f.result() + except Exception: + logger.exception( + "Handshake with %s failed", eid) + + fut.add_done_callback(done_callback) + + # TODO: handle failure state of future in the + # callback, we want to fail the request in this case. + def request_ready(_f: Future[Any], + entry=(req_id, meta)): + self._ready_requests.put(entry) + + fut.add_done_callback(request_ready) + continue + self._read_blocks_for_req(req_id, meta) + + # Start transfers for requests whose handshakes have now finished. + while not self._ready_requests.empty(): + self._read_blocks_for_req(*self._ready_requests.get_nowait()) + + def _read_blocks_for_req(self, req_id: str, meta: ReqMeta): + logger.debug( + "Remote agent %s available, calling _read_blocks for req %s", + meta.remote_engine_id, req_id) + self._read_blocks( + request_id=req_id, + dst_engine_id=meta.remote_engine_id, + local_block_ids=meta.local_block_ids, + remote_block_ids=meta.remote_block_ids, + ) def _read_blocks( self, local_block_ids: list[int], remote_block_ids: list[int], - remote_host: str, - remote_port: int, dst_engine_id: str, request_id: str, ): - # NOTE(rob): this takes ~2s. We need to get this off the hotpath. - if dst_engine_id not in self._remote_agents: - self._nixl_handshake(remote_host, remote_port) - # NOTE(rob): having the staging blocks be on the READER side is # not going to work well (since we will have to call rearrange tensors). # after we detect the txn is complete (which means we cannot make the From c6e3bba8e68ef586e346a0a8f5c3af168148fd63 Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Tue, 24 Jun 2025 15:51:56 -0400 Subject: [PATCH 113/211] [Feature] Integrate new deepgemm (#19820) Signed-off-by: yewentao256 --- benchmarks/kernels/benchmark_moe.py | 3 + .../benchmark_fp8_block_dense_gemm.py | 312 ++++++++---------- tests/kernels/moe/test_deepep_deepgemm_moe.py | 23 +- tests/kernels/quantization/test_block_fp8.py | 55 +-- .../layers/fused_moe/batched_deep_gemm_moe.py | 19 +- .../layers/fused_moe/deep_gemm_moe.py | 9 +- .../layers/quantization/deepgemm.py | 2 +- .../layers/quantization/utils/fp8_utils.py | 71 +++- 8 files changed, 230 insertions(+), 264 deletions(-) diff --git a/benchmarks/kernels/benchmark_moe.py b/benchmarks/kernels/benchmark_moe.py index cef53b183cef3..99d8d3eee0df3 100644 --- a/benchmarks/kernels/benchmark_moe.py +++ b/benchmarks/kernels/benchmark_moe.py @@ -86,6 +86,9 @@ def benchmark_config( (num_experts, 2 * shard_intermediate_size), dtype=torch.float32 ) w2_scale = torch.randn((hidden_size, num_experts), dtype=torch.float32) + if use_deep_gemm: + # we use the default block shape for deepgemm + block_quant_shape = [128, 128] if use_fp8_w8a8: if block_quant_shape: block_n, block_k = block_quant_shape[0], block_quant_shape[1] diff --git a/benchmarks/kernels/deepgemm/benchmark_fp8_block_dense_gemm.py b/benchmarks/kernels/deepgemm/benchmark_fp8_block_dense_gemm.py index e67ce05453181..253d2984aa9df 100644 --- a/benchmarks/kernels/deepgemm/benchmark_fp8_block_dense_gemm.py +++ b/benchmarks/kernels/deepgemm/benchmark_fp8_block_dense_gemm.py @@ -1,13 +1,11 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -# fmt: off -# ruff: noqa: E501 import time -# Import DeepGEMM functions -import deep_gemm import torch -from deep_gemm import calc_diff, ceil_div, get_col_major_tma_aligned_tensor +from deep_gemm import fp8_gemm_nt +from deep_gemm.testing.numeric import calc_diff +from deep_gemm.utils.math import ceil_div, per_block_cast_to_fp8, per_token_cast_to_fp8 # Import vLLM functions from vllm import _custom_ops as ops @@ -18,107 +16,84 @@ from vllm.model_executor.layers.quantization.utils.fp8_utils import ( from vllm.triton_utils import triton -# Copied from -# https://github.com/deepseek-ai/DeepGEMM/blob/78cacf70d41d15d688bd493ebc85845f7f2a3d5d/tests/test_core.py#L9 -def per_token_cast_to_fp8( - x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: - """Convert tensor to FP8 format with per-token scaling.""" - assert x.dim() == 2 and x.size(1) % 128 == 0 - m, n = x.shape - x_view = x.view(m, -1, 128) - x_amax = x_view.abs().float().amax(dim=2).view(m, -1).clamp(1e-4) - return (x_view * (448.0 / x_amax.unsqueeze(2))).to( - torch.float8_e4m3fn).view(m, n), (x_amax / 448.0).view(m, -1) - - # Copied from # https://github.com/deepseek-ai/DeepGEMM/blob/78cacf70d41d15d688bd493ebc85845f7f2a3d5d/tests/test_core.py#L17 -def per_block_cast_to_fp8( - x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: +def per_block_cast_to_fp8_vllm(x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: """Convert tensor to FP8 format with per-block scaling.""" assert x.dim() == 2 m, n = x.shape - x_padded = torch.zeros((ceil_div(m, 128) * 128, ceil_div(n, 128) * 128), - dtype=x.dtype, - device=x.device) + x_padded = torch.zeros( + (ceil_div(m, 128) * 128, ceil_div(n, 128) * 128), dtype=x.dtype, device=x.device + ) x_padded[:m, :n] = x x_view = x_padded.view(-1, 128, x_padded.size(1) // 128, 128) x_amax = x_view.abs().float().amax(dim=(1, 3), keepdim=True).clamp(1e-4) x_scaled = (x_view * (448.0 / x_amax)).to(torch.float8_e4m3fn) - return x_scaled.view_as(x_padded)[:m, :n].contiguous(), ( - x_amax / 448.0).view(x_view.size(0), x_view.size(2)) + return x_scaled.view_as(x_padded)[:m, :n].contiguous(), (x_amax / 448.0).view( + x_view.size(0), x_view.size(2) + ) -def benchmark_shape(m: int, - n: int, - k: int, - warmup: int = 100, - repeat: int = 10000, - verbose: bool = False) -> dict: +def benchmark_shape( + m: int, + n: int, + k: int, + warmup: int = 100, + repeat: int = 10000, + verbose: bool = False, +) -> dict: """Benchmark all implementations for a specific (m, n, k) shape.""" if verbose: print(f"\n=== Benchmarking shape: m={m}, n={n}, k={k} ===") - # Create test tensors - A = torch.randn((m, k), device='cuda', dtype=torch.bfloat16) - B = torch.randn((n, k), device='cuda', dtype=torch.bfloat16) - - # Reference result in BF16 + A = torch.randn((m, k), device="cuda", dtype=torch.bfloat16) + B = torch.randn((n, k), device="cuda", dtype=torch.bfloat16) torch.cuda.synchronize() C_ref = A @ B.t() # Pre-quantize B for all implementations # (weights can be pre-quantized offline) B_deepgemm, B_scale_deepgemm = per_block_cast_to_fp8(B) - B_vllm, B_scale_vllm = per_block_cast_to_fp8(B) + B_vllm, B_scale_vllm = per_block_cast_to_fp8_vllm(B) # Block size configuration block_size = [128, 128] # Pre-quantize A for all implementations A_deepgemm, A_scale_deepgemm = per_token_cast_to_fp8(A) - A_scale_deepgemm = get_col_major_tma_aligned_tensor(A_scale_deepgemm) - C_deepgemm = torch.empty((m, n), device='cuda', dtype=torch.bfloat16) + C_deepgemm = ( + torch.empty((n, m), device="cuda", dtype=torch.bfloat16).t().contiguous() + ) A_vllm, A_scale_vllm = per_token_group_quant_fp8(A, block_size[1]) A_vllm_cutlass, A_scale_vllm_cutlass = per_token_group_quant_fp8( - A, block_size[1], column_major_scales=True) + A, block_size[1], column_major_scales=True + ) - # === DeepGEMM Implementation === def deepgemm_gemm(): - # A quantization is inside the loop as it depends on activations - # A_deepgemm, A_scale_deepgemm = per_token_cast_to_fp8(A) - # A_deepgemm, A_scale_deepgemm = per_token_group_quant_fp8( - # A, block_size[1]) - # A_scale_aligned = get_col_major_tma_aligned_tensor(A_scale_deepgemm) - # C_deepgemm = torch.empty((m, n), device='cuda', dtype=torch.bfloat16) - deep_gemm.gemm_fp8_fp8_bf16_nt((A_deepgemm, A_scale_deepgemm), - (B_deepgemm, B_scale_deepgemm), - C_deepgemm) + fp8_gemm_nt( + (A_deepgemm, A_scale_deepgemm), (B_deepgemm, B_scale_deepgemm), C_deepgemm + ) return C_deepgemm - # === vLLM Triton Implementation === def vllm_triton_gemm(): - # A quantization is inside the loop as it depends on activations - # A_vllm, A_scale_vllm = per_token_group_quant_fp8(A, block_size[1]) - return w8a8_block_fp8_matmul(A_vllm, - B_vllm, - A_scale_vllm, - B_scale_vllm, - block_size, - output_dtype=torch.bfloat16) + return w8a8_block_fp8_matmul( + A_vllm, + B_vllm, + A_scale_vllm, + B_scale_vllm, + block_size, + output_dtype=torch.bfloat16, + ) - # === vLLM CUTLASS Implementation === def vllm_cutlass_gemm(): - # A quantization is inside the loop as it depends on activations - # A_vllm_cutlass, A_scale_vllm_cutlass = per_token_group_quant_fp8( - # A, block_size[1], column_major_scales=True) - return ops.cutlass_scaled_mm(A_vllm_cutlass, - B_vllm.T, - scale_a=A_scale_vllm_cutlass, - scale_b=B_scale_vllm.T, - out_dtype=torch.bfloat16) + return ops.cutlass_scaled_mm( + A_vllm_cutlass, + B_vllm.T, + scale_a=A_scale_vllm_cutlass, + scale_b=B_scale_vllm.T, + out_dtype=torch.bfloat16, + ) - # Run correctness check first if verbose: print("Running correctness check...") C_deepgemm = deepgemm_gemm() @@ -133,26 +108,22 @@ def benchmark_shape(m: int, print(f"DeepGEMM vs Reference difference: {deepgemm_diff:.6f}") print(f"vLLM Triton vs Reference difference: {vllm_triton_diff:.6f}") print(f"vLLM CUTLASS vs Reference difference: {vllm_cutlass_diff:.6f}") - print("vLLM Triton vs DeepGEMM difference: " - f"{calc_diff(C_vllm_triton, C_deepgemm):.6f}") - print("vLLM CUTLASS vs DeepGEMM difference: " - f"{calc_diff(C_vllm_cutlass, C_deepgemm):.6f}") + print( + "vLLM Triton vs DeepGEMM difference: " + f"{calc_diff(C_vllm_triton, C_deepgemm):.6f}" + ) + print( + "vLLM CUTLASS vs DeepGEMM difference: " + f"{calc_diff(C_vllm_cutlass, C_deepgemm):.6f}" + ) - # Benchmark implementations implementations = { "DeepGEMM": deepgemm_gemm, "vLLM Triton": vllm_triton_gemm, - "vLLM CUTLASS": vllm_cutlass_gemm + "vLLM CUTLASS": vllm_cutlass_gemm, } - benchmark_results = { - "shape": { - "m": m, - "n": n, - "k": k - }, - "implementations": {} - } + benchmark_results = {"shape": {"m": m, "n": n, "k": k}, "implementations": {}} for name, func in implementations.items(): # Warmup @@ -180,38 +151,36 @@ def benchmark_shape(m: int, "tflops": tflops, "gb_s": gb_s, "diff": { - "DeepGEMM": - 0.0 if name == "DeepGEMM" else calc_diff(func(), C_deepgemm), - "Reference": - deepgemm_diff if name == "DeepGEMM" else - (vllm_triton_diff - if name == "vLLM Triton" else vllm_cutlass_diff) - } + "DeepGEMM": 0.0 + if name == "DeepGEMM" + else calc_diff(func(), C_deepgemm), + "Reference": deepgemm_diff + if name == "DeepGEMM" + else (vllm_triton_diff if name == "vLLM Triton" else vllm_cutlass_diff), + }, } if verbose: - print( - f"{name}: {avg_time_ms:.3f} ms, {tflops:.2f} TFLOPS, {gb_s:.2f} GB/s" - ) + print(f"{name}: {avg_time_ms:.3f} ms, {tflops:.2f} TFLOPS, {gb_s:.2f} GB/s") # Calculate speedups baseline = benchmark_results["implementations"]["DeepGEMM"]["time_ms"] for name, data in benchmark_results["implementations"].items(): if name != "DeepGEMM": speedup = baseline / data["time_ms"] - benchmark_results["implementations"][name][ - "speedup_vs_deepgemm"] = speedup + benchmark_results["implementations"][name]["speedup_vs_deepgemm"] = speedup if verbose: - print(f"DeepGEMM is {1/speedup:.2f}x " - f"{'faster' if 1/speedup > 1 else 'slower'} than {name}") + print( + f"DeepGEMM is {1 / speedup:.2f}x " + f"{'faster' if 1 / speedup > 1 else 'slower'} than {name}" + ) - vllm_triton_time = benchmark_results["implementations"]["vLLM Triton"][ - "time_ms"] - vllm_cutlass_time = benchmark_results["implementations"]["vLLM CUTLASS"][ - "time_ms"] + vllm_triton_time = benchmark_results["implementations"]["vLLM Triton"]["time_ms"] + vllm_cutlass_time = benchmark_results["implementations"]["vLLM CUTLASS"]["time_ms"] cutlass_vs_triton = vllm_triton_time / vllm_cutlass_time - benchmark_results["implementations"]["vLLM CUTLASS"][ - "speedup_vs_triton"] = cutlass_vs_triton + benchmark_results["implementations"]["vLLM CUTLASS"]["speedup_vs_triton"] = ( + cutlass_vs_triton + ) if verbose: print( f"vLLM CUTLASS is {cutlass_vs_triton:.2f}x " @@ -223,8 +192,7 @@ def benchmark_shape(m: int, def format_table_row(values, widths): """Format a row with specified column widths.""" - return "| " + " | ".join(f"{val:{w}}" - for val, w in zip(values, widths)) + " |" + return "| " + " | ".join(f"{val:{w}}" for val, w in zip(values, widths)) + " |" def print_table(headers, rows, title=None): @@ -232,16 +200,12 @@ def print_table(headers, rows, title=None): if title: print(f"\n{title}") - # Calculate column widths based on headers and data widths = [ max(len(str(h)), max(len(str(row[i])) for row in rows)) for i, h in enumerate(headers) ] - # Create separator line separator = "+-" + "-+-".join("-" * w for w in widths) + "-+" - - # Print table print(separator) print(format_table_row(headers, widths)) print(separator) @@ -259,44 +223,22 @@ def run_benchmarks(verbose: bool = False): """Run benchmarks for a set of common shapes.""" print("===== STARTING FP8 GEMM BENCHMARK =====") - # Make sure we're using the GPU if not torch.cuda.is_available(): print("CUDA not available! Tests require GPU.") return - # Print system information print(f"PyTorch version: {torch.__version__}") print(f"CUDA version: {torch.version.cuda}") print(f"Triton version: {triton.__version__}") print(f"Using device: {torch.cuda.get_device_name()}") - # Enable TF32 for better performance torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True - # Set seeds for reproducibility torch.manual_seed(42) torch.cuda.manual_seed(42) # Define benchmark shapes (m, n, k) - shapes = [ - (8, 4096, 7168), - (8, 7168, 18432), - (8, 18432, 7168), - (64, 4096, 7168), - (64, 7168, 18432), - (64, 18432, 7168), - (64, 24576, 1536), - (64, 32768, 512), - (64, 7168, 16384), - (128, 4096, 7168), - (128, 7168, 18432), - (128, 18432, 7168), - (1024, 4096, 7168), - (1024, 18432, 7168), - (2048, 4096, 7168), - (4096, 4096, 7168), - ] shapes = [ # (64, 2112, 7168), (64, 24576, 1536), @@ -323,7 +265,6 @@ def run_benchmarks(verbose: bool = False): result = benchmark_shape(m, n, k, verbose=verbose) all_results.append(result) - # Print results in a nicely formatted table print("\n===== PERFORMANCE COMPARISON =====") # Print DeepGEMM table @@ -332,38 +273,50 @@ def run_benchmarks(verbose: bool = False): for result in all_results: shape = result["shape"] impl_data = result["implementations"]["DeepGEMM"] - deepgemm_rows.append([ - shape["m"], shape["n"], shape["k"], f"{impl_data['time_us']:.1f}", - f"{impl_data['tflops']:.1f}", f"{impl_data['gb_s']:.1f}" - ]) + deepgemm_rows.append( + [ + shape["m"], + shape["n"], + shape["k"], + f"{impl_data['time_us']:.1f}", + f"{impl_data['tflops']:.1f}", + f"{impl_data['gb_s']:.1f}", + ] + ) - print_table(deepgemm_headers, - deepgemm_rows, - title="DeepGEMM Implementation:") + print_table(deepgemm_headers, deepgemm_rows, title="DeepGEMM Implementation:") # Print vLLM Triton table - triton_headers = [ - "m", "n", "k", "Time (μs)", "TFLOPS", "GB/s", "vs DeepGEMM" - ] + triton_headers = ["m", "n", "k", "Time (μs)", "TFLOPS", "GB/s", "vs DeepGEMM"] triton_rows = [] for result in all_results: shape = result["shape"] impl_data = result["implementations"]["vLLM Triton"] speedup = impl_data.get("speedup_vs_deepgemm", 1.0) - triton_rows.append([ - shape["m"], shape["n"], shape["k"], f"{impl_data['time_us']:.1f}", - f"{impl_data['tflops']:.1f}", f"{impl_data['gb_s']:.1f}", - format_speedup(speedup) - ]) + triton_rows.append( + [ + shape["m"], + shape["n"], + shape["k"], + f"{impl_data['time_us']:.1f}", + f"{impl_data['tflops']:.1f}", + f"{impl_data['gb_s']:.1f}", + format_speedup(speedup), + ] + ) - print_table(triton_headers, - triton_rows, - title="vLLM Triton Implementation:") + print_table(triton_headers, triton_rows, title="vLLM Triton Implementation:") # Print vLLM CUTLASS table cutlass_headers = [ - "m", "n", "k", "Time (μs)", "TFLOPS", "GB/s", "vs DeepGEMM", - "vs Triton" + "m", + "n", + "k", + "Time (μs)", + "TFLOPS", + "GB/s", + "vs DeepGEMM", + "vs Triton", ] cutlass_rows = [] for result in all_results: @@ -371,28 +324,27 @@ def run_benchmarks(verbose: bool = False): impl_data = result["implementations"]["vLLM CUTLASS"] vs_deepgemm = impl_data.get("speedup_vs_deepgemm", 1.0) vs_triton = impl_data.get("speedup_vs_triton", 1.0) - cutlass_rows.append([ - shape["m"], shape["n"], shape["k"], f"{impl_data['time_us']:.1f}", - f"{impl_data['tflops']:.1f}", f"{impl_data['gb_s']:.1f}", - format_speedup(vs_deepgemm), - format_speedup(vs_triton) - ]) + cutlass_rows.append( + [ + shape["m"], + shape["n"], + shape["k"], + f"{impl_data['time_us']:.1f}", + f"{impl_data['tflops']:.1f}", + f"{impl_data['gb_s']:.1f}", + format_speedup(vs_deepgemm), + format_speedup(vs_triton), + ] + ) - print_table(cutlass_headers, - cutlass_rows, - title="vLLM CUTLASS Implementation:") + print_table(cutlass_headers, cutlass_rows, title="vLLM CUTLASS Implementation:") # Calculate and print averages print("\n===== AVERAGE PERFORMANCE =====") implementations = ["DeepGEMM", "vLLM Triton", "vLLM CUTLASS"] avg_metrics = { - impl: { - "tflops": 0, - "gb_s": 0, - "time_ms": 0 - } - for impl in implementations + impl: {"tflops": 0, "gb_s": 0, "time_ms": 0} for impl in implementations } for result in all_results: @@ -410,9 +362,9 @@ def run_benchmarks(verbose: bool = False): avg_tflops = avg_metrics[impl]["tflops"] / num_shapes avg_mem_bw = avg_metrics[impl]["gb_s"] / num_shapes avg_time = avg_metrics[impl]["time_ms"] / num_shapes - avg_rows.append([ - impl, f"{avg_tflops:.2f}", f"{avg_mem_bw:.2f}", f"{avg_time:.2f}" - ]) + avg_rows.append( + [impl, f"{avg_tflops:.2f}", f"{avg_mem_bw:.2f}", f"{avg_time:.2f}"] + ) print_table(avg_headers, avg_rows) @@ -420,21 +372,19 @@ def run_benchmarks(verbose: bool = False): avg_speedups = { "DeepGEMM vs vLLM Triton": 0, "DeepGEMM vs vLLM CUTLASS": 0, - "vLLM CUTLASS vs vLLM Triton": 0 + "vLLM CUTLASS vs vLLM Triton": 0, } for result in all_results: deepgemm_time = result["implementations"]["DeepGEMM"]["time_ms"] vllm_triton_time = result["implementations"]["vLLM Triton"]["time_ms"] - vllm_cutlass_time = result["implementations"]["vLLM CUTLASS"][ - "time_ms"] + vllm_cutlass_time = result["implementations"]["vLLM CUTLASS"]["time_ms"] - avg_speedups[ - "DeepGEMM vs vLLM Triton"] += vllm_triton_time / deepgemm_time - avg_speedups[ - "DeepGEMM vs vLLM CUTLASS"] += vllm_cutlass_time / deepgemm_time - avg_speedups[ - "vLLM CUTLASS vs vLLM Triton"] += vllm_triton_time / vllm_cutlass_time + avg_speedups["DeepGEMM vs vLLM Triton"] += vllm_triton_time / deepgemm_time + avg_speedups["DeepGEMM vs vLLM CUTLASS"] += vllm_cutlass_time / deepgemm_time + avg_speedups["vLLM CUTLASS vs vLLM Triton"] += ( + vllm_triton_time / vllm_cutlass_time + ) print("\n===== AVERAGE SPEEDUPS =====") speedup_headers = ["Comparison", "Speedup"] @@ -446,14 +396,12 @@ def run_benchmarks(verbose: bool = False): print_table(speedup_headers, speedup_rows) - # Average accuracy comparison print("\n===== ACCURACY COMPARISON =====") avg_diff = {impl: 0 for impl in implementations} for result in all_results: for impl in implementations: - avg_diff[impl] += result["implementations"][impl]["diff"][ - "Reference"] + avg_diff[impl] += result["implementations"][impl]["diff"]["Reference"] diff_headers = ["Implementation", "Avg Diff vs Reference"] diff_rows = [] diff --git a/tests/kernels/moe/test_deepep_deepgemm_moe.py b/tests/kernels/moe/test_deepep_deepgemm_moe.py index 2d7cf39a8cca5..b418a22a48ec2 100644 --- a/tests/kernels/moe/test_deepep_deepgemm_moe.py +++ b/tests/kernels/moe/test_deepep_deepgemm_moe.py @@ -66,25 +66,6 @@ def next_power_of_2(x): return 2**math.ceil(math.log2(x)) -def per_block_cast_to_fp8( - x: torch.Tensor, - block_size_n: int = 128) -> tuple[torch.Tensor, torch.Tensor]: - assert x.dim() == 2 - m, n = x.shape - x_padded = torch.zeros( - (deep_gemm.ceil_div(m, 128) * 128, - deep_gemm.ceil_div(n, block_size_n) * block_size_n), - dtype=x.dtype, - device=x.device) - x_padded[:m, :n] = x - x_view = x_padded.view(-1, 128, x_padded.size(1) // 128, block_size_n) - x_amax = x_view.abs().float().amax(dim=(1, 3), keepdim=True).clamp(1e-4) - x_scaled = (x_view * (448.0 / x_amax)).to(torch.float8_e4m3fn) - x_scaled_sub = x_scaled.view_as(x_padded)[:m, :n].contiguous() - scales = (x_amax / 448.0).view(x_view.size(0), x_view.size(2)) - return x_scaled_sub, scales - - def make_block_quant_fp8_weights( e: int, n: int, @@ -125,8 +106,8 @@ def make_block_quant_fp8_weights( assert (w2.shape[-2] + block_n - 1) // block_n == w2_s.shape[-2] for i in range(e): - w1[i], w1_s[i] = per_block_cast_to_fp8(w1_bf16[i]) - w2[i], w2_s[i] = per_block_cast_to_fp8(w2_bf16[i]) + w1[i], w1_s[i] = deep_gemm.utils.math.per_block_cast_to_fp8(w1_bf16[i]) + w2[i], w2_s[i] = deep_gemm.utils.math.per_block_cast_to_fp8(w2_bf16[i]) return w1, w2, w1_s, w2_s diff --git a/tests/kernels/quantization/test_block_fp8.py b/tests/kernels/quantization/test_block_fp8.py index eec59573792db..ca9f1d39af5ea 100644 --- a/tests/kernels/quantization/test_block_fp8.py +++ b/tests/kernels/quantization/test_block_fp8.py @@ -18,7 +18,8 @@ from vllm.model_executor.layers.fused_moe.fused_moe import ( from vllm.model_executor.layers.fused_moe.moe_align_block_size import ( moe_align_block_size) from vllm.model_executor.layers.quantization.utils.fp8_utils import ( - per_token_group_quant_fp8, w8a8_block_fp8_matmul) + get_col_major_tma_aligned_tensor, per_token_group_quant_fp8, + w8a8_block_fp8_matmul) from vllm.platforms import current_platform dg_available = False @@ -263,25 +264,6 @@ def test_w8a8_block_fp8_fused_moe(M, N, K, E, topk, block_size, dtype, seed): assert rel_diff < 0.03 -def per_block_cast_to_fp8( - x: torch.Tensor, - block_size_n: int = 128) -> tuple[torch.Tensor, torch.Tensor]: - assert x.dim() == 2 - m, n = x.shape - x_padded = torch.zeros( - (deep_gemm.ceil_div(m, 128) * 128, - deep_gemm.ceil_div(n, block_size_n) * block_size_n), - dtype=x.dtype, - device=x.device) - x_padded[:m, :n] = x - x_view = x_padded.view(-1, 128, x_padded.size(1) // 128, block_size_n) - x_amax = x_view.abs().float().amax(dim=(1, 3), keepdim=True).clamp(1e-4) - x_scaled = (x_view * (448.0 / x_amax)).to(torch.float8_e4m3fn) - x_scaled_sub = x_scaled.view_as(x_padded)[:m, :n].contiguous() - scales = (x_amax / 448.0).view(x_view.size(0), x_view.size(2)) - return x_scaled_sub, scales - - @pytest.mark.parametrize( "M,N,K,block_size,out_dtype,seed", itertools.product(M, N, K, BLOCK_SIZE, OUT_DTYPES, SEEDS)) @@ -299,10 +281,8 @@ def test_w8a8_block_fp8_deep_gemm_matmul(M, N, K, block_size, out_dtype, seed): A_fp32 = (torch.rand(M, K, dtype=torch.float32) - 0.5) * 2 * fp8_max B_fp32 = (torch.rand(N, K, dtype=torch.float32) - 0.5) * 2 * fp8_max - _, block_k = block_size[0], block_size[1] - - A_fp8, As_fp8 = per_token_group_quant_fp8(A_fp32, block_k) - B_fp8, Bs_fp8 = per_block_cast_to_fp8(B_fp32) + A_fp8, As_fp8 = deep_gemm.utils.math.per_token_cast_to_fp8(A_fp32) + B_fp8, Bs_fp8 = deep_gemm.utils.math.per_block_cast_to_fp8(B_fp32) As = As_fp8.to(torch.float32) Bs = Bs_fp8.to(torch.float32) @@ -310,15 +290,12 @@ def test_w8a8_block_fp8_deep_gemm_matmul(M, N, K, block_size, out_dtype, seed): ref_out = native_w8a8_block_matmul(A_fp8, B_fp8, As, Bs, block_size, out_dtype) - # Transpose earlier so that the testing will not trigger transposing kernels - As_fp8 = deep_gemm.get_col_major_tma_aligned_tensor(As_fp8) - out = torch.zeros((M, N), device='cuda', dtype=out_dtype) assert As_fp8.shape == (M, (K + 127) // 128), f"{As_fp8.shape} != {(M, (K + 127) // 128)}" - deep_gemm.gemm_fp8_fp8_bf16_nt((A_fp8, As_fp8), (B_fp8, Bs_fp8), out) + deep_gemm.fp8_gemm_nt((A_fp8, As_fp8), (B_fp8, Bs_fp8), out) rel_diff = (torch.mean( torch.abs(out.to(torch.float32) - ref_out.to(torch.float32))) / @@ -382,16 +359,16 @@ def deep_gemm_w8a8_block_fp8_moe(M, K, a, w1, w2, w1_s, w2_s, score, topk, dtype=torch.bfloat16, device=a.device) - deep_gemm.m_grouped_gemm_fp8_fp8_bf16_nt_contiguous((a_q, a_s), (w1, w1_s), - inter_out, m_indices) + deep_gemm.m_grouped_fp8_gemm_nt_contiguous((a_q, a_s), (w1, w1_s), + inter_out, m_indices) act_out = SiluAndMul().forward_native(inter_out) act_out_q, act_out_s = per_token_group_quant_fp8(act_out, block_k) out = torch.zeros(a_q.shape[0], K, dtype=torch.bfloat16, device=a.device) - deep_gemm.m_grouped_gemm_fp8_fp8_bf16_nt_contiguous( - (act_out_q, act_out_s), (w2, w2_s), out, m_indices) + deep_gemm.m_grouped_fp8_gemm_nt_contiguous((act_out_q, act_out_s), + (w2, w2_s), out, m_indices) final_out = _moe_unpermute(out, inv_perm, topk, K, topk_weight) @@ -441,15 +418,15 @@ def test_w8a8_block_fp8_deep_gemm_fused_moe(M, N, K, E, topk, seed): w1_s = torch.empty((E, n_tiles_w1, k_tiles_w1), dtype=torch.float32) w2_s = torch.empty((E, n_tiles_w2, k_tiles_w2), dtype=torch.float32) - w1_s = deep_gemm.get_col_major_tma_aligned_tensor(w1_s).contiguous() - w2_s = deep_gemm.get_col_major_tma_aligned_tensor(w2_s).contiguous() + w1_s = get_col_major_tma_aligned_tensor(w1_s).contiguous() + w2_s = get_col_major_tma_aligned_tensor(w2_s).contiguous() assert w1_s.shape == (E, (2 * N + 127) // 128, (K + 127) // 128) assert (w2.shape[-2] + block_n - 1) // block_n == w2_s.shape[-2] for i in range(E): - w1[i], w1_s[i] = per_block_cast_to_fp8(w1_bf16[i]) - w2[i], w2_s[i] = per_block_cast_to_fp8(w2_bf16[i]) + w1[i], w1_s[i] = deep_gemm.utils.math.per_block_cast_to_fp8(w1_bf16[i]) + w2[i], w2_s[i] = deep_gemm.utils.math.per_block_cast_to_fp8(w2_bf16[i]) # Set the context to avoid lots of warning spam. with set_current_vllm_config(vllm_config): @@ -460,14 +437,10 @@ def test_w8a8_block_fp8_deep_gemm_fused_moe(M, N, K, E, topk, seed): ref_out = torch_w8a8_block_fp8_moe(a, w1, w2, w1_s, w2_s, score, topk, block_size) - topk_weights, topk_ids, token_expert_indices = fused_topk( - a, score.float(), topk, False) + topk_weights, topk_ids, _ = fused_topk(a, score.float(), topk, False) out = deep_gemm_moe_fp8(a, w1, w2, w1_s, w2_s, topk_weights, topk_ids) - #print(f"{out.sum()=}") - #print(f"{ref_out.sum()=}") - rel_diff = (torch.mean( torch.abs(out.to(torch.float32) - ref_out.to(torch.float32))) / torch.mean(torch.abs(ref_out.to(torch.float32)))) diff --git a/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py b/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py index 70836879d17c0..fd313b828266f 100644 --- a/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +++ b/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py @@ -266,19 +266,16 @@ class BatchedDeepGemmExperts(mk.FusedMoEPermuteExpertsUnpermute): # for the M expectation of each batch, correctly setting this value # may lead to better performance. expected_m = max_num_tokens - - dg.m_grouped_gemm_fp8_fp8_bf16_nt_masked((a1q, a1q_scale), - (w1, w1_scale), - out=workspace1, - masked_m=expert_num_tokens, - expected_m=expected_m) + dg.fp8_m_grouped_gemm_nt_masked((a1q, a1q_scale), (w1, w1_scale), + out=workspace1, + masked_m=expert_num_tokens, + expected_m=expected_m) assert expert_num_tokens is not None a2q, a2q_scale = silu_mul_fp8_quant_deep_gemm(workspace1, expert_num_tokens) - dg.m_grouped_gemm_fp8_fp8_bf16_nt_masked((a2q, a2q_scale), - (w2, w2_scale), - out=output, - masked_m=expert_num_tokens, - expected_m=expected_m) + dg.fp8_m_grouped_gemm_nt_masked((a2q, a2q_scale), (w2, w2_scale), + out=output, + masked_m=expert_num_tokens, + expected_m=expected_m) diff --git a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py index b4473b907381a..f349d2802de16 100644 --- a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +++ b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py @@ -144,8 +144,8 @@ class DeepGemmExperts(mk.FusedMoEPermuteExpertsUnpermute): (M_sum, N // 2)) mm2_out = _resize_cache(workspace2, (M_sum, K)) - dg.m_grouped_gemm_fp8_fp8_bf16_nt_contiguous( - (a1q, a1q_scale), (w1, w1_scale), mm1_out, expert_ids) + dg.m_grouped_fp8_gemm_nt_contiguous((a1q, a1q_scale), (w1, w1_scale), + mm1_out, expert_ids) self.activation(activation, act_out, mm1_out.view(-1, N)) @@ -154,9 +154,8 @@ class DeepGemmExperts(mk.FusedMoEPermuteExpertsUnpermute): self.block_shape[1], column_major_scales=True, out_q=quant_out) - - dg.m_grouped_gemm_fp8_fp8_bf16_nt_contiguous( - (a2q, a2q_scale), (w2, w2_scale), mm2_out, expert_ids) + dg.m_grouped_fp8_gemm_nt_contiguous((a2q, a2q_scale), (w2, w2_scale), + mm2_out, expert_ids) torch.index_select(mm2_out, 0, inv_perm, out=output) diff --git a/vllm/model_executor/layers/quantization/deepgemm.py b/vllm/model_executor/layers/quantization/deepgemm.py index 1d40f4915a1be..304d9af9c9218 100644 --- a/vllm/model_executor/layers/quantization/deepgemm.py +++ b/vllm/model_executor/layers/quantization/deepgemm.py @@ -58,7 +58,7 @@ def w8a8_block_fp8_matmul_deepgemm( output_dtype) # Deepgemm only supports output tensor type as bfloat16 assert C.dtype == torch.bfloat16 - deep_gemm.gemm_fp8_fp8_bf16_nt((A, As), (B, Bs), C) + deep_gemm.fp8_gemm_nt((A, As), (B, Bs), C) return C diff --git a/vllm/model_executor/layers/quantization/utils/fp8_utils.py b/vllm/model_executor/layers/quantization/utils/fp8_utils.py index 754650ebeffb5..a4ba2783a0a9b 100644 --- a/vllm/model_executor/layers/quantization/utils/fp8_utils.py +++ b/vllm/model_executor/layers/quantization/utils/fp8_utils.py @@ -114,6 +114,10 @@ def should_use_deepgemm(output_dtype: torch.dtype, weight: torch.Tensor): and weight.shape[0] % 128 == 0 and weight.shape[1] % 128 == 0) +def ceil_div(x: int, y: int) -> int: + return (x + y - 1) // y + + # TODO fix ROCm->Triton custom path: # https://github.com/vllm-project/vllm/issues/14397 def apply_w8a8_block_fp8_linear( @@ -158,9 +162,6 @@ def apply_w8a8_block_fp8_linear( if current_platform.is_cuda(): if current_platform.has_device_capability(100): - def ceil_div(x: int, y: int) -> int: - return (x + y - 1) // y - use_cutlass = cutlass_block_fp8_supported and ( ceil_div(weight.shape[0], 128) == weight_scale.shape[0] and ceil_div(weight.shape[1], 128) == weight_scale.shape[1]) @@ -655,3 +656,67 @@ def w8a8_block_fp8_matmul( ) return C + + +# Taken from https://github.com/deepseek-ai/DeepGEMM/blob/0c88cd01392c1073c7049a97d6328c7bba9b3947 +# TODO(wentao): remove this function when DeepGEMM exposes this function +def get_tma_aligned_size(x: int, element_size: int) -> int: + """ + Global memory address of TMA must be 16-byte aligned. + Since we use column-major layout for the LHS scaling tensor, + the M-axis of the LHS scaling tensor needs to be padded to a multiple of + 16 bytes. + + Arguments: + x: original M-axis shape of the LHS scaling tensor. + element_size: element size of the LHS scaling tensor. + + Returns: + M-axis shape of the LHS scaling tensor after padding. + """ + tma_alignment_bytes = 16 + assert tma_alignment_bytes % element_size == 0 + alignment = tma_alignment_bytes // element_size + return ceil_div(x, alignment) * alignment + + +# Taken from https://github.com/deepseek-ai/DeepGEMM/blob/0c88cd01392c1073c7049a97d6328c7bba9b3947 +# TODO(wentao): remove this function when DeepGEMM exposes this function +def get_col_major_tma_aligned_tensor(x: torch.Tensor) -> torch.Tensor: + """ + Returns TMA-aligned transposed format of the input tensor. `torch.transpose` + will be called if necessary. + If the input tensor is already column-major layout and 16-byte aligned along + the M axis (thus meets the requirement of LHS scaling tensor in + DeepGEMM), this function will do nothing. + + Arguments: + x: usually the LHS scaling tensor in GEMM. + + Returns: + The LHS scaling tensor of TMA-aligned transposed format. + """ + # NOTES: for the extreme performance, you may rewrite/fuse this function in + # CUDA + assert x.dim() in (2, 3) + remove_dim = False + m, n = x.shape[-2], x.shape[-1] + aligned_m = get_tma_aligned_size(m, x.element_size()) + if x.dim() == 2: + if x.stride(0) == 1 and x.stride(1) == aligned_m: + return x + x, remove_dim = x.unsqueeze(0), True + + b = x.shape[0] + + # The last kernel gives a column-major TMA aligned layout + if x.stride(0) == aligned_m * n and x.stride(1) == 1 and x.stride( + 2) == aligned_m: + return x.squeeze(0) if remove_dim else x + + # Normal layout requires transposing + aligned_x = torch.transpose( + torch.empty((b, n, aligned_m), device=x.device, dtype=x.dtype), 1, 2) + aligned_x[:, :m, :] = x + aligned_x = aligned_x[:, :m, :] + return aligned_x.squeeze(0) if remove_dim else aligned_x From ead369845dece41335d4ee52c94258c775a10897 Mon Sep 17 00:00:00 2001 From: Brayden Zhong Date: Tue, 24 Jun 2025 16:23:15 -0400 Subject: [PATCH 114/211] [Easy] Remove submodule added in #19463 (#20039) Signed-off-by: Brayden Zhong --- test-qwen | 1 - 1 file changed, 1 deletion(-) delete mode 160000 test-qwen diff --git a/test-qwen b/test-qwen deleted file mode 160000 index 34c31c0af8fc9..0000000000000 --- a/test-qwen +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 34c31c0af8fc975140b8c85548fefa1eb7f523e4 From c01d1c5aba3783f4356ce59e637794870e7ad2d4 Mon Sep 17 00:00:00 2001 From: Boyuan Feng Date: Tue, 24 Jun 2025 14:52:16 -0700 Subject: [PATCH 115/211] use .dev for version comparison with pytorch nightly release (#20031) Signed-off-by: Boyuan Feng --- tests/compile/test_config.py | 9 +++++++++ vllm/compilation/backends.py | 2 +- vllm/model_executor/layers/quantization/torchao.py | 4 ++-- vllm/utils.py | 9 +++++++-- 4 files changed, 19 insertions(+), 5 deletions(-) diff --git a/tests/compile/test_config.py b/tests/compile/test_config.py index 37d8ae0c08bf4..8679d5c3019b9 100644 --- a/tests/compile/test_config.py +++ b/tests/compile/test_config.py @@ -5,6 +5,15 @@ import pytest import vllm from vllm.compilation.counter import compilation_counter from vllm.config import VllmConfig +from vllm.utils import _is_torch_equal_or_newer + + +def test_version(): + assert _is_torch_equal_or_newer('2.8.0.dev20250624+cu128', '2.8.0.dev') + assert _is_torch_equal_or_newer('2.8.0a0+gitc82a174', '2.8.0.dev') + assert _is_torch_equal_or_newer('2.8.0', '2.8.0.dev') + assert _is_torch_equal_or_newer('2.8.1', '2.8.0.dev') + assert not _is_torch_equal_or_newer('2.7.1', '2.8.0.dev') def test_use_cudagraphs_dynamic(monkeypatch): diff --git a/vllm/compilation/backends.py b/vllm/compilation/backends.py index 8bb8c3a2a2e4e..a2bb053cec4a1 100644 --- a/vllm/compilation/backends.py +++ b/vllm/compilation/backends.py @@ -32,7 +32,7 @@ logger = init_logger(__name__) def make_compiler(compilation_config: CompilationConfig) -> CompilerInterface: if compilation_config.use_inductor: if envs.VLLM_USE_STANDALONE_COMPILE and is_torch_equal_or_newer( - "2.8.0a"): + "2.8.0.dev"): logger.debug("Using InductorStandaloneAdaptor") return InductorStandaloneAdaptor() else: diff --git a/vllm/model_executor/layers/quantization/torchao.py b/vllm/model_executor/layers/quantization/torchao.py index 9c909a3a430cb..a4e0356c02689 100644 --- a/vllm/model_executor/layers/quantization/torchao.py +++ b/vllm/model_executor/layers/quantization/torchao.py @@ -44,14 +44,14 @@ class TorchAOConfig(QuantizationConfig): """ # TorchAO quantization relies on tensor subclasses. In order, # to enable proper caching this needs standalone compile - if is_torch_equal_or_newer("2.8.0a"): + if is_torch_equal_or_newer("2.8.0.dev"): os.environ["VLLM_TEST_STANDALONE_COMPILE"] = "1" logger.info( "Using TorchAO: Setting VLLM_TEST_STANDALONE_COMPILE=1") # TODO: remove after the torch dependency is updated to 2.8 if is_torch_equal_or_newer( - "2.7.0") and not is_torch_equal_or_newer("2.8.0a"): + "2.7.0") and not is_torch_equal_or_newer("2.8.0.dev"): os.environ["VLLM_DISABLE_COMPILE_CACHE"] = "1" logger.info("Using TorchAO: Setting VLLM_DISABLE_COMPILE_CACHE=1") """ diff --git a/vllm/utils.py b/vllm/utils.py index 34be4d52c4836..fdefda901c4d2 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -2919,8 +2919,13 @@ def is_torch_equal_or_newer(target: str) -> bool: Whether the condition meets. """ try: - torch_version = version.parse(str(torch.__version__)) - return torch_version >= version.parse(target) + return _is_torch_equal_or_newer(str(torch.__version__), target) except Exception: # Fallback to PKG-INFO to load the package info, needed by the doc gen. return Version(importlib.metadata.version('torch')) >= Version(target) + + +# Helper function used in testing. +def _is_torch_equal_or_newer(torch_version: str, target: str) -> bool: + torch_version = version.parse(torch_version) + return torch_version >= version.parse(target) From 0d06b533a0fcca7a62603c868df68235659d6935 Mon Sep 17 00:00:00 2001 From: Eli Uriegas <1700823+seemethere@users.noreply.github.com> Date: Tue, 24 Jun 2025 15:44:10 -0700 Subject: [PATCH 116/211] cmake: Update vllm_flash_attn for vllm_kernels (#20032) Signed-off-by: Eli Uriegas --- cmake/external_projects/vllm_flash_attn.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/external_projects/vllm_flash_attn.cmake b/cmake/external_projects/vllm_flash_attn.cmake index ebaffe0821854..7b17018f65ab4 100644 --- a/cmake/external_projects/vllm_flash_attn.cmake +++ b/cmake/external_projects/vllm_flash_attn.cmake @@ -38,7 +38,7 @@ else() FetchContent_Declare( vllm-flash-attn GIT_REPOSITORY https://github.com/vllm-project/flash-attention.git - GIT_TAG 2c6bcfc0feb3d9d4a57b243fc159a68aa9933f5b + GIT_TAG 5f3644181c7a15345ce20bfc65af117d3601b524 GIT_PROGRESS TRUE # Don't share the vllm-flash-attn build between build types BINARY_DIR ${CMAKE_BINARY_DIR}/vllm-flash-attn From 1afa9948f5931109cbe5201fee4fd7614ff7f904 Mon Sep 17 00:00:00 2001 From: Brayden Zhong Date: Tue, 24 Jun 2025 22:42:53 -0400 Subject: [PATCH 117/211] [Llama4] Update `attn_temperature_tuning` (#19997) Signed-off-by: Brayden Zhong --- vllm/model_executor/models/llama4.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vllm/model_executor/models/llama4.py b/vllm/model_executor/models/llama4.py index 9fb73261cd892..0c9baab1f2e4e 100644 --- a/vllm/model_executor/models/llama4.py +++ b/vllm/model_executor/models/llama4.py @@ -148,9 +148,8 @@ class Llama4Attention(nn.Module): self.q_size = self.num_heads * self.head_dim self.kv_size = self.num_kv_heads * self.head_dim self.scaling = self.head_dim**-0.5 - # TODO: attn_temperature_tuning should be a bool in huggingface self.attn_temperature_tuning = self.nope and \ - config.attn_temperature_tuning > 0 + config.attn_temperature_tuning self.floor_scale = getattr(config, "floor_scale", 8192.0) self.attn_scale = getattr(config, "attn_scale", 0.1) From a6c4b87fbcca30135ba52b9199c43aa8ee9c0400 Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Tue, 24 Jun 2025 22:45:22 -0400 Subject: [PATCH 118/211] Revert "[Feature] Integrate new deepgemm (#19820)" (#20049) Signed-off-by: yewentao256 --- benchmarks/kernels/benchmark_moe.py | 3 - .../benchmark_fp8_block_dense_gemm.py | 301 ++++++++++-------- tests/kernels/moe/test_deepep_deepgemm_moe.py | 23 +- tests/kernels/quantization/test_block_fp8.py | 55 +++- .../layers/fused_moe/batched_deep_gemm_moe.py | 19 +- .../layers/fused_moe/deep_gemm_moe.py | 10 +- .../layers/quantization/deepgemm.py | 2 +- .../layers/quantization/utils/fp8_utils.py | 71 +---- 8 files changed, 254 insertions(+), 230 deletions(-) diff --git a/benchmarks/kernels/benchmark_moe.py b/benchmarks/kernels/benchmark_moe.py index 99d8d3eee0df3..cef53b183cef3 100644 --- a/benchmarks/kernels/benchmark_moe.py +++ b/benchmarks/kernels/benchmark_moe.py @@ -86,9 +86,6 @@ def benchmark_config( (num_experts, 2 * shard_intermediate_size), dtype=torch.float32 ) w2_scale = torch.randn((hidden_size, num_experts), dtype=torch.float32) - if use_deep_gemm: - # we use the default block shape for deepgemm - block_quant_shape = [128, 128] if use_fp8_w8a8: if block_quant_shape: block_n, block_k = block_quant_shape[0], block_quant_shape[1] diff --git a/benchmarks/kernels/deepgemm/benchmark_fp8_block_dense_gemm.py b/benchmarks/kernels/deepgemm/benchmark_fp8_block_dense_gemm.py index 253d2984aa9df..43c54d56ca8c1 100644 --- a/benchmarks/kernels/deepgemm/benchmark_fp8_block_dense_gemm.py +++ b/benchmarks/kernels/deepgemm/benchmark_fp8_block_dense_gemm.py @@ -1,11 +1,13 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +# fmt: off +# ruff: noqa: E501 import time +# Import DeepGEMM functions +import deep_gemm import torch -from deep_gemm import fp8_gemm_nt -from deep_gemm.testing.numeric import calc_diff -from deep_gemm.utils.math import ceil_div, per_block_cast_to_fp8, per_token_cast_to_fp8 +from deep_gemm import calc_diff, ceil_div, get_col_major_tma_aligned_tensor # Import vLLM functions from vllm import _custom_ops as ops @@ -16,84 +18,96 @@ from vllm.model_executor.layers.quantization.utils.fp8_utils import ( from vllm.triton_utils import triton +# Copied from +# https://github.com/deepseek-ai/DeepGEMM/blob/78cacf70d41d15d688bd493ebc85845f7f2a3d5d/tests/test_core.py#L9 +def per_token_cast_to_fp8( + x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: + """Convert tensor to FP8 format with per-token scaling.""" + assert x.dim() == 2 and x.size(1) % 128 == 0 + m, n = x.shape + x_view = x.view(m, -1, 128) + x_amax = x_view.abs().float().amax(dim=2).view(m, -1).clamp(1e-4) + return (x_view * (448.0 / x_amax.unsqueeze(2))).to( + torch.float8_e4m3fn).view(m, n), (x_amax / 448.0).view(m, -1) + + # Copied from # https://github.com/deepseek-ai/DeepGEMM/blob/78cacf70d41d15d688bd493ebc85845f7f2a3d5d/tests/test_core.py#L17 -def per_block_cast_to_fp8_vllm(x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: +def per_block_cast_to_fp8( + x: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: """Convert tensor to FP8 format with per-block scaling.""" assert x.dim() == 2 m, n = x.shape - x_padded = torch.zeros( - (ceil_div(m, 128) * 128, ceil_div(n, 128) * 128), dtype=x.dtype, device=x.device - ) + x_padded = torch.zeros((ceil_div(m, 128) * 128, ceil_div(n, 128) * 128), + dtype=x.dtype, + device=x.device) x_padded[:m, :n] = x x_view = x_padded.view(-1, 128, x_padded.size(1) // 128, 128) x_amax = x_view.abs().float().amax(dim=(1, 3), keepdim=True).clamp(1e-4) x_scaled = (x_view * (448.0 / x_amax)).to(torch.float8_e4m3fn) - return x_scaled.view_as(x_padded)[:m, :n].contiguous(), (x_amax / 448.0).view( - x_view.size(0), x_view.size(2) - ) + return x_scaled.view_as(x_padded)[:m, :n].contiguous(), ( + x_amax / 448.0).view(x_view.size(0), x_view.size(2)) -def benchmark_shape( - m: int, - n: int, - k: int, - warmup: int = 100, - repeat: int = 10000, - verbose: bool = False, -) -> dict: +def benchmark_shape(m: int, + n: int, + k: int, + warmup: int = 100, + repeat: int = 10000, + verbose: bool = False) -> dict: """Benchmark all implementations for a specific (m, n, k) shape.""" if verbose: print(f"\n=== Benchmarking shape: m={m}, n={n}, k={k} ===") - A = torch.randn((m, k), device="cuda", dtype=torch.bfloat16) - B = torch.randn((n, k), device="cuda", dtype=torch.bfloat16) + # Create test tensors + A = torch.randn((m, k), device='cuda', dtype=torch.bfloat16) + B = torch.randn((n, k), device='cuda', dtype=torch.bfloat16) + + # Reference result in BF16 torch.cuda.synchronize() C_ref = A @ B.t() # Pre-quantize B for all implementations # (weights can be pre-quantized offline) B_deepgemm, B_scale_deepgemm = per_block_cast_to_fp8(B) - B_vllm, B_scale_vllm = per_block_cast_to_fp8_vllm(B) + B_vllm, B_scale_vllm = per_block_cast_to_fp8(B) # Block size configuration block_size = [128, 128] # Pre-quantize A for all implementations A_deepgemm, A_scale_deepgemm = per_token_cast_to_fp8(A) - C_deepgemm = ( - torch.empty((n, m), device="cuda", dtype=torch.bfloat16).t().contiguous() - ) + A_scale_deepgemm = get_col_major_tma_aligned_tensor(A_scale_deepgemm) + C_deepgemm = torch.empty((m, n), device='cuda', dtype=torch.bfloat16) A_vllm, A_scale_vllm = per_token_group_quant_fp8(A, block_size[1]) A_vllm_cutlass, A_scale_vllm_cutlass = per_token_group_quant_fp8( - A, block_size[1], column_major_scales=True - ) + A, block_size[1], column_major_scales=True) + # === DeepGEMM Implementation === def deepgemm_gemm(): - fp8_gemm_nt( - (A_deepgemm, A_scale_deepgemm), (B_deepgemm, B_scale_deepgemm), C_deepgemm - ) + deep_gemm.gemm_fp8_fp8_bf16_nt((A_deepgemm, A_scale_deepgemm), + (B_deepgemm, B_scale_deepgemm), + C_deepgemm) return C_deepgemm + # === vLLM Triton Implementation === def vllm_triton_gemm(): - return w8a8_block_fp8_matmul( - A_vllm, - B_vllm, - A_scale_vllm, - B_scale_vllm, - block_size, - output_dtype=torch.bfloat16, - ) + return w8a8_block_fp8_matmul(A_vllm, + B_vllm, + A_scale_vllm, + B_scale_vllm, + block_size, + output_dtype=torch.bfloat16) + # === vLLM CUTLASS Implementation === def vllm_cutlass_gemm(): - return ops.cutlass_scaled_mm( - A_vllm_cutlass, - B_vllm.T, - scale_a=A_scale_vllm_cutlass, - scale_b=B_scale_vllm.T, - out_dtype=torch.bfloat16, - ) + return ops.cutlass_scaled_mm(A_vllm_cutlass, + B_vllm.T, + scale_a=A_scale_vllm_cutlass, + scale_b=B_scale_vllm.T, + out_dtype=torch.bfloat16) + # Run correctness check first if verbose: print("Running correctness check...") C_deepgemm = deepgemm_gemm() @@ -108,22 +122,26 @@ def benchmark_shape( print(f"DeepGEMM vs Reference difference: {deepgemm_diff:.6f}") print(f"vLLM Triton vs Reference difference: {vllm_triton_diff:.6f}") print(f"vLLM CUTLASS vs Reference difference: {vllm_cutlass_diff:.6f}") - print( - "vLLM Triton vs DeepGEMM difference: " - f"{calc_diff(C_vllm_triton, C_deepgemm):.6f}" - ) - print( - "vLLM CUTLASS vs DeepGEMM difference: " - f"{calc_diff(C_vllm_cutlass, C_deepgemm):.6f}" - ) + print("vLLM Triton vs DeepGEMM difference: " + f"{calc_diff(C_vllm_triton, C_deepgemm):.6f}") + print("vLLM CUTLASS vs DeepGEMM difference: " + f"{calc_diff(C_vllm_cutlass, C_deepgemm):.6f}") + # Benchmark implementations implementations = { "DeepGEMM": deepgemm_gemm, "vLLM Triton": vllm_triton_gemm, - "vLLM CUTLASS": vllm_cutlass_gemm, + "vLLM CUTLASS": vllm_cutlass_gemm } - benchmark_results = {"shape": {"m": m, "n": n, "k": k}, "implementations": {}} + benchmark_results = { + "shape": { + "m": m, + "n": n, + "k": k + }, + "implementations": {} + } for name, func in implementations.items(): # Warmup @@ -151,36 +169,38 @@ def benchmark_shape( "tflops": tflops, "gb_s": gb_s, "diff": { - "DeepGEMM": 0.0 - if name == "DeepGEMM" - else calc_diff(func(), C_deepgemm), - "Reference": deepgemm_diff - if name == "DeepGEMM" - else (vllm_triton_diff if name == "vLLM Triton" else vllm_cutlass_diff), - }, + "DeepGEMM": + 0.0 if name == "DeepGEMM" else calc_diff(func(), C_deepgemm), + "Reference": + deepgemm_diff if name == "DeepGEMM" else + (vllm_triton_diff + if name == "vLLM Triton" else vllm_cutlass_diff) + } } if verbose: - print(f"{name}: {avg_time_ms:.3f} ms, {tflops:.2f} TFLOPS, {gb_s:.2f} GB/s") + print( + f"{name}: {avg_time_ms:.3f} ms, {tflops:.2f} TFLOPS, {gb_s:.2f} GB/s" + ) # Calculate speedups baseline = benchmark_results["implementations"]["DeepGEMM"]["time_ms"] for name, data in benchmark_results["implementations"].items(): if name != "DeepGEMM": speedup = baseline / data["time_ms"] - benchmark_results["implementations"][name]["speedup_vs_deepgemm"] = speedup + benchmark_results["implementations"][name][ + "speedup_vs_deepgemm"] = speedup if verbose: - print( - f"DeepGEMM is {1 / speedup:.2f}x " - f"{'faster' if 1 / speedup > 1 else 'slower'} than {name}" - ) + print(f"DeepGEMM is {1/speedup:.2f}x " + f"{'faster' if 1/speedup > 1 else 'slower'} than {name}") - vllm_triton_time = benchmark_results["implementations"]["vLLM Triton"]["time_ms"] - vllm_cutlass_time = benchmark_results["implementations"]["vLLM CUTLASS"]["time_ms"] + vllm_triton_time = benchmark_results["implementations"]["vLLM Triton"][ + "time_ms"] + vllm_cutlass_time = benchmark_results["implementations"]["vLLM CUTLASS"][ + "time_ms"] cutlass_vs_triton = vllm_triton_time / vllm_cutlass_time - benchmark_results["implementations"]["vLLM CUTLASS"]["speedup_vs_triton"] = ( - cutlass_vs_triton - ) + benchmark_results["implementations"]["vLLM CUTLASS"][ + "speedup_vs_triton"] = cutlass_vs_triton if verbose: print( f"vLLM CUTLASS is {cutlass_vs_triton:.2f}x " @@ -192,7 +212,8 @@ def benchmark_shape( def format_table_row(values, widths): """Format a row with specified column widths.""" - return "| " + " | ".join(f"{val:{w}}" for val, w in zip(values, widths)) + " |" + return "| " + " | ".join(f"{val:{w}}" + for val, w in zip(values, widths)) + " |" def print_table(headers, rows, title=None): @@ -200,12 +221,16 @@ def print_table(headers, rows, title=None): if title: print(f"\n{title}") + # Calculate column widths based on headers and data widths = [ max(len(str(h)), max(len(str(row[i])) for row in rows)) for i, h in enumerate(headers) ] + # Create separator line separator = "+-" + "-+-".join("-" * w for w in widths) + "-+" + + # Print table print(separator) print(format_table_row(headers, widths)) print(separator) @@ -223,22 +248,44 @@ def run_benchmarks(verbose: bool = False): """Run benchmarks for a set of common shapes.""" print("===== STARTING FP8 GEMM BENCHMARK =====") + # Make sure we're using the GPU if not torch.cuda.is_available(): print("CUDA not available! Tests require GPU.") return + # Print system information print(f"PyTorch version: {torch.__version__}") print(f"CUDA version: {torch.version.cuda}") print(f"Triton version: {triton.__version__}") print(f"Using device: {torch.cuda.get_device_name()}") + # Enable TF32 for better performance torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True + # Set seeds for reproducibility torch.manual_seed(42) torch.cuda.manual_seed(42) # Define benchmark shapes (m, n, k) + shapes = [ + (8, 4096, 7168), + (8, 7168, 18432), + (8, 18432, 7168), + (64, 4096, 7168), + (64, 7168, 18432), + (64, 18432, 7168), + (64, 24576, 1536), + (64, 32768, 512), + (64, 7168, 16384), + (128, 4096, 7168), + (128, 7168, 18432), + (128, 18432, 7168), + (1024, 4096, 7168), + (1024, 18432, 7168), + (2048, 4096, 7168), + (4096, 4096, 7168), + ] shapes = [ # (64, 2112, 7168), (64, 24576, 1536), @@ -265,6 +312,7 @@ def run_benchmarks(verbose: bool = False): result = benchmark_shape(m, n, k, verbose=verbose) all_results.append(result) + # Print results in a nicely formatted table print("\n===== PERFORMANCE COMPARISON =====") # Print DeepGEMM table @@ -273,50 +321,38 @@ def run_benchmarks(verbose: bool = False): for result in all_results: shape = result["shape"] impl_data = result["implementations"]["DeepGEMM"] - deepgemm_rows.append( - [ - shape["m"], - shape["n"], - shape["k"], - f"{impl_data['time_us']:.1f}", - f"{impl_data['tflops']:.1f}", - f"{impl_data['gb_s']:.1f}", - ] - ) + deepgemm_rows.append([ + shape["m"], shape["n"], shape["k"], f"{impl_data['time_us']:.1f}", + f"{impl_data['tflops']:.1f}", f"{impl_data['gb_s']:.1f}" + ]) - print_table(deepgemm_headers, deepgemm_rows, title="DeepGEMM Implementation:") + print_table(deepgemm_headers, + deepgemm_rows, + title="DeepGEMM Implementation:") # Print vLLM Triton table - triton_headers = ["m", "n", "k", "Time (μs)", "TFLOPS", "GB/s", "vs DeepGEMM"] + triton_headers = [ + "m", "n", "k", "Time (μs)", "TFLOPS", "GB/s", "vs DeepGEMM" + ] triton_rows = [] for result in all_results: shape = result["shape"] impl_data = result["implementations"]["vLLM Triton"] speedup = impl_data.get("speedup_vs_deepgemm", 1.0) - triton_rows.append( - [ - shape["m"], - shape["n"], - shape["k"], - f"{impl_data['time_us']:.1f}", - f"{impl_data['tflops']:.1f}", - f"{impl_data['gb_s']:.1f}", - format_speedup(speedup), - ] - ) + triton_rows.append([ + shape["m"], shape["n"], shape["k"], f"{impl_data['time_us']:.1f}", + f"{impl_data['tflops']:.1f}", f"{impl_data['gb_s']:.1f}", + format_speedup(speedup) + ]) - print_table(triton_headers, triton_rows, title="vLLM Triton Implementation:") + print_table(triton_headers, + triton_rows, + title="vLLM Triton Implementation:") # Print vLLM CUTLASS table cutlass_headers = [ - "m", - "n", - "k", - "Time (μs)", - "TFLOPS", - "GB/s", - "vs DeepGEMM", - "vs Triton", + "m", "n", "k", "Time (μs)", "TFLOPS", "GB/s", "vs DeepGEMM", + "vs Triton" ] cutlass_rows = [] for result in all_results: @@ -324,27 +360,28 @@ def run_benchmarks(verbose: bool = False): impl_data = result["implementations"]["vLLM CUTLASS"] vs_deepgemm = impl_data.get("speedup_vs_deepgemm", 1.0) vs_triton = impl_data.get("speedup_vs_triton", 1.0) - cutlass_rows.append( - [ - shape["m"], - shape["n"], - shape["k"], - f"{impl_data['time_us']:.1f}", - f"{impl_data['tflops']:.1f}", - f"{impl_data['gb_s']:.1f}", - format_speedup(vs_deepgemm), - format_speedup(vs_triton), - ] - ) + cutlass_rows.append([ + shape["m"], shape["n"], shape["k"], f"{impl_data['time_us']:.1f}", + f"{impl_data['tflops']:.1f}", f"{impl_data['gb_s']:.1f}", + format_speedup(vs_deepgemm), + format_speedup(vs_triton) + ]) - print_table(cutlass_headers, cutlass_rows, title="vLLM CUTLASS Implementation:") + print_table(cutlass_headers, + cutlass_rows, + title="vLLM CUTLASS Implementation:") # Calculate and print averages print("\n===== AVERAGE PERFORMANCE =====") implementations = ["DeepGEMM", "vLLM Triton", "vLLM CUTLASS"] avg_metrics = { - impl: {"tflops": 0, "gb_s": 0, "time_ms": 0} for impl in implementations + impl: { + "tflops": 0, + "gb_s": 0, + "time_ms": 0 + } + for impl in implementations } for result in all_results: @@ -362,9 +399,9 @@ def run_benchmarks(verbose: bool = False): avg_tflops = avg_metrics[impl]["tflops"] / num_shapes avg_mem_bw = avg_metrics[impl]["gb_s"] / num_shapes avg_time = avg_metrics[impl]["time_ms"] / num_shapes - avg_rows.append( - [impl, f"{avg_tflops:.2f}", f"{avg_mem_bw:.2f}", f"{avg_time:.2f}"] - ) + avg_rows.append([ + impl, f"{avg_tflops:.2f}", f"{avg_mem_bw:.2f}", f"{avg_time:.2f}" + ]) print_table(avg_headers, avg_rows) @@ -372,19 +409,21 @@ def run_benchmarks(verbose: bool = False): avg_speedups = { "DeepGEMM vs vLLM Triton": 0, "DeepGEMM vs vLLM CUTLASS": 0, - "vLLM CUTLASS vs vLLM Triton": 0, + "vLLM CUTLASS vs vLLM Triton": 0 } for result in all_results: deepgemm_time = result["implementations"]["DeepGEMM"]["time_ms"] vllm_triton_time = result["implementations"]["vLLM Triton"]["time_ms"] - vllm_cutlass_time = result["implementations"]["vLLM CUTLASS"]["time_ms"] + vllm_cutlass_time = result["implementations"]["vLLM CUTLASS"][ + "time_ms"] - avg_speedups["DeepGEMM vs vLLM Triton"] += vllm_triton_time / deepgemm_time - avg_speedups["DeepGEMM vs vLLM CUTLASS"] += vllm_cutlass_time / deepgemm_time - avg_speedups["vLLM CUTLASS vs vLLM Triton"] += ( - vllm_triton_time / vllm_cutlass_time - ) + avg_speedups[ + "DeepGEMM vs vLLM Triton"] += vllm_triton_time / deepgemm_time + avg_speedups[ + "DeepGEMM vs vLLM CUTLASS"] += vllm_cutlass_time / deepgemm_time + avg_speedups[ + "vLLM CUTLASS vs vLLM Triton"] += vllm_triton_time / vllm_cutlass_time print("\n===== AVERAGE SPEEDUPS =====") speedup_headers = ["Comparison", "Speedup"] @@ -396,12 +435,14 @@ def run_benchmarks(verbose: bool = False): print_table(speedup_headers, speedup_rows) + # Average accuracy comparison print("\n===== ACCURACY COMPARISON =====") avg_diff = {impl: 0 for impl in implementations} for result in all_results: for impl in implementations: - avg_diff[impl] += result["implementations"][impl]["diff"]["Reference"] + avg_diff[impl] += result["implementations"][impl]["diff"][ + "Reference"] diff_headers = ["Implementation", "Avg Diff vs Reference"] diff_rows = [] diff --git a/tests/kernels/moe/test_deepep_deepgemm_moe.py b/tests/kernels/moe/test_deepep_deepgemm_moe.py index b418a22a48ec2..2d7cf39a8cca5 100644 --- a/tests/kernels/moe/test_deepep_deepgemm_moe.py +++ b/tests/kernels/moe/test_deepep_deepgemm_moe.py @@ -66,6 +66,25 @@ def next_power_of_2(x): return 2**math.ceil(math.log2(x)) +def per_block_cast_to_fp8( + x: torch.Tensor, + block_size_n: int = 128) -> tuple[torch.Tensor, torch.Tensor]: + assert x.dim() == 2 + m, n = x.shape + x_padded = torch.zeros( + (deep_gemm.ceil_div(m, 128) * 128, + deep_gemm.ceil_div(n, block_size_n) * block_size_n), + dtype=x.dtype, + device=x.device) + x_padded[:m, :n] = x + x_view = x_padded.view(-1, 128, x_padded.size(1) // 128, block_size_n) + x_amax = x_view.abs().float().amax(dim=(1, 3), keepdim=True).clamp(1e-4) + x_scaled = (x_view * (448.0 / x_amax)).to(torch.float8_e4m3fn) + x_scaled_sub = x_scaled.view_as(x_padded)[:m, :n].contiguous() + scales = (x_amax / 448.0).view(x_view.size(0), x_view.size(2)) + return x_scaled_sub, scales + + def make_block_quant_fp8_weights( e: int, n: int, @@ -106,8 +125,8 @@ def make_block_quant_fp8_weights( assert (w2.shape[-2] + block_n - 1) // block_n == w2_s.shape[-2] for i in range(e): - w1[i], w1_s[i] = deep_gemm.utils.math.per_block_cast_to_fp8(w1_bf16[i]) - w2[i], w2_s[i] = deep_gemm.utils.math.per_block_cast_to_fp8(w2_bf16[i]) + w1[i], w1_s[i] = per_block_cast_to_fp8(w1_bf16[i]) + w2[i], w2_s[i] = per_block_cast_to_fp8(w2_bf16[i]) return w1, w2, w1_s, w2_s diff --git a/tests/kernels/quantization/test_block_fp8.py b/tests/kernels/quantization/test_block_fp8.py index ca9f1d39af5ea..eec59573792db 100644 --- a/tests/kernels/quantization/test_block_fp8.py +++ b/tests/kernels/quantization/test_block_fp8.py @@ -18,8 +18,7 @@ from vllm.model_executor.layers.fused_moe.fused_moe import ( from vllm.model_executor.layers.fused_moe.moe_align_block_size import ( moe_align_block_size) from vllm.model_executor.layers.quantization.utils.fp8_utils import ( - get_col_major_tma_aligned_tensor, per_token_group_quant_fp8, - w8a8_block_fp8_matmul) + per_token_group_quant_fp8, w8a8_block_fp8_matmul) from vllm.platforms import current_platform dg_available = False @@ -264,6 +263,25 @@ def test_w8a8_block_fp8_fused_moe(M, N, K, E, topk, block_size, dtype, seed): assert rel_diff < 0.03 +def per_block_cast_to_fp8( + x: torch.Tensor, + block_size_n: int = 128) -> tuple[torch.Tensor, torch.Tensor]: + assert x.dim() == 2 + m, n = x.shape + x_padded = torch.zeros( + (deep_gemm.ceil_div(m, 128) * 128, + deep_gemm.ceil_div(n, block_size_n) * block_size_n), + dtype=x.dtype, + device=x.device) + x_padded[:m, :n] = x + x_view = x_padded.view(-1, 128, x_padded.size(1) // 128, block_size_n) + x_amax = x_view.abs().float().amax(dim=(1, 3), keepdim=True).clamp(1e-4) + x_scaled = (x_view * (448.0 / x_amax)).to(torch.float8_e4m3fn) + x_scaled_sub = x_scaled.view_as(x_padded)[:m, :n].contiguous() + scales = (x_amax / 448.0).view(x_view.size(0), x_view.size(2)) + return x_scaled_sub, scales + + @pytest.mark.parametrize( "M,N,K,block_size,out_dtype,seed", itertools.product(M, N, K, BLOCK_SIZE, OUT_DTYPES, SEEDS)) @@ -281,8 +299,10 @@ def test_w8a8_block_fp8_deep_gemm_matmul(M, N, K, block_size, out_dtype, seed): A_fp32 = (torch.rand(M, K, dtype=torch.float32) - 0.5) * 2 * fp8_max B_fp32 = (torch.rand(N, K, dtype=torch.float32) - 0.5) * 2 * fp8_max - A_fp8, As_fp8 = deep_gemm.utils.math.per_token_cast_to_fp8(A_fp32) - B_fp8, Bs_fp8 = deep_gemm.utils.math.per_block_cast_to_fp8(B_fp32) + _, block_k = block_size[0], block_size[1] + + A_fp8, As_fp8 = per_token_group_quant_fp8(A_fp32, block_k) + B_fp8, Bs_fp8 = per_block_cast_to_fp8(B_fp32) As = As_fp8.to(torch.float32) Bs = Bs_fp8.to(torch.float32) @@ -290,12 +310,15 @@ def test_w8a8_block_fp8_deep_gemm_matmul(M, N, K, block_size, out_dtype, seed): ref_out = native_w8a8_block_matmul(A_fp8, B_fp8, As, Bs, block_size, out_dtype) + # Transpose earlier so that the testing will not trigger transposing kernels + As_fp8 = deep_gemm.get_col_major_tma_aligned_tensor(As_fp8) + out = torch.zeros((M, N), device='cuda', dtype=out_dtype) assert As_fp8.shape == (M, (K + 127) // 128), f"{As_fp8.shape} != {(M, (K + 127) // 128)}" - deep_gemm.fp8_gemm_nt((A_fp8, As_fp8), (B_fp8, Bs_fp8), out) + deep_gemm.gemm_fp8_fp8_bf16_nt((A_fp8, As_fp8), (B_fp8, Bs_fp8), out) rel_diff = (torch.mean( torch.abs(out.to(torch.float32) - ref_out.to(torch.float32))) / @@ -359,16 +382,16 @@ def deep_gemm_w8a8_block_fp8_moe(M, K, a, w1, w2, w1_s, w2_s, score, topk, dtype=torch.bfloat16, device=a.device) - deep_gemm.m_grouped_fp8_gemm_nt_contiguous((a_q, a_s), (w1, w1_s), - inter_out, m_indices) + deep_gemm.m_grouped_gemm_fp8_fp8_bf16_nt_contiguous((a_q, a_s), (w1, w1_s), + inter_out, m_indices) act_out = SiluAndMul().forward_native(inter_out) act_out_q, act_out_s = per_token_group_quant_fp8(act_out, block_k) out = torch.zeros(a_q.shape[0], K, dtype=torch.bfloat16, device=a.device) - deep_gemm.m_grouped_fp8_gemm_nt_contiguous((act_out_q, act_out_s), - (w2, w2_s), out, m_indices) + deep_gemm.m_grouped_gemm_fp8_fp8_bf16_nt_contiguous( + (act_out_q, act_out_s), (w2, w2_s), out, m_indices) final_out = _moe_unpermute(out, inv_perm, topk, K, topk_weight) @@ -418,15 +441,15 @@ def test_w8a8_block_fp8_deep_gemm_fused_moe(M, N, K, E, topk, seed): w1_s = torch.empty((E, n_tiles_w1, k_tiles_w1), dtype=torch.float32) w2_s = torch.empty((E, n_tiles_w2, k_tiles_w2), dtype=torch.float32) - w1_s = get_col_major_tma_aligned_tensor(w1_s).contiguous() - w2_s = get_col_major_tma_aligned_tensor(w2_s).contiguous() + w1_s = deep_gemm.get_col_major_tma_aligned_tensor(w1_s).contiguous() + w2_s = deep_gemm.get_col_major_tma_aligned_tensor(w2_s).contiguous() assert w1_s.shape == (E, (2 * N + 127) // 128, (K + 127) // 128) assert (w2.shape[-2] + block_n - 1) // block_n == w2_s.shape[-2] for i in range(E): - w1[i], w1_s[i] = deep_gemm.utils.math.per_block_cast_to_fp8(w1_bf16[i]) - w2[i], w2_s[i] = deep_gemm.utils.math.per_block_cast_to_fp8(w2_bf16[i]) + w1[i], w1_s[i] = per_block_cast_to_fp8(w1_bf16[i]) + w2[i], w2_s[i] = per_block_cast_to_fp8(w2_bf16[i]) # Set the context to avoid lots of warning spam. with set_current_vllm_config(vllm_config): @@ -437,10 +460,14 @@ def test_w8a8_block_fp8_deep_gemm_fused_moe(M, N, K, E, topk, seed): ref_out = torch_w8a8_block_fp8_moe(a, w1, w2, w1_s, w2_s, score, topk, block_size) - topk_weights, topk_ids, _ = fused_topk(a, score.float(), topk, False) + topk_weights, topk_ids, token_expert_indices = fused_topk( + a, score.float(), topk, False) out = deep_gemm_moe_fp8(a, w1, w2, w1_s, w2_s, topk_weights, topk_ids) + #print(f"{out.sum()=}") + #print(f"{ref_out.sum()=}") + rel_diff = (torch.mean( torch.abs(out.to(torch.float32) - ref_out.to(torch.float32))) / torch.mean(torch.abs(ref_out.to(torch.float32)))) diff --git a/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py b/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py index fd313b828266f..70836879d17c0 100644 --- a/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +++ b/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py @@ -266,16 +266,19 @@ class BatchedDeepGemmExperts(mk.FusedMoEPermuteExpertsUnpermute): # for the M expectation of each batch, correctly setting this value # may lead to better performance. expected_m = max_num_tokens - dg.fp8_m_grouped_gemm_nt_masked((a1q, a1q_scale), (w1, w1_scale), - out=workspace1, - masked_m=expert_num_tokens, - expected_m=expected_m) + + dg.m_grouped_gemm_fp8_fp8_bf16_nt_masked((a1q, a1q_scale), + (w1, w1_scale), + out=workspace1, + masked_m=expert_num_tokens, + expected_m=expected_m) assert expert_num_tokens is not None a2q, a2q_scale = silu_mul_fp8_quant_deep_gemm(workspace1, expert_num_tokens) - dg.fp8_m_grouped_gemm_nt_masked((a2q, a2q_scale), (w2, w2_scale), - out=output, - masked_m=expert_num_tokens, - expected_m=expected_m) + dg.m_grouped_gemm_fp8_fp8_bf16_nt_masked((a2q, a2q_scale), + (w2, w2_scale), + out=output, + masked_m=expert_num_tokens, + expected_m=expected_m) diff --git a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py index f349d2802de16..2e50329bc5455 100644 --- a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +++ b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py @@ -143,9 +143,10 @@ class DeepGemmExperts(mk.FusedMoEPermuteExpertsUnpermute): quant_out = _resize_cache(workspace13.view(dtype=torch.float8_e4m3fn), (M_sum, N // 2)) mm2_out = _resize_cache(workspace2, (M_sum, K)) + # import pdb; pdb.set_trace() - dg.m_grouped_fp8_gemm_nt_contiguous((a1q, a1q_scale), (w1, w1_scale), - mm1_out, expert_ids) + dg.m_grouped_gemm_fp8_fp8_bf16_nt_contiguous( + (a1q, a1q_scale), (w1, w1_scale), mm1_out, expert_ids) self.activation(activation, act_out, mm1_out.view(-1, N)) @@ -154,8 +155,9 @@ class DeepGemmExperts(mk.FusedMoEPermuteExpertsUnpermute): self.block_shape[1], column_major_scales=True, out_q=quant_out) - dg.m_grouped_fp8_gemm_nt_contiguous((a2q, a2q_scale), (w2, w2_scale), - mm2_out, expert_ids) + + dg.m_grouped_gemm_fp8_fp8_bf16_nt_contiguous( + (a2q, a2q_scale), (w2, w2_scale), mm2_out, expert_ids) torch.index_select(mm2_out, 0, inv_perm, out=output) diff --git a/vllm/model_executor/layers/quantization/deepgemm.py b/vllm/model_executor/layers/quantization/deepgemm.py index 304d9af9c9218..1d40f4915a1be 100644 --- a/vllm/model_executor/layers/quantization/deepgemm.py +++ b/vllm/model_executor/layers/quantization/deepgemm.py @@ -58,7 +58,7 @@ def w8a8_block_fp8_matmul_deepgemm( output_dtype) # Deepgemm only supports output tensor type as bfloat16 assert C.dtype == torch.bfloat16 - deep_gemm.fp8_gemm_nt((A, As), (B, Bs), C) + deep_gemm.gemm_fp8_fp8_bf16_nt((A, As), (B, Bs), C) return C diff --git a/vllm/model_executor/layers/quantization/utils/fp8_utils.py b/vllm/model_executor/layers/quantization/utils/fp8_utils.py index a4ba2783a0a9b..754650ebeffb5 100644 --- a/vllm/model_executor/layers/quantization/utils/fp8_utils.py +++ b/vllm/model_executor/layers/quantization/utils/fp8_utils.py @@ -114,10 +114,6 @@ def should_use_deepgemm(output_dtype: torch.dtype, weight: torch.Tensor): and weight.shape[0] % 128 == 0 and weight.shape[1] % 128 == 0) -def ceil_div(x: int, y: int) -> int: - return (x + y - 1) // y - - # TODO fix ROCm->Triton custom path: # https://github.com/vllm-project/vllm/issues/14397 def apply_w8a8_block_fp8_linear( @@ -162,6 +158,9 @@ def apply_w8a8_block_fp8_linear( if current_platform.is_cuda(): if current_platform.has_device_capability(100): + def ceil_div(x: int, y: int) -> int: + return (x + y - 1) // y + use_cutlass = cutlass_block_fp8_supported and ( ceil_div(weight.shape[0], 128) == weight_scale.shape[0] and ceil_div(weight.shape[1], 128) == weight_scale.shape[1]) @@ -656,67 +655,3 @@ def w8a8_block_fp8_matmul( ) return C - - -# Taken from https://github.com/deepseek-ai/DeepGEMM/blob/0c88cd01392c1073c7049a97d6328c7bba9b3947 -# TODO(wentao): remove this function when DeepGEMM exposes this function -def get_tma_aligned_size(x: int, element_size: int) -> int: - """ - Global memory address of TMA must be 16-byte aligned. - Since we use column-major layout for the LHS scaling tensor, - the M-axis of the LHS scaling tensor needs to be padded to a multiple of - 16 bytes. - - Arguments: - x: original M-axis shape of the LHS scaling tensor. - element_size: element size of the LHS scaling tensor. - - Returns: - M-axis shape of the LHS scaling tensor after padding. - """ - tma_alignment_bytes = 16 - assert tma_alignment_bytes % element_size == 0 - alignment = tma_alignment_bytes // element_size - return ceil_div(x, alignment) * alignment - - -# Taken from https://github.com/deepseek-ai/DeepGEMM/blob/0c88cd01392c1073c7049a97d6328c7bba9b3947 -# TODO(wentao): remove this function when DeepGEMM exposes this function -def get_col_major_tma_aligned_tensor(x: torch.Tensor) -> torch.Tensor: - """ - Returns TMA-aligned transposed format of the input tensor. `torch.transpose` - will be called if necessary. - If the input tensor is already column-major layout and 16-byte aligned along - the M axis (thus meets the requirement of LHS scaling tensor in - DeepGEMM), this function will do nothing. - - Arguments: - x: usually the LHS scaling tensor in GEMM. - - Returns: - The LHS scaling tensor of TMA-aligned transposed format. - """ - # NOTES: for the extreme performance, you may rewrite/fuse this function in - # CUDA - assert x.dim() in (2, 3) - remove_dim = False - m, n = x.shape[-2], x.shape[-1] - aligned_m = get_tma_aligned_size(m, x.element_size()) - if x.dim() == 2: - if x.stride(0) == 1 and x.stride(1) == aligned_m: - return x - x, remove_dim = x.unsqueeze(0), True - - b = x.shape[0] - - # The last kernel gives a column-major TMA aligned layout - if x.stride(0) == aligned_m * n and x.stride(1) == 1 and x.stride( - 2) == aligned_m: - return x.squeeze(0) if remove_dim else x - - # Normal layout requires transposing - aligned_x = torch.transpose( - torch.empty((b, n, aligned_m), device=x.device, dtype=x.dtype), 1, 2) - aligned_x[:, :m, :] = x - aligned_x = aligned_x[:, :m, :] - return aligned_x.squeeze(0) if remove_dim else aligned_x From 2273ec322cddb59431a124bb982340870ed06920 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Wed, 25 Jun 2025 11:23:29 +0800 Subject: [PATCH 119/211] Revert "Fix(models/siglip): Add compatibility for Gemma models quantized by llm-compressor" (#20030) --- vllm/model_executor/models/gemma3_mm.py | 1 - 1 file changed, 1 deletion(-) diff --git a/vllm/model_executor/models/gemma3_mm.py b/vllm/model_executor/models/gemma3_mm.py index 619d2aa674919..3a1c14978b45b 100644 --- a/vllm/model_executor/models/gemma3_mm.py +++ b/vllm/model_executor/models/gemma3_mm.py @@ -479,7 +479,6 @@ class Gemma3ForConditionalGeneration(nn.Module, SupportsMultiModal, SupportsPP, "model.vision_tower.": "vision_tower.", "model.multi_modal_projector.": "multi_modal_projector.", "lm_head.": "language_model.lm_head.", - "vision_tower.vision_model.": "vision_model.", }) def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): From 3443aaf8dd7cb0e2f40dd1e6d9d36c8db23c6597 Mon Sep 17 00:00:00 2001 From: h-avsha Date: Wed, 25 Jun 2025 06:33:51 +0300 Subject: [PATCH 120/211] Move to a faster base64 implementation (#19984) Signed-off-by: h-avsha --- requirements/common.txt | 1 + vllm/multimodal/image.py | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/requirements/common.txt b/requirements/common.txt index 639abe511017a..9a9ae1d93896b 100644 --- a/requirements/common.txt +++ b/requirements/common.txt @@ -44,3 +44,4 @@ watchfiles # required for http server to monitor the updates of TLS files python-json-logger # Used by logging as per examples/others/logging_configuration.md scipy # Required for phi-4-multimodal-instruct ninja # Required for xgrammar, rocm, tpu, xpu +pybase64 # fast base64 implementation diff --git a/vllm/multimodal/image.py b/vllm/multimodal/image.py index e673632d43664..dce4c4c1cadba 100644 --- a/vllm/multimodal/image.py +++ b/vllm/multimodal/image.py @@ -1,10 +1,10 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -import base64 from io import BytesIO from pathlib import Path +import pybase64 import torch from PIL import Image @@ -55,7 +55,7 @@ class ImageMediaIO(MediaIO[Image.Image]): return convert_image_mode(image, self.image_mode) def load_base64(self, media_type: str, data: str) -> Image.Image: - return self.load_bytes(base64.b64decode(data)) + return self.load_bytes(pybase64.b64decode(data, validate=True)) def load_file(self, filepath: Path) -> Image.Image: image = Image.open(filepath) @@ -75,7 +75,7 @@ class ImageMediaIO(MediaIO[Image.Image]): image.save(buffer, image_format) data = buffer.getvalue() - return base64.b64encode(data).decode('utf-8') + return pybase64.b64encode(data).decode('utf-8') class ImageEmbeddingMediaIO(MediaIO[torch.Tensor]): @@ -88,10 +88,10 @@ class ImageEmbeddingMediaIO(MediaIO[torch.Tensor]): return torch.load(buffer, weights_only=True) def load_base64(self, media_type: str, data: str) -> torch.Tensor: - return self.load_bytes(base64.b64decode(data)) + return self.load_bytes(pybase64.b64decode(data, validate=True)) def load_file(self, filepath: Path) -> torch.Tensor: return torch.load(filepath, weights_only=True) def encode_base64(self, media: torch.Tensor) -> str: - return base64.b64encode(media.numpy()).decode('utf-8') + return pybase64.b64encode(media.numpy()).decode('utf-8') From 7108934142801dad2fd8ac42aec8b1699e37ff5d Mon Sep 17 00:00:00 2001 From: David Xia Date: Wed, 25 Jun 2025 00:41:11 -0400 Subject: [PATCH 121/211] [Frontend] speed up import time of vllm.config (#18036) Signed-off-by: David Xia --- vllm/config.py | 44 +++++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 4333dcd3b8af9..5cf7fe84dd3e6 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -27,19 +27,13 @@ from pydantic import (ConfigDict, SkipValidation, TypeAdapter, field_validator, from pydantic.dataclasses import dataclass from safetensors.torch import _TYPES as _SAFETENSORS_TO_TORCH_DTYPE from torch.distributed import ProcessGroup, ReduceOp -from transformers import PretrainedConfig from typing_extensions import Self, deprecated, runtime_checkable import vllm.envs as envs from vllm import version from vllm.compilation.inductor_pass import CallableInductorPass, InductorPass from vllm.logger import init_logger -from vllm.model_executor.layers.quantization import (QUANTIZATION_METHODS, - QuantizationMethods, - get_quantization_config) -from vllm.model_executor.models import ModelRegistry from vllm.platforms import current_platform -from vllm.tracing import is_otel_available, otel_import_error_traceback from vllm.transformers_utils.config import ( ConfigFormat, get_config, get_hf_image_processor_config, get_hf_text_config, get_pooling_config, @@ -48,32 +42,49 @@ from vllm.transformers_utils.config import ( try_get_tokenizer_config, uses_mrope) from vllm.transformers_utils.s3_utils import S3Model from vllm.transformers_utils.utils import is_s3, maybe_model_redirect +# yapf conflicts with isort for this block +# yapf: disable from vllm.utils import (DEFAULT_MAX_NUM_BATCHED_TOKENS, MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS, POOLING_MODEL_MAX_NUM_BATCHED_TOKENS, GiB_bytes, - LayerBlockType, common_broadcastable_dtype, + LayerBlockType, LazyLoader, common_broadcastable_dtype, cuda_device_count_stateless, get_cpu_memory, get_open_port, is_torch_equal_or_newer, random_uuid, resolve_obj_by_qualname) +# yapf: enable + if TYPE_CHECKING: from _typeshed import DataclassInstance from ray.util.placement_group import PlacementGroup + from transformers.configuration_utils import PretrainedConfig + import vllm.model_executor.layers.quantization as me_quant + import vllm.model_executor.models as me_models from vllm.executor.executor_base import ExecutorBase + from vllm.model_executor.layers.quantization import QuantizationMethods from vllm.model_executor.layers.quantization.base_config import ( QuantizationConfig) from vllm.model_executor.model_loader import BaseModelLoader from vllm.model_executor.model_loader.tensorizer import TensorizerConfig ConfigType = type[DataclassInstance] + HfOverrides = Union[dict, Callable[[type], type]] else: PlacementGroup = Any + PretrainedConfig = Any ExecutorBase = Any QuantizationConfig = Any + QuantizationMethods = Any BaseModelLoader = Any TensorizerConfig = Any ConfigType = type + HfOverrides = Union[dict[str, Any], Callable[[type], type]] + + me_quant = LazyLoader("model_executor", globals(), + "vllm.model_executor.layers.quantization") + me_models = LazyLoader("model_executor", globals(), + "vllm.model_executor.models") logger = init_logger(__name__) @@ -100,9 +111,6 @@ _TASK_RUNNER: dict[_ResolvedTask, RunnerType] = { for task in tasks } -HfOverrides = Union[dict[str, Any], Callable[[PretrainedConfig], - PretrainedConfig]] - @runtime_checkable class SupportsHash(Protocol): @@ -648,7 +656,7 @@ class ModelConfig: @property def registry(self): - return ModelRegistry + return me_models.ModelRegistry @property def architectures(self) -> list[str]: @@ -859,14 +867,15 @@ class ModelConfig: return quant_cfg def _verify_quantization(self) -> None: - supported_quantization = QUANTIZATION_METHODS + supported_quantization = me_quant.QUANTIZATION_METHODS optimized_quantization_methods = [ "fp8", "marlin", "modelopt", "gptq_marlin_24", "gptq_marlin", "awq_marlin", "fbgemm_fp8", "compressed-tensors", "experts_int8", "quark", "modelopt_fp4", "bitblas", "gptq_bitblas" ] if self.quantization is not None: - self.quantization = cast(QuantizationMethods, self.quantization) + self.quantization = cast(me_quant.QuantizationMethods, + self.quantization) # Parse quantization method from the HF model config, if available. quant_cfg = self._parse_quant_hf_config() @@ -900,14 +909,14 @@ class ModelConfig: # Detect which checkpoint is it for name in quantization_methods: - method = get_quantization_config(name) + method = me_quant.get_quantization_config(name) quantization_override = method.override_quantization_method( quant_cfg, self.quantization) if quantization_override is not None: # Raise error if the override is not custom (custom would # be in QUANTIZATION_METHODS but not QuantizationMethods) # and hasn't been added to the overrides list. - if (name in get_args(QuantizationMethods) + if (name in get_args(me_quant.QuantizationMethods) and name not in overrides): raise ValueError( f"Quantization method {name} is an override but " @@ -1417,7 +1426,7 @@ class ModelConfig: @property def is_v1_compatible(self) -> bool: architectures = getattr(self.hf_config, "architectures", []) - return ModelRegistry.is_v1_compatible(architectures) + return me_models.ModelRegistry.is_v1_compatible(architectures) @property def is_matryoshka(self) -> bool: @@ -2376,7 +2385,7 @@ class SpeculativeConfig: according to the log probability settings in SamplingParams.""" # Draft model configuration - quantization: Optional[QuantizationMethods] = None + quantization: Optional[me_quant.QuantizationMethods] = None """Quantization method that was used to quantize the draft model weights. If `None`, we assume the model weights are not quantized. Note that it only takes effect when using the draft model-based speculative method.""" @@ -3624,6 +3633,7 @@ class ObservabilityConfig: and "," in self.collect_detailed_traces[0]): self._parse_collect_detailed_traces() + from vllm.tracing import is_otel_available, otel_import_error_traceback if not is_otel_available() and self.otlp_traces_endpoint is not None: raise ValueError( "OpenTelemetry is not available. Unable to configure " From 879f69bed375968ce02e003883752754beb36111 Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Wed, 25 Jun 2025 01:19:09 -0400 Subject: [PATCH 122/211] [Refactor] Remove duplicate `ceil_div` (#20023) Signed-off-by: yewentao256 --- benchmarks/cutlass_benchmarks/w8a8_benchmarks.py | 11 +++-------- tests/kernels/attention/test_mla_decode_cpu.py | 5 +---- .../attention/test_triton_decode_attention.py | 5 +---- tests/neuron/1_core/test_prefix_prefill.py | 9 ++++----- vllm/attention/ops/nki_flash_attn.py | 15 ++++++--------- .../layers/fused_moe/moe_align_block_size.py | 8 ++------ .../layers/quantization/utils/fp8_utils.py | 9 +++------ 7 files changed, 20 insertions(+), 42 deletions(-) diff --git a/benchmarks/cutlass_benchmarks/w8a8_benchmarks.py b/benchmarks/cutlass_benchmarks/w8a8_benchmarks.py index cec422e8d597f..a5a5b52f60397 100644 --- a/benchmarks/cutlass_benchmarks/w8a8_benchmarks.py +++ b/benchmarks/cutlass_benchmarks/w8a8_benchmarks.py @@ -19,7 +19,7 @@ from vllm import _custom_ops as ops from vllm.model_executor.layers.quantization.utils.fp8_utils import ( w8a8_block_fp8_matmul, ) -from vllm.utils import FlexibleArgumentParser +from vllm.utils import FlexibleArgumentParser, cdiv DEFAULT_MODELS = list(WEIGHT_SHAPES.keys()) DEFAULT_BATCH_SIZES = [1, 16, 32, 64, 128, 256, 512] @@ -117,14 +117,9 @@ def bench_fp8( scale_a = torch.tensor(1.0, device="cuda", dtype=torch.float32) scale_b = torch.tensor(1.0, device="cuda", dtype=torch.float32) - def ceil_div(x: int, y: int) -> int: - return (x + y - 1) // y - - block_scale_a = torch.rand( - (m, ceil_div(k, 128)), device="cuda", dtype=torch.float32 - ) + block_scale_a = torch.rand((m, cdiv(k, 128)), device="cuda", dtype=torch.float32) block_scale_b = torch.rand( - ceil_div(k, 128), ceil_div(n, 128), device="cuda", dtype=torch.float32 + cdiv(k, 128), cdiv(n, 128), device="cuda", dtype=torch.float32 ) block_scale_a_M_major = block_scale_a.t().contiguous().t() block_scale_b_K_major = block_scale_b.t().contiguous().t() diff --git a/tests/kernels/attention/test_mla_decode_cpu.py b/tests/kernels/attention/test_mla_decode_cpu.py index 5a7480a6beaea..f8b307c595dea 100644 --- a/tests/kernels/attention/test_mla_decode_cpu.py +++ b/tests/kernels/attention/test_mla_decode_cpu.py @@ -7,10 +7,7 @@ from torch import Tensor import vllm._custom_ops as ops from vllm.platforms import current_platform - - -def cdiv(a, b): - return (a + b - 1) // b +from vllm.utils import cdiv def ref_mla( diff --git a/tests/kernels/attention/test_triton_decode_attention.py b/tests/kernels/attention/test_triton_decode_attention.py index 358b374ea75bc..2dca720fe3301 100644 --- a/tests/kernels/attention/test_triton_decode_attention.py +++ b/tests/kernels/attention/test_triton_decode_attention.py @@ -5,10 +5,7 @@ import pytest import torch from vllm.attention.ops.triton_decode_attention import decode_attention_fwd - - -def cdiv(a, b): - return (a + b - 1) // b +from vllm.utils import cdiv @pytest.mark.parametrize("B", [3, 5]) diff --git a/tests/neuron/1_core/test_prefix_prefill.py b/tests/neuron/1_core/test_prefix_prefill.py index 8b9a5f6e4a6af..abf7febc2955c 100644 --- a/tests/neuron/1_core/test_prefix_prefill.py +++ b/tests/neuron/1_core/test_prefix_prefill.py @@ -7,6 +7,8 @@ import pytest import torch import torch.nn.functional as F +from vllm.utils import cdiv + class BlockDiagonalCausalFromBottomRightMask: @@ -398,11 +400,8 @@ def test_contexted_kv_attention( assert (large_tile_size >= B_P_SIZE ), f"Expect {large_tile_size=} to be larger than {B_P_SIZE=}" - def ceil_div(a, b): - return (a + b - 1) // b - def pad_to_multiple(a, b): - return ceil_div(a, b) * b + return cdiv(a, b) * b def pad_to_next_power_of_2(a): assert a > 0 @@ -411,7 +410,7 @@ def test_contexted_kv_attention( # calculate input shapes max_num_queries = pad_to_next_power_of_2(sum(query_lens)) context_lens = torch.tensor(seq_lens) - torch.tensor(query_lens) - num_active_blocks = ceil_div(context_lens, block_size).sum().item() + num_active_blocks = cdiv(context_lens, block_size).sum().item() num_active_blocks = pad_to_multiple(num_active_blocks, large_tile_size // block_size) context_kv_len = num_active_blocks * block_size diff --git a/vllm/attention/ops/nki_flash_attn.py b/vllm/attention/ops/nki_flash_attn.py index e28ff7e8b4ed9..29fa432017616 100644 --- a/vllm/attention/ops/nki_flash_attn.py +++ b/vllm/attention/ops/nki_flash_attn.py @@ -8,9 +8,7 @@ import torch from neuronxcc import nki from neuronxcc.nki.language import par_dim - -def ceil_div(a, b): - return (a + b - 1) // b +from vllm.utils import cdiv def is_power_of_2(x): @@ -35,11 +33,10 @@ def load_block_tables(block_tables_hbm, num_tiles, num_blocks_per_tile): (num_tiles, num_blocks_per_tile)) block_tables_sbuf = nl.zeros( - (ceil_div(num_tiles, - B_P_SIZE), par_dim(B_P_SIZE), num_blocks_per_tile), + (cdiv(num_tiles, B_P_SIZE), par_dim(B_P_SIZE), num_blocks_per_tile), dtype=nl.int32, ) - for i in nl.affine_range(ceil_div(num_tiles, B_P_SIZE)): + for i in nl.affine_range(cdiv(num_tiles, B_P_SIZE)): i_p = nl.arange(B_P_SIZE)[:, None] i_f = nl.arange(num_blocks_per_tile)[None, :] block_tables_sbuf[i, i_p, i_f] = nl.load( @@ -83,7 +80,7 @@ def transform_block_tables_for_indirect_load( assert is_power_of_2( num_blocks_per_tile), f"{num_blocks_per_tile=} is not power of 2" - num_loads = ceil_div(num_blocks_per_tile, B_P_SIZE) + num_loads = cdiv(num_blocks_per_tile, B_P_SIZE) block_tables_transposed = nl.ndarray( ( num_loads, @@ -165,7 +162,7 @@ def load_kv_tile_from_cache( equivalent to (par_dim(B_P_SIZE), seqlen_kv // B_P_SIZE * B_D_SIZE) """ # load key cache - num_loads = ceil_div(num_blocks_per_large_tile, B_P_SIZE) + num_loads = cdiv(num_blocks_per_large_tile, B_P_SIZE) for load_idx in nl.affine_range(num_loads): i_p = nl.arange(B_P_SIZE)[:, None] i_f = nl.arange(tiled_block_size * B_D_SIZE)[None, :] @@ -605,7 +602,7 @@ def flash_paged_attention( ) for large_k_tile_idx in nl.sequential_range(0, num_large_k_tile): - num_loads = ceil_div(num_blocks_per_large_tile, B_P_SIZE) + num_loads = cdiv(num_blocks_per_large_tile, B_P_SIZE) cur_k_tile = nl.ndarray( (par_dim(B_D_SIZE), LARGE_TILE_SZ), dtype=kernel_dtype, diff --git a/vllm/model_executor/layers/fused_moe/moe_align_block_size.py b/vllm/model_executor/layers/fused_moe/moe_align_block_size.py index f9451ca2fde45..ceb96add0fdee 100644 --- a/vllm/model_executor/layers/fused_moe/moe_align_block_size.py +++ b/vllm/model_executor/layers/fused_moe/moe_align_block_size.py @@ -6,11 +6,7 @@ import torch from vllm import _custom_ops as ops from vllm.triton_utils import tl, triton -from vllm.utils import round_up - - -def ceil_div(a, b): - return (a + b - 1) // b +from vllm.utils import cdiv, round_up @triton.jit @@ -115,7 +111,7 @@ def moe_align_block_size_triton( cumsum = torch.zeros((num_experts + 1, ), dtype=torch.int32, device=topk_ids.device) - tokens_per_thread = ceil_div(numel, num_experts) + tokens_per_thread = cdiv(numel, num_experts) moe_align_block_size_stage1[grid]( topk_ids, diff --git a/vllm/model_executor/layers/quantization/utils/fp8_utils.py b/vllm/model_executor/layers/quantization/utils/fp8_utils.py index 754650ebeffb5..3a0fb83d627af 100644 --- a/vllm/model_executor/layers/quantization/utils/fp8_utils.py +++ b/vllm/model_executor/layers/quantization/utils/fp8_utils.py @@ -19,7 +19,7 @@ from vllm.model_executor.layers.quantization.utils.w8a8_utils import ( CUTLASS_BLOCK_FP8_SUPPORTED) from vllm.platforms import current_platform from vllm.triton_utils import tl, triton -from vllm.utils import direct_register_custom_op +from vllm.utils import cdiv, direct_register_custom_op logger = init_logger(__name__) has_deep_gemm = importlib.util.find_spec("deep_gemm") is not None @@ -158,12 +158,9 @@ def apply_w8a8_block_fp8_linear( if current_platform.is_cuda(): if current_platform.has_device_capability(100): - def ceil_div(x: int, y: int) -> int: - return (x + y - 1) // y - use_cutlass = cutlass_block_fp8_supported and ( - ceil_div(weight.shape[0], 128) == weight_scale.shape[0] - and ceil_div(weight.shape[1], 128) == weight_scale.shape[1]) + cdiv(weight.shape[0], 128) == weight_scale.shape[0] + and cdiv(weight.shape[1], 128) == weight_scale.shape[1]) else: # TODO: update this after switching to public sm90 block scale gemm # as it also supports weight.shape % 128 != 0 From f59fc60fb317e7e04456de50b7abce99a9017225 Mon Sep 17 00:00:00 2001 From: Max Wittig Date: Wed, 25 Jun 2025 07:43:04 +0200 Subject: [PATCH 123/211] [Feat][CLI] enforce-include-usage (#19695) Signed-off-by: Max Wittig --- vllm/entrypoints/openai/api_server.py | 2 ++ vllm/entrypoints/openai/cli_args.py | 5 +++++ vllm/entrypoints/openai/serving_chat.py | 19 +++++++++++++++---- vllm/entrypoints/openai/serving_completion.py | 11 ++++++++--- vllm/entrypoints/openai/serving_engine.py | 6 ++++-- 5 files changed, 34 insertions(+), 9 deletions(-) diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 62f1c6a7c12bf..a23736470f661 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -1190,6 +1190,7 @@ async def init_app_state( tool_parser=args.tool_call_parser, reasoning_parser=args.reasoning_parser, enable_prompt_tokens_details=args.enable_prompt_tokens_details, + enable_force_include_usage=args.enable_force_include_usage, ) if model_config.runner_type == "generate" else None state.openai_serving_completion = OpenAIServingCompletion( engine_client, @@ -1197,6 +1198,7 @@ async def init_app_state( state.openai_serving_models, request_logger=request_logger, return_tokens_as_token_ids=args.return_tokens_as_token_ids, + enable_force_include_usage=args.enable_force_include_usage, ) if model_config.runner_type == "generate" else None state.openai_serving_pooling = OpenAIServingPooling( engine_client, diff --git a/vllm/entrypoints/openai/cli_args.py b/vllm/entrypoints/openai/cli_args.py index ca70e78df3260..dd4bd53046a35 100644 --- a/vllm/entrypoints/openai/cli_args.py +++ b/vllm/entrypoints/openai/cli_args.py @@ -272,6 +272,11 @@ def make_arg_parser(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: action='store_true', default=False, help="If set to True, enable prompt_tokens_details in usage.") + parser.add_argument( + "--enable-force-include-usage", + action='store_true', + default=False, + help="If set to True, including usage on every request.") parser.add_argument( "--enable-server-load-tracking", action='store_true', diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py index 2a0d4cd74a284..10aced83b60bb 100644 --- a/vllm/entrypoints/openai/serving_chat.py +++ b/vllm/entrypoints/openai/serving_chat.py @@ -64,12 +64,14 @@ class OpenAIServingChat(OpenAIServing): enable_auto_tools: bool = False, tool_parser: Optional[str] = None, enable_prompt_tokens_details: bool = False, + enable_force_include_usage: bool = False, ) -> None: super().__init__(engine_client=engine_client, model_config=model_config, models=models, request_logger=request_logger, - return_tokens_as_token_ids=return_tokens_as_token_ids) + return_tokens_as_token_ids=return_tokens_as_token_ids, + enable_force_include_usage=enable_force_include_usage) self.response_role = response_role self.chat_template = chat_template @@ -110,6 +112,7 @@ class OpenAIServingChat(OpenAIServing): "been registered") from e self.enable_prompt_tokens_details = enable_prompt_tokens_details + self.enable_force_include_usage = enable_force_include_usage self.default_sampling_params = ( self.model_config.get_diff_sampling_param()) if self.default_sampling_params: @@ -261,8 +264,14 @@ class OpenAIServingChat(OpenAIServing): # Streaming response if request.stream: return self.chat_completion_stream_generator( - request, result_generator, request_id, model_name, - conversation, tokenizer, request_metadata) + request, + result_generator, + request_id, + model_name, + conversation, + tokenizer, + request_metadata, + enable_force_include_usage=self.enable_force_include_usage) try: return await self.chat_completion_full_generator( @@ -405,6 +414,7 @@ class OpenAIServingChat(OpenAIServing): conversation: list[ConversationMessage], tokenizer: AnyTokenizer, request_metadata: RequestResponseMetadata, + enable_force_include_usage: bool, ) -> AsyncGenerator[str, None]: created_time = int(time.time()) chunk_object_type: Final = "chat.completion.chunk" @@ -471,7 +481,8 @@ class OpenAIServingChat(OpenAIServing): stream_options = request.stream_options if stream_options: - include_usage = stream_options.include_usage + include_usage = stream_options.include_usage \ + or enable_force_include_usage include_continuous_usage = include_usage and \ stream_options.continuous_usage_stats else: diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index ce5eca8550289..a19fde8d70a83 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -52,12 +52,14 @@ class OpenAIServingCompletion(OpenAIServing): *, request_logger: Optional[RequestLogger], return_tokens_as_token_ids: bool = False, + enable_force_include_usage: bool = False, ): super().__init__(engine_client=engine_client, model_config=model_config, models=models, request_logger=request_logger, - return_tokens_as_token_ids=return_tokens_as_token_ids) + return_tokens_as_token_ids=return_tokens_as_token_ids, + enable_force_include_usage=enable_force_include_usage) self.default_sampling_params = ( self.model_config.get_diff_sampling_param()) if self.default_sampling_params: @@ -227,7 +229,8 @@ class OpenAIServingCompletion(OpenAIServing): model_name, num_prompts=num_prompts, tokenizer=tokenizer, - request_metadata=request_metadata) + request_metadata=request_metadata, + enable_force_include_usage=self.enable_force_include_usage) # Non-streaming response final_res_batch: list[Optional[RequestOutput]] = [None] * num_prompts @@ -289,6 +292,7 @@ class OpenAIServingCompletion(OpenAIServing): num_prompts: int, tokenizer: AnyTokenizer, request_metadata: RequestResponseMetadata, + enable_force_include_usage: bool, ) -> AsyncGenerator[str, None]: num_choices = 1 if request.n is None else request.n previous_text_lens = [0] * num_choices * num_prompts @@ -298,7 +302,8 @@ class OpenAIServingCompletion(OpenAIServing): stream_options = request.stream_options if stream_options: - include_usage = stream_options.include_usage + include_usage = stream_options.include_usage or \ + enable_force_include_usage include_continuous_usage = include_usage and \ stream_options.continuous_usage_stats else: diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index ac3883bdeb33c..4bf790bbb2984 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -132,7 +132,7 @@ RequestT = TypeVar("RequestT", bound=AnyRequest) class RequestProcessingMixin(BaseModel): """ - Mixin for request processing, + Mixin for request processing, handling prompt preparation and engine input. """ request_prompts: Optional[Sequence[RequestPrompt]] = [] @@ -144,7 +144,7 @@ class RequestProcessingMixin(BaseModel): class ResponseGenerationMixin(BaseModel): """ - Mixin for response generation, + Mixin for response generation, managing result generators and final batch results. """ result_generator: Optional[AsyncGenerator[tuple[int, Union[ @@ -208,6 +208,7 @@ class OpenAIServing: *, request_logger: Optional[RequestLogger], return_tokens_as_token_ids: bool = False, + enable_force_include_usage: bool = False, ): super().__init__() @@ -219,6 +220,7 @@ class OpenAIServing: self.request_logger = request_logger self.return_tokens_as_token_ids = return_tokens_as_token_ids + self.enable_force_include_usage = enable_force_include_usage self._tokenizer_executor = ThreadPoolExecutor(max_workers=1) From 015fab8c2fa4db8776f7e91abd50371911673d88 Mon Sep 17 00:00:00 2001 From: bnellnm <49004751+bnellnm@users.noreply.github.com> Date: Wed, 25 Jun 2025 02:22:58 -0400 Subject: [PATCH 124/211] [Kernels][Bugfix] Use torch op for all kernels in FusedMoE forward. Add additional testing for cudagraphs. (#19717) Signed-off-by: Bill Nell --- tests/kernels/moe/test_cutlass_moe.py | 11 +- tests/kernels/moe/test_moe.py | 288 ++++++++++++------ tests/kernels/moe/test_nvfp4_moe.py | 2 +- tests/kernels/moe/test_pplx_cutlass_moe.py | 22 +- tests/kernels/moe/test_pplx_moe.py | 37 +-- tests/kernels/quantization/test_block_fp8.py | 49 ++- tests/kernels/utils.py | 28 +- vllm/envs.py | 1 + .../layers/fused_moe/cutlass_moe.py | 70 ++--- .../layers/fused_moe/deep_gemm_moe.py | 2 +- .../fused_moe/deepep_ll_prepare_finalize.py | 2 +- .../layers/fused_moe/fused_moe.py | 84 ++--- vllm/model_executor/layers/fused_moe/layer.py | 19 +- .../layers/fused_moe/pplx_prepare_finalize.py | 2 +- 14 files changed, 379 insertions(+), 238 deletions(-) diff --git a/tests/kernels/moe/test_cutlass_moe.py b/tests/kernels/moe/test_cutlass_moe.py index ce420901e3177..158100a098799 100644 --- a/tests/kernels/moe/test_cutlass_moe.py +++ b/tests/kernels/moe/test_cutlass_moe.py @@ -29,7 +29,10 @@ MNK_FACTORS = [ (224, 1024, 1536), (224, 3072, 1024), (224, 3072, 1536), - (1024 * 128, 1024, 1024), + (32768, 1024, 1024), + # These sizes trigger wrong answers. + #(7232, 2048, 5120), + #(40000, 2048, 5120), ] vllm_config = VllmConfig(parallel_config=ParallelConfig( @@ -232,8 +235,10 @@ def test_cutlass_moe_8_bit_no_graph( topk: int, per_act_token: bool, per_out_ch: bool, + monkeypatch, ): current_platform.seed_everything(7) + monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192") with set_current_vllm_config(vllm_config): mt = MOETensors8Bit.make_moe_tensors_8bit(m, k, n, e, per_act_token, per_out_ch) @@ -274,8 +279,10 @@ def test_cutlass_moe_8_bit_cuda_graph( topk: int, per_act_token: bool, per_out_ch: bool, + monkeypatch, ): current_platform.seed_everything(7) + monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192") with set_current_vllm_config(vllm_config): dtype = torch.half @@ -329,8 +336,10 @@ def test_cutlass_moe_8_bit_EP( per_act_token: bool, per_out_channel: bool, ep_size: int, + monkeypatch, ): current_platform.seed_everything(7) + monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", "8192") with set_current_vllm_config(vllm_config): mt = MOETensors8Bit.make_moe_tensors_8bit(m, k, n, e, per_act_token, per_out_channel) diff --git a/tests/kernels/moe/test_moe.py b/tests/kernels/moe/test_moe.py index bed374cf4d564..0c31168566e24 100644 --- a/tests/kernels/moe/test_moe.py +++ b/tests/kernels/moe/test_moe.py @@ -4,6 +4,9 @@ Run `pytest tests/kernels/test_moe.py`. """ +import functools +from typing import Callable, Optional, Union + import pytest import torch from torch.nn import Parameter @@ -14,6 +17,7 @@ from transformers.models.mixtral.modeling_mixtral import MixtralSparseMoeBlock import vllm.model_executor.layers.fused_moe # noqa from tests.kernels.utils import opcheck, stack_and_dev, torch_moe from vllm.config import VllmConfig, set_current_vllm_config +from vllm.forward_context import set_forward_context from vllm.model_executor.layers.fused_moe import fused_moe from vllm.model_executor.layers.fused_moe.fused_moe import ( fused_topk, modular_triton_fused_moe) @@ -40,7 +44,76 @@ vllm_config.scheduler_config.max_num_seqs = 128 vllm_config.scheduler_config.max_model_len = 8192 -@pytest.mark.parametrize("m", [1, 33, 64, 222, 1024 * 128]) +def run_moe_test( + baseline: Union[Callable, torch.Tensor], + moe_fn: Callable, + a: torch.Tensor, + w1: torch.Tensor, + w2: torch.Tensor, + score: torch.Tensor, + topk: int, + global_num_experts: int = -1, + expert_map: Optional[torch.Tensor] = None, + padding: bool = False, + use_compile: bool = False, + use_cudagraph: bool = False, + atol: float = 2e-2, + rtol: float = 0, +) -> torch.Tensor: + if isinstance(baseline, torch.Tensor): + baseline_output = baseline + else: + baseline_output = baseline(a, + w1, + w2, + score, + topk, + global_num_experts=global_num_experts, + expert_map=expert_map) + + # Pad the weight if moe padding is enabled + if padding: + w1 = F.pad(w1, (0, 128), "constant", 0)[..., 0:-128] + w2 = F.pad(w2, (0, 128), "constant", 0)[..., 0:-128] + + if use_compile: + moe_fn = torch.compile(moe_fn, backend="inductor", fullgraph=True) + torch._dynamo.mark_dynamic(a, 0) + torch._dynamo.mark_dynamic(score, 0) + + test_output = moe_fn(a, + w1, + w2, + score, + topk, + global_num_experts=global_num_experts, + expert_map=expert_map) + + if use_cudagraph: + test_output.fill_(0) + stream = torch.cuda.Stream() + graph = torch.cuda.CUDAGraph() + with torch.cuda.graph(graph, stream=stream): + test_output = moe_fn(a, + w1, + w2, + score, + topk, + global_num_experts=global_num_experts, + expert_map=expert_map) + torch.cuda.synchronize() + graph.replay() + torch.cuda.synchronize() + + torch.testing.assert_close(test_output, + baseline_output, + atol=atol, + rtol=rtol) + + return baseline_output + + +@pytest.mark.parametrize("m", [1, 33, 64, 222, 32768, 40000]) @pytest.mark.parametrize("n", [128, 1024, 2048]) @pytest.mark.parametrize("k", [128, 511, 1024]) @pytest.mark.parametrize("e", NUM_EXPERTS) @@ -48,6 +121,7 @@ vllm_config.scheduler_config.max_model_len = 8192 @pytest.mark.parametrize("ep_size", EP_SIZE) @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16]) @pytest.mark.parametrize("padding", [True, False]) +@pytest.mark.parametrize("chunk_size", [8192]) def test_fused_moe( m: int, n: int, @@ -57,7 +131,17 @@ def test_fused_moe( ep_size: int, dtype: torch.dtype, padding: bool, + chunk_size: int, + monkeypatch, ): + current_platform.seed_everything(7) + + monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", str(chunk_size)) + + # + # Setup test data + # + a = torch.randn((m, k), device="cuda", dtype=dtype) / 10 w1 = torch.randn((e, 2 * n, k), device="cuda", dtype=dtype) / 10 w2 = torch.randn((e, k, n), device="cuda", dtype=dtype) / 10 @@ -77,58 +161,70 @@ def test_fused_moe( else: e_map = None - m_fused_moe = modular_triton_fused_moe(use_fp8_w8a8=False, - use_int8_w8a8=False, - use_int8_w8a16=False, - use_int4_w4a16=False, - per_channel_quant=False, - block_shape=None) + # + # Setup test functions + # + + m_fused_moe_fn = modular_triton_fused_moe(use_fp8_w8a8=False, + use_int8_w8a8=False, + use_int8_w8a16=False, + use_int4_w4a16=False, + per_channel_quant=False, + block_shape=None) + + def m_fused_moe( + a: torch.Tensor, + w1: torch.Tensor, + w2: torch.Tensor, + score: torch.Tensor, + topk: int, + global_num_experts: int = -1, + expert_map: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + topk_weights, topk_ids, _ = fused_topk(a, score, topk, False) + return m_fused_moe_fn(a, + w1, + w2, + topk_weights, + topk_ids, + global_num_experts=global_num_experts, + expert_map=expert_map) + + fused_moe_fn = functools.partial(fused_moe, renormalize=False) + + # + # Run tests + # + runner = functools.partial( + run_moe_test, + a=a, + w1=w1, + w2=w2, + score=score, + topk=topk, + global_num_experts=e, + expert_map=e_map, + padding=padding, + ) + + # Note: for now use_compile will error out if the problem size is + # large enough to trigger chunking. I'm leaving the flag and + # setup code in case we are able to revisit this later. + use_compile = False + + use_cudagraph = (n >= 1024 and k >= 1024 + and current_platform.is_cuda_alike()) with set_current_vllm_config(vllm_config): - torch_output = torch_moe(a, w1, w2, score, topk, e_map) - iterative_output = iterative_moe(a, - w1, - w2, - score, - topk, - global_num_experts=e, - expert_map=e_map, - renormalize=False) - - # Pad the weight if moe padding is enabled - if padding: - w1 = F.pad(w1, (0, 128), "constant", 0)[..., 0:-128] - torch.cuda.empty_cache() - w2 = F.pad(w2, (0, 128), "constant", 0)[..., 0:-128] - torch.cuda.empty_cache() - - triton_output = fused_moe(a, - w1, - w2, - score, - topk, - global_num_experts=e, - expert_map=e_map, - renormalize=False) - - topk_weights, topk_ids, _ = fused_topk(a, score, topk, False) - m_triton_output = m_fused_moe(a, - w1, - w2, - topk_weights, - topk_ids, - global_num_experts=e, - expert_map=e_map) - - torch.testing.assert_close(triton_output, torch_output, atol=2e-2, rtol=0) - torch.testing.assert_close(m_triton_output, - torch_output, - atol=2e-2, - rtol=0) - torch.testing.assert_close(iterative_output, - torch_output, - atol=2e-2, - rtol=0) + baseline_output = runner(torch_moe, iterative_moe) + runner(baseline_output, + fused_moe_fn, + use_compile=use_compile, + use_cudagraph=use_cudagraph) + runner(baseline_output, + m_fused_moe, + use_compile=use_compile, + use_cudagraph=use_cudagraph) @pytest.mark.parametrize("m", [1, 32, 222]) @@ -238,7 +334,12 @@ def test_fused_moe_wn16(m: int, n: int, k: int, e: int, topk: int, w1_zp=w1_qzeros if has_zp else None, w2_zp=w2_qzeros if has_zp else None, block_shape=[0, group_size]) - torch_output = torch_moe(a, w1_ref, w2_ref, score, topk, e_map) + torch_output = torch_moe(a, + w1_ref, + w2_ref, + score, + topk, + expert_map=e_map) torch.testing.assert_close(triton_output, torch_output, atol=2e-2, rtol=0) @@ -265,45 +366,51 @@ def test_mixtral_moe(dtype: torch.dtype, padding: bool, use_rocm_aiter: bool, pytest.skip("AITER ROCm test skip for float32") # Instantiate our and huggingface's MoE blocks - config = MixtralConfig() - hf_moe = MixtralSparseMoeBlock(config).to(dtype).to("cuda") - vllm_moe = MixtralMoE( - num_experts=config.num_local_experts, - top_k=config.num_experts_per_tok, - hidden_size=config.hidden_size, - intermediate_size=config.intermediate_size, - params_dtype=dtype, - tp_size=1, - dp_size=1, - ).cuda() + vllm_config.compilation_config.static_forward_context = dict() + with (set_current_vllm_config(vllm_config), + set_forward_context(None, vllm_config)): + config = MixtralConfig() + hf_moe = MixtralSparseMoeBlock(config).to(dtype).to("cuda") + vllm_moe = MixtralMoE( + num_experts=config.num_local_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.intermediate_size, + params_dtype=dtype, + tp_size=1, + dp_size=1, + ).cuda() - # Load the weights - vllm_moe.gate.weight.data[:] = hf_moe.gate.weight.data - for i in range(config.num_local_experts): - weights = (hf_moe.experts[i].w1.weight.data, - hf_moe.experts[i].w3.weight.data) - vllm_moe.experts.w13_weight[i][:] = torch.cat(weights, dim=0) - vllm_moe.experts.w2_weight[i][:] = hf_moe.experts[i].w2.weight.data + # Load the weights + vllm_moe.gate.weight.data[:] = hf_moe.gate.weight.data + for i in range(config.num_local_experts): + weights = (hf_moe.experts[i].w1.weight.data, + hf_moe.experts[i].w3.weight.data) + vllm_moe.experts.w13_weight[i][:] = torch.cat(weights, dim=0) + vllm_moe.experts.w2_weight[i][:] = hf_moe.experts[i].w2.weight.data - # Generate input batch of dimensions [batch_size, seq_len, hidden_dim] - hf_inputs = torch.randn((1, 64, config.hidden_size)).to(dtype).to("cuda") - # vLLM uses 1D query [num_tokens, hidden_dim] - vllm_inputs = hf_inputs.flatten(0, 1) + # Generate input batch of dimensions [batch_size, seq_len, hidden_dim] + hf_inputs = torch.randn( + (1, 64, config.hidden_size)).to(dtype).to("cuda") + # vLLM uses 1D query [num_tokens, hidden_dim] + vllm_inputs = hf_inputs.flatten(0, 1) - # Pad the weight if moe padding is enabled - if padding: - vllm_moe.experts.w13_weight = Parameter(F.pad( - vllm_moe.experts.w13_weight, (0, 128), "constant", 0)[..., 0:-128], - requires_grad=False) - torch.cuda.empty_cache() - vllm_moe.experts.w2_weight = Parameter(F.pad( - vllm_moe.experts.w2_weight, (0, 128), "constant", 0)[..., 0:-128], - requires_grad=False) - torch.cuda.empty_cache() + # Pad the weight if moe padding is enabled + if padding: + vllm_moe.experts.w13_weight = Parameter(F.pad( + vllm_moe.experts.w13_weight, (0, 128), "constant", 0)[..., + 0:-128], + requires_grad=False) + torch.cuda.empty_cache() + vllm_moe.experts.w2_weight = Parameter(F.pad( + vllm_moe.experts.w2_weight, (0, 128), "constant", 0)[..., + 0:-128], + requires_grad=False) + torch.cuda.empty_cache() - # Run forward passes for both MoE blocks - hf_states, _ = hf_moe.forward(hf_inputs) - vllm_states = vllm_moe.forward(vllm_inputs) + # Run forward passes for both MoE blocks + hf_states, _ = hf_moe.forward(hf_inputs) + vllm_states = vllm_moe.forward(vllm_inputs) mixtral_moe_tol = { torch.float32: 1e-3, @@ -546,7 +653,12 @@ def test_fused_marlin_moe( topk_weights, topk_ids, _ = fused_topk(a, score, topk, False) with set_current_vllm_config(vllm_config): - torch_output = torch_moe(a, w_ref1, w_ref2, score, topk, e_map) + torch_output = torch_moe(a, + w_ref1, + w_ref2, + score, + topk, + expert_map=e_map) marlin_output = torch.ops.vllm.fused_marlin_moe( a, diff --git a/tests/kernels/moe/test_nvfp4_moe.py b/tests/kernels/moe/test_nvfp4_moe.py index 22482d9ca85a6..76b560e1bb412 100644 --- a/tests/kernels/moe/test_nvfp4_moe.py +++ b/tests/kernels/moe/test_nvfp4_moe.py @@ -136,7 +136,7 @@ def test_cutlass_fp4_moe_no_graph(m: int, n: int, k: int, e: int, topk: int, device=w2.device, block_size=quant_blocksize) - torch_output = torch_moe(a_in_dtype, w1_d, w2_d, score, topk, None) + torch_output = torch_moe(a_in_dtype, w1_d, w2_d, score, topk) torch.testing.assert_close(torch_output, cutlass_output, diff --git a/tests/kernels/moe/test_pplx_cutlass_moe.py b/tests/kernels/moe/test_pplx_cutlass_moe.py index d90202dfcb3bd..0caf14f040bbe 100644 --- a/tests/kernels/moe/test_pplx_cutlass_moe.py +++ b/tests/kernels/moe/test_pplx_cutlass_moe.py @@ -6,9 +6,9 @@ from typing import Optional import pytest import torch +from tests.kernels.utils import torch_experts from vllm import _custom_ops as ops from vllm.config import VllmConfig, set_current_vllm_config -from vllm.model_executor.layers.activation import SiluAndMul from vllm.model_executor.layers.fused_moe.cutlass_moe import CutlassExpertsFp8 from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk from vllm.model_executor.layers.fused_moe.modular_kernel import ( @@ -164,22 +164,6 @@ vllm_config.scheduler_config.max_num_seqs = 128 vllm_config.scheduler_config.max_model_len = 8192 -def torch_moe2(a, w1, w2, topk_weight, topk_ids): - M, K = a.shape - topk = topk_ids.shape[1] - a = a.view(M, -1, K).repeat(1, topk, 1).reshape(-1, K) - out = torch.zeros(M * topk, w2.shape[1], dtype=a.dtype, device=a.device) - num_experts = w1.shape[0] - for i in range(num_experts): - mask = (topk_ids == i).view(-1) - if mask.sum(): - out[mask] = SiluAndMul()( - a[mask] @ w1[i].transpose(0, 1)) @ w2[i].transpose(0, 1) - - return (out.view(M, -1, w2.shape[1]) * - topk_weight.view(M, -1, 1).to(out.dtype)).sum(dim=1) - - def _pplx_moe( pgi: ProcessGroupInfo, dp_size: int, @@ -210,8 +194,8 @@ def _pplx_moe( group_name = cpu_group.group_name with set_current_vllm_config(vllm_config): - torch_output = torch_moe2(a_full, w1_full, w2_full, topk_weights, - topk_ids) + torch_output = torch_experts(a_full, w1_full, w2_full, topk_weights, + topk_ids) pplx_output = pplx_cutlass_moe(pgi, dp_size, a, w1, w2, w1_scale, w2_scale, topk_weights, topk_ids, a1_scale, out_dtype, per_act_token, diff --git a/tests/kernels/moe/test_pplx_moe.py b/tests/kernels/moe/test_pplx_moe.py index 2d6a8f39cec5f..c4ad3af6802d4 100644 --- a/tests/kernels/moe/test_pplx_moe.py +++ b/tests/kernels/moe/test_pplx_moe.py @@ -18,8 +18,8 @@ try: except ImportError: has_pplx = False +from tests.kernels.utils import torch_experts from vllm.config import VllmConfig, set_current_vllm_config -from vllm.model_executor.layers.activation import SiluAndMul from vllm.model_executor.layers.fused_moe import override_config from vllm.model_executor.layers.fused_moe.fused_batched_moe import ( BatchedExperts, BatchedPrepareAndFinalize, BatchedTritonExperts) @@ -163,29 +163,6 @@ def batched_moe( return fused_experts(a, w1, w2, topk_weight, topk_ids, num_experts) -# Note: same as torch_moe but with fused_topk factored out. -def torch_moe2( - a: torch.Tensor, - w1: torch.Tensor, - w2: torch.Tensor, - topk_weight: torch.Tensor, - topk_ids: torch.Tensor, -) -> torch.Tensor: - M, K = a.shape - topk = topk_ids.shape[1] - a = a.view(M, -1, K).repeat(1, topk, 1).reshape(-1, K) - out = torch.zeros(M * topk, w2.shape[1], dtype=a.dtype, device=a.device) - num_experts = w1.shape[0] - for i in range(num_experts): - mask = (topk_ids == i).view(-1) - if mask.sum(): - out[mask] = SiluAndMul()( - a[mask] @ w1[i].transpose(0, 1)) @ w2[i].transpose(0, 1) - - return (out.view(M, -1, w2.shape[1]) * - topk_weight.view(M, -1, 1).to(out.dtype)).sum(dim=1) - - @pytest.mark.parametrize("m", [1, 33, 64, 222]) @pytest.mark.parametrize("n", [128, 1024, 2048]) @pytest.mark.parametrize("k", [128, 512, 1024]) @@ -209,7 +186,7 @@ def test_fused_moe_batched_experts( with set_current_vllm_config(vllm_config): topk_weight, topk_ids, _ = fused_topk(a, score, topk, False) - baseline_output = torch_moe2(a, w1, w2, topk_weight, topk_ids) + baseline_output = torch_experts(a, w1, w2, topk_weight, topk_ids) torch_output = torch_batched_moe(a, w1, w2, topk_weight, topk_ids) batched_output = batched_moe(a, w1, w2, topk_weight, topk_ids) @@ -409,7 +386,7 @@ def pplx_moe( w2: torch.Tensor, topk_weight: torch.Tensor, topk_ids: torch.Tensor, - use_compile: bool = True, + use_compile: bool = False, use_cudagraphs: bool = True, ) -> torch.Tensor: from vllm.model_executor.layers.fused_moe.pplx_prepare_finalize import ( @@ -470,10 +447,16 @@ def pplx_moe( w1_chunk = chunk_by_rank(w1, rank, world_size).to(device) w2_chunk = chunk_by_rank(w2, rank, world_size).to(device) + # Note: for now use_compile will error out if the problem size is + # large enough to trigger chunking. I'm leaving the flag and + # setup code in case we are able to revisit this later. if use_compile: _fused_experts = torch.compile(fused_experts, backend='inductor', fullgraph=True) + torch._dynamo.mark_dynamic(a_chunk, 0) + torch._dynamo.mark_dynamic(chunk_topk_weight, 0) + torch._dynamo.mark_dynamic(chunk_topk_ids, 0) else: _fused_experts = fused_experts @@ -576,7 +559,7 @@ def _pplx_moe( with set_current_vllm_config(vllm_config), override_config(moe_config): topk_weight, topk_ids, _ = fused_topk(a, score, topk, False) - torch_output = torch_moe2(a, w1, w2, topk_weight, topk_ids) + torch_output = torch_experts(a, w1, w2, topk_weight, topk_ids) pplx_output = pplx_moe(group_name, pgi.rank, pgi.world_size, dp_size, a, w1, w2, topk_weight, topk_ids) # TODO (bnell): fix + re-enable diff --git a/tests/kernels/quantization/test_block_fp8.py b/tests/kernels/quantization/test_block_fp8.py index eec59573792db..1ca0a80ab9a99 100644 --- a/tests/kernels/quantization/test_block_fp8.py +++ b/tests/kernels/quantization/test_block_fp8.py @@ -403,19 +403,24 @@ def deep_gemm_w8a8_block_fp8_moe(M, K, a, w1, w2, w1_s, w2_s, score, topk, itertools.product(M_moe_dg, N_moe, K_moe, E, TOP_KS, SEEDS)) @pytest.mark.skipif(not dg_available, reason="DeepGemm kernels not available.") @torch.inference_mode() -def test_w8a8_block_fp8_deep_gemm_fused_moe(M, N, K, E, topk, seed): - - block_m = deep_gemm.get_m_alignment_for_contiguous_layout() - block_size = [block_m, block_m] - dtype = torch.bfloat16 - +def test_w8a8_block_fp8_deep_gemm_fused_moe(M, N, K, E, topk, seed, + monkeypatch): if topk > E: pytest.skip(f"Skipping test: topk={topk} > E={E}") if not _valid_deep_gemm_shape(M, N, K): pytest.skip(f"Skipping test: invalid size m={M}, n={N}, k={K}") + chunk_size = 1024 + torch.manual_seed(seed) + + monkeypatch.setenv("VLLM_FUSED_MOE_CHUNK_SIZE", str(chunk_size)) + + block_m = deep_gemm.get_m_alignment_for_contiguous_layout() + block_size = [block_m, block_m] + dtype = torch.bfloat16 + fp8_info = torch.finfo(torch.float8_e4m3fn) fp8_max, fp8_min = fp8_info.max, fp8_info.min @@ -451,6 +456,14 @@ def test_w8a8_block_fp8_deep_gemm_fused_moe(M, N, K, E, topk, seed): w1[i], w1_s[i] = per_block_cast_to_fp8(w1_bf16[i]) w2[i], w2_s[i] = per_block_cast_to_fp8(w2_bf16[i]) + # Note: for now use_compile will error out if the problem size is + # large enough to trigger chunking. I'm leaving the flag and + # setup code in case we are able to revisit this later. + use_compile = False + + use_cudagraph = (chunk_size < M and N >= 1024 and K >= 1024 + and current_platform.is_cuda_alike()) + # Set the context to avoid lots of warning spam. with set_current_vllm_config(vllm_config): if M >= 128: @@ -463,7 +476,29 @@ def test_w8a8_block_fp8_deep_gemm_fused_moe(M, N, K, E, topk, seed): topk_weights, topk_ids, token_expert_indices = fused_topk( a, score.float(), topk, False) - out = deep_gemm_moe_fp8(a, w1, w2, w1_s, w2_s, topk_weights, topk_ids) + if use_compile: + deep_gemm_moe_fp8_fn = torch.compile(deep_gemm_moe_fp8, + backend="inductor", + fullgraph=True) + torch._dynamo.mark_dynamic(a, 0) + torch._dynamo.mark_dynamic(topk_weights, 0) + torch._dynamo.mark_dynamic(topk_ids, 0) + else: + deep_gemm_moe_fp8_fn = deep_gemm_moe_fp8 + + out = deep_gemm_moe_fp8_fn(a, w1, w2, w1_s, w2_s, topk_weights, + topk_ids) + + if use_cudagraph: + out.fill_(0) + stream = torch.cuda.Stream() + graph = torch.cuda.CUDAGraph() + with torch.cuda.graph(graph, stream=stream): + out = deep_gemm_moe_fp8_fn(a, w1, w2, w1_s, w2_s, topk_weights, + topk_ids) + torch.cuda.synchronize() + graph.replay() + torch.cuda.synchronize() #print(f"{out.sum()=}") #print(f"{ref_out.sum()=}") diff --git a/tests/kernels/utils.py b/tests/kernels/utils.py index d1db6a8eb1ba4..dcda8e479b290 100644 --- a/tests/kernels/utils.py +++ b/tests/kernels/utils.py @@ -1054,12 +1054,21 @@ def compute_max_diff(output, output_ref): torch.abs(output_ref)) -def torch_moe(a, w1, w2, score, topk, expert_map): +def torch_experts(a: torch.Tensor, + w1: torch.Tensor, + w2: torch.Tensor, + topk_weight: torch.Tensor, + topk_ids: torch.Tensor, + global_num_experts: int = -1, + expert_map: Optional[torch.Tensor] = None) -> torch.Tensor: + assert (global_num_experts == -1 + or (global_num_experts == w1.shape[0] and expert_map is None) + or (expert_map is not None + and global_num_experts == expert_map.shape[0])) + topk = topk_ids.shape[1] B, D = a.shape a = a.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D) out = torch.zeros(B * topk, w2.shape[1], dtype=a.dtype, device=a.device) - score = torch.softmax(score, dim=-1, dtype=torch.float32) - topk_weight, topk_ids = torch.topk(score, topk) topk_weight = topk_weight.view(-1) topk_ids = topk_ids.view(-1) if expert_map is not None: @@ -1073,6 +1082,19 @@ def torch_moe(a, w1, w2, score, topk, expert_map): topk_weight.view(B, -1, 1).to(out.dtype)).sum(dim=1) +def torch_moe(a: torch.Tensor, + w1: torch.Tensor, + w2: torch.Tensor, + score: torch.Tensor, + topk: int, + global_num_experts: int = -1, + expert_map: Optional[torch.Tensor] = None) -> torch.Tensor: + score = torch.softmax(score, dim=-1, dtype=torch.float32) + topk_weight, topk_ids = torch.topk(score, topk) + return torch_experts(a, w1, w2, topk_weight, topk_ids, global_num_experts, + expert_map) + + def torch_moe_single(a, w, score, topk): B, D = a.shape a = a.view(B, -1, D).repeat(1, topk, 1).reshape(-1, D) diff --git a/vllm/envs.py b/vllm/envs.py index 01d8d8a2d2e0c..04c80807cd4dc 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -981,6 +981,7 @@ def compute_hash() -> str: "VLLM_DP_RANK", "VLLM_DP_SIZE", "VLLM_USE_STANDALONE_COMPILE", + "VLLM_FUSED_MOE_CHUNK_SIZE", ] for key in environment_variables_to_hash: if key in environment_variables: diff --git a/vllm/model_executor/layers/fused_moe/cutlass_moe.py b/vllm/model_executor/layers/fused_moe/cutlass_moe.py index 3f9ceac8b6e36..73d169a84808d 100644 --- a/vllm/model_executor/layers/fused_moe/cutlass_moe.py +++ b/vllm/model_executor/layers/fused_moe/cutlass_moe.py @@ -41,24 +41,24 @@ def run_cutlass_moe_fp8( assert w1.dtype == torch.float8_e4m3fn assert w2.dtype == torch.float8_e4m3fn if expert_num_tokens is None: - assert a1q.shape[1] == w1.shape[2], "Hidden size mismatch w1" + assert a1q.size(1) == w1.size(2), "Hidden size mismatch w1" else: - assert a1q.shape[2] == w1.shape[2], "Hidden size mismatch w1" - assert w1.shape[1] == w2.shape[2] * 2, "Hidden size mismatch w2" - assert w1_scale.dim() == 1 or w1_scale.shape[1] == 1 or w1_scale.shape[ - 1] == w1.shape[1], "W1 scale shape mismatch" - assert w2_scale.dim() == 1 or w2_scale.shape[1] == 1 or w2_scale.shape[ - 1] == w2.shape[1], "W2 scale shape mismatch" - assert w1.shape[0] == w2.shape[0], "Expert number mismatch" - assert a1q_scale is None or a1q_scale.dim( - ) == 0 or a1q_scale.shape[0] == 1 or a1q_scale.shape[0] == a1q.shape[ - 0], "Input scale shape mismatch" - assert w1.shape[0] == w2.shape[0], "Weights expert number mismatch" - assert w1.shape[0] == w1_scale.shape[0], "w1 scales expert number mismatch" - assert w1.shape[0] == w2_scale.shape[0], "w2 scales expert number mismatch" - assert a2_scale is None or a2_scale.dim( - ) == 0 or a2_scale.shape[0] == 1 or a2_scale.shape[0] == a1q.shape[ - 0], "Intermediate scale shape mismatch" + assert a1q.size(2) == w1.size(2), "Hidden size mismatch w1" + assert w1.size(1) == w2.size(2) * 2, "Hidden size mismatch w2" + assert w1_scale.dim() == 1 or w1_scale.size( + 1) == 1 or w1_scale.shape[1] == w1.size(1), "W1 scale shape mismatch" + assert w2_scale.dim() == 1 or w2_scale.size( + 1) == 1 or w2_scale.shape[1] == w2.size(1), "W2 scale shape mismatch" + assert w1.size(0) == w2.size(0), "Expert number mismatch" + assert a1q_scale is None or a1q_scale.dim() == 0 or a1q_scale.size( + 0) == 1 or a1q_scale.size( + 0) == a1q.shape[0], "Input scale shape mismatch" + assert w1.size(0) == w2.size(0), "Weights expert number mismatch" + assert w1.size(0) == w1_scale.size(0), "w1 scales expert number mismatch" + assert w1.size(0) == w2_scale.size(0), "w2 scales expert number mismatch" + assert a2_scale is None or a2_scale.dim() == 0 or a2_scale.size( + 0) == 1 or a2_scale.size( + 0) == a1q.shape[0], "Intermediate scale shape mismatch" assert out_dtype in [torch.half, torch.bfloat16], "Invalid output dtype" if expert_map is not None: assert expert_num_tokens is None @@ -75,12 +75,12 @@ def run_cutlass_moe_fp8( # their tokens are already contiguous for each expert as a result of # the dispatch function. - M = a1q.shape[0] # non batched expert M - padded_M = a1q.shape[1] # batched expert M + M = a1q.size(0) # non batched expert M + padded_M = a1q.size(1) # batched expert M _, K, N = w2.shape device = a1q.device - assert w1.shape[2] == K + assert w1.size(2) == K assert global_num_experts != -1 assert a1q_scale is not None @@ -91,8 +91,8 @@ def run_cutlass_moe_fp8( else: local_topk_ids = topk_ids - topk = local_topk_ids.shape[1] - local_E = w1.shape[0] + topk = local_topk_ids.size(1) + local_E = w1.size(0) if use_batched_format: assert expert_num_tokens is not None @@ -111,10 +111,10 @@ def run_cutlass_moe_fp8( problem_sizes2, expert_num_tokens, local_E, padded_M, N, K) - w1_scale = w1_scale.reshape(w1_scale.shape[0], -1) - w2_scale = w2_scale.reshape(w2_scale.shape[0], -1) - a1q = a1q.reshape(-1, a1q.shape[2]) - a1q_scale = a1q_scale.reshape(-1, a1q_scale.shape[2]).contiguous() + w1_scale = w1_scale.reshape(w1_scale.size(0), -1) + w2_scale = w2_scale.reshape(w2_scale.size(0), -1) + a1q = a1q.reshape(-1, a1q.size(2)) + a1q_scale = a1q_scale.reshape(-1, a1q_scale.size(2)).contiguous() else: expert_offsets = torch.empty((global_num_experts + 1), @@ -151,19 +151,19 @@ def run_cutlass_moe_fp8( a1q_scale = a1q_scale[a_map] if per_act_token else a1q_scale expert_offsets = expert_offsets[:-1] - ab_strides1 = torch.full((w1.shape[0], ), + ab_strides1 = torch.full((w1.size(0), ), K, device=device, dtype=torch.int64) - c_strides1 = torch.full((w1.shape[0], ), + c_strides1 = torch.full((w1.size(0), ), 2 * N, device=device, dtype=torch.int64) - ab_strides2 = torch.full((w1.shape[0], ), + ab_strides2 = torch.full((w1.size(0), ), N, device=device, dtype=torch.int64) - c_strides2 = torch.full((w1.shape[0], ), + c_strides2 = torch.full((w1.size(0), ), K, device=device, dtype=torch.int64) @@ -237,7 +237,7 @@ class CutlassExpertsFp8(mk.FusedMoEPermuteExpertsUnpermute): workspace2: tuple[int, ...] = () output: tuple[int, ...] = () if self.use_batched_format: - padded_M = aq.shape[1] + padded_M = aq.size(1) workspace1 = (self.max_experts_per_worker, padded_M, max(N, K)) workspace2 = (self.max_experts_per_worker, padded_M, (N // 2)) output = (self.max_experts_per_worker, padded_M, K) @@ -332,7 +332,7 @@ def cutlass_moe_fp8( """ per_act_token = a1_scale.numel() != 1 if a1_scale is not None else ( a2_scale.numel() != 1 if a2_scale is not None else False) - per_out_ch = w1_scale.numel() != w1_q.shape[0] + per_out_ch = w1_scale.numel() != w1_q.size(0) out_dtype = a.dtype @@ -425,11 +425,11 @@ def cutlass_moe_fp4(a: torch.Tensor, a1_gscale: torch.Tensor, assert (m == m_a), "input shape mismatch" assert 2 * half_k_w1 == k_w2, "Hidden size mismatch w2 and w1" assert a.dtype in [torch.half, torch.bfloat16], "Invalid input dtype" - assert (topk_weights.shape[0] == m and topk_ids.shape[0] + assert (topk_weights.size(0) == m and topk_ids.size(0) == m), ("topk must be provided for each row of a") out_dtype = a.dtype - num_topk = topk_ids.shape[1] + num_topk = topk_ids.size(1) expert_offsets = torch.empty((e + 1), dtype=torch.int32, device=device) blockscale_offsets = torch.empty((e + 1), dtype=torch.int32, device=device) @@ -463,7 +463,7 @@ def cutlass_moe_fp4(a: torch.Tensor, a1_gscale: torch.Tensor, out_dtype, device) del rep_a_fp4, rep_a_blockscale # hidden size dimension is split to one halfpytho sized tensor. - intermediate = torch.empty((m * num_topk, w1_fp4.shape[1] // 2), + intermediate = torch.empty((m * num_topk, w1_fp4.size(1) // 2), device=device, dtype=out_dtype) diff --git a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py index 2e50329bc5455..050d9520ca013 100644 --- a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +++ b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py @@ -48,7 +48,7 @@ def _valid_deep_gemm(hidden_states: torch.Tensor, w1: torch.Tensor, M = hidden_states.size(0) _, K, N = w2.size() if not _valid_deep_gemm_shape(M, N, K): - logger.debug("DeepGemm disabled: unalinged problem size.") + logger.debug("DeepGemm disabled: unaligned problem size.") return False if (w1.dtype != torch.float8_e4m3fn or w2.dtype != torch.float8_e4m3fn): diff --git a/vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py b/vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py index 3484a7a8a496a..5a8accd80463d 100644 --- a/vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py +++ b/vllm/model_executor/layers/fused_moe/deepep_ll_prepare_finalize.py @@ -25,7 +25,7 @@ def dequant_fp8(expert_x_fp8: torch.Tensor, expert_x_fp32 = expert_x_fp8.to(torch.float32).view( num_experts, -1, DEEPEP_QUANT_BLOCK_SIZE) expert_x_scales = expert_x_scales.view(num_experts, -1, 1) - return (expert_x_fp32 * expert_x_scales).view(expert_x_fp8.shape) + return (expert_x_fp32 * expert_x_scales).view(expert_x_fp8.size()) class DeepEPLLPrepareAndFinalize(mk.FusedMoEPrepareAndFinalize): diff --git a/vllm/model_executor/layers/fused_moe/fused_moe.py b/vllm/model_executor/layers/fused_moe/fused_moe.py index 437e80696ac65..f22884b8a1a5a 100644 --- a/vllm/model_executor/layers/fused_moe/fused_moe.py +++ b/vllm/model_executor/layers/fused_moe/fused_moe.py @@ -488,10 +488,10 @@ def invoke_fused_moe_kernel(A: torch.Tensor, if use_fp8_w8a8 or use_int8_w8a8: assert B_scale is not None - assert (block_shape is None or triton.cdiv(B.shape[-2], block_shape[0]) - == B_scale.shape[-2]) - assert (block_shape is None or triton.cdiv(B.shape[-1], block_shape[1]) - == B_scale.shape[-1]) + assert (block_shape is None + or triton.cdiv(B.size(-2), block_shape[0]) == B_scale.size(-2)) + assert (block_shape is None + or triton.cdiv(B.size(-1), block_shape[1]) == B_scale.size(-1)) elif use_int8_w8a16 or use_int4_w4a16: assert B_scale is not None @@ -500,19 +500,19 @@ def invoke_fused_moe_kernel(A: torch.Tensor, assert A_scale is None assert B_scale is None - M = A.shape[0] + M = A.size(0) num_tokens = M * top_k - EM = sorted_token_ids.shape[0] - if A.shape[0] < config["BLOCK_SIZE_M"]: + EM = sorted_token_ids.size(0) + if A.size(0) < config["BLOCK_SIZE_M"]: # optimize for small batch_size. # We assume that top_ids of each token is unique, so # so num_valid_experts <= batch_size <= BLOCK_SIZE_M, # and we can skip some invalid blocks. - EM = min(sorted_token_ids.shape[0], - A.shape[0] * top_k * config['BLOCK_SIZE_M']) + EM = min(sorted_token_ids.size(0), + A.size(0) * top_k * config['BLOCK_SIZE_M']) grid = lambda META: (triton.cdiv(EM, META['BLOCK_SIZE_M']) * triton.cdiv( - B.shape[1], META['BLOCK_SIZE_N']), ) + B.size(1), META['BLOCK_SIZE_N']), ) if (use_int8_w8a16 or use_int4_w4a16) and \ block_shape is not None and block_shape[1] > 0: @@ -522,16 +522,16 @@ def invoke_fused_moe_kernel(A: torch.Tensor, use_moe_wna16_cuda = should_moe_wna16_use_cuda( num_valid_tokens=num_tokens, group_size=block_shape[1], - num_experts=B.shape[0], + num_experts=B.size(0), bit=4 if use_int4_w4a16 else 8) config = config.copy() config.update( get_moe_wna16_block_config(config=config, use_moe_wna16_cuda=use_moe_wna16_cuda, num_valid_tokens=num_tokens, - size_k=A.shape[1], - size_n=B.shape[1], - num_experts=B.shape[1], + size_k=A.size(1), + size_n=B.size(1), + num_experts=B.size(1), group_size=block_shape[1], real_top_k=top_k, block_size_m=config["BLOCK_SIZE_M"])) @@ -556,8 +556,8 @@ def invoke_fused_moe_kernel(A: torch.Tensor, sorted_token_ids, expert_ids, num_tokens_post_padded, - B.shape[1], - A.shape[1], + B.size(1), + A.size(1), EM, num_tokens, A.stride(0), @@ -573,7 +573,7 @@ def invoke_fused_moe_kernel(A: torch.Tensor, B_zp.stride(0) if B_zp is not None else 0, B_zp.stride(2) if B_zp is not None else 0, B_zp.stride(1) if B_zp is not None else 0, - block_k_diviable=A.shape[1] % config["BLOCK_SIZE_K"] == 0, + block_k_diviable=A.size(1) % config["BLOCK_SIZE_K"] == 0, group_size=block_shape[1], MUL_ROUTED_WEIGHT=mul_routed_weight, top_k=top_k, @@ -599,8 +599,8 @@ def invoke_fused_moe_kernel(A: torch.Tensor, sorted_token_ids, expert_ids, num_tokens_post_padded, - B.shape[1], - B.shape[2], + B.size(1), + B.size(2), EM, num_tokens, A.stride(0), @@ -818,7 +818,7 @@ def try_get_optimal_moe_config( M: int, is_marlin: bool = False, block_shape: Optional[list[int]] = None, -): +) -> dict[str, int]: from vllm.model_executor.layers.fused_moe import get_config override_config = get_config() if override_config: @@ -873,10 +873,10 @@ def fused_topk( renormalize: bool, indices_type: Optional[torch.dtype] = None, ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: - assert hidden_states.shape[0] == gating_output.shape[0], ( + assert hidden_states.size(0) == gating_output.size(0), ( "Number of tokens mismatch") - M, _ = hidden_states.shape + M, _ = hidden_states.size() topk_weights = torch.empty(M, topk, @@ -915,7 +915,7 @@ def grouped_topk( e_score_correction_bias: Optional[torch.Tensor] = None ) -> tuple[torch.Tensor, torch.Tensor]: - assert hidden_states.shape[0] == gating_output.shape[0], ( + assert hidden_states.size(0) == gating_output.size(0), ( "Number of tokens mismatch") if scoring_func == "softmax": @@ -925,7 +925,7 @@ def grouped_topk( else: raise ValueError(f"Unsupported scoring function: {scoring_func}") - num_token = scores.shape[0] + num_token = scores.size(0) if e_score_correction_bias is not None: # Store original scores before applying correction bias. We use biased # scores for expert selection but original scores for routing weights @@ -942,7 +942,7 @@ def grouped_topk( group_mask.scatter_(1, group_idx, 1) # [n, n_group] score_mask = group_mask.unsqueeze(-1).expand( num_token, num_expert_group, - scores.shape[-1] // num_expert_group).reshape(num_token, -1) # [n, e] + scores.size(-1) // num_expert_group).reshape(num_token, -1) # [n, e] tmp_scores = scores.masked_fill(~score_mask.bool(), float("-inf")) # [n, e] @@ -1162,7 +1162,7 @@ def fused_experts(hidden_states: torch.Tensor, allow_deep_gemm: bool = False) -> torch.Tensor: # For now, disable DeepGemm for small N (<= 512) until better # permute/unpermute ops are available. - N = w1.shape[1] + N = w1.size(1) if (allow_deep_gemm and use_fp8_w8a8 and N > 512 and _valid_deep_gemm(hidden_states, w1, w2)): assert apply_router_weight_on_input is False @@ -1233,13 +1233,13 @@ def fused_experts_impl( ) -> torch.Tensor: # Check constraints. if use_int4_w4a16: - assert hidden_states.shape[1] // 2 == w1.shape[ - 2], "Hidden size mismatch" + assert hidden_states.size(1) // 2 == w1.size(2), ( + "Hidden size mismatch") else: - assert hidden_states.shape[1] == w1.shape[2], ( - f"Hidden size mismatch {hidden_states.shape[1]} != {w1.shape[2]}") + assert hidden_states.size(1) == w1.size(2), ( + f"Hidden size mismatch {hidden_states.size(1)} != {w1.size(2)}") - assert topk_weights.shape == topk_ids.shape, "topk shape mismatch" + assert topk_weights.size() == topk_ids.size(), "topk shape mismatch" assert hidden_states.is_contiguous(), "Hidden_states must be contiguous" assert w1.stride(-1) == 1, "Stride of last dimension must be 1" assert w2.stride(-1) == 1, "Stride of last dimension must be 1" @@ -1247,12 +1247,12 @@ def fused_experts_impl( torch.float32, torch.float16, torch.bfloat16 ] - num_tokens = hidden_states.shape[0] - E, N, _ = w1.shape - K = w2.shape[1] + num_tokens = hidden_states.size(0) + E, N, _ = w1.size() + K = w2.size(1) if global_num_experts == -1: global_num_experts = E - top_k_num = topk_ids.shape[1] + top_k_num = topk_ids.size(1) # We execute the fused_moe kernel in chunks to circumvent this issue: # https://github.com/vllm-project/vllm/issues/5938 CHUNK_SIZE = envs.VLLM_FUSED_MOE_CHUNK_SIZE @@ -1269,8 +1269,8 @@ def fused_experts_impl( get_config_func = functools.partial( try_get_optimal_moe_config, - w1.shape, - w2.shape, + w1.size(), + w2.size(), top_k_num, config_dtype, block_shape=block_shape, @@ -1310,7 +1310,7 @@ def fused_experts_impl( min((chunk + 1) * CHUNK_SIZE, num_tokens)) curr_hidden_states = hidden_states[begin_chunk_idx:end_chunk_idx] - tokens_in_chunk, _ = curr_hidden_states.shape + tokens_in_chunk, _ = curr_hidden_states.size() if tokens_in_chunk == 0: break @@ -1322,7 +1322,7 @@ def fused_experts_impl( # do not need to be adjusted. intermediate_cache1 = intermediate_cache1[:tokens_in_chunk] intermediate_cache2 = intermediate_cache2[:tokens_in_chunk * - topk_ids.shape[1]] + topk_ids.size(1)] intermediate_cache3 = intermediate_cache3[:tokens_in_chunk] config = get_config_func(tokens_in_chunk) @@ -1398,7 +1398,7 @@ def fused_experts_impl( per_channel_quant=per_channel_quant, block_shape=block_shape) - ops.moe_sum(intermediate_cache3.view(*intermediate_cache3.shape), + ops.moe_sum(intermediate_cache3.view(*intermediate_cache3.size()), out_hidden_states[begin_chunk_idx:end_chunk_idx]) return out_hidden_states @@ -1611,8 +1611,8 @@ class TritonExperts(mk.FusedMoEPermuteExpertsUnpermute): dtype=hidden_states.dtype) config = try_get_optimal_moe_config( - w1.shape, - w2.shape, + w1.size(), + w2.size(), top_k_num, config_dtype, num_tokens, diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index 4ed10e60b13ac..c1bae033c2b4b 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -861,13 +861,11 @@ class FusedMoE(torch.nn.Module): self.global_num_experts = num_experts # For smuggling this layer into the fused moe custom op - self.use_direct_call = self.dp_size == 1 - if not self.use_direct_call: - compilation_config = vllm_config.compilation_config - if prefix in compilation_config.static_forward_context: - raise ValueError("Duplicate layer name: {}".format(prefix)) - compilation_config.static_forward_context[prefix] = self - self.layer_name = prefix + compilation_config = vllm_config.compilation_config + if prefix in compilation_config.static_forward_context: + raise ValueError("Duplicate layer name: {}".format(prefix)) + compilation_config.static_forward_context[prefix] = self + self.layer_name = prefix # Determine expert maps if self.use_ep: @@ -1361,11 +1359,8 @@ class FusedMoE(torch.nn.Module): def forward(self, hidden_states: torch.Tensor, router_logits: torch.Tensor): - if self.use_direct_call: - return self.forward_impl(hidden_states, router_logits) - else: - return torch.ops.vllm.moe_forward(hidden_states, router_logits, - self.layer_name) + return torch.ops.vllm.moe_forward(hidden_states, router_logits, + self.layer_name) def forward_impl_chunked(self, full_hidden_states: torch.Tensor, full_router_logits: torch.Tensor): diff --git a/vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py b/vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py index 5bc01dbf2025e..2ff8ef99b2ec8 100644 --- a/vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py +++ b/vllm/model_executor/layers/fused_moe/pplx_prepare_finalize.py @@ -69,7 +69,7 @@ class PplxPrepareAndFinalize(mk.FusedMoEPrepareAndFinalize): a1 = a1 * rank_topk_weights.to(a1.dtype) repeat_cols = 4 - repeat_rows = 1 if self.per_act_token else a1.shape[0] + repeat_rows = 1 if self.per_act_token else a1.size(0) a1q, a1q_scale = moe_kernel_quantize_input( a1, (None if self.per_act_token else a1_scale), self.quant_dtype, self.per_act_token, self.block_shape) From ba7ba35cdaeacb59860bad2691de190bd77ccd05 Mon Sep 17 00:00:00 2001 From: Aaron Pham Date: Wed, 25 Jun 2025 02:36:22 -0400 Subject: [PATCH 125/211] [Chore] debloat some initial logs (#19438) Signed-off-by: Aaron Pham --- vllm/config.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 5cf7fe84dd3e6..96ea47a0dce38 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -546,10 +546,10 @@ class ModelConfig: self.code_revision, self.config_format) if hf_overrides_kw: - logger.info("Overriding HF config with %s", hf_overrides_kw) + logger.debug("Overriding HF config with %s", hf_overrides_kw) hf_config.update(hf_overrides_kw) if hf_overrides_fn: - logger.info("Overriding HF config with %s", hf_overrides_fn) + logger.debug("Overriding HF config with %s", hf_overrides_fn) hf_config = hf_overrides_fn(hf_config) self.hf_config = hf_config @@ -1947,8 +1947,8 @@ class ParallelConfig: if get_current_placement_group(): backend = "ray" self.distributed_executor_backend = backend - logger.info("Defaulting to use %s for distributed inference", - backend) + logger.debug("Defaulting to use %s for distributed inference", + backend) if self.distributed_executor_backend is None and self.world_size == 1: self.distributed_executor_backend = "uni" From 0f9e7354f508af3fe314cfb709babaaa668f1b04 Mon Sep 17 00:00:00 2001 From: Lucas Wilkinson Date: Wed, 25 Jun 2025 04:39:04 -0400 Subject: [PATCH 126/211] [BugFix] Fix full-cuda-graph illegal memory access in FA3 (#20057) Signed-off-by: Lucas Wilkinson --- vllm/v1/attention/backends/flash_attn.py | 25 +++++++----------------- 1 file changed, 7 insertions(+), 18 deletions(-) diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index 4ad7178374b1a..ef65d2ea36e4f 100755 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -158,12 +158,13 @@ class FlashAttentionMetadataBuilder( self.aot_schedule = (get_flash_attn_version() == 3) self.use_full_cuda_graph = compilation_config.full_cuda_graph - if self.use_full_cuda_graph and not self.aot_schedule: - raise ValueError("Full CUDA graph mode requires AOT scheduling, " - "which requires FlashAttention 3.") - self.scheduler_metadata = torch.zeros(self.runner.max_num_reqs + 1, - dtype=torch.int32, - device=self.runner.device) + if self.use_full_cuda_graph: + # NOTE(lucas): AOT scheduling not supported in full cuda graph mode + # yet. This is because the scheduler and kernel need to always use + # the same num_splits (which acts as an upper bound with the + # dynamic split scheduler) which is currently heuristically decided + # by the kernel launching code. + self.aot_schedule = False # Sliding window size to be used with the AOT scheduler will be # populated on first build() call. @@ -299,18 +300,6 @@ class FlashAttentionMetadataBuilder( max_seq_len=max_seq_len, causal=True) - if self.use_full_cuda_graph: - assert scheduler_metadata is not None - n = scheduler_metadata.shape[0] - self.scheduler_metadata[:n].copy_(scheduler_metadata, - non_blocking=True) - # NOTE(woosuk): We should zero out the rest of the scheduler - # metadata to guarantee the correctness. Otherwise, some thread - # blocks may use the invalid scheduler metadata and overwrite the - # output buffer. - self.scheduler_metadata[n:] = 0 - scheduler_metadata = self.scheduler_metadata[:n] - attn_metadata = FlashAttentionMetadata( num_actual_tokens=num_actual_tokens, max_query_len=max_query_len, From c53fec1fcb27aca9475e55c2d1e74c532f5f0364 Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Wed, 25 Jun 2025 20:24:07 +0800 Subject: [PATCH 127/211] [doc] add reference link for Intel XPU (#20064) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- docs/getting_started/installation/gpu/xpu.inc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/getting_started/installation/gpu/xpu.inc.md b/docs/getting_started/installation/gpu/xpu.inc.md index ab84dc09834cf..4469be36c0075 100644 --- a/docs/getting_started/installation/gpu/xpu.inc.md +++ b/docs/getting_started/installation/gpu/xpu.inc.md @@ -22,7 +22,7 @@ Currently, there are no pre-built XPU wheels. # --8<-- [end:pre-built-wheels] # --8<-- [start:build-wheel-from-source] -- First, install required driver and Intel OneAPI 2025.0 or later. +- First, install required [driver](https://dgpu-docs.intel.com/driver/installation.html#installing-gpu-drivers) and [Intel OneAPI](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html) 2025.0 or later. - Second, install Python packages for vLLM XPU backend building: ```bash From bf5181583f4927b774d86a0a493916062f86c57d Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Wed, 25 Jun 2025 22:06:46 +0900 Subject: [PATCH 128/211] [Doc] Guide for Incremental Compilation Workflow (#19109) --- docs/contributing/README.md | 3 + docs/contributing/incremental_build.md | 138 ++++++++++++++ .../installation/gpu/cuda.inc.md | 3 + tools/generate_cmake_presets.py | 169 ++++++++++++++++++ 4 files changed, 313 insertions(+) create mode 100644 docs/contributing/incremental_build.md create mode 100644 tools/generate_cmake_presets.py diff --git a/docs/contributing/README.md b/docs/contributing/README.md index e977ec3d2f71a..c0c338b426951 100644 --- a/docs/contributing/README.md +++ b/docs/contributing/README.md @@ -29,6 +29,8 @@ See . Depending on the kind of development you'd like to do (e.g. Python, CUDA), you can choose to build vLLM with or without compilation. Check out the [building from source][build-from-source] documentation for details. +For an optimized workflow when iterating on C++/CUDA kernels, see the [Incremental Compilation Workflow](./incremental_build.md) for recommendations. + ### Building the docs with MkDocs #### Introduction to MkDocs @@ -188,6 +190,7 @@ The PR needs to meet the following code quality standards: ### Adding or Changing Kernels +When actively developing or modifying kernels, using the [Incremental Compilation Workflow](./incremental_build.md) is highly recommended for faster build times. Each custom kernel needs a schema and one or more implementations to be registered with PyTorch. - Make sure custom ops are registered following PyTorch guidelines: diff --git a/docs/contributing/incremental_build.md b/docs/contributing/incremental_build.md new file mode 100644 index 0000000000000..8efa34825ecaf --- /dev/null +++ b/docs/contributing/incremental_build.md @@ -0,0 +1,138 @@ +# Incremental Compilation Workflow for vLLM Development + +When working on vLLM's C++/CUDA kernels located in the `csrc/` directory, recompiling the entire project with `uv pip install -e .` for every change can be time-consuming. An incremental compilation workflow using CMake allows for faster iteration by only recompiling the necessary components after an initial setup. This guide details how to set up and use such a workflow, which complements your editable Python installation. + +## Prerequisites + +Before setting up the incremental build: + +1. **vLLM Editable Install:** Ensure you have vLLM installed from source in an editable mode. Using pre-compiled wheels for the initial editable setup can be faster, as the CMake workflow will handle subsequent kernel recompilations. + + ```console + uv venv --python 3.12 --seed + source .venv/bin/activate + VLLM_USE_PRECOMPILED=1 uv pip install -U -e . --torch-backend=auto + ``` + +2. **CUDA Toolkit:** Verify that the NVIDIA CUDA Toolkit is correctly installed and `nvcc` is accessible in your `PATH`. CMake relies on `nvcc` to compile CUDA code. You can typically find `nvcc` in `$CUDA_HOME/bin/nvcc` or by running `which nvcc`. If you encounter issues, refer to the [official CUDA Toolkit installation guides](https://developer.nvidia.com/cuda-toolkit-archive) and vLLM's main [GPU installation documentation](../getting_started/installation/gpu/cuda.inc.md#troubleshooting) for troubleshooting. The `CMAKE_CUDA_COMPILER` variable in your `CMakeUserPresets.json` should also point to your `nvcc` binary. + +3. **Build Tools:** It is highly recommended to install `ccache` for fast rebuilds by caching compilation results (e.g., `sudo apt install ccache` or `conda install ccache`). Also, ensure the core build dependencies like `cmake` and `ninja` are installed. These are installable through `requirements/build.txt` or your system's package manager. + + ```console + uv pip install -r requirements/build.txt --torch-backend=auto + ``` + +## Setting up the CMake Build Environment + +The incremental build process is managed through CMake. You can configure your build settings using a `CMakeUserPresets.json` file at the root of the vLLM repository. + +### Generate `CMakeUserPresets.json` using the helper script + +To simplify the setup, vLLM provides a helper script that attempts to auto-detect your system's configuration (like CUDA path, Python environment, and CPU cores) and generates the `CMakeUserPresets.json` file for you. + +**Run the script:** + +Navigate to the root of your vLLM clone and execute the following command: + +```console +python tools/generate_cmake_presets.py +``` + +The script will prompt you if it cannot automatically determine certain paths (e.g., `nvcc` or a specific Python executable for your vLLM development environment). Follow the on-screen prompts. If an existing `CMakeUserPresets.json` is found, the script will ask for confirmation before overwriting it. + +After running the script, a `CMakeUserPresets.json` file will be created in the root of your vLLM repository. + +### Example `CMakeUserPresets.json` + +Below is an example of what the generated `CMakeUserPresets.json` might look like. The script will tailor these values based on your system and any input you provide. + +```json +{ + "version": 6, + "cmakeMinimumRequired": { + "major": 3, + "minor": 26, + "patch": 1 + }, + "configurePresets": [ + { + "name": "release", + "generator": "Ninja", + "binaryDir": "${sourceDir}/cmake-build-release", + "cacheVariables": { + "CMAKE_CUDA_COMPILER": "/usr/local/cuda/bin/nvcc", + "CMAKE_C_COMPILER_LAUNCHER": "ccache", + "CMAKE_CXX_COMPILER_LAUNCHER": "ccache", + "CMAKE_CUDA_COMPILER_LAUNCHER": "ccache", + "CMAKE_BUILD_TYPE": "Release", + "VLLM_PYTHON_EXECUTABLE": "/home/user/venvs/vllm/bin/python", + "CMAKE_INSTALL_PREFIX": "${sourceDir}", + "CMAKE_CUDA_FLAGS": "", + "NVCC_THREADS": "4", + "CMAKE_JOB_POOLS": "compile=32" + } + } + ], + "buildPresets": [ + { + "name": "release", + "configurePreset": "release", + "jobs": 32 + } + ] +} +``` + +**What do the various configurations mean?** +- `CMAKE_CUDA_COMPILER`: Path to your `nvcc` binary. The script attempts to find this automatically. +- `CMAKE_C_COMPILER_LAUNCHER`, `CMAKE_CXX_COMPILER_LAUNCHER`, `CMAKE_CUDA_COMPILER_LAUNCHER`: Setting these to `ccache` (or `sccache`) significantly speeds up rebuilds by caching compilation results. Ensure `ccache` is installed (e.g., `sudo apt install ccache` or `conda install ccache`). The script sets these by default. +- `VLLM_PYTHON_EXECUTABLE`: Path to the Python executable in your vLLM development environment. The script will prompt for this, defaulting to the current Python environment if suitable. +- `CMAKE_INSTALL_PREFIX: "${sourceDir}"`: Specifies that the compiled components should be installed back into your vLLM source directory. This is crucial for the editable install, as it makes the newly built kernels immediately available to your Python environment. +- `CMAKE_JOB_POOLS` and `jobs` in build presets: Control the parallelism of the build. The script sets these based on the number of CPU cores detected on your system. +- `binaryDir`: Specifies where the build artifacts will be stored (e.g., `cmake-build-release`). + +## Building and Installing with CMake + +Once your `CMakeUserPresets.json` is configured: + +1. **Initialize the CMake build environment:** + This step configures the build system according to your chosen preset (e.g., `release`) and creates the build directory at `binaryDir` + + ```console + cmake --preset release + ``` + +2. **Build and install the vLLM components:** + This command compiles the code and installs the resulting binaries into your vLLM source directory, making them available to your editable Python installation. + + ```console + cmake --build --preset release --target install + ``` + +3. **Make changes and repeat!** + Now you start using your editable install of vLLM, testing and making changes as needed. If you need to build again to update based on changes, simply run the CMake command again to build only the affected files. + + ```console + cmake --build --preset release --target install + ``` + +## Verifying the Build + +After a successful build, you will find a populated build directory (e.g., `cmake-build-release/` if you used the `release` preset and the example configuration). + +```console +> ls cmake-build-release/ +bin cmake_install.cmake _deps machete_generation.log +build.ninja CPackConfig.cmake detect_cuda_compute_capabilities.cu marlin_generation.log +_C.abi3.so CPackSourceConfig.cmake detect_cuda_version.cc _moe_C.abi3.so +CMakeCache.txt ctest _flashmla_C.abi3.so moe_marlin_generation.log +CMakeFiles cumem_allocator.abi3.so install_local_manifest.txt vllm-flash-attn +``` + +The `cmake --build ... --target install` command copies the compiled shared libraries (like `_C.abi3.so`, `_moe_C.abi3.so`, etc.) into the appropriate `vllm` package directory within your source tree. This updates your editable installation with the newly compiled kernels. + +## Additional Tips + +- **Adjust Parallelism:** Fine-tune the `CMAKE_JOB_POOLS` in `configurePresets` and `jobs` in `buildPresets` in your `CMakeUserPresets.json`. Too many jobs can overload systems with limited RAM or CPU cores, leading to slower builds or system instability. Too few won't fully utilize available resources. +- **Clean Builds When Necessary:** If you encounter persistent or strange build errors, especially after significant changes or switching branches, consider removing the CMake build directory (e.g., `rm -rf cmake-build-release`) and re-running the `cmake --preset` and `cmake --build` commands. +- **Specific Target Builds:** For even faster iterations when working on a specific module, you can sometimes build a specific target instead of the full `install` target, though `install` ensures all necessary components are updated in your Python environment. Refer to CMake documentation for more advanced target management. diff --git a/docs/getting_started/installation/gpu/cuda.inc.md b/docs/getting_started/installation/gpu/cuda.inc.md index 89f3772d09ce3..0417a25f85adc 100644 --- a/docs/getting_started/installation/gpu/cuda.inc.md +++ b/docs/getting_started/installation/gpu/cuda.inc.md @@ -151,6 +151,9 @@ pip install -e . [sccache](https://github.com/mozilla/sccache) works similarly to `ccache`, but has the capability to utilize caching in remote storage environments. The following environment variables can be set to configure the vLLM `sccache` remote: `SCCACHE_BUCKET=vllm-build-sccache SCCACHE_REGION=us-west-2 SCCACHE_S3_NO_CREDENTIALS=1`. We also recommend setting `SCCACHE_IDLE_TIMEOUT=0`. +!!! note "Faster Kernel Development" + For frequent C++/CUDA kernel changes, after the initial `pip install -e .` setup, consider using the [Incremental Compilation Workflow](../../contributing/incremental_build.md) for significantly faster rebuilds of only the modified kernel code. + ##### Use an existing PyTorch installation There are scenarios where the PyTorch dependency cannot be easily installed via pip, e.g.: diff --git a/tools/generate_cmake_presets.py b/tools/generate_cmake_presets.py new file mode 100644 index 0000000000000..5f92f2f5848fa --- /dev/null +++ b/tools/generate_cmake_presets.py @@ -0,0 +1,169 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import json +import multiprocessing +import os +import sys +from shutil import which + +try: + # Try to get CUDA_HOME from PyTorch installation, which is the + # most reliable source of truth for vLLM's build. + from torch.utils.cpp_extension import CUDA_HOME +except ImportError: + print("Warning: PyTorch not found. " + "Falling back to CUDA_HOME environment variable.") + CUDA_HOME = os.environ.get("CUDA_HOME") + + +def get_python_executable(): + """Get the current Python executable, which is used to run this script.""" + return sys.executable + + +def get_cpu_cores(): + """Get the number of CPU cores.""" + return multiprocessing.cpu_count() + + +def generate_presets(output_path="CMakeUserPresets.json"): + """Generates the CMakeUserPresets.json file.""" + + print("Attempting to detect your system configuration...") + + # Detect NVCC + nvcc_path = None + if CUDA_HOME: + prospective_path = os.path.join(CUDA_HOME, "bin", "nvcc") + if os.path.exists(prospective_path): + nvcc_path = prospective_path + print("Found nvcc via torch.utils.cpp_extension.CUDA_HOME: " + f"{nvcc_path}") + + if not nvcc_path: + nvcc_path = which("nvcc") + if nvcc_path: + print(f"Found nvcc in PATH: {nvcc_path}") + + if not nvcc_path: + nvcc_path_input = input( + "Could not automatically find 'nvcc'. Please provide the full " + "path to nvcc (e.g., /usr/local/cuda/bin/nvcc): ") + nvcc_path = nvcc_path_input.strip() + print(f"Using NVCC path: {nvcc_path}") + + # Detect Python executable + python_executable = get_python_executable() + if python_executable: + print(f"Found Python via sys.executable: {python_executable}") + else: + python_executable_prompt = ( + "Could not automatically find Python executable. Please provide " + "the full path to your Python executable for vLLM development " + "(typically from your virtual environment, e.g., " + "/home/user/venvs/vllm/bin/python): ") + python_executable = input(python_executable_prompt).strip() + if not python_executable: + raise ValueError( + "Could not determine Python executable. Please provide it " + "manually.") + + print(f"Using Python executable: {python_executable}") + + # Get CPU cores + cpu_cores = get_cpu_cores() + nvcc_threads = min(4, cpu_cores) + cmake_jobs = max(1, cpu_cores // nvcc_threads) + print(f"Detected {cpu_cores} CPU cores. " + f"Setting NVCC_THREADS={nvcc_threads} and CMake jobs={cmake_jobs}.") + + # Get vLLM project root (assuming this script is in vllm/tools/) + project_root = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..")) + print(f"VLLM project root detected as: {project_root}") + + # Ensure python_executable path is absolute or resolvable + if not os.path.isabs(python_executable) and which(python_executable): + python_executable = os.path.abspath(which(python_executable)) + elif not os.path.isabs(python_executable): + print(f"Warning: Python executable '{python_executable}' is not an " + "absolute path and not found in PATH. CMake might not find it.") + + cache_variables = { + "CMAKE_CUDA_COMPILER": nvcc_path, + "CMAKE_BUILD_TYPE": "Release", + "VLLM_PYTHON_EXECUTABLE": python_executable, + "CMAKE_INSTALL_PREFIX": "${sourceDir}", + "CMAKE_CUDA_FLAGS": "", + "NVCC_THREADS": str(nvcc_threads), + } + + # Detect compiler cache + if which("sccache"): + print("Using sccache for compiler caching.") + for launcher in ("C", "CXX", "CUDA", "HIP"): + cache_variables[f"CMAKE_{launcher}_COMPILER_LAUNCHER"] = "sccache" + elif which("ccache"): + print("Using ccache for compiler caching.") + for launcher in ("C", "CXX", "CUDA", "HIP"): + cache_variables[f"CMAKE_{launcher}_COMPILER_LAUNCHER"] = "ccache" + else: + print("No compiler cache ('ccache' or 'sccache') found.") + + configure_preset = { + "name": "release", + "binaryDir": "${sourceDir}/cmake-build-release", + "cacheVariables": cache_variables, + } + if which("ninja"): + print("Using Ninja generator.") + configure_preset["generator"] = "Ninja" + cache_variables["CMAKE_JOB_POOLS"] = f"compile={cmake_jobs}" + else: + print("Ninja not found, using default generator. " + "Build may be slower.") + + presets = { + "version": + 6, + # Keep in sync with CMakeLists.txt and requirements/build.txt + "cmakeMinimumRequired": { + "major": 3, + "minor": 26, + "patch": 1 + }, + "configurePresets": [configure_preset], + "buildPresets": [{ + "name": "release", + "configurePreset": "release", + "jobs": cmake_jobs, + }], + } + + output_file_path = os.path.join(project_root, output_path) + + if os.path.exists(output_file_path): + overwrite = input( + f"'{output_file_path}' already exists. Overwrite? (y/N): ").strip( + ).lower() + if overwrite != 'y': + print("Generation cancelled.") + return + + try: + with open(output_file_path, "w") as f: + json.dump(presets, f, indent=4) + print(f"Successfully generated '{output_file_path}'") + print("\nTo use this preset:") + print( + f"1. Ensure you are in the vLLM root directory: cd {project_root}") + print("2. Initialize CMake: cmake --preset release") + print("3. Build+install: cmake --build --preset release " + "--target install") + + except OSError as e: + print(f"Error writing file: {e}") + + +if __name__ == "__main__": + generate_presets() From 8359f4c8d840d409fe698d9bf428ecd2f7e85b75 Mon Sep 17 00:00:00 2001 From: cjackal <44624812+cjackal@users.noreply.github.com> Date: Thu, 26 Jun 2025 00:41:02 +0900 Subject: [PATCH 129/211] [V1][Speculative Decoding] Fix DeepSeek MTP (#20022) Signed-off-by: cjackal <44624812+cjackal@users.noreply.github.com> --- vllm/model_executor/models/deepseek_mtp.py | 28 +++++++++++++++------- vllm/v1/spec_decode/eagle.py | 2 +- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/vllm/model_executor/models/deepseek_mtp.py b/vllm/model_executor/models/deepseek_mtp.py index 6e6e74b0d1d9b..911f0036c2dd6 100644 --- a/vllm/model_executor/models/deepseek_mtp.py +++ b/vllm/model_executor/models/deepseek_mtp.py @@ -52,11 +52,6 @@ class DeepSeekMultiTokenPredictorLayer(nn.Module): quant_config: Optional[QuantizationConfig] = None, ) -> None: super().__init__() - self.embed_tokens = VocabParallelEmbedding( - config.vocab_size, - config.hidden_size, - ) - self.enorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.hnorm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.eh_proj = nn.Linear(config.hidden_size * 2, @@ -74,8 +69,6 @@ class DeepSeekMultiTokenPredictorLayer(nn.Module): inputs_embeds: Optional[torch.Tensor] = None, spec_step_index: int = 0, ) -> torch.Tensor: - if inputs_embeds is None: - inputs_embeds = self.embed_tokens(input_ids) assert inputs_embeds is not None # masking inputs at position 0, as not needed by MTP inputs_embeds[positions == 0] = 0 @@ -112,7 +105,10 @@ class DeepSeekMultiTokenPredictor(nn.Module): for idx in range(self.mtp_start_layer_idx, self.mtp_start_layer_idx + self.num_mtp_layers) }) - + self.embed_tokens = VocabParallelEmbedding( + config.vocab_size, + config.hidden_size, + ) self.logits_processor = LogitsProcessor(config.vocab_size) def forward( @@ -123,6 +119,8 @@ class DeepSeekMultiTokenPredictor(nn.Module): inputs_embeds: Optional[torch.Tensor] = None, spec_step_idx: int = 0, ) -> torch.Tensor: + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) current_step_idx = (spec_step_idx % self.num_mtp_layers) return self.layers[str(self.mtp_start_layer_idx + current_step_idx)]( input_ids, @@ -242,6 +240,12 @@ class DeepSeekMTP(nn.Module, SupportsPP): if name.endswith(".bias") and name not in params_dict: continue + # According to DeepSeek-V3 Technical Report, MTP modules + # shares embedding layer. We only load the first weights. + if (spec_layer != self.model.mtp_start_layer_idx + and ".layers" not in name): + continue + param = params_dict[name] weight_loader = getattr(param, "weight_loader", default_weight_loader) @@ -253,17 +257,25 @@ class DeepSeekMTP(nn.Module, SupportsPP): """ Rewrite the weight name to match the format of the original model. Add .mtp_block for modules in transformer layer block for spec layer + and rename shared layer weights to be top level. """ spec_layer_weight_names = [ "embed_tokens", "enorm", "hnorm", "eh_proj", "shared_head" ] + shared_weight_names = ["embed_tokens"] spec_layer_weight = False + shared_weight = False for weight_name in spec_layer_weight_names: if weight_name in name: spec_layer_weight = True + if weight_name in shared_weight_names: + shared_weight = True break if not spec_layer_weight: # treat rest weights as weights for transformer layer block name = name.replace(f"model.layers.{spec_layer}.", f"model.layers.{spec_layer}.mtp_block.") + elif shared_weight: + # treat shared weights as top level weights + name = name.replace(f"model.layers.{spec_layer}.", "model.") return name diff --git a/vllm/v1/spec_decode/eagle.py b/vllm/v1/spec_decode/eagle.py index 153b67fe57147..156f5764e8dc0 100644 --- a/vllm/v1/spec_decode/eagle.py +++ b/vllm/v1/spec_decode/eagle.py @@ -148,7 +148,7 @@ class EagleProposer: assert self.runner is not None # FIXME: need to consider multiple kv_cache_groups - attn_metadata = self.runner.attn_metadata_builder.build( + attn_metadata = self.runner.attn_metadata_builders[0].build( common_prefix_len=0, common_attn_metadata=common_attn_metadata, ) From e795d723ed8b03135978399c41a93437951176ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Lucchesi?= Date: Wed, 25 Jun 2025 19:54:14 +0200 Subject: [PATCH 130/211] [Frontend] Add `/v1/audio/translations` OpenAI API endpoint (#19615) Signed-off-by: Roger Wang Signed-off-by: NickLucche Co-authored-by: Roger Wang --- docs/serving/openai_compatible_server.md | 30 ++ .../openai_transcription_client.py | 39 +- .../openai_translation_client.py | 75 +++ .../openai/test_transcription_validation.py | 2 + .../openai/test_translation_validation.py | 172 ++++++ vllm/entrypoints/openai/api_server.py | 55 +- vllm/entrypoints/openai/protocol.py | 187 +++++++ vllm/entrypoints/openai/serving_engine.py | 8 +- .../openai/serving_transcription.py | 509 +++--------------- vllm/entrypoints/openai/speech_to_text.py | 503 +++++++++++++++++ 10 files changed, 1123 insertions(+), 457 deletions(-) create mode 100644 examples/online_serving/openai_translation_client.py create mode 100644 tests/entrypoints/openai/test_translation_validation.py create mode 100644 vllm/entrypoints/openai/speech_to_text.py diff --git a/docs/serving/openai_compatible_server.md b/docs/serving/openai_compatible_server.md index 7862778464dd3..00756e719992d 100644 --- a/docs/serving/openai_compatible_server.md +++ b/docs/serving/openai_compatible_server.md @@ -57,6 +57,8 @@ We currently support the following OpenAI APIs: - Only applicable to [embedding models](../models/pooling_models.md) (`--task embed`). - [Transcriptions API][transcriptions-api] (`/v1/audio/transcriptions`) - Only applicable to Automatic Speech Recognition (ASR) models (OpenAI Whisper) (`--task generate`). +- [Translation API][translations-api] (`/v1/audio/translations`) + - Only applicable to Automatic Speech Recognition (ASR) models (OpenAI Whisper) (`--task generate`). In addition, we have the following custom APIs: @@ -374,6 +376,34 @@ The following extra parameters are supported: ```python --8<-- "vllm/entrypoints/openai/protocol.py:transcription-extra-params" ``` + +[](){ #translations-api } + +### Translations API + +Our Translation API is compatible with [OpenAI's Translations API](https://platform.openai.com/docs/api-reference/audio/createTranslation); +you can use the [official OpenAI Python client](https://github.com/openai/openai-python) to interact with it. +Whisper models can translate audio from one of the 55 non-English supported languages into English. +Please mind that the popular `openai/whisper-large-v3-turbo` model does not support translating. + +!!! note + To use the Translation API, please install with extra audio dependencies using `pip install vllm[audio]`. + +Code example: + +#### Extra Parameters + +The following [sampling parameters][sampling-params] are supported. + +```python +--8<-- "vllm/entrypoints/openai/protocol.py:translation-sampling-params" +``` + +The following extra parameters are supported: + +```python +--8<-- "vllm/entrypoints/openai/protocol.py:translation-extra-params" +``` [](){ #tokenizer-api } diff --git a/examples/online_serving/openai_transcription_client.py b/examples/online_serving/openai_transcription_client.py index ae43cb5da7905..755038a761396 100644 --- a/examples/online_serving/openai_transcription_client.py +++ b/examples/online_serving/openai_transcription_client.py @@ -26,23 +26,12 @@ from openai import OpenAI from vllm.assets.audio import AudioAsset -mary_had_lamb = AudioAsset("mary_had_lamb").get_local_path() -winning_call = AudioAsset("winning_call").get_local_path() -# Modify OpenAI's API key and API base to use vLLM's API server. -openai_api_key = "EMPTY" -openai_api_base = "http://localhost:8000/v1" -client = OpenAI( - api_key=openai_api_key, - base_url=openai_api_base, -) - - -def sync_openai(): +def sync_openai(audio_path: str, client: OpenAI): """ Perform synchronous transcription using OpenAI-compatible API. """ - with open(str(mary_had_lamb), "rb") as f: + with open(audio_path, "rb") as f: transcription = client.audio.transcriptions.create( file=f, model="openai/whisper-large-v3", @@ -58,8 +47,7 @@ def sync_openai(): print("transcription result:", transcription.text) -# OpenAI Transcription API client does not support streaming. -async def stream_openai_response(): +async def stream_openai_response(audio_path: str, base_url: str, api_key: str): """ Perform streaming transcription using vLLM's raw HTTP streaming API. """ @@ -68,11 +56,12 @@ async def stream_openai_response(): "stream": True, "model": "openai/whisper-large-v3", } - url = openai_api_base + "/audio/transcriptions" - headers = {"Authorization": f"Bearer {openai_api_key}"} + url = base_url + "/audio/transcriptions" + headers = {"Authorization": f"Bearer {api_key}"} print("transcription result:", end=" ") + # OpenAI Transcription API client does not support streaming. async with httpx.AsyncClient() as client: - with open(str(winning_call), "rb") as f: + with open(audio_path, "rb") as f: async with client.stream( "POST", url, files={"file": f}, data=data, headers=headers ) as response: @@ -93,10 +82,20 @@ async def stream_openai_response(): def main(): - sync_openai() + mary_had_lamb = str(AudioAsset("mary_had_lamb").get_local_path()) + winning_call = str(AudioAsset("winning_call").get_local_path()) + # Modify OpenAI's API key and API base to use vLLM's API server. + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) + + sync_openai(mary_had_lamb, client) # Run the asynchronous function - asyncio.run(stream_openai_response()) + asyncio.run(stream_openai_response(winning_call, openai_api_base, openai_api_key)) if __name__ == "__main__": diff --git a/examples/online_serving/openai_translation_client.py b/examples/online_serving/openai_translation_client.py new file mode 100644 index 0000000000000..6f7253e2a7894 --- /dev/null +++ b/examples/online_serving/openai_translation_client.py @@ -0,0 +1,75 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import asyncio +import json + +import httpx +from openai import OpenAI + +from vllm.assets.audio import AudioAsset + + +def sync_openai(audio_path: str, client: OpenAI): + with open(audio_path, "rb") as f: + translation = client.audio.translations.create( + file=f, + model="openai/whisper-large-v3", + response_format="json", + temperature=0.0, + # Additional params not provided by OpenAI API. + extra_body=dict( + language="it", + seed=4419, + repetition_penalty=1.3, + ), + ) + print("translation result:", translation.text) + + +async def stream_openai_response(audio_path: str, base_url: str, api_key: str): + data = { + "language": "it", + "stream": True, + "model": "openai/whisper-large-v3", + } + url = base_url + "/audio/translations" + headers = {"Authorization": f"Bearer {api_key}"} + print("translation result:", end=" ") + # OpenAI translation API client does not support streaming. + async with httpx.AsyncClient() as client: + with open(audio_path, "rb") as f: + async with client.stream( + "POST", url, files={"file": f}, data=data, headers=headers + ) as response: + async for line in response.aiter_lines(): + # Each line is a JSON object prefixed with 'data: ' + if line: + if line.startswith("data: "): + line = line[len("data: ") :] + # Last chunk, stream ends + if line.strip() == "[DONE]": + break + # Parse the JSON response + chunk = json.loads(line) + # Extract and print the content + content = chunk["choices"][0].get("delta", {}).get("content") + print(content, end="") + + +def main(): + foscolo = str(AudioAsset("azacinto_foscolo").get_local_path()) + + # Modify OpenAI's API key and API base to use vLLM's API server. + openai_api_key = "EMPTY" + openai_api_base = "http://localhost:8000/v1" + client = OpenAI( + api_key=openai_api_key, + base_url=openai_api_base, + ) + sync_openai(foscolo, client) + # Run the asynchronous function + asyncio.run(stream_openai_response(foscolo, openai_api_base, openai_api_key)) + + +if __name__ == "__main__": + main() diff --git a/tests/entrypoints/openai/test_transcription_validation.py b/tests/entrypoints/openai/test_transcription_validation.py index 8117e774951ee..dab14f1d7d03f 100644 --- a/tests/entrypoints/openai/test_transcription_validation.py +++ b/tests/entrypoints/openai/test_transcription_validation.py @@ -82,6 +82,8 @@ async def test_long_audio_request(mary_had_lamb): mary_had_lamb.seek(0) audio, sr = librosa.load(mary_had_lamb) + # Add small silence after each audio for repeatability in the split process + audio = np.pad(audio, (0, 1600)) repeated_audio = np.tile(audio, 10) # Repeated audio to buffer buffer = io.BytesIO() diff --git a/tests/entrypoints/openai/test_translation_validation.py b/tests/entrypoints/openai/test_translation_validation.py new file mode 100644 index 0000000000000..0c2cb367f3304 --- /dev/null +++ b/tests/entrypoints/openai/test_translation_validation.py @@ -0,0 +1,172 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import io +# imports for guided decoding tests +import json +from unittest.mock import patch + +import librosa +import numpy as np +import pytest +import soundfile as sf +from openai._base_client import AsyncAPIClient + +from vllm.assets.audio import AudioAsset + +from ...utils import RemoteOpenAIServer + + +@pytest.fixture +def foscolo(): + # Test translation it->en + path = AudioAsset('azacinto_foscolo').get_local_path() + with open(str(path), "rb") as f: + yield f + + +# NOTE: (NickLucche) the large-v3-turbo model was not trained on translation! +@pytest.mark.asyncio +async def test_basic_audio(foscolo): + model_name = "openai/whisper-small" + server_args = ["--enforce-eager"] + with RemoteOpenAIServer(model_name, server_args) as remote_server: + client = remote_server.get_async_client() + translation = await client.audio.translations.create( + model=model_name, + file=foscolo, + response_format="text", + # TODO remove once language detection is implemented + extra_body=dict(language="it"), + temperature=0.0) + out = json.loads(translation)['text'].strip() + assert "Nor will I ever touch the sacred" in out + + +@pytest.mark.asyncio +async def test_audio_prompt(foscolo): + model_name = "openai/whisper-small" + server_args = ["--enforce-eager"] + # Condition whisper on starting text + prompt = "Nor have I ever" + with RemoteOpenAIServer(model_name, server_args) as remote_server: + client = remote_server.get_async_client() + transcription = await client.audio.translations.create( + model=model_name, + file=foscolo, + prompt=prompt, + extra_body=dict(language="it"), + response_format="text", + temperature=0.0) + out = json.loads(transcription)['text'] + assert "Nor will I ever touch the sacred" not in out + assert prompt not in out + + +@pytest.mark.asyncio +async def test_non_asr_model(foscolo): + # text to text model + model_name = "JackFram/llama-68m" + server_args = ["--enforce-eager"] + with RemoteOpenAIServer(model_name, server_args) as remote_server: + client = remote_server.get_async_client() + res = await client.audio.translations.create(model=model_name, + file=foscolo, + temperature=0.0) + assert res.code == 400 and not res.text + assert res.message == "The model does not support Translations API" + + +@pytest.mark.asyncio +async def test_streaming_response(foscolo): + model_name = "openai/whisper-small" + server_args = ["--enforce-eager"] + translation = "" + with RemoteOpenAIServer(model_name, server_args) as remote_server: + client = remote_server.get_async_client() + res_no_stream = await client.audio.translations.create( + model=model_name, + file=foscolo, + response_format="json", + extra_body=dict(language="it"), + temperature=0.0) + # Unfortunately this only works when the openai client is patched + # to use streaming mode, not exposed in the translation api. + original_post = AsyncAPIClient.post + + async def post_with_stream(*args, **kwargs): + kwargs['stream'] = True + return await original_post(*args, **kwargs) + + with patch.object(AsyncAPIClient, "post", new=post_with_stream): + client = remote_server.get_async_client() + res = await client.audio.translations.create(model=model_name, + file=foscolo, + temperature=0.0, + extra_body=dict( + stream=True, + language="it")) + # Reconstruct from chunks and validate + async for chunk in res: + # just a chunk + text = chunk.choices[0]['delta']['content'] + translation += text + + assert translation == res_no_stream.text + + +@pytest.mark.asyncio +async def test_stream_options(foscolo): + model_name = "openai/whisper-small" + server_args = ["--enforce-eager"] + with RemoteOpenAIServer(model_name, server_args) as remote_server: + original_post = AsyncAPIClient.post + + async def post_with_stream(*args, **kwargs): + kwargs['stream'] = True + return await original_post(*args, **kwargs) + + with patch.object(AsyncAPIClient, "post", new=post_with_stream): + client = remote_server.get_async_client() + res = await client.audio.translations.create( + model=model_name, + file=foscolo, + temperature=0.0, + extra_body=dict(language="it", + stream=True, + stream_include_usage=True, + stream_continuous_usage_stats=True)) + final = False + continuous = True + async for chunk in res: + if not len(chunk.choices): + # final usage sent + final = True + else: + continuous = continuous and hasattr(chunk, 'usage') + assert final and continuous + + +@pytest.mark.asyncio +async def test_long_audio_request(foscolo): + model_name = "openai/whisper-small" + server_args = ["--enforce-eager"] + + foscolo.seek(0) + audio, sr = librosa.load(foscolo) + repeated_audio = np.tile(audio, 2) + # Repeated audio to buffer + buffer = io.BytesIO() + sf.write(buffer, repeated_audio, sr, format='WAV') + buffer.seek(0) + with RemoteOpenAIServer(model_name, server_args) as remote_server: + client = remote_server.get_async_client() + translation = await client.audio.translations.create( + model=model_name, + file=buffer, + extra_body=dict(language="it"), + response_format="text", + temperature=0.0) + out = json.loads(translation)['text'].strip().lower() + # TODO investigate higher model uncertainty in for longer translations. + assert out.count("nor will i ever") == 2 diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index a23736470f661..681633a2aff77 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -73,6 +73,8 @@ from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, TokenizeResponse, TranscriptionRequest, TranscriptionResponse, + TranslationRequest, + TranslationResponse, UnloadLoRAAdapterRequest) # yapf: enable from vllm.entrypoints.openai.serving_chat import OpenAIServingChat @@ -88,7 +90,7 @@ from vllm.entrypoints.openai.serving_score import ServingScores from vllm.entrypoints.openai.serving_tokenization import ( OpenAIServingTokenization) from vllm.entrypoints.openai.serving_transcription import ( - OpenAIServingTranscription) + OpenAIServingTranscription, OpenAIServingTranslation) from vllm.entrypoints.openai.tool_parsers import ToolParserManager from vllm.entrypoints.utils import (cli_env_setup, load_aware_call, with_cancellation) @@ -401,6 +403,10 @@ def transcription(request: Request) -> OpenAIServingTranscription: return request.app.state.openai_serving_transcription +def translation(request: Request) -> OpenAIServingTranslation: + return request.app.state.openai_serving_translation + + def engine_client(request: Request) -> EngineClient: return request.app.state.engine_client @@ -774,6 +780,47 @@ async def create_transcriptions(raw_request: Request, return StreamingResponse(content=generator, media_type="text/event-stream") +@router.post("/v1/audio/translations", + responses={ + HTTPStatus.OK.value: { + "content": { + "text/event-stream": {} + } + }, + HTTPStatus.BAD_REQUEST.value: { + "model": ErrorResponse + }, + HTTPStatus.UNPROCESSABLE_ENTITY.value: { + "model": ErrorResponse + }, + HTTPStatus.INTERNAL_SERVER_ERROR.value: { + "model": ErrorResponse + }, + }) +@with_cancellation +@load_aware_call +async def create_translations(request: Annotated[TranslationRequest, + Form()], + raw_request: Request): + handler = translation(raw_request) + if handler is None: + return base(raw_request).create_error_response( + message="The model does not support Translations API") + + audio_data = await request.file.read() + generator = await handler.create_translation(audio_data, request, + raw_request) + + if isinstance(generator, ErrorResponse): + return JSONResponse(content=generator.model_dump(), + status_code=generator.code) + + elif isinstance(generator, TranslationResponse): + return JSONResponse(content=generator.model_dump()) + + return StreamingResponse(content=generator, media_type="text/event-stream") + + @router.post("/rerank", dependencies=[Depends(validate_json_request)], responses={ @@ -1248,6 +1295,12 @@ async def init_app_state( state.openai_serving_models, request_logger=request_logger, ) if model_config.runner_type == "transcription" else None + state.openai_serving_translation = OpenAIServingTranslation( + engine_client, + model_config, + state.openai_serving_models, + request_logger=request_logger, + ) if model_config.runner_type == "transcription" else None state.task = model_config.task state.enable_server_load_tracking = args.enable_server_load_tracking diff --git a/vllm/entrypoints/openai/protocol.py b/vllm/entrypoints/openai/protocol.py index b278d0d005867..3b5281962b2d9 100644 --- a/vllm/entrypoints/openai/protocol.py +++ b/vllm/entrypoints/openai/protocol.py @@ -1947,3 +1947,190 @@ class TranscriptionResponseVerbose(OpenAIBaseModel): words: Optional[list[TranscriptionWord]] = None """Extracted words and their corresponding timestamps.""" + + +class TranslationResponseStreamChoice(OpenAIBaseModel): + delta: DeltaMessage + finish_reason: Optional[str] = None + stop_reason: Optional[Union[int, str]] = None + + +class TranslationStreamResponse(OpenAIBaseModel): + id: str = Field(default_factory=lambda: f"trsl-{random_uuid()}") + object: Literal["translation.chunk"] = "translation.chunk" + created: int = Field(default_factory=lambda: int(time.time())) + model: str + choices: list[TranslationResponseStreamChoice] + usage: Optional[UsageInfo] = Field(default=None) + + +class TranslationRequest(OpenAIBaseModel): + # Ordered by official OpenAI API documentation + # https://platform.openai.com/docs/api-reference/audio/createTranslation + + file: UploadFile + """ + The audio file object (not file name) to translate, in one of these + formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + """ + + model: Optional[str] = None + """ID of the model to use. + """ + + prompt: str = Field(default="") + """An optional text to guide the model's style or continue a previous audio + segment. + + The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting) + should match the audio language. + """ + + response_format: AudioResponseFormat = Field(default="json") + """ + The format of the output, in one of these options: `json`, `text`, `srt`, + `verbose_json`, or `vtt`. + """ + + # TODO support additional sampling parameters + # --8<-- [start:translation-sampling-params] + temperature: float = Field(default=0.0) + """The sampling temperature, between 0 and 1. + + Higher values like 0.8 will make the output more random, while lower values + like 0.2 will make it more focused / deterministic. If set to 0, the model + will use [log probability](https://en.wikipedia.org/wiki/Log_probability) + to automatically increase the temperature until certain thresholds are hit. + """ + # --8<-- [end:translation-sampling-params] + + # --8<-- [start:translation-extra-params] + language: Optional[str] = None + """The language of the input audio we translate from. + + Supplying the input language in + [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format + will improve accuracy. + """ + + stream: Optional[bool] = False + """Custom field not present in the original OpenAI definition. When set, + it will enable output to be streamed in a similar fashion as the Chat + Completion endpoint. + """ + # Flattened stream option to simplify form data. + stream_include_usage: Optional[bool] = False + stream_continuous_usage_stats: Optional[bool] = False + # --8<-- [end:translation-extra-params] + + # Default sampling parameters for translation requests. + _DEFAULT_SAMPLING_PARAMS: dict = { + "temperature": 0, + } + + def to_sampling_params( + self, + default_max_tokens: int, + default_sampling_params: Optional[dict] = None) -> SamplingParams: + # TODO(#9845): remove max_tokens when field is removed from OpenAI API + max_tokens = default_max_tokens + + if default_sampling_params is None: + default_sampling_params = {} + # Default parameters + if (temperature := self.temperature) is None: + temperature = default_sampling_params.get( + "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"]) + + return SamplingParams.from_optional(temperature=temperature, + max_tokens=max_tokens, + output_kind=RequestOutputKind.DELTA + if self.stream \ + else RequestOutputKind.FINAL_ONLY) + + @model_validator(mode="before") + @classmethod + def validate_stream_options(cls, data): + stream_opts = ["stream_include_usage", "stream_continuous_usage_stats"] + stream = data.get("stream", False) + if any(bool(data.get(so, False)) for so in stream_opts) and not stream: + raise ValueError( + "Stream options can only be defined when `stream=True`.") + + return data + + +# Translation response objects +class TranslationResponse(OpenAIBaseModel): + text: str + """The translated text.""" + + +class TranslationWord(OpenAIBaseModel): + end: float + """End time of the word in seconds.""" + + start: float + """Start time of the word in seconds.""" + + word: str + """The text content of the word.""" + + +class TranslationSegment(OpenAIBaseModel): + id: int + """Unique identifier of the segment.""" + + avg_logprob: float + """Average logprob of the segment. + + If the value is lower than -1, consider the logprobs failed. + """ + + compression_ratio: float + """Compression ratio of the segment. + + If the value is greater than 2.4, consider the compression failed. + """ + + end: float + """End time of the segment in seconds.""" + + no_speech_prob: float + """Probability of no speech in the segment. + + If the value is higher than 1.0 and the `avg_logprob` is below -1, consider + this segment silent. + """ + + seek: int + """Seek offset of the segment.""" + + start: float + """Start time of the segment in seconds.""" + + temperature: float + """Temperature parameter used for generating the segment.""" + + text: str + """Text content of the segment.""" + + tokens: list[int] + """Array of token IDs for the text content.""" + + +class TranslationResponseVerbose(OpenAIBaseModel): + duration: str + """The duration of the input audio.""" + + language: str + """The language of the input audio.""" + + text: str + """The translated text.""" + + segments: Optional[list[TranslationSegment]] = None + """Segments of the translated text and their corresponding details.""" + + words: Optional[list[TranslationWord]] = None + """Extracted words and their corresponding timestamps.""" diff --git a/vllm/entrypoints/openai/serving_engine.py b/vllm/entrypoints/openai/serving_engine.py index 4bf790bbb2984..cf2b738ba55e4 100644 --- a/vllm/entrypoints/openai/serving_engine.py +++ b/vllm/entrypoints/openai/serving_engine.py @@ -58,7 +58,8 @@ from vllm.entrypoints.openai.protocol import (ChatCompletionRequest, TokenizeCompletionRequest, TokenizeResponse, TranscriptionRequest, - TranscriptionResponse) + TranscriptionResponse, + TranslationRequest) from vllm.entrypoints.openai.serving_models import OpenAIServingModels from vllm.entrypoints.openai.tool_parsers import ToolParser # yapf: enable @@ -89,9 +90,8 @@ CompletionLikeRequest = Union[CompletionRequest, DetokenizeRequest, ChatLikeRequest = Union[ChatCompletionRequest, EmbeddingChatRequest, TokenizeChatRequest] - -AnyRequest = Union[CompletionLikeRequest, ChatLikeRequest, - TranscriptionRequest] +SpeechToTextRequest = Union[TranscriptionRequest, TranslationRequest] +AnyRequest = Union[CompletionLikeRequest, ChatLikeRequest, SpeechToTextRequest] AnyResponse = Union[ CompletionResponse, diff --git a/vllm/entrypoints/openai/serving_transcription.py b/vllm/entrypoints/openai/serving_transcription.py index 60d66434ea5ab..0d6989fe91bfa 100644 --- a/vllm/entrypoints/openai/serving_transcription.py +++ b/vllm/entrypoints/openai/serving_transcription.py @@ -1,155 +1,28 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -import asyncio -import io -import math -import time from collections.abc import AsyncGenerator -from math import ceil -from typing import Final, Optional, Union, cast +from typing import Optional, Union -import numpy as np from fastapi import Request from vllm.config import ModelConfig from vllm.engine.protocol import EngineClient from vllm.entrypoints.logger import RequestLogger from vllm.entrypoints.openai.protocol import ( - DeltaMessage, ErrorResponse, RequestResponseMetadata, TranscriptionRequest, + ErrorResponse, RequestResponseMetadata, TranscriptionRequest, TranscriptionResponse, TranscriptionResponseStreamChoice, - TranscriptionStreamResponse, UsageInfo) -from vllm.entrypoints.openai.serving_engine import OpenAIServing + TranscriptionStreamResponse, TranslationRequest, TranslationResponse, + TranslationResponseStreamChoice, TranslationStreamResponse) from vllm.entrypoints.openai.serving_models import OpenAIServingModels -from vllm.inputs.data import PromptType +from vllm.entrypoints.openai.speech_to_text import OpenAISpeechToText from vllm.logger import init_logger from vllm.outputs import RequestOutput -from vllm.transformers_utils.processor import cached_get_processor -from vllm.utils import PlaceholderModule - -try: - import librosa -except ImportError: - librosa = PlaceholderModule("librosa") # type: ignore[assignment] logger = init_logger(__name__) -# From https://platform.openai.com/docs/guides/speech-to-text/supported-languages#supported-languages -# TODO these configs should live somewhere with the model so we can support -# additional ones -ISO639_1_SUPPORTED_LANGS = { - "af": "Afrikaans", - "ar": "Arabic", - "hy": "Armenian", - "az": "Azerbaijani", - "be": "Belarusian", - "bs": "Bosnian", - "bg": "Bulgarian", - "ca": "Catalan", - "zh": "Chinese", - "hr": "Croatian", - "cs": "Czech", - "da": "Danish", - "nl": "Dutch", - "en": "English", - "et": "Estonian", - "fi": "Finnish", - "fr": "French", - "gl": "Galician", - "de": "German", - "el": "Greek", - "he": "Hebrew", - "hi": "Hindi", - "hu": "Hungarian", - "is": "Icelandic", - "id": "Indonesian", - "it": "Italian", - "ja": "Japanese", - "kn": "Kannada", - "kk": "Kazakh", - "ko": "Korean", - "lv": "Latvian", - "lt": "Lithuanian", - "mk": "Macedonian", - "ms": "Malay", - "mr": "Marathi", - "mi": "Maori", - "ne": "Nepali", - "no": "Norwegian", - "fa": "Persian", - "pl": "Polish", - "pt": "Portuguese", - "ro": "Romanian", - "ru": "Russian", - "sr": "Serbian", - "sk": "Slovak", - "sl": "Slovenian", - "es": "Spanish", - "sw": "Swahili", - "sv": "Swedish", - "tl": "Tagalog", - "ta": "Tamil", - "th": "Thai", - "tr": "Turkish", - "uk": "Ukrainian", - "ur": "Urdu", - "vi": "Vietnamese", - "cy": "Welsh" -} -ISO639_1_OTHER_LANGS = { - "lo": "Lao", - "jw": "Javanese", - "tk": "Turkmen", - "yi": "Yiddish", - "so": "Somali", - "bn": "Bengali", - "nn": "Norwegian Nynorsk", - "si": "Sinhala", - "yo": "Yoruba", - "sa": "Sanskrit", - "mi": "Māori", - "fo": "Faroese", # codespell:ignore - "mt": "Maltese", - "tg": "Tajik", - "mg": "Malagasy", - "haw": "Hawaiian", - "km": "Khmer", - "br": "Breton", - "ps": "Pashto", - "ln": "Lingala", - "la": "Latin", - "ml": "Malayalam", - "sq": "Albanian", - "su": "Sundanese", - "eu": "Basque", - "ka": "Georgian", - "uz": "Uzbek", - "sn": "Shona", - "ht": "Haitian", - "as": "Assamese", - "mn": "Mongolian", - "te": "Telugu", - "pa": "Panjabi", - "tt": "Tatar", - "gu": "Gujarati", - "oc": "Occitan", - "ha": "Hausa", - "ba": "Bashkir", - "my": "Burmese", - "sd": "Sindhi", - "am": "Amharic", - "lb": "Luxembourgish", - "bo": "Tibetan" -} - -# As per https://platform.openai.com/docs/guides/speech-to-text#overview. -# TODO configurable -MAX_AUDIO_CLIP_FILESIZE_MB = 25 -OVERLAP_CHUNK_SECOND = 1 -MIN_ENERGY_WINDOW_SIZE = 1600 # 1600 ~ 100ms for 16000 Hz audio - - -class OpenAIServingTranscription(OpenAIServing): +class OpenAIServingTranscription(OpenAISpeechToText): + """Handles transcription requests.""" def __init__( self, @@ -164,70 +37,9 @@ class OpenAIServingTranscription(OpenAIServing): model_config=model_config, models=models, request_logger=request_logger, - return_tokens_as_token_ids=return_tokens_as_token_ids) + return_tokens_as_token_ids=return_tokens_as_token_ids, + task_type="transcribe") - self.default_sampling_params = ( - self.model_config.get_diff_sampling_param()) - processor = cached_get_processor(model_config.model) - self.max_audio_clip_s = processor.feature_extractor.chunk_length - self.model_sr = processor.feature_extractor.sampling_rate - self.hop_length = processor.feature_extractor.hop_length - - if self.default_sampling_params: - logger.info( - "Overwriting default completion sampling param with: %s", - self.default_sampling_params) - - async def _preprocess_transcription( - self, - request: TranscriptionRequest, - audio_data: bytes, - ) -> tuple[list[PromptType], float]: - # Validate request - # TODO language should be optional and can be guessed. - # For now we default to en. See - # https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/generation_whisper.py#L1520 - lang_token = f"<|{request.language}|>" if request.language else "<|en|>" - if request.language: - if request.language in ISO639_1_SUPPORTED_LANGS: - pass - elif request.language in ISO639_1_OTHER_LANGS: - logger.warning( - "The selected language %s has limited accuracy with" - " reported WER>=0.5. Results may be less accurate " - "for this choice.", request.language) - else: - raise ValueError( - f"Unsupported language: {request.language}." - "Language should be one of:" + - f" {list(ISO639_1_SUPPORTED_LANGS.values())}" + - f"or {list(ISO639_1_OTHER_LANGS.values())}") - - if len(audio_data) / 1024**2 > MAX_AUDIO_CLIP_FILESIZE_MB: - raise ValueError("Maximum file size exceeded.") - - with io.BytesIO(audio_data) as bytes_: - y, sr = librosa.load(bytes_) - - duration = librosa.get_duration(y=y, sr=sr) - chunks = [y] if duration < 30 else self._split_audio(y, sr) - prompts = [] - for i, chunk in enumerate(chunks): - prompt = { - "encoder_prompt": { - "prompt": "", - "multi_modal_data": { - "audio": (chunk, sr), - }, - }, - "decoder_prompt": - f"<|startoftranscript|>{lang_token}<|transcribe|><|notimestamps|>{request.prompt}" - if i == 0 else "" - } - prompts.append(cast(PromptType, prompt)) - return prompts, duration - - # TODO (varun) : Make verbose response work ! async def create_transcription( self, audio_data: bytes, request: TranscriptionRequest, raw_request: Request @@ -238,250 +50,83 @@ class OpenAIServingTranscription(OpenAIServing): See https://platform.openai.com/docs/api-reference/audio/createTranscription for the API specification. This API mimics the OpenAI transcription API. """ - error_check_ret = await self._check_model(request) - if error_check_ret is not None: - return error_check_ret - - # If the engine is dead, raise the engine's DEAD_ERROR. - # This is required for the streaming case, where we return a - # success status before we actually start generating text :). - if self.engine_client.errored: - raise self.engine_client.dead_error - - if request.response_format not in ['text', 'json']: - return self.create_error_response( - "Currently only support response_format `text` or `json`") - - request_id = f"trsc-{self._base_request_id(raw_request)}" - - request_metadata = RequestResponseMetadata(request_id=request_id) - if raw_request: - raw_request.state.request_metadata = request_metadata - - try: - ( - lora_request, - prompt_adapter_request, - ) = self._maybe_get_adapters(request) - - if lora_request: - return self.create_error_response( - "Currently do not support LoRA for Transcription.") - if prompt_adapter_request: - return self.create_error_response( - "Currently do not support PromptAdapter for Transcription." - ) - - prompts, duration_s = await self._preprocess_transcription( - request=request, - audio_data=audio_data, - ) - - except ValueError as e: - logger.exception("Error in preprocessing prompt inputs") - return self.create_error_response(str(e)) - - list_result_generator: Optional[list[AsyncGenerator[RequestOutput, - None]]] = None - try: - # Unlike most decoder-only models, whisper generation length is not - # constrained by the size of the input audio, which is mapped to a - # fixed-size log-mel-spectogram. - default_max_tokens = self.model_config.max_model_len - sampling_params = request.to_sampling_params( - default_max_tokens, self.default_sampling_params) - - self._log_inputs( - request_id, - prompts[0]['decoder_prompt'], # type: ignore - params=sampling_params, - lora_request=None, - prompt_adapter_request=None) - - list_result_generator = [ - self.engine_client.generate( - prompt, - sampling_params, - request_id, - ) for prompt in prompts - ] - except ValueError as e: - # TODO: Use a vllm-specific Validation Error - return self.create_error_response(str(e)) - - if request.stream: - return self.transcription_stream_generator(request, - list_result_generator, - request_id, - request_metadata, - duration_s) - # Non-streaming response. - try: - assert list_result_generator is not None - text = "" - for result_generator in list_result_generator: - async for op in result_generator: - text += op.outputs[0].text - return TranscriptionResponse(text=text) - except asyncio.CancelledError: - return self.create_error_response("Client disconnected") - except ValueError as e: - # TODO: Use a vllm-specific Validation Error - return self.create_error_response(str(e)) + return await self._create_speech_to_text( + audio_data=audio_data, + request=request, + raw_request=raw_request, + response_class=TranscriptionResponse, + stream_generator_method=self.transcription_stream_generator, + ) async def transcription_stream_generator( self, request: TranscriptionRequest, - list_result_generator: list[AsyncGenerator[RequestOutput, None]], + result_generator: list[AsyncGenerator[RequestOutput, None]], request_id: str, request_metadata: RequestResponseMetadata, audio_duration_s: float) -> AsyncGenerator[str, None]: - created_time = int(time.time()) - model_name = request.model - chunk_object_type: Final = "transcription.chunk" + generator = self._speech_to_text_stream_generator( + request=request, + list_result_generator=result_generator, + request_id=request_id, + request_metadata=request_metadata, + audio_duration_s=audio_duration_s, + chunk_object_type="transcription.chunk", + response_stream_choice_class=TranscriptionResponseStreamChoice, + stream_response_class=TranscriptionStreamResponse, + ) + async for chunk in generator: + yield chunk - completion_tokens = 0 - num_prompt_tokens = 0 - include_usage = request.stream_include_usage \ - if request.stream_include_usage else False - include_continuous_usage = request.stream_continuous_usage_stats\ - if include_usage and request.stream_continuous_usage_stats\ - else False +class OpenAIServingTranslation(OpenAISpeechToText): + """Handles translation requests.""" - try: - for result_generator in list_result_generator: - async for res in result_generator: - # On first result. - if res.prompt_token_ids is not None: - # Do not account the 4-tokens `<|startoftranscript|>..` - # Could be negative when language token - # is not specified. - num_prompt_tokens = max( - len(res.prompt_token_ids) - 4, 0) - # NOTE(NickLucche) user can't pass encoder - # prompts directly at least not to Whisper. - # One indicator of the encoder amount of processing - # is the log-mel spectogram length. - num_prompt_tokens += ceil( - audio_duration_s * self.model_sr / self.hop_length) + def __init__( + self, + engine_client: EngineClient, + model_config: ModelConfig, + models: OpenAIServingModels, + *, + request_logger: Optional[RequestLogger], + return_tokens_as_token_ids: bool = False, + ): + super().__init__(engine_client=engine_client, + model_config=model_config, + models=models, + request_logger=request_logger, + return_tokens_as_token_ids=return_tokens_as_token_ids, + task_type="translate") - # We need to do it here, because if there are exceptions in - # the result_generator, it needs to be sent as the FIRST - # response (by the try...catch). + async def create_translation( + self, audio_data: bytes, request: TranslationRequest, + raw_request: Request + ) -> Union[TranslationResponse, AsyncGenerator[str, None], ErrorResponse]: + """Translation API similar to OpenAI's API. - # Just one output (n=1) supported. - assert len(res.outputs) == 1 - output = res.outputs[0] - - delta_message = DeltaMessage(content=output.text) - completion_tokens += len(output.token_ids) - - if output.finish_reason is None: - # Still generating, send delta update. - choice_data = TranscriptionResponseStreamChoice( - delta=delta_message) - else: - # Model is finished generating. - choice_data = TranscriptionResponseStreamChoice( - delta=delta_message, - finish_reason=output.finish_reason, - stop_reason=output.stop_reason) - - chunk = TranscriptionStreamResponse( - id=request_id, - object=chunk_object_type, - created=created_time, - choices=[choice_data], - model=model_name) - - # handle usage stats if requested & if continuous - if include_continuous_usage: - chunk.usage = UsageInfo( - prompt_tokens=num_prompt_tokens, - completion_tokens=completion_tokens, - total_tokens=num_prompt_tokens + completion_tokens, - ) - - data = chunk.model_dump_json(exclude_unset=True) - yield f"data: {data}\n\n" - - # Once the final token is handled, if stream_options.include_usage - # is sent, send the usage. - if include_usage: - final_usage = UsageInfo(prompt_tokens=num_prompt_tokens, - completion_tokens=completion_tokens, - total_tokens=num_prompt_tokens + - completion_tokens) - - final_usage_chunk = TranscriptionStreamResponse( - id=request_id, - object=chunk_object_type, - created=created_time, - choices=[], - model=model_name, - usage=final_usage) - final_usage_data = (final_usage_chunk.model_dump_json( - exclude_unset=True, exclude_none=True)) - yield f"data: {final_usage_data}\n\n" - - # report to FastAPI middleware aggregate usage across all choices - request_metadata.final_usage_info = UsageInfo( - prompt_tokens=num_prompt_tokens, - completion_tokens=completion_tokens, - total_tokens=num_prompt_tokens + completion_tokens) - - except Exception as e: - # TODO: Use a vllm-specific Validation Error - logger.exception("Error in chat completion stream generator.") - data = self.create_streaming_error_response(str(e)) - yield f"data: {data}\n\n" - # Send the final done message after all response.n are finished - yield "data: [DONE]\n\n" - - def _split_audio(self, audio_data: np.ndarray, - sample_rate: int) -> list[np.ndarray]: - chunk_size = sample_rate * self.max_audio_clip_s - overlap_size = sample_rate * OVERLAP_CHUNK_SECOND - chunks = [] - i = 0 - while i < audio_data.shape[-1]: - if i + chunk_size >= audio_data.shape[-1]: - # handle last chunk - chunks.append(audio_data[..., i:]) - break - - # Find the best split point in the overlap region - search_start = i + chunk_size - overlap_size - search_end = min(i + chunk_size, audio_data.shape[-1]) - split_point = self._find_split_point(audio_data, search_start, - search_end) - - # Extract chunk up to the split point - chunks.append(audio_data[..., i:split_point]) - i = split_point - return chunks - - def _find_split_point(self, wav: np.ndarray, start_idx: int, - end_idx: int) -> int: - """Find the best point to split audio by - looking for silence or low amplitude. - Args: - wav: Audio tensor [1, T] - start_idx: Start index of search region - end_idx: End index of search region - Returns: - Index of best splitting point + See https://platform.openai.com/docs/api-reference/audio/createTranslation + for the API specification. This API mimics the OpenAI translation API. """ - segment = wav[start_idx:end_idx] + return await self._create_speech_to_text( + audio_data=audio_data, + request=request, + raw_request=raw_request, + response_class=TranslationResponse, + stream_generator_method=self.translation_stream_generator, + ) - # Calculate RMS energy in small windows - min_energy = math.inf - quietest_idx = 0 - for i in range(0, - len(segment) - MIN_ENERGY_WINDOW_SIZE, - MIN_ENERGY_WINDOW_SIZE): - window = segment[i:i + MIN_ENERGY_WINDOW_SIZE] - energy = (window**2).mean()**0.5 - if energy < min_energy: - quietest_idx = i + start_idx - min_energy = energy - return quietest_idx + async def translation_stream_generator( + self, request: TranslationRequest, + result_generator: list[AsyncGenerator[RequestOutput, None]], + request_id: str, request_metadata: RequestResponseMetadata, + audio_duration_s: float) -> AsyncGenerator[str, None]: + generator = self._speech_to_text_stream_generator( + request=request, + list_result_generator=result_generator, + request_id=request_id, + request_metadata=request_metadata, + audio_duration_s=audio_duration_s, + chunk_object_type="translation.chunk", + response_stream_choice_class=TranslationResponseStreamChoice, + stream_response_class=TranslationStreamResponse, + ) + async for chunk in generator: + yield chunk diff --git a/vllm/entrypoints/openai/speech_to_text.py b/vllm/entrypoints/openai/speech_to_text.py new file mode 100644 index 0000000000000..b23cf6cab0979 --- /dev/null +++ b/vllm/entrypoints/openai/speech_to_text.py @@ -0,0 +1,503 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import asyncio +import io +import math +import time +from collections.abc import AsyncGenerator +from math import ceil +from typing import Callable, Literal, Optional, TypeVar, Union, cast + +import numpy as np +from fastapi import Request + +from vllm.config import ModelConfig +from vllm.engine.protocol import EngineClient +from vllm.entrypoints.logger import RequestLogger +from vllm.entrypoints.openai.protocol import ( + DeltaMessage, ErrorResponse, RequestResponseMetadata, + TranscriptionResponse, TranscriptionResponseStreamChoice, + TranscriptionStreamResponse, TranslationResponse, + TranslationResponseStreamChoice, TranslationStreamResponse, UsageInfo) +from vllm.entrypoints.openai.serving_engine import (OpenAIServing, + SpeechToTextRequest) +from vllm.entrypoints.openai.serving_models import OpenAIServingModels +from vllm.inputs.data import PromptType +from vllm.logger import init_logger +from vllm.outputs import RequestOutput +from vllm.transformers_utils.processor import cached_get_processor +from vllm.utils import PlaceholderModule + +try: + import librosa +except ImportError: + librosa = PlaceholderModule("librosa") # type: ignore[assignment] + +SpeechToTextResponse = Union[TranscriptionResponse, TranslationResponse] +T = TypeVar("T", bound=SpeechToTextResponse) + +logger = init_logger(__name__) + +# From https://platform.openai.com/docs/guides/speech-to-text/supported-languages +# TODO these configs should live somewhere with the model so we can support +# additional ones + +ISO639_1_SUPPORTED_LANGS = { + "af": "Afrikaans", + "ar": "Arabic", + "hy": "Armenian", + "az": "Azerbaijani", + "be": "Belarusian", + "bs": "Bosnian", + "bg": "Bulgarian", + "ca": "Catalan", + "zh": "Chinese", + "hr": "Croatian", + "cs": "Czech", + "da": "Danish", + "nl": "Dutch", + "en": "English", + "et": "Estonian", + "fi": "Finnish", + "fr": "French", + "gl": "Galician", + "de": "German", + "el": "Greek", + "he": "Hebrew", + "hi": "Hindi", + "hu": "Hungarian", + "is": "Icelandic", + "id": "Indonesian", + "it": "Italian", + "ja": "Japanese", + "kn": "Kannada", + "kk": "Kazakh", + "ko": "Korean", + "lv": "Latvian", + "lt": "Lithuanian", + "mk": "Macedonian", + "ms": "Malay", + "mr": "Marathi", + "mi": "Maori", + "ne": "Nepali", + "no": "Norwegian", + "fa": "Persian", + "pl": "Polish", + "pt": "Portuguese", + "ro": "Romanian", + "ru": "Russian", + "sr": "Serbian", + "sk": "Slovak", + "sl": "Slovenian", + "es": "Spanish", + "sw": "Swahili", + "sv": "Swedish", + "tl": "Tagalog", + "ta": "Tamil", + "th": "Thai", + "tr": "Turkish", + "uk": "Ukrainian", + "ur": "Urdu", + "vi": "Vietnamese", + "cy": "Welsh" +} +ISO639_1_OTHER_LANGS = { + "lo": "Lao", + "jw": "Javanese", + "tk": "Turkmen", + "yi": "Yiddish", + "so": "Somali", + "bn": "Bengali", + "nn": "Norwegian Nynorsk", + "si": "Sinhala", + "yo": "Yoruba", + "sa": "Sanskrit", + "mi": "Māori", + "fo": "Faroese", # codespell:ignore + "mt": "Maltese", + "tg": "Tajik", + "mg": "Malagasy", + "haw": "Hawaiian", + "km": "Khmer", + "br": "Breton", + "ps": "Pashto", + "ln": "Lingala", + "la": "Latin", + "ml": "Malayalam", + "sq": "Albanian", + "su": "Sundanese", + "eu": "Basque", + "ka": "Georgian", + "uz": "Uzbek", + "sn": "Shona", + "ht": "Haitian", + "as": "Assamese", + "mn": "Mongolian", + "te": "Telugu", + "pa": "Panjabi", + "tt": "Tatar", + "gu": "Gujarati", + "oc": "Occitan", + "ha": "Hausa", + "ba": "Bashkir", + "my": "Burmese", + "sd": "Sindhi", + "am": "Amharic", + "lb": "Luxembourgish", + "bo": "Tibetan" +} + +# As per https://platform.openai.com/docs/guides/speech-to-text#overview. +# TODO configurable +MAX_AUDIO_CLIP_FILESIZE_MB = 25 +OVERLAP_CHUNK_SECOND = 1 +MIN_ENERGY_WINDOW_SIZE = 1600 # 1600 ~ 100ms for 16000 Hz audio + + +class OpenAISpeechToText(OpenAIServing): + """Base class for speech-to-text operations like transcription and + translation.""" + + def __init__( + self, + engine_client: EngineClient, + model_config: ModelConfig, + models: OpenAIServingModels, + *, + request_logger: Optional[RequestLogger], + return_tokens_as_token_ids: bool = False, + task_type: Literal["transcribe", "translate"] = "transcribe", + ): + super().__init__(engine_client=engine_client, + model_config=model_config, + models=models, + request_logger=request_logger, + return_tokens_as_token_ids=return_tokens_as_token_ids) + + self.default_sampling_params = ( + self.model_config.get_diff_sampling_param()) + processor = cached_get_processor(model_config.model) + self.max_audio_clip_s = processor.feature_extractor.chunk_length + self.model_sr = processor.feature_extractor.sampling_rate + self.hop_length = processor.feature_extractor.hop_length + self.task_type = task_type + + if self.default_sampling_params: + logger.info( + "Overwriting default completion sampling param with: %s", + self.default_sampling_params) + + async def _preprocess_speech_to_text( + self, + request: SpeechToTextRequest, + audio_data: bytes, + ) -> tuple[list[PromptType], float]: + # Validate request + # TODO language should be optional and can be guessed. + # For now we default to en. See + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/generation_whisper.py#L1520 + lang_token = f"<|{request.language}|>" if request.language else "<|en|>" + if request.language: + if request.language in ISO639_1_SUPPORTED_LANGS: + pass + elif request.language in ISO639_1_OTHER_LANGS: + logger.warning( + "The selected language %s has limited accuracy with" + " reported WER>=0.5. Results may be less accurate " + "for this choice.", request.language) + else: + raise ValueError( + f"Unsupported language: {request.language}." + "Language should be one of:" + + f" {list(ISO639_1_SUPPORTED_LANGS.values())}" + + f"or {list(ISO639_1_OTHER_LANGS.values())}") + + if len(audio_data) / 1024**2 > MAX_AUDIO_CLIP_FILESIZE_MB: + raise ValueError("Maximum file size exceeded.") + + with io.BytesIO(audio_data) as bytes_: + # NOTE resample to model SR here for efficiency. This is also a + # pre-requisite for chunking, as it assumes Whisper SR. + y, sr = librosa.load(bytes_, sr=self.model_sr) + + duration = librosa.get_duration(y=y, sr=sr) + chunks = [y] if duration < 30 else self._split_audio(y, int(sr)) + prompts = [] + for chunk in chunks: + prompt = { + "encoder_prompt": { + "prompt": "", + "multi_modal_data": { + "audio": (chunk, sr), + }, + }, + "decoder_prompt": + (f"<|startoftranscript|>{lang_token}" + f"<|{self.task_type}|><|notimestamps|>{request.prompt}") + } + prompts.append(cast(PromptType, prompt)) + return prompts, duration + + async def _create_speech_to_text( + self, + audio_data: bytes, + request: SpeechToTextRequest, + raw_request: Request, + response_class: type[T], + stream_generator_method: Callable[..., AsyncGenerator[str, None]], + ) -> Union[T, AsyncGenerator[str, None], ErrorResponse]: + """Base method for speech-to-text operations like transcription and + translation.""" + error_check_ret = await self._check_model(request) + if error_check_ret is not None: + return error_check_ret + + # If the engine is dead, raise the engine's DEAD_ERROR. + # This is required for the streaming case, where we return a + # success status before we actually start generating text :). + if self.engine_client.errored: + raise self.engine_client.dead_error + + if request.response_format not in ['text', 'json']: + return self.create_error_response( + "Currently only support response_format `text` or `json`") + + request_id = f"{self.task_type}-{self._base_request_id(raw_request)}" + + request_metadata = RequestResponseMetadata(request_id=request_id) + if raw_request: + raw_request.state.request_metadata = request_metadata + + try: + ( + lora_request, + prompt_adapter_request, + ) = self._maybe_get_adapters(request) + + if lora_request: + return self.create_error_response( + "Currently do not support LoRA for " + f"{self.task_type.title()}.") + if prompt_adapter_request: + return self.create_error_response( + f"Currently do not support PromptAdapter for " + f"{self.task_type.title()}.") + + prompts, duration_s = await self._preprocess_speech_to_text( + request=request, + audio_data=audio_data, + ) + + except ValueError as e: + logger.exception("Error in preprocessing prompt inputs") + return self.create_error_response(str(e)) + + list_result_generator: Optional[list[AsyncGenerator[RequestOutput, + None]]] = None + try: + # Unlike most decoder-only models, whisper generation length is not + # constrained by the size of the input audio, which is mapped to a + # fixed-size log-mel-spectogram. + default_max_tokens = self.model_config.max_model_len + sampling_params = request.to_sampling_params( + default_max_tokens, self.default_sampling_params) + + self._log_inputs( + request_id, + prompts[0]['decoder_prompt'], # type: ignore + params=sampling_params, + lora_request=None, + prompt_adapter_request=None) + + list_result_generator = [ + self.engine_client.generate( + prompt, + sampling_params, + request_id, + ) for prompt in prompts + ] + except ValueError as e: + # TODO: Use a vllm-specific Validation Error + return self.create_error_response(str(e)) + + if request.stream: + return stream_generator_method(request, list_result_generator, + request_id, request_metadata, + duration_s) + # Non-streaming response. + try: + assert list_result_generator is not None + text = "" + for result_generator in list_result_generator: + async for op in result_generator: + text += op.outputs[0].text + return cast(T, response_class(text=text)) + except asyncio.CancelledError: + return self.create_error_response("Client disconnected") + except ValueError as e: + # TODO: Use a vllm-specific Validation Error + return self.create_error_response(str(e)) + + async def _speech_to_text_stream_generator( + self, + request: SpeechToTextRequest, + list_result_generator: list[AsyncGenerator[RequestOutput, None]], + request_id: str, + request_metadata: RequestResponseMetadata, + audio_duration_s: float, + chunk_object_type: Literal["translation.chunk", "transcription.chunk"], + response_stream_choice_class: Union[ + type[TranscriptionResponseStreamChoice], + type[TranslationResponseStreamChoice]], + stream_response_class: Union[type[TranscriptionStreamResponse], + type[TranslationStreamResponse]], + ) -> AsyncGenerator[str, None]: + created_time = int(time.time()) + model_name = request.model + + completion_tokens = 0 + num_prompt_tokens = 0 + + include_usage = request.stream_include_usage \ + if request.stream_include_usage else False + include_continuous_usage = request.stream_continuous_usage_stats\ + if include_usage and request.stream_continuous_usage_stats\ + else False + + try: + for result_generator in list_result_generator: + async for res in result_generator: + # On first result. + if res.prompt_token_ids is not None: + # Do not account the 4-tokens `<|startoftranscript|>..` + # Could be negative when language token + # is not specified. + num_prompt_tokens = max( + len(res.prompt_token_ids) - 4, 0) + # NOTE(NickLucche) user can't pass encoder + # prompts directly at least not to Whisper. + # One indicator of the encoder amount of processing + # is the log-mel spectogram length. + num_prompt_tokens += ceil( + audio_duration_s * self.model_sr / self.hop_length) + + # We need to do it here, because if there are exceptions in + # the result_generator, it needs to be sent as the FIRST + # response (by the try...catch). + + # Just one output (n=1) supported. + assert len(res.outputs) == 1 + output = res.outputs[0] + + delta_message = DeltaMessage(content=output.text) + completion_tokens += len(output.token_ids) + + if output.finish_reason is None: + # Still generating, send delta update. + choice_data = response_stream_choice_class( + delta=delta_message) + else: + # Model is finished generating. + choice_data = response_stream_choice_class( + delta=delta_message, + finish_reason=output.finish_reason, + stop_reason=output.stop_reason) + + chunk = stream_response_class(id=request_id, + object=chunk_object_type, + created=created_time, + choices=[choice_data], + model=model_name) + + # handle usage stats if requested & if continuous + if include_continuous_usage: + chunk.usage = UsageInfo( + prompt_tokens=num_prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=num_prompt_tokens + completion_tokens, + ) + + data = chunk.model_dump_json(exclude_unset=True) + yield f"data: {data}\n\n" + + # Once the final token is handled, if stream_options.include_usage + # is sent, send the usage. + if include_usage: + final_usage = UsageInfo(prompt_tokens=num_prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=num_prompt_tokens + + completion_tokens) + + final_usage_chunk = stream_response_class( + id=request_id, + object=chunk_object_type, + created=created_time, + choices=[], + model=model_name, + usage=final_usage) + final_usage_data = (final_usage_chunk.model_dump_json( + exclude_unset=True, exclude_none=True)) + yield f"data: {final_usage_data}\n\n" + + # report to FastAPI middleware aggregate usage across all choices + request_metadata.final_usage_info = UsageInfo( + prompt_tokens=num_prompt_tokens, + completion_tokens=completion_tokens, + total_tokens=num_prompt_tokens + completion_tokens) + + except Exception as e: + # TODO: Use a vllm-specific Validation Error + logger.exception("Error in %s stream generator.", self.task_type) + data = self.create_streaming_error_response(str(e)) + yield f"data: {data}\n\n" + # Send the final done message after all response.n are finished + yield "data: [DONE]\n\n" + + def _split_audio(self, audio_data: np.ndarray, + sample_rate: int) -> list[np.ndarray]: + chunk_size = sample_rate * self.max_audio_clip_s + overlap_size = sample_rate * OVERLAP_CHUNK_SECOND + chunks = [] + i = 0 + while i < audio_data.shape[-1]: + if i + chunk_size >= audio_data.shape[-1]: + # handle last chunk + chunks.append(audio_data[..., i:]) + break + + # Find the best split point in the overlap region + search_start = i + chunk_size - overlap_size + search_end = min(i + chunk_size, audio_data.shape[-1]) + split_point = self._find_split_point(audio_data, search_start, + search_end) + + # Extract chunk up to the split point + chunks.append(audio_data[..., i:split_point]) + i = split_point + return chunks + + def _find_split_point(self, wav: np.ndarray, start_idx: int, + end_idx: int) -> int: + """Find the best point to split audio by + looking for silence or low amplitude. + Args: + wav: Audio tensor [1, T] + start_idx: Start index of search region + end_idx: End index of search region + Returns: + Index of best splitting point + """ + segment = wav[start_idx:end_idx] + + # Calculate RMS energy in small windows + min_energy = math.inf + quietest_idx = 0 + for i in range(0, + len(segment) - MIN_ENERGY_WINDOW_SIZE, + MIN_ENERGY_WINDOW_SIZE): + window = segment[i:i + MIN_ENERGY_WINDOW_SIZE] + energy = (window**2).mean()**0.5 + if energy < min_energy: + quietest_idx = i + start_idx + min_energy = energy + return quietest_idx From 02c97d9a92edca6f8675923afe0f1cf657305e38 Mon Sep 17 00:00:00 2001 From: Dipika Sikka Date: Wed, 25 Jun 2025 14:28:19 -0400 Subject: [PATCH 131/211] [Quantization] Add compressed-tensors emulations support for NVFP4 (#19879) Signed-off-by: Dipika Sikka Signed-off-by: Dipika --- vllm/envs.py | 7 +++++++ .../compressed_tensors/compressed_tensors.py | 4 +++- .../schemes/compressed_tensors_w4a4_nvfp4.py | 16 ++++++++++++++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/vllm/envs.py b/vllm/envs.py index 04c80807cd4dc..43fc0f5a36e83 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -133,6 +133,7 @@ if TYPE_CHECKING: VLLM_EXECUTE_MODEL_TIMEOUT_SECONDS: int = 300 VLLM_KV_CACHE_LAYOUT: Optional[str] = None VLLM_COMPUTE_NANS_IN_LOGITS: bool = False + VLLM_USE_NVFP4_CT_EMULATIONS: bool = False def get_default_cache_root(): @@ -918,6 +919,12 @@ environment_variables: dict[str, Callable[[], Any]] = { # or bad hardware but it may add compute overhead. "VLLM_COMPUTE_NANS_IN_LOGITS": lambda: bool(int(os.getenv("VLLM_COMPUTE_NANS_IN_LOGITS", "0"))), + + # Controls whether or not emulations are used for NVFP4 + # generations on machines < 100 for compressed-tensors + # models + "VLLM_USE_NVFP4_CT_EMULATIONS": + lambda: bool(int(os.getenv("VLLM_USE_NVFP4_CT_EMULATIONS", "0"))) } # --8<-- [end:env-vars-definition] diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py index e5702c871cc9a..d21abb2741a29 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py @@ -13,6 +13,7 @@ from compressed_tensors.quantization import (QuantizationArgs, QuantizationType) from pydantic import BaseModel +import vllm.envs as envs from vllm.logger import init_logger from vllm.model_executor.layers.fused_moe import FusedMoE from vllm.model_executor.layers.linear import (LinearBase, LinearMethodBase, @@ -374,7 +375,8 @@ class CompressedTensorsConfig(QuantizationConfig): if is_activation_quantization_format(self.quant_format): if self._is_fp4a4_nvfp4(weight_quant, input_quant): - if CompressedTensorsW4A4Fp4.cutlass_fp4_supported(): + if CompressedTensorsW4A4Fp4.cutlass_fp4_supported( + ) or envs.VLLM_USE_NVFP4_CT_EMULATIONS: return CompressedTensorsW4A4Fp4() else: logger.warning_once( diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py index 32718972a627a..ec1d4a6c0efae 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py @@ -4,11 +4,14 @@ from typing import Callable, Optional import torch from torch.nn.parameter import Parameter +import vllm.envs as envs from vllm._custom_ops import (cutlass_scaled_fp4_mm, cutlass_scaled_mm_supports_fp4, scaled_fp4_quant) from vllm.logger import init_logger from vllm.model_executor.layers.quantization.compressed_tensors.schemes import ( CompressedTensorsScheme) +from vllm.model_executor.layers.quantization.utils.nvfp4_emulation_utils import ( # noqa: E501 + run_nvfp4_emulations) from vllm.model_executor.parameter import (GroupQuantScaleParameter, ModelWeightParameter, PerTensorScaleParameter) @@ -26,6 +29,8 @@ class CompressedTensorsW4A4Fp4(CompressedTensorsScheme): @classmethod def get_min_capability(cls) -> int: + if envs.VLLM_USE_NVFP4_CT_EMULATIONS: + return 80 return 100 @classmethod @@ -129,6 +134,17 @@ class CompressedTensorsW4A4Fp4(CompressedTensorsScheme): x: torch.Tensor, bias: Optional[torch.Tensor] = None) -> torch.Tensor: + if envs.VLLM_USE_NVFP4_CT_EMULATIONS: + out = run_nvfp4_emulations( + x=x, + input_global_scale=layer.input_global_scale, + weight=layer.weight, + weight_scale_swizzled=layer.weight_scale_swizzled, + weight_global_scale=layer.weight_global_scale) + if bias is not None: + out = out + bias + return out + output_dtype = x.dtype output_shape = [x.shape[0], layer.weight.shape[0]] From 23a04e08958e75b5528dae310c1c5774dd03551d Mon Sep 17 00:00:00 2001 From: lsz05 Date: Thu, 26 Jun 2025 04:07:45 +0900 Subject: [PATCH 132/211] [Fix] Support cls pooling in ModernBertPooler (#20067) Signed-off-by: shengzhe.li --- vllm/model_executor/models/modernbert.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/vllm/model_executor/models/modernbert.py b/vllm/model_executor/models/modernbert.py index 7c1f889e8f385..9d619b38d38d6 100644 --- a/vllm/model_executor/models/modernbert.py +++ b/vllm/model_executor/models/modernbert.py @@ -258,6 +258,7 @@ class ModernBertPooler(nn.Module): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size, config.classifier_bias) + self.pooling_type = config.classifier_pooling self.act = nn.GELU() self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps, @@ -265,7 +266,13 @@ class ModernBertPooler(nn.Module): def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: pooled_output = hidden_states - pooled_output = pooled_output.mean(dim=0, keepdim=False) + if self.pooling_type == "mean": + pooled_output = pooled_output.mean(dim=0, keepdim=False) + elif self.pooling_type == "cls": + pooled_output = pooled_output[0, :] + else: + raise ValueError("Pooling type should be either `cls` or `mean`, " + f"but got {self.pooling_type}") pooled_output = self.norm(self.act(self.dense(pooled_output))) return pooled_output From 8b8c209e352899c870fe348013a99a91262bf1e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Eldar=20Kurti=C4=87?= Date: Wed, 25 Jun 2025 21:08:03 +0200 Subject: [PATCH 133/211] static_scaled_fp8_quant should not run when scale.numel is not 1 (#20076) --- vllm/_custom_ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index b16fef8714193..8ebe694eefd0e 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -1276,7 +1276,7 @@ def scaled_fp8_quant( torch.ops._C.dynamic_scaled_fp8_quant(output, input, scale) else: # num_token_padding not implemented for this case - assert (scale.numel() == 1 or num_token_padding is None) + assert (scale.numel() == 1 and num_token_padding is None) torch.ops._C.static_scaled_fp8_quant(output, input, scale) return output, scale From 4734704b30efff60feebf5f524ca9a3f5f8ac23a Mon Sep 17 00:00:00 2001 From: lkchen Date: Wed, 25 Jun 2025 12:17:45 -0700 Subject: [PATCH 134/211] [PD] let toy proxy handle /chat/completions (#19730) Signed-off-by: Linkun --- .../nixl_integration/toy_proxy_server.py | 22 +++++++++++++------ 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/tests/v1/kv_connector/nixl_integration/toy_proxy_server.py b/tests/v1/kv_connector/nixl_integration/toy_proxy_server.py index 3d720fe0cafee..c58cb0286f139 100644 --- a/tests/v1/kv_connector/nixl_integration/toy_proxy_server.py +++ b/tests/v1/kv_connector/nixl_integration/toy_proxy_server.py @@ -196,8 +196,7 @@ async def stream_service_response(client_info: dict, endpoint: str, yield chunk -@app.post("/v1/completions") -async def handle_completions(request: Request): +async def _handle_completions(api: str, request: Request): try: req_data = await request.json() request_id = str(uuid.uuid4()) @@ -206,9 +205,8 @@ async def handle_completions(request: Request): prefill_client_info = get_next_client(request.app, 'prefill') # Send request to prefill service - response = await send_request_to_service(prefill_client_info, - "/completions", req_data, - request_id) + response = await send_request_to_service(prefill_client_info, api, + req_data, request_id) # Extract the needed fields response_json = response.json() @@ -224,7 +222,7 @@ async def handle_completions(request: Request): # Stream response from decode service async def generate_stream(): async for chunk in stream_service_response(decode_client_info, - "/completions", + api, req_data, request_id=request_id): yield chunk @@ -237,12 +235,22 @@ async def handle_completions(request: Request): import traceback exc_info = sys.exc_info() print("Error occurred in disagg prefill proxy server" - " - completions endpoint") + f" - {api} endpoint") print(e) print("".join(traceback.format_exception(*exc_info))) raise +@app.post("/v1/completions") +async def handle_completions(request: Request): + return await _handle_completions("/completions", request) + + +@app.post("/v1/chat/completions") +async def handle_chat_completions(request: Request): + return await _handle_completions("/chat/completions", request) + + @app.get("/healthcheck") async def healthcheck(): """Simple endpoint to check if the server is running.""" From c40692bf9adc77cde0c802c6c586b7cf143c39c2 Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Wed, 25 Jun 2025 13:38:53 -0700 Subject: [PATCH 135/211] [Misc] Add parallel state `node_count` function (#20045) Signed-off-by: Nick Hill --- .buildkite/test-pipeline.yaml | 2 + tests/distributed/test_node_count.py | 43 ++++++++++++++++++++++ vllm/distributed/parallel_state.py | 55 +++++++++++++++++++++++++++- 3 files changed, 98 insertions(+), 2 deletions(-) create mode 100644 tests/distributed/test_node_count.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index d6c9ee680abf8..1536759c06bd2 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -619,11 +619,13 @@ steps: commands: - # the following commands are for the first node, with ip 192.168.10.10 (ray environment already set up) - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed' + - NUM_NODES=2 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_node_count.py | grep 'Node count test passed' - python3 ../examples/offline_inference/data_parallel.py --dp-size=2 --tp-size=1 --node-size=2 --node-rank=0 --master-addr=192.168.10.10 --master-port=12345 --enforce-eager --trust-remote-code - VLLM_MULTI_NODE=1 pytest -v -s distributed/test_multi_node_assignment.py - VLLM_MULTI_NODE=1 pytest -v -s distributed/test_pipeline_parallel.py - # the following commands are for the second node, with ip 192.168.10.11 (ray environment already set up) - VLLM_TEST_SAME_HOST=0 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_same_node.py | grep 'Same node test passed' + - NUM_NODES=2 torchrun --nnodes 2 --nproc-per-node=2 --rdzv_backend=c10d --rdzv_endpoint=192.168.10.10 distributed/test_node_count.py | grep 'Node count test passed' - python3 ../examples/offline_inference/data_parallel.py --dp-size=2 --tp-size=1 --node-size=2 --node-rank=1 --master-addr=192.168.10.10 --master-port=12345 --enforce-eager --trust-remote-code - label: Distributed Tests (2 GPUs) # 40min diff --git a/tests/distributed/test_node_count.py b/tests/distributed/test_node_count.py new file mode 100644 index 0000000000000..e3c36ef5ef379 --- /dev/null +++ b/tests/distributed/test_node_count.py @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import os + +import torch.distributed as dist + +from vllm.distributed.parallel_state import _node_count +from vllm.distributed.utils import StatelessProcessGroup +from vllm.utils import get_ip, get_open_port + +if __name__ == "__main__": + dist.init_process_group(backend="gloo") + + rank = dist.get_rank() + world_size = dist.get_world_size() + + if rank == 0: + port = get_open_port() + ip = get_ip() + dist.broadcast_object_list([ip, port], src=0) + else: + recv = [None, None] + dist.broadcast_object_list(recv, src=0) + ip, port = recv + + stateless_pg = StatelessProcessGroup.create(ip, port, rank, world_size) + + for pg in [dist.group.WORLD, stateless_pg]: + test_result = _node_count(pg) + + # Expected node count based on environment variable) + expected = int(os.environ.get("NUM_NODES", "1")) + + assert test_result == expected, \ + f"Expected {expected} nodes, got {test_result}" + + if pg == dist.group.WORLD: + print(f"Node count test passed! Got {test_result} nodes " + f"when using torch distributed!") + else: + print(f"Node count test passed! Got {test_result} nodes " + f"when using StatelessProcessGroup!") diff --git a/vllm/distributed/parallel_state.py b/vllm/distributed/parallel_state.py index 126160b09553d..50dbbf50e9fcf 100644 --- a/vllm/distributed/parallel_state.py +++ b/vllm/distributed/parallel_state.py @@ -802,6 +802,7 @@ class GroupCoordinator: _WORLD: Optional[GroupCoordinator] = None +_NODE_COUNT: Optional[int] = None def get_world_group() -> GroupCoordinator: @@ -961,10 +962,13 @@ def init_distributed_environment( local_rank = envs.LOCAL_RANK else: local_rank = rank - global _WORLD + global _WORLD, _NODE_COUNT if _WORLD is None: ranks = list(range(torch.distributed.get_world_size())) _WORLD = init_world_group(ranks, local_rank, backend) + _NODE_COUNT = _node_count(_WORLD.cpu_group) + logger.debug("Detected %d nodes in the distributed environment", + _NODE_COUNT) else: assert _WORLD.world_size == torch.distributed.get_world_size(), ( "world group already initialized with a different world size") @@ -1164,6 +1168,13 @@ def get_tensor_model_parallel_rank(): return get_tp_group().rank_in_group +def get_node_count() -> int: + """Return the total number of nodes in the distributed environment. """ + assert _NODE_COUNT is not None, ( + "distributed environment is not initialized") + return _NODE_COUNT + + def destroy_model_parallel(): """Set the groups to none and destroy them.""" global _TP @@ -1189,10 +1200,11 @@ def destroy_model_parallel(): def destroy_distributed_environment(): - global _WORLD + global _WORLD, _NODE_COUNT if _WORLD: _WORLD.destroy() _WORLD = None + _NODE_COUNT = None if torch.distributed.is_initialized(): torch.distributed.destroy_process_group() @@ -1301,3 +1313,42 @@ def in_the_same_node_as(pg: Union[ProcessGroup, StatelessProcessGroup], aggregated_data += rank_data return [x == 1 for x in aggregated_data.tolist()] + + +def _node_count(pg: Union[ProcessGroup, StatelessProcessGroup]) -> int: + """ + Returns the total number of nodes in the process group. + + Args: + pg: The process group to analyze + + Returns: + int: The total number of nodes + """ + if isinstance(pg, ProcessGroup): + world_size = torch.distributed.get_world_size(group=pg) + else: + world_size = pg.world_size + + if world_size == 1: + return 1 + + # Build node assignment map + node_assignment = [0] * world_size # rank -> node_id + next_node_id = 0 + + for current_rank in range(world_size): + if node_assignment[current_rank] != 0: + continue # Already assigned to a node + + # Assign current rank to a new node + next_node_id += 1 + node_assignment[current_rank] = next_node_id + + # Find all ranks on the same node as current_rank + same_node_flags = in_the_same_node_as(pg, current_rank) + for other_rank, is_same_node in enumerate(same_node_flags): + if is_same_node and node_assignment[other_rank] == 0: + node_assignment[other_rank] = next_node_id + + return next_node_id From 4e0db57fff89cc968794650c9b9caf4ccc51b399 Mon Sep 17 00:00:00 2001 From: QiliangCui Date: Wed, 25 Jun 2025 13:48:17 -0700 Subject: [PATCH 136/211] Fix the path to the testing script. (#20082) Signed-off-by: Qiliang Cui --- .buildkite/scripts/tpu/docker_run_bm.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/scripts/tpu/docker_run_bm.sh b/.buildkite/scripts/tpu/docker_run_bm.sh index 6705da03e3d76..715afce5f71ab 100755 --- a/.buildkite/scripts/tpu/docker_run_bm.sh +++ b/.buildkite/scripts/tpu/docker_run_bm.sh @@ -68,7 +68,7 @@ docker run \ echo "run script..." echo -docker exec "$CONTAINER_NAME" /bin/bash -c ".buildkite/scripts/hardware_ci/run_bm.sh" +docker exec "$CONTAINER_NAME" /bin/bash -c ".buildkite/scripts/tpu/run_bm.sh" echo "copy result back..." VLLM_LOG="$LOG_ROOT/$TEST_NAME"_vllm_log.txt From 9f0608fc166ba0173dac4a470753464b969c7043 Mon Sep 17 00:00:00 2001 From: zhrrr <43847754+izhuhaoran@users.noreply.github.com> Date: Thu, 26 Jun 2025 05:03:17 +0800 Subject: [PATCH 137/211] [Bugfix] default set cuda_graph_sizes to max_num_seqs for v1 engine (#20062) Signed-off-by: izhuhaoran --- vllm/config.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 96ea47a0dce38..e90ad5e9c8b65 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2042,11 +2042,12 @@ class SchedulerConfig: NOTE: This will be replaced by speculative config in the future; it is present to enable correctness tests until then.""" - cuda_graph_sizes: list[int] = field(default_factory=lambda: [512]) - """Cuda graph capture sizes, default is 512. - 1. if one value is provided, then the capture list would follow the + cuda_graph_sizes: list[int] = field(default_factory=list) + """Cuda graph capture sizes + 1. if none provided, then default set to [max_num_seqs] + 2. if one value is provided, then the capture list would follow the pattern: [1, 2, 4] + [i for i in range(8, cuda_graph_sizes + 1, 8)] - 2. more than one value (e.g. 1 2 128) is provided, then the capture list + 3. more than one value (e.g. 1 2 128) is provided, then the capture list will follow the provided list.""" delay_factor: float = 0.0 @@ -2211,6 +2212,10 @@ class SchedulerConfig: self.max_num_partial_prefills, self.max_long_partial_prefills, self.long_prefill_token_threshold) + # If cuda_graph_sizes is not specified, default set to [max_num_seqs]. + if not self.cuda_graph_sizes: + self.cuda_graph_sizes = [self.max_num_seqs] + @model_validator(mode='after') def _verify_args(self) -> Self: if (self.max_num_batched_tokens < self.max_model_len From 2cc206997012057152f194c0f25e19e3ab3297ea Mon Sep 17 00:00:00 2001 From: Chengji Yao Date: Wed, 25 Jun 2025 14:24:10 -0700 Subject: [PATCH 138/211] [TPU][Bugfix] fix kv cache padding (#20048) Signed-off-by: Chengji Yao --- vllm/v1/attention/backends/pallas.py | 8 +------- vllm/v1/worker/tpu_worker.py | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/vllm/v1/attention/backends/pallas.py b/vllm/v1/attention/backends/pallas.py index 1069578cfd292..e0aeea439794a 100644 --- a/vllm/v1/attention/backends/pallas.py +++ b/vllm/v1/attention/backends/pallas.py @@ -48,13 +48,7 @@ class PallasAttentionBackend(AttentionBackend): ) -> tuple[int, ...]: padded_head_size = cdiv( head_size, TPU_HEAD_SIZE_ALIGNMENT) * TPU_HEAD_SIZE_ALIGNMENT - num_blocks = num_blocks * head_size // padded_head_size - if padded_head_size != head_size: - logger.warning_once( - "head size is padded to %d, and num_blocks is adjusted to %d" - " accordingly", padded_head_size, num_blocks) - head_size = padded_head_size - return (num_blocks, block_size, num_kv_heads * 2, head_size) + return (num_blocks, block_size, num_kv_heads * 2, padded_head_size) @staticmethod def swap_blocks( diff --git a/vllm/v1/worker/tpu_worker.py b/vllm/v1/worker/tpu_worker.py index 87af8e476707c..a64ce881fe318 100644 --- a/vllm/v1/worker/tpu_worker.py +++ b/vllm/v1/worker/tpu_worker.py @@ -18,7 +18,8 @@ from vllm.distributed import (ensure_model_parallel_initialized, from vllm.logger import init_logger from vllm.lora.request import LoRARequest from vllm.model_executor import set_random_seed -from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE +from vllm.utils import STR_DTYPE_TO_TORCH_DTYPE, cdiv +from vllm.v1.attention.backends.pallas import TPU_HEAD_SIZE_ALIGNMENT from vllm.v1.core.sched.output import SchedulerOutput from vllm.v1.kv_cache_interface import (AttentionSpec, KVCacheConfig, KVCacheSpec) @@ -221,7 +222,17 @@ class TPUWorker: usable_memory_size = int(total_memory_size * self.cache_config.gpu_memory_utilization) tpu_kv_cache_bytes = max(usable_memory_size - profiled, 0) - + head_size = self.model_config.get_head_size() + if head_size > 0: + padded_head_size = cdiv( + head_size, TPU_HEAD_SIZE_ALIGNMENT) * TPU_HEAD_SIZE_ALIGNMENT + if padded_head_size != head_size: + logger.warning_once("head size is padded to %d", + padded_head_size) + # We adjust the usable memory size for the KV cache to prevent OOM + # errors, even after padding the head_size. + tpu_kv_cache_bytes = (tpu_kv_cache_bytes * head_size // + padded_head_size) return int(tpu_kv_cache_bytes) def execute_model( From 55c65ab495f5d270f65f89dcc737e9694b278002 Mon Sep 17 00:00:00 2001 From: Nick Hill Date: Wed, 25 Jun 2025 15:19:44 -0700 Subject: [PATCH 139/211] [P/D] Avoid stranding blocks in P when aborted in D's waiting queue (#19223) Signed-off-by: Nick Hill --- .../kv_transfer/kv_connector/v1/nixl_connector.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index a962a9241d73e..92a9184d318c7 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -298,8 +298,21 @@ class NixlConnectorScheduler: logger.debug( "NIXLConnector request_finished, request_status=%s, " "kv_transfer_params=%s", request.status, params) + if not params: + return False, None - if (params is None or not params.get("do_remote_decode") + if params.get("do_remote_prefill"): + # If do_remote_prefill is still True when the request is finished, + # update_state_after_alloc must not have been called (the request + # must have been aborted before it was scheduled). + # To avoid stranding the prefill blocks in the prefill instance, + # we must add empty block_ids to _reqs_need_recv so that our + # worker side will notify and free blocks in the prefill instance. + self._reqs_need_recv[request.request_id] = (request, []) + params["do_remote_prefill"] = False + return False, None + + if (not params.get("do_remote_decode") or request.status != RequestStatus.FINISHED_LENGTH_CAPPED): return False, None From 2d7620c3ebb3a3e0e600dd2781d7e5dfbd1c2382 Mon Sep 17 00:00:00 2001 From: Chenyaaang <42742451+Chenyaaang@users.noreply.github.com> Date: Wed, 25 Jun 2025 15:51:02 -0700 Subject: [PATCH 140/211] [TPU] Add TPU specific var VLLM_TPU_MOST_MODEL_LEN (#19919) Signed-off-by: Chenyaaang --- tests/v1/tpu/worker/test_tpu_model_runner.py | 14 ++ vllm/envs.py | 3 + vllm/platforms/tpu.py | 10 - vllm/v1/attention/backends/pallas.py | 5 + vllm/v1/worker/tpu_model_runner.py | 228 +++++++++++++------ 5 files changed, 184 insertions(+), 76 deletions(-) diff --git a/tests/v1/tpu/worker/test_tpu_model_runner.py b/tests/v1/tpu/worker/test_tpu_model_runner.py index d22ddf5c7e581..25839d0897a4c 100644 --- a/tests/v1/tpu/worker/test_tpu_model_runner.py +++ b/tests/v1/tpu/worker/test_tpu_model_runner.py @@ -587,3 +587,17 @@ def test_init_kv_cache_with_kv_sharing_valid(): assert len(kv_cache_config.kv_cache_groups[0].layer_names) == 2 assert kv_cache_config.kv_cache_groups[0].layer_names[0] == layer_0 assert kv_cache_config.kv_cache_groups[0].layer_names[1] == layer_1 + + +def test_most_model_len(monkeypatch: pytest.MonkeyPatch): + monkeypatch.setenv("VLLM_TPU_MOST_MODEL_LEN", "2048") + vllm_config = get_vllm_config() + vllm_config.model_config.max_model_len = 32000 + vllm_config.scheduler_config.max_num_seqs = 1200 + model_runner = get_model_runner(vllm_config) + + # verify model runner will adjust num_reqs to avoid SMEM OOM. + assert model_runner.num_reqs_most_model_len == 1200 + # num_page_per_req = 32k // 128 + # num_reqs = 1024 ** 2 // 2 // num_page_per_req // 4 = 524 + assert model_runner.num_reqs_max_model_len == 524 diff --git a/vllm/envs.py b/vllm/envs.py index 43fc0f5a36e83..c9c81603a75a8 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -119,6 +119,7 @@ if TYPE_CHECKING: VLLM_MARLIN_USE_ATOMIC_ADD: bool = False VLLM_V0_USE_OUTLINES_CACHE: bool = False VLLM_TPU_BUCKET_PADDING_GAP: int = 0 + VLLM_TPU_MOST_MODEL_LEN: Optional[int] = None VLLM_USE_DEEP_GEMM: bool = False VLLM_XGRAMMAR_CACHE_MB: int = 0 VLLM_MSGPACK_ZERO_COPY_THRESHOLD: int = 256 @@ -833,6 +834,8 @@ environment_variables: dict[str, Callable[[], Any]] = { "VLLM_TPU_BUCKET_PADDING_GAP": lambda: int(os.environ["VLLM_TPU_BUCKET_PADDING_GAP"]) if "VLLM_TPU_BUCKET_PADDING_GAP" in os.environ else 0, + "VLLM_TPU_MOST_MODEL_LEN": + lambda: maybe_convert_int(os.environ.get("VLLM_TPU_MOST_MODEL_LEN", None)), # Allow use of DeepGemm kernels for fused moe ops. "VLLM_USE_DEEP_GEMM": diff --git a/vllm/platforms/tpu.py b/vllm/platforms/tpu.py index 07e52017f5a53..0387e348965d7 100644 --- a/vllm/platforms/tpu.py +++ b/vllm/platforms/tpu.py @@ -122,16 +122,6 @@ class TpuPlatform(Platform): PallasAttentionBackend) cache_config.block_size = PallasAttentionBackend.get_page_size( vllm_config) # type: ignore[assignment] - min_page_size = PallasAttentionBackend.get_min_page_size( - vllm_config) - if min_page_size > cache_config.block_size: - logger.warning( - "Increase the page size from %s to %s to make sure there's" - "no SMEM OOM", - cache_config.block_size, - min_page_size, - ) - cache_config.block_size = min_page_size # type: ignore[assignment] parallel_config = vllm_config.parallel_config scheduler_config = vllm_config.scheduler_config diff --git a/vllm/v1/attention/backends/pallas.py b/vllm/v1/attention/backends/pallas.py index e0aeea439794a..ff2862edaa01b 100644 --- a/vllm/v1/attention/backends/pallas.py +++ b/vllm/v1/attention/backends/pallas.py @@ -71,6 +71,11 @@ class PallasAttentionBackend(AttentionBackend): min_page_size = 1 << (min_page_size - 1).bit_length() return min_page_size + @staticmethod + def get_max_num_seqs(model_len: int, page_size: int) -> int: + num_page_per_req = cdiv(model_len, page_size) + return 1024 * 1024 // 2 // num_page_per_req // 4 + # TPU has limited SREGs (scalar registers), if page_size is too small, we # can spill SREGs easily which leads to bad performance. The strategy we # apply here is trying to split max-model-len to 16 pages which make the diff --git a/vllm/v1/worker/tpu_model_runner.py b/vllm/v1/worker/tpu_model_runner.py index 774caa1a3d98f..2d80bac3c9546 100644 --- a/vllm/v1/worker/tpu_model_runner.py +++ b/vllm/v1/worker/tpu_model_runner.py @@ -37,8 +37,8 @@ from vllm.v1.core.encoder_cache_manager import compute_encoder_budget from vllm.v1.kv_cache_interface import (AttentionSpec, FullAttentionSpec, KVCacheConfig, KVCacheSpec, SlidingWindowSpec) -from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsTensors, - ModelRunnerOutput) +from vllm.v1.outputs import (EMPTY_MODEL_RUNNER_OUTPUT, LogprobsLists, + LogprobsTensors, ModelRunnerOutput) from vllm.v1.sample.tpu.metadata import TPUSupportedSamplingMetadata from vllm.v1.sample.tpu.sampler import Sampler as TPUSampler from vllm.v1.utils import bind_kv_cache @@ -150,7 +150,11 @@ class TPUModelRunner(LoRAModelRunnerMixin): self.sliding_window = model_config.get_sliding_window() self.block_size = cache_config.block_size self.max_model_len = model_config.max_model_len + self.most_model_len = envs.VLLM_TPU_MOST_MODEL_LEN self.max_num_blocks_per_req = cdiv(self.max_model_len, self.block_size) + self.num_blocks_per_most_len_req = cdiv( + self.most_model_len, + self.block_size) if self.most_model_len is not None else None # InputBatch needs to work with sampling tensors greater than padding # to avoid dynamic shapes. Also, avoid suboptimal alignment. self.max_num_reqs = max(scheduler_config.max_num_seqs, MIN_NUM_SEQS) @@ -220,12 +224,19 @@ class TPUModelRunner(LoRAModelRunnerMixin): dtype=torch.int32, device="cpu") self.positions_np = self.positions_cpu.numpy() - self.block_table_cpu = torch.zeros( (self.max_num_reqs, self.max_num_blocks_per_req), dtype=torch.int32, device="cpu") - + # adjust num_reqs to avoid SMEM OOM. + self.num_reqs_most_model_len = min( + PallasAttentionBackend.get_max_num_seqs(self.most_model_len, + self.block_size), + self.max_num_reqs) if self.most_model_len is not None else None + self.num_reqs_max_model_len = min( + PallasAttentionBackend.get_max_num_seqs(self.max_model_len, + self.block_size), + self.max_num_reqs) self.query_start_loc_cpu = torch.zeros(self.max_num_tokens + 1, dtype=torch.int32, device="cpu", @@ -515,25 +526,50 @@ class TPUModelRunner(LoRAModelRunnerMixin): return kv_cache_spec - def _prepare_inputs(self, scheduler_output: "SchedulerOutput"): - total_num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens - assert total_num_scheduled_tokens > 0 + def _prepare_inputs(self, scheduler_output: "SchedulerOutput", + start_index: int): + assert scheduler_output.total_num_scheduled_tokens > 0 num_reqs = self.input_batch.num_reqs assert num_reqs > 0 + assert start_index < num_reqs # Get the number of scheduled tokens for each request. + use_max_model_len = self.most_model_len is None num_scheduled_tokens_per_req = [] max_num_scheduled_tokens_all_reqs = 0 - for req_id in self.input_batch.req_ids[:num_reqs]: + end_index = start_index + + # Use either most_model_len or max_model_len depending on request size. + for i in range(start_index, num_reqs): + req_id = self.input_batch.req_ids[i] assert req_id is not None num_tokens = scheduler_output.num_scheduled_tokens[req_id] + if not use_max_model_len and num_tokens > self.most_model_len: + use_max_model_len = True num_scheduled_tokens_per_req.append(num_tokens) - max_num_scheduled_tokens_all_reqs = max( - max_num_scheduled_tokens_all_reqs, num_tokens) + if use_max_model_len: + if len(num_scheduled_tokens_per_req) > self.num_reqs_max_model_len: + num_scheduled_tokens_per_req = \ + num_scheduled_tokens_per_req[:self.num_reqs_max_model_len] + end_index = start_index + self.num_reqs_max_model_len + else: + end_index = num_reqs + else: + if len(num_scheduled_tokens_per_req + ) > self.num_reqs_most_model_len: + num_scheduled_tokens_per_req = \ + num_scheduled_tokens_per_req[:self.num_reqs_most_model_len] + end_index = start_index + self.num_reqs_most_model_len + else: + end_index = num_reqs + max_num_scheduled_tokens_all_reqs = max(num_scheduled_tokens_per_req) num_scheduled_tokens_per_req = np.array(num_scheduled_tokens_per_req, dtype=np.int32) + total_num_scheduled_tokens = sum(num_scheduled_tokens_per_req) assert max_num_scheduled_tokens_all_reqs > 0 + num_reqs = len(num_scheduled_tokens_per_req) + # Get request indices. # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2] # For each scheduled token, what are the corresponding req index. @@ -615,13 +651,29 @@ class TPUModelRunner(LoRAModelRunnerMixin): self.input_batch.block_table[0]. slot_mapping_cpu[:padded_total_num_scheduled_tokens].to( self.device)) - block_tables = self.block_table_cpu[:self.max_num_reqs] - block_tables[:num_reqs, :self.max_num_blocks_per_req] = ( - self.input_batch.block_table[0].get_cpu_tensor()[:num_reqs]) + if use_max_model_len: + block_tables = self.block_table_cpu[:self.num_reqs_max_model_len, : + self.max_num_blocks_per_req] + block_tables[:num_reqs, :self.max_num_blocks_per_req] = ( + self.input_batch.block_table[0].get_cpu_tensor()[:num_reqs]) + query_start_loc = self.query_start_loc_cpu[:self. + num_reqs_max_model_len + + 1].to(self.device) + seq_lens = self.seq_lens_cpu[:self.num_reqs_max_model_len].to( + self.device) + else: + block_tables = self.block_table_cpu[:self. + num_reqs_most_model_len, :self. + num_blocks_per_most_len_req] + block_tables[:num_reqs, :self.num_blocks_per_most_len_req] = ( + self.input_batch.block_table[0].get_cpu_tensor() + [:num_reqs, :self.num_blocks_per_most_len_req]) + query_start_loc = self.query_start_loc_cpu[:self. + num_reqs_most_model_len + + 1].to(self.device) + seq_lens = self.seq_lens_cpu[:self.num_reqs_most_model_len].to( + self.device) block_tables = block_tables.to(self.device) - query_start_loc = self.query_start_loc_cpu[:self.max_num_reqs + 1].to( - self.device) - seq_lens = self.seq_lens_cpu[:self.max_num_reqs].to(self.device) if self.lora_config is not None: # We need to respect padding when activating LoRA adapters @@ -672,7 +724,8 @@ class TPUModelRunner(LoRAModelRunnerMixin): layer_name: attn_metadata for layer_name in layer_names } - return per_layer_attn_metadata, logits_indices, padded_num_reqs + return per_layer_attn_metadata, logits_indices, padded_num_reqs,\ + num_reqs, end_index def _scatter_placeholders( self, @@ -847,52 +900,84 @@ class TPUModelRunner(LoRAModelRunnerMixin): else: mm_embeds = [] xm.mark_step() - # Prepare inputs - attn_metadata, logits_indices, padded_num_reqs = self._prepare_inputs( - scheduler_output) - input_ids, inputs_embeds = self._get_model_inputs( - self.input_ids, mm_embeds) - xm.mark_step() - num_reqs = self.input_batch.num_reqs - # Run the decoder - with set_forward_context( - attn_metadata, - self.vllm_config, - num_tokens=scheduler_output.total_num_scheduled_tokens): - hidden_states = self.model( - input_ids=input_ids, - positions=self.position_ids, - inputs_embeds=inputs_embeds, - ) - hidden_states = self.select_hidden_states(hidden_states, - logits_indices) - logits = self.compute_logits(hidden_states) - tpu_sampling_metadata = TPUSupportedSamplingMetadata.\ - from_input_batch(self.input_batch, padded_num_reqs, self.device) - if scheduler_output.grammar_bitmask is not None: - require_struct_decoding, grammar_bitmask_padded, arange = \ - self.prepare_structured_decoding_input(logits, scheduler_output) - logits = self.structured_decode(require_struct_decoding, - grammar_bitmask_padded, logits, - arange) - selected_token_ids = self.sample_from_logits_func( - logits, tpu_sampling_metadata) - # NOTE (NickLucche) Use the original logits (before any penalties or - # temperature scaling) for the top-k logprobs. We can't enforce it due - # to recompilations outside torch.compiled code, so just make sure - # `sample_from_logits` does not modify the logits in-place. - logprobs = self.gather_logprobs(logits, selected_token_ids) \ - if tpu_sampling_metadata.logprobs else None + # Prepare inputs, the requests might be splitted into multiple + # executions, combine the result of each execution. + start_index = 0 + combined_selected_tokens: list[torch.Tensor] = [] + combined_logprobs: list[LogprobsLists] = [] + while start_index < self.input_batch.num_reqs: + attn_metadata, logits_indices, padded_num_reqs, num_reqs,\ + end_index = self._prepare_inputs(scheduler_output, start_index) + input_ids, inputs_embeds = self._get_model_inputs( + self.input_ids, mm_embeds) + xm.mark_step() + # Run the decoder + with set_forward_context( + attn_metadata, + self.vllm_config, + num_tokens=scheduler_output.total_num_scheduled_tokens): + hidden_states = self.model( + input_ids=input_ids, + positions=self.position_ids, + inputs_embeds=inputs_embeds, + ) + hidden_states = self.select_hidden_states(hidden_states, + logits_indices) + logits = self.compute_logits(hidden_states) + tpu_sampling_metadata = TPUSupportedSamplingMetadata.\ + from_input_batch(self.input_batch, padded_num_reqs, self.device) + if scheduler_output.grammar_bitmask is not None: + require_struct_decoding, grammar_bitmask_padded, arange = \ + self.prepare_structured_decoding_input(logits, + scheduler_output) + logits = self.structured_decode(require_struct_decoding, + grammar_bitmask_padded, logits, + arange) + selected_token_ids = self.sample_from_logits_func( + logits, tpu_sampling_metadata) + # NOTE (NickLucche) Use the original logits (before any penalties or + # temperature scaling) for the top-k logprobs. We can't enforce it + # due to recompilations outside torch.compiled code, so just make + # sure `sample_from_logits` does not modify the logits in-place. + logprobs = self.gather_logprobs(logits, selected_token_ids) \ + if tpu_sampling_metadata.logprobs else None - # Remove padding on cpu and keep dynamic op outside of xla graph. - selected_token_ids = selected_token_ids.cpu()[:num_reqs] - logprobs_lists = logprobs.tolists() \ - if tpu_sampling_metadata.logprobs else None + # Remove padding on cpu and keep dynamic op outside of xla graph. + selected_token_ids = selected_token_ids.cpu()[:num_reqs] + + combined_selected_tokens.append(selected_token_ids) + if tpu_sampling_metadata.logprobs: + combined_logprobs.append(logprobs.tolists()) + + start_index = end_index + + selected_token_ids = torch.cat(combined_selected_tokens, dim=0) + if tpu_sampling_metadata.logprobs: + + def concat_lists(input_lists): + result = [] + for input_list in input_lists: + result.extend(input_list) + return result + + logprobs_lists = LogprobsLists(logprob_token_ids=concat_lists( + [lp.logprob_token_ids for lp in combined_logprobs]), + logprobs=concat_lists([ + lp.logprobs + for lp in combined_logprobs + ]), + sampled_token_ranks=concat_lists([ + lp.sampled_token_ranks + for lp in combined_logprobs + ])) + else: + logprobs_lists = None # Update the cache state concurrently. Code above will not block until # we use `selected_token_ids`. Add mark_step if post-processing changes request_seq_lens: list[tuple[int, CachedRequestState, int]] = [] discard_sampled_tokens_req_indices = [] + num_reqs = self.input_batch.num_reqs for i, req_id in zip(range(num_reqs), self.input_batch.req_ids): assert req_id is not None req_state = self.requests[req_id] @@ -1020,7 +1105,8 @@ class TPUModelRunner(LoRAModelRunnerMixin): self.sampler = TPUSampler() @torch.no_grad() - def _dummy_run(self, num_tokens: int) -> None: + def _dummy_run(self, num_tokens: int, num_reqs: int, + num_blocks: int) -> None: if self.is_multimodal_model: input_ids = None inputs_embeds = torch.zeros((num_tokens, self.hidden_size), @@ -1030,20 +1116,19 @@ class TPUModelRunner(LoRAModelRunnerMixin): input_ids = torch.zeros((num_tokens), dtype=torch.int32).to(self.device) inputs_embeds = None - actual_num_reqs = min(num_tokens, self.max_num_reqs) + actual_num_reqs = min(num_tokens, num_reqs) position_ids = torch.zeros(num_tokens, dtype=torch.int32).to(self.device) slot_mapping = torch.zeros(num_tokens, dtype=torch.int64).to(self.device) - block_tables = torch.zeros( - (self.max_num_reqs, self.block_table_cpu.shape[1]), - dtype=torch.int32).to(self.device) - query_lens = [1] * self.max_num_reqs + block_tables = torch.zeros((num_reqs, num_blocks), + dtype=torch.int32).to(self.device) + query_lens = [1] * num_reqs query_start_loc = torch.cumsum(torch.tensor([0] + query_lens, dtype=torch.int32), dim=0, dtype=torch.int32).to(self.device) - context_lens = torch.ones((self.max_num_reqs, ), + context_lens = torch.ones((num_reqs, ), dtype=torch.int32).to(self.device) num_seqs = torch.tensor([actual_num_reqs], dtype=torch.int32).to(self.device) @@ -1061,6 +1146,9 @@ class TPUModelRunner(LoRAModelRunnerMixin): torch._dynamo.mark_dynamic(input_ids, 0) torch._dynamo.mark_dynamic(position_ids, 0) torch._dynamo.mark_dynamic(attn_metadata.slot_mapping, 0) + torch._dynamo.mark_dynamic(attn_metadata.block_tables, (0, 1)) + torch._dynamo.mark_dynamic(attn_metadata.context_lens, 0) + torch._dynamo.mark_dynamic(attn_metadata.query_start_loc, 0) layer_names = get_layers_from_vllm_config(self.vllm_config, Attention).keys() @@ -1152,7 +1240,11 @@ class TPUModelRunner(LoRAModelRunnerMixin): start = time.perf_counter() for num_tokens in self.num_tokens_paddings: logger.info(" -- num_tokens: %d", num_tokens) - self._dummy_run(num_tokens) + self._dummy_run(num_tokens, self.num_reqs_max_model_len, + self.max_num_blocks_per_req) + if self.most_model_len is not None: + self._dummy_run(num_tokens, self.num_reqs_most_model_len, + self.num_blocks_per_most_len_req) xm.wait_device_ops() end = time.perf_counter() logger.info("Compilation finished in %.2f [secs].", end - start) @@ -1341,7 +1433,11 @@ class TPUModelRunner(LoRAModelRunnerMixin): self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs)) # Trigger compilation for general shape. - self._dummy_run(num_tokens) + self._dummy_run(num_tokens, self.num_reqs_max_model_len, + self.max_num_blocks_per_req) + if self.most_model_len is not None: + self._dummy_run(num_tokens, self.num_reqs_most_model_len, + self.num_blocks_per_most_len_req) xm.mark_step() xm.wait_device_ops() From 296ce95d8e72f4c6680bda539058f48dbe0f340a Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Thu, 26 Jun 2025 08:23:56 +0900 Subject: [PATCH 141/211] [CI] Add SM120 to the Dockerfile (#19794) Signed-off-by: mgoin --- docker/Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index cf9c245a95174..8d4375470adf9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -77,7 +77,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \ # can be useful for both `dev` and `test` # explicitly set the list to avoid issues with torch 2.2 # see https://github.com/pytorch/pytorch/pull/123243 -ARG torch_cuda_arch_list='7.0 7.5 8.0 8.9 9.0 10.0+PTX' +ARG torch_cuda_arch_list='7.0 7.5 8.0 8.9 9.0 10.0 12.0' ENV TORCH_CUDA_ARCH_LIST=${torch_cuda_arch_list} # Override the arch list for flash-attn to reduce the binary size ARG vllm_fa_cmake_gpu_arches='80-real;90-real' @@ -244,7 +244,7 @@ RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist # If we need to build FlashInfer wheel before its release: # $ # Note we remove 7.0 from the arch list compared to the list below, since FlashInfer only supports sm75+ -# $ export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0a 10.0a' +# $ export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0a 10.0a 12.0' # $ git clone https://github.com/flashinfer-ai/flashinfer.git --recursive # $ cd flashinfer # $ git checkout v0.2.6.post1 @@ -261,7 +261,7 @@ if [ "$TARGETPLATFORM" != "linux/arm64" ]; then \ if [[ "$CUDA_VERSION" == 12.8* ]]; then \ uv pip install --system https://download.pytorch.org/whl/cu128/flashinfer/flashinfer_python-0.2.6.post1%2Bcu128torch2.7-cp39-abi3-linux_x86_64.whl; \ else \ - export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0a 10.0a' && \ + export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0a 10.0a 12.0' && \ git clone https://github.com/flashinfer-ai/flashinfer.git --single-branch --branch v0.2.6.post1 --recursive && \ # Needed to build AOT kernels (cd flashinfer && \ From 754b00edb3fd2642da08c40363a07f1d60a54977 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Thu, 26 Jun 2025 10:01:17 +0900 Subject: [PATCH 142/211] [Bugfix] Fix Mistral tool-parser regex for nested JSON (#20093) Signed-off-by: mgoin --- .../language/generation/test_mistral.py | 51 +++++++++++++++++++ .../tool_parsers/mistral_tool_parser.py | 4 +- 2 files changed, 53 insertions(+), 2 deletions(-) diff --git a/tests/models/language/generation/test_mistral.py b/tests/models/language/generation/test_mistral.py index bdd857ff50620..c70698ede37a5 100644 --- a/tests/models/language/generation/test_mistral.py +++ b/tests/models/language/generation/test_mistral.py @@ -10,6 +10,7 @@ import pytest from vllm.entrypoints.openai.tool_parsers.mistral_tool_parser import ( MistralToolCall, MistralToolParser) from vllm.sampling_params import GuidedDecodingParams, SamplingParams +from vllm.transformers_utils.tokenizer import MistralTokenizer from ...utils import check_logprobs_close @@ -318,3 +319,53 @@ def test_mistral_guided_decoding( schema=SAMPLE_JSON_SCHEMA) except jsonschema.exceptions.ValidationError: pytest.fail("Generated response is not valid with JSON schema") + + +def test_mistral_function_call_nested_json(): + """Ensure that the function-name regex captures the entire outer-most + JSON block, including nested braces.""" + + # Create a minimal stub tokenizer that provides the few attributes the + # parser accesses (`version` and `get_vocab`). + class _StubMistralTokenizer(MistralTokenizer): + version = 11 # Satisfy the version check + + def __init__(self): + pass + + @staticmethod + def get_vocab(): + # Provide the special TOOL_CALLS token expected by the parser. + return {"[TOOL_CALLS]": 0} + + tokenizer = _StubMistralTokenizer() + parser = MistralToolParser(tokenizer) + + # Craft a model output featuring nested JSON inside the arguments. + args_dict = { + "city": "Dallas", + "state": "TX", + "unit": "fahrenheit", + "sub_dict": { + "foo": "bar", + "inner": { + "x": 1, + "y": 2 + } + }, + } + + model_output = ( + f"{parser.bot_token}get_current_weather{json.dumps(args_dict)}") + + parsed = parser.extract_tool_calls(model_output, None) + + # Assertions: the tool call is detected and the full nested JSON is parsed + # without truncation. + assert parsed.tools_called + + assert MistralToolCall.is_valid_id(parsed.tool_calls[0].id) + assert parsed.tool_calls[0].function.name == "get_current_weather" + assert json.loads(parsed.tool_calls[0].function.arguments) == args_dict + # No additional content outside the tool call should be returned. + assert parsed.content is None diff --git a/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py b/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py index ab1cfd4b6eabe..c0691f122904e 100644 --- a/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py +++ b/vllm/entrypoints/openai/tool_parsers/mistral_tool_parser.py @@ -77,8 +77,8 @@ class MistralToolParser(ToolParser): self.bot_token_id = self.vocab.get(self.bot_token) self.tool_call_regex = re.compile(r"\[{.*}\]", re.DOTALL) if _is_fn_name_regex_support(self.model_tokenizer): - self.fn_name_regex = re.compile(r'([a-zA-Z0-9_-]+)(\{.*?\})', - re.DOTALL) + self.fn_name_regex = re.compile( + r'([a-zA-Z0-9_-]+)(\{[\s\S]*?\})(?=\s*$|,|\s)', re.DOTALL) else: self.fn_name_regex = None From 2582683566ed676a811f4311f1048f0b323676b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Lucchesi?= Date: Thu, 26 Jun 2025 05:04:39 +0200 Subject: [PATCH 143/211] [PD] Skip `tp_size` exchange with rank0 (#19413) Signed-off-by: NickLucche --- .../kv_connector/unit/test_nixl_connector.py | 29 ++++- .../kv_connector/v1/nixl_connector.py | 109 ++++++++---------- 2 files changed, 72 insertions(+), 66 deletions(-) diff --git a/tests/v1/kv_connector/unit/test_nixl_connector.py b/tests/v1/kv_connector/unit/test_nixl_connector.py index ab9729aae2e9f..e30a250449aaa 100644 --- a/tests/v1/kv_connector/unit/test_nixl_connector.py +++ b/tests/v1/kv_connector/unit/test_nixl_connector.py @@ -7,6 +7,8 @@ from collections import defaultdict from typing import Optional from unittest.mock import patch +import pytest + from vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector import ( KVConnectorRole, NixlAgentMetadata, NixlConnector, NixlConnectorMetadata, NixlConnectorWorker) @@ -161,7 +163,8 @@ class FakeNixlConnectorWorker(NixlConnectorWorker): super().__init__(*args, **kwargs) self._hand_shake_latency = hand_shake_latency - def _nixl_handshake(self, host: str, port: int) -> dict[int, str]: + def _nixl_handshake(self, host: str, port: int, + remote_tp_size: int) -> dict[int, str]: # Mimic slow _nixl_handshake, as well as bypass zmq communication. time.sleep(self._hand_shake_latency) # These should've been done in register_kv_caches(), called by @@ -177,10 +180,10 @@ class FakeNixlConnectorWorker(NixlConnectorWorker): agent_metadata=FakeNixlWrapper.AGENT_METADATA, kv_caches_base_addr=[0], num_blocks=1, - tp_size=1, block_len=self.block_len, attn_backend_name=self.backend_name, - )) + ), + remote_tp_size=remote_tp_size) return {0: remote_agent_name} @@ -233,6 +236,8 @@ class TestNixlHandshake: "localhost", "remote_port": 1234, + "remote_tp_size": + 1, }) connector.bind_connector_metadata(metadata) @@ -259,13 +264,23 @@ class TestNixlHandshake: @patch( "vllm.distributed.kv_transfer.kv_connector.v1.nixl_connector.NixlWrapper", FakeNixlWrapper) + @pytest.mark.parametrize("decode_tp_size, prefill_tp_size", [ + (1, 1), + (2, 1), + (4, 2), + (4, 4), + ]) def test_async_load_kv( - self, - # dist_init is a fixture that initializes the distributed environment. - dist_init): + self, + # Fixture that initializes the distributed environment. + dist_init, + # Simulate consumer-producer TP sizes. + decode_tp_size, + prefill_tp_size): """Test that NixlConnector's start_load_kv should be non-blocking.""" vllm_config = create_vllm_config() + vllm_config.parallel_config.tensor_parallel_size = decode_tp_size # Test worker role in decode server. connector = NixlConnector(vllm_config, KVConnectorRole.WORKER) @@ -280,6 +295,7 @@ class TestNixlHandshake: FakeNixlConnectorWorker.REMOTE_ENGINE_ID, "remote_host": "localhost", "remote_port": 1234, + "remote_tp_size": prefill_tp_size, }) connector.bind_connector_metadata(metadata) @@ -329,6 +345,7 @@ class TestNixlHandshake: FakeNixlConnectorWorker.REMOTE_ENGINE_ID, "remote_host": "localhost", "remote_port": 1234, + "remote_tp_size": 1, }) connector.bind_connector_metadata(metadata) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py index 92a9184d318c7..7a077dce7706c 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/nixl_connector.py @@ -62,7 +62,6 @@ class NixlAgentMetadata( agent_metadata: bytes kv_caches_base_addr: list[int] num_blocks: int - tp_size: int block_len: int attn_backend_name: str @@ -73,7 +72,8 @@ class ReqMeta: remote_block_ids: list[int] remote_host: str remote_port: int - remote_engine_id: EngineId + remote_engine_id: str + tp_size: int class NixlConnectorMetadata(KVConnectorMetadata): @@ -93,6 +93,8 @@ class NixlConnectorMetadata(KVConnectorMetadata): remote_engine_id=kv_transfer_params["remote_engine_id"], remote_host=kv_transfer_params["remote_host"], remote_port=kv_transfer_params["remote_port"], + # P workers don't need to receive tp_size from proxy here. + tp_size=kv_transfer_params.get("tp_size", 1), ) @@ -330,7 +332,7 @@ class NixlConnectorScheduler: remote_engine_id=self.engine_id, remote_host=self.side_channel_host, remote_port=self.side_channel_port, - ) + tp_size=self.vllm_config.parallel_config.tensor_parallel_size) class NixlConnectorWorker: @@ -473,7 +475,8 @@ class NixlConnectorWorker: "Connection listener got unexpected message %s", msg) sock.send_multipart((identity, b"", encoded_data)) - def _nixl_handshake(self, host: str, port: int) -> dict[int, str]: + def _nixl_handshake(self, host: str, port: int, + remote_tp_size: int) -> dict[int, str]: """Do a NIXL handshake with a remote instance.""" start_time = time.perf_counter() @@ -482,7 +485,7 @@ class NixlConnectorWorker: # a hack to keep us moving. We will switch when moving to etcd # or where we have a single ZMQ socket in the scheduler. - def handshake(path: str, rank: int) -> tuple[NixlAgentMetadata, str]: + def handshake(path: str, rank: int) -> str: # Send query for the request. with zmq_ctx(zmq.REQ, path) as sock: sock.send(GET_META_MSG) @@ -492,33 +495,25 @@ class NixlConnectorWorker: got_metadata_time = time.perf_counter() # Register Remote agent. - remote_agent_name = self.add_remote_agent(metadata, rank) + remote_agent_name = self.add_remote_agent( + metadata, rank, remote_tp_size) setup_agent_time = time.perf_counter() logger.debug("NIXL handshake: get metadata took: %s", got_metadata_time - start_time) logger.debug("NIXL handshake: add agent took: %s", setup_agent_time - got_metadata_time) - return metadata, remote_agent_name + return remote_agent_name - # Handshake with remote agent-rank0 first to get the tp_size of remote - path = make_zmq_path("tcp", host, port) - logger.debug("Querying master rank metadata on path: %s", path) - rank_to_agent_name: dict[int, str] = {} - metadata, rank_to_agent_name[0] = handshake(path, 0) - - # Handshake only with the other TP remote the current local rank will + # Handshake only with the remote TP rank that current local rank will # pull from. With homogeneous TP it happens to be the same rank_i. - tp_ratio = self._tp_size[self.engine_id] // metadata.tp_size + tp_ratio = self._tp_size[self.engine_id] // remote_tp_size p_remote_rank = self.tp_rank // tp_ratio - if p_remote_rank > 0: - path = make_zmq_path("tcp", host, port + p_remote_rank) - logger.debug("Querying metadata on path: %s at remote rank %s", - path, p_remote_rank) - _, rank_to_agent_name[p_remote_rank] = handshake( - path, p_remote_rank) - - return rank_to_agent_name + path = make_zmq_path("tcp", host, port + p_remote_rank) + logger.debug("Querying metadata on path: %s at remote rank %s", path, + p_remote_rank) + # Remote rank -> agent name. + return {p_remote_rank: handshake(path, p_remote_rank)} def register_kv_caches(self, kv_caches: dict[str, torch.Tensor]): """Register the KV Cache data in nixl.""" @@ -645,7 +640,6 @@ class NixlConnectorWorker: agent_metadata=self.nixl_wrapper.get_agent_metadata(), kv_caches_base_addr=self.kv_caches_base_addr[self.engine_id], num_blocks=self.num_blocks, - tp_size=self.world_size, block_len=self.block_len, attn_backend_name=self.backend_name) ready_event = threading.Event() @@ -659,7 +653,8 @@ class NixlConnectorWorker: def add_remote_agent(self, nixl_agent_meta: NixlAgentMetadata, - remote_tp_rank: int = 0) -> str: + remote_tp_rank: int = 0, + remote_tp_size: int = 1) -> str: """ Add the remote NIXL agent and prepare the descriptors for reading cache blocks from remote. @@ -704,9 +699,9 @@ class NixlConnectorWorker: return self._remote_agents[engine_id][remote_tp_rank] if engine_id in self._tp_size: - assert self._tp_size[engine_id] == nixl_agent_meta.tp_size + assert self._tp_size[engine_id] == remote_tp_size else: - self._tp_size[engine_id] = nixl_agent_meta.tp_size + self._tp_size[engine_id] = remote_tp_size # We may eventually enable this after asserting equality in cache # layout and close outputs. assert nixl_agent_meta.attn_backend_name == self.backend_name @@ -756,33 +751,31 @@ class NixlConnectorWorker: # rank. With heterogeneous TP, prepare the descriptors by splitting the # P KV cache along kv_head dim, of D worker's kv_head size (D>P). # Eg. PTP1 DTP2 => P0 KV:[block0-KV_0 | block0-KV_1..]. - p_remote_tp_rank = self.tp_rank // tp_ratio # Only register the remote's descriptors if current rank pulls from it. - if p_remote_tp_rank == remote_tp_rank: - self.kv_caches_base_addr[ - engine_id] = nixl_agent_meta.kv_caches_base_addr - rank_offset = self.tp_rank % tp_ratio * self.block_len \ - if not (self.use_mla or is_kv_replicated) else 0 - # Register all remote blocks, but only the corresponding kv heads. - for base_addr in nixl_agent_meta.kv_caches_base_addr: - for block_id in range(nixl_agent_meta.num_blocks): - block_offset = block_id * nixl_agent_meta.block_len - # For each block, grab the heads chunk belonging to rank_i - # of size remote_nheads // tp_ratio, which correspond to - # self.block_len == remote_block_len//tp_ratio bytes. - addr = base_addr + block_offset + rank_offset - # (addr, len, device id) - blocks_data.append((addr, self.block_len, remote_tp_rank)) - logger.debug( - "Created %s blocks for dst engine %s with remote rank %s and " - "local rank %s", len(blocks_data), engine_id, remote_tp_rank, - self.tp_rank) + self.kv_caches_base_addr[ + engine_id] = nixl_agent_meta.kv_caches_base_addr + rank_offset = self.tp_rank % tp_ratio * self.block_len \ + if not (self.use_mla or is_kv_replicated) else 0 + # Register all remote blocks, but only the corresponding kv heads. + for base_addr in nixl_agent_meta.kv_caches_base_addr: + for block_id in range(nixl_agent_meta.num_blocks): + block_offset = block_id * nixl_agent_meta.block_len + # For each block, grab the heads chunk belonging to rank_i + # of size remote_nheads // tp_ratio, which correspond to + # self.block_len == remote_block_len//tp_ratio bytes. + addr = base_addr + block_offset + rank_offset + # (addr, len, device id) + blocks_data.append((addr, self.block_len, remote_tp_rank)) + logger.debug( + "Created %s blocks for dst engine %s with remote rank %s and " + "local rank %s", len(blocks_data), engine_id, remote_tp_rank, + self.tp_rank) - # Register with NIXL. - descs = self.nixl_wrapper.get_xfer_descs(blocks_data, "VRAM") - self.dst_xfer_side_handles[ - engine_id] = self.nixl_wrapper.prep_xfer_dlist( - remote_agent_name, descs) + # Register with NIXL. + descs = self.nixl_wrapper.get_xfer_descs(blocks_data, "VRAM") + self.dst_xfer_side_handles[ + engine_id] = self.nixl_wrapper.prep_xfer_dlist( + remote_agent_name, descs) return remote_agent_name @@ -917,7 +910,7 @@ class NixlConnectorWorker: if fut is None: fut = self._handshake_initiation_executor.submit( self._nixl_handshake, meta.remote_host, - meta.remote_port) + meta.remote_port, meta.tp_size) self._handshake_futures[remote_engine_id] = fut def done_callback(f: Future[dict[int, str]], @@ -957,13 +950,9 @@ class NixlConnectorWorker: remote_block_ids=meta.remote_block_ids, ) - def _read_blocks( - self, - local_block_ids: list[int], - remote_block_ids: list[int], - dst_engine_id: str, - request_id: str, - ): + def _read_blocks(self, local_block_ids: list[int], + remote_block_ids: list[int], dst_engine_id: str, + request_id: str): # NOTE(rob): having the staging blocks be on the READER side is # not going to work well (since we will have to call rearrange tensors). # after we detect the txn is complete (which means we cannot make the From 9502c38138a03669c4d54225336553db70ad799d Mon Sep 17 00:00:00 2001 From: Ekagra Ranjan <3116519+ekagra-ranjan@users.noreply.github.com> Date: Thu, 26 Jun 2025 01:06:27 -0400 Subject: [PATCH 144/211] [Benchmark][Bug] Fix multiple bugs in bench and add args to spec_decode offline (#20083) --- benchmarks/benchmark_dataset.py | 3 ++- examples/offline_inference/spec_decode.py | 20 +++++++++++++------- vllm/benchmarks/datasets.py | 12 ++++++++---- vllm/benchmarks/serve.py | 6 ++++++ 4 files changed, 29 insertions(+), 12 deletions(-) diff --git a/benchmarks/benchmark_dataset.py b/benchmarks/benchmark_dataset.py index 8671719bce72f..55c0cf851264f 100644 --- a/benchmarks/benchmark_dataset.py +++ b/benchmarks/benchmark_dataset.py @@ -349,8 +349,9 @@ class RandomDataset(BenchmarkDataset): # [1650, 939, 486] -> ['Ġcall', 'sh', 'ere'] # To avoid uncontrolled change of the prompt length, # the encoded sequence is truncated before being decode again. + total_input_len = prefix_len + int(input_lens[i]) re_encoded_sequence = tokenizer.encode(prompt, add_special_tokens=False)[ - : input_lens[i] + :total_input_len ] prompt = tokenizer.decode(re_encoded_sequence) total_input_len = len(re_encoded_sequence) diff --git a/examples/offline_inference/spec_decode.py b/examples/offline_inference/spec_decode.py index eece8beced510..6fa68d2ecee1d 100644 --- a/examples/offline_inference/spec_decode.py +++ b/examples/offline_inference/spec_decode.py @@ -39,6 +39,9 @@ def parse_args(): parser.add_argument("--top-k", type=int, default=-1) parser.add_argument("--print-output", action="store_true") parser.add_argument("--output-len", type=int, default=256) + parser.add_argument("--model-dir", type=str, default=None) + parser.add_argument("--eagle-dir", type=str, default=None) + parser.add_argument("--max-model-len", type=int, default=2048) return parser.parse_args() @@ -46,9 +49,10 @@ def main(): args = parse_args() args.endpoint_type = "openai-chat" - model_dir = "meta-llama/Llama-3.1-8B-Instruct" + model_dir = args.model_dir + if args.model_dir is None: + model_dir = "meta-llama/Llama-3.1-8B-Instruct" tokenizer = AutoTokenizer.from_pretrained(model_dir) - max_model_len = 2048 prompts = get_samples(args, tokenizer) # add_special_tokens is False to avoid adding bos twice when using chat templates @@ -57,16 +61,18 @@ def main(): ] if args.method == "eagle" or args.method == "eagle3": - if args.method == "eagle": + eagle_dir = args.eagle_dir + if args.method == "eagle" and eagle_dir is None: eagle_dir = "yuhuili/EAGLE-LLaMA3.1-Instruct-8B" - elif args.method == "eagle3": + + elif args.method == "eagle3" and eagle_dir is None: eagle_dir = "yuhuili/EAGLE3-LLaMA3.1-Instruct-8B" speculative_config = { "method": args.method, "model": eagle_dir, "num_speculative_tokens": args.num_spec_tokens, "draft_tensor_parallel_size": args.draft_tp, - "max_model_len": max_model_len, + "max_model_len": args.max_model_len, } elif args.method == "ngram": speculative_config = { @@ -74,7 +80,7 @@ def main(): "num_speculative_tokens": args.num_spec_tokens, "prompt_lookup_max": args.prompt_lookup_max, "prompt_lookup_min": args.prompt_lookup_min, - "max_model_len": max_model_len, + "max_model_len": args.max_model_len, } else: raise ValueError(f"unknown method: {args.method}") @@ -86,7 +92,7 @@ def main(): enable_chunked_prefill=args.enable_chunked_prefill, max_num_batched_tokens=args.max_num_batched_tokens, enforce_eager=args.enforce_eager, - max_model_len=max_model_len, + max_model_len=args.max_model_len, max_num_seqs=args.max_num_seqs, gpu_memory_utilization=0.8, speculative_config=speculative_config, diff --git a/vllm/benchmarks/datasets.py b/vllm/benchmarks/datasets.py index 3efbe5695711f..b3688d2340e44 100644 --- a/vllm/benchmarks/datasets.py +++ b/vllm/benchmarks/datasets.py @@ -320,6 +320,8 @@ class RandomDataset(BenchmarkDataset): **kwargs, ) -> None: super().__init__(**kwargs) + random.seed(self.random_seed) + np.random.seed(self.random_seed) def sample( self, @@ -376,10 +378,11 @@ class RandomDataset(BenchmarkDataset): # [1650, 939, 486] -> ['Ġcall', 'sh', 'ere'] # To avoid uncontrolled change of the prompt length, # the encoded sequence is truncated before being decode again. - re_encoded_sequence = tokenizer.encode( - prompt, add_special_tokens=False)[:input_lens[i]] - prompt = tokenizer.decode(re_encoded_sequence) total_input_len = prefix_len + int(input_lens[i]) + re_encoded_sequence = tokenizer.encode( + prompt, add_special_tokens=False)[:total_input_len] + prompt = tokenizer.decode(re_encoded_sequence) + total_input_len = len(re_encoded_sequence) requests.append( SampleRequest( prompt=prompt, @@ -692,7 +695,8 @@ def get_samples(args, tokenizer) -> list[SampleRequest]: dataset_path=args.dataset_path). sample(tokenizer=tokenizer, num_requests=args.num_prompts), "random": - lambda: RandomDataset(dataset_path=args.dataset_path).sample( + lambda: RandomDataset(random_seed=args.seed, + dataset_path=args.dataset_path).sample( tokenizer=tokenizer, num_requests=args.num_prompts, prefix_len=args.random_prefix_len, diff --git a/vllm/benchmarks/serve.py b/vllm/benchmarks/serve.py index 302f655f424a3..419284cca042e 100644 --- a/vllm/benchmarks/serve.py +++ b/vllm/benchmarks/serve.py @@ -631,6 +631,12 @@ def add_cli_args(parser: argparse.ArgumentParser): help="The label (prefix) of the benchmark results. If not specified, " "the endpoint type will be used as the label.", ) + parser.add_argument( + "--backend", + type=str, + default="vllm", + choices=list(ASYNC_REQUEST_FUNCS.keys()), + ) parser.add_argument( "--base-url", type=str, From 65397e40f58ff5657d9e8bbd860ed9d3fdf734a0 Mon Sep 17 00:00:00 2001 From: Seiji Eicher <58963096+eicherseiji@users.noreply.github.com> Date: Thu, 26 Jun 2025 00:01:57 -0700 Subject: [PATCH 145/211] [Bugfix] Allow `CUDA_VISIBLE_DEVICES=''` in `Platform.device_id_to_physical_device_id` (#18979) Signed-off-by: Seiji Eicher --- tests/config/test_config_generation.py | 38 ++++++++++++ tests/v1/engine/test_engine_core_client.py | 71 ++++++++++++++++++++++ vllm/platforms/interface.py | 15 ++--- 3 files changed, 114 insertions(+), 10 deletions(-) create mode 100644 tests/config/test_config_generation.py diff --git a/tests/config/test_config_generation.py b/tests/config/test_config_generation.py new file mode 100644 index 0000000000000..024e81fccc5f1 --- /dev/null +++ b/tests/config/test_config_generation.py @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import pytest + +from vllm.engine.arg_utils import EngineArgs +from vllm.model_executor.layers.quantization.quark.utils import deep_compare + + +def test_cuda_empty_vs_unset_configs(monkeypatch: pytest.MonkeyPatch): + """Test that configs created with normal (untouched) CUDA_VISIBLE_DEVICES + and CUDA_VISIBLE_DEVICES="" are equivalent. This ensures consistent + behavior regardless of whether GPU visibility is disabled via empty string + or left in its normal state. + """ + + def create_config(): + engine_args = EngineArgs(model="deepseek-ai/DeepSeek-V2-Lite", + trust_remote_code=True) + return engine_args.create_engine_config() + + # Create config with CUDA_VISIBLE_DEVICES set normally + normal_config = create_config() + + # Create config with CUDA_VISIBLE_DEVICES="" + with monkeypatch.context() as m: + m.setenv("CUDA_VISIBLE_DEVICES", "") + empty_config = create_config() + + normal_config_dict = vars(normal_config) + empty_config_dict = vars(empty_config) + + # Remove instance_id before comparison as it's expected to be different + normal_config_dict.pop("instance_id", None) + empty_config_dict.pop("instance_id", None) + + assert deep_compare(normal_config_dict, empty_config_dict), ( + "Configs with normal CUDA_VISIBLE_DEVICES and CUDA_VISIBLE_DEVICES=\"\"" + " should be equivalent") diff --git a/tests/v1/engine/test_engine_core_client.py b/tests/v1/engine/test_engine_core_client.py index 16c36cd5c6b9b..d5ff78c1449a6 100644 --- a/tests/v1/engine/test_engine_core_client.py +++ b/tests/v1/engine/test_engine_core_client.py @@ -8,8 +8,10 @@ import time import uuid from threading import Thread from typing import Optional +from unittest.mock import MagicMock import pytest +import torch from transformers import AutoTokenizer from tests.utils import multi_gpu_test @@ -517,3 +519,72 @@ def test_startup_failure(monkeypatch: pytest.MonkeyPatch): ) assert "Engine core initialization failed" in str(e_info.value) + + +@create_new_process_for_each_test() +def test_engine_core_proc_instantiation_cuda_empty( + monkeypatch: pytest.MonkeyPatch): + """ + Test that EngineCoreProc can be instantiated when CUDA_VISIBLE_DEVICES + is empty. This ensures the engine frontend does not need access to GPUs. + """ + + from vllm.v1.engine.core import EngineCoreProc + from vllm.v1.executor.abstract import Executor + + # Create a simple mock executor instead of a complex custom class + mock_executor_class = MagicMock(spec=Executor) + + def create_mock_executor(vllm_config): + mock_executor = MagicMock() + + # Only implement the methods that are actually called during init + from vllm.v1.kv_cache_interface import FullAttentionSpec + mock_spec = FullAttentionSpec(block_size=16, + num_kv_heads=1, + head_size=64, + dtype=torch.float16, + use_mla=False) + + mock_executor.get_kv_cache_specs.return_value = [{ + "default": mock_spec + }] + mock_executor.determine_available_memory.return_value = [ + 1024 * 1024 * 1024 + ] + mock_executor.initialize_from_config.return_value = None + mock_executor.max_concurrent_batches = 1 + + return mock_executor + + mock_executor_class.side_effect = create_mock_executor + + with monkeypatch.context() as m: + m.setenv("VLLM_USE_V1", "1") + m.setenv("CUDA_VISIBLE_DEVICES", "") # No CUDA devices + + from vllm.v1.utils import EngineZmqAddresses + + def mock_startup_handshake(self, handshake_socket, on_head_node, + parallel_config): + return EngineZmqAddresses(inputs=["tcp://127.0.0.1:5555"], + outputs=["tcp://127.0.0.1:5556"], + coordinator_input=None, + coordinator_output=None) + + # Background processes are not important here + m.setattr(EngineCoreProc, "startup_handshake", mock_startup_handshake) + + vllm_config = EngineArgs( + model="deepseek-ai/DeepSeek-V2-Lite", + trust_remote_code=True).create_engine_config() + engine_core_proc = EngineCoreProc( + vllm_config=vllm_config, + on_head_node=True, + handshake_address="tcp://127.0.0.1:12345", + executor_class=mock_executor_class, + log_stats=False, + engine_index=0, + ) + + engine_core_proc.shutdown() diff --git a/vllm/platforms/interface.py b/vllm/platforms/interface.py index f962fafabf502..0f08bf986333b 100644 --- a/vllm/platforms/interface.py +++ b/vllm/platforms/interface.py @@ -173,17 +173,12 @@ class Platform: @classmethod def device_id_to_physical_device_id(cls, device_id: int): - if cls.device_control_env_var in os.environ: + # Treat empty device control env var as unset. This is a valid + # configuration in Ray setups where the engine is launched in + # a CPU-only placement group located on a GPU node. + if cls.device_control_env_var in os.environ and os.environ[ + cls.device_control_env_var] != "": device_ids = os.environ[cls.device_control_env_var].split(",") - if device_ids == [""]: - msg = (f"{cls.device_control_env_var} is set to empty string, " - "which means current platform support is disabled. If " - "you are using ray, please unset the environment " - f"variable `{cls.device_control_env_var}` inside the " - "worker/actor. Check " - "https://github.com/vllm-project/vllm/issues/8402 for " - "more information.") - raise RuntimeError(msg) physical_device_id = device_ids[device_id] return int(physical_device_id) else: From 1d7c29f5fecab930fbb28bf59f1bc4510abe335b Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 26 Jun 2025 15:47:06 +0800 Subject: [PATCH 146/211] [Doc] Update docs for New Model Implementation (#20115) Signed-off-by: DarkLight1337 --- docs/.nav.yml | 7 ++++++- docs/contributing/model/README.md | 24 +++++++++++++----------- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/docs/.nav.yml b/docs/.nav.yml index a9c594c291777..e679807f75346 100644 --- a/docs/.nav.yml +++ b/docs/.nav.yml @@ -48,7 +48,12 @@ nav: - General: - glob: contributing/* flatten_single_child_sections: true - - Model Implementation: contributing/model + - Model Implementation: + - contributing/model/README.md + - contributing/model/basic.md + - contributing/model/registration.md + - contributing/model/tests.md + - contributing/model/multimodal.md - Design Documents: - V0: design - V1: design/v1 diff --git a/docs/contributing/model/README.md b/docs/contributing/model/README.md index b7727f02c11bf..82541924bc028 100644 --- a/docs/contributing/model/README.md +++ b/docs/contributing/model/README.md @@ -1,21 +1,23 @@ --- -title: Adding a New Model +title: Summary --- [](){ #new-model } -This section provides more information on how to integrate a [PyTorch](https://pytorch.org/) model into vLLM. +!!! important + Many decoder language models can now be automatically loaded using the [Transformers backend][transformers-backend] without having to implement them in vLLM. See if `vllm serve ` works first! -Contents: +vLLM models are specialized [PyTorch](https://pytorch.org/) models that take advantage of various [features][compatibility-matrix] to optimize their performance. -- [Basic](basic.md) -- [Registration](registration.md) -- [Tests](tests.md) -- [Multimodal](multimodal.md) +The complexity of integrating a model into vLLM depends heavily on the model's architecture. +The process is considerably straightforward if the model shares a similar architecture with an existing model in vLLM. +However, this can be more complex for models that include new operators (e.g., a new attention mechanism). -!!! note - The complexity of adding a new model depends heavily on the model's architecture. - The process is considerably straightforward if the model shares a similar architecture with an existing model in vLLM. - However, for models that include new operators (e.g., a new attention mechanism), the process can be a bit more complex. +Read through these pages for a step-by-step guide: + +- [Implementing a Basic Model](basic.md) +- [Registering a Model to vLLM](registration.md) +- [Writing Unit Tests](tests.md) +- [Multi-Modal Support](multimodal.md) !!! tip If you are encountering issues while integrating your model into vLLM, feel free to open a [GitHub issue](https://github.com/vllm-project/vllm/issues) From d188913d99bbdfc699bc4f7c2c23187f3745f94b Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Thu, 26 Jun 2025 05:16:10 -0400 Subject: [PATCH 147/211] [Refactor] Remove unused library (#20099) Signed-off-by: yewentao256 --- vllm/_custom_ops.py | 1 - 1 file changed, 1 deletion(-) diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index 8ebe694eefd0e..d5a41284385e6 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -5,7 +5,6 @@ import contextlib from typing import TYPE_CHECKING, Optional, Union import torch -import torch.library import vllm.envs as envs from vllm.logger import init_logger From 0567c8249fdbff59a05f000cb326aed7cf5c8567 Mon Sep 17 00:00:00 2001 From: "Li, Jiang" Date: Thu, 26 Jun 2025 18:34:47 +0800 Subject: [PATCH 148/211] [CPU] Fix torch version in x86 CPU backend (#19258) Signed-off-by: jiang1.li --- csrc/cpu/torch_bindings.cpp | 13 +++++--- docker/Dockerfile.cpu | 33 +++++++++++-------- requirements/cpu-build.txt | 12 +++++++ requirements/cpu.txt | 5 +-- .../multimodal/generation/test_common.py | 2 ++ .../generation/vlm_utils/builders.py | 3 ++ vllm/model_executor/layers/fused_moe/layer.py | 2 ++ .../layers/quantization/ipex_quant.py | 2 +- 8 files changed, 52 insertions(+), 20 deletions(-) create mode 100644 requirements/cpu-build.txt diff --git a/csrc/cpu/torch_bindings.cpp b/csrc/cpu/torch_bindings.cpp index 447e826bc1c09..60304d229a8f5 100644 --- a/csrc/cpu/torch_bindings.cpp +++ b/csrc/cpu/torch_bindings.cpp @@ -131,16 +131,19 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { // Quantization #ifdef __AVX512F__ + at::Tag stride_tag = at::Tag::needs_fixed_stride_order; // Compute int8 quantized tensor for given scaling factor. ops.def( "static_scaled_int8_quant(Tensor! out, Tensor input, Tensor scale," - "Tensor? azp) -> ()"); + "Tensor? azp) -> ()", + {stride_tag}); ops.impl("static_scaled_int8_quant", torch::kCPU, &static_scaled_int8_quant); // Compute int8 quantized tensor and scaling factor ops.def( "dynamic_scaled_int8_quant(Tensor! out, Tensor input, Tensor! scale, " - "Tensor!? azp) -> ()"); + "Tensor!? azp) -> ()", + {stride_tag}); ops.impl("dynamic_scaled_int8_quant", torch::kCPU, &dynamic_scaled_int8_quant); // W8A8 GEMM, supporting symmetric per-tensor or per-row/column @@ -148,7 +151,8 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { ops.def( "cutlass_scaled_mm(Tensor! out, Tensor a," " Tensor b, Tensor a_scales," - " Tensor b_scales, Tensor? bias) -> ()"); + " Tensor b_scales, Tensor? bias) -> ()", + {stride_tag}); ops.impl("cutlass_scaled_mm", torch::kCPU, &int8_scaled_mm); // w8a8 GEMM, supporting asymmetric per-tensor or per-row/column // quantization. @@ -156,7 +160,8 @@ TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) { "cutlass_scaled_mm_azp(Tensor! out, Tensor a," " Tensor b, Tensor a_scales," " Tensor b_scales, Tensor azp_adj," - " Tensor? azp, Tensor? bias) -> ()"); + " Tensor? azp, Tensor? bias) -> ()", + {stride_tag}); ops.impl("cutlass_scaled_mm_azp", torch::kCPU, &int8_scaled_mm_azp); #elif defined(__powerpc64__) // Compute int8 quantized tensor for given scaling factor. diff --git a/docker/Dockerfile.cpu b/docker/Dockerfile.cpu index 3e9fa0e7af2dc..13bd03c5696ab 100644 --- a/docker/Dockerfile.cpu +++ b/docker/Dockerfile.cpu @@ -66,7 +66,7 @@ ENV VLLM_CPU_DISABLE_AVX512=${VLLM_CPU_DISABLE_AVX512} WORKDIR /workspace/vllm RUN --mount=type=cache,target=/root/.cache/uv \ - --mount=type=bind,src=requirements/build.txt,target=requirements/build.txt \ + --mount=type=bind,src=requirements/cpu-build.txt,target=requirements/build.txt \ uv pip install -r requirements/build.txt COPY . . @@ -79,6 +79,22 @@ RUN --mount=type=cache,target=/root/.cache/uv \ --mount=type=bind,source=.git,target=.git \ VLLM_TARGET_DEVICE=cpu python3 setup.py bdist_wheel +######################### TEST DEPS ######################### +FROM base AS vllm-test-deps + +WORKDIR /workspace/vllm + +RUN --mount=type=bind,src=requirements/test.in,target=requirements/test.in \ + cp requirements/test.in requirements/cpu-test.in && \ + sed -i '/mamba_ssm/d' requirements/cpu-test.in && \ + sed -i 's/torch==.*/torch==2.6.0/g' requirements/cpu-test.in && \ + sed -i 's/torchaudio.*/torchaudio/g' requirements/cpu-test.in && \ + sed -i 's/torchvision.*/torchvision/g' requirements/cpu-test.in && \ + uv pip compile requirements/cpu-test.in -o requirements/cpu-test.txt --index-strategy unsafe-best-match --torch-backend cpu + +RUN --mount=type=cache,target=/root/.cache/uv \ + uv pip install -r requirements/cpu-test.txt + ######################### DEV IMAGE ######################### FROM vllm-build AS vllm-dev @@ -97,28 +113,19 @@ RUN --mount=type=cache,target=/root/.cache/uv \ --mount=type=bind,source=.git,target=.git \ VLLM_TARGET_DEVICE=cpu python3 setup.py develop +COPY --from=vllm-test-deps /workspace/vllm/requirements/cpu-test.txt requirements/test.txt + RUN --mount=type=cache,target=/root/.cache/uv \ - --mount=type=bind,src=requirements/test.in,target=requirements/test.in \ - cp requirements/test.in requirements/test-cpu.in && \ - sed -i '/mamba_ssm/d' requirements/test-cpu.in && \ - uv pip compile requirements/test-cpu.in -o requirements/test.txt && \ uv pip install -r requirements/dev.txt && \ pre-commit install --hook-type pre-commit --hook-type commit-msg ENTRYPOINT ["bash"] ######################### TEST IMAGE ######################### -FROM base AS vllm-test +FROM vllm-test-deps AS vllm-test WORKDIR /workspace/ -RUN --mount=type=cache,target=/root/.cache/uv \ - --mount=type=bind,src=requirements/test.in,target=requirements/test.in \ - cp requirements/test.in requirements/test-cpu.in && \ - sed -i '/mamba_ssm/d' requirements/test-cpu.in && \ - uv pip compile requirements/test-cpu.in -o requirements/cpu-test.txt && \ - uv pip install -r requirements/cpu-test.txt - RUN --mount=type=cache,target=/root/.cache/uv \ --mount=type=bind,from=vllm-build,src=/workspace/vllm/dist,target=dist \ uv pip install dist/*.whl diff --git a/requirements/cpu-build.txt b/requirements/cpu-build.txt new file mode 100644 index 0000000000000..37f072202bd71 --- /dev/null +++ b/requirements/cpu-build.txt @@ -0,0 +1,12 @@ +# Temporarily used for x86 CPU backend to avoid performance regression of torch>2.6.0+cpu, +# see https://github.com/pytorch/pytorch/pull/151218 +cmake>=3.26.1 +ninja +packaging>=24.2 +setuptools>=77.0.3,<80.0.0 +setuptools-scm>=8 +--extra-index-url https://download.pytorch.org/whl/cpu +torch==2.6.0+cpu +wheel +jinja2>=3.1.6 +regex diff --git a/requirements/cpu.txt b/requirements/cpu.txt index 8742898cff00f..df3a3393563a0 100644 --- a/requirements/cpu.txt +++ b/requirements/cpu.txt @@ -8,7 +8,7 @@ numba == 0.61.2; python_version > '3.9' packaging>=24.2 setuptools>=77.0.3,<80.0.0 --extra-index-url https://download.pytorch.org/whl/cpu -torch==2.7.0+cpu; platform_machine == "x86_64" +torch==2.6.0+cpu; platform_machine == "x86_64" # torch>2.6.0+cpu has performance regression on x86 platform, see https://github.com/pytorch/pytorch/pull/151218 torch==2.7.0; platform_system == "Darwin" torch==2.7.0; platform_machine == "ppc64le" or platform_machine == "aarch64" @@ -23,6 +23,7 @@ datasets # for benchmark scripts # Intel Extension for PyTorch, only for x86_64 CPUs intel-openmp==2024.2.1; platform_machine == "x86_64" -intel_extension_for_pytorch==2.7.0; platform_machine == "x86_64" +intel_extension_for_pytorch==2.6.0; platform_machine == "x86_64" # torch>2.6.0+cpu has performance regression on x86 platform, see https://github.com/pytorch/pytorch/pull/151218 py-libnuma; platform_system != "Darwin" psutil; platform_system != "Darwin" +triton==3.2.0; platform_machine == "x86_64" # Triton is required for torch 2.6+cpu, as it is imported in torch.compile. diff --git a/tests/models/multimodal/generation/test_common.py b/tests/models/multimodal/generation/test_common.py index 496850b19af4f..9d63339737ce6 100644 --- a/tests/models/multimodal/generation/test_common.py +++ b/tests/models/multimodal/generation/test_common.py @@ -107,6 +107,8 @@ VLM_TEST_SETTINGS = { ), limit_mm_per_prompt={"image": 4}, )], + # TODO: Revert to "auto" when CPU backend can use torch > 2.6 + dtype="bfloat16" if current_platform.is_cpu() else "auto", marks=[pytest.mark.core_model, pytest.mark.cpu_model], ), "paligemma": VLMTestInfo( diff --git a/tests/models/multimodal/generation/vlm_utils/builders.py b/tests/models/multimodal/generation/vlm_utils/builders.py index 7d20dd66089bb..03c08240d6a81 100644 --- a/tests/models/multimodal/generation/vlm_utils/builders.py +++ b/tests/models/multimodal/generation/vlm_utils/builders.py @@ -203,6 +203,9 @@ def build_embedding_inputs_from_test_info( images = [asset.pil_image for asset in image_assets] embeds = test_info.convert_assets_to_embeddings(image_assets) + if test_info.dtype != "auto": + dtype = getattr(torch, test_info.dtype) # type: ignore + embeds = [e.to(dtype=dtype) for e in embeds] assert len(images) == len(model_prompts) inputs = build_single_image_inputs(images, model_prompts, size_wrapper) diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index c1bae033c2b4b..133881fd04990 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -54,6 +54,8 @@ else: if is_rocm_aiter_moe_enabled(): from vllm.model_executor.layers.fused_moe.rocm_aiter_fused_moe import ( # noqa: E501 rocm_aiter_grouped_topk as grouped_topk) +elif current_platform.is_cpu(): + pass else: from vllm.model_executor.layers.fused_moe.fused_moe import grouped_topk if current_platform.is_tpu(): diff --git a/vllm/model_executor/layers/quantization/ipex_quant.py b/vllm/model_executor/layers/quantization/ipex_quant.py index 31ad96eccaf3e..428e9b882bca7 100644 --- a/vllm/model_executor/layers/quantization/ipex_quant.py +++ b/vllm/model_executor/layers/quantization/ipex_quant.py @@ -15,7 +15,7 @@ from vllm.model_executor.layers.quantization.base_config import ( from vllm.model_executor.layers.quantization.gptq import GPTQLinearMethod from vllm.platforms import current_platform -MIN_IPEX_VERSION = "2.7.0" +MIN_IPEX_VERSION = "2.6.0" class IPEXConfig(QuantizationConfig): From 167aca45cbbfd8c56d700dfc9a6a5a3482a5bd74 Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Thu, 26 Jun 2025 18:35:16 +0800 Subject: [PATCH 149/211] [Misc] Use collapsible blocks for benchmark examples. (#20017) Signed-off-by: reidliu41 Co-authored-by: reidliu41 --- benchmarks/README.md | 94 ++++++++++++++++++++++++++++---------------- 1 file changed, 60 insertions(+), 34 deletions(-) diff --git a/benchmarks/README.md b/benchmarks/README.md index 2714b8b49821c..fb8690d42db98 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -4,7 +4,7 @@ This README guides you through running benchmark tests with the extensive datasets supported on vLLM. It’s a living document, updated as new features and datasets become available. -## Dataset Overview +**Dataset Overview** @@ -82,7 +82,10 @@ become available. **Note**: HuggingFace dataset's `dataset-name` should be set to `hf` --- -## Example - Online Benchmark +
+🚀 Example - Online Benchmark + +
First start serving your model @@ -130,7 +133,8 @@ P99 ITL (ms): 8.39 ================================================== ``` -### Custom Dataset +**Custom Dataset** + If the dataset you want to benchmark is not supported yet in vLLM, even then you can benchmark on it using `CustomDataset`. Your data needs to be in `.jsonl` format and needs to have "prompt" field per entry, e.g., data.jsonl ``` @@ -162,7 +166,7 @@ python3 benchmarks/benchmark_serving.py --port 9001 --save-result --save-detaile You can skip applying chat template if your data already has it by using `--custom-skip-chat-template`. -### VisionArena Benchmark for Vision Language Models +**VisionArena Benchmark for Vision Language Models** ```bash # need a model with vision capability here @@ -180,7 +184,7 @@ python3 vllm/benchmarks/benchmark_serving.py \ --num-prompts 1000 ``` -### InstructCoder Benchmark with Speculative Decoding +**InstructCoder Benchmark with Speculative Decoding** ``` bash VLLM_USE_V1=1 vllm serve meta-llama/Meta-Llama-3-8B-Instruct \ @@ -197,7 +201,7 @@ python3 benchmarks/benchmark_serving.py \ --num-prompts 2048 ``` -### Other HuggingFaceDataset Examples +**Other HuggingFaceDataset Examples** ```bash vllm serve Qwen/Qwen2-VL-7B-Instruct --disable-log-requests @@ -251,7 +255,7 @@ python3 vllm/benchmarks/benchmark_serving.py \ --num-prompts 80 ``` -### Running With Sampling Parameters +**Running With Sampling Parameters** When using OpenAI-compatible backends such as `vllm`, optional sampling parameters can be specified. Example client command: @@ -269,7 +273,7 @@ python3 vllm/benchmarks/benchmark_serving.py \ --num-prompts 10 ``` -### Running With Ramp-Up Request Rate +**Running With Ramp-Up Request Rate** The benchmark tool also supports ramping up the request rate over the duration of the benchmark run. This can be useful for stress testing the @@ -284,8 +288,12 @@ The following arguments can be used to control the ramp-up: - `--ramp-up-start-rps`: The request rate at the beginning of the benchmark. - `--ramp-up-end-rps`: The request rate at the end of the benchmark. ---- -## Example - Offline Throughput Benchmark +
+ +
+📈 Example - Offline Throughput Benchmark + +
```bash python3 vllm/benchmarks/benchmark_throughput.py \ @@ -303,7 +311,7 @@ Total num prompt tokens: 5014 Total num output tokens: 1500 ``` -### VisionArena Benchmark for Vision Language Models +**VisionArena Benchmark for Vision Language Models** ``` bash python3 vllm/benchmarks/benchmark_throughput.py \ @@ -323,7 +331,7 @@ Total num prompt tokens: 14527 Total num output tokens: 1280 ``` -### InstructCoder Benchmark with Speculative Decoding +**InstructCoder Benchmark with Speculative Decoding** ``` bash VLLM_WORKER_MULTIPROC_METHOD=spawn \ @@ -347,7 +355,7 @@ Total num prompt tokens: 261136 Total num output tokens: 204800 ``` -### Other HuggingFaceDataset Examples +**Other HuggingFaceDataset Examples** **`lmms-lab/LLaVA-OneVision-Data`** @@ -386,7 +394,7 @@ python3 benchmarks/benchmark_throughput.py \ --num-prompts 10 ``` -### Benchmark with LoRA Adapters +**Benchmark with LoRA Adapters** ``` bash # download dataset @@ -403,18 +411,22 @@ python3 vllm/benchmarks/benchmark_throughput.py \ --lora-path yard1/llama-2-7b-sql-lora-test ``` ---- -## Example - Structured Output Benchmark +
+ +
+🛠️ Example - Structured Output Benchmark + +
Benchmark the performance of structured output generation (JSON, grammar, regex). -### Server Setup +**Server Setup** ```bash vllm serve NousResearch/Hermes-3-Llama-3.1-8B --disable-log-requests ``` -### JSON Schema Benchmark +**JSON Schema Benchmark** ```bash python3 benchmarks/benchmark_serving_structured_output.py \ @@ -426,7 +438,7 @@ python3 benchmarks/benchmark_serving_structured_output.py \ --num-prompts 1000 ``` -### Grammar-based Generation Benchmark +**Grammar-based Generation Benchmark** ```bash python3 benchmarks/benchmark_serving_structured_output.py \ @@ -438,7 +450,7 @@ python3 benchmarks/benchmark_serving_structured_output.py \ --num-prompts 1000 ``` -### Regex-based Generation Benchmark +**Regex-based Generation Benchmark** ```bash python3 benchmarks/benchmark_serving_structured_output.py \ @@ -449,7 +461,7 @@ python3 benchmarks/benchmark_serving_structured_output.py \ --num-prompts 1000 ``` -### Choice-based Generation Benchmark +**Choice-based Generation Benchmark** ```bash python3 benchmarks/benchmark_serving_structured_output.py \ @@ -460,7 +472,7 @@ python3 benchmarks/benchmark_serving_structured_output.py \ --num-prompts 1000 ``` -### XGrammar Benchmark Dataset +**XGrammar Benchmark Dataset** ```bash python3 benchmarks/benchmark_serving_structured_output.py \ @@ -471,12 +483,16 @@ python3 benchmarks/benchmark_serving_structured_output.py \ --num-prompts 1000 ``` ---- -## Example - Long Document QA Throughput Benchmark +
+ +
+📚 Example - Long Document QA Benchmark + +
Benchmark the performance of long document question-answering with prefix caching. -### Basic Long Document QA Test +**Basic Long Document QA Test** ```bash python3 benchmarks/benchmark_long_document_qa_throughput.py \ @@ -488,7 +504,7 @@ python3 benchmarks/benchmark_long_document_qa_throughput.py \ --repeat-count 5 ``` -### Different Repeat Modes +**Different Repeat Modes** ```bash # Random mode (default) - shuffle prompts randomly @@ -519,12 +535,16 @@ python3 benchmarks/benchmark_long_document_qa_throughput.py \ --repeat-mode interleave ``` ---- -## Example - Prefix Caching Benchmark +
+ +
+🗂️ Example - Prefix Caching Benchmark + +
Benchmark the efficiency of automatic prefix caching. -### Fixed Prompt with Prefix Caching +**Fixed Prompt with Prefix Caching** ```bash python3 benchmarks/benchmark_prefix_caching.py \ @@ -535,7 +555,7 @@ python3 benchmarks/benchmark_prefix_caching.py \ --input-length-range 128:256 ``` -### ShareGPT Dataset with Prefix Caching +**ShareGPT Dataset with Prefix Caching** ```bash # download dataset @@ -550,12 +570,16 @@ python3 benchmarks/benchmark_prefix_caching.py \ --input-length-range 128:256 ``` ---- -## Example - Request Prioritization Benchmark +
+ +
+⚡ Example - Request Prioritization Benchmark + +
Benchmark the performance of request prioritization in vLLM. -### Basic Prioritization Test +**Basic Prioritization Test** ```bash python3 benchmarks/benchmark_prioritization.py \ @@ -566,7 +590,7 @@ python3 benchmarks/benchmark_prioritization.py \ --scheduling-policy priority ``` -### Multiple Sequences per Prompt +**Multiple Sequences per Prompt** ```bash python3 benchmarks/benchmark_prioritization.py \ @@ -577,3 +601,5 @@ python3 benchmarks/benchmark_prioritization.py \ --scheduling-policy priority \ --n 2 ``` + +
From 84c260caeb88d25840ec0653c0b978a46eae6a84 Mon Sep 17 00:00:00 2001 From: Michael Yao Date: Thu, 26 Jun 2025 18:41:51 +0800 Subject: [PATCH 150/211] [Docs] Improve frameworks/helm.md (#20113) Signed-off-by: windsonsea --- docs/deployment/frameworks/helm.md | 120 +++++++++++++++-------------- 1 file changed, 64 insertions(+), 56 deletions(-) diff --git a/docs/deployment/frameworks/helm.md b/docs/deployment/frameworks/helm.md index cff8af2c09d29..d929665e8a3df 100644 --- a/docs/deployment/frameworks/helm.md +++ b/docs/deployment/frameworks/helm.md @@ -5,9 +5,9 @@ title: Helm A Helm chart to deploy vLLM for Kubernetes -Helm is a package manager for Kubernetes. It will help you to deploy vLLM on k8s and automate the deployment of vLLM Kubernetes applications. With Helm, you can deploy the same framework architecture with different configurations to multiple namespaces by overriding variable values. +Helm is a package manager for Kubernetes. It helps automate the deployment of vLLM applications on Kubernetes. With Helm, you can deploy the same framework architecture with different configurations to multiple namespaces by overriding variable values. -This guide will walk you through the process of deploying vLLM with Helm, including the necessary prerequisites, steps for helm installation and documentation on architecture and values file. +This guide will walk you through the process of deploying vLLM with Helm, including the necessary prerequisites, steps for Helm installation and documentation on architecture and values file. ## Prerequisites @@ -16,17 +16,23 @@ Before you begin, ensure that you have the following: - A running Kubernetes cluster - NVIDIA Kubernetes Device Plugin (`k8s-device-plugin`): This can be found at [https://github.com/NVIDIA/k8s-device-plugin](https://github.com/NVIDIA/k8s-device-plugin) - Available GPU resources in your cluster -- S3 with the model which will be deployed +- An S3 with the model which will be deployed ## Installing the chart To install the chart with the release name `test-vllm`: ```bash -helm upgrade --install --create-namespace --namespace=ns-vllm test-vllm . -f values.yaml --set secrets.s3endpoint=$ACCESS_POINT --set secrets.s3bucketname=$BUCKET --set secrets.s3accesskeyid=$ACCESS_KEY --set secrets.s3accesskey=$SECRET_KEY +helm upgrade --install --create-namespace \ + --namespace=ns-vllm test-vllm . \ + -f values.yaml \ + --set secrets.s3endpoint=$ACCESS_POINT \ + --set secrets.s3bucketname=$BUCKET \ + --set secrets.s3accesskeyid=$ACCESS_KEY \ + --set secrets.s3accesskey=$SECRET_KEY ``` -## Uninstalling the Chart +## Uninstalling the chart To uninstall the `test-vllm` deployment: @@ -39,57 +45,59 @@ chart **including persistent volumes** and deletes the release. ## Architecture -![](../../assets/deployment/architecture_helm_deployment.png) +![helm deployment architecture](../../assets/deployment/architecture_helm_deployment.png) ## Values -| Key | Type | Default | Description | -|--------------------------------------------|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------| -| autoscaling | object | {"enabled":false,"maxReplicas":100,"minReplicas":1,"targetCPUUtilizationPercentage":80} | Autoscaling configuration | -| autoscaling.enabled | bool | false | Enable autoscaling | -| autoscaling.maxReplicas | int | 100 | Maximum replicas | -| autoscaling.minReplicas | int | 1 | Minimum replicas | -| autoscaling.targetCPUUtilizationPercentage | int | 80 | Target CPU utilization for autoscaling | -| configs | object | {} | Configmap | -| containerPort | int | 8000 | Container port | -| customObjects | list | [] | Custom Objects configuration | -| deploymentStrategy | object | {} | Deployment strategy configuration | -| externalConfigs | list | [] | External configuration | -| extraContainers | list | [] | Additional containers configuration | -| extraInit | object | {"pvcStorage":"1Gi","s3modelpath":"relative_s3_model_path/opt-125m", "awsEc2MetadataDisabled": true} | Additional configuration for the init container | -| extraInit.pvcStorage | string | "50Gi" | Storage size of the s3 | -| extraInit.s3modelpath | string | "relative_s3_model_path/opt-125m" | Path of the model on the s3 which hosts model weights and config files | -| extraInit.awsEc2MetadataDisabled | boolean | true | Disables the use of the Amazon EC2 instance metadata service | -| extraPorts | list | [] | Additional ports configuration | -| gpuModels | list | ["TYPE_GPU_USED"] | Type of gpu used | -| image | object | {"command":["vllm","serve","/data/","--served-model-name","opt-125m","--host","0.0.0.0","--port","8000"],"repository":"vllm/vllm-openai","tag":"latest"} | Image configuration | -| image.command | list | ["vllm","serve","/data/","--served-model-name","opt-125m","--host","0.0.0.0","--port","8000"] | Container launch command | -| image.repository | string | "vllm/vllm-openai" | Image repository | -| image.tag | string | "latest" | Image tag | -| livenessProbe | object | {"failureThreshold":3,"httpGet":{"path":"/health","port":8000},"initialDelaySeconds":15,"periodSeconds":10} | Liveness probe configuration | -| livenessProbe.failureThreshold | int | 3 | Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not alive | -| livenessProbe.httpGet | object | {"path":"/health","port":8000} | Configuration of the Kubelet http request on the server | -| livenessProbe.httpGet.path | string | "/health" | Path to access on the HTTP server | -| livenessProbe.httpGet.port | int | 8000 | Name or number of the port to access on the container, on which the server is listening | -| livenessProbe.initialDelaySeconds | int | 15 | Number of seconds after the container has started before liveness probe is initiated | -| livenessProbe.periodSeconds | int | 10 | How often (in seconds) to perform the liveness probe | -| maxUnavailablePodDisruptionBudget | string | "" | Disruption Budget Configuration | -| readinessProbe | object | {"failureThreshold":3,"httpGet":{"path":"/health","port":8000},"initialDelaySeconds":5,"periodSeconds":5} | Readiness probe configuration | -| readinessProbe.failureThreshold | int | 3 | Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not ready | -| readinessProbe.httpGet | object | {"path":"/health","port":8000} | Configuration of the Kubelet http request on the server | -| readinessProbe.httpGet.path | string | "/health" | Path to access on the HTTP server | -| readinessProbe.httpGet.port | int | 8000 | Name or number of the port to access on the container, on which the server is listening | -| readinessProbe.initialDelaySeconds | int | 5 | Number of seconds after the container has started before readiness probe is initiated | -| readinessProbe.periodSeconds | int | 5 | How often (in seconds) to perform the readiness probe | -| replicaCount | int | 1 | Number of replicas | -| resources | object | {"limits":{"cpu":4,"memory":"16Gi","nvidia.com/gpu":1},"requests":{"cpu":4,"memory":"16Gi","nvidia.com/gpu":1}} | Resource configuration | -| resources.limits."nvidia.com/gpu" | int | 1 | Number of gpus used | -| resources.limits.cpu | int | 4 | Number of CPUs | -| resources.limits.memory | string | "16Gi" | CPU memory configuration | -| resources.requests."nvidia.com/gpu" | int | 1 | Number of gpus used | -| resources.requests.cpu | int | 4 | Number of CPUs | -| resources.requests.memory | string | "16Gi" | CPU memory configuration | -| secrets | object | {} | Secrets configuration | -| serviceName | string | Service name | | -| servicePort | int | 80 | Service port | -| labels.environment | string | test | Environment name | +The following table describes configurable parameters of the chart in `values.yaml`: + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| autoscaling | object | {"enabled":false,"maxReplicas":100,"minReplicas":1,"targetCPUUtilizationPercentage":80} | Autoscaling configuration | +| autoscaling.enabled | bool | false | Enable autoscaling | +| autoscaling.maxReplicas | int | 100 | Maximum replicas | +| autoscaling.minReplicas | int | 1 | Minimum replicas | +| autoscaling.targetCPUUtilizationPercentage | int | 80 | Target CPU utilization for autoscaling | +| configs | object | {} | Configmap | +| containerPort | int | 8000 | Container port | +| customObjects | list | [] | Custom Objects configuration | +| deploymentStrategy | object | {} | Deployment strategy configuration | +| externalConfigs | list | [] | External configuration | +| extraContainers | list | [] | Additional containers configuration | +| extraInit | object | {"pvcStorage":"1Gi","s3modelpath":"relative_s3_model_path/opt-125m", "awsEc2MetadataDisabled": true} | Additional configuration for the init container | +| extraInit.pvcStorage | string | "1Gi" | Storage size of the s3 | +| extraInit.s3modelpath | string | "relative_s3_model_path/opt-125m" | Path of the model on the s3 which hosts model weights and config files | +| extraInit.awsEc2MetadataDisabled | boolean | true | Disables the use of the Amazon EC2 instance metadata service | +| extraPorts | list | [] | Additional ports configuration | +| gpuModels | list | ["TYPE_GPU_USED"] | Type of gpu used | +| image | object | {"command":["vllm","serve","/data/","--served-model-name","opt-125m","--host","0.0.0.0","--port","8000"],"repository":"vllm/vllm-openai","tag":"latest"} | Image configuration | +| image.command | list | ["vllm","serve","/data/","--served-model-name","opt-125m","--host","0.0.0.0","--port","8000"] | Container launch command | +| image.repository | string | "vllm/vllm-openai" | Image repository | +| image.tag | string | "latest" | Image tag | +| livenessProbe | object | {"failureThreshold":3,"httpGet":{"path":"/health","port":8000},"initialDelaySeconds":15,"periodSeconds":10} | Liveness probe configuration | +| livenessProbe.failureThreshold | int | 3 | Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not alive | +| livenessProbe.httpGet | object | {"path":"/health","port":8000} | Configuration of the kubelet http request on the server | +| livenessProbe.httpGet.path | string | "/health" | Path to access on the HTTP server | +| livenessProbe.httpGet.port | int | 8000 | Name or number of the port to access on the container, on which the server is listening | +| livenessProbe.initialDelaySeconds | int | 15 | Number of seconds after the container has started before liveness probe is initiated | +| livenessProbe.periodSeconds | int | 10 | How often (in seconds) to perform the liveness probe | +| maxUnavailablePodDisruptionBudget | string | "" | Disruption Budget Configuration | +| readinessProbe | object | {"failureThreshold":3,"httpGet":{"path":"/health","port":8000},"initialDelaySeconds":5,"periodSeconds":5} | Readiness probe configuration | +| readinessProbe.failureThreshold | int | 3 | Number of times after which if a probe fails in a row, Kubernetes considers that the overall check has failed: the container is not ready | +| readinessProbe.httpGet | object | {"path":"/health","port":8000} | Configuration of the kubelet http request on the server | +| readinessProbe.httpGet.path | string | "/health" | Path to access on the HTTP server | +| readinessProbe.httpGet.port | int | 8000 | Name or number of the port to access on the container, on which the server is listening | +| readinessProbe.initialDelaySeconds | int | 5 | Number of seconds after the container has started before readiness probe is initiated | +| readinessProbe.periodSeconds | int | 5 | How often (in seconds) to perform the readiness probe | +| replicaCount | int | 1 | Number of replicas | +| resources | object | {"limits":{"cpu":4,"memory":"16Gi","nvidia.com/gpu":1},"requests":{"cpu":4,"memory":"16Gi","nvidia.com/gpu":1}} | Resource configuration | +| resources.limits."nvidia.com/gpu" | int | 1 | Number of GPUs used | +| resources.limits.cpu | int | 4 | Number of CPUs | +| resources.limits.memory | string | "16Gi" | CPU memory configuration | +| resources.requests."nvidia.com/gpu" | int | 1 | Number of GPUs used | +| resources.requests.cpu | int | 4 | Number of CPUs | +| resources.requests.memory | string | "16Gi" | CPU memory configuration | +| secrets | object | {} | Secrets configuration | +| serviceName | string | "" | Service name | +| servicePort | int | 80 | Service port | +| labels.environment | string | test | Environment name | From 27c065df50407f6b801d0053378c442ccea37d39 Mon Sep 17 00:00:00 2001 From: TJian Date: Thu, 26 Jun 2025 05:42:31 -0700 Subject: [PATCH 151/211] [Bugfix][V1][ROCm] Fix AITER Flash Attention Backend (Fix API Break and Local Attention Logic: affecting Llama4) (#19904) Signed-off-by: tjtanaa --- vllm/attention/layer.py | 14 ++++-- vllm/v1/attention/backends/rocm_aiter_fa.py | 55 ++++++++++++++------- 2 files changed, 46 insertions(+), 23 deletions(-) diff --git a/vllm/attention/layer.py b/vllm/attention/layer.py index f7d230c5d7d6f..0c79aaf135518 100644 --- a/vllm/attention/layer.py +++ b/vllm/attention/layer.py @@ -306,12 +306,16 @@ class MultiHeadAttention(nn.Module): block_size=16, is_attention_free=False) backend = backend_name_to_enum(attn_backend.get_name()) - if backend in {_Backend.FLASH_ATTN, _Backend.FLASH_ATTN_VLLM_V1}: - backend = _Backend.XFORMERS + if current_platform.is_rocm(): + # currently, only torch_sdpa is supported on rocm + self.attn_backend = _Backend.TORCH_SDPA + else: + if backend in {_Backend.FLASH_ATTN, _Backend.FLASH_ATTN_VLLM_V1}: + backend = _Backend.XFORMERS - self.attn_backend = backend if backend in { - _Backend.TORCH_SDPA, _Backend.XFORMERS, _Backend.PALLAS_VLLM_V1 - } else _Backend.TORCH_SDPA + self.attn_backend = backend if backend in { + _Backend.TORCH_SDPA, _Backend.XFORMERS, _Backend.PALLAS_VLLM_V1 + } else _Backend.TORCH_SDPA def forward( self, diff --git a/vllm/v1/attention/backends/rocm_aiter_fa.py b/vllm/v1/attention/backends/rocm_aiter_fa.py index e011e95efd41b..dc8ff22613061 100644 --- a/vllm/v1/attention/backends/rocm_aiter_fa.py +++ b/vllm/v1/attention/backends/rocm_aiter_fa.py @@ -243,8 +243,8 @@ class AiterFlashAttentionMetadataBuilder: self.runner.device, non_blocking=True) local_seqused_k = torch.from_numpy(virt_k_seqlens_np).to( self.runner.device, non_blocking=True) - local_max_query_len = seqlens_q_local_np.max() - local_max_seq_len = virt_k_seqlens_np.max() + local_max_query_len = int(seqlens_q_local_np.max()) + local_max_seq_len = int(virt_k_seqlens_np.max()) local_scheduler_metadata = schedule( batch_size=local_query_start_loc.shape[0] - 1, cu_query_lens=local_query_start_loc, @@ -253,6 +253,17 @@ class AiterFlashAttentionMetadataBuilder: max_seq_len=local_max_seq_len, causal=True) + local_cu_seq_lens = torch.zeros(virt_k_seqlens_np.shape[0] + 1, + dtype=torch.int32, + device=self.runner.device) + local_cu_seq_lens[1:] = torch.cumsum( + torch.from_numpy(virt_k_seqlens_np).to( + device=self.runner.device, + dtype=torch.int32, + non_blocking=True), + dim=0) + + local_attn_metadata = \ AiterFlashAttentionMetadata.LocalAttentionMetadata( local_query_start_loc=local_query_start_loc, @@ -260,6 +271,7 @@ class AiterFlashAttentionMetadataBuilder: local_block_table=virt_block_table_tensor, local_max_query_len=local_max_query_len, local_max_seq_len=local_max_seq_len, + local_cu_seq_lens=local_cu_seq_lens, local_scheduler_metadata=local_scheduler_metadata, ) @@ -368,6 +380,7 @@ class AiterFlashAttentionMetadata: local_block_table: torch.Tensor local_max_query_len: int local_max_seq_len: int + local_cu_seq_lens: torch.Tensor local_scheduler_metadata: Optional[torch.Tensor] local_attn_metadata: Optional[LocalAttentionMetadata] = None @@ -387,6 +400,7 @@ class AiterFlashAttentionImpl(AttentionImpl): blocksparse_params: Optional[dict[str, Any]] = None, logits_soft_cap: Optional[float] = None, attn_type: AttentionType = AttentionType.DECODER, + kv_sharing_target_layer_name: Optional[int] = None, use_irope: bool = False, ) -> None: if blocksparse_params is not None: @@ -408,6 +422,7 @@ class AiterFlashAttentionImpl(AttentionImpl): # In flash-attn, setting logits_soft_cap as 0 means no soft cap. logits_soft_cap = 0. self.logits_soft_cap = logits_soft_cap + self.kv_sharing_target_layer_name = kv_sharing_target_layer_name assert self.num_heads % self.num_kv_heads == 0 self.num_queries_per_kv = self.num_heads // self.num_kv_heads @@ -478,22 +493,25 @@ class AiterFlashAttentionImpl(AttentionImpl): # performance to make sure it does not introduce any overhead. num_actual_tokens = attn_metadata.num_actual_tokens - # Reshape the input keys and values and store them in the cache. - # NOTE(woosuk): Here, key and value are padded while slot_mapping is - # not padded. However, we don't need to do key[:num_actual_tokens] and - # value[:num_actual_tokens] because the reshape_and_cache_flash op uses - # the slot_mapping's shape to determine the number of actual tokens. key_cache, value_cache = kv_cache.unbind(0) - torch.ops._C_cache_ops.reshape_and_cache_flash( - key, - value, - key_cache, - value_cache, - attn_metadata.slot_mapping, - self.kv_cache_dtype, - layer._k_scale, - layer._v_scale, - ) + if self.kv_sharing_target_layer_name is None: + # Reshape the input keys and values and store them in the cache. + # Skip this if sharing KV cache with an earlier attention layer. + # NOTE(woosuk): Here, key and value are padded while slot_mapping is + # not padded. However, we don't need to do key[:num_actual_tokens] + # and value[:num_actual_tokens] because the reshape_and_cache_flash + # op uses the slot_mapping's shape to determine the number of + # actual tokens. + torch.ops._C_cache_ops.reshape_and_cache_flash( + key, + value, + key_cache, + value_cache, + attn_metadata.slot_mapping, + self.kv_cache_dtype, + layer._k_scale, + layer._v_scale, + ) if self.kv_cache_dtype.startswith("fp8"): key_cache = key_cache.view(torch.float8_e4m3fnuz) @@ -541,7 +559,8 @@ class AiterFlashAttentionImpl(AttentionImpl): alibi_slopes=self.alibi_slopes, window_size=self.sliding_window, block_table=block_table, - cu_seqlens_k=cu_seq_lens, + cu_seqlens_k=(cu_seq_lens if not use_local_attn else + local_metadata.local_cu_seq_lens), ) _, num_heads, head_size = query.shape From 1f5d178e9cc02a49e9d734420b0c0afaff2fd7af Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Thu, 26 Jun 2025 23:32:22 +0900 Subject: [PATCH 152/211] Revert "[Bugfix] default set cuda_graph_sizes to max_num_seqs for v1 engine" (#20128) --- vllm/config.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index e90ad5e9c8b65..96ea47a0dce38 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -2042,12 +2042,11 @@ class SchedulerConfig: NOTE: This will be replaced by speculative config in the future; it is present to enable correctness tests until then.""" - cuda_graph_sizes: list[int] = field(default_factory=list) - """Cuda graph capture sizes - 1. if none provided, then default set to [max_num_seqs] - 2. if one value is provided, then the capture list would follow the + cuda_graph_sizes: list[int] = field(default_factory=lambda: [512]) + """Cuda graph capture sizes, default is 512. + 1. if one value is provided, then the capture list would follow the pattern: [1, 2, 4] + [i for i in range(8, cuda_graph_sizes + 1, 8)] - 3. more than one value (e.g. 1 2 128) is provided, then the capture list + 2. more than one value (e.g. 1 2 128) is provided, then the capture list will follow the provided list.""" delay_factor: float = 0.0 @@ -2212,10 +2211,6 @@ class SchedulerConfig: self.max_num_partial_prefills, self.max_long_partial_prefills, self.long_prefill_token_threshold) - # If cuda_graph_sizes is not specified, default set to [max_num_seqs]. - if not self.cuda_graph_sizes: - self.cuda_graph_sizes = [self.max_num_seqs] - @model_validator(mode='after') def _verify_args(self) -> Self: if (self.max_num_batched_tokens < self.max_model_len From c894c5dc1ffadee8979f3a051bfccea0441ae09a Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Thu, 26 Jun 2025 10:33:13 -0400 Subject: [PATCH 153/211] [Bug Fix] Fix address/port already in use error for deep_ep test (#20094) Signed-off-by: yewentao256 --- tests/kernels/moe/deepep_utils.py | 5 ++++- vllm/model_executor/layers/fused_moe/utils.py | 9 +++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/tests/kernels/moe/deepep_utils.py b/tests/kernels/moe/deepep_utils.py index 117f1babdf62a..e4cd8386e1020 100644 --- a/tests/kernels/moe/deepep_utils.py +++ b/tests/kernels/moe/deepep_utils.py @@ -4,6 +4,7 @@ DeepEP test utilities """ import dataclasses import importlib +import os import traceback from typing import Callable, Optional @@ -13,6 +14,8 @@ from torch.multiprocessing import ( spawn) # pyright: ignore[reportPrivateImportUsage] from typing_extensions import Concatenate, ParamSpec +from vllm.model_executor.layers.fused_moe.utils import find_free_port + has_deep_ep = importlib.util.find_spec("deep_ep") is not None if has_deep_ep: from vllm.model_executor.layers.fused_moe.deepep_ht_prepare_finalize import ( # noqa: E501 @@ -92,7 +95,7 @@ def parallel_launch( world_size, world_size, 0, - "tcp://localhost:29500", + f"tcp://{os.getenv('LOCALHOST', 'localhost')}:{find_free_port()}", worker, ) + args, nprocs=world_size, diff --git a/vllm/model_executor/layers/fused_moe/utils.py b/vllm/model_executor/layers/fused_moe/utils.py index 692482c2ea692..8f3191db680fd 100644 --- a/vllm/model_executor/layers/fused_moe/utils.py +++ b/vllm/model_executor/layers/fused_moe/utils.py @@ -1,5 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import socket +from contextlib import closing from math import prod from typing import Optional @@ -96,3 +98,10 @@ def _fp8_perm(m: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: return m.view(dtype=torch.uint8)[idx, ...].view(dtype=m.dtype) else: return m[idx, ...] + + +def find_free_port(): + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: + s.bind(('', 0)) + s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + return s.getsockname()[1] \ No newline at end of file From 0907d507bf389b908a267155de4162d725ae1c54 Mon Sep 17 00:00:00 2001 From: "wang.yuqi" Date: Thu, 26 Jun 2025 22:34:17 +0800 Subject: [PATCH 154/211] [Doc] Automatically signed-off by PyCharm (#20120) Signed-off-by: wang.yuqi --- docs/contributing/README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/contributing/README.md b/docs/contributing/README.md index c0c338b426951..d472366c43b5a 100644 --- a/docs/contributing/README.md +++ b/docs/contributing/README.md @@ -151,6 +151,11 @@ the terms of the DCO. Using `-s` with `git commit` will automatically add this header. +!!! tip + If you develop using PyCharm, there is a `Show Commit Options` icon to the right of the `Commit and Push...` button in the `Commit` window. + Opening it will bring up a `git` window where you can modify the `Author` and enable `Sign-off commit`. + This ensures that all your commits are automatically signed-off by PyCharm. + ### PR Title and Classification Only specific types of PRs will be reviewed. The PR title is prefixed From 6393b039865b35c79c5c397e5dca0218d3c26622 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 26 Jun 2025 23:18:36 +0800 Subject: [PATCH 155/211] [Doc] Auto sign-off for VSCode (#20132) Signed-off-by: DarkLight1337 --- docs/contributing/README.md | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/contributing/README.md b/docs/contributing/README.md index d472366c43b5a..83525436be139 100644 --- a/docs/contributing/README.md +++ b/docs/contributing/README.md @@ -152,9 +152,12 @@ the terms of the DCO. Using `-s` with `git commit` will automatically add this header. !!! tip - If you develop using PyCharm, there is a `Show Commit Options` icon to the right of the `Commit and Push...` button in the `Commit` window. - Opening it will bring up a `git` window where you can modify the `Author` and enable `Sign-off commit`. - This ensures that all your commits are automatically signed-off by PyCharm. + You can enable automatic sign-off via your IDE: + + - **PyCharm**: Click on the `Show Commit Options` icon to the right of the `Commit and Push...` button in the `Commit` window. + It will bring up a `git` window where you can modify the `Author` and enable `Sign-off commit`. + - **VSCode**: Open the [Settings editor](https://code.visualstudio.com/docs/configure/settings) + and enable the `Git: Always Sign Off` (`git.alwaysSignOff`) field. ### PR Title and Classification From 34878a0b481bbbb65bf17923b1eae5ebbb56f896 Mon Sep 17 00:00:00 2001 From: Cyrus Leung Date: Thu, 26 Jun 2025 23:18:49 +0800 Subject: [PATCH 156/211] [Doc] Rename page titles (#20130) Signed-off-by: DarkLight1337 --- docs/contributing/incremental_build.md | 2 +- docs/contributing/model/README.md | 6 +++--- docs/contributing/model/basic.md | 2 +- docs/contributing/model/registration.md | 2 +- docs/contributing/model/tests.md | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/contributing/incremental_build.md b/docs/contributing/incremental_build.md index 8efa34825ecaf..14c3aaead51ed 100644 --- a/docs/contributing/incremental_build.md +++ b/docs/contributing/incremental_build.md @@ -1,4 +1,4 @@ -# Incremental Compilation Workflow for vLLM Development +# Incremental Compilation Workflow When working on vLLM's C++/CUDA kernels located in the `csrc/` directory, recompiling the entire project with `uv pip install -e .` for every change can be time-consuming. An incremental compilation workflow using CMake allows for faster iteration by only recompiling the necessary components after an initial setup. This guide details how to set up and use such a workflow, which complements your editable Python installation. diff --git a/docs/contributing/model/README.md b/docs/contributing/model/README.md index 82541924bc028..63abb7991050d 100644 --- a/docs/contributing/model/README.md +++ b/docs/contributing/model/README.md @@ -14,9 +14,9 @@ However, this can be more complex for models that include new operators (e.g., a Read through these pages for a step-by-step guide: -- [Implementing a Basic Model](basic.md) -- [Registering a Model to vLLM](registration.md) -- [Writing Unit Tests](tests.md) +- [Basic Model](basic.md) +- [Registering a Model](registration.md) +- [Unit Testing](tests.md) - [Multi-Modal Support](multimodal.md) !!! tip diff --git a/docs/contributing/model/basic.md b/docs/contributing/model/basic.md index 644d21482ef6f..d552cd06be204 100644 --- a/docs/contributing/model/basic.md +++ b/docs/contributing/model/basic.md @@ -1,5 +1,5 @@ --- -title: Implementing a Basic Model +title: Basic Model --- [](){ #new-model-basic } diff --git a/docs/contributing/model/registration.md b/docs/contributing/model/registration.md index a6dc1e32dfb95..758caa72cd4a0 100644 --- a/docs/contributing/model/registration.md +++ b/docs/contributing/model/registration.md @@ -1,5 +1,5 @@ --- -title: Registering a Model to vLLM +title: Registering a Model --- [](){ #new-model-registration } diff --git a/docs/contributing/model/tests.md b/docs/contributing/model/tests.md index a8cb457453b91..c7bcc02a8b809 100644 --- a/docs/contributing/model/tests.md +++ b/docs/contributing/model/tests.md @@ -1,5 +1,5 @@ --- -title: Writing Unit Tests +title: Unit Testing --- [](){ #new-model-tests } From 0bceac9810a5f51b06bf3e4cace182b639326ed2 Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Thu, 26 Jun 2025 11:19:46 -0400 Subject: [PATCH 157/211] Spam folks if config.py changes (#20131) Signed-off-by: Tyler Michael Smith --- .github/CODEOWNERS | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e98ccd035ee90..da7f89747a16d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -18,6 +18,10 @@ /vllm/entrypoints @aarnphm CMakeLists.txt @tlrmchlsmth +# Any change to the VllmConfig changes can have a large user-facing impact, +# so spam a lot of people +/vllm/config.py @simon-mo @WoosukKwon @youkaichao @robertgshaw2-redhat @mgoin @tlrmchlsmth @houseroad @hmellor + # vLLM V1 /vllm/v1 @WoosukKwon @robertgshaw2-redhat @njhill @ywang96 @comaniac @alexm-redhat /vllm/v1/structured_output @mgoin @russellb @aarnphm From b69781f107b7ad847a351f584178cfafbee2b32a Mon Sep 17 00:00:00 2001 From: Kunshang Ji Date: Fri, 27 Jun 2025 00:27:18 +0800 Subject: [PATCH 158/211] [Hardware][Intel GPU] Add v1 Intel GPU support with Flash attention backend. (#19560) Signed-off-by: Kunshang Ji --- .../scripts/hardware_ci/run-xpu-test.sh | 1 + docker/Dockerfile.xpu | 1 + requirements/xpu.txt | 1 + vllm/_ipex_ops.py | 105 +++++++++++ vllm/attention/utils/fa_utils.py | 15 +- vllm/executor/ray_distributed_executor.py | 2 +- vllm/platforms/xpu.py | 102 +++++++---- vllm/v1/attention/backends/flash_attn.py | 12 +- vllm/v1/worker/xpu_model_runner.py | 32 ++++ vllm/v1/worker/xpu_worker.py | 164 ++++++++++++++++++ 10 files changed, 393 insertions(+), 42 deletions(-) create mode 100644 vllm/v1/worker/xpu_model_runner.py create mode 100644 vllm/v1/worker/xpu_worker.py diff --git a/.buildkite/scripts/hardware_ci/run-xpu-test.sh b/.buildkite/scripts/hardware_ci/run-xpu-test.sh index f54010c4231f9..827649bfcf548 100644 --- a/.buildkite/scripts/hardware_ci/run-xpu-test.sh +++ b/.buildkite/scripts/hardware_ci/run-xpu-test.sh @@ -28,4 +28,5 @@ docker run \ sh -c ' VLLM_USE_V1=0 python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m VLLM_USE_V1=0 python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m -tp 2 + VLLM_USE_V1=1 python3 examples/offline_inference/basic/generate.py --model facebook/opt-125m --block-size 64 --enforce-eager ' diff --git a/docker/Dockerfile.xpu b/docker/Dockerfile.xpu index 681102b9d18be..466ba98333635 100644 --- a/docker/Dockerfile.xpu +++ b/docker/Dockerfile.xpu @@ -35,6 +35,7 @@ RUN --mount=type=bind,source=.git,target=.git \ if [ "$GIT_REPO_CHECK" != 0 ]; then bash tools/check_repo.sh; fi ENV VLLM_TARGET_DEVICE=xpu +ENV VLLM_WORKER_MULTIPROC_METHOD=spawn RUN --mount=type=cache,target=/root/.cache/pip \ --mount=type=bind,source=.git,target=.git \ diff --git a/requirements/xpu.txt b/requirements/xpu.txt index 3cb6a4a8addac..0d95dc57152de 100644 --- a/requirements/xpu.txt +++ b/requirements/xpu.txt @@ -9,6 +9,7 @@ setuptools>=77.0.3,<80.0.0 wheel jinja2>=3.1.6 datasets # for benchmark scripts +numba == 0.60.0 # v0.61 doesn't support Python 3.9. Required for N-gram speculative decoding torch==2.7.0+xpu torchaudio diff --git a/vllm/_ipex_ops.py b/vllm/_ipex_ops.py index ae63e06030dd1..2be02411ec05e 100644 --- a/vllm/_ipex_ops.py +++ b/vllm/_ipex_ops.py @@ -228,6 +228,111 @@ class ipex_ops: ipex.llm.modules.PagedAttention.reshape_and_cache( key, value, key_cache, value_cache, slot_mapping) + @staticmethod + def reshape_and_cache_flash( + key: torch.Tensor, + value: torch.Tensor, + key_cache: torch.Tensor, + value_cache: torch.Tensor, + slot_mapping: torch.Tensor, + kv_cache_dtype: str, + k_scale: Optional[torch.Tensor] = None, + v_scale: Optional[torch.Tensor] = None, + k_scale_float: float = 1.0, + v_scale_float: float = 1.0, + ) -> None: + assert kv_cache_dtype == "auto" + # TODO: support FP8 kv cache. + ipex.llm.modules.PagedAttention.reshape_and_cache_flash( + key, value, key_cache, value_cache, slot_mapping) + + @staticmethod + def flash_attn_varlen_func( + out: torch.Tensor, + q: torch.Tensor, + k: torch.Tensor, + v: torch.Tensor, + cu_seqlens_q: torch.Tensor, + seqused_k: torch.Tensor, # we don't support this in ipex kernel + max_seqlen_q: int, + max_seqlen_k: int, + softmax_scale: float, + causal: bool, + block_table: torch.Tensor, + alibi_slopes: Optional[torch.Tensor], + window_size: Optional[list[int]] = None, + softcap: Optional[float] = 0.0, + cu_seqlens_k: Optional[torch.Tensor] = None, + # The following parameters are not used in ipex kernel currently, + # we keep API compatible to CUDA's. + scheduler_metadata=None, + fa_version: int = 2, + q_descale=None, + k_descale=None, + v_descale=None, + ): + if cu_seqlens_k is None: + # cu_seqlens_k is not used in ipex kernel. + cu_seqlens_k = torch.cumsum(seqused_k, dim=0) + cu_seqlens_k = torch.cat([ + torch.tensor([0], device=seqused_k.device, dtype=torch.int32), + cu_seqlens_k + ]).to(torch.int32) + + real_window_size: tuple[int, int] + if window_size is None: + real_window_size = (-1, -1) + else: + assert len(window_size) == 2 + real_window_size = (window_size[0], window_size[1]) + return ipex.llm.modules.PagedAttention.flash_attn_varlen_func( + out, + q.contiguous(), + k, + v, + cu_seqlens_q, + cu_seqlens_k, + max_seqlen_q, + max_seqlen_k, + softmax_scale, + causal, + block_table, + alibi_slopes, + softcap=softcap, + window_size_left=real_window_size[0], + window_size_right=real_window_size[1], + k_scale=1.0, + v_scale=1.0, + ) + + @staticmethod + def get_scheduler_metadata( + batch_size, + max_seqlen_q, + max_seqlen_k, + num_heads_q, + num_heads_kv, + headdim, + cache_seqlens: torch.Tensor, + qkv_dtype=torch.bfloat16, + headdim_v=None, + cu_seqlens_q: Optional[torch.Tensor] = None, + cu_seqlens_k_new: Optional[torch.Tensor] = None, + cache_leftpad: Optional[torch.Tensor] = None, + page_size: Optional[int] = None, + max_seqlen_k_new=0, + causal=False, + window_size=(-1, -1), # -1 means infinite context window + has_softcap=False, + num_splits=0, # Can be tuned for speed + pack_gqa=None, # Can be tuned for speed + sm_margin=0, # Can be tuned if some SMs are used for communication + ) -> None: + logger.warning_once( + "get_scheduler_metadata is not implemented for ipex_ops, " + "returning None.") + return None + @staticmethod def copy_blocks(key_caches: list[torch.Tensor], value_caches: list[torch.Tensor], diff --git a/vllm/attention/utils/fa_utils.py b/vllm/attention/utils/fa_utils.py index 69cde06fd72e9..36fd2d231bc5f 100644 --- a/vllm/attention/utils/fa_utils.py +++ b/vllm/attention/utils/fa_utils.py @@ -4,13 +4,27 @@ from typing import Optional from vllm import envs from vllm.logger import init_logger +from vllm.platforms import current_platform logger = init_logger(__name__) +if current_platform.is_cuda(): + from vllm import _custom_ops as ops + reshape_and_cache_flash = ops.reshape_and_cache_flash + from vllm.vllm_flash_attn import (flash_attn_varlen_func, + get_scheduler_metadata) +elif current_platform.is_xpu(): + from vllm._ipex_ops import ipex_ops as ops + reshape_and_cache_flash = ops.reshape_and_cache_flash + flash_attn_varlen_func = ops.flash_attn_varlen_func + get_scheduler_metadata = ops.get_scheduler_metadata + def get_flash_attn_version(requires_alibi: bool = False) -> Optional[int]: # import here to avoid circular dependencies from vllm.platforms import current_platform + if current_platform.is_xpu(): + return 2 try: from vllm.vllm_flash_attn.flash_attn_interface import ( fa_version_unsupported_reason, is_fa_version_supported) @@ -50,6 +64,5 @@ def get_flash_attn_version(requires_alibi: bool = False) -> Optional[int]: def flash_attn_supports_fp8() -> bool: - from vllm.platforms import current_platform return get_flash_attn_version() == 3 and \ current_platform.get_device_capability().major == 9 diff --git a/vllm/executor/ray_distributed_executor.py b/vllm/executor/ray_distributed_executor.py index a3f05ec5ea3f2..84e8ddd8e274d 100644 --- a/vllm/executor/ray_distributed_executor.py +++ b/vllm/executor/ray_distributed_executor.py @@ -73,7 +73,7 @@ class RayDistributedExecutor(DistributedExecutorBase): def _init_executor(self) -> None: self.forward_dag: Optional[ray.dag.CompiledDAG] = None - if envs.VLLM_USE_V1: + if envs.VLLM_USE_V1 and not current_platform.is_xpu(): # V1 uses SPMD worker and compiled DAG os.environ["VLLM_USE_RAY_SPMD_WORKER"] = "1" os.environ["VLLM_USE_RAY_COMPILED_DAG"] = "1" diff --git a/vllm/platforms/xpu.py b/vllm/platforms/xpu.py index 73f6f3d417671..f361f5e2616ef 100644 --- a/vllm/platforms/xpu.py +++ b/vllm/platforms/xpu.py @@ -1,18 +1,21 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +import os from typing import TYPE_CHECKING, Optional import torch +import vllm.envs as envs from vllm.logger import init_logger from vllm.utils import DEFAULT_MAX_NUM_BATCHED_TOKENS from .interface import DeviceCapability, Platform, PlatformEnum, _Backend if TYPE_CHECKING: - from vllm.config import VllmConfig + from vllm.config import ModelConfig, VllmConfig else: + ModelConfig = None VllmConfig = None logger = init_logger(__name__) @@ -35,8 +38,13 @@ class XPUPlatform(Platform): use_mla: bool) -> str: if selected_backend != _Backend.IPEX: logger.info("Cannot use %s backend on XPU.", selected_backend) - logger.info("Using IPEX attention backend.") - return "vllm.attention.backends.ipex_attn.IpexAttnBackend" + use_v1 = envs.VLLM_USE_V1 + if use_v1: + logger.info("Using Flash Attention backend on V1 engine.") + return "vllm.v1.attention.backends.flash_attn.FlashAttentionBackend" + else: + logger.info("Using IPEX attention backend.") + return "vllm.attention.backends.ipex_attn.IpexAttnBackend" @classmethod def get_device_capability( @@ -67,25 +75,27 @@ class XPUPlatform(Platform): @classmethod def check_and_update_config(cls, vllm_config: VllmConfig) -> None: cache_config = vllm_config.cache_config + # in V1(or with ipex chunked prefill) block_size is 64 if cache_config and cache_config.block_size is None: - cache_config.block_size = 16 + if envs.VLLM_USE_V1: + cache_config.block_size = 64 + else: + cache_config.block_size = 16 - # check and update model config - model_config = vllm_config.model_config - if model_config.dtype == torch.bfloat16: - bf16_supported = cls.device_support_bf16() - if not bf16_supported: + # Instances created using VllmConfig() typically have model_config as + # None by default. The modification involves adding a check to prevent + # potential null exceptions check and update model config. + if vllm_config.model_config is not None: + model_config = vllm_config.model_config + if model_config.dtype == torch.bfloat16: + bf16_supported = cls.device_support_bf16() + if not bf16_supported: + model_config.dtype = torch.float16 + if not model_config.enforce_eager: logger.warning( - "bfloat16 is only supported on Intel Data Center GPU, " - "Intel Arc GPU is not supported yet. Your device is %s," - " which is not supported. will fallback to float16", - cls.get_device_name()) - model_config.dtype = torch.float16 - if not model_config.enforce_eager: - logger.warning( - "CUDA graph is not supported on XPU, fallback to the eager " - "mode.") - model_config.enforce_eager = True + "CUDA graph is not supported on XPU, fallback to the eager " + "mode.") + model_config.enforce_eager = True if vllm_config.speculative_config is not None: raise NotImplementedError( @@ -96,21 +106,27 @@ class XPUPlatform(Platform): # check and update parallel config parallel_config = vllm_config.parallel_config - if parallel_config.worker_cls == "auto": + if envs.VLLM_USE_V1: + parallel_config.worker_cls =\ + "vllm.v1.worker.xpu_worker.XPUWorker" + else: parallel_config.worker_cls = "vllm.worker.xpu_worker.XPUWorker" if parallel_config.distributed_executor_backend is None: - parallel_config.distributed_executor_backend = "ray" + if parallel_config.world_size > 1: + parallel_config.distributed_executor_backend = "ray" + else: + parallel_config.distributed_executor_backend = "uni" elif parallel_config.distributed_executor_backend == "mp": # FIXME(kunshang): # spawn needs calling `if __name__ == '__main__':`` # fork is not supported for xpu start new process. - logger.error( - "Both start methods (spawn and fork) have issue " - "on XPU if you use mp backend, setting it to ray instead.") - parallel_config.distributed_executor_backend = "ray" - - elif parallel_config.distributed_executor_backend != "ray": + if envs.VLLM_WORKER_MULTIPROC_METHOD != "spawn": + os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn" + logger.warning( + "Please use spawn as start method if you want to use mp.") + elif parallel_config.distributed_executor_backend != "ray" and \ + parallel_config.distributed_executor_backend != "uni": logger.warning( "%s is not supported on XPU, fallback to ray distributed" " executor backend.", @@ -142,15 +158,35 @@ class XPUPlatform(Platform): @classmethod def device_support_bf16(cls) -> bool: device_name = cls.get_device_name().lower() - if device_name.count("arc") > 0: + if cls.is_client_gpu_a770(): + logger.warning("Intel Arc A770 have bfloat16 accuracy known issue," + " fallback to float16") return False - elif device_name.count("data center gpu") > 0: - return True else: - logger.warning("Unknown device name %s, always use float16", - device_name) - return False + logger.info( + "Device name %s supports bfloat16. Please file an issue " + "if you encounter any accuracy problems with bfloat16.", + device_name) + return True + + @classmethod + def is_data_center_gpu(cls) -> bool: + device_name = cls.get_device_name().lower() + return device_name.count("data center gpu") > 0 + + @classmethod + def is_client_gpu_a770(cls) -> bool: + device_name = cls.get_device_name().lower() + return device_name.count("a770") > 0 @classmethod def get_device_communicator_cls(cls) -> str: return "vllm.distributed.device_communicators.xpu_communicator.XpuCommunicator" # noqa + + @classmethod + def supports_v1(cls, model_config: ModelConfig) -> bool: + return True + + @classmethod + def device_count(cls) -> int: + return torch.xpu.device_count() diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index ef65d2ea36e4f..42b5997f085b1 100755 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -14,10 +14,12 @@ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, from vllm.attention.layer import Attention from vllm.attention.ops.merge_attn_states import merge_attn_states from vllm.attention.utils.fa_utils import (flash_attn_supports_fp8, - get_flash_attn_version) + flash_attn_varlen_func, + get_flash_attn_version, + get_scheduler_metadata, + reshape_and_cache_flash) from vllm.config import VllmConfig, get_layers_from_vllm_config from vllm.logger import init_logger -from vllm.platforms import current_platform from vllm.utils import cdiv from vllm.v1.attention.backends.utils import ( AttentionMetadataBuilder, CommonAttentionMetadata, get_kv_cache_layout, @@ -28,10 +30,6 @@ from vllm.v1.worker.block_table import BlockTable if TYPE_CHECKING: from vllm.v1.worker.gpu_model_runner import GPUModelRunner -if current_platform.is_cuda(): - from vllm.vllm_flash_attn import (flash_attn_varlen_func, - get_scheduler_metadata) - logger = init_logger(__name__) @@ -443,7 +441,7 @@ class FlashAttentionImpl(AttentionImpl): # and value[:num_actual_tokens] because the reshape_and_cache_flash # op uses the slot_mapping's shape to determine the number of # actual tokens. - torch.ops._C_cache_ops.reshape_and_cache_flash( + reshape_and_cache_flash( key, value, key_cache, diff --git a/vllm/v1/worker/xpu_model_runner.py b/vllm/v1/worker/xpu_model_runner.py new file mode 100644 index 0000000000000..55d116dcd4968 --- /dev/null +++ b/vllm/v1/worker/xpu_model_runner.py @@ -0,0 +1,32 @@ +# SPDX-License-Identifier: Apache-2.0 +from typing import TYPE_CHECKING + +import torch + +from vllm.config import VllmConfig +from vllm.logger import init_logger +from vllm.v1.worker.gpu_model_runner import GPUModelRunner + +if TYPE_CHECKING: + pass + +logger = init_logger(__name__) + + +class XPUModelRunner(GPUModelRunner): + """A model runner for XPU devices.""" + + def __init__( + self, + vllm_config: VllmConfig, + device: torch.device, + ): + super().__init__(vllm_config, device) + # FIXME: To be verified. + self.cascade_attn_enabled = False + + def _init_device_properties(self) -> None: + pass + + def _sync_device(self) -> None: + torch.xpu.synchronize() diff --git a/vllm/v1/worker/xpu_worker.py b/vllm/v1/worker/xpu_worker.py new file mode 100644 index 0000000000000..d9ea03986566b --- /dev/null +++ b/vllm/v1/worker/xpu_worker.py @@ -0,0 +1,164 @@ +# SPDX-License-Identifier: Apache-2.0 +import os + +import torch +import torch.distributed + +import vllm.envs as envs +from vllm.config import VllmConfig +from vllm.logger import init_logger +from vllm.model_executor import set_random_seed +from vllm.platforms import current_platform +from vllm.v1.worker.gpu_worker import (Worker, + init_worker_distributed_environment) +from vllm.v1.worker.xpu_model_runner import XPUModelRunner + +logger = init_logger(__name__) + + +class XPUWorker(Worker): + """A XPU worker class.""" + + def __init__( + self, + vllm_config: VllmConfig, + local_rank: int, + rank: int, + distributed_init_method: str, + is_driver_worker: bool = False, + ): + super().__init__(vllm_config, local_rank, rank, + distributed_init_method, is_driver_worker) + device_config = self.device_config + assert device_config.device_type == "xpu" + assert current_platform.is_xpu() + + # Torch profiler. Enabled and configured through env vars: + # VLLM_TORCH_PROFILER_DIR=/path/to/save/trace + if envs.VLLM_TORCH_PROFILER_DIR: + torch_profiler_trace_dir = envs.VLLM_TORCH_PROFILER_DIR + logger.info("Profiling enabled. Traces will be saved to: %s", + torch_profiler_trace_dir) + self.profiler = torch.profiler.profile( + activities=[ + torch.profiler.ProfilerActivity.CPU, + torch.profiler.ProfilerActivity.XPU, + ], + with_stack=True, + on_trace_ready=torch.profiler.tensorboard_trace_handler( + torch_profiler_trace_dir, use_gzip=True)) + else: + self.profiler = None + + # we provide this function due to `torch.xpu.mem_get_info()` doesn't + # return correct free_gpu_memory on intel client GPU. We need to + # calculate/estiamte it. + def xpu_get_mem_info(self): + if current_platform.is_data_center_gpu(): + return torch.xpu.mem_get_info() + else: + _, total_gpu_memory = torch.xpu.mem_get_info() + # FIXME: memory_allocated() doesn't count non-torch allocations, + # and we don't have any API to get it. so we mark it as 128MB. + used_memory = torch.xpu.memory_allocated() + non_torch_allocations = 128 * 1024 * 1024 + free_gpu_memory = total_gpu_memory - (used_memory + + non_torch_allocations) + return free_gpu_memory, total_gpu_memory + + @torch.inference_mode() + def determine_available_memory(self) -> int: + """Profiles the peak memory usage of the model to determine how many + KV blocks may be allocated without OOMs. + The engine will first conduct a profiling of the existing memory usage. + Then, it calculate the maximum possible number of GPU and CPU blocks + that can be allocated with the remaining free memory. + .. tip:: + You may limit the usage of GPU memory + by adjusting the `gpu_memory_utilization` parameter. + """ + # Profile the memory usage of the model and get the maximum number of + # cache blocks that can be allocated with the remaining free memory. + torch.xpu.empty_cache() + torch.xpu.reset_peak_memory_stats() + + free_gpu_memory, total_gpu_memory = torch.xpu.mem_get_info() + current_allocated_bytes = torch.xpu.memory_allocated() + msg = ("Before memory profiling run, " + f"total GPU memory: {total_gpu_memory / 1024**2:.2f} MB, " + f"model load takes {current_allocated_bytes / 1024**2:.2f} MB, " + f"free gpu memory is {free_gpu_memory / 1024**2:.2f} MB.") + logger.info(msg) + # Execute a forward pass with dummy inputs to profile the memory usage + # of the model. + self.model_runner.profile_run() + + free_gpu_memory, _ = self.xpu_get_mem_info() + # NOTE(woosuk): Here we assume that the other processes using the same + # GPU did not change their memory usage during the profiling. + assert self.init_gpu_memory > free_gpu_memory, ( + "Error in memory profiling. " + f"Initial free memory {self.init_gpu_memory}, current free memory" + f" {free_gpu_memory}. This happens when the GPU memory was " + "not properly cleaned up before initializing the vLLM instance.") + + # Get the peak memory allocation recorded by torch + peak_memory = torch.xpu.memory_stats()["allocated_bytes.all.peak"] + + torch.xpu.empty_cache() + torch_allocated_bytes = torch.xpu.memory_stats( + )["allocated_bytes.all.current"] + total_allocated_bytes = self.xpu_get_mem_info( + )[1] - self.xpu_get_mem_info()[0] + + non_torch_allocations = total_allocated_bytes - torch_allocated_bytes + if non_torch_allocations > 0: + peak_memory += non_torch_allocations + available_kv_cache_memory = ( + total_gpu_memory * self.cache_config.gpu_memory_utilization - + peak_memory) + + msg = ("After memory profiling run, " + f"peak memory usage is {peak_memory / 1024**2:.2f} MB," + f"torch mem is {torch_allocated_bytes / 1024**2:.2f} MB, " + f"non-torch mem is {non_torch_allocations / 1024**2:.2f} MB, " + f"free gpu memory is {free_gpu_memory / 1024**2:.2f} MB.") + logger.info(msg) + + return int(available_kv_cache_memory) + + def init_device(self): + if self.device_config.device.type == "xpu" and current_platform.is_xpu( + ): + self.device = torch.device(f"xpu:{self.local_rank}") + torch.xpu.set_device(self.device) + torch.xpu.empty_cache() + self.init_gpu_memory = torch.xpu.get_device_properties( + self.local_rank).total_memory + else: + raise RuntimeError( + f"Not support device type: {self.device_config.device}") + + ENV_CCL_ZE_IPC_EXCHANGE = os.getenv("CCL_ZE_IPC_EXCHANGE", "drmfd") + ENV_CCL_ATL_TRANSPORT = os.getenv("CCL_ATL_TRANSPORT", "ofi") + ENV_LOCAL_WORLD_SIZE = os.getenv("LOCAL_WORLD_SIZE", + str(self.parallel_config.world_size)) + os.environ["CCL_ZE_IPC_EXCHANGE"] = ENV_CCL_ZE_IPC_EXCHANGE + os.environ["CCL_ATL_TRANSPORT"] = ENV_CCL_ATL_TRANSPORT + os.environ["LOCAL_WORLD_SIZE"] = ENV_LOCAL_WORLD_SIZE + os.environ["LOCAL_RANK"] = str(self.local_rank) + dist_backend = "ccl" + + init_worker_distributed_environment(self.vllm_config, self.rank, + self.distributed_init_method, + self.local_rank, dist_backend) + + # global all_reduce needed for overall oneccl warm up + torch.distributed.all_reduce(torch.zeros(1).xpu()) + + # Set random seed. + set_random_seed(self.model_config.seed) + + # Construct the model runner + self.model_runner = XPUModelRunner( # type: ignore + self.vllm_config, self.device) From 04e1642e3251fc575d104c84782fafea348cfbaf Mon Sep 17 00:00:00 2001 From: Chengji Yao Date: Thu, 26 Jun 2025 10:01:37 -0700 Subject: [PATCH 159/211] [TPU] add kv cache update kernel (#19928) Signed-off-by: Chengji Yao --- .../scripts/hardware_ci/run-tpu-v1-test.sh | 2 + tests/v1/tpu/test_kv_cache_update_kernel.py | 71 ++++++++++ tests/v1/tpu/test_pallas.py | 3 +- vllm/attention/ops/pallas_kv_cache_update.py | 117 ++++++++++++++++ vllm/v1/attention/backends/pallas.py | 55 +++++++- vllm/v1/worker/tpu_model_runner.py | 132 +++++++++++++----- 6 files changed, 342 insertions(+), 38 deletions(-) create mode 100644 tests/v1/tpu/test_kv_cache_update_kernel.py create mode 100644 vllm/attention/ops/pallas_kv_cache_update.py diff --git a/.buildkite/scripts/hardware_ci/run-tpu-v1-test.sh b/.buildkite/scripts/hardware_ci/run-tpu-v1-test.sh index a2a5c2a02cbb9..90cad506ab1e9 100755 --- a/.buildkite/scripts/hardware_ci/run-tpu-v1-test.sh +++ b/.buildkite/scripts/hardware_ci/run-tpu-v1-test.sh @@ -159,6 +159,8 @@ run_and_track_test 14 "test_tpu_qkv_linear.py" \ "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_tpu_qkv_linear.py" run_and_track_test 15 "test_spmd_model_weight_loading.py" \ "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_spmd_model_weight_loading.py" +run_and_track_test 16 "test_kv_cache_update_kernel.py" \ + "python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_kv_cache_update_kernel.py" # After all tests have been attempted, exit with the overall status. if [ "$overall_script_exit_code" -ne 0 ]; then diff --git a/tests/v1/tpu/test_kv_cache_update_kernel.py b/tests/v1/tpu/test_kv_cache_update_kernel.py new file mode 100644 index 0000000000000..63a1f6777e4df --- /dev/null +++ b/tests/v1/tpu/test_kv_cache_update_kernel.py @@ -0,0 +1,71 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import numpy as np +import pytest +import torch +import torch_xla + +import vllm.v1.attention.backends.pallas # noqa: F401 +from vllm.platforms import current_platform + + +@pytest.mark.skipif(not current_platform.is_tpu(), + reason="This is a test for TPU only") +@pytest.mark.parametrize("page_size", [32, 33]) +@pytest.mark.parametrize("combined_kv_head_num", [2, 16]) +@pytest.mark.parametrize("head_dim", [128, 256]) +@pytest.mark.parametrize("num_slices_per_block", [4, 8]) +def test_kv_cache_update_kernel(page_size: int, combined_kv_head_num: int, + head_dim: int, num_slices_per_block: int): + page_num = 1000 + padded_num_tokens = 128 + kv_cache_cpu = torch.zeros( + (page_num * page_size, combined_kv_head_num, head_dim), + dtype=torch.bfloat16, + device="cpu") + kv_cache_xla = kv_cache_cpu.to(torch_xla.device()) + new_kv_cpu = torch.randn( + (padded_num_tokens, combined_kv_head_num, head_dim), + dtype=torch.bfloat16, + device="cpu") + new_kv_xla = new_kv_cpu.to(torch_xla.device()) + slice_lens = np.array([7, page_size, page_size, 1, 1, 1, 9], + dtype=np.int32) + kv_cache_start_indices = np.array([ + page_size * 2 - 7, page_size * 2, page_size * 3, page_size * 4 + 6, + page_size * 5 + 7, page_size * 6 + 8, page_size * 15 + 3 + ], + dtype=np.int32) + new_kv_cache_indices = np.concatenate( + [np.array([0], dtype=np.int32), + np.cumsum(slice_lens[:-1])]) + slot_mapping = np.stack( + [kv_cache_start_indices, new_kv_cache_indices, slice_lens], axis=1) + padded_size = (slot_mapping.shape[0] + num_slices_per_block - + 1) // num_slices_per_block * num_slices_per_block + slot_mapping = np.pad(slot_mapping, + [[0, padded_size - slot_mapping.shape[0]], [0, 0]], + constant_values=0) + slot_mapping = np.transpose(slot_mapping) + slot_mapping_cpu = torch.tensor(slot_mapping, + device="cpu", + dtype=torch.int32) + slot_mapping_xla = slot_mapping_cpu.to(torch_xla.device()) + torch_xla.sync() + + torch.ops.xla.dynamo_set_buffer_donor_(kv_cache_xla, True) + new_kv_cache_xla = torch.ops.xla.kv_cache_update_op( + new_kv_xla, slot_mapping_xla, kv_cache_xla, page_size, + num_slices_per_block) + kv_cache_xla.copy_(new_kv_cache_xla) + torch_xla.sync() + + for ni, ci, sl in zip(new_kv_cache_indices, kv_cache_start_indices, + slice_lens): + kv_cache_cpu[ci:ci + sl, :, :] = new_kv_cpu[ni:ni + sl, :, :] + + assert torch.allclose(kv_cache_xla.cpu(), + kv_cache_cpu, + atol=1e-4, + rtol=1e-4) diff --git a/tests/v1/tpu/test_pallas.py b/tests/v1/tpu/test_pallas.py index 3a9d80847a16b..e279edfffbc72 100644 --- a/tests/v1/tpu/test_pallas.py +++ b/tests/v1/tpu/test_pallas.py @@ -47,7 +47,7 @@ def test_ragged_paged_attention(): key = torch.zeros(num_tokens, num_kv_heads * head_size) value = torch.zeros(num_tokens, num_kv_heads * head_size) kv_cache = torch.zeros(num_blocks, block_size, num_kv_heads * 2, head_size) - slot_mapping = torch.zeros(num_tokens, dtype=torch.int64) + slot_mapping = torch.zeros((3, num_tokens), dtype=torch.int64) max_num_reqs = 8 max_num_blocks_per_req = 8 block_tables = torch.zeros((max_num_reqs, max_num_blocks_per_req), @@ -65,6 +65,7 @@ def test_ragged_paged_attention(): context_lens=context_lens, query_start_loc=query_start_loc, num_seqs=num_seqs, + num_slices_per_kv_cache_update_block=8, ) with patch("torch.ops.xla.ragged_paged_attention" diff --git a/vllm/attention/ops/pallas_kv_cache_update.py b/vllm/attention/ops/pallas_kv_cache_update.py new file mode 100644 index 0000000000000..1a92b10e4f9c7 --- /dev/null +++ b/vllm/attention/ops/pallas_kv_cache_update.py @@ -0,0 +1,117 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import functools + +import jax +from jax.experimental import pallas as pl +from jax.experimental.pallas import tpu as pltpu + + +def _kv_cache_update_kernel( + # Prefetch + slices_ref, # [3, num_slices], list of (kv_cache_start, new_kv_start, + # slice_len) + # Input + new_kv_hbm_ref, # [num_tokens, num_combined_kv_heads, head_dim] + kv_cache_hbm_ref, # [total_num_pages * page_size, num_combined_kv_heads, + # head_dim] + # Output + _, # [total_num_pages * page_size, num_combined_kv_heads, head_dim] + # Scratch + scratch, # [num_slices_per_block, page_size, num_combined_kv_heads, + # head_dim] + sem, +): + async_copies = [] + block_idx = pl.program_id(0) + num_slices_per_block = scratch.shape[0] + + # Copy from new_kv_hbm_ref to scratch + for i in range(num_slices_per_block): + offset_i = i + block_idx * num_slices_per_block + new_kv_start = slices_ref[1, offset_i] + length = slices_ref[2, offset_i] + async_copy = pltpu.make_async_copy( + new_kv_hbm_ref.at[pl.ds(new_kv_start, length), ...], + scratch.at[i, pl.ds(0, length), ...], + sem, + ) + async_copy.start() + async_copies.append(async_copy) + + for async_copy in async_copies: + async_copy.wait() + + # Copy from scratch to kv_cache_hbm_ref + async_copies.clear() + for i in range(num_slices_per_block): + offset_i = i + block_idx * num_slices_per_block + kv_cache_start = slices_ref[0, offset_i] + length = slices_ref[2, offset_i] + async_copy = pltpu.make_async_copy( + scratch.at[i, pl.ds(0, length), ...], + kv_cache_hbm_ref.at[pl.ds(kv_cache_start, length), ...], + sem, + ) + async_copy.start() + async_copies.append(async_copy) + for async_copy in async_copies: + async_copy.wait() + + +@functools.partial( + jax.jit, + static_argnames=["page_size", "num_slices_per_block"], +) +def kv_cache_update( + new_kv: jax.Array, # [total_num_token, num_combined_kv_heads, head_dim] + slices: jax. + Array, # [3, slices], list of (kv_cache_start, new_kv_start, slice_len) + kv_cache: jax. + Array, # [total_num_pages * page_size, num_combined_kv_heads, head_dim] + *, + page_size: int = 32, + num_slices_per_block: int = 8, +): + assert slices.shape[1] % num_slices_per_block == 0 + _, num_combined_kv_heads, head_dim = new_kv.shape + assert kv_cache.shape[1] == num_combined_kv_heads + assert kv_cache.shape[2] == head_dim + assert head_dim % 128 == 0 + # TODO: Add dynamic check to make sure that the all the slice lengths are + # smaller or equal to page_size + + in_specs = [ + pl.BlockSpec(memory_space=pltpu.TPUMemorySpace.ANY), + pl.BlockSpec(memory_space=pltpu.TPUMemorySpace.ANY), + ] + + out_specs = [pl.BlockSpec(memory_space=pltpu.TPUMemorySpace.ANY)] + out_shape = [jax.ShapeDtypeStruct(kv_cache.shape, dtype=kv_cache.dtype)] + + scalar_prefetches = [slices] + scratch = pltpu.VMEM( + (num_slices_per_block, page_size, num_combined_kv_heads, head_dim), + new_kv.dtype, + ) + + scratch_shapes = [ + scratch, + pltpu.SemaphoreType.DMA, + ] + + kernel = pl.pallas_call( + _kv_cache_update_kernel, + grid_spec=pltpu.PrefetchScalarGridSpec( + num_scalar_prefetch=len(scalar_prefetches), + in_specs=in_specs, + out_specs=out_specs, + grid=(slices.shape[1] // num_slices_per_block, ), + scratch_shapes=scratch_shapes, + ), + out_shape=out_shape, + input_output_aliases={len(scalar_prefetches) + 1: 0}, + ) + + return kernel(*scalar_prefetches, new_kv, kv_cache)[0] diff --git a/vllm/v1/attention/backends/pallas.py b/vllm/v1/attention/backends/pallas.py index ff2862edaa01b..49f0772c62d13 100644 --- a/vllm/v1/attention/backends/pallas.py +++ b/vllm/v1/attention/backends/pallas.py @@ -5,8 +5,12 @@ from dataclasses import dataclass from typing import Any, Optional import torch -# Required to register custom ops. +import torch_xla.core.xla_builder as xb import torch_xla.experimental.custom_kernel # noqa: F401 +# Required to register custom ops. +from torch.library import impl +from torch_xla._internal.jax_workarounds import requires_jax +from torch_xla.experimental.custom_kernel import XLA_LIB from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, AttentionLayer, AttentionType) @@ -107,6 +111,7 @@ class PallasMetadata: context_lens: torch.Tensor query_start_loc: torch.Tensor num_seqs: torch.Tensor + num_slices_per_kv_cache_update_block: int class PallasAttentionBackendImpl(AttentionImpl): @@ -212,7 +217,9 @@ class PallasAttentionBackendImpl(AttentionImpl): # Write input keys and values to the KV cache. # Skip this if sharing KV cache with an earlier attention layer. slot_mapping = attn_metadata.slot_mapping - write_to_kv_cache(key, value, kv_cache, slot_mapping) + write_to_kv_cache( + key, value, kv_cache, slot_mapping, + attn_metadata.num_slices_per_kv_cache_update_block) output = torch.ops.xla.ragged_paged_attention( query, @@ -244,6 +251,7 @@ def write_to_kv_cache( value: torch.Tensor, kv_cache: torch.Tensor, slot_mapping: torch.Tensor, + num_slices_per_kv_cache_update_block: int, ) -> None: """ Write the key and values to the KV cache. @@ -251,9 +259,9 @@ def write_to_kv_cache( key: shape = [num_tokens, num_kv_heads * head_size] value: shape = [num_tokens, num_kv_heads * head_size] kv_cache = [num_blocks, block_size, num_kv_heads * 2, head_size] - + num_slices_per_kv_cache_update_block: int """ - _, _, num_combined_kv_heads, head_size = kv_cache.shape + _, page_size, num_combined_kv_heads, head_size = kv_cache.shape head_size = cdiv(head_size, TPU_HEAD_SIZE_ALIGNMENT) * TPU_HEAD_SIZE_ALIGNMENT kv = torch.cat([key, value], axis=-1).reshape(-1, num_combined_kv_heads, @@ -262,4 +270,41 @@ def write_to_kv_cache( torch.ops.xla.dynamo_set_buffer_donor_(kv_cache, True) kv_cache = kv_cache.flatten(0, 1) - kv_cache.index_copy_(0, slot_mapping, kv) + new_kv_cache = torch.ops.xla.kv_cache_update_op( + kv, slot_mapping, kv_cache, page_size, + num_slices_per_kv_cache_update_block) + # NOTE: the in-place copy will be optimized away by XLA compiler. + kv_cache.copy_(new_kv_cache) + + +@requires_jax +def kv_cache_update_op_impl(kv: torch.Tensor, slot_mapping: torch.Tensor, + kv_cache: torch.Tensor, page_size: int, + num_slices_per_block: int): + from vllm.attention.ops.pallas_kv_cache_update import kv_cache_update + new_kv_cache = xb.call_jax(kv_cache_update, (kv, slot_mapping, kv_cache), { + "page_size": page_size, + "num_slices_per_block": num_slices_per_block + }) + return new_kv_cache + + +XLA_LIB.define( + "kv_cache_update_op(Tensor kv, Tensor slot_mapping, Tensor kv_cache, " + "int page_size, int num_slices_per_block) -> Tensor", ) + + +@impl(XLA_LIB, "kv_cache_update_op", "XLA") +def kv_cache_update_op_xla(kv: torch.Tensor, slot_mapping: torch.Tensor, + kv_cache: torch.Tensor, page_size: int, + num_slices_per_block: int) -> torch.Tensor: + new_kv_cache = kv_cache_update_op_impl(kv, slot_mapping, kv_cache, + page_size, num_slices_per_block) + return new_kv_cache + + +@impl(XLA_LIB, "kv_cache_update_op", "CompositeExplicitAutograd") +def kv_cache_update_op_non_xla(kv: torch.Tensor, slot_mapping: torch.Tensor, + kv_cache: torch.Tensor, page_size: int, + num_slices_per_block: int) -> torch.Tensor: + return kv_cache diff --git a/vllm/v1/worker/tpu_model_runner.py b/vllm/v1/worker/tpu_model_runner.py index 2d80bac3c9546..bc334419c4cec 100644 --- a/vllm/v1/worker/tpu_model_runner.py +++ b/vllm/v1/worker/tpu_model_runner.py @@ -53,12 +53,11 @@ if TYPE_CHECKING: logger = init_logger(__name__) -# Here we utilize the behavior that out-of-bound index is ignored. -# FIXME(woosuk): Find a more reliable way to prevent possible bugs. -_PAD_SLOT_ID = 1_000_000_000 INVALID_TOKEN_ID = -1 # Smallest output size MIN_NUM_SEQS = 8 +# Block size used for kv cache updating kernel +NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK = 8 ######################################################### @@ -526,6 +525,69 @@ class TPUModelRunner(LoRAModelRunnerMixin): return kv_cache_spec + def _get_slot_mapping_metadata(self, num_reqs, + num_scheduled_tokens_per_req): + """ + Computes metadata for mapping slots to blocks in the key-value (KV) + cache for a batch of requests. + + This function determines, for each request in the batch, how the + scheduled tokens are distributed across memory blocks, and generates + metadata needed to map slices of tokens to their corresponding positions + in the KV cache. + + Args: + num_reqs (int): Number of requests in the current batch. + num_scheduled_tokens_per_req (int or np.ndarray): Number of tokens + to be scheduled for each request. + + Returns: + np.ndarray: A 2D array of shape (total_block_len, 3), where each row + contains: + - kv_cache_start_index (int): The starting index in the KV cache + for the corresponding slice. + - new_kv_start_index (int): The starting index in the new KV + cache for the corresponding slice. + - slice_len (int): The length of the slice. + """ + slices_start = self.input_batch.num_computed_tokens_cpu[:num_reqs] + slices_end = self.input_batch.num_computed_tokens_cpu[:num_reqs] + \ + num_scheduled_tokens_per_req + local_block_start_idx = slices_start // self.block_size + local_block_end_idx = (slices_end - 1) // self.block_size + no_repeat_req_indices = self.arange_np[:num_reqs] + global_block_start_idx = ( + no_repeat_req_indices * self.max_num_blocks_per_req + + local_block_start_idx) + block_lens = local_block_end_idx - local_block_start_idx + 1 + global_block_start_idx = np.repeat(global_block_start_idx, block_lens) + slice_arange = np.concatenate([self.arange_np[:n] for n in block_lens]) + global_block_indices = global_block_start_idx + slice_arange + block_table_cpu = self.input_batch.block_table[0].get_cpu_tensor() + block_numbers = block_table_cpu.flatten()[global_block_indices].numpy() + total_block_len = np.sum(block_lens) + slot_mapping_slices = np.repeat(np.array([[0, self.block_size]], + dtype=np.int32), + total_block_len, + axis=0) + cu_block_lens = np.zeros(len(block_lens) + 1, dtype=np.int32) + np.cumsum(block_lens, out=cu_block_lens[1:]) + for req_idx in range(num_reqs): + slot_mapping_slices[cu_block_lens[req_idx]][ + 0] = slices_start[req_idx] % self.block_size + slot_mapping_slices[ + cu_block_lens[req_idx + 1] - + 1][1] = (slices_end[req_idx] - 1) % self.block_size + 1 + slice_lens = slot_mapping_slices[:, 1] - slot_mapping_slices[:, 0] + cu_slices_lens = np.zeros(len(slice_lens) + 1, dtype=np.int32) + np.cumsum(slice_lens, out=cu_slices_lens[1:]) + kv_cache_start_indices = slot_mapping_slices[:, 0] + \ + (block_numbers * self.block_size) + new_kv_start_indices = cu_slices_lens[:-1] + slot_mapping_metadata = np.stack( + [kv_cache_start_indices, new_kv_start_indices, slice_lens], axis=1) + return slot_mapping_metadata + def _prepare_inputs(self, scheduler_output: "SchedulerOutput", start_index: int): assert scheduler_output.total_num_scheduled_tokens > 0 @@ -603,26 +665,6 @@ class TPUModelRunner(LoRAModelRunnerMixin): torch.from_numpy(token_indices), out=self.input_ids_cpu[:total_num_scheduled_tokens]) - # Calculate the slot mapping. - # E.g., [0, 1, 0, 1, 2, 3, 4, 0, 1, 2] - # -> [0, 0, K, K, K + 1, K + 1, K + 2, 2 * K, 2 * K, 2 * K + 1] - # where K is the max_num_blocks_per_req and the block size is 2. - # NOTE(woosuk): We can't simply use `token_indices // block_size` here - # because M (max_model_len) is not necessarily divisible by block_size. - # req_indices: # E.g., [2, 5, 3] -> [0, 0, 1, 1, 1, 1, 1, 2, 2, 2] - block_table_indices = (req_indices * self.max_num_blocks_per_req + - positions_np // self.block_size) - # NOTE(woosuk): We use torch.index_select instead of np.take here - # because torch.index_select is much faster than np.take for large - # tensors. - block_table_cpu = self.input_batch.block_table[0].get_cpu_tensor() - block_numbers = block_table_cpu.flatten()[block_table_indices].numpy() - block_offsets = positions_np % self.block_size - np.add(block_numbers * self.block_size, - block_offsets, - out=self.input_batch.block_table[0]. - slot_mapping_np[:total_num_scheduled_tokens]) - # Prepare the attention metadata. self.query_start_loc_np[0] = 0 np.cumsum(num_scheduled_tokens_per_req, @@ -645,12 +687,6 @@ class TPUModelRunner(LoRAModelRunnerMixin): self.position_ids = self.positions_cpu[: padded_total_num_scheduled_tokens].to( self.device) - self.input_batch.block_table[0].slot_mapping_cpu[ - total_num_scheduled_tokens:] = _PAD_SLOT_ID - slot_mapping = ( - self.input_batch.block_table[0]. - slot_mapping_cpu[:padded_total_num_scheduled_tokens].to( - self.device)) if use_max_model_len: block_tables = self.block_table_cpu[:self.num_reqs_max_model_len, : self.max_num_blocks_per_req] @@ -675,6 +711,19 @@ class TPUModelRunner(LoRAModelRunnerMixin): self.device) block_tables = block_tables.to(self.device) + slot_mapping_metadata = self._get_slot_mapping_metadata( + num_reqs, num_scheduled_tokens_per_req) + padded_num_slices = _get_padded_num_kv_cache_update_slices( + padded_total_num_scheduled_tokens, self.max_num_reqs, + self.block_size) + slot_mapping_metadata = np.pad( + slot_mapping_metadata, + [[0, padded_num_slices - len(slot_mapping_metadata)], [0, 0]], + constant_values=0) + slot_mapping_metadata = np.transpose(slot_mapping_metadata) + slot_mapping_metadata = torch.tensor(slot_mapping_metadata, + device=self.device) + if self.lora_config is not None: # We need to respect padding when activating LoRA adapters padded_num_scheduled_tokens_per_req = np.copy( @@ -687,13 +736,15 @@ class TPUModelRunner(LoRAModelRunnerMixin): padded_num_scheduled_tokens_per_req) attn_metadata = PallasMetadata( - slot_mapping=slot_mapping, + slot_mapping=slot_mapping_metadata, block_tables=block_tables, context_lens=seq_lens, query_start_loc=query_start_loc, num_seqs=torch.tensor([num_reqs], dtype=torch.int32, device=self.device), + num_slices_per_kv_cache_update_block= + NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK, ) # NOTE(woosuk): Due to chunked prefills, there can be at most 1 partial # request in the batch. While we should not sample any token from this @@ -1119,8 +1170,10 @@ class TPUModelRunner(LoRAModelRunnerMixin): actual_num_reqs = min(num_tokens, num_reqs) position_ids = torch.zeros(num_tokens, dtype=torch.int32).to(self.device) - slot_mapping = torch.zeros(num_tokens, - dtype=torch.int64).to(self.device) + padded_num_slices = _get_padded_num_kv_cache_update_slices( + num_tokens, self.max_num_reqs, self.block_size) + slot_mapping = torch.zeros((3, padded_num_slices), + dtype=torch.int32).to(self.device) block_tables = torch.zeros((num_reqs, num_blocks), dtype=torch.int32).to(self.device) query_lens = [1] * num_reqs @@ -1138,6 +1191,8 @@ class TPUModelRunner(LoRAModelRunnerMixin): context_lens=context_lens, query_start_loc=query_start_loc, num_seqs=num_seqs, + num_slices_per_kv_cache_update_block= + NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK, ) if self.is_multimodal_model: @@ -1742,6 +1797,19 @@ def _get_padded_token_len(paddings: list[int], x: int) -> int: return paddings[index] +def _get_padded_num_kv_cache_update_slices(num_tokens: int, max_num_reqs: int, + page_size: int) -> int: + """Calculates the padded number of KV cache update slices to avoid + recompilation.""" + padded_num_slices = 2 * max_num_reqs + num_tokens // page_size + padded_num_slices = min(padded_num_slices, num_tokens) + padded_num_slices = ( + padded_num_slices + NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK - 1 + ) // NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK * \ + NUM_SLICES_PER_KV_CACHE_UPDATE_BLOCK + return padded_num_slices + + def replace_set_lora(model): def _tpu_set_lora( From 562308816ceabd8414f49ff2aa291480f69fa1a5 Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Thu, 26 Jun 2025 18:19:32 -0400 Subject: [PATCH 160/211] [Refactor] Rename commnication utils (#20091) Signed-off-by: yewentao256 --- tests/kernels/moe/test_deepep_deepgemm_moe.py | 2 +- tests/kernels/moe/test_deepep_moe.py | 2 +- tests/kernels/moe/test_pplx_cutlass_moe.py | 2 +- tests/kernels/moe/test_pplx_moe.py | 2 +- tests/kernels/moe/{deepep_utils.py => utils.py} | 0 5 files changed, 4 insertions(+), 4 deletions(-) rename tests/kernels/moe/{deepep_utils.py => utils.py} (100%) diff --git a/tests/kernels/moe/test_deepep_deepgemm_moe.py b/tests/kernels/moe/test_deepep_deepgemm_moe.py index 2d7cf39a8cca5..f580dee4c9285 100644 --- a/tests/kernels/moe/test_deepep_deepgemm_moe.py +++ b/tests/kernels/moe/test_deepep_deepgemm_moe.py @@ -22,7 +22,7 @@ from vllm.model_executor.layers.quantization.utils.fp8_utils import ( per_token_group_quant_fp8) from vllm.platforms import current_platform -from .deepep_utils import ProcessGroupInfo, parallel_launch +from .utils import ProcessGroupInfo, parallel_launch has_deep_ep = importlib.util.find_spec("deep_ep") is not None diff --git a/tests/kernels/moe/test_deepep_moe.py b/tests/kernels/moe/test_deepep_moe.py index 7e029ea950555..380eb43c42a40 100644 --- a/tests/kernels/moe/test_deepep_moe.py +++ b/tests/kernels/moe/test_deepep_moe.py @@ -23,7 +23,7 @@ from vllm.model_executor.layers.quantization.utils.fp8_utils import ( per_token_group_quant_fp8) from vllm.platforms import current_platform -from .deepep_utils import ProcessGroupInfo, parallel_launch +from .utils import ProcessGroupInfo, parallel_launch has_deep_ep = importlib.util.find_spec("deep_ep") is not None diff --git a/tests/kernels/moe/test_pplx_cutlass_moe.py b/tests/kernels/moe/test_pplx_cutlass_moe.py index 0caf14f040bbe..ee2bdc838b0d1 100644 --- a/tests/kernels/moe/test_pplx_cutlass_moe.py +++ b/tests/kernels/moe/test_pplx_cutlass_moe.py @@ -15,7 +15,7 @@ from vllm.model_executor.layers.fused_moe.modular_kernel import ( FusedMoEModularKernel) from vllm.platforms import current_platform -from .deepep_utils import ProcessGroupInfo, parallel_launch +from .utils import ProcessGroupInfo, parallel_launch try: from pplx_kernels import AllToAll diff --git a/tests/kernels/moe/test_pplx_moe.py b/tests/kernels/moe/test_pplx_moe.py index c4ad3af6802d4..1da14eddff317 100644 --- a/tests/kernels/moe/test_pplx_moe.py +++ b/tests/kernels/moe/test_pplx_moe.py @@ -29,7 +29,7 @@ from vllm.model_executor.layers.fused_moe.modular_kernel import ( FusedMoEModularKernel) from vllm.platforms import current_platform -from .deepep_utils import ProcessGroupInfo, parallel_launch +from .utils import ProcessGroupInfo, parallel_launch requires_pplx = pytest.mark.skipif( not has_pplx, diff --git a/tests/kernels/moe/deepep_utils.py b/tests/kernels/moe/utils.py similarity index 100% rename from tests/kernels/moe/deepep_utils.py rename to tests/kernels/moe/utils.py From 07b8fae219b1fff51ef115c38c44b51395be5bb5 Mon Sep 17 00:00:00 2001 From: Kyle Yu <153807854+kyolebu@users.noreply.github.com> Date: Thu, 26 Jun 2025 18:22:12 -0400 Subject: [PATCH 161/211] [Doc] correct LoRA capitalization (#20135) Signed-off-by: kyolebu --- docs/README.md | 2 +- docs/models/supported_models.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/README.md b/docs/README.md index 0c6aff5fa07c3..9fb3137b31928 100644 --- a/docs/README.md +++ b/docs/README.md @@ -40,7 +40,7 @@ vLLM is flexible and easy to use with: - OpenAI-compatible API server - Support NVIDIA GPUs, AMD CPUs and GPUs, Intel CPUs, Gaudi® accelerators and GPUs, IBM Power CPUs, TPU, and AWS Trainium and Inferentia Accelerators. - Prefix caching support -- Multi-lora support +- Multi-LoRA support For more information, check out the following: diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md index a435c59a3042b..04d9923f92105 100644 --- a/docs/models/supported_models.md +++ b/docs/models/supported_models.md @@ -427,7 +427,7 @@ Specified using `--task embed`. See [relevant issue on HF Transformers](https://github.com/huggingface/transformers/issues/34882). !!! note - `jinaai/jina-embeddings-v3` supports multiple tasks through lora, while vllm temporarily only supports text-matching tasks by merging lora weights. + `jinaai/jina-embeddings-v3` supports multiple tasks through LoRA, while vllm temporarily only supports text-matching tasks by merging LoRA weights. !!! note The second-generation GTE model (mGTE-TRM) is named `NewModel`. The name `NewModel` is too generic, you should set `--hf-overrides '{"architectures": ["GteNewModel"]}'` to specify the use of the `GteNewModel` architecture. From e9fd658a736a4d30f7a367c317506c87ad7f5359 Mon Sep 17 00:00:00 2001 From: Bowen Wang Date: Thu, 26 Jun 2025 15:30:21 -0700 Subject: [PATCH 162/211] [Feature] Expert Parallelism Load Balancer (EPLB) (#18343) Signed-off-by: Bowen Wang --- .buildkite/test-pipeline.yaml | 17 + tests/distributed/test_eplb_algo.py | 292 ++++++++++ tests/distributed/test_eplb_execute.py | 504 ++++++++++++++++++ tests/models/test_initialization.py | 12 +- vllm/config.py | 33 ++ vllm/distributed/eplb/__init__.py | 7 + vllm/distributed/eplb/eplb_state.py | 431 +++++++++++++++ vllm/distributed/eplb/rebalance_algo.py | 233 ++++++++ vllm/distributed/eplb/rebalance_execute.py | 306 +++++++++++ vllm/engine/arg_utils.py | 20 + vllm/model_executor/layers/fused_moe/layer.py | 264 ++++++++- .../layers/quantization/awq_marlin.py | 8 + .../compressed_tensors_moe.py | 42 ++ .../layers/quantization/experts_int8.py | 8 + .../model_executor/layers/quantization/fp8.py | 14 + .../layers/quantization/gguf.py | 8 + .../layers/quantization/gptq_marlin.py | 8 + .../layers/quantization/modelopt.py | 8 + .../layers/quantization/moe_wna16.py | 8 + .../layers/quantization/quark/quark_moe.py | 8 + vllm/model_executor/models/deepseek_v2.py | 127 ++++- vllm/model_executor/models/interfaces.py | 68 +++ vllm/v1/worker/gpu_model_runner.py | 65 ++- vllm/v1/worker/gpu_worker.py | 9 +- 24 files changed, 2446 insertions(+), 54 deletions(-) create mode 100644 tests/distributed/test_eplb_algo.py create mode 100644 tests/distributed/test_eplb_execute.py create mode 100644 vllm/distributed/eplb/__init__.py create mode 100644 vllm/distributed/eplb/eplb_state.py create mode 100644 vllm/distributed/eplb/rebalance_algo.py create mode 100644 vllm/distributed/eplb/rebalance_execute.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 1536759c06bd2..26f70ad457b67 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -168,6 +168,23 @@ steps: - VLLM_ALLOW_INSECURE_SERIALIZATION=1 RAY_DEDUP_LOGS=0 python3 rlhf_colocate.py - popd +- label: EPLB Algorithm Test + working_dir: "/vllm-workspace/tests" + source_file_dependencies: + - vllm/distributed/eplb + - tests/distributed/test_eplb_algo.py + commands: + - pytest -v -s distributed/test_eplb_algo.py + +- label: EPLB Execution Test # 5min + working_dir: "/vllm-workspace/tests" + num_gpus: 4 + source_file_dependencies: + - vllm/distributed/eplb + - tests/distributed/test_eplb_execute.py + commands: + - pytest -v -s distributed/test_eplb_execute.py + - label: Metrics, Tracing Test # 10min mirror_hardwares: [amdexperimental, amdproduction] num_gpus: 2 diff --git a/tests/distributed/test_eplb_algo.py b/tests/distributed/test_eplb_algo.py new file mode 100644 index 0000000000000..e47ccba99c81d --- /dev/null +++ b/tests/distributed/test_eplb_algo.py @@ -0,0 +1,292 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import pytest +import torch + +from vllm.distributed.eplb.rebalance_algo import rebalance_experts + + +def test_basic_rebalance(): + """Test basic rebalancing functionality""" + # Example from https://github.com/deepseek-ai/eplb + weight = torch.tensor([ + [90, 132, 40, 61, 104, 165, 39, 4, 73, 56, 183, 86], + [20, 107, 104, 64, 19, 197, 187, 157, 172, 86, 16, 27], + ]) + + num_layers = weight.shape[0] + num_replicas = 16 + num_groups = 4 + num_nodes = 2 + num_gpus = 8 + + phy2log, log2phy, logcnt = rebalance_experts(weight, num_replicas, + num_groups, num_nodes, + num_gpus) + + # Verify output shapes + assert phy2log.shape == ( + 2, + 16, + ), f"Expected `phy2log` shape (2, 16), got {phy2log.shape}" + assert (log2phy.shape[0] == 2 + ), f"Expected `log2phy` first dimension 2, got {log2phy.shape[0]}" + assert ( + log2phy.shape[1] == 12 + ), f"Expected `log2phy` second dimension 12, got {log2phy.shape[1]}" + assert logcnt.shape == ( + 2, + 12, + ), f"Expected `logcnt` shape (2, 12), got {logcnt.shape}" + + # Verify physical to logical expert mapping range is correct + assert torch.all(phy2log >= 0) and torch.all( + phy2log < 12), "Physical to logical mapping should be in range [0, 12)" + + # Verify expert count reasonableness + assert torch.all( + logcnt >= 1), "Each logical expert should have at least 1 replica" + assert ( + torch.sum(logcnt, dim=1).sum() == num_replicas * + num_layers), f"Total replicas should be {num_replicas * num_layers}" + + # Verify expected output + expected_phy2log = torch.tensor([ + [5, 6, 5, 7, 8, 4, 3, 4, 10, 9, 10, 2, 0, 1, 11, 1], + [7, 10, 6, 8, 6, 11, 8, 9, 2, 4, 5, 1, 5, 0, 3, 1], + ]) + assert torch.all(phy2log == expected_phy2log) + + expected_logcnt = torch.tensor([[1, 2, 1, 1, 2, 2, 1, 1, 1, 1, 2, 1], + [1, 2, 1, 1, 1, 2, 2, 1, 2, 1, 1, 1]]) + assert torch.all(logcnt == expected_logcnt) + + +def test_single_gpu_case(): + """Test single GPU case""" + weight = torch.tensor([[10, 20, 30, 40]]) + num_replicas = 4 + num_groups = 1 + num_nodes = 1 + num_gpus = 1 + + phy2log, log2phy, logcnt = rebalance_experts(weight, num_replicas, + num_groups, num_nodes, + num_gpus) + + # Verify shapes + assert phy2log.shape == (1, 4) + assert log2phy.shape[0] == 1 + assert log2phy.shape[1] == 4 + assert logcnt.shape == (1, 4) + + # Verify all logical experts are mapped + assert set(phy2log[0].tolist()) == {0, 1, 2, 3} + + +def test_equal_weights(): + """Test case with equal weights""" + weight = torch.tensor([[50, 50, 50, 50, 50, 50, 50, 50]]) + num_replicas = 8 + num_groups = 2 + num_nodes = 2 + num_gpus = 4 + + phy2log, log2phy, logcnt = rebalance_experts(weight, num_replicas, + num_groups, num_nodes, + num_gpus) + + # Verify shapes + assert phy2log.shape == (1, 8) + assert logcnt.shape == (1, 8) + + # With equal weights, each expert should have exactly one replica + assert torch.all( + logcnt == 1 + ), "With equal weights and no replication, " \ + "each expert should have exactly 1 replica" + + +def test_extreme_weight_imbalance(): + """Test extreme weight imbalance case""" + weight = torch.tensor([[1000, 1, 1, 1, 1, 1, 1, 1]]) + num_replicas = 12 + num_groups = 2 + num_nodes = 2 + num_gpus = 4 + + phy2log, log2phy, logcnt = rebalance_experts(weight, num_replicas, + num_groups, num_nodes, + num_gpus) + + # Verify shapes + assert phy2log.shape == (1, 12) + assert logcnt.shape == (1, 8) + + # Expert with highest weight (index 0) should have more replicas + assert ( + logcnt[0, 0] + > logcnt[0, 1]), "Expert with highest weight should have more replicas" + + +def test_multiple_layers(): + """Test multiple layers case""" + weight = torch.tensor([ + [10, 20, 30, 40, 50, 60], # First layer + [60, 50, 40, 30, 20, 10], # Second layer (opposite weight pattern) + [25, 25, 25, 25, 25, 25], # Third layer (equal weights) + ]) + num_replicas = 8 + num_groups = 2 + num_nodes = 2 + num_gpus = 4 + + phy2log, log2phy, logcnt = rebalance_experts(weight, num_replicas, + num_groups, num_nodes, + num_gpus) + + # Verify shapes + assert phy2log.shape == (3, 8) + assert logcnt.shape == (3, 6) + + # Verify expert allocation is reasonable for each layer + for layer in range(3): + assert torch.all(phy2log[layer] >= 0) and torch.all( + phy2log[layer] < 6 + ), f"Layer {layer} physical to logical mapping" \ + "should be in range [0, 6)" + assert (torch.sum(logcnt[layer]) == num_replicas + ), f"Layer {layer} total replicas should be {num_replicas}" + + +def test_parameter_validation(): + """Test parameter validation""" + weight = torch.tensor([[10, 20, 30, 40]]) + + # Test non-divisible case - this should handle normally without throwing + # errors because the function will fall back to global load balancing + # strategy + phy2log, log2phy, logcnt = rebalance_experts(weight, 8, 3, 2, 4) + assert phy2log.shape == (1, 8) + assert logcnt.shape == (1, 4) + + # Test cases that will actually cause errors: + # num_physical_experts not divisible by num_gpus + with pytest.raises(AssertionError): + rebalance_experts(weight, 7, 2, 2, 4) # 7 not divisible by 4 + + +def test_small_scale_hierarchical(): + """Test small-scale hierarchical load balancing""" + weight = torch.tensor([ + [100, 50, 200, 75, 150, 25, 300, 80], # 8 experts + ]) + num_replicas = 12 + num_groups = 4 # 4 groups, 2 experts each + num_nodes = 2 # 2 nodes + num_gpus = 4 # 4 GPUs + + phy2log, log2phy, logcnt = rebalance_experts(weight, num_replicas, + num_groups, num_nodes, + num_gpus) + + # Verify basic constraints + assert phy2log.shape == (1, 12) + assert logcnt.shape == (1, 8) + assert torch.sum(logcnt) == num_replicas + assert torch.all(logcnt >= 1) + + # Expert with highest weight should have more replicas + max_weight_expert = torch.argmax(weight[0]) + assert (logcnt[0, max_weight_expert] + >= 2), "Highest weight expert should have multiple replicas" + + +def test_global_load_balance_fallback(): + """Test global load balancing fallback case""" + # When num_groups % num_nodes != 0, should fall back to global load + # balancing + weight = torch.tensor([[10, 20, 30, 40, 50, 60]]) + num_replicas = 8 + num_groups = 3 # Cannot be divided evenly by num_nodes=2 + num_nodes = 2 + num_gpus = 4 + + phy2log, log2phy, logcnt = rebalance_experts(weight, num_replicas, + num_groups, num_nodes, + num_gpus) + + # Should work normally, just using global load balancing strategy + assert phy2log.shape == (1, 8) + assert logcnt.shape == (1, 6) + assert torch.sum(logcnt) == num_replicas + + +@pytest.mark.parametrize("device", ["cpu", "cuda"]) +def test_device_compatibility(device): + """Test device compatibility""" + if device == "cuda" and not torch.cuda.is_available(): + pytest.skip("CUDA not available") + + weight = torch.tensor([[10, 20, 30, 40]], device=device) + num_replicas = 6 + num_groups = 2 + num_nodes = 1 + num_gpus = 2 + + phy2log, log2phy, logcnt = rebalance_experts(weight, num_replicas, + num_groups, num_nodes, + num_gpus) + + # Function will convert to CPU internally, but should handle different + # device inputs normally + assert phy2log.shape == (1, 6) + assert logcnt.shape == (1, 4) + + +def test_additional_cases(): + """Test more edge cases and different parameter combinations""" + + # Test case 1: Large-scale distributed setup + weight1 = torch.tensor( + [[50, 100, 75, 120, 90, 60, 80, 110, 40, 70, 95, 85, 65, 55, 45, 35]]) + phy2log1, log2phy1, logcnt1 = rebalance_experts(weight1, 24, 8, 4, 8) + + assert phy2log1.shape == (1, 24) + assert logcnt1.shape == (1, 16) + assert torch.sum(logcnt1) == 24 + + # Test case 2: Different weight distributions + weight2 = torch.tensor([ + [200, 150, 100, 50, 25, 12], # Decreasing weights + [12, 25, 50, 100, 150, 200], # Increasing weights + ]) + phy2log2, log2phy2, logcnt2 = rebalance_experts(weight2, 10, 3, 1, 2) + + assert phy2log2.shape == (2, 10) + assert logcnt2.shape == (2, 6) + + # Verify high-weight experts have more replicas + for layer in range(2): + max_weight_idx = torch.argmax(weight2[layer]) + assert logcnt2[layer, max_weight_idx] >= 2 + + +if __name__ == "__main__": + weight = torch.tensor([ + [90, 132, 40, 61, 104, 165, 39, 4, 73, 56, 183, 86], + [20, 107, 104, 64, 19, 197, 187, 157, 172, 86, 16, 27], + ]) + + num_replicas = 16 + num_groups = 4 + num_nodes = 2 + num_gpus = 8 + + phy2log, log2phy, logcnt = rebalance_experts(weight, num_replicas, + num_groups, num_nodes, + num_gpus) + print(phy2log) + + test_basic_rebalance() diff --git a/tests/distributed/test_eplb_execute.py b/tests/distributed/test_eplb_execute.py new file mode 100644 index 0000000000000..de9ed1eabbac6 --- /dev/null +++ b/tests/distributed/test_eplb_execute.py @@ -0,0 +1,504 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import multiprocessing +import os +import random + +import pytest +import torch +import torch.distributed + +from vllm.distributed.eplb.rebalance_execute import ( + rearrange_expert_weights_inplace) +from vllm.distributed.parallel_state import (ensure_model_parallel_initialized, + get_tp_group, + init_distributed_environment) +from vllm.utils import update_environment_variables + + +def distributed_run(fn, world_size): + number_of_processes = world_size + processes: list[multiprocessing.Process] = [] + for i in range(number_of_processes): + env: dict[str, str] = {} + env['RANK'] = str(i) + env['LOCAL_RANK'] = str(i) + env['WORLD_SIZE'] = str(number_of_processes) + env['LOCAL_WORLD_SIZE'] = str(number_of_processes) + env['MASTER_ADDR'] = 'localhost' + env['MASTER_PORT'] = '12345' + p = multiprocessing.Process(target=fn, args=(env, )) + processes.append(p) + p.start() + + for p in processes: + p.join() + + for p in processes: + assert p.exitcode == 0 + + +def worker_fn_wrapper(fn): + # `multiprocessing.Process` cannot accept environment variables directly + # so we need to pass the environment variables as arguments + # and update the environment variables in the function + def wrapped_fn(env): + update_environment_variables(env) + local_rank = os.environ['LOCAL_RANK'] + device = torch.device(f"cuda:{local_rank}") + torch.cuda.set_device(device) + init_distributed_environment() + + # Ensure each worker process has the same random seed + random.seed(42) + torch.manual_seed(42) + + fn() + + return wrapped_fn + + +def create_expert_indices_with_redundancy( + num_layers: int, + num_logical_experts: int, + total_physical_experts: int, + redundancy_config: list[int], # redundancy for each logical expert +) -> torch.Tensor: + """ + Create expert indices with redundancy. + + Args: + num_layers: number of layers + num_logical_experts: number of logical experts + total_physical_experts: total number of physical experts + redundancy_config: redundancy for each logical expert + + Returns: + indices: Shape (num_layers, total_physical_experts) + """ + assert sum(redundancy_config) == total_physical_experts + assert len(redundancy_config) == num_logical_experts + + indices = torch.zeros(num_layers, total_physical_experts, dtype=torch.long) + + for layer in range(num_layers): + physical_pos = 0 + for logical_expert_id, redundancy in enumerate(redundancy_config): + for _ in range(redundancy): + indices[layer, physical_pos] = logical_expert_id + physical_pos += 1 + + # Shuffle the indices at dim 1 + for layer in range(num_layers): + indices[layer] = indices[layer][torch.randperm(indices.shape[1])] + + return indices + + +def create_expert_weights( + num_layers: int, + num_local_experts: int, + hidden_sizes: list[int], + rank: int, + device: torch.device, + physical_to_logical_mapping: torch.Tensor, +) -> list[list[torch.Tensor]]: + """ + Create fake expert weights tensor for testing. + + Use `arange` to generate predictable weights values, based on logical + expert ID. + All replicas of the same logical expert should have the same weights. + + Args: + physical_to_logical_mapping: Shape (num_layers, num_local_experts) + mapping[layer, physical_pos] = logical_expert_id + """ + expert_weights = [] + + for layer in range(num_layers): + layer_weights = [] + for weight_idx, hidden_size in enumerate(hidden_sizes): + weight_tensor = torch.zeros(num_local_experts, + hidden_size, + device=device, + dtype=torch.float32) + + for local_expert in range(num_local_experts): + # Get the logical expert ID for this physical expert + global_pos = rank * num_local_experts + local_expert + logical_expert_id = physical_to_logical_mapping[ + layer, global_pos].item() + + # Generate weights based on logical expert ID + # (so that all replicas of the same logical expert have the + # same weights) + base_value = (logical_expert_id * 1000 + layer * 100 + + weight_idx * 10) + weight_tensor[local_expert] = torch.arange(base_value, + base_value + + hidden_size, + device=device, + dtype=torch.float32) + + layer_weights.append(weight_tensor) + expert_weights.append(layer_weights) + + return expert_weights + + +def create_redundancy_config( + num_logical_experts: int, + num_physical_experts: int, +) -> list[int]: + """Create a redundancy configuration.""" + redundancy_config = [1] * num_logical_experts + remaining = num_physical_experts - num_logical_experts + # Randomly assign the remaining physical experts to the logical experts + for _ in range(remaining): + redundancy_config[random.choice(range(num_logical_experts))] += 1 + return redundancy_config + + +def verify_expert_weights_after_shuffle( + expert_weights: list[list[torch.Tensor]], + new_indices: torch.Tensor, + hidden_sizes: list[int], + ep_rank: int, + num_local_experts: int, +): + """Verify the weights after shuffling are correct.""" + num_layers = len(expert_weights) + + for layer in range(num_layers): + for weight_idx, hidden_size in enumerate(hidden_sizes): + weight_tensor = expert_weights[layer][weight_idx] + + for local_expert in range(num_local_experts): + # Calculate the global expert ID for this local expert + global_pos = ep_rank * num_local_experts + local_expert + expected_logical_expert = new_indices[layer, global_pos].item() + + # Check if the weights are correct + actual_weights = weight_tensor[local_expert] + expected_base = (expected_logical_expert * 1000 + layer * 100 + + weight_idx * 10) + expected_weights = torch.arange(expected_base, + expected_base + hidden_size, + device=actual_weights.device, + dtype=actual_weights.dtype) + + torch.testing.assert_close( + actual_weights, + expected_weights, + msg=f"Layer {layer}, weight {weight_idx}," + f"local expert {local_expert}: " + f"weights do not match. " + f"Expected logical expert {expected_logical_expert}") + + +def verify_redundant_experts_have_same_weights( + expert_weights: list[list[torch.Tensor]], + indices: torch.Tensor, + hidden_sizes: list[int], + world_size: int, + num_local_experts: int, +): + """ + Verify that all replicas of the same logical expert have the same weights. + """ + num_layers = len(expert_weights) + total_physical_experts = world_size * num_local_experts + + for layer in range(num_layers): + # Collect weights for all physical experts for each weight matrix + all_weights: list[torch.Tensor] = [] + + for weight_idx, hidden_size in enumerate(hidden_sizes): + # Create tensor to store all expert weights + # Shape: [total_physical_experts, hidden_size] + gathered_weights = torch.zeros( + total_physical_experts, + hidden_size, + device=expert_weights[layer][weight_idx].device, + dtype=expert_weights[layer][weight_idx].dtype) + + # Use all_gather to collect expert weights from current node + # expert_weights[layer][weight_idx] shape: + # [num_local_experts, hidden_size] + local_weights = expert_weights[layer][ + weight_idx] # [num_local_experts, hidden_size] + + # Split tensor along dim 0 into a list for all_gather + gathered_weights_list = torch.chunk(gathered_weights, + world_size, + dim=0) + + torch.distributed.all_gather( + # Output list: each element corresponds to one rank's weights + list(gathered_weights_list), + local_weights # Input: current rank's local weights + ) + + all_weights.append(gathered_weights) + + # Verify that all replicas of the same logical expert have the same + # weights + logical_expert_weights: dict[int, dict[int, torch.Tensor]] = {} + + for physical_pos in range(total_physical_experts): + logical_expert_id = int(indices[layer, physical_pos].item()) + + if logical_expert_id not in logical_expert_weights: + # First time encountering this logical expert, save its weights + logical_expert_weights[logical_expert_id] = { + weight_idx: all_weights[weight_idx][physical_pos] + for weight_idx in range(len(hidden_sizes)) + } + else: + # Verify that current physical expert's weights match the + # previously saved logical expert weights + for weight_idx in range(len(hidden_sizes)): + torch.testing.assert_close( + all_weights[weight_idx][physical_pos], + logical_expert_weights[logical_expert_id][weight_idx], + msg=f"Layer {layer}, weight {weight_idx}," + f"logical expert {logical_expert_id}: " + f"Physical expert {physical_pos} has different weights" + f"than expected") + + +@pytest.mark.parametrize( + "world_size,num_layers,num_local_experts,num_logical_experts", + [ + # 2 GPU, 2 experts per GPU + # 3 logical experts, 4 physical experts, 1 redundant experts + (2, 1, 2, 3), + # 2 GPU, 3 experts per GPU + # 4 logical experts, 6 physical experts, 2 redundant experts + (2, 2, 3, 4), + # 2 GPU, 8 experts per GPU + # 16 logical experts, 16 physical experts, 0 redundant experts + (2, 4, 8, 16), + # 4 GPU, 2 experts per GPU + # 6 logical experts, 8 physical experts, 2 redundant experts + (4, 1, 2, 6), + # 4 GPU, 2 experts per GPU + # 5 logical experts, 8 physical experts, 3 redundant experts + (4, 2, 2, 5), + # 4 GPU, 8 experts per GPU + # 16 logical experts, 32 physical experts, 16 redundant experts + (4, 8, 8, 16), + ]) +def test_rearrange_expert_weights_with_redundancy(world_size, num_layers, + num_local_experts, + num_logical_experts): + """Test the functionality of rearranging expert weights with redundancy.""" + + if torch.cuda.device_count() < world_size: + pytest.skip(f"Need at least {world_size} GPUs to run the test") + + @worker_fn_wrapper + def worker_fn(): + # Initialize model parallel (using tensor parallel as an entrypoint + # to expert parallel) + ensure_model_parallel_initialized( + tensor_model_parallel_size=world_size, + pipeline_model_parallel_size=1) + + ep_group = get_tp_group().cpu_group + ep_rank = torch.distributed.get_rank() + device = torch.device(f"cuda:{ep_rank}") + + # Test parameters + total_physical_experts = world_size * num_local_experts + hidden_sizes = [32, 64] # Two different weight matrices + + # Create old expert indices (with redundancy) + redundancy_config = create_redundancy_config(num_logical_experts, + total_physical_experts) + + old_indices = create_expert_indices_with_redundancy( + num_layers, + num_logical_experts, + total_physical_experts, + redundancy_config, + ) + + # Create new expert indices (with redundancy) + new_redundancy_config = create_redundancy_config( + num_logical_experts, total_physical_experts) + new_indices = create_expert_indices_with_redundancy( + num_layers, + num_logical_experts, + total_physical_experts, + new_redundancy_config, + ) + + # Create expert weights + expert_weights = create_expert_weights(num_layers, num_local_experts, + hidden_sizes, ep_rank, device, + old_indices) + + # Execute weight rearrangement + rearrange_expert_weights_inplace( + old_indices, + new_indices, + expert_weights, + ep_group, + is_profile=False, + ) + + # Verify the rearrangement result + verify_expert_weights_after_shuffle( + expert_weights, + new_indices, + hidden_sizes, + ep_rank, + num_local_experts, + ) + + verify_redundant_experts_have_same_weights( + expert_weights, + new_indices, + hidden_sizes, + world_size, + num_local_experts, + ) + + distributed_run(worker_fn, world_size) + + +@pytest.mark.parametrize("world_size", [2, 4]) +def test_rearrange_expert_weights_no_change(world_size): + """ + Test that when the indices do not change, the weights should remain + unchanged. + """ + + if torch.cuda.device_count() < world_size: + pytest.skip(f"Need at least {world_size} GPUs to run the test") + + @worker_fn_wrapper + def worker_fn(): + ensure_model_parallel_initialized( + tensor_model_parallel_size=world_size, + pipeline_model_parallel_size=1) + + ep_group = get_tp_group().cpu_group + ep_rank = torch.distributed.get_rank() + device = torch.device(f"cuda:{ep_rank}") + + num_layers = 2 + num_local_experts = 2 + total_physical_experts = world_size * num_local_experts + num_logical_experts = total_physical_experts // 2 # Some redundancy + hidden_sizes = [32, 64] + + # Create redundancy configuration + redundancy_config = [2] * num_logical_experts + + # Same indices - no change + indices = create_expert_indices_with_redundancy( + num_layers, num_logical_experts, total_physical_experts, + redundancy_config) + + expert_weights = create_expert_weights(num_layers, num_local_experts, + hidden_sizes, ep_rank, device, + indices) + + # Save original weights + original_weights = [] + for layer_weights in expert_weights: + layer_copy = [] + for weight in layer_weights: + layer_copy.append(weight.clone()) + original_weights.append(layer_copy) + + # Execute rearrangement (should be no change) + rearrange_expert_weights_inplace( + indices, + indices, # Same indices + expert_weights, + ep_group, + is_profile=False) + + # Verify that the weights have not changed + for layer in range(num_layers): + for weight_idx in range(len(hidden_sizes)): + torch.testing.assert_close( + expert_weights[layer][weight_idx], + original_weights[layer][weight_idx], + msg=f"Layer {layer}, weight {weight_idx} should remain " + f"unchanged") + + distributed_run(worker_fn, world_size) + + +@pytest.mark.parametrize("world_size", [2, 4]) +def test_rearrange_expert_weights_profile_mode(world_size): + """Test profile mode (should not copy actual weights)""" + + if torch.cuda.device_count() < world_size: + pytest.skip(f"Need at least {world_size} GPUs to run the test") + + @worker_fn_wrapper + def worker_fn(): + ensure_model_parallel_initialized( + tensor_model_parallel_size=world_size, + pipeline_model_parallel_size=1) + + ep_group = get_tp_group().cpu_group + ep_rank = torch.distributed.get_rank() + device = torch.device(f"cuda:{ep_rank}") + + num_layers = 1 + num_local_experts = 2 + total_physical_experts = world_size * num_local_experts + num_logical_experts = total_physical_experts // 2 + hidden_sizes = [32] + + # Create different index distributions + old_redundancy = create_redundancy_config(num_logical_experts, + total_physical_experts) + new_redundancy = create_redundancy_config(num_logical_experts, + total_physical_experts) + + old_indices = create_expert_indices_with_redundancy( + num_layers, num_logical_experts, total_physical_experts, + old_redundancy) + new_indices = create_expert_indices_with_redundancy( + num_layers, num_logical_experts, total_physical_experts, + new_redundancy) + + expert_weights = create_expert_weights(num_layers, num_local_experts, + hidden_sizes, ep_rank, device, + old_indices) + + # Save original weights + original_weights = [] + for layer_weights in expert_weights: + layer_copy = [] + for weight in layer_weights: + layer_copy.append(weight.clone()) + original_weights.append(layer_copy) + + # Execute profile mode rearrangement + rearrange_expert_weights_inplace( + old_indices, + new_indices, + expert_weights, + ep_group, + is_profile=True # Profile mode + ) + + # In profile mode, the weights should remain unchanged + for layer in range(num_layers): + for weight_idx in range(len(hidden_sizes)): + torch.testing.assert_close( + expert_weights[layer][weight_idx], + original_weights[layer][weight_idx], + msg="In profile mode, the weights should remain unchanged") + + distributed_run(worker_fn, world_size) diff --git a/tests/models/test_initialization.py b/tests/models/test_initialization.py index 54e8cd597bfc4..e56bc925c9c40 100644 --- a/tests/models/test_initialization.py +++ b/tests/models/test_initialization.py @@ -31,12 +31,20 @@ def test_can_initialize(model_arch: str, monkeypatch: pytest.MonkeyPatch): text_config = hf_config.get_text_config() + # Ensure at least 2 expert per group + # Since `grouped_topk` assums top-2 + num_experts = getattr(text_config, 'n_group', 1) * 2 + text_config.update({ "num_layers": 1, "num_hidden_layers": 1, - "num_experts": 2, + "num_experts": num_experts, "num_experts_per_tok": 2, - "num_local_experts": 2, + "num_local_experts": num_experts, + # Otherwise there will not be any expert layers + "first_k_dense_replace": 0, + # To avoid OOM on DeepSeek-V3 + "n_routed_experts": num_experts, }) if hasattr(hf_config, "vision_config"): diff --git a/vllm/config.py b/vllm/config.py index 96ea47a0dce38..856b361531168 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1775,6 +1775,25 @@ class ParallelConfig: """Backend to use for data parallel, either "mp" or "ray".""" enable_expert_parallel: bool = False """Use expert parallelism instead of tensor parallelism for MoE layers.""" + enable_eplb: bool = False + """Enable expert parallelism load balancing for MoE layers.""" + num_redundant_experts: int = 0 + """Number of redundant experts to use for expert parallelism.""" + eplb_window_size: int = 1000 + """Window size for expert load recording.""" + eplb_step_interval: int = 3000 + """ + Interval for rearranging experts in expert parallelism. + + Note that if this is greater than the EPLB window size, only the metrics + of the last `eplb_window_size` steps will be used for rearranging experts. + """ + eplb_log_balancedness: bool = False + """ + Log the balancedness each step of expert parallelism. + This is turned off by default since it will cause communication overhead. + """ + max_parallel_loading_workers: Optional[int] = None """Maximum number of parallel loading workers when loading model sequentially in multiple batches. To avoid RAM OOM when using tensor @@ -1913,6 +1932,20 @@ class ParallelConfig: os.environ["VLLM_ENABLE_V1_MULTIPROCESSING"] = "0" logger.info("Disabling V1 multiprocessing for external launcher.") + if self.enable_eplb: + if not current_platform.is_cuda(): + raise ValueError( + "Expert parallelism load balancing is only supported on " + "CUDA devices now.") + if self.num_redundant_experts < 0: + raise ValueError( + "num_redundant_experts must be non-negative, but got " + f"{self.num_redundant_experts}.") + else: + if self.num_redundant_experts != 0: + raise ValueError( + "num_redundant_experts should be used with EPLB." + f"{self.num_redundant_experts}.") if self.distributed_executor_backend is None and self.world_size > 1: # We use multiprocessing by default if world_size fits on the # current node and we aren't in a ray placement group. diff --git a/vllm/distributed/eplb/__init__.py b/vllm/distributed/eplb/__init__.py new file mode 100644 index 0000000000000..c87b039afd73d --- /dev/null +++ b/vllm/distributed/eplb/__init__.py @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: Apache-2.0 +''' +Expert parallelism load balancer (EPLB). +''' + +from .eplb_state import * +from .rebalance_algo import * diff --git a/vllm/distributed/eplb/eplb_state.py b/vllm/distributed/eplb/eplb_state.py new file mode 100644 index 0000000000000..2185df865c1f6 --- /dev/null +++ b/vllm/distributed/eplb/eplb_state.py @@ -0,0 +1,431 @@ +# SPDX-License-Identifier: Apache-2.0 +""" +Expert parallelism load balancer (EPLB) metrics and states. + +# Glossary + +- **Logical Expert**: An expert that is part of the model's logical structure. + It holds a set of weights and is replicated across multiple physical + experts. +- **Redundant Expert**: To achieve load balancing, for some popular logical + experts, we create additional copies of the expert weights. During inference, + each of these copies can be routed to by the same set of tokens. +- **Physical Expert**: An expert that is instantiated on a specific device. + It is a replica of a logical expert and can be rearranged across devices. + I.e., one logical expert may have multiple sets of weights initialized on + different devices, and each of these sets is a physical expert. +- **Local Physical Expert**: A physical expert that is instantiated on the + current device. + +For example: DeepSeek-R1 has 256 logical experts, so each MoE layer +has 256 sets of linear layer weights in the model parameters. If we add 32 +redundant experts, DeepSeek-R1 will have 256 + 32 = 288 physical experts in +total. And when deploying, we'll have 288 sets of linear layer weights for each +MoE layer. If we have 32 EP ranks, then each GPU will hold 288 / 32 = 9 local +physical experts. +""" + +import time +from collections.abc import Sequence +from dataclasses import dataclass + +import torch +from torch.distributed import all_gather, all_reduce + +from vllm.config import ParallelConfig +from vllm.distributed.parallel_state import get_ep_group, get_node_count +from vllm.logger import init_logger +from vllm.model_executor.models.interfaces import MixtureOfExperts + +from .rebalance_algo import rebalance_experts +from .rebalance_execute import rearrange_expert_weights_inplace + +logger = init_logger(__name__) + + +@dataclass +class EplbState: + """EPLB metrics.""" + + physical_to_logical_map: torch.Tensor + """ + Mapping from physical experts to logical experts. + + Shape: (num_moe_layers, num_physical_experts) + + # Example + + For a 2-layer MoE model with 6 physical experts and 4 logical experts on 3 + EP ranks, the mapping could look like this: + + ``` + [[0, 1, 2, 3, 0, 1], + [0, 2, 0, 1, 0, 3]] + ``` + """ + logical_to_physical_map: torch.Tensor + """ + Mapping from logical experts to physical experts. + + This is a sparse matrix, where -1 indicates no mapping. + + Shape: (num_moe_layers, num_logical_experts, num_redundant_experts + 1) + + # Example + + For a 2-layer MoE model with 6 physical experts and 4 logical experts on 3 + EP ranks, the mapping could look like this: + + ``` + [[[0, 4, -1], + [1, 5, -1], + [2, -1, -1], + [3, -1, -1]], + [[0, 2, 4], + [3, -1, -1], + [1, -1, -1], + [5, -1, -1]]] + ``` + """ + logical_replica_count: torch.Tensor + """ + Number of replicas for each logical expert. + This is exactly the non-`-1` count in the `logical_to_physical_map`. + + Shape: (num_moe_layers, num_logical_experts) + + # Example + For a 2-layer MoE model with 6 physical experts and 4 logical experts on 3 + EP ranks, the count could look like this: + + ``` + [[2, 2, 1, 1], + [3, 1, 1, 1]] + """ + + expert_load_pass: torch.Tensor + """ + Expert load during this forward pass. + We use the token count each expert processes as the load. + + Shape: (num_moe_layers, num_local_physical_experts) + """ + expert_load_window: torch.Tensor + """ + A sliding window of expert load. + + Shape: (window_size, num_moe_layers, num_local_physical_experts) + """ + expert_load_window_step: int = 0 + """ + Current step in the sliding window. + + Different from `expert_rearrangement_step`, each EP rank may have its own + `expert_load_window_step`. + """ + expert_load_window_size: int = 0 + """ + Size of the expert load sliding window. + This is a constant and is taken from the config. + """ + + expert_rearrangement_step: int = 0 + """ + Steps after last rearrangement. + Will trigger a rearrangement if it exceeds the threshold. + + NOTE: Keep in mind that all EP ranks need to have the same + `expert_rearrangement_step` value to ensure synchronization. + Otherwise, the rearrangement will hang at collective + communication calls. + """ + expert_rearrangement_step_interval: int = 0 + """ + Interval for expert rearrangement steps. + This is a constant and is taken from the config. + """ + + @staticmethod + def build_initial_global_physical_to_logical_map( + num_routed_experts: int, + num_redundant_experts: int, + ) -> Sequence[int]: + """ + Build an initial expert arrangement using the following structure: + [original routed experts, redundant experts] + + Returns: + physical_to_logical_map (Sequence[int]): A list of integers, + where each integer is the index of the logical expert + that the corresponding physical expert maps to. + """ + global_physical_to_logical_map = list(range(num_routed_experts)) + global_physical_to_logical_map += [ + i % num_routed_experts for i in range(num_redundant_experts) + ] + return global_physical_to_logical_map + + @classmethod + def build( + cls, + model: MixtureOfExperts, + device: torch.device, + parallel_config: ParallelConfig, + ) -> "EplbState": + """ + Build the initial EPLB state. + """ + physical_to_logical_map_list = ( + cls.build_initial_global_physical_to_logical_map( + model.num_routed_experts, + model.num_redundant_experts, + )) + physical_to_logical_map = torch.tensor( + physical_to_logical_map_list, + device=device, + ) + logical_to_physical_map = torch.full( + (model.num_logical_experts, model.num_redundant_experts + 1), + -1, + device=device, + ) + logical_replica_count = torch.zeros( + (model.num_logical_experts, ), + device=device, + dtype=torch.long, + ) + + for i in range(model.num_physical_experts): + logical_idx = physical_to_logical_map[i] + logical_to_physical_map[logical_idx, + logical_replica_count[logical_idx]] = i + logical_replica_count[logical_idx] += 1 + + # Duplicate initial mapping for all layers + physical_to_logical_map = physical_to_logical_map.unsqueeze(0).expand( + model.num_moe_layers, + -1, + ).contiguous() + logical_to_physical_map = logical_to_physical_map.unsqueeze(0).expand( + model.num_moe_layers, + -1, + -1, + ).contiguous() + logical_replica_count = logical_replica_count.unsqueeze(0).expand( + model.num_moe_layers, + -1, + ).contiguous() + + expert_load_pass = torch.zeros( + (model.num_moe_layers, model.num_local_physical_experts), + dtype=torch.int32, + device=device, + ) + expert_load_window_size = parallel_config.eplb_window_size + expert_load_window = torch.zeros( + (expert_load_window_size, model.num_moe_layers, + model.num_local_physical_experts), + dtype=torch.int32, + device=device, + ) + + # Set the initial progress of rearrangement to 3/4 + eplb_step_interval = parallel_config.eplb_step_interval + expert_rearrangement_step = max( + 0, eplb_step_interval - eplb_step_interval // 4) + + model.set_eplb_state( + expert_load_pass, + logical_to_physical_map, + logical_replica_count, + ) + + return cls( + physical_to_logical_map, + logical_to_physical_map, + logical_replica_count, + expert_load_pass, + expert_load_window, + expert_load_window_size=expert_load_window_size, + expert_rearrangement_step=expert_rearrangement_step, + expert_rearrangement_step_interval=eplb_step_interval, + ) + + def step(self, + model: MixtureOfExperts, + is_dummy: bool = False, + is_profile: bool = False, + log_stats: bool = False) -> None: + """ + Step the EPLB state. + + Args: + model (MixtureOfExperts): The MoE model. + is_dummy (bool): If `True`, this is a dummy step and the load + metrics recorded in this forward pass will not count. Defaults + to `False`. + is_profile (bool): If `True`, perform a dummy rearrangement + with maximum communication cost. This is used in `profile_run` + to reserve enough memory for the communication buffer. + log_stats (bool): If `True`, log the expert load metrics. + + # Stats + The metrics are all summed up across layers. + - `avg_tokens`: The average load across ranks. + - `max_tokens`: The maximum load across ranks. + - `balancedness`: The ratio of average load to maximum load. + """ + + if is_profile: + self.rearrange(model, is_profile=True) + return + + if is_dummy: + # Do not record load metrics for dummy steps + self.expert_load_pass.zero_() + + if log_stats: + # `num_tokens`: (num_moe_layers,) + num_tokens = self.expert_load_pass.sum(dim=-1) + + # Collect load metrics from all ranks + ep_group = get_ep_group().device_group + num_tokens_list = [ + torch.empty_like(num_tokens) for _ in range(ep_group.size()) + ] + all_gather(num_tokens_list, num_tokens, group=ep_group) + # Stack to get (num_ranks, num_moe_layers) + num_tokens_per_rank = torch.stack(num_tokens_list).float() + + # Compute balancedness ratio: + # for each layer: + # (mean load across ranks) / (max load across ranks) + avg_tokens_tensor = num_tokens_per_rank.mean(dim=0).sum(dim=0) + max_tokens_tensor = num_tokens_per_rank.max(dim=0).values.sum( + dim=0) + + # Just to make type checker happy + tokens_tensors: list[float] = torch.stack( + [avg_tokens_tensor, max_tokens_tensor]).tolist() + avg_tokens, max_tokens = tokens_tensors + balancedness = avg_tokens / max_tokens if max_tokens > 0 else 0.0 + + if ep_group.rank() == 0: + logger.info( + "EPLB step: avg_tokens=%.2f, max_tokens=%d, " + "balancedness=%.4f", avg_tokens, max_tokens, balancedness) + + # Update the expert load sliding window + if not is_dummy: + self.expert_load_window[self.expert_load_window_step] = ( + self.expert_load_pass.clone()) + self.expert_load_window_step += 1 + if self.expert_load_window_step >= self.expert_load_window_size: + self.expert_load_window_step = 0 + self.expert_load_pass.zero_() + + # Step the expert rearrangement step + # Note that even if this is a dummy step, we still increment the + # rearrangement step and perform rearrangement to ensure all ranks are + # performing collective communication. + self.expert_rearrangement_step += 1 + if (self.expert_rearrangement_step + >= self.expert_rearrangement_step_interval): + self.expert_rearrangement_step = 0 + self.rearrange(model) + + def rearrange(self, + model: MixtureOfExperts, + is_profile: bool = False) -> None: + """ + Rearrange the experts according to the current load. + """ + + ep_group = get_ep_group().device_group + ep_rank = ep_group.rank() + + time_start = None + is_main_rank = ep_rank == 0 + if is_main_rank: + torch.cuda.synchronize() + time_start = time.perf_counter() + logger.info("Rearranging experts %s...", + "(profile)" if is_profile else "") + + # This mapping is only used here, so we do not store it in the state + physical_expert_start = ep_rank * model.num_local_physical_experts + physical_expert_end = (physical_expert_start + + model.num_local_physical_experts) + # (num_moe_layers, num_local_physical_experts) + local_physical_to_logical_map = self.physical_to_logical_map[ + :, + physical_expert_start:physical_expert_end, + ] + + # Map the local physical expert load to global logical experts + logical_expert_load_window = torch.zeros( + self.expert_load_window_size, + model.num_moe_layers, + model.num_logical_experts, + dtype=self.expert_load_window.dtype, + device=self.expert_load_window.device, + ) + logical_expert_load_window.scatter_add_( + dim=-1, + index=local_physical_to_logical_map.unsqueeze(0).expand_as( + self.expert_load_window).long(), + src=self.expert_load_window, + ) + + # Perform all-reduce to get the expert load across all ranks + global_expert_load_window = logical_expert_load_window.sum(dim=0) + all_reduce(global_expert_load_window, group=ep_group) + + # TODO(bowen): Treat differently for prefill and decode nodes + num_replicas = model.num_physical_experts + num_groups = model.num_expert_groups + num_nodes = get_node_count() + num_gpus = ep_group.size() + + if num_gpus % num_nodes != 0: + logger.warning_once( + f"num_gpus % num_nodes != 0, " + "not using hierarchical rearrangement algorithm.\n" + f"{num_gpus=}, {num_nodes=}") + + # Get new expert mappings + ( + new_physical_to_logical_map, + new_logical_to_physical_map, + new_logical_replica_count, + ) = (rebalance_experts( + global_expert_load_window, + num_replicas, + num_groups, + num_nodes, + num_gpus, + )) + + # Update expert weights + rearrange_expert_weights_inplace( + self.physical_to_logical_map, + new_physical_to_logical_map, + model.expert_weights, + ep_group, + is_profile, + ) + + if not is_profile: + self.physical_to_logical_map.copy_(new_physical_to_logical_map) + self.logical_to_physical_map.copy_(new_logical_to_physical_map) + self.logical_replica_count.copy_(new_logical_replica_count) + + if is_main_rank: + assert time_start is not None + torch.cuda.synchronize() + time_end = time.perf_counter() + logger.info( + "Rearranged experts%sin %.2f seconds.", + " (profile) " if is_profile else " ", + time_end - time_start, + ) diff --git a/vllm/distributed/eplb/rebalance_algo.py b/vllm/distributed/eplb/rebalance_algo.py new file mode 100644 index 0000000000000..7ad6d566b55bb --- /dev/null +++ b/vllm/distributed/eplb/rebalance_algo.py @@ -0,0 +1,233 @@ +# SPDX-License-Identifier: Apache-2.0 +""" +Expert parallelism load balancer (EPLB) for vLLM. + +This module implements the core rearrangement algorithm. + +The rearrangement algorithm is adapted from +[DeepSeek EPLB](https://github.com/deepseek-ai/eplb). + +Please find at [#12](https://github.com/deepseek-ai/EPLB/issues/12) an example +on how the EPLB algorithm works. +""" + +import torch + + +def balanced_packing(weight: torch.Tensor, + num_packs: int) -> tuple[torch.Tensor, torch.Tensor]: + """ + Pack n weighted objects to m packs, such that each bin contains exactly + n/m objects and the weights of all packs are as balanced as possible. + + Parameters: + weight: [X, n], the weight of each item + num_packs: number of packs + + Returns: + pack_index: [X, n], the pack index of each item + rank_in_pack: [X, n], the rank of the item in the pack + """ + num_layers, num_groups = weight.shape + assert num_groups % num_packs == 0 + groups_per_pack = num_groups // num_packs + + if groups_per_pack == 1: + pack_index = torch.arange(weight.size(-1), + dtype=torch.int64, + device=weight.device).expand(weight.shape) + rank_in_pack = torch.zeros_like(weight, dtype=torch.int64) + return pack_index, rank_in_pack + + indices = weight.float().sort(-1, descending=True).indices.cpu() + pack_index = torch.full_like(weight, + fill_value=-1, + dtype=torch.int64, + device="cpu") + rank_in_pack = torch.full_like(pack_index, fill_value=-1) + for i in range(num_layers): + pack_weights = [0] * num_packs + pack_items = [0] * num_packs + for group in indices[i]: + pack = min( + (i + for i in range(num_packs) if pack_items[i] < groups_per_pack), + key=pack_weights.__getitem__, + ) + assert pack_items[pack] < groups_per_pack + pack_index[i, group] = pack + rank_in_pack[i, group] = pack_items[pack] + pack_weights[pack] += weight[i, group] + pack_items[pack] += 1 + return pack_index, rank_in_pack + + +def replicate_experts( + weight: torch.Tensor, + num_phy: int) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Replicate `num_log` experts to `num_phy` replicas, such that the maximum + load of all replicas is minimized. + + Parameters: + weight: [X, num_log] + num_phy: total number of experts after replication + + Returns: + phy2log: [X, num_phy], logical expert id of each physical expert + rank: [X, num_phy], the replica rank + logcnt: [X, num_log], number of replicas for each logical expert + """ + n, num_log = weight.shape + num_redundant = num_phy - num_log + assert num_redundant >= 0 + device = weight.device + phy2log = torch.arange(num_phy, dtype=torch.int64, + device=device).repeat(n, 1) + rank = torch.zeros(n, num_phy, dtype=torch.int64, device=device) + logcnt = torch.ones(n, num_log, dtype=torch.int64, device=device) + arangen = torch.arange(n, dtype=torch.int64, device=device) + for i in range(num_log, num_phy): + redundant_indices = (weight / logcnt).max(dim=-1).indices + phy2log[:, i] = redundant_indices + rank[:, i] = logcnt[arangen, redundant_indices] + logcnt[arangen, redundant_indices] += 1 + return phy2log, rank, logcnt + + +def rebalance_experts_hierarchical( + weight: torch.Tensor, + num_physical_experts: int, + num_groups: int, + num_nodes: int, + num_gpus: int, +): + """ + Parameters: + weight: [num_moe_layers, num_logical_experts] + num_physical_experts: number of physical experts after replication + num_groups: number of expert groups + num_nodes: number of server nodes, where the intra-node network + (e.g, NVLink) is faster + num_gpus: number of GPUs, must be a multiple of `num_nodes` + + Returns: + physical_to_logical_map: [num_moe_layers, num_physical_experts] + logical_to_physical_map: [num_moe_layers, num_logical_experts, X] + logical_count: [num_moe_layers, num_logical_experts] + """ + num_layers, num_logical_experts = weight.shape + assert num_logical_experts % num_groups == 0 + group_size = num_logical_experts // num_groups + assert num_groups % num_nodes == 0 + groups_per_node = num_groups // num_nodes + assert num_gpus % num_nodes == 0 + assert num_physical_experts % num_gpus == 0 + phy_experts_per_gpu = num_physical_experts // num_gpus + + def inverse(perm: torch.Tensor) -> torch.Tensor: + inv = torch.empty_like(perm) + inv.scatter_( + 1, + perm, + torch.arange(perm.size(1), dtype=torch.int64, + device=perm.device).expand(perm.shape), + ) + return inv + + # Step 1: pack groups to nodes + tokens_per_group = weight.unflatten(-1, (num_groups, group_size)).sum(-1) + group_pack_index, group_rank_in_pack = balanced_packing( + tokens_per_group, num_nodes) + log2mlog = (((group_pack_index * groups_per_node + group_rank_in_pack) * + group_size).unsqueeze(-1) + + torch.arange(group_size, + dtype=torch.int64, + device=group_pack_index.device)).flatten(-2) + mlog2log = inverse(log2mlog) + + # Step 2: construct redundant experts within nodes + # [num_layers * num_nodes, num_logical_experts // num_nodes] + tokens_per_mlog = weight.gather(-1, mlog2log).view( + -1, num_logical_experts // num_nodes) + phy2mlog, phyrank, mlogcnt = replicate_experts( + tokens_per_mlog, num_physical_experts // num_nodes) + + # Step 3: pack physical_experts to GPUs + # [num_layers * num_nodes, num_physical_experts // num_nodes] + tokens_per_phy = (tokens_per_mlog / mlogcnt).gather(-1, phy2mlog) + pack_index, rank_in_pack = balanced_packing(tokens_per_phy, + num_gpus // num_nodes) + phy2pphy = pack_index * phy_experts_per_gpu + rank_in_pack + pphy2phy = inverse(phy2pphy) + + pphy2mlog = phy2mlog.gather( + -1, pphy2phy) # [num_layers * num_nodes, num_log_per_nodes] + pphy2mlog = (pphy2mlog.view(num_layers, num_nodes, -1) + torch.arange( + 0, + num_logical_experts, + num_logical_experts // num_nodes, + device=group_pack_index.device, + ).view(1, -1, 1)).flatten(-2) + pphy2log = mlog2log.gather(-1, pphy2mlog) + pphyrank = phyrank.gather(-1, pphy2phy).view(num_layers, -1) + logcnt = mlogcnt.view(num_layers, -1).gather(-1, log2mlog) + return pphy2log, pphyrank, logcnt + + +def rebalance_experts( + weight: torch.Tensor, + num_replicas: int, + num_groups: int, + num_nodes: int, + num_gpus: int, +) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Entry point for expert-parallelism load balancer. + + Parameters: + weight: [layers, num_logical_experts], the load statistics for all + logical experts + num_replicas: number of physical experts, must be a multiple of + `num_gpus` + num_groups: number of expert groups + num_nodes: number of server nodes, where the intra-node network + (e.g, NVLink) is faster + num_gpus: number of GPUs, must be a multiple of `num_nodes` + + Returns: + physical_to_logical_map: [layers, num_replicas], the expert index of + each replica + logical_to_physical_map: [layers, num_logical_experts, X], the replica + indices for each expert + expert_count: [layers, num_logical_experts], number of physical + replicas for each logical expert + """ + num_layers, num_logical_experts = weight.shape + weight = weight.float().cpu() + if num_groups % num_nodes == 0: + # use hierarchical load-balance policy + phy2log, phyrank, logcnt = rebalance_experts_hierarchical( + weight, num_replicas, num_groups, num_nodes, num_gpus) + else: + # use global load-balance policy + phy2log, phyrank, logcnt = rebalance_experts_hierarchical( + weight, num_replicas, 1, 1, num_gpus) + num_redundant_experts = num_replicas - num_logical_experts + maxlogcnt = num_redundant_experts + 1 + log2phy: torch.Tensor = torch.full( + (num_layers, num_logical_experts, maxlogcnt), + -1, + dtype=torch.int64, + device=logcnt.device, + ) + log2phy.view(num_layers, -1).scatter_( + -1, + phy2log * maxlogcnt + phyrank, + torch.arange(num_replicas, dtype=torch.int64, + device=log2phy.device).expand(num_layers, -1), + ) + return phy2log, log2phy, logcnt + + +__all__ = ["rebalance_experts"] diff --git a/vllm/distributed/eplb/rebalance_execute.py b/vllm/distributed/eplb/rebalance_execute.py new file mode 100644 index 0000000000000..cf173c734afd1 --- /dev/null +++ b/vllm/distributed/eplb/rebalance_execute.py @@ -0,0 +1,306 @@ +# SPDX-License-Identifier: Apache-2.0 +""" +The actual execution of the rearrangement. + +This involves the exchange of expert weights between GPUs. +""" + +from collections.abc import Iterable, MutableSequence, Sequence +from functools import partial + +import torch +from torch.distributed import (P2POp, ProcessGroup, all_gather, + batch_isend_irecv, get_global_rank) + + +def idx_local_to_global( + local_idx: int, + local_cnt: int, + ep_rank: int, +) -> int: + """ + Convert a local expert index to a global expert index. + """ + return ep_rank * local_cnt + local_idx + + +def idx_global_to_local( + global_idx: int, + local_cnt: int, + ep_rank: int, +) -> int: + """ + Convert a global expert index to a local expert index. + """ + return global_idx - ep_rank * local_cnt + + +def global_idx_to_rank( + global_idx: int, + local_cnt: int, +) -> int: + """ + Convert a global expert index to a rank index. + """ + return global_idx // local_cnt + + +def get_ep_ranks_with_expert( + idx: int, + num_local_experts: int, + old_indices: Sequence[int], + new_indices: Sequence[int], +) -> tuple[MutableSequence[int], MutableSequence[int]]: + """ + Get the ranks of the experts that need to be exchanged. + + Args: + idx: The index of the expert. + num_local_experts: The number of local experts. + old_indices: The old indices of the experts. + new_indices: The new indices of the experts. + + Returns: + A tuple of two lists: + - The ranks of the experts that need to be sent. + - The ranks of the experts that need to be received. + """ + global2rank = partial( + global_idx_to_rank, + local_cnt=num_local_experts, + ) + + ranks_to_send: list[int] = [] + ranks_to_recv: list[int] = [] + + for i, e in enumerate(old_indices): + if e == idx: + rank = global2rank(i) + if not ranks_to_send or ranks_to_send[-1] != rank: + ranks_to_send.append(rank) + + for i, e in enumerate(new_indices): + if e == idx: + rank = global2rank(i) + if not ranks_to_recv or ranks_to_recv[-1] != rank: + ranks_to_recv.append(rank) + + # Remove those ranks that can get this expert locally. + ranks_to_send_set = set(ranks_to_send) + ranks_to_recv_actual = [ + rank for rank in ranks_to_recv if rank not in ranks_to_send_set + ] + + return ranks_to_send, ranks_to_recv_actual + + +def shuffle_layer( + num_local_experts: int, + ep_rank: int, + old_indices: Sequence[int], + new_indices: Sequence[int], + expert_weights: Iterable[torch.Tensor], + expert_weights_buffer: Sequence[torch.Tensor], + ep_group: ProcessGroup, +) -> None: + """ + Perform expert weights rearrangement of one layer. + """ + local2global = partial( + idx_local_to_global, + local_cnt=num_local_experts, + ep_rank=ep_rank, + ) + + # 0. Do nothing for experts that did not change. + is_unchanged = [ + old_indices[local2global(i)] == new_indices[local2global(i)] + for i in range(num_local_experts) + ] + + # 1. Perform weight copy inside the local rank. + is_received_locally = is_unchanged[:] + for src in range(num_local_experts): + src_global = local2global(src) + for dst in range(num_local_experts): + dst_global = local2global(dst) + if is_received_locally[dst]: + continue + if old_indices[src_global] == new_indices[dst_global]: + is_received_locally[dst] = True + for weight, buffer in zip(expert_weights, + expert_weights_buffer): + buffer[dst].copy_(weight[src]) + + p2p_ops: list[P2POp] = [] + + # 2. Initiate sending of weights. + experts_send_loc: dict[int, int] = {} + for src in range(num_local_experts): + expert = old_indices[local2global(src)] + if expert in experts_send_loc: + continue + experts_send_loc[expert] = src + + # We need to sort here to match send/recv + for expert, src in sorted(experts_send_loc.items()): + ranks_to_send, ranks_to_recv = get_ep_ranks_with_expert( + expert, + num_local_experts, + old_indices, + new_indices, + ) + + # Calculate the ranks to send by this rank + num_dst_per_sender = len(ranks_to_recv) // len(ranks_to_send) + sender_pos = ranks_to_send.index(ep_rank) + recv_begin = sender_pos * num_dst_per_sender + recv_end = recv_begin + num_dst_per_sender + recv_ranks = ranks_to_recv[recv_begin:recv_end] + + # Tackle remainders + remainder_start = len(ranks_to_send) * num_dst_per_sender + recver_pos = remainder_start + sender_pos + if recver_pos < len(ranks_to_recv): + recv_ranks.append(ranks_to_recv[recver_pos]) + + for dst in recv_ranks: + dst_global = get_global_rank(ep_group, dst) + p2p_ops += [ + P2POp( + torch.distributed.isend, + weight[src], + dst_global, + ) for weight in expert_weights + ] + + # 3. Initiate receiving of weights. + experts_recv_loc: dict[int, int] = {} + for dst in range(num_local_experts): + if is_received_locally[dst]: + continue + expert = new_indices[local2global(dst)] + if expert in experts_recv_loc: + continue + experts_recv_loc[expert] = dst + + # We need to sort here to match send/recv + for expert, dst in sorted(experts_recv_loc.items()): + ranks_to_send, ranks_to_recv = get_ep_ranks_with_expert( + expert, + num_local_experts, + old_indices, + new_indices, + ) + + # Calculate the rank to recv by this rank + num_dst_per_sender = len(ranks_to_recv) // len(ranks_to_send) + recver_pos = ranks_to_recv.index(ep_rank) + remainder_start = len(ranks_to_send) * num_dst_per_sender + if recver_pos < remainder_start: + src = ranks_to_send[recver_pos // num_dst_per_sender] + else: + src = ranks_to_send[recver_pos - remainder_start] + + src_global = get_global_rank(ep_group, src) + p2p_ops += [ + P2POp( + torch.distributed.irecv, + weight[dst], + src_global, + ) for weight in expert_weights_buffer + ] + + # 4. Execute the P2P operations. The real communication happens here. + if p2p_ops: + reqs = batch_isend_irecv(p2p_ops) + for req in reqs: + req.wait() + + # 5. Copy the weights from the buffer back to the original weights. + for dst in range(num_local_experts): + if is_unchanged[dst]: + continue + if is_received_locally[dst]: + for weight, buffer in zip(expert_weights, expert_weights_buffer): + weight[dst].copy_(buffer[dst]) + else: + expert = new_indices[local2global(dst)] + src = experts_recv_loc[expert] + for weight, buffer in zip(expert_weights, expert_weights_buffer): + weight[dst].copy_(buffer[src]) + + +def rearrange_expert_weights_inplace( + old_global_expert_indices: torch.Tensor, + new_global_expert_indices: torch.Tensor, + expert_weights: Sequence[Iterable[torch.Tensor]], + ep_group: ProcessGroup, + is_profile: bool = False, +) -> None: + """ + Rearranges the expert weights in place according to the new expert indices. + + The value of the indices arguments are logical indices of the experts, + while keys are physical. + + Args: + old_global_expert_indices: Shape (num_moe_layers, num_physical_experts). + new_global_expert_indices: Shape (num_moe_layers, num_physical_experts). + expert_weights: A sequence of shape (num_moe_layers)(weight_count) + of tensors of shape (num_local_physical_experts, hidden_size_i). + For example, a linear layer may have up and down projection, + so weight_count = 2. Each weight's hidden size can be different. + ep_group: The device process group for expert parallelism. + is_profile (bool): If `True`, do not perform any actual weight copy. + This is used during profile run, where we only perform dummy + communications to reserve enough memory for the buffers. + """ + num_moe_layers, num_physical_experts = old_global_expert_indices.shape + assert len(expert_weights) == num_moe_layers + + num_local_physical_experts = next(iter(expert_weights[0])).shape[0] + assert new_global_expert_indices.shape == (num_moe_layers, + num_physical_experts) + + ep_rank = ep_group.rank() + ep_size = ep_group.size() + assert num_physical_experts == ep_size * num_local_physical_experts + + # A buffer to hold the expert weights in one layer during the exchange. + # NOTE: Currently we assume the same weights across different layers + # have the same shape. + expert_weights_buffer = [torch.empty_like(w) for w in expert_weights[0]] + + if is_profile: + # Maximum send size is to send all local experts to all ranks, + # So we use a dummy `all_gather` to reserve enough communication buffer + for weight, buffer in zip(expert_weights[0], expert_weights_buffer): + # A `/dev/null`-like buffer to avoid real memory allocation + dummy_recv_buffer = [buffer for _ in range(ep_size)] + # NOTE(bowen): Needed this barrier to avoid OOM during actual + # execution. I'm not very sure why this is needed + torch.distributed.barrier() + all_gather( + dummy_recv_buffer, + weight, + group=ep_group, + ) + return + + for layer in range(num_moe_layers): + # NOTE(bowen): We need this synchronize to run, but I don't know why. + # If you figure out the reason, please let me know -- thank you! + torch.cuda.synchronize() + shuffle_layer( + num_local_physical_experts, + ep_rank, + old_global_expert_indices[layer].tolist(), + new_global_expert_indices[layer].tolist(), + expert_weights[layer], + expert_weights_buffer, + ep_group, + ) + + +__all__ = ["rearrange_expert_weights_inplace"] diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 9d1008b6b350a..6c908f88b9a92 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -320,6 +320,11 @@ class EngineArgs: data_parallel_rpc_port: Optional[int] = None data_parallel_backend: str = ParallelConfig.data_parallel_backend enable_expert_parallel: bool = ParallelConfig.enable_expert_parallel + enable_eplb: bool = ParallelConfig.enable_eplb + num_redundant_experts: int = ParallelConfig.num_redundant_experts + eplb_window_size: int = ParallelConfig.eplb_window_size + eplb_step_interval: int = ParallelConfig.eplb_step_interval + eplb_log_balancedness: bool = ParallelConfig.eplb_log_balancedness max_parallel_loading_workers: Optional[ int] = ParallelConfig.max_parallel_loading_workers block_size: Optional[BlockSize] = CacheConfig.block_size @@ -666,6 +671,16 @@ class EngineArgs: parallel_group.add_argument( "--enable-expert-parallel", **parallel_kwargs["enable_expert_parallel"]) + parallel_group.add_argument("--enable-eplb", + **parallel_kwargs["enable_eplb"]) + parallel_group.add_argument("--num-redundant-experts", + **parallel_kwargs["num_redundant_experts"]) + parallel_group.add_argument("--eplb-window-size", + **parallel_kwargs["eplb_window_size"]) + parallel_group.add_argument("--eplb-step-interval", + **parallel_kwargs["eplb_step_interval"]) + parallel_group.add_argument("--eplb-log-balancedness", + **parallel_kwargs["eplb_log_balancedness"]) parallel_group.add_argument( "--max-parallel-loading-workers", **parallel_kwargs["max_parallel_loading_workers"]) @@ -1135,6 +1150,11 @@ class EngineArgs: data_parallel_rpc_port=data_parallel_rpc_port, data_parallel_backend=data_parallel_backend, enable_expert_parallel=self.enable_expert_parallel, + enable_eplb=self.enable_eplb, + num_redundant_experts=self.num_redundant_experts, + eplb_window_size=self.eplb_window_size, + eplb_step_interval=self.eplb_step_interval, + eplb_log_balancedness=self.eplb_log_balancedness, max_parallel_loading_workers=self.max_parallel_loading_workers, disable_custom_all_reduce=self.disable_custom_all_reduce, ray_workers_use_nsight=self.ray_workers_use_nsight, diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index 133881fd04990..6fe95d32a10e7 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -3,9 +3,10 @@ import importlib from abc import abstractmethod +from collections.abc import Iterable from dataclasses import dataclass from enum import Enum -from typing import Callable, Optional, Union +from typing import Callable, Literal, Optional, Union, overload import torch import torch.nn.functional as F @@ -20,6 +21,7 @@ from vllm.distributed import (get_dp_group, get_ep_group, get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size, tensor_model_parallel_all_reduce) +from vllm.distributed.eplb.eplb_state import EplbState from vllm.forward_context import ForwardContext, get_forward_context from vllm.logger import init_logger from vllm.model_executor.custom_op import CustomOp @@ -435,6 +437,10 @@ class FusedMoEMethodBase(QuantizeMethodBase): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: raise NotImplementedError @@ -574,7 +580,15 @@ class UnquantizedFusedMoEMethod(FusedMoEMethodBase, CustomOp): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for `UnquantizedFusedMoEMethod` yet.") + return self.forward( x=x, layer=layer, @@ -821,6 +835,7 @@ class FusedMoE(torch.nn.Module): reduce_results: Whether to all all_reduce on the output of the layer renomalize: Whether to renormalize the logits in the fused_moe kernel quant_config: Quantization configure. + enable_eplb: Whether to enable expert parallelism load balancer. """ def __init__( @@ -845,6 +860,8 @@ class FusedMoE(torch.nn.Module): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + num_redundant_experts: int = 0, ): super().__init__() if params_dtype is None: @@ -860,7 +877,7 @@ class FusedMoE(torch.nn.Module): get_dp_group().world_size), vllm_parallel_config=vllm_config.parallel_config)) - self.global_num_experts = num_experts + self.global_num_experts = num_experts + num_redundant_experts # For smuggling this layer into the fused moe custom op compilation_config = vllm_config.compilation_config @@ -869,8 +886,20 @@ class FusedMoE(torch.nn.Module): compilation_config.static_forward_context[prefix] = self self.layer_name = prefix + self.enable_eplb = enable_eplb + self.expert_load_view: Optional[torch.Tensor] = None + self.logical_to_physical_map: Optional[torch.Tensor] = None + self.logical_replica_count: Optional[torch.Tensor] = None + # Determine expert maps if self.use_ep: + if self.enable_eplb: + assert self.global_num_experts % self.ep_size == 0, \ + "EPLB currently only supports even distribution of " \ + "experts across ranks." + else: + assert num_redundant_experts == 0, \ + "Redundant experts are only supported with EPLB." self.local_num_experts, self.expert_map = determine_expert_map( ep_size=self.ep_size, ep_rank=self.ep_rank, @@ -937,6 +966,20 @@ class FusedMoE(torch.nn.Module): assert isinstance(quant_method, FusedMoEMethodBase) self.quant_method = quant_method + if self.enable_eplb: + from vllm.model_executor.layers.quantization.fp8 import ( + Fp8MoEMethod) + if not isinstance(quant_method, Fp8MoEMethod): + # TODO: Add support for additional quantization methods. + # The implementation for other quantization methods does not + # contain essential differences, but the current quant API + # design causes duplicated work when extending to new + # quantization methods, so I'm leaving it for now. + # If you plan to add support for more quantization methods, + # please refer to the implementation in `Fp8MoEMethod`. + raise NotImplementedError("EPLB is only supported for FP8 " + "quantization for now.") + moe_quant_params = { "num_experts": self.local_num_experts, "hidden_size": hidden_size, @@ -965,8 +1008,9 @@ class FusedMoE(torch.nn.Module): dtype=act_dtype, device=torch.cuda.current_device()) + # Note here we use `num_experts` which is logical expert count self.batched_router_logits = torch.zeros( - (envs.VLLM_MOE_DP_CHUNK_SIZE, self.global_num_experts), + (envs.VLLM_MOE_DP_CHUNK_SIZE, num_experts), dtype=act_dtype, device=torch.cuda.current_device()) @@ -1130,13 +1174,33 @@ class FusedMoE(torch.nn.Module): return expert_id return self.expert_map[expert_id].item() + @overload def weight_loader(self, param: torch.nn.Parameter, loaded_weight: torch.Tensor, weight_name: str, - shard_id: str, expert_id: int) -> None: + shard_id: str, expert_id: int, + return_success: Literal[False]) -> None: + ... + @overload + def weight_loader(self, param: torch.nn.Parameter, + loaded_weight: torch.Tensor, weight_name: str, + shard_id: str, expert_id: int, + return_success: Literal[True]) -> bool: + ... + + def weight_loader(self, + param: torch.nn.Parameter, + loaded_weight: torch.Tensor, + weight_name: str, + shard_id: str, + expert_id: int, + return_success: bool = False) -> Optional[bool]: expert_id = self._map_global_expert_id_to_local_expert_id(expert_id) if expert_id == -1: - return + # Failed to load this param since it's not local to this rank + return False if return_success else None + # Hereafter, `expert_id` is local physical id + quant_method_name = self.quant_method.__class__.__name__ # compressed-tensors checkpoints with packed weights are stored flipped # TODO (mgoin): check self.quant_method.quant_config.quant_format @@ -1163,7 +1227,7 @@ class FusedMoE(torch.nn.Module): if is_gguf_weight_type: param.weight_type = loaded_weight.item() param.data.copy_(loaded_weight) - return + return True if return_success else None # is_transposed: if the dim to shard the weight # should be flipped. Required by GPTQ, compressed-tensors @@ -1202,7 +1266,7 @@ class FusedMoE(torch.nn.Module): self._load_single_value(param=param, loaded_weight=loaded_weight, expert_id=expert_id) - return + return True if return_success else None # Case g_idx if "g_idx" in weight_name: @@ -1211,7 +1275,7 @@ class FusedMoE(torch.nn.Module): loaded_weight=loaded_weight, expert_data=expert_data, tp_rank=self.tp_rank) - return + return True if return_success else None if "ModelOpt" in quant_method_name: if ('weight_scale_2' in weight_name @@ -1227,7 +1291,7 @@ class FusedMoE(torch.nn.Module): loaded_weight=loaded_weight, expert_data=expert_data, tp_rank=self.tp_rank) - return + return True if return_success else None # Case weight scales, zero_points and offset if ("scale" in weight_name or "zero" in weight_name @@ -1264,7 +1328,7 @@ class FusedMoE(torch.nn.Module): else: raise ValueError( f"quant method must be one of {WEIGHT_SCALE_SUPPORTED}") - return + return True if return_success else None # Case weight_shape if "weight_shape" in weight_name: @@ -1272,7 +1336,7 @@ class FusedMoE(torch.nn.Module): self._load_single_value(param=param, loaded_weight=loaded_weight, expert_id=expert_id) - return + return True if return_success else None # Case model weights if "weight" in weight_name: @@ -1282,23 +1346,77 @@ class FusedMoE(torch.nn.Module): loaded_weight=loaded_weight, expert_data=expert_data, tp_rank=self.tp_rank) - return + return True if return_success else None + + return False if return_success else None + + def get_expert_weights(self) -> Iterable[torch.Tensor]: + weights = list(self.named_parameters()) + assert all(weight.is_contiguous() for _, weight in weights) + + # Filter out the non-expert weights. + # `e_score_correction_bias` is a bias for each logical expert, + # with shape (num_logical_experts,), not an expert weight. + NON_EXPERT_WEIGHTS = { + "e_score_correction_bias", + } + + return [ + weight.view(self.local_num_experts, -1) for name, weight in weights + if name not in NON_EXPERT_WEIGHTS + ] + + def set_eplb_state( + self, + moe_layer_idx: int, + expert_load_view: torch.Tensor, + logical_to_physical_map: torch.Tensor, + logical_replica_count: torch.Tensor, + ) -> None: + """ + Register the EPLB state in this layer. + + This is used later in forward pass, where we get the expert mapping + and record the load metrics in `expert_load_view`. + """ + self.expert_load_view = expert_load_view[moe_layer_idx] + self.logical_to_physical_map = logical_to_physical_map[moe_layer_idx] + self.logical_replica_count = logical_replica_count[moe_layer_idx] @staticmethod - def select_experts(hidden_states: torch.Tensor, - router_logits: torch.Tensor, - top_k: int, - use_grouped_topk: bool, - renormalize: bool, - topk_group: Optional[int] = None, - num_expert_group: Optional[int] = None, - custom_routing_function: Optional[Callable] = None, - scoring_func: str = "softmax", - e_score_correction_bias: Optional[torch.Tensor] = None, - indices_type: Optional[torch.dtype] = None): + def select_experts( + hidden_states: torch.Tensor, + router_logits: torch.Tensor, + top_k: int, + use_grouped_topk: bool, + renormalize: bool, + topk_group: Optional[int] = None, + num_expert_group: Optional[int] = None, + custom_routing_function: Optional[Callable] = None, + scoring_func: str = "softmax", + e_score_correction_bias: Optional[torch.Tensor] = None, + indices_type: Optional[torch.dtype] = None, + enable_eplb: bool = False, + expert_map: Optional[torch.Tensor] = None, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, + ) -> tuple[torch.Tensor, torch.Tensor]: + """ + Route the input hidden states to the top-k experts based on the + router logits. + + Returns: + (topk_weights, topk_ids) (tuple[torch.Tensor, torch.Tensor]): + The weights and *global physical* expert ids of the top-k experts. + + **Compatibility**: When EPLB is not enabled, the returned ids are + equivalent to global logical ids, so should be compatible with + plain MoE implementations without redundant experts. + """ from vllm.model_executor.layers.fused_moe.fused_moe import fused_topk - # DeekSeekv2 uses grouped_top_k + # DeepSeekv2 uses grouped_top_k if use_grouped_topk: assert topk_group is not None assert num_expert_group is not None @@ -1330,6 +1448,74 @@ class FusedMoE(torch.nn.Module): if indices_type is not None: topk_ids = topk_ids.to(dtype=indices_type) + if enable_eplb: + assert expert_load_view is not None + assert logical_to_physical_map is not None + assert logical_replica_count is not None + + # 1. Convert the logical expert ids to physical expert ids + # Directly select a random replica for each logical expert + + # TODO: maybe optimize this by using specified kernels, + # or compute pseudo-random indices by modulo + + # In case `indices_type` is not `torch.long` or `torch.int`, + # e.g. `torch.uint32` as required by dispatch/combine kernels + topk_ids_long = topk_ids.long() + replica_indices = ( + torch.rand_like(topk_ids, dtype=torch.float) * + logical_replica_count[topk_ids_long]).long().unsqueeze(-1) + physical_ids = logical_to_physical_map[topk_ids_long].gather( + -1, replica_indices).squeeze(-1) + + topk_ids = physical_ids + + # 2. Record expert load metrics. + + # TODO(bowen): When using `FusedMoEModularKernel`, this + # can be done in a more unified way, since + # `FusedMoEPrepareAndFinalize` will return the expert + # token count, in some cases directly from the kernel. + # However, now there are many code paths not using + # the modular kernel, e.g. calling `fused_experts`, + # so we decide to keep the logic here. + # + # If later refactor moved all the MoE kernel calls + # to the modular kernel, we can move this logic there + # to achieve better efficiency. + + # `expert_load_view`: (num_logical_experts,) + + # Mask out non-local experts + if expert_map is not None: + topk_ids_local = expert_map[topk_ids] + topk_ids_flatten = topk_ids_local.flatten() + else: + topk_ids_flatten = topk_ids.flatten() + + # Should be equivalent to: + # ``` + # topk_ids_masked = topk_ids_local[topk_ids_local >= 0] + # expert_load_view += topk_ids_masked.bincount( + # minlength=expert_load_view.shape[0]) + # ``` + # We use `scatter_add_` since `bincount` cannot be compiled + + # Performance optimization: + # `masked_fill` is significantly faster than `masked_select` + invalid_mask = topk_ids_flatten < 0 + # Replace invalid expert ids with 0 (just a dummy position) + # to avoid out-of-bounds errors in scatter_add_ + index = topk_ids_flatten.masked_fill_(invalid_mask, 0) + # `src` is the valid mask, which is 1 for valid and 0 for invalid + src = ~invalid_mask + + expert_load_view.scatter_add_(dim=0, + index=index.long(), + src=src.to(expert_load_view)) + + topk_ids = topk_ids.to(dtype=indices_type) + return topk_weights, topk_ids def must_reduce_shared_expert_outputs(self) -> bool: @@ -1410,6 +1596,10 @@ class FusedMoE(torch.nn.Module): scoring_func=self.scoring_func, e_score_correction_bias=self.e_score_correction_bias, activation=self.activation, + enable_eplb=self.enable_eplb, + expert_load_view=self.expert_load_view, + logical_to_physical_map=self.logical_to_physical_map, + logical_replica_count=self.logical_replica_count, ) if not skip_result_store: @@ -1467,6 +1657,10 @@ class FusedMoE(torch.nn.Module): e_score_correction_bias=self.e_score_correction_bias, activation=self.activation, apply_router_weight_on_input=self.apply_router_weight_on_input, + enable_eplb=self.enable_eplb, + expert_load_view=self.expert_load_view, + logical_to_physical_map=self.logical_to_physical_map, + logical_replica_count=self.logical_replica_count, ) if do_naive_dispatch_combine: @@ -1481,16 +1675,30 @@ class FusedMoE(torch.nn.Module): @classmethod def make_expert_params_mapping( - cls, ckpt_gate_proj_name: str, ckpt_down_proj_name: str, + cls, + ckpt_gate_proj_name: str, + ckpt_down_proj_name: str, ckpt_up_proj_name: str, - num_experts: int) -> list[tuple[str, str, int, str]]: + num_experts: int, + num_redundant_experts: int = 0) -> list[tuple[str, str, int, str]]: + + num_physical_experts = num_experts + num_redundant_experts + + # In the returned mapping: + # - `expert_id` is the physical expert id + # - `weight_name` contains the weight name of the logical expert + # So that we should map the expert id to logical in `weight_name` + physical_to_logical_map = \ + EplbState.build_initial_global_physical_to_logical_map( + num_experts, num_redundant_experts) return [ # (param_name, weight_name, expert_id, shard_id) ("experts.w13_" if weight_name in [ckpt_gate_proj_name, ckpt_up_proj_name] else "experts.w2_", - f"experts.{expert_id}.{weight_name}.", expert_id, shard_id) - for expert_id in range(num_experts) for shard_id, weight_name in [ + f"experts.{physical_to_logical_map[expert_id]}.{weight_name}.", + expert_id, shard_id) for expert_id in range(num_physical_experts) + for shard_id, weight_name in [ ("w1", ckpt_gate_proj_name), ("w2", ckpt_down_proj_name), ("w3", ckpt_up_proj_name), diff --git a/vllm/model_executor/layers/quantization/awq_marlin.py b/vllm/model_executor/layers/quantization/awq_marlin.py index 56d803c6baf12..aff54bc495b2d 100644 --- a/vllm/model_executor/layers/quantization/awq_marlin.py +++ b/vllm/model_executor/layers/quantization/awq_marlin.py @@ -482,7 +482,15 @@ class AWQMoEMethod(FusedMoEMethodBase): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for `AWQMoEMethod` yet.") + assert activation == "silu", "Only SiLU activation is supported." if apply_router_weight_on_input: diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py index f14131c5f05b3..7703b9e687c4a 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py @@ -331,7 +331,15 @@ class CompressedTensorsW8A8Fp8MoEMethod(CompressedTensorsMoEMethod): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for " + "`CompressedTensorsW8A8Fp8MoEMethod` yet.") topk_weights, topk_ids = FusedMoE.select_experts( hidden_states=x, @@ -593,7 +601,15 @@ class CompressedTensorsW8A8Fp8MoECutlassMethod(CompressedTensorsMoEMethod): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for " + "`CompressedTensorsW8A8Fp8MoECutlassMethod` yet.") topk_weights, topk_ids = FusedMoE.select_experts( hidden_states=x, @@ -722,7 +738,16 @@ class CompressedTensorsW8A8Int8MoEMethod(CompressedTensorsMoEMethod): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for " + "`CompressedTensorsW8A8Int8MoEMethod` yet.") + from vllm.model_executor.layers.fused_moe import fused_experts topk_weights, topk_ids = FusedMoE.select_experts( @@ -1012,7 +1037,16 @@ class CompressedTensorsWNA16MarlinMoEMethod(CompressedTensorsMoEMethod): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for " + "`CompressedTensorsWNA16MarlinMoEMethod` yet.") + assert activation == "silu", ( f"{activation} not supported for Marlin MoE.") assert not apply_router_weight_on_input, ( @@ -1228,7 +1262,15 @@ class CompressedTensorsWNA16MoEMethod(CompressedTensorsMoEMethod): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError("EPLB not supported for " + "`CompressedTensorsWNA16MoEMethod` yet.") + from vllm.model_executor.layers.fused_moe import fused_experts topk_weights, topk_ids = FusedMoE.select_experts( diff --git a/vllm/model_executor/layers/quantization/experts_int8.py b/vllm/model_executor/layers/quantization/experts_int8.py index 01b0064f08058..47eca80609e0e 100644 --- a/vllm/model_executor/layers/quantization/experts_int8.py +++ b/vllm/model_executor/layers/quantization/experts_int8.py @@ -117,7 +117,15 @@ class ExpertsInt8MoEMethod(FusedMoEMethodBase): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for `ExpertsInt8MoEMethod` yet.") + from vllm.model_executor.layers.fused_moe import fused_experts topk_weights, topk_ids = FusedMoE.select_experts( diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py index b3042bfaed3d7..d2eda541f7a40 100644 --- a/vllm/model_executor/layers/quantization/fp8.py +++ b/vllm/model_executor/layers/quantization/fp8.py @@ -825,7 +825,16 @@ class Fp8MoEMethod(FusedMoEMethodBase): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + assert expert_load_view is not None + assert logical_to_physical_map is not None + assert logical_replica_count is not None + assert isinstance(layer, FusedMoE) topk_weights, topk_ids = FusedMoE.select_experts( hidden_states=x, @@ -839,6 +848,11 @@ class Fp8MoEMethod(FusedMoEMethodBase): scoring_func=scoring_func, e_score_correction_bias=e_score_correction_bias, indices_type=self.topk_indices_dtype, + enable_eplb=enable_eplb, + expert_map=expert_map, + expert_load_view=expert_load_view, + logical_to_physical_map=logical_to_physical_map, + logical_replica_count=logical_replica_count, ) if self.rocm_aiter_moe_enabled: diff --git a/vllm/model_executor/layers/quantization/gguf.py b/vllm/model_executor/layers/quantization/gguf.py index 9c8f74545d37d..86da04c39989b 100644 --- a/vllm/model_executor/layers/quantization/gguf.py +++ b/vllm/model_executor/layers/quantization/gguf.py @@ -520,7 +520,15 @@ class GGUFMoEMethod(FusedMoEMethodBase): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ): + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for `GGUFMoEMethod` yet.") + assert activation == "silu", "Only SiLU activation is supported." if apply_router_weight_on_input: raise NotImplementedError( diff --git a/vllm/model_executor/layers/quantization/gptq_marlin.py b/vllm/model_executor/layers/quantization/gptq_marlin.py index e9b8dc3266b4a..48ab04c9ab37f 100644 --- a/vllm/model_executor/layers/quantization/gptq_marlin.py +++ b/vllm/model_executor/layers/quantization/gptq_marlin.py @@ -635,7 +635,15 @@ class GPTQMarlinMoEMethod(FusedMoEMethodBase): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for `GPTQMarlinMoEMethod` yet.") + assert activation == "silu", "Only SiLU activation is supported." if apply_router_weight_on_input: raise NotImplementedError( diff --git a/vllm/model_executor/layers/quantization/modelopt.py b/vllm/model_executor/layers/quantization/modelopt.py index 3f79b203aa170..e35db5b31dba7 100644 --- a/vllm/model_executor/layers/quantization/modelopt.py +++ b/vllm/model_executor/layers/quantization/modelopt.py @@ -664,7 +664,15 @@ class ModelOptNvFp4FusedMoE(FusedMoEMethodBase): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ): + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for `ModelOptNvFp4FusedMoE` yet.") + if self.use_marlin: topk_weights, topk_ids = FusedMoE.select_experts( hidden_states=x, diff --git a/vllm/model_executor/layers/quantization/moe_wna16.py b/vllm/model_executor/layers/quantization/moe_wna16.py index 3aa23f0682576..c5055a02fa3d5 100644 --- a/vllm/model_executor/layers/quantization/moe_wna16.py +++ b/vllm/model_executor/layers/quantization/moe_wna16.py @@ -297,7 +297,15 @@ class MoeWNA16Method(FusedMoEMethodBase): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for `MoeWNA16Method` yet.") + from vllm.model_executor.layers.fused_moe import fused_experts assert activation == "silu", "Only SiLU activation is supported." topk_weights, topk_ids = FusedMoE.select_experts( diff --git a/vllm/model_executor/layers/quantization/quark/quark_moe.py b/vllm/model_executor/layers/quantization/quark/quark_moe.py index 4c2da4c8b04ee..a040c430cbcaa 100644 --- a/vllm/model_executor/layers/quantization/quark/quark_moe.py +++ b/vllm/model_executor/layers/quantization/quark/quark_moe.py @@ -205,7 +205,15 @@ class QuarkW8A8Fp8MoEMethod(QuarkMoEMethod): e_score_correction_bias: Optional[torch.Tensor] = None, apply_router_weight_on_input: bool = False, activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError( + "EPLB not supported for `QuarkW8A8Fp8MoEMethod` yet.") + from vllm.model_executor.layers.fused_moe import fused_experts topk_weights, topk_ids = FusedMoE.select_experts( diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index 0f996d04e6e80..f712b626c74c3 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -23,7 +23,8 @@ # See the License for the specific language governing permissions and # limitations under the License. """Inference-only DeepseekV2/DeepseekV3 model.""" -from collections.abc import Iterable +import typing +from collections.abc import Callable, Iterable from typing import Any, Optional, Union import torch @@ -32,8 +33,10 @@ from transformers import PretrainedConfig from vllm.attention import Attention from vllm.compilation.decorators import support_torch_compile -from vllm.config import CacheConfig, ModelConfig, VllmConfig -from vllm.distributed import get_pp_group, get_tensor_model_parallel_world_size +from vllm.config import (CacheConfig, ModelConfig, VllmConfig, + get_current_vllm_config) +from vllm.distributed import (get_ep_group, get_pp_group, + get_tensor_model_parallel_world_size) from vllm.model_executor.layers.activation import SiluAndMul from vllm.model_executor.layers.fused_moe import FusedMoE from vllm.model_executor.layers.layernorm import RMSNorm @@ -51,7 +54,7 @@ from vllm.model_executor.model_loader.weight_utils import ( from vllm.model_executor.sampling_metadata import SamplingMetadata from vllm.sequence import IntermediateTensors -from .interfaces import SupportsPP +from .interfaces import MixtureOfExperts, SupportsPP from .utils import (PPMissingLayer, is_pp_missing_parameter, make_empty_intermediate_tensors_factory, make_layers, maybe_prefix) @@ -99,11 +102,17 @@ class DeepseekV2MoE(nn.Module): config: PretrainedConfig, quant_config: Optional[QuantizationConfig] = None, prefix: str = "", + enable_eplb: bool = False, ): super().__init__() self.tp_size = get_tensor_model_parallel_world_size() self.routed_scaling_factor = config.routed_scaling_factor - self.n_shared_experts = config.n_shared_experts + + self.ep_group = get_ep_group().device_group + self.ep_rank = self.ep_group.rank() + self.ep_size = self.ep_group.size() + self.n_routed_experts: int = config.n_routed_experts + self.n_shared_experts: int = config.n_shared_experts if config.hidden_act != "silu": raise ValueError(f"Unsupported activation: {config.hidden_act}. " @@ -120,6 +129,22 @@ class DeepseekV2MoE(nn.Module): else: self.gate.e_score_correction_bias = None + # Load balancing settings. + vllm_config = get_current_vllm_config() + parallel_config = vllm_config.parallel_config + self.enable_eplb = enable_eplb + + self.n_redundant_experts = parallel_config.num_redundant_experts + self.n_logical_experts = self.n_routed_experts + self.n_physical_experts = (self.n_logical_experts + + self.n_redundant_experts) + self.n_local_physical_experts = self.n_physical_experts // self.ep_size + + self.physical_expert_start = (self.ep_rank * + self.n_local_physical_experts) + self.physical_expert_end = (self.physical_expert_start + + self.n_local_physical_experts) + self.experts = FusedMoE( num_experts=config.n_routed_experts, top_k=config.num_experts_per_tok, @@ -133,7 +158,9 @@ class DeepseekV2MoE(nn.Module): topk_group=config.topk_group, prefix=f"{prefix}.experts", scoring_func=config.scoring_func, - e_score_correction_bias=self.gate.e_score_correction_bias) + e_score_correction_bias=self.gate.e_score_correction_bias, + enable_eplb=self.enable_eplb, + num_redundant_experts=self.n_redundant_experts) if config.n_shared_experts is not None: intermediate_size = (config.moe_intermediate_size * @@ -503,6 +530,7 @@ class DeepseekV2DecoderLayer(nn.Module): model_config: ModelConfig, cache_config: Optional[CacheConfig] = None, quant_config: Optional[QuantizationConfig] = None, + enable_eplb: bool = False, ) -> None: super().__init__() self.hidden_size = config.hidden_size @@ -543,6 +571,7 @@ class DeepseekV2DecoderLayer(nn.Module): config=config, quant_config=quant_config, prefix=f"{prefix}.mlp", + enable_eplb=enable_eplb, ) else: self.mlp = DeepseekV2MLP( @@ -615,6 +644,7 @@ class DeepseekV2Model(nn.Module): model_config = vllm_config.model_config cache_config = vllm_config.cache_config quant_config = vllm_config.quant_config + enable_eplb = vllm_config.parallel_config.enable_eplb self.config = config self.vocab_size = config.vocab_size @@ -636,6 +666,7 @@ class DeepseekV2Model(nn.Module): model_config=model_config, cache_config=cache_config, quant_config=quant_config, + enable_eplb=enable_eplb, ), prefix=f"{prefix}.layers") @@ -681,7 +712,7 @@ class DeepseekV2Model(nn.Module): return hidden_states -class DeepseekV2ForCausalLM(nn.Module, SupportsPP): +class DeepseekV2ForCausalLM(nn.Module, SupportsPP, MixtureOfExperts): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() @@ -700,6 +731,44 @@ class DeepseekV2ForCausalLM(nn.Module, SupportsPP): self.logits_processor = LogitsProcessor(config.vocab_size) self.make_empty_intermediate_tensors = ( self.model.make_empty_intermediate_tensors) + self.expert_weights = [] + + # Set MoE hyperparameters + self.num_moe_layers = (config.num_hidden_layers - + config.first_k_dense_replace) + self.num_expert_groups = config.n_group + + self.moe_layers: list[FusedMoE] = [] + for layer in self.model.layers: + assert isinstance(layer, DeepseekV2DecoderLayer) + if isinstance(layer.mlp, DeepseekV2MoE): + self.moe_layers.append(layer.mlp.experts) + + # Pick last one layer since the first ones may be dense layers. + example_moe = typing.cast( + DeepseekV2MoE, self.model.layers[config.num_hidden_layers - 1].mlp) + self.num_logical_experts = example_moe.n_logical_experts + self.num_physical_experts = example_moe.n_physical_experts + self.num_local_physical_experts = example_moe.n_local_physical_experts + self.num_routed_experts = example_moe.n_routed_experts + self.num_shared_experts = example_moe.n_shared_experts + self.num_redundant_experts = example_moe.n_redundant_experts + + def set_eplb_state( + self, + expert_load_view: torch.Tensor, + logical_to_physical_map: torch.Tensor, + logical_replica_count: torch.Tensor, + ) -> None: + for layer_idx, layer in enumerate(self.moe_layers): + # Register the expert weights. + self.expert_weights.append(layer.get_expert_weights()) + layer.set_eplb_state( + moe_layer_idx=layer_idx, + expert_load_view=expert_load_view, + logical_to_physical_map=logical_to_physical_map, + logical_replica_count=logical_replica_count, + ) def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: return self.model.get_input_embeddings(input_ids) @@ -752,7 +821,8 @@ class DeepseekV2ForCausalLM(nn.Module, SupportsPP): ckpt_gate_proj_name="gate_proj", ckpt_down_proj_name="down_proj", ckpt_up_proj_name="up_proj", - num_experts=self.config.n_routed_experts) + num_experts=self.config.n_routed_experts, + num_redundant_experts=self.num_redundant_experts) params_dict = dict(self.named_parameters()) loaded_params: set[str] = set() @@ -789,24 +859,44 @@ class DeepseekV2ForCausalLM(nn.Module, SupportsPP): weight_loader(param, loaded_weight, shard_id) break else: + is_expert_weight = False for mapping in expert_params_mapping: param_name, weight_name, expert_id, shard_id = mapping if weight_name not in name: continue - name = name.replace(weight_name, param_name) - if is_pp_missing_parameter(name, self): + # Anyway, this is an expert weight and should not be + # attempted to load as other weights later + is_expert_weight = True + + # Do not modify `name` since the loop may continue here + # Instead, create a new variable + name_mapped = name.replace(weight_name, param_name) + + if is_pp_missing_parameter(name_mapped, self): continue - param = params_dict[name] - weight_loader = param.weight_loader - weight_loader(param, - loaded_weight, - name, - shard_id=shard_id, - expert_id=expert_id) - break + param = params_dict[name_mapped] + # We should ask the weight loader to return success or not + # here since otherwise we may skip experts with other + # available replicas. + weight_loader = typing.cast(Callable[..., bool], + param.weight_loader) + success = weight_loader(param, + loaded_weight, + name_mapped, + shard_id=shard_id, + expert_id=expert_id, + return_success=True) + if success: + break else: + if is_expert_weight: + # We've checked that this is an expert weight + # However it's not mapped locally to this rank + # So we simply skip it + continue + # Skip loading extra bias for GPTQ models. if name.endswith(".bias") and name not in params_dict: continue @@ -824,6 +914,7 @@ class DeepseekV2ForCausalLM(nn.Module, SupportsPP): default_weight_loader) weight_loader(param, loaded_weight) loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index f759f8f1f2731..3ea424e44b62e 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -1,6 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from collections.abc import Iterable, MutableSequence from typing import (TYPE_CHECKING, ClassVar, Literal, Optional, Protocol, Union, overload, runtime_checkable) @@ -426,6 +427,73 @@ def is_hybrid( return isinstance(model, IsHybrid) +@runtime_checkable +class MixtureOfExperts(Protocol): + """ + Check if the model is a mixture of experts (MoE) model. + """ + + expert_weights: MutableSequence[Iterable[Tensor]] + """ + Expert weights saved in this rank. + + The first dimension is the layer, and the second dimension is different + parameters in the layer, e.g. up/down projection weights. + """ + + num_moe_layers: int + """Number of MoE layers in this model.""" + + num_expert_groups: int + """Number of expert groups in this model.""" + + num_logical_experts: int + """Number of logical experts in this model.""" + + num_physical_experts: int + """Number of physical experts in this model.""" + + num_local_physical_experts: int + """Number of local physical experts in this model.""" + + num_routed_experts: int + """Number of routed experts in this model.""" + + num_shared_experts: int + """Number of shared experts in this model.""" + + num_redundant_experts: int + """Number of redundant experts in this model.""" + + def set_eplb_state( + self, + expert_load_view: Tensor, + logical_to_physical_map: Tensor, + logical_replica_count: Tensor, + ) -> None: + """ + Register the EPLB state in the MoE model. + + Since these are views of the actual EPLB state, any changes made by + the EPLB algorithm are automatically reflected in the model's behavior + without requiring additional method calls to set new states. + + You should also collect model's `expert_weights` here instead of in + the weight loader, since after initial weight loading, further + processing like quantization may be applied to the weights. + + Args: + expert_load_view: A view of the expert load metrics tensor. + logical_to_physical_map: Mapping from logical to physical experts. + logical_replica_count: Count of replicas for each logical expert. + """ + ... + + +def is_mixture_of_experts(model: object) -> TypeIs[MixtureOfExperts]: + return isinstance(model, MixtureOfExperts) + + @runtime_checkable class HasNoOps(Protocol): has_noops: ClassVar[Literal[True]] = True diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 40639fdf24338..3c9de57204051 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -21,6 +21,7 @@ from vllm.attention.layer import Attention from vllm.compilation.counter import compilation_counter from vllm.config import (CompilationLevel, VllmConfig, get_layers_from_vllm_config) +from vllm.distributed.eplb.eplb_state import EplbState from vllm.distributed.kv_transfer import (get_kv_transfer_group, has_kv_transfer_group) from vllm.distributed.kv_transfer.kv_connector.v1 import KVConnectorBase_V1 @@ -33,7 +34,8 @@ from vllm.logger import init_logger from vllm.model_executor.layers.mamba.mamba_mixer2 import MambaMixer2 from vllm.model_executor.layers.rotary_embedding import MRotaryEmbedding from vllm.model_executor.model_loader import TensorizerLoader, get_model_loader -from vllm.model_executor.models.interfaces import has_step_pooler +from vllm.model_executor.models.interfaces import (has_step_pooler, + is_mixture_of_experts) from vllm.multimodal import MULTIMODAL_REGISTRY from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange from vllm.multimodal.utils import group_mm_inputs_by_modality @@ -150,6 +152,13 @@ class GPUModelRunner(LoRAModelRunnerMixin): # Sampler self.sampler = Sampler() + self.eplb_state: Optional[EplbState] = None + """ + State of the expert parallelism load balancer. + + Will be lazily initialized when the model is loaded. + """ + # Lazy initializations # self.model: nn.Module # Set after load_model # Initialize in initialize_kv_cache @@ -1178,6 +1187,24 @@ class GPUModelRunner(LoRAModelRunnerMixin): for k, v in self.intermediate_tensors.items() }) + def eplb_step(self, + is_dummy: bool = False, + is_profile: bool = False) -> None: + """ + Step for the EPLB (Expert Parallelism Load Balancing) state. + """ + if not self.parallel_config.enable_eplb: + return + + assert self.eplb_state is not None + assert is_mixture_of_experts(self.model) + self.eplb_state.step( + self.model, + is_dummy, + is_profile, + log_stats=self.parallel_config.eplb_log_balancedness, + ) + def get_dp_padding(self, num_tokens: int) -> tuple[int, Optional[torch.Tensor]]: dp_size = self.vllm_config.parallel_config.data_parallel_size @@ -1595,6 +1622,8 @@ class GPUModelRunner(LoRAModelRunnerMixin): if has_kv_transfer_group(): get_kv_transfer_group().clear_connector_metadata() + self.eplb_step() + return ModelRunnerOutput( req_ids=self.input_batch.req_ids, req_id_to_index=self.input_batch.req_id_to_index, @@ -1729,6 +1758,16 @@ class GPUModelRunner(LoRAModelRunnerMixin): time_after_load - time_before_load) prepare_communication_buffer_for_model(self.model) + if is_mixture_of_experts( + self.model) and self.parallel_config.enable_eplb: + logger.info("EPLB is enabled for model %s.", + self.model_config.model) + self.eplb_state = EplbState.build( + self.model, + self.device, + self.parallel_config, + ) + def save_tensorized_model( self, tensorizer_config: "TensorizerConfig", @@ -1887,6 +1926,8 @@ class GPUModelRunner(LoRAModelRunnerMixin): self, num_tokens: int, capture_attn_cudagraph: bool = False, + skip_eplb: bool = False, + is_profile: bool = False, ) -> tuple[torch.Tensor, torch.Tensor]: # Padding for DP @@ -1983,6 +2024,16 @@ class GPUModelRunner(LoRAModelRunnerMixin): assert isinstance(self.drafter, EagleProposer) self.drafter.dummy_run(num_tokens) + # This is necessary to avoid blocking DP. + # For dummy runs, we typically skip EPLB since we don't have any real + # requests to process. + # However, in DP settings, there may be cases when some DP ranks do + # not have any requests to process, so they're executing dummy batches. + # In such cases, we still have to trigger EPLB to make sure + # ranks execute the rearrangement in synchronization. + if not skip_eplb: + self.eplb_step(is_dummy=True, is_profile=is_profile) + logit_indices = np.cumsum(num_scheduled_tokens) - 1 return hidden_states, hidden_states[logit_indices] @@ -2175,8 +2226,9 @@ class GPUModelRunner(LoRAModelRunnerMixin): # Cache the dummy encoder outputs. self.encoder_cache["tmp"] = dict(enumerate(dummy_encoder_outputs)) + # Add `is_profile` here to pre-allocate communication buffers hidden_states, last_hidden_states \ - = self._dummy_run(self.max_num_tokens) + = self._dummy_run(self.max_num_tokens, is_profile=True) if get_pp_group().is_last_rank: if self.is_pooling_model: output = self._dummy_pooler_run(hidden_states) @@ -2210,10 +2262,15 @@ class GPUModelRunner(LoRAModelRunnerMixin): for num_tokens in tqdm(reversed(self.cudagraph_batch_sizes), desc="Capturing CUDA graphs", total=len(self.cudagraph_batch_sizes)): + # We skip EPLB here since we don't want to record dummy metrics for _ in range( self.compilation_config.cudagraph_num_of_warmups): - self._dummy_run(num_tokens, capture_attn_cudagraph=full_cg) - self._dummy_run(num_tokens, capture_attn_cudagraph=full_cg) + self._dummy_run(num_tokens, + capture_attn_cudagraph=full_cg, + skip_eplb=True) + self._dummy_run(num_tokens, + capture_attn_cudagraph=full_cg, + skip_eplb=True) end_time = time.perf_counter() end_free_gpu_memory = torch.cuda.mem_get_info()[0] diff --git a/vllm/v1/worker/gpu_worker.py b/vllm/v1/worker/gpu_worker.py index b0f80c701325f..9e7e44d068612 100644 --- a/vllm/v1/worker/gpu_worker.py +++ b/vllm/v1/worker/gpu_worker.py @@ -259,9 +259,10 @@ class Worker(WorkerBase): x for x in warmup_sizes if x not in self.vllm_config.compilation_config.cudagraph_capture_sizes ] + # We skip EPLB here since we don't want to record dummy metrics for size in sorted(warmup_sizes, reverse=True): logger.info("Compile and warming up model for size %d", size) - self.model_runner._dummy_run(size) + self.model_runner._dummy_run(size, skip_eplb=True) if not self.model_config.enforce_eager: self.model_runner.capture_model() @@ -274,8 +275,12 @@ class Worker(WorkerBase): max_num_reqs = min(self.scheduler_config.max_num_seqs, self.scheduler_config.max_num_batched_tokens) + # We skip EPLB here since we don't want to record dummy metrics hidden_states, last_hidden_states = \ - self.model_runner._dummy_run(num_tokens=max_num_reqs) + self.model_runner._dummy_run( + num_tokens=max_num_reqs, + skip_eplb=True, + ) if self.model_runner.is_pooling_model: self.model_runner._dummy_pooler_run(hidden_states) else: From 71799fd005ca08c9c362e548945a3dde93790fec Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Fri, 27 Jun 2025 12:21:04 +0900 Subject: [PATCH 163/211] [CI Failure] Fix OOM with test_oot_registration_embedding (#20144) Signed-off-by: mgoin --- tests/models/test_oot_registration.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/models/test_oot_registration.py b/tests/models/test_oot_registration.py index ef0ad613d5252..59de35644c12d 100644 --- a/tests/models/test_oot_registration.py +++ b/tests/models/test_oot_registration.py @@ -53,7 +53,9 @@ def test_oot_registration_embedding( with monkeypatch.context() as m: m.setenv("VLLM_PLUGINS", "register_dummy_model") prompts = ["Hello, my name is", "The text does not matter"] - llm = LLM(model=dummy_gemma2_embedding_path, load_format="dummy") + llm = LLM(model=dummy_gemma2_embedding_path, + load_format="dummy", + max_model_len=2048) outputs = llm.embed(prompts) for output in outputs: From a57d57fa72f092b9b8ed8415553ec02609daa644 Mon Sep 17 00:00:00 2001 From: Dipika Sikka Date: Thu, 26 Jun 2025 23:50:06 -0400 Subject: [PATCH 164/211] [Quantization] Bump to use latest `compressed-tensors` (#20033) Signed-off-by: Dipika Co-authored-by: Kyle Sayers --- requirements/common.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/common.txt b/requirements/common.txt index 9a9ae1d93896b..6cc304e5b1f6d 100644 --- a/requirements/common.txt +++ b/requirements/common.txt @@ -37,7 +37,7 @@ pyyaml six>=1.16.0; python_version > '3.11' # transitive dependency of pandas that needs to be the latest version for python 3.12 setuptools>=77.0.3,<80; python_version > '3.11' # Setuptools is used by triton, we need to ensure a modern version is installed for 3.12+ so that it does not try to import distutils, which was removed in 3.12 einops # Required for Qwen2-VL. -compressed-tensors == 0.10.1 # required for compressed-tensors +compressed-tensors == 0.10.2 # required for compressed-tensors depyf==0.18.0 # required for profiling and debugging with compilation config cloudpickle # allows pickling lambda functions in model_executor/models/registry.py watchfiles # required for http server to monitor the updates of TLS files From 2d7779f888f6443c067e0c36bab808ef6b368221 Mon Sep 17 00:00:00 2001 From: Ilya Markov Date: Fri, 27 Jun 2025 05:50:09 +0200 Subject: [PATCH 165/211] [Perf] SM100 FP8 GEMM Optimizations after cutlass_profiler (#20071) Signed-off-by: ilmarkov Co-authored-by: ilmarkov --- .../c3x/scaled_mm_sm100_fp8_dispatch.cuh | 58 +++++++++---------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/csrc/quantization/cutlass_w8a8/c3x/scaled_mm_sm100_fp8_dispatch.cuh b/csrc/quantization/cutlass_w8a8/c3x/scaled_mm_sm100_fp8_dispatch.cuh index 1549ed96aa2be..24564efbd21be 100644 --- a/csrc/quantization/cutlass_w8a8/c3x/scaled_mm_sm100_fp8_dispatch.cuh +++ b/csrc/quantization/cutlass_w8a8/c3x/scaled_mm_sm100_fp8_dispatch.cuh @@ -29,26 +29,12 @@ struct sm100_fp8_config_default { template typename Epilogue> struct sm100_fp8_config_M256 { - // M in (128, 256] + // M in (64, 256] static_assert(std::is_same()); using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto; using EpilogueSchedule = cutlass::epilogue::collective::EpilogueScheduleAuto; using TileShape = Shape<_128, _128, _128>; - using ClusterShape = Shape<_2, _2, _1>; - using Cutlass3xGemm = - cutlass_3x_gemm_sm100; -}; - -template typename Epilogue> -struct sm100_fp8_config_M128 { - // M in (64, 128] - static_assert(std::is_same()); - using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto; - using EpilogueSchedule = cutlass::epilogue::collective::EpilogueScheduleAuto; - using TileShape = Shape<_128, _128, _256>; - using ClusterShape = Shape<_2, _4, _1>; + using ClusterShape = Shape<_2, _1, _1>; using Cutlass3xGemm = cutlass_3x_gemm_sm100; @@ -57,12 +43,26 @@ struct sm100_fp8_config_M128 { template typename Epilogue> struct sm100_fp8_config_M64 { - // M in [1, 64] + // M in (16, 64] static_assert(std::is_same()); using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto; using EpilogueSchedule = cutlass::epilogue::collective::EpilogueScheduleAuto; - using TileShape = Shape<_64, _64, _256>; - using ClusterShape = Shape<_1, _8, _1>; + using TileShape = Shape<_64, _64, _128>; + using ClusterShape = Shape<_1, _1, _1>; + using Cutlass3xGemm = + cutlass_3x_gemm_sm100; +}; + +template typename Epilogue> +struct sm100_fp8_config_M16 { + // M in [1, 16] + static_assert(std::is_same()); + using KernelSchedule = cutlass::gemm::collective::KernelScheduleAuto; + using EpilogueSchedule = cutlass::epilogue::collective::EpilogueScheduleAuto; + using TileShape = Shape<_64, _64, _128>; + using ClusterShape = Shape<_1, _4, _1>; using Cutlass3xGemm = cutlass_3x_gemm_sm100; @@ -82,27 +82,27 @@ inline void cutlass_gemm_sm100_fp8_dispatch(torch::Tensor& out, using Cutlass3xGemmDefault = typename sm100_fp8_config_default::Cutlass3xGemm; + using Cutlass3xGemmM16 = + typename sm100_fp8_config_M16::Cutlass3xGemm; using Cutlass3xGemmM64 = typename sm100_fp8_config_M64::Cutlass3xGemm; - using Cutlass3xGemmM128 = - typename sm100_fp8_config_M128::Cutlass3xGemm; using Cutlass3xGemmM256 = typename sm100_fp8_config_M256::Cutlass3xGemm; uint32_t const m = a.size(0); uint32_t const mp2 = - std::max(static_cast(64), next_pow_2(m)); // next power of 2 + std::max(static_cast(16), next_pow_2(m)); // next power of 2 - if (mp2 <= 64) { - // m in [1, 64] + if (mp2 <= 16) { + // m in [1, 16] + return cutlass_gemm_caller( + out, a, b, std::forward(args)...); + } else if (mp2 <= 64) { + // m in (16, 64] return cutlass_gemm_caller( out, a, b, std::forward(args)...); - } else if (mp2 <= 128) { - // m in (64, 128] - return cutlass_gemm_caller( - out, a, b, std::forward(args)...); } else if (mp2 <= 256) { - // m in (128, 256] + // m in (64, 256] return cutlass_gemm_caller( out, a, b, std::forward(args)...); } else { From 44d2e6af636b7a62dbec1bd985543cbe2918049b Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Fri, 27 Jun 2025 12:50:12 +0900 Subject: [PATCH 166/211] [Bugfix] Build moe_data for both sm100 and sm90 (#20086) Signed-off-by: mgoin --- CMakeLists.txt | 14 ++++++++++++-- csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu | 9 +++++---- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 402131b7a1e7a..8966a663d3ccf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -513,6 +513,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") CUDA_ARCHS "${FP4_ARCHS}") list(APPEND VLLM_EXT_SRC "${SRCS}") list(APPEND VLLM_GPU_FLAGS "-DENABLE_NVFP4=1") + list(APPEND VLLM_GPU_FLAGS "-DENABLE_CUTLASS_MOE_SM100=1") message(STATUS "Building NVFP4 for archs: ${FP4_ARCHS}") else() message(STATUS "Not building NVFP4 as no compatible archs were found.") @@ -547,8 +548,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # if it's possible to compile MoE kernels that use its output. cuda_archs_loose_intersection(SCALED_MM_ARCHS "9.0a" "${CUDA_ARCHS}") if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.3 AND SCALED_MM_ARCHS) - set(SRCS "csrc/quantization/cutlass_w8a8/moe/grouped_mm_c3x.cu" - "csrc/quantization/cutlass_w8a8/moe/moe_data.cu") + set(SRCS "csrc/quantization/cutlass_w8a8/moe/grouped_mm_c3x.cu") set_gencode_flags_for_srcs( SRCS "${SRCS}" CUDA_ARCHS "${SCALED_MM_ARCHS}") @@ -566,6 +566,16 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") endif() endif() + # moe_data.cu is used by all CUTLASS MoE kernels. + cuda_archs_loose_intersection(CUTLASS_MOE_DATA_ARCHS "9.0a;10.0a" "${CUDA_ARCHS}") + if(${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.3 AND CUTLASS_MOE_DATA_ARCHS) + set(SRCS "csrc/quantization/cutlass_w8a8/moe/moe_data.cu") + set_gencode_flags_for_srcs( + SRCS "${SRCS}" + CUDA_ARCHS "${CUTLASS_MOE_DATA_ARCHS}") + list(APPEND VLLM_EXT_SRC "${SRCS}") + endif() + # # Machete kernels diff --git a/csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu b/csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu index 348525810810c..a2080c3001190 100644 --- a/csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu +++ b/csrc/quantization/cutlass_w8a8/scaled_mm_entry.cu @@ -241,7 +241,7 @@ void get_cutlass_moe_mm_data( // mm to run it for. int32_t version_num = get_sm_version_num(); #if (defined ENABLE_CUTLASS_MOE_SM90 && ENABLE_CUTLASS_MOE_SM90) || \ - (defined ENABLE_SCALED_MM_SM100 && ENABLE_SCALED_MM_SM90) + (defined ENABLE_CUTLASS_MOE_SM100 && ENABLE_CUTLASS_MOE_SM100) get_cutlass_moe_mm_data_caller(topk_ids, expert_offsets, problem_sizes1, problem_sizes2, input_permutation, output_permutation, num_experts, n, k, @@ -252,7 +252,7 @@ void get_cutlass_moe_mm_data( false, "No compiled get_cutlass_moe_mm_data: no cutlass_scaled_mm kernel for " "CUDA device capability: ", - version_num, ". Required capability: 90"); + version_num, ". Required capability: 90 or 100"); } void get_cutlass_pplx_moe_mm_data(torch::Tensor& expert_offsets, @@ -265,7 +265,8 @@ void get_cutlass_pplx_moe_mm_data(torch::Tensor& expert_offsets, // This function currently gets compiled only if we have a valid cutlass moe // mm to run it for. int32_t version_num = get_sm_version_num(); -#if defined ENABLE_CUTLASS_MOE_SM90 && ENABLE_CUTLASS_MOE_SM90 +#if (defined ENABLE_CUTLASS_MOE_SM90 && ENABLE_CUTLASS_MOE_SM90) || \ + (defined ENABLE_CUTLASS_MOE_SM100 && ENABLE_CUTLASS_MOE_SM100) get_cutlass_pplx_moe_mm_data_caller(expert_offsets, problem_sizes1, problem_sizes2, expert_num_tokens, num_local_experts, padded_m, n, k); @@ -275,7 +276,7 @@ void get_cutlass_pplx_moe_mm_data(torch::Tensor& expert_offsets, false, "No compiled get_cutlass_pplx_moe_mm_data: no cutlass_scaled_mm kernel " "for CUDA device capability: ", - version_num, ". Required capability: 90"); + version_num, ". Required capability: 90 or 100"); } void cutlass_scaled_mm_azp(torch::Tensor& c, torch::Tensor const& a, From 0740e29b66ca5589f7f35a7c25b6c3de1a749da1 Mon Sep 17 00:00:00 2001 From: li haoyang Date: Fri, 27 Jun 2025 11:54:24 +0800 Subject: [PATCH 167/211] [Feature] add quick all reduce (#19744) Signed-off-by: ilmarkov Signed-off-by: Haoyang Li Co-authored-by: ilmarkov --- CMakeLists.txt | 8 + csrc/custom_quickreduce.cu | 114 +++ csrc/ops.h | 11 + csrc/quickreduce/base.h | 338 +++++++++ csrc/quickreduce/quick_reduce.h | 196 +++++ csrc/quickreduce/quick_reduce_impl.cuh | 698 ++++++++++++++++++ csrc/torch_bindings.cpp | 18 + tests/distributed/test_quick_all_reduce.py | 138 ++++ vllm/_custom_ops.py | 32 + .../device_communicators/cuda_communicator.py | 22 +- .../device_communicators/quick_all_reduce.py | 278 +++++++ vllm/envs.py | 28 + 12 files changed, 1879 insertions(+), 2 deletions(-) create mode 100644 csrc/custom_quickreduce.cu create mode 100644 csrc/quickreduce/base.h create mode 100644 csrc/quickreduce/quick_reduce.h create mode 100644 csrc/quickreduce/quick_reduce_impl.cuh create mode 100644 tests/distributed/test_quick_all_reduce.py create mode 100644 vllm/distributed/device_communicators/quick_all_reduce.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 8966a663d3ccf..b1adeac586f2e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -648,6 +648,14 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") # if CUDA endif endif() +if (VLLM_GPU_LANG STREQUAL "HIP") + # Add QuickReduce kernels + list(APPEND VLLM_EXT_SRC + "csrc/custom_quickreduce.cu" + ) +# if ROCM endif +endif() + message(STATUS "Enabling C extension.") define_gpu_extension_target( _C diff --git a/csrc/custom_quickreduce.cu b/csrc/custom_quickreduce.cu new file mode 100644 index 0000000000000..33d0d4a7226e6 --- /dev/null +++ b/csrc/custom_quickreduce.cu @@ -0,0 +1,114 @@ +#include +#include +#include +#include + +#ifdef USE_ROCM + + #include "quickreduce/quick_reduce.h" + +quickreduce::fptr_t init_custom_qr(int64_t rank, int64_t world_size, + std::optional qr_max_size) { + if (world_size > 8) + throw std::invalid_argument("world size > 8 is not supported"); + if (world_size == 6) + throw std::invalid_argument("world size == 6 is not supported"); + if (world_size % 2 != 0) + throw std::invalid_argument("Odd num gpus is not supported for now"); + if (rank < 0 || rank >= world_size) + throw std::invalid_argument("invalid rank passed in"); + quickreduce::DeviceComms* fptr = new quickreduce::DeviceComms(); + fptr->init(world_size, rank, qr_max_size); + return (quickreduce::fptr_t)fptr; +} + +void qr_destroy(quickreduce::fptr_t _fa) { + if (_fa) { + auto fa = reinterpret_cast(_fa); + fa->destroy(); + delete fa; + } +} + +torch::Tensor qr_get_handle(quickreduce::fptr_t _fa) { + auto fa = reinterpret_cast(_fa); + hipIpcMemHandle_t handle = fa->get_handle(); + auto options = + torch::TensorOptions().dtype(torch::kUInt8).device(torch::kCPU); + auto data_handle = + torch::empty({static_cast(sizeof(hipIpcMemHandle_t))}, options); + std::memcpy(data_handle.data_ptr(), &handle, sizeof(hipIpcMemHandle_t)); + return data_handle; +} + +void qr_open_handles(quickreduce::fptr_t _fa, + const std::vector& handles) { + auto fa = reinterpret_cast(_fa); + std::vector ipc_handles; + ipc_handles.reserve(handles.size()); + for (auto& handle : handles) { + // Ensure the tensor is on the same device as the current device. + hipIpcMemHandle_t ipc_handle; + std::memcpy(&ipc_handle, handle.data_ptr(), sizeof(hipIpcMemHandle_t)); + ipc_handles.push_back(ipc_handle); + } + fa->open_ipc_handles(ipc_handles); +} + +void qr_all_reduce(quickreduce::fptr_t _fa, torch::Tensor& inp, + torch::Tensor& out, int64_t quant_level, bool cast_bf2half) { + auto fa = reinterpret_cast(_fa); + const at::cuda::OptionalCUDAGuard device_guard(device_of(inp)); + auto stream = at::cuda::getCurrentHIPStreamMasqueradingAsCUDA(); + + TORCH_CHECK_EQ(inp.scalar_type(), out.scalar_type()); + TORCH_CHECK_EQ(inp.numel(), out.numel()); + TORCH_CHECK_LE(out.numel(), fa->kMaxProblemSize); + if (out.scalar_type() == at::ScalarType::Half) { + fa->allreduce(reinterpret_cast(inp.data_ptr()), + reinterpret_cast(out.data_ptr()), + out.numel(), quant_level, stream); + } else if (out.scalar_type() == at::ScalarType::BFloat16) { + if (cast_bf2half) { + fa->allreduce(reinterpret_cast(inp.data_ptr()), + reinterpret_cast(out.data_ptr()), + out.numel(), quant_level, stream); + } else { + fa->allreduce( + reinterpret_cast(inp.data_ptr()), + reinterpret_cast(out.data_ptr()), + out.numel(), quant_level, stream); + } + } else { + throw std::runtime_error( + "quick allreduce only supports float16 and bfloat16"); + } +} + +int64_t qr_max_size() { + // The default is 2GB (2,147,483,648 bytes) + return static_cast(std::numeric_limits::max()) + 1; +} + + #define INSTANTIATE_FOR_WORLDSIZE(T, Codec, cast_bf2half) \ + template struct quickreduce::AllReduceTwoshot, \ + cast_bf2half>; \ + template struct quickreduce::AllReduceTwoshot, \ + cast_bf2half>; \ + template struct quickreduce::AllReduceTwoshot, cast_bf2half>; + +INSTANTIATE_FOR_WORLDSIZE(quickreduce::nv_bfloat16, quickreduce::CodecFP, false) +INSTANTIATE_FOR_WORLDSIZE(quickreduce::nv_bfloat16, quickreduce::CodecQ4, false) +INSTANTIATE_FOR_WORLDSIZE(quickreduce::nv_bfloat16, quickreduce::CodecQ6, false) +INSTANTIATE_FOR_WORLDSIZE(quickreduce::nv_bfloat16, quickreduce::CodecQ8, false) +INSTANTIATE_FOR_WORLDSIZE(quickreduce::nv_bfloat16, quickreduce::CodecFP, true) +INSTANTIATE_FOR_WORLDSIZE(quickreduce::nv_bfloat16, quickreduce::CodecQ4, true) +INSTANTIATE_FOR_WORLDSIZE(quickreduce::nv_bfloat16, quickreduce::CodecQ6, true) +INSTANTIATE_FOR_WORLDSIZE(quickreduce::nv_bfloat16, quickreduce::CodecQ8, true) + +INSTANTIATE_FOR_WORLDSIZE(half, quickreduce::CodecFP, false) +INSTANTIATE_FOR_WORLDSIZE(half, quickreduce::CodecQ4, false) +INSTANTIATE_FOR_WORLDSIZE(half, quickreduce::CodecQ6, false) +INSTANTIATE_FOR_WORLDSIZE(half, quickreduce::CodecQ8, false) + +#endif // USE_ROCM \ No newline at end of file diff --git a/csrc/ops.h b/csrc/ops.h index f02f5083ac197..52c264d64ccad 100644 --- a/csrc/ops.h +++ b/csrc/ops.h @@ -360,3 +360,14 @@ std::tuple allocate_shared_buffer_and_handle( int64_t size); int64_t open_mem_handle(torch::Tensor& mem_handle); void free_shared_buffer(int64_t buffer); + +#ifdef USE_ROCM +fptr_t init_custom_qr(int64_t rank, int64_t world_size, + std::optional qr_max_size = std::nullopt); +void qr_destroy(fptr_t _fa); +torch::Tensor qr_get_handle(fptr_t _fa); +void qr_open_handles(fptr_t _fa, const std::vector& handles); +void qr_all_reduce(fptr_t _fa, torch::Tensor& inp, torch::Tensor& out, + int64_t quant_level, bool cast_bf2half = false); +int64_t qr_max_size(); +#endif \ No newline at end of file diff --git a/csrc/quickreduce/base.h b/csrc/quickreduce/base.h new file mode 100644 index 0000000000000..a2170e483207d --- /dev/null +++ b/csrc/quickreduce/base.h @@ -0,0 +1,338 @@ +#pragma once + +#include +#include +#include +#include + +#define __quickreduce_device_inline__ __device__ __forceinline__ +#define __quickreduce_launch_bounds_two_shot__ __launch_bounds__(256, 4) +#define __quickreduce_launch_bounds_one_shot__ __launch_bounds__(512, 4) + +namespace quickreduce { + +typedef __hip_bfloat16 nv_bfloat16; +typedef __hip_bfloat162 nv_bfloat162; + +using int32x2_t = __attribute__((__vector_size__(2 * sizeof(int)))) int; +using int32x4_t = __attribute__((__vector_size__(4 * sizeof(int)))) int; + +// Setup acquire-release semantics for vector memory reads (mubuf instruction) +// as per architecture. +#if defined(__gfx942__) +// CDNA3: Scope bits sc0, sc1 + #define MUBUF_ACQUIRE 16 + #define MUBUF_RELEASE 16 +#elif (defined(__gfx908__) || defined(__gfx90a__)) +// CDNA1 and CDNA2 - glc bit + #define MUBUF_ACQUIRE 1 + #define MUBUF_RELEASE 0 +#endif + +static constexpr int kNegOne = 0xBC00BC00; // {-1, -1}, fp16x2_t + +// Number of atoms (4xf16x2_t) processed by a single thread +static constexpr int kAtoms = 8; + +// We use a workgroup of 256 threads +static constexpr int kBlockSize = 256; +static constexpr int kAtomStride = kBlockSize; + +// Size and atom stride of source/destination data that the block will +// process. +// Workgroup scope = Tile = (256 threads x 8 atoms x 16B) +static constexpr int kTileSize = kBlockSize * kAtoms * sizeof(int32x4_t); + +// Max number of blocks. 304 CUs on MI300 +static constexpr int kMaxNumBlocks = 304 * 4; + +// Standard CDNA wavefront size. +static constexpr int kWavefront = 64; + +// 256 thread, 4 wavefronts. +static dim3 constexpr kBlockTwoShot = {kWavefront, kBlockSize / kWavefront, 1}; + +// Number of threads in a group for quantization +// It corresponds to 32 F16 elements in quantization block +static constexpr int kThreadGroupSize = 8; + +// Methods +__quickreduce_device_inline__ __host__ unsigned long divceil(unsigned long x, + unsigned long y) { + return ((x + y - 1) / y); +} + +union BufferResource { + __quickreduce_device_inline__ constexpr BufferResource() + : config(0x00020000U) {} + + __quickreduce_device_inline__ constexpr BufferResource(void* buffer_address, + uint32_t buffer_size) + : address(buffer_address), range(buffer_size), config(0x00020000U) {} + + int32x4_t descriptor; + struct { + void* address; // 8B, out of which first 48b is address, and 16b is stride + // (unused) + uint32_t range; // Byte range for the buffer resource + uint32_t config; // Constant, DFMT=32b + }; +}; + +__quickreduce_device_inline__ static int32x4_t buffer_load_dwordx4( + int32x4_t srsrc, int32_t voffset, int32_t soffset, + int32_t aux) __asm("llvm.amdgcn.raw.buffer.load.v4i32"); + +__quickreduce_device_inline__ static void buffer_store_dwordx4( + int32x4_t data, int32x4_t srsrc, int32_t voffset, int32_t soffset, + int32_t aux) __asm("llvm.amdgcn.raw.buffer.store.v4i32"); + +__quickreduce_device_inline__ static void set_fp16_ovfl(bool const value) { +#if defined(__gfx942__) + if (value) { + asm volatile("s_setreg_imm32_b32 0xdc1, 1;" ::); + } else { + asm volatile("s_setreg_imm32_b32 0xdc1, 0;" ::); + } +#endif +} +union bf162_int_union { + int i; + nv_bfloat162 bf2; +}; + +template +__quickreduce_device_inline__ void packed_assign_add(int32x4_t* A, + int32x4_t* B); + +template <> +__quickreduce_device_inline__ void packed_assign_add(int32x4_t* A, + int32x4_t* B) { + int32x4_t& tR_fragment = A[0]; + int32x4_t& tA_fragment = B[0]; + + asm volatile("v_pk_add_f16 %0, %1, %2" + : "=v"(tR_fragment[0]) + : "v"(tR_fragment[0]), "v"(tA_fragment[0])); + asm volatile("v_pk_add_f16 %0, %1, %2" + : "=v"(tR_fragment[1]) + : "v"(tR_fragment[1]), "v"(tA_fragment[1])); + asm volatile("v_pk_add_f16 %0, %1, %2" + : "=v"(tR_fragment[2]) + : "v"(tR_fragment[2]), "v"(tA_fragment[2])); + asm volatile("v_pk_add_f16 %0, %1, %2" + : "=v"(tR_fragment[3]) + : "v"(tR_fragment[3]), "v"(tA_fragment[3])); +} + +template <> +__quickreduce_device_inline__ void packed_assign_add( + int32x4_t* A, int32x4_t* B) { + nv_bfloat162* tA = reinterpret_cast(A); + nv_bfloat162* tB = reinterpret_cast(B); +#pragma unroll + for (int i = 0; i < 4; i++) { + tA[i] = __hadd2(tA[i], tB[i]); + } +} + +template +__quickreduce_device_inline__ int packed_max(int a, int b); + +template <> +__quickreduce_device_inline__ int packed_max(int a, int b) { + int result; + asm volatile("v_pk_max_f16 %0, %1, %2" : "=v"(result) : "v"(a), "v"(b)); + return result; +} + +template <> +__quickreduce_device_inline__ int packed_max(int a, int b) { + bf162_int_union A, B, R; + A.i = a; + B.i = b; + R.bf2 = __hmax2(A.bf2, B.bf2); + return R.i; +} + +template +__quickreduce_device_inline__ int packed_min(int a, int b); + +template <> +__quickreduce_device_inline__ int packed_min(int a, int b) { + int result; + asm volatile("v_pk_min_f16 %0, %1, %2" : "=v"(result) : "v"(a), "v"(b)); + return result; +} + +template <> +__quickreduce_device_inline__ int packed_min(int a, int b) { + bf162_int_union A, B, R; + A.i = a; + B.i = b; + R.bf2 = __hmin2(A.bf2, B.bf2); + return R.i; +} + +template +__quickreduce_device_inline__ int packed_abs_max(int a, int b); + +template <> +__quickreduce_device_inline__ int packed_abs_max(int a, int b) { + half2 wmaxh2 = __builtin_bit_cast(half2, a); + half2 wminh2 = __builtin_bit_cast(half2, b); + half2 wblockmaxh2; + + wblockmaxh2.x = + __hgt(__habs(wmaxh2.x), __habs(wminh2.x)) ? wmaxh2.x : wminh2.x; + wblockmaxh2.y = + __hgt(__habs(wmaxh2.y), __habs(wminh2.y)) ? wmaxh2.y : wminh2.y; + return __builtin_bit_cast(int, wblockmaxh2); +} + +template <> +__quickreduce_device_inline__ int packed_abs_max(int a, int b) { + bf162_int_union A, B, R; + A.i = a; + B.i = b; + R.bf2.x = __hgt(__habs(A.bf2.x), __habs(B.bf2.x)) ? A.bf2.x : B.bf2.x; + R.bf2.y = __hgt(__habs(A.bf2.y), __habs(B.bf2.y)) ? A.bf2.y : B.bf2.y; + return R.i; +} + +template +__quickreduce_device_inline__ int packed_add(int a, int b); + +template <> +__quickreduce_device_inline__ int packed_add(int a, int b) { + int result; + asm volatile("v_pk_add_f16 %0, %1, %2" : "=v"(result) : "v"(a), "v"(b)); + return result; +} + +template <> +__quickreduce_device_inline__ int packed_add(int a, int b) { + bf162_int_union A, B, R; + A.i = a; + B.i = b; + R.bf2 = __hadd2(A.bf2, B.bf2); + return R.i; +} + +template <> +__quickreduce_device_inline__ int packed_add(int a, int b) { + int result; + asm volatile("v_pk_add_i16 %0, %1, %2" : "=v"(result) : "v"(a), "v"(b)); + return result; +} + +template +__quickreduce_device_inline__ int packed_sub(int a, int b); + +template <> +__quickreduce_device_inline__ int packed_sub(int a, int b) { + int result; + + // MI300 lacks packed fp16 sub instruction. So we do -1 * min + max + asm volatile("v_pk_fma_f16 %0, %1, %2 %3" + : "=v"(result) + : "v"(kNegOne), "v"(b), "v"(a)); + return result; +} + +template <> +__quickreduce_device_inline__ int packed_sub(int a, int b) { + bf162_int_union A, B, R; + A.i = a; + B.i = b; + R.bf2 = __hsub2(A.bf2, B.bf2); + return R.i; +} + +template +__quickreduce_device_inline__ int packed_mul(int a, int b); + +template <> +__quickreduce_device_inline__ int packed_mul(int a, int b) { + int result; + asm volatile("v_pk_mul_f16 %0, %1, %2" : "=v"(result) : "v"(a), "v"(b)); + return result; +} + +template <> +__quickreduce_device_inline__ int packed_mul(int a, int b) { + nv_bfloat162* tA = reinterpret_cast(&a); + nv_bfloat162* tB = reinterpret_cast(&b); + nv_bfloat162 tR = __hmul2(*tA, *tB); + return *(reinterpret_cast(&tR)); +} + +template +__quickreduce_device_inline__ int packed_rcp(int a); + +template <> +__quickreduce_device_inline__ int packed_rcp(int a) { + return __builtin_bit_cast(int, h2rcp(__builtin_bit_cast(half2, a))); +} + +template <> +__quickreduce_device_inline__ int packed_rcp(int a) { + bf162_int_union A, R; + A.i = a; + R.bf2 = h2rcp(A.bf2); + return R.i; +} + +// changes dtype +__quickreduce_device_inline__ float T2float_cast(half a) { + return __half2float(a); +} + +__quickreduce_device_inline__ float T2float_cast(nv_bfloat16 a) { + return __bfloat162float(a); +} + +template +__quickreduce_device_inline__ int group_abs_max(int32x4_t atom) { + const int group_leader = (threadIdx.x / kThreadGroupSize) * kThreadGroupSize; + + int wmax, wmin, wblockmax; + int a, b; + a = packed_max(atom[0], atom[1]); + b = packed_max(atom[2], atom[3]); + + wmax = packed_max(a, b); + + a = packed_min(atom[0], atom[1]); + b = packed_min(atom[2], atom[3]); + + wmin = packed_min(a, b); + + // Reduce the max among a group of threads + // Note: This is basically 2 blocks of values setup as the + // upper/lower halves of the f16x2_t + for (int i = 1; i < kThreadGroupSize; i <<= 1) { + int x = __shfl_down(wmax, i); + wmax = packed_max(wmax, x); + + int y = __shfl_down(wmin, i); + wmin = packed_min(wmin, y); + } + wblockmax = packed_abs_max(wmax, wmin); + // Share with the cohort + wblockmax = __shfl(wblockmax, group_leader); + return wblockmax; +} + +__quickreduce_device_inline__ void set_sync_flag(uint32_t* flag_ptr, + uint32_t flag) { + __atomic_store_n(flag_ptr, flag, __ATOMIC_RELEASE); +} + +__quickreduce_device_inline__ void wait_sync_flag(uint32_t* flag_ptr, + uint32_t flag) { + while (__atomic_load_n(flag_ptr, __ATOMIC_RELAXED) != flag) { + } +} + +} // namespace quickreduce \ No newline at end of file diff --git a/csrc/quickreduce/quick_reduce.h b/csrc/quickreduce/quick_reduce.h new file mode 100644 index 0000000000000..4fe4c44be7eb9 --- /dev/null +++ b/csrc/quickreduce/quick_reduce.h @@ -0,0 +1,196 @@ +#pragma once + +#include +#include +#include "quick_reduce_impl.cuh" + +#define HIP_CHECK(err) \ + do { \ + hipError_t err_ = (err); \ + if (err_ != hipSuccess) { \ + std::printf("HIP error %d at %s:%d. %s\n", err_, __FILE__, __LINE__, \ + hipGetErrorString(err_)); \ + throw std::runtime_error("HIP error"); \ + } \ + } while (0) + +namespace quickreduce { +using fptr_t = int64_t; +static_assert(sizeof(void*) == sizeof(fptr_t)); + +template +__global__ __quickreduce_launch_bounds_two_shot__ static void +allreduce_prototype_twoshot(T const* A, T* B, uint32_t N, uint32_t num_blocks, + int rank, uint8_t** dbuffer_list, + uint32_t data_offset, uint32_t flag_color) { + int block = blockIdx.x; + int grid = gridDim.x; + + while (block < num_blocks) { + AllReduceKernel::run(A, B, N, block, rank, dbuffer_list, data_offset, + flag_color); + block += grid; + flag_color++; + } +} + +#define TWOSHOT_DISPATCH(__codec) \ + if (world_size == 2) { \ + using LineCodec = __codec; \ + using AllReduceKernel = AllReduceTwoshot; \ + hipLaunchKernelGGL((allreduce_prototype_twoshot), \ + dim3(grid), dim3(kBlockTwoShot), 0, stream, A, B, N, \ + num_blocks, rank, dbuffer_list, data_offset, \ + flag_color); \ + } else if (world_size == 4) { \ + using LineCodec = __codec; \ + using AllReduceKernel = AllReduceTwoshot; \ + hipLaunchKernelGGL((allreduce_prototype_twoshot), \ + dim3(grid), dim3(kBlockTwoShot), 0, stream, A, B, N, \ + num_blocks, rank, dbuffer_list, data_offset, \ + flag_color); \ + } else if (world_size == 8) { \ + using LineCodec = __codec; \ + using AllReduceKernel = AllReduceTwoshot; \ + hipLaunchKernelGGL((allreduce_prototype_twoshot), \ + dim3(grid), dim3(kBlockTwoShot), 0, stream, A, B, N, \ + num_blocks, rank, dbuffer_list, data_offset, \ + flag_color); \ + } + +enum QuickReduceQuantLevel { + F16 = 0, + INT8 = 1, + INT6 = 2, + INT4 = 3, +}; + +struct DeviceComms { + // Max problem size is 2GB (in bytes) or half of uint32_t max value. + int64_t kMaxProblemSize = + static_cast(std::numeric_limits::max()) + 1; + + // Max TP-8 + static int constexpr kMaxWorldSize = 8; + + bool initialized = false; + uint32_t flag_color = 1; + int world_size; + int rank; + + uint8_t* dbuffer; + uint8_t** dbuffer_list; + hipIpcMemHandle_t buffer_ipc_handle; + std::vector all_buffer_ipc_handles; + std::vector buffer_list; + uint32_t data_offset; + + DeviceComms() : initialized(false), world_size(1), rank(0) {} + ~DeviceComms() { destroy(); } + + void init(int world_size, int rank, + std::optional max_problem_size = std::nullopt) { + destroy(); + this->world_size = world_size; + this->rank = rank; + if (max_problem_size.has_value() && max_problem_size.value() > 0) { + this->kMaxProblemSize = max_problem_size.value(); + } + // Allocate buffer size for worst case: F16 2-stage buffer. + uint32_t flags_buffer_size = + 2 * world_size * kMaxNumBlocks * sizeof(uint32_t); + static int64_t data_buffer_size = 2 * this->kMaxProblemSize; + int64_t total_buffer_size = flags_buffer_size + data_buffer_size; + data_offset = flags_buffer_size; + HIP_CHECK(hipExtMallocWithFlags((void**)&dbuffer, total_buffer_size, + hipDeviceMallocUncached)); + + // Clear the flags buffer. + HIP_CHECK(hipMemset(dbuffer, 0, flags_buffer_size)); + + // Device-side list of IPC buffers. + buffer_list.resize(world_size); + HIP_CHECK(hipMalloc(&dbuffer_list, world_size * sizeof(uint8_t*))); + + // Create IPC handles for rank's communication buffer. + all_buffer_ipc_handles.resize(world_size); + HIP_CHECK(hipIpcGetMemHandle(&buffer_ipc_handle, dbuffer)); + + initialized = true; + } + int get_world_size() { return world_size; } + int get_rank() { return rank; } + bool status() { return initialized; } + hipIpcMemHandle_t const get_handle() { return buffer_ipc_handle; } + + void destroy() { + if (initialized) { + for (int i = 0; i < world_size; i++) { + if (i != rank) { + HIP_CHECK(hipIpcCloseMemHandle(dbuffer_list[i])); + } + } + + HIP_CHECK(hipFree(dbuffer)); + HIP_CHECK(hipFree(dbuffer_list)); + + initialized = false; + } + } + + void open_ipc_handles(std::vector const& ipc_handles) { + assert(ipc_handles.size() == all_buffer_ipc_handles.size()); + for (int i = 0; i < world_size; i++) { + all_buffer_ipc_handles[i] = ipc_handles[i]; + } + + // Open device memory access to the IPC communication buffers. + // Note: For our own rank, we do not need to open a handle. + for (int i = 0; i < world_size; i++) { + if (i != rank) { + HIP_CHECK(hipIpcOpenMemHandle((void**)&buffer_list[i], + all_buffer_ipc_handles[i], + hipIpcMemLazyEnablePeerAccess)); + } else { + buffer_list[i] = dbuffer; + } + } + + HIP_CHECK(hipMemcpy(dbuffer_list, buffer_list.data(), + world_size * sizeof(uint8_t*), hipMemcpyHostToDevice)); + } + + template + void allreduce(T const* A, T* B, uint32_t N, int quant_level, + hipStream_t stream) { + if (world_size != 2 && world_size != 4 && world_size != 8) { + throw std::runtime_error("All Reduce not supported for world_size = " + + std::to_string(world_size)); + } + + // Configuration. + uint32_t msg_size = N * sizeof(T); + uint32_t num_blocks = divceil(msg_size, kTileSize); + uint32_t grid = min(kMaxNumBlocks, num_blocks); + auto quant_level_ = static_cast(quant_level); + switch (quant_level_) { + case QuickReduceQuantLevel::INT8: + TWOSHOT_DISPATCH(CodecQ8) + break; + case QuickReduceQuantLevel::INT6: + TWOSHOT_DISPATCH(CodecQ6) + break; + case QuickReduceQuantLevel::INT4: + TWOSHOT_DISPATCH(CodecQ4) + break; + default: + TWOSHOT_DISPATCH(CodecFP) + break; + } + HIP_CHECK(cudaGetLastError()); + // Rotate the flag color. + flag_color += divceil(N, grid); + } +}; + +} // namespace quickreduce \ No newline at end of file diff --git a/csrc/quickreduce/quick_reduce_impl.cuh b/csrc/quickreduce/quick_reduce_impl.cuh new file mode 100644 index 0000000000000..17816c552d25b --- /dev/null +++ b/csrc/quickreduce/quick_reduce_impl.cuh @@ -0,0 +1,698 @@ +#pragma once + +#include +#include "base.h" + +namespace quickreduce { + +struct CodecBase { + const int thread; + const int rank; + const int group_leader; + __quickreduce_device_inline__ CodecBase(int thread, int rank) + : thread(thread), + rank(rank), + group_leader((threadIdx.x / kThreadGroupSize) * kThreadGroupSize) { + set_fp16_ovfl(true); + } +}; + +// Default full precision codec. +template +struct CodecFP : public CodecBase { + static constexpr int kWorldSize = world_size; + static constexpr int kRankAtoms = kAtoms / kWorldSize; + + // Codec tile size process by this workgroup. + // Each thread processes atoms of f16x8_t (16B). + static constexpr int kRankTransmittedTileSize = + kBlockSize * kRankAtoms * sizeof(int32x4_t); + static_assert(kRankTransmittedTileSize % 16 == 0, + "kRankTransmittedTileSize must be 16B aligned."); + + // Total tile size for the collective communication. + static constexpr int kTransmittedTileSize = + kRankTransmittedTileSize * kWorldSize; + + __quickreduce_device_inline__ CodecFP(int thread, int rank) + : CodecBase(thread, rank) {} + + __quickreduce_device_inline__ void send(int32x4_t* __restrict__ send_buffer, + const int32x4_t* __restrict__ data) { + for (int i = 0; i < kRankAtoms; i++) { + __builtin_nontemporal_store(data[i], send_buffer + thread); + send_buffer += kAtomStride; + } + } + + __quickreduce_device_inline__ void recv(int32x4_t** __restrict__ recv_buffer, + int32x4_t* __restrict__ data) { + for (int i = 0; i < kRankAtoms; i++) { + data[i] = __builtin_nontemporal_load(*recv_buffer + thread); + *recv_buffer += kAtomStride; + } + } +}; + +// Int4 symmetric quantization codec. +// We quantize the FP16 data to block-scaled Int4 in blocks of 4 * +// kThreadGroupSize. +template +struct CodecQ4 : public CodecBase { + static constexpr int kWorldSize = world_size; + + // Codec tile size process by this workgroup. + // Each threads processes a fragment of fp16x8_t (16B), + // into a int4x8_t (4B) and a fp16 scale shared among 32 values. + static constexpr int kRankAtoms = kAtoms / kWorldSize; + static constexpr int kRankTileStride = 1152; + static constexpr int kRankTileScaleOffset = 1024; + static constexpr int kRankTransmittedTileSize = kRankTileStride * kRankAtoms; + static_assert(kRankTransmittedTileSize % 16 == 0, + "kRankTransmittedTileSize must be 16B aligned."); + + static constexpr int kRankBufferTileStride = + kRankTileStride / sizeof(int32x4_t); + + // Total tile size for the collective communication. + static constexpr int kTransmittedTileSize = + kRankTransmittedTileSize * kWorldSize; + + // Constants configuration + + // {-1/8.0h, -1/8.0h}, f16x2_t + static constexpr int kScaleFactor = + std::is_same::value ? 0xB000B000 : 0xBE00BE00; + + // {1e-7, 1e-7}, f16x2_t + static constexpr int kScaleEpsilon = + std::is_same::value ? 0x00010001 : 0x33D733D7; + + // {-8, -8}, f16x2_t + static constexpr int kRangeMin = + std::is_same::value ? 0xC800C800 : 0xC100C100; + + // {+7, +7}, f16x2_t + static constexpr int kRangeMax = + std::is_same::value ? 0x47004700 : 0x40E040E0; + + // {+8, +8}, int16x2_t + static constexpr int kRangeBias = 0x00080008; + + __quickreduce_device_inline__ CodecQ4(int thread, int rank) + : CodecBase(thread, rank) {} + + __quickreduce_device_inline__ void send(int32x4_t* __restrict__ send_buffer, + const int32x4_t* __restrict__ data) { + for (int k = 0; k < kRankAtoms; k++) { + int32x4_t const atom = data[k]; + + // Compute the absolute maximum of the atom in the thread group + // In 2 blocks of values, upper/lower halves of the f16x2_t + int wblockmax = group_abs_max(atom); + + // Derive scales + int decoding_scale; + int encoding_scale; + decoding_scale = packed_mul(wblockmax, kScaleFactor); + encoding_scale = packed_add(decoding_scale, kScaleEpsilon); + encoding_scale = packed_rcp(encoding_scale); + + // Apply scales to get quantized values + int32x4_t w; + for (int i = 0; i < 4; i++) { + w[i] = packed_mul(atom[i], encoding_scale); + w[i] = packed_max(w[i], kRangeMin); + w[i] = packed_min(w[i], kRangeMax); + } + + // Convert from f16x2_t to uint16x2_t + int32x4_t q; + { + int16_t* qi = reinterpret_cast(&q); + T* wh = reinterpret_cast(&w); + for (int i = 0; i < 8; i++) qi[i] = (int16_t)rintf(T2float_cast(wh[i])); + + for (int i = 0; i < 4; i++) { + q[i] = packed_add(q[i], kRangeBias); + } + } + + // Pack 8 x q4 into int32_t + int qw = q[0] | (q[1] << 4) | (q[2] << 8) | (q[3] << 12); + + // Write quantized atom to send_buffer + // note: only the group leader stores the scale + uint8_t* atom_ptr = + reinterpret_cast(send_buffer + k * kRankBufferTileStride); + int32_t* qw_ptr = reinterpret_cast(atom_ptr) + thread; + int* qs_ptr = reinterpret_cast(atom_ptr + kRankTileScaleOffset) + + (thread / 8); + + __builtin_nontemporal_store(qw, qw_ptr); + if (threadIdx.x == group_leader) { + __builtin_nontemporal_store(decoding_scale, qs_ptr); + } + } + } + + __quickreduce_device_inline__ void recv(int32x4_t** __restrict__ recv_buffer, + int32x4_t* __restrict__ data) { + for (int k = 0; k < kRankAtoms; k++) { + // Directly read quantized atom from recv_buffer + uint8_t* atom_ptr = reinterpret_cast(*recv_buffer); + int32_t* qw_ptr = reinterpret_cast(atom_ptr) + thread; + int* qs_ptr = reinterpret_cast(atom_ptr + kRankTileScaleOffset) + + (thread / 8); + + int32_t qw = __builtin_nontemporal_load(qw_ptr); + int qs = __builtin_nontemporal_load(qs_ptr); + + *recv_buffer += kRankBufferTileStride; + + // Unpack q4 into f16x8_t + int32x4_t w; + { + static constexpr uint kMask000F = 0x000F000F; + static constexpr uint kHalf2_1024 = + 0x64006400; // {1024.0, 1024.0}, fp16x2_t + static uint constexpr kHalf2_1032 = + 0xE408E408; // {-1032.0, -1032.0}, fp16x2_t + + for (int i = 0; i < 4; i++) { + if constexpr (std::is_same::value) { + int32_t q4 = ((qw >> (i * 4)) & kMask000F) | kHalf2_1024; + w[i] = packed_add(q4, kHalf2_1032); + } else { + int32_t int16_2 = (qw >> (i * 4)) & kMask000F; + int16_t low = static_cast(int16_2 & 0xFFFF); + int16_t high = static_cast((int16_2 >> 16) & 0xFFFF); + nv_bfloat16 bf_low = __float2bfloat16(static_cast(low)); + nv_bfloat16 bf_high = __float2bfloat16(static_cast(high)); + nv_bfloat162 bf2 = __halves2bfloat162(bf_low, bf_high); + int32_t packed_bf16 = *reinterpret_cast(&bf2); + w[i] = packed_add(packed_bf16, kRangeMin); + } + } + } + + // Apply decoding scales + for (int i = 0; i < 4; i++) { + w[i] = packed_mul(w[i], qs); + } + + data[k] = w; + } + } +}; + +// Int6 symmetric quantization codec. +// We quantize the FP16 data to block-scaled Int6 in blocks of 4 * +// kThreadGroupSize. +template +struct CodecQ6 : public CodecBase { + static constexpr int kWorldSize = world_size; + + // Codec tile size process by this workgroup. + // Each threads processes a fragment of fp16x8_t (16B), + // into a int6x8_t (4B + 2B) and a fp16 scale shared among 32 values. + static constexpr int kRankAtoms = kAtoms / kWorldSize; + static constexpr int kRankTileStride = 1664; + static constexpr int kRankTileQ2Offset = 1024; + static constexpr int kRankTileScaleOffset = 1536; + static constexpr int kRankTransmittedTileSize = kRankTileStride * kRankAtoms; + static_assert(kRankTransmittedTileSize % 16 == 0, + "kRankTransmittedTileSize must be 16B aligned."); + + static constexpr int kRankBufferTileStride = + kRankTileStride / sizeof(int32x4_t); + + // Total tile size for the collective communication. + static constexpr int kTransmittedTileSize = + kRankTransmittedTileSize * kWorldSize; + + // Constants configuration + + // {-1/32.0h, -1/32.0h}, fp16x2_t + static constexpr int kScaleFactor = + std::is_same::value ? 0xA800A800 : 0xBD00BD00; + + // {1e-7, 1e-7}, fp16x2_t + static constexpr int kScaleEpsilon = + std::is_same::value ? 0x00010001 : 0x33D733D7; + + // {-32, -32}, fp16x2_t + static constexpr int kRangeMin = + std::is_same::value ? 0xD000D000 : 0xC200C200; + + // {+31, +31}, fp16x2_t + static constexpr int kRangeMax = + std::is_same::value ? 0x4FC04FC0 : 0x41F841F8; + + // {+32, +32}, int16x2_t + static constexpr int kRangeBias = 0x00200020; + + __quickreduce_device_inline__ CodecQ6(int thread, int rank) + : CodecBase(thread, rank) {} + + __quickreduce_device_inline__ void send(int32x4_t* __restrict__ send_buffer, + const int32x4_t* __restrict__ data) { + for (int k = 0; k < kRankAtoms; k++) { + int32x4_t const atom = data[k]; + + // Compute the absolute maximum of the atom in the thread group + // In 2 blocks of values, upper/lower halves of the f16x2_t + int wblockmax = group_abs_max(atom); + + // Derive scales + int decoding_scale; + int encoding_scale; + decoding_scale = packed_mul(wblockmax, kScaleFactor); + encoding_scale = packed_add(decoding_scale, kScaleEpsilon); + encoding_scale = packed_rcp(encoding_scale); + + // Apply scales to get quantized values + int32x4_t w; + for (int i = 0; i < 4; i++) { + w[i] = packed_mul(atom[i], encoding_scale); + w[i] = packed_max(w[i], kRangeMin); + w[i] = packed_min(w[i], kRangeMax); + } + + // Convert from f16x2_t to uint16x2_t + int32x4_t q; + { + int16_t* qi = reinterpret_cast(&q); + T* wh = reinterpret_cast(&w); + for (int i = 0; i < 8; i++) qi[i] = (int16_t)rintf(T2float_cast(wh[i])); + + for (int i = 0; i < 4; i++) { + q[i] = packed_add(q[i], kRangeBias); + } + } + + // Pack 8 x q6 into int32_t + int16_t + uint32_t q4w; + uint16_t q2w = 0; + q4w = (q[0] & 0x000F000F) | ((q[1] & 0x000F000F) << 4) | + ((q[2] & 0x000F000F) << 8) | ((q[3] & 0x000F000F) << 12); + { + int16_t* tw = reinterpret_cast(&q); +#pragma unroll + for (int i = 0; i < 8; i++) { + q2w |= (tw[i] >> 4) << (i * 2); + } + } + // Write quantized atom to send_buffer + // note: only the group leader stores the scale + uint8_t* atom_ptr = + reinterpret_cast(send_buffer + k * kRankBufferTileStride); + uint32_t* q4w_ptr = reinterpret_cast(atom_ptr) + thread; + uint16_t* q2w_ptr = + reinterpret_cast(atom_ptr + kRankTileQ2Offset) + thread; + int* qs_ptr = reinterpret_cast(atom_ptr + kRankTileScaleOffset) + + (thread / 8); + + __builtin_nontemporal_store(q4w, q4w_ptr); + __builtin_nontemporal_store(q2w, q2w_ptr); + if (threadIdx.x == group_leader) { + __builtin_nontemporal_store(decoding_scale, qs_ptr); + } + } + } + + __quickreduce_device_inline__ void recv(int32x4_t** __restrict__ recv_buffer, + int32x4_t* __restrict__ data) { + for (int k = 0; k < kRankAtoms; k++) { + // Directly read quantized atom from recv_buffer + uint8_t* atom_ptr = reinterpret_cast(*recv_buffer); + uint32_t* q4w_ptr = reinterpret_cast(atom_ptr) + thread; + uint16_t* q2w_ptr = + reinterpret_cast(atom_ptr + kRankTileQ2Offset) + thread; + int* qs_ptr = reinterpret_cast(atom_ptr + kRankTileScaleOffset) + + (thread / 8); + + uint32_t q4w = __builtin_nontemporal_load(q4w_ptr); + uint16_t q2w = __builtin_nontemporal_load(q2w_ptr); + int qs = __builtin_nontemporal_load(qs_ptr); + + *recv_buffer += kRankBufferTileStride; + + // Unpack q6 into fp16x8_t + int32x4_t w; + { + static uint constexpr kMask000F = 0x000F000F; + static uint constexpr kHalf2_1024 = + 0x64006400; // {1024.0, 1024.0}, fp16x2_t + static uint constexpr kHalf2_1056 = + 0xE420E420; // {-1056.0, -1056.0}, fp16x2_t + +#pragma unroll + for (int i = 0; i < 4; i++) { + int32_t q4 = q4w & kMask000F; + int32_t q2 = (q2w & 0x3) | ((q2w & 0xC) << 14); + q4w >>= 4; + q2w >>= 4; + if constexpr (std::is_same::value) { + int32_t q6 = q4 | (q2 << 4) | kHalf2_1024; + asm volatile("v_pk_add_f16 %0, %1, %2" + : "=v"(w[i]) + : "v"(q6), "v"(kHalf2_1056)); + } else { + int32_t int16_2 = q4 | (q2 << 4); + int16_t low = static_cast(int16_2 & 0xFFFF); + int16_t high = static_cast((int16_2 >> 16) & 0xFFFF); + + nv_bfloat16 bf_low = __float2bfloat16(static_cast(low)); + nv_bfloat16 bf_high = __float2bfloat16(static_cast(high)); + nv_bfloat162 bf2 = __halves2bfloat162(bf_low, bf_high); + int32_t packed_bf16 = *reinterpret_cast(&bf2); + w[i] = packed_add(packed_bf16, kRangeMin); + } + } + } + + // Apply decoding scales + for (int i = 0; i < 4; i++) { + w[i] = packed_mul(w[i], qs); + } + + // That's pretty much it... + data[k] = w; + } + } +}; + +// Int8 symmetric quantization codec. +// We quantize the FP16 data to block-scaled Int8 in blocks of 4 * +// kThreadGroupSize. +template +struct CodecQ8 : public CodecBase { + static constexpr int kWorldSize = world_size; + + // Codec tile size process by this workgroup. + // Each threads processes a fragment of f16x8_t (16B), + // into a int8x8_t (8B) and a f16 scale shared among 32 values. + static constexpr int kRankAtoms = kAtoms / kWorldSize; + static constexpr int kRankTileStride = 2176; + static constexpr int kRankTileScaleOffset = 2048; + static constexpr int kRankTransmittedTileSize = kRankTileStride * kRankAtoms; + static_assert(kRankTransmittedTileSize % 16 == 0, + "kRankTileSize must be 16B aligned."); + + static constexpr int kRankBufferTileStride = + kRankTileStride / sizeof(int32x4_t); + + // Total tile size for the collective communication. + static constexpr int kTransmittedTileSize = + kRankTransmittedTileSize * kWorldSize; + + // Constants configuration + + // {-1/128.0h, -1/128.0h}, f16x2_t + static constexpr int kScaleFactor = + std::is_same::value ? 0xA000A000 : 0xBC00BC00; + + // {1e-7, 1e-7}, f16x2_t + static constexpr int kScaleEpsilon = + std::is_same::value ? 0x00010001 : 0x33D733D7; + + // {-128, -128}, f16x2_t + static constexpr int kRangeMin = + std::is_same::value ? 0xD800D800 : 0xC300C300; + // {+127, +127}, f16x2_t + static constexpr int kRangeMax = + std::is_same::value ? 0x57F057F0 : 0x42FE42FE; + + // {+128, +128}, int16x2_t + static constexpr int kRangeBias = 0x00800080; + + __quickreduce_device_inline__ CodecQ8(int thread, int rank) + : CodecBase(thread, rank) {} + + __quickreduce_device_inline__ void send(int32x4_t* __restrict__ send_buffer, + int32x4_t const* __restrict__ data) { + for (int k = 0; k < kRankAtoms; k++) { + int32x4_t const atom = data[k]; + // Compute the absolute maximum of the atom in the thread group + // In 2 blocks of values, upper/lower halves of the f16x2_t + int wblockmax = group_abs_max(atom); + + // Derive scales + int decoding_scale; + int encoding_scale; + decoding_scale = packed_mul(wblockmax, kScaleFactor); + encoding_scale = packed_add(decoding_scale, kScaleEpsilon); + encoding_scale = packed_rcp(encoding_scale); + + // Apply scales to get quantized values + int32x4_t w; + for (int i = 0; i < 4; i++) { + w[i] = packed_mul(atom[i], encoding_scale); + w[i] = packed_max(w[i], kRangeMin); + w[i] = packed_min(w[i], kRangeMax); + } + + // Convert from f16x2_t to uint16x2_t + int32x4_t q; + { + int16_t* qi = reinterpret_cast(&q); + T* wh = reinterpret_cast(&w); + for (int i = 0; i < 8; i++) qi[i] = (int16_t)rintf(T2float_cast(wh[i])); + + for (int i = 0; i < 4; i++) { + q[i] = packed_add(q[i], kRangeBias); + } + } + + // Pack 8 x q8 into int32x2_t + int32x2_t qw; + qw[0] = q[0] | (q[1] << 8); + qw[1] = q[2] | (q[3] << 8); + + // Write quantized atom to send_buffer + // note: only the group leader stores the scale + uint8_t* atom_ptr = + reinterpret_cast(send_buffer + k * kRankBufferTileStride); + int32x2_t* qw_ptr = reinterpret_cast(atom_ptr) + thread; + int* qs_ptr = reinterpret_cast(atom_ptr + kRankTileScaleOffset) + + (thread / 8); + + __builtin_nontemporal_store(qw, qw_ptr); + if (threadIdx.x == group_leader) { + __builtin_nontemporal_store(decoding_scale, qs_ptr); + } + } + } + + __quickreduce_device_inline__ void recv(int32x4_t** __restrict__ recv_buffer, + int32x4_t* __restrict__ data) { + for (int k = 0; k < kRankAtoms; k++) { + // Directly read quantized atom from recv_buffer + uint8_t* atom_ptr = reinterpret_cast(*recv_buffer); + int32x2_t* qw_ptr = reinterpret_cast(atom_ptr) + thread; + int* qs_ptr = reinterpret_cast(atom_ptr + kRankTileScaleOffset) + + (thread / 8); + + int32x2_t qw = __builtin_nontemporal_load(qw_ptr); + int qs = __builtin_nontemporal_load(qs_ptr); + + *recv_buffer += kRankBufferTileStride; + + // Unpack q8 into fp16x8_t + int32x4_t w; + { + static uint constexpr kMask00FF = 0x00FF00FF; + + // {1024.0, 1024.0}, fp16x2_t + static uint constexpr kHalf2_1024 = 0x64006400; + + // {-1152.0, -1152.0}, fp16x2_t + static uint constexpr kHalf2_1152 = 0xE480E480; + +#pragma unroll + for (int i = 0; i < 4; i++) { + if constexpr (std::is_same::value) { + int32_t q8 = + ((qw[i / 2] >> ((i % 2) * 8)) & kMask00FF) | kHalf2_1024; + w[i] = packed_add(q8, kHalf2_1152); + } else { + int32_t int16_2 = (qw[i / 2] >> ((i % 2) * 8)) & kMask00FF; + int16_t low = static_cast(int16_2 & 0xFFFF); + int16_t high = static_cast((int16_2 >> 16) & 0xFFFF); + nv_bfloat16 bf_low = __float2bfloat16(static_cast(low)); + nv_bfloat16 bf_high = __float2bfloat16(static_cast(high)); + nv_bfloat162 bf2 = __halves2bfloat162(bf_low, bf_high); + int32_t packed_bf16 = *reinterpret_cast(&bf2); + w[i] = packed_add(packed_bf16, kRangeMin); + } + } + } + + // Apply decoding scales + for (int i = 0; i < 4; i++) { + w[i] = packed_mul(w[i], qs); + } + + data[k] = w; + } + } +}; + +// Twoshot All Reduce +template +struct AllReduceTwoshot { + static_assert(sizeof(T) == 2); + + static constexpr int kWorldSize = Codec::kWorldSize; + + __device__ static void run( + T const* __restrict__ input, T* __restrict__ output, + uint32_t const N, // number of elements + int const block, // block index + int const rank, // rank index + uint8_t** __restrict__ buffer_list, // communication buffers + uint32_t const data_offset, // offset to start of the data buffer + uint32_t flag_color) { + // Topology + int thread = threadIdx.x + threadIdx.y * kWavefront; + uint8_t* rank_buffer = buffer_list[rank]; + Codec codec(thread, rank); + int block_id = blockIdx.x; + int grid_size = gridDim.x; + // -------------------------------------------------------- + // Read input into registers + int32x4_t tA[kAtoms]; + + BufferResource src_buffer(const_cast(input), N * sizeof(T)); + uint32_t src_offset = block * kTileSize + thread * sizeof(int32x4_t); + + for (int i = 0; i < kAtoms; i++) { + tA[i] = buffer_load_dwordx4(src_buffer.descriptor, src_offset, 0, 0); + src_offset += kAtomStride * sizeof(int32x4_t); + if constexpr (cast_bf2half) { + const nv_bfloat162* bf_buf = + reinterpret_cast(&tA[i]); + half2 half_buf[4]; +#pragma unroll + for (int j = 0; j < 4; ++j) { + float2 f = __bfloat1622float2(bf_buf[j]); + half_buf[j] = __float22half2_rn(f); + } + tA[i] = *reinterpret_cast(half_buf); + } + } + + // -------------------------------------------------------- + // Phase-1A: Write segment data into the communication buffer of the target + // rank responsible for this segment. + uint32_t comm_data0_offset = + data_offset + block_id * Codec::kTransmittedTileSize; + uint32_t comm_data1_offset = + grid_size * Codec::kTransmittedTileSize + comm_data0_offset; + + uint32_t comm_flags0_offset = block_id * (kWorldSize * sizeof(uint32_t)); + uint32_t comm_flags1_offset = + grid_size * (kWorldSize * sizeof(uint32_t)) + comm_flags0_offset; + + for (int r = 0; r < kWorldSize; r++) { + int32x4_t* send_buffer = + reinterpret_cast(buffer_list[r] + comm_data0_offset + + rank * Codec::kRankTransmittedTileSize); + codec.send(send_buffer, &tA[r * Codec::kRankAtoms]); + } + + __syncthreads(); + if (thread < kWorldSize) { + int r = thread; + uint32_t* flag_ptr = reinterpret_cast( + buffer_list[r] + comm_flags0_offset + rank * sizeof(uint32_t)); + set_sync_flag(flag_ptr, flag_color); + } + // -------------------------------------------------------- + // Phase-1B: Reduce the segment data from the communication buffers. + int32x4_t tR[Codec::kRankAtoms] = {}; + { + // Read the data from the communication buffer. + int32x4_t* recv_buffer = + reinterpret_cast(rank_buffer + comm_data0_offset); + uint32_t* flag_ptr = + reinterpret_cast(rank_buffer + comm_flags0_offset); + + for (int r = 0; r < kWorldSize; r++) { + // Wait for the flags to be set. + if (thread == 0) { + wait_sync_flag(&flag_ptr[r], flag_color); + } + __syncthreads(); + + // note: we reuse tA as temp buffer here + codec.recv(&recv_buffer, tA); + + for (int i = 0; i < Codec::kRankAtoms; i++) { + packed_assign_add(&tR[i], &tA[i]); + } + } + } + + // Phase-2: Write the reduced segment to every other rank + for (int r = 0; r < kWorldSize; r++) { + int32x4_t* send_buffer = + reinterpret_cast(buffer_list[r] + comm_data1_offset + + rank * Codec::kRankTransmittedTileSize); + codec.send(send_buffer, tR); + } + + __syncthreads(); + if (thread < kWorldSize) { + int r = thread; + uint32_t* flag_ptr = reinterpret_cast( + buffer_list[r] + comm_flags1_offset + rank * sizeof(uint32_t)); + set_sync_flag(flag_ptr, flag_color); + } + + // Phase-2: Read the gather segments from the rank's communication buffer. + { + // Read the data from the communication buffer. + int32x4_t* recv_buffer = + reinterpret_cast(rank_buffer + comm_data1_offset); + uint32_t* flag_ptr = + reinterpret_cast(rank_buffer + comm_flags1_offset); + + for (int r = 0; r < kWorldSize; r++) { + // Wait for the flags to be set. + if (thread == 0) { + wait_sync_flag(&flag_ptr[r], flag_color); + } + __syncthreads(); + + // Gather all reduced and final rank segments into tA. + codec.recv(&recv_buffer, &tA[r * Codec::kRankAtoms]); + } + } + + // -------------------------------------------------------- + // Write the result to output. + BufferResource dst_buffer(output, N * sizeof(T)); + uint32_t dst_offset = block * kTileSize + thread * sizeof(int32x4_t); + + for (int i = 0; i < kAtoms; i++) { + if constexpr (cast_bf2half) { + const half2* half_buf = reinterpret_cast(&tA[i]); + nv_bfloat162 bf16_buf[4]; +#pragma unroll + for (int j = 0; j < 4; ++j) { + float2 f = __half22float2(half_buf[j]); + bf16_buf[j] = __float22bfloat162_rn(f); + } + buffer_store_dwordx4(*reinterpret_cast(bf16_buf), + dst_buffer.descriptor, dst_offset, 0, 0); + } else { + buffer_store_dwordx4(tA[i], dst_buffer.descriptor, dst_offset, 0, 0); + } + dst_offset += kAtomStride * sizeof(int32x4_t); + } + } +}; + +} // namespace quickreduce \ No newline at end of file diff --git a/csrc/torch_bindings.cpp b/csrc/torch_bindings.cpp index 1a1896b4c1ee9..8bb71cad29dae 100644 --- a/csrc/torch_bindings.cpp +++ b/csrc/torch_bindings.cpp @@ -725,6 +725,24 @@ TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _custom_ar), custom_ar) { custom_ar.impl("open_mem_handle", torch::kCPU, &open_mem_handle); custom_ar.def("free_shared_buffer", &free_shared_buffer); +#ifdef USE_ROCM + // Quick Reduce all-reduce kernels + custom_ar.def( + "qr_all_reduce(int fa, Tensor inp, Tensor out, int quant_level, bool " + "cast_bf2half) -> ()"); + custom_ar.impl("qr_all_reduce", torch::kCUDA, &qr_all_reduce); + + custom_ar.def("init_custom_qr", &init_custom_qr); + custom_ar.def("qr_destroy", &qr_destroy); + + custom_ar.def("qr_get_handle", &qr_get_handle); + + custom_ar.def("qr_open_handles(int _fa, Tensor[](b!) handles) -> ()"); + custom_ar.impl("qr_open_handles", torch::kCPU, &qr_open_handles); + + // Max input size in bytes + custom_ar.def("qr_max_size", &qr_max_size); +#endif } REGISTER_EXTENSION(TORCH_EXTENSION_NAME) diff --git a/tests/distributed/test_quick_all_reduce.py b/tests/distributed/test_quick_all_reduce.py new file mode 100644 index 0000000000000..a4added29144e --- /dev/null +++ b/tests/distributed/test_quick_all_reduce.py @@ -0,0 +1,138 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +import random + +import pytest +import ray +import torch +import torch.distributed as dist + +from vllm.distributed.communication_op import ( # noqa + tensor_model_parallel_all_reduce) +from vllm.distributed.parallel_state import (get_tensor_model_parallel_group, + get_tp_group, graph_capture) +from vllm.platforms import current_platform + +from ..utils import (ensure_model_parallel_initialized, + init_test_distributed_environment, multi_process_parallel) + +torch.manual_seed(42) +random.seed(44) +# Size over 8MB is sufficient for custom quick allreduce. +test_sizes = [ + random.randint(8 * 1024 * 1024, 10 * 1024 * 1024) for _ in range(8) +] +for i, v in enumerate(test_sizes): + test_sizes[i] -= v % 8 + + +@ray.remote(num_gpus=1, max_calls=1) +def graph_quickreduce( + monkeypatch: pytest.MonkeyPatch, + tp_size, + pp_size, + rank, + distributed_init_port, +): + with monkeypatch.context() as m: + m.delenv("CUDA_VISIBLE_DEVICES", raising=False) + device = torch.device(f"cuda:{rank}") + torch.cuda.set_device(device) + init_test_distributed_environment(tp_size, pp_size, rank, + distributed_init_port) + ensure_model_parallel_initialized(tp_size, pp_size) + group = get_tensor_model_parallel_group().device_group + + # A small all_reduce for warmup. + # this is needed because device communicators might be created lazily + # (e.g. NCCL). This will ensure that the communicator is initialized + # before any communication happens, so that this group can be used for + # graph capture immediately. + data = torch.zeros(1) + data = data.to(device=device) + torch.distributed.all_reduce(data, group=group) + torch.cuda.synchronize() + del data + + # we use the first group to communicate once + # and the second group to communicate twice + # and so on + # this is used to demonstrate that each group can + # communicate independently + num_communication = rank // tp_size + 1 + + for sz in test_sizes: + for dtype in [torch.float16, torch.bfloat16]: + with graph_capture(device=device) as graph_capture_context: + inp1 = torch.randint(1, + 23, (sz, ), + dtype=dtype, + device=torch.cuda.current_device()) + inp2 = torch.randint(-23, + 1, (sz, ), + dtype=dtype, + device=torch.cuda.current_device()) + torch.cuda.synchronize() + graph = torch.cuda.CUDAGraph() + with torch.cuda.graph(graph, + stream=graph_capture_context.stream): + for _ in range(num_communication): + out1 = tensor_model_parallel_all_reduce(inp1) + dist.all_reduce(inp1, group=group) + out2 = tensor_model_parallel_all_reduce(inp2) + dist.all_reduce(inp2, group=group) + graph.replay() + torch.testing.assert_close(out1, inp1, atol=2.5, rtol=0.1) + torch.testing.assert_close(out2, inp2, atol=2.5, rtol=0.1) + + +@ray.remote(num_gpus=1, max_calls=1) +def eager_quickreduce( + monkeypatch: pytest.MonkeyPatch, + tp_size, + pp_size, + rank, + distributed_init_port, +): + with monkeypatch.context() as m: + m.delenv("CUDA_VISIBLE_DEVICES", raising=False) + device = torch.device(f"cuda:{rank}") + torch.cuda.set_device(device) + + init_test_distributed_environment(tp_size, pp_size, rank, + distributed_init_port) + + # Size over 8MB is sufficient for custom quick allreduce. + sz = 16 * 1024 * 1024 + fa = get_tp_group().device_communicator.qr_comm + inp = torch.tensor([1.0 * ((i) % 23) for i in range(sz)], + dtype=torch.float16, + device=device) + out = fa.quick_all_reduce(inp) + torch.testing.assert_close(out, inp * tp_size, atol=2.5, rtol=0.1) + + inp = torch.tensor([1.0 * ((i) % 23) for i in range(sz)], + dtype=torch.bfloat16, + device=device) + out = fa.quick_all_reduce(inp) + torch.testing.assert_close(out, inp * tp_size, atol=2.5, rtol=0.1) + + +@pytest.mark.skipif(not current_platform.is_rocm(), + reason="only test quick allreduce for rocm") +@pytest.mark.parametrize("quant_mode", ["FP", "INT8", "INT6", "INT4"]) +@pytest.mark.parametrize("tp_size", [2]) +@pytest.mark.parametrize("pipeline_parallel_size", [1, 2]) +@pytest.mark.parametrize("test_target", [graph_quickreduce, eager_quickreduce]) +def test_custom_quick_allreduce(monkeypatch: pytest.MonkeyPatch, tp_size, + pipeline_parallel_size, test_target, + quant_mode): + world_size = tp_size * pipeline_parallel_size + if world_size > torch.cuda.device_count(): + pytest.skip("Not enough GPUs to run the test.") + + monkeypatch.setenv("VLLM_ROCM_QUICK_REDUCE_QUANTIZATION", quant_mode) + + multi_process_parallel(monkeypatch, tp_size, pipeline_parallel_size, + test_target) diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index d5a41284385e6..215f35bad34d9 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -1748,6 +1748,38 @@ def free_shared_buffer(ptr: int) -> None: torch.ops._C_custom_ar.free_shared_buffer(ptr) +# quick all reduce +def init_custom_qr(rank: int, + world_size: int, + qr_max_size: Optional[int] = None) -> int: + return torch.ops._C_custom_ar.init_custom_qr(rank, world_size, qr_max_size) + + +def qr_destroy(fa: int) -> None: + torch.ops._C_custom_ar.qr_destroy(fa) + + +def qr_all_reduce(fa: int, + inp: torch.Tensor, + out: torch.Tensor, + quant_level: int, + cast_bf2half: bool = False) -> None: + torch.ops._C_custom_ar.qr_all_reduce(fa, inp, out, quant_level, + cast_bf2half) + + +def qr_get_handle(fa: int) -> torch.Tensor: + return torch.ops._C_custom_ar.qr_get_handle(fa) + + +def qr_open_handles(fa: int, handles: list[torch.Tensor]) -> None: + return torch.ops._C_custom_ar.qr_open_handles(fa, handles) + + +def qr_max_size() -> int: + return torch.ops._C_custom_ar.qr_max_size() + + def get_flash_mla_metadata( cache_seqlens: torch.Tensor, num_heads_per_head_k: int, diff --git a/vllm/distributed/device_communicators/cuda_communicator.py b/vllm/distributed/device_communicators/cuda_communicator.py index 055d91690e676..3958d566b1745 100644 --- a/vllm/distributed/device_communicators/cuda_communicator.py +++ b/vllm/distributed/device_communicators/cuda_communicator.py @@ -8,6 +8,7 @@ from torch.distributed import ProcessGroup import vllm.envs as envs from vllm.logger import init_logger +from vllm.platforms import current_platform from .base_device_communicator import DeviceCommunicatorBase @@ -41,6 +42,8 @@ class CudaCommunicator(DeviceCommunicatorBase): CustomAllreduce) from vllm.distributed.device_communicators.pynccl import ( PyNcclCommunicator) + from vllm.distributed.device_communicators.quick_all_reduce import ( + QuickAllReduce) self.pynccl_comm: Optional[PyNcclCommunicator] = None if use_pynccl and self.world_size > 1: @@ -50,6 +53,7 @@ class CudaCommunicator(DeviceCommunicatorBase): ) self.ca_comm: Optional[CustomAllreduce] = None + self.qr_comm: Optional[QuickAllReduce] = None if use_custom_allreduce and self.world_size > 1: # Initialize a custom fast all-reduce implementation. self.ca_comm = CustomAllreduce( @@ -57,6 +61,14 @@ class CudaCommunicator(DeviceCommunicatorBase): device=self.device, ) + if current_platform.is_rocm(): + # Initialize a custom quick all-reduce implementation for AMD. + # Quick reduce is designed as a complement to custom allreduce. + # Based on quickreduce (https://github.com/mk1-project/quickreduce). + # If it's a rocm, 'use_custom_allreduce==True' means it must + # currently be an MI300 series. + self.qr_comm = QuickAllReduce(group=self.cpu_group, + device=self.device) if self.use_all2all: all2all_backend = envs.VLLM_ALL2ALL_BACKEND if all2all_backend == "naive": @@ -79,8 +91,14 @@ class CudaCommunicator(DeviceCommunicatorBase): raise ValueError(f"Unknown all2all backend: {all2all_backend}") def all_reduce(self, input_): - # always try custom allreduce first, - # and then pynccl. + # always try quick reduce first, then custom allreduce, + # and then pynccl. (quick reduce just for ROCM MI3*) + qr_comm = self.qr_comm + if qr_comm is not None and not qr_comm.disabled and \ + qr_comm.should_quick_allreduce(input_): + out = qr_comm.quick_all_reduce(input_) + assert out is not None + return out ca_comm = self.ca_comm if ca_comm is not None and not ca_comm.disabled and \ ca_comm.should_custom_ar(input_): diff --git a/vllm/distributed/device_communicators/quick_all_reduce.py b/vllm/distributed/device_communicators/quick_all_reduce.py new file mode 100644 index 0000000000000..c61231e2d33f4 --- /dev/null +++ b/vllm/distributed/device_communicators/quick_all_reduce.py @@ -0,0 +1,278 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project + +from enum import Enum +from typing import Union + +import torch +import torch.distributed as dist +from torch.distributed import ProcessGroup + +import vllm.envs as envs +from vllm import _custom_ops as ops +from vllm.config import get_current_vllm_config +from vllm.distributed.parallel_state import in_the_same_node_as +from vllm.logger import init_logger +from vllm.platforms import current_platform +from vllm.utils import cuda_device_count_stateless + +logger = init_logger(__name__) + +try: + ops.qr_max_size() + quick_ar = True +except Exception: + # For CPUs and CUDA + quick_ar = False + + +def is_weak_contiguous(inp: torch.Tensor): + return inp.is_contiguous() or (inp.storage().nbytes() - + inp.storage_offset() * inp.element_size() + == inp.numel() * inp.element_size()) + + +class QuickReduceRegime(Enum): + FP = 0 + INT8 = 1 + INT6 = 2 + INT4 = 3 + NONE = 4 + + +MB = 1024 * 1024 + + +class QuickAllReduce: + + _SUPPORTED_WORLD_SIZES = [2, 4, 8] + _SUPPORTED_DTYPES = [torch.float16, torch.bfloat16] + # The following data is based on kernel tests. + # In this order [FP, INT8, INT6, INT4]. + _QR_MIN_SIZE = { + (torch.float16, 2): [1 * MB, 2 * MB, 2 * MB, 1 * MB], + (torch.float16, 4): [1 * MB, 16 * MB, 4 * MB, 2 * MB], + (torch.float16, 8): [16 * MB, 4 * MB, 4 * MB, 2 * MB], + (torch.bfloat16, 2): [2 * MB, 8 * MB, 8 * MB, 8 * MB], + (torch.bfloat16, 4): [8 * MB, 64 * MB, 64 * MB, 16 * MB], + (torch.bfloat16, 8): [16 * MB, 2048 * MB, 2048 * MB, 2048 * MB], + } + + def __init__(self, group: ProcessGroup, + device: Union[int, str, torch.device]) -> None: + """ + Custom allreduce provides non-destructive acceleration and is + available for CUDA and ROCm MI300 series. + + Custom quick allreduce leverages quantization for further + acceleration on ROCm. It currently supports Q8, Q6, and Q4 + quantization formats and FP(float16, bfloat16). + + Quick allreduce is designed as a complement to custom allreduce. + Its initialization requires even stricter conditions. + + Only the ROCm MI300 series is supported for quick allreduce at + this time. + + Args: + group: the process group to work on. If None, it will use the + default process group. + device: the device to bind the CustomAllreduce to. If None, + it will be bind to f"cuda:{local_rank}". + It is the caller's responsibility to make sure each communicator + is bind to a unique device, and all communicators in this group + are in the same node. + """ + self.disabled = True + if not self._rocm_arch_available(): + logger.debug( + "Custom quick allreduce is only supported on ROCm MI300 series." + ) + return + + if not quick_ar: + # disable because of missing quick reduce library + # e.g. in a cuda environment + logger.info("Custom quick allreduce is disabled because " + "of missing custom quick allreduce library") + return + + self.group = group + assert dist.get_backend(group) != dist.Backend.NCCL, ( + "Custom quick allreduce should be attached to a non-NCCL group.") + if not all(in_the_same_node_as(group, source_rank=0)): + # No need to initialize custom quick allreduce for + # multi-node case. + logger.warning("Custom quick allreduce is disabled because this " + "process group spans across nodes.") + return + rank = dist.get_rank(group=self.group) + world_size = dist.get_world_size(group=self.group) + self.rank = rank + self.world_size = world_size + if world_size == 1: + # No need to initialize QuickReduce for single GPU case. + return + + if world_size not in QuickAllReduce._SUPPORTED_WORLD_SIZES: + logger.warning( + "Custom quick allreduce is disabled due to an " + "unsupported world size: %d. Supported world sizes: %s.", + world_size, str(QuickAllReduce._SUPPORTED_WORLD_SIZES)) + return + + if isinstance(device, int): + device = torch.device(f"cuda:{device}") + elif isinstance(device, str): + device = torch.device(device) + assert isinstance(device, torch.device) + self.device = device + + cuda_visible_devices = envs.CUDA_VISIBLE_DEVICES + if cuda_visible_devices: + device_ids = list(map(int, cuda_visible_devices.split(","))) + else: + device_ids = list(range(cuda_device_count_stateless())) + physical_device_id = device_ids[device.index] + tensor = torch.tensor([physical_device_id], + dtype=torch.int, + device="cpu") + gather_list = [ + torch.tensor([0], dtype=torch.int, device="cpu") + for _ in range(self.world_size) + ] + dist.all_gather(gather_list, tensor, group=self.group) + physical_device_ids = [t.item() for t in gather_list] + + # test nvlink first, this will filter out most of the cases + # where custom quick allreduce is not supported + # this checks hardware and driver support for NVLink + assert current_platform.is_cuda_alike() + self.fully_connected = current_platform.is_fully_connected( + physical_device_ids) + if self.world_size > 2 and not self.fully_connected: + logger.debug( + "Custom quick allreduce is disabled because it's not supported " + "on more than two PCIe-only GPUs. ") + return + + self.init_quick_all_reduce() + + def init_quick_all_reduce(self): + # On RocM, bfloat16 kernels are slower than fp16 + # due to slower match operations + # If environment variable is set to 1, we convert input to fp16 + self.use_fp16_kernels = envs.VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16 + regime_str = envs.VLLM_ROCM_QUICK_REDUCE_QUANTIZATION + if regime_str not in QuickReduceRegime.__members__: + logger.warning( + "Custom quick allreduce:", + f"Invalid quantization level: {regime_str}. " + "Supported levels: " + f"{list(QuickReduceRegime.__members__.keys())}") + return + + if regime_str == "NONE": + logger.debug("Custom quick allreduce is disabled based " + "on env variable " + "VLLM_ROCM_QUICK_REDUCE_QUANTIZATION='NONE'") + return + self.qr_quant_level = QuickReduceRegime[regime_str] + vllm_config = get_current_vllm_config() + if vllm_config is not None and \ + hasattr(vllm_config, "model_config") and \ + hasattr(vllm_config.model_config, "dtype"): + dtype = vllm_config.model_config.dtype + if dtype not in [torch.float16, torch.bfloat16]: + logger.debug( + "Custom quick allreduce disabled: only supports " + "float16 and float16, but get %s.", dtype) + return + + if dtype == torch.bfloat16 and self.use_fp16_kernels: + logger.info( + "Custom quick allreduce: BF16 inputs will be converted " + "to FP16 to improve performance. set " + "envs.VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16=0 " + "to turn off.") + + # VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB is specified in MB + qr_max_size = envs.VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB + if qr_max_size is not None: + if qr_max_size < 1: + logger.info( + "You should not set a max_size smaller than 1MB, which can " + "lead to error or degradation to custom allreduce or rccl." + ) + qr_max_size = qr_max_size * MB + self._ptr = ops.init_custom_qr(self.rank, self.world_size, qr_max_size) + self.qr_max_size = qr_max_size if qr_max_size is not None \ + else ops.qr_max_size() + self.create_shared_buffer() + self.disabled = False + + def _rocm_arch_available(self): + if not current_platform.is_rocm(): + return False + try: + props = torch.cuda.get_device_properties(0) + gcn_arch = getattr(props, "gcnArchName", "") + supported_archs = ['gfx94', 'gfx95'] + return any(gfx in gcn_arch for gfx in supported_archs) + except Exception as e: + logger.warning("Failed to determine ROCm for quick allreduce: %s", + e) + return False + + def create_shared_buffer(self): + """ + Creates a shared buffer for quickreduce. + Has to be called after init_custom_qr + """ + handle = ops.qr_get_handle(self._ptr) + world_size = dist.get_world_size(group=self.group) + handles = [None] * world_size + dist.all_gather_object(handles, handle, group=self.group) + ops.qr_open_handles(self._ptr, handles) + + def should_quick_allreduce(self, inp: torch.Tensor): + """ + Check if quickreduce is available + """ + if self.disabled: + return False + if inp.dtype not in self._SUPPORTED_DTYPES: + return False + inp_size = inp.numel() * inp.element_size() + # custom quick allreduce requires input byte size to be + # multiples of 16 + if inp_size % 16 != 0: + return False + if not is_weak_contiguous(inp): + return False + dtype = inp.dtype + if self.use_fp16_kernels: + dtype = torch.float16 + return inp_size <= self.qr_max_size and \ + inp_size >= self._QR_MIN_SIZE[(dtype, self.world_size)]\ + [self.qr_quant_level.value] + + def quick_all_reduce(self, inp: torch.Tensor, *, out: torch.Tensor = None): + """Performs an out-of-place custom quick all reduce.""" + # quick allreduce doesn't require a separate graph mode, + # as QR uses static IPC buffer. + if out is None: + out = torch.empty_like(inp) + ops.qr_all_reduce(self._ptr, inp, out, self.qr_quant_level.value, + self.use_fp16_kernels) + return out + + def close(self): + if not self.disabled and getattr(self, "_ptr", None): + if ops is not None: + ops.qr_destroy(self._ptr) + self._ptr = 0 + self.disabled = True + + def __del__(self): + self.close() diff --git a/vllm/envs.py b/vllm/envs.py index c9c81603a75a8..a3f19c7ee5c70 100644 --- a/vllm/envs.py +++ b/vllm/envs.py @@ -135,6 +135,9 @@ if TYPE_CHECKING: VLLM_KV_CACHE_LAYOUT: Optional[str] = None VLLM_COMPUTE_NANS_IN_LOGITS: bool = False VLLM_USE_NVFP4_CT_EMULATIONS: bool = False + VLLM_ROCM_QUICK_REDUCE_QUANTIZATION: str = "NONE" + VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16: bool = True + VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB: Optional[int] = None def get_default_cache_root(): @@ -690,6 +693,31 @@ environment_variables: dict[str, Callable[[], Any]] = { lambda: (os.getenv("VLLM_ROCM_CUSTOM_PAGED_ATTN", "True").lower() in ("true", "1")), + # Custom quick allreduce kernel for MI3* cards + # Choice of quantization level: FP, INT8, INT6, INT4 or NONE + # Recommended for large models to get allreduce + "VLLM_ROCM_QUICK_REDUCE_QUANTIZATION": + lambda: os.getenv("VLLM_ROCM_QUICK_REDUCE_QUANTIZATION", "NONE").upper(), + + # Custom quick allreduce kernel for MI3* cards + # Due to the lack of the bfloat16 asm instruction, bfloat16 + # kernels are slower than fp16, + # If environment variable is set to 1, the input is converted to fp16 + "VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16": + lambda: + (os.getenv("VLLM_ROCM_QUICK_REDUCE_CAST_BF16_TO_FP16", "True").lower() in + ("true", "1")), + + # Custom quick allreduce kernel for MI3* cards. + # Controls the maximum allowed number of data bytes(MB) for custom quick + # allreduce communication. + # Default: 2048 MB. + # Data exceeding this size will use either custom allreduce or RCCL + # communication. + "VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB": + lambda: maybe_convert_int( + os.environ.get("VLLM_ROCM_QUICK_REDUCE_MAX_SIZE_BYTES_MB", None)), + # If set, when running in Quark emulation mode, do not dequantize the # weights at load time. Instead, dequantize weights on-the-fly during # kernel execution. From 8b64c895c0c05d83458f1af67f81060d699d2526 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 26 Jun 2025 20:55:25 -0700 Subject: [PATCH 168/211] [CI] Sync test dependency with test.in for torch nightly (#19632) Signed-off-by: Yang Wang Signed-off-by: Yida Wu Signed-off-by: Nick Hill Co-authored-by: Concurrensee Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> Co-authored-by: Cyrus Leung Co-authored-by: Nick Hill --- .buildkite/test-pipeline.yaml | 12 ++- .pre-commit-config.yaml | 5 ++ requirements/nightly_torch_test.txt | 77 ++++++++++--------- requirements/test.in | 3 +- .../pytorch_nightly_dependency.sh | 42 ++++++++++ tools/generate_nightly_torch_test.py | 34 ++++++++ 6 files changed, 134 insertions(+), 39 deletions(-) create mode 100644 tests/standalone_tests/pytorch_nightly_dependency.sh create mode 100644 tools/generate_nightly_torch_test.py diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 26f70ad457b67..7f1841b1c97cd 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -41,6 +41,16 @@ steps: # TODO: add `--strict` once warnings in docstrings are fixed - mkdocs build +- label: Pytorch Nightly Dependency Override Check # 2min + # if this test fails, it means the nightly torch version is not compatible with some + # of the dependencies. Please check the error message and add the package to whitelist + # in /vllm/tools/generate_nightly_torch_test.py + soft_fail: true + source_file_dependencies: + - requirements/nightly_torch_test.txt + commands: + - bash standalone_tests/pytorch_nightly_dependency.sh + - label: Async Engine, Inputs, Utils, Worker Test # 24min mirror_hardwares: [amdexperimental] source_file_dependencies: @@ -767,7 +777,7 @@ steps: - bash weight_loading/run_model_weight_loading_test.sh -c weight_loading/models.txt - label: Weight Loading Multiple GPU Test - Large Models # optional - mirror_hardwares: [amdexperimental] + mirror_hardwares: [amdexperimental] working_dir: "/vllm-workspace/tests" num_gpus: 2 gpu: a100 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e62b623b4e114..15ef5defff69e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -53,6 +53,11 @@ repos: files: ^requirements/test\.(in|txt)$ - repo: local hooks: + - id: format-torch-nightly-test + name: reformat nightly_torch_test.txt to be in sync with test.in + language: python + entry: python tools/generate_nightly_torch_test.py + files: ^requirements/test\.(in|txt)$ - id: mypy-local name: Run mypy for local Python installation entry: tools/mypy.sh 0 "local" diff --git a/requirements/nightly_torch_test.txt b/requirements/nightly_torch_test.txt index 00acda3662608..fd0b0fac12a92 100644 --- a/requirements/nightly_torch_test.txt +++ b/requirements/nightly_torch_test.txt @@ -1,47 +1,50 @@ -# Dependency that able to run entrypoints test -# pytest and its extensions +# testing pytest -pytest-asyncio +tensorizer>=2.9.0 pytest-forked -pytest-mock +pytest-asyncio pytest-rerunfailures pytest-shard pytest-timeout -librosa # required by audio tests in entrypoints/openai -sentence-transformers # required for embedding tests -transformers==4.52.4 -transformers_stream_generator # required for qwen-vl test -numba == 0.61.2; python_version > '3.9' # testing utils -boto3 -botocore -datasets -ray >= 2.10.0 +backoff # required for phi4mm test +blobfile # required for kimi-vl test +einops # required for MPT, qwen-vl and Mamba +httpx +librosa # required for audio tests +vocos # required for minicpmo_26 test peft +pqdm +ray[cgraph,default]>=2.43.0, !=2.44.* # Ray Compiled Graph, required by pipeline parallelism tests +sentence-transformers # required for embedding tests +soundfile # required for audio tests +jiwer # required for audio tests +timm # required for internvl test +transformers_stream_generator # required for qwen-vl test +matplotlib # required for qwen-vl test +mistral_common[opencv] >= 1.6.2 # required for pixtral test +num2words # required for smolvlm test +opencv-python-headless >= 4.11.0 # required for video test +datamodel_code_generator # required for minicpm3 test +lm-eval[api]==0.4.8 # required for model evaluation test +mteb>=1.38.11, <2 # required for mteb test +transformers==4.52.4 +tokenizers==0.21.1 +huggingface-hub[hf_xet]>=0.30.0 # Required for Xet downloads. +schemathesis>=3.39.15 # Required for openai schema test. +# quantization +bitsandbytes>=0.45.3 +buildkite-test-collector==0.1.9 + + +genai_perf==0.0.8 +tritonclient==2.51.0 + +numba == 0.60.0; python_version == '3.9' # v0.61 doesn't support Python 3.9. Required for N-gram speculative decoding +numba == 0.61.2; python_version > '3.9' +numpy runai-model-streamer==0.11.0 runai-model-streamer-s3==0.11.0 -tensorizer>=2.9.0 -lm-eval==0.4.8 -buildkite-test-collector==0.1.9 -lm-eval[api]==0.4.8 # required for model evaluation test - -# required for quantization test -bitsandbytes>=0.45.3 - -# required for minicpmo_26 test -vector_quantize_pytorch -vocos - -# required for Basic Models Test -blobfile # required for kimi-vl test -matplotlib # required for qwen-vl test - -# required for Multi-Modal Models Test (Standard) -num2words # required for smolvlm test -pqdm -timm # required for internvl test -mistral-common==1.6.2 - -schemathesis==3.39.15 # Required for openai schema test. -mteb>=1.38.11, <2 # required for mteb test +fastsafetensors>=0.1.10 +pydantic>=2.10 # 2.9 leads to error on python 3.10 diff --git a/requirements/test.in b/requirements/test.in index e8f44059fcf87..85c96df8e8f4c 100644 --- a/requirements/test.in +++ b/requirements/test.in @@ -42,6 +42,7 @@ schemathesis>=3.39.15 # Required for openai schema test. bitsandbytes>=0.45.3 buildkite-test-collector==0.1.9 + genai_perf==0.0.8 tritonclient==2.51.0 @@ -51,4 +52,4 @@ numpy runai-model-streamer==0.11.0 runai-model-streamer-s3==0.11.0 fastsafetensors>=0.1.10 -pydantic>=2.10 # 2.9 leads to error on python 3.10 \ No newline at end of file +pydantic>=2.10 # 2.9 leads to error on python 3.10 diff --git a/tests/standalone_tests/pytorch_nightly_dependency.sh b/tests/standalone_tests/pytorch_nightly_dependency.sh new file mode 100644 index 0000000000000..cb531e13ecb81 --- /dev/null +++ b/tests/standalone_tests/pytorch_nightly_dependency.sh @@ -0,0 +1,42 @@ +#!/bin/sh +# This script tests if the nightly torch packages are not overridden by the dependencies + +set -e +set -x + +cd /vllm-workspace/ + +rm -rf .venv + +uv venv .venv + +source .venv/bin/activate + +# check the environment +uv pip freeze + +echo ">>> Installing nightly torch packages" +uv pip install --quiet torch torchvision torchaudio --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu128 + +echo ">>> Capturing torch-related versions before requirements install" +uv pip freeze | grep -E '^torch|^torchvision|^torchaudio' | sort > before.txt +echo "Before:" +cat before.txt + +echo ">>> Installing requirements/nightly_torch_test.txt" +uv pip install --quiet -r requirements/nightly_torch_test.txt + +echo ">>> Capturing torch-related versions after requirements install" +uv pip freeze | grep -E '^torch|^torchvision|^torchaudio' | sort > after.txt +echo "After:" +cat after.txt + +echo ">>> Comparing versions" +if diff before.txt after.txt; then + echo "torch version not overridden." +else + echo "torch version overridden by nightly_torch_test.txt, \ + if the dependency is not triggered by the pytroch nightly test,\ + please add the dependency to the list 'white_list' in tools/generate_nightly_torch_test.py" + exit 1 +fi diff --git a/tools/generate_nightly_torch_test.py b/tools/generate_nightly_torch_test.py new file mode 100644 index 0000000000000..a3d7f7a609ba6 --- /dev/null +++ b/tools/generate_nightly_torch_test.py @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +""" +Generates specialized requirements files for nightly PyTorch testing. + +This script reads the main test requirements input file (`requirements/test.in`) +and splits its content into two files: +1. `requirements/nightly_torch_test.txt`: Contains dependencies +except PyTorch-related. +2. `torch_nightly_test.txt`: Contains only PyTorch-related packages. +""" + +input_file = "requirements/test.in" +output_file = "requirements/nightly_torch_test.txt" + +# white list of packages that are not compatible with PyTorch nightly directly +# with pip install. Please add your package to this list if it is not compatible +# or make the dependency test fails. +white_list = ["torch", "torchaudio", "torchvision", "mamba_ssm"] + +with open(input_file) as f: + lines = f.readlines() + +skip_next = False + +for line in lines: + if skip_next: + if line.startswith((" ", "\t")) or line.strip() == "": + continue + skip_next = False + + if any(k in line.lower() for k in white_list): + skip_next = True + continue From e11093068043a780ba8e778cdfcff8291d3f5b8c Mon Sep 17 00:00:00 2001 From: Thomas Parnell Date: Fri, 27 Jun 2025 06:06:59 +0200 Subject: [PATCH 169/211] [Fix] Fix gemma CI test failing on main (#20124) Signed-off-by: Thomas Parnell --- .../models/language/generation/test_gemma.py | 29 ++++++++++++------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/tests/models/language/generation/test_gemma.py b/tests/models/language/generation/test_gemma.py index ed0f0c19a0411..5be4ae874e615 100644 --- a/tests/models/language/generation/test_gemma.py +++ b/tests/models/language/generation/test_gemma.py @@ -7,14 +7,21 @@ MODELS = ["google/gemma-2b", "google/gemma-2-2b", "google/gemma-3-4b-it"] @pytest.mark.parametrize("model", MODELS) -def test_dummy_loader(vllm_runner, model: str) -> None: - with vllm_runner( - model, - load_format="dummy", - ) as llm: - normalizers = llm.collective_rpc(lambda self: self.worker.model_runner. - model.model.normalizer.cpu().item()) - assert np.allclose( - normalizers, - llm.llm_engine.model_config.hf_config.hidden_size**0.5, - rtol=1e-3) +def test_dummy_loader(vllm_runner, monkeypatch, model: str) -> None: + with monkeypatch.context() as m: + m.setenv("VLLM_ALLOW_INSECURE_SERIALIZATION", "1") + with vllm_runner( + model, + load_format="dummy", + ) as llm: + if model == "google/gemma-3-4b-it": + normalizers = llm.model.collective_rpc( + lambda self: self.model_runner.model.language_model.model. + normalizer.cpu().item()) + config = llm.model.llm_engine.model_config.hf_config.text_config + else: + normalizers = llm.model.collective_rpc( + lambda self: self.model_runner.model.model.normalizer.cpu( + ).item()) + config = llm.model.llm_engine.model_config.hf_config + assert np.allclose(normalizers, config.hidden_size**0.5, rtol=2e-3) From cd4cfee68902dcad9498b3d9d4530b817499d592 Mon Sep 17 00:00:00 2001 From: "wang.yuqi" Date: Fri, 27 Jun 2025 12:10:04 +0800 Subject: [PATCH 170/211] [Model][1/N] Automatic conversion of CrossEncoding model (#20012) Signed-off-by: wang.yuqi --- tests/models/language/pooling/mteb_utils.py | 11 +- vllm/config.py | 29 ++- vllm/model_executor/models/bert_with_rope.py | 149 +------------- vllm/model_executor/models/config.py | 200 +++++++++++++++++++ vllm/model_executor/models/qwen3.py | 17 +- 5 files changed, 239 insertions(+), 167 deletions(-) create mode 100644 vllm/model_executor/models/config.py diff --git a/tests/models/language/pooling/mteb_utils.py b/tests/models/language/pooling/mteb_utils.py index 21d55c418c363..0284e69f3f0e2 100644 --- a/tests/models/language/pooling/mteb_utils.py +++ b/tests/models/language/pooling/mteb_utils.py @@ -43,7 +43,7 @@ class VllmMtebEncoder(mteb.Encoder): # issues by randomizing the order. r = self.rng.permutation(len(sentences)) sentences = [sentences[i] for i in r] - outputs = self.model.encode(sentences, use_tqdm=False) + outputs = self.model.embed(sentences, use_tqdm=False) embeds = np.array(outputs) embeds = embeds[np.argsort(r)] return embeds @@ -250,16 +250,19 @@ def mteb_test_rerank_models(hf_runner, with vllm_runner(model_info.name, task="score", max_model_len=None, + max_num_seqs=8, **vllm_extra_kwargs) as vllm_model: + model_config = vllm_model.model.llm_engine.model_config + if model_info.architecture: - assert (model_info.architecture - in vllm_model.model.llm_engine.model_config.architectures) + assert (model_info.architecture in model_config.architectures) + assert model_config.hf_config.num_labels == 1 vllm_main_score = run_mteb_rerank(VllmMtebEncoder(vllm_model), tasks=MTEB_RERANK_TASKS, languages=MTEB_RERANK_LANGS) - vllm_dtype = vllm_model.model.llm_engine.model_config.dtype + vllm_dtype = model_config.dtype with hf_runner(model_info.name, is_cross_encoder=True, dtype="float32") as hf_model: diff --git a/vllm/config.py b/vllm/config.py index 856b361531168..7a3329aea5f78 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -569,6 +569,10 @@ class ModelConfig: else: self.truncation_side = "right" + model_info, arch = self.registry.inspect_model_cls(self.architectures) + self._model_info = model_info + self._architecture = arch + self.pooler_config = self._init_pooler_config() self.dtype = _get_and_verify_dtype( @@ -660,8 +664,18 @@ class ModelConfig: @property def architectures(self) -> list[str]: + # architectures in the model config. return getattr(self.hf_config, "architectures", []) + @property + def architecture(self) -> str: + # The architecture vllm actually used. + return self._architecture + + @property + def model_info(self) -> dict[str, Any]: + return self._model_info + def maybe_pull_model_tokenizer_for_s3(self, model: str, tokenizer: str) -> None: """Pull model/tokenizer from S3 to temporary directory when needed. @@ -4450,6 +4464,9 @@ class VllmConfig: def __post_init__(self): """Verify configs are valid & consistent with each other. """ + + self.try_verify_and_update_config() + if self.model_config is not None: self.model_config.verify_async_output_proc(self.parallel_config, self.speculative_config, @@ -4694,11 +4711,21 @@ class VllmConfig: batch_size_capture_list) def recalculate_max_model_len(self, max_model_len: int): + # Can only be called in try_verify_and_update_config model_config = self.model_config max_model_len = model_config.get_and_verify_max_len(max_model_len) self.model_config.max_model_len = max_model_len self.scheduler_config.max_model_len = max_model_len - self.compute_hash() + + def try_verify_and_update_config(self): + architecture = getattr(self.model_config, "architecture", None) + if architecture is None: + return + + from vllm.model_executor.models.config import MODELS_CONFIG_MAP + cls = MODELS_CONFIG_MAP.get(architecture, None) + if cls is not None: + cls.verify_and_update_config(self) def __str__(self): return ( diff --git a/vllm/model_executor/models/bert_with_rope.py b/vllm/model_executor/models/bert_with_rope.py index 0f22393c79d98..0b7350f07d3f6 100644 --- a/vllm/model_executor/models/bert_with_rope.py +++ b/vllm/model_executor/models/bert_with_rope.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project from collections.abc import Iterable -from copy import deepcopy from typing import Optional import torch @@ -12,7 +11,6 @@ from vllm.attention import Attention, AttentionType from vllm.compilation.decorators import support_torch_compile from vllm.config import CacheConfig, VllmConfig from vllm.distributed import get_tensor_model_parallel_world_size -from vllm.logger import init_logger from vllm.model_executor.layers.activation import (get_act_and_mul_fn, get_act_fn) from vllm.model_executor.layers.linear import (ColumnParallelLinear, @@ -30,8 +28,6 @@ from vllm.model_executor.models.interfaces import SupportsQuant from vllm.model_executor.models.utils import WeightsMapper from vllm.sequence import IntermediateTensors -logger = init_logger(__name__) - class BertWithRopeEmbedding(nn.Module): @@ -408,7 +404,7 @@ class BertWithRope(nn.Module, SupportsV0Only, SupportsQuant): def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): super().__init__() self.vllm_config = vllm_config - self.config = self.config_verify(vllm_config) + self.config = vllm_config.model_config.hf_config self.embeddings = BertWithRopeEmbedding(self.config) self.encoder = BertWithRopeEncoder( vllm_config=vllm_config, @@ -416,9 +412,6 @@ class BertWithRope(nn.Module, SupportsV0Only, SupportsQuant): rotary_kwargs=self.config.rotary_kwargs, prefix=f"{prefix}.encoder") - def config_verify(self, vllm_config): - raise NotImplementedError - def forward( self, input_ids: Optional[torch.Tensor], @@ -490,95 +483,6 @@ class NomicBertModel(BertWithRope): "norm2": "mlp_ln", }) - def config_verify(self, vllm_config): - config = vllm_config.model_config.hf_config - - assert config.__class__.__name__ == "NomicBertConfig" - assert config.activation_function in ["swiglu", "gelu"] - config.position_embedding_type = getattr(config, - "position_embedding_type", - "rope") - - if config.activation_function == "swiglu": - config.hidden_act = "silu" - else: - config.hidden_act = config.activation_function - - assert (config.mlp_fc1_bias == config.mlp_fc2_bias == - config.qkv_proj_bias) - config.bias = config.qkv_proj_bias - - assert config.rotary_emb_scale_base is None - assert not config.rotary_emb_interleaved - - config.layer_norm_eps = config.layer_norm_epsilon - config.intermediate_size = config.n_inner - config.hidden_size = config.n_embd - config.num_hidden_layers = config.n_layer - - head_dim = config.hidden_size // config.num_attention_heads - rotary_emb_dim = head_dim * config.rotary_emb_fraction - max_trained_positions = getattr(config, "max_trained_positions", 2048) - config.rotary_kwargs = { - "head_size": head_dim, - "rotary_dim": rotary_emb_dim, - "max_position": max_trained_positions, - "base": getattr(config, "rope_theta", config.rotary_emb_base), - "rope_scaling": getattr(config, "rope_scaling", None) - } - - # we ignore config.rotary_scaling_factor so that for datasets shorter - # than max_trained_positions 2048, the results are consistent - # with SentenceTransformer. - # The context extension uses vllm style rope_theta and rope_scaling. - # See #17785 #18755 - if (not vllm_config.model_config.hf_overrides - and vllm_config.model_config.original_max_model_len is None): - # Default - # Reset max_model_len to max_trained_positions. - # nomic-embed-text-v2-moe the length is set to 512 - # by sentence_bert_config.json. - max_model_len_before = vllm_config.model_config.max_model_len - max_model_len = min(vllm_config.model_config.max_model_len, - max_trained_positions) - - vllm_config.recalculate_max_model_len(max_model_len) - logger.warning( - "Nomic context extension is disabled. " - "Changing max_model_len from %s to %s. " - "To enable context extension, see: " - "https://github.com/vllm-project/vllm/tree/main/examples/offline_inference/context_extension.html", - max_model_len_before, vllm_config.model_config.max_model_len) - else: - # We need to re-verify max_model_len to avoid lengths - # greater than position_embedding. - model_config = vllm_config.model_config - hf_text_config = model_config.hf_text_config - - if isinstance(model_config.hf_overrides, dict): - # hf_overrides_kw - max_model_len = model_config.hf_overrides.get( - "max_model_len", vllm_config.model_config.max_model_len) - else: - # hf_overrides_fn - # This might be overridden by sentence_bert_config.json. - max_model_len = vllm_config.model_config.max_model_len - - # reset hf_text_config for recalculate_max_model_len. - if hasattr(hf_text_config, "max_model_len"): - delattr(hf_text_config, "max_model_len") - hf_text_config.max_position_embeddings = max_trained_positions - hf_text_config.rope_scaling = config.rotary_kwargs["rope_scaling"] - - # The priority of sentence_bert_config.json is higher - # than max_position_embeddings - encoder_config = deepcopy(model_config.encoder_config) - encoder_config.pop("max_seq_length", None) - model_config.encoder_config = encoder_config - - vllm_config.recalculate_max_model_len(max_model_len) - return config - class GteNewModel(BertWithRope): # for https://huggingface.co/Alibaba-NLP/new-impl @@ -600,24 +504,6 @@ class GteNewModel(BertWithRope): layer.mlp.gate_up_proj.bias = None layer.mlp.gate_up_proj.skip_bias_add = True - def config_verify(self, vllm_config): - config = vllm_config.model_config.hf_config - - assert config.__class__.__name__ == "NewConfig" - assert config.hidden_act == "gelu" - - config.hidden_act = "geglu" - - head_dim = config.hidden_size // config.num_attention_heads - config.rotary_kwargs = { - "head_size": head_dim, - "rotary_dim": getattr(config, "rotary_emb_dim", head_dim), - "max_position": config.max_position_embeddings, - "base": config.rope_theta, - "rope_scaling": getattr(config, "rope_scaling", None) - } - return config - def split_up_gate_proj(self, weights: Iterable[tuple[str, torch.Tensor]]): n = "mlp.up_gate_proj" for name, weight in weights: @@ -652,24 +538,6 @@ class SnowflakeGteNewModel(GteNewModel): "attention.o_proj": "attn.out_proj", }) - def config_verify(self, vllm_config): - config = vllm_config.model_config.hf_config - - assert config.__class__.__name__ == "GteConfig" - assert config.hidden_act == "gelu" - - config.hidden_act = "geglu" - - head_dim = config.hidden_size // config.num_attention_heads - config.rotary_kwargs = { - "head_size": head_dim, - "rotary_dim": getattr(config, "rotary_emb_dim", head_dim), - "max_position": config.max_position_embeddings, - "base": config.rope_theta, - "rope_scaling": getattr(config, "rope_scaling", None) - } - return config - class JinaRobertaModel(BertWithRope): # for https://huggingface.co/jinaai/jina-embeddings-v3 @@ -685,21 +553,6 @@ class JinaRobertaModel(BertWithRope): "norm2": "mlp_ln", }) - def config_verify(self, vllm_config): - config = vllm_config.model_config.hf_config - - assert config.__class__.__name__ == "XLMRobertaFlashConfig" - - head_dim = config.hidden_size // config.num_attention_heads - config.rotary_kwargs = { - "head_size": head_dim, - "rotary_dim": getattr(config, "rotary_emb_dim", head_dim), - "max_position": config.max_position_embeddings, - "base": getattr(config, "rope_theta", config.rotary_emb_base), - "rope_scaling": getattr(config, "rope_scaling", None) - } - return config - def forward( self, input_ids: torch.Tensor, diff --git a/vllm/model_executor/models/config.py b/vllm/model_executor/models/config.py new file mode 100644 index 0000000000000..7b5345704ad00 --- /dev/null +++ b/vllm/model_executor/models/config.py @@ -0,0 +1,200 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +from copy import deepcopy +from typing import TYPE_CHECKING + +from vllm.logger import init_logger + +if TYPE_CHECKING: + from vllm.config import VllmConfig + +logger = init_logger(__name__) + + +class VerifyAndUpdateConfig: + + @staticmethod + def verify_and_update_config(vllm_config: "VllmConfig") -> None: + raise NotImplementedError + + +class GteNewModelConfig(VerifyAndUpdateConfig): + + @staticmethod + def verify_and_update_config(vllm_config: "VllmConfig") -> None: + config = vllm_config.model_config.hf_config + + assert config.__class__.__name__ == "NewConfig" + assert config.hidden_act == "gelu" + + config.hidden_act = "geglu" + + head_dim = config.hidden_size // config.num_attention_heads + config.rotary_kwargs = { + "head_size": head_dim, + "rotary_dim": getattr(config, "rotary_emb_dim", head_dim), + "max_position": config.max_position_embeddings, + "base": config.rope_theta, + "rope_scaling": getattr(config, "rope_scaling", None) + } + + +class JinaRobertaModelConfig(VerifyAndUpdateConfig): + + @staticmethod + def verify_and_update_config(vllm_config: "VllmConfig") -> None: + config = vllm_config.model_config.hf_config + + if config.position_embedding_type == "rotary": + assert config.__class__.__name__ == "XLMRobertaFlashConfig" + + head_dim = config.hidden_size // config.num_attention_heads + config.rotary_kwargs = { + "head_size": head_dim, + "rotary_dim": getattr(config, "rotary_emb_dim", head_dim), + "max_position": config.max_position_embeddings, + "base": getattr(config, "rope_theta", config.rotary_emb_base), + "rope_scaling": getattr(config, "rope_scaling", None) + } + + +class NomicBertModelConfig(VerifyAndUpdateConfig): + + @staticmethod + def verify_and_update_config(vllm_config: "VllmConfig") -> None: + config = vllm_config.model_config.hf_config + + assert config.__class__.__name__ == "NomicBertConfig" + assert config.activation_function in ["swiglu", "gelu"] + config.position_embedding_type = getattr(config, + "position_embedding_type", + "rope") + + if config.activation_function == "swiglu": + config.hidden_act = "silu" + else: + config.hidden_act = config.activation_function + + assert (config.mlp_fc1_bias == config.mlp_fc2_bias == + config.qkv_proj_bias) + config.bias = config.qkv_proj_bias + + assert config.rotary_emb_scale_base is None + assert not config.rotary_emb_interleaved + + config.layer_norm_eps = config.layer_norm_epsilon + config.intermediate_size = config.n_inner + config.hidden_size = config.n_embd + config.num_hidden_layers = config.n_layer + + head_dim = config.hidden_size // config.num_attention_heads + rotary_emb_dim = head_dim * config.rotary_emb_fraction + max_trained_positions = getattr(config, "max_trained_positions", 2048) + config.rotary_kwargs = { + "head_size": head_dim, + "rotary_dim": rotary_emb_dim, + "max_position": max_trained_positions, + "base": getattr(config, "rope_theta", config.rotary_emb_base), + "rope_scaling": getattr(config, "rope_scaling", None) + } + + # we ignore config.rotary_scaling_factor so that for datasets shorter + # than max_trained_positions 2048, the results are consistent + # with SentenceTransformer. + # The context extension uses vllm style rope_theta and rope_scaling. + # See #17785 #18755 + if (not vllm_config.model_config.hf_overrides + and vllm_config.model_config.original_max_model_len is None): + # Default + # Reset max_model_len to max_trained_positions. + # nomic-embed-text-v2-moe the length is set to 512 + # by sentence_bert_config.json. + max_model_len_before = vllm_config.model_config.max_model_len + max_model_len = min(vllm_config.model_config.max_model_len, + max_trained_positions) + + vllm_config.recalculate_max_model_len(max_model_len) + logger.warning( + "Nomic context extension is disabled. " + "Changing max_model_len from %s to %s. " + "To enable context extension, see: " + "https://github.com/vllm-project/vllm/tree/main/examples/offline_inference/context_extension.html", + max_model_len_before, vllm_config.model_config.max_model_len) + else: + # We need to re-verify max_model_len to avoid lengths + # greater than position_embedding. + model_config = vllm_config.model_config + hf_text_config = model_config.hf_text_config + + if isinstance(model_config.hf_overrides, dict): + # hf_overrides_kw + max_model_len = model_config.hf_overrides.get( + "max_model_len", vllm_config.model_config.max_model_len) + else: + # hf_overrides_fn + # This might be overridden by sentence_bert_config.json. + max_model_len = vllm_config.model_config.max_model_len + + # reset hf_text_config for recalculate_max_model_len. + if hasattr(hf_text_config, "max_model_len"): + delattr(hf_text_config, "max_model_len") + hf_text_config.max_position_embeddings = max_trained_positions + hf_text_config.rope_scaling = config.rotary_kwargs["rope_scaling"] + + # The priority of sentence_bert_config.json is higher + # than max_position_embeddings + encoder_config = deepcopy(model_config.encoder_config) + encoder_config.pop("max_seq_length", None) + model_config.encoder_config = encoder_config + + vllm_config.recalculate_max_model_len(max_model_len) + + +class Qwen3ForSequenceClassificationConfig(VerifyAndUpdateConfig): + + @staticmethod + def verify_and_update_config(vllm_config: "VllmConfig") -> None: + config = vllm_config.model_config.hf_config + + is_original_qwen3_reranker = getattr(config, + "is_original_qwen3_reranker", + False) + + if not is_original_qwen3_reranker: + return + + tokens = getattr(config, "classifier_from_token", None) + assert tokens is not None and len(tokens) == 2, \ + ("Try loading the original Qwen3 Reranker?, see: " + "https://github.com/vllm-project/vllm/tree/main/examples/offline_inference/qwen3_reranker.py") + config.num_labels = 1 + + +class SnowflakeGteNewModelConfig(VerifyAndUpdateConfig): + + @staticmethod + def verify_and_update_config(vllm_config: "VllmConfig") -> None: + config = vllm_config.model_config.hf_config + + assert config.__class__.__name__ == "GteConfig" + assert config.hidden_act == "gelu" + + config.hidden_act = "geglu" + + head_dim = config.hidden_size // config.num_attention_heads + config.rotary_kwargs = { + "head_size": head_dim, + "rotary_dim": getattr(config, "rotary_emb_dim", head_dim), + "max_position": config.max_position_embeddings, + "base": config.rope_theta, + "rope_scaling": getattr(config, "rope_scaling", None) + } + + +MODELS_CONFIG_MAP: dict[str, type[VerifyAndUpdateConfig]] = { + "GteModel": SnowflakeGteNewModelConfig, + "GteNewModel": GteNewModelConfig, + "NomicBertModel": NomicBertModelConfig, + "Qwen3ForSequenceClassification": Qwen3ForSequenceClassificationConfig, + "XLMRobertaModel": JinaRobertaModelConfig, +} diff --git a/vllm/model_executor/models/qwen3.py b/vllm/model_executor/models/qwen3.py index 216c1f1c7ff74..1224ba7abc756 100644 --- a/vllm/model_executor/models/qwen3.py +++ b/vllm/model_executor/models/qwen3.py @@ -400,22 +400,10 @@ class Qwen3ForSequenceClassification(nn.Module, SupportsLoRA, def load_weights_from_original_qwen3_reranker( self, weights: Iterable[tuple[str, torch.Tensor]]): - tokens = getattr(self.config, "classifier_from_token", None) - assert tokens is not None and len(tokens) == 2, \ - ("Try loading the original Qwen3 Reranker?, see: " - "https://github.com/vllm-project/vllm/tree/main/examples/offline_inference/qwen3_reranker.py") - self.config.num_labels = 1 model_config = self.vllm_config.model_config - + tokens = getattr(self.config, "classifier_from_token", None) device = self.score.weight.device - self.score = RowParallelLinear(self.config.hidden_size, - self.config.num_labels, - quant_config=self.quant_config, - input_is_parallel=False, - bias=False, - prefix=maybe_prefix( - self.prefix, "score")).to(device) if self.config.tie_word_embeddings: self.lm_head = self.model.embed_tokens @@ -443,5 +431,6 @@ class Qwen3ForSequenceClassification(nn.Module, SupportsLoRA, self.score.weight.data.copy_(weight) del self.lm_head - loaded_weights.add("classifier.weight") + loaded_weights.add("score.weight") loaded_weights.discard("lm_head.weight") + return loaded_weights From 6e244ae09121b2c1cdcd4db51076decc4a724c5c Mon Sep 17 00:00:00 2001 From: Yazan Sharaya <97323283+Yazan-Sharaya@users.noreply.github.com> Date: Fri, 27 Jun 2025 07:44:14 +0300 Subject: [PATCH 171/211] [Perf][Frontend] eliminate api_key and x_request_id headers middleware overhead (#19946) Signed-off-by: Yazan-Sharaya --- docs/serving/openai_compatible_server.md | 5 - .../openai/test_optional_middleware.py | 116 ++++++++++++++++++ vllm/entrypoints/openai/api_server.py | 100 +++++++++++---- vllm/entrypoints/openai/cli_args.py | 2 +- 4 files changed, 190 insertions(+), 33 deletions(-) create mode 100644 tests/entrypoints/openai/test_optional_middleware.py diff --git a/docs/serving/openai_compatible_server.md b/docs/serving/openai_compatible_server.md index 00756e719992d..a3f1ef9fd8b6b 100644 --- a/docs/serving/openai_compatible_server.md +++ b/docs/serving/openai_compatible_server.md @@ -146,11 +146,6 @@ completion = client.chat.completions.create( Only `X-Request-Id` HTTP request header is supported for now. It can be enabled with `--enable-request-id-headers`. -> Note that enablement of the headers can impact performance significantly at high QPS -> rates. We recommend implementing HTTP headers at the router level (e.g. via Istio), -> rather than within the vLLM layer for this reason. -> See [this PR](https://github.com/vllm-project/vllm/pull/11529) for more details. - ??? Code ```python diff --git a/tests/entrypoints/openai/test_optional_middleware.py b/tests/entrypoints/openai/test_optional_middleware.py new file mode 100644 index 0000000000000..882fa0886ce30 --- /dev/null +++ b/tests/entrypoints/openai/test_optional_middleware.py @@ -0,0 +1,116 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +""" +Tests for middleware that's off by default and can be toggled through +server arguments, mainly --api-key and --enable-request-id-headers. +""" + +from http import HTTPStatus + +import pytest +import requests + +from ...utils import RemoteOpenAIServer + +# Use a small embeddings model for faster startup and smaller memory footprint. +# Since we are not testing any chat functionality, +# using a chat capable model is overkill. +MODEL_NAME = "intfloat/multilingual-e5-small" + + +@pytest.fixture(scope="module") +def server(request: pytest.FixtureRequest): + passed_params = [] + if hasattr(request, "param"): + passed_params = request.param + if isinstance(passed_params, str): + passed_params = [passed_params] + + args = [ + "--task", + "embed", + # use half precision for speed and memory savings in CI environment + "--dtype", + "float16", + "--max-model-len", + "512", + "--enforce-eager", + "--max-num-seqs", + "2", + *passed_params + ] + with RemoteOpenAIServer(MODEL_NAME, args) as remote_server: + yield remote_server + + +@pytest.mark.asyncio +async def test_no_api_token(server: RemoteOpenAIServer): + response = requests.get(server.url_for("v1/models")) + assert response.status_code == HTTPStatus.OK + + +@pytest.mark.asyncio +async def test_no_request_id_header(server: RemoteOpenAIServer): + response = requests.get(server.url_for("health")) + assert "X-Request-Id" not in response.headers + + +@pytest.mark.parametrize( + "server", + [["--api-key", "test"]], + indirect=True, +) +@pytest.mark.asyncio +async def test_missing_api_token(server: RemoteOpenAIServer): + response = requests.get(server.url_for("v1/models")) + assert response.status_code == HTTPStatus.UNAUTHORIZED + + +@pytest.mark.parametrize( + "server", + [["--api-key", "test"]], + indirect=True, +) +@pytest.mark.asyncio +async def test_passed_api_token(server: RemoteOpenAIServer): + response = requests.get(server.url_for("v1/models"), + headers={"Authorization": "Bearer test"}) + assert response.status_code == HTTPStatus.OK + + +@pytest.mark.parametrize( + "server", + [["--api-key", "test"]], + indirect=True, +) +@pytest.mark.asyncio +async def test_not_v1_api_token(server: RemoteOpenAIServer): + # Authorization check is skipped for any paths that + # don't start with /v1 (e.g. /v1/chat/completions). + response = requests.get(server.url_for("health")) + assert response.status_code == HTTPStatus.OK + + +@pytest.mark.parametrize( + "server", + ["--enable-request-id-headers"], + indirect=True, +) +@pytest.mark.asyncio +async def test_enable_request_id_header(server: RemoteOpenAIServer): + response = requests.get(server.url_for("health")) + assert "X-Request-Id" in response.headers + assert len(response.headers.get("X-Request-Id", "")) == 32 + + +@pytest.mark.parametrize( + "server", + ["--enable-request-id-headers"], + indirect=True, +) +@pytest.mark.asyncio +async def test_custom_request_id_header(server: RemoteOpenAIServer): + response = requests.get(server.url_for("health"), + headers={"X-Request-Id": "Custom"}) + assert "X-Request-Id" in response.headers + assert response.headers.get("X-Request-Id") == "Custom" diff --git a/vllm/entrypoints/openai/api_server.py b/vllm/entrypoints/openai/api_server.py index 681633a2aff77..f3fd154862711 100644 --- a/vllm/entrypoints/openai/api_server.py +++ b/vllm/entrypoints/openai/api_server.py @@ -14,7 +14,7 @@ import socket import tempfile import uuid from argparse import Namespace -from collections.abc import AsyncIterator +from collections.abc import AsyncIterator, Awaitable from contextlib import asynccontextmanager from functools import partial from http import HTTPStatus @@ -30,8 +30,9 @@ from fastapi.responses import JSONResponse, Response, StreamingResponse from prometheus_client import make_asgi_app from prometheus_fastapi_instrumentator import Instrumentator from starlette.concurrency import iterate_in_threadpool -from starlette.datastructures import State +from starlette.datastructures import URL, Headers, MutableHeaders, State from starlette.routing import Mount +from starlette.types import ASGIApp, Message, Receive, Scope, Send from typing_extensions import assert_never import vllm.envs as envs @@ -1061,6 +1062,74 @@ def load_log_config(log_config_file: Optional[str]) -> Optional[dict]: return None +class AuthenticationMiddleware: + """ + Pure ASGI middleware that authenticates each request by checking + if the Authorization header exists and equals "Bearer {api_key}". + + Notes + ----- + There are two cases in which authentication is skipped: + 1. The HTTP method is OPTIONS. + 2. The request path doesn't start with /v1 (e.g. /health). + """ + + def __init__(self, app: ASGIApp, api_token: str) -> None: + self.app = app + self.api_token = api_token + + def __call__(self, scope: Scope, receive: Receive, + send: Send) -> Awaitable[None]: + if scope["type"] not in ("http", + "websocket") or scope["method"] == "OPTIONS": + # scope["type"] can be "lifespan" or "startup" for example, + # in which case we don't need to do anything + return self.app(scope, receive, send) + root_path = scope.get("root_path", "") + url_path = URL(scope=scope).path.removeprefix(root_path) + headers = Headers(scope=scope) + # Type narrow to satisfy mypy. + if url_path.startswith("/v1") and headers.get( + "Authorization") != f"Bearer {self.api_token}": + response = JSONResponse(content={"error": "Unauthorized"}, + status_code=401) + return response(scope, receive, send) + return self.app(scope, receive, send) + + +class XRequestIdMiddleware: + """ + Middleware the set's the X-Request-Id header for each response + to a random uuid4 (hex) value if the header isn't already + present in the request, otherwise use the provided request id. + """ + + def __init__(self, app: ASGIApp) -> None: + self.app = app + + def __call__(self, scope: Scope, receive: Receive, + send: Send) -> Awaitable[None]: + if scope["type"] not in ("http", "websocket"): + return self.app(scope, receive, send) + + # Extract the request headers. + request_headers = Headers(scope=scope) + + async def send_with_request_id(message: Message) -> None: + """ + Custom send function to mutate the response headers + and append X-Request-Id to it. + """ + if message["type"] == "http.response.start": + response_headers = MutableHeaders(raw=message["headers"]) + request_id = request_headers.get("X-Request-Id", + uuid.uuid4().hex) + response_headers.append("X-Request-Id", request_id) + await send(message) + + return self.app(scope, receive, send_with_request_id) + + def build_app(args: Namespace) -> FastAPI: if args.disable_fastapi_docs: app = FastAPI(openapi_url=None, @@ -1108,33 +1177,10 @@ def build_app(args: Namespace) -> FastAPI: # Ensure --api-key option from CLI takes precedence over VLLM_API_KEY if token := args.api_key or envs.VLLM_API_KEY: - - @app.middleware("http") - async def authentication(request: Request, call_next): - if request.method == "OPTIONS": - return await call_next(request) - url_path = request.url.path - if app.root_path and url_path.startswith(app.root_path): - url_path = url_path[len(app.root_path):] - if not url_path.startswith("/v1"): - return await call_next(request) - if request.headers.get("Authorization") != "Bearer " + token: - return JSONResponse(content={"error": "Unauthorized"}, - status_code=401) - return await call_next(request) + app.add_middleware(AuthenticationMiddleware, api_token=token) if args.enable_request_id_headers: - logger.warning( - "CAUTION: Enabling X-Request-Id headers in the API Server. " - "This can harm performance at high QPS.") - - @app.middleware("http") - async def add_request_id(request: Request, call_next): - request_id = request.headers.get( - "X-Request-Id") or uuid.uuid4().hex - response = await call_next(request) - response.headers["X-Request-Id"] = request_id - return response + app.add_middleware(XRequestIdMiddleware) if envs.VLLM_DEBUG_LOG_API_SERVER_RESPONSE: logger.warning("CAUTION: Enabling log response in the API Server. " diff --git a/vllm/entrypoints/openai/cli_args.py b/vllm/entrypoints/openai/cli_args.py index dd4bd53046a35..f9bec84518688 100644 --- a/vllm/entrypoints/openai/cli_args.py +++ b/vllm/entrypoints/openai/cli_args.py @@ -216,7 +216,7 @@ def make_arg_parser(parser: FlexibleArgumentParser) -> FlexibleArgumentParser: "--enable-request-id-headers", action="store_true", help="If specified, API server will add X-Request-Id header to " - "responses. Caution: this hurts performance at high QPS.") + "responses.") parser.add_argument( "--enable-auto-tool-choice", action="store_true", From dec197e3e5d14e1d4fbad61b565e151f52976c0f Mon Sep 17 00:00:00 2001 From: "Chendi.Xue" Date: Fri, 27 Jun 2025 00:48:13 -0500 Subject: [PATCH 172/211] Quick Fix by adding conditional import for flash_attn_varlen_func in flash_attn (#20143) Signed-off-by: Chendi.Xue --- vllm/attention/utils/fa_utils.py | 4 ++++ vllm/v1/attention/backends/flash_attn.py | 10 +++++++--- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/vllm/attention/utils/fa_utils.py b/vllm/attention/utils/fa_utils.py index 36fd2d231bc5f..f8b00565f0517 100644 --- a/vllm/attention/utils/fa_utils.py +++ b/vllm/attention/utils/fa_utils.py @@ -66,3 +66,7 @@ def get_flash_attn_version(requires_alibi: bool = False) -> Optional[int]: def flash_attn_supports_fp8() -> bool: return get_flash_attn_version() == 3 and \ current_platform.get_device_capability().major == 9 + + +def is_flash_attn_varlen_func_available() -> bool: + return current_platform.is_cuda() or current_platform.is_xpu() diff --git a/vllm/v1/attention/backends/flash_attn.py b/vllm/v1/attention/backends/flash_attn.py index 42b5997f085b1..527b31153410b 100755 --- a/vllm/v1/attention/backends/flash_attn.py +++ b/vllm/v1/attention/backends/flash_attn.py @@ -14,10 +14,14 @@ from vllm.attention.backends.abstract import (AttentionBackend, AttentionImpl, from vllm.attention.layer import Attention from vllm.attention.ops.merge_attn_states import merge_attn_states from vllm.attention.utils.fa_utils import (flash_attn_supports_fp8, - flash_attn_varlen_func, get_flash_attn_version, - get_scheduler_metadata, - reshape_and_cache_flash) + is_flash_attn_varlen_func_available) + +if is_flash_attn_varlen_func_available(): + from vllm.attention.utils.fa_utils import (flash_attn_varlen_func, + get_scheduler_metadata, + reshape_and_cache_flash) + from vllm.config import VllmConfig, get_layers_from_vllm_config from vllm.logger import init_logger from vllm.utils import cdiv From d1c956dc0f8b64af7a43e23f9fc2850756dea1cb Mon Sep 17 00:00:00 2001 From: Robert Shaw <114415538+robertgshaw2-redhat@users.noreply.github.com> Date: Fri, 27 Jun 2025 03:16:26 -0400 Subject: [PATCH 173/211] Gemma3n (Text-only) (#20134) Signed-off-by: rshaw@neuralmagic.com Signed-off-by: Roger Wang Co-authored-by: Roger Wang --- docs/models/supported_models.md | 4 + tests/models/registry.py | 2 + vllm/model_executor/layers/activation.py | 51 ++ vllm/model_executor/models/gemma3n.py | 811 +++++++++++++++++++++++ vllm/model_executor/models/registry.py | 2 + 5 files changed, 870 insertions(+) create mode 100644 vllm/model_executor/models/gemma3n.py diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md index 04d9923f92105..9782fd1781512 100644 --- a/docs/models/supported_models.md +++ b/docs/models/supported_models.md @@ -336,6 +336,7 @@ Specified using `--task generate`. | `GemmaForCausalLM` | Gemma | `google/gemma-2b`, `google/gemma-1.1-2b-it`, etc. | ✅︎ | ✅︎ | ✅︎ | | `Gemma2ForCausalLM` | Gemma 2 | `google/gemma-2-9b`, `google/gemma-2-27b`, etc. | ✅︎ | ✅︎ | ✅︎ | | `Gemma3ForCausalLM` | Gemma 3 | `google/gemma-3-1b-it`, etc. | ✅︎ | ✅︎ | ✅︎ | +| `Gemma3nForConditionalGeneration` | Gemma 3n | `google/gemma-3n-E2B-it`, `google/gemma-3n-E4B-it`, etc. | | | ✅︎ | | `GlmForCausalLM` | GLM-4 | `THUDM/glm-4-9b-chat-hf`, etc. | ✅︎ | ✅︎ | ✅︎ | | `Glm4ForCausalLM` | GLM-4-0414 | `THUDM/GLM-4-32B-0414`, etc. | ✅︎ | ✅︎ | ✅︎ | | `GPT2LMHeadModel` | GPT-2 | `gpt2`, `gpt2-xl`, etc. | | ✅︎ | ✅︎ | @@ -392,6 +393,9 @@ Specified using `--task generate`. !!! note Currently, the ROCm version of vLLM supports Mistral and Mixtral only for context lengths up to 4096. +!!! note + Only text inputs are currently supported for `Gemma3nForConditionalGeneration`. To use this model, please upgrade Hugging Face Transformers to version 4.53.0. + ### Pooling Models See [this page](./pooling_models.md) for more information on how to use pooling models. diff --git a/tests/models/registry.py b/tests/models/registry.py index 4a587e39ad4cd..1bcb4f88a30ff 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -164,6 +164,8 @@ _TEXT_GENERATION_EXAMPLE_MODELS = { "GemmaForCausalLM": _HfExamplesInfo("google/gemma-1.1-2b-it"), "Gemma2ForCausalLM": _HfExamplesInfo("google/gemma-2-9b"), "Gemma3ForCausalLM": _HfExamplesInfo("google/gemma-3-1b-it"), + "Gemma3nForConditionalGeneration": _HfExamplesInfo("google/gemma-3n-E2B-it", # noqa: E501 + min_transformers_version="4.53"), "GlmForCausalLM": _HfExamplesInfo("THUDM/glm-4-9b-chat-hf"), "Glm4ForCausalLM": _HfExamplesInfo("THUDM/GLM-4-9B-0414"), "GPT2LMHeadModel": _HfExamplesInfo("openai-community/gpt2", diff --git a/vllm/model_executor/layers/activation.py b/vllm/model_executor/layers/activation.py index cc9c8d445ab6c..1fd96fe405b9a 100644 --- a/vllm/model_executor/layers/activation.py +++ b/vllm/model_executor/layers/activation.py @@ -135,6 +135,57 @@ class MulAndSilu(CustomOp): # def forward_xpu(self, x: torch.Tensor) -> torch.Tensor: +@CustomOp.register("gelu_and_mul_sparse") +class GeluAndMulSparse(CustomOp): + """An activation function for GeluAndMulSparse. + This activation function is used in Gemma3n. It computes: + up_proj = self.up_proj(x) + gate_proj = self.gate_proj(x) + gate_proj = self._gaussian_topk(gate_proj) # sparsity + activations = self.act_fn(gate_proj) # gelu + down_proj = self.down_proj(activations * up_proj) + Shapes: + x: (num_tokens, 2 * d) or (batch_size, seq_len, 2 * d) + return: (num_tokens, d) or (batch_size, seq_len, d) + """ + + def __init__(self, activation_sparsity: float, approximate: str = "none"): + super().__init__() + # Gelu. + self.approximate = approximate + if approximate not in ("none", "tanh"): + raise ValueError(f"Unknown approximate mode: {approximate}") + + # Sparsity. + if activation_sparsity == 0.0: + raise ValueError( + "activation_sparsity is 0.0. Please use GeluAndMul.") + target_sparsity_tensor = torch.tensor(activation_sparsity, + dtype=torch.float32) + normal_dist = torch.distributions.normal.Normal(0, 1) + self.std_multiplier = normal_dist.icdf(target_sparsity_tensor) + + def _gaussian_topk(self, x: torch.Tensor) -> torch.Tensor: + """Get % sparse percentile of the Gaussian distribution.""" + # NOTE(rob): for TP>1, we could all-gather to get the means/std. + # But we do not do this because in expectation they are the same + # and in practice the eval scores are good without gathering. + mean = torch.mean(x, dim=-1, keepdim=True) + std = torch.std(x, dim=-1, keepdim=True, unbiased=False) + cutoff_x = mean + std * self.std_multiplier + return nn.functional.relu(x - cutoff_x) + + def forward_native(self, x: torch.Tensor) -> torch.Tensor: + """PyTorch-native implementation equivalent to forward().""" + d = x.shape[-1] // 2 + out = self._gaussian_topk(x[..., :d]) + out = F.gelu(out, approximate=self.approximate) + return out * x[..., d:] + + def forward_cuda(self, x: torch.Tensor) -> torch.Tensor: + return self.forward_native(x) + + @CustomOp.register("gelu_and_mul") class GeluAndMul(CustomOp): """An activation function for GeGLU. diff --git a/vllm/model_executor/models/gemma3n.py b/vllm/model_executor/models/gemma3n.py new file mode 100644 index 0000000000000..7d163320e0d6a --- /dev/null +++ b/vllm/model_executor/models/gemma3n.py @@ -0,0 +1,811 @@ +# SPDX-License-Identifier: Apache-2.0 +# SPDX-FileCopyrightText: Copyright contributors to the vLLM project +# Copyright 2025 The vLLM team. +# Copyright 2025 Google Inc. HuggingFace Inc. team. All rights reserved. +# +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from collections.abc import Iterable +from typing import Optional, Union + +import torch +from torch import nn +from transformers.models.gemma3n.configuration_gemma3n import Gemma3nTextConfig + +from vllm.attention import Attention +from vllm.compilation.decorators import support_torch_compile +from vllm.config import CacheConfig, VllmConfig +from vllm.distributed import get_tensor_model_parallel_world_size +from vllm.logger import init_logger +from vllm.model_executor.layers.activation import (_ACTIVATION_REGISTRY, + GeluAndMul, + GeluAndMulSparse) +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import (ColumnParallelLinear, + MergedColumnParallelLinear, + QKVParallelLinear, + ReplicatedLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.vocab_parallel_embedding import ( + VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import ( + default_weight_loader, maybe_remap_kv_scale_name) +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import IntermediateTensors + +from .utils import (AutoWeightsLoader, extract_layer_index, + is_pp_missing_parameter, make_layers, maybe_prefix) + +logger = init_logger(__name__) + + +class Gemma3nAltUp(nn.Module): + """Alternating updates (Altup) + The AltUp module wraps transformer layers. The `predict` step modifies the + input to the transformer layer, and the `correct` step propagates the output + of the transformer layer to the sparsely updated dimensions. + See more in the research paper: + https://proceedings.neurips.cc/paper_files/paper/2023/file/f2059277ac6ce66e7e5543001afa8bb5-Paper-Conference.pdf + """ + + def __init__( + self, + hidden_size: int, + rms_norm_eps: float, + altup_num_inputs: int, + altup_coef_clip: float, + altup_active_idx: int, + prefix: str, + ): + super().__init__() + + self.altup_num_inputs = altup_num_inputs + self.altup_active_idx = altup_active_idx + self.altup_coef_clip = altup_coef_clip + + self.correction_coefs = ReplicatedLinear( + altup_num_inputs, + altup_num_inputs, + bias=False, + prefix=f"{prefix}.correction_coefs", + return_bias=False, + ) + self.prediction_coefs = ReplicatedLinear( + altup_num_inputs, + altup_num_inputs**2, + bias=False, + prefix=f"{prefix}.prediction_coefs", + return_bias=False, + ) + self.modality_router = ReplicatedLinear( + hidden_size, + altup_num_inputs, + bias=False, + prefix=f"{prefix}.modality_router", + return_bias=False, + ) + self.router_norm = RMSNorm( + hidden_size=hidden_size, + eps=rms_norm_eps, + ) + self.router_input_scale = torch.tensor( + hidden_size**-1.0, dtype=self.modality_router.weight.dtype) + self.correct_output_scale = nn.Parameter( + torch.zeros(hidden_size, dtype=torch.float32)) + + def _compute_router_modalities(self, x: torch.Tensor) -> torch.Tensor: + router_inputs = self.router_norm(x) * self.router_input_scale + routed = self.modality_router(router_inputs) + return torch.tanh(routed.float()).type_as(x) + + def scale_corrected_output(self, corrected: torch.Tensor) -> torch.Tensor: + return (corrected.type_as(self.correct_output_scale) * + self.correct_output_scale).type_as(corrected) + + def predict(self, hidden_states: torch.Tensor) -> torch.Tensor: + # hidden: [altup_num_inputs, num_tokens, hidden_size] + # modalities: [num_tokens, num_altup_inputs] + # all_coefs: [num_tokens, num_altup_inputs ** 2] + modalities = self._compute_router_modalities( + hidden_states[self.altup_active_idx]) + all_coefs = self.prediction_coefs(modalities) + + # Reshape and transpose the 2D matrix for the matmul. + # all_coefs_T: [num_tokens, num_altup_inputs, num_altup_inputs] + all_coefs_T = all_coefs.reshape( + -1, + self.altup_num_inputs, + self.altup_num_inputs, + ).permute(0, 2, 1) + + # hidden_states to [num_tokens, hidden_size, altup_num_inputs] + predictions = torch.matmul(hidden_states.permute(1, 2, 0), all_coefs_T) + # [altup_num_inputs, num_tokens, hidden_size] + predictions = predictions.permute(2, 0, 1) + predictions += hidden_states + return predictions.contiguous() + + def correct(self, predictions: torch.Tensor, + activated: torch.Tensor) -> torch.Tensor: + # predictions: [altup_num_inputs, num_tokens, hidden_size] + # activated: [num_tokens, hidden_size] + # modalities: [num_tokens, altup_num_inputs] + modalities = self._compute_router_modalities(activated) + # innovation: [num_tokens, altup_num_inputs] + innovation = activated - predictions[self.altup_active_idx] + # innovation: [altup_num_inputs, num_tokens, hidden_size] + innovation = innovation.repeat(self.altup_num_inputs, 1, 1) + + # Permute to [altup_num_inputs, num_tokens] as the last dim + # is a scalar applied to each altup input and expand on + # num_tokens dim for broadcastability over hidden_size. + # all_coefs: [num_tokens, altup_num_inputs] + all_coefs = self.correction_coefs(modalities) + 1.0 + # all_coefs: [altup_num_inputs, num_tokens, 1] + all_coefs = all_coefs.T.unsqueeze(-1) + + # Elementwise (broadcast over hidden_size). + corrected = torch.mul(innovation, all_coefs) + corrected += predictions + + return corrected.contiguous() + + +class Gemma3nLaurelBlock(nn.Module): + """Learned Augmented Residual Layer""" + + def __init__(self, hidden_size: int, laurel_rank: int, rms_norm_eps: float, + prefix: str): + super().__init__() + + self.linear_left = ColumnParallelLinear( + hidden_size, + laurel_rank, + bias=False, + prefix=f"{prefix}.linear_left", + return_bias=False, + ) + self.linear_right = RowParallelLinear(laurel_rank, + hidden_size, + bias=False, + prefix=f"{prefix}.linear_right", + return_bias=False) + self.post_laurel_norm = RMSNorm( + hidden_size=hidden_size, + eps=rms_norm_eps, + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + laurel_x = self.linear_left(x) + laurel_x = self.linear_right(laurel_x) + normed_laurel_x = self.post_laurel_norm(laurel_x) + return x + normed_laurel_x + + +class Gemma3nMLP(nn.Module): + + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_activation: str, + activation_sparsity: float = 0.0, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__() + self.gate_up_proj = MergedColumnParallelLinear( + hidden_size, + [intermediate_size] * 2, + bias=False, + quant_config=quant_config, + prefix=f"{prefix}.gate_up_proj", + ) + self.down_proj = RowParallelLinear( + intermediate_size, + hidden_size, + bias=False, + quant_config=quant_config, + prefix=f"{prefix}.down_proj", + ) + if hidden_activation != "gelu_pytorch_tanh": + raise ValueError( + "Gemma3 uses `gelu_pytorch_tanh` as the hidden activation " + "function. Please set `hidden_act` and `hidden_activation` to " + "`gelu_pytorch_tanh`.") + + self.act_fn = GeluAndMulSparse( + activation_sparsity=activation_sparsity, + approximate="tanh") if activation_sparsity > 0.0 else GeluAndMul( + approximate="tanh") + + def forward(self, x: torch.Tensor) -> torch.Tensor: + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class Gemma3nAttention(nn.Module): + + def __init__(self, + config: Gemma3nTextConfig, + hidden_size: int, + num_heads: int, + num_kv_heads: int, + head_dim: int, + max_position_embeddings: int, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "") -> None: + super().__init__() + self.config = config + self.hidden_size = hidden_size + tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = num_heads + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + self.total_num_kv_heads = num_kv_heads + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. + assert tp_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) + self.head_dim = head_dim + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + + self.qkv_proj = QKVParallelLinear( + hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=config.attention_bias, + quant_config=quant_config, + prefix=f"{prefix}.qkv_proj", + ) + self.o_proj = RowParallelLinear( + self.total_num_heads * self.head_dim, + hidden_size, + bias=config.attention_bias, + quant_config=quant_config, + prefix=f"{prefix}.o_proj", + ) + self.q_norm = RMSNorm(hidden_size=self.head_dim, + eps=config.rms_norm_eps) + self.k_norm = RMSNorm(hidden_size=self.head_dim, + eps=config.rms_norm_eps) + self.v_norm = RMSNorm(hidden_size=self.head_dim, + eps=config.rms_norm_eps, + has_weight=False) + + layer_idx = extract_layer_index(prefix) + if config.layer_types[layer_idx] == "sliding_attention": + self.sliding_window = config.sliding_window + rope_theta = config.rope_local_base_freq + rope_scaling = {"rope_type": "default"} + else: + self.sliding_window = None + rope_theta = config.rope_theta + rope_scaling = config.rope_scaling + + first_kv_shared_layer_idx = (config.num_hidden_layers - + config.num_kv_shared_layers) + self.is_kv_shared = layer_idx >= first_kv_shared_layer_idx + + if self.is_kv_shared: + # Last full attention layer is 1 before sharing + # Last sliding attention layer is 2 before sharing + offset = 2 if self.sliding_window is not None else 1 + kv_shared_layer_index = first_kv_shared_layer_idx - offset + kv_sharing_target_layer_name = f"model.language_model.layers.{kv_shared_layer_index}.self_attn.attn" # noqa: E501 + else: + kv_sharing_target_layer_name = None + + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=max_position_embeddings, + base=rope_theta, + is_neox_style=True, + rope_scaling=rope_scaling, + ) + + self.attn = Attention( + num_heads=self.num_heads, + head_size=self.head_dim, + scale=1.0, + num_kv_heads=self.num_kv_heads, + cache_config=cache_config, + quant_config=quant_config, + per_layer_sliding_window=self.sliding_window, + kv_sharing_target_layer_name=kv_sharing_target_layer_name, + prefix=f"{prefix}.attn") + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + **kwargs, + ) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + + q = q.unflatten(-1, (self.num_heads, self.head_dim)) + q = self.q_norm(q) + q = q.flatten(-2, -1) + k = k.unflatten(-1, (self.num_kv_heads, self.head_dim)) + k = self.k_norm(k) + k = k.flatten(-2, -1) + v = v.unflatten(-1, (self.num_kv_heads, self.head_dim)) + v = self.v_norm(v) + v = v.flatten(-2, -1) + + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v) + + output, _ = self.o_proj(attn_output) + return output + + +class Gemma3nDecoderLayer(nn.Module): + + def __init__( + self, + config: Gemma3nTextConfig, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__() + self.altup_active_idx = config.altup_active_idx + assert config.altup_correct_scale + + self.altup = Gemma3nAltUp( + hidden_size=config.hidden_size, + rms_norm_eps=config.rms_norm_eps, + altup_num_inputs=config.altup_num_inputs, + altup_coef_clip=config.altup_coef_clip, + altup_active_idx=config.altup_active_idx, + prefix=f"{prefix}.altup", + ) + self.self_attn = Gemma3nAttention( + config=config, + hidden_size=config.hidden_size, + num_heads=config.num_attention_heads, + num_kv_heads=config.num_key_value_heads, + head_dim=config.head_dim, + max_position_embeddings=config.max_position_embeddings, + cache_config=cache_config, + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) + self.mlp = Gemma3nMLP( + hidden_size=config.hidden_size, + # NOTE: Matformer https://github.com/huggingface/transformers/blob/a52478253bbe522a420e88ea3940d4d98a935300/src/transformers/models/gemma3n/modular_gemma3n.py#L258 # noqa: E501 + intermediate_size=config.intermediate_size[extract_layer_index( + prefix)], + hidden_activation=config.hidden_activation, + quant_config=quant_config, + activation_sparsity=config.activation_sparsity_pattern[ + extract_layer_index(prefix)], + prefix=f"{prefix}.mlp", + ) + self.laurel = Gemma3nLaurelBlock( + hidden_size=config.hidden_size, + laurel_rank=config.laurel_rank, + rms_norm_eps=config.rms_norm_eps, + prefix=f"{prefix}.laurel", + ) + + # NOTE(rob): should be ColumnParallelLinear and RowParallelLinear + # But, we need to add per_layer_input_gate(x) to per_layer_input. + # per_layer_input cannot be sharded, so we replicate for now. + self.per_layer_input_gate = ReplicatedLinear( + config.hidden_size, + config.hidden_size_per_layer_input, + bias=False, + prefix=f"{prefix}.per_layer_input_gate", + return_bias=False, + ) + self.per_layer_projection = ReplicatedLinear( + config.hidden_size_per_layer_input, + config.hidden_size, + bias=False, + prefix=f"{prefix}.per_layer_projection", + return_bias=False, + ) + + # LayerNorms. + self.input_layernorm = RMSNorm( + config.hidden_size, + eps=config.rms_norm_eps, + ) + self.post_attention_layernorm = RMSNorm( + config.hidden_size, + eps=config.rms_norm_eps, + ) + self.pre_feedforward_layernorm = RMSNorm( + config.hidden_size, + eps=config.rms_norm_eps, + ) + self.post_feedforward_layernorm = RMSNorm( + config.hidden_size, + eps=config.rms_norm_eps, + ) + self.post_per_layer_input_norm = RMSNorm( + config.hidden_size, + eps=config.rms_norm_eps, + ) + + self.act_fn = _ACTIVATION_REGISTRY[config.hidden_activation] + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + per_layer_input: torch.Tensor, + **kwargs, + ) -> tuple[torch.Tensor, torch.Tensor]: + + # ActUp (predict). + predictions = self.altup.predict(hidden_states) + active_prediction = predictions[self.altup_active_idx] + active_prediction_normed = self.input_layernorm(active_prediction) + laurel_output = self.laurel(active_prediction_normed) + + # Attention. + attn = self.self_attn( + positions=positions, + hidden_states=active_prediction_normed, + **kwargs, + ) + attn = self.post_attention_layernorm(attn) + attn_gated = attn + active_prediction + attn_laurel = (attn_gated + laurel_output) / torch.sqrt( + torch.tensor(2.0)) + + # MLP. + attn_norm = self.pre_feedforward_layernorm(attn_laurel) + attn_ffw = self.mlp(attn_norm) + attn_ffw_norm = self.post_feedforward_layernorm(attn_ffw) + attn_ffw_laurel_gated = attn_laurel + attn_ffw_norm + + # ActUp (connect). + corrected_predictions = self.altup.correct(predictions, + attn_ffw_laurel_gated) + first_prediction = corrected_predictions[self.altup_active_idx] + first_prediction = self.altup.scale_corrected_output(first_prediction) + + # per_layer_input_gate adapted from jax.numpy.einsum("btd,dp->btp", ...) + first_prediction = self.per_layer_input_gate(first_prediction) + first_prediction = self.act_fn(first_prediction) + first_prediction = torch.mul(first_prediction, per_layer_input) + + # per_layer_projection adapted from jax.numpy.einsum("btp,pd->btd", ...) + first_prediction = self.per_layer_projection(first_prediction) + first_prediction = self.post_per_layer_input_norm(first_prediction) + corrected_predictions[1:] += first_prediction + + return corrected_predictions + + +@support_torch_compile +class Gemma3nTextModel(nn.Module): + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config.text_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config + self.config = config + self.quant_config = quant_config + + self.embed_tokens = VocabParallelEmbedding( + config.vocab_size, + config.hidden_size, + prefix=f"{prefix}.embed_tokens", + ) + self.embed_scale = torch.tensor( + config.hidden_size**0.5, + dtype=self.embed_tokens.weight.dtype, + ) + self.embed_tokens_per_layer = VocabParallelEmbedding( + config.vocab_size_per_layer_input, + config.num_hidden_layers * config.hidden_size_per_layer_input, + prefix=f"{prefix}.per_layer_embed_tokens", + ) + self.embed_scale_per_layer = torch.tensor( + config.hidden_size_per_layer_input**0.5, + dtype=self.embed_tokens.weight.dtype, + ) + self.per_layer_model_projection = ColumnParallelLinear( + config.hidden_size, + config.num_hidden_layers * config.hidden_size_per_layer_input, + bias=False, + gather_output=True, + return_bias=False, + prefix=f"{prefix}.per_layer_model_projection", + ) + self.per_layer_projection_norm = RMSNorm( + hidden_size=config.hidden_size_per_layer_input, + eps=config.rms_norm_eps, + ) + self.per_layer_input_scale = torch.rsqrt(torch.tensor(2.0)).to( + self.embed_tokens.weight.dtype) + self.per_layer_projection_scale = torch.tensor( + config.hidden_size**0.5, + dtype=self.embed_tokens.weight.dtype, + ) + self.altup_projections = nn.ModuleList([ + ColumnParallelLinear( + config.hidden_size, + config.hidden_size, + bias=False, + gather_output=True, + return_bias=False, + prefix=f"{prefix}.{idx-1}.altup_projections", + ) for idx in range(1, self.config.altup_num_inputs) + ]) + self.altup_unembed_projections = nn.ModuleList([ + ColumnParallelLinear( + config.hidden_size, + config.hidden_size, + bias=False, + gather_output=True, + return_bias=False, + prefix=f"{prefix}.{idx-1}.altup_unembed_projections", + ) for idx in range(1, self.config.altup_num_inputs) + ]) + + # Transformer blocks. + self.start_layer, self.end_layer, self.layers = make_layers( + config.num_hidden_layers, + lambda prefix: Gemma3nDecoderLayer( + config, cache_config, quant_config, prefix=prefix), + prefix=f"{prefix}.layers") + self.norm = RMSNorm( + config.hidden_size, + eps=config.rms_norm_eps, + ) + self.eps = torch.tensor(torch.finfo().min) + + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) * self.embed_scale + + def get_per_layer_input_embeddings( + self, input_ids: torch.Tensor) -> torch.Tensor: + # Deal with the fact that vocab_size_per_layer_input < vocab_size + # which causes us to have some out of vocab tokens by setting + # those token ids to 0. This matches the HF implementation. + per_layer_inputs_mask = torch.logical_and( + input_ids >= 0, input_ids < self.config.vocab_size_per_layer_input) + per_layer_inputs_tokens = torch.where(per_layer_inputs_mask, input_ids, + torch.zeros_like(input_ids)) + return self.embed_tokens_per_layer( + per_layer_inputs_tokens) * self.embed_scale_per_layer + + def forward( + self, + input_ids: Optional[torch.Tensor], + positions: torch.Tensor, + inputs_embeds: Optional[torch.Tensor] = None, + **kwargs, + ) -> Union[torch.Tensor, IntermediateTensors]: + if inputs_embeds is not None: + hidden_states_0 = inputs_embeds + else: + hidden_states_0 = self.get_input_embeddings(input_ids) + + # Per layer inputs. + if input_ids is None: + raise ValueError("Passing None for input ids is not supported.") + per_layer_inputs = self.get_per_layer_input_embeddings(input_ids) + per_layer_inputs = per_layer_inputs.reshape( + -1, self.config.num_hidden_layers, + self.config.hidden_size_per_layer_input) + per_layer_projection = self.per_layer_model_projection(hidden_states_0) + per_layer_projection = per_layer_projection.reshape( + *hidden_states_0.shape[:-1], + self.config.num_hidden_layers, + self.config.hidden_size_per_layer_input, + ) + per_layer_projection = self.per_layer_projection_norm( + per_layer_projection) + per_layer_inputs = per_layer_projection + per_layer_inputs + per_layer_inputs *= self.per_layer_input_scale + + # Altup embed. + hidden_states = [hidden_states_0] * self.config.altup_num_inputs + target_magnitude = torch.mean(hidden_states_0**2, dim=-1, + keepdim=True)**0.5 + for i in range(1, self.config.altup_num_inputs): + hidden_states[i] = self.altup_projections[i - 1](hidden_states[i]) + new_magnitude = torch.mean(hidden_states[i]**2, + dim=-1, + keepdim=True)**0.5 + hidden_states[i] *= target_magnitude / torch.maximum( + new_magnitude, self.eps) + hidden_states = torch.stack(hidden_states, dim=0) + + # Transformer blocks. + for layer_idx, layer in enumerate(self.layers): + # [altup_num_inputs, num_tokens, hidden_size] + hidden_states = layer( + positions=positions, + hidden_states=hidden_states, + per_layer_input=per_layer_inputs[:, layer_idx, :], + **kwargs, + ) + + # Altup unembed. + target_magnitude = torch.mean(hidden_states[0]**2, + dim=-1, + keepdim=True)**0.5 + for i in range(1, self.config.altup_num_inputs): + hidden_states[i] = self.altup_unembed_projections[i - 1]( + hidden_states[i]) + new_magnitude = torch.mean(hidden_states[i]**2, + dim=-1, + keepdim=True)**0.5 + hidden_states[i] *= target_magnitude / torch.maximum( + new_magnitude, self.eps) + # [altup_num_inputs,num_tokens,hidden_size] -> [num_tokens,hidden_size] + hidden_states = torch.mean(hidden_states, dim=0) + + return self.norm(hidden_states) + + def load_weights(self, weights: Iterable[tuple[str, + torch.Tensor]]) -> set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + params_dict = dict(self.named_parameters()) + loaded_params: set[str] = set() + for name, loaded_weight in weights: + if (self.quant_config is not None and + (scale_name := self.quant_config.get_cache_scale(name))): + # Loading kv cache scales for compressed-tensors quantization + param = params_dict[scale_name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + loaded_weight = loaded_weight[0] + weight_loader(param, loaded_weight) + loaded_params.add(scale_name) + continue + for (param_name, shard_name, shard_id) in stacked_params_mapping: + if shard_name not in name: + continue + # Avoid spurious match with ".up_proj". + if "altup_projections" in name: + continue + name = name.replace(shard_name, param_name) + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + # Skip loading extra bias for GPTQ models. + if name.endswith(".bias") and name not in params_dict: + continue + # Remapping the name of FP8 kv-scale. + name = maybe_remap_kv_scale_name(name, params_dict) + if name is None: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + + return loaded_params + + +class Gemma3nModel(nn.Module): + + def __init__(self, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + self.language_model = Gemma3nTextModel(vllm_config=vllm_config, + prefix=maybe_prefix( + prefix, "language_model")) + + def forward( + self, + input_ids: Optional[torch.Tensor], + positions: torch.Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.Tensor: + return self.language_model(input_ids=input_ids, + positions=positions, + inputs_embeds=inputs_embeds, + **kwargs) + + +class Gemma3nForConditionalGeneration(nn.Module): + packed_modules_mapping = { + "qkv_proj": [ + "q_proj", + "k_proj", + "v_proj", + ], + "gate_up_proj": [ + "gate_proj", + "up_proj", + ], + } + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + config = vllm_config.model_config.hf_config + lora_config = vllm_config.lora_config + del lora_config # Unused. + super().__init__() + self.config = config + self.model = Gemma3nModel(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + self.logits_processor = LogitsProcessor( + config.text_config.vocab_size, + soft_cap=config.text_config.final_logit_softcapping) + + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.language_model.get_input_embeddings(input_ids) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, + **kwargs, + ) -> Union[torch.Tensor, IntermediateTensors]: + hidden_states = self.model(input_ids, positions, intermediate_tensors, + inputs_embeds, **kwargs) + return hidden_states + + def compute_logits( + self, + hidden_states: torch.Tensor, + sampling_metadata: Optional[SamplingMetadata], + ) -> Optional[torch.Tensor]: + logits = self.logits_processor(self.model.language_model.embed_tokens, + hidden_states, sampling_metadata) + return logits + + def load_weights(self, weights: Iterable[tuple[str, + torch.Tensor]]) -> set[str]: + loader = AutoWeightsLoader(self, + skip_substrs=([ + "embed_audio.", "embed_vision.", + "audio_tower.", "vision_tower." + ])) + return loader.load_weights(weights) diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index faeaf6ef68ccc..ff605cae02ea4 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -58,6 +58,8 @@ _TEXT_GENERATION_MODELS = { "GemmaForCausalLM": ("gemma", "GemmaForCausalLM"), "Gemma2ForCausalLM": ("gemma2", "Gemma2ForCausalLM"), "Gemma3ForCausalLM": ("gemma3", "Gemma3ForCausalLM"), + #TODO(ywang96): Support multimodal gemma3n + "Gemma3nForConditionalGeneration": ("gemma3n", "Gemma3nForConditionalGeneration"), # noqa: E501 "GlmForCausalLM": ("glm", "GlmForCausalLM"), "Glm4ForCausalLM": ("glm4", "Glm4ForCausalLM"), "GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"), From 4ab3ac285e824542831c4326d01ce84bd8b65aad Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Fri, 27 Jun 2025 16:30:53 +0900 Subject: [PATCH 174/211] [Bugfix] Fix flaky failure when getting DP ports (#20151) Signed-off-by: mgoin --- vllm/config.py | 39 +++++++++++++++++++++++++++++++-------- 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/vllm/config.py b/vllm/config.py index 7a3329aea5f78..623ba3aaf1093 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1878,18 +1878,41 @@ class ParallelConfig: return answer def stateless_init_dp_group(self) -> "ProcessGroup": + # NOTE: In high-concurrency scenarios multiple processes + # can pick the same (currently free) port through a race + # condition when calling `get_open_port()`. When the first + # process binds the port the others will subsequently fail + # with `torch.distributed.DistNetworkError: EADDRINUSE`. + # To make the initialization more robust we retry a few times + # with a fresh port whenever this specific error is observed. + from torch.distributed import DistNetworkError + from vllm.distributed.utils import ( stateless_init_torch_distributed_process_group) - # use gloo since the engine process might not have cuda device - dp_group = stateless_init_torch_distributed_process_group( - self.data_parallel_master_ip, - self.get_next_dp_init_port(), - self.data_parallel_rank, - self.data_parallel_size, - backend="gloo") + max_retries = 5 + last_exc: Optional[Exception] = None + for _ in range(max_retries): + try: + # use gloo since the engine process might not have cuda device + return stateless_init_torch_distributed_process_group( + self.data_parallel_master_ip, + self.get_next_dp_init_port(), + self.data_parallel_rank, + self.data_parallel_size, + backend="gloo") + except DistNetworkError as e: + # We only want to retry when the root cause is EADDRINUSE. + if "EADDRINUSE" in str(e): + logger.warning( + "Address already in use. Retrying with a new port.") + last_exc = e + continue # try again with a new port + raise e - return dp_group + # If we get here all retries have failed. + assert last_exc is not None + raise last_exc @staticmethod def has_unfinished_dp(dp_group: "ProcessGroup", From aa0dc77ef53b365ddf54be51748c166895a0bcd9 Mon Sep 17 00:00:00 2001 From: Ilya Lavrenov Date: Fri, 27 Jun 2025 13:16:41 +0400 Subject: [PATCH 175/211] [Perf] Improved perf for resolve_chat_template_content_format (#20065) Signed-off-by: Ilya Lavrenov --- vllm/entrypoints/chat_utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py index 7951c49f5da05..35ee52ab4601d 100644 --- a/vllm/entrypoints/chat_utils.py +++ b/vllm/entrypoints/chat_utils.py @@ -293,6 +293,7 @@ def _try_extract_ast(chat_template: str) -> Optional[jinja2.nodes.Template]: return None +@lru_cache(maxsize=32) def _detect_content_format( chat_template: str, *, From 94a55c76813f97557a0d480c78f1488a360beee7 Mon Sep 17 00:00:00 2001 From: Hosang <156028780+hyoon1@users.noreply.github.com> Date: Fri, 27 Jun 2025 10:14:44 -0400 Subject: [PATCH 176/211] [Fix][ROCm] Remove unused variables to fix build error on GFX11/12 (#19891) Signed-off-by: Hosang Yoon --- csrc/rocm/attention.cu | 4 ---- 1 file changed, 4 deletions(-) diff --git a/csrc/rocm/attention.cu b/csrc/rocm/attention.cu index 39997030751b8..3bddd12cad077 100644 --- a/csrc/rocm/attention.cu +++ b/csrc/rocm/attention.cu @@ -1598,7 +1598,6 @@ __launch_bounds__(NUM_THREADS, 3) void paged_attention_ll4mi_QKV_mfma16_kernel( const int warpid = threadIdx.x / WARP_SIZE; const int laneid = threadIdx.x % WARP_SIZE; const int lane2id = laneid % 2; - const int lane4id = laneid % 4; const int lane16id = laneid % 16; const int rowid = laneid / 16; @@ -1745,7 +1744,6 @@ __launch_bounds__(NUM_THREADS, 3) void paged_attention_ll4mi_QKV_mfma16_kernel( const cache_t* k_ptr2 = k_ptr + kblock_number * kv_block_stride; const int klocal_token_idx = TOKENS_PER_WARP * warpid + token_depth * 16 + lane16id; - const int kglobal_token_idx = partition_start_token_idx + klocal_token_idx; const int kphysical_block_offset = klocal_token_idx % BLOCK_SIZE; const cache_t* k_ptr3 = k_ptr2 + kphysical_block_offset * KX; @@ -2368,7 +2366,6 @@ __launch_bounds__(NUM_THREADS, 3) void paged_attention_ll4mi_QKV_mfma16_kernel( const int warpid = threadIdx.x / WARP_SIZE; const int laneid = threadIdx.x % WARP_SIZE; const int lane2id = laneid % 2; - const int lane4id = laneid % 4; const int lane16id = laneid % 16; const int rowid = laneid / 16; @@ -2514,7 +2511,6 @@ __launch_bounds__(NUM_THREADS, 3) void paged_attention_ll4mi_QKV_mfma16_kernel( const cache_t* k_ptr2 = k_ptr + kblock_number * kv_block_stride; const int klocal_token_idx = TOKENS_PER_WARP * warpid + token_depth * 16 + lane16id; - const int kglobal_token_idx = partition_start_token_idx + klocal_token_idx; const int kphysical_block_offset = klocal_token_idx % BLOCK_SIZE; const cache_t* k_ptr3 = k_ptr2 + kphysical_block_offset * KX; From aafabaa0d5c87c283b366f81fdce55cf91ae980c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luka=20Govedi=C4=8D?= Date: Fri, 27 Jun 2025 11:00:42 -0400 Subject: [PATCH 177/211] [Fix][torch.compile] Enable custom ops by default when Inductor off (#20102) Signed-off-by: luka --- .../model_executor/test_enabled_custom_ops.py | 45 +++++++++++-------- vllm/config.py | 27 ++++------- vllm/model_executor/custom_op.py | 12 ++--- 3 files changed, 41 insertions(+), 43 deletions(-) diff --git a/tests/model_executor/test_enabled_custom_ops.py b/tests/model_executor/test_enabled_custom_ops.py index a94215ee397bf..140f00294765d 100644 --- a/tests/model_executor/test_enabled_custom_ops.py +++ b/tests/model_executor/test_enabled_custom_ops.py @@ -28,42 +28,49 @@ class Relu3(ReLUSquaredActivation): @pytest.mark.parametrize( - "env, torch_level, ops_enabled, default_on", + "env, torch_level, use_inductor, ops_enabled, default_on", [ # Default values based on compile level - ("", 0, [True] * 4, True), - ("", 1, [True] * 4, True), - ("", 2, [True] * 4, True), # All by default - ("", 3, [False] * 4, False), - ("", 4, [False] * 4, False), # None by default + # - All by default (no Inductor compilation) + ("", 0, False, [True] * 4, True), + ("", 1, True, [True] * 4, True), + ("", 2, False, [True] * 4, True), + # - None by default (with Inductor) + ("", 3, True, [False] * 4, False), + ("", 4, True, [False] * 4, False), + # - All by default (without Inductor) + ("", 3, False, [True] * 4, True), + ("", 4, False, [True] * 4, True), # Explicitly enabling/disabling # # Default: all # # All but SiluAndMul - ("+rms_norm,-silu_and_mul", 0, [1, 0, 1, 1], True), + ("+rms_norm,-silu_and_mul", 0, True, [1, 0, 1, 1], True), # Only ReLU3 - ("none,-rms_norm,+relu3", 0, [0, 0, 0, 1], False), + ("none,-rms_norm,+relu3", 1, False, [0, 0, 0, 1], False), # All but SiluAndMul - ("all,-silu_and_mul", 1, [1, 0, 1, 1], True), + ("all,-silu_and_mul", 2, True, [1, 0, 1, 1], True), # All but ReLU3 (even if ReLU2 is on) - ("-relu3,relu2", 1, [1, 1, 1, 0], True), - # GeluAndMul and SiluAndMul - ("none,-relu3,+gelu_and_mul,+silu_and_mul", 2, [0, 1, 1, 0], False), + ("-relu3,relu2", 3, False, [1, 1, 1, 0], True), + # RMSNorm and SiluAndMul + ("none,-relu3,+rms_norm,+silu_and_mul", 4, False, [1, 1, 0, 0], False), # All but RMSNorm - ("-rms_norm", 2, [0, 1, 1, 1], True), + ("-rms_norm", 3, False, [0, 1, 1, 1], True), # # Default: none # # Only ReLU3 - ("-silu_and_mul,+relu3", 3, [0, 0, 0, 1], False), + ("-silu_and_mul,+relu3", 3, True, [0, 0, 0, 1], False), # All but RMSNorm - ("all,-rms_norm", 4, [0, 1, 1, 1], True), + ("all,-rms_norm", 4, True, [0, 1, 1, 1], True), ]) -def test_enabled_ops(env: str, torch_level: int, ops_enabled: list[int], - default_on: bool): - vllm_config = VllmConfig(compilation_config=CompilationConfig( - level=torch_level, custom_ops=env.split(","))) +def test_enabled_ops(env: str, torch_level: int, use_inductor: bool, + ops_enabled: list[int], default_on: bool): + vllm_config = VllmConfig( + compilation_config=CompilationConfig(use_inductor=bool(use_inductor), + level=torch_level, + custom_ops=env.split(","))) with set_current_vllm_config(vllm_config): assert CustomOp.default_on() == default_on diff --git a/vllm/config.py b/vllm/config.py index 623ba3aaf1093..84aa14b7c8605 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -3994,7 +3994,8 @@ class CompilationConfig: - 'none,+op1,+op2' to enable only op1 and op2 By default, all custom ops are enabled when running without Inductor and - disabled when running with Inductor (compile_level >= Inductor).""" + disabled when running with Inductor: level>=PIECEWISE and use_inductor=True. + Inductor generates (fused) Triton kernels for disabled custom ops.""" splitting_ops: list[str] = field(default_factory=list) """A list of ops to split the full graph into subgraphs, used in piecewise compilation.""" @@ -4003,10 +4004,13 @@ class CompilationConfig: use_inductor: bool = True """Whether to use inductor compilation: - - False: inductor compilation is not used. graph runs in eager. - - True: inductor compilation is used. one graph for symbolic shape - is compiled. In addition, compile for compile_sizes, - using configurations in inductor_compile_config.""" + - False: inductor compilation is not used. graph runs in eager + (custom_ops enabled by default). + - True: inductor compilation is used (custom_ops disabled by default). + One graph for symbolic shape and one graph per size in compile_sizes + are compiled using configurations in inductor_compile_config. + + This setting is ignored if level 0 and \ diff --git a/vllm/model_executor/custom_op.py b/vllm/model_executor/custom_op.py index 1680b723d6a29..9c88721fb2782 100644 --- a/vllm/model_executor/custom_op.py +++ b/vllm/model_executor/custom_op.py @@ -141,16 +141,16 @@ class CustomOp(nn.Module): @staticmethod def default_on() -> bool: """ - On by default if level < CompilationLevel.PIECEWISE + On by default if PyTorch Inductor is not used. Specifying 'all' or 'none' in custom_op takes precedence. """ from vllm.config import CompilationLevel compilation_config = get_current_vllm_config().compilation_config - custom_ops = compilation_config.custom_ops - count_none = custom_ops.count("none") - count_all = custom_ops.count("all") - return compilation_config.level < CompilationLevel.PIECEWISE and \ - not count_none > 0 or count_all > 0 + default_on = (compilation_config.level < CompilationLevel.PIECEWISE + or not compilation_config.use_inductor) + count_none = compilation_config.custom_ops.count("none") + count_all = compilation_config.custom_ops.count("all") + return default_on and not count_none > 0 or count_all > 0 # Dictionary of all custom ops (classes, indexed by registered name). # To check if an op with a name is enabled, call .enabled() on the class. From c6c983053d1c430e9347797094c95bbed37bac2a Mon Sep 17 00:00:00 2001 From: bnellnm <49004751+bnellnm@users.noreply.github.com> Date: Fri, 27 Jun 2025 11:42:22 -0400 Subject: [PATCH 178/211] [Bugfix] Mark 'hidden_states' as mutable in moe_forward registration. (#20152) Signed-off-by: Bill Nell --- vllm/model_executor/layers/fused_moe/layer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index 6fe95d32a10e7..672244385e52c 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -1743,7 +1743,8 @@ def moe_forward_fake(hidden_states: torch.Tensor, router_logits: torch.Tensor, direct_register_custom_op( op_name="moe_forward", op_func=moe_forward, - mutates_args=[], + mutates_args=["hidden_states"], fake_impl=moe_forward_fake, dispatch_key=current_platform.dispatch_key, + tags=(torch.Tag.needs_fixed_stride_order, ), ) From e8c3bd2cd164786e764f7e3436bbaa6cd00ae64a Mon Sep 17 00:00:00 2001 From: Tyler Michael Smith Date: Fri, 27 Jun 2025 12:01:28 -0400 Subject: [PATCH 179/211] [Bugfix] Fix some narrowing conversion warnings (#20141) Signed-off-by: Tyler Michael Smith --- csrc/attention/mla/cutlass_mla_kernels.cu | 2 +- csrc/mamba/causal_conv1d/causal_conv1d.cu | 8 ++------ csrc/mamba/mamba_ssm/selective_scan_fwd.cu | 4 +--- csrc/quantization/fp4/nvfp4_experts_quant.cu | 4 ++-- csrc/quantization/fp4/nvfp4_quant_kernels.cu | 2 +- csrc/quantization/fp4/nvfp4_scaled_mm_kernels.cu | 2 +- 6 files changed, 8 insertions(+), 14 deletions(-) diff --git a/csrc/attention/mla/cutlass_mla_kernels.cu b/csrc/attention/mla/cutlass_mla_kernels.cu index f4b6b19f4b232..9d05d910dd81f 100644 --- a/csrc/attention/mla/cutlass_mla_kernels.cu +++ b/csrc/attention/mla/cutlass_mla_kernels.cu @@ -207,7 +207,7 @@ void cutlass_mla_decode_sm100a(torch::Tensor const& out, "page_table must be a 32-bit integer tensor"); auto in_dtype = q_nope.dtype(); - at::cuda::CUDAGuard device_guard{(char)q_nope.get_device()}; + const at::cuda::OptionalCUDAGuard device_guard(device_of(q_nope)); const cudaStream_t stream = at::cuda::getCurrentCUDAStream(q_nope.get_device()); if (in_dtype == at::ScalarType::Half) { diff --git a/csrc/mamba/causal_conv1d/causal_conv1d.cu b/csrc/mamba/causal_conv1d/causal_conv1d.cu index f62d08c17c6d8..c83d72751a55c 100644 --- a/csrc/mamba/causal_conv1d/causal_conv1d.cu +++ b/csrc/mamba/causal_conv1d/causal_conv1d.cu @@ -185,9 +185,7 @@ void causal_conv1d_fwd(const at::Tensor &x, const at::Tensor &weight, params.conv_states_ptr = nullptr; } - // Otherwise the kernel will be launched from cuda:0 device - // Cast to char to avoid compiler warning about narrowing - at::cuda::CUDAGuard device_guard{(char)x.get_device()}; + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); auto stream = at::cuda::getCurrentCUDAStream().stream(); DISPATCH_WTYPE_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_fwd", [&] { causal_conv1d_fwd_cuda(params, stream); @@ -278,9 +276,7 @@ void causal_conv1d_update(const at::Tensor &x, params.conv_state_indices_ptr = nullptr; } - // Otherwise the kernel will be launched from cuda:0 device - // Cast to char to avoid compiler warning about narrowing - at::cuda::CUDAGuard device_guard{(char)x.get_device()}; + const at::cuda::OptionalCUDAGuard device_guard(device_of(x)); auto stream = at::cuda::getCurrentCUDAStream().stream(); DISPATCH_WTYPE_ITYPE_FLOAT_AND_HALF_AND_BF16(x.scalar_type(), "causal_conv1d_update", [&] { causal_conv1d_update_cuda(params, stream); diff --git a/csrc/mamba/mamba_ssm/selective_scan_fwd.cu b/csrc/mamba/mamba_ssm/selective_scan_fwd.cu index 0c9df925bdbf6..785d316025eca 100644 --- a/csrc/mamba/mamba_ssm/selective_scan_fwd.cu +++ b/csrc/mamba/mamba_ssm/selective_scan_fwd.cu @@ -647,9 +647,7 @@ void selective_scan_fwd(const torch::Tensor &u, const torch::Tensor &delta, ); - // Otherwise the kernel will be launched from cuda:0 device - // Cast to char to avoid compiler warning about narrowing - at::cuda::CUDAGuard device_guard{(char)u.get_device()}; + const at::cuda::OptionalCUDAGuard device_guard(device_of(u)); auto stream = at::cuda::getCurrentCUDAStream().stream(); DISPATCH_WTYPE_ITYPE_FLOAT_AND_HALF_AND_BF16(u.scalar_type(), "selective_scan_fwd", [&] { selective_scan_fwd_cuda(params, stream); diff --git a/csrc/quantization/fp4/nvfp4_experts_quant.cu b/csrc/quantization/fp4/nvfp4_experts_quant.cu index b51033c9b72c9..190d66f318a83 100644 --- a/csrc/quantization/fp4/nvfp4_experts_quant.cu +++ b/csrc/quantization/fp4/nvfp4_experts_quant.cu @@ -561,7 +561,7 @@ void scaled_fp4_experts_quant_sm100a( TORCH_CHECK(output_scale.size(1) * 4 == padded_k); auto in_dtype = input.dtype(); - at::cuda::CUDAGuard device_guard{(char)input.get_device()}; + const at::cuda::OptionalCUDAGuard device_guard(device_of(input)); const cudaStream_t stream = at::cuda::getCurrentCUDAStream(input.get_device()); if (in_dtype == at::ScalarType::Half) { @@ -579,4 +579,4 @@ void scaled_fp4_experts_quant_sm100a( } else { TORCH_CHECK(false, "Expected input data type to be half or bfloat16"); } -} \ No newline at end of file +} diff --git a/csrc/quantization/fp4/nvfp4_quant_kernels.cu b/csrc/quantization/fp4/nvfp4_quant_kernels.cu index fef74111624f0..d32911357a953 100644 --- a/csrc/quantization/fp4/nvfp4_quant_kernels.cu +++ b/csrc/quantization/fp4/nvfp4_quant_kernels.cu @@ -347,7 +347,7 @@ void scaled_fp4_quant_sm100a(torch::Tensor const& output, auto input_sf_ptr = static_cast(input_sf.data_ptr()); auto sf_out = static_cast(output_sf.data_ptr()); auto output_ptr = static_cast(output.data_ptr()); - at::cuda::CUDAGuard device_guard{(char)input.get_device()}; + const at::cuda::OptionalCUDAGuard device_guard(device_of(input)); auto stream = at::cuda::getCurrentCUDAStream(input.get_device()); // We don't support e8m0 scales at this moment. diff --git a/csrc/quantization/fp4/nvfp4_scaled_mm_kernels.cu b/csrc/quantization/fp4/nvfp4_scaled_mm_kernels.cu index 97c0e0da7b1fb..7572a7eb3122d 100644 --- a/csrc/quantization/fp4/nvfp4_scaled_mm_kernels.cu +++ b/csrc/quantization/fp4/nvfp4_scaled_mm_kernels.cu @@ -267,7 +267,7 @@ void cutlass_scaled_fp4_mm_sm100a(torch::Tensor& D, torch::Tensor const& A, B_sf.sizes()[1], ")"); auto out_dtype = D.dtype(); - at::cuda::CUDAGuard device_guard{(char)A.get_device()}; + const at::cuda::OptionalCUDAGuard device_guard(device_of(A)); const cudaStream_t stream = at::cuda::getCurrentCUDAStream(A.get_device()); if (out_dtype == at::ScalarType::Half) { From 3c545c0c3b98ee642373a308197d750d0e449403 Mon Sep 17 00:00:00 2001 From: Fabien Dupont Date: Fri, 27 Jun 2025 18:04:39 +0200 Subject: [PATCH 180/211] [CI/Build] Allow hermetic builds (#18064) Signed-off-by: Fabien Dupont Signed-off-by: Tyler Michael Smith Signed-off-by: Fabien Dupont Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> Co-authored-by: Tyler Michael Smith Co-authored-by: Elias Levy Co-authored-by: Harry Mellor <19981378+hmellor@users.noreply.github.com> --- docker/Dockerfile | 188 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 158 insertions(+), 30 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 8d4375470adf9..a71b052bfca25 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -6,30 +6,106 @@ # docs/assets/contributing/dockerfile-stages-dependency.png ARG CUDA_VERSION=12.8.1 +ARG PYTHON_VERSION=3.12 + +# By parameterizing the base images, we allow third-party to use their own +# base images. One use case is hermetic builds with base images stored in +# private registries that use a different repository naming conventions. +# +# Example: +# docker build --build-arg BUILD_BASE_IMAGE=registry.acme.org/mirror/nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04 +ARG BUILD_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04 +ARG FINAL_BASE_IMAGE=nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 + +# By parameterizing the Deadsnakes repository URL, we allow third-party to use +# their own mirror. When doing so, we don't benefit from the transparent +# installation of the GPG key of the PPA, as done by add-apt-repository, so we +# also need a URL for the GPG key. +ARG DEADSNAKES_MIRROR_URL +ARG DEADSNAKES_GPGKEY_URL + +# The PyPA get-pip.py script is a self contained script+zip file, that provides +# both the installer script and the pip base85-encoded zip archive. This allows +# bootstrapping pip in environment where a dsitribution package does not exist. +# +# By parameterizing the URL for get-pip.py installation script, we allow +# third-party to use their own copy of the script stored in a private mirror. +# We set the default value to the PyPA owned get-pip.py script. +# +# Reference: https://pip.pypa.io/en/stable/installation/#get-pip-py +ARG GET_PIP_URL="https://bootstrap.pypa.io/get-pip.py" + +# PIP supports fetching the packages from custom indexes, allowing third-party +# to host the packages in private mirrors. The PIP_INDEX_URL and +# PIP_EXTRA_INDEX_URL are standard PIP environment variables to override the +# default indexes. By letting them empty by default, PIP will use its default +# indexes if the build process doesn't override the indexes. +# +# Uv uses different variables. We set them by default to the same values as +# PIP, but they can be overridden. +ARG PIP_INDEX_URL +ARG PIP_EXTRA_INDEX_URL +ARG UV_INDEX_URL=${PIP_INDEX_URL} +ARG UV_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL} + +# PyTorch provides its own indexes for standard and nightly builds +ARG PYTORCH_CUDA_INDEX_BASE_URL=https://download.pytorch.org/whl +ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL=https://download.pytorch.org/whl/nightly + +# PIP supports multiple authentication schemes, including keyring +# By parameterizing the PIP_KEYRING_PROVIDER variable and setting it to +# disabled by default, we allow third-party to use keyring authentication for +# their private Python indexes, while not changing the default behavior which +# is no authentication. +# +# Reference: https://pip.pypa.io/en/stable/topics/authentication/#keyring-support +ARG PIP_KEYRING_PROVIDER=disabled +ARG UV_KEYRING_PROVIDER=${PIP_KEYRING_PROVIDER} + #################### BASE BUILD IMAGE #################### # prepare basic build environment -FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu20.04 AS base -ARG CUDA_VERSION=12.8.1 -ARG PYTHON_VERSION=3.12 +FROM ${BUILD_BASE_IMAGE} AS base +ARG CUDA_VERSION +ARG PYTHON_VERSION ARG TARGETPLATFORM ENV DEBIAN_FRONTEND=noninteractive +ARG DEADSNAKES_MIRROR_URL +ARG DEADSNAKES_GPGKEY_URL +ARG GET_PIP_URL + # Install Python and other dependencies RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \ && echo 'tzdata tzdata/Zones/America select Los_Angeles' | debconf-set-selections \ && apt-get update -y \ && apt-get install -y ccache software-properties-common git curl sudo \ - && for i in 1 2 3; do \ - add-apt-repository -y ppa:deadsnakes/ppa && break || \ - { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \ - done \ + && if [ ! -z ${DEADSNAKES_MIRROR_URL} ] ; then \ + if [ ! -z "${DEADSNAKES_GPGKEY_URL}" ] ; then \ + mkdir -p -m 0755 /etc/apt/keyrings ; \ + curl -L ${DEADSNAKES_GPGKEY_URL} | gpg --dearmor > /etc/apt/keyrings/deadsnakes.gpg ; \ + sudo chmod 644 /etc/apt/keyrings/deadsnakes.gpg ; \ + echo "deb [signed-by=/etc/apt/keyrings/deadsnakes.gpg] ${DEADSNAKES_MIRROR_URL} $(lsb_release -cs) main" > /etc/apt/sources.list.d/deadsnakes.list ; \ + fi ; \ + else \ + for i in 1 2 3; do \ + add-apt-repository -y ppa:deadsnakes/ppa && break || \ + { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \ + done ; \ + fi \ && apt-get update -y \ && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv \ && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \ && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \ && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \ - && curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION} \ + && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \ && python3 --version && python3 -m pip --version + +ARG PIP_INDEX_URL UV_INDEX_URL +ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL +ARG PYTORCH_CUDA_INDEX_BASE_URL +ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL +ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER + # Install uv for faster pip installs RUN --mount=type=cache,target=/root/.cache/uv \ python3 -m pip install uv @@ -63,15 +139,19 @@ WORKDIR /workspace # after this step RUN --mount=type=cache,target=/root/.cache/uv \ if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ - uv pip install --system --index-url https://download.pytorch.org/whl/nightly/cu128 "torch==2.8.0.dev20250318+cu128" "torchvision==0.22.0.dev20250319"; \ - uv pip install --system --index-url https://download.pytorch.org/whl/nightly/cu128 --pre pytorch_triton==3.3.0+gitab727c40; \ + uv pip install --system \ + --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \ + "torch==2.8.0.dev20250318+cu128" "torchvision==0.22.0.dev20250319"; \ + uv pip install --system \ + --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \ + --pre pytorch_triton==3.3.0+gitab727c40; \ fi COPY requirements/common.txt requirements/common.txt COPY requirements/cuda.txt requirements/cuda.txt RUN --mount=type=cache,target=/root/.cache/uv \ uv pip install --system -r requirements/cuda.txt \ - --extra-index-url https://download.pytorch.org/whl/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') + --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') # cuda arch list used by torch # can be useful for both `dev` and `test` @@ -88,6 +168,10 @@ ENV VLLM_FA_CMAKE_GPU_ARCHES=${vllm_fa_cmake_gpu_arches} FROM base AS build ARG TARGETPLATFORM +ARG PIP_INDEX_URL UV_INDEX_URL +ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL +ARG PYTORCH_CUDA_INDEX_BASE_URL + # install build dependencies COPY requirements/build.txt requirements/build.txt @@ -98,7 +182,7 @@ ENV UV_INDEX_STRATEGY="unsafe-best-match" RUN --mount=type=cache,target=/root/.cache/uv \ uv pip install --system -r requirements/build.txt \ - --extra-index-url https://download.pytorch.org/whl/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') + --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') COPY . . ARG GIT_REPO_CHECK=0 @@ -113,6 +197,8 @@ ARG nvcc_threads=8 ENV NVCC_THREADS=$nvcc_threads ARG USE_SCCACHE +ARG SCCACHE_DOWNLOAD_URL=https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz +ARG SCCACHE_ENDPOINT ARG SCCACHE_BUCKET_NAME=vllm-build-sccache ARG SCCACHE_REGION_NAME=us-west-2 ARG SCCACHE_S3_NO_CREDENTIALS=0 @@ -121,10 +207,11 @@ RUN --mount=type=cache,target=/root/.cache/uv \ --mount=type=bind,source=.git,target=.git \ if [ "$USE_SCCACHE" = "1" ]; then \ echo "Installing sccache..." \ - && curl -L -o sccache.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz \ + && curl -L -o sccache.tar.gz ${SCCACHE_DOWNLOAD_URL} \ && tar -xzf sccache.tar.gz \ && sudo mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/bin/sccache \ && rm -rf sccache.tar.gz sccache-v0.8.1-x86_64-unknown-linux-musl \ + && if [ ! -z ${SCCACHE_ENDPOINT} ] ; then export SCCACHE_ENDPOINT=${SCCACHE_ENDPOINT} ; fi \ && export SCCACHE_BUCKET=${SCCACHE_BUCKET_NAME} \ && export SCCACHE_REGION=${SCCACHE_REGION_NAME} \ && export SCCACHE_S3_NO_CREDENTIALS=${SCCACHE_S3_NO_CREDENTIALS} \ @@ -162,6 +249,10 @@ RUN if [ "$RUN_WHEEL_CHECK" = "true" ]; then \ #################### DEV IMAGE #################### FROM base as dev +ARG PIP_INDEX_URL UV_INDEX_URL +ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL +ARG PYTORCH_CUDA_INDEX_BASE_URL + # This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out # Reference: https://github.com/astral-sh/uv/pull/1694 ENV UV_HTTP_TIMEOUT=500 @@ -176,21 +267,25 @@ COPY requirements/test.txt requirements/test.txt COPY requirements/dev.txt requirements/dev.txt RUN --mount=type=cache,target=/root/.cache/uv \ uv pip install --system -r requirements/dev.txt \ - --extra-index-url https://download.pytorch.org/whl/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') + --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') #################### DEV IMAGE #################### #################### vLLM installation IMAGE #################### # image with vLLM installed # TODO: Restore to base image after FlashInfer AOT wheel fixed -FROM nvidia/cuda:${CUDA_VERSION}-devel-ubuntu22.04 AS vllm-base -ARG CUDA_VERSION=12.8.1 -ARG PYTHON_VERSION=3.12 +FROM ${FINAL_BASE_IMAGE} AS vllm-base +ARG CUDA_VERSION +ARG PYTHON_VERSION WORKDIR /vllm-workspace ENV DEBIAN_FRONTEND=noninteractive ARG TARGETPLATFORM SHELL ["/bin/bash", "-c"] +ARG DEADSNAKES_MIRROR_URL +ARG DEADSNAKES_GPGKEY_URL +ARG GET_PIP_URL + RUN PYTHON_VERSION_STR=$(echo ${PYTHON_VERSION} | sed 's/\.//g') && \ echo "export PYTHON_VERSION_STR=${PYTHON_VERSION_STR}" >> /etc/environment @@ -200,17 +295,33 @@ RUN echo 'tzdata tzdata/Areas select America' | debconf-set-selections \ && apt-get update -y \ && apt-get install -y ccache software-properties-common git curl wget sudo vim python3-pip \ && apt-get install -y ffmpeg libsm6 libxext6 libgl1 \ - && for i in 1 2 3; do \ - add-apt-repository -y ppa:deadsnakes/ppa && break || \ - { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \ - done \ + && if [ ! -z ${DEADSNAKES_MIRROR_URL} ] ; then \ + if [ ! -z "${DEADSNAKES_GPGKEY_URL}" ] ; then \ + mkdir -p -m 0755 /etc/apt/keyrings ; \ + curl -L ${DEADSNAKES_GPGKEY_URL} | gpg --dearmor > /etc/apt/keyrings/deadsnakes.gpg ; \ + sudo chmod 644 /etc/apt/keyrings/deadsnakes.gpg ; \ + echo "deb [signed-by=/etc/apt/keyrings/deadsnakes.gpg] ${DEADSNAKES_MIRROR_URL} $(lsb_release -cs) main" > /etc/apt/sources.list.d/deadsnakes.list ; \ + fi ; \ + else \ + for i in 1 2 3; do \ + add-apt-repository -y ppa:deadsnakes/ppa && break || \ + { echo "Attempt $i failed, retrying in 5s..."; sleep 5; }; \ + done ; \ + fi \ && apt-get update -y \ && apt-get install -y python${PYTHON_VERSION} python${PYTHON_VERSION}-dev python${PYTHON_VERSION}-venv libibverbs-dev \ && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python${PYTHON_VERSION} 1 \ && update-alternatives --set python3 /usr/bin/python${PYTHON_VERSION} \ && ln -sf /usr/bin/python${PYTHON_VERSION}-config /usr/bin/python3-config \ - && curl -sS https://bootstrap.pypa.io/get-pip.py | python${PYTHON_VERSION} \ + && curl -sS ${GET_PIP_URL} | python${PYTHON_VERSION} \ && python3 --version && python3 -m pip --version + +ARG PIP_INDEX_URL UV_INDEX_URL +ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL +ARG PYTORCH_CUDA_INDEX_BASE_URL +ARG PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL +ARG PIP_KEYRING_PROVIDER UV_KEYRING_PROVIDER + # Install uv for faster pip installs RUN --mount=type=cache,target=/root/.cache/uv \ python3 -m pip install uv @@ -232,15 +343,19 @@ RUN ldconfig /usr/local/cuda-$(echo $CUDA_VERSION | cut -d. -f1,2)/compat/ # after this step RUN --mount=type=cache,target=/root/.cache/uv \ if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \ - uv pip install --system --index-url https://download.pytorch.org/whl/nightly/cu128 "torch==2.8.0.dev20250318+cu128" "torchvision==0.22.0.dev20250319"; \ - uv pip install --system --index-url https://download.pytorch.org/whl/nightly/cu128 --pre pytorch_triton==3.3.0+gitab727c40; \ + uv pip install --system \ + --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \ + "torch==2.8.0.dev20250318+cu128" "torchvision==0.22.0.dev20250319" ; \ + uv pip install --system \ + --index-url ${PYTORCH_CUDA_NIGHTLY_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') \ + --pre pytorch_triton==3.3.0+gitab727c40 ; \ fi # Install vllm wheel first, so that torch etc will be installed. RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist \ --mount=type=cache,target=/root/.cache/uv \ uv pip install --system dist/*.whl --verbose \ - --extra-index-url https://download.pytorch.org/whl/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') + --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') # If we need to build FlashInfer wheel before its release: # $ # Note we remove 7.0 from the arch list compared to the list below, since FlashInfer only supports sm75+ @@ -254,15 +369,20 @@ RUN --mount=type=bind,from=build,src=/workspace/dist,target=/vllm-workspace/dist # -rw-rw-r-- 1 mgoin mgoin 205M Jun 9 18:03 flashinfer_python-0.2.6.post1-cp39-abi3-linux_x86_64.whl # $ # upload the wheel to a public location, e.g. https://wheels.vllm.ai/flashinfer/v0.2.6.post1/flashinfer_python-0.2.6.post1-cp39-abi3-linux_x86_64.whl +# Allow specifying a version, Git revision or local .whl file +ARG FLASHINFER_CUDA128_INDEX_URL="https://download.pytorch.org/whl/cu128/flashinfer" +ARG FLASHINFER_CUDA128_WHEEL="flashinfer_python-0.2.6.post1%2Bcu128torch2.7-cp39-abi3-linux_x86_64.whl" +ARG FLASHINFER_GIT_REPO="https://github.com/flashinfer-ai/flashinfer.git" +ARG FLASHINFER_GIT_REF="v0.2.6.post1" RUN --mount=type=cache,target=/root/.cache/uv \ . /etc/environment && \ if [ "$TARGETPLATFORM" != "linux/arm64" ]; then \ # FlashInfer already has a wheel for PyTorch 2.7.0 and CUDA 12.8. This is enough for CI use if [[ "$CUDA_VERSION" == 12.8* ]]; then \ - uv pip install --system https://download.pytorch.org/whl/cu128/flashinfer/flashinfer_python-0.2.6.post1%2Bcu128torch2.7-cp39-abi3-linux_x86_64.whl; \ + uv pip install --system ${FLASHINFER_CUDA128_INDEX_URL}/${FLASHINFER_CUDA128_WHEEL} ; \ else \ export TORCH_CUDA_ARCH_LIST='7.5 8.0 8.9 9.0a 10.0a 12.0' && \ - git clone https://github.com/flashinfer-ai/flashinfer.git --single-branch --branch v0.2.6.post1 --recursive && \ + git clone ${FLASHINFER_GIT_REPO} --single-branch --branch ${FLASHINFER_GIT_REF} --recursive && \ # Needed to build AOT kernels (cd flashinfer && \ python3 -m flashinfer.aot && \ @@ -286,7 +406,7 @@ uv pip list COPY requirements/build.txt requirements/build.txt RUN --mount=type=cache,target=/root/.cache/uv \ uv pip install --system -r requirements/build.txt \ - --extra-index-url https://download.pytorch.org/whl/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') + --extra-index-url ${PYTORCH_CUDA_INDEX_BASE_URL}/cu$(echo $CUDA_VERSION | cut -d. -f1,2 | tr -d '.') #################### vLLM installation IMAGE #################### @@ -297,6 +417,11 @@ FROM vllm-base AS test ADD . /vllm-workspace/ +ARG PYTHON_VERSION + +ARG PIP_INDEX_URL UV_INDEX_URL +ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL + # This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out # Reference: https://github.com/astral-sh/uv/pull/1694 ENV UV_HTTP_TIMEOUT=500 @@ -307,7 +432,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \ uv pip install --system --no-build-isolation "git+https://github.com/state-spaces/mamba@v2.2.4" # install development dependencies (for testing) -RUN --mount=type=cache,target=/root/.cache/uv \ +RUN --mount=type=cache,target=/root/.cache/uv \ CUDA_MAJOR="${CUDA_VERSION%%.*}"; \ if [ "$CUDA_MAJOR" -ge 12 ]; then \ uv pip install --system -r requirements/dev.txt; \ @@ -323,7 +448,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \ ENV HF_HUB_ENABLE_HF_TRANSFER 1 # Copy in the v1 package for testing (it isn't distributed yet) -COPY vllm/v1 /usr/local/lib/python3.12/dist-packages/vllm/v1 +COPY vllm/v1 /usr/local/lib/python${PYTHON_VERSION}/dist-packages/vllm/v1 # doc requires source code # we hide them inside `test_docs/` , so that this source code @@ -340,6 +465,9 @@ RUN mv mkdocs.yaml test_docs/ FROM vllm-base AS vllm-openai-base ARG TARGETPLATFORM +ARG PIP_INDEX_URL UV_INDEX_URL +ARG PIP_EXTRA_INDEX_URL UV_EXTRA_INDEX_URL + # This timeout (in seconds) is necessary when installing some dependencies via uv since it's likely to time out # Reference: https://github.com/astral-sh/uv/pull/1694 ENV UV_HTTP_TIMEOUT=500 From c329ceca6dd7263a65c7913a14de943266a38088 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Sat, 28 Jun 2025 14:43:06 +0900 Subject: [PATCH 181/211] [CI Fix] Pin tests/models/registry.py MiniMaxText01ForCausalLM to revision due to model changes (#20199) Signed-off-by: mgoin --- tests/models/registry.py | 9 ++++++++- tests/models/test_initialization.py | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/tests/models/registry.py b/tests/models/registry.py index 1bcb4f88a30ff..72e361e2637fd 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -70,6 +70,12 @@ class _HfExamplesInfo: length that is too large to fit into memory in CI. """ + revision: Optional[str] = None + """ + The specific revision (commit hash, tag, or branch) to use for the model. + If not specified, the default revision will be used. + """ + def check_transformers_version( self, *, @@ -207,7 +213,8 @@ _TEXT_GENERATION_EXAMPLE_MODELS = { "MiniCPM3ForCausalLM": _HfExamplesInfo("openbmb/MiniCPM3-4B", trust_remote_code=True), "MiniMaxText01ForCausalLM": _HfExamplesInfo("MiniMaxAI/MiniMax-Text-01", - trust_remote_code=True), + trust_remote_code=True, + revision="a59aa9cbc53b9fb8742ca4e9e1531b9802b6fdc3"), # noqa: E501 "MiniMaxM1ForCausalLM": _HfExamplesInfo("MiniMaxAI/MiniMax-M1-40k", trust_remote_code=True), "MistralForCausalLM": _HfExamplesInfo("mistralai/Mistral-7B-Instruct-v0.1"), diff --git a/tests/models/test_initialization.py b/tests/models/test_initialization.py index e56bc925c9c40..df72607767fdd 100644 --- a/tests/models/test_initialization.py +++ b/tests/models/test_initialization.py @@ -88,6 +88,7 @@ def test_can_initialize(model_arch: str, monkeypatch: pytest.MonkeyPatch): model_info.default, tokenizer=model_info.tokenizer, tokenizer_mode=model_info.tokenizer_mode, + revision=model_info.revision, speculative_config={ "model": model_info.speculative_model, "num_speculative_tokens": 1, From e53be6f00ab37c58d767e44d8ceef26e9defc743 Mon Sep 17 00:00:00 2001 From: Chales Xu <111160781+SHA-4096@users.noreply.github.com> Date: Sat, 28 Jun 2025 13:47:36 +0800 Subject: [PATCH 182/211] [Misc] Add type assertion of request_id for LLMEngine.add_request (#19700) Signed-off-by: n2ptr --- tests/mq_llm_engine/test_error_handling.py | 10 +++++----- vllm/engine/llm_engine.py | 4 ++++ vllm/v1/engine/llm_engine.py | 5 +++++ 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/tests/mq_llm_engine/test_error_handling.py b/tests/mq_llm_engine/test_error_handling.py index 49b02279d61bb..3feee01dadf73 100644 --- a/tests/mq_llm_engine/test_error_handling.py +++ b/tests/mq_llm_engine/test_error_handling.py @@ -66,7 +66,7 @@ async def test_evil_forward(tmp_socket): with pytest.raises(MQEngineDeadError): async for _ in client.generate(prompt="Hello my name is", sampling_params=SamplingParams(), - request_id=uuid.uuid4()): + request_id=str(uuid.uuid4())): pass assert client.errored @@ -115,7 +115,7 @@ async def test_failed_health_check(tmp_socket): with pytest.raises(MQEngineDeadError): async for _ in client.generate(prompt="Hello my name is", sampling_params=SamplingParams(), - request_id=uuid.uuid4()): + request_id=str(uuid.uuid4())): pass client.close() @@ -157,7 +157,7 @@ async def test_failed_abort(tmp_socket): async for _ in client.generate( prompt="Hello my name is", sampling_params=SamplingParams(max_tokens=10), - request_id=uuid.uuid4()): + request_id=str(uuid.uuid4())): pass assert "KeyError" in repr(execinfo.value) assert client.errored @@ -189,7 +189,7 @@ async def test_batch_error(tmp_socket): params = SamplingParams(min_tokens=2048, max_tokens=2048) async for _ in client.generate(prompt="Hello my name is", sampling_params=params, - request_id=uuid.uuid4()): + request_id=str(uuid.uuid4())): pass tasks = [asyncio.create_task(do_generate(client)) for _ in range(10)] @@ -289,7 +289,7 @@ async def test_engine_process_death(tmp_socket): with pytest.raises(MQEngineDeadError): async for _ in client.generate(prompt="Hello my name is", sampling_params=SamplingParams(), - request_id=uuid.uuid4()): + request_id=str(uuid.uuid4())): pass # And the health check should show the engine is dead diff --git a/vllm/engine/llm_engine.py b/vllm/engine/llm_engine.py index 8fccf9bd2aa00..25fa1c3058bef 100644 --- a/vllm/engine/llm_engine.py +++ b/vllm/engine/llm_engine.py @@ -687,6 +687,10 @@ class LLMEngine: >>> # continue the request processing >>> ... """ + if not isinstance(request_id, str): + raise TypeError( + f"request_id must be a string, got {type(request_id)}") + if lora_request is not None and not self.lora_config: raise ValueError(f"Got lora_request {lora_request} but LoRA is " "not enabled!") diff --git a/vllm/v1/engine/llm_engine.py b/vllm/v1/engine/llm_engine.py index 25fab27131142..a2328c37ba0c5 100644 --- a/vllm/v1/engine/llm_engine.py +++ b/vllm/v1/engine/llm_engine.py @@ -192,6 +192,11 @@ class LLMEngine: prompt_adapter_request: Optional[PromptAdapterRequest] = None, priority: int = 0, ) -> None: + # Validate the request_id type. + if not isinstance(request_id, str): + raise TypeError( + f"request_id must be a string, got {type(request_id)}") + # Process raw inputs into the request. prompt_str, request = self.processor.process_inputs( request_id, prompt, params, arrival_time, lora_request, From a29e62ea3452bc6b1d4f3eeac2dc9a6b30357c4d Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Sat, 28 Jun 2025 14:48:13 +0900 Subject: [PATCH 183/211] Fix num_token_padding support for static per-tensor scaled_fp8_quant (#20188) Signed-off-by: mgoin --- vllm/_custom_ops.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vllm/_custom_ops.py b/vllm/_custom_ops.py index 215f35bad34d9..51900de1cc099 100644 --- a/vllm/_custom_ops.py +++ b/vllm/_custom_ops.py @@ -1274,8 +1274,7 @@ def scaled_fp8_quant( scale = torch.zeros(1, device=input.device, dtype=torch.float32) torch.ops._C.dynamic_scaled_fp8_quant(output, input, scale) else: - # num_token_padding not implemented for this case - assert (scale.numel() == 1 and num_token_padding is None) + assert scale.numel() == 1 torch.ops._C.static_scaled_fp8_quant(output, input, scale) return output, scale From d45417b804aaf7f90c9ae70a32f8f07d6b371a8c Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Sat, 28 Jun 2025 01:50:00 -0400 Subject: [PATCH 184/211] fix ci issue distributed 4 gpu test (#20204) Signed-off-by: yewentao256 --- examples/offline_inference/data_parallel.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/examples/offline_inference/data_parallel.py b/examples/offline_inference/data_parallel.py index 3eccb4e11ab6f..dbf8ed58cc477 100644 --- a/examples/offline_inference/data_parallel.py +++ b/examples/offline_inference/data_parallel.py @@ -64,6 +64,18 @@ def parse_args(): parser.add_argument( "--trust-remote-code", action="store_true", help="Trust remote code." ) + parser.add_argument( + "--max-num-seqs", + type=int, + default=64, + help=("Maximum number of sequences to be processed in a single iteration."), + ) + parser.add_argument( + "--gpu-memory-utilization", + type=float, + default=0.8, + help=("Fraction of GPU memory vLLM is allowed to allocate (0.0, 1.0]."), + ) return parser.parse_args() @@ -77,6 +89,8 @@ def main( GPUs_per_dp_rank, enforce_eager, trust_remote_code, + max_num_seqs, + gpu_memory_utilization, ): os.environ["VLLM_DP_RANK"] = str(global_dp_rank) os.environ["VLLM_DP_RANK_LOCAL"] = str(local_dp_rank) @@ -127,6 +141,8 @@ def main( enforce_eager=enforce_eager, enable_expert_parallel=True, trust_remote_code=trust_remote_code, + max_num_seqs=max_num_seqs, + gpu_memory_utilization=gpu_memory_utilization, ) outputs = llm.generate(prompts, sampling_params) # Print the outputs. @@ -181,6 +197,8 @@ if __name__ == "__main__": tp_size, args.enforce_eager, args.trust_remote_code, + args.max_num_seqs, + args.gpu_memory_utilization, ), ) proc.start() From f719772281c76fee6a8641647aa40fbab8a0f3a4 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Sat, 28 Jun 2025 14:50:52 +0900 Subject: [PATCH 185/211] [Bugfix] Properly reject requests with empty list guided_choice (#20195) Signed-off-by: mgoin --- vllm/v1/engine/processor.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/vllm/v1/engine/processor.py b/vllm/v1/engine/processor.py index a0b170ba55ad7..7e7703df2cf10 100644 --- a/vllm/v1/engine/processor.py +++ b/vllm/v1/engine/processor.py @@ -173,6 +173,12 @@ class Processor: params.guided_decoding.backend = engine_level_backend # Request content validation + if (isinstance(params.guided_decoding.choice, list) + and not params.guided_decoding.choice): + # It is invalid for choice to be an empty list + raise ValueError(f"Choice '{params.guided_decoding.choice}' " + "cannot be an empty list") + if engine_level_backend.startswith("xgrammar"): # xgrammar with no fallback validate_xgrammar_grammar(params) From 7b460c25f987dfb6ca7cebc7d1e2c26989801674 Mon Sep 17 00:00:00 2001 From: Jiayi Yan <66017932+1195343015@users.noreply.github.com> Date: Sat, 28 Jun 2025 13:51:16 +0800 Subject: [PATCH 186/211] [BugFix] Fix the incorrect func name in the comments. (config.py) (#20185) --- vllm/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vllm/config.py b/vllm/config.py index 84aa14b7c8605..57b9df2364775 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -1484,7 +1484,7 @@ class CacheConfig: sizes up to 32 are supported. On HPU devices, block size defaults to 128. This config has no static default. If left unspecified by the user, it will - be set in `Platform.check_and_update_configs()` based on the current + be set in `Platform.check_and_update_config()` based on the current platform.""" gpu_memory_utilization: float = 0.9 """The fraction of GPU memory to be used for the model executor, which can From 8615d9776fb066e3b11284bb9e6871e5d8820463 Mon Sep 17 00:00:00 2001 From: Thomas Parnell Date: Sat, 28 Jun 2025 08:00:25 +0200 Subject: [PATCH 187/211] [CI/Build] Add new CI job to validate Hybrid Models for every PR (#20147) Signed-off-by: Thomas Parnell --- .buildkite/test-pipeline.yaml | 13 ++++++++++++- pyproject.toml | 1 + tests/models/language/generation/test_hybrid.py | 3 +++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 7f1841b1c97cd..a13e2cb782182 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -536,6 +536,17 @@ steps: - pip freeze | grep -E 'torch' - pytest -v -s models/language -m core_model +- label: Language Models Test (Hybrid) # 35 min + mirror_hardwares: [amdexperimental] + torch_nightly: true + source_file_dependencies: + - vllm/ + - tests/models/language/generation + commands: + # Install causal-conv1d for plamo2 models here, as it is not compatible with pip-compile. + - pip install 'git+https://github.com/Dao-AILab/causal-conv1d@v1.5.0.post8' + - pytest -v -s models/language/generation -m hybrid_model + - label: Language Models Test (Extended Generation) # 1hr20min mirror_hardwares: [amdexperimental] optional: true @@ -545,7 +556,7 @@ steps: commands: # Install causal-conv1d for plamo2 models here, as it is not compatible with pip-compile. - pip install 'git+https://github.com/Dao-AILab/causal-conv1d@v1.5.0.post8' - - pytest -v -s models/language/generation -m 'not core_model' + - pytest -v -s models/language/generation -m '(not core_model) and (not hybrid_model)' - label: Language Models Test (Extended Pooling) # 36min mirror_hardwares: [amdexperimental] diff --git a/pyproject.toml b/pyproject.toml index e8c2403af064f..fb45572d265b7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -150,6 +150,7 @@ skip_gitignore = true markers = [ "skip_global_cleanup", "core_model: enable this model test in each PR instead of only nightly", + "hybrid_model: models that contain mamba layers (including pure SSM and hybrid architectures)", "cpu_model: enable this model test in CPU tests", "split: run this test as part of a split", "distributed: run this test only in distributed GPU tests", diff --git a/tests/models/language/generation/test_hybrid.py b/tests/models/language/generation/test_hybrid.py index 90c4cd968e7a2..b2348e6449339 100644 --- a/tests/models/language/generation/test_hybrid.py +++ b/tests/models/language/generation/test_hybrid.py @@ -9,6 +9,9 @@ from vllm.sampling_params import SamplingParams from ...utils import check_logprobs_close, check_outputs_equal +# Mark all tests as hybrid +pytestmark = pytest.mark.hybrid_model + # NOTE: The first model in each list is taken as the primary model, # meaning that it will be used in all tests in this file # The rest of the models will only be tested by test_models From daceac57c7d79c4736d64621610076b3a98b0209 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicol=C3=B2=20Lucchesi?= Date: Sat, 28 Jun 2025 17:15:26 +0200 Subject: [PATCH 188/211] [Frontend] Generalize `v1/audio/transcriptions` endpoint (#20179) Signed-off-by: NickLucche --- vllm/entrypoints/openai/speech_to_text.py | 142 +++------------------- vllm/model_executor/models/interfaces.py | 11 ++ vllm/model_executor/models/whisper.py | 129 ++++++++++++++++++++ 3 files changed, 154 insertions(+), 128 deletions(-) diff --git a/vllm/entrypoints/openai/speech_to_text.py b/vllm/entrypoints/openai/speech_to_text.py index b23cf6cab0979..6c16e53245314 100644 --- a/vllm/entrypoints/openai/speech_to_text.py +++ b/vllm/entrypoints/openai/speech_to_text.py @@ -24,6 +24,7 @@ from vllm.entrypoints.openai.serving_engine import (OpenAIServing, from vllm.entrypoints.openai.serving_models import OpenAIServingModels from vllm.inputs.data import PromptType from vllm.logger import init_logger +from vllm.model_executor.model_loader.utils import get_model_architecture from vllm.outputs import RequestOutput from vllm.transformers_utils.processor import cached_get_processor from vllm.utils import PlaceholderModule @@ -38,118 +39,10 @@ T = TypeVar("T", bound=SpeechToTextResponse) logger = init_logger(__name__) -# From https://platform.openai.com/docs/guides/speech-to-text/supported-languages -# TODO these configs should live somewhere with the model so we can support -# additional ones - -ISO639_1_SUPPORTED_LANGS = { - "af": "Afrikaans", - "ar": "Arabic", - "hy": "Armenian", - "az": "Azerbaijani", - "be": "Belarusian", - "bs": "Bosnian", - "bg": "Bulgarian", - "ca": "Catalan", - "zh": "Chinese", - "hr": "Croatian", - "cs": "Czech", - "da": "Danish", - "nl": "Dutch", - "en": "English", - "et": "Estonian", - "fi": "Finnish", - "fr": "French", - "gl": "Galician", - "de": "German", - "el": "Greek", - "he": "Hebrew", - "hi": "Hindi", - "hu": "Hungarian", - "is": "Icelandic", - "id": "Indonesian", - "it": "Italian", - "ja": "Japanese", - "kn": "Kannada", - "kk": "Kazakh", - "ko": "Korean", - "lv": "Latvian", - "lt": "Lithuanian", - "mk": "Macedonian", - "ms": "Malay", - "mr": "Marathi", - "mi": "Maori", - "ne": "Nepali", - "no": "Norwegian", - "fa": "Persian", - "pl": "Polish", - "pt": "Portuguese", - "ro": "Romanian", - "ru": "Russian", - "sr": "Serbian", - "sk": "Slovak", - "sl": "Slovenian", - "es": "Spanish", - "sw": "Swahili", - "sv": "Swedish", - "tl": "Tagalog", - "ta": "Tamil", - "th": "Thai", - "tr": "Turkish", - "uk": "Ukrainian", - "ur": "Urdu", - "vi": "Vietnamese", - "cy": "Welsh" -} -ISO639_1_OTHER_LANGS = { - "lo": "Lao", - "jw": "Javanese", - "tk": "Turkmen", - "yi": "Yiddish", - "so": "Somali", - "bn": "Bengali", - "nn": "Norwegian Nynorsk", - "si": "Sinhala", - "yo": "Yoruba", - "sa": "Sanskrit", - "mi": "Māori", - "fo": "Faroese", # codespell:ignore - "mt": "Maltese", - "tg": "Tajik", - "mg": "Malagasy", - "haw": "Hawaiian", - "km": "Khmer", - "br": "Breton", - "ps": "Pashto", - "ln": "Lingala", - "la": "Latin", - "ml": "Malayalam", - "sq": "Albanian", - "su": "Sundanese", - "eu": "Basque", - "ka": "Georgian", - "uz": "Uzbek", - "sn": "Shona", - "ht": "Haitian", - "as": "Assamese", - "mn": "Mongolian", - "te": "Telugu", - "pa": "Panjabi", - "tt": "Tatar", - "gu": "Gujarati", - "oc": "Occitan", - "ha": "Hausa", - "ba": "Bashkir", - "my": "Burmese", - "sd": "Sindhi", - "am": "Amharic", - "lb": "Luxembourgish", - "bo": "Tibetan" -} - # As per https://platform.openai.com/docs/guides/speech-to-text#overview. # TODO configurable MAX_AUDIO_CLIP_FILESIZE_MB = 25 +MAX_AUDIO_CLIP_SECONDS = 30 OVERLAP_CHUNK_SECOND = 1 MIN_ENERGY_WINDOW_SIZE = 1600 # 1600 ~ 100ms for 16000 Hz audio @@ -177,10 +70,13 @@ class OpenAISpeechToText(OpenAIServing): self.default_sampling_params = ( self.model_config.get_diff_sampling_param()) processor = cached_get_processor(model_config.model) - self.max_audio_clip_s = processor.feature_extractor.chunk_length + self.max_audio_clip_s = processor.feature_extractor.chunk_length \ + if hasattr(processor.feature_extractor, 'chunk_length') \ + else MAX_AUDIO_CLIP_SECONDS self.model_sr = processor.feature_extractor.sampling_rate self.hop_length = processor.feature_extractor.hop_length self.task_type = task_type + self.model_cls, _ = get_model_architecture(model_config) if self.default_sampling_params: logger.info( @@ -196,21 +92,8 @@ class OpenAISpeechToText(OpenAIServing): # TODO language should be optional and can be guessed. # For now we default to en. See # https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/generation_whisper.py#L1520 - lang_token = f"<|{request.language}|>" if request.language else "<|en|>" - if request.language: - if request.language in ISO639_1_SUPPORTED_LANGS: - pass - elif request.language in ISO639_1_OTHER_LANGS: - logger.warning( - "The selected language %s has limited accuracy with" - " reported WER>=0.5. Results may be less accurate " - "for this choice.", request.language) - else: - raise ValueError( - f"Unsupported language: {request.language}." - "Language should be one of:" + - f" {list(ISO639_1_SUPPORTED_LANGS.values())}" + - f"or {list(ISO639_1_OTHER_LANGS.values())}") + lang = request.language or "en" + self.model_cls.validate_language(lang) # type: ignore[attr-defined] if len(audio_data) / 1024**2 > MAX_AUDIO_CLIP_FILESIZE_MB: raise ValueError("Maximum file size exceeded.") @@ -221,7 +104,9 @@ class OpenAISpeechToText(OpenAIServing): y, sr = librosa.load(bytes_, sr=self.model_sr) duration = librosa.get_duration(y=y, sr=sr) - chunks = [y] if duration < 30 else self._split_audio(y, int(sr)) + chunks = [y + ] if duration < self.max_audio_clip_s else self._split_audio( + y, int(sr)) prompts = [] for chunk in chunks: prompt = { @@ -232,8 +117,9 @@ class OpenAISpeechToText(OpenAIServing): }, }, "decoder_prompt": - (f"<|startoftranscript|>{lang_token}" - f"<|{self.task_type}|><|notimestamps|>{request.prompt}") + self.model_cls. + get_decoder_prompt( # type: ignore[attr-defined] + lang, self.task_type, request.prompt) } prompts.append(cast(PromptType, prompt)) return prompts, duration diff --git a/vllm/model_executor/models/interfaces.py b/vllm/model_executor/models/interfaces.py index 3ea424e44b62e..ad59fe79edcb1 100644 --- a/vllm/model_executor/models/interfaces.py +++ b/vllm/model_executor/models/interfaces.py @@ -599,6 +599,17 @@ class SupportsTranscription(Protocol): supports_transcription: ClassVar[Literal[True]] = True + @classmethod + def get_decoder_prompt(cls, language: str, task_type: str, + prompt: str) -> str: + """Get the decoder prompt for the ASR model.""" + ... + + @classmethod + def validate_language(cls, language: str) -> bool: + """Check if the model supports a specific ISO639_1 language.""" + ... + @overload def supports_transcription( diff --git a/vllm/model_executor/models/whisper.py b/vllm/model_executor/models/whisper.py index 8cf2a009d6670..5a0094fa749fd 100644 --- a/vllm/model_executor/models/whisper.py +++ b/vllm/model_executor/models/whisper.py @@ -41,6 +41,113 @@ from .utils import (AutoWeightsLoader, WeightsMapper, cast_overflow_tensors, logger = init_logger(__name__) +# From https://platform.openai.com/docs/guides/speech-to-text/supported-languages + +ISO639_1_SUPPORTED_LANGS = { + "af": "Afrikaans", + "ar": "Arabic", + "hy": "Armenian", + "az": "Azerbaijani", + "be": "Belarusian", + "bs": "Bosnian", + "bg": "Bulgarian", + "ca": "Catalan", + "zh": "Chinese", + "hr": "Croatian", + "cs": "Czech", + "da": "Danish", + "nl": "Dutch", + "en": "English", + "et": "Estonian", + "fi": "Finnish", + "fr": "French", + "gl": "Galician", + "de": "German", + "el": "Greek", + "he": "Hebrew", + "hi": "Hindi", + "hu": "Hungarian", + "is": "Icelandic", + "id": "Indonesian", + "it": "Italian", + "ja": "Japanese", + "kn": "Kannada", + "kk": "Kazakh", + "ko": "Korean", + "lv": "Latvian", + "lt": "Lithuanian", + "mk": "Macedonian", + "ms": "Malay", + "mr": "Marathi", + "mi": "Maori", + "ne": "Nepali", + "no": "Norwegian", + "fa": "Persian", + "pl": "Polish", + "pt": "Portuguese", + "ro": "Romanian", + "ru": "Russian", + "sr": "Serbian", + "sk": "Slovak", + "sl": "Slovenian", + "es": "Spanish", + "sw": "Swahili", + "sv": "Swedish", + "tl": "Tagalog", + "ta": "Tamil", + "th": "Thai", + "tr": "Turkish", + "uk": "Ukrainian", + "ur": "Urdu", + "vi": "Vietnamese", + "cy": "Welsh" +} +ISO639_1_OTHER_LANGS = { + "lo": "Lao", + "jw": "Javanese", + "tk": "Turkmen", + "yi": "Yiddish", + "so": "Somali", + "bn": "Bengali", + "nn": "Norwegian Nynorsk", + "si": "Sinhala", + "yo": "Yoruba", + "sa": "Sanskrit", + "mi": "Māori", + "fo": "Faroese", # codespell:ignore + "mt": "Maltese", + "tg": "Tajik", + "mg": "Malagasy", + "haw": "Hawaiian", + "km": "Khmer", + "br": "Breton", + "ps": "Pashto", + "ln": "Lingala", + "la": "Latin", + "ml": "Malayalam", + "sq": "Albanian", + "su": "Sundanese", + "eu": "Basque", + "ka": "Georgian", + "uz": "Uzbek", + "sn": "Shona", + "ht": "Haitian", + "as": "Assamese", + "mn": "Mongolian", + "te": "Telugu", + "pa": "Panjabi", + "tt": "Tatar", + "gu": "Gujarati", + "oc": "Occitan", + "ha": "Hausa", + "ba": "Bashkir", + "my": "Burmese", + "sd": "Sindhi", + "am": "Amharic", + "lb": "Luxembourgish", + "bo": "Tibetan" +} + class WhisperAudioInputs(TypedDict): input_features: NestedTensors @@ -731,6 +838,28 @@ class WhisperForConditionalGeneration(nn.Module, SupportsTranscription, weights = _create_fake_bias_for_k_proj(weights) return loader.load_weights(weights, mapper=self.hf_to_vllm_mapper) + @classmethod + def validate_language(cls, language: str) -> bool: + if language in ISO639_1_SUPPORTED_LANGS: + return True + elif language in ISO639_1_OTHER_LANGS: + logger.warning( + "The selected language %s has limited accuracy with" + " reported WER>=0.5. Results may be less accurate " + "for this choice.", language) + return True + else: + raise ValueError(f"Unsupported language: {language}." + "Language should be one of:" + + f" {list(ISO639_1_SUPPORTED_LANGS.values())}" + + f"or {list(ISO639_1_OTHER_LANGS.values())}") + + @classmethod + def get_decoder_prompt(cls, language: str, task_type: str, + prompt: str) -> str: + return (f"<|startoftranscript|><|{language}|><|{task_type}|>" + f"<|notimestamps|>{prompt}") + def _create_fake_bias_for_k_proj( weights: Iterable[tuple[str, torch.Tensor]] From daec9dea6e9bb95cb12faa38e347196de4f672a0 Mon Sep 17 00:00:00 2001 From: Stan Wozniak <77159600+s3woz@users.noreply.github.com> Date: Sat, 28 Jun 2025 17:16:41 +0200 Subject: [PATCH 189/211] [Bugfix] Correct behavior of GraniteMoeHybrid for TensorParallel execution (#20137) Signed-off-by: Stanislaw Wozniak --- .../generation/test_granitemoehybrid.py | 42 ------- .../models/language/generation/test_hybrid.py | 5 +- .../model_executor/models/granitemoehybrid.py | 104 ++++++++++++------ 3 files changed, 73 insertions(+), 78 deletions(-) delete mode 100644 tests/models/language/generation/test_granitemoehybrid.py diff --git a/tests/models/language/generation/test_granitemoehybrid.py b/tests/models/language/generation/test_granitemoehybrid.py deleted file mode 100644 index 952449f284159..0000000000000 --- a/tests/models/language/generation/test_granitemoehybrid.py +++ /dev/null @@ -1,42 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# SPDX-FileCopyrightText: Copyright contributors to the vLLM project - -import pytest - -from ...utils import check_logprobs_close - -# Path of the checkpoints -MODELS = [ - "ibm-granite/granite-4.0-tiny-preview", -] - - -@pytest.mark.skip( - reason="Granite 4.0 is not yet available in huggingface transformers") -@pytest.mark.parametrize("model", MODELS) -@pytest.mark.parametrize("dtype", ["float16", "bfloat16"]) -@pytest.mark.parametrize("max_tokens", [64]) -@pytest.mark.parametrize("num_logprobs", [5]) -def test_model_equivalence_to_hf_greedy( - hf_runner, - vllm_runner, - example_prompts, - model: str, - dtype: str, - max_tokens: int, - num_logprobs: int, -): - with vllm_runner(model, dtype=dtype) as vllm_model: - vllm_outputs = vllm_model.generate_greedy_logprobs( - example_prompts, max_tokens, num_logprobs) - - with hf_runner(model, dtype=dtype) as hf_model: - hf_outputs = hf_model.generate_greedy_logprobs_limit( - example_prompts, max_tokens, num_logprobs) - - check_logprobs_close( - outputs_0_lst=hf_outputs, - outputs_1_lst=vllm_outputs, - name_0="hf", - name_1="vllm", - ) diff --git a/tests/models/language/generation/test_hybrid.py b/tests/models/language/generation/test_hybrid.py index b2348e6449339..e6dd6c35e64d6 100644 --- a/tests/models/language/generation/test_hybrid.py +++ b/tests/models/language/generation/test_hybrid.py @@ -28,8 +28,9 @@ SSM_MODELS = [ HYBRID_MODELS = [ "ai21labs/Jamba-tiny-dev", - # NOTE: ibm-granite/granite-4.0-tiny-preview are skipped currently as - # it is not yet available in huggingface transformers + # NOTE: Currently the test failes due to HF transformers issue fixed in: + # https://github.com/huggingface/transformers/pull/39033 + # We will enable vLLM test for Granite after next HF transformers release. # "ibm-granite/granite-4.0-tiny-preview", # NOTE: Running Plamo2 in transformers implementation requires to install # causal-conv1d package, which is not listed as a test dependency as it's diff --git a/vllm/model_executor/models/granitemoehybrid.py b/vllm/model_executor/models/granitemoehybrid.py index 26b5b3ac15345..33e8626209d50 100644 --- a/vllm/model_executor/models/granitemoehybrid.py +++ b/vllm/model_executor/models/granitemoehybrid.py @@ -15,7 +15,8 @@ from vllm.distributed import divide, get_tensor_model_parallel_world_size from vllm.distributed.parallel_state import get_pp_group from vllm.forward_context import get_forward_context from vllm.model_executor.layers.layernorm import RMSNorm -from vllm.model_executor.layers.linear import ReplicatedLinear +from vllm.model_executor.layers.linear import (QKVParallelLinear, + RowParallelLinear) from vllm.model_executor.layers.logits_processor import LogitsProcessor from vllm.model_executor.layers.mamba.mamba2_metadata import ( Mamba2Metadata, prepare_mamba2_metadata) @@ -36,8 +37,9 @@ from .granitemoe import GraniteMoeMoE from .granitemoeshared import GraniteMoeSharedMLP from .interfaces import (HasInnerState, IsHybrid, SupportsLoRA, SupportsPP, SupportsQuant, SupportsV0Only) -from .utils import (AutoWeightsLoader, make_empty_intermediate_tensors_factory, - make_layers, maybe_prefix) +from .utils import (AutoWeightsLoader, is_pp_missing_parameter, + make_empty_intermediate_tensors_factory, make_layers, + maybe_prefix) class GraniteMoeHybridMambaDecoderLayer(nn.Module): @@ -220,35 +222,37 @@ class GraniteMoeHybridAttention(nn.Module): self.hidden_size = config.hidden_size self.attention_bias = config.attention_bias self.attention_multiplier = config.attention_multiplier - self.num_heads = config.num_attention_heads - self.head_dim = self.hidden_size // self.num_heads - self.num_key_value_heads = config.num_key_value_heads + self.total_num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.total_num_heads + self.total_num_kv_heads = config.num_key_value_heads - self.q_proj = ReplicatedLinear(self.hidden_size, - self.num_heads * self.head_dim, - bias=self.attention_bias, - quant_config=quant_config, - prefix=f"{prefix}.q_proj") + # TensorParallel logic + tp_size = get_tensor_model_parallel_world_size() + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. + assert tp_size % self.total_num_kv_heads == 0 + self.num_key_value_heads = max(1, self.total_num_kv_heads // tp_size) - self.k_proj = ReplicatedLinear(self.hidden_size, - self.num_key_value_heads * - self.head_dim, - bias=self.attention_bias, - quant_config=quant_config, - prefix=f"{prefix}.k_proj") + self.qkv_proj = QKVParallelLinear(self.hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=self.attention_bias, + quant_config=quant_config, + prefix=f"{prefix}.qkv_proj") - self.v_proj = ReplicatedLinear(self.hidden_size, - self.num_key_value_heads * - self.head_dim, - bias=self.attention_bias, - quant_config=quant_config, - prefix=f"{prefix}.v_proj") - - self.o_proj = ReplicatedLinear(self.hidden_size, - self.hidden_size, - bias=self.attention_bias, - quant_config=quant_config, - prefix=f"{prefix}.o_proj") + self.o_proj = RowParallelLinear(self.hidden_size, + self.hidden_size, + bias=self.attention_bias, + quant_config=quant_config, + prefix=f"{prefix}.o_proj") if config.position_embedding_type == "rope": self.rotary_emb = get_rope( @@ -278,9 +282,12 @@ class GraniteMoeHybridAttention(nn.Module): hidden_states: torch.Tensor, ) -> torch.Tensor: - query = self.q_proj(hidden_states)[0] - key = self.k_proj(hidden_states)[0] - value = self.v_proj(hidden_states)[0] + qkv, _ = self.qkv_proj(hidden_states) + query, key, value = qkv.split([ + self.num_heads * self.head_dim, self.num_key_value_heads * + self.head_dim, self.num_key_value_heads * self.head_dim + ], + dim=-1) if self.rotary_emb is not None: query, key = self.rotary_emb(positions, query, key) @@ -401,6 +408,12 @@ class GraniteMoeHybridModel(nn.Module): def load_weights(self, weights: Iterable[tuple[str, torch.Tensor]]) -> set[str]: + stacked_params_mapping = [ + # (param_name, shard_name, shard_id) + (".qkv_proj", ".q_proj", "q"), + (".qkv_proj", ".k_proj", "k"), + (".qkv_proj", ".v_proj", "v"), + ] params_dict = dict(self.named_parameters()) loaded_params: set[str] = set() @@ -411,6 +424,15 @@ class GraniteMoeHybridModel(nn.Module): weight_loader(param, p) loaded_params.add(n) + def _load_shard(n, p, shard_id): + # Skip layers on other devices. + if not is_pp_missing_parameter(n, self): + param = params_dict[n] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, p, shard_id) + loaded_params.add(n) + def _load_expert(n, p, name, shard_id, expert_id): param = params_dict[n] weight_loader = getattr(param, "weight_loader", @@ -465,7 +487,15 @@ class GraniteMoeHybridModel(nn.Module): ".block_sparse_moe.gate.weight") _load(gate_name, p) else: - _load(n, p) + loaded = False + for param_name, weight_name, shard_id in stacked_params_mapping: + if weight_name in n: + _load_shard(n.replace(weight_name, param_name), + p, + shard_id=shard_id) + loaded = True + if not loaded: + _load(n, p) return loaded_params @@ -473,7 +503,13 @@ class GraniteMoeHybridModel(nn.Module): class GraniteMoeHybridForCausalLM(nn.Module, HasInnerState, SupportsLoRA, SupportsPP, IsHybrid, SupportsV0Only, SupportsQuant): - packed_modules_mapping = {} + packed_modules_mapping = { + "qkv_proj": [ + "q_proj", + "k_proj", + "v_proj", + ], + } embedding_modules = { "embed_tokens": "input_embeddings", "lm_head": "output_embeddings", From 4d366936875330526908185ac93ed1e0e0eb7f40 Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Sat, 28 Jun 2025 18:06:38 -0400 Subject: [PATCH 190/211] [Refactor] Create a function util and cache the results for `has_deepgemm`, `has_deepep`, `has_pplx` (#20187) Signed-off-by: yewentao256 --- tests/kernels/moe/test_deepep_deepgemm_moe.py | 20 +++++-------- tests/kernels/moe/test_deepep_moe.py | 8 ++---- .../device_communicators/all2all.py | 10 +++---- .../layers/fused_moe/batched_deep_gemm_moe.py | 3 -- .../layers/fused_moe/deep_gemm_moe.py | 12 ++++---- vllm/model_executor/layers/fused_moe/layer.py | 10 ++----- vllm/model_executor/layers/fused_moe/utils.py | 2 +- .../compressed_tensors_moe.py | 8 ++---- .../layers/quantization/deepgemm.py | 6 ++-- .../model_executor/layers/quantization/fp8.py | 6 ++-- .../layers/quantization/utils/fp8_utils.py | 6 ++-- vllm/utils.py | 28 +++++++++++++++++++ 12 files changed, 61 insertions(+), 58 deletions(-) diff --git a/tests/kernels/moe/test_deepep_deepgemm_moe.py b/tests/kernels/moe/test_deepep_deepgemm_moe.py index f580dee4c9285..475427f439289 100644 --- a/tests/kernels/moe/test_deepep_deepgemm_moe.py +++ b/tests/kernels/moe/test_deepep_deepgemm_moe.py @@ -6,7 +6,6 @@ fp8 block-quantized case. """ import dataclasses -import importlib from typing import Optional import pytest @@ -21,18 +20,11 @@ from vllm.model_executor.layers.fused_moe.modular_kernel import ( from vllm.model_executor.layers.quantization.utils.fp8_utils import ( per_token_group_quant_fp8) from vllm.platforms import current_platform +from vllm.utils import has_deep_ep, has_deep_gemm from .utils import ProcessGroupInfo, parallel_launch -has_deep_ep = importlib.util.find_spec("deep_ep") is not None - -try: - import deep_gemm - has_deep_gemm = True -except ImportError: - has_deep_gemm = False - -if has_deep_ep: +if has_deep_ep(): from vllm.model_executor.layers.fused_moe.deepep_ht_prepare_finalize import ( # noqa: E501 DeepEPHTPrepareAndFinalize) from vllm.model_executor.layers.fused_moe.deepep_ll_prepare_finalize import ( # noqa: E501 @@ -40,19 +32,21 @@ if has_deep_ep: from .deepep_utils import DeepEPHTArgs, DeepEPLLArgs, make_deepep_a2a -if has_deep_gemm: +if has_deep_gemm(): + import deep_gemm + from vllm.model_executor.layers.fused_moe.batched_deep_gemm_moe import ( BatchedDeepGemmExperts) from vllm.model_executor.layers.fused_moe.deep_gemm_moe import ( DeepGemmExperts) requires_deep_ep = pytest.mark.skipif( - not has_deep_ep, + not has_deep_ep(), reason="Requires deep_ep kernels", ) requires_deep_gemm = pytest.mark.skipif( - not has_deep_gemm, + not has_deep_gemm(), reason="Requires deep_gemm kernels", ) diff --git a/tests/kernels/moe/test_deepep_moe.py b/tests/kernels/moe/test_deepep_moe.py index 380eb43c42a40..80a36dc39712a 100644 --- a/tests/kernels/moe/test_deepep_moe.py +++ b/tests/kernels/moe/test_deepep_moe.py @@ -4,7 +4,6 @@ Test deepep dispatch-combine logic """ import dataclasses -import importlib from typing import Optional, Union import pytest @@ -22,12 +21,11 @@ from vllm.model_executor.layers.fused_moe.modular_kernel import ( from vllm.model_executor.layers.quantization.utils.fp8_utils import ( per_token_group_quant_fp8) from vllm.platforms import current_platform +from vllm.utils import has_deep_ep from .utils import ProcessGroupInfo, parallel_launch -has_deep_ep = importlib.util.find_spec("deep_ep") is not None - -if has_deep_ep: +if has_deep_ep(): from vllm.model_executor.layers.fused_moe.deepep_ht_prepare_finalize import ( # noqa: E501 DeepEPHTPrepareAndFinalize) from vllm.model_executor.layers.fused_moe.deepep_ll_prepare_finalize import ( # noqa: E501 @@ -36,7 +34,7 @@ if has_deep_ep: from .deepep_utils import DeepEPHTArgs, DeepEPLLArgs, make_deepep_a2a requires_deep_ep = pytest.mark.skipif( - not has_deep_ep, + not has_deep_ep(), reason="Requires deep_ep kernels", ) diff --git a/vllm/distributed/device_communicators/all2all.py b/vllm/distributed/device_communicators/all2all.py index 35f2fd0ba9e22..85f87cb21edcd 100644 --- a/vllm/distributed/device_communicators/all2all.py +++ b/vllm/distributed/device_communicators/all2all.py @@ -1,6 +1,5 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -import importlib.util from typing import TYPE_CHECKING, Any import torch @@ -8,6 +7,7 @@ import torch.distributed as dist from vllm.forward_context import get_forward_context from vllm.logger import init_logger +from vllm.utils import has_deep_ep, has_pplx from .base_device_communicator import All2AllManagerBase, Cache @@ -80,8 +80,8 @@ class PPLXAll2AllManager(All2AllManagerBase): """ def __init__(self, cpu_group): - has_pplx = importlib.util.find_spec("pplx_kernels") is not None - assert has_pplx, "pplx_kernels not found. Please follow https://github.com/vllm-project/vllm/blob/main/tools/ep_kernels/README.md to install pplx_kernels." # noqa + assert has_pplx( + ), "pplx_kernels not found. Please follow https://github.com/vllm-project/vllm/blob/main/tools/ep_kernels/README.md to install pplx_kernels." # noqa super().__init__(cpu_group) if self.internode: @@ -133,8 +133,8 @@ class DeepEPAll2AllManagerBase(All2AllManagerBase): """ def __init__(self, cpu_group): - has_deepep = importlib.util.find_spec("deep_ep") is not None - assert has_deepep, "DeepEP kernels not found. Please follow https://github.com/vllm-project/vllm/blob/main/tools/ep_kernels/README.md to install DeepEP kernels." # noqa + assert has_deep_ep( + ), "DeepEP kernels not found. Please follow https://github.com/vllm-project/vllm/blob/main/tools/ep_kernels/README.md to install DeepEP kernels." # noqa super().__init__(cpu_group) self.handle_cache = Cache() diff --git a/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py b/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py index 70836879d17c0..b54ac80535a42 100644 --- a/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py +++ b/vllm/model_executor/layers/fused_moe/batched_deep_gemm_moe.py @@ -1,5 +1,4 @@ # SPDX-License-Identifier: Apache-2.0 -import importlib.util from typing import Optional import torch @@ -11,8 +10,6 @@ from vllm.triton_utils import tl, triton logger = init_logger(__name__) -has_deep_gemm = importlib.util.find_spec("deep_gemm") is not None - @triton.jit def _silu_mul_fp8_quant_deep_gemm( diff --git a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py index 050d9520ca013..321fb0351ad93 100644 --- a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +++ b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import functools -import importlib.util from typing import Optional import torch @@ -12,14 +11,13 @@ from vllm.model_executor.layers.fused_moe.moe_permute_unpermute import ( _moe_permute) from vllm.model_executor.layers.fused_moe.prepare_finalize import ( MoEPrepareAndFinalizeNoEP) -from vllm.model_executor.layers.fused_moe.utils import ( - _resize_cache, per_token_group_quant_fp8) -from vllm.utils import round_up +from vllm.model_executor.layers.fused_moe.utils import _resize_cache +from vllm.model_executor.layers.quantization.utils.fp8_utils import ( + per_token_group_quant_fp8) +from vllm.utils import has_deep_gemm, round_up logger = init_logger(__name__) -has_deep_gemm = importlib.util.find_spec("deep_gemm") is not None - @functools.cache def deep_gemm_block_shape() -> list[int]: @@ -41,7 +39,7 @@ def _valid_deep_gemm(hidden_states: torch.Tensor, w1: torch.Tensor, gemm kernel. All of M, N, K and the quantization block_shape must be aligned by `dg.get_m_alignment_for_contiguous_layout()`. """ - if not has_deep_gemm: + if not has_deep_gemm(): logger.debug("DeepGemm disabled: deep_gemm not available.") return False diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index 672244385e52c..5408ef1f75e89 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -1,7 +1,6 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -import importlib from abc import abstractmethod from collections.abc import Iterable from dataclasses import dataclass @@ -32,10 +31,7 @@ from vllm.model_executor.layers.quantization.base_config import ( from vllm.model_executor.utils import set_weight_attrs from vllm.platforms import current_platform from vllm.platforms.interface import CpuArchEnum -from vllm.utils import direct_register_custom_op - -has_pplx = importlib.util.find_spec("pplx_kernels") is not None -has_deepep = importlib.util.find_spec("deep_ep") is not None +from vllm.utils import direct_register_custom_op, has_deep_ep, has_pplx if current_platform.is_cuda_alike(): from .fused_batched_moe import BatchedTritonExperts @@ -43,9 +39,9 @@ if current_platform.is_cuda_alike(): from .modular_kernel import (FusedMoEModularKernel, FusedMoEPermuteExpertsUnpermute, FusedMoEPrepareAndFinalize) - if has_pplx: + if has_pplx(): from .pplx_prepare_finalize import PplxPrepareAndFinalize - if has_deepep: + if has_deep_ep(): from .deepep_ht_prepare_finalize import DeepEPHTPrepareAndFinalize from .deepep_ll_prepare_finalize import (DEEPEP_QUANT_BLOCK_SIZE, DeepEPLLPrepareAndFinalize) diff --git a/vllm/model_executor/layers/fused_moe/utils.py b/vllm/model_executor/layers/fused_moe/utils.py index 8f3191db680fd..4c91e697f8e97 100644 --- a/vllm/model_executor/layers/fused_moe/utils.py +++ b/vllm/model_executor/layers/fused_moe/utils.py @@ -104,4 +104,4 @@ def find_free_port(): with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: s.bind(('', 0)) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - return s.getsockname()[1] \ No newline at end of file + return s.getsockname()[1] diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py index 7703b9e687c4a..4a19473005708 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py @@ -2,7 +2,6 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import enum -import importlib from enum import Enum from typing import Callable, Optional @@ -29,13 +28,12 @@ from vllm.model_executor.layers.quantization.utils.w8a8_utils import ( from vllm.model_executor.utils import set_weight_attrs from vllm.platforms import current_platform from vllm.scalar_type import scalar_types - -has_pplx = importlib.util.find_spec("pplx_kernels") is not None +from vllm.utils import has_pplx if current_platform.is_cuda_alike(): from vllm.model_executor.layers.fused_moe.fused_batched_moe import ( BatchedPrepareAndFinalize) - if has_pplx: + if has_pplx(): from vllm.model_executor.layers.fused_moe.pplx_prepare_finalize import ( PplxPrepareAndFinalize) @@ -577,7 +575,7 @@ class CompressedTensorsW8A8Fp8MoECutlassMethod(CompressedTensorsMoEMethod): use_batched_format=True, ) - if has_pplx and isinstance( + if has_pplx() and isinstance( prepare_finalize, (BatchedPrepareAndFinalize, PplxPrepareAndFinalize)): # no expert_map support in this case diff --git a/vllm/model_executor/layers/quantization/deepgemm.py b/vllm/model_executor/layers/quantization/deepgemm.py index 1d40f4915a1be..e4cf647407584 100644 --- a/vllm/model_executor/layers/quantization/deepgemm.py +++ b/vllm/model_executor/layers/quantization/deepgemm.py @@ -1,15 +1,13 @@ # SPDX-License-Identifier: Apache-2.0 -import importlib.util import logging import torch from vllm.platforms import current_platform from vllm.triton_utils import triton -from vllm.utils import direct_register_custom_op +from vllm.utils import direct_register_custom_op, has_deep_gemm -has_deep_gemm = importlib.util.find_spec("deep_gemm") is not None -if has_deep_gemm: +if has_deep_gemm(): import deep_gemm logger = logging.getLogger(__name__) diff --git a/vllm/model_executor/layers/quantization/fp8.py b/vllm/model_executor/layers/quantization/fp8.py index d2eda541f7a40..93472207fbb86 100644 --- a/vllm/model_executor/layers/quantization/fp8.py +++ b/vllm/model_executor/layers/quantization/fp8.py @@ -2,7 +2,6 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import functools -import importlib.util from typing import Any, Callable, Optional, Union import torch @@ -38,13 +37,12 @@ from vllm.model_executor.parameter import (BlockQuantScaleParameter, from vllm.model_executor.utils import set_weight_attrs from vllm.platforms import current_platform from vllm.scalar_type import scalar_types +from vllm.utils import has_deep_gemm ACTIVATION_SCHEMES = ["static", "dynamic"] logger = init_logger(__name__) -has_deep_gemm = importlib.util.find_spec("deep_gemm") is not None - def _is_col_major(x: torch.Tensor) -> bool: assert x.dim() == 3 @@ -451,7 +449,7 @@ class Fp8MoEMethod(FusedMoEMethodBase): # Check for DeepGemm support. self.allow_deep_gemm = False if envs.VLLM_USE_DEEP_GEMM: - if not has_deep_gemm: + if not has_deep_gemm(): logger.warning_once("Failed to import DeepGemm kernels.") elif not self.block_quant: logger.warning_once("Model is not block quantized. Not using " diff --git a/vllm/model_executor/layers/quantization/utils/fp8_utils.py b/vllm/model_executor/layers/quantization/utils/fp8_utils.py index 3a0fb83d627af..c38a445c571b8 100644 --- a/vllm/model_executor/layers/quantization/utils/fp8_utils.py +++ b/vllm/model_executor/layers/quantization/utils/fp8_utils.py @@ -3,7 +3,6 @@ # Adapted from https://github.com/sgl-project/sglang/pull/2575 import functools -import importlib.util import json import os from typing import Any, Callable, Optional, Union @@ -19,10 +18,9 @@ from vllm.model_executor.layers.quantization.utils.w8a8_utils import ( CUTLASS_BLOCK_FP8_SUPPORTED) from vllm.platforms import current_platform from vllm.triton_utils import tl, triton -from vllm.utils import cdiv, direct_register_custom_op +from vllm.utils import cdiv, direct_register_custom_op, has_deep_gemm logger = init_logger(__name__) -has_deep_gemm = importlib.util.find_spec("deep_gemm") is not None def is_fp8(x: Union[torch.dtype, torch.Tensor]) -> bool: @@ -109,7 +107,7 @@ def should_use_deepgemm(output_dtype: torch.dtype, weight: torch.Tensor): """ return (current_platform.is_cuda() - and current_platform.is_device_capability(90) and has_deep_gemm + and current_platform.is_device_capability(90) and has_deep_gemm() and envs.VLLM_USE_DEEP_GEMM and output_dtype == torch.bfloat16 and weight.shape[0] % 128 == 0 and weight.shape[1] % 128 == 0) diff --git a/vllm/utils.py b/vllm/utils.py index fdefda901c4d2..7eb3c1e347cde 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -2929,3 +2929,31 @@ def is_torch_equal_or_newer(target: str) -> bool: def _is_torch_equal_or_newer(torch_version: str, target: str) -> bool: torch_version = version.parse(torch_version) return torch_version >= version.parse(target) + + +@cache +def _has_module(module_name: str) -> bool: + """Return True if *module_name* can be found in the current environment. + + The result is cached so that subsequent queries for the same module incur + no additional overhead. + """ + return importlib.util.find_spec(module_name) is not None + + +def has_pplx() -> bool: + """Whether the optional `pplx_kernels` package is available.""" + + return _has_module("pplx_kernels") + + +def has_deep_ep() -> bool: + """Whether the optional `deep_ep` package is available.""" + + return _has_module("deep_ep") + + +def has_deep_gemm() -> bool: + """Whether the optional `deep_gemm` package is available.""" + + return _has_module("deep_gemm") \ No newline at end of file From 7b1895e6ce4942091e16da790af8c12772a1d384 Mon Sep 17 00:00:00 2001 From: Michael Goin Date: Sun, 29 Jun 2025 11:31:37 +0900 Subject: [PATCH 191/211] [CI Fix] Try fixing eagle e2e test OOM by reducing block allocation (#20213) Signed-off-by: mgoin --- tests/spec_decode/e2e/test_eagle_correctness.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/spec_decode/e2e/test_eagle_correctness.py b/tests/spec_decode/e2e/test_eagle_correctness.py index fd838285aba7c..7c369feec4152 100644 --- a/tests/spec_decode/e2e/test_eagle_correctness.py +++ b/tests/spec_decode/e2e/test_eagle_correctness.py @@ -370,6 +370,10 @@ def test_llama2_eagle_e2e_greedy_correctness(vllm_runner, common_llm_kwargs, @pytest.mark.parametrize( "common_llm_kwargs", [{ + # 2 for small prompt, 256//16 for generated. + "num_gpu_blocks_override": 2 + 256 // 16, + "max_model_len": (2 + 256 // 16) * 16, + # Skip cuda graph recording for fast test. "enforce_eager": True, @@ -420,6 +424,10 @@ def test_llama3_eagle_e2e_greedy_correctness(vllm_runner, common_llm_kwargs, @pytest.mark.parametrize( "common_llm_kwargs", [{ + # 2 for small prompt, 256//16 for generated. + "num_gpu_blocks_override": 2 + 256 // 16, + "max_model_len": (2 + 256 // 16) * 16, + # Skip cuda graph recording for fast test. "enforce_eager": True, From 6f2f53a82dc9703abb389761bf9931e3c9a5a75b Mon Sep 17 00:00:00 2001 From: Dipika Sikka Date: Mon, 30 Jun 2025 00:05:40 +0200 Subject: [PATCH 192/211] [Quantization] Add compressed-tensors NVFP4 MoE Support (#19990) Signed-off-by: Dipika Sikka Signed-off-by: Dipika --- tests/quantization/test_compressed_tensors.py | 6 +- vllm/model_executor/layers/fused_moe/layer.py | 4 +- .../compressed_tensors/compressed_tensors.py | 4 +- .../compressed_tensors_moe.py | 275 +++++++++++++++++- .../schemes/compressed_tensors_w4a4_nvfp4.py | 13 +- .../utils/nvfp4_emulation_utils.py | 15 +- 6 files changed, 295 insertions(+), 22 deletions(-) diff --git a/tests/quantization/test_compressed_tensors.py b/tests/quantization/test_compressed_tensors.py index 516bf4513816a..3646ad6c481b0 100644 --- a/tests/quantization/test_compressed_tensors.py +++ b/tests/quantization/test_compressed_tensors.py @@ -17,7 +17,7 @@ from vllm.model_executor.layers.quantization.compressed_tensors.compressed_tenso CompressedTensorsW4A4Fp4, CompressedTensorsW4A16Fp4, CompressedTensorsW4A16Sparse24, CompressedTensorsW8A8Fp8, CompressedTensorsW8A8Int8, CompressedTensorsW8A16Fp8, - CompressedTensorsWNA16) + CompressedTensorsWNA16, cutlass_fp4_supported) from vllm.model_executor.layers.quantization.utils.w8a8_utils import ( sparse_cutlass_supported) from vllm.platforms import current_platform @@ -668,8 +668,8 @@ def test_compressed_tensors_nvfp4(vllm_runner, args): assert isinstance(qkv_proj.quant_method, CompressedTensorsLinearMethod) if isinstance(qkv_proj.scheme, scheme) or isinstance( - qkv_proj.scheme, CompressedTensorsW4A16Fp4 - ) and not CompressedTensorsW4A4Fp4.cutlass_fp4_supported(): + qkv_proj.scheme, + CompressedTensorsW4A16Fp4) and not cutlass_fp4_supported(): assert True else: raise AssertionError("FP4 Scheme Mismatch") diff --git a/vllm/model_executor/layers/fused_moe/layer.py b/vllm/model_executor/layers/fused_moe/layer.py index 5408ef1f75e89..e6f555d315d8e 100644 --- a/vllm/model_executor/layers/fused_moe/layer.py +++ b/vllm/model_executor/layers/fused_moe/layer.py @@ -1246,6 +1246,7 @@ class FusedMoE(torch.nn.Module): param.materialize(final_shape, dtype=loaded_weight.dtype) expert_data = param.data if full_load else param.data[expert_id] + # Case input scale: input_scale loading is only supported for fp8 if "input_scale" in weight_name: # this is needed for compressed-tensors only @@ -1273,6 +1274,7 @@ class FusedMoE(torch.nn.Module): tp_rank=self.tp_rank) return True if return_success else None + # TODO @dsikka: ModelOpt should follow the proper MoE loading pattern if "ModelOpt" in quant_method_name: if ('weight_scale_2' in weight_name or 'input_scale' in weight_name): @@ -1289,7 +1291,7 @@ class FusedMoE(torch.nn.Module): tp_rank=self.tp_rank) return True if return_success else None - # Case weight scales, zero_points and offset + # Case weight scales, zero_points and offset, weight/input global scales if ("scale" in weight_name or "zero" in weight_name or "offset" in weight_name): # load the weight scales and zp based on the quantization scheme diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py index d21abb2741a29..4f87b2a44f0ac 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors.py @@ -33,6 +33,8 @@ from vllm.model_executor.layers.quantization.compressed_tensors.utils import ( find_matched_target, is_activation_quantization_format, should_ignore_layer) from vllm.model_executor.layers.quantization.kv_cache import BaseKVCacheMethod +from vllm.model_executor.layers.quantization.utils.nvfp4_emulation_utils import ( # noqa: E501 + cutlass_fp4_supported) from vllm.platforms import current_platform logger = init_logger(__name__) @@ -375,7 +377,7 @@ class CompressedTensorsConfig(QuantizationConfig): if is_activation_quantization_format(self.quant_format): if self._is_fp4a4_nvfp4(weight_quant, input_quant): - if CompressedTensorsW4A4Fp4.cutlass_fp4_supported( + if cutlass_fp4_supported( ) or envs.VLLM_USE_NVFP4_CT_EMULATIONS: return CompressedTensorsW4A4Fp4() else: diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py index 4a19473005708..fa4ce5668091b 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/compressed_tensors_moe.py @@ -21,8 +21,12 @@ from vllm.model_executor.layers.quantization.utils import replace_parameter from vllm.model_executor.layers.quantization.utils.marlin_utils import ( check_moe_marlin_supports_layer, marlin_make_workspace_new, marlin_moe_permute_scales) +from vllm.model_executor.layers.quantization.utils.marlin_utils_fp4 import ( + prepare_moe_fp4_layer_for_marlin) from vllm.model_executor.layers.quantization.utils.marlin_utils_fp8 import ( prepare_moe_fp8_layer_for_marlin) +from vllm.model_executor.layers.quantization.utils.nvfp4_emulation_utils import ( # noqa: E501 + cutlass_fp4_supported) from vllm.model_executor.layers.quantization.utils.w8a8_utils import ( all_close_1d, normalize_e4m3fn_to_e4m3fnuz, per_tensor_dequantize) from vllm.model_executor.utils import set_weight_attrs @@ -46,12 +50,11 @@ class GPTQMarlinState(Enum): __all__ = [ - "CompressedTensorsMoEMethod", - "CompressedTensorsW8A8Fp8MoEMethod", + "CompressedTensorsMoEMethod", "CompressedTensorsW8A8Fp8MoEMethod", "CompressedTensorsW8A8Fp8MoECutlassMethod", "CompressedTensorsW8A8Int8MoEMethod", - "CompressedTensorsWNA16MarlinMoEMethod", - "CompressedTensorsWNA16MoEMethod", + "CompressedTensorsWNA16MarlinMoEMethod", "CompressedTensorsWNA16MoEMethod", + "CompressedTensorsW4A4MoeMethod" ] @@ -84,6 +87,8 @@ class CompressedTensorsMoEMethod(FusedMoEMethodBase): else: logger.info_once("Using CompressedTensorsWNA16MarlinMoEMethod") return CompressedTensorsWNA16MarlinMoEMethod(quant_config) + elif quant_config._is_fp4a4_nvfp4(weight_quant, input_quant): + return CompressedTensorsW4A4MoeMethod() elif quant_config._is_fp8_w8a8_sm90(weight_quant, input_quant): return CompressedTensorsW8A8Fp8MoECutlassMethod(quant_config) elif quant_config._is_fp8_w8a8(weight_quant, input_quant): @@ -95,6 +100,268 @@ class CompressedTensorsMoEMethod(FusedMoEMethodBase): f"Unsupported FusedMoe scheme: {weight_quant}, {input_quant}") +class CompressedTensorsW4A4MoeMethod(CompressedTensorsMoEMethod): + + def __init__(self): + self.use_marlin = not cutlass_fp4_supported() + self.group_size = 16 + + def create_weights(self, layer: torch.nn.Module, num_experts: int, + hidden_size: int, intermediate_size_per_partition: int, + params_dtype: torch.dtype, **extra_weight_attrs): + + layer.num_experts = num_experts + layer.params_dtype = params_dtype + + w13_weight = torch.nn.Parameter( + torch.empty( + num_experts, + 2 * intermediate_size_per_partition, + # 2 fp4 items are packed in the input dimension + hidden_size // 2, + requires_grad=False, + dtype=torch.uint8), + requires_grad=False) + layer.register_parameter("w13_weight_packed", w13_weight) + set_weight_attrs(w13_weight, extra_weight_attrs) + + w2_weight = torch.nn.Parameter( + torch.empty( + num_experts, + hidden_size, + # 2 fp4 items are packed in the input dimension + intermediate_size_per_partition // 2, + dtype=torch.uint8), + requires_grad=False) + layer.register_parameter("w2_weight_packed", w2_weight) + set_weight_attrs(w2_weight, extra_weight_attrs) + + # Weight Scales + w13_weight_scale = torch.nn.Parameter( + torch.empty( + num_experts, + 2 * intermediate_size_per_partition, + # 2 fp4 items are packed in the input dimension + hidden_size // self.group_size, + dtype=torch.float8_e4m3fn), + requires_grad=False) + layer.register_parameter("w13_weight_scale", w13_weight_scale) + extra_weight_attrs.update( + {"quant_method": FusedMoeWeightScaleSupported.GROUP.value}) + set_weight_attrs(w13_weight_scale, extra_weight_attrs) + + w2_weight_scale = torch.nn.Parameter( + torch.empty( + num_experts, + hidden_size, + # 2 fp4 items are packed in the input dimension + intermediate_size_per_partition // self.group_size, + dtype=torch.float8_e4m3fn), + requires_grad=False) + layer.register_parameter("w2_weight_scale", w2_weight_scale) + extra_weight_attrs.update( + {"quant_method": FusedMoeWeightScaleSupported.GROUP.value}) + set_weight_attrs(w2_weight_scale, extra_weight_attrs) + + # Weight Global Scales + w13_weight_scale_2 = torch.nn.Parameter(torch.empty( + num_experts, 2, dtype=torch.float32), + requires_grad=False) + layer.register_parameter("w13_weight_global_scale", w13_weight_scale_2) + extra_weight_attrs.update( + {"quant_method": FusedMoeWeightScaleSupported.TENSOR.value}) + set_weight_attrs(w13_weight_scale_2, extra_weight_attrs) + + w2_weight_scale_2 = torch.nn.Parameter(torch.empty( + num_experts, dtype=torch.float32), + requires_grad=False) + layer.register_parameter("w2_weight_global_scale", w2_weight_scale_2) + extra_weight_attrs.update( + {"quant_method": FusedMoeWeightScaleSupported.TENSOR.value}) + set_weight_attrs(w2_weight_scale_2, extra_weight_attrs) + + # Input Global Scales + w13_input_scale = torch.nn.Parameter(torch.empty(num_experts, + 2, + dtype=torch.float32), + requires_grad=False) + layer.register_parameter("w13_input_global_scale", w13_input_scale) + extra_weight_attrs.update( + {"quant_method": FusedMoeWeightScaleSupported.TENSOR.value}) + set_weight_attrs(w13_input_scale, extra_weight_attrs) + + w2_input_scale = torch.nn.Parameter(torch.empty(num_experts, + dtype=torch.float32), + requires_grad=False) + layer.register_parameter("w2_input_global_scale", w2_input_scale) + extra_weight_attrs.update( + {"quant_method": FusedMoeWeightScaleSupported.TENSOR.value}) + set_weight_attrs(w2_input_scale, extra_weight_attrs) + + def swizzle_blockscale(self, scale: torch.tensor): + assert (scale.dtype == torch.float8_e4m3fn) + # Pad and blockwise interleave weight_scale + scale_ndim = scale.ndim + if scale.ndim == 2: + scale = scale.unsqueeze(0) + assert scale.ndim == 3 + B, M, K = scale.shape + round_up_multiple = lambda x, m: (x + m - 1) // m * m + M_padded = round_up_multiple(M, 128) + K_padded = round_up_multiple(K, 4) + padded_scale = torch.zeros((B, M_padded, K_padded), dtype=scale.dtype) + padded_scale[:B, :M, :K] = scale + batches, rows, cols = padded_scale.shape + assert rows % 128 == 0 + assert cols % 4 == 0 + padded_scale = padded_scale.reshape(batches, rows // 128, 4, 32, + cols // 4, 4) + swizzled_scale = padded_scale.permute((0, 1, 4, 3, 2, 5)) + swizzled_scale = swizzled_scale.contiguous().cuda() + return (swizzled_scale.reshape(M, K) + if scale_ndim == 2 else swizzled_scale.reshape(B, M, K)) + + def process_weights_after_loading(self, layer: torch.nn.Module) -> None: + + # From packed to weight + layer.w13_weight = torch.nn.Parameter(layer.w13_weight_packed.data, + requires_grad=False) + + layer.w2_weight = torch.nn.Parameter(layer.w2_weight_packed.data, + requires_grad=False) + + if not torch.allclose(layer.w13_weight_global_scale[:, 0], + layer.w13_weight_global_scale[:, 1]): + logger.warning_once( + "w1_weight_global_scale must match w3_weight_global_scale. " + "Accuracy may be affected.") + + # Take inverse of global scale saved to disk + layer.w13_weight_scale_2 = torch.nn.Parameter( + 1 / layer.w13_weight_global_scale[:, 0], requires_grad=False) + + layer.w2_weight_scale_2 = torch.nn.Parameter( + 1 / layer.w2_weight_global_scale.data, requires_grad=False) + + if self.use_marlin: + prepare_moe_fp4_layer_for_marlin(layer) + return + + # swizzle weight scales + layer.w13_blockscale_swizzled = torch.nn.Parameter( + self.swizzle_blockscale(layer.w13_weight_scale), + requires_grad=False) + + layer.w2_blockscale_swizzled = torch.nn.Parameter( + self.swizzle_blockscale(layer.w2_weight_scale), + requires_grad=False) + + # w13 + w13_input_global_scale = layer.w13_input_global_scale.max( + dim=1).values.to(torch.float32) + + layer.g1_alphas = torch.nn.Parameter( + ((1 / w13_input_global_scale) * layer.w13_weight_scale_2), + requires_grad=False) + + layer.w13_input_scale_quant = torch.nn.Parameter( + (w13_input_global_scale), requires_grad=False) + + # w2 + layer.g2_alphas = torch.nn.Parameter( + ((1 / layer.w2_input_global_scale) * layer.w2_weight_scale_2).to( + torch.float32), + requires_grad=False) + + layer.w2_input_scale_quant = torch.nn.Parameter( + (layer.w2_input_global_scale), requires_grad=False) + + def apply( + self, + layer: torch.nn.Module, + x: torch.Tensor, + router_logits: torch.Tensor, + top_k: int, + renormalize: bool, + use_grouped_topk: bool = False, + topk_group: Optional[int] = None, + num_expert_group: Optional[int] = None, + global_num_experts: int = -1, + expert_map: Optional[torch.Tensor] = None, + custom_routing_function: Optional[Callable] = None, + scoring_func: str = "softmax", + e_score_correction_bias: Optional[torch.Tensor] = None, + apply_router_weight_on_input: bool = False, + activation: str = "silu", + enable_eplb: bool = False, + expert_load_view: Optional[torch.Tensor] = None, + logical_to_physical_map: Optional[torch.Tensor] = None, + logical_replica_count: Optional[torch.Tensor] = None, + ) -> torch.Tensor: + if enable_eplb: + raise NotImplementedError("EPLB not supported for " + "`CompressedTensorsW4A4MoeMethod` yet.") + + topk_weights, topk_ids = FusedMoE.select_experts( + hidden_states=x, + router_logits=router_logits, + use_grouped_topk=use_grouped_topk, + top_k=top_k, + renormalize=renormalize, + topk_group=topk_group, + num_expert_group=num_expert_group, + custom_routing_function=custom_routing_function, + scoring_func=scoring_func, + e_score_correction_bias=e_score_correction_bias, + ) + + if self.use_marlin: + return torch.ops.vllm.fused_marlin_moe( + x, + layer.w13_weight, + layer.w2_weight, + layer.w13_weight_scale, + layer.w2_weight_scale, + router_logits, + topk_weights, + topk_ids, + global_scale1=layer.w13_weight_scale_2, + global_scale2=layer.w2_weight_scale_2, + quant_type_id=scalar_types.float4_e2m1f.id, + global_num_experts=global_num_experts, + expert_map=expert_map) + + assert activation == "silu", "Only SiLU activation is supported." + assert not apply_router_weight_on_input, ( + "Router weight on input is not " + "supported for CompressedTensorsW4A4MoeMethod.") + assert expert_map is None, ("Expert Parallelism / expert_map " + "is currently not supported for " + "CompressedTensorsW4A4MoeMethod.") + + from vllm.model_executor.layers.fused_moe.cutlass_moe import ( + cutlass_moe_fp4) + + # Cutlass moe takes in activations in BF16/Half precision + # and fp4 quantized weights loaded from the checkpoint + return cutlass_moe_fp4(a=x, + w1_fp4=layer.w13_weight, + w1_blockscale=layer.w13_blockscale_swizzled, + w1_alphas=layer.g1_alphas, + w2_fp4=layer.w2_weight, + w2_blockscale=layer.w2_blockscale_swizzled, + w2_alphas=layer.g2_alphas, + topk_weights=topk_weights, + topk_ids=topk_ids, + m=x.shape[0], + n=layer.w2_weight.shape[2] * 2, + k=x.shape[1], + e=layer.w13_weight.shape[0], + a1_gscale=layer.w13_input_scale_quant, + a2_gscale=layer.w2_input_scale_quant, + device=x.device).to(x.dtype) + + class CompressedTensorsW8A8Fp8MoEMethod(CompressedTensorsMoEMethod): def __init__( diff --git a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py index ec1d4a6c0efae..65cbc49d2640a 100644 --- a/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py +++ b/vllm/model_executor/layers/quantization/compressed_tensors/schemes/compressed_tensors_w4a4_nvfp4.py @@ -5,8 +5,7 @@ import torch from torch.nn.parameter import Parameter import vllm.envs as envs -from vllm._custom_ops import (cutlass_scaled_fp4_mm, - cutlass_scaled_mm_supports_fp4, scaled_fp4_quant) +from vllm._custom_ops import cutlass_scaled_fp4_mm, scaled_fp4_quant from vllm.logger import init_logger from vllm.model_executor.layers.quantization.compressed_tensors.schemes import ( CompressedTensorsScheme) @@ -15,7 +14,6 @@ from vllm.model_executor.layers.quantization.utils.nvfp4_emulation_utils import from vllm.model_executor.parameter import (GroupQuantScaleParameter, ModelWeightParameter, PerTensorScaleParameter) -from vllm.platforms import current_platform logger = init_logger(__name__) @@ -33,15 +31,6 @@ class CompressedTensorsW4A4Fp4(CompressedTensorsScheme): return 80 return 100 - @classmethod - def cutlass_fp4_supported(cls) -> bool: - if not current_platform.is_cuda(): - return False - capability_tuple = current_platform.get_device_capability() - capability = -1 if capability_tuple is None else capability_tuple.to_int( # noqa: E501 - ) - return cutlass_scaled_mm_supports_fp4(capability) - def create_weights(self, layer: torch.nn.Module, output_partition_sizes: list[int], input_size_per_partition: int, diff --git a/vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py b/vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py index d5ce6d7ad757a..fb3287d3b89e6 100644 --- a/vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py +++ b/vllm/model_executor/layers/quantization/utils/nvfp4_emulation_utils.py @@ -2,9 +2,14 @@ # SPDX-FileCopyrightText: Copyright contributors to the vLLM project import torch +from vllm._custom_ops import cutlass_scaled_mm_supports_fp4 +from vllm.platforms import current_platform from vllm.scalar_type import scalar_types -__all__ = ["break_fp4_bytes", "dequantize_to_dtype", "ref_nvfp4_quant"] +__all__ = [ + "break_fp4_bytes", "dequantize_to_dtype", "ref_nvfp4_quant", + "cutlass_fp4_supported" +] FLOAT4_E2M1_MAX = scalar_types.float4_e2m1f.max() @@ -12,6 +17,14 @@ kE2M1ToFloat = torch.tensor([0., 0.5, 1., 1.5, 2., 3., 4., 6.], dtype=torch.float32) +def cutlass_fp4_supported() -> bool: + if not current_platform.is_cuda(): + return False + capability_tuple = current_platform.get_device_capability() + capability = -1 if capability_tuple is None else capability_tuple.to_int() + return cutlass_scaled_mm_supports_fp4(capability) + + def break_fp4_bytes(a, dtype): assert a.dtype == torch.uint8 m, n = a.shape From 6c9837a761409aecf47e94ae4272879ce0c81590 Mon Sep 17 00:00:00 2001 From: Huy Do Date: Sun, 29 Jun 2025 16:52:34 -0700 Subject: [PATCH 193/211] Fix cuda_archs_loose_intersection when handling sm_*a (#20207) Signed-off-by: Huy Do --- CMakeLists.txt | 14 ++++++++++++-- cmake/utils.cmake | 33 ++++++++++++++------------------- 2 files changed, 26 insertions(+), 21 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index b1adeac586f2e..f6f8d59d28aef 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -562,7 +562,7 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") "if you intend on running FP8 quantized MoE models on Hopper.") else() message(STATUS "Not building grouped_mm_c3x as no compatible archs found " - "in CUDA target architectures") + "in CUDA target architectures.") endif() endif() @@ -574,7 +574,17 @@ if(VLLM_GPU_LANG STREQUAL "CUDA") SRCS "${SRCS}" CUDA_ARCHS "${CUTLASS_MOE_DATA_ARCHS}") list(APPEND VLLM_EXT_SRC "${SRCS}") - endif() + message(STATUS "Building moe_data for archs: ${CUTLASS_MOE_DATA_ARCHS}") + else() + if (NOT ${CMAKE_CUDA_COMPILER_VERSION} VERSION_GREATER_EQUAL 12.3 AND CUTLASS_MOE_DATA_ARCHS) + message(STATUS "Not building moe_data as CUDA Compiler version is " + "not >= 12.3, we recommend upgrading to CUDA 12.3 or later " + "if you intend on running FP8 quantized MoE models on Hopper or Blackwell.") + else() + message(STATUS "Not building moe_data as no compatible archs found " + "in CUDA target architectures.") + endif() + endif() # # Machete kernels diff --git a/cmake/utils.cmake b/cmake/utils.cmake index 59c78950a1093..621179a701692 100644 --- a/cmake/utils.cmake +++ b/cmake/utils.cmake @@ -265,8 +265,8 @@ macro(set_gencode_flags_for_srcs) endmacro() # -# For the given `SRC_CUDA_ARCHS` list of gencode versions in the form -# `.[letter]` compute the "loose intersection" with the +# For the given `SRC_CUDA_ARCHS` list of gencode versions in the form +# `.[letter]` compute the "loose intersection" with the # `TGT_CUDA_ARCHS` list of gencodes. We also support the `+PTX` suffix in # `SRC_CUDA_ARCHS` which indicates that the PTX code should be built when there # is a CUDA_ARCH in `TGT_CUDA_ARCHS` that is equal to or larger than the @@ -278,7 +278,7 @@ endmacro() # in `SRC_CUDA_ARCHS` that is less or equal to the version in `TGT_CUDA_ARCHS`. # We have special handling for x.0a, if x.0a is in `SRC_CUDA_ARCHS` and x.0 is # in `TGT_CUDA_ARCHS` then we should remove x.0a from `SRC_CUDA_ARCHS` and add -# x.0a to the result (and remove x.0 from TGT_CUDA_ARCHS). +# x.0a to the result (and remove x.0 from TGT_CUDA_ARCHS). # The result is stored in `OUT_CUDA_ARCHS`. # # Example: @@ -313,21 +313,16 @@ function(cuda_archs_loose_intersection OUT_CUDA_ARCHS SRC_CUDA_ARCHS TGT_CUDA_AR # if x.0a is in SRC_CUDA_ARCHS and x.0 is in CUDA_ARCHS then we should # remove x.0a from SRC_CUDA_ARCHS and add x.0a to _CUDA_ARCHS set(_CUDA_ARCHS) - if ("9.0a" IN_LIST _SRC_CUDA_ARCHS) - list(REMOVE_ITEM _SRC_CUDA_ARCHS "9.0a") - if ("9.0" IN_LIST TGT_CUDA_ARCHS) - list(REMOVE_ITEM _TGT_CUDA_ARCHS "9.0") - set(_CUDA_ARCHS "9.0a") + foreach(_arch ${_SRC_CUDA_ARCHS}) + if(_arch MATCHES "\\a$") + list(REMOVE_ITEM _SRC_CUDA_ARCHS "${_arch}") + string(REPLACE "a" "" _base "${_arch}") + if ("${_base}" IN_LIST TGT_CUDA_ARCHS) + list(REMOVE_ITEM _TGT_CUDA_ARCHS "${_base}") + list(APPEND _CUDA_ARCHS "${_arch}") + endif() endif() - endif() - - if ("10.0a" IN_LIST _SRC_CUDA_ARCHS) - list(REMOVE_ITEM _SRC_CUDA_ARCHS "10.0a") - if ("10.0" IN_LIST TGT_CUDA_ARCHS) - list(REMOVE_ITEM _TGT_CUDA_ARCHS "10.0") - set(_CUDA_ARCHS "10.0a") - endif() - endif() + endforeach() list(SORT _SRC_CUDA_ARCHS COMPARE NATURAL ORDER ASCENDING) @@ -359,7 +354,7 @@ function(cuda_archs_loose_intersection OUT_CUDA_ARCHS SRC_CUDA_ARCHS TGT_CUDA_AR endforeach() list(REMOVE_DUPLICATES _CUDA_ARCHS) - + # reapply +PTX suffix to architectures that requested PTX set(_FINAL_ARCHS) foreach(_arch ${_CUDA_ARCHS}) @@ -370,7 +365,7 @@ function(cuda_archs_loose_intersection OUT_CUDA_ARCHS SRC_CUDA_ARCHS TGT_CUDA_AR endif() endforeach() set(_CUDA_ARCHS ${_FINAL_ARCHS}) - + set(${OUT_CUDA_ARCHS} ${_CUDA_ARCHS} PARENT_SCOPE) endfunction() From 65b1cbb1381bf2301a2441fd988bbe88b4b7865e Mon Sep 17 00:00:00 2001 From: redmoe-moutain Date: Mon, 30 Jun 2025 10:34:36 +0800 Subject: [PATCH 194/211] [Model] support dots1 (#18254) Signed-off-by: redmoe-moutain --- docs/models/supported_models.md | 1 + tests/models/registry.py | 2 + vllm/model_executor/models/dots1.py | 535 +++++++++++++++++++++++++ vllm/model_executor/models/registry.py | 1 + 4 files changed, 539 insertions(+) create mode 100644 vllm/model_executor/models/dots1.py diff --git a/docs/models/supported_models.md b/docs/models/supported_models.md index 9782fd1781512..0248700292ae2 100644 --- a/docs/models/supported_models.md +++ b/docs/models/supported_models.md @@ -329,6 +329,7 @@ Specified using `--task generate`. | `DeepseekForCausalLM` | DeepSeek | `deepseek-ai/deepseek-llm-67b-base`, `deepseek-ai/deepseek-llm-7b-chat` etc. | | ✅︎ | ✅︎ | | `DeepseekV2ForCausalLM` | DeepSeek-V2 | `deepseek-ai/DeepSeek-V2`, `deepseek-ai/DeepSeek-V2-Chat` etc. | | ✅︎ | ✅︎ | | `DeepseekV3ForCausalLM` | DeepSeek-V3 | `deepseek-ai/DeepSeek-V3-Base`, `deepseek-ai/DeepSeek-V3` etc. | | ✅︎ | ✅︎ | +| `Dots1ForCausalLM` | dots.llm1 | `rednote-hilab/dots.llm1.base`, `rednote-hilab/dots.llm1.inst` etc. | | ✅︎ | ✅︎ | | `ExaoneForCausalLM` | EXAONE-3 | `LGAI-EXAONE/EXAONE-3.0-7.8B-Instruct`, etc. | ✅︎ | ✅︎ | ✅︎ | | `FalconForCausalLM` | Falcon | `tiiuae/falcon-7b`, `tiiuae/falcon-40b`, `tiiuae/falcon-rw-7b`, etc. | | ✅︎ | ✅︎ | | `FalconMambaForCausalLM` | FalconMamba | `tiiuae/falcon-mamba-7b`, `tiiuae/falcon-mamba-7b-instruct`, etc. | | ✅︎ | ✅︎ | diff --git a/tests/models/registry.py b/tests/models/registry.py index 72e361e2637fd..e56dd19bec670 100644 --- a/tests/models/registry.py +++ b/tests/models/registry.py @@ -268,6 +268,8 @@ _TEXT_GENERATION_EXAMPLE_MODELS = { "Zamba2ForCausalLM": _HfExamplesInfo("Zyphra/Zamba2-7B-instruct"), "MiMoForCausalLM": _HfExamplesInfo("XiaomiMiMo/MiMo-7B-RL", trust_remote_code=True), + "Dots1ForCausalLM": _HfExamplesInfo("rednote-hilab/dots.llm1.inst", + min_transformers_version="4.53"), # [Encoder-decoder] "BartModel": _HfExamplesInfo("facebook/bart-base"), "BartForConditionalGeneration": _HfExamplesInfo("facebook/bart-large-cnn"), diff --git a/vllm/model_executor/models/dots1.py b/vllm/model_executor/models/dots1.py new file mode 100644 index 0000000000000..01a27d02a3044 --- /dev/null +++ b/vllm/model_executor/models/dots1.py @@ -0,0 +1,535 @@ +# SPDX-License-Identifier: Apache-2.0 + +# Adapted from +# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py +# Copyright 2025 The rednote-hilab team. +# Copyright 2023 The vLLM team. +# Copyright 2023 DeepSeek-AI and the HuggingFace Inc. team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Inference-only dots1 model.""" +from collections.abc import Iterable +from typing import Any, Optional, Union + +import torch +from torch import nn +from transformers import PretrainedConfig + +from vllm.attention import Attention +from vllm.compilation.decorators import support_torch_compile +from vllm.config import CacheConfig, ModelConfig, VllmConfig +from vllm.distributed import (get_pp_group, + get_tensor_model_parallel_world_size, + tensor_model_parallel_all_reduce) +from vllm.model_executor.layers.activation import SiluAndMul +from vllm.model_executor.layers.fused_moe import FusedMoE +from vllm.model_executor.layers.layernorm import RMSNorm +from vllm.model_executor.layers.linear import (MergedColumnParallelLinear, + QKVParallelLinear, + ReplicatedLinear, + RowParallelLinear) +from vllm.model_executor.layers.logits_processor import LogitsProcessor +from vllm.model_executor.layers.quantization import QuantizationConfig +from vllm.model_executor.layers.rotary_embedding import get_rope +from vllm.model_executor.layers.vocab_parallel_embedding import ( + ParallelLMHead, VocabParallelEmbedding) +from vllm.model_executor.model_loader.weight_utils import ( + default_weight_loader, maybe_remap_kv_scale_name) +from vllm.model_executor.sampling_metadata import SamplingMetadata +from vllm.sequence import IntermediateTensors + +from .interfaces import SupportsPP +from .utils import (PPMissingLayer, is_pp_missing_parameter, + make_empty_intermediate_tensors_factory, make_layers, + maybe_prefix) + + +class Dots1MLP(nn.Module): + + def __init__( + self, + hidden_size: int, + intermediate_size: int, + hidden_act: str, + quant_config: Optional[QuantizationConfig] = None, + reduce_results: bool = True, + prefix: str = "", + ) -> None: + super().__init__() + self.gate_up_proj = MergedColumnParallelLinear( + hidden_size, [intermediate_size] * 2, + bias=False, + quant_config=quant_config, + prefix=f"{prefix}.gate_up_proj") + self.down_proj = RowParallelLinear(intermediate_size, + hidden_size, + bias=False, + quant_config=quant_config, + reduce_results=reduce_results, + prefix=f"{prefix}.down_proj") + if hidden_act != "silu": + raise ValueError(f"Unsupported activation: {hidden_act}. " + "Only silu is supported for now.") + self.act_fn = SiluAndMul() + + def forward(self, x): + gate_up, _ = self.gate_up_proj(x) + x = self.act_fn(gate_up) + x, _ = self.down_proj(x) + return x + + +class Dots1MoE(nn.Module): + + def __init__( + self, + config: PretrainedConfig, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ): + super().__init__() + self.tp_size = get_tensor_model_parallel_world_size() + self.routed_scaling_factor = config.routed_scaling_factor + self.n_shared_experts = config.n_shared_experts + + if config.hidden_act != "silu": + raise ValueError(f"Unsupported activation: {config.hidden_act}. " + "Only silu is supported for now.") + + self.gate = ReplicatedLinear(config.hidden_size, + config.n_routed_experts, + bias=False, + quant_config=None, + prefix=f"{prefix}.gate") + if config.topk_method == "noaux_tc": + self.gate.e_score_correction_bias = (nn.Parameter( + torch.empty(config.n_routed_experts))) + else: + self.gate.e_score_correction_bias = None + + self.experts = FusedMoE( + num_experts=config.n_routed_experts, + top_k=config.num_experts_per_tok, + hidden_size=config.hidden_size, + intermediate_size=config.moe_intermediate_size, + reduce_results=False, + renormalize=config.norm_topk_prob, + quant_config=quant_config, + use_grouped_topk=True, + num_expert_group=config.n_group, + topk_group=config.topk_group, + prefix=f"{prefix}.experts", + scoring_func=config.scoring_func, + e_score_correction_bias=self.gate.e_score_correction_bias) + + if config.n_shared_experts is not None: + intermediate_size = (config.moe_intermediate_size * + config.n_shared_experts) + self.shared_experts = Dots1MLP( + hidden_size=config.hidden_size, + intermediate_size=intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + reduce_results=False, + prefix=f"{prefix}.shared_experts", + ) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + num_tokens, hidden_dim = hidden_states.shape + hidden_states = hidden_states.view(-1, hidden_dim) + if self.n_shared_experts is not None: + shared_output = self.shared_experts(hidden_states) + router_logits, _ = self.gate(hidden_states) + final_hidden_states = self.experts( + hidden_states=hidden_states, + router_logits=router_logits) * self.routed_scaling_factor + if shared_output is not None: + final_hidden_states = final_hidden_states + shared_output + if self.tp_size > 1: + final_hidden_states = tensor_model_parallel_all_reduce( + final_hidden_states) + return final_hidden_states.view(num_tokens, hidden_dim) + + +class Dots1Attention(nn.Module): + + def __init__( + self, + hidden_size: int, + num_heads: int, + num_kv_heads: int, + config: PretrainedConfig, + rope_theta: float = 10000, + rope_scaling: Optional[dict[str, Any]] = None, + max_position_embeddings: int = 8192, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + prefix: str = "", + ) -> None: + super().__init__() + self.hidden_size = hidden_size + tp_size = get_tensor_model_parallel_world_size() + self.total_num_heads = num_heads + assert self.total_num_heads % tp_size == 0 + self.num_heads = self.total_num_heads // tp_size + self.total_num_kv_heads = num_kv_heads + if self.total_num_kv_heads >= tp_size: + # Number of KV heads is greater than TP size, so we partition + # the KV heads across multiple tensor parallel GPUs. + assert self.total_num_kv_heads % tp_size == 0 + else: + # Number of KV heads is less than TP size, so we replicate + # the KV heads across multiple tensor parallel GPUs. + assert tp_size % self.total_num_kv_heads == 0 + self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size) + self.head_dim = getattr(config, "head_dim", + hidden_size // self.total_num_heads) + self.q_size = self.num_heads * self.head_dim + self.kv_size = self.num_kv_heads * self.head_dim + self.scaling = self.head_dim**-0.5 + self.rope_theta = rope_theta + self.max_position_embeddings = max_position_embeddings + attention_bias = config.attention_bias + + self.qkv_proj = QKVParallelLinear( + hidden_size, + self.head_dim, + self.total_num_heads, + self.total_num_kv_heads, + bias=attention_bias, + quant_config=quant_config, + ) + + self.o_proj = RowParallelLinear( + self.total_num_heads * self.head_dim, + hidden_size, + bias=False, + quant_config=quant_config, + ) + + self.rotary_emb = get_rope( + self.head_dim, + rotary_dim=self.head_dim, + max_position=max_position_embeddings, + base=rope_theta, + rope_scaling=rope_scaling, + ) + self.attn = Attention( + self.num_heads, + self.head_dim, + self.scaling, + num_kv_heads=self.num_kv_heads, + cache_config=cache_config, + quant_config=quant_config, + prefix=f"{prefix}.attn", + ) + self.q_norm = RMSNorm(self.head_dim, eps=config.rms_norm_eps) + self.k_norm = RMSNorm(self.head_dim, eps=config.rms_norm_eps) + + def forward(self, positions: torch.Tensor, + hidden_states: torch.Tensor) -> torch.Tensor: + qkv, _ = self.qkv_proj(hidden_states) + q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1) + q = self.q_norm(q.reshape(-1, self.num_heads, + self.head_dim)).reshape(q.shape) + k = self.k_norm(k.reshape(-1, self.num_kv_heads, + self.head_dim)).reshape(k.shape) + q, k = self.rotary_emb(positions, q, k) + attn_output = self.attn(q, k, v) + output, _ = self.o_proj(attn_output) + return output + + +class Dots1DecoderLayer(nn.Module): + + def __init__( + self, + config: PretrainedConfig, + prefix: str, + model_config: ModelConfig, + cache_config: Optional[CacheConfig] = None, + quant_config: Optional[QuantizationConfig] = None, + ) -> None: + super().__init__() + self.hidden_size = config.hidden_size + rope_theta = getattr(config, "rope_theta", 10000) + rope_scaling = getattr(config, "rope_scaling", None) + max_position_embeddings = getattr(config, "max_position_embeddings", + 8192) + layer_idx = int(prefix.split(sep='.')[-1]) + self.layer_idx = layer_idx + + self.self_attn = Dots1Attention( + hidden_size=self.hidden_size, + num_heads=config.num_attention_heads, + num_kv_heads=config.num_key_value_heads, + config=config, + rope_theta=rope_theta, + rope_scaling=rope_scaling, + max_position_embeddings=max_position_embeddings, + cache_config=cache_config, + quant_config=quant_config, + prefix=f"{prefix}.self_attn", + ) + if (config.n_routed_experts is not None + and layer_idx >= config.first_k_dense_replace + and layer_idx % config.moe_layer_freq == 0): + self.mlp = Dots1MoE(config=config, + quant_config=quant_config, + prefix=f"{prefix}.mlp") + else: + self.mlp = Dots1MLP( + hidden_size=config.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + quant_config=quant_config, + prefix=f"{prefix}.mlp", + ) + self.input_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.post_attention_layernorm = RMSNorm(config.hidden_size, + eps=config.rms_norm_eps) + self.routed_scaling_factor = config.routed_scaling_factor + + def forward( + self, + positions: torch.Tensor, + hidden_states: torch.Tensor, + residual: Optional[torch.Tensor], + ) -> torch.Tensor: + if residual is None: + residual = hidden_states + hidden_states = self.input_layernorm(hidden_states) + else: + hidden_states, residual = self.input_layernorm( + hidden_states, residual) + hidden_states = self.self_attn(positions=positions, + hidden_states=hidden_states) + hidden_states, residual = self.post_attention_layernorm( + hidden_states, residual) + hidden_states = self.mlp(hidden_states) + return hidden_states, residual + + +class Dots1Model(nn.Module): + + fall_back_to_pt_during_load = False + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + + config = vllm_config.model_config.hf_config + model_config = vllm_config.model_config + cache_config = vllm_config.cache_config + quant_config = vllm_config.quant_config + self.config = config + + self.vocab_size = config.vocab_size + + if get_pp_group().is_first_rank: + self.embed_tokens = VocabParallelEmbedding( + config.vocab_size, + config.hidden_size, + quant_config=quant_config, + prefix=f"{prefix}.embed_tokens") + else: + self.embed_tokens = PPMissingLayer() + + self.start_layer, self.end_layer, self.layers = make_layers( + config.num_hidden_layers, + lambda prefix: Dots1DecoderLayer( + config, + prefix, + model_config=model_config, + cache_config=cache_config, + quant_config=quant_config, + ), + prefix=f"{prefix}.layers") + + if get_pp_group().is_last_rank: + self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps) + else: + self.norm = PPMissingLayer() + self.make_empty_intermediate_tensors = ( + make_empty_intermediate_tensors_factory( + ["hidden_states", "residual"], config.hidden_size)) + + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.embed_tokens(input_ids) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + intermediate_tensors: Optional[IntermediateTensors], + inputs_embeds: Optional[torch.Tensor] = None, + ) -> Union[torch.Tensor, IntermediateTensors]: + if get_pp_group().is_first_rank: + if inputs_embeds is not None: + hidden_states = inputs_embeds + else: + hidden_states = self.get_input_embeddings(input_ids) + residual = None + else: + assert intermediate_tensors is not None + hidden_states = intermediate_tensors["hidden_states"] + residual = intermediate_tensors["residual"] + for layer in self.layers[self.start_layer:self.end_layer]: + hidden_states, residual = layer( + positions, + hidden_states, + residual, + ) + if not get_pp_group().is_last_rank: + return IntermediateTensors({ + "hidden_states": hidden_states, + "residual": residual + }) + hidden_states, _ = self.norm(hidden_states, residual) + return hidden_states + + +@support_torch_compile +class Dots1ForCausalLM(nn.Module, SupportsPP): + + def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""): + super().__init__() + config = vllm_config.model_config.hf_config + quant_config = vllm_config.quant_config + self.config = config + self.quant_config = quant_config + self.model = Dots1Model(vllm_config=vllm_config, + prefix=maybe_prefix(prefix, "model")) + if get_pp_group().is_last_rank: + self.lm_head = ParallelLMHead(config.vocab_size, + config.hidden_size, + quant_config=quant_config) + else: + self.lm_head = PPMissingLayer() + self.logits_processor = LogitsProcessor(config.vocab_size) + self.make_empty_intermediate_tensors = ( + self.model.make_empty_intermediate_tensors) + + def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor: + return self.model.get_input_embeddings(input_ids) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + intermediate_tensors: Optional[IntermediateTensors] = None, + inputs_embeds: Optional[torch.Tensor] = None, + ) -> Union[torch.Tensor, IntermediateTensors]: + hidden_states = self.model( + input_ids, + positions, + intermediate_tensors, + inputs_embeds, + ) + return hidden_states + + def compute_logits( + self, + hidden_states: torch.Tensor, + sampling_metadata: SamplingMetadata, + ) -> Optional[torch.Tensor]: + logits = self.logits_processor(self.lm_head, hidden_states, + sampling_metadata) + return logits + + def make_empty_intermediate_tensors( + self, batch_size: int, dtype: torch.dtype, + device: torch.device) -> IntermediateTensors: + return IntermediateTensors({ + "hidden_states": + torch.zeros((batch_size, self.config.hidden_size), + dtype=dtype, + device=device), + "residual": + torch.zeros((batch_size, self.config.hidden_size), + dtype=dtype, + device=device), + }) + + def load_weights(self, weights: Iterable[tuple[str, + torch.Tensor]]) -> set[str]: + stacked_params_mapping = [ + ("qkv_proj", "q_proj", "q"), + ("qkv_proj", "k_proj", "k"), + ("qkv_proj", "v_proj", "v"), + ("gate_up_proj", "gate_proj", 0), + ("gate_up_proj", "up_proj", 1), + ] + + expert_params_mapping = FusedMoE.make_expert_params_mapping( + ckpt_gate_proj_name="gate_proj", + ckpt_down_proj_name="down_proj", + ckpt_up_proj_name="up_proj", + num_experts=self.config.n_routed_experts) + + params_dict = dict(self.named_parameters()) + loaded_params: set[str] = set() + for name, loaded_weight in weights: + if "rotary_emb.inv_freq" in name: + continue + for (param_name, weight_name, shard_id) in stacked_params_mapping: + if weight_name not in name: + continue + if (("mlp.experts." in name) and name not in params_dict): + continue + name = name.replace(weight_name, param_name) + if name.endswith(".bias") and name not in params_dict: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, loaded_weight, shard_id) + break + else: + for mapping in expert_params_mapping: + param_name, weight_name, expert_id, shard_id = mapping + if weight_name not in name: + continue + name = name.replace(weight_name, param_name) + + if is_pp_missing_parameter(name, self): + continue + + param = params_dict[name] + weight_loader = param.weight_loader + weight_loader(param, + loaded_weight, + name, + shard_id=shard_id, + expert_id=expert_id) + break + else: + if name.endswith(".bias") and name not in params_dict: + continue + name = maybe_remap_kv_scale_name(name, params_dict) + if name is None: + continue + if is_pp_missing_parameter(name, self): + continue + param = params_dict[name] + weight_loader = getattr(param, "weight_loader", + default_weight_loader) + weight_loader(param, loaded_weight) + loaded_params.add(name) + return loaded_params diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py index ff605cae02ea4..d566146662b8d 100644 --- a/vllm/model_executor/models/registry.py +++ b/vllm/model_executor/models/registry.py @@ -52,6 +52,7 @@ _TEXT_GENERATION_MODELS = { "DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"), "DeepseekV2ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"), "DeepseekV3ForCausalLM": ("deepseek_v2", "DeepseekV3ForCausalLM"), + "Dots1ForCausalLM": ("dots1", "Dots1ForCausalLM"), "ExaoneForCausalLM": ("exaone", "ExaoneForCausalLM"), "FalconForCausalLM": ("falcon", "FalconForCausalLM"), "Fairseq2LlamaForCausalLM": ("fairseq2_llama", "Fairseq2LlamaForCausalLM"), From 5a52f389dde444b9e122a7fa903393fd64857b86 Mon Sep 17 00:00:00 2001 From: "Chendi.Xue" Date: Sun, 29 Jun 2025 21:46:19 -0500 Subject: [PATCH 195/211] [BUGFIX][DEEPSEEK][MODEL_LOAD] fix w13, w2 weight not initialized assert (#20202) Signed-off-by: Chendi Xue --- vllm/model_executor/models/deepseek_v2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/vllm/model_executor/models/deepseek_v2.py b/vllm/model_executor/models/deepseek_v2.py index f712b626c74c3..2fa1294b79b95 100644 --- a/vllm/model_executor/models/deepseek_v2.py +++ b/vllm/model_executor/models/deepseek_v2.py @@ -889,6 +889,7 @@ class DeepseekV2ForCausalLM(nn.Module, SupportsPP, MixtureOfExperts): expert_id=expert_id, return_success=True) if success: + name = name_mapped break else: if is_expert_weight: From 19108ef31191e217766ffe52e8e382ddbec20fdb Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Sun, 29 Jun 2025 20:34:54 -0700 Subject: [PATCH 196/211] [Misc] Fix import (#20233) Signed-off-by: Woosuk Kwon --- vllm/v1/worker/gpu_model_runner.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 3c9de57204051..290b9a44a80e2 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -45,7 +45,7 @@ from vllm.sequence import IntermediateTensors from vllm.utils import (STR_DTYPE_TO_TORCH_DTYPE, DeviceMemoryProfiler, GiB_bytes, LazyLoader, async_tensor_h2d, cdiv, check_use_alibi, get_dtype_size, - is_pin_memory_available) + is_pin_memory_available, round_up) from vllm.v1.attention.backends.mamba_attn import Mamba2AttentionBackend from vllm.v1.attention.backends.utils import (AttentionMetadataBuilder, CommonAttentionMetadata) @@ -1308,7 +1308,6 @@ class GPUModelRunner(LoRAModelRunnerMixin): tp_size = self.vllm_config.parallel_config.tensor_parallel_size if self.compilation_config.pass_config. \ enable_sequence_parallelism and tp_size > 1: - from vllm.utils import round_up num_input_tokens = round_up(num_scheduled_tokens, tp_size) else: num_input_tokens = num_scheduled_tokens From 022c58b80f8ae2e27dc526860b769455ef4c5498 Mon Sep 17 00:00:00 2001 From: Reid <61492567+reidliu41@users.noreply.github.com> Date: Mon, 30 Jun 2025 15:53:45 +0800 Subject: [PATCH 197/211] [doc] Add Slack and Forum to the top navigation (#20208) Signed-off-by: reidliu41 --- docs/mkdocs/javascript/slack_and_forum.js | 56 +++++++++++++++++++++++ docs/mkdocs/stylesheets/extra.css | 26 +++++++++++ mkdocs.yaml | 1 + 3 files changed, 83 insertions(+) create mode 100644 docs/mkdocs/javascript/slack_and_forum.js diff --git a/docs/mkdocs/javascript/slack_and_forum.js b/docs/mkdocs/javascript/slack_and_forum.js new file mode 100644 index 0000000000000..9a92332238363 --- /dev/null +++ b/docs/mkdocs/javascript/slack_and_forum.js @@ -0,0 +1,56 @@ +/** + * slack_and_forum.js + * + * Adds a custom Slack and Forum button to the MkDocs Material header. + * + */ + +window.addEventListener('DOMContentLoaded', () => { + const headerInner = document.querySelector('.md-header__inner'); + + if (headerInner) { + const slackButton = document.createElement('button'); + slackButton.className = 'slack-button'; + slackButton.title = 'Join us on Slack'; + slackButton.style.border = 'none'; + slackButton.style.background = 'transparent'; + slackButton.style.cursor = 'pointer'; + + slackButton.innerHTML = ` + Slack + `; + + slackButton.addEventListener('click', () => { + window.open('https://slack.vllm.ai', '_blank', 'noopener'); + }); + + const forumButton = document.createElement('button'); + forumButton.className = 'forum-button'; + forumButton.title = 'Join the Forum'; + forumButton.style.border = 'none'; + forumButton.style.background = 'transparent'; + forumButton.style.cursor = 'pointer'; + + forumButton.innerHTML = ` + + + + `; + + forumButton.addEventListener('click', () => { + window.open('https://discuss.vllm.ai/', '_blank', 'noopener'); + }); + + const githubSource = document.querySelector('.md-header__source'); + if (githubSource) { + githubSource.parentNode.insertBefore(slackButton, githubSource.nextSibling); + githubSource.parentNode.insertBefore(forumButton, slackButton.nextSibling); + } + } +}); diff --git a/docs/mkdocs/stylesheets/extra.css b/docs/mkdocs/stylesheets/extra.css index 220657f83d5fc..248711f491b9d 100644 --- a/docs/mkdocs/stylesheets/extra.css +++ b/docs/mkdocs/stylesheets/extra.css @@ -108,3 +108,29 @@ body[data-md-color-scheme="slate"] .md-nav__item--section > label.md-nav__link . .md-content__button-wrapper a:hover { color: var(--md-accent-fg-color); } + +/* Slack and Forum css */ +.slack-button, +.forum-button { + display: inline-flex; + align-items: center; + justify-content: center; + margin-left: 0.4rem; + height: 24px; +} + +.slack-button img { + height: 18px; + filter: none !important; +} + +.slack-button:hover, +.forum-button:hover { + opacity: 0.7; +} + +.forum-button svg { + height: 28px; + opacity: 0.9; + transform: translateY(2px); +} diff --git a/mkdocs.yaml b/mkdocs.yaml index 9fb3fed8b8ac6..45b6ffadbeb71 100644 --- a/mkdocs.yaml +++ b/mkdocs.yaml @@ -127,6 +127,7 @@ extra_javascript: - mkdocs/javascript/run_llm_widget.js - https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML - mkdocs/javascript/edit_and_feedback.js + - mkdocs/javascript/slack_and_forum.js # Makes the url format end in .html rather than act as a dir # So index.md generates as index.html and is available under URL /index.html From f5dfa0753163530117b4766c4e79e8cb2dc7066e Mon Sep 17 00:00:00 2001 From: noiji <52301388+noiji@users.noreply.github.com> Date: Mon, 30 Jun 2025 18:21:56 +0900 Subject: [PATCH 198/211] [Bugfix] Skip loading extra parameters for modelopt Qwen3 MoE model (#19598) Signed-off-by: noiji <> --- vllm/model_executor/models/qwen3_moe.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/vllm/model_executor/models/qwen3_moe.py b/vllm/model_executor/models/qwen3_moe.py index 417d7b22088bf..90a28192eccbc 100644 --- a/vllm/model_executor/models/qwen3_moe.py +++ b/vllm/model_executor/models/qwen3_moe.py @@ -386,6 +386,11 @@ class Qwen3MoeModel(nn.Module): ("gate_up_proj", "up_proj", 1), ] + # Skip loading extra parameters for GPTQ/modelopt models. + ignore_suffixes = (".bias", "_bias", ".k_scale", "_k_scale", + ".v_scale", "_v_scale", ".weight_scale", + "_weight_scale", ".input_scale", "_input_scale") + # Params for weights, fp8 weight scales, fp8 activation scales # (param_name, weight_name, expert_id, shard_id) expert_params_mapping = FusedMoE.make_expert_params_mapping( @@ -410,10 +415,11 @@ class Qwen3MoeModel(nn.Module): if "mlp.experts" in name: continue name = name.replace(weight_name, param_name) - # Skip loading extra bias for GPTQ models. - if ((name.endswith(".bias") or name.endswith("_bias")) - and name not in params_dict): + + # Skip loading extra parameters for GPTQ/modelopt models. + if name.endswith(ignore_suffixes) and name not in params_dict: continue + # Skip layers on other devices. if is_pp_missing_parameter(name, self): continue @@ -433,9 +439,9 @@ class Qwen3MoeModel(nn.Module): # Skip layers on other devices. if is_pp_missing_parameter(name, self): continue - # Skip loading extra bias for GPTQ models. - if ((name.endswith(".bias") or name.endswith("_bias")) - and name not in params_dict): + # Skip loading extra parameters for GPTQ/modelopt models. + if name.endswith( + ignore_suffixes) and name not in params_dict: continue param = params_dict[name] weight_loader = param.weight_loader @@ -446,9 +452,9 @@ class Qwen3MoeModel(nn.Module): expert_id=expert_id) break else: - # Skip loading extra bias for GPTQ models. - if ((name.endswith(".bias") or name.endswith("_bias")) - and name not in params_dict): + # Skip loading extra parameters for GPTQ/modelopt models. + if name.endswith( + ignore_suffixes) and name not in params_dict: continue # Skip layers on other devices. if is_pp_missing_parameter(name, self): From e936e401debe7fba64d6462666d7dc632bc76357 Mon Sep 17 00:00:00 2001 From: Isotr0py Date: Mon, 30 Jun 2025 18:16:16 +0800 Subject: [PATCH 199/211] [Bugfix] Fix processor initialization in transformers 4.53.0 (#20244) Signed-off-by: Isotr0py <2037008807@qq.com> --- vllm/inputs/registry.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/vllm/inputs/registry.py b/vllm/inputs/registry.py index 66e78833f52af..fc6e190e54806 100644 --- a/vllm/inputs/registry.py +++ b/vllm/inputs/registry.py @@ -5,7 +5,9 @@ from dataclasses import dataclass from typing import TYPE_CHECKING, Any, NamedTuple, Optional, Union import torch +from packaging.version import Version from transformers import BatchFeature, PretrainedConfig, ProcessorMixin +from transformers import __version__ as TRANSFORMERS_VERSION from typing_extensions import TypeVar from vllm.jsontree import JSONTree, json_map_leaves @@ -128,9 +130,13 @@ class InputProcessingContext(InputContext): /, **kwargs: object, ) -> _P: + # Transformers 4.53.0 has issue with passing tokenizer to + # initialize processor. We disable it for this version. + # See: https://github.com/vllm-project/vllm/issues/20224 + if Version(TRANSFORMERS_VERSION) != Version("4.53.0"): + kwargs["tokenizer"] = self.tokenizer return super().get_hf_processor( typ, - tokenizer=self.tokenizer, **kwargs, ) From 8fe7fc863481a7d48c6f5bcc7bb40b2c7ebb5476 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Mon, 30 Jun 2025 18:22:09 +0800 Subject: [PATCH 200/211] [Quantization] Improve BitsAndBytesModelLoader (#20242) Signed-off-by: Jee Jee Li --- .../model_loader/bitsandbytes_loader.py | 123 ++++++++++-------- 1 file changed, 72 insertions(+), 51 deletions(-) diff --git a/vllm/model_executor/model_loader/bitsandbytes_loader.py b/vllm/model_executor/model_loader/bitsandbytes_loader.py index 09857ef297f0a..0c46d170e88d5 100644 --- a/vllm/model_executor/model_loader/bitsandbytes_loader.py +++ b/vllm/model_executor/model_loader/bitsandbytes_loader.py @@ -20,8 +20,6 @@ from vllm.distributed import (get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size) # yapf: enable from vllm.logger import init_logger -# yapf conflicts with isort for this block -# yapf: disable from vllm.model_executor.layers.linear import (LinearBase, MergedColumnParallelLinear, QKVParallelLinear, @@ -39,6 +37,8 @@ from vllm.model_executor.utils import (get_packed_modules_mapping, set_weight_attrs) from vllm.platforms import current_platform +# yapf conflicts with isort for this block + logger = init_logger(__name__) @@ -54,11 +54,17 @@ class BitsAndBytesModelLoader(BaseModelLoader): self.unsharded_weights_modules: list[str] = [] # Save the module names that are sharded by column. self.column_sharded_weights_modules: list[str] = [] + # Modules whose weights might have fused on disk + # we need their output_sizes to make shard in flight correctly with TP + self.maybe_fused_weights_modules: dict[str, list[int]] = {} # Store all module names (from transformers) that support # BNB quantization. self.target_modules: list[str] = [] # mapping weight names from transformers to vllm. self.weight_mapper: Callable = lambda name: name + self.pre_quant: bool = False + self.load_8bit: bool = False + self.is_pool_model: bool = False def _get_weight_files( self, @@ -134,13 +140,14 @@ class BitsAndBytesModelLoader(BaseModelLoader): return hf_weights_files, use_safetensors def _hf_weight_iter(self, hf_weights_files, use_safetensors: bool): - def _maybe_pool_model(module_name:str): + + def _maybe_pool_model(module_name: str): # For pool model, we need to add the prefix `model.` # for the weight name if possible. if self.is_pool_model and self.target_modules[0]. \ startswith("model.") and not module_name.startswith( "model."): - return "model."+module_name + return "model." + module_name return module_name @@ -159,8 +166,7 @@ class BitsAndBytesModelLoader(BaseModelLoader): # mapping weight names from transformers to vllm while preserving # original names. mapped_name = self.weight_mapper(org_name) - mapped_name=_maybe_pool_model(mapped_name) - + mapped_name = _maybe_pool_model(mapped_name) yield org_name, mapped_name, param @@ -168,8 +174,6 @@ class BitsAndBytesModelLoader(BaseModelLoader): self, model_name_or_path: str, revision: Optional[str], - pre_quant: bool, - load_8bit: bool, ) -> tuple[Generator[tuple[str, torch.Tensor], None, None], dict[str, Any]]: """Get an iterator to the model weights with bitsandbytes quantization, @@ -192,8 +196,8 @@ class BitsAndBytesModelLoader(BaseModelLoader): quant_state_dict: dict[str, Any] = {} - if pre_quant: - if load_8bit: + if self.pre_quant: + if self.load_8bit: return self._quantized_8bit_generator( hf_weights_files, use_safetensors, quant_state_dict), quant_state_dict @@ -390,10 +394,13 @@ class BitsAndBytesModelLoader(BaseModelLoader): yield org_weight_name, processed_weight def _get_bnb_target_modules(self, model: nn.Module) -> None: - + """ + Identify and collect all modules that support BitsAndBytes + quantization. + """ for name, module in model.named_modules(): - if (isinstance(module, LinearBase) and - hasattr(module.quant_method, "quant_config")): + if (isinstance(module, LinearBase) + and hasattr(module.quant_method, "quant_config")): if modules_info := self.modules_mapping.get_sub_modules(name): # Map vllm's names to transformers's names. rep_name, sub_modules = modules_info @@ -409,29 +416,11 @@ class BitsAndBytesModelLoader(BaseModelLoader): ), "vllm currently does not support BNB quantization for" f" {type(model).__name__}" - def load_weights(self, model: nn.Module, model_config: ModelConfig) -> None: - if not hasattr(model, "load_weights"): - raise AttributeError( - "The required method 'load_weights' is not defined in class" - f" {type(model).__name__}.") - - if not hasattr(model, "packed_modules_mapping"): - raise AttributeError( - f"Model {type(model).__name__} does not support BitsAndBytes " - "quantization yet. No 'packed_modules_mapping' found.") - self.is_pool_model=is_pooling_model(model) - - self.modules_mapping = ParamMapping(get_packed_modules_mapping(model)) - - # For some models like Molmo, we need to use hf_to_vllm_mapper - # to ensure correct loading of weights. - if hf_to_vllm_mapper := getattr(model, "hf_to_vllm_mapper", None): - self.weight_mapper = lambda name: hf_to_vllm_mapper._map_name(name) - - # Modules whose weights might have fused on disk - # we need their output_sizes to make shard in flight correctly with TP - self.maybe_fused_weights_modules: dict[str, list[int]] = {} - self._get_bnb_target_modules(model) + def _classify_module_sharding(self, model: nn.Module): + """ + Categorize modules based on their weight sharding requirements + for tensor parallelism. + """ for name, module in model.named_modules(): # Some modules like `ReplicatedLinear` should not have their weights # sharded. The reason for implementing it this way is to avoid new @@ -449,19 +438,27 @@ class BitsAndBytesModelLoader(BaseModelLoader): elif isinstance(module, (RowParallelLinear, )): self.column_sharded_weights_modules.append(name) - self.model_type = type(model).__name__ + def _verify_model_compatibility(self, model: nn.Module, + model_config: ModelConfig) -> None: + """ + Verify that the model is compatible with BitsAndBytes quantization. + """ + if not hasattr(model, "load_weights"): + raise AttributeError( + "The required method 'load_weights' is not defined in class" + f" {type(model).__name__}.") - logger.info("Loading weights with BitsAndBytes quantization. " - "May take a while ...") + if not hasattr(model, "packed_modules_mapping"): + raise AttributeError( + f"Model {type(model).__name__} does not support BitsAndBytes " + "quantization yet. No 'packed_modules_mapping' found.") quant_config = getattr(model_config.hf_config, "quantization_config", None) - - pre_quant = False if quant_config is not None: quant_method = quant_config.get("quant_method") if quant_method == "bitsandbytes": - pre_quant = True + self.pre_quant = True else: raise ValueError( f"BitsAndBytes loader does not support {quant_method} " @@ -469,20 +466,43 @@ class BitsAndBytesModelLoader(BaseModelLoader): # The quant_states in pre_quantized models cannot work with a split # weight tensor. So TP does not work with pre_quantized bnb models. - if pre_quant and get_tensor_model_parallel_world_size() > 1: + if self.pre_quant and get_tensor_model_parallel_world_size() > 1: raise ValueError( "Prequant BitsAndBytes models with tensor parallelism is not " "supported. Please try with pipeline parallelism.") + if self.pre_quant: + self.load_8bit = quant_config.get("load_in_8bit", False) - load_8bit = False - if pre_quant: - load_8bit = quant_config.get("load_in_8bit", False) + def _initialize_loader_state(self, model: nn.Module, + model_config: ModelConfig) -> None: + """ + Initialize the loader's internal state based on the model and + configuration. + """ + self.is_pool_model = is_pooling_model(model) + self.modules_mapping = ParamMapping(get_packed_modules_mapping(model)) + # For some models like Molmo, we need to use hf_to_vllm_mapper + # to ensure correct loading of weights. + if hf_to_vllm_mapper := getattr(model, "hf_to_vllm_mapper", None): + self.weight_mapper = lambda name: hf_to_vllm_mapper._map_name(name) + + self._get_bnb_target_modules(model) + self._classify_module_sharding(model) + + def load_weights(self, model: nn.Module, + model_config: ModelConfig) -> None: + + self._verify_model_compatibility(model, model_config) + self._initialize_loader_state(model, model_config) + + logger.info("Loading weights with BitsAndBytes quantization. " + "May take a while ...") qweight_iterator, quant_state_dict = ( - self._get_quantized_weights_iterator(model_config.model, - model_config.revision, - pre_quant, load_8bit)) - + self._get_quantized_weights_iterator( + model_config.model, + model_config.revision, + )) weights_to_load = {name for name, _ in model.named_parameters()} loaded_weights = model.load_weights(qweight_iterator) # Some models may have weights loading tracker unimplemented. @@ -562,10 +582,11 @@ class BitsAndBytesModelLoader(BaseModelLoader): offsets = torch.tensor(offsets).cpu() set_weight_attrs(param, {"bnb_shard_offsets": offsets}) - if load_8bit: + if self.load_8bit: set_weight_attrs( param, {"matmul_state": [None] * len(quant_states)}) torch.cuda.empty_cache() + def download_model(self, model_config: ModelConfig) -> None: self._prepare_weights(model_config.model, model_config.revision) From 3ee56e26be4cfddc17f7d2e5f38f15ab74ede1c2 Mon Sep 17 00:00:00 2001 From: Michael Yao Date: Mon, 30 Jun 2025 19:20:51 +0800 Subject: [PATCH 201/211] [Docs] Fix 1-2-3 list in v1/prefix_caching.md (#20243) Signed-off-by: windsonsea --- docs/design/v1/prefix_caching.md | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/design/v1/prefix_caching.md b/docs/design/v1/prefix_caching.md index e87e4c6a48b73..2d3c8412894a6 100644 --- a/docs/design/v1/prefix_caching.md +++ b/docs/design/v1/prefix_caching.md @@ -117,8 +117,8 @@ There are two design points to highlight: 1. We allocate all KVCacheBlock when initializing the KV cache manager to be a block pool. This avoids Python object creation overheads and can easily track all blocks all the time. 2. We introduce doubly linked list pointers directly in the KVCacheBlock, so that we could construct a free queue directly. This gives us two benefits: - 1. We could have O(1) complexity moving elements in the middle to the tail. - 2. We could avoid introducing another Python queue (e.g., `deque`) which has a wrapper to the elements. + 1. We could have O(1) complexity moving elements in the middle to the tail. + 2. We could avoid introducing another Python queue (e.g., `deque`) which has a wrapper to the elements. As a result, we will have the following components when the KV cache manager is initialized: @@ -135,19 +135,19 @@ As a result, we will have the following components when the KV cache manager is **New request:** Workflow for the scheduler to schedule a new request with KV cache block allocation: -1. The scheduler calls `kv_cache_manager.get_computed_blocks()` to get a sequence of blocks that have already been computed. This is done by hashing the prompt tokens in the request and looking up Cache Blocks. +1. The scheduler calls `kv_cache_manager.get_computed_blocks()` to get a sequence of blocks that have already been computed. This is done by hashing the prompt tokens in the request and looking up cache blocks. 2. The scheduler calls `kv_cache_manager.allocate_slots()`. It does the following steps: - 1. Compute the number of new required blocks, and return if there are no sufficient blocks to allocate. - 2. “Touch” the computed blocks. It increases the reference count of the computed block by one, and removes the block from the free queue if the block wasn’t used by other requests. This is to avoid these computed blocks being evicted. See the example in the next section for illustration. - 3. Allocate new blocks by popping the heads of the free queue. If the head block is a cached block, this also “evicts” the block so that no other requests can reuse it anymore from now on. - 4. If an allocated block is already full of tokens, we immediately add it to the Cache Block, so that the block can be reused by other requests in the same batch. + 1. Compute the number of new required blocks, and return if there are no sufficient blocks to allocate. + 2. “Touch” the computed blocks. It increases the reference count of the computed block by one, and removes the block from the free queue if the block wasn’t used by other requests. This is to avoid these computed blocks being evicted. See the example in the next section for illustration. + 3. Allocate new blocks by popping the heads of the free queue. If the head block is a cached block, this also “evicts” the block so that no other requests can reuse it anymore from now on. + 4. If an allocated block is already full of tokens, we immediately add it to the cache block, so that the block can be reused by other requests in the same batch. **Running request:** Workflow for the scheduler to schedule a running request with KV cache block allocation: 1. The scheduler calls `kv_cache_manager.allocate_slots()`. It does the following steps: - 1. Compute the number of new required blocks, and return if there are no sufficient blocks to allocate. - 2. Allocate new blocks by popping the heads of the free queue. If the head block is a cached block, this also “evicts” the block so that no other requests can reuse it anymore from now on. - 3. Append token IDs to the slots in existing blocks as well as the new blocks. If a block is full, we add it to the Cache Block to cache it. + 1. Compute the number of new required blocks, and return if there are no sufficient blocks to allocate. + 2. Allocate new blocks by popping the heads of the free queue. If the head block is a cached block, this also “evicts” the block so that no other requests can reuse it anymore from now on. + 3. Append token IDs to the slots in existing blocks as well as the new blocks. If a block is full, we add it to the cache block to cache it. **Duplicated blocks** Assuming block size is 4 and you send a request (Request 1\) with prompt ABCDEF and decoding length 3: @@ -199,7 +199,7 @@ When a request is finished, we free all its blocks if no other requests are usin When the head block (least recently used block) of the free queue is cached, we have to evict the block to prevent it from being used by other requests. Specifically, eviction involves the following steps: 1. Pop the block from the head of the free queue. This is the LRU block to be evicted. -2. Remove the block ID from the Cache Block. +2. Remove the block ID from the cache block. 3. Remove the block hash. ## Example From 1c50e100a9c5dc439aceb9c4437b262d564baa53 Mon Sep 17 00:00:00 2001 From: li haoyang Date: Mon, 30 Jun 2025 21:24:50 +0800 Subject: [PATCH 202/211] [Bugfix] fix quark ptpc (#20251) Signed-off-by: Haoyang Li Co-authored-by: Haoyang Li <307790822@qq.com> --- .../layers/quantization/quark/quark.py | 6 +--- .../quark/schemes/quark_w8a8_fp8.py | 33 ++++++++++++------- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/vllm/model_executor/layers/quantization/quark/quark.py b/vllm/model_executor/layers/quantization/quark/quark.py index 6ae5f5c9ad46b..05dff4bae3957 100644 --- a/vllm/model_executor/layers/quantization/quark/quark.py +++ b/vllm/model_executor/layers/quantization/quark/quark.py @@ -312,11 +312,7 @@ class QuarkConfig(QuantizationConfig): is_fp8_w8a8_supported = self._check_scheme_supported( QuarkW8A8Fp8.get_min_capability(), error=False) if is_fp8_w8a8_supported: - weight_qscheme = cast(str, weight_config.get("qscheme")) - input_static = (input_config is not None and - not cast(bool, input_config.get("is_dynamic"))) - return QuarkW8A8Fp8(qscheme=weight_qscheme, - is_static_input_scheme=input_static) + return QuarkW8A8Fp8(weight_config, input_config) elif self._is_static_tensor_w8a8(weight_config, input_config): weight_qscheme = cast(str, weight_config.get("qscheme")) return QuarkW8A8Int8(qscheme=weight_qscheme, diff --git a/vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py b/vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py index 47e0a492b23b9..c7bc98184d0eb 100644 --- a/vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py +++ b/vllm/model_executor/layers/quantization/quark/schemes/quark_w8a8_fp8.py @@ -1,7 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project -from typing import Callable, Optional +from typing import Any, Callable, Optional, cast import torch from torch.nn import Parameter @@ -19,10 +19,19 @@ __all__ = ["QuarkW8A8Fp8"] class QuarkW8A8Fp8(QuarkScheme): - def __init__(self, qscheme: str, is_static_input_scheme: Optional[bool]): - self.qscheme = qscheme - self.is_static_input_scheme = is_static_input_scheme - self.fp8_linear = Fp8LinearOp(use_per_token_if_dynamic=False) + def __init__(self, weight_config: dict[str, Any], + input_config: Optional[dict[str, Any]]): + self.weight_qscheme = cast(str, weight_config.get("qscheme")) + self.is_static_input_scheme: bool = False + self.input_qscheme: Optional[str] = None + if input_config is not None: + self.is_static_input_scheme = not cast( + bool, input_config.get("is_dynamic")) + self.input_qscheme = cast(str, input_config.get("qscheme")) + self.use_per_token_if_dynamic = (not self.is_static_input_scheme \ + and self.input_qscheme == "per_channel") + self.fp8_linear = Fp8LinearOp( + use_per_token_if_dynamic=self.use_per_token_if_dynamic) self.out_dtype = torch.get_default_dtype() @classmethod @@ -34,7 +43,7 @@ class QuarkW8A8Fp8(QuarkScheme): # If per tensor, when we have a fused module (e.g. QKV) with per # tensor scales (thus N scales being passed to the kernel), # requantize so we can always run per tensor - if self.qscheme == "per_tensor": + if self.weight_qscheme == "per_tensor": if current_platform.is_rocm(): input_scale = getattr(layer, 'input_scale', None) weight, max_w_scale, input_scale = normalize_e4m3fn_to_e4m3fnuz( @@ -58,7 +67,7 @@ class QuarkW8A8Fp8(QuarkScheme): layer.weight_scale = Parameter(max_w_scale, requires_grad=False) # If channelwise, scales are already lined up, so just transpose. - elif self.qscheme == "per_channel": + elif self.weight_qscheme == "per_channel": weight = layer.weight if current_platform.is_fp8_fnuz(): @@ -73,13 +82,15 @@ class QuarkW8A8Fp8(QuarkScheme): requires_grad=False) else: weight_scale = layer.weight_scale.data - + if self.use_per_token_if_dynamic: + weight_scale = weight_scale.view(-1, 1) layer.weight = Parameter(weight.t(), requires_grad=False) # required by torch.compile to be torch.nn.Parameter layer.weight_scale = Parameter(weight_scale, requires_grad=False) else: - raise ValueError(f"Unknown quantization scheme {self.qscheme}") + raise ValueError( + f"Unknown quantization scheme {self.weight_qscheme}") # INPUT SCALE if self.is_static_input_scheme: @@ -109,14 +120,14 @@ class QuarkW8A8Fp8(QuarkScheme): # WEIGHT SCALE # TODO: update create_xxx_parameter functions to return # the newly added parameters - if self.qscheme == "per_channel": + if self.weight_qscheme == "per_channel": weight_scale = ChannelQuantScaleParameter( data=torch.empty((sum(output_partition_sizes)), dtype=torch.float32), output_dim=0, weight_loader=weight_loader) else: - assert self.qscheme == "per_tensor" + assert self.weight_qscheme == "per_tensor" weight_scale = PerTensorScaleParameter(data=torch.empty( len(output_partition_sizes), dtype=torch.float32), weight_loader=weight_loader) From 2062c0723d38a8f4a8a7565b61a99e8c81b5cacd Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Mon, 30 Jun 2025 08:13:50 -0700 Subject: [PATCH 203/211] [Spec Decode] Refactor spec decoding into a separate function (#20238) Signed-off-by: Woosuk Kwon --- vllm/v1/worker/gpu_model_runner.py | 93 +++++++++++++++++++----------- 1 file changed, 60 insertions(+), 33 deletions(-) diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index 290b9a44a80e2..e063e44dabfa1 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -1388,6 +1388,8 @@ class GPUModelRunner(LoRAModelRunnerMixin): hidden_states, aux_hidden_states = model_output else: hidden_states = model_output + aux_hidden_states = None + # Broadcast PP output for external_launcher (torchrun) # to make sure we are synced across pp ranks # TODO: Support overlapping mirco-batches @@ -1510,25 +1512,67 @@ class GPUModelRunner(LoRAModelRunnerMixin): if not self.speculative_config: # Speculative decoding is not enabled. spec_token_ids = None - elif self.speculative_config.method == "ngram": + else: + spec_token_ids = self.propose_draft_token_ids( + scheduler_output, + valid_sampled_token_ids, + sampling_metadata, + hidden_states, + sample_hidden_states, + aux_hidden_states, + spec_decode_metadata, + attn_metadata, + ) + + # Clear KVConnector state after all KVs are generated. + if has_kv_transfer_group(): + get_kv_transfer_group().clear_connector_metadata() + + self.eplb_step() + + return ModelRunnerOutput( + req_ids=self.input_batch.req_ids, + req_id_to_index=self.input_batch.req_id_to_index, + sampled_token_ids=valid_sampled_token_ids, + spec_token_ids=spec_token_ids, + logprobs=logprobs_lists, + prompt_logprobs_dict=prompt_logprobs_dict, + pooler_output=[], + finished_sending=finished_sending, + finished_recving=finished_recving, + num_nans_in_logits=num_nans_in_logits, + ) + + def propose_draft_token_ids( + self, + scheduler_output: "SchedulerOutput", + sampled_token_ids: list[list[int]], + sampling_metadata: SamplingMetadata, + hidden_states: torch.Tensor, + sample_hidden_states: torch.Tensor, + aux_hidden_states: Optional[torch.Tensor], + spec_decode_metadata: Optional[SpecDecodeMetadata], + attn_metadata: dict[str, Any], + ) -> list[list[int]]: + num_scheduled_tokens = scheduler_output.total_num_scheduled_tokens + if self.speculative_config.method == "ngram": assert isinstance(self.drafter, NgramProposer) - spec_token_ids = self.generate_draft_token_ids( - valid_sampled_token_ids, sampling_metadata) + spec_token_ids = self.propose_ngram_draft_token_ids( + sampled_token_ids) elif self.speculative_config.method == "medusa": assert isinstance(self.drafter, MedusaProposer) - if max_gen_len == 1: + if sample_hidden_states.shape[0] == len(sampled_token_ids): + # The input to the target model does not include draft tokens. hidden_states = sample_hidden_states else: indices = [] offset = 0 for num_draft, tokens in zip( spec_decode_metadata.num_draft_tokens, - valid_sampled_token_ids): + sampled_token_ids): indices.append(offset + len(tokens) - 1) offset += num_draft + 1 - - indices = torch.tensor(indices, - device=sample_hidden_states.device) + indices = torch.tensor(indices, device=self.device) hidden_states = sample_hidden_states[indices] spec_token_ids = self.drafter.propose( @@ -1539,7 +1583,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): assert isinstance(self.drafter, EagleProposer) # TODO(woosuk): Refactor the loop. next_token_ids: list[int] = [] - for i, token_ids in enumerate(valid_sampled_token_ids): + for i, token_ids in enumerate(sampled_token_ids): if token_ids: # Common case. next_token_id = token_ids[-1] @@ -1569,7 +1613,8 @@ class GPUModelRunner(LoRAModelRunnerMixin): if spec_decode_metadata is None: # input_ids can be None for multimodal models. target_token_ids = self.input_ids[:num_scheduled_tokens] - target_positions = positions[:num_scheduled_tokens] + # TODO(woosuk): Support M-RoPE. + target_positions = self.positions[:num_scheduled_tokens] if self.use_aux_hidden_state_outputs: target_hidden_states = torch.cat( [h[:num_scheduled_tokens] for h in aux_hidden_states], @@ -1582,7 +1627,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): # TODO(woosuk): Refactor this. num_draft_tokens = spec_decode_metadata.num_draft_tokens num_rejected_tokens = [ - n + 1 - len(valid_sampled_token_ids[i]) if n > 0 else 0 + n + 1 - len(sampled_token_ids[i]) if n > 0 else 0 for i, n in enumerate(num_draft_tokens) ] num_rejected_tokens_tensor = async_tensor_h2d( @@ -1597,7 +1642,8 @@ class GPUModelRunner(LoRAModelRunnerMixin): num_tokens, ) target_token_ids = self.input_ids[token_indices] - target_positions = positions[token_indices] + # TODO(woosuk): Support M-RoPE. + target_positions = self.positions[token_indices] if self.use_aux_hidden_state_outputs: target_hidden_states = torch.cat( [h[token_indices] for h in aux_hidden_states], dim=-1) @@ -1616,25 +1662,7 @@ class GPUModelRunner(LoRAModelRunnerMixin): sampling_metadata=sampling_metadata, ) spec_token_ids = draft_token_ids.tolist() - - # Clear KVConnector state after all KVs are generated. - if has_kv_transfer_group(): - get_kv_transfer_group().clear_connector_metadata() - - self.eplb_step() - - return ModelRunnerOutput( - req_ids=self.input_batch.req_ids, - req_id_to_index=self.input_batch.req_id_to_index, - sampled_token_ids=valid_sampled_token_ids, - spec_token_ids=spec_token_ids, - logprobs=logprobs_lists, - prompt_logprobs_dict=prompt_logprobs_dict, - pooler_output=[], - finished_sending=finished_sending, - finished_recving=finished_recving, - num_nans_in_logits=num_nans_in_logits, - ) + return spec_token_ids def kv_connector_no_forward( self, scheduler_output: "SchedulerOutput") -> ModelRunnerOutput: @@ -1682,10 +1710,9 @@ class GPUModelRunner(LoRAModelRunnerMixin): scheduler_output.finished_req_ids) return None, None - def generate_draft_token_ids( + def propose_ngram_draft_token_ids( self, sampled_token_ids: list[list[int]], - sampling_metadata: SamplingMetadata, ) -> list[list[int]]: # TODO(woosuk): Optimize. draft_token_ids: list[list[int]] = [] From 2965c99c86b460ee819e4805764d769c7b7d3d8e Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Mon, 30 Jun 2025 08:28:13 -0700 Subject: [PATCH 204/211] [Spec Decode] Clean up spec decode example (#20240) Signed-off-by: Woosuk Kwon --- examples/offline_inference/eagle.py | 144 ---------------------- examples/offline_inference/spec_decode.py | 40 +++--- 2 files changed, 21 insertions(+), 163 deletions(-) delete mode 100644 examples/offline_inference/eagle.py diff --git a/examples/offline_inference/eagle.py b/examples/offline_inference/eagle.py deleted file mode 100644 index f4193fdb8bd38..0000000000000 --- a/examples/offline_inference/eagle.py +++ /dev/null @@ -1,144 +0,0 @@ -# SPDX-License-Identifier: Apache-2.0 -# SPDX-FileCopyrightText: Copyright contributors to the vLLM project -import argparse -import json -import os - -from transformers import AutoTokenizer - -from vllm import LLM, SamplingParams -from vllm.v1.metrics.reader import Counter, Vector - - -def load_prompts(dataset_path, num_prompts): - if os.path.exists(dataset_path): - prompts = [] - try: - with open(dataset_path) as f: - for line in f: - data = json.loads(line) - prompts.append(data["turns"][0]) - except Exception as e: - print(f"Error reading dataset: {e}") - return [] - else: - prompts = ["The future of AI is", "The president of the United States is"] - - return prompts[:num_prompts] - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument( - "--dataset", - type=str, - default="./examples/data/gsm8k.jsonl", - help="downloaded from the eagle repo " - "https://github.com/SafeAILab/EAGLE/blob/main/eagle/data/", - ) - parser.add_argument( - "--method", type=str, default="eagle", choices=["eagle", "eagle3"] - ) - parser.add_argument("--max_num_seqs", type=int, default=8) - parser.add_argument("--num_prompts", type=int, default=80) - parser.add_argument("--num_spec_tokens", type=int, default=2) - parser.add_argument("--tp", type=int, default=1) - parser.add_argument("--draft_tp", type=int, default=1) - parser.add_argument("--enforce_eager", action="store_true") - parser.add_argument("--enable_chunked_prefill", action="store_true") - parser.add_argument("--max_num_batched_tokens", type=int, default=2048) - parser.add_argument("--temp", type=float, default=0) - return parser.parse_args() - - -def main(): - args = parse_args() - - model_dir = "meta-llama/Llama-3.1-8B-Instruct" - - if args.method == "eagle": - eagle_dir = "yuhuili/EAGLE-LLaMA3.1-Instruct-8B" - elif args.method == "eagle3": - eagle_dir = "yuhuili/EAGLE3-LLaMA3.1-Instruct-8B" - else: - raise ValueError(f"unknown method: {args.method}") - - max_model_len = 2048 - - tokenizer = AutoTokenizer.from_pretrained(model_dir) - - prompts = load_prompts(args.dataset, args.num_prompts) - - prompt_ids = [ - tokenizer.apply_chat_template( - [{"role": "user", "content": prompt}], add_generation_prompt=True - ) - for prompt in prompts - ] - - llm = LLM( - model=model_dir, - trust_remote_code=True, - tensor_parallel_size=args.tp, - enable_chunked_prefill=args.enable_chunked_prefill, - max_num_batched_tokens=args.max_num_batched_tokens, - enforce_eager=args.enforce_eager, - max_model_len=max_model_len, - max_num_seqs=args.max_num_seqs, - gpu_memory_utilization=0.8, - speculative_config={ - "method": args.method, - "model": eagle_dir, - "num_speculative_tokens": args.num_spec_tokens, - "draft_tensor_parallel_size": args.draft_tp, - "max_model_len": max_model_len, - }, - disable_log_stats=False, - ) - - sampling_params = SamplingParams(temperature=args.temp, max_tokens=256) - - outputs = llm.generate(prompt_token_ids=prompt_ids, sampling_params=sampling_params) - - # print the generated text - for output in outputs: - print("-" * 50) - print(f"prompt: {output.prompt}") - print(f"generated text: {output.outputs[0].text}") - print("-" * 50) - - try: - metrics = llm.get_metrics() - except AssertionError: - print("Metrics are not supported in the V0 engine.") - return - - num_drafts = num_accepted = 0 - acceptance_counts = [0] * args.num_spec_tokens - for metric in metrics: - if metric.name == "vllm:spec_decode_num_drafts": - assert isinstance(metric, Counter) - num_drafts += metric.value - elif metric.name == "vllm:spec_decode_num_accepted_tokens": - assert isinstance(metric, Counter) - num_accepted += metric.value - elif metric.name == "vllm:spec_decode_num_accepted_tokens_per_pos": - assert isinstance(metric, Vector) - for pos in range(len(metric.values)): - acceptance_counts[pos] += metric.values[pos] - - print("-" * 50) - print(f"mean acceptance length: {1 + (num_accepted / num_drafts):.2f}") - print("-" * 50) - - # print acceptance at each token position - for i in range(len(acceptance_counts)): - print(f"acceptance at token {i}:{acceptance_counts[i] / num_drafts:.2f}") - - -if __name__ == "__main__": - print( - "[WARNING] Use examples/offline_inference/spec_decode.py" - " instead of this script." - ) - main() diff --git a/examples/offline_inference/spec_decode.py b/examples/offline_inference/spec_decode.py index 6fa68d2ecee1d..90d103e5cb05d 100644 --- a/examples/offline_inference/spec_decode.py +++ b/examples/offline_inference/spec_decode.py @@ -16,24 +16,17 @@ def parse_args(): parser = FlexibleArgumentParser() add_dataset_parser(parser) parser.add_argument( - "--dataset", + "--method", type=str, - default="./examples/data/gsm8k.jsonl", - help="downloaded from the eagle repo " - "https://github.com/SafeAILab/EAGLE/blob/main/eagle/data/", + default="eagle", + choices=["ngram", "eagle", "eagle3", "mtp"], ) - parser.add_argument( - "--method", type=str, default="eagle", choices=["ngram", "eagle", "eagle3"] - ) - parser.add_argument("--max-num-seqs", type=int, default=8) parser.add_argument("--num-spec-tokens", type=int, default=2) parser.add_argument("--prompt-lookup-max", type=int, default=5) parser.add_argument("--prompt-lookup-min", type=int, default=2) parser.add_argument("--tp", type=int, default=1) - parser.add_argument("--draft-tp", type=int, default=1) parser.add_argument("--enforce-eager", action="store_true") parser.add_argument("--enable-chunked-prefill", action="store_true") - parser.add_argument("--max-num-batched-tokens", type=int, default=2048) parser.add_argument("--temp", type=float, default=0) parser.add_argument("--top-p", type=float, default=1.0) parser.add_argument("--top-k", type=int, default=-1) @@ -41,7 +34,6 @@ def parse_args(): parser.add_argument("--output-len", type=int, default=256) parser.add_argument("--model-dir", type=str, default=None) parser.add_argument("--eagle-dir", type=str, default=None) - parser.add_argument("--max-model-len", type=int, default=2048) return parser.parse_args() @@ -71,8 +63,6 @@ def main(): "method": args.method, "model": eagle_dir, "num_speculative_tokens": args.num_spec_tokens, - "draft_tensor_parallel_size": args.draft_tp, - "max_model_len": args.max_model_len, } elif args.method == "ngram": speculative_config = { @@ -80,7 +70,6 @@ def main(): "num_speculative_tokens": args.num_spec_tokens, "prompt_lookup_max": args.prompt_lookup_max, "prompt_lookup_min": args.prompt_lookup_min, - "max_model_len": args.max_model_len, } else: raise ValueError(f"unknown method: {args.method}") @@ -92,7 +81,6 @@ def main(): enable_chunked_prefill=args.enable_chunked_prefill, max_num_batched_tokens=args.max_num_batched_tokens, enforce_eager=args.enforce_eager, - max_model_len=args.max_model_len, max_num_seqs=args.max_num_seqs, gpu_memory_utilization=0.8, speculative_config=speculative_config, @@ -116,27 +104,41 @@ def main(): print("Metrics are not supported in the V0 engine.") return - num_drafts = num_accepted = 0 + total_num_output_tokens = sum( + len(output.outputs[0].token_ids) for output in outputs + ) + num_drafts = 0 + num_draft_tokens = 0 + num_accepted_tokens = 0 acceptance_counts = [0] * args.num_spec_tokens for metric in metrics: if metric.name == "vllm:spec_decode_num_drafts": assert isinstance(metric, Counter) num_drafts += metric.value + elif metric.name == "vllm:spec_decode_num_draft_tokens": + assert isinstance(metric, Counter) + num_draft_tokens += metric.value elif metric.name == "vllm:spec_decode_num_accepted_tokens": assert isinstance(metric, Counter) - num_accepted += metric.value + num_accepted_tokens += metric.value elif metric.name == "vllm:spec_decode_num_accepted_tokens_per_pos": assert isinstance(metric, Vector) for pos in range(len(metric.values)): acceptance_counts[pos] += metric.values[pos] print("-" * 50) - print(f"mean acceptance length: {1 + (num_accepted / num_drafts):.2f}") + print(f"total_num_output_tokens: {total_num_output_tokens}") + print(f"num_drafts: {num_drafts}") + print(f"num_draft_tokens: {num_draft_tokens}") + print(f"num_accepted_tokens: {num_accepted_tokens}") + acceptance_length = 1 + (num_accepted_tokens / num_drafts) if num_drafts > 0 else 1 + print(f"mean acceptance length: {acceptance_length:.2f}") print("-" * 50) # print acceptance at each token position for i in range(len(acceptance_counts)): - print(f"acceptance at token {i}:{acceptance_counts[i] / num_drafts:.2f}") + acceptance_rate = acceptance_counts[i] / num_drafts if num_drafts > 0 else 0 + print(f"acceptance at token {i}: {acceptance_rate:.2f}") if __name__ == "__main__": From 2863befce359ee1a82afe02d1953252866aa3e96 Mon Sep 17 00:00:00 2001 From: Woosuk Kwon Date: Mon, 30 Jun 2025 09:07:50 -0700 Subject: [PATCH 205/211] [Optimization] Use Shared `CachedRequestData` Instance Across All Requests (#20232) Signed-off-by: Woosuk Kwon --- tests/v1/core/test_scheduler.py | 130 +++++++++--------- .../unit/test_remote_decode_lifecycle.py | 4 +- .../unit/test_remote_prefill_lifecycle.py | 12 +- tests/v1/kv_connector/unit/utils.py | 1 - tests/v1/tpu/worker/test_tpu_model_runner.py | 22 +-- tests/v1/worker/test_gpu_model_runner.py | 22 +-- .../kv_connector/v1/p2p/p2p_nccl_connector.py | 43 +++--- .../v1/shared_storage_connector.py | 19 ++- vllm/v1/core/sched/output.py | 34 +++-- vllm/v1/core/sched/scheduler.py | 106 ++++++-------- vllm/v1/worker/gpu_model_runner.py | 34 ++--- vllm/v1/worker/tpu_model_runner.py | 24 ++-- 12 files changed, 220 insertions(+), 231 deletions(-) diff --git a/tests/v1/core/test_scheduler.py b/tests/v1/core/test_scheduler.py index 8994816a3017c..652a556659fe3 100644 --- a/tests/v1/core/test_scheduler.py +++ b/tests/v1/core/test_scheduler.py @@ -10,7 +10,7 @@ from vllm.config import (CacheConfig, KVTransferConfig, ModelConfig, SchedulerConfig, SpeculativeConfig, VllmConfig) from vllm.multimodal.inputs import MultiModalKwargs, PlaceholderRange from vllm.sampling_params import SamplingParams -from vllm.v1.core.sched.output import SchedulerOutput +from vllm.v1.core.sched.output import CachedRequestData, SchedulerOutput from vllm.v1.core.sched.scheduler import Scheduler from vllm.v1.kv_cache_interface import (FullAttentionSpec, KVCacheConfig, KVCacheGroupSpec) @@ -198,7 +198,7 @@ def test_schedule(enable_prefix_caching: Optional[bool], # Test initial scheduling output = scheduler.schedule() assert len(output.scheduled_new_reqs) == len(requests) - assert len(output.scheduled_cached_reqs) == 0 + assert output.scheduled_cached_reqs.num_reqs == 0 assert len(output.finished_req_ids) == 0 # Verify all requests are scheduled. for req_id, num_tokens in output.num_scheduled_tokens.items(): @@ -225,7 +225,7 @@ def test_schedule_multimodal_requests(): output = scheduler.schedule() assert len(output.scheduled_new_reqs) == len(requests) - assert len(output.scheduled_cached_reqs) == 0 + assert output.scheduled_cached_reqs.num_reqs == 0 assert len(output.finished_req_ids) == 0 for req_id, num_tokens in output.num_scheduled_tokens.items(): assert num_tokens == len(requests[int(req_id)].prompt_token_ids) @@ -259,7 +259,7 @@ def test_schedule_partial_requests(): output = scheduler.schedule() assert len(output.scheduled_new_reqs) == 3 - assert len(output.scheduled_cached_reqs) == 0 + assert output.scheduled_cached_reqs.num_reqs == 0 assert len(output.finished_req_ids) == 0 assert scheduler.max_num_encoder_input_tokens == 1024 @@ -295,7 +295,7 @@ def test_schedule_partial_requests(): output = scheduler.schedule() assert len(scheduler.running) == 3 assert len(output.scheduled_new_reqs) == 0 - assert len(output.scheduled_cached_reqs) == 2 + assert output.scheduled_cached_reqs.num_reqs == 2 assert len(output.finished_req_ids) == 0 assert output.num_scheduled_tokens[requests[0].request_id] == 1 assert output.num_scheduled_tokens[requests[1].request_id] == 700 @@ -319,7 +319,7 @@ def test_no_mm_input_chunking(): output = scheduler.schedule() assert len(output.scheduled_new_reqs) == 1 - assert len(output.scheduled_cached_reqs) == 0 + assert output.scheduled_cached_reqs.num_reqs == 0 assert len(output.finished_req_ids) == 0 # We want to only see the 400 text tokens at the start scheduled assert output.num_scheduled_tokens[requests[0].request_id] == 400 @@ -342,7 +342,7 @@ def test_no_mm_input_chunking(): output = scheduler.schedule() assert len(scheduler.running) == 1 assert len(output.scheduled_new_reqs) == 0 - assert len(output.scheduled_cached_reqs) == 1 + assert output.scheduled_cached_reqs.num_reqs == 1 assert len(output.finished_req_ids) == 0 assert output.num_scheduled_tokens[requests[0].request_id] == 800 @@ -379,7 +379,7 @@ def test_schedule_concurrent_partial_requests(enable_prefix_caching: bool): output = scheduler.schedule() assert len(output.scheduled_new_reqs) == 3 - assert len(output.scheduled_cached_reqs) == 0 + assert output.scheduled_cached_reqs.num_reqs == 0 assert len(output.finished_req_ids) == 0 # The first request is scheduled partially - 400. @@ -408,7 +408,7 @@ def test_schedule_concurrent_partial_requests(enable_prefix_caching: bool): output1 = scheduler.schedule() assert len(scheduler.running) == 3 assert len(output1.scheduled_new_reqs) == 0 - assert len(output1.scheduled_cached_reqs) == 3 + assert output1.scheduled_cached_reqs.num_reqs == 3 assert len(output1.finished_req_ids) == 0 assert output1.num_scheduled_tokens[requests[0].request_id] == 400 assert output1.num_scheduled_tokens[requests[1].request_id] == 400 @@ -430,7 +430,7 @@ def test_schedule_concurrent_partial_requests(enable_prefix_caching: bool): output2 = scheduler.schedule() assert len(scheduler.running) == 3 assert len(output2.scheduled_new_reqs) == 0 - assert len(output2.scheduled_cached_reqs) == 3 + assert output2.scheduled_cached_reqs.num_reqs == 3 assert len(output2.finished_req_ids) == 0 assert output2.num_scheduled_tokens[requests[0].request_id] == 1 assert output2.num_scheduled_tokens[requests[1].request_id] == 1 @@ -449,23 +449,24 @@ def test_stop_via_update_from_output(): scheduler.requests[req.request_id] = req scheduler.running.append(req) - scheduler_output = SchedulerOutput(scheduled_new_reqs=[], - scheduled_cached_reqs=[], - num_scheduled_tokens={ - requests[0].request_id: 1, - requests[1].request_id: 2 - }, - total_num_scheduled_tokens=3, - scheduled_encoder_inputs={}, - scheduled_spec_decode_tokens={ - requests[0].request_id: [], - requests[1].request_id: [10] - }, - num_common_prefix_blocks=0, - finished_req_ids=set(), - free_encoder_input_ids=[], - structured_output_request_ids={}, - grammar_bitmask=None) + scheduler_output = SchedulerOutput( + scheduled_new_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), + num_scheduled_tokens={ + requests[0].request_id: 1, + requests[1].request_id: 2 + }, + total_num_scheduled_tokens=3, + scheduled_encoder_inputs={}, + scheduled_spec_decode_tokens={ + requests[0].request_id: [], + requests[1].request_id: [10] + }, + num_common_prefix_blocks=0, + finished_req_ids=set(), + free_encoder_input_ids=[], + structured_output_request_ids={}, + grammar_bitmask=None) model_output = ModelRunnerOutput( req_ids=[req.request_id for req in requests], @@ -501,23 +502,25 @@ def test_stop_via_update_from_output(): scheduler.requests[req.request_id] = req scheduler.running.append(req) - scheduler_output = SchedulerOutput(scheduled_new_reqs=[], - scheduled_cached_reqs=[], - num_scheduled_tokens={ - requests[0].request_id: 3, - requests[1].request_id: 2 - }, - total_num_scheduled_tokens=5, - scheduled_encoder_inputs={}, - scheduled_spec_decode_tokens={ - requests[0].request_id: [10, 42], - requests[1].request_id: [13] - }, - num_common_prefix_blocks=0, - finished_req_ids=set(), - free_encoder_input_ids=[], - structured_output_request_ids={}, - grammar_bitmask=None) + scheduler_output = SchedulerOutput( + scheduled_new_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), + num_scheduled_tokens={ + requests[0].request_id: 3, + requests[1].request_id: 2 + }, + total_num_scheduled_tokens=5, + scheduled_encoder_inputs={}, + scheduled_spec_decode_tokens={ + requests[0].request_id: [10, 42], + requests[1].request_id: [13] + }, + num_common_prefix_blocks=0, + finished_req_ids=set(), + free_encoder_input_ids=[], + structured_output_request_ids={}, + grammar_bitmask=None, + ) model_output = ModelRunnerOutput( req_ids=[req.request_id for req in requests], @@ -551,23 +554,25 @@ def test_stop_via_update_from_output(): scheduler.requests[req.request_id] = req scheduler.running.append(req) - scheduler_output = SchedulerOutput(scheduled_new_reqs=[], - scheduled_cached_reqs=[], - num_scheduled_tokens={ - requests[0].request_id: 3, - requests[1].request_id: 1 - }, - total_num_scheduled_tokens=4, - scheduled_encoder_inputs={}, - scheduled_spec_decode_tokens={ - requests[0].request_id: [10, 11], - requests[1].request_id: [] - }, - num_common_prefix_blocks=0, - finished_req_ids=set(), - free_encoder_input_ids=[], - structured_output_request_ids={}, - grammar_bitmask=None) + scheduler_output = SchedulerOutput( + scheduled_new_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), + num_scheduled_tokens={ + requests[0].request_id: 3, + requests[1].request_id: 1 + }, + total_num_scheduled_tokens=4, + scheduled_encoder_inputs={}, + scheduled_spec_decode_tokens={ + requests[0].request_id: [10, 11], + requests[1].request_id: [] + }, + num_common_prefix_blocks=0, + finished_req_ids=set(), + free_encoder_input_ids=[], + structured_output_request_ids={}, + grammar_bitmask=None, + ) model_output = ModelRunnerOutput( req_ids=[req.request_id for req in requests], @@ -603,7 +608,7 @@ def test_stop_via_update_from_output(): scheduler_output = SchedulerOutput( scheduled_new_reqs=[], - scheduled_cached_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens={requests[0].request_id: 3}, total_num_scheduled_tokens=3, scheduled_encoder_inputs={}, @@ -1208,7 +1213,6 @@ def assert_scheduler_empty(scheduler: Scheduler): assert len(scheduler.waiting) == 0 assert len(scheduler.running) == 0 assert len(scheduler.finished_req_ids) == 0 - assert len(scheduler._cached_reqs_data) == 0 # EncoderCacheManager. assert len(scheduler.encoder_cache_manager.freed) == 0 diff --git a/tests/v1/kv_connector/unit/test_remote_decode_lifecycle.py b/tests/v1/kv_connector/unit/test_remote_decode_lifecycle.py index ff36a281c413d..12a71d97e8d29 100644 --- a/tests/v1/kv_connector/unit/test_remote_decode_lifecycle.py +++ b/tests/v1/kv_connector/unit/test_remote_decode_lifecycle.py @@ -66,7 +66,7 @@ def test_basic_lifecycle(): assert len(scheduler_output.finished_req_ids) == 1 assert request_id in scheduler_output.finished_req_ids assert len(scheduler_output.scheduled_new_reqs) == 0 - assert len(scheduler_output.scheduled_cached_reqs) == 0 + assert scheduler_output.scheduled_cached_reqs.num_reqs == 0 assert len(scheduler.finished_req_ids) == 0 # (2b): execute_model() @@ -81,7 +81,7 @@ def test_basic_lifecycle(): assert len(scheduler.running) == 0 assert len(scheduler_output.finished_req_ids) == 0 assert len(scheduler_output.scheduled_new_reqs) == 0 - assert len(scheduler_output.scheduled_cached_reqs) == 0 + assert scheduler_output.scheduled_cached_reqs.num_reqs == 0 assert len(scheduler.finished_req_ids) == 0 # (3b): execute_model() diff --git a/tests/v1/kv_connector/unit/test_remote_prefill_lifecycle.py b/tests/v1/kv_connector/unit/test_remote_prefill_lifecycle.py index a1156306dc4bf..f89970bf2c807 100644 --- a/tests/v1/kv_connector/unit/test_remote_prefill_lifecycle.py +++ b/tests/v1/kv_connector/unit/test_remote_prefill_lifecycle.py @@ -36,7 +36,7 @@ def test_basic_lifecycle(): # Nothing running and empty scheduler output. assert len(scheduler.running) == 0 assert len(scheduler_output.scheduled_new_reqs) == 0 - assert len(scheduler_output.scheduled_cached_reqs) == 0 + assert scheduler_output.scheduled_cached_reqs.num_reqs == 0 assert len(scheduler_output.num_scheduled_tokens) == 0 assert scheduler_output.total_num_scheduled_tokens == 0 @@ -158,7 +158,7 @@ def test_interleaved_lifecycle(): assert len(scheduler.running) == 2 assert len(scheduler.waiting) == 1 assert len(scheduler_output.scheduled_new_reqs) == 1 - assert len(scheduler_output.scheduled_cached_reqs) == 1 + assert scheduler_output.scheduled_cached_reqs.num_reqs == 1 model_runner_output = create_model_runner_output( [request_local_a, request_local_b]) @@ -169,7 +169,7 @@ def test_interleaved_lifecycle(): assert len(scheduler.running) == 2 assert len(scheduler.waiting) == 1 assert len(scheduler_output.scheduled_new_reqs) == 0 - assert len(scheduler_output.scheduled_cached_reqs) == 2 + assert scheduler_output.scheduled_cached_reqs.num_reqs == 2 model_runner_output = create_model_runner_output( reqs=[request_local_a, request_local_b]) @@ -177,14 +177,14 @@ def test_interleaved_lifecycle(): assert len(scheduler.running) == 2 assert len(scheduler.waiting) == 1 assert len(scheduler_output.scheduled_new_reqs) == 0 - assert len(scheduler_output.scheduled_cached_reqs) == 2 + assert scheduler_output.scheduled_cached_reqs.num_reqs == 2 # STEP 4: KVs arrive. scheduler_output = scheduler.schedule() assert len(scheduler.running) == 2 assert len(scheduler.waiting) == 1 assert len(scheduler_output.scheduled_new_reqs) == 0 - assert len(scheduler_output.scheduled_cached_reqs) == 2 + assert scheduler_output.scheduled_cached_reqs.num_reqs == 2 model_runner_output = create_model_runner_output( [request_local_a, request_local_b], @@ -196,7 +196,7 @@ def test_interleaved_lifecycle(): assert len(scheduler.running) == 3 assert len(scheduler.waiting) == 0 assert len(scheduler_output.scheduled_new_reqs) == 1 - assert len(scheduler_output.scheduled_cached_reqs) == 2 + assert scheduler_output.scheduled_cached_reqs.num_reqs == 2 model_runner_output = create_model_runner_output( [request_local_a, request_local_b, request_remote]) diff --git a/tests/v1/kv_connector/unit/utils.py b/tests/v1/kv_connector/unit/utils.py index 61f59f35f75b9..983d900606fc9 100644 --- a/tests/v1/kv_connector/unit/utils.py +++ b/tests/v1/kv_connector/unit/utils.py @@ -25,7 +25,6 @@ def assert_scheduler_empty(scheduler: Scheduler): assert len(scheduler.running) == 0 assert len(scheduler.finished_req_ids) == 0 assert len(scheduler.finished_recving_kv_req_ids) == 0 - assert len(scheduler._cached_reqs_data) == 0 # EncoderCacheManager. assert len(scheduler.encoder_cache_manager.freed) == 0 diff --git a/tests/v1/tpu/worker/test_tpu_model_runner.py b/tests/v1/tpu/worker/test_tpu_model_runner.py index 25839d0897a4c..40db0b2afe0d9 100644 --- a/tests/v1/tpu/worker/test_tpu_model_runner.py +++ b/tests/v1/tpu/worker/test_tpu_model_runner.py @@ -82,7 +82,7 @@ def _schedule_new_request(*req_ids: str) -> SchedulerOutput: return SchedulerOutput( scheduled_new_reqs=new_reqs, - scheduled_cached_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens=num_scheduled_tokens, total_num_scheduled_tokens=total_num_scheduled_tokens, scheduled_spec_decode_tokens={}, @@ -161,7 +161,7 @@ def test_update_states_request_finished(model_runner): # finish req scheduler_output = SchedulerOutput( scheduled_new_reqs=[], - scheduled_cached_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens={}, total_num_scheduled_tokens=0, scheduled_spec_decode_tokens={}, @@ -191,7 +191,7 @@ def test_update_states_request_resumed(model_runner): # unschedule req scheduler_output = SchedulerOutput( scheduled_new_reqs=[], - scheduled_cached_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens={}, total_num_scheduled_tokens=0, scheduled_spec_decode_tokens={}, @@ -209,16 +209,16 @@ def test_update_states_request_resumed(model_runner): # resume req cached_req_data = CachedRequestData( - req_id=req_id, - resumed_from_preemption=False, - new_token_ids=[], - new_block_ids=([], ), - num_computed_tokens=0, + req_ids=[req_id], + resumed_from_preemption=[False], + new_token_ids=[[]], + new_block_ids=[([], )], + num_computed_tokens=[0], ) scheduler_output = SchedulerOutput( scheduled_new_reqs=[], - scheduled_cached_reqs=[cached_req_data], + scheduled_cached_reqs=cached_req_data, num_scheduled_tokens={req_id: 1}, total_num_scheduled_tokens=1, scheduled_spec_decode_tokens={}, @@ -249,7 +249,7 @@ def test_update_states_no_changes(model_runner): # schedule req scheduler_output = SchedulerOutput( scheduled_new_reqs=[], - scheduled_cached_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens={req_id: 1}, total_num_scheduled_tokens=1, scheduled_spec_decode_tokens={}, @@ -284,7 +284,7 @@ def test_update_states_request_unscheduled(model_runner): # unschedule req_1 scheduler_output = SchedulerOutput( scheduled_new_reqs=[], - scheduled_cached_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens={req_ids[0]: 1}, total_num_scheduled_tokens=1, scheduled_spec_decode_tokens={}, diff --git a/tests/v1/worker/test_gpu_model_runner.py b/tests/v1/worker/test_gpu_model_runner.py index 583a88d8e6ec6..c739b23b90dc8 100644 --- a/tests/v1/worker/test_gpu_model_runner.py +++ b/tests/v1/worker/test_gpu_model_runner.py @@ -133,7 +133,7 @@ def _schedule_new_request(*req_ids: str) -> SchedulerOutput: return SchedulerOutput( scheduled_new_reqs=new_reqs, - scheduled_cached_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens=num_scheduled_tokens, total_num_scheduled_tokens=total_num_scheduled_tokens, scheduled_spec_decode_tokens={}, @@ -199,7 +199,7 @@ def test_update_states_request_finished(model_runner): # finish req scheduler_output = SchedulerOutput( scheduled_new_reqs=[], - scheduled_cached_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens={}, total_num_scheduled_tokens=0, scheduled_spec_decode_tokens={}, @@ -231,7 +231,7 @@ def test_update_states_request_resumed(model_runner): # unschedule req scheduler_output = SchedulerOutput( scheduled_new_reqs=[], - scheduled_cached_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens={}, total_num_scheduled_tokens=0, scheduled_spec_decode_tokens={}, @@ -249,16 +249,16 @@ def test_update_states_request_resumed(model_runner): # resume req cached_req_data = CachedRequestData( - req_id=req_id, - resumed_from_preemption=False, - new_token_ids=[], - new_block_ids=([], ), - num_computed_tokens=0, + req_ids=[req_id], + resumed_from_preemption=[False], + new_token_ids=[[]], + new_block_ids=([[0]], ), + num_computed_tokens=[0], ) scheduler_output = SchedulerOutput( scheduled_new_reqs=[], - scheduled_cached_reqs=[cached_req_data], + scheduled_cached_reqs=cached_req_data, num_scheduled_tokens={req_id: 1}, total_num_scheduled_tokens=1, scheduled_spec_decode_tokens={}, @@ -339,7 +339,7 @@ def test_update_states_no_changes(model_runner): # schedule req scheduler_output = SchedulerOutput( scheduled_new_reqs=[], - scheduled_cached_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens={req_id: 1}, total_num_scheduled_tokens=1, scheduled_spec_decode_tokens={}, @@ -376,7 +376,7 @@ def test_update_states_request_unscheduled(model_runner): # unschedule req_1 scheduler_output = SchedulerOutput( scheduled_new_reqs=[], - scheduled_cached_reqs=[], + scheduled_cached_reqs=CachedRequestData.make_empty(), num_scheduled_tokens={req_ids[0]: 1}, total_num_scheduled_tokens=1, scheduled_spec_decode_tokens={}, diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py index a47deaf91272e..2f870971ded70 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_connector.py @@ -371,45 +371,48 @@ class P2pNcclConnector(KVConnectorBase_V1): block_size=self._block_size) self._requests_need_load.pop(new_req.req_id) - for cached_req in scheduler_output.scheduled_cached_reqs: + cached_reqs = scheduler_output.scheduled_cached_reqs + for i, req_id in enumerate(cached_reqs.req_ids): + num_computed_tokens = cached_reqs.num_computed_tokens[i] + new_block_ids = cached_reqs.new_block_ids[i] + resumed_from_preemption = cached_reqs.resumed_from_preemption[i] + if self.is_producer: num_scheduled_tokens = ( - scheduler_output.num_scheduled_tokens)[cached_req.req_id] - num_tokens = (num_scheduled_tokens + - cached_req.num_computed_tokens) - assert cached_req.req_id in self.chunked_prefill - block_ids = cached_req.new_block_ids[0] - if not cached_req.resumed_from_preemption: - block_ids = (self.chunked_prefill[cached_req.req_id][0] + - block_ids) - prompt_token_ids = self.chunked_prefill[cached_req.req_id][1] + scheduler_output.num_scheduled_tokens)[req_id] + num_tokens = (num_scheduled_tokens + num_computed_tokens) + assert req_id in self.chunked_prefill + block_ids = new_block_ids[0] + if not resumed_from_preemption: + block_ids = (self.chunked_prefill[req_id][0] + block_ids) + prompt_token_ids = self.chunked_prefill[req_id][1] # the request's prompt is chunked prefill again if num_tokens < len(prompt_token_ids): - self.chunked_prefill[cached_req.req_id] = ( - block_ids, prompt_token_ids) + self.chunked_prefill[req_id] = (block_ids, + prompt_token_ids) continue # the request's prompt is all prefilled finally - meta.add_request(request_id=cached_req.req_id, + meta.add_request(request_id=req_id, token_ids=prompt_token_ids, block_ids=block_ids, block_size=self._block_size) - self.chunked_prefill.pop(cached_req.req_id, None) + self.chunked_prefill.pop(req_id, None) continue # NOTE(rob): here we rely on the resumed requests being # the first N requests in the list scheduled_cache_reqs. - if not cached_req.resumed_from_preemption: + if not resumed_from_preemption: break - if cached_req.req_id in self._requests_need_load: - request, _ = self._requests_need_load.pop(cached_req.req_id) - total_tokens = cached_req.num_computed_tokens + 1 + if req_id in self._requests_need_load: + request, _ = self._requests_need_load.pop(req_id) + total_tokens = num_computed_tokens + 1 token_ids = request.all_token_ids[:total_tokens] # NOTE(rob): For resumed req, new_block_ids is all # of the block_ids for the request. - block_ids = cached_req.new_block_ids[0] + block_ids = new_block_ids[0] - meta.add_request(request_id=cached_req.req_id, + meta.add_request(request_id=req_id, token_ids=token_ids, block_ids=block_ids, block_size=self._block_size) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py b/vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py index f86b92692a0e5..0bceee19f873d 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/shared_storage_connector.py @@ -304,23 +304,28 @@ class SharedStorageConnector(KVConnectorBase_V1): block_size=self._block_size, is_store=True) - for cached_req in scheduler_output.scheduled_cached_reqs: + cached_reqs = scheduler_output.scheduled_cached_reqs + for i, req_id in enumerate(cached_reqs.req_ids): + num_computed_tokens = cached_reqs.num_computed_tokens[i] + new_token_ids = cached_reqs.new_token_ids[i] + new_block_ids = cached_reqs.new_block_ids[i] + resumed_from_preemption = cached_reqs.resumed_from_preemption[i] + # NOTE(rob): here we rely on the resumed requests being # the first N requests in the list scheduled_cache_reqs. - if not cached_req.resumed_from_preemption: + if not resumed_from_preemption: break - if cached_req.req_id in self._requests_need_load: + if req_id in self._requests_need_load: # NOTE(rob): cached_req_data does not have the full # list of token ids (only new tokens). So we look it # up in the actual request object. - request = self._requests_need_load[cached_req.req_id] - total_tokens = (len(cached_req.new_token_ids) + - cached_req.num_computed_tokens) + request = self._requests_need_load[req_id] + total_tokens = (len(new_token_ids) + num_computed_tokens) token_ids = request.all_token_ids[:total_tokens] # NOTE(rob): For resumed req, new_block_ids is all # of the block_ids for the request. - block_ids = cached_req.new_block_ids[0] + block_ids = new_block_ids[0] meta.add_request(token_ids=token_ids, block_ids=block_ids, diff --git a/vllm/v1/core/sched/output.py b/vllm/v1/core/sched/output.py index 6f31031a1086e..efc5b3012ec2f 100644 --- a/vllm/v1/core/sched/output.py +++ b/vllm/v1/core/sched/output.py @@ -83,29 +83,27 @@ class NewRequestData: @dataclass class CachedRequestData: - req_id: str + req_ids: list[str] # If resumed_from_preemption is False, new_block_ids will be appended to # the request's block IDs. If True, new_block_ids will be used as the # request's block IDs instead of appending to the existing block IDs. - resumed_from_preemption: bool - new_token_ids: list[int] - new_block_ids: tuple[list[int], ...] - num_computed_tokens: int + resumed_from_preemption: list[bool] + new_token_ids: list[list[int]] + new_block_ids: list[tuple[list[int], ...]] + num_computed_tokens: list[int] + + @property + def num_reqs(self) -> int: + return len(self.req_ids) @classmethod - def from_request( - cls, - request: Request, - resumed_from_preemption: bool, - new_token_ids: list[int], - new_block_ids: tuple[list[int], ...], - ) -> CachedRequestData: + def make_empty(cls) -> CachedRequestData: return cls( - req_id=request.request_id, - resumed_from_preemption=resumed_from_preemption, - new_token_ids=new_token_ids, - new_block_ids=new_block_ids, - num_computed_tokens=request.num_computed_tokens, + req_ids=[], + resumed_from_preemption=[], + new_token_ids=[], + new_block_ids=[], + num_computed_tokens=[], ) @@ -119,7 +117,7 @@ class SchedulerOutput: # list of the requests that have been scheduled before. # Since the request's data is already cached in the worker processes, # we only send the diff to minimize the communication cost. - scheduled_cached_reqs: list[CachedRequestData] + scheduled_cached_reqs: CachedRequestData # req_id -> num_scheduled_tokens # Number of tokens scheduled for each request. diff --git a/vllm/v1/core/sched/scheduler.py b/vllm/v1/core/sched/scheduler.py index 00b0844a5660b..20a40d74f3118 100644 --- a/vllm/v1/core/sched/scheduler.py +++ b/vllm/v1/core/sched/scheduler.py @@ -3,8 +3,9 @@ from __future__ import annotations +import itertools import time -from collections import defaultdict, deque +from collections import defaultdict from collections.abc import Iterable from typing import Any, Optional, Union @@ -117,12 +118,6 @@ class Scheduler(SchedulerInterface): # KV Connector: requests in process of async KV loading or recving self.finished_recving_kv_req_ids: set[str] = set() - # OPTIMIZATION: Cache the CachedRequestData objects to avoid creating - # them at each scheduling step. - # Request id -> deque of CachedRequestData - self._cached_reqs_data: dict[ - str, deque[CachedRequestData]] = defaultdict(deque) - # Encoder-related. # Calculate encoder cache size if applicable # NOTE: For now we use the same budget for both compute and space. @@ -547,27 +542,16 @@ class Scheduler(SchedulerInterface): req_to_new_block_ids[req.request_id]) for req in scheduled_new_reqs ] - resumed_reqs_data = [ - self._make_cached_request_data( - req, - num_scheduled_tokens[req.request_id], - len(scheduled_spec_decode_tokens.get(req.request_id, ())), - req_to_new_block_ids[req.request_id], - resumed_from_preemption=True, - ) for req in scheduled_resumed_reqs - ] - running_reqs_data = [ - self._make_cached_request_data( - req, - num_scheduled_tokens[req.request_id], - len(scheduled_spec_decode_tokens.get(req.request_id, ())), - req_to_new_block_ids[req.request_id], - resumed_from_preemption=False, - ) for req in scheduled_running_reqs - ] + cached_reqs_data = self._make_cached_request_data( + scheduled_running_reqs, + scheduled_resumed_reqs, + num_scheduled_tokens, + scheduled_spec_decode_tokens, + req_to_new_block_ids, + ) scheduler_output = SchedulerOutput( scheduled_new_reqs=new_reqs_data, - scheduled_cached_reqs=resumed_reqs_data + running_reqs_data, + scheduled_cached_reqs=cached_reqs_data, num_scheduled_tokens=num_scheduled_tokens, total_num_scheduled_tokens=total_num_scheduled_tokens, scheduled_spec_decode_tokens=scheduled_spec_decode_tokens, @@ -613,34 +597,39 @@ class Scheduler(SchedulerInterface): def _make_cached_request_data( self, - request: Request, - num_scheduled_tokens: int, - num_scheduled_spec_tokens: int, - new_block_ids: tuple[list[int], ...], - resumed_from_preemption: bool, + running_reqs: list[Request], + resumed_reqs: list[Request], + num_scheduled_tokens: dict[str, int], + spec_decode_tokens: dict[str, list[int]], + req_to_new_block_ids: dict[str, tuple[list[int], ...]], ) -> CachedRequestData: - # OPTIMIZATION: Cache the CachedRequestData objects to avoid creating - # them at each scheduling step. - num_computed_tokens = request.num_computed_tokens - num_regular_tokens = num_scheduled_tokens - num_scheduled_spec_tokens - new_token_ids = request.all_token_ids[ - num_computed_tokens:num_computed_tokens + num_regular_tokens] + req_ids: list[str] = [] + new_token_ids: list[list[int]] = [] + new_block_ids: list[tuple[list[int], ...]] = [] + num_computed_tokens: list[int] = [] - req_data_queue = self._cached_reqs_data.get(request.request_id) - if req_data_queue: - req_data = req_data_queue.popleft() - req_data.resumed_from_preemption = resumed_from_preemption - req_data.new_token_ids = new_token_ids - req_data.new_block_ids = new_block_ids - req_data.num_computed_tokens = num_computed_tokens - else: - # No cached request data, or all cached request data has been - # used by the scheduled requests. - req_data = CachedRequestData.from_request(request, - resumed_from_preemption, - new_token_ids, - new_block_ids) - return req_data + for req in itertools.chain(running_reqs, resumed_reqs): + req_id = req.request_id + req_ids.append(req_id) + num_tokens = (num_scheduled_tokens[req_id] - + len(spec_decode_tokens.get(req_id, ()))) + token_ids = req.all_token_ids[req.num_computed_tokens:req. + num_computed_tokens + num_tokens] + new_token_ids.append(token_ids) + new_block_ids.append(req_to_new_block_ids[req_id]) + num_computed_tokens.append(req.num_computed_tokens) + # Because resumed_reqs is usually empty, it is more efficient to do + # in-place appending so that we don't need to allocate a new list. + resumed_from_preemption = [False] * len(running_reqs) + resumed_from_preemption += [True] * len(resumed_reqs) + + return CachedRequestData( + req_ids=req_ids, + resumed_from_preemption=resumed_from_preemption, + new_token_ids=new_token_ids, + new_block_ids=new_block_ids, + num_computed_tokens=num_computed_tokens, + ) def _try_schedule_encoder_inputs( self, @@ -870,19 +859,11 @@ class Scheduler(SchedulerInterface): if not stopped: new_running.append(request) + self.running = new_running # KV Connector: update state for finished KV Transfers. self._update_from_kv_xfer_finished(model_runner_output) - # Return the cached request data to the queue so they can be reused. - for req_data in scheduler_output.scheduled_cached_reqs: - # NOTE(rob): since we free stopped reqs above, adding stopped reqs - # to _cached_reqs_data will cause a memory leak. - if req_data.req_id not in self.finished_req_ids: - self._cached_reqs_data[req_data.req_id].append(req_data) - - self.running = new_running - # Create EngineCoreOutputs for all clients that have requests with # outputs in this step. engine_core_outputs = { @@ -965,13 +946,11 @@ class Scheduler(SchedulerInterface): self._free_request(request) def _free_request(self, request: Request) -> Optional[dict[str, Any]]: - assert request.is_finished() delay_free_blocks, kv_xfer_params = self._connector_finished(request) self.encoder_cache_manager.free(request) request_id = request.request_id - self._cached_reqs_data.pop(request_id, None) self.finished_req_ids.add(request_id) if self.finished_req_ids_dict is not None: self.finished_req_ids_dict[request.client_index].add(request_id) @@ -983,7 +962,6 @@ class Scheduler(SchedulerInterface): def _free_blocks(self, request: Request): assert request.is_finished() - assert request.request_id not in self._cached_reqs_data self.kv_cache_manager.free(request) self.kv_cache_manager.free_block_hashes(request) del self.requests[request.request_id] diff --git a/vllm/v1/worker/gpu_model_runner.py b/vllm/v1/worker/gpu_model_runner.py index e063e44dabfa1..29d39de212f88 100644 --- a/vllm/v1/worker/gpu_model_runner.py +++ b/vllm/v1/worker/gpu_model_runner.py @@ -470,34 +470,36 @@ class GPUModelRunner(LoRAModelRunnerMixin): req_ids_to_add.append(req_id) # Update the states of the running/resumed requests. - for req_data in scheduler_output.scheduled_cached_reqs: - req_id = req_data.req_id + req_data = scheduler_output.scheduled_cached_reqs + for i, req_id in enumerate(req_data.req_ids): req_state = self.requests[req_id] + num_computed_tokens = req_data.num_computed_tokens[i] + new_token_ids = req_data.new_token_ids[i] + new_block_ids = req_data.new_block_ids[i] + resumed_from_preemption = req_data.resumed_from_preemption[i] # Update the cached states. - num_computed_tokens = req_data.num_computed_tokens req_state.num_computed_tokens = num_computed_tokens # Add the sampled token(s) from the previous step (if any). # This doesn't include "unverified" tokens like spec decode tokens. - num_new_tokens = (num_computed_tokens + - len(req_data.new_token_ids) - + num_new_tokens = (num_computed_tokens + len(new_token_ids) - req_state.num_tokens) if num_new_tokens == 1: # Avoid slicing list in most common case. - req_state.output_token_ids.append(req_data.new_token_ids[-1]) + req_state.output_token_ids.append(new_token_ids[-1]) elif num_new_tokens > 0: req_state.output_token_ids.extend( - req_data.new_token_ids[-num_new_tokens:]) + new_token_ids[-num_new_tokens:]) # Update the block IDs. - if not req_data.resumed_from_preemption: + if not resumed_from_preemption: # Append the new blocks to the existing block IDs. - for block_ids, new_block_ids in zip(req_state.block_ids, - req_data.new_block_ids): - block_ids.extend(new_block_ids) + for block_ids, new_ids in zip(req_state.block_ids, + new_block_ids): + block_ids.extend(new_ids) else: # The request is resumed from preemption. # Replace the existing block IDs with the new ones. - req_state.block_ids = req_data.new_block_ids + req_state.block_ids = new_block_ids req_index = self.input_batch.req_id_to_index.get(req_id) if req_index is None: @@ -510,14 +512,12 @@ class GPUModelRunner(LoRAModelRunnerMixin): # Update the persistent batch. self.input_batch.num_computed_tokens_cpu[req_index] = ( num_computed_tokens) - self.input_batch.block_table.append_row(req_data.new_block_ids, - req_index) + self.input_batch.block_table.append_row(new_block_ids, req_index) # Add new_token_ids to token_ids_cpu. start_token_index = num_computed_tokens - end_token_index = num_computed_tokens + len(req_data.new_token_ids) + end_token_index = num_computed_tokens + len(new_token_ids) self.input_batch.token_ids_cpu[ - req_index, - start_token_index:end_token_index] = req_data.new_token_ids + req_index, start_token_index:end_token_index] = new_token_ids self.input_batch.num_tokens_no_spec[req_index] = end_token_index # Add spec_token_ids to token_ids_cpu. spec_token_ids = scheduler_output.scheduled_spec_decode_tokens.get( diff --git a/vllm/v1/worker/tpu_model_runner.py b/vllm/v1/worker/tpu_model_runner.py index bc334419c4cec..0cc218bdb646f 100644 --- a/vllm/v1/worker/tpu_model_runner.py +++ b/vllm/v1/worker/tpu_model_runner.py @@ -418,21 +418,24 @@ class TPUModelRunner(LoRAModelRunnerMixin): req_ids_to_add.append(req_id) # Update the states of the running/resumed requests. - for req_data in scheduler_output.scheduled_cached_reqs: - req_id = req_data.req_id + req_data = scheduler_output.scheduled_cached_reqs + for i, req_id in enumerate(req_data.req_ids): req_state = self.requests[req_id] + num_computed_tokens = req_data.num_computed_tokens[i] + new_block_ids = req_data.new_block_ids[i] + resumed_from_preemption = req_data.resumed_from_preemption[i] # Update the cached states. - req_state.num_computed_tokens = req_data.num_computed_tokens - if not req_data.resumed_from_preemption: + req_state.num_computed_tokens = num_computed_tokens + if not resumed_from_preemption: # Append the new blocks to the existing block IDs. - for block_ids, new_block_ids in zip(req_state.block_ids, - req_data.new_block_ids): - block_ids.extend(new_block_ids) + for block_ids, new_ids in zip(req_state.block_ids, + new_block_ids): + block_ids.extend(new_ids) else: # The request is resumed from preemption. # Replace the existing block IDs with the new ones. - req_state.block_ids = req_data.new_block_ids + req_state.block_ids = new_block_ids req_index = self.input_batch.req_id_to_index.get(req_id) if req_index is None: @@ -444,9 +447,8 @@ class TPUModelRunner(LoRAModelRunnerMixin): # Update the persistent batch. self.input_batch.num_computed_tokens_cpu[req_index] = ( - req_data.num_computed_tokens) - self.input_batch.block_table.append_row(req_data.new_block_ids, - req_index) + num_computed_tokens) + self.input_batch.block_table.append_row(new_block_ids, req_index) # Add the new or resumed requests to the persistent batch. # The smaller empty indices are filled first. From 551ef1631a98d60fe9e82f0282e49c4a59a7887b Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Mon, 30 Jun 2025 12:26:42 -0400 Subject: [PATCH 206/211] [Unit Test] Add unit test for deep gemm (#20090) Signed-off-by: yewentao256 Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- tests/kernels/moe/test_deepgemm.py | 225 +++++++++++++++++++++++++++++ 1 file changed, 225 insertions(+) create mode 100644 tests/kernels/moe/test_deepgemm.py diff --git a/tests/kernels/moe/test_deepgemm.py b/tests/kernels/moe/test_deepgemm.py new file mode 100644 index 0000000000000..5d2690904cea2 --- /dev/null +++ b/tests/kernels/moe/test_deepgemm.py @@ -0,0 +1,225 @@ +# SPDX-License-Identifier: Apache-2.0 +""" +Unit-test DeepGEMM FP8 kernels (no DeepEP). +Compare DeepGEMM path against the Triton fallback inside vLLM's fused_experts. +""" + +import importlib +import math + +import pytest +import torch + +# vLLM fused-expert reference (Triton fallback + DeepGEMM option) +from vllm.model_executor.layers.fused_moe.fused_moe import fused_experts +from vllm.model_executor.layers.quantization.utils.fp8_utils import ( + per_token_group_quant_fp8) +from vllm.utils import cdiv + +has_deep_gemm = importlib.util.find_spec("deep_gemm") is not None + +if has_deep_gemm: + import deep_gemm + BLOCK_M = deep_gemm.get_m_alignment_for_contiguous_layout() + BLOCK_SIZE = [BLOCK_M, BLOCK_M] + +requires_deep_gemm = pytest.mark.skipif( + not has_deep_gemm, + reason="Requires deep_gemm kernels", +) + + +def calc_diff(x: torch.Tensor, y: torch.Tensor): + x, y = x.double(), y.double() + denominator = (x * x + y * y).sum() + sim = 2 * (x * y).sum() / denominator + return 1 - sim + + +def per_block_cast_to_fp8( + x: torch.Tensor, + block_size_n: int = 128) -> tuple[torch.Tensor, torch.Tensor]: + assert x.dim() == 2 + m, n = x.shape + x_padded = torch.zeros( + (cdiv(m, 128) * 128, cdiv(n, block_size_n) * block_size_n), + dtype=x.dtype, + device=x.device) + x_padded[:m, :n] = x + x_view = x_padded.view(-1, 128, x_padded.size(1) // 128, block_size_n) + x_amax = x_view.abs().float().amax(dim=(1, 3), keepdim=True).clamp(1e-4) + x_scaled = (x_view * (448.0 / x_amax)).to(torch.float8_e4m3fn) + x_scaled_sub = x_scaled.view_as(x_padded)[:m, :n].contiguous() + scales = (x_amax / 448.0).view(x_view.size(0), x_view.size(2)) + return x_scaled_sub, scales + + +def make_block_quant_fp8_weights( + e: int, + n: int, + k: int, + block_size: list[int], +): + """ + Generate (w1, w2) expert weights and their per-block scale tensors + in FP8 block-quantized format. + + w1 shape: (E, 2N, K) + w2 shape: (E, K, N) + """ + dtype = torch.bfloat16 + fp8_max, fp8_min = torch.finfo(torch.float8_e4m3fn).max, torch.finfo( + torch.float8_e4m3fn).min + + # bf16 reference weights + w1_bf16 = torch.randn(e, 2 * n, k, device="cuda", dtype=dtype) / 10 + w2_bf16 = torch.randn(e, k, n, device="cuda", dtype=dtype) / 10 + w1_bf16.clamp_(fp8_min, fp8_max) + w2_bf16.clamp_(fp8_min, fp8_max) + + block_n, block_k = block_size + n_tiles_w1 = math.ceil((2 * n) / block_n) + k_tiles_w1 = math.ceil(k / block_k) + n_tiles_w2 = math.ceil(k / block_n) + k_tiles_w2 = math.ceil(n / block_k) + + w1 = torch.empty_like(w1_bf16, dtype=torch.float8_e4m3fn) + w2 = torch.empty_like(w2_bf16, dtype=torch.float8_e4m3fn) + w1_s = torch.empty(e, + n_tiles_w1, + k_tiles_w1, + device="cuda", + dtype=torch.float32) + w2_s = torch.empty(e, + n_tiles_w2, + k_tiles_w2, + device="cuda", + dtype=torch.float32) + + for i in range(e): + w1[i], w1_s[i] = per_block_cast_to_fp8(w1_bf16[i]) + w2[i], w2_s[i] = per_block_cast_to_fp8(w2_bf16[i]) + + return w1, w2, w1_s, w2_s + + +def run_single_case(m, n, k, topk, num_experts, block_size): + """ + Run one (M,N,K) configuration on a single GPU and assert DeepGEMM == + Triton baseline within tolerance. + """ + tokens_bf16 = torch.randn( + m, k, device="cuda", dtype=torch.bfloat16).clamp_min_(-1).clamp_max_(1) + _, a1_scale = per_token_group_quant_fp8(tokens_bf16, block_size[1]) + + # expert weight tensors + w1, w2, w1_s, w2_s = make_block_quant_fp8_weights(num_experts, n, k, + block_size) + + router_logits = torch.randn(m, + num_experts, + device="cuda", + dtype=torch.float32) + topk_weights, topk_ids = torch.topk(router_logits, k=topk, dim=-1) + topk_weights = torch.nn.functional.softmax(topk_weights, dim=-1) + + # triton referrence + out_triton = fused_experts( + hidden_states=tokens_bf16, + w1=w1, + w2=w2, + topk_weights=topk_weights, + topk_ids=topk_ids, + inplace=False, + use_fp8_w8a8=True, + w1_scale=w1_s, + w2_scale=w2_s, + a1_scale=a1_scale, + block_shape=block_size, + allow_deep_gemm=False, + ) + + # DeepGemm + out_deepgemm = fused_experts( + hidden_states=tokens_bf16, + w1=w1, + w2=w2, + topk_weights=topk_weights, + topk_ids=topk_ids, + inplace=False, + use_fp8_w8a8=True, + w1_scale=w1_s, + w2_scale=w2_s, + a1_scale=a1_scale, + block_shape=block_size, + allow_deep_gemm=True, + ) + + base = out_triton.abs().mean() + atol = 0.1 * base.clamp(min=1e-2) # 10% of mean, but not lower than 1e-3 + rtol = 0.05 + # ----- Compare ----- + torch.testing.assert_close( + out_deepgemm.to(torch.float32), + out_triton.to(torch.float32), + rtol=rtol, + atol=float(atol), + ) + + +# Note: W1 has shape (E, 2N, K), so N = 512 +# can trigger the deepgemm path. +MNKs = [ + (1024, 512, 128), + (1024, 512, 512), + (2048, 512, 512), + (512, 1024, 1024), + (512, 2048, 2048), + (4096, 4096, 1024), +] + +TOPKS = [2, 6] +NUM_EXPERTS = [32] + + +@pytest.mark.parametrize("mnk", MNKs) +@pytest.mark.parametrize("topk", TOPKS) +@pytest.mark.parametrize("num_experts", NUM_EXPERTS) +@requires_deep_gemm +def test_deepgemm_vs_triton(mnk, topk, num_experts, monkeypatch): + + with monkeypatch.context() as m: + m.setenv("VLLM_USE_DEEP_GEMM", "1") + + _fused_moe_mod = importlib.import_module( + "vllm.model_executor.layers.fused_moe.fused_moe") + + call_counter = {"cnt": 0} + + orig_fn = _fused_moe_mod.deep_gemm_moe_fp8 + + def _spy_deep_gemm_moe_fp8(*args, **kwargs): + call_counter["cnt"] += 1 + return orig_fn(*args, **kwargs) + + monkeypatch.setattr(_fused_moe_mod, "deep_gemm_moe_fp8", + _spy_deep_gemm_moe_fp8) + + m, n, k = mnk + + if topk > num_experts: + pytest.skip(f"topk={topk} > num_experts={num_experts}") + + run_single_case( + m=m, + n=n, + k=k, + topk=topk, + num_experts=num_experts, + block_size=BLOCK_SIZE, + ) + + # ensure that the DeepGEMM path was indeed taken. + assert call_counter["cnt"] == 1, \ + f"DeepGEMM path was not executed during the test. " \ + f"Call counter: {call_counter['cnt']}" From d8cf819a9a337a578b7dfc7642617921cc468c17 Mon Sep 17 00:00:00 2001 From: Kyle Sayers Date: Mon, 30 Jun 2025 13:26:49 -0400 Subject: [PATCH 207/211] [Core] [Bugfix] [Multimodal] Fix multimodal profiling and generation for SFT/PTQed models (#20058) Signed-off-by: Kyle Sayers --- docs/contributing/model/multimodal.md | 7 +++ tests/multimodal/test_processing.py | 1 + vllm/entrypoints/llm.py | 8 +++ vllm/entrypoints/utils.py | 4 ++ vllm/inputs/preprocess.py | 29 +++++++--- vllm/model_executor/models/aya_vision.py | 2 + vllm/model_executor/models/blip2.py | 2 + vllm/model_executor/models/chameleon.py | 2 + vllm/model_executor/models/deepseek_vl2.py | 6 ++- vllm/model_executor/models/florence2.py | 4 +- vllm/model_executor/models/fuyu.py | 2 + vllm/model_executor/models/gemma3_mm.py | 2 + vllm/model_executor/models/glm4v.py | 1 + vllm/model_executor/models/granite_speech.py | 2 + vllm/model_executor/models/h2ovl.py | 3 ++ vllm/model_executor/models/idefics3.py | 2 + vllm/model_executor/models/internvl.py | 5 +- vllm/model_executor/models/llava.py | 5 +- vllm/model_executor/models/llava_onevision.py | 7 +++ vllm/model_executor/models/minicpmo.py | 10 ++-- vllm/model_executor/models/minicpmv.py | 18 +++++-- vllm/model_executor/models/minimax_vl_01.py | 2 + vllm/model_executor/models/mistral3.py | 2 + vllm/model_executor/models/mllama.py | 6 ++- vllm/model_executor/models/mllama4.py | 2 + vllm/model_executor/models/ovis.py | 2 + vllm/model_executor/models/paligemma.py | 5 +- vllm/model_executor/models/phi3v.py | 2 + vllm/model_executor/models/phi4mm.py | 3 +- vllm/model_executor/models/pixtral.py | 7 ++- .../models/prithvi_geospatial_mae.py | 1 + .../models/qwen2_5_omni_thinker.py | 7 +++ vllm/model_executor/models/qwen2_audio.py | 2 + vllm/model_executor/models/qwen2_vl.py | 4 +- vllm/model_executor/models/qwen_vl.py | 3 ++ vllm/model_executor/models/skyworkr1v.py | 2 + vllm/model_executor/models/ultravox.py | 6 +++ vllm/model_executor/models/whisper.py | 4 +- vllm/multimodal/processing.py | 54 +++++++++++++++---- vllm/multimodal/profiling.py | 7 ++- vllm/utils.py | 2 + 41 files changed, 207 insertions(+), 38 deletions(-) diff --git a/docs/contributing/model/multimodal.md b/docs/contributing/model/multimodal.md index 6ff2abbae6329..670d747b9ee7d 100644 --- a/docs/contributing/model/multimodal.md +++ b/docs/contributing/model/multimodal.md @@ -538,11 +538,13 @@ return a schema of the tensors outputted by the HF processor that are related to prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: processed_outputs = super()._call_hf_processor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) image_patches = processed_outputs.get("image_patches") @@ -566,6 +568,11 @@ return a schema of the tensors outputted by the HF processor that are related to Our [actual code](gh-file:vllm/model_executor/models/fuyu.py) has special handling for text-only inputs to prevent unnecessary warnings from HF processor. + !!! note + The `_call_hf_processor` method specifies both `mm_kwargs` and `tok_kwargs` for + processing. `mm_kwargs` is used to both initialize and call the huggingface + processor, whereas `tok_kwargs` is only used to call the huggingface processor. + This lets us override [_get_mm_fields_config][vllm.multimodal.processing.BaseMultiModalProcessor._get_mm_fields_config] as follows: ```python diff --git a/tests/multimodal/test_processing.py b/tests/multimodal/test_processing.py index 8b52911c6ccf3..2f97475f121a0 100644 --- a/tests/multimodal/test_processing.py +++ b/tests/multimodal/test_processing.py @@ -1086,6 +1086,7 @@ def test_hf_processor_kwargs(model_id, call_kwargs, expected_kwargs): prompt="", mm_data={}, mm_kwargs=call_kwargs, + tok_kwargs={}, ) assert out_kwargs == expected_kwargs diff --git a/vllm/entrypoints/llm.py b/vllm/entrypoints/llm.py index 63967e4d2d4bc..f0404e0bc6eac 100644 --- a/vllm/entrypoints/llm.py +++ b/vllm/entrypoints/llm.py @@ -481,6 +481,13 @@ class LLM: # Use default sampling params. sampling_params = self.get_default_sampling_params() + tokenization_kwargs: dict[str, Any] = {} + truncate_prompt_tokens = None + if isinstance(sampling_params, SamplingParams): + truncate_prompt_tokens = sampling_params.truncate_prompt_tokens + _validate_truncation_size(self.llm_engine.model_config.max_model_len, + truncate_prompt_tokens, tokenization_kwargs) + self._validate_and_add_requests( prompts=parsed_prompts, params=sampling_params, @@ -488,6 +495,7 @@ class LLM: lora_request=lora_request, prompt_adapter_request=prompt_adapter_request, guided_options=guided_options_request, + tokenization_kwargs=tokenization_kwargs, priority=priority, ) diff --git a/vllm/entrypoints/utils.py b/vllm/entrypoints/utils.py index 16ba2b4531acf..50f810afb8ccd 100644 --- a/vllm/entrypoints/utils.py +++ b/vllm/entrypoints/utils.py @@ -171,6 +171,10 @@ def _validate_truncation_size( tokenization_kwargs["truncation"] = True tokenization_kwargs["max_length"] = truncate_prompt_tokens + else: + if tokenization_kwargs is not None: + tokenization_kwargs["truncation"] = False + return truncate_prompt_tokens diff --git a/vllm/inputs/preprocess.py b/vllm/inputs/preprocess.py index a13e563f34a14..deda9bc23dafe 100644 --- a/vllm/inputs/preprocess.py +++ b/vllm/inputs/preprocess.py @@ -265,7 +265,8 @@ class InputPreprocessor: prompt: Union[str, list[int]], mm_data: MultiModalDataDict, mm_processor_kwargs: Optional[Mapping[str, object]], - lora_request: Optional[LoRARequest], + tokenization_kwargs: Optional[dict[str, Any]] = None, + lora_request: Optional[LoRARequest] = None, return_mm_hashes: bool = False, ) -> MultiModalInputs: """ @@ -280,15 +281,19 @@ class InputPreprocessor: if mm_processor_kwargs is None: mm_processor_kwargs = {} - return mm_processor.apply(prompt, mm_data, mm_processor_kwargs, - return_mm_hashes) + return mm_processor.apply(prompt, + mm_data, + hf_processor_mm_kwargs=mm_processor_kwargs, + tokenization_kwargs=tokenization_kwargs, + return_mm_hashes=return_mm_hashes) async def _process_multimodal_async( self, prompt: Union[str, list[int]], mm_data: MultiModalDataDict, mm_processor_kwargs: Optional[Mapping[str, object]], - lora_request: Optional[LoRARequest], + tokenization_kwargs: Optional[dict[str, Any]] = None, + lora_request: Optional[LoRARequest] = None, return_mm_hashes: bool = False, ) -> MultiModalInputs: """ @@ -302,8 +307,11 @@ class InputPreprocessor: if mm_processor_kwargs is None: mm_processor_kwargs = {} - return mm_processor.apply(prompt, mm_data, mm_processor_kwargs, - return_mm_hashes) + return mm_processor.apply(prompt, + mm_data, + hf_processor_mm_kwargs=mm_processor_kwargs, + tokenization_kwargs=tokenization_kwargs, + return_mm_hashes=return_mm_hashes) def _process_embeds( self, @@ -338,6 +346,7 @@ class InputPreprocessor: def _process_tokens( self, parsed_content: TokensPrompt, + tokenization_kwargs: Optional[dict[str, Any]] = None, lora_request: Optional[LoRARequest] = None, return_mm_hashes: bool = False, ) -> Union[TokenInputs, MultiModalInputs]: @@ -350,6 +359,7 @@ class InputPreprocessor: prompt_token_ids, multi_modal_data, parsed_content.get("mm_processor_kwargs"), + tokenization_kwargs=tokenization_kwargs, lora_request=lora_request, return_mm_hashes=return_mm_hashes, ) @@ -367,6 +377,7 @@ class InputPreprocessor: async def _process_tokens_async( self, parsed_content: TokensPrompt, + tokenization_kwargs: Optional[dict[str, Any]] = None, lora_request: Optional[LoRARequest] = None, return_mm_hashes: bool = False, ) -> Union[TokenInputs, MultiModalInputs]: @@ -379,6 +390,7 @@ class InputPreprocessor: prompt_token_ids, multi_modal_data, parsed_content.get("mm_processor_kwargs"), + tokenization_kwargs=tokenization_kwargs, lora_request=lora_request, return_mm_hashes=return_mm_hashes, ) @@ -408,6 +420,7 @@ class InputPreprocessor: prompt_text, multi_modal_data, parsed_content.get("mm_processor_kwargs"), + tokenization_kwargs=tokenization_kwargs, lora_request=lora_request, return_mm_hashes=return_mm_hashes, ) @@ -442,6 +455,7 @@ class InputPreprocessor: prompt_text, multi_modal_data, parsed_content.get("mm_processor_kwargs"), + tokenization_kwargs=tokenization_kwargs, lora_request=lora_request, return_mm_hashes=return_mm_hashes, ) @@ -860,7 +874,8 @@ class InputPreprocessor: "returned until they are supported on vLLM V1.") # Encoder-decoder model requires special mapping of # input prompts to encoder & decoder - return self._process_encoder_decoder_prompt(prompt) + return self._process_encoder_decoder_prompt( + prompt, tokenization_kwargs) if is_explicit_encoder_decoder_prompt(prompt): raise ValueError("Cannot pass encoder-decoder prompt " diff --git a/vllm/model_executor/models/aya_vision.py b/vllm/model_executor/models/aya_vision.py index a48631ad709f7..38daf995b8ca3 100644 --- a/vllm/model_executor/models/aya_vision.py +++ b/vllm/model_executor/models/aya_vision.py @@ -185,11 +185,13 @@ class AyaVisionMultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: processed_outputs = super()._call_hf_processor( prompt, mm_data, mm_kwargs, + tok_kwargs, ) hf_processor = self.info.get_hf_processor(**mm_kwargs) image_processor = hf_processor.image_processor diff --git a/vllm/model_executor/models/blip2.py b/vllm/model_executor/models/blip2.py index 3c3955161daaa..ecc12fa8d3727 100644 --- a/vllm/model_executor/models/blip2.py +++ b/vllm/model_executor/models/blip2.py @@ -454,6 +454,7 @@ class Blip2MultiModalProcessor(BaseMultiModalProcessor[Blip2ProcessingInfo]): prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: if not mm_data: # HF processor always adds placeholders even when there's no image @@ -465,6 +466,7 @@ class Blip2MultiModalProcessor(BaseMultiModalProcessor[Blip2ProcessingInfo]): prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) def _get_mm_fields_config( diff --git a/vllm/model_executor/models/chameleon.py b/vllm/model_executor/models/chameleon.py index d538ba09c65cf..06e33ad7737e3 100644 --- a/vllm/model_executor/models/chameleon.py +++ b/vllm/model_executor/models/chameleon.py @@ -107,6 +107,7 @@ class ChameleonMultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: if not mm_data: prompt_ids = self.info.get_tokenizer().encode(prompt) @@ -117,6 +118,7 @@ class ChameleonMultiModalProcessor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) def _apply_hf_processor_tokens_only( diff --git a/vllm/model_executor/models/deepseek_vl2.py b/vllm/model_executor/models/deepseek_vl2.py index da5452409d2f9..cdda9fb5a7490 100644 --- a/vllm/model_executor/models/deepseek_vl2.py +++ b/vllm/model_executor/models/deepseek_vl2.py @@ -204,12 +204,13 @@ class DeepseekVL2MultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: if mm_data: processed_outputs = self.info.ctx.call_hf_processor( self.info.get_hf_processor(**mm_kwargs), dict(prompt=prompt, **mm_data), - mm_kwargs, + dict(**mm_kwargs, **tok_kwargs), ) pixel_values = processed_outputs["pixel_values"] # split pixel values into patches corresponding to each image @@ -278,6 +279,7 @@ class DeepseekVL2MultiModalProcessor( prompt: Union[str, list[int]], mm_data_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], *, return_mm_hashes: bool, ) -> tuple[list[int], MultiModalKwargs, Optional[MultiModalHashes], bool]: @@ -290,6 +292,7 @@ class DeepseekVL2MultiModalProcessor( prompt=prompt, mm_data_items=mm_data_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, return_mm_hashes=return_mm_hashes, ) @@ -297,6 +300,7 @@ class DeepseekVL2MultiModalProcessor( prompt=prompt, mm_data_items=mm_data_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, return_mm_hashes=return_mm_hashes, ) diff --git a/vllm/model_executor/models/florence2.py b/vllm/model_executor/models/florence2.py index 425407c19ab5d..bda552721eb23 100644 --- a/vllm/model_executor/models/florence2.py +++ b/vllm/model_executor/models/florence2.py @@ -794,6 +794,7 @@ class Florence2MultiModalProcessor( prompt_text: str, mm_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], ) -> bool: return False @@ -828,10 +829,11 @@ class Florence2MultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: if mm_data: processed_outputs = super()._call_hf_processor( - prompt, mm_data, mm_kwargs) + prompt, mm_data, mm_kwargs, tok_kwargs) else: hf_processor = self.info.get_hf_processor() tokenizer = hf_processor.tokenizer diff --git a/vllm/model_executor/models/fuyu.py b/vllm/model_executor/models/fuyu.py index 7e03982e78e69..b3e055b966b08 100644 --- a/vllm/model_executor/models/fuyu.py +++ b/vllm/model_executor/models/fuyu.py @@ -153,6 +153,7 @@ class FuyuMultiModalProcessor(BaseMultiModalProcessor[FuyuProcessingInfo]): prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: if not mm_data: # Avoid warning from HF logger for text-only input @@ -164,6 +165,7 @@ class FuyuMultiModalProcessor(BaseMultiModalProcessor[FuyuProcessingInfo]): prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) image_patches = processed_outputs.get("image_patches") diff --git a/vllm/model_executor/models/gemma3_mm.py b/vllm/model_executor/models/gemma3_mm.py index 3a1c14978b45b..e9c27674b8457 100644 --- a/vllm/model_executor/models/gemma3_mm.py +++ b/vllm/model_executor/models/gemma3_mm.py @@ -259,11 +259,13 @@ class Gemma3MultiModalProcessor(BaseMultiModalProcessor[Gemma3ProcessingInfo]): prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: processed_outputs = super()._call_hf_processor( prompt, mm_data, mm_kwargs, + tok_kwargs, ) # HF processor pops the `num_crops` kwarg, which is needed by vLLM diff --git a/vllm/model_executor/models/glm4v.py b/vllm/model_executor/models/glm4v.py index 70916c45c0e09..95e3fcfc02fab 100644 --- a/vllm/model_executor/models/glm4v.py +++ b/vllm/model_executor/models/glm4v.py @@ -481,6 +481,7 @@ class GLM4VMultiModalProcessor(BaseMultiModalProcessor[GLM4VProcessingInfo]): prompt_text: str, mm_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], ) -> bool: return False diff --git a/vllm/model_executor/models/granite_speech.py b/vllm/model_executor/models/granite_speech.py index f2dc5708028ba..77fbc4808b4a3 100644 --- a/vllm/model_executor/models/granite_speech.py +++ b/vllm/model_executor/models/granite_speech.py @@ -141,6 +141,7 @@ class GraniteSpeechMultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: mm_data = dict(mm_data) audios = mm_data.pop("audios", []) @@ -153,6 +154,7 @@ class GraniteSpeechMultiModalProcessor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) if "audio" in mm_data: diff --git a/vllm/model_executor/models/h2ovl.py b/vllm/model_executor/models/h2ovl.py index 8f7f359b75521..467b074f37753 100644 --- a/vllm/model_executor/models/h2ovl.py +++ b/vllm/model_executor/models/h2ovl.py @@ -490,6 +490,7 @@ class H2OVLMultiModalProcessor( prompt: Union[str, list[int]], mm_data_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], *, return_mm_hashes: bool, ) -> tuple[list[int], MultiModalKwargs, Optional[MultiModalHashes], bool]: @@ -502,6 +503,7 @@ class H2OVLMultiModalProcessor( prompt=prompt, mm_data_items=mm_data_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, return_mm_hashes=return_mm_hashes, ) @@ -509,6 +511,7 @@ class H2OVLMultiModalProcessor( prompt=prompt, mm_data_items=mm_data_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, return_mm_hashes=return_mm_hashes, ) diff --git a/vllm/model_executor/models/idefics3.py b/vllm/model_executor/models/idefics3.py index b1d0626217a0a..36cfb5807d7d6 100644 --- a/vllm/model_executor/models/idefics3.py +++ b/vllm/model_executor/models/idefics3.py @@ -326,6 +326,7 @@ class Idefics3MultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: # Text-only input not supported in composite processor if not (images := mm_data.get("images", [])): @@ -337,6 +338,7 @@ class Idefics3MultiModalProcessor( prompt, mm_data, mm_kwargs, + tok_kwargs, ) parsed_images = (self._get_data_parser().parse_mm_data({ diff --git a/vllm/model_executor/models/internvl.py b/vllm/model_executor/models/internvl.py index bb71177ecad8e..6abe6cd6965c8 100644 --- a/vllm/model_executor/models/internvl.py +++ b/vllm/model_executor/models/internvl.py @@ -758,11 +758,13 @@ class BaseInternVLMultiModalProcessor(BaseMultiModalProcessor[_I]): prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> Mapping[str, NestedTensors]: processed_outputs = super()._call_hf_processor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) hf_processor = self.info.get_hf_processor(**mm_kwargs) @@ -941,9 +943,10 @@ class InternVLMultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> Mapping[str, NestedTensors]: processed_outputs = super()._call_hf_processor(prompt, mm_data, - mm_kwargs) + mm_kwargs, tok_kwargs) hf_processor = self.info.get_hf_processor(**mm_kwargs) if self.info.supports_video and ( diff --git a/vllm/model_executor/models/llava.py b/vllm/model_executor/models/llava.py index 1c35bf5206db7..7a7aefb267181 100644 --- a/vllm/model_executor/models/llava.py +++ b/vllm/model_executor/models/llava.py @@ -296,11 +296,13 @@ class PixtralHFMultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: processed_outputs = super()._call_hf_processor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) pixel_values = processed_outputs.get("pixel_values") @@ -797,6 +799,7 @@ class MantisMultiModalProcessor(LlavaMultiModalProcessor): prompt: Union[str, list[int]], mm_data: MultiModalDataDict, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Optional[Mapping[str, object]] = None, return_mm_hashes: bool = False, ) -> MultiModalInputs: hf_config = self.info.get_hf_config() @@ -809,7 +812,7 @@ class MantisMultiModalProcessor(LlavaMultiModalProcessor): ) result = super().apply(prompt, mm_data, hf_processor_mm_kwargs, - return_mm_hashes) + tokenization_kwargs, return_mm_hashes) mm_items = self._to_mm_items(mm_data) mm_item_counts = mm_items.get_all_counts() diff --git a/vllm/model_executor/models/llava_onevision.py b/vllm/model_executor/models/llava_onevision.py index c5403762f5390..7ff1026bfc94d 100644 --- a/vllm/model_executor/models/llava_onevision.py +++ b/vllm/model_executor/models/llava_onevision.py @@ -286,6 +286,7 @@ class LlavaOnevisionMultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: mm_data = dict(mm_data) videos = mm_data.pop("videos", []) @@ -296,6 +297,7 @@ class LlavaOnevisionMultiModalProcessor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) # LLaVA-OneVision processor doesn't support multiple videos @@ -310,6 +312,7 @@ class LlavaOnevisionMultiModalProcessor( prompt=prompt, mm_data={}, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) images = mm_data.pop("images", []) @@ -319,6 +322,7 @@ class LlavaOnevisionMultiModalProcessor( prompt=image_token * len(images), mm_data={"images": images}, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) image_outputs = { k: v @@ -334,6 +338,7 @@ class LlavaOnevisionMultiModalProcessor( prompt=video_token, mm_data={"videos": video}, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) pixel_values_videos.append(item_outputs["pixel_values_videos"][0]) @@ -352,11 +357,13 @@ class LlavaOnevisionMultiModalProcessor( prompt_text: str, mm_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], ) -> bool: base_result = super()._hf_processor_applies_updates( prompt_text=prompt_text, mm_items=mm_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, ) return base_result and mm_items.get_count("video", strict=False) == 0 diff --git a/vllm/model_executor/models/minicpmo.py b/vllm/model_executor/models/minicpmo.py index ff5959ed196ea..112e0b91d3f17 100644 --- a/vllm/model_executor/models/minicpmo.py +++ b/vllm/model_executor/models/minicpmo.py @@ -260,6 +260,7 @@ class MiniCPMOMultiModalProcessor( self, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> Mapping[str, NestedTensors]: if (audios := mm_data.get("audios")) is None: return {} @@ -276,9 +277,9 @@ class MiniCPMOMultiModalProcessor( prompts=[self.info.audio_pattern] * len(parsed_audios), mm_data={"audios": [[audio] for audio in parsed_audios]}, mm_kwargs={ - **mm_kwargs, - "chunk_input": True, + **mm_kwargs, "chunk_input": True }, + tok_kwargs=tok_kwargs, out_keys={"audio_features", "audio_feature_lens"}, ) @@ -302,10 +303,11 @@ class MiniCPMOMultiModalProcessor( self, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> Mapping[str, NestedTensors]: return { - **super().process_mm_inputs(mm_data, mm_kwargs), - **self.process_audios(mm_data, mm_kwargs), + **super().process_mm_inputs(mm_data, mm_kwargs, tok_kwargs), + **self.process_audios(mm_data, mm_kwargs, tok_kwargs), } def _get_prompt_updates( diff --git a/vllm/model_executor/models/minicpmv.py b/vllm/model_executor/models/minicpmv.py index 9dc03c8001824..1dba88be83500 100644 --- a/vllm/model_executor/models/minicpmv.py +++ b/vllm/model_executor/models/minicpmv.py @@ -534,6 +534,7 @@ class MiniCPMVMultiModalProcessor(BaseMultiModalProcessor[_I]): self, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> Mapping[str, NestedTensors]: if (images := mm_data.get("images")) is None: return {} @@ -550,6 +551,7 @@ class MiniCPMVMultiModalProcessor(BaseMultiModalProcessor[_I]): prompts=[self.info.image_pattern] * len(parsed_images), mm_data={"images": [[image] for image in parsed_images]}, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, out_keys={"pixel_values", "image_sizes", "tgt_sizes"}, ) @@ -563,6 +565,7 @@ class MiniCPMVMultiModalProcessor(BaseMultiModalProcessor[_I]): self, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> Mapping[str, NestedTensors]: if (videos := mm_data.get("videos")) is None: return {} @@ -586,6 +589,7 @@ class MiniCPMVMultiModalProcessor(BaseMultiModalProcessor[_I]): "max_slice_nums": self.info.get_video_max_slice_num(), }, + tok_kwargs=tok_kwargs, out_keys={"pixel_values", "image_sizes", "tgt_sizes"}, ) @@ -601,10 +605,11 @@ class MiniCPMVMultiModalProcessor(BaseMultiModalProcessor[_I]): self, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> Mapping[str, NestedTensors]: return { - **self.process_images(mm_data, mm_kwargs), - **self.process_videos(mm_data, mm_kwargs), + **self.process_images(mm_data, mm_kwargs, tok_kwargs), + **self.process_videos(mm_data, mm_kwargs, tok_kwargs), } def _base_call_hf_processor( @@ -612,6 +617,7 @@ class MiniCPMVMultiModalProcessor(BaseMultiModalProcessor[_I]): prompts: list[str], mm_data: Mapping[str, Sequence[object]], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], *, out_keys: set[str], ) -> dict[str, NestedTensors]: @@ -621,6 +627,7 @@ class MiniCPMVMultiModalProcessor(BaseMultiModalProcessor[_I]): prompt=prompts, # type: ignore mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) else: inputs = defaultdict[str, list[torch.Tensor]](list) @@ -633,6 +640,7 @@ class MiniCPMVMultiModalProcessor(BaseMultiModalProcessor[_I]): for k, v in mm_data.items() }, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) for k, v in inputs_one.items(): @@ -646,11 +654,12 @@ class MiniCPMVMultiModalProcessor(BaseMultiModalProcessor[_I]): prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: tokenizer = self.info.get_tokenizer() - input_ids = torch.tensor([tokenizer.encode(prompt)]) - mm_inputs = self.process_mm_inputs(mm_data, mm_kwargs) + input_ids = torch.tensor([tokenizer.encode(prompt, **tok_kwargs)]) + mm_inputs = self.process_mm_inputs(mm_data, mm_kwargs, tok_kwargs) return BatchFeature({ "input_ids": input_ids, @@ -662,6 +671,7 @@ class MiniCPMVMultiModalProcessor(BaseMultiModalProcessor[_I]): prompt_text: str, mm_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], ) -> bool: return False diff --git a/vllm/model_executor/models/minimax_vl_01.py b/vllm/model_executor/models/minimax_vl_01.py index 8ce94540e87fe..a125454c0c060 100644 --- a/vllm/model_executor/models/minimax_vl_01.py +++ b/vllm/model_executor/models/minimax_vl_01.py @@ -113,11 +113,13 @@ class MiniMaxVL01MultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: processed_outputs = super()._call_hf_processor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) pixel_values = processed_outputs.get("pixel_values") diff --git a/vllm/model_executor/models/mistral3.py b/vllm/model_executor/models/mistral3.py index 04d6d347cb84f..6840c672a3299 100644 --- a/vllm/model_executor/models/mistral3.py +++ b/vllm/model_executor/models/mistral3.py @@ -228,11 +228,13 @@ class Mistral3MultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: processed_outputs = super()._call_hf_processor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) pixel_values = processed_outputs.get("pixel_values") diff --git a/vllm/model_executor/models/mllama.py b/vllm/model_executor/models/mllama.py index 1b7e93fafad93..ead5a8e950f0f 100644 --- a/vllm/model_executor/models/mllama.py +++ b/vllm/model_executor/models/mllama.py @@ -166,10 +166,11 @@ class MllamaMultiModalProcessor(EncDecMultiModalProcessor[MllamaProcessingInfo] prompt: Union[str, list[int]], mm_data: MultiModalDataDict, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Optional[Mapping[str, object]] = None, return_mm_hashes: bool = False, ) -> MultiModalEncDecInputs: mm_inputs = super().apply(prompt, mm_data, hf_processor_mm_kwargs, - return_mm_hashes) + tokenization_kwargs, return_mm_hashes) image_token_id = self.info.get_hf_config().image_token_index # Check that the number of image tokens in the decoder prompt matches @@ -239,6 +240,7 @@ class MllamaMultiModalProcessor(EncDecMultiModalProcessor[MllamaProcessingInfo] prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: tokenizer = self.info.get_tokenizer() if mm_data: @@ -247,7 +249,7 @@ class MllamaMultiModalProcessor(EncDecMultiModalProcessor[MllamaProcessingInfo] for img in mm_data["images"] ] processed_outputs = super()._call_hf_processor( - prompt, mm_data, mm_kwargs) + prompt, mm_data, mm_kwargs, tok_kwargs) processed_outputs["num_tiles"] = torch.tensor(num_tiles) for k in ('pixel_values', 'aspect_ratio_ids', "aspect_ratio_mask"): processed_outputs[k] = processed_outputs[k].squeeze(0) diff --git a/vllm/model_executor/models/mllama4.py b/vllm/model_executor/models/mllama4.py index a420e757e2194..ea781e18db272 100644 --- a/vllm/model_executor/models/mllama4.py +++ b/vllm/model_executor/models/mllama4.py @@ -574,6 +574,7 @@ class Mllama4MultiModalProcessor(BaseMultiModalProcessor[Mllama4ProcessingInfo] prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: tokenizer = self.info.get_tokenizer() @@ -583,6 +584,7 @@ class Mllama4MultiModalProcessor(BaseMultiModalProcessor[Mllama4ProcessingInfo] prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) processor = self.info.get_hf_processor(**mm_kwargs) diff --git a/vllm/model_executor/models/ovis.py b/vllm/model_executor/models/ovis.py index 6eecd4499fb96..5059b4e69f076 100644 --- a/vllm/model_executor/models/ovis.py +++ b/vllm/model_executor/models/ovis.py @@ -335,6 +335,7 @@ class OvisMultiModalProcessor(BaseMultiModalProcessor[OvisProcessingInfo]): prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: if not mm_data: # Avoid warning from HF logger for text-only input @@ -346,6 +347,7 @@ class OvisMultiModalProcessor(BaseMultiModalProcessor[OvisProcessingInfo]): prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) hf_processor = self.info.get_hf_processor() diff --git a/vllm/model_executor/models/paligemma.py b/vllm/model_executor/models/paligemma.py index e1de8cf458780..29ffb62eeafd0 100644 --- a/vllm/model_executor/models/paligemma.py +++ b/vllm/model_executor/models/paligemma.py @@ -121,6 +121,7 @@ class PaliGemmaMultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: tokenizer = self.info.get_tokenizer() if not mm_data: @@ -131,6 +132,7 @@ class PaliGemmaMultiModalProcessor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) def _get_mm_fields_config( @@ -191,10 +193,11 @@ class PaliGemmaMultiModalProcessor( prompt: Union[str, list[int]], mm_data: MultiModalDataDict, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Optional[Mapping[str, object]] = None, return_mm_hashes: bool = False, ) -> MultiModalInputs: mm_inputs = super().apply(prompt, mm_data, hf_processor_mm_kwargs, - return_mm_hashes) + tokenization_kwargs, return_mm_hashes) prompt_token_ids = mm_inputs["prompt_token_ids"] tokenizer = self.info.get_tokenizer() diff --git a/vllm/model_executor/models/phi3v.py b/vllm/model_executor/models/phi3v.py index 0a7adf91e488f..a084e71f734c2 100644 --- a/vllm/model_executor/models/phi3v.py +++ b/vllm/model_executor/models/phi3v.py @@ -376,11 +376,13 @@ class Phi3VMultiModalProcessor(BaseMultiModalProcessor[Phi3VProcessingInfo]): prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: processed_outputs = super()._call_hf_processor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) input_ids = processed_outputs["input_ids"] diff --git a/vllm/model_executor/models/phi4mm.py b/vllm/model_executor/models/phi4mm.py index 5d1f0775b07fb..3c4162507f03d 100644 --- a/vllm/model_executor/models/phi4mm.py +++ b/vllm/model_executor/models/phi4mm.py @@ -762,6 +762,7 @@ class Phi4MMMultiModalProcessor(BaseMultiModalProcessor[Phi4MMProcessingInfo]): prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: if not mm_data: prompt_ids = self.info.get_tokenizer().encode(prompt) @@ -773,7 +774,7 @@ class Phi4MMMultiModalProcessor(BaseMultiModalProcessor[Phi4MMProcessingInfo]): mm_data['audios'] = [(data, sr) for data in audio_data] processed_outputs = super()._call_hf_processor(prompt, mm_data, - mm_kwargs) + mm_kwargs, tok_kwargs) num_img_tokens = [ self.info.get_num_image_tokens(image_width=img_size[0], diff --git a/vllm/model_executor/models/pixtral.py b/vllm/model_executor/models/pixtral.py index 709ac1d9df945..a31c757f7d592 100644 --- a/vllm/model_executor/models/pixtral.py +++ b/vllm/model_executor/models/pixtral.py @@ -237,6 +237,7 @@ class PixtralDummyInputsBuilder(BaseDummyInputsBuilder[PixtralProcessingInfo]): dummy_text = self.get_dummy_text(mm_counts) dummy_mm_data = self.get_dummy_mm_data(seq_len, mm_counts) dummy_images = dummy_mm_data.get("image", []) + tokenization_kwargs = {"truncation": False} request = ChatCompletionRequest(messages=[ UserMessage(content=[ @@ -247,7 +248,9 @@ class PixtralDummyInputsBuilder(BaseDummyInputsBuilder[PixtralProcessingInfo]): res = tokenizer.mistral.encode_chat_completion(request) dummy_tokens = res.tokens - return ProcessorInputs(prompt=dummy_tokens, mm_data=dummy_mm_data) + return ProcessorInputs(prompt=dummy_tokens, + mm_data=dummy_mm_data, + tokenization_kwargs=tokenization_kwargs) class PixtralMultiModalProcessor(BaseMultiModalProcessor[PixtralProcessingInfo] @@ -297,6 +300,7 @@ class PixtralMultiModalProcessor(BaseMultiModalProcessor[PixtralProcessingInfo] prompt: Union[str, list[int]], mm_data_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], *, return_mm_hashes: bool, ) -> tuple[list[int], MultiModalKwargs, Optional[MultiModalHashes], bool]: @@ -309,6 +313,7 @@ class PixtralMultiModalProcessor(BaseMultiModalProcessor[PixtralProcessingInfo] prompt=prompt, mm_data_items=mm_data_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, return_mm_hashes=return_mm_hashes, ) diff --git a/vllm/model_executor/models/prithvi_geospatial_mae.py b/vllm/model_executor/models/prithvi_geospatial_mae.py index 4fdcae5de644a..f89cf1b5274cf 100644 --- a/vllm/model_executor/models/prithvi_geospatial_mae.py +++ b/vllm/model_executor/models/prithvi_geospatial_mae.py @@ -92,6 +92,7 @@ class PrithviGeoSpatialMAEMultiModalProcessor(BaseMultiModalProcessor): prompt: Union[str, list[int]], mm_data: MultiModalDataDict, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Optional[Mapping[str, object]] = None, return_mm_hashes: bool = False, ) -> MultiModalInputs: mm_kwargs = {} diff --git a/vllm/model_executor/models/qwen2_5_omni_thinker.py b/vllm/model_executor/models/qwen2_5_omni_thinker.py index 9497f15984b75..8980f386502fc 100644 --- a/vllm/model_executor/models/qwen2_5_omni_thinker.py +++ b/vllm/model_executor/models/qwen2_5_omni_thinker.py @@ -244,6 +244,7 @@ class Qwen2_5OmniThinkerMultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: mm_data = dict(mm_data) audios = mm_data.pop("audios", []) @@ -258,6 +259,7 @@ class Qwen2_5OmniThinkerMultiModalProcessor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) input_features = hf_inputs.pop('input_features', None) @@ -453,6 +455,7 @@ class Qwen2_5OmniThinkerMultiModalProcessor( prompt: Union[str, list[int]], mm_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], *, enable_hf_prompt_update: bool, ) -> tuple[list[int], MultiModalKwargs, bool]: @@ -465,6 +468,7 @@ class Qwen2_5OmniThinkerMultiModalProcessor( prompt_text=prompt, mm_items=mm_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, ) tokenizer = self.info.get_tokenizer() prompt_ids = encode_tokens(tokenizer, prompt) @@ -474,6 +478,7 @@ class Qwen2_5OmniThinkerMultiModalProcessor( mm_kwargs = self._apply_hf_processor_mm_only( mm_items=mm_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, ) return prompt_ids, mm_kwargs, False @@ -482,6 +487,7 @@ class Qwen2_5OmniThinkerMultiModalProcessor( self, mm_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], ) -> MultiModalKwargs: """ Qwen2.5-Omni reimplements this function to handle `use_audio_in_video`. @@ -498,6 +504,7 @@ class Qwen2_5OmniThinkerMultiModalProcessor( prompt_text=self.dummy_inputs.get_dummy_text(mm_counts), mm_items=mm_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, ) return mm_kwargs diff --git a/vllm/model_executor/models/qwen2_audio.py b/vllm/model_executor/models/qwen2_audio.py index aefa1db24628d..31b25ef0bc731 100644 --- a/vllm/model_executor/models/qwen2_audio.py +++ b/vllm/model_executor/models/qwen2_audio.py @@ -150,6 +150,7 @@ class Qwen2AudioMultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, Any], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: # NOTE - we rename audios -> audio in mm data because transformers has # deprecated audios for the qwen2audio processor and will remove @@ -174,6 +175,7 @@ class Qwen2AudioMultiModalProcessor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) def _get_mm_fields_config( diff --git a/vllm/model_executor/models/qwen2_vl.py b/vllm/model_executor/models/qwen2_vl.py index 899fc57c7a0e5..dc7b08c65bb13 100644 --- a/vllm/model_executor/models/qwen2_vl.py +++ b/vllm/model_executor/models/qwen2_vl.py @@ -1027,11 +1027,13 @@ class Qwen2VLMultiModalProcessor(BaseMultiModalProcessor[Qwen2VLProcessingInfo] prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: + mm_kwargs = self.info._get_image_processor_kwargs(**mm_kwargs) return self.info.ctx.call_hf_processor( self.info.get_hf_processor(**mm_kwargs), dict(text=prompt, **mm_data), - self.info._get_image_processor_kwargs(**mm_kwargs), + dict(**mm_kwargs, **tok_kwargs), ) def _get_prompt_updates( diff --git a/vllm/model_executor/models/qwen_vl.py b/vllm/model_executor/models/qwen_vl.py index fc29785af95a0..563650a4f162c 100644 --- a/vllm/model_executor/models/qwen_vl.py +++ b/vllm/model_executor/models/qwen_vl.py @@ -580,6 +580,7 @@ class QwenVLMultiModalProcessor(BaseMultiModalProcessor[QwenVLProcessingInfo]): prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: # Drops anything between / tags; encoding with the tokenizer # will automatically add the image pads for the context. @@ -600,6 +601,7 @@ class QwenVLMultiModalProcessor(BaseMultiModalProcessor[QwenVLProcessingInfo]): prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) def _hf_processor_applies_updates( @@ -607,6 +609,7 @@ class QwenVLMultiModalProcessor(BaseMultiModalProcessor[QwenVLProcessingInfo]): prompt_text: str, mm_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], ) -> bool: return False diff --git a/vllm/model_executor/models/skyworkr1v.py b/vllm/model_executor/models/skyworkr1v.py index 28f181dde2154..d362838dbb398 100644 --- a/vllm/model_executor/models/skyworkr1v.py +++ b/vllm/model_executor/models/skyworkr1v.py @@ -534,11 +534,13 @@ class SkyworkR1VMultiModalProcessor(BaseMultiModalProcessor[_I]): prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> Mapping[str, NestedTensors]: processed_outputs = super()._call_hf_processor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) hf_processor = self.info.get_hf_processor(**mm_kwargs) diff --git a/vllm/model_executor/models/ultravox.py b/vllm/model_executor/models/ultravox.py index 94f5e03fd446e..5cccd6b8841b4 100644 --- a/vllm/model_executor/models/ultravox.py +++ b/vllm/model_executor/models/ultravox.py @@ -144,6 +144,7 @@ class UltravoxMultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: # Text-only input not supported in composite processor if not mm_data.get("audios", []): @@ -165,10 +166,15 @@ class UltravoxMultiModalProcessor( item_processor_data = dict(**mm_data, audios=audios) + # some tokenizer kwargs are incompatible with UltravoxProcessor + tok_kwargs.pop("padding", None) + tok_kwargs.pop("truncation", None) + output = super()._call_hf_processor( prompt=prompt, mm_data=item_processor_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) output['audio_features'] = output.pop('audio_values') diff --git a/vllm/model_executor/models/whisper.py b/vllm/model_executor/models/whisper.py index 5a0094fa749fd..568b81c4bbfa8 100644 --- a/vllm/model_executor/models/whisper.py +++ b/vllm/model_executor/models/whisper.py @@ -700,9 +700,10 @@ class WhisperMultiModalProcessor( prompt: str, mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> BatchFeature: if mm_data: - feature_extractor = self.info.get_feature_extractor(**mm_kwargs) + feature_extractor = self.info.get_feature_extractor() mm_data = dict(audio=mm_data.pop("audios")) mm_kwargs = dict( **mm_kwargs, @@ -712,6 +713,7 @@ class WhisperMultiModalProcessor( prompt=prompt, mm_data=mm_data, mm_kwargs=mm_kwargs, + tok_kwargs=tok_kwargs, ) if "labels" in processed_outputs: processed_outputs["input_ids"] = processed_outputs.pop("labels") diff --git a/vllm/multimodal/processing.py b/vllm/multimodal/processing.py index 38f3a7cb932f4..aa7889fc3cc59 100644 --- a/vllm/multimodal/processing.py +++ b/vllm/multimodal/processing.py @@ -1267,6 +1267,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): # This refers to the data to be passed to HF processor. mm_data: Mapping[str, object], mm_kwargs: Mapping[str, object], + tok_kwargs: Mapping[str, object], ) -> "BatchFeature": """ Call the HF processor on the prompt text and @@ -1275,7 +1276,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): return self.info.ctx.call_hf_processor( self.info.get_hf_processor(**mm_kwargs), dict(text=prompt, **mm_data), - mm_kwargs, + dict(**mm_kwargs, **tok_kwargs), ) def _hf_processor_applies_updates( @@ -1283,6 +1284,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt_text: str, mm_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], ) -> bool: """ Return whether the HF processor applies prompt updates. @@ -1300,6 +1302,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt_text: str, mm_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], ) -> tuple[list[int], MultiModalKwargs, bool]: """ Apply the HF processor on the prompt text and multi-modal data @@ -1313,6 +1316,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt=prompt_text, mm_data=processor_data, mm_kwargs=hf_processor_mm_kwargs, + tok_kwargs=tokenization_kwargs, ) processed_data.update(passthrough_data) @@ -1327,11 +1331,14 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt_text=prompt_text, mm_items=mm_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, ) return prompt_ids, mm_kwargs, is_update_applied - def _apply_hf_processor_text_only(self, prompt_text: str) -> list[int]: + def _apply_hf_processor_text_only( + self, prompt_text: str, + tokenization_kwargs: Mapping[str, object]) -> list[int]: """ Apply the HF processor on the prompt text only. @@ -1343,6 +1350,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt_text=prompt_text, mm_items=MultiModalDataItems({}), hf_processor_mm_kwargs={}, + tokenization_kwargs=tokenization_kwargs, ) return prompt_ids @@ -1368,6 +1376,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): self, mm_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], ) -> MultiModalKwargs: """ Apply the HF processor on the multi-modal data only. @@ -1383,6 +1392,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt_text=self.dummy_inputs.get_dummy_text(mm_counts), mm_items=mm_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, ) return mm_kwargs @@ -1392,6 +1402,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt: Union[str, list[int]], mm_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], *, enable_hf_prompt_update: bool, ) -> tuple[list[int], MultiModalKwargs, bool]: @@ -1412,15 +1423,18 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt_text=prompt, mm_items=mm_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, ) - prompt_ids = self._apply_hf_processor_text_only(prompt) + prompt_ids = self._apply_hf_processor_text_only( + prompt, tokenization_kwargs) else: prompt_ids = self._apply_hf_processor_tokens_only(prompt) mm_kwargs = self._apply_hf_processor_mm_only( mm_items=mm_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, ) return prompt_ids, mm_kwargs, False @@ -1430,14 +1444,17 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): cache: ProcessingCache, mm_data_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], ) -> tuple[dict[str, list[ProcessingCacheOptionalItem]], dict[ str, list[object]]]: model_id = self.info.model_id mm_cache_items = { modality: [ - cache.get_item(model_id, modality, item, - hf_processor_mm_kwargs) for item in items + cache.get_item( + model_id, modality, item, + dict(**hf_processor_mm_kwargs, **tokenization_kwargs)) + for item in items ] for modality, items in mm_data_items.items() } @@ -1457,10 +1474,9 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): return mm_cache_items, mm_missing_data def _hash_mm_items( - self, - mm_items: MultiModalDataItems, - hf_processor_mm_kwargs: Mapping[str, object], - ) -> MultiModalHashes: + self, mm_items: MultiModalDataItems, + hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object]) -> MultiModalHashes: """Create MM hashes to be returned (only used in V1).""" model_id = self.info.model_id @@ -1468,7 +1484,8 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): modality: [ MultiModalHasher.hash_kwargs(model_id=model_id, **{modality: item}, - **hf_processor_mm_kwargs) + **hf_processor_mm_kwargs, + **tokenization_kwargs) for item in items ] for modality, items in mm_items.items() @@ -1513,6 +1530,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt: Union[str, list[int]], mm_data_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], *, return_mm_hashes: bool, ) -> tuple[list[int], MultiModalKwargs, Optional[MultiModalHashes], bool]: @@ -1524,10 +1542,12 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt=prompt, mm_items=mm_data_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, enable_hf_prompt_update=True, ) - mm_hashes = (self._hash_mm_items(mm_data_items, hf_processor_mm_kwargs) + mm_hashes = (self._hash_mm_items(mm_data_items, hf_processor_mm_kwargs, + tokenization_kwargs) if return_mm_hashes else None) return prompt_ids, mm_kwargs, mm_hashes, is_update_applied @@ -1537,6 +1557,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt: Union[str, list[int]], mm_data_items: MultiModalDataItems, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Mapping[str, object], *, return_mm_hashes: bool, ) -> tuple[list[int], MultiModalKwargs, Optional[MultiModalHashes], bool]: @@ -1552,6 +1573,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt=prompt, mm_data_items=mm_data_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, return_mm_hashes=return_mm_hashes, ) @@ -1562,6 +1584,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): cache=cache, mm_data_items=mm_data_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, ) # NOTE: `prompt` does not correspond to `mm_missing_data_items`, @@ -1575,6 +1598,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt=prompt, mm_items=self._to_mm_items(mm_missing_data), hf_processor_mm_kwargs=hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, enable_hf_prompt_update=False, ) @@ -1783,6 +1807,7 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt: Union[str, list[int]], mm_data: MultiModalDataDict, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Optional[Mapping[str, object]] = None, return_mm_hashes: bool = False, ) -> MultiModalInputs: """ @@ -1800,6 +1825,9 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): """ mm_items = self._to_mm_items(mm_data) + if tokenization_kwargs is None: + tokenization_kwargs = {} + ( prompt_ids, mm_kwargs, @@ -1809,9 +1837,11 @@ class BaseMultiModalProcessor(ABC, Generic[_I]): prompt, mm_items, hf_processor_mm_kwargs, + tokenization_kwargs=tokenization_kwargs, return_mm_hashes=return_mm_hashes, ) + # NOTE: tokenization_kwargs are not required to init processor prompt_ids, prompt, mm_placeholders = self._maybe_apply_prompt_updates( mm_items=mm_items, hf_processor_mm_kwargs=hf_processor_mm_kwargs, @@ -1892,6 +1922,7 @@ class EncDecMultiModalProcessor(BaseMultiModalProcessor[_I]): prompt: Union[str, list[int]], mm_data: MultiModalDataDict, hf_processor_mm_kwargs: Mapping[str, object], + tokenization_kwargs: Optional[Mapping[str, object]] = None, return_mm_hashes: bool = False, ) -> MultiModalEncDecInputs: """ @@ -1906,6 +1937,7 @@ class EncDecMultiModalProcessor(BaseMultiModalProcessor[_I]): encoder_prompt, mm_data, hf_processor_mm_kwargs, + tokenization_kwargs, return_mm_hashes, ) diff --git a/vllm/multimodal/profiling.py b/vllm/multimodal/profiling.py index 67bcb31f23f70..fb5a7b64c4199 100644 --- a/vllm/multimodal/profiling.py +++ b/vllm/multimodal/profiling.py @@ -30,6 +30,7 @@ class ProcessorInputs: prompt: Union[str, list[int]] mm_data: MultiModalDataDict hf_processor_mm_kwargs: Mapping[str, object] = field(default_factory=dict) + tokenization_kwargs: Mapping[str, object] = field(default_factory=dict) class DummyEncoderData(NamedTuple): @@ -90,8 +91,11 @@ class BaseDummyInputsBuilder(ABC, Generic[_I]): """ dummy_text = self.get_dummy_text(mm_counts) dummy_mm_data = self.get_dummy_mm_data(seq_len, mm_counts) + tokenization_kwargs = {"truncation": False} - return ProcessorInputs(prompt=dummy_text, mm_data=dummy_mm_data) + return ProcessorInputs(prompt=dummy_text, + mm_data=dummy_mm_data, + tokenization_kwargs=tokenization_kwargs) def _get_dummy_audios( self, @@ -170,6 +174,7 @@ class MultiModalProfiler(Generic[_I]): prompt=processor_inputs.prompt, mm_data=processor_inputs.mm_data, hf_processor_mm_kwargs=processor_inputs.hf_processor_mm_kwargs, + tokenization_kwargs=processor_inputs.tokenization_kwargs, ) def _get_mm_num_tokens( diff --git a/vllm/utils.py b/vllm/utils.py index 7eb3c1e347cde..689102281c54f 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -1729,6 +1729,7 @@ def supports_kw( last_param = params[next(reversed(params))] # type: ignore return (last_param.kind == inspect.Parameter.VAR_KEYWORD and last_param.name != kw_name) + return False @@ -1771,6 +1772,7 @@ def resolve_mm_processor_kwargs( # Merge the final processor kwargs, prioritizing inference # time values over the initialization time values. mm_processor_kwargs = {**init_mm_kwargs, **runtime_mm_kwargs} + return mm_processor_kwargs From 97d9524fe90ad5799cc11db4b4216fe3a30a07d6 Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Mon, 30 Jun 2025 14:15:24 -0400 Subject: [PATCH 208/211] [Refactor] Remove useless pdb comment (#20266) Signed-off-by: yewentao256 --- vllm/model_executor/layers/fused_moe/deep_gemm_moe.py | 1 - 1 file changed, 1 deletion(-) diff --git a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py index 321fb0351ad93..818f6d345ba6d 100644 --- a/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py +++ b/vllm/model_executor/layers/fused_moe/deep_gemm_moe.py @@ -141,7 +141,6 @@ class DeepGemmExperts(mk.FusedMoEPermuteExpertsUnpermute): quant_out = _resize_cache(workspace13.view(dtype=torch.float8_e4m3fn), (M_sum, N // 2)) mm2_out = _resize_cache(workspace2, (M_sum, K)) - # import pdb; pdb.set_trace() dg.m_grouped_gemm_fp8_fp8_bf16_nt_contiguous( (a1q, a1q_scale), (w1, w1_scale), mm1_out, expert_ids) From ded1fb635b7c1504a83fc7c195a5bf47d31c1bef Mon Sep 17 00:00:00 2001 From: Zhonghua Deng Date: Tue, 1 Jul 2025 07:45:14 +0800 Subject: [PATCH 209/211] [Bugfix][V1][P/D]Fix the issue of occasional garbled output for P2pNcclConnector (#20263) Signed-off-by: Abatom --- .../kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py b/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py index 81f7a2525896e..35c26897fe3f4 100644 --- a/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py +++ b/vllm/distributed/kv_transfer/kv_connector/v1/p2p/p2p_nccl_engine.py @@ -310,10 +310,11 @@ class P2pNcclEngine: elif data["cmd"] == "PUT": tensor_id = data["tensor_id"] try: - tensor = torch.empty(data["shape"], - dtype=getattr( - torch, data["dtype"]), - device=self.device) + with torch.cuda.stream(self.recv_stream): + tensor = torch.empty(data["shape"], + dtype=getattr( + torch, data["dtype"]), + device=self.device) self.router_socket.send_multipart( [remote_address, b"0"]) comm, rank = self.comms[remote_address.decode()] From 6d42ce83155d42f04643c1fa54eaed8abf8170c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luka=20Govedi=C4=8D?= Date: Mon, 30 Jun 2025 21:03:13 -0400 Subject: [PATCH 210/211] [CLI] Improve CLI arg parsing for `-O`/`--compilation-config` (#20156) Signed-off-by: luka --- tests/engine/test_arg_utils.py | 28 +++++++++------ tests/test_utils.py | 47 ++++++++++++++++++++++++ vllm/config.py | 19 +++++----- vllm/engine/arg_utils.py | 5 ++- vllm/utils.py | 65 ++++++++++++++++++++++++---------- 5 files changed, 124 insertions(+), 40 deletions(-) diff --git a/tests/engine/test_arg_utils.py b/tests/engine/test_arg_utils.py index cfbc7c245ffd4..847f150bd6443 100644 --- a/tests/engine/test_arg_utils.py +++ b/tests/engine/test_arg_utils.py @@ -239,32 +239,40 @@ def test_compilation_config(): assert args.compilation_config == CompilationConfig() # set to O3 - args = parser.parse_args(["-O3"]) - assert args.compilation_config.level == 3 + args = parser.parse_args(["-O0"]) + assert args.compilation_config.level == 0 # set to O 3 (space) - args = parser.parse_args(["-O", "3"]) - assert args.compilation_config.level == 3 + args = parser.parse_args(["-O", "1"]) + assert args.compilation_config.level == 1 # set to O 3 (equals) - args = parser.parse_args(["-O=3"]) + args = parser.parse_args(["-O=2"]) + assert args.compilation_config.level == 2 + + # set to O.level 3 + args = parser.parse_args(["-O.level", "3"]) assert args.compilation_config.level == 3 # set to string form of a dict args = parser.parse_args([ - "--compilation-config", - '{"level": 3, "cudagraph_capture_sizes": [1, 2, 4, 8]}', + "-O", + '{"level": 3, "cudagraph_capture_sizes": [1, 2, 4, 8], ' + '"use_inductor": false}', ]) assert (args.compilation_config.level == 3 and - args.compilation_config.cudagraph_capture_sizes == [1, 2, 4, 8]) + args.compilation_config.cudagraph_capture_sizes == [1, 2, 4, 8] + and not args.compilation_config.use_inductor) # set to string form of a dict args = parser.parse_args([ "--compilation-config=" - '{"level": 3, "cudagraph_capture_sizes": [1, 2, 4, 8]}', + '{"level": 3, "cudagraph_capture_sizes": [1, 2, 4, 8], ' + '"use_inductor": true}', ]) assert (args.compilation_config.level == 3 and - args.compilation_config.cudagraph_capture_sizes == [1, 2, 4, 8]) + args.compilation_config.cudagraph_capture_sizes == [1, 2, 4, 8] + and args.compilation_config.use_inductor) def test_prefix_cache_default(): diff --git a/tests/test_utils.py b/tests/test_utils.py index 913188455d8e6..36db8202ba622 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -5,6 +5,7 @@ import asyncio import hashlib import json +import logging import pickle import socket from collections.abc import AsyncIterator @@ -142,6 +143,7 @@ def parser(): parser.add_argument('--batch-size', type=int) parser.add_argument('--enable-feature', action='store_true') parser.add_argument('--hf-overrides', type=json.loads) + parser.add_argument('-O', '--compilation-config', type=json.loads) return parser @@ -265,6 +267,11 @@ def test_dict_args(parser): "val2", "--hf-overrides.key2.key4", "val3", + # Test compile config and compilation level + "-O.use_inductor=true", + "-O.backend", + "custom", + "-O1", # Test = sign "--hf-overrides.key5=val4", # Test underscore to dash conversion @@ -281,6 +288,13 @@ def test_dict_args(parser): "true", "--hf_overrides.key12.key13", "null", + # Test '-' and '.' in value + "--hf_overrides.key14.key15", + "-minus.and.dot", + # Test array values + "-O.custom_ops+", + "-quant_fp8", + "-O.custom_ops+=+silu_mul,-rms_norm", ] parsed_args = parser.parse_args(args) assert parsed_args.model_name == "something.something" @@ -301,7 +315,40 @@ def test_dict_args(parser): "key12": { "key13": None, }, + "key14": { + "key15": "-minus.and.dot", + } } + assert parsed_args.compilation_config == { + "level": 1, + "use_inductor": True, + "backend": "custom", + "custom_ops": ["-quant_fp8", "+silu_mul", "-rms_norm"], + } + + +def test_duplicate_dict_args(caplog_vllm, parser): + args = [ + "--model-name=something.something", + "--hf-overrides.key1", + "val1", + "--hf-overrides.key1", + "val2", + "-O1", + "-O.level", + "2", + "-O3", + ] + + parsed_args = parser.parse_args(args) + # Should be the last value + assert parsed_args.hf_overrides == {"key1": "val2"} + assert parsed_args.compilation_config == {"level": 3} + + assert len(caplog_vllm.records) == 1 + assert "duplicate" in caplog_vllm.text + assert "--hf-overrides.key1" in caplog_vllm.text + assert "-O.level" in caplog_vllm.text # yapf: enable diff --git a/vllm/config.py b/vllm/config.py index 57b9df2364775..46a5bf34f66e4 100644 --- a/vllm/config.py +++ b/vllm/config.py @@ -4140,9 +4140,9 @@ class CompilationConfig: @classmethod def from_cli(cls, cli_value: str) -> "CompilationConfig": - """Parse the CLI value for the compilation config.""" - if cli_value in ["0", "1", "2", "3"]: - return cls(level=int(cli_value)) + """Parse the CLI value for the compilation config. + -O1, -O2, -O3, etc. is handled in FlexibleArgumentParser. + """ return TypeAdapter(CompilationConfig).validate_json(cli_value) def __post_init__(self) -> None: @@ -4303,17 +4303,16 @@ class VllmConfig: """Quantization configuration.""" compilation_config: CompilationConfig = field( default_factory=CompilationConfig) - """`torch.compile` configuration for the model. + """`torch.compile` and cudagraph capture configuration for the model. - When it is a number (0, 1, 2, 3), it will be interpreted as the - optimization level. + As a shorthand, `-O` can be used to directly specify the compilation + level `n`: `-O3` is equivalent to `-O.level=3` (same as `-O='{"level":3}'`). + Currently, -O and -O= are supported as well but this will likely be + removed in favor of clearer -O syntax in the future. NOTE: level 0 is the default level without any optimization. level 1 and 2 are for internal testing only. level 3 is the recommended level for - production. - - Following the convention of traditional compilers, using `-O` without space - is also supported. `-O3` is equivalent to `-O 3`. + production, also default in V1. You can specify the full compilation config like so: `{"level": 3, "cudagraph_capture_sizes": [1, 2, 4, 8]}` diff --git a/vllm/engine/arg_utils.py b/vllm/engine/arg_utils.py index 6c908f88b9a92..2d3783363c00b 100644 --- a/vllm/engine/arg_utils.py +++ b/vllm/engine/arg_utils.py @@ -202,7 +202,10 @@ def _compute_kwargs(cls: ConfigType) -> dict[str, Any]: passed individually. For example, the following sets of arguments are equivalent:\n\n - `--json-arg '{"key1": "value1", "key2": {"key3": "value2"}}'`\n - - `--json-arg.key1 value1 --json-arg.key2.key3 value2`\n\n""" + - `--json-arg.key1 value1 --json-arg.key2.key3 value2`\n + Additionally, list elements can be passed individually using '+': + - `--json-arg '{"key4": ["value3", "value4", "value5"]}'`\n + - `--json-arg.key4+ value3 --json-arg.key4+='value4,value5'`\n\n""" if dataclass_cls is not None: def parse_dataclass(val: str, cls=dataclass_cls) -> Any: diff --git a/vllm/utils.py b/vllm/utils.py index 689102281c54f..60e560c70ad3a 100644 --- a/vllm/utils.py +++ b/vllm/utils.py @@ -89,15 +89,15 @@ MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS = 5120 STR_NOT_IMPL_ENC_DEC_SWA = \ "Sliding window attention for encoder/decoder models " + \ - "is not currently supported." + "is not currently supported." STR_NOT_IMPL_ENC_DEC_PREFIX_CACHE = \ "Prefix caching for encoder/decoder models " + \ - "is not currently supported." + "is not currently supported." STR_NOT_IMPL_ENC_DEC_CHUNKED_PREFILL = \ "Chunked prefill for encoder/decoder models " + \ - "is not currently supported." + "is not currently supported." STR_NOT_IMPL_ENC_DEC_LOGIT_SOFTCAP = ( "Models with logits_soft_cap " @@ -752,7 +752,7 @@ def _generate_random_fp8( # to generate random data for fp8 data. # For example, s.11111.00 in fp8e5m2 format represents Inf. # | E4M3 | E5M2 - #-----|-------------|------------------- + # -----|-------------|------------------- # Inf | N/A | s.11111.00 # NaN | s.1111.111 | s.11111.{01,10,11} from vllm import _custom_ops as ops @@ -840,7 +840,6 @@ def create_kv_caches_with_random( seed: Optional[int] = None, device: Optional[str] = "cuda", ) -> tuple[list[torch.Tensor], list[torch.Tensor]]: - if cache_dtype == "fp8" and head_size % 16: raise ValueError( f"Does not support key cache of type fp8 with head_size {head_size}" @@ -1205,7 +1204,6 @@ def deprecate_args( is_deprecated: Union[bool, Callable[[], bool]] = True, additional_message: Optional[str] = None, ) -> Callable[[F], F]: - if not callable(is_deprecated): is_deprecated = partial(identity, is_deprecated) @@ -1355,7 +1353,7 @@ def weak_bind(bound_method: Callable[..., Any], ) -> Callable[..., None]: return weak_bound -#From: https://stackoverflow.com/a/4104188/2749989 +# From: https://stackoverflow.com/a/4104188/2749989 def run_once(f: Callable[P, None]) -> Callable[P, None]: def wrapper(*args: P.args, **kwargs: P.kwargs) -> None: @@ -1474,7 +1472,7 @@ class FlexibleArgumentParser(ArgumentParser): # Convert underscores to dashes and vice versa in argument names processed_args = list[str]() - for arg in args: + for i, arg in enumerate(args): if arg.startswith('--'): if '=' in arg: key, value = arg.split('=', 1) @@ -1483,10 +1481,17 @@ class FlexibleArgumentParser(ArgumentParser): else: key = pattern.sub(repl, arg, count=1) processed_args.append(key) - elif arg.startswith('-O') and arg != '-O' and len(arg) == 2: - # allow -O flag to be used without space, e.g. -O3 - processed_args.append('-O') - processed_args.append(arg[2:]) + elif arg.startswith('-O') and arg != '-O' and arg[2] != '.': + # allow -O flag to be used without space, e.g. -O3 or -Odecode + # -O.<...> handled later + # also handle -O= here + level = arg[3:] if arg[2] == '=' else arg[2:] + processed_args.append(f'-O.level={level}') + elif arg == '-O' and i + 1 < len(args) and args[i + 1] in { + "0", "1", "2", "3" + }: + # Convert -O to -O.level + processed_args.append('-O.level') else: processed_args.append(arg) @@ -1504,27 +1509,44 @@ class FlexibleArgumentParser(ArgumentParser): def recursive_dict_update( original: dict[str, Any], update: dict[str, Any], - ): - """Recursively updates a dictionary with another dictionary.""" + ) -> set[str]: + """Recursively updates a dictionary with another dictionary. + Returns a set of duplicate keys that were overwritten. + """ + duplicates = set[str]() for k, v in update.items(): if isinstance(v, dict) and isinstance(original.get(k), dict): - recursive_dict_update(original[k], v) + nested_duplicates = recursive_dict_update(original[k], v) + duplicates |= {f"{k}.{d}" for d in nested_duplicates} + elif isinstance(v, list) and isinstance(original.get(k), list): + original[k] += v else: + if k in original: + duplicates.add(k) original[k] = v + return duplicates delete = set[int]() dict_args = defaultdict[str, dict[str, Any]](dict) + duplicates = set[str]() for i, processed_arg in enumerate(processed_args): - if processed_arg.startswith("--") and "." in processed_arg: + if i in delete: # skip if value from previous arg + continue + + if processed_arg.startswith("-") and "." in processed_arg: if "=" in processed_arg: processed_arg, value_str = processed_arg.split("=", 1) if "." not in processed_arg: - # False positive, . was only in the value + # False positive, '.' was only in the value continue else: value_str = processed_args[i + 1] delete.add(i + 1) + if processed_arg.endswith("+"): + processed_arg = processed_arg[:-1] + value_str = json.dumps(list(value_str.split(","))) + key, *keys = processed_arg.split(".") try: value = json.loads(value_str) @@ -1533,12 +1555,17 @@ class FlexibleArgumentParser(ArgumentParser): # Merge all values with the same key into a single dict arg_dict = create_nested_dict(keys, value) - recursive_dict_update(dict_args[key], arg_dict) + arg_duplicates = recursive_dict_update(dict_args[key], + arg_dict) + duplicates |= {f'{key}.{d}' for d in arg_duplicates} delete.add(i) # Filter out the dict args we set to None processed_args = [ a for i, a in enumerate(processed_args) if i not in delete ] + if duplicates: + logger.warning("Found duplicate keys %s", ", ".join(duplicates)) + # Add the dict args back as if they were originally passed as JSON for dict_arg, dict_value in dict_args.items(): processed_args.append(dict_arg) @@ -2405,7 +2432,7 @@ def memory_profiling( The increase of `torch.cuda.memory_stats()["allocated_bytes.all.peak"]` during profiling gives (b.). The increase of `non_torch_memory` from creating the current vLLM instance until after profiling to get (c.). - """ # noqa + """ # noqa gc.collect() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() From e28533a16f73a4eae01c2b7b1b4ddf3fc1beedab Mon Sep 17 00:00:00 2001 From: fyuan1316 Date: Tue, 1 Jul 2025 09:30:14 +0800 Subject: [PATCH 211/211] [Bugfix] Fix include prompt in stream response when echo=true (#15233) Signed-off-by: Yuan Fang --- tests/entrypoints/openai/test_completion.py | 54 +++++++++++++++++++ vllm/entrypoints/openai/serving_completion.py | 21 ++++++-- 2 files changed, 71 insertions(+), 4 deletions(-) diff --git a/tests/entrypoints/openai/test_completion.py b/tests/entrypoints/openai/test_completion.py index 7e54143f6e1c3..7933ca5cd6c6f 100644 --- a/tests/entrypoints/openai/test_completion.py +++ b/tests/entrypoints/openai/test_completion.py @@ -779,3 +779,57 @@ async def test_guided_decoding_type_error(client: openai.AsyncOpenAI, prompt="Give an example string that fits this regex", extra_body=dict(guided_regex=sample_regex, guided_json=sample_json_schema)) + + +@pytest.mark.asyncio +@pytest.mark.parametrize( + "model_name,stream,echo", + [ + (MODEL_NAME, False, False), + (MODEL_NAME, False, True), + (MODEL_NAME, True, False), + (MODEL_NAME, True, True) # should not raise BadRequestError error + ], +) +async def test_echo_stream_completion(client: openai.AsyncOpenAI, + model_name: str, stream: bool, + echo: bool): + saying: str = "Hello, my name is" + result = await client.completions.create(model=model_name, + prompt=saying, + max_tokens=10, + temperature=0.0, + echo=echo, + stream=stream) + + stop_reason = "length" + + if not stream: + completion = result + assert completion.id is not None + assert completion.choices is not None and len(completion.choices) == 1 + + choice = completion.choices[0] + assert len(choice.text) >= 5 + assert choice.finish_reason == stop_reason + + if echo: + assert choice.text is not None and saying in choice.text + else: + assert choice.text is not None and saying not in choice.text + + else: + chunks: list[str] = [] + final_finish_reason = None + async for chunk in result: + if chunk.choices and chunk.choices[0].text: + chunks.append(chunk.choices[0].text) + if chunk.choices and chunk.choices[0].finish_reason: + final_finish_reason = chunk.choices[0].finish_reason + + assert final_finish_reason == stop_reason + content = "".join(chunks) + if echo: + assert content is not None and saying in content + else: + assert content is not None and saying not in content diff --git a/vllm/entrypoints/openai/serving_completion.py b/vllm/entrypoints/openai/serving_completion.py index a19fde8d70a83..8171b491aafcc 100644 --- a/vllm/entrypoints/openai/serving_completion.py +++ b/vllm/entrypoints/openai/serving_completion.py @@ -25,10 +25,13 @@ from vllm.entrypoints.openai.protocol import (CompletionLogProbs, ErrorResponse, RequestResponseMetadata, UsageInfo) -# yapf: enable +from vllm.entrypoints.openai.serving_engine import ( + EmbedsPrompt as ServingEngineEmbedsPrompt) from vllm.entrypoints.openai.serving_engine import (OpenAIServing, + TextTokensPrompt, clamp_prompt_logprobs, is_text_tokens_prompt) +# yapf: enable from vllm.entrypoints.openai.serving_models import OpenAIServingModels from vllm.inputs.data import (EmbedsPrompt, TokensPrompt, is_embeds_prompt, is_tokens_prompt) @@ -223,6 +226,7 @@ class OpenAIServingCompletion(OpenAIServing): if stream: return self.completion_stream_generator( request, + request_prompts, result_generator, request_id, created_time, @@ -285,6 +289,8 @@ class OpenAIServingCompletion(OpenAIServing): async def completion_stream_generator( self, request: CompletionRequest, + request_prompts: list[Union[TextTokensPrompt, + ServingEngineEmbedsPrompt]], result_generator: AsyncIterator[tuple[int, RequestOutput]], request_id: str, created_time: int, @@ -313,7 +319,15 @@ class OpenAIServingCompletion(OpenAIServing): async for prompt_idx, res in result_generator: prompt_token_ids = res.prompt_token_ids prompt_logprobs = res.prompt_logprobs - prompt_text = res.prompt + + if res.prompt is not None: + prompt_text = res.prompt + else: + request_prompt = request_prompts[prompt_idx] + if is_text_tokens_prompt(request_prompt): + prompt_text = request_prompt["prompt"] + else: + prompt_text = None # Prompt details are excluded from later streamed outputs if prompt_token_ids is not None: @@ -336,14 +350,13 @@ class OpenAIServingCompletion(OpenAIServing): delta_token_ids = prompt_token_ids out_logprobs = prompt_logprobs else: - assert prompt_logprobs is not None # echo the prompt and first token delta_text = prompt_text + output.text delta_token_ids = [ *prompt_token_ids, *output.token_ids ] out_logprobs = [ - *prompt_logprobs, + *(prompt_logprobs or []), *(output.logprobs or []), ] has_echoed[i] = True