diff --git a/vllm/v1/worker/cp_utils.py b/vllm/v1/worker/cp_utils.py index f666c739b0be7..7625b1b5f8951 100644 --- a/vllm/v1/worker/cp_utils.py +++ b/vllm/v1/worker/cp_utils.py @@ -21,22 +21,58 @@ def check_attention_cp_compatibility(vllm_config: VllmConfig) -> None: layer_impl = getattr(layer, "impl", None) if layer_impl is None: continue - if vllm_config.speculative_config is not None and interleave_size > 1: - assert layer_impl.supports_mtp_with_cp_non_trivial_interleave_size, ( - "MTP with cp_kv_cache_interleave_size > 1 is not " - f"supported in {layer_impl.__class__.__name__}." - ) - if dcp_size > 1: - assert layer_impl.need_to_return_lse_for_decode, ( - "DCP requires attention impls to return" - " the softmax lse for decode, but the impl " - f"{layer_impl.__class__.__name__} " - "does not return the softmax lse for decode." + + supports_mtp = layer_impl.supports_mtp_with_cp_non_trivial_interleave_size + if ( + vllm_config.speculative_config is not None + and interleave_size > 1 + and not supports_mtp + ): + raise RuntimeError( + f"Multi-Token Prediction (MTP) with " + f"cp_kv_cache_interleave_size > 1 is not supported by the " + f"current attention backend " + f"'{layer_impl.__class__.__name__}'.\n\n" + f"To resolve this issue, try one of the following:\n" + f" 1. Set cp_kv_cache_interleave_size to 1\n" + f" 2. Disable speculative decoding\n\n" + f"Note: No backends currently support MTP with " + f"cp_kv_cache_interleave_size > 1.\n\n" + f"For more information, see:\n" + f" https://docs.vllm.ai/en/latest/serving/" + f"distributed_serving.html" ) - if pcp_size > 1: - assert layer_impl.supports_pcp, ( - "PCP requires attention impls' support, " - f"but the impl {layer_impl.__class__.__name__} " - "does not support PCP." + if dcp_size > 1 and not layer_impl.need_to_return_lse_for_decode: + raise RuntimeError( + f"Decode Context Parallel (DCP) requires an attention " + f"backend that supports returning softmax LSE (log-sum-exp) " + f"for decode operations. 
The current backend " f"'{layer_impl.__class__.__name__}' does not support this " f"feature.\n\n" f"To resolve this issue, try one of the following:\n" f" 1. Use a compatible attention backend by specifying:\n" f" --attention-backend <backend_name>\n" f" Compatible backends: FLASH_ATTN, FLASHINFER, " f"TRITON_MLA, FLASH_MLA, FLASH_ATTN_MLA, CUTLASS_MLA\n" f" 2. Disable DCP by removing the " f"--decode-context-parallel-size flag\n\n" f"For more information, see:\n" f" https://docs.vllm.ai/en/latest/serving/" f"distributed_serving.html" ) + + if pcp_size > 1 and not layer_impl.supports_pcp: + raise RuntimeError( + f"Prefill Context Parallel (PCP) requires an attention " + f"backend that supports PCP. The current backend " + f"'{layer_impl.__class__.__name__}' does not support this " + f"feature.\n\n" + f"To resolve this issue:\n" + f" Disable PCP by removing the " + f"--prefill-context-parallel-size flag\n\n" + f"Note: No backends currently support PCP.\n\n" + f"For more information, see:\n" + f" https://docs.vllm.ai/en/latest/serving/" + f"distributed_serving.html" + )