diff --git a/vllm/envs.py b/vllm/envs.py
index 50cb3b7d1b7a..ec4b0888d0f4 100755
--- a/vllm/envs.py
+++ b/vllm/envs.py
@@ -668,12 +668,14 @@ environment_variables: dict[str, Callable[[], Any]] = {
     (os.environ.get("VLLM_ALLOW_RUNTIME_LORA_UPDATING", "0").strip().lower() in
      ("1", "true")),
 
-    # By default, vLLM will check the peer-to-peer capability itself,
-    # in case of broken drivers. See https://github.com/vllm-project/vllm/blob/a9b15c606fea67a072416ea0ea115261a2756058/vllm/distributed/device_communicators/custom_all_reduce_utils.py#L101-L108 for details. # noqa
-    # If this env var is set to 1, vLLM will skip the peer-to-peer check,
-    # and trust the driver's peer-to-peer capability report.
+    # We assume drivers can report p2p status correctly.
+    # If the program hangs when using custom allreduce,
+    # potentially caused by a bug in the driver (535 series),
+    # it might be helpful to set VLLM_SKIP_P2P_CHECK=0
+    # so that vLLM can verify whether p2p actually works.
+    # See https://github.com/vllm-project/vllm/blob/a9b15c606fea67a072416ea0ea115261a2756058/vllm/distributed/device_communicators/custom_all_reduce_utils.py#L101-L108 for details. # noqa
     "VLLM_SKIP_P2P_CHECK":
-    lambda: os.getenv("VLLM_SKIP_P2P_CHECK", "0") == "1",
+    lambda: os.getenv("VLLM_SKIP_P2P_CHECK", "1") == "1",
 
     # List of quantization kernels that should be disabled, used for testing
     # and performance comparisons. Currently only affects MPLinearKernel
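With the default flipped to "1", vLLM now trusts the driver's peer-to-peer report and skips its own check; a user who hits a hang in custom allreduce can opt back into the verification by setting the variable before vLLM is imported. A minimal sketch of that workflow, assuming a multi-GPU host; the model name and tensor_parallel_size value are illustrative, not part of this patch:

import os

# Opt back into vLLM's own p2p verification. With this patch the default
# ("1") skips the check and trusts the driver's p2p capability report.
os.environ["VLLM_SKIP_P2P_CHECK"] = "0"

from vllm import LLM  # import after setting the env var so vllm.envs picks it up

# Illustrative multi-GPU setup; tensor parallelism exercises custom allreduce.
llm = LLM(model="facebook/opt-125m", tensor_parallel_size=2)
print(llm.generate("Hello, world!")[0].outputs[0].text)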