diff --git a/vllm/envs.py b/vllm/envs.py
index 9bce5c6d2e0bb..e28e9658e5b53 100755
--- a/vllm/envs.py
+++ b/vllm/envs.py
@@ -70,7 +70,6 @@ if TYPE_CHECKING:
     NVCC_THREADS: Optional[str] = None
     VLLM_USE_PRECOMPILED: bool = False
     VLLM_TEST_USE_PRECOMPILED_NIGHTLY_WHEEL: bool = False
-    VLLM_NO_DEPRECATION_WARNING: bool = False
     VLLM_KEEP_ALIVE_ON_ENGINE_DEATH: bool = False
     CMAKE_BUILD_TYPE: Optional[str] = None
     VERBOSE: bool = False
@@ -582,10 +581,6 @@ environment_variables: dict[str, Callable[[], Any]] = {
     lambda: bool(
         int(os.getenv("VLLM_ENABLE_FUSED_MOE_ACTIVATION_CHUNKING", "1"))),
 
-    # If set, vllm will skip the deprecation warnings.
-    "VLLM_NO_DEPRECATION_WARNING":
-    lambda: bool(int(os.getenv("VLLM_NO_DEPRECATION_WARNING", "0"))),
-
     # If set, the OpenAI API server will stay alive even after the underlying
     # AsyncLLMEngine errors and stops serving requests
     "VLLM_KEEP_ALIVE_ON_ENGINE_DEATH":