[Chore] Remove duplicate has_ functions in vllm.utils (#27372)

Signed-off-by: Jonathan <chenleejonathan@gmail.com>
Author: Jonathan Chen
Date:   2025-10-23 02:11:59 -04:00 (committed by GitHub)
parent 3729ed00ba
commit e255d92990


@@ -5,7 +5,6 @@ import contextlib
 import datetime
 import enum
 import getpass
-import importlib.util
 import inspect
 import json
 import multiprocessing
@@ -1048,46 +1047,6 @@ def check_use_alibi(model_config: ModelConfig) -> bool:
     )
 
 
-@cache
-def _has_module(module_name: str) -> bool:
-    """Return True if *module_name* can be found in the current environment.
-
-    The result is cached so that subsequent queries for the same module incur
-    no additional overhead.
-    """
-    return importlib.util.find_spec(module_name) is not None
-
-
-def has_pplx() -> bool:
-    """Whether the optional `pplx_kernels` package is available."""
-    return _has_module("pplx_kernels")
-
-
-def has_deep_ep() -> bool:
-    """Whether the optional `deep_ep` package is available."""
-    return _has_module("deep_ep")
-
-
-def has_deep_gemm() -> bool:
-    """Whether the optional `deep_gemm` package is available."""
-    return _has_module("deep_gemm")
-
-
-def has_triton_kernels() -> bool:
-    """Whether the optional `triton_kernels` package is available."""
-    return _has_module("triton_kernels")
-
-
-def has_tilelang() -> bool:
-    """Whether the optional `tilelang` package is available."""
-    return _has_module("tilelang")
-
-
 def length_from_prompt_token_ids_or_embeds(
     prompt_token_ids: list[int] | None,
     prompt_embeds: torch.Tensor | None,
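
For reference, below is a minimal self-contained sketch of the cached module-availability pattern that the deleted helpers implemented. The commit title says these were duplicates, so the surviving copies presumably live elsewhere in vllm; the exact module they moved to is not shown in this diff, and the standalone layout here is only an illustration of the pattern, not the project's actual code.

    import importlib.util
    from functools import cache


    @cache
    def _has_module(module_name: str) -> bool:
        """Return True if *module_name* can be found in the current environment.

        find_spec() only searches for the module; it does not import it, so
        the check is cheap and side-effect free. @cache memoizes the result,
        so repeated queries for the same module do no further work.
        """
        return importlib.util.find_spec(module_name) is not None


    def has_deep_gemm() -> bool:
        """Whether the optional `deep_gemm` package is available."""
        return _has_module("deep_gemm")


    if __name__ == "__main__":
        # Both calls hit the same cache entry; the environment is probed once.
        print("deep_gemm available:", has_deep_gemm())
        print("deep_gemm available:", has_deep_gemm())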