Merge 5f863b4820105b30a1991df7498688cbda09df71 into 254f6b986720c92ddf97fbb1a6a6465da8e87e29
commit cb47a40a1b
@@ -1,5 +1,10 @@
 FROM intel/deep-learning-essentials:2025.2.2-0-devel-ubuntu24.04 AS vllm-base

 WORKDIR /workspace/

+ARG PYTHON_VERSION=3.12
+ARG PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/xpu"
+
 RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/oneapi-archive-keyring.gpg > /dev/null && \
     echo "deb [signed-by=/usr/share/keyrings/oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main" | tee /etc/apt/sources.list.d/oneAPI.list && \
+    add-apt-repository -y ppa:kobuk-team/intel-graphics-staging
@@ -27,6 +32,13 @@ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.12 1

 RUN apt install -y libze1 libze-dev libze-intel-gpu1 intel-opencl-icd libze-intel-gpu-raytracing intel-ocloc

+ENV PATH="/root/.local/bin:$PATH"
+ENV VIRTUAL_ENV="/opt/venv"
+ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python
+RUN curl -LsSf https://astral.sh/uv/install.sh | sh
+RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV}
+ENV PATH="$VIRTUAL_ENV/bin:$PATH"
+
 # This oneCCL build contains BMG support, which the default oneAPI 2025.2 release lacks.
 RUN wget https://github.com/uxlfoundation/oneCCL/releases/download/2021.15.6/intel-oneccl-2021.15.6.9_offline.sh
 RUN bash intel-oneccl-2021.15.6.9_offline.sh -a --silent --eula accept && \
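For reference, the uv bootstrap above mirrors the standard installer flow; a minimal sketch of the same sequence outside Docker (assuming uv's installer places the binary in ~/.local/bin, its current default, which matches the PATH line in this diff):

    curl -LsSf https://astral.sh/uv/install.sh | sh
    export PATH="$HOME/.local/bin:$PATH"
    # --seed pre-installs pip into the new environment, so plain `pip` keeps working inside it
    uv venv /opt/venv --python 3.12 --seed
    export PATH="/opt/venv/bin:$PATH"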
@@ -34,23 +46,24 @@ RUN bash intel-oneccl-2021.15.6.9_offline.sh -a --silent --eula accept && \
     echo "source /opt/intel/oneapi/ccl/2021.15/env/vars.sh --force" >> /root/.bashrc

 SHELL ["bash", "-c"]
-CMD ["bash", "-c", "source /root/.bashrc && exec bash"]
+# CMD ["bash", "-c", "source /root/.bashrc && exec bash"]

 WORKDIR /workspace/vllm
 COPY requirements/xpu.txt /workspace/vllm/requirements/xpu.txt
 COPY requirements/common.txt /workspace/vllm/requirements/common.txt

 # suppress the Python "externally managed environment" error
 RUN python3 -m pip config set global.break-system-packages true
 ENV UV_HTTP_TIMEOUT=500

-RUN --mount=type=cache,target=/root/.cache/pip \
-    pip install --no-cache-dir \
-    -r requirements/xpu.txt
+# Configure package index for XPU
+ENV PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}
+ENV UV_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}
+ENV UV_INDEX_STRATEGY="unsafe-best-match"
+ENV UV_LINK_MODE="copy"

 # arctic-inference is built from source, which needs torch-xpu properly installed;
 # it is used for suffix-method speculative decoding
 RUN --mount=type=cache,target=/root/.cache/pip \
     pip install --no-cache-dir arctic-inference==0.1.1
+RUN --mount=type=cache,target=/root/.cache/uv \
+    --mount=type=bind,src=requirements/common.txt,target=/workspace/vllm/requirements/common.txt \
+    --mount=type=bind,src=requirements/xpu.txt,target=/workspace/vllm/requirements/xpu.txt \
+    uv pip install --upgrade pip && \
+    uv pip install triton && \
+    uv pip install -r requirements/xpu.txt

 ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/lib/"
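For reference, UV_INDEX_STRATEGY="unsafe-best-match" makes uv compare candidate versions across every configured index and pick the best one, closer to pip's behavior; by default uv only consults the first index that carries a package. A hedged sketch of the effect (which wheel wins depends on what each index currently publishes):

    export UV_EXTRA_INDEX_URL="https://download.pytorch.org/whl/xpu"
    export UV_INDEX_STRATEGY="unsafe-best-match"  # consider both PyPI and the XPU index
    uv pip install torch                          # can now resolve the XPU build instead of the PyPI default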
@@ -64,7 +77,7 @@ ENV VLLM_WORKER_MULTIPROC_METHOD=spawn

 RUN --mount=type=cache,target=/root/.cache/pip \
     --mount=type=bind,source=.git,target=.git \
-    pip install --no-build-isolation .
+    uv pip install --no-build-isolation .

 CMD ["/bin/bash"]
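For reference, the .git bind mount presumably lets the build derive version information from git metadata (setuptools-scm style) without copying the repo history into the image layer. Outside Docker, the equivalent source install would look roughly like this; note that --no-build-isolation builds against the already-installed environment, so torch-xpu must be present first:

    git clone https://github.com/vllm-project/vllm.git && cd vllm
    uv pip install --no-build-isolation .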
@@ -72,10 +85,10 @@ FROM vllm-base AS vllm-openai

 # install additional dependencies for openai api server
 RUN --mount=type=cache,target=/root/.cache/pip \
-    pip install accelerate hf_transfer pytest pytest_asyncio lm_eval[api] modelscope
+    uv pip install accelerate hf_transfer pytest pytest_asyncio lm_eval[api] modelscope

 # install development dependencies (for testing)
-RUN python3 -m pip install -e tests/vllm_test_utils
+RUN uv pip install -e tests/vllm_test_utils

 # install nixl from source code
 ENV NIXL_VERSION=0.7.0

@@ -86,6 +99,6 @@ RUN rm /usr/lib/python3/dist-packages/PyJWT-2.7.0.dist-info/ -rf

 # remove torch bundled oneccl to avoid conflicts
 RUN --mount=type=cache,target=/root/.cache/pip \
-    pip uninstall oneccl oneccl-devel -y
+    uv pip uninstall oneccl oneccl-devel

 ENTRYPOINT ["vllm", "serve"]
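A hedged usage sketch for the resulting image; the Dockerfile path, target name, and device flags are assumptions based on this diff and typical Intel GPU container setups, not taken from the repo docs:

    docker build -f docker/Dockerfile.xpu --target vllm-openai -t vllm-xpu .
    # ENTRYPOINT is ["vllm", "serve"], so arguments after the image name go to `vllm serve`
    docker run --rm -it --device /dev/dri vllm-xpu Qwen/Qwen2.5-1.5B-Instruct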
@@ -43,7 +43,7 @@ def run_command(command, cwd=".", env=None):
 def is_pip_package_installed(package_name):
     """Checks if a package is installed via pip without raising an exception."""
     result = subprocess.run(
-        [sys.executable, "-m", "pip", "show", package_name],
+        ["uv", "pip", "show", package_name],
         stdout=subprocess.DEVNULL,
         stderr=subprocess.DEVNULL,
     )
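This helper presumably keys off the process exit status: like `pip show`, `uv pip show` exits non-zero when the package is absent, which is what makes the swap safe here. A quick shell check of the same contract:

    uv pip show torch >/dev/null 2>&1 && echo "installed" || echo "missing"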
@@ -117,7 +117,7 @@ def build_and_install_prerequisites(args):
            flush=True,
        )
        print("--> Installing from cache, skipping all source builds.", flush=True)
-        install_command = [sys.executable, "-m", "pip", "install", cached_wheel]
+        install_command = ["uv", "pip", "install", cached_wheel]
        run_command(install_command)
        print("\n--- Installation from cache complete. ---", flush=True)
        return
@@ -128,7 +128,7 @@ def build_and_install_prerequisites(args):
        flush=True,
    )
    print("\n--> Installing auditwheel...", flush=True)
-    run_command([sys.executable, "-m", "pip", "install", "auditwheel"])
+    run_command(["uv", "pip", "install", "auditwheel"])
    install_system_dependencies()
    ucx_install_path = os.path.abspath(UCX_INSTALL_DIR)
    print(f"--> Using wheel cache directory: {WHEELS_CACHE_HOME}", flush=True)
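auditwheel is presumably used further down in the script (outside this hunk) to repair the built nixl wheel so it bundles the freshly built UCX libraries; a generic invocation, with hypothetical paths, looks like:

    auditwheel repair temp_wheelhouse/nixl-*.whl -w wheelhouse/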
@@ -182,7 +182,7 @@ def build_and_install_prerequisites(args):
    temp_wheel_dir = os.path.join(ROOT_DIR, "temp_wheelhouse")
    run_command(
        [
-            sys.executable,
+            sys.executable,  # uv pip is not guaranteed to be the same python
            "-m",
            "pip",
            "wheel",
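The comment above flags a real caveat: a bare `uv pip` may target a different interpreter than sys.executable. If that mattered, uv could be pinned to an explicit interpreter (an alternative approach, not what this patch does):

    uv pip install --python "$(command -v python3)" auditwheel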
@@ -226,8 +226,7 @@ def build_and_install_prerequisites(args):
        flush=True,
    )
    install_command = [
-        sys.executable,
-        "-m",
+        "uv",
        "pip",
        "install",
        "--no-deps",  # w/o "no-deps", it will install cuda-torch
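For reference, `--no-deps` matters here because the nixl wheel's metadata would otherwise pull the default (CUDA) torch from PyPI over the already-installed XPU build, as the inline comment notes. A hedged sketch of the resulting install step, with the wheel path hypothetical:

    uv pip install --no-deps temp_wheelhouse/nixl-0.7.0*.whl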