diff --git a/docker/Dockerfile b/docker/Dockerfile
index b06c4d33626d..d1fa92ce6d19 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -510,7 +510,7 @@ RUN --mount=type=cache,target=/root/.cache/uv \
     else \
         BITSANDBYTES_VERSION="0.46.1"; \
     fi; \
-    uv pip install --system accelerate hf_transfer 'modelscope!=1.15.0' "bitsandbytes>=${BITSANDBYTES_VERSION}" 'timm==0.9.10' boto3 runai-model-streamer runai-model-streamer[s3]
+    uv pip install --system accelerate hf_transfer modelscope "bitsandbytes>=${BITSANDBYTES_VERSION}" 'timm==0.9.10' boto3 runai-model-streamer runai-model-streamer[s3]

 ENV VLLM_USAGE_SOURCE production-docker-image

diff --git a/docker/Dockerfile.xpu b/docker/Dockerfile.xpu
index 41b4c42e4c4b..3130435ca721 100644
--- a/docker/Dockerfile.xpu
+++ b/docker/Dockerfile.xpu
@@ -47,7 +47,7 @@ FROM vllm-base AS vllm-openai

 # install additional dependencies for openai api server
 RUN --mount=type=cache,target=/root/.cache/pip \
-    pip install accelerate hf_transfer pytest 'modelscope!=1.15.0'
+    pip install accelerate hf_transfer pytest modelscope

 ENV VLLM_USAGE_SOURCE production-docker-image \
     TRITON_XPU_PROFILE 1