[Doc][CPU] Fix index link for CPU regular release wheels (#31015)
Signed-off-by: jiang1.li <jiang1.li@intel.com>
commit 096b25c9ed
parent de08b8f61b
@@ -19,12 +19,12 @@ Pre-built vLLM wheels for Arm are available since version 0.11.2. These wheels c
 ```bash
 export VLLM_VERSION=$(curl -s https://api.github.com/repos/vllm-project/vllm/releases/latest | jq -r .tag_name | sed 's/^v//')
-uv pip install vllm --extra-index-url https://wheels.vllm.ai/${VLLM_VERSION}/cpu --index-strategy first-index
+uv pip install https://github.com/vllm-project/vllm/releases/download/v${VLLM_VERSION}/vllm-${VLLM_VERSION}+cpu-cp38-abi3-manylinux_2_35_aarch64.whl
 ```

 ??? console "pip"
 ```bash
-pip install vllm==${VLLM_VERSION}+cpu --extra-index-url https://wheels.vllm.ai/${VLLM_VERSION}/cpu
+pip install https://github.com/vllm-project/vllm/releases/download/v${VLLM_VERSION}/vllm-${VLLM_VERSION}+cpu-cp38-abi3-manylinux_2_35_aarch64.whl
 ```

 !!! warning "set `LD_PRELOAD`"
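Since the updated commands point at GitHub release assets rather than the wheel index, a quick sanity check is to probe the asset URL for the resolved version before installing. This is only a hedged sketch: the wheel filename pattern is copied from the diff above, and `curl -sI` against a GitHub release download URL typically returns a 302 redirect when the asset exists and a 404 when it does not.

```bash
# Resolve the latest release tag, exactly as in the docs above.
export VLLM_VERSION=$(curl -s https://api.github.com/repos/vllm-project/vllm/releases/latest | jq -r .tag_name | sed 's/^v//')

# Probe the Arm CPU wheel URL used by the updated install command.
# A 302 redirect means the release asset is present; a 404 means it is not.
curl -sI "https://github.com/vllm-project/vllm/releases/download/v${VLLM_VERSION}/vllm-${VLLM_VERSION}+cpu-cp38-abi3-manylinux_2_35_aarch64.whl" | head -n 1
```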
@@ -23,12 +23,12 @@ Pre-built vLLM wheels for x86 with AVX512 are available since version 0.13.0. To
 export VLLM_VERSION=$(curl -s https://api.github.com/repos/vllm-project/vllm/releases/latest | jq -r .tag_name | sed 's/^v//')

 # use uv
-uv pip install vllm --extra-index-url https://wheels.vllm.ai/${VLLM_VERSION}/cpu --index-strategy first-index --torch-backend cpu
+uv pip install https://github.com/vllm-project/vllm/releases/download/v${VLLM_VERSION}/vllm-${VLLM_VERSION}+cpu-cp38-abi3-manylinux_2_35_x86_64.whl --torch-backend cpu
 ```
 ??? console "pip"
 ```bash
 # use pip
-pip install vllm==${VLLM_VERSION}+cpu --extra-index-url https://wheels.vllm.ai/${VLLM_VERSION}/cpu --extra-index-url https://download.pytorch.org/whl/cpu
+pip install https://github.com/vllm-project/vllm/releases/download/v${VLLM_VERSION}/vllm-${VLLM_VERSION}+cpu-cp38-abi3-manylinux_2_35_x86_64.whl --extra-index-url https://download.pytorch.org/whl/cpu
 ```
 !!! warning "set `LD_PRELOAD`"
 Before use vLLM CPU installed via wheels, make sure TCMalloc and Intel OpenMP are installed and added to `LD_PRELOAD`:
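The `LD_PRELOAD` instructions referenced by the warning are cut off in this hunk. For context, a minimal sketch of what that setup usually looks like on an Ubuntu x86 host follows; the package names and library paths are assumptions for illustration (they differ across distros and Python environments), not the literal commands from the docs.

```bash
# Illustrative assumptions: Ubuntu package names and typical library paths.
sudo apt-get install -y libtcmalloc-minimal4   # TCMalloc
pip install intel-openmp                       # ships libiomp5.so inside the Python environment

# Locate the shared libraries on your system (paths vary).
find / -name 'libtcmalloc*so*' 2>/dev/null
find / -name 'libiomp5.so' 2>/dev/null

# Prepend both libraries to LD_PRELOAD before starting vLLM.
export LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc_minimal.so.4:/usr/local/lib/libiomp5.so:$LD_PRELOAD"
```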