[Bugfix][CI/Build][Hardware][AMD] Install matching torchvision to fix AMD tests (#5949)

Matt Wong 2024-06-29 14:47:58 -05:00 committed by GitHub
parent 75aa1442db
commit 9def10664e
2 changed files with 14 additions and 8 deletions


@@ -55,16 +55,22 @@ RUN apt-get purge -y sccache; pip uninstall -y sccache; rm -f "$(which sccache)"
 # Install torch == 2.4.0 on ROCm
 RUN case "$(ls /opt | grep -Po 'rocm-[0-9]\.[0-9]')" in \
         *"rocm-5.7"*) \
-            pip uninstall -y torch \
-            && pip install --no-cache-dir --pre torch==2.4.0.dev20240612 \
+            pip uninstall -y torch torchaudio torchvision \
+            && pip install --no-cache-dir --pre \
+                torch==2.4.0.dev20240612 torchaudio==2.4.0.dev20240612 \
+                torchvision==0.19.0.dev20240612 \
                 --index-url https://download.pytorch.org/whl/nightly/rocm5.7;; \
         *"rocm-6.0"*) \
-            pip uninstall -y torch \
-            && pip install --no-cache-dir --pre torch==2.4.0.dev20240612 \
+            pip uninstall -y torch torchaudio torchvision \
+            && pip install --no-cache-dir --pre \
+                torch==2.4.0.dev20240612 torchaudio==2.4.0.dev20240612 \
+                torchvision==0.19.0.dev20240612 \
                 --index-url https://download.pytorch.org/whl/nightly/rocm6.0;; \
         *"rocm-6.1"*) \
-            pip uninstall -y torch \
-            && pip install --no-cache-dir --pre torch==2.4.0.dev20240612 \
+            pip uninstall -y torch torchaudio torchvision \
+            && pip install --no-cache-dir --pre \
+                torch==2.4.0.dev20240612 torchaudio==2.4.0.dev20240612 \
+                torchvision==0.19.0.dev20240612 \
                 --index-url https://download.pytorch.org/whl/nightly/rocm6.1;; \
     *) ;; esac
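The point of pinning torch, torchaudio, and torchvision to the same nightly date is that torchvision's compiled extensions must be built against the exact torch build that ends up installed. A small post-install sanity check along these lines (a hypothetical sketch, not part of this commit) would surface a mismatched pair right after the install step instead of partway through the AMD test run:

# Hypothetical post-install check, not part of this commit: exercise a
# torchvision compiled op so an ABI mismatch with the installed torch
# nightly fails here instead of deep inside the test suite.
import torch
import torchvision
from torchvision.ops import nms

print("torch:", torch.__version__)
print("torchvision:", torchvision.__version__)

# nms is backed by torchvision's compiled extension; calling it verifies
# that the extension actually loads against this torch build.
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0]])
scores = torch.tensor([0.9, 0.8])
print("kept boxes:", nms(boxes, scores, iou_threshold=0.5))

Run inside the built image, this is where the operator-registration or undefined-symbol errors from a mismatched torch/torchvision pair usually show up.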


@@ -14,7 +14,7 @@ import torch
 from huggingface_hub import snapshot_download
 from openai import BadRequestError
 
-from ..utils import VLLM_PATH, RemoteOpenAIServer
+from ..utils import RemoteOpenAIServer
 
 # any model with a chat template should work here
 MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"
@@ -79,7 +79,7 @@ def zephyr_lora_files():
 
 @pytest.fixture(scope="module")
 def ray_ctx():
-    ray.init(runtime_env={"working_dir": VLLM_PATH})
+    ray.init()
     yield
     ray.shutdown()