[CI/Build][Bugfix] Fix Quantized Models Test on AMD (#27712)

Signed-off-by: zhewenli <zhewenli@meta.com>
This commit is contained in:
Zhewen Li 2025-10-28 23:27:30 -07:00 committed by GitHub
parent a4a4f0f617
commit 83fd49b1fc
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 8 additions and 2 deletions

View File

@ -908,7 +908,7 @@ steps:
- label: Quantized Models Test # 45 min
timeout_in_minutes: 60
mirror_hardwares: [amdexperimental]
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
# grade: Blocking
source_file_dependencies:

View File

@ -9,10 +9,16 @@ import pytest
from transformers import BitsAndBytesConfig
from tests.quantization.utils import is_quant_method_supported
from vllm.platforms import current_platform
from ...utils import compare_two_settings, multi_gpu_test
from ..utils import check_embeddings_close, check_logprobs_close
pytestmark = pytest.mark.skipif(
current_platform.is_rocm(),
reason="bitsandbytes quantization not supported on ROCm (CUDA-only kernels)",
)
models_4bit_to_test = [
("facebook/opt-125m", "quantize opt model inflight"),
(

View File

@ -413,7 +413,7 @@ class RocmPlatform(Platform):
"Using AWQ quantization with ROCm, but VLLM_USE_TRITON_AWQ"
" is not set, enabling VLLM_USE_TRITON_AWQ."
)
envs.VLLM_USE_TRITON_AWQ = True
os.environ["VLLM_USE_TRITON_AWQ"] = "1"
@classmethod
def get_punica_wrapper(cls) -> str: