[CI/Build][AMD] Skip marlin, machete, and hadacore tests since these require _C functions not defined for ROCm (#30109)

Signed-off-by: Randall Smith <ransmith@amd.com>
Co-authored-by: Randall Smith <ransmith@amd.com>
rasmith 2025-12-05 22:54:17 -06:00 committed by GitHub
parent bf4a901af9
commit 62079d8600
3 changed files with 21 additions and 0 deletions
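Each of the three test modules gains the same module-level guard. As a minimal, self-contained sketch of that pattern (assuming only that `vllm` is importable; the skip message below is generic, not the exact text from any one file):

```python
import pytest

from vllm.platforms import current_platform

# On ROCm builds, the marlin/machete/hadacore kernels are not compiled into
# the _C extension, so skip the whole test module before anything else runs.
if current_platform.is_rocm():
    # allow_module_level=True lets pytest.skip() be called at import time,
    # outside a test function; pytest then skips the entire module.
    pytest.skip(
        "These tests require _C custom ops that are not built for ROCm.",
        allow_module_level=True,
    )
```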


@@ -8,6 +8,13 @@ import torch
from compressed_tensors.transform import deterministic_hadamard_matrix
from vllm import _custom_ops as ops
from vllm.platforms import current_platform
if current_platform.is_rocm():
    pytest.skip(
        "These tests require hadacore_transform, not supported on ROCm.",
        allow_module_level=True,
    )
@pytest.mark.parametrize("batch_size", [1, 32])


@@ -23,6 +23,12 @@ from vllm.model_executor.layers.quantization.utils.quant_utils import (
from vllm.platforms import current_platform
from vllm.scalar_type import ScalarType, scalar_types
if current_platform.is_rocm():
    pytest.skip(
        "These tests require machete_prepack_B, not supported on ROCm.",
        allow_module_level=True,
    )
CUDA_DEVICES = [f"cuda:{i}" for i in range(1 if torch.cuda.device_count() == 1 else 2)]
# TODO: in future PR refactor this and `is_quant_method_supported` in the kernel


@@ -56,6 +56,14 @@ from vllm.model_executor.layers.quantization.utils.quant_utils import (
from vllm.platforms import current_platform
from vllm.scalar_type import scalar_types
if current_platform.is_rocm():
    pytest.skip(
        "These tests require gptq_marlin_repack, "
        "marlin_int4_fp8_preprocess, gptq_marlin_24_gemm, "
        "or gptq_marlin_gemm, which are not supported on ROCm.",
        allow_module_level=True,
    )
ACT_ORDER_OPTS = [False, True]
K_FULL_OPTS = [False, True]
USE_ATOMIC_ADD_OPTS = [False, True]
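A note on the approach: a module-level `pytestmark = pytest.mark.skipif(...)` would also skip every test on ROCm, but the module would still be imported and collected, so any module-level code below the imports (parametrization helpers, precomputed inputs) would still run and could touch the missing `_C` ops. A sketch of that alternative, for contrast (not what this commit uses):

```python
import pytest

from vllm.platforms import current_platform

# Declarative alternative: tests are still collected, then each one is
# individually marked as skipped on ROCm platforms.
pytestmark = pytest.mark.skipif(
    current_platform.is_rocm(),
    reason="marlin/machete/hadacore _C ops are not built for ROCm",
)
```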