[Quantization] Skip Fp4 Test for compressed-tensors (#19217)
This commit is contained in:
parent 9ef9173cfa
commit aa49f14832
@@ -651,6 +651,7 @@ def test_compressed_tensors_2of4_sparse_compressed(vllm_runner, args_2of4):
         assert output
 
 
+@pytest.mark.skip(reason="Skip until the model config is updated")
 def test_compressed_tensors_nvfp4a16(vllm_runner):
     # run weight only example
     model = "nm-testing/TinyLlama-1.1B-Chat-v1.0-FP4"
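
For reference, below is a minimal sketch of how the skipped NVFP4A16 test plausibly reads in full. Only the decorator and the first two lines of the body appear in this hunk; the with-block, the generate_greedy call, and the final assert are assumptions modeled on the neighboring test_compressed_tensors_2of4_sparse_compressed test (whose "assert output" context line is visible above), not part of this diff.

import pytest

# Sketch only: everything below the `model = ...` line is assumed, not shown in this commit.
@pytest.mark.skip(reason="Skip until the model config is updated")
def test_compressed_tensors_nvfp4a16(vllm_runner):
    # run weight only example
    model = "nm-testing/TinyLlama-1.1B-Chat-v1.0-FP4"
    with vllm_runner(model) as llm:
        # Run a short greedy generation; the test only checks that inference produces output.
        output = llm.generate_greedy(["Hello my name is"], max_tokens=20)
        assert output

With pytest.mark.skip the test is still collected but reported as skipped with the given reason, so it remains visible in the test report until the model config is updated and the marker can be removed.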