From 2e610deb72dfc1e34b904d9b6b02c85eefa451d2 Mon Sep 17 00:00:00 2001
From: Jee Jee Li
Date: Mon, 7 Jul 2025 13:10:41 +0800
Subject: [PATCH] [CI/Build] Enable phi2 lora test (#20540)

Signed-off-by: Jee Jee Li
---
 tests/lora/test_phi.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/tests/lora/test_phi.py b/tests/lora/test_phi.py
index 9d75512a248b..3090941e6367 100644
--- a/tests/lora/test_phi.py
+++ b/tests/lora/test_phi.py
@@ -1,8 +1,6 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
 
-import pytest
-
 import vllm
 from vllm.lora.request import LoRARequest
 
@@ -49,9 +47,6 @@ def do_sample(llm: vllm.LLM, lora_path: str, lora_id: int) -> list[str]:
     return generated_texts
 
 
-# Skipping for V1 for now as we are hitting,
-# "Head size 80 is not supported by FlashAttention." error.
-@pytest.mark.skip(reason="Head size 80 is not supported by FlashAttention")
 def test_phi2_lora(phi2_lora_files):
     # We enable enforce_eager=True here to reduce VRAM usage for lora-test CI,
     # Otherwise, the lora-test will fail due to CUDA OOM.
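
For context, the re-enabled test exercises vLLM's LoRA path for Phi-2 with enforce_eager set to keep CI VRAM usage low. Below is a minimal sketch of that usage pattern, not the actual test body; the base model name, LoRA rank, prompt, sampling settings, and adapter path are illustrative assumptions (the real adapter path comes from the phi2_lora_files fixture).

import vllm
from vllm.lora.request import LoRARequest

# Sketch only: assumed base model and LoRA settings for illustration.
llm = vllm.LLM(
    model="microsoft/phi-2",   # assumed base model for the phi2 LoRA test
    enable_lora=True,
    enforce_eager=True,        # mirrors the CI comment: reduces VRAM usage
    max_loras=2,
    max_lora_rank=16,          # assumed rank; the real adapter may differ
)

prompts = ["Write a short story about a robot."]  # illustrative prompt
sampling_params = vllm.SamplingParams(temperature=0, max_tokens=64)

outputs = llm.generate(
    prompts,
    sampling_params,
    # A positive lora_int_id selects the adapter; the path is a placeholder
    # standing in for the phi2_lora_files fixture value.
    lora_request=LoRARequest("phi2-lora", 1, "/path/to/phi2_lora_files"),
)
for out in outputs:
    print(out.outputs[0].text)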