[CI Bugfix] Make sure TRTLLM attention is available in test_blackwell_moe (#26188)

Signed-off-by: mgoin <mgoin64@gmail.com>
Signed-off-by: Michael Goin <mgoin64@gmail.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Author:	Michael Goin <mgoin64@gmail.com>
Date:	2025-10-06 13:50:11 -04:00 (committed by GitHub)
Parent:	6431be808f
Commit:	20db99cc69

@@ -15,7 +15,15 @@ if not current_platform.is_device_capability(100):
         "This test only runs on Blackwell GPUs (SM100).", allow_module_level=True
     )
 
-os.environ["FLASHINFER_NVCC_THREADS"] = "16"
+
+@pytest.fixture(scope="module", autouse=True)
+def set_test_environment():
+    """Sets environment variables required for this test module."""
+    # Make sure TRTLLM attention is available
+    os.environ["VLLM_HAS_FLASHINFER_CUBIN"] = "1"
+    # Set compilation threads to 16 to speed up startup
+    os.environ["FLASHINFER_NVCC_THREADS"] = "16"
+
 
 # dummy_hf_overrides = {"num_layers": 4, "num_hidden_layers": 4,
 #                       "text_config": {"num_layers": 4, "num_hidden_layers": 4}}