[TPU] Update dynamo dump file name in compilation test (#19108)

Signed-off-by: Siyuan Liu <lsiyuan@google.com>
This commit is contained in:
Siyuan Liu 2025-06-04 13:13:43 -07:00 committed by GitHub
parent 53a5a0ce30
commit 7ee2590478
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 19 additions and 13 deletions

View File

@ -150,7 +150,7 @@ run_and_track_test 9 "test_multimodal.py" \
run_and_track_test 10 "test_pallas.py" \
"python3 -m pytest -s -v /workspace/vllm/tests/v1/tpu/test_pallas.py"
run_and_track_test 11 "test_struct_output_generate.py" \
"python3 -m pytest -s -v /workspace/vllm/tests/v1/entrypoints/llm/test_struct_output_generate.py -k 'not test_structured_output_with_reasoning_matrices'"
"python3 -m pytest -s -v /workspace/vllm/tests/v1/entrypoints/llm/test_struct_output_generate.py -k \"not test_structured_output_with_reasoning_matrices\""
run_and_track_test 12 "test_moe_pallas.py" \
"python3 -m pytest -s -v /workspace/vllm/tests/tpu/test_moe_pallas.py"
run_and_track_test 13 "test_lora.py" \

View File

@ -64,9 +64,10 @@ def test_tpu_compilation():
numbers = [int(part) for part in parts if part.isdigit()]
return numbers[0]
# Check all the compilations are as expected
# Check all the compilations are as expected. The dump files include the
# captured graph for the forward function of the nn.Module.
compiled_fns = sorted(glob.glob(
os.path.join(temp_dir, "__compiled_fn*Captured*.py")),
os.path.join(temp_dir, "__compiled_fn*Forward_graph*.py")),
key=lambda s: extract_compiled_index(s))
for i, compiled_fn in enumerate(compiled_fns):

View File

@ -370,6 +370,7 @@ def test_get_req_paddings():
assert _get_req_paddings(8, 36) == [8, 16, 32, 36]
@pytest.mark.skip(reason="Test is broken on TPU when it's added.")
def test_init_kv_cache_with_kv_sharing_invalid_target_layer_order():
layer_0 = "model.layers.0.self_attn.attn"
layer_1 = "model.layers.1.self_attn.attn"
@ -381,7 +382,7 @@ def test_init_kv_cache_with_kv_sharing_invalid_target_layer_order():
layer_0:
Attention(
num_heads=8,
head_size=64,
head_size=128,
scale=1.0,
prefix=layer_0,
kv_sharing_target_layer_name=layer_1,
@ -389,7 +390,7 @@ def test_init_kv_cache_with_kv_sharing_invalid_target_layer_order():
layer_1:
Attention(
num_heads=8,
head_size=64,
head_size=128,
scale=1.0,
prefix=layer_1,
)
@ -398,6 +399,7 @@ def test_init_kv_cache_with_kv_sharing_invalid_target_layer_order():
assert fwd_context is not None
@pytest.mark.skip(reason="Test is broken on TPU when it's added.")
def test_init_kv_cache_with_kv_sharing_target_layer_not_exist():
layer_0 = "model.layers.0.self_attn.attn"
layer_1 = "model.layers.1.self_attn.attn"
@ -408,14 +410,14 @@ def test_init_kv_cache_with_kv_sharing_target_layer_not_exist():
layer_0:
Attention(
num_heads=8,
head_size=64,
head_size=128,
scale=1.0,
prefix=layer_0,
),
layer_1:
Attention(
num_heads=8,
head_size=64,
head_size=128,
scale=1.0,
prefix=layer_1,
# invalid layer: cross_attn.atn doesn't exist!
@ -426,6 +428,7 @@ def test_init_kv_cache_with_kv_sharing_target_layer_not_exist():
assert fwd_context is not None
@pytest.mark.skip(reason="Test is broken on TPU when it's added.")
def test_init_kv_cache_with_kv_sharing_target_same_as_current():
layer_0 = "model.layers.0.self_attn.attn"
layer_1 = "model.layers.1.self_attn.attn"
@ -437,14 +440,14 @@ def test_init_kv_cache_with_kv_sharing_target_same_as_current():
layer_0:
Attention(
num_heads=8,
head_size=64,
head_size=128,
scale=1.0,
prefix=layer_0,
),
layer_1:
Attention(
num_heads=8,
head_size=64,
head_size=128,
scale=1.0,
prefix=layer_1,
kv_sharing_target_layer_name=layer_1,
@ -454,6 +457,7 @@ def test_init_kv_cache_with_kv_sharing_target_same_as_current():
assert fwd_context is not None
@pytest.mark.skip(reason="Test is broken on TPU when it's added.")
def test_init_kv_cache_without_kv_sharing(model_runner):
layer_0 = "model.layers.0.self_attn.attn"
layer_1 = "model.layers.1.self_attn.attn"
@ -463,14 +467,14 @@ def test_init_kv_cache_without_kv_sharing(model_runner):
layer_0:
Attention(
num_heads=8,
head_size=64,
head_size=128,
scale=1.0,
prefix=layer_0,
),
layer_1:
Attention(
num_heads=8,
head_size=64,
head_size=128,
scale=1.0,
prefix=layer_1,
)
@ -520,6 +524,7 @@ def test_init_kv_cache_without_kv_sharing(model_runner):
assert kv_cache_config.kv_cache_groups[0].layer_names[1] == layer_1
@pytest.mark.skip(reason="Test is broken on TPU when it's added.")
def test_init_kv_cache_with_kv_sharing_valid(model_runner):
layer_0 = "model.layers.0.self_attn.attn"
layer_1 = "model.layers.1.self_attn.attn"
@ -529,14 +534,14 @@ def test_init_kv_cache_with_kv_sharing_valid(model_runner):
layer_0:
Attention(
num_heads=8,
head_size=64,
head_size=128,
scale=1.0,
prefix=layer_0,
),
layer_1:
Attention(
num_heads=8,
head_size=64,
head_size=128,
scale=1.0,
prefix=layer_1,
kv_sharing_target_layer_name="model.layers.0.self_attn.attn",