[Refactor] [3/N] Move tool parser tests and run on CPU (#30693)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>
This commit is contained in:
Cyrus Leung 2025-12-15 21:45:36 +08:00 committed by GitHub
parent 2a1776b7ac
commit ed586e7724
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
16 changed files with 14 additions and 53 deletions

View File

@@ -61,8 +61,8 @@ steps:
- pytest -v -s -m 'not cpu_test' multimodal
- pytest -v -s utils_
- label: Async Engine, Inputs, Utils, Worker, Config Test (CPU) # 15min
timeout_in_minutes: 20
- label: Async Engine, Inputs, Utils, Worker, Config Test (CPU) # 20min
timeout_in_minutes: 30
mirror_hardwares: [amdexperimental, amdproduction, amdtentative]
agent_pool: mi325_1
grade: Blocking
@@ -73,6 +73,7 @@ steps:
- tests/multimodal
- tests/standalone_tests/lazy_imports.py
- tests/tokenizers_
- tests/tool_parsers
- tests/transformers_utils
- tests/config
no_gpu: true
@@ -82,6 +83,7 @@ steps:
- pytest -v -s test_outputs.py
- pytest -v -s -m 'cpu_test' multimodal
- pytest -v -s tokenizers_
- pytest -v -s tool_parsers
- pytest -v -s transformers_utils
- pytest -v -s config
@@ -759,19 +761,7 @@ steps:
- vllm/
- tests/tool_use
commands:
- pytest -v -s -m 'not cpu_test' tool_use
- label: OpenAI-Compatible Tool Use (CPU) # 5 mins
mirror_hardwares: [amdexperimental, amdproduction]
agent_pool: mi325_1
# grade: Blocking
timeout_in_minutes: 10
source_file_dependencies:
- vllm/
- tests/tool_use
no_gpu: true
commands:
- pytest -v -s -m 'cpu_test' tool_use
- pytest -v -s tool_use
##### models test #####

View File

@@ -57,8 +57,8 @@ steps:
- pytest -v -s -m 'not cpu_test' multimodal
- pytest -v -s utils_
- label: Async Engine, Inputs, Utils, Worker, Config Test (CPU) # 15min
timeout_in_minutes: 20
- label: Async Engine, Inputs, Utils, Worker, Config Test (CPU) # 20min
timeout_in_minutes: 30
source_file_dependencies:
- vllm/
- tests/test_inputs.py
@@ -66,6 +66,7 @@ steps:
- tests/multimodal
- tests/standalone_tests/lazy_imports.py
- tests/tokenizers_
- tests/tool_parsers
- tests/transformers_utils
- tests/config
no_gpu: true
@@ -75,6 +76,7 @@ steps:
- pytest -v -s test_outputs.py
- pytest -v -s -m 'cpu_test' multimodal
- pytest -v -s tokenizers_
- pytest -v -s tool_parsers
- pytest -v -s transformers_utils
- pytest -v -s config
@@ -672,16 +674,7 @@ steps:
- vllm/
- tests/tool_use
commands:
- pytest -v -s -m 'not cpu_test' tool_use
- label: OpenAI-Compatible Tool Use (CPU) # 5 mins
timeout_in_minutes: 10
source_file_dependencies:
- vllm/
- tests/tool_use
no_gpu: true
commands:
- pytest -v -s -m 'cpu_test' tool_use
- pytest -v -s tool_use
##### models test #####

View File

@@ -115,7 +115,7 @@ steps:
- label: Async Engine, Inputs, Utils, Worker, Config (CPU)
depends_on: ~
timeout_in_minutes: 20
timeout_in_minutes: 30
source_file_dependencies:
- vllm/
- tests/test_inputs.py
@@ -123,6 +123,7 @@ steps:
- tests/multimodal
- tests/standalone_tests/lazy_imports.py
- tests/tokenizers_
- tests/tool_parsers
- tests/transformers_utils
- tests/config
no_gpu: true
@@ -132,6 +133,7 @@ steps:
- pytest -v -s test_outputs.py
- pytest -v -s -m 'cpu_test' multimodal
- pytest -v -s tokenizers_
- pytest -v -s tool_parsers
- pytest -v -s transformers_utils
- pytest -v -s config

View File

@@ -10,14 +10,4 @@ steps:
- vllm/
- tests/tool_use
commands:
- pytest -v -s -m 'not cpu_test' tool_use
- label: OpenAI-Compatible Tool Use (CPU)
depends_on: ~
timeout_in_minutes: 10
source_file_dependencies:
- vllm/
- tests/tool_use
no_gpu: true
commands:
- pytest -v -s -m 'cpu_test' tool_use
- pytest -v -s tool_use

View File

View File

@@ -12,8 +12,6 @@ from vllm.tool_parsers.glm4_moe_tool_parser import (
Glm4MoeModelToolParser,
)
pytestmark = pytest.mark.cpu_test
pytest.skip("skip glm4_moe parser test", allow_module_level=True)
# Use a common model that is likely to be available
MODEL = "zai-org/GLM-4.5"

View File

@@ -13,8 +13,6 @@ from vllm.tokenizers import TokenizerLike, get_tokenizer
from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
from vllm.tool_parsers.jamba_tool_parser import JambaToolParser
pytestmark = pytest.mark.cpu_test
MODEL = "ai21labs/Jamba-tiny-dev"

View File

@@ -10,8 +10,6 @@ from vllm.entrypoints.openai.protocol import FunctionCall, ToolCall
from vllm.tokenizers import get_tokenizer
from vllm.tool_parsers.kimi_k2_tool_parser import KimiK2ToolParser
pytestmark = pytest.mark.cpu_test
# Use a common model that is likely to be available
MODEL = "moonshotai/Kimi-K2-Instruct"

View File

@@ -15,8 +15,6 @@ from vllm.entrypoints.openai.protocol import (
from vllm.tokenizers import get_tokenizer
from vllm.tool_parsers.minimax_tool_parser import MinimaxToolParser
pytestmark = pytest.mark.cpu_test
# Use a common model that is likely to be available
MODEL = "MiniMaxAi/MiniMax-M1-40k"

View File

@@ -20,8 +20,6 @@ from vllm.tool_parsers.qwen3coder_tool_parser import (
)
from vllm.tool_parsers.qwen3xml_tool_parser import Qwen3XMLToolParser
pytestmark = pytest.mark.cpu_test
MODEL = "Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8"

View File

@@ -18,8 +18,6 @@ from vllm.tokenizers import TokenizerLike, get_tokenizer
from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
from vllm.tool_parsers.seed_oss_tool_parser import SeedOssToolParser
pytestmark = pytest.mark.cpu_test
# Use a common model that is likely to be available
MODEL = "ByteDance-Seed/Seed-OSS-36B-Instruct"

View File

@@ -16,8 +16,6 @@ from vllm.tokenizers import TokenizerLike, get_tokenizer
from vllm.tokenizers.detokenizer_utils import detokenize_incrementally
from vllm.tool_parsers.xlam_tool_parser import xLAMToolParser
pytestmark = pytest.mark.cpu_test
# Use a common model that is likely to be available
MODEL = "Salesforce/Llama-xLAM-2-8B-fc-r"