[CI/Build] Fix model nightly tests (#26466)

Signed-off-by: DarkLight1337 <tlleungac@connect.ust.hk>

commit 0f29dca988
parent d24cf322e1

@@ -100,7 +100,7 @@ AITER_MODEL_LIST = [
             "allenai/OLMoE-1B-7B-0924-Instruct",
             marks=[pytest.mark.cpu_model],
         ),
-        pytest.param("swiss-ai/Apertus-8B-2509"),  # apertus
+        pytest.param("swiss-ai/Apertus-8B-Instruct-2509"),  # apertus
     ],
 )
 @pytest.mark.parametrize("max_tokens", [32])
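
Note: the hunk above repoints the pinned Apertus entry at swiss-ai/Apertus-8B-Instruct-2509, presumably because the bare -8B-2509 ID no longer matches the published Hub checkpoint. As a minimal sketch of how such a pytest.param list drives a parametrized model test (the test body and harness below are illustrative, not vLLM's actual code):

import pytest

MODELS = [
    pytest.param(
        "allenai/OLMoE-1B-7B-0924-Instruct",
        marks=[pytest.mark.cpu_model],  # selected only in CPU-model runs
    ),
    pytest.param("swiss-ai/Apertus-8B-Instruct-2509"),  # apertus
]

@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("max_tokens", [32])
def test_models(model: str, max_tokens: int) -> None:
    # The real test generates with both vLLM and HF Transformers and
    # compares the outputs; here we only exercise the parametrization.
    assert model and max_tokens > 0
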
@@ -67,4 +67,4 @@ def test_modernbert_models(
     for hf_output, vllm_output in zip(hf_outputs, vllm_outputs):
         hf_output = torch.tensor(hf_output).cpu().float()
         vllm_output = torch.tensor(vllm_output).cpu().float()
-        assert torch.allclose(hf_output, vllm_output, 1e-2)
+        assert torch.allclose(hf_output, vllm_output, atol=1e-2)
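
Why the keyword matters: torch.allclose's signature is allclose(input, other, rtol=1e-05, atol=1e-08, equal_nan=False), so the bare 1e-2 was being taken as rtol while the absolute tolerance stayed at its tight 1e-8 default. A quick demonstration:

import torch

# torch.allclose checks |input - other| <= atol + rtol * |other| elementwise.
a = torch.tensor([0.000, 1.000])
b = torch.tensor([0.005, 1.002])

print(torch.allclose(a, b, 1e-2))       # False: the third positional arg is
                                        # rtol, so the near-zero element fails
                                        # against atol's 1e-8 default
print(torch.allclose(a, b, atol=1e-2))  # True: the tolerance the test intends
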
@@ -749,6 +749,7 @@ VLM_TEST_SETTINGS = {
         max_num_seqs=2,
         auto_cls=AutoModelForImageTextToText,
         hf_output_post_proc=model_utils.smolvlm_trunc_hf_output,
+        num_logprobs=10,
     ),
     "tarsier": VLMTestInfo(
         models=["omni-research/Tarsier-7b"],
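
The added num_logprobs=10 widens the candidate window used when comparing HF and vLLM generations, making the nightly run less sensitive to benign numeric drift. A hedged sketch of the idea behind that comparison (a simplified re-implementation, not vLLM's check_logprobs_close):

# Two runs "match" if, at the first token where the sampled IDs diverge,
# each side's token still appears in the other side's top-N logprobs.
# Raising N (num_logprobs=10) tolerates more divergence.
def logprobs_close(tokens_a, logprobs_a, tokens_b, logprobs_b) -> bool:
    for tok_a, lp_a, tok_b, lp_b in zip(tokens_a, logprobs_a, tokens_b, logprobs_b):
        if tok_a != tok_b:
            # Divergence is acceptable only within the top-N window.
            return tok_a in lp_b and tok_b in lp_a
    return True

# Each position carries a dict of the top-N candidate tokens -> logprob.
assert logprobs_close(
    [5, 7], [{5: -0.1, 7: -2.3}, {7: -0.2, 9: -2.0}],
    [5, 9], [{5: -0.1, 9: -2.4}, {9: -0.3, 7: -1.9}],
)
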
@@ -45,14 +45,16 @@ def _run_test(
 
         all_outputs = []
         for inputs in all_inputs:
+            inputs = hf_model.wrap_device(inputs)
+
             if "pixel_values" in inputs:
-                inputs.pop("input_ids")
                 pooled_output = hf_model.model.get_image_features(
-                    **hf_model.wrap_device(inputs)
+                    pixel_values=inputs.pixel_values,
                 ).squeeze(0)
             else:
                 pooled_output = hf_model.model.get_text_features(
-                    **hf_model.wrap_device(inputs)
+                    input_ids=inputs.input_ids,
+                    attention_mask=inputs.attention_mask,
                 ).squeeze(0)
 
             all_outputs.append(pooled_output.tolist())
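
Context for the hunk above: splatting the full processor batch into get_image_features/get_text_features breaks once those methods stop accepting extra keys (hence the old inputs.pop("input_ids") workaround), so the fix moves device placement up one level and passes each method exactly the tensors it expects. A self-contained sketch of the failure mode, with an illustrative FakeModel standing in for the HF model:

import torch

# Illustrative stand-in: each feature method accepts only its own tensors,
# as in recent Transformers releases.
class FakeModel:
    def get_image_features(self, pixel_values):
        return pixel_values.mean(dim=(1, 2, 3))

    def get_text_features(self, input_ids, attention_mask):
        return (input_ids * attention_mask).float().mean(dim=1)

model = FakeModel()
inputs = {
    "pixel_values": torch.rand(1, 3, 8, 8),
    "input_ids": torch.ones(1, 4, dtype=torch.long),
    "attention_mask": torch.ones(1, 4, dtype=torch.long),
}

try:
    model.get_image_features(**inputs)  # stray text keys -> TypeError
except TypeError as exc:
    print(f"splatting the whole batch fails: {exc}")

# The fix: select exactly the tensors the method expects.
image_feats = model.get_image_features(pixel_values=inputs["pixel_values"])
print(image_feats.shape)  # torch.Size([1])
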
@@ -172,9 +172,8 @@ class _HfExamplesInfo:
 _TEXT_GENERATION_EXAMPLE_MODELS = {
     # [Decoder-only]
     "ApertusForCausalLM": _HfExamplesInfo(
-        "swiss-ai/Apertus-8B-2509",
+        "swiss-ai/Apertus-8B-Instruct-2509",
         min_transformers_version="4.56.0",
-        trust_remote_code=True,
     ),
     "AquilaModel": _HfExamplesInfo("BAAI/AquilaChat-7B", trust_remote_code=True),
     "AquilaForCausalLM": _HfExamplesInfo("BAAI/AquilaChat2-7B", trust_remote_code=True),
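
For reference, a hedged sketch of what an _HfExamplesInfo-style registry entry encodes; the field names mirror the diff, but the class body is a guess rather than vLLM's actual definition. The Instruct repo evidently ships native Transformers support, so trust_remote_code is dropped:

from dataclasses import dataclass

@dataclass(frozen=True)
class HfExampleInfo:
    default: str                           # HF Hub repo exercised by the test
    min_transformers_version: str | None = None
    trust_remote_code: bool = False        # no longer needed for Apertus

EXAMPLE_MODELS = {
    "ApertusForCausalLM": HfExampleInfo(
        "swiss-ai/Apertus-8B-Instruct-2509",
        min_transformers_version="4.56.0",
    ),
    "AquilaModel": HfExampleInfo("BAAI/AquilaChat-7B", trust_remote_code=True),
}
print(EXAMPLE_MODELS["ApertusForCausalLM"].trust_remote_code)  # False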