# SPDX-License-Identifier: Apache-2.0
# SPDX-FileCopyrightText: Copyright contributors to the vLLM project

import pytest

from vllm.attention.backends.registry import AttentionBackendEnum
from vllm.config.multimodal import MultiModalConfig


def test_mm_encoder_attn_backend_str_conversion():
    # A backend passed as a string should be converted to the enum member.
    config = MultiModalConfig(mm_encoder_attn_backend="FLASH_ATTN")
    assert config.mm_encoder_attn_backend == AttentionBackendEnum.FLASH_ATTN


def test_mm_encoder_attn_backend_invalid():
    # An unrecognized backend name should be rejected at construction time.
    with pytest.raises(ValueError):
        MultiModalConfig(mm_encoder_attn_backend="not_a_backend")


def test_mm_encoder_attn_backend_hash_updates():
    # Overriding the encoder attention backend must change the config hash,
    # so anything keyed on the hash is not wrongly reused across backends.
    base_hash = MultiModalConfig().compute_hash()
    overridden_hash = MultiModalConfig(
        mm_encoder_attn_backend=AttentionBackendEnum.FLASH_ATTN
    ).compute_hash()
    assert base_hash != overridden_hash
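

# A possible extension (a sketch, not part of the original file): parametrize
# the invalid-input check over several malformed names. It assumes only what
# the test above already shows, namely that MultiModalConfig raises ValueError
# for an unrecognized backend string; the extra sample values are hypothetical.
@pytest.mark.parametrize("bad_name", ["not_a_backend", "", "flash-attn?"])
def test_mm_encoder_attn_backend_invalid_parametrized(bad_name):
    with pytest.raises(ValueError):
        MultiModalConfig(mm_encoder_attn_backend=bad_name)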