From a931b4cdcf70c45d7da6945e0ffca3c372808e20 Mon Sep 17 00:00:00 2001
From: Harry Mellor <19981378+hmellor@users.noreply.github.com>
Date: Wed, 16 Jul 2025 17:25:23 +0100
Subject: [PATCH] Remove Qwen Omni workaround that's no longer necessary (#21057)

Signed-off-by: Harry Mellor <19981378+hmellor@users.noreply.github.com>
---
 vllm/transformers_utils/config.py | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/vllm/transformers_utils/config.py b/vllm/transformers_utils/config.py
index db8f675bcc5ee..dc35d212766c3 100644
--- a/vllm/transformers_utils/config.py
+++ b/vllm/transformers_utils/config.py
@@ -733,13 +733,6 @@ def get_hf_text_config(config: PretrainedConfig):
     """Get the "sub" config relevant to llm for multi modal models.
     No op for pure text models.
     """
-    # This block should be unnecessary after https://github.com/huggingface/transformers/pull/37517
-    if hasattr(config, "thinker_config"):
-        # TODO(suyang.fy): Refactor code.
-        # For Qwen2.5-Omni, change hf_text_config to
-        # thinker_config.text_config.
-        return config.thinker_config.text_config
-
     text_config = config.get_text_config()
     if text_config is not config:
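
A minimal sketch of the behavior this removal relies on: per the deleted comment, once transformers includes https://github.com/huggingface/transformers/pull/37517, `config.get_text_config()` is expected to resolve the nested `thinker_config.text_config` for Qwen2.5-Omni on its own, so vLLM's generic path below the removed block is sufficient. The model ID used here is only an illustration and assumes a transformers version that contains that change.

```python
# Minimal sketch (assumption: the installed transformers includes
# https://github.com/huggingface/transformers/pull/37517, so
# get_text_config() can resolve nested sub-configs such as
# Qwen2.5-Omni's thinker_config).
from transformers import AutoConfig

# Illustrative checkpoint; any Qwen2.5-Omni config with a
# thinker_config in its config.json would behave the same way.
config = AutoConfig.from_pretrained("Qwen/Qwen2.5-Omni-7B")

# The removed vLLM branch returned config.thinker_config.text_config
# by hand; with the upstream change, the generic call is expected to
# yield that same nested text config.
text_config = config.get_text_config()
print(type(text_config).__name__)
```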