From cb528d0585c0a2a876dfc3813c7fe6092a2549ae Mon Sep 17 00:00:00 2001
From: Aaron Pham
Date: Tue, 13 May 2025 06:04:10 -0400
Subject: [PATCH] [Fix] check to make sure processor has chat templates
 (#18047)

Signed-off-by: Aaron Pham
---
 tests/compile/test_pass_manager.py | 2 +-
 vllm/compilation/inductor_pass.py  | 2 +-
 vllm/entrypoints/chat_utils.py     | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/compile/test_pass_manager.py b/tests/compile/test_pass_manager.py
index 673ebe8b6fdc0..b630d0e85d31a 100644
--- a/tests/compile/test_pass_manager.py
+++ b/tests/compile/test_pass_manager.py
@@ -22,7 +22,7 @@ def test_bad_callable():
     pass_manager.configure(config)
 
     with pytest.raises(AssertionError):
-        pass_manager.add(simple_callable)  # noqa, type wrong on purpose
+        pass_manager.add(simple_callable)
 
 
 # Pass that inherits from InductorPass
diff --git a/vllm/compilation/inductor_pass.py b/vllm/compilation/inductor_pass.py
index 6cd7720fca2f9..4f5c827768394 100644
--- a/vllm/compilation/inductor_pass.py
+++ b/vllm/compilation/inductor_pass.py
@@ -16,7 +16,7 @@
 if is_torch_equal_or_newer("2.6"):
     from torch._inductor.custom_graph_pass import CustomGraphPass
 else:
     # CustomGraphPass is not present in 2.5 or lower, import our version
-    from .torch25_custom_graph_pass import (  # noqa: yapf
+    from .torch25_custom_graph_pass import (  # noqa: E501
         Torch25CustomGraphPass as CustomGraphPass)
 
 _pass_context = None
diff --git a/vllm/entrypoints/chat_utils.py b/vllm/entrypoints/chat_utils.py
index 4ff8821fca542..183b5bf683113 100644
--- a/vllm/entrypoints/chat_utils.py
+++ b/vllm/entrypoints/chat_utils.py
@@ -349,11 +349,11 @@ def resolve_hf_chat_template(
             trust_remote_code=model_config.trust_remote_code,
         )
         if isinstance(processor, ProcessorMixin) and \
+                hasattr(processor, 'chat_template') and \
                 processor.chat_template is not None:
             return processor.chat_template
     except Exception:
-        logger.debug("Failed to load AutoProcessor chat template for %s",
-                     tokenizer.name_or_path, exc_info=True)
+        logger.debug("Failed to load AutoProcessor chat template for %s", tokenizer.name_or_path, exc_info=True)  # noqa: E501
 
     # 3rd priority: AutoTokenizer chat template
     try: