From cec8c7d7f8753d13737427ceb5cebe987f5f0549 Mon Sep 17 00:00:00 2001
From: "Jason (Siyu) Zhu"
Date: Thu, 27 Mar 2025 20:27:20 -0700
Subject: [PATCH] Refactor error handling for multiple exceptions in
 preprocessing (#15650)

Signed-off-by: JasonZhu1313
---
 vllm/entrypoints/openai/serving_chat.py         | 12 ++----------
 vllm/entrypoints/openai/serving_embedding.py    |  5 +----
 vllm/entrypoints/openai/serving_pooling.py      |  8 +-------
 vllm/entrypoints/openai/serving_tokenization.py |  8 +-------
 4 files changed, 5 insertions(+), 28 deletions(-)

diff --git a/vllm/entrypoints/openai/serving_chat.py b/vllm/entrypoints/openai/serving_chat.py
index 3c35a848ea3a5..3102db4050f5b 100644
--- a/vllm/entrypoints/openai/serving_chat.py
+++ b/vllm/entrypoints/openai/serving_chat.py
@@ -197,16 +197,8 @@ class OpenAIServingChat(OpenAIServing):
                 truncate_prompt_tokens=request.truncate_prompt_tokens,
                 add_special_tokens=request.add_special_tokens,
             )
-        except ValueError as e:
-            logger.exception("Error in preprocessing prompt inputs")
-            return self.create_error_response(str(e))
-        except TypeError as e:
-            logger.exception("Error in preprocessing prompt inputs")
-            return self.create_error_response(str(e))
-        except RuntimeError as e:
-            logger.exception("Error in preprocessing prompt inputs")
-            return self.create_error_response(str(e))
-        except jinja2.TemplateError as e:
+        except (ValueError, TypeError, RuntimeError,
+                jinja2.TemplateError) as e:
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))
 
diff --git a/vllm/entrypoints/openai/serving_embedding.py b/vllm/entrypoints/openai/serving_embedding.py
index 1c2c78aaf8926..0ee58672631d0 100644
--- a/vllm/entrypoints/openai/serving_embedding.py
+++ b/vllm/entrypoints/openai/serving_embedding.py
@@ -139,10 +139,7 @@ class OpenAIServingEmbedding(OpenAIServing):
                 truncate_prompt_tokens=truncate_prompt_tokens,
                 add_special_tokens=request.add_special_tokens,
             )
-        except ValueError as e:
-            logger.exception("Error in preprocessing prompt inputs")
-            return self.create_error_response(str(e))
-        except TypeError as e:
+        except (ValueError, TypeError) as e:
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))
 
diff --git a/vllm/entrypoints/openai/serving_pooling.py b/vllm/entrypoints/openai/serving_pooling.py
index 894128ee974cd..779a3eded2c16 100644
--- a/vllm/entrypoints/openai/serving_pooling.py
+++ b/vllm/entrypoints/openai/serving_pooling.py
@@ -136,13 +136,7 @@ class OpenAIServingPooling(OpenAIServing):
                 truncate_prompt_tokens=truncate_prompt_tokens,
                 add_special_tokens=request.add_special_tokens,
             )
-        except ValueError as e:
-            logger.exception("Error in preprocessing prompt inputs")
-            return self.create_error_response(str(e))
-        except TypeError as e:
-            logger.exception("Error in preprocessing prompt inputs")
-            return self.create_error_response(str(e))
-        except jinja2.TemplateError as e:
+        except (ValueError, TypeError, jinja2.TemplateError) as e:
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))
 
diff --git a/vllm/entrypoints/openai/serving_tokenization.py b/vllm/entrypoints/openai/serving_tokenization.py
index 90c0da2a24d51..c642fc51005ea 100644
--- a/vllm/entrypoints/openai/serving_tokenization.py
+++ b/vllm/entrypoints/openai/serving_tokenization.py
@@ -89,13 +89,7 @@ class OpenAIServingTokenization(OpenAIServing):
                 request.prompt,
                 add_special_tokens=request.add_special_tokens,
             )
-        except ValueError as e:
-            logger.exception("Error in preprocessing prompt inputs")
-            return self.create_error_response(str(e))
-        except TypeError as e:
-            logger.exception("Error in preprocessing prompt inputs")
-            return self.create_error_response(str(e))
-        except jinja2.TemplateError as e:
+        except (ValueError, TypeError, jinja2.TemplateError) as e:
             logger.exception("Error in preprocessing prompt inputs")
             return self.create_error_response(str(e))