From 2f5c14de6a7e281a86ab5e2376de95f7021dff0a Mon Sep 17 00:00:00 2001
From: youkaichao
Date: Wed, 23 Jul 2025 15:03:16 +0800
Subject: [PATCH] add clear messages for deprecated models (#21424)

Signed-off-by: youkaichao
---
 vllm/model_executor/model_loader/utils.py | 11 ++++++++++-
 vllm/model_executor/models/registry.py    |  2 ++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/vllm/model_executor/model_loader/utils.py b/vllm/model_executor/model_loader/utils.py
index 42c5512905f2..4b30336f0132 100644
--- a/vllm/model_executor/model_loader/utils.py
+++ b/vllm/model_executor/model_loader/utils.py
@@ -25,7 +25,8 @@ from vllm.model_executor.models.adapters import (as_embedding_model,
                                                  as_reward_model,
                                                  as_seq_cls_model)
 from vllm.model_executor.models.interfaces import SupportsQuant
-from vllm.model_executor.models.registry import _TRANSFORMERS_MODELS
+from vllm.model_executor.models.registry import (_PREVIOUSLY_SUPPORTED_MODELS,
+                                                 _TRANSFORMERS_MODELS)
 from vllm.utils import is_pin_memory_available
 
 logger = init_logger(__name__)
@@ -261,6 +262,14 @@ def get_model_architecture(
                 vllm_not_supported = False
                 break
 
+    if any(arch in _PREVIOUSLY_SUPPORTED_MODELS for arch in architectures):
+        previous_version = _PREVIOUSLY_SUPPORTED_MODELS[architectures[0]]
+        raise ValueError(
+            f"Model architecture {architectures[0]} was supported"
+            f" in vLLM until version {previous_version}, and is "
+            "not supported anymore. Please use an older version"
+            " of vLLM if you want to use this model architecture.")
+
     if (model_config.model_impl == ModelImpl.TRANSFORMERS
             or model_config.model_impl == ModelImpl.AUTO and vllm_not_supported):
         architectures = resolve_transformers_arch(model_config, architectures)
diff --git a/vllm/model_executor/models/registry.py b/vllm/model_executor/models/registry.py
index 9d88b5fe82cf..100532943c2b 100644
--- a/vllm/model_executor/models/registry.py
+++ b/vllm/model_executor/models/registry.py
@@ -276,6 +276,8 @@ _SUBPROCESS_COMMAND = [
     sys.executable, "-m", "vllm.model_executor.models.registry"
 ]
 
+_PREVIOUSLY_SUPPORTED_MODELS = {"Phi3SmallForCausalLM": "0.9.2"}
+
 
 @dataclass(frozen=True)
 class _ModelInfo: