From 4492e3a55428e161ca8db381edc28263e5da4c8d Mon Sep 17 00:00:00 2001 From: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Date: Wed, 24 Sep 2025 21:52:52 -0400 Subject: [PATCH] [Bug] Dynamo Unsupported due to `BasevLLMParameter.__torch_function__` calling disabled super() (#25613) Signed-off-by: yewentao256 Signed-off-by: Wentao Ye <44945378+yewentao256@users.noreply.github.com> Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com> --- vllm/model_executor/parameter.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/vllm/model_executor/parameter.py b/vllm/model_executor/parameter.py index 66add98dab443..9b9d89ebaed11 100644 --- a/vllm/model_executor/parameter.py +++ b/vllm/model_executor/parameter.py @@ -12,6 +12,7 @@ from torch.nn import Parameter from vllm.distributed import (get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size) from vllm.logger import init_logger +from vllm.utils import is_torch_equal_or_newer __all__ = [ "BasevLLMParameter", "PackedvLLMParameter", "PerTensorScaleParameter", @@ -114,6 +115,15 @@ class BasevLLMParameter(Parameter): @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): + if not is_torch_equal_or_newer("2.8.0"): + logger.warning_once( + "Torch %s detected (<2.8.0): returning NotImplemented in " + "BasevLLMParameter.__torch_function__ to avoid potential " + "TorchDynamo issues.", + torch.__version__, + ) + return NotImplemented + if kwargs is None: kwargs = {} return super().__torch_function__(func, types, args, kwargs)