From 60e419c1eeef90425acbd6d34bfadf4202707507 Mon Sep 17 00:00:00 2001
From: bnellnm <49004751+bnellnm@users.noreply.github.com>
Date: Sun, 12 Oct 2025 20:17:50 -0400
Subject: [PATCH] [Misc] cache result of disable_inplace (#26666)

Signed-off-by: Bill Nell
---
 vllm/model_executor/layers/fused_moe/utils.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/vllm/model_executor/layers/fused_moe/utils.py b/vllm/model_executor/layers/fused_moe/utils.py
index a682f848b0c4..e5957474630c 100644
--- a/vllm/model_executor/layers/fused_moe/utils.py
+++ b/vllm/model_executor/layers/fused_moe/utils.py
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: Apache-2.0
 # SPDX-FileCopyrightText: Copyright contributors to the vLLM project
+import functools
 from math import prod
 
 import torch
@@ -325,5 +326,6 @@ def activation_without_mul(activation: str) -> str:
 # Torch custom ops can't deal with outputs aliasing inputs so we need to
 # disable inplace for torch >= 2.9.
 # See https://github.com/vllm-project/vllm/issues/26378
+@functools.cache
 def disable_inplace() -> bool:
     return is_torch_equal_or_newer("2.9")
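
Note: a minimal, self-contained sketch (not the vLLM code) of what @functools.cache
buys here. A decorated zero-argument function computes its result once per process
and every later call returns the cached value, so the torch version comparison
behind disable_inplace() is no longer re-evaluated on every call. The function
name below is a hypothetical stand-in for the patched helper.

    import functools

    @functools.cache
    def expensive_version_check() -> bool:
        # The body runs only on the first call; functools.cache stores the result
        # keyed on the (empty) argument tuple and returns it on later calls.
        print("checking torch version ...")
        return True

    expensive_version_check()  # prints, computes, and caches True
    expensive_version_check()  # returns the cached True; no print, no recompute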