From e6803499941333e30e7db984cc590f21fc284e7b Mon Sep 17 00:00:00 2001
From: bnellnm <49004751+bnellnm@users.noreply.github.com>
Date: Fri, 16 Aug 2024 22:05:49 -0400
Subject: [PATCH] [Bugfix] Fix custom_ar support check (#7617)

---
 vllm/distributed/device_communicators/custom_all_reduce.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/vllm/distributed/device_communicators/custom_all_reduce.py b/vllm/distributed/device_communicators/custom_all_reduce.py
index 479dc95a8b667..6229f1d6ec788 100644
--- a/vllm/distributed/device_communicators/custom_all_reduce.py
+++ b/vllm/distributed/device_communicators/custom_all_reduce.py
@@ -15,7 +15,7 @@
 from vllm.platforms import current_platform
 from vllm.utils import cuda_device_count_stateless
 try:
-    assert ops.is_custom_op_supported("_C_custom_ar::meta_size")
+    ops.meta_size()
     custom_ar = True
 except Exception:
     # For AMD GPUs and CPUs
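
For context, the patch replaces a string-based op lookup with a direct call that raises if the compiled kernel is absent. A minimal sketch of that probe-and-flag pattern, using a hypothetical `fast_ops` module as a stand-in for vLLM's real extension (not the project's actual API):

```python
# Probe-and-flag pattern the patch switches to: call a trivial op from the
# optional compiled extension at import time; if the extension is missing
# (e.g. AMD GPUs or CPU-only builds), the call raises and the flag stays False.
# "fast_ops" is a hypothetical stand-in for the real extension module.
try:
    import fast_ops                # hypothetical compiled extension
    fast_ops.meta_size()           # raises if the custom kernels were not built
    custom_ar = True
except Exception:
    custom_ar = False

print("custom all-reduce available:", custom_ar)
```

Code that needs the fast path can then branch on `custom_ar` instead of re-probing the extension on every call.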