--- /opt/conda/envs/py_3.10/lib/python3.10/site-packages/xformers/ops/fmha/common.py	2023-11-29 03:17:03.930103539 +0000
+++ common.py	2023-11-28 16:14:19.846233146 +0000
@@ -298,8 +298,8 @@
         dtype = d.query.dtype
         if device_type not in cls.SUPPORTED_DEVICES:
             reasons.append(f"device={device_type} (supported: {cls.SUPPORTED_DEVICES})")
-        if device_type == "cuda" and not _built_with_cuda:
-            reasons.append("xFormers wasn't build with CUDA support")
+        #if device_type == "cuda" and not _built_with_cuda:
+        #    reasons.append("xFormers wasn't build with CUDA support")
         if device_type == "cuda":
             device_capability = torch.cuda.get_device_capability(d.device)
             if device_capability < cls.CUDA_MINIMUM_COMPUTE_CAPABILITY:
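
The hunk above comments out the check in xformers/ops/fmha/common.py that reports "xFormers wasn't build with CUDA support", so attention ops are not rejected solely because the installed xFormers was built without CUDA. The snippet below is an optional helper sketch, not part of the patch: assuming xformers is importable in the current environment, it locates the installed xformers/ops/fmha/common.py and reports whether the commented-out check from this diff is present.

# Minimal sketch (assumption: xformers is installed and importable).
# It inspects the installed xformers/ops/fmha/common.py and reports whether
# the CUDA-build check targeted by this patch is still active or already
# commented out.
import importlib.util
from pathlib import Path

spec = importlib.util.find_spec("xformers.ops.fmha.common")
if spec is None or spec.origin is None:
    print("xformers.ops.fmha.common not found; is xformers installed?")
else:
    source = Path(spec.origin).read_text()
    # Check the patched (commented-out) form first, since its text contains
    # the unpatched line as a substring.
    if '#if device_type == "cuda" and not _built_with_cuda:' in source:
        print(f"patched: CUDA-build check is commented out in {spec.origin}")
    elif 'if device_type == "cuda" and not _built_with_cuda:' in source:
        print(f"unpatched: CUDA-build check is still active in {spec.origin}")
    else:
        print(f"check not found; {spec.origin} may be a different xFormers version")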