From a94a699c3ff9bfc23a35f147150a826b753bbf6a Mon Sep 17 00:00:00 2001
From: Gregory Shtrasberg <156009573+gshtras@users.noreply.github.com>
Date: Wed, 12 Mar 2025 23:14:04 -0400
Subject: [PATCH] [ROCm][FP8] Fix for adjustments needed only for fnuz (#14689)

Signed-off-by: Gregory Shtrasberg
---
 vllm/model_executor/layers/quantization/kv_cache.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/vllm/model_executor/layers/quantization/kv_cache.py b/vllm/model_executor/layers/quantization/kv_cache.py
index 388a4f16699c5..92990487885b9 100644
--- a/vllm/model_executor/layers/quantization/kv_cache.py
+++ b/vllm/model_executor/layers/quantization/kv_cache.py
@@ -50,7 +50,7 @@ class BaseKVCacheMethod(QuantizeMethodBase):
             # We prefer to use separate k_scale and v_scale if present
             k_scale = layer.k_scale.to("cpu").tolist()
             v_scale = layer.v_scale.to("cpu").tolist()
-            if current_platform.is_rocm():
+            if current_platform.is_fp8_fnuz():
                 k_scale *= 2
                 v_scale *= 2
         elif layer.k_scale < 0.0 and layer.v_scale < 0.0:
@@ -66,7 +66,7 @@ class BaseKVCacheMethod(QuantizeMethodBase):
             scale_to_duplicate = max(layer.k_scale, layer.v_scale)
             k_scale = scale_to_duplicate.to("cpu").tolist()
             v_scale = scale_to_duplicate.to("cpu").tolist()
-            if current_platform.is_rocm():
+            if current_platform.is_fp8_fnuz():
                 k_scale *= 2
                 v_scale *= 2
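
Note (not part of the patch): the scale doubling exists because the float8_e4m3fnuz
encoding uses an exponent bias of 8 rather than the OCP float8_e4m3fn bias of 7, so the
same bit pattern decodes to half the e4m3fn value; doubling k_scale/v_scale compensates
when checkpoint scales were computed for e4m3fn. The patch narrows the gate from all
ROCm platforms (is_rocm) to only those that actually use the fnuz format (is_fp8_fnuz).
Below is a minimal PyTorch sketch of the bias difference, assuming a PyTorch build that
ships both fp8 dtypes; this is illustrative only, not vLLM code:

import torch

# Reinterpret the same byte under both fp8 encodings.
bits = torch.tensor([0x40], dtype=torch.uint8)
as_fn = bits.view(torch.float8_e4m3fn).float()      # OCP e4m3fn, bias 7 -> 2.0
as_fnuz = bits.view(torch.float8_e4m3fnuz).float()  # fnuz, bias 8 -> 1.0

# The fnuz decode of any bit pattern is exactly half the e4m3fn decode.
assert torch.allclose(as_fn, 2 * as_fnuz)

# Dequantized value = fp8_value * scale. If the stored bits are read back
# as e4m3fnuz, every decoded value is halved, so the scale must be doubled
# (k_scale *= 2, v_scale *= 2) to recover the same real numbers. On ROCm
# platforms that use the OCP e4m3fn format instead, no adjustment is
# needed, which is why the condition is now is_fp8_fnuz() rather than
# is_rocm().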