diff --git a/vllm/model_executor/layers/quantization/kv_cache.py b/vllm/model_executor/layers/quantization/kv_cache.py
index 388a4f16699c5..92990487885b9 100644
--- a/vllm/model_executor/layers/quantization/kv_cache.py
+++ b/vllm/model_executor/layers/quantization/kv_cache.py
@@ -50,7 +50,7 @@ class BaseKVCacheMethod(QuantizeMethodBase):
             # We prefer to use separate k_scale and v_scale if present
             k_scale = layer.k_scale.to("cpu").tolist()
             v_scale = layer.v_scale.to("cpu").tolist()
-            if current_platform.is_rocm():
+            if current_platform.is_fp8_fnuz():
                 k_scale *= 2
                 v_scale *= 2
         elif layer.k_scale < 0.0 and layer.v_scale < 0.0:
@@ -66,7 +66,7 @@ class BaseKVCacheMethod(QuantizeMethodBase):
             scale_to_duplicate = max(layer.k_scale, layer.v_scale)
             k_scale = scale_to_duplicate.to("cpu").tolist()
             v_scale = scale_to_duplicate.to("cpu").tolist()
-            if current_platform.is_rocm():
+            if current_platform.is_fp8_fnuz():
                 k_scale *= 2
                 v_scale *= 2
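
For context on why the `* 2` adjustment exists at all: the FNUZ flavor of FP8 (`float8_e4m3fnuz`, used by ROCm hardware) has an exponent bias of 8 rather than the OCP `float8_e4m3fn` bias of 7, so the same bit pattern decodes to half the value (max representable 240 vs. 448). Checkpoint KV-cache scales are calibrated against the OCP grid, so doubling them compensates when the bits are interpreted as FNUZ. The diff narrows the check from "any ROCm platform" to "platforms that actually use the FNUZ format". Below is a minimal, self-contained sketch (not vLLM code; it only assumes a PyTorch build that exposes both FP8 dtypes, available in recent releases) demonstrating the effect:

```python
import torch

x = torch.tensor([0.5, 1.0, 2.0, 3.0])

# Scale calibrated against the OCP e4m3fn grid (max 448).
scale = x.abs().max() / torch.finfo(torch.float8_e4m3fn).max

q_fn = (x / scale).to(torch.float8_e4m3fn)    # quantize on the fn grid
q_fnuz = q_fn.view(torch.float8_e4m3fnuz)     # same bits, read as fnuz

# Reading fn-calibrated bits as fnuz halves every value ...
print(q_fnuz.to(torch.float32) * scale)       # ~[0.25, 0.5, 1.0, 1.5]

# ... so doubling the scale (what the diff does on FNUZ platforms)
# recovers the original magnitudes, up to fp8 rounding error.
print(q_fnuz.to(torch.float32) * (scale * 2)) # ~[0.5, 1.0, 2.0, 3.0]
```

Gating on `is_fp8_fnuz()` instead of `is_rocm()` ties the adjustment to the property that actually requires it, so ROCm platforms that use the OCP FP8 format are no longer incorrectly doubled.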