#pragma once

#include "quantization/vectorization.cuh"

#include <cmath>
#include <c10/core/ScalarType.h>

#ifndef USE_ROCM
  #include <c10/util/Float8_e4m3fn.h>
  #define MAYBE_HOST_DEVICE C10_HOST_DEVICE
#else
  #include <ATen/hip/HIPContext.h>
  #include <c10/util/Float8_e4m3fn.h>
  #include <c10/util/Float8_e4m3fnuz.h>
  #include "amd/quant_utils.cuh"
  // ROCm doesn't seem to need C10_HOST_DEVICE for static constexpr
  #define MAYBE_HOST_DEVICE
#endif

// Determines whether the preferred FP8 type for the current platform is the
// OCP format (float8_e4m3fn). On CUDA this always returns true; on ROCm it
// checks the device architecture, since gfx94* GPUs use the FNUZ variant.
static bool is_fp8_ocp() {
#ifndef USE_ROCM
  return true;
#else
  auto dprops = at::cuda::getCurrentDeviceProperties();
  std::string device_arch = dprops->gcnArchName;
  size_t substring = device_arch.find("gfx94");
  return substring == std::string::npos;
#endif
}

template <typename T>
struct fp8_e4m3_adjusted_max;

template <>
struct fp8_e4m3_adjusted_max<c10::Float8_e4m3fn> {
  static constexpr c10::Float8_e4m3fn val() {
    return std::numeric_limits<c10::Float8_e4m3fn>::max();
  }
};

// Using the default max value from PyTorch (240.0, 0x7F) causes accuracy
// issues when running dynamic quantization, so 224.0 (0x7E) is used for ROCm.
template <>
struct fp8_e4m3_adjusted_max<c10::Float8_e4m3fnuz> {
  static constexpr c10::Float8_e4m3fnuz val() {
    return c10::Float8_e4m3fnuz(0x7E, c10::Float8_e4m3fnuz::from_bits());
  }
};

template <typename T>
MAYBE_HOST_DEVICE static constexpr T fp8_e4m3_adjusted_max_v =
    fp8_e4m3_adjusted_max<T>::val();

namespace vllm {

__device__ __forceinline__ float atomicMaxFloat(float* addr, float value) {
  float old;
  old = (value >= 0)
            ? __int_as_float(atomicMax((int*)addr, __float_as_int(value)))
            : __uint_as_float(
                  atomicMin((unsigned int*)addr, __float_as_uint(value)));

  return old;
}

template <bool is_scale_inverted, typename fp8_type>
__device__ __forceinline__ fp8_type scaled_fp8_conversion(float const val,
                                                          float const scale) {
  float x = 0.0f;
  if constexpr (is_scale_inverted) {
    x = val * scale;
  } else {
    x = val / scale;
  }

  float r = fmax(-fp8_e4m3_adjusted_max_v<fp8_type>,
                 fmin(x, fp8_e4m3_adjusted_max_v<fp8_type>));
#ifndef USE_ROCM
  return static_cast<fp8_type>(r);
#else
  // Use the hardware cvt instruction for fp8 on ROCm.
  return fp8::cvt_c10<fp8_type>(r);
#endif
}

// Compute the absolute maximum m of the input tensor and store
// m / fp8_e4m3_adjusted_max_v<fp8_type> in *scale. Each thread block performs
// a reduction tree and the memory in scale is atomically updated.
// So to get the right answer, *scale needs to be initialized to
// a value <= 0.0 and we need to wait for all thread blocks to
// finish before consuming *scale.
template <typename scalar_t, typename fp8_type>
__global__ void segmented_max_reduction(float* __restrict__ scale,
                                        const scalar_t* __restrict__ input,
                                        int64_t num_elems) {
  __shared__ float cache[1024];
  int64_t i = blockDim.x * blockIdx.x + threadIdx.x;

  // First store the maximum of all values processed by the current thread
  // in cache[threadIdx.x].
  scalar_t tmp = 0.0;
  while (i < num_elems) {
    float x = static_cast<float>(input[i]);
    tmp = max(tmp, fabs(x));
    i += blockDim.x * gridDim.x;
  }
  cache[threadIdx.x] = tmp;

  __syncthreads();

  // Now perform a parallel reduction within the thread block.
  int ib = blockDim.x / 2;
  while (ib != 0) {
    if (threadIdx.x < ib && cache[threadIdx.x + ib] > cache[threadIdx.x]) {
      cache[threadIdx.x] = cache[threadIdx.x + ib];
    }
    __syncthreads();
    ib /= 2;
  }
  // Finally, since cache[0] contains the maximum for this thread block,
  // atomically write the max to the target location.
  if (threadIdx.x == 0) {
    atomicMaxFloat(scale, cache[0] / fp8_e4m3_adjusted_max_v<fp8_type>);
  }
}
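
// Illustrative host-side sketch (an assumption, not part of the original
// header): one way segmented_max_reduction might be launched to produce a
// dynamic per-tensor scale. The helper name and the grid/block sizes are
// hypothetical examples, not values mandated by this file.
template <typename scalar_t, typename fp8_type>
void launch_segmented_max_reduction_example(float* scale,
                                            scalar_t const* input,
                                            int64_t num_elems,
                                            cudaStream_t stream) {
  // *scale must start at a value <= 0.0f, because atomicMaxFloat only ever
  // raises it; zero-initialization satisfies this.
  cudaMemsetAsync(scale, 0, sizeof(float), stream);
  // 1024 threads per block matches the shared cache[1024] and keeps the
  // power-of-two tree reduction exact; the grid is capped so each thread
  // strides over multiple elements when the tensor is large.
  dim3 const block(1024);
  int64_t num_blocks = (num_elems + block.x - 1) / block.x;
  if (num_blocks < 1) num_blocks = 1;
  if (num_blocks > 1024) num_blocks = 1024;
  dim3 const grid(static_cast<unsigned int>(num_blocks));
  segmented_max_reduction<scalar_t, fp8_type>
      <<<grid, block, 0, stream>>>(scale, input, num_elems);
}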
template <typename scalar_t>
__device__ float thread_max_vec(scalar_t const* __restrict__ input,
                                int64_t const num_elems, int const tid,
                                int const step) {
  // Vectorized input to better utilize memory bandwidth.
  vec4_t<scalar_t> const* vectorized_in =
      reinterpret_cast<vec4_t<scalar_t> const*>(input);

  int64_t const num_vec_elems = num_elems >> 2;
  float absmax_val = 0.0f;

#pragma unroll 4
  for (int64_t i = tid; i < num_vec_elems; i += step) {
    vec4_t<scalar_t> in_vec = vectorized_in[i];
    absmax_val = max(absmax_val, fabs(in_vec.x));
    absmax_val = max(absmax_val, fabs(in_vec.y));
    absmax_val = max(absmax_val, fabs(in_vec.z));
    absmax_val = max(absmax_val, fabs(in_vec.w));
  }

  // Handle the remaining elements if num_elems is not divisible by 4.
  for (int64_t i = num_vec_elems * 4 + tid; i < num_elems; i += step) {
    absmax_val = max(absmax_val, fabs(input[i]));
  }

  return absmax_val;
}

template <typename scalar_t, bool is_scale_inverted, typename fp8_type>
__device__ void scaled_fp8_conversion_vec(fp8_type* __restrict__ out,
                                          scalar_t const* __restrict__ input,
                                          float const scale,
                                          int64_t const num_elems,
                                          int const tid, int const step) {
  using float8x4_t = q8x4_t<fp8_type>;
  // Vectorized input/output to better utilize memory bandwidth.
  auto const* vectorized_in = reinterpret_cast<vec4_t<scalar_t> const*>(input);
  auto* vectorized_out = reinterpret_cast<float8x4_t*>(out);

  int64_t const num_vec_elems = num_elems >> 2;

#pragma unroll 4
  for (int64_t i = tid; i < num_vec_elems; i += step) {
    vec4_t<scalar_t> in_vec = vectorized_in[i];
    float8x4_t out_vec;

    out_vec.x = scaled_fp8_conversion<is_scale_inverted, fp8_type>(
        static_cast<float>(in_vec.x), scale);
    out_vec.y = scaled_fp8_conversion<is_scale_inverted, fp8_type>(
        static_cast<float>(in_vec.y), scale);
    out_vec.z = scaled_fp8_conversion<is_scale_inverted, fp8_type>(
        static_cast<float>(in_vec.z), scale);
    out_vec.w = scaled_fp8_conversion<is_scale_inverted, fp8_type>(
        static_cast<float>(in_vec.w), scale);
    vectorized_out[i] = out_vec;
  }

  // Handle the remaining elements if num_elems is not divisible by 4.
  for (int64_t i = num_vec_elems * 4 + tid; i < num_elems; i += step) {
    out[i] = scaled_fp8_conversion<is_scale_inverted, fp8_type>(
        static_cast<float>(input[i]), scale);
  }
}

}  // namespace vllm
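
// Illustrative sketch (an assumption, not part of the original header): a
// minimal kernel showing how scaled_fp8_conversion_vec is typically driven,
// with each thread striding over the flattened tensor. The kernel name is
// hypothetical; only the helpers it calls come from this file.
namespace vllm {

template <typename scalar_t, typename fp8_type>
__global__ void scaled_fp8_quant_example_kernel(
    fp8_type* __restrict__ out, scalar_t const* __restrict__ input,
    float const* __restrict__ scale, int64_t num_elems) {
  int const tid = blockDim.x * blockIdx.x + threadIdx.x;
  int const step = blockDim.x * gridDim.x;

  // Invert the scale once so the per-element conversion can multiply instead
  // of divide (is_scale_inverted = true).
  float const inverted_scale = 1.0f / (*scale);
  scaled_fp8_conversion_vec<scalar_t, true, fp8_type>(
      out, input, inverted_scale, num_elems, tid, step);
}

}  // namespace vllm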