use ceil_div in cutlass block scaling shape check (#17918)
Parent: 390ec88905
Commit: e23564cb70
@@ -115,8 +115,16 @@ def bench_fp8(
     a_cont = a.contiguous()
     scale_a = torch.tensor(1.0, device="cuda", dtype=torch.float32)
     scale_b = torch.tensor(1.0, device="cuda", dtype=torch.float32)
-    block_scale_a = torch.rand((m, k // 128), device="cuda", dtype=torch.float32)
-    block_scale_b = torch.rand((k // 128, n // 128), device="cuda", dtype=torch.float32)
+
+    def ceil_div(x: int, y: int) -> int:
+        return (x + y - 1) // y
+
+    block_scale_a = torch.rand(
+        (m, ceil_div(k, 128)), device="cuda", dtype=torch.float32
+    )
+    block_scale_b = torch.rand(
+        ceil_div(k, 128), ceil_div(n, 128), device="cuda", dtype=torch.float32
+    )
     block_scale_a_M_major = block_scale_a.t().contiguous().t()
     block_scale_b_K_major = block_scale_b.t().contiguous().t()
     bias = torch.zeros((n,), device="cuda", dtype=torch.bfloat16)
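The point of the benchmark change: with ceil_div, the block-scale tensors keep a valid shape even when k or n is not a multiple of the 128-wide scaling group, instead of dropping the last partial group. A minimal sketch (not part of the diff; m, k, n below are made-up values):

    def ceil_div(x: int, y: int) -> int:
        return (x + y - 1) // y

    m, k, n = 16, 4000, 7168                     # k % 128 != 0
    print(k // 128, ceil_div(k, 128))            # 31 vs. 32: floor drops the partial group
    print((m, ceil_div(k, 128)))                 # block_scale_a shape -> (16, 32)
    print((ceil_div(k, 128), ceil_div(n, 128)))  # block_scale_b shape -> (32, 56)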
@@ -1,5 +1,6 @@
 #include <torch/all.h>
 #include "cuda_utils.h"
+#include "cutlass_extensions/common.hpp"

 template <typename Fp8Func, typename Int8Func, typename BlockwiseFunc>
 void dispatch_scaled_mm(torch::Tensor& c, torch::Tensor const& a,
@@ -28,29 +29,46 @@ void dispatch_scaled_mm(torch::Tensor& c, torch::Tensor const& a,
       }
     }
   } else {
-    using GroupShape = std::array<int64_t, 2>;
-    auto make_group_shape = [](torch::Tensor const& x,
-                               torch::Tensor const& s) -> GroupShape {
-      TORCH_CHECK(s.dim() == 2, "cutlass_scaled_mm group scales must be 2D");
-      return {cuda_utils::ceil_div(x.size(0), s.size(0)),
-              cuda_utils::ceil_div(x.size(1), s.size(1))};
-    };
+    TORCH_CHECK(a_scales.dim() == 2, "a scale must be 2d tensor.");
+    TORCH_CHECK(b_scales.dim() == 2, "b scale must be 2d tensor.");
+    int32_t version_num = get_sm_version_num();
+    if (version_num >= 100) {
+      TORCH_CHECK(
+          a.size(0) == a_scales.size(0) &&
+              cuda_utils::ceil_div(a.size(1), int64_t(128)) == a_scales.size(1),
+          "a_scale_group_shape must be [1, 128].");
+      TORCH_CHECK(
+          cuda_utils::ceil_div(b.size(0), int64_t(128)) == b_scales.size(0) &&
+              cuda_utils::ceil_div(b.size(1), int64_t(128)) == b_scales.size(1),
+          "b_scale_group_shape must be [128, 128].");
+    } else {
+      // TODO: Remove this after using cutlass sm90 blockwise scaling gemm
+      // kernel, or introducing ceil_div to the load_init() of mainloop.
+      using GroupShape = std::array<int64_t, 2>;
+      auto make_group_shape = [](torch::Tensor const& x,
+                                 torch::Tensor const& s) -> GroupShape {
+        TORCH_CHECK(s.dim() == 2, "cutlass_scaled_mm group scales must be 2D");
+        return {cuda_utils::ceil_div(x.size(0), s.size(0)),
+                cuda_utils::ceil_div(x.size(1), s.size(1))};
+      };

-    GroupShape a_scale_group_shape = make_group_shape(a, a_scales);
-    GroupShape b_scale_group_shape = make_group_shape(b, b_scales);
+      GroupShape a_scale_group_shape = make_group_shape(a, a_scales);
+      GroupShape b_scale_group_shape = make_group_shape(b, b_scales);

-    // 1x128 per-token group scales for activations
-    // 128x128 blockwise scales for weights
-    TORCH_CHECK((a_scale_group_shape == GroupShape{1, 128} &&
-                 b_scale_group_shape == GroupShape{128, 128} &&
-                 a.dtype() == torch::kFloat8_e4m3fn &&
-                 b.dtype() == torch::kFloat8_e4m3fn),
-                "cutlass_scaled_mm only supports datatype float8_e4m3fn.\n"
-                "a_scale_group_shape must be [1, 128]. Got: [",
-                a_scale_group_shape[0], ", ", a_scale_group_shape[1],
-                "]\n"
-                "b_scale_group_shape must be [128, 128]. Got: [",
-                b_scale_group_shape[0], ", ", b_scale_group_shape[1], "]");
+      // 1x128 per-token group scales for activations
+      // 128x128 blockwise scales for weights
+      TORCH_CHECK((a_scale_group_shape == GroupShape{1, 128} &&
+                   b_scale_group_shape == GroupShape{128, 128} &&
+                   a.dtype() == torch::kFloat8_e4m3fn &&
+                   b.dtype() == torch::kFloat8_e4m3fn),
+                  "cutlass_scaled_mm only supports datatype float8_e4m3fn.\n"
+                  "a_scale_group_shape must be [1, 128]. Got: [",
+                  a_scale_group_shape[0], ", ", a_scale_group_shape[1],
+                  "]\n"
+                  "b_scale_group_shape must be [128, 128]. Got: [",
+                  b_scale_group_shape[0], ", ", b_scale_group_shape[1], "]");
+    }

     TORCH_CHECK(!bias, "Bias not yet supported blockwise scaled_mm");
     blockwise_func(c, a, b, a_scales, b_scales);
   }
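In the dispatcher above, the SM100 (Blackwell) branch now checks the scale tensors directly against ceil_div of the GEMM dimensions, while the SM90 branch keeps the old exact [1, 128] / [128, 128] group-shape requirement. A rough Python mirror of that rule, assuming a is [M, K] and b is [K, N] (the helper and its argument names are illustrative, not vLLM API):

    def ceil_div(x: int, y: int) -> int:
        return (x + y - 1) // y

    def blockwise_scales_ok(a_shape, a_scales_shape, b_shape, b_scales_shape,
                            sm_version: int) -> bool:
        m, k = a_shape      # activations [M, K], scaled per 1x128 group
        kb, n = b_shape     # weights [K, N], scaled per 128x128 block
        if sm_version >= 100:
            # ceil_div tolerates K or N that is not a multiple of 128
            return (a_scales_shape == (m, ceil_div(k, 128))
                    and b_scales_shape == (ceil_div(kb, 128), ceil_div(n, 128)))
        # sm90 path: derive the group shape from tensor/scale sizes and require
        # exactly [1, 128] for activations and [128, 128] for weights
        a_group = (ceil_div(m, a_scales_shape[0]), ceil_div(k, a_scales_shape[1]))
        b_group = (ceil_div(kb, b_scales_shape[0]), ceil_div(n, b_scales_shape[1]))
        return a_group == (1, 128) and b_group == (128, 128)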
@@ -115,8 +115,19 @@ def apply_w8a8_block_fp8_linear(
     output_shape = [*input.shape[:-1], weight.shape[0]]

     if current_platform.is_cuda():
-        use_cutlass = cutlass_block_fp8_supported and (
-            weight.shape[0] % 128 == 0 and weight.shape[1] % 128 == 0)
+        if current_platform.has_device_capability(100):
+
+            def ceil_div(x: int, y: int) -> int:
+                return (x + y - 1) // y
+
+            use_cutlass = cutlass_block_fp8_supported and (
+                ceil_div(weight.shape[0], 128) == weight_scale.shape[0]
+                and ceil_div(weight.shape[1], 128) == weight_scale.shape[1])
+        else:
+            # TODO: update this after switching to public sm90 block scale gemm
+            # as it also supports weight.shape % 128 != 0
+            use_cutlass = cutlass_block_fp8_supported and (
+                weight.shape[0] % 128 == 0 and weight.shape[1] % 128 == 0)
     else:
         use_cutlass = False
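Net effect of this gate: on devices reporting capability 10.0 or newer (what has_device_capability(100) checks), block-quantized weights whose dimensions are not multiples of 128 can still take the cutlass path as long as the scale tensor matches the ceil_div'd shape; older GPUs keep the strict divisibility requirement. A hypothetical check with made-up shapes (4000 is deliberately not a multiple of 128):

    ceil_div = lambda x, y: (x + y - 1) // y

    weight_shape = (4000, 4096)  # 4000 % 128 == 32
    scale_shape = (32, 32)       # (ceil(4000 / 128), ceil(4096 / 128))

    sm100_ok = (ceil_div(weight_shape[0], 128) == scale_shape[0]
                and ceil_div(weight_shape[1], 128) == scale_shape[1])    # True
    sm90_ok = weight_shape[0] % 128 == 0 and weight_shape[1] % 128 == 0  # False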