[bugfix][quantization] Fix fp8 per_tensor scale shape (#30257)

Signed-off-by: Haoyang Li <lihaoyang0109@gmail.com>
commit 03416eada6
parent c72ea10723
Author: haoyangli-amd
Date:   2025-12-09 19:28:50 +08:00 (committed by GitHub)

@@ -1726,7 +1726,7 @@ def scaled_fp8_quant(
                 output, input, scale, scale_ub
             )
         else:
-            scale = torch.empty((1, 1), device=input.device, dtype=torch.float32)
+            scale = torch.empty(1, device=input.device, dtype=torch.float32)
             torch.ops._C.dynamic_scaled_fp8_quant(output, input, scale)
     else:
         assert scale.numel() == 1, f"{scale.shape}"
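
For context, the change allocates the dynamically computed per-tensor scale as a 1-D tensor of shape (1,) rather than a 2-D (1, 1) tensor. Below is a minimal pure-PyTorch sketch of the per-tensor dynamic-quantization contract this hunk touches; it is illustrative only (the real work happens inside the torch.ops._C.dynamic_scaled_fp8_quant kernel), and the names dynamic_scaled_fp8_quant_ref and FP8_MAX are invented for this sketch.

    import torch

    # Sketch only: dynamic per-tensor fp8 quantization in pure PyTorch.
    # One scale covers the whole tensor, so it is a single-element 1-D
    # tensor of shape (1,), matching the fixed allocation above.
    FP8_MAX = torch.finfo(torch.float8_e4m3fn).max

    def dynamic_scaled_fp8_quant_ref(x: torch.Tensor):
        # scale = max(|x|) / fp8_max, clamped to avoid division by zero.
        scale = (x.abs().max().to(torch.float32) / FP8_MAX).clamp(min=1e-12).reshape(1)
        q = (x / scale).clamp(-FP8_MAX, FP8_MAX).to(torch.float8_e4m3fn)
        return q, scale

    x = torch.randn(16, 32)
    q, scale = dynamic_scaled_fp8_quant_ref(x)
    assert scale.shape == (1,) and scale.numel() == 1  # shape after this fix

Either shape satisfies the scale.numel() == 1 assertion on the caller-supplied path, but a consistent (1,) allocation keeps the dynamically created scale's shape aligned with what downstream consumers of the per-tensor scale expect.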