Use float16 for autocast on mps

The mps backend only supports float16 for autocast for now.
Yoshimasa Niwa 2024-11-05 13:14:17 +09:00
parent 21374934d3
commit 99285ca1e7

@@ -240,7 +240,10 @@ class T2VSynthMochiModel:
         if hasattr(self.dit, "cublas_half_matmul") and self.dit.cublas_half_matmul:
             autocast_dtype = torch.float16
         else:
-            autocast_dtype = torch.bfloat16
+            if self.device.type == "mps":
+                autocast_dtype = torch.float16
+            else:
+                autocast_dtype = torch.bfloat16
 
         def model_fn(*, z, sigma, cfg_scale):
             nonlocal sample, sample_null
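
For reference, a minimal standalone sketch of the same device-dependent dtype selection. The pick_autocast_dtype helper, the Linear layer, and the tensor shapes below are illustrative assumptions, not code from this repository:

    import torch

    def pick_autocast_dtype(device: torch.device) -> torch.dtype:
        # Per the commit message, mps only supports float16 for autocast
        # for now, so use float16 there and bfloat16 elsewhere.
        if device.type == "mps":
            return torch.float16
        return torch.bfloat16

    # Illustrative usage: run a forward pass under autocast with a dtype
    # matched to the device.
    device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")
    layer = torch.nn.Linear(8, 8).to(device)
    x = torch.randn(4, 8, device=device)
    with torch.autocast(device_type=device.type, dtype=pick_autocast_dtype(device)):
        y = layer(x)
    print(y.dtype)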