Use float16 for autocast on mps

MPS only supports float16 for autocast for now.
Yoshimasa Niwa 2024-11-05 13:14:17 +09:00
parent 21374934d3
commit 99285ca1e7

@@ -239,6 +239,9 @@ class T2VSynthMochiModel:
         self.dit.to(self.device)
         if hasattr(self.dit, "cublas_half_matmul") and self.dit.cublas_half_matmul:
             autocast_dtype = torch.float16
         else:
-            autocast_dtype = torch.bfloat16
+            if self.device.type == "mps":
+                autocast_dtype = torch.float16
+            else:
+                autocast_dtype = torch.bfloat16
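
For context, a minimal sketch (not part of this commit) of how a device-dependent autocast dtype like the one chosen above is typically consumed via torch.autocast. The device selection, tensor shapes, and variable names here are illustrative only, and assume a PyTorch build recent enough to support autocast on the mps backend:

import torch

# Pick a device; falls back to CPU when mps is unavailable.
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")

# Mirror the commit's logic: float16 on mps, bfloat16 elsewhere.
autocast_dtype = torch.float16 if device.type == "mps" else torch.bfloat16

with torch.autocast(device_type=device.type, dtype=autocast_dtype):
    x = torch.randn(8, 8, device=device)
    y = x @ x  # matmul runs in the reduced-precision autocast dtype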