From 99285ca1e7700613990cbd35119f45625bd8a889 Mon Sep 17 00:00:00 2001 From: Yoshimasa Niwa Date: Tue, 5 Nov 2024 13:14:17 +0900 Subject: [PATCH] Use float16 for autocast on MPS MPS only supports float16 for autocast for now. --- mochi_preview/t2v_synth_mochi.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mochi_preview/t2v_synth_mochi.py b/mochi_preview/t2v_synth_mochi.py index 798a7aa..f20808d 100644 --- a/mochi_preview/t2v_synth_mochi.py +++ b/mochi_preview/t2v_synth_mochi.py @@ -240,7 +240,10 @@ class T2VSynthMochiModel: if hasattr(self.dit, "cublas_half_matmul") and self.dit.cublas_half_matmul: autocast_dtype = torch.float16 else: - autocast_dtype = torch.bfloat16 + if self.device.type == "mps": + autocast_dtype = torch.float16 + else: + autocast_dtype = torch.bfloat16 def model_fn(*, z, sigma, cfg_scale): nonlocal sample, sample_null