make compatible with comfy save/load latents

This commit is contained in:
kijai 2024-10-24 03:23:01 +03:00
parent 9bb3a79275
commit f714748ad4

View File

@ -266,7 +266,9 @@ class MochiDecode:
        device = mm.get_torch_device()
        offload_device = mm.unet_offload_device()
        samples = samples["samples"]
        samples = samples.to(torch.bfloat16).to(device)

        def blend_v(a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
            blend_extent = min(a.shape[3], b.shape[3], blend_extent)
            for y in range(blend_extent):