Mirror of https://git.datalinker.icu/kijai/ComfyUI-Hunyuan3DWrapper.git (synced 2025-12-14 15:24:29 +08:00)
parent 87e2a4341f
commit a148a02247
@@ -628,30 +628,48 @@ class ShapeVAE(nn.Module):
         )
         xyz_samples = torch.FloatTensor(xyz_samples)
 
+        if mc_algo == 'odc':
+            from ....occupancy_dual_contouring import occupancy_dual_contouring
+            odc = occupancy_dual_contouring(device=device)
+
         # 2. latents to 3d volume
         batch_logits = []
         batch_size = latents.shape[0]
         comfy_pbar = ProgressBar(xyz_samples.shape[0])
         for start in tqdm(range(0, xyz_samples.shape[0], num_chunks),
                           desc=f"MC Level {mc_level} Implicit Function:"):
-            queries = xyz_samples[start: start + num_chunks, :].to(device)
-            queries = queries.half()
-            batch_queries = repeat(queries, "p c -> b p c", b=batch_size)
-            logits = self.geo_decoder(batch_queries.to(latents.dtype), latents)
-            if mc_level == -1:
-                mc_level = 0
-                logits = torch.sigmoid(logits) * 2 - 1
-                print(f'Training with soft labels, inference with sigmoid and marching cubes level 0.')
-            batch_logits.append(logits)
-            comfy_pbar.update(num_chunks)
-        grid_logits = torch.cat(batch_logits, dim=1)
-        grid_logits = grid_logits.view((batch_size, grid_size[0], grid_size[1], grid_size[2])).float()
+            if mc_algo == 'odc':
+                imp_func = lambda xyz: torch.flatten(self.geo_decoder(repeat(xyz, "p c -> b p c", b=batch_size).to(latents.dtype), latents))
+                vertices, faces = odc.extract_mesh(imp_func, num_grid = octree_resolution, isolevel=mc_level, batch_size=num_chunks, min_coord=bbox_min, max_coord=bbox_max)
+                comfy_pbar.update(num_chunks)
+            else:
+                queries = xyz_samples[start: start + num_chunks, :].to(device)
+                queries = queries.half()
+                batch_queries = repeat(queries, "p c -> b p c", b=batch_size)
+                logits = self.geo_decoder(batch_queries.to(latents.dtype), latents)
+                if mc_level == -1:
+                    mc_level = 0
+                    logits = torch.sigmoid(logits) * 2 - 1
+                    print(f'Training with soft labels, inference with sigmoid and marching cubes level 0.')
+                batch_logits.append(logits)
+                comfy_pbar.update(num_chunks)
+
+        if mc_algo == 'odc':
+            vertices = vertices.detach().cpu().numpy()
+            faces = faces.detach().cpu().numpy()
+            outputs = [
+                Latent2MeshOutput(
+                    mesh_v=vertices.astype(np.float32),
+                    mesh_f=np.ascontiguousarray(faces)
+                )]
+            return outputs
+        else:
+            grid_logits = torch.cat(batch_logits, dim=1)
+            grid_logits = grid_logits.view((batch_size, grid_size[0], grid_size[1], grid_size[2])).float()
 
         # 3. extract surface
         outputs = []
         for i in range(batch_size):
-            try:
             if mc_algo == 'mc':
                 vertices, faces, normals, _ = measure.marching_cubes(
                     grid_logits[i].cpu().numpy(),
@@ -712,8 +730,6 @@ class ShapeVAE(nn.Module):
                 else:
                     vertices = np.array([])
                     faces = np.array([])
-            else:
-                raise ValueError(f"mc_algo {mc_algo} not supported.")
 
             outputs.append(
                 Latent2MeshOutput(
@@ -722,12 +738,7 @@ class ShapeVAE(nn.Module):
                 )
             )
 
-            except ValueError:
-                outputs.append(None)
-            except RuntimeError:
-                outputs.append(None)
-
         return outputs
 
 
 def create_cube_mesh(pos, size=1.0):
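For reference, the 'odc' branch above only needs the decoder exposed as a pointwise implicit function. The following is a minimal, self-contained sketch of that wrapper; toy_geo_decoder is a stand-in (a unit-sphere occupancy field), not the wrapper's actual geo_decoder, and is used only to show the tensor shapes involved.

# Sketch of the imp_func construction used in the 'odc' branch above.
# Assumptions: toy_geo_decoder and the dummy latents are placeholders.
import torch
from einops import repeat

batch_size = 1
bbox_min, bbox_max = -1.0, 1.0

def toy_geo_decoder(queries, latents):
    # queries: (B, P, 3) -> logits: (B, P); positive inside a sphere of radius 0.5
    return 0.5 - queries.norm(dim=-1)

latents = torch.zeros(batch_size, 1)  # unused by the toy decoder

# Same construction as in the diff: repeat each chunk of query points across
# the batch, decode, and flatten so the extractor sees a flat field per chunk.
imp_func = lambda xyz: torch.flatten(
    toy_geo_decoder(repeat(xyz, "p c -> b p c", b=batch_size).to(latents.dtype), latents)
)

chunk = torch.rand(8, 3) * (bbox_max - bbox_min) + bbox_min  # (P, 3) sample points
occ = imp_func(chunk)                                        # (batch_size * P,) logits
print(occ.shape)  # torch.Size([8])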
nodes.py (2 changed lines)
@@ -1085,7 +1085,7 @@ class Hy3DVAEDecode:
                 "octree_resolution": ("INT", {"default": 384, "min": 64, "max": 4096, "step": 16}),
                 "num_chunks": ("INT", {"default": 8000, "min": 1, "max": 10000000, "step": 1, "tooltip": "Number of chunks to process at once, higher values use more memory, but make the process faster"}),
                 "mc_level": ("FLOAT", {"default": 0, "min": -1.0, "max": 1.0, "step": 0.0001}),
-                "mc_algo": (["mc", "dmc", "none"], {"default": "mc"}),
+                "mc_algo": (["mc", "dmc", "odc", "none"], {"default": "mc"}),
             },
         }
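The only change in nodes.py is the extra "odc" entry in the mc_algo combo. As a reminder of how such a combo input reaches a node at runtime, here is a generic ComfyUI node sketch (not the wrapper's actual Hy3DVAEDecode class); the selected option arrives as a plain string argument.

# Generic ComfyUI node sketch; class name and category are placeholders.
class ExampleNode:
    @classmethod
    def INPUT_TYPES(cls):
        return {"required": {"mc_algo": (["mc", "dmc", "odc", "none"], {"default": "mc"})}}

    RETURN_TYPES = ("STRING",)
    FUNCTION = "process"
    CATEGORY = "examples"

    def process(self, mc_algo):
        # mc_algo is "mc", "dmc", "odc", or "none"; selecting "odc" routes mesh
        # extraction to the new occupancy dual contouring path added in this commit.
        return (mc_algo,)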
occupancy_dual_contouring.py (new file, 1097 lines)
File diff suppressed because it is too large.
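The contents of occupancy_dual_contouring.py are not shown in this view. Judging only from the call site in the first hunk, its public surface is roughly the stub below; the parameter meanings are assumptions inferred from the names passed in (octree_resolution, mc_level, bbox_min/bbox_max), not documentation of the actual file.

# Inferred interface stub only; the real implementation lives in
# occupancy_dual_contouring.py, whose diff is suppressed above.
class occupancy_dual_contouring:
    def __init__(self, device="cuda"):
        self.device = device

    def extract_mesh(self, imp_func, num_grid, isolevel, batch_size,
                     min_coord, max_coord):
        """imp_func maps (P, 3) query points to flat occupancy logits;
        num_grid is the sampling resolution (octree_resolution above),
        isolevel the surface threshold (mc_level), batch_size the chunk
        size, and min/max_coord the bounding box. Assumed to return
        (vertices, faces) tensors."""
        raise NotImplementedError("see occupancy_dual_contouring.py")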