mirror of https://git.datalinker.icu/kijai/ComfyUI-KJNodes.git

Update nodes.py

commit f81567595e (parent 0a341a2811)

nodes.py | 24 changed lines (19 additions, 5 deletions)
@@ -811,7 +811,7 @@ class GrowMaskWithBlur:
             if fill_holes:
                 binary_mask = output > 0
                 output = scipy.ndimage.binary_fill_holes(binary_mask)
-                output = output.astype(np.uint8) * 255
+                output = output.astype(np.float32) * 255
             output = torch.from_numpy(output)
             if alpha < 1.0 and previous_output is not None:
                 # Interpolate between the previous and current frame
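The one-line change above matters because `torch.from_numpy` preserves the NumPy dtype: with `np.uint8` the mask became an integer tensor, while the `alpha < 1.0` branch right after it blends the current frame with `previous_output` in floating point. A minimal standalone sketch of the dtype difference (not the node's code):

```python
import numpy as np
import torch

# scipy.ndimage.binary_fill_holes returns a boolean array; the astype() call
# decides the dtype that torch.from_numpy will carry over into the tensor.
binary_mask = np.array([[True, False], [False, True]])

as_uint8 = torch.from_numpy(binary_mask.astype(np.uint8) * 255)
as_float32 = torch.from_numpy(binary_mask.astype(np.float32) * 255)

print(as_uint8.dtype)    # torch.uint8 -- integer mask tensor
print(as_float32.dtype)  # torch.float32

# Blending with a fractional alpha, as the node does with previous_output,
# is natural on the float tensor; the uint8 tensor would need an explicit
# cast first, and writing a fractional blend back into uint8 would truncate.
alpha = 0.5
previous = torch.zeros_like(as_float32)
blended = alpha * as_float32 + (1 - alpha) * previous
print(blended.dtype)     # torch.float32
```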
@@ -1755,8 +1755,15 @@ class BatchUncrop:
             draw.rectangle((width - border_width, 0, width, height), fill=border_color)
             return image

-        if len(original_images) != len(cropped_images) or len(original_images) != len(bboxes):
-            raise ValueError("The number of images, crop_images, and bboxes should be the same")
+        if len(original_images) != len(cropped_images):
+            raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same")
+
+        # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images
+        if len(bboxes) > len(original_images):
+            print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}")
+            bboxes = bboxes[:len(original_images)]
+        elif len(bboxes) < len(original_images):
+            raise ValueError("There should be at least as many bboxes as there are original and cropped images")

         input_images = tensor2pil(original_images)
         crop_imgs = tensor2pil(cropped_images)
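This hunk relaxes the validation: the original and cropped image counts must still match exactly, but surplus bboxes are now truncated with a warning instead of aborting, and only a shortage of bboxes remains fatal. A standalone sketch of that reconciliation logic (the function name is hypothetical, not part of the node):

```python
def reconcile_bboxes(original_images, cropped_images, bboxes):
    # Image counts must still match exactly, as in the updated check above.
    if len(original_images) != len(cropped_images):
        raise ValueError(
            f"The number of original_images ({len(original_images)}) and "
            f"cropped_images ({len(cropped_images)}) should be the same"
        )
    # Surplus bboxes are dropped with a warning; a shortage is still an error.
    if len(bboxes) > len(original_images):
        print(f"Warning: Dropping excess bounding boxes. "
              f"Expected {len(original_images)}, but got {len(bboxes)}")
        bboxes = bboxes[:len(original_images)]
    elif len(bboxes) < len(original_images):
        raise ValueError("There should be at least as many bboxes "
                         "as there are original and cropped images")
    return bboxes
```

Called with four image pairs and six bboxes this keeps the first four and warns; with only three bboxes it raises. The next hunk applies the identical change to BatchUncropAdvanced.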
@@ -2140,8 +2147,15 @@ class BatchUncropAdvanced:
             draw.rectangle((0, 0, width - 1, height - 1), outline=border_color, width=border_width)
             return bordered_image

-        if len(original_images) != len(cropped_images) or len(original_images) != len(bboxes):
-            raise ValueError("The number of images, crop_images, and bboxes should be the same")
+        if len(original_images) != len(cropped_images):
+            raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same")
+
+        # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images
+        if len(bboxes) > len(original_images):
+            print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}")
+            bboxes = bboxes[:len(original_images)]
+        elif len(bboxes) < len(original_images):
+            raise ValueError("There should be at least as many bboxes as there are original and cropped images")

         crop_imgs = tensor2pil(cropped_images)
         input_images = tensor2pil(original_images)