mirror of
https://git.datalinker.icu/kijai/ComfyUI-KJNodes.git
synced 2026-01-23 12:24:29 +08:00
LoadImagesFromFolderKJ
This commit is contained in:
parent
a23d734fa0
commit
7e7a73191c
@ -63,6 +63,7 @@ NODE_CONFIG = {
|
||||
"ImageUpscaleWithModelBatched": {"class": ImageUpscaleWithModelBatched, "name": "Image Upscale With Model Batched"},
|
||||
"InsertImagesToBatchIndexed": {"class": InsertImagesToBatchIndexed, "name": "Insert Images To Batch Indexed"},
|
||||
"LoadAndResizeImage": {"class": LoadAndResizeImage, "name": "Load & Resize Image"},
|
||||
"LoadImagesFromFolderKJ": {"class": LoadImagesFromFolderKJ, "name": "Load Images From Folder (KJ)"},
|
||||
"MergeImageChannels": {"class": MergeImageChannels, "name": "Merge Image Channels"},
|
||||
"PreviewAnimation": {"class": PreviewAnimation, "name": "Preview Animation"},
|
||||
"RemapImageRange": {"class": RemapImageRange, "name": "Remap Image Range"},
|
||||
|
||||
@ -1682,3 +1682,100 @@ class LoadAndResizeImage:
|
||||
return "Invalid image file: {}".format(image)
|
||||
|
||||
return True
|
||||
|
||||
class LoadImagesFromFolderKJ:
    """ComfyUI node: load every supported image in a folder as one batch.

    Outputs:
        image (IMAGE): all images stacked on the batch dimension; images whose
            spatial size differs from the first are bilinearly resized to match.
        mask (MASK): per-image masks, inverted alpha channel when present,
            otherwise a 64x64 zero placeholder.
        count (INT): number of images actually loaded.
        image_path (STRING): list of the loaded files' full paths.

    Raises:
        FileNotFoundError: if the folder does not exist, is empty, or contains
            no loadable images in the requested range.
    """

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "folder": ("STRING", {"default": ""}),
            },
            "optional": {
                # 0 means "no cap": load everything from start_index onward.
                "image_load_cap": ("INT", {"default": 0, "min": 0, "step": 1}),
                "start_index": ("INT", {"default": 0, "min": 0, "step": 1}),
            }
        }

    RETURN_TYPES = ("IMAGE", "MASK", "INT", "STRING",)
    RETURN_NAMES = ("image", "mask", "count", "image_path",)
    FUNCTION = "load_images"

    CATEGORY = "image"

    # FIX: optional inputs now have defaults matching INPUT_TYPES, so the node
    # works even when the optional sockets are left unconnected.
    def load_images(self, folder, image_load_cap=0, start_index=0):
        if not os.path.isdir(folder):
            # FIX: the closing quote was misplaced after the period.
            raise FileNotFoundError(f"Folder '{folder}' cannot be found.")
        dir_files = os.listdir(folder)
        if len(dir_files) == 0:
            raise FileNotFoundError(f"No files in directory '{folder}'.")

        # Filter files by extension
        valid_extensions = ['.jpg', '.jpeg', '.png', '.webp']
        dir_files = [f for f in dir_files if any(f.lower().endswith(ext) for ext in valid_extensions)]

        dir_files = sorted(dir_files)
        dir_files = [os.path.join(folder, x) for x in dir_files]

        # start at start_index
        dir_files = dir_files[start_index:]

        images = []
        masks = []
        image_path_list = []

        limit_images = image_load_cap > 0
        image_count = 0

        # Tracks whether any image supplied a real alpha mask; decides whether
        # masks are interpolated to the image size or just batched as-is.
        has_non_empty_mask = False

        for image_path in dir_files:
            # FIX: original tested `os.path.ex`, which does not exist and
            # raised AttributeError; the intent is to skip sub-directories.
            if os.path.isdir(image_path):
                continue
            if limit_images and image_count >= image_load_cap:
                break
            i = Image.open(image_path)
            # Apply EXIF orientation so rotated photos load upright.
            i = ImageOps.exif_transpose(i)
            image = i.convert("RGB")
            image = np.array(image).astype(np.float32) / 255.0
            # Shape becomes [1, H, W, C], ComfyUI's IMAGE layout.
            image = torch.from_numpy(image)[None,]
            if 'A' in i.getbands():
                # Masks are the inverted alpha channel (1 = transparent).
                mask = np.array(i.getchannel('A')).astype(np.float32) / 255.0
                mask = 1. - torch.from_numpy(mask)
                has_non_empty_mask = True
            else:
                mask = torch.zeros((64, 64), dtype=torch.float32, device="cpu")
            images.append(image)
            masks.append(mask)
            image_path_list.append(image_path)
            image_count += 1

        # FIX: falling through with no images returned None, violating the
        # declared outputs; fail loudly instead.
        if not images:
            raise FileNotFoundError(f"No valid images found in directory '{folder}'.")

        if len(images) == 1:
            # FIX: was a 3-tuple, missing the declared image_path output.
            return (images[0], masks[0], 1, image_path_list)

        # Multiple images: resize any mismatched image to the first image's
        # size, then concatenate along the batch dimension.
        image1 = images[0]
        mask1 = None

        for image2 in images[1:]:
            if image1.shape[1:] != image2.shape[1:]:
                image2 = common_upscale(image2.movedim(-1, 1), image1.shape[2], image1.shape[1], "bilinear", "center").movedim(1, -1)
            image1 = torch.cat((image1, image2), dim=0)

        for mask2 in masks[1:]:
            if has_non_empty_mask:
                if image1.shape[1:3] != mask2.shape:
                    # FIX: interpolate size must be (H, W) = shape[1], shape[2];
                    # the original passed (W, H), producing a transposed-size mask.
                    mask2 = torch.nn.functional.interpolate(mask2.unsqueeze(0).unsqueeze(0), size=(image1.shape[1], image1.shape[2]), mode='bilinear', align_corners=False)
                    mask2 = mask2.squeeze(0)
                else:
                    mask2 = mask2.unsqueeze(0)
            else:
                mask2 = mask2.unsqueeze(0)

            if mask1 is None:
                mask1 = mask2
            else:
                mask1 = torch.cat((mask1, mask2), dim=0)

        return (image1, mask1, len(images), image_path_list)
|
||||
Loading…
x
Reference in New Issue
Block a user