Merge branch 'main' into develop

kijai 2024-04-19 18:40:44 +03:00
commit 891daeb438
2 changed files with 270 additions and 4 deletions

config.json Normal file

@@ -0,0 +1,3 @@
{
"sai_api_key": "your_api_key_here"
}
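
The sai_api_key set here is what the new StabilityAPI_SD3 node falls back to when no key is entered on the node itself. A minimal sketch of that lookup, assuming script_directory points at this node pack's folder as it does elsewhere in nodes.py:

import os
import json

config_file_path = os.path.join(script_directory, "config.json")
with open(config_file_path, "r") as file:
    config = json.load(file)
api_key = config.get("sai_api_key")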

nodes.py

@@ -4519,6 +4519,7 @@ class ImageAndMaskPreview(SaveImage):
"required": {
"mask_opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
"mask_color": ("STRING", {"default": "255, 255, 255"}),
"pass_through": ("BOOLEAN", {"default": False}),
},
"optional": {
"image": ("IMAGE",),
@@ -4526,15 +4527,20 @@ class ImageAndMaskPreview(SaveImage):
},
"hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"},
}
RETURN_TYPES = ("IMAGE",)
RETURN_NAMES = ("composite",)
FUNCTION = "execute"
CATEGORY = "KJNodes"
DESCRIPTION = """
Preview an image or a mask; when both inputs are used,
the mask is composited on top of the image.
With pass_through enabled the preview is disabled and the
composite is returned from the composite slot instead.
This allows the composite to be passed on to, for example,
video combine nodes.
"""
def execute(self, mask_opacity, mask_color, filename_prefix="ComfyUI", image=None, mask=None, prompt=None, extra_pnginfo=None):
def execute(self, mask_opacity, mask_color, pass_through, filename_prefix="ComfyUI", image=None, mask=None, prompt=None, extra_pnginfo=None):
if mask is not None and image is None:
preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3)
elif mask is None and image is not None:
@@ -4550,7 +4556,9 @@ composites the mask on top of the image.
mask_image[:, :, :, 2] = color_list[2] // 255 # Blue channel
preview, = ImageCompositeMasked.composite(self, image, mask_image, 0, 0, True, mask_adjusted)
return self.save_images(preview, filename_prefix, prompt, extra_pnginfo)
if pass_through:
# return the composite directly so it can be chained into downstream nodes instead of being previewed
return (preview,)
return self.save_images(preview, filename_prefix, prompt, extra_pnginfo)
class SplineEditor:
@@ -4605,7 +4613,256 @@ class SplineEditor:
print(masks_out.shape)
return (masks_out, coordinates, normalized_y_values,)
class StabilityAPI_SD3:
@classmethod
def INPUT_TYPES(cls):
return {
"required": {
"prompt": ("STRING", {"multiline": True}),
"n_prompt": ("STRING", {"multiline": True}),
"seed": ("INT", {"default": 123,"min": 0, "max": 4294967294, "step": 1}),
"model": (
[
'sd3',
'sd3-turbo',
],
{
"default": 'sd3'
}),
"aspect_ratio": (
[
'1:1',
'16:9',
'21:9',
'2:3',
'3:2',
'4:5',
'5:4',
'9:16',
'9:21',
],
{
"default": '1:1'
}),
"output_format": (
[
'png',
'jpeg',
],
{
"default": 'jpeg'
}),
},
"optional": {
"api_key": ("STRING", {"multiline": True}),
"image": ("IMAGE",),
"img2img_strength": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}),
"disable_metadata": ("BOOLEAN", {"default": True}),
},
}
RETURN_TYPES = ("IMAGE",)
FUNCTION = "apicall"
CATEGORY = "KJNodes/experimental"
DESCRIPTION = """
## Calls the StabilityAI API
Although you may have multiple keys in your account,
you should use the same key for all requests to this API.
Get your API key here: https://platform.stability.ai/account/keys
It is recommended to set the key in the config.json file
in this node pack's folder.
# WARNING:
If the key is entered on the node instead, it may get saved in the
image metadata even with "disable_metadata" on, if the workflow
includes save nodes separate from this node.
sd3 requires 6.5 credits per generation.
sd3-turbo requires 4 credits per generation.
If no image is provided, the mode is set to text-to-image.
"""
def apicall(self, prompt, n_prompt, model, seed, aspect_ratio, output_format,
img2img_strength=0.5, image=None, disable_metadata=True, api_key=""):
from comfy.cli_args import args
if disable_metadata:
args.disable_metadata = True
else:
args.disable_metadata = False
import requests
from io import BytesIO
from torchvision import transforms
data = {
"mode": "text-to-image",
"prompt": prompt,
"model": model,
"seed": seed,
"output_format": output_format
}
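# when an input image is provided, the request switches to image-to-image mode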
if image is not None:
image = image.permute(0, 3, 1, 2).squeeze(0)
to_pil = transforms.ToPILImage()
pil_image = to_pil(image)
# Save the PIL Image to a BytesIO object
buffer = BytesIO()
pil_image.save(buffer, format='PNG')
buffer.seek(0)
# the image itself is sent as a multipart file, not as a form field
files = {"image": ("image.png", buffer, "image/png")}
data["mode"] = "image-to-image"
data["strength"] = img2img_strength
else:
data["aspect_ratio"] = aspect_ratio,
files = {"none": ''}
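# the negative prompt is only sent for the full sd3 model; sd3-turbo does not use it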
if model != "sd3-turbo":
data["negative_prompt"] = n_prompt
headers={
"accept": "image/*"
}
if api_key != "":
headers["authorization"] = api_key
else:
config_file_path = os.path.join(script_directory,"config.json")
with open(config_file_path, 'r') as file:
config = json.load(file)
api_key_from_config = config.get("sai_api_key")
headers["authorization"] = api_key_from_config
response = requests.post(
"https://api.stability.ai/v2beta/stable-image/generate/sd3",
headers=headers,
files=files,
data=data,
)
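# a 200 response carries the raw image bytes (per the accept: image/* header); anything else is treated as a JSON error payload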
if response.status_code == 200:
# Convert the response content to a PIL Image
image = Image.open(BytesIO(response.content))
# Convert the PIL Image to a PyTorch tensor
transform = transforms.ToTensor()
tensor_image = transform(image)
tensor_image = tensor_image.unsqueeze(0)
tensor_image = tensor_image.permute(0, 2, 3, 1).cpu().float()
return (tensor_image,)
else:
try:
# Attempt to parse the response as JSON
error_data = response.json()
raise Exception(f"Server error: {error_data}")
except json.JSONDecodeError:
# If the response is not valid JSON, raise a different exception
raise Exception(f"Server error: {response.text}")
class MaskOrImageToWeight:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"output_type": (
[
'list',
'list of lists',
'pandas series',
],
{
"default": 'list'
}),
},
"optional": {
"images": ("IMAGE",),
"masks": ("MASK",),
},
}
RETURN_TYPES = ("FLOAT",)
FUNCTION = "execute"
CATEGORY = "KJNodes"
DESCRIPTION = """
Gets the mean value of each input mask or image
and returns it as a float value (one value per batch item).
"""
def execute(self, output_type, images=None, masks=None):
mean_values = []
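# collect one mean value per mask or image in the batch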
if masks is not None and images is None:
for mask in masks:
mean_values.append(mask.mean().item())
print(mean_values)
elif masks is None and images is not None:
for image in images:
mean_values.append(image.mean().item())
elif masks is not None and images is not None:
raise Exception("MaskOrImageToWeight: Use either mask or image input only.")
# Convert mean_values to the specified output_type
if output_type == 'list':
return mean_values,
elif output_type == 'list of lists':
return [[value] for value in mean_values],
elif output_type == 'pandas series':
try:
import pandas as pd
except ImportError:
raise Exception("MaskOrImageToWeight: pandas is not installed. Please install pandas to use this output_type")
return pd.Series(mean_values),
else:
raise ValueError(f"Unsupported output_type: {output_type}")
class FloatToMask:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"input_values": ("FLOAT", {"forceInput": True, "default": 0}),
"width": ("INT", {"default": 100, "min": 1}),
"height": ("INT", {"default": 100, "min": 1}),
},
}
RETURN_TYPES = ("MASK",)
FUNCTION = "execute"
CATEGORY = "KJNodes"
DESCRIPTION = """
Generates a batch of masks based on the input float values.
The batch size is determined by the length of the input float values.
Each mask is generated with the specified width and height.
"""
def execute(self, input_values, width, height):
# pandas is only needed when the input actually is a pandas Series
try:
import pandas as pd
except ImportError:
pd = None
# Ensure input_values is a list
if isinstance(input_values, (float, int)):
input_values = [input_values]
elif pd is not None and isinstance(input_values, pd.Series):
input_values = input_values.tolist()
elif isinstance(input_values, list) and all(isinstance(item, list) for item in input_values):
input_values = [item for sublist in input_values for item in sublist]
# Generate a batch of masks based on the input_values
masks = []
for value in input_values:
# Assuming value is a float between 0 and 1 representing the mask's intensity
mask = torch.ones((height, width), dtype=torch.float32) * value
masks.append(mask)
masks_out = torch.stack(masks, dim=0)
print(masks_out.shape)
return(masks_out,)
NODE_CLASS_MAPPINGS = {
"INTConstant": INTConstant,
"FloatConstant": FloatConstant,
@@ -4685,7 +4942,10 @@ NODE_CLASS_MAPPINGS = {
"Sleep": Sleep,
"ImagePadForOutpaintMasked": ImagePadForOutpaintMasked,
"SplineEditor": SplineEditor,
"ImageAndMaskPreview": ImageAndMaskPreview
"ImageAndMaskPreview": ImageAndMaskPreview,
"StabilityAPI_SD3": StabilityAPI_SD3,
"MaskOrImageToWeight": MaskOrImageToWeight,
"FloatToMask": FloatToMask
}
NODE_DISPLAY_NAME_MAPPINGS = {
"INTConstant": "INT Constant",
@@ -4767,4 +5027,7 @@ NODE_DISPLAY_NAME_MAPPINGS = {
"ImagePadForOutpaintMasked": "Pad Image For Outpaint Masked",
"SplineEditor": "Spline Editor",
"ImageAndMaskPreview": "Image & Mask Preview",
"StabilityAPI_SD3": "Stability API SD3",
"MaskOrImageToWeight": "Mask Or Image To Weight",
"FloatToMask": "Float To Mask",
}
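
As a usage sketch (outside ComfyUI, and assuming the usual ComfyUI mask convention of a (batch, height, width) tensor), the two weight/mask nodes above round-trip like this: MaskOrImageToWeight reduces each mask to its mean, and FloatToMask turns the resulting floats back into constant masks of a chosen size.

import torch

masks = torch.rand(4, 64, 64)                        # a batch of 4 masks
weights = [m.mean().item() for m in masks]           # MaskOrImageToWeight with output_type 'list'
restored = torch.stack([torch.ones(32, 32) * w for w in weights])  # FloatToMask with width=32, height=32
print(weights)          # 4 floats between 0 and 1
print(restored.shape)   # torch.Size([4, 32, 32])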