[API Nodes] add Flux.2 Pro node (#10880)
parent 6b573ae0cb
commit 5c7b08ca58
@@ -70,6 +70,29 @@ class BFLFluxProGenerateRequest(BaseModel):
     # )


+class Flux2ProGenerateRequest(BaseModel):
+    prompt: str = Field(...)
+    width: int = Field(1024, description="Must be a multiple of 32.")
+    height: int = Field(768, description="Must be a multiple of 32.")
+    seed: int | None = Field(None)
+    prompt_upsampling: bool | None = Field(None)
+    input_image: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_2: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_3: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_4: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_5: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_6: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_7: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_8: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    input_image_9: str | None = Field(None, description="Base64 encoded image for image-to-image generation")
+    safety_tolerance: int | None = Field(
+        5, description="Tolerance level for input and output moderation. Value 0 being most strict.", ge=0, le=5
+    )
+    output_format: str | None = Field(
+        "png", description="Output format for the generated image. Can be 'jpeg' or 'png'."
+    )
+
+
 class BFLFluxKontextProGenerateRequest(BaseModel):
     prompt: str = Field(..., description='The text prompt for what you wannt to edit.')
     input_image: Optional[str] = Field(None, description='Image to edit in base64 format')
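Editor's note (not part of the commit): a minimal, self-contained sketch of how the request model added above could serialize, assuming pydantic v2 and that unset optional fields are dropped with exclude_none; how the util layer actually serializes requests is not shown in this diff, and the trimmed model below is copied here only for illustration.

# Hypothetical illustration only: a trimmed copy of Flux2ProGenerateRequest from the hunk above.
from pydantic import BaseModel, Field

class Flux2ProGenerateRequestSketch(BaseModel):
    prompt: str = Field(...)
    width: int = Field(1024, description="Must be a multiple of 32.")
    height: int = Field(768, description="Must be a multiple of 32.")
    seed: int | None = Field(None)
    input_image: str | None = Field(None)
    input_image_2: str | None = Field(None)

req = Flux2ProGenerateRequestSketch(
    prompt="a red fox", width=1024, height=768, seed=42,
    input_image="<base64 image 1>", input_image_2="<base64 image 2>",
)
# Unused optional slots (input_image_3 ... input_image_9 in the full model) drop out of the payload:
print(req.model_dump_json(exclude_none=True))
# {"prompt":"a red fox","width":1024,"height":768,"seed":42,"input_image":"<base64 image 1>","input_image_2":"<base64 image 2>"}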
@@ -109,8 +132,9 @@ class BFLFluxProUltraGenerateRequest(BaseModel):


 class BFLFluxProGenerateResponse(BaseModel):
-    id: str = Field(..., description='The unique identifier for the generation task.')
-    polling_url: str = Field(..., description='URL to poll for the generation result.')
+    id: str = Field(..., description="The unique identifier for the generation task.")
+    polling_url: str = Field(..., description="URL to poll for the generation result.")
+    cost: float | None = Field(None, description="Price in cents")


 class BFLStatus(str, Enum):
@@ -1,7 +1,7 @@
 from inspect import cleandoc
-from typing import Optional

 import torch
+from pydantic import BaseModel
 from typing_extensions import override

 from comfy_api.latest import IO, ComfyExtension
@@ -9,15 +9,16 @@ from comfy_api_nodes.apis.bfl_api import (
     BFLFluxExpandImageRequest,
     BFLFluxFillImageRequest,
     BFLFluxKontextProGenerateRequest,
-    BFLFluxProGenerateRequest,
     BFLFluxProGenerateResponse,
     BFLFluxProUltraGenerateRequest,
     BFLFluxStatusResponse,
     BFLStatus,
+    Flux2ProGenerateRequest,
 )
 from comfy_api_nodes.util import (
     ApiEndpoint,
     download_url_to_image_tensor,
+    get_number_of_images,
     poll_op,
     resize_mask_to_image,
     sync_op,
@@ -116,7 +117,7 @@ class FluxProUltraImageNode(IO.ComfyNode):
         prompt_upsampling: bool = False,
         raw: bool = False,
         seed: int = 0,
-        image_prompt: Optional[torch.Tensor] = None,
+        image_prompt: torch.Tensor | None = None,
         image_prompt_strength: float = 0.1,
     ) -> IO.NodeOutput:
         if image_prompt is None:
@@ -230,7 +231,7 @@ class FluxKontextProImageNode(IO.ComfyNode):
         aspect_ratio: str,
         guidance: float,
         steps: int,
-        input_image: Optional[torch.Tensor] = None,
+        input_image: torch.Tensor | None = None,
         seed=0,
         prompt_upsampling=False,
     ) -> IO.NodeOutput:
@@ -280,124 +281,6 @@ class FluxKontextMaxImageNode(FluxKontextProImageNode):
     DISPLAY_NAME = "Flux.1 Kontext [max] Image"


-class FluxProImageNode(IO.ComfyNode):
-    """
-    Generates images synchronously based on prompt and resolution.
-    """
-
-    @classmethod
-    def define_schema(cls) -> IO.Schema:
-        return IO.Schema(
-            node_id="FluxProImageNode",
-            display_name="Flux 1.1 [pro] Image",
-            category="api node/image/BFL",
-            description=cleandoc(cls.__doc__ or ""),
-            inputs=[
-                IO.String.Input(
-                    "prompt",
-                    multiline=True,
-                    default="",
-                    tooltip="Prompt for the image generation",
-                ),
-                IO.Boolean.Input(
-                    "prompt_upsampling",
-                    default=False,
-                    tooltip="Whether to perform upsampling on the prompt. "
-                    "If active, automatically modifies the prompt for more creative generation, "
-                    "but results are nondeterministic (same seed will not produce exactly the same result).",
-                ),
-                IO.Int.Input(
-                    "width",
-                    default=1024,
-                    min=256,
-                    max=1440,
-                    step=32,
-                ),
-                IO.Int.Input(
-                    "height",
-                    default=768,
-                    min=256,
-                    max=1440,
-                    step=32,
-                ),
-                IO.Int.Input(
-                    "seed",
-                    default=0,
-                    min=0,
-                    max=0xFFFFFFFFFFFFFFFF,
-                    control_after_generate=True,
-                    tooltip="The random seed used for creating the noise.",
-                ),
-                IO.Image.Input(
-                    "image_prompt",
-                    optional=True,
-                ),
-                # "image_prompt_strength": (
-                #     IO.FLOAT,
-                #     {
-                #         "default": 0.1,
-                #         "min": 0.0,
-                #         "max": 1.0,
-                #         "step": 0.01,
-                #         "tooltip": "Blend between the prompt and the image prompt.",
-                #     },
-                # ),
-            ],
-            outputs=[IO.Image.Output()],
-            hidden=[
-                IO.Hidden.auth_token_comfy_org,
-                IO.Hidden.api_key_comfy_org,
-                IO.Hidden.unique_id,
-            ],
-            is_api_node=True,
-        )
-
-    @classmethod
-    async def execute(
-        cls,
-        prompt: str,
-        prompt_upsampling,
-        width: int,
-        height: int,
-        seed=0,
-        image_prompt=None,
-        # image_prompt_strength=0.1,
-    ) -> IO.NodeOutput:
-        image_prompt = image_prompt if image_prompt is None else tensor_to_base64_string(image_prompt)
-        initial_response = await sync_op(
-            cls,
-            ApiEndpoint(
-                path="/proxy/bfl/flux-pro-1.1/generate",
-                method="POST",
-            ),
-            response_model=BFLFluxProGenerateResponse,
-            data=BFLFluxProGenerateRequest(
-                prompt=prompt,
-                prompt_upsampling=prompt_upsampling,
-                width=width,
-                height=height,
-                seed=seed,
-                image_prompt=image_prompt,
-            ),
-        )
-        response = await poll_op(
-            cls,
-            ApiEndpoint(initial_response.polling_url),
-            response_model=BFLFluxStatusResponse,
-            status_extractor=lambda r: r.status,
-            progress_extractor=lambda r: r.progress,
-            completed_statuses=[BFLStatus.ready],
-            failed_statuses=[
-                BFLStatus.request_moderated,
-                BFLStatus.content_moderated,
-                BFLStatus.error,
-                BFLStatus.task_not_found,
-            ],
-            queued_statuses=[],
-        )
-        return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
-
-
 class FluxProExpandNode(IO.ComfyNode):
     """
     Outpaints image based on prompt.
@@ -640,16 +523,125 @@ class FluxProFillNode(IO.ComfyNode):
         return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))


+class Flux2ProImageNode(IO.ComfyNode):
+
+    @classmethod
+    def define_schema(cls) -> IO.Schema:
+        return IO.Schema(
+            node_id="Flux2ProImageNode",
+            display_name="Flux.2 [pro] Image",
+            category="api node/image/BFL",
+            description="Generates images synchronously based on prompt and resolution.",
+            inputs=[
+                IO.String.Input(
+                    "prompt",
+                    multiline=True,
+                    default="",
+                    tooltip="Prompt for the image generation or edit",
+                ),
+                IO.Int.Input(
+                    "width",
+                    default=1024,
+                    min=256,
+                    max=2048,
+                    step=32,
+                ),
+                IO.Int.Input(
+                    "height",
+                    default=768,
+                    min=256,
+                    max=2048,
+                    step=32,
+                ),
+                IO.Int.Input(
+                    "seed",
+                    default=0,
+                    min=0,
+                    max=0xFFFFFFFFFFFFFFFF,
+                    control_after_generate=True,
+                    tooltip="The random seed used for creating the noise.",
+                ),
+                IO.Boolean.Input(
+                    "prompt_upsampling",
+                    default=False,
+                    tooltip="Whether to perform upsampling on the prompt. "
+                    "If active, automatically modifies the prompt for more creative generation, "
+                    "but results are nondeterministic (same seed will not produce exactly the same result).",
+                ),
+                IO.Image.Input("images", optional=True, tooltip="Up to 4 images to be used as references."),
+            ],
+            outputs=[IO.Image.Output()],
+            hidden=[
+                IO.Hidden.auth_token_comfy_org,
+                IO.Hidden.api_key_comfy_org,
+                IO.Hidden.unique_id,
+            ],
+            is_api_node=True,
+        )
+
+    @classmethod
+    async def execute(
+        cls,
+        prompt: str,
+        width: int,
+        height: int,
+        seed: int,
+        prompt_upsampling: bool,
+        images: torch.Tensor | None = None,
+    ) -> IO.NodeOutput:
+        reference_images = {}
+        if images is not None:
+            if get_number_of_images(images) > 9:
+                raise ValueError("The current maximum number of supported images is 9.")
+            for image_index in range(images.shape[0]):
+                key_name = f"input_image_{image_index + 1}" if image_index else "input_image"
+                reference_images[key_name] = tensor_to_base64_string(images[image_index], total_pixels=2048 * 2048)
+        initial_response = await sync_op(
+            cls,
+            ApiEndpoint(path="/proxy/bfl/flux-2-pro/generate", method="POST"),
+            response_model=BFLFluxProGenerateResponse,
+            data=Flux2ProGenerateRequest(
+                prompt=prompt,
+                width=width,
+                height=height,
+                seed=seed,
+                prompt_upsampling=prompt_upsampling,
+                **reference_images,
+            ),
+        )
+
+        def price_extractor(_r: BaseModel) -> float | None:
+            return None if initial_response.cost is None else initial_response.cost / 100
+
+        response = await poll_op(
+            cls,
+            ApiEndpoint(initial_response.polling_url),
+            response_model=BFLFluxStatusResponse,
+            status_extractor=lambda r: r.status,
+            progress_extractor=lambda r: r.progress,
+            price_extractor=price_extractor,
+            completed_statuses=[BFLStatus.ready],
+            failed_statuses=[
+                BFLStatus.request_moderated,
+                BFLStatus.content_moderated,
+                BFLStatus.error,
+                BFLStatus.task_not_found,
+            ],
+            queued_statuses=[],
+        )
+        return IO.NodeOutput(await download_url_to_image_tensor(response.result["sample"]))
+
+
 class BFLExtension(ComfyExtension):
     @override
     async def get_node_list(self) -> list[type[IO.ComfyNode]]:
         return [
             FluxProUltraImageNode,
-            # FluxProImageNode,
             FluxKontextProImageNode,
             FluxKontextMaxImageNode,
             FluxProExpandNode,
             FluxProFillNode,
+            Flux2ProImageNode,
         ]
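Editor's note (not part of the commit): two small behaviors of Flux2ProImageNode.execute() above, restated as a standalone sketch. The helper names below (reference_image_keys, price_in_dollars) are hypothetical and exist only for this illustration.

# Batch index -> request field name, mirroring the key_name logic above:
# index 0 fills "input_image", index 1 fills "input_image_2", ..., index 8 fills "input_image_9".
def reference_image_keys(num_images: int) -> list[str]:
    if num_images > 9:
        raise ValueError("The current maximum number of supported images is 9.")
    return [f"input_image_{i + 1}" if i else "input_image" for i in range(num_images)]

assert reference_image_keys(3) == ["input_image", "input_image_2", "input_image_3"]

# BFLFluxProGenerateResponse.cost is documented as "Price in cents", so the node's
# price_extractor divides the reported value by 100 before handing it to poll_op.
def price_in_dollars(cost_cents: float | None) -> float | None:
    return None if cost_cents is None else cost_cents / 100

assert price_in_dollars(350) == 3.5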
@@ -36,6 +36,7 @@ from .upload_helpers import (
     upload_video_to_comfyapi,
 )
 from .validation_utils import (
+    get_image_dimensions,
     get_number_of_images,
     validate_aspect_ratio_string,
     validate_audio_duration,
@@ -82,6 +83,7 @@ __all__ = [
     "trim_video",
     "video_to_base64_string",
     # Validation utilities
+    "get_image_dimensions",
     "get_number_of_images",
     "validate_aspect_ratio_string",
     "validate_audio_duration",