chore(api-nodes): remove chat widgets from OpenAI/Gemini nodes (#10861)

Author: Alexander Piskun, 2025-11-27 00:42:01 +02:00 (committed by GitHub)
parent dd41b74549
commit d8433c63fd
2 changed files with 11 additions and 112 deletions


@@ -4,10 +4,7 @@ See: https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/infer
 """
 import base64
-import json
 import os
-import time
-import uuid
 from enum import Enum
 from io import BytesIO
 from typing import Literal
@@ -43,7 +40,6 @@ from comfy_api_nodes.util import (
     validate_string,
     video_to_base64_string,
 )
-from server import PromptServer

 GEMINI_BASE_ENDPOINT = "/proxy/vertexai/gemini"
 GEMINI_MAX_INPUT_FILE_SIZE = 20 * 1024 * 1024  # 20 MB
@@ -384,29 +380,6 @@ class GeminiNode(IO.ComfyNode):
         )
         output_text = get_text_from_response(response)
-        if output_text:
-            # Not a true chat history like the OpenAI Chat node. It is emulated so the frontend can show a copy button.
-            render_spec = {
-                "node_id": cls.hidden.unique_id,
-                "component": "ChatHistoryWidget",
-                "props": {
-                    "history": json.dumps(
-                        [
-                            {
-                                "prompt": prompt,
-                                "response": output_text,
-                                "response_id": str(uuid.uuid4()),
-                                "timestamp": time.time(),
-                            }
-                        ]
-                    ),
-                },
-            }
-            PromptServer.instance.send_sync(
-                "display_component",
-                render_spec,
-            )

         return IO.NodeOutput(output_text or "Empty response from Gemini model...")
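All three Gemini nodes, and the OpenAI chat node later in this commit, emitted the same "display_component" payload, which is why the deleted blocks repeat almost verbatim. For reference, here is the removed pattern factored into one helper. This is a minimal sketch: the emit_chat_history name is hypothetical, and it assumes the frontend still recognizes the ChatHistoryWidget component and the "display_component" event.

import json
import time
import uuid

from server import PromptServer


def emit_chat_history(node_id: str, prompt: str, response_text: str) -> None:
    # Build the single-entry "history" the frontend widget expects.
    # As the removed comment notes, this is not a true chat history;
    # it is emulated so the frontend can show a copy button.
    render_spec = {
        "node_id": node_id,
        "component": "ChatHistoryWidget",
        "props": {
            "history": json.dumps(
                [
                    {
                        "prompt": prompt,
                        "response": response_text,
                        "response_id": str(uuid.uuid4()),
                        "timestamp": time.time(),
                    }
                ]
            ),
        },
    }
    # Push the widget spec to the frontend over the server's sync channel.
    PromptServer.instance.send_sync("display_component", render_spec)

Each inlined block deleted in this commit is equivalent to calling emit_chat_history(cls.hidden.unique_id, prompt, output_text).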
@@ -601,30 +574,7 @@ class GeminiImage(IO.ComfyNode):
             response_model=GeminiGenerateContentResponse,
             price_extractor=calculate_tokens_price,
         )
-        output_text = get_text_from_response(response)
-        if output_text:
-            render_spec = {
-                "node_id": cls.hidden.unique_id,
-                "component": "ChatHistoryWidget",
-                "props": {
-                    "history": json.dumps(
-                        [
-                            {
-                                "prompt": prompt,
-                                "response": output_text,
-                                "response_id": str(uuid.uuid4()),
-                                "timestamp": time.time(),
-                            }
-                        ]
-                    ),
-                },
-            }
-            PromptServer.instance.send_sync(
-                "display_component",
-                render_spec,
-            )
-        return IO.NodeOutput(get_image_from_response(response), output_text)
+        return IO.NodeOutput(get_image_from_response(response), get_text_from_response(response))

 class GeminiImage2(IO.ComfyNode):
@@ -744,30 +694,7 @@ class GeminiImage2(IO.ComfyNode):
             response_model=GeminiGenerateContentResponse,
             price_extractor=calculate_tokens_price,
         )
-        output_text = get_text_from_response(response)
-        if output_text:
-            render_spec = {
-                "node_id": cls.hidden.unique_id,
-                "component": "ChatHistoryWidget",
-                "props": {
-                    "history": json.dumps(
-                        [
-                            {
-                                "prompt": prompt,
-                                "response": output_text,
-                                "response_id": str(uuid.uuid4()),
-                                "timestamp": time.time(),
-                            }
-                        ]
-                    ),
-                },
-            }
-            PromptServer.instance.send_sync(
-                "display_component",
-                render_spec,
-            )
-        return IO.NodeOutput(get_image_from_response(response), output_text)
+        return IO.NodeOutput(get_image_from_response(response), get_text_from_response(response))

 class GeminiExtension(ComfyExtension):


@@ -1,15 +1,10 @@
 from io import BytesIO
-from typing import Optional, Union
-import json
 import os
-import time
-import uuid
 from enum import Enum
 from inspect import cleandoc
 import numpy as np
 import torch
 from PIL import Image
-from server import PromptServer
 import folder_paths
 import base64
 from comfy_api.latest import IO, ComfyExtension
@@ -587,11 +582,11 @@ class OpenAIChatNode(IO.ComfyNode):
     def create_input_message_contents(
         cls,
         prompt: str,
-        image: Optional[torch.Tensor] = None,
-        files: Optional[list[InputFileContent]] = None,
+        image: torch.Tensor | None = None,
+        files: list[InputFileContent] | None = None,
     ) -> InputMessageContentList:
         """Create a list of input message contents from prompt and optional image."""
-        content_list: list[Union[InputContent, InputTextContent, InputImageContent, InputFileContent]] = [
+        content_list: list[InputContent | InputTextContent | InputImageContent | InputFileContent] = [
             InputTextContent(text=prompt, type="input_text"),
         ]
         if image is not None:
@@ -617,9 +612,9 @@
         prompt: str,
         persist_context: bool = False,
         model: SupportedOpenAIModel = SupportedOpenAIModel.gpt_5.value,
-        images: Optional[torch.Tensor] = None,
-        files: Optional[list[InputFileContent]] = None,
-        advanced_options: Optional[CreateModelResponseProperties] = None,
+        images: torch.Tensor | None = None,
+        files: list[InputFileContent] | None = None,
+        advanced_options: CreateModelResponseProperties | None = None,
     ) -> IO.NodeOutput:

         validate_string(prompt, strip_whitespace=False)
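The signature rewrites in the two hunks above (and in the OpenAIChatConfig hunk at the end of this file) replace typing.Optional/typing.Union with PEP 604 union syntax, which is why the typing import could be dropped; note that bare X | None annotations evaluate at runtime only on Python 3.10+. A minimal before/after sketch with hypothetical function names:

from typing import Optional, Union


# Before: typing-module generics, importable on older Pythons.
def old_style(instructions: Optional[str] = None) -> Union[str, int]:
    return instructions if instructions is not None else 0


# After: PEP 604 unions (Python 3.10+); no typing import needed.
def new_style(instructions: str | None = None) -> str | int:
    return instructions if instructions is not None else 0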
@@ -660,30 +655,7 @@
             status_extractor=lambda response: response.status,
             completed_statuses=["incomplete", "completed"]
         )
-        output_text = cls.get_text_from_message_content(cls.get_message_content_from_response(result_response))
-        # Update history
-        render_spec = {
-            "node_id": cls.hidden.unique_id,
-            "component": "ChatHistoryWidget",
-            "props": {
-                "history": json.dumps(
-                    [
-                        {
-                            "prompt": prompt,
-                            "response": output_text,
-                            "response_id": str(uuid.uuid4()),
-                            "timestamp": time.time(),
-                        }
-                    ]
-                ),
-            },
-        }
-        PromptServer.instance.send_sync(
-            "display_component",
-            render_spec,
-        )
-        return IO.NodeOutput(output_text)
+        return IO.NodeOutput(cls.get_text_from_message_content(cls.get_message_content_from_response(result_response)))

 class OpenAIInputFiles(IO.ComfyNode):
@@ -790,8 +762,8 @@ class OpenAIChatConfig(IO.ComfyNode):
     def execute(
         cls,
         truncation: bool,
-        instructions: Optional[str] = None,
-        max_output_tokens: Optional[int] = None,
+        instructions: str | None = None,
+        max_output_tokens: int | None = None,
     ) -> IO.NodeOutput:
         """
         Configure advanced options for the OpenAI Chat Node.