New nodes and continued restructuring

This commit is contained in:
kijai 2024-05-01 18:44:30 +03:00
parent edae7ef9d2
commit f259e062c7
5 changed files with 1139 additions and 1047 deletions

View File

@@ -2,6 +2,7 @@ from .nodes.nodes import *
from .nodes.curve_nodes import *
from .nodes.batchcrop_nodes import *
from .nodes.audioscheduler_nodes import *
from .nodes.image_nodes import *
NODE_CLASS_MAPPINGS = {
#constants
"INTConstant": INTConstant,
@@ -80,6 +81,7 @@ NODE_CLASS_MAPPINGS = {
"ScaleBatchPromptSchedule": ScaleBatchPromptSchedule,
"CameraPoseVisualizer": CameraPoseVisualizer,
"JoinStrings": JoinStrings,
"JoinStringMulti": JoinStringMulti,
"Sleep": Sleep,
"VRAM_Debug" : VRAM_Debug,
"SomethingToString" : SomethingToString,

View File

@@ -217,6 +217,7 @@ class MaskOrImageToWeight:
'list',
'pandas series',
'tensor',
'string'
],
{
"default": 'list'
@@ -228,7 +229,7 @@ class MaskOrImageToWeight:
},
}
RETURN_TYPES = ("FLOAT",)
RETURN_TYPES = ("FLOAT", "STRING",)
FUNCTION = "execute"
CATEGORY = "KJNodes"
DESCRIPTION = """
@@ -249,18 +250,17 @@ and returns that as the selected output type.
# Convert mean_values to the specified output_type
if output_type == 'list':
return mean_values,
out = mean_values,
elif output_type == 'pandas series':
try:
import pandas as pd
except:
raise Exception("MaskOrImageToWeight: pandas is not installed. Please install pandas to use this output_type")
return pd.Series(mean_values),
out = pd.Series(mean_values),
elif output_type == 'tensor':
return torch.tensor(mean_values, dtype=torch.float32),
else:
raise ValueError(f"Unsupported output_type: {output_type}")
out = torch.tensor(mean_values, dtype=torch.float32),
return (out, [str(value) for value in mean_values],)
class WeightScheduleConvert:
@classmethod
@@ -287,7 +287,7 @@ class WeightScheduleConvert:
},
}
RETURN_TYPES = ("FLOAT",)
RETURN_TYPES = ("FLOAT", "STRING",)
FUNCTION = "execute"
CATEGORY = "KJNodes"
DESCRIPTION = """
@@ -344,18 +344,18 @@ Converts different value lists/series to another type.
float_values = float_values * repeat
if output_type == 'list':
return float_values,
out = float_values,
elif output_type == 'pandas series':
return pd.Series(float_values),
out = pd.Series(float_values),
elif output_type == 'tensor':
if input_type == 'pandas series':
return torch.tensor(float_values.values, dtype=torch.float32),
out = torch.tensor(float_values.values, dtype=torch.float32),
else:
return torch.tensor(float_values, dtype=torch.float32),
out = torch.tensor(float_values, dtype=torch.float32),
elif output_type == 'match_input':
return float_values,
else:
raise ValueError(f"Unsupported output_type: {output_type}")
out = float_values,
return (out, [str(value) for value in float_values],)
class FloatToMask:

nodes/image_nodes.py — new file, 1032 additions

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -73,6 +73,28 @@ app.registerExtension({
});
}
break;
case "JoinStringMulti":
nodeType.prototype.onNodeCreated = function () {
this._type = "STRING"
this.inputs_offset = nodeData.name.includes("selective")?1:0
this.addWidget("button", "Update inputs", null, () => {
if (!this.inputs) {
this.inputs = [];
}
const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"];
if(target_number_of_inputs===this.inputs.length)return; // already set, do nothing
if(target_number_of_inputs < this.inputs.length){
for(let i = this.inputs.length; i>=this.inputs_offset+target_number_of_inputs; i--)
this.removeInput(i)
}
else{
for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i)
this.addInput(`string_${i}`, this._type)
}
});
}
break;
case "SoundReactive":
nodeType.prototype.onNodeCreated = function () {
let audioContext;