diff --git a/.gitignore b/.gitignore
index 587d297..be53a8b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,4 +4,5 @@ __pycache__
 *.ckpt
 *.pth
 types
-models
\ No newline at end of file
+models
+jsconfig.json
\ No newline at end of file
diff --git a/README.md b/README.md
index b4b0f52..4902861 100644
--- a/README.md
+++ b/README.md
@@ -2,21 +2,35 @@ Various quality of life and masking related
-nodes and scripts made by combining functionality of existing nodes for ComfyUI.
+I know I'm bad at documentation, especially for this project, which has grown from random practice nodes to... too many lines in one file.
+I have, however, started to add descriptions to the nodes themselves; there's a small ? you can click for info on what the node does.
+This is still a work in progress, like everything else.
+
 # Installation
 1. Clone this repo into `custom_nodes` folder.
-2. Install dependencies: pip install -r requirements.txt
+2. Install dependencies: `pip install -r requirements.txt`
+   or, if you use the portable install, run this in the ComfyUI_windows_portable folder:
+
+   `python_embeded\python.exe -m pip install -r ComfyUI\custom_nodes\ComfyUI-KJNodes\requirements.txt`
+
 ## Javascript
 
 ### browserstatus.js
-Sets the favicon to green circle when not processing anything, sets it to red when processing and shows progress percentage and the lenghth of your queue. Might clash with other scripts that affect the page title, delete this file to disable (until I figure out how to add options).
+Sets the favicon to a green circle when not processing anything, sets it to red when processing, and shows the progress percentage and the length of your queue.
+Off by default; needs to be enabled from the options. Overrides the Custom-Scripts favicon when enabled.
 
 ## Nodes:
 
 ### Set/Get
 
 Javascript nodes to set and get constants to reduce unnecessary lines. Takes in and returns anything, purely visual nodes.
-Could still be buggy, especially when loading workflow with missing nodes, use with precaution.
+The right-click menu of these nodes now has an option to visualize the paths, as well as an option to jump to the corresponding node on the other end.
+
+**Known limitations**:
+ - Will not work with any node that dynamically sets its outputs, such as a reroute or another Set/Get node
+ - Will not work when directly connected to a bypassed node
+ - Other possible conflicts with JavaScript-based nodes.
 
 ### ColorToMask
 
@@ -34,14 +48,6 @@ Mask and combine two sets of conditions, saves space.
 Grows or shrinks (with negative values) mask, option to invert input, returns mask and inverted mask.
 Additionally Blurs the mask, this is a slow operation especially with big batches.
 
-### CreateFadeMask
-
-This node creates batch of single color images by interpolating between white/black levels. Useful to control mask strengths or QR code controlnet input weight when combined with MaskComposite node.
-
-### CreateAudioMask
-
-Work in progress, currently creates a sphere that's size is synced with audio input.
-
 ### RoundMask
 ![image](https://github.com/kijai/ComfyUI-KJNodes/assets/40791699/52c85202-f74e-4b96-9dac-c8bda5ddcc40)
 
diff --git a/__init__.py b/__init__.py
index 138b9c7..70eb417 100644
--- a/__init__.py
+++ b/__init__.py
@@ -1,4 +1,136 @@
-from .nodes import NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS
+from .nodes.nodes import *
+from .nodes.curve_nodes import *
+from .nodes.batchcrop_nodes import *
+from .nodes.audioscheduler_nodes import *
+from .nodes.image_nodes import *
+from .nodes.intrinsic_lora_nodes import *
+from .nodes.mask_nodes import *
+
+NODE_CONFIG = {
+    #constants
+    "INTConstant": {"class": INTConstant, "name": "INT Constant"},
+    "FloatConstant": {"class": FloatConstant, "name": "Float Constant"},
+    "StringConstant": {"class": StringConstant, "name": "String Constant"},
+    "StringConstantMultiline": {"class": StringConstantMultiline, "name": "String Constant Multiline"},
+    #conditioning
+    "ConditioningMultiCombine": {"class": ConditioningMultiCombine, "name": "Conditioning Multi Combine"},
+    "ConditioningSetMaskAndCombine": {"class": ConditioningSetMaskAndCombine, "name": "ConditioningSetMaskAndCombine"},
+    "ConditioningSetMaskAndCombine3": {"class": ConditioningSetMaskAndCombine3, "name": "ConditioningSetMaskAndCombine3"},
+    "ConditioningSetMaskAndCombine4": {"class": ConditioningSetMaskAndCombine4, "name": "ConditioningSetMaskAndCombine4"},
+    "ConditioningSetMaskAndCombine5": {"class": ConditioningSetMaskAndCombine5, "name": "ConditioningSetMaskAndCombine5"},
+    "CondPassThrough": {"class": CondPassThrough},
+    #masking
+    "BatchCLIPSeg": {"class": BatchCLIPSeg, "name": "Batch CLIPSeg"},
+    "ColorToMask": {"class": ColorToMask, "name": "Color To Mask"},
+    "CreateGradientMask": {"class": CreateGradientMask, "name": "Create Gradient Mask"},
+    "CreateTextMask": {"class": CreateTextMask, "name": "Create Text Mask"},
+    "CreateAudioMask": {"class": CreateAudioMask, "name": "Create Audio Mask"},
+    "CreateFadeMask": {"class": CreateFadeMask, "name": "Create Fade Mask"},
+    "CreateFadeMaskAdvanced": {"class": CreateFadeMaskAdvanced, "name": "Create Fade Mask Advanced"},
+    "CreateFluidMask": {"class": CreateFluidMask, "name": "Create Fluid Mask"},
+    "CreateShapeMask": {"class": CreateShapeMask, "name": "Create Shape Mask"},
+    "CreateVoronoiMask": {"class": CreateVoronoiMask, "name": "Create Voronoi Mask"},
+    "CreateMagicMask": {"class": CreateMagicMask, "name": "Create Magic Mask"},
+    "GetMaskSizeAndCount": {"class": GetMaskSizeAndCount, "name": "Get Mask Size & Count"},
+    "GrowMaskWithBlur": {"class": GrowMaskWithBlur, "name": "Grow Mask With Blur"},
+    "MaskBatchMulti": {"class": MaskBatchMulti, "name": "Mask Batch Multi"},
+    "OffsetMask": {"class": OffsetMask, "name": "Offset Mask"},
+    "RemapMaskRange": {"class": RemapMaskRange, "name": "Remap Mask Range"},
+    "ResizeMask": {"class": ResizeMask, "name": "Resize Mask"},
+    "RoundMask": {"class": RoundMask, "name": "Round Mask"},
+    #images
+    "AddLabel": {"class": AddLabel, "name": "Add Label"},
+    "ColorMatch": {"class": ColorMatch, "name": "Color Match"},
+    "CrossFadeImages": {"class": CrossFadeImages, "name": "Cross Fade Images"},
+    "GetImageRangeFromBatch": {"class": GetImageRangeFromBatch, "name": "Get Image Range From Batch"},
+    "GetImageSizeAndCount": {"class": GetImageSizeAndCount, "name": "Get Image Size & Count"},
+    "ImageAndMaskPreview": {"class": ImageAndMaskPreview},
+    "ImageBatchMulti": {"class": ImageBatchMulti, "name": "Image Batch Multi"},
+    "ImageBatchRepeatInterleaving": {"class": ImageBatchRepeatInterleaving},
"ImageBatchTestPattern": {"class": ImageBatchTestPattern, "name": "Image Batch Test Pattern"}, + "ImageConcanate": {"class": ImageConcanate, "name": "Image Concatenate"}, + "ImageGrabPIL": {"class": ImageGrabPIL, "name": "Image Grab PIL"}, + "ImageGridComposite2x2": {"class": ImageGridComposite2x2, "name": "Image Grid Composite 2x2"}, + "ImageGridComposite3x3": {"class": ImageGridComposite3x3, "name": "Image Grid Composite 3x3"}, + "ImageNormalize_Neg1_To_1": {"class": ImageNormalize_Neg1_To_1, "name": "Image Normalize -1 to 1"}, + "ImagePass": {"class": ImagePass}, + "ImagePadForOutpaintMasked": {"class": ImagePadForOutpaintMasked, "name": "Image Pad For Outpaint Masked"}, + "ImageUpscaleWithModelBatched": {"class": ImageUpscaleWithModelBatched, "name": "Image Upscale With Model Batched"}, + "InsertImagesToBatchIndexed": {"class": InsertImagesToBatchIndexed, "name": "Insert Images To Batch Indexed"}, + "MergeImageChannels": {"class": MergeImageChannels, "name": "Merge Image Channels"}, + "RemapImageRange": {"class": RemapImageRange, "name": "Remap Image Range"}, + "ReverseImageBatch": {"class": ReverseImageBatch, "name": "Reverse Image Batch"}, + "ReplaceImagesInBatch": {"class": ReplaceImagesInBatch, "name": "Replace Images In Batch"}, + "SaveImageWithAlpha": {"class": SaveImageWithAlpha, "name": "Save Image With Alpha"}, + "SplitImageChannels": {"class": SplitImageChannels, "name": "Split Image Channels"}, + #batch cropping + "BatchCropFromMask": {"class": BatchCropFromMask, "name": "Batch Crop From Mask"}, + "BatchCropFromMaskAdvanced": {"class": BatchCropFromMaskAdvanced, "name": "Batch Crop From Mask Advanced"}, + "FilterZeroMasksAndCorrespondingImages": {"class": FilterZeroMasksAndCorrespondingImages}, + "InsertImageBatchByIndexes": {"class": InsertImageBatchByIndexes, "name": "Insert Image Batch By Indexes"}, + "BatchUncrop": {"class": BatchUncrop, "name": "Batch Uncrop"}, + "BatchUncropAdvanced": {"class": BatchUncropAdvanced, "name": "Batch Uncrop Advanced"}, + "SplitBboxes": {"class": SplitBboxes, "name": "Split Bboxes"}, + "BboxToInt": {"class": BboxToInt, "name": "Bbox To Int"}, + "BboxVisualize": {"class": BboxVisualize, "name": "Bbox Visualize"}, + #noise + "GenerateNoise": {"class": GenerateNoise, "name": "Generate Noise"}, + "FlipSigmasAdjusted": {"class": FlipSigmasAdjusted, "name": "Flip Sigmas Adjusted"}, + "InjectNoiseToLatent": {"class": InjectNoiseToLatent, "name": "Inject Noise To Latent"}, + "CustomSigmas": {"class": CustomSigmas, "name": "Custom Sigmas"}, + #utility + "WidgetToString": {"class": WidgetToString, "name": "Widget To String"}, + "DummyLatentOut": {"class": DummyLatentOut, "name": "Dummy Latent Out"}, + "GetLatentsFromBatchIndexed": {"class": GetLatentsFromBatchIndexed, "name": "Get Latents From Batch Indexed"}, + "ScaleBatchPromptSchedule": {"class": ScaleBatchPromptSchedule, "name": "Scale Batch Prompt Schedule"}, + "CameraPoseVisualizer": {"class": CameraPoseVisualizer, "name": "Camera Pose Visualizer"}, + "JoinStrings": {"class": JoinStrings, "name": "Join Strings"}, + "JoinStringMulti": {"class": JoinStringMulti, "name": "Join String Multi"}, + "Sleep": {"class": Sleep, "name": "Sleep"}, + "VRAM_Debug": {"class": VRAM_Debug, "name": "VRAM Debug"}, + "SomethingToString": {"class": SomethingToString, "name": "Something To String"}, + "EmptyLatentImagePresets": {"class": EmptyLatentImagePresets, "name": "Empty Latent Image Presets"}, + #audioscheduler stuff + "NormalizedAmplitudeToMask": {"class": NormalizedAmplitudeToMask}, + 
"NormalizedAmplitudeToFloatList": {"class": NormalizedAmplitudeToFloatList}, + "OffsetMaskByNormalizedAmplitude": {"class": OffsetMaskByNormalizedAmplitude}, + "ImageTransformByNormalizedAmplitude": {"class": ImageTransformByNormalizedAmplitude}, + #curve nodes + "SplineEditor": {"class": SplineEditor, "name": "Spline Editor"}, + "CreateShapeMaskOnPath": {"class": CreateShapeMaskOnPath, "name": "Create Shape Mask On Path"}, + "WeightScheduleExtend": {"class": WeightScheduleExtend, "name": "Weight Schedule Extend"}, + "MaskOrImageToWeight": {"class": MaskOrImageToWeight, "name": "Mask Or Image To Weight"}, + "WeightScheduleConvert": {"class": WeightScheduleConvert, "name": "Weight Schedule Convert"}, + "FloatToMask": {"class": FloatToMask, "name": "Float To Mask"}, + "FloatToSigmas": {"class": FloatToSigmas, "name": "Float To Sigmas"}, + "PlotCoordinates": {"class": PlotCoordinates, "name": "Plot Coordinates"}, + "InterpolateCoords": {"class": InterpolateCoords, "name": "Interpolate Coords"}, + #experimental + "StabilityAPI_SD3": {"class": StabilityAPI_SD3, "name": "Stability API SD3"}, + "SoundReactive": {"class": SoundReactive, "name": "Sound Reactive"}, + "StableZero123_BatchSchedule": {"class": StableZero123_BatchSchedule, "name": "Stable Zero123 Batch Schedule"}, + "SV3D_BatchSchedule": {"class": SV3D_BatchSchedule, "name": "SV3D Batch Schedule"}, + "LoadResAdapterNormalization": {"class": LoadResAdapterNormalization}, + "Superprompt": {"class": Superprompt, "name": "Superprompt"}, + "GLIGENTextBoxApplyBatchCoords": {"class": GLIGENTextBoxApplyBatchCoords}, + "Intrinsic_lora_sampling": {"class": Intrinsic_lora_sampling, "name": "Intrinsic Lora Sampling"}, + #instance diffusion + "CreateInstanceDiffusionTracking": {"class": CreateInstanceDiffusionTracking}, + "AppendInstanceDiffusionTracking": {"class": AppendInstanceDiffusionTracking}, + "DrawInstanceDiffusionTracking": {"class": DrawInstanceDiffusionTracking}, +} + +def generate_node_mappings(node_config): + node_class_mappings = {} + node_display_name_mappings = {} + + for node_name, node_info in node_config.items(): + node_class_mappings[node_name] = node_info["class"] + node_display_name_mappings[node_name] = node_info.get("name", node_info["class"].__name__) + + return node_class_mappings, node_display_name_mappings + +NODE_CLASS_MAPPINGS, NODE_DISPLAY_NAME_MAPPINGS = generate_node_mappings(NODE_CONFIG) + __all__ = ["NODE_CLASS_MAPPINGS", "NODE_DISPLAY_NAME_MAPPINGS", "WEB_DIRECTORY"] WEB_DIRECTORY = "./web" diff --git a/audio.wav b/audio.wav deleted file mode 100644 index 78cff47..0000000 Binary files a/audio.wav and /dev/null differ diff --git a/favicon-active.ico b/favicon-active.ico deleted file mode 100644 index 64045ab..0000000 Binary files a/favicon-active.ico and /dev/null differ diff --git a/favicon.ico b/favicon.ico deleted file mode 100644 index 08df248..0000000 Binary files a/favicon.ico and /dev/null differ diff --git a/intrinsic_loras.txt b/intristic_loras/intrinsic_loras.txt similarity index 100% rename from intrinsic_loras.txt rename to intristic_loras/intrinsic_loras.txt diff --git a/kjweb_async/d3.v6.min.js b/kjweb_async/d3.v6.min.js deleted file mode 100644 index 05cd5ca..0000000 --- a/kjweb_async/d3.v6.min.js +++ /dev/null @@ -1,2 +0,0 @@ -// https://d3js.org v6.7.0 Copyright 2021 Mike Bostock -!function(t,n){"object"==typeof exports&&"undefined"!=typeof module?n(exports):"function"==typeof define&&define.amd?define(["exports"],n):n((t="undefined"!=typeof 
globalThis?globalThis:t||self).d3=t.d3||{})}(this,(function(t){"use strict";function n(t,n){return tn?1:t>=n?0:NaN}function e(t){let e=t,r=t;function i(t,n,e,i){for(null==e&&(e=0),null==i&&(i=t.length);e>>1;r(t[o],n)<0?e=o+1:i=o}return e}return 1===t.length&&(e=(n,e)=>t(n)-e,r=function(t){return(e,r)=>n(t(e),r)}(t)),{left:i,center:function(t,n,r,o){null==r&&(r=0),null==o&&(o=t.length);const a=i(t,n,r,o-1);return a>r&&e(t[a-1],n)>-e(t[a],n)?a-1:a},right:function(t,n,e,i){for(null==e&&(e=0),null==i&&(i=t.length);e>>1;r(t[o],n)>0?i=o:e=o+1}return e}}}function r(t){return null===t?NaN:+t}const i=e(n),o=i.right,a=i.left,u=e(r).center;function c(t,n){let e=0;if(void 0===n)for(let n of t)null!=n&&(n=+n)>=n&&++e;else{let r=-1;for(let i of t)null!=(i=n(i,++r,t))&&(i=+i)>=i&&++e}return e}function f(t){return 0|t.length}function s(t){return!(t>0)}function l(t){return"object"!=typeof t||"length"in t?t:Array.from(t)}function h(t,n){let e,r=0,i=0,o=0;if(void 0===n)for(let n of t)null!=n&&(n=+n)>=n&&(e=n-i,i+=e/++r,o+=e*(n-i));else{let a=-1;for(let u of t)null!=(u=n(u,++a,t))&&(u=+u)>=u&&(e=u-i,i+=e/++r,o+=e*(u-i))}if(r>1)return o/(r-1)}function d(t,n){const e=h(t,n);return e?Math.sqrt(e):e}function p(t,n){let e,r;if(void 0===n)for(const n of t)null!=n&&(void 0===e?n>=n&&(e=r=n):(e>n&&(e=n),r=o&&(e=r=o):(e>o&&(e=o),r0){for(o=t[--i];i>0&&(n=o,e=t[--i],o=n+e,r=e-(o-n),!r););i>0&&(r<0&&t[i-1]<0||r>0&&t[i-1]>0)&&(e=2*r,n=o+e,e==n-o&&(o=n))}return o}}class y extends Map{constructor(t,n=x){if(super(),Object.defineProperties(this,{_intern:{value:new Map},_key:{value:n}}),null!=t)for(const[n,e]of t)this.set(n,e)}get(t){return super.get(_(this,t))}has(t){return super.has(_(this,t))}set(t,n){return super.set(b(this,t),n)}delete(t){return super.delete(m(this,t))}}class v extends Set{constructor(t,n=x){if(super(),Object.defineProperties(this,{_intern:{value:new Map},_key:{value:n}}),null!=t)for(const n of t)this.add(n)}has(t){return super.has(_(this,t))}add(t){return super.add(b(this,t))}delete(t){return super.delete(m(this,t))}}function _({_intern:t,_key:n},e){const r=n(e);return t.has(r)?t.get(r):e}function b({_intern:t,_key:n},e){const r=n(e);return t.has(r)?t.get(r):(t.set(r,e),e)}function m({_intern:t,_key:n},e){const r=n(e);return t.has(r)&&(e=t.get(e),t.delete(r)),e}function x(t){return null!==t&&"object"==typeof t?t.valueOf():t}function w(t){return t}function M(t,...n){return S(t,w,w,n)}function A(t,n,...e){return S(t,w,n,e)}function T(t){if(1!==t.length)throw new Error("duplicate key");return t[0]}function S(t,n,e,r){return function t(i,o){if(o>=r.length)return e(i);const a=new y,u=r[o++];let c=-1;for(const t of i){const n=u(t,++c,i),e=a.get(n);e?e.push(t):a.set(n,[t])}for(const[n,e]of a)a.set(n,t(e,o));return n(a)}(t,0)}function E(t,n){return Array.from(n,(n=>t[n]))}function k(t,...e){if("function"!=typeof t[Symbol.iterator])throw new TypeError("values is not iterable");t=Array.from(t);let[r=n]=e;if(1===r.length||e.length>1){const i=Uint32Array.from(t,((t,n)=>n));return e.length>1?(e=e.map((n=>t.map(n))),i.sort(((t,r)=>{for(const i of e){const e=n(i[t],i[r]);if(e)return e}}))):(r=t.map(r),i.sort(((t,e)=>n(r[t],r[e])))),E(t,i)}return t.sort(r)}var N=Array.prototype.slice;function C(t){return function(){return t}}var P=Math.sqrt(50),z=Math.sqrt(10),D=Math.sqrt(2);function q(t,n,e){var r,i,o,a,u=-1;if(e=+e,(t=+t)===(n=+n)&&e>0)return[t];if((r=n0){let e=Math.round(t/a),r=Math.round(n/a);for(e*an&&--r,o=new Array(i=r-e+1);++un&&--r,o=new 
Array(i=r-e+1);++u=0?(o>=P?10:o>=z?5:o>=D?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(o>=P?10:o>=z?5:o>=D?2:1)}function F(t,n,e){var r=Math.abs(n-t)/Math.max(0,e),i=Math.pow(10,Math.floor(Math.log(r)/Math.LN10)),o=r/i;return o>=P?i*=10:o>=z?i*=5:o>=D&&(i*=2),n0?(t=Math.floor(t/i)*i,n=Math.ceil(n/i)*i):i<0&&(t=Math.ceil(t*i)/i,n=Math.floor(n*i)/i),r=i}}function I(t){return Math.ceil(Math.log(c(t))/Math.LN2)+1}function U(){var t=w,n=p,e=I;function r(r){Array.isArray(r)||(r=Array.from(r));var i,a,u=r.length,c=new Array(u);for(i=0;i=l)if(t>=l&&n===p){const t=R(s,l,e);isFinite(t)&&(t>0?l=(Math.floor(l/t)+1)*t:t<0&&(l=(Math.ceil(l*-t)+1)/-t))}else h.pop()}for(var d=h.length;h[0]<=s;)h.shift(),--d;for(;h[d-1]>l;)h.pop(),--d;var g,y=new Array(d+1);for(i=0;i<=d;++i)(g=y[i]=[]).x0=i>0?h[i-1]:s,g.x1=i=n)&&(e=n);else{let r=-1;for(let i of t)null!=(i=n(i,++r,t))&&(e=i)&&(e=i)}return e}function Y(t,n){let e;if(void 0===n)for(const n of t)null!=n&&(e>n||void 0===e&&n>=n)&&(e=n);else{let r=-1;for(let i of t)null!=(i=n(i,++r,t))&&(e>i||void 0===e&&i>=i)&&(e=i)}return e}function L(t,e,r=0,i=t.length-1,o=n){for(;i>r;){if(i-r>600){const n=i-r+1,a=e-r+1,u=Math.log(n),c=.5*Math.exp(2*u/3),f=.5*Math.sqrt(u*c*(n-c)/n)*(a-n/2<0?-1:1);L(t,e,Math.max(r,Math.floor(e-a*c/n+f)),Math.min(i,Math.floor(e+(n-a)*c/n+f)),o)}const n=t[e];let a=r,u=i;for(j(t,r,e),o(t[i],n)>0&&j(t,r,i);a0;)--u}0===o(t[r],n)?j(t,r,u):(++u,j(t,u,i)),u<=e&&(r=u+1),e<=u&&(i=u-1)}return t}function j(t,n,e){const r=t[n];t[n]=t[e],t[e]=r}function H(t,n,e){if(r=(t=Float64Array.from(function*(t,n){if(void 0===n)for(let n of t)null!=n&&(n=+n)>=n&&(yield n);else{let e=-1;for(let r of t)null!=(r=n(r,++e,t))&&(r=+r)>=r&&(yield r)}}(t,e))).length){if((n=+n)<=0||r<2)return Y(t);if(n>=1)return B(t);var r,i=(r-1)*n,o=Math.floor(i),a=B(L(t,o).subarray(0,o+1));return a+(Y(t.subarray(o+1))-a)*(i-o)}}function X(t,n,e=r){if(i=t.length){if((n=+n)<=0||i<2)return+e(t[0],0,t);if(n>=1)return+e(t[i-1],i-1,t);var i,o=(i-1)*n,a=Math.floor(o),u=+e(t[a],a,t);return u+(+e(t[a+1],a+1,t)-u)*(o-a)}}function G(t,n){let e,r=-1,i=-1;if(void 0===n)for(const n of t)++i,null!=n&&(e=n)&&(e=n,r=i);else for(let o of t)null!=(o=n(o,++i,t))&&(e=o)&&(e=o,r=i);return r}function V(t){return Array.from(function*(t){for(const n of t)yield*n}(t))}function $(t,n){let e,r=-1,i=-1;if(void 0===n)for(const n of t)++i,null!=n&&(e>n||void 0===e&&n>=n)&&(e=n,r=i);else for(let o of t)null!=(o=n(o,++i,t))&&(e>o||void 0===e&&o>=o)&&(e=o,r=i);return r}function W(t,n){return[t,n]}function Z(t,n,e){t=+t,n=+n,e=(i=arguments.length)<2?(n=t,t=0,1):i<3?1:+e;for(var r=-1,i=0|Math.max(0,Math.ceil((n-t)/e)),o=new Array(i);++r+t(n)}function st(t,n){return n=Math.max(0,t.bandwidth()-2*n)/2,t.round()&&(n=Math.round(n)),e=>+t(e)+n}function lt(){return!this.__axis}function ht(t,n){var e=[],r=null,i=null,o=6,a=6,u=3,c="undefined"!=typeof window&&window.devicePixelRatio>1?0:.5,f=1===t||4===t?-1:1,s=4===t||2===t?"x":"y",l=1===t||3===t?ut:ct;function h(h){var 
d=null==r?n.ticks?n.ticks.apply(n,e):n.domain():r,p=null==i?n.tickFormat?n.tickFormat.apply(n,e):ot:i,g=Math.max(o,0)+u,y=n.range(),v=+y[0]+c,_=+y[y.length-1]+c,b=(n.bandwidth?st:ft)(n.copy(),c),m=h.selection?h.selection():h,x=m.selectAll(".domain").data([null]),w=m.selectAll(".tick").data(d,n).order(),M=w.exit(),A=w.enter().append("g").attr("class","tick"),T=w.select("line"),S=w.select("text");x=x.merge(x.enter().insert("path",".tick").attr("class","domain").attr("stroke","currentColor")),w=w.merge(A),T=T.merge(A.append("line").attr("stroke","currentColor").attr(s+"2",f*o)),S=S.merge(A.append("text").attr("fill","currentColor").attr(s,f*g).attr("dy",1===t?"0em":3===t?"0.71em":"0.32em")),h!==m&&(x=x.transition(h),w=w.transition(h),T=T.transition(h),S=S.transition(h),M=M.transition(h).attr("opacity",at).attr("transform",(function(t){return isFinite(t=b(t))?l(t+c):this.getAttribute("transform")})),A.attr("opacity",at).attr("transform",(function(t){var n=this.parentNode.__axis;return l((n&&isFinite(n=n(t))?n:b(t))+c)}))),M.remove(),x.attr("d",4===t||2===t?a?"M"+f*a+","+v+"H"+c+"V"+_+"H"+f*a:"M"+c+","+v+"V"+_:a?"M"+v+","+f*a+"V"+c+"H"+_+"V"+f*a:"M"+v+","+c+"H"+_),w.attr("opacity",1).attr("transform",(function(t){return l(b(t)+c)})),T.attr(s+"2",f*o),S.attr(s,f*g).text(p),m.filter(lt).attr("fill","none").attr("font-size",10).attr("font-family","sans-serif").attr("text-anchor",2===t?"start":4===t?"end":"middle"),m.each((function(){this.__axis=b}))}return h.scale=function(t){return arguments.length?(n=t,h):n},h.ticks=function(){return e=it.call(arguments),h},h.tickArguments=function(t){return arguments.length?(e=null==t?[]:it.call(t),h):e.slice()},h.tickValues=function(t){return arguments.length?(r=null==t?null:it.call(t),h):r&&r.slice()},h.tickFormat=function(t){return arguments.length?(i=t,h):i},h.tickSize=function(t){return arguments.length?(o=a=+t,h):o},h.tickSizeInner=function(t){return arguments.length?(o=+t,h):o},h.tickSizeOuter=function(t){return arguments.length?(a=+t,h):a},h.tickPadding=function(t){return arguments.length?(u=+t,h):u},h.offset=function(t){return arguments.length?(c=+t,h):c},h}var dt={value:()=>{}};function pt(){for(var t,n=0,e=arguments.length,r={};n=0&&(e=t.slice(r+1),t=t.slice(0,r)),t&&!n.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:e}}))}function vt(t,n){for(var e,r=0,i=t.length;r0)for(var e,r,i=new Array(e),o=0;o=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),mt.hasOwnProperty(n)?{space:mt[n],local:t}:t}function wt(t){return function(){var n=this.ownerDocument,e=this.namespaceURI;return e===bt&&n.documentElement.namespaceURI===bt?n.createElement(t):n.createElementNS(e,t)}}function Mt(t){return function(){return this.ownerDocument.createElementNS(t.space,t.local)}}function At(t){var n=xt(t);return(n.local?Mt:wt)(n)}function Tt(){}function St(t){return null==t?Tt:function(){return this.querySelector(t)}}function Et(t){return"object"==typeof t&&"length"in t?t:Array.from(t)}function kt(){return[]}function Nt(t){return null==t?kt:function(){return this.querySelectorAll(t)}}function Ct(t){return function(){return this.matches(t)}}function Pt(t){return function(n){return n.matches(t)}}var zt=Array.prototype.find;function Dt(){return this.firstElementChild}var qt=Array.prototype.filter;function Rt(){return this.children}function Ft(t){return new Array(t.length)}function Ot(t,n){this.ownerDocument=t.ownerDocument,this.namespaceURI=t.namespaceURI,this._next=null,this._parent=t,this.__data__=n}function It(t){return function(){return t}}function 
Ut(t,n,e,r,i,o){for(var a,u=0,c=n.length,f=o.length;un?1:t>=n?0:NaN}function jt(t){return function(){this.removeAttribute(t)}}function Ht(t){return function(){this.removeAttributeNS(t.space,t.local)}}function Xt(t,n){return function(){this.setAttribute(t,n)}}function Gt(t,n){return function(){this.setAttributeNS(t.space,t.local,n)}}function Vt(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttribute(t):this.setAttribute(t,e)}}function $t(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,e)}}function Wt(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function Zt(t){return function(){this.style.removeProperty(t)}}function Kt(t,n,e){return function(){this.style.setProperty(t,n,e)}}function Qt(t,n,e){return function(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}}function Jt(t,n){return t.style.getPropertyValue(n)||Wt(t).getComputedStyle(t,null).getPropertyValue(n)}function tn(t){return function(){delete this[t]}}function nn(t,n){return function(){this[t]=n}}function en(t,n){return function(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}}function rn(t){return t.trim().split(/^|\s+/)}function on(t){return t.classList||new an(t)}function an(t){this._node=t,this._names=rn(t.getAttribute("class")||"")}function un(t,n){for(var e=on(t),r=-1,i=n.length;++r=0&&(n=t.slice(e+1),t=t.slice(0,e)),{type:t,name:n}}))}function Tn(t){return function(){var n=this.__on;if(n){for(var e,r=0,i=-1,o=n.length;r=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var Cn=[null];function Pn(t,n){this._groups=t,this._parents=n}function zn(){return new Pn([[document.documentElement]],Cn)}function Dn(t){return"string"==typeof t?new Pn([[document.querySelector(t)]],[document.documentElement]):new Pn([[t]],Cn)}Pn.prototype=zn.prototype={constructor:Pn,select:function(t){"function"!=typeof t&&(t=St(t));for(var n=this._groups,e=n.length,r=new Array(e),i=0;i=x&&(x=m+1);!(b=y[x])&&++x=0;)(r=i[o])&&(a&&4^r.compareDocumentPosition(a)&&a.parentNode.insertBefore(r,a),a=r);return this},sort:function(t){function n(n,e){return n&&e?t(n.__data__,e.__data__):!n-!e}t||(t=Lt);for(var e=this._groups,r=e.length,i=new Array(r),o=0;o1?this.each((null==n?Zt:"function"==typeof n?Qt:Kt)(t,n,null==e?"":e)):Jt(this.node(),t)},property:function(t,n){return arguments.length>1?this.each((null==n?tn:"function"==typeof n?en:nn)(t,n)):this.node()[t]},classed:function(t,n){var e=rn(t+"");if(arguments.length<2){for(var r=on(this.node()),i=-1,o=e.length;++i()=>t;function Hn(t,{sourceEvent:n,subject:e,target:r,identifier:i,active:o,x:a,y:u,dx:c,dy:f,dispatch:s}){Object.defineProperties(this,{type:{value:t,enumerable:!0,configurable:!0},sourceEvent:{value:n,enumerable:!0,configurable:!0},subject:{value:e,enumerable:!0,configurable:!0},target:{value:r,enumerable:!0,configurable:!0},identifier:{value:i,enumerable:!0,configurable:!0},active:{value:o,enumerable:!0,configurable:!0},x:{value:a,enumerable:!0,configurable:!0},y:{value:u,enumerable:!0,configurable:!0},dx:{value:c,enumerable:!0,configurable:!0},dy:{value:f,enumerable:!0,configurable:!0},_:{value:s}})}function Xn(t){return!t.ctrlKey&&!t.button}function Gn(){return this.parentNode}function Vn(t,n){return null==n?{x:t.x,y:t.y}:n}function $n(){return navigator.maxTouchPoints||"ontouchstart"in 
this}function Wn(t,n,e){t.prototype=n.prototype=e,e.constructor=t}function Zn(t,n){var e=Object.create(t.prototype);for(var r in n)e[r]=n[r];return e}function Kn(){}Hn.prototype.on=function(){var t=this._.on.apply(this._,arguments);return t===this._?this:t};var Qn=.7,Jn=1/Qn,te="\\s*([+-]?\\d+)\\s*",ne="\\s*([+-]?\\d*\\.?\\d+(?:[eE][+-]?\\d+)?)\\s*",ee="\\s*([+-]?\\d*\\.?\\d+(?:[eE][+-]?\\d+)?)%\\s*",re=/^#([0-9a-f]{3,8})$/,ie=new RegExp("^rgb\\("+[te,te,te]+"\\)$"),oe=new RegExp("^rgb\\("+[ee,ee,ee]+"\\)$"),ae=new RegExp("^rgba\\("+[te,te,te,ne]+"\\)$"),ue=new RegExp("^rgba\\("+[ee,ee,ee,ne]+"\\)$"),ce=new RegExp("^hsl\\("+[ne,ee,ee]+"\\)$"),fe=new RegExp("^hsla\\("+[ne,ee,ee,ne]+"\\)$"),se={aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074};function le(){return this.rgb().formatHex()}function he(){return this.rgb().formatRgb()}function de(t){var n,e;return 
t=(t+"").trim().toLowerCase(),(n=re.exec(t))?(e=n[1].length,n=parseInt(n[1],16),6===e?pe(n):3===e?new _e(n>>8&15|n>>4&240,n>>4&15|240&n,(15&n)<<4|15&n,1):8===e?ge(n>>24&255,n>>16&255,n>>8&255,(255&n)/255):4===e?ge(n>>12&15|n>>8&240,n>>8&15|n>>4&240,n>>4&15|240&n,((15&n)<<4|15&n)/255):null):(n=ie.exec(t))?new _e(n[1],n[2],n[3],1):(n=oe.exec(t))?new _e(255*n[1]/100,255*n[2]/100,255*n[3]/100,1):(n=ae.exec(t))?ge(n[1],n[2],n[3],n[4]):(n=ue.exec(t))?ge(255*n[1]/100,255*n[2]/100,255*n[3]/100,n[4]):(n=ce.exec(t))?we(n[1],n[2]/100,n[3]/100,1):(n=fe.exec(t))?we(n[1],n[2]/100,n[3]/100,n[4]):se.hasOwnProperty(t)?pe(se[t]):"transparent"===t?new _e(NaN,NaN,NaN,0):null}function pe(t){return new _e(t>>16&255,t>>8&255,255&t,1)}function ge(t,n,e,r){return r<=0&&(t=n=e=NaN),new _e(t,n,e,r)}function ye(t){return t instanceof Kn||(t=de(t)),t?new _e((t=t.rgb()).r,t.g,t.b,t.opacity):new _e}function ve(t,n,e,r){return 1===arguments.length?ye(t):new _e(t,n,e,null==r?1:r)}function _e(t,n,e,r){this.r=+t,this.g=+n,this.b=+e,this.opacity=+r}function be(){return"#"+xe(this.r)+xe(this.g)+xe(this.b)}function me(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function xe(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function we(t,n,e,r){return r<=0?t=n=e=NaN:e<=0||e>=1?t=n=NaN:n<=0&&(t=NaN),new Te(t,n,e,r)}function Me(t){if(t instanceof Te)return new Te(t.h,t.s,t.l,t.opacity);if(t instanceof Kn||(t=de(t)),!t)return new Te;if(t instanceof Te)return t;var n=(t=t.rgb()).r/255,e=t.g/255,r=t.b/255,i=Math.min(n,e,r),o=Math.max(n,e,r),a=NaN,u=o-i,c=(o+i)/2;return u?(a=n===o?(e-r)/u+6*(e0&&c<1?0:a,new Te(a,u,c,t.opacity)}function Ae(t,n,e,r){return 1===arguments.length?Me(t):new Te(t,n,e,null==r?1:r)}function Te(t,n,e,r){this.h=+t,this.s=+n,this.l=+e,this.opacity=+r}function Se(t,n,e){return 255*(t<60?n+(e-n)*t/60:t<180?e:t<240?n+(e-n)*(240-t)/60:n)}Wn(Kn,de,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:le,formatHex:le,formatHsl:function(){return Me(this).formatHsl()},formatRgb:he,toString:he}),Wn(_e,ve,Zn(Kn,{brighter:function(t){return t=null==t?Jn:Math.pow(Jn,t),new _e(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?Qn:Math.pow(Qn,t),new _e(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:be,formatHex:be,formatRgb:me,toString:me})),Wn(Te,Ae,Zn(Kn,{brighter:function(t){return t=null==t?Jn:Math.pow(Jn,t),new Te(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?Qn:Math.pow(Qn,t),new Te(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),n=isNaN(t)||isNaN(this.s)?0:this.s,e=this.l,r=e+(e<.5?e:1-e)*n,i=2*e-r;return new _e(Se(t>=240?t-240:t+120,i,r),Se(t,i,r),Se(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));const 
Ee=Math.PI/180,ke=180/Math.PI,Ne=.96422,Ce=.82521,Pe=4/29,ze=6/29,De=3*ze*ze;function qe(t){if(t instanceof Fe)return new Fe(t.l,t.a,t.b,t.opacity);if(t instanceof je)return He(t);t instanceof _e||(t=ye(t));var n,e,r=Be(t.r),i=Be(t.g),o=Be(t.b),a=Oe((.2225045*r+.7168786*i+.0606169*o)/1);return r===i&&i===o?n=e=a:(n=Oe((.4360747*r+.3850649*i+.1430804*o)/Ne),e=Oe((.0139322*r+.0971045*i+.7141733*o)/Ce)),new Fe(116*a-16,500*(n-a),200*(a-e),t.opacity)}function Re(t,n,e,r){return 1===arguments.length?qe(t):new Fe(t,n,e,null==r?1:r)}function Fe(t,n,e,r){this.l=+t,this.a=+n,this.b=+e,this.opacity=+r}function Oe(t){return t>.008856451679035631?Math.pow(t,1/3):t/De+Pe}function Ie(t){return t>ze?t*t*t:De*(t-Pe)}function Ue(t){return 255*(t<=.0031308?12.92*t:1.055*Math.pow(t,1/2.4)-.055)}function Be(t){return(t/=255)<=.04045?t/12.92:Math.pow((t+.055)/1.055,2.4)}function Ye(t){if(t instanceof je)return new je(t.h,t.c,t.l,t.opacity);if(t instanceof Fe||(t=qe(t)),0===t.a&&0===t.b)return new je(NaN,0=1?(e=1,n-1):Math.floor(e*n),i=t[r],o=t[r+1],a=r>0?t[r-1]:2*i-o,u=r()=>t;function ar(t,n){return function(e){return t+e*n}}function ur(t,n){var e=n-t;return e?ar(t,e>180||e<-180?e-360*Math.round(e/360):e):or(isNaN(t)?n:t)}function cr(t){return 1==(t=+t)?fr:function(n,e){return e-n?function(t,n,e){return t=Math.pow(t,e),n=Math.pow(n,e)-t,e=1/e,function(r){return Math.pow(t+r*n,e)}}(n,e,t):or(isNaN(n)?e:n)}}function fr(t,n){var e=n-t;return e?ar(t,e):or(isNaN(t)?n:t)}var sr=function t(n){var e=cr(n);function r(t,n){var r=e((t=ve(t)).r,(n=ve(n)).r),i=e(t.g,n.g),o=e(t.b,n.b),a=fr(t.opacity,n.opacity);return function(n){return t.r=r(n),t.g=i(n),t.b=o(n),t.opacity=a(n),t+""}}return r.gamma=t,r}(1);function lr(t){return function(n){var e,r,i=n.length,o=new Array(i),a=new Array(i),u=new Array(i);for(e=0;eo&&(i=n.slice(o,i),u[a]?u[a]+=i:u[++a]=i),(e=e[0])===(r=r[0])?u[a]?u[a]+=r:u[++a]=r:(u[++a]=null,c.push({i:a,x:_r(e,r)})),o=xr.lastIndex;return o180?n+=360:n-t>180&&(t+=360),o.push({i:e.push(i(e)+"rotate(",null,r)-2,x:_r(t,n)})):n&&e.push(i(e)+"rotate("+n+r)}(o.rotate,a.rotate,u,c),function(t,n,e,o){t!==n?o.push({i:e.push(i(e)+"skewX(",null,r)-2,x:_r(t,n)}):n&&e.push(i(e)+"skewX("+n+r)}(o.skewX,a.skewX,u,c),function(t,n,e,r,o,a){if(t!==e||n!==r){var u=o.push(i(o)+"scale(",null,",",null,")");a.push({i:u-4,x:_r(t,e)},{i:u-2,x:_r(n,r)})}else 1===e&&1===r||o.push(i(o)+"scale("+e+","+r+")")}(o.scaleX,o.scaleY,a.scaleX,a.scaleY,u,c),o=a=null,function(t){for(var n,e=-1,r=c.length;++e=0&&n._call.call(null,t),n=n._next;--Gr}function oi(){Zr=(Wr=Qr.now())+Kr,Gr=Vr=0;try{ii()}finally{Gr=0,function(){var t,n,e=Hr,r=1/0;for(;e;)e._call?(r>e._time&&(r=e._time),t=e,e=e._next):(n=e._next,e._next=null,e=t?t._next=n:Hr=n);Xr=t,ui(r)}(),Zr=0}}function ai(){var t=Qr.now(),n=t-Wr;n>1e3&&(Kr-=n,Wr=t)}function ui(t){Gr||(Vr&&(Vr=clearTimeout(Vr)),t-Zr>24?(t<1/0&&(Vr=setTimeout(oi,t-Qr.now()-Kr)),$r&&($r=clearInterval($r))):($r||(Wr=Qr.now(),$r=setInterval(ai,1e3)),Gr=1,Jr(oi)))}function ci(t,n,e){var r=new ei;return n=null==n?0:+n,r.restart((e=>{r.stop(),t(e+n)}),n,e),r}ei.prototype=ri.prototype={constructor:ei,restart:function(t,n,e){if("function"!=typeof t)throw new TypeError("callback is not a function");e=(null==e?ti():+e)+(null==n?0:+n),this._next||Xr===this||(Xr?Xr._next=this:Hr=this,Xr=this),this._call=t,this._time=e,ui()},stop:function(){this._call&&(this._call=null,this._time=1/0,ui())}};var fi=pt("start","end","cancel","interrupt"),si=[];function li(t,n,e,r,i,o){var a=t.__transition;if(a){if(e in a)return}else 
t.__transition={};!function(t,n,e){var r,i=t.__transition;function o(t){e.state=1,e.timer.restart(a,e.delay,e.time),e.delay<=t&&a(t-e.delay)}function a(o){var f,s,l,h;if(1!==e.state)return c();for(f in i)if((h=i[f]).name===e.name){if(3===h.state)return ci(a);4===h.state?(h.state=6,h.timer.stop(),h.on.call("interrupt",t,t.__data__,h.index,h.group),delete i[f]):+f0)throw new Error("too late; already scheduled");return e}function di(t,n){var e=pi(t,n);if(e.state>3)throw new Error("too late; already running");return e}function pi(t,n){var e=t.__transition;if(!e||!(e=e[n]))throw new Error("transition not found");return e}function gi(t,n){var e,r,i,o=t.__transition,a=!0;if(o){for(i in n=null==n?null:n+"",o)(e=o[i]).name===n?(r=e.state>2&&e.state<5,e.state=6,e.timer.stop(),e.on.call(r?"interrupt":"cancel",t,t.__data__,e.index,e.group),delete o[i]):a=!1;a&&delete t.__transition}}function yi(t,n){var e,r;return function(){var i=di(this,t),o=i.tween;if(o!==e)for(var a=0,u=(r=e=o).length;a=0&&(t=t.slice(0,n)),!t||"start"===t}))}(n)?hi:di;return function(){var a=o(this,t),u=a.on;u!==r&&(i=(r=u).copy()).on(n,e),a.on=i}}var Fi=zn.prototype.constructor;function Oi(t){return function(){this.style.removeProperty(t)}}function Ii(t,n,e){return function(r){this.style.setProperty(t,n.call(this,r),e)}}function Ui(t,n,e){var r,i;function o(){var o=n.apply(this,arguments);return o!==i&&(r=(i=o)&&Ii(t,o,e)),r}return o._value=n,o}function Bi(t){return function(n){this.textContent=t.call(this,n)}}function Yi(t){var n,e;function r(){var r=t.apply(this,arguments);return r!==e&&(n=(e=r)&&Bi(r)),n}return r._value=t,r}var Li=0;function ji(t,n,e,r){this._groups=t,this._parents=n,this._name=e,this._id=r}function Hi(t){return zn().transition(t)}function Xi(){return++Li}var Gi=zn.prototype;ji.prototype=Hi.prototype={constructor:ji,select:function(t){var n=this._name,e=this._id;"function"!=typeof t&&(t=St(t));for(var r=this._groups,i=r.length,o=new Array(i),a=0;a()=>t;function mo(t,{sourceEvent:n,target:e,selection:r,mode:i,dispatch:o}){Object.defineProperties(this,{type:{value:t,enumerable:!0,configurable:!0},sourceEvent:{value:n,enumerable:!0,configurable:!0},target:{value:e,enumerable:!0,configurable:!0},selection:{value:r,enumerable:!0,configurable:!0},mode:{value:i,enumerable:!0,configurable:!0},_:{value:o}})}function xo(t){t.stopImmediatePropagation()}function wo(t){t.preventDefault(),t.stopImmediatePropagation()}var Mo={name:"drag"},Ao={name:"space"},To={name:"handle"},So={name:"center"};const{abs:Eo,max:ko,min:No}=Math;function Co(t){return[+t[0],+t[1]]}function Po(t){return[Co(t[0]),Co(t[1])]}var zo={name:"x",handles:["w","e"].map(Bo),input:function(t,n){return null==t?null:[[+t[0],n[0][1]],[+t[1],n[1][1]]]},output:function(t){return t&&[t[0][0],t[1][0]]}},Do={name:"y",handles:["n","s"].map(Bo),input:function(t,n){return null==t?null:[[n[0][0],+t[0]],[n[1][0],+t[1]]]},output:function(t){return t&&[t[0][1],t[1][1]]}},qo={name:"xy",handles:["n","w","e","s","nw","ne","sw","se"].map(Bo),input:function(t){return null==t?null:Po(t)},output:function(t){return t}},Ro={overlay:"crosshair",selection:"move",n:"ns-resize",e:"ew-resize",s:"ns-resize",w:"ew-resize",nw:"nwse-resize",ne:"nesw-resize",se:"nwse-resize",sw:"nesw-resize"},Fo={e:"w",w:"e",nw:"ne",ne:"nw",se:"sw",sw:"se"},Oo={n:"s",s:"n",nw:"sw",ne:"se",se:"ne",sw:"nw"},Io={overlay:1,selection:1,n:null,e:1,s:null,w:-1,nw:-1,ne:1,se:1,sw:-1},Uo={overlay:1,selection:1,n:-1,e:null,s:1,w:null,nw:-1,ne:-1,se:1,sw:1};function Bo(t){return{type:t}}function 
Yo(t){return!t.ctrlKey&&!t.button}function Lo(){var t=this.ownerSVGElement||this;return t.hasAttribute("viewBox")?[[(t=t.viewBox.baseVal).x,t.y],[t.x+t.width,t.y+t.height]]:[[0,0],[t.width.baseVal.value,t.height.baseVal.value]]}function jo(){return navigator.maxTouchPoints||"ontouchstart"in this}function Ho(t){for(;!t.__brush;)if(!(t=t.parentNode))return;return t.__brush}function Xo(t){return t[0][0]===t[1][0]||t[0][1]===t[1][1]}function Go(t){var n,e=Lo,r=Yo,i=jo,o=!0,a=pt("start","brush","end"),u=6;function c(n){var e=n.property("__brush",g).selectAll(".overlay").data([Bo("overlay")]);e.enter().append("rect").attr("class","overlay").attr("pointer-events","all").attr("cursor",Ro.overlay).merge(e).each((function(){var t=Ho(this).extent;Dn(this).attr("x",t[0][0]).attr("y",t[0][1]).attr("width",t[1][0]-t[0][0]).attr("height",t[1][1]-t[0][1])})),n.selectAll(".selection").data([Bo("selection")]).enter().append("rect").attr("class","selection").attr("cursor",Ro.selection).attr("fill","#777").attr("fill-opacity",.3).attr("stroke","#fff").attr("shape-rendering","crispEdges");var r=n.selectAll(".handle").data(t.handles,(function(t){return t.type}));r.exit().remove(),r.enter().append("rect").attr("class",(function(t){return"handle handle--"+t.type})).attr("cursor",(function(t){return Ro[t.type]})),n.each(f).attr("fill","none").attr("pointer-events","all").on("mousedown.brush",h).filter(i).on("touchstart.brush",h).on("touchmove.brush",d).on("touchend.brush touchcancel.brush",p).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function f(){var t=Dn(this),n=Ho(this).selection;n?(t.selectAll(".selection").style("display",null).attr("x",n[0][0]).attr("y",n[0][1]).attr("width",n[1][0]-n[0][0]).attr("height",n[1][1]-n[0][1]),t.selectAll(".handle").style("display",null).attr("x",(function(t){return"e"===t.type[t.type.length-1]?n[1][0]-u/2:n[0][0]-u/2})).attr("y",(function(t){return"s"===t.type[0]?n[1][1]-u/2:n[0][1]-u/2})).attr("width",(function(t){return"n"===t.type||"s"===t.type?n[1][0]-n[0][0]+u:u})).attr("height",(function(t){return"e"===t.type||"w"===t.type?n[1][1]-n[0][1]+u:u}))):t.selectAll(".selection,.handle").style("display","none").attr("x",null).attr("y",null).attr("width",null).attr("height",null)}function s(t,n,e){var r=t.__brush.emitter;return!r||e&&r.clean?new l(t,n,e):r}function l(t,n,e){this.that=t,this.args=n,this.state=t.__brush,this.active=0,this.clean=e}function h(e){if((!n||e.touches)&&r.apply(this,arguments)){var i,a,u,c,l,h,d,p,g,y,v,_=this,b=e.target.__data__.type,m="selection"===(o&&e.metaKey?b="overlay":b)?Mo:o&&e.altKey?So:To,x=t===Do?null:Io[b],w=t===zo?null:Uo[b],M=Ho(_),A=M.extent,T=M.selection,S=A[0][0],E=A[0][1],k=A[1][0],N=A[1][1],C=0,P=0,z=x&&w&&o&&e.shiftKey,D=Array.from(e.touches||[e],(t=>{const n=t.identifier;return(t=In(t,_)).point0=t.slice(),t.identifier=n,t}));if("overlay"===b){T&&(g=!0);const n=[D[0],D[1]||D[0]];M.selection=T=[[i=t===Do?S:No(n[0][0],n[1][0]),u=t===zo?E:No(n[0][1],n[1][1])],[l=t===Do?k:ko(n[0][0],n[1][0]),d=t===zo?N:ko(n[0][1],n[1][1])]],D.length>1&&U()}else i=T[0][0],u=T[0][1],l=T[1][0],d=T[1][1];a=i,c=u,h=l,p=d;var q=Dn(_).attr("pointer-events","none"),R=q.selectAll(".overlay").attr("cursor",Ro[b]);gi(_);var F=s(_,arguments,!0).beforestart();if(e.touches)F.moved=I,F.ended=B;else{var O=Dn(e.view).on("mousemove.brush",I,!0).on("mouseup.brush",B,!0);o&&O.on("keydown.brush",Y,!0).on("keyup.brush",L,!0),Yn(e.view)}f.call(_),F.start(e,m.name)}function I(t){for(const n of t.changedTouches||[t])for(const t of 
D)t.identifier===n.identifier&&(t.cur=In(n,_));if(z&&!y&&!v&&1===D.length){const t=D[0];Eo(t.cur[0]-t[0])>Eo(t.cur[1]-t[1])?v=!0:y=!0}for(const t of D)t.cur&&(t[0]=t.cur[0],t[1]=t.cur[1]);g=!0,wo(t),U(t)}function U(t){const n=D[0],e=n.point0;var r;switch(C=n[0]-e[0],P=n[1]-e[1],m){case Ao:case Mo:x&&(C=ko(S-i,No(k-l,C)),a=i+C,h=l+C),w&&(P=ko(E-u,No(N-d,P)),c=u+P,p=d+P);break;case To:D[1]?(x&&(a=ko(S,No(k,D[0][0])),h=ko(S,No(k,D[1][0])),x=1),w&&(c=ko(E,No(N,D[0][1])),p=ko(E,No(N,D[1][1])),w=1)):(x<0?(C=ko(S-i,No(k-i,C)),a=i+C,h=l):x>0&&(C=ko(S-l,No(k-l,C)),a=i,h=l+C),w<0?(P=ko(E-u,No(N-u,P)),c=u+P,p=d):w>0&&(P=ko(E-d,No(N-d,P)),c=u,p=d+P));break;case So:x&&(a=ko(S,No(k,i-C*x)),h=ko(S,No(k,l+C*x))),w&&(c=ko(E,No(N,u-P*w)),p=ko(E,No(N,d+P*w)))}h0&&(i=a-C),w<0?d=p-P:w>0&&(u=c-P),m=Ao,R.attr("cursor",Ro.selection),U());break;default:return}wo(t)}function L(t){switch(t.keyCode){case 16:z&&(y=v=z=!1,U());break;case 18:m===So&&(x<0?l=h:x>0&&(i=a),w<0?d=p:w>0&&(u=c),m=To,U());break;case 32:m===Ao&&(t.altKey?(x&&(l=h-C*x,i=a+C*x),w&&(d=p-P*w,u=c+P*w),m=So):(x<0?l=h:x>0&&(i=a),w<0?d=p:w>0&&(u=c),m=To),R.attr("cursor",Ro[b]),U());break;default:return}wo(t)}}function d(t){s(this,arguments).moved(t)}function p(t){s(this,arguments).ended(t)}function g(){var n=this.__brush||{selection:null};return n.extent=Po(e.apply(this,arguments)),n.dim=t,n}return c.move=function(n,e){n.tween?n.on("start.brush",(function(t){s(this,arguments).beforestart().start(t)})).on("interrupt.brush end.brush",(function(t){s(this,arguments).end(t)})).tween("brush",(function(){var n=this,r=n.__brush,i=s(n,arguments),o=r.selection,a=t.input("function"==typeof e?e.apply(this,arguments):e,r.extent),u=Mr(o,a);function c(t){r.selection=1===t&&null===a?null:u(t),f.call(n),i.brush()}return null!==o&&null!==a?c:c(1)})):n.each((function(){var n=this,r=arguments,i=n.__brush,o=t.input("function"==typeof e?e.apply(n,r):e,i.extent),a=s(n,r).beforestart();gi(n),i.selection=null===o?null:o,f.call(n),a.start().brush().end()}))},c.clear=function(t){c.move(t,null)},l.prototype={beforestart:function(){return 1==++this.active&&(this.state.emitter=this,this.starting=!0),this},start:function(t,n){return this.starting?(this.starting=!1,this.emit("start",t,n)):this.emit("brush",t),this},brush:function(t,n){return this.emit("brush",t,n),this},end:function(t,n){return 0==--this.active&&(delete this.state.emitter,this.emit("end",t,n)),this},emit:function(n,e,r){var i=Dn(this.that).datum();a.call(n,this.that,new mo(n,{sourceEvent:e,target:c,selection:t.output(this.state.selection),mode:r,dispatch:a}),i)}},c.extent=function(t){return arguments.length?(e="function"==typeof t?t:bo(Po(t)),c):e},c.filter=function(t){return arguments.length?(r="function"==typeof t?t:bo(!!t),c):r},c.touchable=function(t){return arguments.length?(i="function"==typeof t?t:bo(!!t),c):i},c.handleSize=function(t){return arguments.length?(u=+t,c):u},c.keyModifiers=function(t){return arguments.length?(o=!!t,c):o},c.on=function(){var t=a.on.apply(a,arguments);return t===a?c:t},c}var Vo=Math.abs,$o=Math.cos,Wo=Math.sin,Zo=Math.PI,Ko=Zo/2,Qo=2*Zo,Jo=Math.max,ta=1e-12;function na(t,n){return Array.from({length:n-t},((n,e)=>t+e))}function ea(t){return function(n,e){return t(n.source.value+n.target.value,e.source.value+e.target.value)}}function ra(t,n){var e=0,r=null,i=null,o=null;function a(a){var u,c=a.length,f=new Array(c),s=na(0,c),l=new Array(c*c),h=new Array(c),d=0;a=Float64Array.from({length:c*c},n?(t,n)=>a[n%c][n/c|0]:(t,n)=>a[n/c|0][n%c]);for(let n=0;nr(f[t],f[n])));for(const e of 
s){const r=n;if(t){const t=na(1+~c,c).filter((t=>t<0?a[~t*c+e]:a[e*c+t]));i&&t.sort(((t,n)=>i(t<0?-a[~t*c+e]:a[e*c+t],n<0?-a[~n*c+e]:a[e*c+n])));for(const r of t)if(r<0){(l[~r*c+e]||(l[~r*c+e]={source:null,target:null})).target={index:e,startAngle:n,endAngle:n+=a[~r*c+e]*d,value:a[~r*c+e]}}else{(l[e*c+r]||(l[e*c+r]={source:null,target:null})).source={index:e,startAngle:n,endAngle:n+=a[e*c+r]*d,value:a[e*c+r]}}h[e]={index:e,startAngle:r,endAngle:n,value:f[e]}}else{const t=na(0,c).filter((t=>a[e*c+t]||a[t*c+e]));i&&t.sort(((t,n)=>i(a[e*c+t],a[e*c+n])));for(const r of t){let t;if(eaa)if(Math.abs(s*u-c*f)>aa&&i){var h=e-o,d=r-a,p=u*u+c*c,g=h*h+d*d,y=Math.sqrt(p),v=Math.sqrt(l),_=i*Math.tan((ia-Math.acos((p+l-g)/(2*y*v)))/2),b=_/v,m=_/y;Math.abs(b-1)>aa&&(this._+="L"+(t+b*f)+","+(n+b*s)),this._+="A"+i+","+i+",0,0,"+ +(s*h>f*d)+","+(this._x1=t+m*u)+","+(this._y1=n+m*c)}else this._+="L"+(this._x1=t)+","+(this._y1=n);else;},arc:function(t,n,e,r,i,o){t=+t,n=+n,o=!!o;var a=(e=+e)*Math.cos(r),u=e*Math.sin(r),c=t+a,f=n+u,s=1^o,l=o?r-i:i-r;if(e<0)throw new Error("negative radius: "+e);null===this._x1?this._+="M"+c+","+f:(Math.abs(this._x1-c)>aa||Math.abs(this._y1-f)>aa)&&(this._+="L"+c+","+f),e&&(l<0&&(l=l%oa+oa),l>ua?this._+="A"+e+","+e+",0,1,"+s+","+(t-a)+","+(n-u)+"A"+e+","+e+",0,1,"+s+","+(this._x1=c)+","+(this._y1=f):l>aa&&(this._+="A"+e+","+e+",0,"+ +(l>=ia)+","+s+","+(this._x1=t+e*Math.cos(i))+","+(this._y1=n+e*Math.sin(i))))},rect:function(t,n,e,r){this._+="M"+(this._x0=this._x1=+t)+","+(this._y0=this._y1=+n)+"h"+ +e+"v"+ +r+"h"+-e+"Z"},toString:function(){return this._}};var sa=Array.prototype.slice;function la(t){return function(){return t}}function ha(t){return t.source}function da(t){return t.target}function pa(t){return t.radius}function ga(t){return t.startAngle}function ya(t){return t.endAngle}function va(){return 0}function _a(){return 10}function ba(t){var n=ha,e=da,r=pa,i=pa,o=ga,a=ya,u=va,c=null;function f(){var f,s=n.apply(this,arguments),l=e.apply(this,arguments),h=u.apply(this,arguments)/2,d=sa.call(arguments),p=+r.apply(this,(d[0]=s,d)),g=o.apply(this,d)-Ko,y=a.apply(this,d)-Ko,v=+i.apply(this,(d[0]=l,d)),_=o.apply(this,d)-Ko,b=a.apply(this,d)-Ko;if(c||(c=f=fa()),h>ta&&(Vo(y-g)>2*h+ta?y>g?(g+=h,y-=h):(g-=h,y+=h):g=y=(g+y)/2,Vo(b-_)>2*h+ta?b>_?(_+=h,b-=h):(_-=h,b+=h):_=b=(_+b)/2),c.moveTo(p*$o(g),p*Wo(g)),c.arc(0,0,p,g,y),g!==_||y!==b)if(t){var m=+t.apply(this,arguments),x=v-m,w=(_+b)/2;c.quadraticCurveTo(0,0,x*$o(_),x*Wo(_)),c.lineTo(v*$o(w),v*Wo(w)),c.lineTo(x*$o(b),x*Wo(b))}else c.quadraticCurveTo(0,0,v*$o(_),v*Wo(_)),c.arc(0,0,v,_,b);if(c.quadraticCurveTo(0,0,p*$o(g),p*Wo(g)),c.closePath(),f)return c=null,f+""||null}return t&&(f.headRadius=function(n){return arguments.length?(t="function"==typeof n?n:la(+n),f):t}),f.radius=function(t){return arguments.length?(r=i="function"==typeof t?t:la(+t),f):r},f.sourceRadius=function(t){return arguments.length?(r="function"==typeof t?t:la(+t),f):r},f.targetRadius=function(t){return arguments.length?(i="function"==typeof t?t:la(+t),f):i},f.startAngle=function(t){return arguments.length?(o="function"==typeof t?t:la(+t),f):o},f.endAngle=function(t){return arguments.length?(a="function"==typeof t?t:la(+t),f):a},f.padAngle=function(t){return arguments.length?(u="function"==typeof t?t:la(+t),f):u},f.source=function(t){return arguments.length?(n=t,f):n},f.target=function(t){return arguments.length?(e=t,f):e},f.context=function(t){return arguments.length?(c=null==t?null:t,f):c},f}var ma=Array.prototype.slice;function xa(t,n){return t-n}var 
[Vendored minified d3.js bundle — modules identifiable in the source: d3-contour, d3-delaunay/voronoi, d3-dsv/fetch, d3-quadtree, d3-force, d3-format, d3-geo, d3-hierarchy, d3-polygon, d3-random, d3-scale, d3-time, d3-time-format, d3-scale-chromatic, d3-shape. The minified source is not reproduced; it was garbled in extraction and carries no reviewable changes of its own.]
arguments.length?n(Fb(t)):n()._curve},t}function Bb(t,n){return[(n=+n)*Math.cos(t-=Math.PI/2),n*Math.sin(t)]}function Yb(t){return t.source}function Lb(t){return t.target}function jb(t){var n=Yb,e=Lb,r=kb,i=Nb,o=null;function a(){var a,u=Ab.call(arguments),c=n.apply(this,u),f=e.apply(this,u);if(o||(o=a=fa()),t(o,+r.apply(this,(u[0]=c,u)),+i.apply(this,u),+r.apply(this,(u[0]=f,u)),+i.apply(this,u)),a)return o=null,a+""||null}return a.source=function(t){return arguments.length?(n=t,a):n},a.target=function(t){return arguments.length?(e=t,a):e},a.x=function(t){return arguments.length?(r="function"==typeof t?t:rb(+t),a):r},a.y=function(t){return arguments.length?(i="function"==typeof t?t:rb(+t),a):i},a.context=function(t){return arguments.length?(o=null==t?null:t,a):o},a}function Hb(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n=(n+r)/2,e,n,i,r,i)}function Xb(t,n,e,r,i){t.moveTo(n,e),t.bezierCurveTo(n,e=(e+i)/2,r,e,r,i)}function Gb(t,n,e,r,i){var o=Bb(n,e),a=Bb(n,e=(e+i)/2),u=Bb(r,e),c=Bb(r,i);t.moveTo(o[0],o[1]),t.bezierCurveTo(a[0],a[1],u[0],u[1],c[0],c[1])}Rb.prototype={areaStart:function(){this._curve.areaStart()},areaEnd:function(){this._curve.areaEnd()},lineStart:function(){this._curve.lineStart()},lineEnd:function(){this._curve.lineEnd()},point:function(t,n){this._curve.point(n*Math.sin(t),n*-Math.cos(t))}};var Vb={draw:function(t,n){var e=Math.sqrt(n/hb);t.moveTo(e,0),t.arc(0,0,e,0,pb)}},$b={draw:function(t,n){var e=Math.sqrt(n/5)/2;t.moveTo(-3*e,-e),t.lineTo(-e,-e),t.lineTo(-e,-3*e),t.lineTo(e,-3*e),t.lineTo(e,-e),t.lineTo(3*e,-e),t.lineTo(3*e,e),t.lineTo(e,e),t.lineTo(e,3*e),t.lineTo(-e,3*e),t.lineTo(-e,e),t.lineTo(-3*e,e),t.closePath()}},Wb=Math.sqrt(1/3),Zb=2*Wb,Kb={draw:function(t,n){var e=Math.sqrt(n/Zb),r=e*Wb;t.moveTo(0,-e),t.lineTo(r,0),t.lineTo(0,e),t.lineTo(-r,0),t.closePath()}},Qb=Math.sin(hb/10)/Math.sin(7*hb/10),Jb=Math.sin(pb/10)*Qb,tm=-Math.cos(pb/10)*Qb,nm={draw:function(t,n){var e=Math.sqrt(.8908130915292852*n),r=Jb*e,i=tm*e;t.moveTo(0,-e),t.lineTo(r,i);for(var o=1;o<5;++o){var a=pb*o/5,u=Math.cos(a),c=Math.sin(a);t.lineTo(c*e,-u*e),t.lineTo(u*r-c*i,c*r+u*i)}t.closePath()}},em={draw:function(t,n){var e=Math.sqrt(n),r=-e/2;t.rect(r,r,e,e)}},rm=Math.sqrt(3),im={draw:function(t,n){var e=-Math.sqrt(n/(3*rm));t.moveTo(0,2*e),t.lineTo(-rm*e,-e),t.lineTo(rm*e,-e),t.closePath()}},om=-.5,am=Math.sqrt(3)/2,um=1/Math.sqrt(12),cm=3*(um/2+1),fm={draw:function(t,n){var e=Math.sqrt(n/cm),r=e/2,i=e*um,o=r,a=e*um+e,u=-o,c=a;t.moveTo(r,i),t.lineTo(o,a),t.lineTo(u,c),t.lineTo(om*r-am*i,am*r+om*i),t.lineTo(om*o-am*a,am*o+om*a),t.lineTo(om*u-am*c,am*u+om*c),t.lineTo(om*r+am*i,om*i-am*r),t.lineTo(om*o+am*a,om*a-am*o),t.lineTo(om*u+am*c,om*c-am*u),t.closePath()}},sm=[Vb,$b,Kb,em,nm,im,fm];function lm(){}function hm(t,n,e){t._context.bezierCurveTo((2*t._x0+t._x1)/3,(2*t._y0+t._y1)/3,(t._x0+2*t._x1)/3,(t._y0+2*t._y1)/3,(t._x0+4*t._x1+n)/6,(t._y0+4*t._y1+e)/6)}function dm(t){this._context=t}function pm(t){this._context=t}function gm(t){this._context=t}dm.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){switch(this._point){case 3:hm(this,this._x1,this._y1);case 2:this._context.lineTo(this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;break;case 
2:this._point=3,this._context.lineTo((5*this._x0+this._x1)/6,(5*this._y0+this._y1)/6);default:hm(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}},pm.prototype={areaStart:lm,areaEnd:lm,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._y0=this._y1=this._y2=this._y3=this._y4=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x2,this._y2),this._context.closePath();break;case 2:this._context.moveTo((this._x2+2*this._x3)/3,(this._y2+2*this._y3)/3),this._context.lineTo((this._x3+2*this._x2)/3,(this._y3+2*this._y2)/3),this._context.closePath();break;case 3:this.point(this._x2,this._y2),this.point(this._x3,this._y3),this.point(this._x4,this._y4)}},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._x2=t,this._y2=n;break;case 1:this._point=2,this._x3=t,this._y3=n;break;case 2:this._point=3,this._x4=t,this._y4=n,this._context.moveTo((this._x0+4*this._x1+t)/6,(this._y0+4*this._y1+n)/6);break;default:hm(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}},gm.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._y0=this._y1=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3;var e=(this._x0+4*this._x1+t)/6,r=(this._y0+4*this._y1+n)/6;this._line?this._context.lineTo(e,r):this._context.moveTo(e,r);break;case 3:this._point=4;default:hm(this,t,n)}this._x0=this._x1,this._x1=t,this._y0=this._y1,this._y1=n}};class ym{constructor(t,n){this._context=t,this._x=n}areaStart(){this._line=0}areaEnd(){this._line=NaN}lineStart(){this._point=0}lineEnd(){(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line}point(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;default:this._x?this._context.bezierCurveTo(this._x0=(this._x0+t)/2,this._y0,this._x0,n,t,n):this._context.bezierCurveTo(this._x0,this._y0=(this._y0+n)/2,t,this._y0,t,n)}this._x0=t,this._y0=n}}function vm(t,n){this._basis=new dm(t),this._beta=n}vm.prototype={lineStart:function(){this._x=[],this._y=[],this._basis.lineStart()},lineEnd:function(){var t=this._x,n=this._y,e=t.length-1;if(e>0)for(var r,i=t[0],o=n[0],a=t[e]-i,u=n[e]-o,c=-1;++c<=e;)r=c/e,this._basis.point(this._beta*t[c]+(1-this._beta)*(i+r*a),this._beta*n[c]+(1-this._beta)*(o+r*u));this._x=this._y=null,this._basis.lineEnd()},point:function(t,n){this._x.push(+t),this._y.push(+n)}};var _m=function t(n){function e(t){return 1===n?new dm(t):new vm(t,n)}return e.beta=function(n){return t(+n)},e}(.85);function bm(t,n,e){t._context.bezierCurveTo(t._x1+t._k*(t._x2-t._x0),t._y1+t._k*(t._y2-t._y0),t._x2+t._k*(t._x1-n),t._y2+t._k*(t._y1-e),t._x2,t._y2)}function mm(t,n){this._context=t,this._k=(1-n)/6}mm.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:bm(this,this._x1,this._y1)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 
0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2,this._x1=t,this._y1=n;break;case 2:this._point=3;default:bm(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var xm=function t(n){function e(t){return new mm(t,n)}return e.tension=function(n){return t(+n)},e}(0);function wm(t,n){this._context=t,this._k=(1-n)/6}wm.prototype={areaStart:lm,areaEnd:lm,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._x3=t,this._y3=n;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=n);break;case 2:this._point=3,this._x5=t,this._y5=n;break;default:bm(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var Mm=function t(n){function e(t){return new wm(t,n)}return e.tension=function(n){return t(+n)},e}(0);function Am(t,n){this._context=t,this._k=(1-n)/6}Am.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:bm(this,t,n)}this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var Tm=function t(n){function e(t){return new Am(t,n)}return e.tension=function(n){return t(+n)},e}(0);function Sm(t,n,e){var r=t._x1,i=t._y1,o=t._x2,a=t._y2;if(t._l01_a>lb){var u=2*t._l01_2a+3*t._l01_a*t._l12_a+t._l12_2a,c=3*t._l01_a*(t._l01_a+t._l12_a);r=(r*u-t._x0*t._l12_2a+t._x2*t._l01_2a)/c,i=(i*u-t._y0*t._l12_2a+t._y2*t._l01_2a)/c}if(t._l23_a>lb){var f=2*t._l23_2a+3*t._l23_a*t._l12_a+t._l12_2a,s=3*t._l23_a*(t._l23_a+t._l12_a);o=(o*f+t._x1*t._l23_2a-n*t._l12_2a)/s,a=(a*f+t._y1*t._l23_2a-e*t._l12_2a)/s}t._context.bezierCurveTo(r,i,o,a,t._x2,t._y2)}function Em(t,n){this._context=t,this._alpha=n}Em.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 2:this._context.lineTo(this._x2,this._y2);break;case 3:this.point(this._x2,this._y2)}(this._line||0!==this._line&&1===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;break;case 
2:this._point=3;default:Sm(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var km=function t(n){function e(t){return n?new Em(t,n):new mm(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);function Nm(t,n){this._context=t,this._alpha=n}Nm.prototype={areaStart:lm,areaEnd:lm,lineStart:function(){this._x0=this._x1=this._x2=this._x3=this._x4=this._x5=this._y0=this._y1=this._y2=this._y3=this._y4=this._y5=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){switch(this._point){case 1:this._context.moveTo(this._x3,this._y3),this._context.closePath();break;case 2:this._context.lineTo(this._x3,this._y3),this._context.closePath();break;case 3:this.point(this._x3,this._y3),this.point(this._x4,this._y4),this.point(this._x5,this._y5)}},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1,this._x3=t,this._y3=n;break;case 1:this._point=2,this._context.moveTo(this._x4=t,this._y4=n);break;case 2:this._point=3,this._x5=t,this._y5=n;break;default:Sm(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var Cm=function t(n){function e(t){return n?new Nm(t,n):new wm(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);function Pm(t,n){this._context=t,this._alpha=n}Pm.prototype={areaStart:function(){this._line=0},areaEnd:function(){this._line=NaN},lineStart:function(){this._x0=this._x1=this._x2=this._y0=this._y1=this._y2=NaN,this._l01_a=this._l12_a=this._l23_a=this._l01_2a=this._l12_2a=this._l23_2a=this._point=0},lineEnd:function(){(this._line||0!==this._line&&3===this._point)&&this._context.closePath(),this._line=1-this._line},point:function(t,n){if(t=+t,n=+n,this._point){var e=this._x2-t,r=this._y2-n;this._l23_a=Math.sqrt(this._l23_2a=Math.pow(e*e+r*r,this._alpha))}switch(this._point){case 0:this._point=1;break;case 1:this._point=2;break;case 2:this._point=3,this._line?this._context.lineTo(this._x2,this._y2):this._context.moveTo(this._x2,this._y2);break;case 3:this._point=4;default:Sm(this,t,n)}this._l01_a=this._l12_a,this._l12_a=this._l23_a,this._l01_2a=this._l12_2a,this._l12_2a=this._l23_2a,this._x0=this._x1,this._x1=this._x2,this._x2=t,this._y0=this._y1,this._y1=this._y2,this._y2=n}};var zm=function t(n){function e(t){return n?new Pm(t,n):new Am(t,0)}return e.alpha=function(n){return t(+n)},e}(.5);function Dm(t){this._context=t}function qm(t){return t<0?-1:1}function Rm(t,n,e){var r=t._x1-t._x0,i=n-t._x1,o=(t._y1-t._y0)/(r||i<0&&-0),a=(e-t._y1)/(i||r<0&&-0),u=(o*i+a*r)/(r+i);return(qm(o)+qm(a))*Math.min(Math.abs(o),Math.abs(a),.5*Math.abs(u))||0}function Fm(t,n){var e=t._x1-t._x0;return e?(3*(t._y1-t._y0)/e-n)/2:n}function Om(t,n,e){var r=t._x0,i=t._y0,o=t._x1,a=t._y1,u=(o-r)/3;t._context.bezierCurveTo(r+u,i+u*n,o-u,a-u*e,o,a)}function Im(t){this._context=t}function Um(t){this._context=new Bm(t)}function Bm(t){this._context=t}function Ym(t){this._context=t}function Lm(t){var n,e,r=t.length-1,i=new Array(r),o=new Array(r),a=new Array(r);for(i[0]=0,o[0]=2,a[0]=t[0]+2*t[1],n=1;n=0;--n)i[n]=(a[n]-i[n+1])/o[n];for(o[r-1]=(t[r]+i[r-1])/2,n=0;n1)for(var e,r,i,o=1,a=t[n[0]],u=a.length;o=0;)e[n]=n;return e}function 
Gm(t,n){return t[n]}function Vm(t){const n=[];return n.key=t,n}function $m(t){var n=t.map(Wm);return Xm(t).sort((function(t,e){return n[t]-n[e]}))}function Wm(t){for(var n,e=-1,r=0,i=t.length,o=-1/0;++eo&&(o=n,r=e);return r}function Zm(t){var n=t.map(Km);return Xm(t).sort((function(t,e){return n[t]-n[e]}))}function Km(t){for(var n,e=0,r=-1,i=t.length;++r=0&&(this._t=1-this._t,this._line=1-this._line)},point:function(t,n){switch(t=+t,n=+n,this._point){case 0:this._point=1,this._line?this._context.lineTo(t,n):this._context.moveTo(t,n);break;case 1:this._point=2;default:if(this._t<=0)this._context.lineTo(this._x,n),this._context.lineTo(t,n);else{var e=this._x*(1-this._t)+t*this._t;this._context.lineTo(e,this._y),this._context.lineTo(e,n)}}this._x=t,this._y=n}};var Qm=t=>()=>t;function Jm(t,{sourceEvent:n,target:e,transform:r,dispatch:i}){Object.defineProperties(this,{type:{value:t,enumerable:!0,configurable:!0},sourceEvent:{value:n,enumerable:!0,configurable:!0},target:{value:e,enumerable:!0,configurable:!0},transform:{value:r,enumerable:!0,configurable:!0},_:{value:i}})}function tx(t,n,e){this.k=t,this.x=n,this.y=e}tx.prototype={constructor:tx,scale:function(t){return 1===t?this:new tx(this.k*t,this.x,this.y)},translate:function(t,n){return 0===t&0===n?this:new tx(this.k,this.x+this.k*t,this.y+this.k*n)},apply:function(t){return[t[0]*this.k+this.x,t[1]*this.k+this.y]},applyX:function(t){return t*this.k+this.x},applyY:function(t){return t*this.k+this.y},invert:function(t){return[(t[0]-this.x)/this.k,(t[1]-this.y)/this.k]},invertX:function(t){return(t-this.x)/this.k},invertY:function(t){return(t-this.y)/this.k},rescaleX:function(t){return t.copy().domain(t.range().map(this.invertX,this).map(t.invert,t))},rescaleY:function(t){return t.copy().domain(t.range().map(this.invertY,this).map(t.invert,t))},toString:function(){return"translate("+this.x+","+this.y+") scale("+this.k+")"}};var nx=new tx(1,0,0);function ex(t){for(;!t.__zoom;)if(!(t=t.parentNode))return nx;return t.__zoom}function rx(t){t.stopImmediatePropagation()}function ix(t){t.preventDefault(),t.stopImmediatePropagation()}function ox(t){return!(t.ctrlKey&&"wheel"!==t.type||t.button)}function ax(){var t=this;return t instanceof SVGElement?(t=t.ownerSVGElement||t).hasAttribute("viewBox")?[[(t=t.viewBox.baseVal).x,t.y],[t.x+t.width,t.y+t.height]]:[[0,0],[t.width.baseVal.value,t.height.baseVal.value]]:[[0,0],[t.clientWidth,t.clientHeight]]}function ux(){return this.__zoom||nx}function cx(t){return-t.deltaY*(1===t.deltaMode?.05:t.deltaMode?1:.002)*(t.ctrlKey?10:1)}function fx(){return navigator.maxTouchPoints||"ontouchstart"in this}function sx(t,n,e){var r=t.invertX(n[0][0])-e[0][0],i=t.invertX(n[1][0])-e[1][0],o=t.invertY(n[0][1])-e[0][1],a=t.invertY(n[1][1])-e[1][1];return t.translate(i>r?(r+i)/2:Math.min(0,r)||Math.max(0,i),a>o?(o+a)/2:Math.min(0,o)||Math.max(0,a))}ex.prototype=tx.prototype,t.Adder=g,t.Delaunay=nu,t.FormatSpecifier=uc,t.InternMap=y,t.InternSet=v,t.Voronoi=Wa,t.active=function(t,n){var e,r,i=t.__transition;if(i)for(r in n=null==n?null:n+"",i)if((e=i[r]).state>1&&e.name===n)return new ji([[t]],_o,n,+r);return null},t.arc=function(){var t=vb,n=_b,e=rb(0),r=null,i=bb,o=mb,a=xb,u=null;function c(){var c,f,s=+t.apply(this,arguments),l=+n.apply(this,arguments),h=i.apply(this,arguments)-db,d=o.apply(this,arguments)-db,p=ib(d-h),g=d>h;if(u||(u=c=fa()),llb)if(p>pb-lb)u.moveTo(l*ab(h),l*fb(h)),u.arc(0,0,l,h,d,!g),s>lb&&(u.moveTo(s*ab(d),s*fb(d)),u.arc(0,0,s,d,h,g));else{var 
y,v,_=h,b=d,m=h,x=d,w=p,M=p,A=a.apply(this,arguments)/2,T=A>lb&&(r?+r.apply(this,arguments):sb(s*s+l*l)),S=cb(ib(l-s)/2,+e.apply(this,arguments)),E=S,k=S;if(T>lb){var N=yb(T/s*fb(A)),C=yb(T/l*fb(A));(w-=2*N)>lb?(m+=N*=g?1:-1,x-=N):(w=0,m=x=(h+d)/2),(M-=2*C)>lb?(_+=C*=g?1:-1,b-=C):(M=0,_=b=(h+d)/2)}var P=l*ab(_),z=l*fb(_),D=s*ab(x),q=s*fb(x);if(S>lb){var R,F=l*ab(b),O=l*fb(b),I=s*ab(m),U=s*fb(m);if(plb?k>lb?(y=Mb(I,U,P,z,l,k,g),v=Mb(F,O,D,q,l,k,g),u.moveTo(y.cx+y.x01,y.cy+y.y01),klb&&w>lb?E>lb?(y=Mb(D,q,F,O,s,-E,g),v=Mb(P,z,I,U,s,-E,g),u.lineTo(y.cx+y.x01,y.cy+y.y01),E>a,f=i+2*u>>a,s=wa(20);function l(r){var i=new Float32Array(c*f),l=new Float32Array(c*f);r.forEach((function(r,o,s){var l=+t(r,o,s)+u>>a,h=+n(r,o,s)+u>>a,d=+e(r,o,s);l>=0&&l=0&&h>a),Ca({width:c,height:f,data:l},{width:c,height:f,data:i},o>>a),Na({width:c,height:f,data:i},{width:c,height:f,data:l},o>>a),Ca({width:c,height:f,data:l},{width:c,height:f,data:i},o>>a),Na({width:c,height:f,data:i},{width:c,height:f,data:l},o>>a),Ca({width:c,height:f,data:l},{width:c,height:f,data:i},o>>a);var d=s(i);if(!Array.isArray(d)){var p=B(i);d=F(0,p,d),(d=Z(0,Math.floor(p/d)*d,d)).shift()}return ka().thresholds(d).size([c,f])(i).map(h)}function h(t){return t.value*=Math.pow(2,-2*a),t.coordinates.forEach(d),t}function d(t){t.forEach(p)}function p(t){t.forEach(g)}function g(t){t[0]=t[0]*Math.pow(2,a)-u,t[1]=t[1]*Math.pow(2,a)-u}function y(){return c=r+2*(u=3*o)>>a,f=i+2*u>>a,l}return l.x=function(n){return arguments.length?(t="function"==typeof n?n:wa(+n),l):t},l.y=function(t){return arguments.length?(n="function"==typeof t?t:wa(+t),l):n},l.weight=function(t){return arguments.length?(e="function"==typeof t?t:wa(+t),l):e},l.size=function(t){if(!arguments.length)return[r,i];var n=+t[0],e=+t[1];if(!(n>=0&&e>=0))throw new Error("invalid size");return r=n,i=e,y()},l.cellSize=function(t){if(!arguments.length)return 1<=1))throw new Error("invalid cell size");return a=Math.floor(Math.log(t)/Math.LN2),y()},l.thresholds=function(t){return arguments.length?(s="function"==typeof t?t:Array.isArray(t)?wa(ma.call(t)):wa(t),l):s},l.bandwidth=function(t){if(!arguments.length)return Math.sqrt(o*(o+1));if(!((t=+t)>=0))throw new Error("invalid bandwidth");return o=Math.round((Math.sqrt(4*t*t+1)-1)/2),y()},l},t.contours=ka,t.count=c,t.create=function(t){return Dn(At(t).call(document.documentElement))},t.creator=At,t.cross=function(...t){const n="function"==typeof t[t.length-1]&&function(t){return n=>t(...n)}(t.pop()),e=(t=t.map(l)).map(f),r=t.length-1,i=new Array(r+1).fill(0),o=[];if(r<0||e.some(s))return o;for(;;){o.push(i.map(((n,e)=>t[e][n])));let a=r;for(;++i[a]===e[a];){if(0===a)return n?o.map(n):o;i[a--]=0}}},t.csv=Pu,t.csvFormat=hu,t.csvFormatBody=du,t.csvFormatRow=gu,t.csvFormatRows=pu,t.csvFormatValue=yu,t.csvParse=su,t.csvParseRows=lu,t.cubehelix=tr,t.cumsum=function(t,n){var e=0,r=0;return Float64Array.from(t,void 0===n?t=>e+=+t||0:i=>e+=+n(i,r++,t)||0)},t.curveBasis=function(t){return new dm(t)},t.curveBasisClosed=function(t){return new pm(t)},t.curveBasisOpen=function(t){return new gm(t)},t.curveBumpX=function(t){return new ym(t,!0)},t.curveBumpY=function(t){return new ym(t,!1)},t.curveBundle=_m,t.curveCardinal=xm,t.curveCardinalClosed=Mm,t.curveCardinalOpen=Tm,t.curveCatmullRom=km,t.curveCatmullRomClosed=Cm,t.curveCatmullRomOpen=zm,t.curveLinear=Eb,t.curveLinearClosed=function(t){return new Dm(t)},t.curveMonotoneX=function(t){return new Im(t)},t.curveMonotoneY=function(t){return new Um(t)},t.curveNatural=function(t){return new 
Ym(t)},t.curveStep=function(t){return new jm(t,.5)},t.curveStepAfter=function(t){return new jm(t,1)},t.curveStepBefore=function(t){return new jm(t,0)},t.descending=function(t,n){return nt?1:n>=t?0:NaN},t.deviation=d,t.difference=function(t,...n){t=new Set(t);for(const e of n)for(const n of e)t.delete(n);return t},t.disjoint=function(t,n){const e=n[Symbol.iterator](),r=new Set;for(const n of t){if(r.has(n))return!1;let t,i;for(;({value:t,done:i}=e.next())&&!i;){if(Object.is(n,t))return!1;r.add(t)}}return!0},t.dispatch=pt,t.drag=function(){var t,n,e,r,i=Xn,o=Gn,a=Vn,u=$n,c={},f=pt("start","drag","end"),s=0,l=0;function h(t){t.on("mousedown.drag",d).filter(u).on("touchstart.drag",y).on("touchmove.drag",v).on("touchend.drag touchcancel.drag",_).style("touch-action","none").style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function d(a,u){if(!r&&i.call(this,a,u)){var c=b(this,o.call(this,a,u),a,u,"mouse");c&&(Dn(a.view).on("mousemove.drag",p,!0).on("mouseup.drag",g,!0),Yn(a.view),Un(a),e=!1,t=a.clientX,n=a.clientY,c("start",a))}}function p(r){if(Bn(r),!e){var i=r.clientX-t,o=r.clientY-n;e=i*i+o*o>l}c.mouse("drag",r)}function g(t){Dn(t.view).on("mousemove.drag mouseup.drag",null),Ln(t.view,e),Bn(t),c.mouse("end",t)}function y(t,n){if(i.call(this,t,n)){var e,r,a=t.changedTouches,u=o.call(this,t,n),c=a.length;for(e=0;e+t,t.easePoly=Ki,t.easePolyIn=Wi,t.easePolyInOut=Ki,t.easePolyOut=Zi,t.easeQuad=Vi,t.easeQuadIn=function(t){return t*t},t.easeQuadInOut=Vi,t.easeQuadOut=function(t){return t*(2-t)},t.easeSin=to,t.easeSinIn=function(t){return 1==+t?1:1-Math.cos(t*Ji)},t.easeSinInOut=to,t.easeSinOut=function(t){return Math.sin(t*Ji)},t.every=function(t,n){if("function"!=typeof n)throw new TypeError("test is not a function");let e=-1;for(const r of t)if(!n(r,++e,t))return!1;return!0},t.extent=p,t.fcumsum=function(t,n){const e=new g;let r=-1;return Float64Array.from(t,void 0===n?t=>e.add(+t||0):i=>e.add(+n(i,++r,t)||0))},t.filter=function(t,n){if("function"!=typeof n)throw new TypeError("test is not a function");const e=[];let r=-1;for(const i of t)n(i,++r,t)&&e.push(i);return e},t.forceCenter=function(t,n){var e,r=1;function i(){var i,o,a=e.length,u=0,c=0;for(i=0;if+p||os+p||ac.index){var g=f-u.x-u.vx,y=s-u.y-u.vy,v=g*g+y*y;vt.r&&(t.r=t[n].r)}function c(){if(n){var r,i,o=n.length;for(e=new Array(o),r=0;r[u(t,n,r),t])));for(a=0,i=new Array(f);a=u)){(t.data!==n||t.next)&&(0===l&&(p+=(l=Vu(e))*l),0===h&&(p+=(h=Vu(e))*h),p(t=(1664525*t+1013904223)%Qu)/Qu}();function l(){h(),f.call("tick",n),e1?(null==e?u.delete(t):u.set(t,p(e)),n):u.get(t)},find:function(n,e,r){var i,o,a,u,c,f=0,s=t.length;for(null==r?r=1/0:r*=r,f=0;f1?(f.on(t,e),n):f.on(t)}}},t.forceX=function(t){var n,e,r,i=Gu(.1);function o(t){for(var i,o=0,a=n.length;o=.12&&i<.234&&r>=-.425&&r<-.214?u:i>=.166&&i<.234&&r>=-.214&&r<-.115?c:a).invert(t)},s.stream=function(e){return t&&n===e?t:(r=[a.stream(n=e),u.stream(e),c.stream(e)],i=r.length,t={point:function(t,n){for(var e=-1;++eKf(r[0],r[1])&&(r[1]=i[1]),Kf(i[0],r[1])>Kf(r[0],r[1])&&(r[0]=i[0])):o.push(r=i);for(a=-1/0,n=0,r=o[e=o.length-1];n<=e;r=i,++n)i=o[n],(u=Kf(r[1],i[0]))>a&&(a=u,nf=i[0],rf=r[1])}return lf=hf=null,nf===1/0||ef===1/0?[[NaN,NaN],[NaN,NaN]]:[[nf,ef],[rf,of]]},t.geoCentroid=function(t){Ef=kf=Nf=Cf=Pf=zf=Df=qf=0,Rf=new g,Ff=new g,Of=new g,Wc(t,ts);var n=+Rf,e=+Ff,r=+Of,i=Dc(n,e,r);return i2?t[2]+90:90]):[(t=e())[0],t[1],t[2]-90]},e([0,0,90]).scale(159.155)},t.geoTransverseMercatorRaw=Bh,t.gray=function(t,n){return new Fe(t,0,0,null==n?1:n)},t.greatest=function(t,e=n){let 
r,i=!1;if(1===e.length){let o;for(const a of t){const t=e(a);(i?n(t,o)>0:0===n(t,t))&&(r=a,o=t,i=!0)}}else for(const n of t)(i?e(n,r)>0:0===e(n,n))&&(r=n,i=!0);return r},t.greatestIndex=function(t,e=n){if(1===e.length)return G(t,e);let r,i=-1,o=-1;for(const n of t)++o,(i<0?0===e(n,n):e(n,r)>0)&&(r=n,i=o);return i},t.group=M,t.groupSort=function(t,e,r){return(1===e.length?k(A(t,e,r),(([t,e],[r,i])=>n(e,i)||n(t,r))):k(M(t,r),(([t,r],[i,o])=>e(r,o)||n(t,i)))).map((([t])=>t))},t.groups=function(t,...n){return S(t,Array.from,w,n)},t.hcl=Le,t.hierarchy=Xh,t.histogram=U,t.hsl=Ae,t.html=Fu,t.image=function(t,n){return new Promise((function(e,r){var i=new Image;for(var o in n)i[o]=n[o];i.onerror=r,i.onload=function(){e(i)},i.src=t}))},t.index=function(t,...n){return S(t,w,T,n)},t.indexes=function(t,...n){return S(t,Array.from,T,n)},t.interpolate=Mr,t.interpolateArray=function(t,n){return(gr(n)?pr:yr)(t,n)},t.interpolateBasis=rr,t.interpolateBasisClosed=ir,t.interpolateBlues=q_,t.interpolateBrBG=Gv,t.interpolateBuGn=s_,t.interpolateBuPu=h_,t.interpolateCividis=function(t){return t=Math.max(0,Math.min(1,t)),"rgb("+Math.max(0,Math.min(255,Math.round(-4.54-t*(35.34-t*(2381.73-t*(6402.7-t*(7024.72-2710.57*t)))))))+", "+Math.max(0,Math.min(255,Math.round(32.49+t*(170.73+t*(52.82-t*(131.46-t*(176.58-67.37*t)))))))+", "+Math.max(0,Math.min(255,Math.round(81.24+t*(442.36-t*(2482.43-t*(6167.24-t*(6614.94-2475.67*t)))))))+")"},t.interpolateCool=V_,t.interpolateCubehelix=Yr,t.interpolateCubehelixDefault=X_,t.interpolateCubehelixLong=Lr,t.interpolateDate=vr,t.interpolateDiscrete=function(t){var n=t.length;return function(e){return t[Math.max(0,Math.min(n-1,Math.floor(e*n)))]}},t.interpolateGnBu=p_,t.interpolateGreens=F_,t.interpolateGreys=I_,t.interpolateHcl=Ir,t.interpolateHclLong=Ur,t.interpolateHsl=Rr,t.interpolateHslLong=Fr,t.interpolateHue=function(t,n){var e=ur(+t,+n);return function(t){var n=e(t);return n-360*Math.floor(n/360)}},t.interpolateInferno=nb,t.interpolateLab=function(t,n){var e=fr((t=Re(t)).l,(n=Re(n)).l),r=fr(t.a,n.a),i=fr(t.b,n.b),o=fr(t.opacity,n.opacity);return function(n){return t.l=e(n),t.a=r(n),t.b=i(n),t.opacity=o(n),t+""}},t.interpolateMagma=tb,t.interpolateNumber=_r,t.interpolateNumberArray=pr,t.interpolateObject=br,t.interpolateOrRd=y_,t.interpolateOranges=H_,t.interpolatePRGn=$v,t.interpolatePiYG=Zv,t.interpolatePlasma=eb,t.interpolatePuBu=m_,t.interpolatePuBuGn=__,t.interpolatePuOr=Qv,t.interpolatePuRd=w_,t.interpolatePurples=B_,t.interpolateRainbow=function(t){(t<0||t>1)&&(t-=Math.floor(t));var n=Math.abs(t-.5);return $_.h=360*t-100,$_.s=1.5-1.5*n,$_.l=.8-.9*n,$_+""},t.interpolateRdBu=t_,t.interpolateRdGy=e_,t.interpolateRdPu=A_,t.interpolateRdYlBu=i_,t.interpolateRdYlGn=a_,t.interpolateReds=L_,t.interpolateRgb=sr,t.interpolateRgbBasis=hr,t.interpolateRgbBasisClosed=dr,t.interpolateRound=Ar,t.interpolateSinebow=function(t){var n;return t=(.5-t)*Math.PI,W_.r=255*(n=Math.sin(t))*n,W_.g=255*(n=Math.sin(t+Z_))*n,W_.b=255*(n=Math.sin(t+K_))*n,W_+""},t.interpolateSpectral=c_,t.interpolateString=wr,t.interpolateTransformCss=Cr,t.interpolateTransformSvg=Pr,t.interpolateTurbo=function(t){return t=Math.max(0,Math.min(1,t)),"rgb("+Math.max(0,Math.min(255,Math.round(34.61+t*(1172.33-t*(10793.56-t*(33300.12-t*(38394.49-14825.05*t)))))))+", "+Math.max(0,Math.min(255,Math.round(23.31+t*(557.33+t*(1225.33-t*(3574.96-t*(1073.77+707.56*t)))))))+", 
"+Math.max(0,Math.min(255,Math.round(27.2+t*(3211.1-t*(15327.97-t*(27814-t*(22569.18-6838.66*t)))))))+")"},t.interpolateViridis=J_,t.interpolateWarm=G_,t.interpolateYlGn=k_,t.interpolateYlGnBu=S_,t.interpolateYlOrBr=C_,t.interpolateYlOrRd=z_,t.interpolateZoom=Dr,t.interrupt=gi,t.intersection=function(t,...n){t=new Set(t),n=n.map(et);t:for(const e of t)for(const r of n)if(!r.has(e)){t.delete(e);continue t}return t},t.interval=function(t,n,e){var r=new ei,i=n;return null==n?(r.restart(t,n,e),r):(r._restart=r.restart,r.restart=function(t,n,e){n=+n,e=null==e?ti():+e,r._restart((function o(a){a+=i,r._restart(o,i+=n,e),t(a)}),n,e)},r.restart(t,n,e),r)},t.isoFormat=Mv,t.isoParse=Av,t.json=function(t,n){return fetch(t,n).then(Du)},t.lab=Re,t.lch=function(t,n,e,r){return 1===arguments.length?Ye(t):new je(e,n,t,null==r?1:r)},t.least=function(t,e=n){let r,i=!1;if(1===e.length){let o;for(const a of t){const t=e(a);(i?n(t,o)<0:0===n(t,t))&&(r=a,o=t,i=!0)}}else for(const n of t)(i?e(n,r)<0:0===e(n,n))&&(r=n,i=!0);return r},t.leastIndex=K,t.line=Cb,t.lineRadial=Ib,t.linkHorizontal=function(){return jb(Hb)},t.linkRadial=function(){var t=jb(Gb);return t.angle=t.x,delete t.x,t.radius=t.y,delete t.y,t},t.linkVertical=function(){return jb(Xb)},t.local=Rn,t.map=function(t,n){if("function"!=typeof t[Symbol.iterator])throw new TypeError("values is not iterable");if("function"!=typeof n)throw new TypeError("mapper is not a function");return Array.from(t,((e,r)=>n(e,r,t)))},t.matcher=Ct,t.max=B,t.maxIndex=G,t.mean=function(t,n){let e=0,r=0;if(void 0===n)for(let n of t)null!=n&&(n=+n)>=n&&(++e,r+=n);else{let i=-1;for(let o of t)null!=(o=n(o,++i,t))&&(o=+o)>=o&&(++e,r+=o)}if(e)return r/e},t.median=function(t,n){return H(t,.5,n)},t.merge=V,t.min=Y,t.minIndex=$,t.namespace=xt,t.namespaces=mt,t.nice=O,t.now=ti,t.pack=function(){var t=null,n=1,e=1,r=hd;function i(i){return i.x=n/2,i.y=e/2,t?i.eachBefore(gd(t)).eachAfter(yd(r,.5)).eachBefore(vd(1)):i.eachBefore(gd(pd)).eachAfter(yd(hd,1)).eachAfter(yd(r,i.r/Math.min(n,e))).eachBefore(vd(Math.min(n,e)/(2*i.r))),i}return i.radius=function(n){return arguments.length?(t=sd(n),i):t},i.size=function(t){return arguments.length?(n=+t[0],e=+t[1],i):[n,e]},i.padding=function(t){return arguments.length?(r="function"==typeof t?t:dd(+t),i):r},i},t.packEnclose=Kh,t.packSiblings=function(t){return fd(t),t},t.pairs=function(t,n=W){const e=[];let r,i=!1;for(const o of t)i&&e.push(n(r,o)),r=o,i=!0;return e},t.partition=function(){var t=1,n=1,e=0,r=!1;function i(i){var o=i.height+1;return i.x0=i.y0=e,i.x1=t,i.y1=n/o,i.eachBefore(function(t,n){return function(r){r.children&&bd(r,r.x0,t*(r.depth+1)/n,r.x1,t*(r.depth+2)/n);var i=r.x0,o=r.y0,a=r.x1-e,u=r.y1-e;a0&&(d+=l);for(null!=n?p.sort((function(t,e){return n(g[t],g[e])})):null!=e&&p.sort((function(t,n){return e(a[t],a[n])})),u=0,f=d?(v-h*b)/d:0;u0?l*f:0)+b,g[c]={data:a[c],index:u,value:l,startAngle:y,endAngle:s,padAngle:_};return g}return a.value=function(n){return arguments.length?(t="function"==typeof n?n:rb(+n),a):t},a.sortValues=function(t){return arguments.length?(n=t,e=null,a):n},a.sort=function(t){return arguments.length?(e=t,n=null,a):e},a.startAngle=function(t){return arguments.length?(r="function"==typeof t?t:rb(+t),a):r},a.endAngle=function(t){return arguments.length?(i="function"==typeof t?t:rb(+t),a):i},a.padAngle=function(t){return arguments.length?(o="function"==typeof t?t:rb(+t),a):o},a},t.piecewise=jr,t.pointRadial=Bb,t.pointer=In,t.pointers=function(t,n){return t.target&&(t=On(t),void 
0===n&&(n=t.currentTarget),t=t.touches||[t]),Array.from(t,(t=>In(t,n)))},t.polygonArea=function(t){for(var n,e=-1,r=t.length,i=t[r-1],o=0;++eu!=f>u&&a<(c-e)*(u-r)/(f-r)+e&&(s=!s),c=e,f=r;return s},t.polygonHull=function(t){if((e=t.length)<3)return null;var n,e,r=new Array(e),i=new Array(e);for(n=0;n=0;--n)f.push(t[r[o[n]][2]]);for(n=+u;n(n=1664525*n+1013904223|0,ep*(n>>>0))},t.randomLogNormal=Ld,t.randomLogistic=tp,t.randomNormal=Yd,t.randomPareto=Gd,t.randomPoisson=np,t.randomUniform=Ud,t.randomWeibull=Qd,t.range=Z,t.reduce=function(t,n,e){if("function"!=typeof n)throw new TypeError("reducer is not a function");const r=t[Symbol.iterator]();let i,o,a=-1;if(arguments.length<3){if(({done:i,value:e}=r.next()),i)return;++a}for(;({done:i,value:o}=r.next()),!i;)e=n(e,o,++a,t);return e},t.reverse=function(t){if("function"!=typeof t[Symbol.iterator])throw new TypeError("values is not iterable");return Array.from(t).reverse()},t.rgb=ve,t.ribbon=function(){return ba()},t.ribbonArrow=function(){return ba(_a)},t.rollup=A,t.rollups=function(t,n,...e){return S(t,Array.from,n,e)},t.scaleBand=up,t.scaleDiverging=function t(){var n=bp(Pv()(lp));return n.copy=function(){return Nv(n,t())},ip.apply(n,arguments)},t.scaleDivergingLog=function t(){var n=Ep(Pv()).domain([.1,1,10]);return n.copy=function(){return Nv(n,t()).base(n.base())},ip.apply(n,arguments)},t.scaleDivergingPow=zv,t.scaleDivergingSqrt=function(){return zv.apply(null,arguments).exponent(.5)},t.scaleDivergingSymlog=function t(){var n=Cp(Pv());return n.copy=function(){return Nv(n,t()).constant(n.constant())},ip.apply(n,arguments)},t.scaleIdentity=function t(n){var e;function r(t){return null==t||isNaN(t=+t)?e:t}return r.invert=r,r.domain=r.range=function(t){return arguments.length?(n=Array.from(t,fp),r):n.slice()},r.unknown=function(t){return arguments.length?(e=t,r):e},r.copy=function(){return t(n).unknown(e)},n=arguments.length?Array.from(n,fp):[0,1],bp(r)},t.scaleImplicit=op,t.scaleLinear=function t(){var n=vp();return n.copy=function(){return gp(n,t())},rp.apply(n,arguments),bp(n)},t.scaleLog=function t(){var n=Ep(yp()).domain([1,10]);return n.copy=function(){return gp(n,t()).base(n.base())},rp.apply(n,arguments),n},t.scaleOrdinal=ap,t.scalePoint=function(){return cp(up.apply(null,arguments).paddingInner(1))},t.scalePow=Rp,t.scaleQuantile=function t(){var e,r=[],i=[],a=[];function u(){var t=0,n=Math.max(1,i.length);for(a=new Array(n-1);++t0?a[n-1]:r[0],n=i?[a[i-1],r]:[a[n-1],a[n]]},c.unknown=function(t){return arguments.length?(n=t,c):c},c.thresholds=function(){return a.slice()},c.copy=function(){return t().domain([e,r]).range(u).unknown(n)},rp.apply(bp(c),arguments)},t.scaleRadial=function t(){var n,e=vp(),r=[0,1],i=!1;function o(t){var r=Op(e(t));return isNaN(r)?n:i?Math.round(r):r}return o.invert=function(t){return e.invert(Fp(t))},o.domain=function(t){return arguments.length?(e.domain(t),o):e.domain()},o.range=function(t){return arguments.length?(e.range((r=Array.from(t,fp)).map(Fp)),o):r.slice()},o.rangeRound=function(t){return o.range(t).round(!0)},o.round=function(t){return arguments.length?(i=!!t,o):i},o.clamp=function(t){return arguments.length?(e.clamp(t),o):e.clamp()},o.unknown=function(t){return arguments.length?(n=t,o):n},o.copy=function(){return t(e.domain(),r).round(i).clamp(e.clamp()).unknown(n)},rp.apply(o,arguments),bp(o)},t.scaleSequential=function t(){var n=bp(kv()(lp));return n.copy=function(){return Nv(n,t())},ip.apply(n,arguments)},t.scaleSequentialLog=function t(){var n=Ep(kv()).domain([1,10]);return 
n.copy=function(){return Nv(n,t()).base(n.base())},ip.apply(n,arguments)},t.scaleSequentialPow=Cv,t.scaleSequentialQuantile=function t(){var e=[],r=lp;function i(t){if(null!=t&&!isNaN(t=+t))return r((o(e,t,1)-1)/(e.length-1))}return i.domain=function(t){if(!arguments.length)return e.slice();e=[];for(let n of t)null==n||isNaN(n=+n)||e.push(n);return e.sort(n),i},i.interpolator=function(t){return arguments.length?(r=t,i):r},i.range=function(){return e.map(((t,n)=>r(n/(e.length-1))))},i.quantiles=function(t){return Array.from({length:t+1},((n,r)=>H(e,r/t)))},i.copy=function(){return t(r).domain(e)},ip.apply(i,arguments)},t.scaleSequentialSqrt=function(){return Cv.apply(null,arguments).exponent(.5)},t.scaleSequentialSymlog=function t(){var n=Cp(kv());return n.copy=function(){return Nv(n,t()).constant(n.constant())},ip.apply(n,arguments)},t.scaleSqrt=function(){return Rp.apply(null,arguments).exponent(.5)},t.scaleSymlog=function t(){var n=Cp(yp());return n.copy=function(){return gp(n,t()).constant(n.constant())},rp.apply(n,arguments)},t.scaleThreshold=function t(){var n,e=[.5],r=[0,1],i=1;function a(t){return null!=t&&t<=t?r[o(e,t,0,i)]:n}return a.domain=function(t){return arguments.length?(e=Array.from(t),i=Math.min(e.length,r.length-1),a):e.slice()},a.range=function(t){return arguments.length?(r=Array.from(t),i=Math.min(e.length,r.length-1),a):r.slice()},a.invertExtent=function(t){var n=r.indexOf(t);return[e[n-1],e[n]]},a.unknown=function(t){return arguments.length?(n=t,a):n},a.copy=function(){return t().domain(e).range(r).unknown(n)},rp.apply(a,arguments)},t.scaleTime=function(){return rp.apply(Ev(Kg,Qg,xg,bg,og,eg,tg,Qp,Zp,t.timeFormat).domain([new Date(2e3,0,1),new Date(2e3,0,2)]),arguments)},t.scaleUtc=function(){return rp.apply(Ev(Wg,Zg,Gg,Hg,Cg,Eg,Tg,Mg,Zp,t.utcFormat).domain([Date.UTC(2e3,0,1),Date.UTC(2e3,0,2)]),arguments)},t.scan=function(t,n){const e=K(t,n);return e<0?void 0:e},t.schemeAccent=Rv,t.schemeBlues=D_,t.schemeBrBG=Xv,t.schemeBuGn=f_,t.schemeBuPu=l_,t.schemeCategory10=qv,t.schemeDark2=Fv,t.schemeGnBu=d_,t.schemeGreens=R_,t.schemeGreys=O_,t.schemeOrRd=g_,t.schemeOranges=j_,t.schemePRGn=Vv,t.schemePaired=Ov,t.schemePastel1=Iv,t.schemePastel2=Uv,t.schemePiYG=Wv,t.schemePuBu=b_,t.schemePuBuGn=v_,t.schemePuOr=Kv,t.schemePuRd=x_,t.schemePurples=U_,t.schemeRdBu=Jv,t.schemeRdGy=n_,t.schemeRdPu=M_,t.schemeRdYlBu=r_,t.schemeRdYlGn=o_,t.schemeReds=Y_,t.schemeSet1=Bv,t.schemeSet2=Yv,t.schemeSet3=Lv,t.schemeSpectral=u_,t.schemeTableau10=jv,t.schemeYlGn=E_,t.schemeYlGnBu=T_,t.schemeYlOrBr=N_,t.schemeYlOrRd=P_,t.select=Dn,t.selectAll=function(t){return"string"==typeof t?new Pn([document.querySelectorAll(t)],[document.documentElement]):new Pn([null==t?[]:Et(t)],Cn)},t.selection=zn,t.selector=St,t.selectorAll=Nt,t.shuffle=Q,t.shuffler=J,t.some=function(t,n){if("function"!=typeof n)throw new TypeError("test is not a function");let e=-1;for(const r of t)if(n(r,++e,t))return!0;return!1},t.sort=k,t.stack=function(){var t=rb([]),n=Xm,e=Hm,r=Gm;function i(i){var o,a,u=Array.from(t.apply(this,arguments),Vm),c=u.length,f=-1;for(const t of i)for(o=0,++f;o0)for(var e,r,i,o,a,u,c=0,f=t[n[0]].length;c0?(r[0]=o,r[1]=o+=i):i<0?(r[1]=a,r[0]=a+=i):(r[0]=0,r[1]=i)},t.stackOffsetExpand=function(t,n){if((r=t.length)>0){for(var e,r,i,o=0,a=t[0].length;o0){for(var e,r=0,i=t[n[0]],o=i.length;r0&&(r=(e=t[n[0]]).length)>0){for(var e,r,i,o=0,a=1;a0)throw new Error("cycle");return o}return e.id=function(n){return arguments.length?(t=ld(n),e):t},e.parentId=function(t){return 
arguments.length?(n=ld(t),e):n},e},t.style=Jt,t.subset=function(t,n){return rt(n,t)},t.sum=function(t,n){let e=0;if(void 0===n)for(let n of t)(n=+n)&&(e+=n);else{let r=-1;for(let i of t)(i=+n(i,++r,t))&&(e+=i)}return e},t.superset=rt,t.svg=Ou,t.symbol=function(t,n){var e=null;function r(){var r;if(e||(e=r=fa()),t.apply(this,arguments).draw(e,+n.apply(this,arguments)),r)return e=null,r+""||null}return t="function"==typeof t?t:rb(t||Vb),n="function"==typeof n?n:rb(void 0===n?64:+n),r.type=function(n){return arguments.length?(t="function"==typeof n?n:rb(n),r):t},r.size=function(t){return arguments.length?(n="function"==typeof t?t:rb(+t),r):n},r.context=function(t){return arguments.length?(e=null==t?null:t,r):e},r},t.symbolCircle=Vb,t.symbolCross=$b,t.symbolDiamond=Kb,t.symbolSquare=em,t.symbolStar=nm,t.symbolTriangle=im,t.symbolWye=fm,t.symbols=sm,t.text=Nu,t.thresholdFreedmanDiaconis=function(t,n,e){return Math.ceil((e-n)/(2*(H(t,.75)-H(t,.25))*Math.pow(c(t),-1/3)))},t.thresholdScott=function(t,n,e){return Math.ceil((e-n)/(3.5*d(t)*Math.pow(c(t),-1/3)))},t.thresholdSturges=I,t.tickFormat=_p,t.tickIncrement=R,t.tickStep=F,t.ticks=q,t.timeDay=eg,t.timeDays=rg,t.timeFormatDefaultLocale=xv,t.timeFormatLocale=ey,t.timeFriday=sg,t.timeFridays=vg,t.timeHour=tg,t.timeHours=ng,t.timeInterval=Bp,t.timeMillisecond=Yp,t.timeMilliseconds=Lp,t.timeMinute=Qp,t.timeMinutes=Jp,t.timeMonday=ag,t.timeMondays=dg,t.timeMonth=bg,t.timeMonths=mg,t.timeSaturday=lg,t.timeSaturdays=_g,t.timeSecond=Zp,t.timeSeconds=Kp,t.timeSunday=og,t.timeSundays=hg,t.timeThursday=fg,t.timeThursdays=yg,t.timeTickInterval=Qg,t.timeTicks=Kg,t.timeTuesday=ug,t.timeTuesdays=pg,t.timeWednesday=cg,t.timeWednesdays=gg,t.timeWeek=og,t.timeWeeks=hg,t.timeYear=xg,t.timeYears=wg,t.timeout=ci,t.timer=ri,t.timerFlush=ii,t.transition=Hi,t.transpose=tt,t.tree=function(){var t=Ad,n=1,e=1,r=null;function i(i){var c=function(t){for(var n,e,r,i,o,a=new Nd(t,0),u=[a];n=u.pop();)if(r=n._.children)for(n.children=new Array(o=r.length),i=o-1;i>=0;--i)u.push(e=n.children[i]=new Nd(r[i],i)),e.parent=n;return(a.parent=new Nd(null,0)).children=[a],a}(i);if(c.eachAfter(o),c.parent.m=-c.z,c.eachBefore(a),r)i.eachBefore(u);else{var f=i,s=i,l=i;i.eachBefore((function(t){t.xs.x&&(s=t),t.depth>l.depth&&(l=t)}));var h=f===s?1:t(f,s)/2,d=h-f.x,p=n/(s.x+h+d),g=e/(l.depth||1);i.eachBefore((function(t){t.x=(t.x+d)*p,t.y=t.depth*g}))}return i}function o(n){var e=n.children,r=n.parent.children,i=n.i?r[n.i-1]:null;if(e){!function(t){for(var n,e=0,r=0,i=t.children,o=i.length;--o>=0;)(n=i[o]).z+=e,n.m+=e,e+=n.s+(r+=n.c)}(n);var o=(e[0].z+e[e.length-1].z)/2;i?(n.z=i.z+t(n._,i._),n.m=n.z-o):n.z=o}else i&&(n.z=i.z+t(n._,i._));n.parent.A=function(n,e,r){if(e){for(var i,o=n,a=n,u=e,c=o.parent.children[0],f=o.m,s=a.m,l=u.m,h=c.m;u=Sd(u),o=Td(o),u&&o;)c=Td(c),(a=Sd(a)).a=n,(i=u.z+l-o.z-f+t(u._,o._))>0&&(Ed(kd(u,n,r),n,i),f+=i,s+=i),l+=u.m,f+=o.m,h+=c.m,s+=a.m;u&&!Sd(a)&&(a.t=u,a.m+=l-s),o&&!Td(c)&&(c.t=o,c.m+=f-h,r=n)}return r}(n,i,n.parent.A||r[0])}function a(t){t._.x=t.z+t.parent.m,t.m+=t.parent.m}function u(t){t.x*=n,t.y=t.depth*e}return i.separation=function(n){return arguments.length?(t=n,i):t},i.size=function(t){return arguments.length?(r=!1,n=+t[0],e=+t[1],i):r?null:[n,e]},i.nodeSize=function(t){return arguments.length?(r=!0,n=+t[0],e=+t[1],i):r?[n,e]:null},i},t.treemap=function(){var t=Dd,n=!1,e=1,r=1,i=[0],o=hd,a=hd,u=hd,c=hd,f=hd;function s(t){return t.x0=t.y0=0,t.x1=e,t.y1=r,t.eachBefore(l),i=[0],n&&t.eachBefore(_d),t}function l(n){var 
e=i[n.depth],r=n.x0+e,s=n.y0+e,l=n.x1-e,h=n.y1-e;l=e-1){var s=u[n];return s.x0=i,s.y0=o,s.x1=a,void(s.y1=c)}var l=f[n],h=r/2+l,d=n+1,p=e-1;for(;d>>1;f[g]c-o){var _=r?(i*v+a*y)/r:a;t(n,d,y,i,o,_,c),t(d,e,v,_,o,a,c)}else{var b=r?(o*v+c*y)/r:c;t(n,d,y,i,o,a,b),t(d,e,v,i,b,a,c)}}(0,c,t.value,n,e,r,i)},t.treemapDice=bd,t.treemapResquarify=qd,t.treemapSlice=Cd,t.treemapSliceDice=function(t,n,e,r,i){(1&t.depth?Cd:bd)(t,n,e,r,i)},t.treemapSquarify=Dd,t.tsv=zu,t.tsvFormat=mu,t.tsvFormatBody=xu,t.tsvFormatRow=Mu,t.tsvFormatRows=wu,t.tsvFormatValue=Au,t.tsvParse=_u,t.tsvParseRows=bu,t.union=function(...t){const n=new Set;for(const e of t)for(const t of e)n.add(t);return n},t.utcDay=Eg,t.utcDays=kg,t.utcFriday=Rg,t.utcFridays=Lg,t.utcHour=Tg,t.utcHours=Sg,t.utcMillisecond=Yp,t.utcMilliseconds=Lp,t.utcMinute=Mg,t.utcMinutes=Ag,t.utcMonday=Pg,t.utcMondays=Ig,t.utcMonth=Hg,t.utcMonths=Xg,t.utcSaturday=Fg,t.utcSaturdays=jg,t.utcSecond=Zp,t.utcSeconds=Kp,t.utcSunday=Cg,t.utcSundays=Og,t.utcThursday=qg,t.utcThursdays=Yg,t.utcTickInterval=Zg,t.utcTicks=Wg,t.utcTuesday=zg,t.utcTuesdays=Ug,t.utcWednesday=Dg,t.utcWednesdays=Bg,t.utcWeek=Cg,t.utcWeeks=Og,t.utcYear=Gg,t.utcYears=Vg,t.variance=h,t.version="6.7.0",t.window=Wt,t.xml=Ru,t.zip=function(){return tt(arguments)},t.zoom=function(){var t,n,e,r=ox,i=ax,o=sx,a=cx,u=fx,c=[0,1/0],f=[[-1/0,-1/0],[1/0,1/0]],s=250,l=Dr,h=pt("start","zoom","end"),d=500,p=0,g=10;function y(t){t.property("__zoom",ux).on("wheel.zoom",M).on("mousedown.zoom",A).on("dblclick.zoom",T).filter(u).on("touchstart.zoom",S).on("touchmove.zoom",E).on("touchend.zoom touchcancel.zoom",k).style("-webkit-tap-highlight-color","rgba(0,0,0,0)")}function v(t,n){return(n=Math.max(c[0],Math.min(c[1],n)))===t.k?t:new tx(n,t.x,t.y)}function _(t,n,e){var r=n[0]-e[0]*t.k,i=n[1]-e[1]*t.k;return r===t.x&&i===t.y?t:new tx(t.k,r,i)}function b(t){return[(+t[0][0]+ +t[1][0])/2,(+t[0][1]+ +t[1][1])/2]}function m(t,n,e,r){t.on("start.zoom",(function(){x(this,arguments).event(r).start()})).on("interrupt.zoom end.zoom",(function(){x(this,arguments).event(r).end()})).tween("zoom",(function(){var t=this,o=arguments,a=x(t,o).event(r),u=i.apply(t,o),c=null==e?b(u):"function"==typeof e?e.apply(t,o):e,f=Math.max(u[1][0]-u[0][0],u[1][1]-u[0][1]),s=t.__zoom,h="function"==typeof n?n.apply(t,o):n,d=l(s.invert(c).concat(f/s.k),h.invert(c).concat(f/h.k));return function(t){if(1===t)t=h;else{var n=d(t),e=f/n[2];t=new tx(e,c[0]-n[0]*e,c[1]-n[1]*e)}a.zoom(null,t)}}))}function x(t,n,e){return!e&&t.__zooming||new w(t,n)}function w(t,n){this.that=t,this.args=n,this.active=0,this.sourceEvent=null,this.extent=i.apply(t,n),this.taps=0}function M(t,...n){if(r.apply(this,arguments)){var e=x(this,n).event(t),i=this.__zoom,u=Math.max(c[0],Math.min(c[1],i.k*Math.pow(2,a.apply(this,arguments)))),s=In(t);if(e.wheel)e.mouse[0][0]===s[0]&&e.mouse[0][1]===s[1]||(e.mouse[1]=i.invert(e.mouse[0]=s)),clearTimeout(e.wheel);else{if(i.k===u)return;e.mouse=[s,i.invert(s)],gi(this),e.start()}ix(t),e.wheel=setTimeout(l,150),e.zoom("mouse",o(_(v(i,u),e.mouse[0],e.mouse[1]),e.extent,f))}function l(){e.wheel=null,e.end()}}function A(t,...n){if(!e&&r.apply(this,arguments)){var i=x(this,n,!0).event(t),a=Dn(t.view).on("mousemove.zoom",h,!0).on("mouseup.zoom",d,!0),u=In(t,c),c=t.currentTarget,s=t.clientX,l=t.clientY;Yn(t.view),rx(t),i.mouse=[u,this.__zoom.invert(u)],gi(this),i.start()}function h(t){if(ix(t),!i.moved){var n=t.clientX-s,e=t.clientY-l;i.moved=n*n+e*e>p}i.event(t).zoom("mouse",o(_(i.that.__zoom,i.mouse[0]=In(t,c),i.mouse[1]),i.extent,f))}function 
d(t){a.on("mousemove.zoom mouseup.zoom",null),Ln(t.view,i.moved),ix(t),i.event(t).end()}}function T(t,...n){if(r.apply(this,arguments)){var e=this.__zoom,a=In(t.changedTouches?t.changedTouches[0]:t,this),u=e.invert(a),c=e.k*(t.shiftKey?.5:2),l=o(_(v(e,c),a,u),i.apply(this,n),f);ix(t),s>0?Dn(this).transition().duration(s).call(m,l,a,t):Dn(this).call(y.transform,l,a,t)}}function S(e,...i){if(r.apply(this,arguments)){var o,a,u,c,f=e.touches,s=f.length,l=x(this,i,e.changedTouches.length===s).event(e);for(rx(e),a=0;a bool: - return False -any = AnyType("*") - -class INTConstant: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "value": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), - }, - } - RETURN_TYPES = ("INT",) - RETURN_NAMES = ("value",) - FUNCTION = "get_value" - CATEGORY = "KJNodes/constants" - - def get_value(self, value): - return (value,) - -class FloatConstant: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "value": ("FLOAT", {"default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.001}), - }, - } - - RETURN_TYPES = ("FLOAT",) - RETURN_NAMES = ("value",) - FUNCTION = "get_value" - CATEGORY = "KJNodes/constants" - - def get_value(self, value): - return (value,) - -class StringConstant: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "string": ("STRING", {"default": '', "multiline": False}), - } - } - RETURN_TYPES = ("STRING",) - FUNCTION = "passtring" - CATEGORY = "KJNodes/constants" - - def passtring(self, string): - return (string, ) - -class StringConstantMultiline: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "string": ("STRING", {"default": "", "multiline": True}), - "strip_newlines": ("BOOLEAN", {"default": True}), - } - } - RETURN_TYPES = ("STRING",) - FUNCTION = "stringify" - CATEGORY = "KJNodes/constants" - - def stringify(self, string, strip_newlines): - new_string = [] - for line in io.StringIO(string): - if not line.strip().startswith("\n") and strip_newlines: - line = line.replace("\n", '') - new_string.append(line) - new_string = "\n".join(new_string) - - return (new_string, ) - -class JoinStrings: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "string1": ("STRING", {"default": '', "forceInput": True}), - "string2": ("STRING", {"default": '', "forceInput": True}), - "delimiter": ("STRING", {"default": ' ', "multiline": False}), - } - } - RETURN_TYPES = ("STRING",) - FUNCTION = "joinstring" - CATEGORY = "KJNodes/constants" - - def joinstring(self, string1, string2, delimiter): - joined_string = string1 + delimiter + string2 - return (joined_string, ) - -class CreateFluidMask: - - RETURN_TYPES = ("IMAGE", "MASK") - FUNCTION = "createfluidmask" - CATEGORY = "KJNodes/masking/generate" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "invert": ("BOOLEAN", {"default": False}), - "frames": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), - "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), - "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), - "inflow_count": ("INT", {"default": 3,"min": 0, "max": 255, "step": 1}), - "inflow_velocity": ("INT", {"default": 1,"min": 0, "max": 255, "step": 1}), - "inflow_radius": ("INT", {"default": 8,"min": 0, "max": 255, "step": 1}), - "inflow_padding": ("INT", {"default": 50,"min": 0, "max": 255, "step": 1}), - "inflow_duration": ("INT", {"default": 60,"min": 0, "max": 255, "step": 1}), - }, - } - #using code from 
https://github.com/GregTJ/stable-fluids
-    def createfluidmask(self, frames, width, height, invert, inflow_count, inflow_velocity, inflow_radius, inflow_padding, inflow_duration):
-        from .fluid import Fluid
-        from scipy.spatial import erf
-        out = []
-        masks = []
-        RESOLUTION = width, height
-        DURATION = frames
-
-        INFLOW_PADDING = inflow_padding
-        INFLOW_DURATION = inflow_duration
-        INFLOW_RADIUS = inflow_radius
-        INFLOW_VELOCITY = inflow_velocity
-        INFLOW_COUNT = inflow_count
-
-        print('Generating fluid solver, this may take some time.')
-        fluid = Fluid(RESOLUTION, 'dye')
-
-        center = np.floor_divide(RESOLUTION, 2)
-        r = np.min(center) - INFLOW_PADDING
-
-        points = np.linspace(-np.pi, np.pi, INFLOW_COUNT, endpoint=False)
-        points = tuple(np.array((np.cos(p), np.sin(p))) for p in points)
-        normals = tuple(-p for p in points)
-        points = tuple(r * p + center for p in points)
-
-        inflow_velocity = np.zeros_like(fluid.velocity)
-        inflow_dye = np.zeros(fluid.shape)
-        for p, n in zip(points, normals):
-            mask = np.linalg.norm(fluid.indices - p[:, None, None], axis=0) <= INFLOW_RADIUS
-            inflow_velocity[:, mask] += n[:, None] * INFLOW_VELOCITY
-            inflow_dye[mask] = 1
-
-
-        for f in range(DURATION):
-            print(f'Computing frame {f + 1} of {DURATION}.')
-            if f <= INFLOW_DURATION:
-                fluid.velocity += inflow_velocity
-                fluid.dye += inflow_dye
-
-            curl = fluid.step()[1]
-            # Using the error function to make the contrast a bit higher.
-            # Any other sigmoid function e.g. smoothstep would work.
-            curl = (erf(curl * 2) + 1) / 4
-
-            color = np.dstack((curl, np.ones(fluid.shape), fluid.dye))
-            color = (np.clip(color, 0, 1) * 255).astype('uint8')
-            image = np.array(color).astype(np.float32) / 255.0
-            image = torch.from_numpy(image)[None,]
-            mask = image[:, :, :, 0]
-            masks.append(mask)
-            out.append(image)
-
-        if invert:
-            return (1.0 - torch.cat(out, dim=0),1.0 - torch.cat(masks, dim=0),)
-        return (torch.cat(out, dim=0),torch.cat(masks, dim=0),)
-
-class CreateAudioMask:
-    def __init__(self):
-        try:
-            import librosa
-            self.librosa = librosa
-        except ImportError:
-            print("Can not import librosa. Install it with 'pip install librosa'")
-    RETURN_TYPES = ("IMAGE",)
-    FUNCTION = "createaudiomask"
-    CATEGORY = "KJNodes/deprecated"
-
-    @classmethod
-    def INPUT_TYPES(s):
-        return {
-            "required": {
-                "invert": ("BOOLEAN", {"default": False}),
-                "frames": ("INT", {"default": 16,"min": 1, "max": 255, "step": 1}),
-                "scale": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 2.0, "step": 0.01}),
-                "audio_path": ("STRING", {"default": "audio.wav"}),
-                "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
-                "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}),
-            },
-        }
-
-    def createaudiomask(self, frames, width, height, invert, audio_path, scale):
-        # Define the number of images in the batch
-        batch_size = frames
-        out = []
-        masks = []
-        if audio_path == "audio.wav": #I don't know why relative path won't work otherwise...
- audio_path = os.path.join(script_directory, audio_path) - audio, sr = self.librosa.load(audio_path) - spectrogram = np.abs(self.librosa.stft(audio)) - - for i in range(batch_size): - image = Image.new("RGB", (width, height), "black") - draw = ImageDraw.Draw(image) - frame = spectrogram[:, i] - circle_radius = int(height * np.mean(frame)) - circle_radius *= scale - circle_center = (width // 2, height // 2) # Calculate the center of the image - - draw.ellipse([(circle_center[0] - circle_radius, circle_center[1] - circle_radius), - (circle_center[0] + circle_radius, circle_center[1] + circle_radius)], - fill='white') - - image = np.array(image).astype(np.float32) / 255.0 - image = torch.from_numpy(image)[None,] - mask = image[:, :, :, 0] - masks.append(mask) - out.append(image) - - if invert: - return (1.0 - torch.cat(out, dim=0),) - return (torch.cat(out, dim=0),torch.cat(masks, dim=0),) - -class CreateGradientMask: - - RETURN_TYPES = ("MASK",) - FUNCTION = "createmask" - CATEGORY = "KJNodes/masking/generate" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "invert": ("BOOLEAN", {"default": False}), - "frames": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), - "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), - "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), - }, - } - def createmask(self, frames, width, height, invert): - # Define the number of images in the batch - batch_size = frames - out = [] - # Create an empty array to store the image batch - image_batch = np.zeros((batch_size, height, width), dtype=np.float32) - # Generate the black to white gradient for each image - for i in range(batch_size): - gradient = np.linspace(1.0, 0.0, width, dtype=np.float32) - time = i / frames # Calculate the time variable - offset_gradient = gradient - time # Offset the gradient values based on time - image_batch[i] = offset_gradient.reshape(1, -1) - output = torch.from_numpy(image_batch) - mask = output - out.append(mask) - if invert: - return (1.0 - torch.cat(out, dim=0),) - return (torch.cat(out, dim=0),) - -class CreateFadeMask: - - RETURN_TYPES = ("MASK",) - FUNCTION = "createfademask" - CATEGORY = "KJNodes/deprecated" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "invert": ("BOOLEAN", {"default": False}), - "frames": ("INT", {"default": 2,"min": 2, "max": 255, "step": 1}), - "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), - "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), - "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), - "start_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}), - "midpoint_level": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 1.0, "step": 0.01}), - "end_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}), - "midpoint_frame": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), - }, - } - - def createfademask(self, frames, width, height, invert, interpolation, start_level, midpoint_level, end_level, midpoint_frame): - def ease_in(t): - return t * t - - def ease_out(t): - return 1 - (1 - t) * (1 - t) - - def ease_in_out(t): - return 3 * t * t - 2 * t * t * t - - batch_size = frames - out = [] - image_batch = np.zeros((batch_size, height, width), dtype=np.float32) - - if midpoint_frame == 0: - midpoint_frame = batch_size // 2 - - for i in range(batch_size): - if i <= midpoint_frame: - t = i / midpoint_frame - if interpolation == "ease_in": - t = ease_in(t) - elif interpolation 
== "ease_out": - t = ease_out(t) - elif interpolation == "ease_in_out": - t = ease_in_out(t) - color = start_level - t * (start_level - midpoint_level) - else: - t = (i - midpoint_frame) / (batch_size - midpoint_frame) - if interpolation == "ease_in": - t = ease_in(t) - elif interpolation == "ease_out": - t = ease_out(t) - elif interpolation == "ease_in_out": - t = ease_in_out(t) - color = midpoint_level - t * (midpoint_level - end_level) - - color = np.clip(color, 0, 255) - image = np.full((height, width), color, dtype=np.float32) - image_batch[i] = image - - output = torch.from_numpy(image_batch) - mask = output - out.append(mask) - - if invert: - return (1.0 - torch.cat(out, dim=0),) - return (torch.cat(out, dim=0),) - -class CreateFadeMaskAdvanced: - - RETURN_TYPES = ("MASK",) - FUNCTION = "createfademask" - CATEGORY = "KJNodes/masking/generate" - DESCRIPTION = """ -Create a batch of masks interpolated between given frames and values. -Uses same syntax as Fizz' BatchValueSchedule. -First value is the frame index (not that this starts from 0, not 1) -and the second value inside the brackets is the float value of the mask in range 0.0 - 1.0 - -For example the default values: -0:(0.0) -7:(1.0) -15:(0.0) - -Would create a mask batch fo 16 frames, starting from black, -interpolating with the chosen curve to fully white at the 8th frame, -and interpolating from that to fully black at the 16th frame. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "points_string": ("STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": True}), - "invert": ("BOOLEAN", {"default": False}), - "frames": ("INT", {"default": 16,"min": 2, "max": 255, "step": 1}), - "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), - }, - } - - def createfademask(self, frames, width, height, invert, points_string, interpolation): - def ease_in(t): - return t * t - - def ease_out(t): - return 1 - (1 - t) * (1 - t) - - def ease_in_out(t): - return 3 * t * t - 2 * t * t * t - - # Parse the input string into a list of tuples - points = [] - points_string = points_string.rstrip(',\n') - for point_str in points_string.split(','): - frame_str, color_str = point_str.split(':') - frame = int(frame_str.strip()) - color = float(color_str.strip()[1:-1]) # Remove parentheses around color - points.append((frame, color)) - - # Check if the last frame is already in the points - if len(points) == 0 or points[-1][0] != frames - 1: - # If not, add it with the color of the last specified frame - points.append((frames - 1, points[-1][1] if points else 0)) - - # Sort the points by frame number - points.sort(key=lambda x: x[0]) - - batch_size = frames - out = [] - image_batch = np.zeros((batch_size, height, width), dtype=np.float32) - - # Index of the next point to interpolate towards - next_point = 1 - - for i in range(batch_size): - while next_point < len(points) and i > points[next_point][0]: - next_point += 1 - - # Interpolate between the previous point and the next point - prev_point = next_point - 1 - t = (i - points[prev_point][0]) / (points[next_point][0] - points[prev_point][0]) - if interpolation == "ease_in": - t = ease_in(t) - elif interpolation == "ease_out": - t = ease_out(t) - elif interpolation == "ease_in_out": - t = ease_in_out(t) - elif interpolation == "linear": - pass # No need to modify `t` for linear interpolation - - color = 
points[prev_point][1] - t * (points[prev_point][1] - points[next_point][1]) - color = np.clip(color, 0, 255) - image = np.full((height, width), color, dtype=np.float32) - image_batch[i] = image - - output = torch.from_numpy(image_batch) - mask = output - out.append(mask) - - if invert: - return (1.0 - torch.cat(out, dim=0),) - return (torch.cat(out, dim=0),) - -class ScaleBatchPromptSchedule: - - RETURN_TYPES = ("STRING",) - FUNCTION = "scaleschedule" - CATEGORY = "KJNodes" - DESCRIPTION = """ -Scales a batch schedule from Fizz' nodes BatchPromptSchedule -to a different frame count. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "input_str": ("STRING", {"forceInput": True,"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n"}), - "old_frame_count": ("INT", {"forceInput": True,"default": 1,"min": 1, "max": 4096, "step": 1}), - "new_frame_count": ("INT", {"forceInput": True,"default": 1,"min": 1, "max": 4096, "step": 1}), - - }, - } - - def scaleschedule(self, old_frame_count, input_str, new_frame_count): - print("input_str:", input_str) - pattern = r'"(\d+)"\s*:\s*"(.*?)"(?:,|\Z)' - frame_strings = dict(re.findall(pattern, input_str)) - - # Calculate the scaling factor - scaling_factor = (new_frame_count - 1) / (old_frame_count - 1) - - # Initialize a dictionary to store the new frame numbers and strings - new_frame_strings = {} - - # Iterate over the frame numbers and strings - for old_frame, string in frame_strings.items(): - # Calculate the new frame number - new_frame = int(round(int(old_frame) * scaling_factor)) - - # Store the new frame number and corresponding string - new_frame_strings[new_frame] = string - - # Format the output string - output_str = ', '.join([f'"{k}":"{v}"' for k, v in sorted(new_frame_strings.items())]) - print(output_str) - return (output_str,) - -class CrossFadeImages: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "crossfadeimages" - CATEGORY = "KJNodes/image" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "images_1": ("IMAGE",), - "images_2": ("IMAGE",), - "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],), - "transition_start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), - "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), - "start_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}), - "end_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}), - }, - } - - def crossfadeimages(self, images_1, images_2, transition_start_index, transitioning_frames, interpolation, start_level, end_level): - - def crossfade(images_1, images_2, alpha): - crossfade = (1 - alpha) * images_1 + alpha * images_2 - return crossfade - def ease_in(t): - return t * t - def ease_out(t): - return 1 - (1 - t) * (1 - t) - def ease_in_out(t): - return 3 * t * t - 2 * t * t * t - def bounce(t): - if t < 0.5: - return self.ease_out(t * 2) * 0.5 - else: - return self.ease_in((t - 0.5) * 2) * 0.5 + 0.5 - def elastic(t): - return math.sin(13 * math.pi / 2 * t) * math.pow(2, 10 * (t - 1)) - def glitchy(t): - return t + 0.1 * math.sin(40 * t) - def exponential_ease_out(t): - return 1 - (1 - t) ** 4 - - easing_functions = { - "linear": lambda t: t, - "ease_in": ease_in, - "ease_out": ease_out, - "ease_in_out": ease_in_out, - "bounce": bounce, - "elastic": elastic, - "glitchy": glitchy, - "exponential_ease_out": exponential_ease_out, - } - - crossfade_images = [] - - alphas = torch.linspace(start_level, 
end_level, transitioning_frames) - for i in range(transitioning_frames): - alpha = alphas[i] - image1 = images_1[i + transition_start_index] - image2 = images_2[i + transition_start_index] - easing_function = easing_functions.get(interpolation) - alpha = easing_function(alpha) # Apply the easing function to the alpha value - - crossfade_image = crossfade(image1, image2, alpha) - crossfade_images.append(crossfade_image) - - # Convert crossfade_images to tensor - crossfade_images = torch.stack(crossfade_images, dim=0) - # Get the last frame result of the interpolation - last_frame = crossfade_images[-1] - # Calculate the number of remaining frames from images_2 - remaining_frames = len(images_2) - (transition_start_index + transitioning_frames) - # Crossfade the remaining frames with the last used alpha value - for i in range(remaining_frames): - alpha = alphas[-1] - image1 = images_1[i + transition_start_index + transitioning_frames] - image2 = images_2[i + transition_start_index + transitioning_frames] - easing_function = easing_functions.get(interpolation) - alpha = easing_function(alpha) # Apply the easing function to the alpha value - - crossfade_image = crossfade(image1, image2, alpha) - crossfade_images = torch.cat([crossfade_images, crossfade_image.unsqueeze(0)], dim=0) - # Append the beginning of images_1 - beginning_images_1 = images_1[:transition_start_index] - crossfade_images = torch.cat([beginning_images_1, crossfade_images], dim=0) - return (crossfade_images, ) - -class GetImageRangeFromBatch: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "imagesfrombatch" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Creates a new batch using images from the input, -batch, starting from start_index. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "images": ("IMAGE",), - "start_index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}), - "num_frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), - }, - } - - def imagesfrombatch(self, images, start_index, num_frames): - if start_index == -1: - start_index = len(images) - num_frames - if start_index < 0 or start_index >= len(images): - raise ValueError("GetImageRangeFromBatch: Start index is out of range") - end_index = start_index + num_frames - if end_index > len(images): - raise ValueError("GetImageRangeFromBatch: End index is out of range") - chosen_images = images[start_index:end_index] - return (chosen_images, ) - -class GetImagesFromBatchIndexed: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "indexedimagesfrombatch" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Selects and returns the images at the specified indices as an image batch. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "images": ("IMAGE",), - "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), - }, - } - - def indexedimagesfrombatch(self, images, indexes): - - # Parse the indexes string into a list of integers - index_list = [int(index.strip()) for index in indexes.split(',')] - - # Convert list of indices to a PyTorch tensor - indices_tensor = torch.tensor(index_list, dtype=torch.long) - - # Select the images at the specified indices - chosen_images = images[indices_tensor] - - return (chosen_images,) - -class InsertImagesToBatchIndexed: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "insertimagesfrombatch" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Inserts images at the specified indices into the original image batch. 
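# GetImagesFromBatchIndexed above and InsertImagesToBatchIndexed just below both parse
# the same comma-separated index string and then rely on plain tensor indexing. A small
# standalone sketch of that pattern with a dummy 8-frame batch (the tensor shapes and
# values are illustrative assumptions, not the nodes' actual data):
import torch

images = torch.arange(8, dtype=torch.float32).view(8, 1, 1, 1).expand(8, 4, 4, 3)
indexes = "0, 3, 5"
index_list = [int(i.strip()) for i in indexes.split(",")]
indices = torch.tensor(index_list, dtype=torch.long)

picked = images[indices]          # select frames 0, 3 and 5
print(picked.shape)               # torch.Size([3, 4, 4, 3])

batch = images.clone()
replacement = torch.zeros(3, 4, 4, 3)
batch[indices] = replacement      # overwrite the same indices, as the insert node does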
-""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "original_images": ("IMAGE",), - "images_to_insert": ("IMAGE",), - "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), - }, - } - - def insertimagesfrombatch(self, original_images, images_to_insert, indexes): - - # Parse the indexes string into a list of integers - index_list = [int(index.strip()) for index in indexes.split(',')] - - # Convert list of indices to a PyTorch tensor - indices_tensor = torch.tensor(index_list, dtype=torch.long) - - # Ensure the images_to_insert is a tensor - if not isinstance(images_to_insert, torch.Tensor): - images_to_insert = torch.tensor(images_to_insert) - - # Insert the images at the specified indices - for index, image in zip(indices_tensor, images_to_insert): - original_images[index] = image - - return (original_images,) - -class GetLatentsFromBatchIndexed: - - RETURN_TYPES = ("LATENT",) - FUNCTION = "indexedlatentsfrombatch" - CATEGORY = "KJNodes" - DESCRIPTION = """ -Selects and returns the latents at the specified indices as an latent batch. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "latents": ("LATENT",), - "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), - }, - } - - def indexedlatentsfrombatch(self, latents, indexes): - - samples = latents.copy() - latent_samples = samples["samples"] - - # Parse the indexes string into a list of integers - index_list = [int(index.strip()) for index in indexes.split(',')] - - # Convert list of indices to a PyTorch tensor - indices_tensor = torch.tensor(index_list, dtype=torch.long) - - # Select the latents at the specified indices - chosen_latents = latent_samples[indices_tensor] - - samples["samples"] = chosen_latents - return (samples,) - -class ReplaceImagesInBatch: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "replace" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Replaces the images in a batch, starting from the specified start index, -with the replacement images. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "original_images": ("IMAGE",), - "replacement_images": ("IMAGE",), - "start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), - }, - } - - def replace(self, original_images, replacement_images, start_index): - images = None - if start_index >= len(original_images): - raise ValueError("GetImageRangeFromBatch: Start index is out of range") - end_index = start_index + len(replacement_images) - if end_index > len(original_images): - raise ValueError("GetImageRangeFromBatch: End index is out of range") - # Create a copy of the original_images tensor - original_images_copy = original_images.clone() - original_images_copy[start_index:end_index] = replacement_images - images = original_images_copy - return (images, ) - - -class ReverseImageBatch: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "reverseimagebatch" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Reverses the order of the images in a batch. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "images": ("IMAGE",), - }, - } - - def reverseimagebatch(self, images): - reversed_images = torch.flip(images, [0]) - return (reversed_images, ) - - - -class CreateTextMask: - - RETURN_TYPES = ("IMAGE", "MASK",) - FUNCTION = "createtextmask" - CATEGORY = "KJNodes/text" - DESCRIPTION = """ -Creates a text image and mask. 
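# CreateTextMask below wraps text by measuring word widths on the font object, preferring
# font.getbbox() on newer Pillow and falling back to font.getsize() on older releases,
# mirroring the try/except further down. A tiny measurement sketch using Pillow's built-in
# default font so no .ttf file is required (that choice is an assumption for illustration):
from PIL import ImageFont

font = ImageFont.load_default()
word = "HELLO!"
try:                        # newer Pillow: bounding-box API
    word_width = font.getbbox(word)[2]
except AttributeError:      # older Pillow: legacy size API
    word_width = font.getsize(word)[0]
print(word_width)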
-Looks for fonts from this folder: -ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts - -If start_rotation and/or end_rotation are different values, -creates animation between them. -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "invert": ("BOOLEAN", {"default": False}), - "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), - "text_x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), - "text_y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), - "font_size": ("INT", {"default": 32,"min": 8, "max": 4096, "step": 1}), - "font_color": ("STRING", {"default": "white"}), - "text": ("STRING", {"default": "HELLO!", "multiline": True}), - "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), - "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "start_rotation": ("INT", {"default": 0,"min": 0, "max": 359, "step": 1}), - "end_rotation": ("INT", {"default": 0,"min": -359, "max": 359, "step": 1}), - }, - } - - def createtextmask(self, frames, width, height, invert, text_x, text_y, text, font_size, font_color, font, start_rotation, end_rotation): - # Define the number of images in the batch - batch_size = frames - out = [] - masks = [] - rotation = start_rotation - if start_rotation != end_rotation: - rotation_increment = (end_rotation - start_rotation) / (batch_size - 1) - - font_path = folder_paths.get_full_path("kjnodes_fonts", font) - # Generate the text - for i in range(batch_size): - image = Image.new("RGB", (width, height), "black") - draw = ImageDraw.Draw(image) - font = ImageFont.truetype(font_path, font_size) - - # Split the text into words - words = text.split() - - # Initialize variables for line creation - lines = [] - current_line = [] - current_line_width = 0 - try: #new pillow - # Iterate through words to create lines - for word in words: - word_width = font.getbbox(word)[2] - if current_line_width + word_width <= width - 2 * text_x: - current_line.append(word) - current_line_width += word_width + font.getbbox(" ")[2] # Add space width - else: - lines.append(" ".join(current_line)) - current_line = [word] - current_line_width = word_width - except: #old pillow - for word in words: - word_width = font.getsize(word)[0] - if current_line_width + word_width <= width - 2 * text_x: - current_line.append(word) - current_line_width += word_width + font.getsize(" ")[0] # Add space width - else: - lines.append(" ".join(current_line)) - current_line = [word] - current_line_width = word_width - - # Add the last line if it's not empty - if current_line: - lines.append(" ".join(current_line)) - - # Draw each line of text separately - y_offset = text_y - for line in lines: - text_width = font.getlength(line) - text_height = font_size - text_center_x = text_x + text_width / 2 - text_center_y = y_offset + text_height / 2 - try: - draw.text((text_x, y_offset), line, font=font, fill=font_color, features=['-liga']) - except: - draw.text((text_x, y_offset), line, font=font, fill=font_color) - y_offset += text_height # Move to the next line - - if start_rotation != end_rotation: - image = image.rotate(rotation, center=(text_center_x, text_center_y)) - rotation += rotation_increment - - image = np.array(image).astype(np.float32) / 255.0 - image = torch.from_numpy(image)[None,] - mask = image[:, :, :, 0] - masks.append(mask) - out.append(image) - - if invert: - return (1.0 - torch.cat(out, dim=0), 1.0 - torch.cat(masks, dim=0),) - return (torch.cat(out, 
dim=0),torch.cat(masks, dim=0),) - -class GrowMaskWithBlur: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "mask": ("MASK",), - "expand": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}), - "incremental_expandrate": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}), - "tapered_corners": ("BOOLEAN", {"default": True}), - "flip_input": ("BOOLEAN", {"default": False}), - "blur_radius": ("FLOAT", { - "default": 0.0, - "min": 0.0, - "max": 100, - "step": 0.1 - }), - "lerp_alpha": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - "decay_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - }, - "optional": { - "fill_holes": ("BOOLEAN", {"default": False}), - }, - } - - CATEGORY = "KJNodes/masking" - RETURN_TYPES = ("MASK", "MASK",) - RETURN_NAMES = ("mask", "mask_inverted",) - FUNCTION = "expand_mask" - DESCRIPTION = """ -# GrowMaskWithBlur -- mask: Input mask or mask batch -- expand: Expand or contract mask or mask batch by a given amount -- incremental_expandrate: increase expand rate by a given amount per frame -- tapered_corners: use tapered corners -- flip_input: flip input mask -- blur_radius: value higher than 0 will blur the mask -- lerp_alpha: alpha value for interpolation between frames -- decay_factor: decay value for interpolation between frames -- fill_holes: fill holes in the mask (slow)""" - - def expand_mask(self, mask, expand, tapered_corners, flip_input, blur_radius, incremental_expandrate, lerp_alpha, decay_factor, fill_holes=False): - alpha = lerp_alpha - decay = decay_factor - if flip_input: - mask = 1.0 - mask - c = 0 if tapered_corners else 1 - kernel = np.array([[c, 1, c], - [1, 1, 1], - [c, 1, c]]) - growmask = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).cpu() - out = [] - previous_output = None - current_expand = expand - for m in growmask: - output = m.numpy() - for _ in range(abs(round(current_expand))): - if current_expand < 0: - output = scipy.ndimage.grey_erosion(output, footprint=kernel) - else: - output = scipy.ndimage.grey_dilation(output, footprint=kernel) - if current_expand < 0: - current_expand -= abs(incremental_expandrate) - else: - current_expand += abs(incremental_expandrate) - if fill_holes: - binary_mask = output > 0 - output = scipy.ndimage.binary_fill_holes(binary_mask) - output = output.astype(np.float32) * 255 - output = torch.from_numpy(output) - if alpha < 1.0 and previous_output is not None: - # Interpolate between the previous and current frame - output = alpha * output + (1 - alpha) * previous_output - if decay < 1.0 and previous_output is not None: - # Add the decayed previous output to the current frame - output += decay * previous_output - output = output / output.max() - previous_output = output - out.append(output) - - if blur_radius != 0: - # Convert the tensor list to PIL images, apply blur, and convert back - for idx, tensor in enumerate(out): - # Convert tensor to PIL image - pil_image = tensor2pil(tensor.cpu().detach())[0] - # Apply Gaussian blur - pil_image = pil_image.filter(ImageFilter.GaussianBlur(blur_radius)) - # Convert back to tensor - out[idx] = pil2tensor(pil_image) - blurred = torch.cat(out, dim=0) - return (blurred, 1.0 - blurred) - else: - return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) - -class ColorToMask: - - RETURN_TYPES = ("MASK",) - FUNCTION = "clip" - CATEGORY = "KJNodes/masking" - DESCRIPTION = """ -Converts chosen RGB value to a mask -""" - - @classmethod - def INPUT_TYPES(s): 
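# GrowMaskWithBlur above dilates (or erodes, for negative expand values) each mask frame
# with scipy and then optionally Gaussian-blurs it via PIL. A condensed sketch of that
# pipeline on one synthetic frame; the kernel and calls mirror the node, the toy mask
# itself is an assumption:
import numpy as np
import scipy.ndimage
from PIL import Image, ImageFilter

mask = np.zeros((64, 64), dtype=np.float32)
mask[24:40, 24:40] = 1.0                      # a 16x16 square

tapered_corners = True
c = 0 if tapered_corners else 1
kernel = np.array([[c, 1, c],
                   [1, 1, 1],
                   [c, 1, c]])

expand = 4
out = mask
for _ in range(expand):                       # positive expand -> dilation
    out = scipy.ndimage.grey_dilation(out, footprint=kernel)

blurred = Image.fromarray((out * 255).astype(np.uint8))
blurred = blurred.filter(ImageFilter.GaussianBlur(radius=3.0))
print(np.asarray(blurred).max())              # soft-edged, grown mask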
- return { - "required": { - "images": ("IMAGE",), - "invert": ("BOOLEAN", {"default": False}), - "red": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), - "green": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), - "blue": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), - "threshold": ("INT", {"default": 10,"min": 0, "max": 255, "step": 1}), - }, - } - - def clip(self, images, red, green, blue, threshold, invert): - color = np.array([red, green, blue]) - images = 255. * images.cpu().numpy() - images = np.clip(images, 0, 255).astype(np.uint8) - images = [Image.fromarray(image) for image in images] - images = [np.array(image) for image in images] - - black = [0, 0, 0] - white = [255, 255, 255] - if invert: - black, white = white, black - - new_images = [] - for image in images: - new_image = np.full_like(image, black) - - color_distances = np.linalg.norm(image - color, axis=-1) - complement_indexes = color_distances <= threshold - - new_image[complement_indexes] = white - - new_images.append(new_image) - - new_images = np.array(new_images).astype(np.float32) / 255.0 - new_images = torch.from_numpy(new_images).permute(3, 0, 1, 2) - return new_images - -class ConditioningMultiCombine: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "inputcount": ("INT", {"default": 2, "min": 2, "max": 20, "step": 1}), - "conditioning_1": ("CONDITIONING", ), - "conditioning_2": ("CONDITIONING", ), - }, - } - - RETURN_TYPES = ("CONDITIONING", "INT") - RETURN_NAMES = ("combined", "inputcount") - FUNCTION = "combine" - CATEGORY = "KJNodes/masking/conditioning" - DESCRIPTION = """ -Combines multiple conditioning nodes into one -""" - - def combine(self, inputcount, **kwargs): - from nodes import ConditioningCombine - cond_combine_node = ConditioningCombine() - cond = kwargs["conditioning_1"] - for c in range(1, inputcount): - new_cond = kwargs[f"conditioning_{c + 1}"] - cond = cond_combine_node.combine(new_cond, cond)[0] - return (cond, inputcount,) - -class CondPassThrough: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "positive": ("CONDITIONING", ), - "negative": ("CONDITIONING", ), - }, - } - - RETURN_TYPES = ("CONDITIONING", "CONDITIONING",) - RETURN_NAMES = ("positive", "negative") - FUNCTION = "passthrough" - CATEGORY = "KJNodes/misc" - DESCRIPTION = """ - Simply passes through the positive and negative conditioning, - workaround for Set node not allowing bypassed inputs. 
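# ColorToMask above keys the mask off Euclidean distance from the chosen RGB value
# (np.linalg.norm over the channel axis, compared against the threshold). The thresholding
# step on a synthetic 2x2 image; the pixel values are made-up assumptions:
import numpy as np

image = np.array([[[250,   5,   5], [  0, 255,   0]],
                  [[255,   0,   0], [ 10,  10,  10]]], dtype=np.float32)
color = np.array([255, 0, 0], dtype=np.float32)
threshold = 10

distances = np.linalg.norm(image - color, axis=-1)
mask = (distances <= threshold).astype(np.float32)   # 1.0 where the pixel is close enough
print(mask)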
-""" - - def passthrough(self, positive, negative): - return (positive, negative,) - -def append_helper(t, mask, c, set_area_to_bounds, strength): - n = [t[0], t[1].copy()] - _, h, w = mask.shape - n[1]['mask'] = mask - n[1]['set_area_to_bounds'] = set_area_to_bounds - n[1]['mask_strength'] = strength - c.append(n) - -class ConditioningSetMaskAndCombine: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "positive_1": ("CONDITIONING", ), - "negative_1": ("CONDITIONING", ), - "positive_2": ("CONDITIONING", ), - "negative_2": ("CONDITIONING", ), - "mask_1": ("MASK", ), - "mask_2": ("MASK", ), - "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "set_cond_area": (["default", "mask bounds"],), - } - } - - RETURN_TYPES = ("CONDITIONING","CONDITIONING",) - RETURN_NAMES = ("combined_positive", "combined_negative",) - FUNCTION = "append" - CATEGORY = "KJNodes/masking/conditioning" - DESCRIPTION = """ -Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes -""" - - def append(self, positive_1, negative_1, positive_2, negative_2, mask_1, mask_2, set_cond_area, mask_1_strength, mask_2_strength): - c = [] - c2 = [] - set_area_to_bounds = False - if set_cond_area != "default": - set_area_to_bounds = True - if len(mask_1.shape) < 3: - mask_1 = mask_1.unsqueeze(0) - if len(mask_2.shape) < 3: - mask_2 = mask_2.unsqueeze(0) - for t in positive_1: - append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) - for t in positive_2: - append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) - for t in negative_1: - append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) - for t in negative_2: - append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) - return (c, c2) - -class ConditioningSetMaskAndCombine3: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "positive_1": ("CONDITIONING", ), - "negative_1": ("CONDITIONING", ), - "positive_2": ("CONDITIONING", ), - "negative_2": ("CONDITIONING", ), - "positive_3": ("CONDITIONING", ), - "negative_3": ("CONDITIONING", ), - "mask_1": ("MASK", ), - "mask_2": ("MASK", ), - "mask_3": ("MASK", ), - "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "set_cond_area": (["default", "mask bounds"],), - } - } - - RETURN_TYPES = ("CONDITIONING","CONDITIONING",) - RETURN_NAMES = ("combined_positive", "combined_negative",) - FUNCTION = "append" - CATEGORY = "KJNodes/masking/conditioning" - DESCRIPTION = """ -Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes -""" - - def append(self, positive_1, negative_1, positive_2, positive_3, negative_2, negative_3, mask_1, mask_2, mask_3, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength): - c = [] - c2 = [] - set_area_to_bounds = False - if set_cond_area != "default": - set_area_to_bounds = True - if len(mask_1.shape) < 3: - mask_1 = mask_1.unsqueeze(0) - if len(mask_2.shape) < 3: - mask_2 = mask_2.unsqueeze(0) - if len(mask_3.shape) < 3: - mask_3 = mask_3.unsqueeze(0) - for t in positive_1: - append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) - for t in positive_2: - append_helper(t, 
mask_2, c, set_area_to_bounds, mask_2_strength) - for t in positive_3: - append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) - for t in negative_1: - append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) - for t in negative_2: - append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) - for t in negative_3: - append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) - return (c, c2) - -class ConditioningSetMaskAndCombine4: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "positive_1": ("CONDITIONING", ), - "negative_1": ("CONDITIONING", ), - "positive_2": ("CONDITIONING", ), - "negative_2": ("CONDITIONING", ), - "positive_3": ("CONDITIONING", ), - "negative_3": ("CONDITIONING", ), - "positive_4": ("CONDITIONING", ), - "negative_4": ("CONDITIONING", ), - "mask_1": ("MASK", ), - "mask_2": ("MASK", ), - "mask_3": ("MASK", ), - "mask_4": ("MASK", ), - "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "mask_4_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "set_cond_area": (["default", "mask bounds"],), - } - } - - RETURN_TYPES = ("CONDITIONING","CONDITIONING",) - RETURN_NAMES = ("combined_positive", "combined_negative",) - FUNCTION = "append" - CATEGORY = "KJNodes/masking/conditioning" - DESCRIPTION = """ -Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes -""" - - def append(self, positive_1, negative_1, positive_2, positive_3, positive_4, negative_2, negative_3, negative_4, mask_1, mask_2, mask_3, mask_4, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength, mask_4_strength): - c = [] - c2 = [] - set_area_to_bounds = False - if set_cond_area != "default": - set_area_to_bounds = True - if len(mask_1.shape) < 3: - mask_1 = mask_1.unsqueeze(0) - if len(mask_2.shape) < 3: - mask_2 = mask_2.unsqueeze(0) - if len(mask_3.shape) < 3: - mask_3 = mask_3.unsqueeze(0) - if len(mask_4.shape) < 3: - mask_4 = mask_4.unsqueeze(0) - for t in positive_1: - append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) - for t in positive_2: - append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) - for t in positive_3: - append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) - for t in positive_4: - append_helper(t, mask_4, c, set_area_to_bounds, mask_4_strength) - for t in negative_1: - append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) - for t in negative_2: - append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) - for t in negative_3: - append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) - for t in negative_4: - append_helper(t, mask_4, c2, set_area_to_bounds, mask_4_strength) - return (c, c2) - -class ConditioningSetMaskAndCombine5: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "positive_1": ("CONDITIONING", ), - "negative_1": ("CONDITIONING", ), - "positive_2": ("CONDITIONING", ), - "negative_2": ("CONDITIONING", ), - "positive_3": ("CONDITIONING", ), - "negative_3": ("CONDITIONING", ), - "positive_4": ("CONDITIONING", ), - "negative_4": ("CONDITIONING", ), - "positive_5": ("CONDITIONING", ), - "negative_5": ("CONDITIONING", ), - "mask_1": ("MASK", ), - "mask_2": ("MASK", ), - "mask_3": ("MASK", ), - "mask_4": ("MASK", ), - "mask_5": ("MASK", 
), - "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "mask_4_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "mask_5_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "set_cond_area": (["default", "mask bounds"],), - } - } - - RETURN_TYPES = ("CONDITIONING","CONDITIONING",) - RETURN_NAMES = ("combined_positive", "combined_negative",) - FUNCTION = "append" - CATEGORY = "KJNodes/masking/conditioning" - DESCRIPTION = """ -Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes -""" - - def append(self, positive_1, negative_1, positive_2, positive_3, positive_4, positive_5, negative_2, negative_3, negative_4, negative_5, mask_1, mask_2, mask_3, mask_4, mask_5, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength, mask_4_strength, mask_5_strength): - c = [] - c2 = [] - set_area_to_bounds = False - if set_cond_area != "default": - set_area_to_bounds = True - if len(mask_1.shape) < 3: - mask_1 = mask_1.unsqueeze(0) - if len(mask_2.shape) < 3: - mask_2 = mask_2.unsqueeze(0) - if len(mask_3.shape) < 3: - mask_3 = mask_3.unsqueeze(0) - if len(mask_4.shape) < 3: - mask_4 = mask_4.unsqueeze(0) - if len(mask_5.shape) < 3: - mask_5 = mask_5.unsqueeze(0) - for t in positive_1: - append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) - for t in positive_2: - append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) - for t in positive_3: - append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) - for t in positive_4: - append_helper(t, mask_4, c, set_area_to_bounds, mask_4_strength) - for t in positive_5: - append_helper(t, mask_5, c, set_area_to_bounds, mask_5_strength) - for t in negative_1: - append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) - for t in negative_2: - append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) - for t in negative_3: - append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) - for t in negative_4: - append_helper(t, mask_4, c2, set_area_to_bounds, mask_4_strength) - for t in negative_5: - append_helper(t, mask_5, c2, set_area_to_bounds, mask_5_strength) - return (c, c2) - -class VRAM_Debug: - - @classmethod - - def INPUT_TYPES(s): - return { - "required": { - - "empty_cache": ("BOOLEAN", {"default": True}), - "gc_collect": ("BOOLEAN", {"default": True}), - "unload_all_models": ("BOOLEAN", {"default": False}), - }, - "optional": { - "any_input": (any, {}), - "image_pass": ("IMAGE",), - "model_pass": ("MODEL",), - } - } - - RETURN_TYPES = (any, "IMAGE","MODEL","INT", "INT",) - RETURN_NAMES = ("any_output", "image_pass", "model_pass", "freemem_before", "freemem_after") - FUNCTION = "VRAMdebug" - CATEGORY = "KJNodes/misc" - DESCRIPTION = """ -Returns the inputs unchanged, they are only used as triggers, -and performs comfy model management functions and garbage collection, -reports free VRAM before and after the operations. 
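# VRAM_Debug (described above, implemented just below) reports free VRAM before and after
# cache clearing and garbage collection. A rough standalone sketch of that measure/free/measure
# pattern, using torch's CUDA API as a stand-in for ComfyUI's model_management helpers,
# which are what the node actually calls:
import gc
import torch

def free_vram_bytes() -> int:
    # mem_get_info returns (free, total) for the current CUDA device
    return torch.cuda.mem_get_info()[0] if torch.cuda.is_available() else 0

before = free_vram_bytes()
gc.collect()
if torch.cuda.is_available():
    torch.cuda.empty_cache()
after = free_vram_bytes()
print(f"free before: {before}, after: {after}, freed: {after - before}")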
-""" - - def VRAMdebug(self, gc_collect,empty_cache, unload_all_models, image_pass=None, model_pass=None, any_input=None): - freemem_before = model_management.get_free_memory() - print("VRAMdebug: free memory before: ", freemem_before) - if empty_cache: - model_management.soft_empty_cache() - if unload_all_models: - model_management.unload_all_models() - if gc_collect: - import gc - gc.collect() - freemem_after = model_management.get_free_memory() - print("VRAMdebug: free memory after: ", freemem_after) - print("VRAMdebug: freed memory: ", freemem_after - freemem_before) - return (any_input, image_pass, model_pass, freemem_before, freemem_after) - -class SomethingToString: - @classmethod - - def INPUT_TYPES(s): - return { - "required": { - "input": (any, {}), - }, - "optional": { - "prefix": ("STRING", {"default": ""}), - "suffix": ("STRING", {"default": ""}), - } - } - RETURN_TYPES = ("STRING",) - FUNCTION = "stringify" - CATEGORY = "KJNodes/text" - DESCRIPTION = """ -Converts any type to a string. -""" - - def stringify(self, input, prefix="", suffix=""): - if isinstance(input, (int, float, bool)): - stringified = str(input) - if prefix: # Check if prefix is not empty - stringified = prefix + stringified # Add the prefix - if suffix: # Check if suffix is not empty - stringified = stringified + suffix # Add the suffix - else: - return - return (stringified,) - -class Sleep: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "input": (any, {}), - "minutes": ("INT", {"default": 0, "min": 0, "max": 1439}), - "seconds": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 59.99, "step": 0.01}), - }, - } - RETURN_TYPES = (any,) - FUNCTION = "sleepdelay" - CATEGORY = "KJNodes/misc" - DESCRIPTION = """ -Delays the execution for the input amount of time. -""" - - def sleepdelay(self, input, minutes, seconds): - total_seconds = minutes * 60 + seconds - time.sleep(total_seconds) - return input, - -class EmptyLatentImagePresets: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "dimensions": ( - [ '512 x 512', - '768 x 512', - '960 x 512', - '1024 x 512', - '1536 x 640', - '1344 x 768', - '1216 x 832', - '1152 x 896', - '1024 x 1024', - ], - { - "default": '512 x 512' - }), - - "invert": ("BOOLEAN", {"default": False}), - "batch_size": ("INT", { - "default": 1, - "min": 1, - "max": 4096 - }), - }, - } - - RETURN_TYPES = ("LATENT", "INT", "INT") - RETURN_NAMES = ("Latent", "Width", "Height") - FUNCTION = "generate" - CATEGORY = "KJNodes" - - def generate(self, dimensions, invert, batch_size): - from nodes import EmptyLatentImage - result = [x.strip() for x in dimensions.split('x')] - - if invert: - width = int(result[1].split(' ')[0]) - height = int(result[0]) - else: - width = int(result[0]) - height = int(result[1].split(' ')[0]) - latent = EmptyLatentImage().generate(width, height, batch_size)[0] - - return (latent, int(width), int(height),) - -class ColorMatch: - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "image_ref": ("IMAGE",), - "image_target": ("IMAGE",), - "method": ( - [ - 'mkl', - 'hm', - 'reinhard', - 'mvgd', - 'hm-mvgd-hm', - 'hm-mkl-hm', - ], { - "default": 'mkl' - }), - - }, - } - - CATEGORY = "KJNodes/image" - - RETURN_TYPES = ("IMAGE",) - RETURN_NAMES = ("image",) - FUNCTION = "colormatch" - DESCRIPTION = """ -color-matcher enables color transfer across images which comes in handy for automatic -color-grading of photographs, paintings and film sequences as well as light-field -and stopmotion corrections. 
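# ColorMatch (description above, implementation just below) is a thin wrapper around the
# external color-matcher package; the node calls its transfer() method per image. A minimal
# direct-usage sketch of that same call, assuming the package is installed
# (pip install color-matcher) and using random float images as stand-ins:
import numpy as np
from color_matcher import ColorMatcher

cm = ColorMatcher()
target = np.random.rand(64, 64, 3).astype(np.float32)   # image to be graded
ref = np.random.rand(64, 64, 3).astype(np.float32)      # colour reference
result = cm.transfer(src=target, ref=ref, method="mkl")
print(result.shape, result.dtype)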
- -The methods behind the mappings are based on the approach from Reinhard et al., -the Monge-Kantorovich Linearization (MKL) as proposed by Pitie et al. and our analytical solution -to a Multi-Variate Gaussian Distribution (MVGD) transfer in conjunction with classical histogram -matching. As shown below our HM-MVGD-HM compound outperforms existing methods. -https://github.com/hahnec/color-matcher/ - -""" - - def colormatch(self, image_ref, image_target, method): - try: - from color_matcher import ColorMatcher - except: - raise Exception("Can't import color-matcher, did you install requirements.txt? Manual install: pip install color-matcher") - cm = ColorMatcher() - image_ref = image_ref.cpu() - image_target = image_target.cpu() - batch_size = image_target.size(0) - out = [] - images_target = image_target.squeeze() - images_ref = image_ref.squeeze() - - image_ref_np = images_ref.numpy() - images_target_np = images_target.numpy() - - if image_ref.size(0) > 1 and image_ref.size(0) != batch_size: - raise ValueError("ColorMatch: Use either single reference image or a matching batch of reference images.") - - for i in range(batch_size): - image_target_np = images_target_np if batch_size == 1 else images_target[i].numpy() - image_ref_np_i = image_ref_np if image_ref.size(0) == 1 else images_ref[i].numpy() - try: - image_result = cm.transfer(src=image_target_np, ref=image_ref_np_i, method=method) - except BaseException as e: - print(f"Error occurred during transfer: {e}") - break - out.append(torch.from_numpy(image_result)) - return (torch.stack(out, dim=0).to(torch.float32), ) - -class SaveImageWithAlpha: - def __init__(self): - self.output_dir = folder_paths.get_output_directory() - self.type = "output" - self.prefix_append = "" - - @classmethod - def INPUT_TYPES(s): - return {"required": - {"images": ("IMAGE", ), - "mask": ("MASK", ), - "filename_prefix": ("STRING", {"default": "ComfyUI"})}, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } - - RETURN_TYPES = () - FUNCTION = "save_images_alpha" - OUTPUT_NODE = True - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Saves an image and mask as .PNG with the mask as the alpha channel. -""" - - def save_images_alpha(self, images, mask, filename_prefix="ComfyUI_image_with_alpha", prompt=None, extra_pnginfo=None): - from comfy.cli_args import args - from PIL.PngImagePlugin import PngInfo - filename_prefix += self.prefix_append - full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) - results = list() - if mask.dtype == torch.float16: - mask = mask.to(torch.float32) - def file_counter(): - max_counter = 0 - # Loop through the existing files - for existing_file in os.listdir(full_output_folder): - # Check if the file matches the expected format - match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file) - if match: - # Extract the numeric portion of the filename - file_counter = int(match.group(1)) - # Update the maximum counter value if necessary - if file_counter > max_counter: - max_counter = file_counter - return max_counter - - for image, alpha in zip(images, mask): - i = 255. * image.cpu().numpy() - a = 255. 
* alpha.cpu().numpy() - img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) - - # Resize the mask to match the image size - a_resized = Image.fromarray(a).resize(img.size, Image.LANCZOS) - a_resized = np.clip(a_resized, 0, 255).astype(np.uint8) - img.putalpha(Image.fromarray(a_resized, mode='L')) - metadata = None - if not args.disable_metadata: - metadata = PngInfo() - if prompt is not None: - metadata.add_text("prompt", json.dumps(prompt)) - if extra_pnginfo is not None: - for x in extra_pnginfo: - metadata.add_text(x, json.dumps(extra_pnginfo[x])) - - # Increment the counter by 1 to get the next available value - counter = file_counter() + 1 - file = f"{filename}_{counter:05}.png" - img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4) - results.append({ - "filename": file, - "subfolder": subfolder, - "type": self.type - }) - - return { "ui": { "images": results } } - -class ImageConcanate: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "image1": ("IMAGE",), - "image2": ("IMAGE",), - "direction": ( - [ 'right', - 'down', - 'left', - 'up', - ], - { - "default": 'right' - }), - "match_image_size": ("BOOLEAN", {"default": False}), - }} - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "concanate" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Concatenates the image2 to image1 in the specified direction. -""" - - def concanate(self, image1, image2, direction, match_image_size): - if match_image_size: - image2 = torch.nn.functional.interpolate(image2, size=(image1.shape[2], image1.shape[3]), mode="bilinear") - if direction == 'right': - row = torch.cat((image1, image2), dim=2) - elif direction == 'down': - row = torch.cat((image1, image2), dim=1) - elif direction == 'left': - row = torch.cat((image2, image1), dim=2) - elif direction == 'up': - row = torch.cat((image2, image1), dim=1) - return (row,) - -class ImageGridComposite2x2: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "image1": ("IMAGE",), - "image2": ("IMAGE",), - "image3": ("IMAGE",), - "image4": ("IMAGE",), - }} - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "compositegrid" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Concatenates the 4 input images into a 2x2 grid. -""" - - def compositegrid(self, image1, image2, image3, image4): - top_row = torch.cat((image1, image2), dim=2) - bottom_row = torch.cat((image3, image4), dim=2) - grid = torch.cat((top_row, bottom_row), dim=1) - return (grid,) - -class ImageGridComposite3x3: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "image1": ("IMAGE",), - "image2": ("IMAGE",), - "image3": ("IMAGE",), - "image4": ("IMAGE",), - "image5": ("IMAGE",), - "image6": ("IMAGE",), - "image7": ("IMAGE",), - "image8": ("IMAGE",), - "image9": ("IMAGE",), - }} - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "compositegrid" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Concatenates the 9 input images into a 3x3 grid. 
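# ImageConcanate and the grid-composite nodes above all build layouts with torch.cat on
# ComfyUI's (batch, height, width, channels) image tensors: dim=2 places images side by side,
# dim=1 stacks them vertically. A quick shape check with dummy tensors (sizes are arbitrary
# assumptions):
import torch

a = torch.zeros(1, 64, 48, 3)   # B, H, W, C
b = torch.ones(1, 64, 48, 3)

side_by_side = torch.cat((a, b), dim=2)   # width grows:  (1, 64, 96, 3)
stacked = torch.cat((a, b), dim=1)        # height grows: (1, 128, 48, 3)
grid_2x2 = torch.cat((torch.cat((a, b), dim=2),
                      torch.cat((b, a), dim=2)), dim=1)
print(side_by_side.shape, stacked.shape, grid_2x2.shape)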
-""" - - def compositegrid(self, image1, image2, image3, image4, image5, image6, image7, image8, image9): - top_row = torch.cat((image1, image2, image3), dim=2) - mid_row = torch.cat((image4, image5, image6), dim=2) - bottom_row = torch.cat((image7, image8, image9), dim=2) - grid = torch.cat((top_row, mid_row, bottom_row), dim=1) - return (grid,) - -class ImageBatchTestPattern: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "batch_size": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), - "start_from": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), - "text_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), - "text_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), - "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), - "font_size": ("INT", {"default": 255,"min": 8, "max": 4096, "step": 1}), - }} - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "generatetestpattern" - CATEGORY = "KJNodes/text" - - def generatetestpattern(self, batch_size, font, font_size, start_from, width, height, text_x, text_y): - out = [] - # Generate the sequential numbers for each image - numbers = np.arange(start_from, start_from + batch_size) - font_path = folder_paths.get_full_path("kjnodes_fonts", font) - - for number in numbers: - # Create a black image with the number as a random color text - image = Image.new("RGB", (width, height), color='black') - draw = ImageDraw.Draw(image) - - # Generate a random color for the text - font_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) - - font = ImageFont.truetype(font_path, font_size) - - # Get the size of the text and position it in the center - text = str(number) - - try: - draw.text((text_x, text_y), text, font=font, fill=font_color, features=['-liga']) - except: - draw.text((text_x, text_y), text, font=font, fill=font_color,) - - # Convert the image to a numpy array and normalize the pixel values - image_np = np.array(image).astype(np.float32) / 255.0 - image_tensor = torch.from_numpy(image_np).unsqueeze(0) - out.append(image_tensor) - out_tensor = torch.cat(out, dim=0) - - return (out_tensor,) - -#based on nodes from mtb https://github.com/melMass/comfy_mtb - -from .utility import tensor2pil, pil2tensor - -class BatchCropFromMask: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "original_images": ("IMAGE",), - "masks": ("MASK",), - "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}), - "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), - }, - } - - RETURN_TYPES = ( - "IMAGE", - "IMAGE", - "BBOX", - "INT", - "INT", - ) - RETURN_NAMES = ( - "original_images", - "cropped_images", - "bboxes", - "width", - "height", - ) - FUNCTION = "crop" - CATEGORY = "KJNodes/masking" - - def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha): - if alpha == 0: - return prev_bbox_size - return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size) - - def smooth_center(self, prev_center, curr_center, alpha=0.5): - if alpha == 0: - return prev_center - return ( - round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]), - round(alpha * curr_center[1] + (1 - alpha) * prev_center[1]) - ) - - def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha): - - bounding_boxes = [] - cropped_images = [] - - self.max_bbox_width = 0 - 
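# smooth_bbox_size and smooth_center above are simple exponential blends between the previous
# and current value: new = alpha * current + (1 - alpha) * previous, so bbox jitter between
# frames is damped. A one-line worked example with assumed numbers:
prev_size, curr_size, alpha = 200, 240, 0.5
smoothed = round(alpha * curr_size + (1 - alpha) * prev_size)
print(smoothed)   # 220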
self.max_bbox_height = 0 - - # First, calculate the maximum bounding box size across all masks - curr_max_bbox_width = 0 - curr_max_bbox_height = 0 - for mask in masks: - _mask = tensor2pil(mask)[0] - non_zero_indices = np.nonzero(np.array(_mask)) - min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) - min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) - width = max_x - min_x - height = max_y - min_y - curr_max_bbox_width = max(curr_max_bbox_width, width) - curr_max_bbox_height = max(curr_max_bbox_height, height) - - # Smooth the changes in the bounding box size - self.max_bbox_width = self.smooth_bbox_size(self.max_bbox_width, curr_max_bbox_width, bbox_smooth_alpha) - self.max_bbox_height = self.smooth_bbox_size(self.max_bbox_height, curr_max_bbox_height, bbox_smooth_alpha) - - # Apply the crop size multiplier - self.max_bbox_width = round(self.max_bbox_width * crop_size_mult) - self.max_bbox_height = round(self.max_bbox_height * crop_size_mult) - bbox_aspect_ratio = self.max_bbox_width / self.max_bbox_height - - # Then, for each mask and corresponding image... - for i, (mask, img) in enumerate(zip(masks, original_images)): - _mask = tensor2pil(mask)[0] - non_zero_indices = np.nonzero(np.array(_mask)) - min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) - min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) - - # Calculate center of bounding box - center_x = np.mean(non_zero_indices[1]) - center_y = np.mean(non_zero_indices[0]) - curr_center = (round(center_x), round(center_y)) - - # If this is the first frame, initialize prev_center with curr_center - if not hasattr(self, 'prev_center'): - self.prev_center = curr_center - - # Smooth the changes in the center coordinates from the second frame onwards - if i > 0: - center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha) - else: - center = curr_center - - # Update prev_center for the next frame - self.prev_center = center - - # Create bounding box using max_bbox_width and max_bbox_height - half_box_width = round(self.max_bbox_width / 2) - half_box_height = round(self.max_bbox_height / 2) - min_x = max(0, center[0] - half_box_width) - max_x = min(img.shape[1], center[0] + half_box_width) - min_y = max(0, center[1] - half_box_height) - max_y = min(img.shape[0], center[1] + half_box_height) - - # Append bounding box coordinates - bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y)) - - # Crop the image from the bounding box - cropped_img = img[min_y:max_y, min_x:max_x, :] - - # Calculate the new dimensions while maintaining the aspect ratio - new_height = min(cropped_img.shape[0], self.max_bbox_height) - new_width = round(new_height * bbox_aspect_ratio) - - # Resize the image - resize_transform = Resize((new_height, new_width)) - resized_img = resize_transform(cropped_img.permute(2, 0, 1)) - - # Perform the center crop to the desired size - crop_transform = CenterCrop((self.max_bbox_height, self.max_bbox_width)) # swap the order here if necessary - cropped_resized_img = crop_transform(resized_img) - - cropped_images.append(cropped_resized_img.permute(1, 2, 0)) - - cropped_out = torch.stack(cropped_images, dim=0) - - return (original_images, cropped_out, bounding_boxes, self.max_bbox_width, self.max_bbox_height, ) - - -def bbox_to_region(bbox, target_size=None): - bbox = bbox_check(bbox, target_size) - return (bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]) - -def bbox_check(bbox, target_size=None): - if not target_size: 
- return bbox - - new_bbox = ( - bbox[0], - bbox[1], - min(target_size[0] - bbox[0], bbox[2]), - min(target_size[1] - bbox[1], bbox[3]), - ) - return new_bbox - -class BatchUncrop: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "original_images": ("IMAGE",), - "cropped_images": ("IMAGE",), - "bboxes": ("BBOX",), - "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}, ), - "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "border_top": ("BOOLEAN", {"default": True}), - "border_bottom": ("BOOLEAN", {"default": True}), - "border_left": ("BOOLEAN", {"default": True}), - "border_right": ("BOOLEAN", {"default": True}), - } - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "uncrop" - - CATEGORY = "KJNodes/masking" - - def uncrop(self, original_images, cropped_images, bboxes, border_blending, crop_rescale, border_top, border_bottom, border_left, border_right): - def inset_border(image, border_width, border_color, border_top, border_bottom, border_left, border_right): - draw = ImageDraw.Draw(image) - width, height = image.size - if border_top: - draw.rectangle((0, 0, width, border_width), fill=border_color) - if border_bottom: - draw.rectangle((0, height - border_width, width, height), fill=border_color) - if border_left: - draw.rectangle((0, 0, border_width, height), fill=border_color) - if border_right: - draw.rectangle((width - border_width, 0, width, height), fill=border_color) - return image - - if len(original_images) != len(cropped_images): - raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same") - - # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images - if len(bboxes) > len(original_images): - print(f"Warning: Dropping excess bounding boxes. 
Expected {len(original_images)}, but got {len(bboxes)}") - bboxes = bboxes[:len(original_images)] - elif len(bboxes) < len(original_images): - raise ValueError("There should be at least as many bboxes as there are original and cropped images") - - input_images = tensor2pil(original_images) - crop_imgs = tensor2pil(cropped_images) - - out_images = [] - for i in range(len(input_images)): - img = input_images[i] - crop = crop_imgs[i] - bbox = bboxes[i] - - # uncrop the image based on the bounding box - bb_x, bb_y, bb_width, bb_height = bbox - - paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) - - # scale factors - scale_x = crop_rescale - scale_y = crop_rescale - - # scaled paste_region - paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y)) - - # rescale the crop image to fit the paste_region - crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1]))) - crop_img = crop.convert("RGB") - - if border_blending > 1.0: - border_blending = 1.0 - elif border_blending < 0.0: - border_blending = 0.0 - - blend_ratio = (max(crop_img.size) / 2) * float(border_blending) - - blend = img.convert("RGBA") - mask = Image.new("L", img.size, 0) - - mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255) - mask_block = inset_border(mask_block, round(blend_ratio / 2), (0), border_top, border_bottom, border_left, border_right) - - mask.paste(mask_block, paste_region) - blend.paste(crop_img, paste_region) - - mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4)) - mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4)) - - blend.putalpha(mask) - img = Image.alpha_composite(img.convert("RGBA"), blend) - out_images.append(img.convert("RGB")) - - return (pil2tensor(out_images),) - -class BatchCropFromMaskAdvanced: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "original_images": ("IMAGE",), - "masks": ("MASK",), - "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), - "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), - }, - } - - RETURN_TYPES = ( - "IMAGE", - "IMAGE", - "MASK", - "IMAGE", - "MASK", - "BBOX", - "BBOX", - "INT", - "INT", - ) - RETURN_NAMES = ( - "original_images", - "cropped_images", - "cropped_masks", - "combined_crop_image", - "combined_crop_masks", - "bboxes", - "combined_bounding_box", - "bbox_width", - "bbox_height", - ) - FUNCTION = "crop" - CATEGORY = "KJNodes/masking" - - def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha): - return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size) - - def smooth_center(self, prev_center, curr_center, alpha=0.5): - return (round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]), - round(alpha * curr_center[1] + (1 - alpha) * prev_center[1])) - - def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha): - bounding_boxes = [] - combined_bounding_box = [] - cropped_images = [] - cropped_masks = [] - cropped_masks_out = [] - combined_crop_out = [] - combined_cropped_images = [] - combined_cropped_masks = [] - - def calculate_bbox(mask): - non_zero_indices = np.nonzero(np.array(mask)) - - # handle empty masks - min_x, max_x, min_y, max_y = 0, 0, 0, 0 - if len(non_zero_indices[1]) > 0 and len(non_zero_indices[0]) > 0: - min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) - min_y, max_y = 
np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) - - width = max_x - min_x - height = max_y - min_y - bbox_size = max(width, height) - return min_x, max_x, min_y, max_y, bbox_size - - combined_mask = torch.max(masks, dim=0)[0] - _mask = tensor2pil(combined_mask)[0] - new_min_x, new_max_x, new_min_y, new_max_y, combined_bbox_size = calculate_bbox(_mask) - center_x = (new_min_x + new_max_x) / 2 - center_y = (new_min_y + new_max_y) / 2 - half_box_size = round(combined_bbox_size // 2) - new_min_x = max(0, round(center_x - half_box_size)) - new_max_x = min(original_images[0].shape[1], round(center_x + half_box_size)) - new_min_y = max(0, round(center_y - half_box_size)) - new_max_y = min(original_images[0].shape[0], round(center_y + half_box_size)) - - combined_bounding_box.append((new_min_x, new_min_y, new_max_x - new_min_x, new_max_y - new_min_y)) - - self.max_bbox_size = 0 - - # First, calculate the maximum bounding box size across all masks - curr_max_bbox_size = max(calculate_bbox(tensor2pil(mask)[0])[-1] for mask in masks) - # Smooth the changes in the bounding box size - self.max_bbox_size = self.smooth_bbox_size(self.max_bbox_size, curr_max_bbox_size, bbox_smooth_alpha) - # Apply the crop size multiplier - self.max_bbox_size = round(self.max_bbox_size * crop_size_mult) - # Make sure max_bbox_size is divisible by 16, if not, round it upwards so it is - self.max_bbox_size = math.ceil(self.max_bbox_size / 16) * 16 - - if self.max_bbox_size > original_images[0].shape[0] or self.max_bbox_size > original_images[0].shape[1]: - # max_bbox_size can only be as big as our input's width or height, and it has to be even - self.max_bbox_size = math.floor(min(original_images[0].shape[0], original_images[0].shape[1]) / 2) * 2 - - # Then, for each mask and corresponding image... 
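# A minimal sketch of the bbox-size smoothing used above, assuming the same
# exponential moving average and the same "round up to a multiple of 16" rule;
# the per-frame sizes below are hypothetical.
import math

def smooth_bbox_size(prev_size, curr_size, alpha):
    # alpha=1.0 follows the current frame exactly, alpha=0.0 freezes the size
    return round(alpha * curr_size + (1 - alpha) * prev_size)

sizes = [120, 180, 90, 200]                      # hypothetical per-frame bbox sizes
smoothed, prev = [], 0
for s in sizes:
    prev = smooth_bbox_size(prev, s, alpha=0.5)
    smoothed.append(math.ceil(prev / 16) * 16)   # keep the crop size divisible by 16
print(smoothed)                                  # [64, 128, 112, 160]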
- for i, (mask, img) in enumerate(zip(masks, original_images)): - _mask = tensor2pil(mask)[0] - non_zero_indices = np.nonzero(np.array(_mask)) - - # check for empty masks - if len(non_zero_indices[0]) > 0 and len(non_zero_indices[1]) > 0: - min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) - min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) - - # Calculate center of bounding box - center_x = np.mean(non_zero_indices[1]) - center_y = np.mean(non_zero_indices[0]) - curr_center = (round(center_x), round(center_y)) - - # If this is the first frame, initialize prev_center with curr_center - if not hasattr(self, 'prev_center'): - self.prev_center = curr_center - - # Smooth the changes in the center coordinates from the second frame onwards - if i > 0: - center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha) - else: - center = curr_center - - # Update prev_center for the next frame - self.prev_center = center - - # Create bounding box using max_bbox_size - half_box_size = self.max_bbox_size // 2 - min_x = max(0, center[0] - half_box_size) - max_x = min(img.shape[1], center[0] + half_box_size) - min_y = max(0, center[1] - half_box_size) - max_y = min(img.shape[0], center[1] + half_box_size) - - # Append bounding box coordinates - bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y)) - - # Crop the image from the bounding box - cropped_img = img[min_y:max_y, min_x:max_x, :] - cropped_mask = mask[min_y:max_y, min_x:max_x] - - # Resize the cropped image to a fixed size - new_size = max(cropped_img.shape[0], cropped_img.shape[1]) - resize_transform = Resize(new_size, interpolation=InterpolationMode.NEAREST, max_size=max(img.shape[0], img.shape[1])) - resized_mask = resize_transform(cropped_mask.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0) - resized_img = resize_transform(cropped_img.permute(2, 0, 1)) - # Perform the center crop to the desired size - # Constrain the crop to the smaller of our bbox or our image so we don't expand past the image dimensions. 
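# A minimal sketch of the square crop window built above: a fixed box size
# centred on the (smoothed) mask centre and clamped to the image bounds.
# The centre, box size and image dimensions are hypothetical.
def crop_window(center, box_size, img_h, img_w):
    half = box_size // 2
    min_x = max(0, center[0] - half)
    max_x = min(img_w, center[0] + half)
    min_y = max(0, center[1] - half)
    max_y = min(img_h, center[1] + half)
    return min_x, min_y, max_x - min_x, max_y - min_y   # x, y, width, height

print(crop_window(center=(500, 100), box_size=256, img_h=512, img_w=768))
# (372, 0, 256, 228) - the window is clipped at the top edge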
- crop_transform = CenterCrop((min(self.max_bbox_size, resized_img.shape[1]), min(self.max_bbox_size, resized_img.shape[2]))) - - cropped_resized_img = crop_transform(resized_img) - cropped_images.append(cropped_resized_img.permute(1, 2, 0)) - - cropped_resized_mask = crop_transform(resized_mask) - cropped_masks.append(cropped_resized_mask) - - combined_cropped_img = original_images[i][new_min_y:new_max_y, new_min_x:new_max_x, :] - combined_cropped_images.append(combined_cropped_img) - - combined_cropped_mask = masks[i][new_min_y:new_max_y, new_min_x:new_max_x] - combined_cropped_masks.append(combined_cropped_mask) - else: - bounding_boxes.append((0, 0, img.shape[1], img.shape[0])) - cropped_images.append(img) - cropped_masks.append(mask) - combined_cropped_images.append(img) - combined_cropped_masks.append(mask) - - cropped_out = torch.stack(cropped_images, dim=0) - combined_crop_out = torch.stack(combined_cropped_images, dim=0) - cropped_masks_out = torch.stack(cropped_masks, dim=0) - combined_crop_mask_out = torch.stack(combined_cropped_masks, dim=0) - - return (original_images, cropped_out, cropped_masks_out, combined_crop_out, combined_crop_mask_out, bounding_boxes, combined_bounding_box, self.max_bbox_size, self.max_bbox_size) - -class FilterZeroMasksAndCorrespondingImages: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "masks": ("MASK",), - }, - "optional": { - "original_images": ("IMAGE",), - }, - } - - RETURN_TYPES = ("MASK", "IMAGE", "IMAGE", "INDEXES",) - RETURN_NAMES = ("non_zero_masks_out", "non_zero_mask_images_out", "zero_mask_images_out", "zero_mask_images_out_indexes",) - FUNCTION = "filter" - CATEGORY = "KJNodes/masking" - DESCRIPTION = """ -Filter out all the empty (i.e. all zero) mask in masks -Also filter out all the corresponding images in original_images by indexes if provide - -original_images (optional): If provided, need have same length as masks. 
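# A minimal sketch of the filtering described above: keep masks that contain
# any non-zero pixel and remember the indexes of the empty ones so the
# matching images can be re-inserted later. The mask batch is hypothetical.
import torch

masks = torch.stack([torch.zeros(4, 4), torch.ones(4, 4), torch.zeros(4, 4)])
non_zero, zero_indexes = [], []
for i, m in enumerate(masks):
    if torch.count_nonzero(m) > 0:
        non_zero.append(m)
    else:
        zero_indexes.append(i)
print(len(non_zero), zero_indexes)   # 1 [0, 2]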
-""" - - def filter(self, masks, original_images=None): - non_zero_masks = [] - non_zero_mask_images = [] - zero_mask_images = [] - zero_mask_images_indexes = [] - - masks_num = len(masks) - also_process_images = False - if original_images is not None: - imgs_num = len(original_images) - if len(original_images) == masks_num: - also_process_images = True - else: - print(f"[WARNING] ignore input: original_images, due to number of original_images ({imgs_num}) is not equal to number of masks ({masks_num})") - - for i in range(masks_num): - non_zero_num = np.count_nonzero(np.array(masks[i])) - if non_zero_num > 0: - non_zero_masks.append(masks[i]) - if also_process_images: - non_zero_mask_images.append(original_images[i]) - else: - zero_mask_images.append(original_images[i]) - zero_mask_images_indexes.append(i) - - non_zero_masks_out = torch.stack(non_zero_masks, dim=0) - non_zero_mask_images_out = zero_mask_images_out = zero_mask_images_out_indexes = None - - if also_process_images: - non_zero_mask_images_out = torch.stack(non_zero_mask_images, dim=0) - if len(zero_mask_images) > 0: - zero_mask_images_out = torch.stack(zero_mask_images, dim=0) - zero_mask_images_out_indexes = zero_mask_images_indexes - - return (non_zero_masks_out, non_zero_mask_images_out, zero_mask_images_out, zero_mask_images_out_indexes) - -class InsertImageBatchByIndexes: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "images": ("IMAGE",), - "images_to_insert": ("IMAGE",), - "insert_indexes": ("INDEXES",), - }, - } - - RETURN_TYPES = ("IMAGE", ) - RETURN_NAMES = ("images_after_insert", ) - FUNCTION = "insert" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -This node is designed to be use with node FilterZeroMasksAndCorrespondingImages -It inserts the images_to_insert into images according to insert_indexes - -Returns: - images_after_insert: updated original images with origonal sequence order -""" - - def insert(self, images, images_to_insert, insert_indexes): - images_after_insert = images - - if images_to_insert is not None and insert_indexes is not None: - images_to_insert_num = len(images_to_insert) - insert_indexes_num = len(insert_indexes) - if images_to_insert_num == insert_indexes_num: - images_after_insert = [] - - i_images = 0 - for i in range(len(images) + images_to_insert_num): - if i in insert_indexes: - images_after_insert.append(images_to_insert[insert_indexes.index(i)]) - else: - images_after_insert.append(images[i_images]) - i_images += 1 - - images_after_insert = torch.stack(images_after_insert, dim=0) - - else: - print(f"[WARNING] skip this node, due to number of images_to_insert ({images_to_insert_num}) is not equal to number of insert_indexes ({insert_indexes_num})") - - - return (images_after_insert, ) - -def bbox_to_region(bbox, target_size=None): - bbox = bbox_check(bbox, target_size) - return (bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]) - -def bbox_check(bbox, target_size=None): - if not target_size: - return bbox - - new_bbox = ( - bbox[0], - bbox[1], - min(target_size[0] - bbox[0], bbox[2]), - min(target_size[1] - bbox[1], bbox[3]), - ) - return new_bbox - -class BatchUncropAdvanced: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "original_images": ("IMAGE",), - "cropped_images": ("IMAGE",), - "cropped_masks": ("MASK",), - "combined_crop_mask": ("MASK",), - "bboxes": ("BBOX",), - "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}, ), - "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, 
"step": 0.01}), - "use_combined_mask": ("BOOLEAN", {"default": False}), - "use_square_mask": ("BOOLEAN", {"default": True}), - }, - "optional": { - "combined_bounding_box": ("BBOX", {"default": None}), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "uncrop" - CATEGORY = "KJNodes/masking" - - - def uncrop(self, original_images, cropped_images, cropped_masks, combined_crop_mask, bboxes, border_blending, crop_rescale, use_combined_mask, use_square_mask, combined_bounding_box = None): - - def inset_border(image, border_width=20, border_color=(0)): - width, height = image.size - bordered_image = Image.new(image.mode, (width, height), border_color) - bordered_image.paste(image, (0, 0)) - draw = ImageDraw.Draw(bordered_image) - draw.rectangle((0, 0, width - 1, height - 1), outline=border_color, width=border_width) - return bordered_image - - if len(original_images) != len(cropped_images): - raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same") - - # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images - if len(bboxes) > len(original_images): - print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}") - bboxes = bboxes[:len(original_images)] - elif len(bboxes) < len(original_images): - raise ValueError("There should be at least as many bboxes as there are original and cropped images") - - crop_imgs = tensor2pil(cropped_images) - input_images = tensor2pil(original_images) - out_images = [] - - for i in range(len(input_images)): - img = input_images[i] - crop = crop_imgs[i] - bbox = bboxes[i] - - if use_combined_mask: - bb_x, bb_y, bb_width, bb_height = combined_bounding_box[0] - paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) - mask = combined_crop_mask[i] - else: - bb_x, bb_y, bb_width, bb_height = bbox - paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) - mask = cropped_masks[i] - - # scale paste_region - scale_x = scale_y = crop_rescale - paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y)) - - # rescale the crop image to fit the paste_region - crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1]))) - crop_img = crop.convert("RGB") - - #border blending - if border_blending > 1.0: - border_blending = 1.0 - elif border_blending < 0.0: - border_blending = 0.0 - - blend_ratio = (max(crop_img.size) / 2) * float(border_blending) - blend = img.convert("RGBA") - - if use_square_mask: - mask = Image.new("L", img.size, 0) - mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255) - mask_block = inset_border(mask_block, round(blend_ratio / 2), (0)) - mask.paste(mask_block, paste_region) - else: - original_mask = tensor2pil(mask)[0] - original_mask = original_mask.resize((paste_region[2]-paste_region[0], paste_region[3]-paste_region[1])) - mask = Image.new("L", img.size, 0) - mask.paste(original_mask, paste_region) - - mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4)) - mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4)) - - blend.paste(crop_img, paste_region) - blend.putalpha(mask) - - img = Image.alpha_composite(img.convert("RGBA"), blend) - out_images.append(img.convert("RGB")) - - return (pil2tensor(out_images),) - -class BatchCLIPSeg: - - def __init__(self): - pass - - 
@classmethod - def INPUT_TYPES(s): - - return {"required": - { - "images": ("IMAGE",), - "text": ("STRING", {"multiline": False}), - "threshold": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.001}), - "binary_mask": ("BOOLEAN", {"default": True}), - "combine_mask": ("BOOLEAN", {"default": False}), - "use_cuda": ("BOOLEAN", {"default": True}), - }, - } - - CATEGORY = "KJNodes/masking" - RETURN_TYPES = ("MASK",) - RETURN_NAMES = ("Mask",) - FUNCTION = "segment_image" - DESCRIPTION = """ -Segments an image or batch of images using CLIPSeg. -""" - - def segment_image(self, images, text, threshold, binary_mask, combine_mask, use_cuda): - from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation - out = [] - height, width, _ = images[0].shape - if use_cuda and torch.cuda.is_available(): - device = torch.device("cuda") - else: - device = torch.device("cpu") - dtype = comfy.model_management.unet_dtype() - model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined") - model.to(dtype) - model.to(device) - images = images.to(device) - processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") - pbar = comfy.utils.ProgressBar(images.shape[0]) - autocast_condition = (dtype != torch.float32) and not comfy.model_management.is_device_mps(device) - with torch.autocast(comfy.model_management.get_autocast_device(device), dtype=dtype) if autocast_condition else nullcontext(): - for image in images: - image = (image* 255).type(torch.uint8) - prompt = text - input_prc = processor(text=prompt, images=image, padding="max_length", return_tensors="pt") - # Move the processed input to the device - for key in input_prc: - input_prc[key] = input_prc[key].to(device) - - outputs = model(**input_prc) - - tensor = torch.sigmoid(outputs[0]) - tensor_thresholded = torch.where(tensor > threshold, tensor, torch.tensor(0, dtype=torch.float)) - tensor_normalized = (tensor_thresholded - tensor_thresholded.min()) / (tensor_thresholded.max() - tensor_thresholded.min()) - tensor = tensor_normalized.unsqueeze(0).unsqueeze(0) - - # Resize the mask - resized_tensor = F.interpolate(tensor, size=(height, width), mode='nearest') - - # Remove the extra dimensions - resized_tensor = resized_tensor[0, 0, :, :] - pbar.update(1) - out.append(resized_tensor) - - results = torch.stack(out).cpu().float() - - if combine_mask: - combined_results = torch.max(results, dim=0)[0] - results = combined_results.unsqueeze(0).repeat(len(images),1,1) - - if binary_mask: - results = results.round() - - return results, - -class RoundMask: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "mask": ("MASK",), - }} - - RETURN_TYPES = ("MASK",) - FUNCTION = "round" - CATEGORY = "KJNodes/masking" - DESCRIPTION = """ -Rounds the mask or batch of masks to a binary mask. -RoundMask example - -""" - - def round(self, mask): - mask = mask.round() - return (mask,) - -class ResizeMask: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "mask": ("MASK",), - "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, "display": "number" }), - "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, "display": "number" }), - "keep_proportions": ("BOOLEAN", { "default": False }), - } - } - - RETURN_TYPES = ("MASK", "INT", "INT",) - RETURN_NAMES = ("mask", "width", "height",) - FUNCTION = "resize" - CATEGORY = "KJNodes/masking" - DESCRIPTION = """ -Resizes the mask or batch of masks to the specified width and height. 
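# A minimal sketch of nearest-neighbour mask resizing as described above.
# F.interpolate expects (N, C, H, W), so the (B, H, W) mask batch gains a
# channel dimension and drops it again afterwards; sizes are hypothetical.
import torch
import torch.nn.functional as F

masks = torch.rand(2, 512, 512).round()                        # (B, H, W)
resized = F.interpolate(masks.unsqueeze(1), size=(256, 384), mode="nearest")
resized = resized.squeeze(1)                                   # back to (B, H, W)
print(resized.shape)                                           # torch.Size([2, 256, 384])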
-""" - - def resize(self, mask, width, height, keep_proportions): - if keep_proportions: - _, oh, ow, _ = mask.shape - width = ow if width == 0 else width - height = oh if height == 0 else height - ratio = min(width / ow, height / oh) - width = round(ow*ratio) - height = round(oh*ratio) - - outputs = mask.unsqueeze(0) # Add an extra dimension for batch size - outputs = F.interpolate(outputs, size=(height, width), mode="nearest") - outputs = outputs.squeeze(0) # Remove the extra dimension after interpolation - - return(outputs, outputs.shape[2], outputs.shape[1],) - -class OffsetMask: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "mask": ("MASK",), - "x": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), - "y": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), - "angle": ("INT", { "default": 0, "min": -360, "max": 360, "step": 1, "display": "number" }), - "duplication_factor": ("INT", { "default": 1, "min": 1, "max": 1000, "step": 1, "display": "number" }), - "roll": ("BOOLEAN", { "default": False }), - "incremental": ("BOOLEAN", { "default": False }), - "padding_mode": ( - [ - 'empty', - 'border', - 'reflection', - - ], { - "default": 'empty' - }), - } - } - - RETURN_TYPES = ("MASK",) - RETURN_NAMES = ("mask",) - FUNCTION = "offset" - CATEGORY = "KJNodes/masking" - DESCRIPTION = """ -Offsets the mask by the specified amount. - - mask: Input mask or mask batch - - x: Horizontal offset - - y: Vertical offset - - angle: Angle in degrees - - roll: roll edge wrapping - - duplication_factor: Number of times to duplicate the mask to form a batch - - border padding_mode: Padding mode for the mask -""" - - def offset(self, mask, x, y, angle, roll=False, incremental=False, duplication_factor=1, padding_mode="empty"): - # Create duplicates of the mask batch - mask = mask.repeat(duplication_factor, 1, 1).clone() - - batch_size, height, width = mask.shape - - if angle != 0 and incremental: - for i in range(batch_size): - rotation_angle = angle * (i+1) - mask[i] = TF.rotate(mask[i].unsqueeze(0), rotation_angle).squeeze(0) - elif angle > 0: - for i in range(batch_size): - mask[i] = TF.rotate(mask[i].unsqueeze(0), angle).squeeze(0) - - if roll: - if incremental: - for i in range(batch_size): - shift_x = min(x*(i+1), width-1) - shift_y = min(y*(i+1), height-1) - if shift_x != 0: - mask[i] = torch.roll(mask[i], shifts=shift_x, dims=1) - if shift_y != 0: - mask[i] = torch.roll(mask[i], shifts=shift_y, dims=0) - else: - shift_x = min(x, width-1) - shift_y = min(y, height-1) - if shift_x != 0: - mask = torch.roll(mask, shifts=shift_x, dims=2) - if shift_y != 0: - mask = torch.roll(mask, shifts=shift_y, dims=1) - else: - - for i in range(batch_size): - if incremental: - temp_x = min(x * (i+1), width-1) - temp_y = min(y * (i+1), height-1) - else: - temp_x = min(x, width-1) - temp_y = min(y, height-1) - if temp_x > 0: - if padding_mode == 'empty': - mask[i] = torch.cat([torch.zeros((height, temp_x)), mask[i, :, :-temp_x]], dim=1) - elif padding_mode in ['replicate', 'reflect']: - mask[i] = F.pad(mask[i, :, :-temp_x], (0, temp_x), mode=padding_mode) - elif temp_x < 0: - if padding_mode == 'empty': - mask[i] = torch.cat([mask[i, :, :temp_x], torch.zeros((height, -temp_x))], dim=1) - elif padding_mode in ['replicate', 'reflect']: - mask[i] = F.pad(mask[i, :, -temp_x:], (temp_x, 0), mode=padding_mode) - - if temp_y > 0: - if padding_mode == 'empty': - mask[i] = torch.cat([torch.zeros((temp_y, width)), 
mask[i, :-temp_y, :]], dim=0) - elif padding_mode in ['replicate', 'reflect']: - mask[i] = F.pad(mask[i, :-temp_y, :], (0, temp_y), mode=padding_mode) - elif temp_y < 0: - if padding_mode == 'empty': - mask[i] = torch.cat([mask[i, :temp_y, :], torch.zeros((-temp_y, width))], dim=0) - elif padding_mode in ['replicate', 'reflect']: - mask[i] = F.pad(mask[i, -temp_y:, :], (temp_y, 0), mode=padding_mode) - - return mask, - - -class WidgetToString: - @classmethod - def IS_CHANGED(cls, **kwargs): - return float("NaN") - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "id": ("INT", {"default": 0}), - "widget_name": ("STRING", {"multiline": False}), - "return_all": ("BOOLEAN", {"default": False}), - }, - - "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", - "prompt": "PROMPT"}, - } - - RETURN_TYPES = ("STRING", ) - FUNCTION = "get_widget_value" - CATEGORY = "KJNodes/text" - DESCRIPTION = """ -Selects a node and it's specified widget and outputs the value as a string. -To see node id's, enable node id display from Manager badge menu. -""" - - def get_widget_value(self, id, widget_name, extra_pnginfo, prompt, return_all=False): - workflow = extra_pnginfo["workflow"] - print(workflow) - results = [] - for node in workflow["nodes"]: - print(node) - node_id = node["id"] - - if node_id != id: - continue - - values = prompt[str(node_id)] - if "inputs" in values: - if return_all: - results.append(', '.join(f'{k}: {str(v)}' for k, v in values["inputs"].items())) - elif widget_name in values["inputs"]: - v = str(values["inputs"][widget_name]) # Convert to string here - return (v, ) - else: - raise NameError(f"Widget not found: {id}.{widget_name}") - if not results: - raise NameError(f"Node not found: {id}") - return (', '.join(results).strip(', '), ) - -class CreateShapeMask: - - RETURN_TYPES = ("MASK", "MASK",) - RETURN_NAMES = ("mask", "mask_inverted",) - FUNCTION = "createshapemask" - CATEGORY = "KJNodes/masking/generate" - DESCRIPTION = """ -Creates a mask or batch of masks with the specified shape. -Locations are center locations. -Grow value is the amount to grow the shape on each frame, creating animated masks. 
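# A minimal sketch of the animated shape mask described above: a white circle
# drawn on a black canvas, grown by `grow` pixels per frame and reduced to a
# single-channel float mask. All parameters are hypothetical.
import numpy as np
import torch
from PIL import Image, ImageDraw

frames, grow, size, cx, cy = 4, 16, 64, 128, 128
out = []
for i in range(frames):
    s = max(0, size + i * grow)
    img = Image.new("L", (256, 256), 0)
    ImageDraw.Draw(img).ellipse(
        [cx - s // 2, cy - s // 2, cx + s // 2, cy + s // 2], fill=255)
    out.append(torch.from_numpy(np.array(img)).float() / 255.0)
masks = torch.stack(out)             # (frames, H, W), values in [0, 1]
print(masks.shape)                   # torch.Size([4, 256, 256])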
-""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "shape": ( - [ 'circle', - 'square', - 'triangle', - ], - { - "default": 'circle' - }), - "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), - "location_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), - "location_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), - "grow": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), - "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "shape_width": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), - "shape_height": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), - }, - } - - def createshapemask(self, frames, frame_width, frame_height, location_x, location_y, shape_width, shape_height, grow, shape): - # Define the number of images in the batch - batch_size = frames - out = [] - color = "white" - for i in range(batch_size): - image = Image.new("RGB", (frame_width, frame_height), "black") - draw = ImageDraw.Draw(image) - - # Calculate the size for this frame and ensure it's not less than 0 - current_width = max(0, shape_width + i*grow) - current_height = max(0, shape_height + i*grow) - - if shape == 'circle' or shape == 'square': - # Define the bounding box for the shape - left_up_point = (location_x - current_width // 2, location_y - current_height // 2) - right_down_point = (location_x + current_width // 2, location_y + current_height // 2) - two_points = [left_up_point, right_down_point] - - if shape == 'circle': - draw.ellipse(two_points, fill=color) - elif shape == 'square': - draw.rectangle(two_points, fill=color) - - elif shape == 'triangle': - # Define the points for the triangle - left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left - right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right - top_point = (location_x, location_y - current_height // 2) # top point - draw.polygon([top_point, left_up_point, right_down_point], fill=color) - - image = pil2tensor(image) - mask = image[:, :, :, 0] - out.append(mask) - outstack = torch.cat(out, dim=0) - return (outstack, 1.0 - outstack,) - -class CreateVoronoiMask: - - RETURN_TYPES = ("MASK", "MASK",) - RETURN_NAMES = ("mask", "mask_inverted",) - FUNCTION = "createvoronoi" - CATEGORY = "KJNodes/masking/generate" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "frames": ("INT", {"default": 16,"min": 2, "max": 4096, "step": 1}), - "num_points": ("INT", {"default": 15,"min": 1, "max": 4096, "step": 1}), - "line_width": ("INT", {"default": 4,"min": 1, "max": 4096, "step": 1}), - "speed": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 1.0, "step": 0.01}), - "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - }, - } - - def createvoronoi(self, frames, num_points, line_width, speed, frame_width, frame_height): - from scipy.spatial import Voronoi - # Define the number of images in the batch - batch_size = frames - out = [] - - # Calculate aspect ratio - aspect_ratio = frame_width / frame_height - - # Create start and end points for each point, considering the aspect ratio - start_points = np.random.rand(num_points, 2) - start_points[:, 0] *= aspect_ratio - - end_points = np.random.rand(num_points, 2) - end_points[:, 0] *= aspect_ratio - - for i in 
range(batch_size): - # Interpolate the points' positions based on the current frame - t = (i * speed) / (batch_size - 1) # normalize to [0, 1] over the frames - t = np.clip(t, 0, 1) # ensure t is in [0, 1] - points = (1 - t) * start_points + t * end_points # lerp - - # Adjust points for aspect ratio - points[:, 0] *= aspect_ratio - - vor = Voronoi(points) - - # Create a blank image with a white background - fig, ax = plt.subplots() - plt.subplots_adjust(left=0, right=1, bottom=0, top=1) - ax.set_xlim([0, aspect_ratio]); ax.set_ylim([0, 1]) # adjust x limits - ax.axis('off') - ax.margins(0, 0) - fig.set_size_inches(aspect_ratio * frame_height/100, frame_height/100) # adjust figure size - ax.fill_between([0, 1], [0, 1], color='white') - - # Plot each Voronoi ridge - for simplex in vor.ridge_vertices: - simplex = np.asarray(simplex) - if np.all(simplex >= 0): - plt.plot(vor.vertices[simplex, 0], vor.vertices[simplex, 1], 'k-', linewidth=line_width) - - fig.canvas.draw() - img = np.array(fig.canvas.renderer._renderer) - - plt.close(fig) - - pil_img = Image.fromarray(img).convert("L") - mask = torch.tensor(np.array(pil_img)) / 255.0 - - out.append(mask) - - return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) - -class CreateMagicMask: - - RETURN_TYPES = ("MASK", "MASK",) - RETURN_NAMES = ("mask", "mask_inverted",) - FUNCTION = "createmagicmask" - CATEGORY = "KJNodes/masking/generate" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "frames": ("INT", {"default": 16,"min": 2, "max": 4096, "step": 1}), - "depth": ("INT", {"default": 12,"min": 1, "max": 500, "step": 1}), - "distortion": ("FLOAT", {"default": 1.5,"min": 0.0, "max": 100.0, "step": 0.01}), - "seed": ("INT", {"default": 123,"min": 0, "max": 99999999, "step": 1}), - "transitions": ("INT", {"default": 1,"min": 1, "max": 20, "step": 1}), - "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - }, - } - - def createmagicmask(self, frames, transitions, depth, distortion, seed, frame_width, frame_height): - from .magictex import coordinate_grid, random_transform, magic - rng = np.random.default_rng(seed) - out = [] - coords = coordinate_grid((frame_width, frame_height)) - - # Calculate the number of frames for each transition - frames_per_transition = frames // transitions - - # Generate a base set of parameters - base_params = { - "coords": random_transform(coords, rng), - "depth": depth, - "distortion": distortion, - } - for t in range(transitions): - # Generate a second set of parameters that is at most max_diff away from the base parameters - params1 = base_params.copy() - params2 = base_params.copy() - - params1['coords'] = random_transform(coords, rng) - params2['coords'] = random_transform(coords, rng) - - for i in range(frames_per_transition): - # Compute the interpolation factor - alpha = i / frames_per_transition - - # Interpolate between the two sets of parameters - params = params1.copy() - params['coords'] = (1 - alpha) * params1['coords'] + alpha * params2['coords'] - - tex = magic(**params) - - dpi = frame_width / 10 - fig = plt.figure(figsize=(10, 10), dpi=dpi) - - ax = fig.add_subplot(111) - plt.subplots_adjust(left=0, right=1, bottom=0, top=1) - - ax.get_yaxis().set_ticks([]) - ax.get_xaxis().set_ticks([]) - ax.imshow(tex, aspect='auto') - - fig.canvas.draw() - img = np.array(fig.canvas.renderer._renderer) - - plt.close(fig) - - pil_img = Image.fromarray(img).convert("L") - mask = 
torch.tensor(np.array(pil_img)) / 255.0 - - out.append(mask) - - return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) - -class BboxToInt: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "bboxes": ("BBOX",), - "index": ("INT", {"default": 0,"min": 0, "max": 99999999, "step": 1}), - }, - } - - RETURN_TYPES = ("INT","INT","INT","INT","INT","INT",) - RETURN_NAMES = ("x_min","y_min","width","height", "center_x","center_y",) - FUNCTION = "bboxtoint" - CATEGORY = "KJNodes/masking" - DESCRIPTION = """ -Returns selected index from bounding box list as integers. -""" - def bboxtoint(self, bboxes, index): - x_min, y_min, width, height = bboxes[index] - center_x = int(x_min + width / 2) - center_y = int(y_min + height / 2) - - return (x_min, y_min, width, height, center_x, center_y,) - -class BboxVisualize: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "images": ("IMAGE",), - "bboxes": ("BBOX",), - "line_width": ("INT", {"default": 1,"min": 1, "max": 10, "step": 1}), - }, - } - - RETURN_TYPES = ("IMAGE",) - RETURN_NAMES = ("images",) - FUNCTION = "visualizebbox" - DESCRIPTION = """ -Visualizes the specified bbox on the image. -""" - - CATEGORY = "KJNodes/masking" - - def visualizebbox(self, bboxes, images, line_width): - image_list = [] - for image, bbox in zip(images, bboxes): - x_min, y_min, width, height = bbox - image = image.permute(2, 0, 1) - - img_with_bbox = image.clone() - - # Define the color for the bbox, e.g., red - color = torch.tensor([1, 0, 0], dtype=torch.float32) - - # Draw lines for each side of the bbox with the specified line width - for lw in range(line_width): - # Top horizontal line - img_with_bbox[:, y_min + lw, x_min:x_min + width] = color[:, None] - - # Bottom horizontal line - img_with_bbox[:, y_min + height - lw, x_min:x_min + width] = color[:, None] - - # Left vertical line - img_with_bbox[:, y_min:y_min + height, x_min + lw] = color[:, None] - - # Right vertical line - img_with_bbox[:, y_min:y_min + height, x_min + width - lw] = color[:, None] - - img_with_bbox = img_with_bbox.permute(1, 2, 0).unsqueeze(0) - image_list.append(img_with_bbox) - - return (torch.cat(image_list, dim=0),) - -class SplitBboxes: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "bboxes": ("BBOX",), - "index": ("INT", {"default": 0,"min": 0, "max": 99999999, "step": 1}), - }, - } - - RETURN_TYPES = ("BBOX","BBOX",) - RETURN_NAMES = ("bboxes_a","bboxes_b",) - FUNCTION = "splitbbox" - CATEGORY = "KJNodes/masking" - DESCRIPTION = """ -Splits the specified bbox list at the given index into two lists. -""" - - def splitbbox(self, bboxes, index): - bboxes_a = bboxes[:index] # Sub-list from the start of bboxes up to (but not including) the index - bboxes_b = bboxes[index:] # Sub-list from the index to the end of bboxes - - return (bboxes_a, bboxes_b,) - -from PIL import ImageGrab -import time -class ImageGrabPIL: - - @classmethod - def IS_CHANGED(cls): - - return - - RETURN_TYPES = ("IMAGE",) - RETURN_NAMES = ("image",) - FUNCTION = "screencap" - CATEGORY = "KJNodes/experimental" - DESCRIPTION = """ -Captures an area specified by screen coordinates. -Can be used for realtime diffusion with autoqueue. 
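# A minimal sketch of the screen capture described above: a PIL grab of a
# screen region converted to the (1, H, W, C) float tensor layout ComfyUI
# images use. The capture region is hypothetical.
import numpy as np
import torch
from PIL import ImageGrab

region = (0, 0, 512, 512)                          # left, top, right, bottom
frame = ImageGrab.grab(bbox=region)
tensor = torch.from_numpy(np.array(frame)).float() / 255.0
tensor = tensor.unsqueeze(0)                       # add the batch dimension
print(tensor.shape)                                # e.g. torch.Size([1, 512, 512, 3])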
-""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), - "y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), - "width": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), - "height": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), - "num_frames": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), - "delay": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.01}), - }, - } - - def screencap(self, x, y, width, height, num_frames, delay): - captures = [] - bbox = (x, y, x + width, y + height) - - for _ in range(num_frames): - # Capture screen - screen_capture = ImageGrab.grab(bbox=bbox) - screen_capture_torch = torch.tensor(np.array(screen_capture), dtype=torch.float32) / 255.0 - screen_capture_torch = screen_capture_torch.unsqueeze(0) - captures.append(screen_capture_torch) - - # Wait for a short delay if more than one frame is to be captured - if num_frames > 1: - time.sleep(delay) - - return (torch.cat(captures, dim=0),) - -class DummyLatentOut: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "latent": ("LATENT",), - } - } - - RETURN_TYPES = ("LATENT",) - FUNCTION = "dummy" - CATEGORY = "KJNodes/misc" - OUTPUT_NODE = True - DESCRIPTION = """ -Does nothing, used to trigger generic workflow output. -A way to get previews in the UI without saving anything to disk. -""" - - def dummy(self, latent): - return (latent,) - -class FlipSigmasAdjusted: - @classmethod - def INPUT_TYPES(s): - return {"required": - {"sigmas": ("SIGMAS", ), - "divide_by_last_sigma": ("BOOLEAN", {"default": False}), - "divide_by": ("FLOAT", {"default": 1,"min": 1, "max": 255, "step": 0.01}), - "offset_by": ("INT", {"default": 1,"min": -100, "max": 100, "step": 1}), - } - } - RETURN_TYPES = ("SIGMAS", "STRING",) - RETURN_NAMES = ("SIGMAS", "sigmas_string",) - CATEGORY = "KJNodes/noise" - FUNCTION = "get_sigmas_adjusted" - - def get_sigmas_adjusted(self, sigmas, divide_by_last_sigma, divide_by, offset_by): - - sigmas = sigmas.flip(0) - if sigmas[0] == 0: - sigmas[0] = 0.0001 - adjusted_sigmas = sigmas.clone() - #offset sigma - for i in range(1, len(sigmas)): - offset_index = i - offset_by - if 0 <= offset_index < len(sigmas): - adjusted_sigmas[i] = sigmas[offset_index] - else: - adjusted_sigmas[i] = 0.0001 - if adjusted_sigmas[0] == 0: - adjusted_sigmas[0] = 0.0001 - if divide_by_last_sigma: - adjusted_sigmas = adjusted_sigmas / adjusted_sigmas[-1] - - sigma_np_array = adjusted_sigmas.numpy() - array_string = np.array2string(sigma_np_array, precision=2, separator=', ', threshold=np.inf) - adjusted_sigmas = adjusted_sigmas / divide_by - return (adjusted_sigmas, array_string,) - - -class InjectNoiseToLatent: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "latents":("LATENT",), - "strength": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 200.0, "step": 0.0001}), - "noise": ("LATENT",), - "normalize": ("BOOLEAN", {"default": False}), - "average": ("BOOLEAN", {"default": False}), - }, - "optional":{ - "mask": ("MASK", ), - "mix_randn_amount": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.001}), - "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), - } - } - - RETURN_TYPES = ("LATENT",) - FUNCTION = "injectnoise" - CATEGORY = "KJNodes/noise" - - def injectnoise(self, latents, strength, noise, normalize, average, mix_randn_amount=0, seed=None, mask=None): - samples = latents.copy() - if latents["samples"].shape != 
noise["samples"].shape: - raise ValueError("InjectNoiseToLatent: Latent and noise must have the same shape") - if average: - noised = (samples["samples"].clone() + noise["samples"].clone()) / 2 - else: - noised = samples["samples"].clone() + noise["samples"].clone() * strength - if normalize: - noised = noised / noised.std() - if mask is not None: - mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(noised.shape[2], noised.shape[3]), mode="bilinear") - mask = mask.expand((-1,noised.shape[1],-1,-1)) - if mask.shape[0] < noised.shape[0]: - mask = mask.repeat((noised.shape[0] -1) // mask.shape[0] + 1, 1, 1, 1)[:noised.shape[0]] - noised = mask * noised + (1-mask) * latents["samples"] - if mix_randn_amount > 0: - if seed is not None: - torch.manual_seed(seed) - rand_noise = torch.randn_like(noised) - noised = ((1 - mix_randn_amount) * noised + mix_randn_amount * - rand_noise) / ((mix_randn_amount**2 + (1-mix_randn_amount)**2) ** 0.5) - samples["samples"] = noised - return (samples,) - -class AddLabel: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "image":("IMAGE",), - "text_x": ("INT", {"default": 10, "min": 0, "max": 4096, "step": 1}), - "text_y": ("INT", {"default": 2, "min": 0, "max": 4096, "step": 1}), - "height": ("INT", {"default": 48, "min": 0, "max": 4096, "step": 1}), - "font_size": ("INT", {"default": 32, "min": 0, "max": 4096, "step": 1}), - "font_color": ("STRING", {"default": "white"}), - "label_color": ("STRING", {"default": "black"}), - "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), - "text": ("STRING", {"default": "Text"}), - "direction": ( - [ 'up', - 'down', - ], - { - "default": 'up' - - }), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "addlabel" - CATEGORY = "KJNodes/text" - DESCRIPTION = """ -Creates a new with the given text, and concatenates it to -either above or below the input image. -Note that this changes the input image's height! 
-Fonts are loaded from this folder: -ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts -""" - - def addlabel(self, image, text_x, text_y, text, height, font_size, font_color, label_color, font, direction): - batch_size = image.shape[0] - width = image.shape[2] - - if font == "TTNorms-Black.otf": - font_path = os.path.join(script_directory, "fonts", "TTNorms-Black.otf") - else: - font_path = folder_paths.get_full_path("kjnodes_fonts", font) - label_image = Image.new("RGB", (width, height), label_color) - draw = ImageDraw.Draw(label_image) - font = ImageFont.truetype(font_path, font_size) - try: - draw.text((text_x, text_y), text, font=font, fill=font_color, features=['-liga']) - except: - draw.text((text_x, text_y), text, font=font, fill=font_color) - - label_image = np.array(label_image).astype(np.float32) / 255.0 - label_image = torch.from_numpy(label_image)[None, :, :, :] - # Duplicate the label image for the entire batch - label_batch = label_image.repeat(batch_size, 1, 1, 1) - - if direction == 'down': - combined_images = torch.cat((image, label_batch), dim=1) - elif direction == 'up': - combined_images = torch.cat((label_batch, image), dim=1) - - return (combined_images,) - - -class SoundReactive: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "sound_level": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 99999, "step": 0.01}), - "start_range_hz": ("INT", {"default": 150, "min": 0, "max": 9999, "step": 1}), - "end_range_hz": ("INT", {"default": 2000, "min": 0, "max": 9999, "step": 1}), - "multiplier": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 99999, "step": 0.01}), - "smoothing_factor": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), - "normalize": ("BOOLEAN", {"default": False}), - }, - } - - RETURN_TYPES = ("FLOAT","INT",) - RETURN_NAMES =("sound_level", "sound_level_int",) - FUNCTION = "react" - CATEGORY = "KJNodes/audio" - DESCRIPTION = """ -Reacts to the sound level of the input. -Uses your browsers sound input options and requires. -Meant to be used with realtime diffusion with autoqueue. -""" - - def react(self, sound_level, start_range_hz, end_range_hz, smoothing_factor, multiplier, normalize): - - sound_level *= multiplier - - if normalize: - sound_level /= 255 - - sound_level_int = int(sound_level) - return (sound_level, sound_level_int, ) - -class GenerateNoise: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), - "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), - "multiplier": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 4096, "step": 0.01}), - "constant_batch_noise": ("BOOLEAN", {"default": False}), - "normalize": ("BOOLEAN", {"default": False}), - }, - "optional": { - "model": ("MODEL", ), - "sigmas": ("SIGMAS", ), - } - } - - RETURN_TYPES = ("LATENT",) - FUNCTION = "generatenoise" - CATEGORY = "KJNodes/noise" - DESCRIPTION = """ -Generates noise for injection or to be used as empty latents on samplers with add_noise off. 
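# A minimal sketch of seeded latent-shaped noise as described above: a
# (B, 4, H/8, W/8) tensor, optionally normalised to unit standard deviation
# and optionally repeating frame 0 across the batch. Sizes are hypothetical.
import torch

batch, width, height, seed = 4, 512, 512, 123
gen = torch.manual_seed(seed)
noise = torch.randn([batch, 4, height // 8, width // 8], generator=gen, device="cpu")
noise = noise / noise.std()               # normalize
noise = noise[0].repeat(batch, 1, 1, 1)   # constant_batch_noise behaviour
print(noise.shape)                        # torch.Size([4, 4, 64, 64])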
-""" - - def generatenoise(self, batch_size, width, height, seed, multiplier, constant_batch_noise, normalize, sigmas=None, model=None): - - generator = torch.manual_seed(seed) - noise = torch.randn([batch_size, 4, height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu") - if sigmas is not None: - sigma = sigmas[0] - sigmas[-1] - sigma /= model.model.latent_format.scale_factor - noise *= sigma - - noise *=multiplier - - if normalize: - noise = noise / noise.std() - if constant_batch_noise: - noise = noise[0].repeat(batch_size, 1, 1, 1) - return ({"samples":noise}, ) - -def camera_embeddings(elevation, azimuth): - elevation = torch.as_tensor([elevation]) - azimuth = torch.as_tensor([azimuth]) - embeddings = torch.stack( - [ - torch.deg2rad( - (90 - elevation) - (90) - ), # Zero123 polar is 90-elevation - torch.sin(torch.deg2rad(azimuth)), - torch.cos(torch.deg2rad(azimuth)), - torch.deg2rad( - 90 - torch.full_like(elevation, 0) - ), - ], dim=-1).unsqueeze(1) - - return embeddings - -def interpolate_angle(start, end, fraction): - # Calculate the difference in angles and adjust for wraparound if necessary - diff = (end - start + 540) % 360 - 180 - # Apply fraction to the difference - interpolated = start + fraction * diff - # Normalize the result to be within the range of -180 to 180 - return (interpolated + 180) % 360 - 180 - - -class StableZero123_BatchSchedule: - @classmethod - def INPUT_TYPES(s): - return {"required": { "clip_vision": ("CLIP_VISION",), - "init_image": ("IMAGE",), - "vae": ("VAE",), - "width": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), - "height": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), - "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), - "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), - "azimuth_points_string": ("STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": True}), - "elevation_points_string": ("STRING", {"default": "0:(0.0),\n7:(0.0),\n15:(0.0)\n", "multiline": True}), - }} - - RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") - RETURN_NAMES = ("positive", "negative", "latent") - FUNCTION = "encode" - CATEGORY = "KJNodes/experimental" - - def encode(self, clip_vision, init_image, vae, width, height, batch_size, azimuth_points_string, elevation_points_string, interpolation): - output = clip_vision.encode_image(init_image) - pooled = output.image_embeds.unsqueeze(0) - pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) - encode_pixels = pixels[:,:,:,:3] - t = vae.encode(encode_pixels) - - def ease_in(t): - return t * t - def ease_out(t): - return 1 - (1 - t) * (1 - t) - def ease_in_out(t): - return 3 * t * t - 2 * t * t * t - - # Parse the azimuth input string into a list of tuples - azimuth_points = [] - azimuth_points_string = azimuth_points_string.rstrip(',\n') - for point_str in azimuth_points_string.split(','): - frame_str, azimuth_str = point_str.split(':') - frame = int(frame_str.strip()) - azimuth = float(azimuth_str.strip()[1:-1]) - azimuth_points.append((frame, azimuth)) - # Sort the points by frame number - azimuth_points.sort(key=lambda x: x[0]) - - # Parse the elevation input string into a list of tuples - elevation_points = [] - elevation_points_string = elevation_points_string.rstrip(',\n') - for point_str in elevation_points_string.split(','): - frame_str, elevation_str = point_str.split(':') - frame = int(frame_str.strip()) - 
elevation_val = float(elevation_str.strip()[1:-1]) - elevation_points.append((frame, elevation_val)) - # Sort the points by frame number - elevation_points.sort(key=lambda x: x[0]) - - # Index of the next point to interpolate towards - next_point = 1 - next_elevation_point = 1 - - positive_cond_out = [] - positive_pooled_out = [] - negative_cond_out = [] - negative_pooled_out = [] - - #azimuth interpolation - for i in range(batch_size): - # Find the interpolated azimuth for the current frame - while next_point < len(azimuth_points) and i >= azimuth_points[next_point][0]: - next_point += 1 - # If next_point is equal to the length of points, we've gone past the last point - if next_point == len(azimuth_points): - next_point -= 1 # Set next_point to the last index of points - prev_point = max(next_point - 1, 0) # Ensure prev_point is not less than 0 - - # Calculate fraction - if azimuth_points[next_point][0] != azimuth_points[prev_point][0]: # Prevent division by zero - fraction = (i - azimuth_points[prev_point][0]) / (azimuth_points[next_point][0] - azimuth_points[prev_point][0]) - if interpolation == "ease_in": - fraction = ease_in(fraction) - elif interpolation == "ease_out": - fraction = ease_out(fraction) - elif interpolation == "ease_in_out": - fraction = ease_in_out(fraction) - - # Use the new interpolate_angle function - interpolated_azimuth = interpolate_angle(azimuth_points[prev_point][1], azimuth_points[next_point][1], fraction) - else: - interpolated_azimuth = azimuth_points[prev_point][1] - # Interpolate the elevation - next_elevation_point = 1 - while next_elevation_point < len(elevation_points) and i >= elevation_points[next_elevation_point][0]: - next_elevation_point += 1 - if next_elevation_point == len(elevation_points): - next_elevation_point -= 1 - prev_elevation_point = max(next_elevation_point - 1, 0) - - if elevation_points[next_elevation_point][0] != elevation_points[prev_elevation_point][0]: - fraction = (i - elevation_points[prev_elevation_point][0]) / (elevation_points[next_elevation_point][0] - elevation_points[prev_elevation_point][0]) - if interpolation == "ease_in": - fraction = ease_in(fraction) - elif interpolation == "ease_out": - fraction = ease_out(fraction) - elif interpolation == "ease_in_out": - fraction = ease_in_out(fraction) - - interpolated_elevation = interpolate_angle(elevation_points[prev_elevation_point][1], elevation_points[next_elevation_point][1], fraction) - else: - interpolated_elevation = elevation_points[prev_elevation_point][1] - - cam_embeds = camera_embeddings(interpolated_elevation, interpolated_azimuth) - cond = torch.cat([pooled, cam_embeds.repeat((pooled.shape[0], 1, 1))], dim=-1) - - positive_pooled_out.append(t) - positive_cond_out.append(cond) - negative_pooled_out.append(torch.zeros_like(t)) - negative_cond_out.append(torch.zeros_like(pooled)) - - # Concatenate the conditions and pooled outputs - final_positive_cond = torch.cat(positive_cond_out, dim=0) - final_positive_pooled = torch.cat(positive_pooled_out, dim=0) - final_negative_cond = torch.cat(negative_cond_out, dim=0) - final_negative_pooled = torch.cat(negative_pooled_out, dim=0) - - # Structure the final output - final_positive = [[final_positive_cond, {"concat_latent_image": final_positive_pooled}]] - final_negative = [[final_negative_cond, {"concat_latent_image": final_negative_pooled}]] - - latent = torch.zeros([batch_size, 4, height // 8, width // 8]) - return (final_positive, final_negative, {"samples": latent}) - -def linear_interpolate(start, end, fraction): - 
return start + (end - start) * fraction - -class SV3D_BatchSchedule: - @classmethod - def INPUT_TYPES(s): - return {"required": { "clip_vision": ("CLIP_VISION",), - "init_image": ("IMAGE",), - "vae": ("VAE",), - "width": ("INT", {"default": 576, "min": 16, "max": MAX_RESOLUTION, "step": 8}), - "height": ("INT", {"default": 576, "min": 16, "max": MAX_RESOLUTION, "step": 8}), - "batch_size": ("INT", {"default": 21, "min": 1, "max": 4096}), - "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), - "azimuth_points_string": ("STRING", {"default": "0:(0.0),\n9:(180.0),\n20:(360.0)\n", "multiline": True}), - "elevation_points_string": ("STRING", {"default": "0:(0.0),\n9:(0.0),\n20:(0.0)\n", "multiline": True}), - }} - - RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") - RETURN_NAMES = ("positive", "negative", "latent") - FUNCTION = "encode" - CATEGORY = "KJNodes/experimental" - DESCRIPTION = """ -Allow scheduling of the azimuth and elevation conditions for SV3D. -Note that SV3D is still a video model and the schedule needs to always go forward -https://huggingface.co/stabilityai/sv3d -""" - - def encode(self, clip_vision, init_image, vae, width, height, batch_size, azimuth_points_string, elevation_points_string, interpolation): - output = clip_vision.encode_image(init_image) - pooled = output.image_embeds.unsqueeze(0) - pixels = comfy.utils.common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) - encode_pixels = pixels[:,:,:,:3] - t = vae.encode(encode_pixels) - - def ease_in(t): - return t * t - def ease_out(t): - return 1 - (1 - t) * (1 - t) - def ease_in_out(t): - return 3 * t * t - 2 * t * t * t - - # Parse the azimuth input string into a list of tuples - azimuth_points = [] - azimuth_points_string = azimuth_points_string.rstrip(',\n') - for point_str in azimuth_points_string.split(','): - frame_str, azimuth_str = point_str.split(':') - frame = int(frame_str.strip()) - azimuth = float(azimuth_str.strip()[1:-1]) - azimuth_points.append((frame, azimuth)) - # Sort the points by frame number - azimuth_points.sort(key=lambda x: x[0]) - - # Parse the elevation input string into a list of tuples - elevation_points = [] - elevation_points_string = elevation_points_string.rstrip(',\n') - for point_str in elevation_points_string.split(','): - frame_str, elevation_str = point_str.split(':') - frame = int(frame_str.strip()) - elevation_val = float(elevation_str.strip()[1:-1]) - elevation_points.append((frame, elevation_val)) - # Sort the points by frame number - elevation_points.sort(key=lambda x: x[0]) - - # Index of the next point to interpolate towards - next_point = 1 - next_elevation_point = 1 - elevations = [] - azimuths = [] - # For azimuth interpolation - for i in range(batch_size): - # Find the interpolated azimuth for the current frame - while next_point < len(azimuth_points) and i >= azimuth_points[next_point][0]: - next_point += 1 - if next_point == len(azimuth_points): - next_point -= 1 - prev_point = max(next_point - 1, 0) - - if azimuth_points[next_point][0] != azimuth_points[prev_point][0]: - fraction = (i - azimuth_points[prev_point][0]) / (azimuth_points[next_point][0] - azimuth_points[prev_point][0]) - # Apply the ease function to the fraction - if interpolation == "ease_in": - fraction = ease_in(fraction) - elif interpolation == "ease_out": - fraction = ease_out(fraction) - elif interpolation == "ease_in_out": - fraction = ease_in_out(fraction) - - interpolated_azimuth = linear_interpolate(azimuth_points[prev_point][1], 
azimuth_points[next_point][1], fraction) - else: - interpolated_azimuth = azimuth_points[prev_point][1] - - # Interpolate the elevation - next_elevation_point = 1 - while next_elevation_point < len(elevation_points) and i >= elevation_points[next_elevation_point][0]: - next_elevation_point += 1 - if next_elevation_point == len(elevation_points): - next_elevation_point -= 1 - prev_elevation_point = max(next_elevation_point - 1, 0) - - if elevation_points[next_elevation_point][0] != elevation_points[prev_elevation_point][0]: - fraction = (i - elevation_points[prev_elevation_point][0]) / (elevation_points[next_elevation_point][0] - elevation_points[prev_elevation_point][0]) - # Apply the ease function to the fraction - if interpolation == "ease_in": - fraction = ease_in(fraction) - elif interpolation == "ease_out": - fraction = ease_out(fraction) - elif interpolation == "ease_in_out": - fraction = ease_in_out(fraction) - - interpolated_elevation = linear_interpolate(elevation_points[prev_elevation_point][1], elevation_points[next_elevation_point][1], fraction) - else: - interpolated_elevation = elevation_points[prev_elevation_point][1] - - azimuths.append(interpolated_azimuth) - elevations.append(interpolated_elevation) - - print("azimuths", azimuths) - print("elevations", elevations) - - # Structure the final output - final_positive = [[pooled, {"concat_latent_image": t, "elevation": elevations, "azimuth": azimuths}]] - final_negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t),"elevation": elevations, "azimuth": azimuths}]] - - latent = torch.zeros([batch_size, 4, height // 8, width // 8]) - return (final_positive, final_negative, {"samples": latent}) - -class ImageBatchRepeatInterleaving: - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "repeat" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Repeats each image in a batch by the specified number of times. -Example batch of 5 images: 0, 1 ,2, 3, 4 -with repeats 2 becomes batch of 10 images: 0, 0, 1, 1, 2, 2, 3, 3, 4, 4 -""" - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "images": ("IMAGE",), - "repeats": ("INT", {"default": 1, "min": 1, "max": 4096}), - }, - } - - def repeat(self, images, repeats): - - repeated_images = torch.repeat_interleave(images, repeats=repeats, dim=0) - return (repeated_images, ) - -class NormalizedAmplitudeToMask: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "normalized_amp": ("NORMALIZED_AMPLITUDE",), - "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), - "frame_offset": ("INT", {"default": 0,"min": -255, "max": 255, "step": 1}), - "location_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), - "location_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), - "size": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), - "shape": ( - [ - 'none', - 'circle', - 'square', - 'triangle', - ], - { - "default": 'none' - }), - "color": ( - [ - 'white', - 'amplitude', - ], - { - "default": 'amplitude' - }), - },} - - CATEGORY = "KJNodes/audio" - RETURN_TYPES = ("MASK",) - FUNCTION = "convert" - DESCRIPTION = """ -Works as a bridge to the AudioScheduler -nodes: -https://github.com/a1lazydog/ComfyUI-AudioScheduler -Creates masks based on the normalized amplitude. 
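# A minimal sketch of the amplitude-to-mask idea described above: the
# normalised amplitude scales both the shape size and, in 'amplitude' colour
# mode, the grey value. The amplitude values are hypothetical.
import numpy as np
import torch
from PIL import Image, ImageDraw

normalized_amp = np.clip([0.2, 0.7, 1.0], 0.0, 1.0)
out = []
for amp in normalized_amp:
    gray = int(amp * 255)
    img = Image.new("L", (256, 256), 0)
    radius = 64 * amp
    ImageDraw.Draw(img).ellipse(
        [128 - radius, 128 - radius, 128 + radius, 128 + radius], fill=gray)
    out.append(torch.from_numpy(np.array(img)).float() / 255.0)
print(torch.stack(out).shape)            # torch.Size([3, 256, 256])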
-""" - - def convert(self, normalized_amp, width, height, frame_offset, shape, location_x, location_y, size, color): - # Ensure normalized_amp is an array and within the range [0, 1] - normalized_amp = np.clip(normalized_amp, 0.0, 1.0) - - # Offset the amplitude values by rolling the array - normalized_amp = np.roll(normalized_amp, frame_offset) - - # Initialize an empty list to hold the image tensors - out = [] - # Iterate over each amplitude value to create an image - for amp in normalized_amp: - # Scale the amplitude value to cover the full range of grayscale values - if color == 'amplitude': - grayscale_value = int(amp * 255) - elif color == 'white': - grayscale_value = 255 - # Convert the grayscale value to an RGB format - gray_color = (grayscale_value, grayscale_value, grayscale_value) - finalsize = size * amp - - if shape == 'none': - shapeimage = Image.new("RGB", (width, height), gray_color) - else: - shapeimage = Image.new("RGB", (width, height), "black") - - draw = ImageDraw.Draw(shapeimage) - if shape == 'circle' or shape == 'square': - # Define the bounding box for the shape - left_up_point = (location_x - finalsize, location_y - finalsize) - right_down_point = (location_x + finalsize,location_y + finalsize) - two_points = [left_up_point, right_down_point] - - if shape == 'circle': - draw.ellipse(two_points, fill=gray_color) - elif shape == 'square': - draw.rectangle(two_points, fill=gray_color) - - elif shape == 'triangle': - # Define the points for the triangle - left_up_point = (location_x - finalsize, location_y + finalsize) # bottom left - right_down_point = (location_x + finalsize, location_y + finalsize) # bottom right - top_point = (location_x, location_y) # top point - draw.polygon([top_point, left_up_point, right_down_point], fill=gray_color) - - shapeimage = pil2tensor(shapeimage) - mask = shapeimage[:, :, :, 0] - out.append(mask) - - return (torch.cat(out, dim=0),) - -class OffsetMaskByNormalizedAmplitude: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "normalized_amp": ("NORMALIZED_AMPLITUDE",), - "mask": ("MASK",), - "x": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), - "y": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), - "rotate": ("BOOLEAN", { "default": False }), - "angle_multiplier": ("FLOAT", { "default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number" }), - } - } - - RETURN_TYPES = ("MASK",) - RETURN_NAMES = ("mask",) - FUNCTION = "offset" - CATEGORY = "KJNodes/audio" - DESCRIPTION = """ -Works as a bridge to the AudioScheduler -nodes: -https://github.com/a1lazydog/ComfyUI-AudioScheduler -Offsets masks based on the normalized amplitude. 
-""" - - def offset(self, mask, x, y, angle_multiplier, rotate, normalized_amp): - - # Ensure normalized_amp is an array and within the range [0, 1] - offsetmask = mask.clone() - normalized_amp = np.clip(normalized_amp, 0.0, 1.0) - - batch_size, height, width = mask.shape - - if rotate: - for i in range(batch_size): - rotation_amp = int(normalized_amp[i] * (360 * angle_multiplier)) - rotation_angle = rotation_amp - offsetmask[i] = TF.rotate(offsetmask[i].unsqueeze(0), rotation_angle).squeeze(0) - if x != 0 or y != 0: - for i in range(batch_size): - offset_amp = normalized_amp[i] * 10 - shift_x = min(x*offset_amp, width-1) - shift_y = min(y*offset_amp, height-1) - if shift_x != 0: - offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_x), dims=1) - if shift_y != 0: - offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_y), dims=0) - - return offsetmask, - -class ImageTransformByNormalizedAmplitude: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "normalized_amp": ("NORMALIZED_AMPLITUDE",), - "zoom_scale": ("FLOAT", { "default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number" }), - "x_offset": ("INT", { "default": 0, "min": (1 -MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number" }), - "y_offset": ("INT", { "default": 0, "min": (1 -MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number" }), - "cumulative": ("BOOLEAN", { "default": False }), - "image": ("IMAGE",), - }} - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "amptransform" - CATEGORY = "KJNodes/audio" - DESCRIPTION = """ -Works as a bridge to the AudioScheduler -nodes: -https://github.com/a1lazydog/ComfyUI-AudioScheduler -Transforms image based on the normalized amplitude. -""" - - def amptransform(self, image, normalized_amp, zoom_scale, cumulative, x_offset, y_offset): - # Ensure normalized_amp is an array and within the range [0, 1] - normalized_amp = np.clip(normalized_amp, 0.0, 1.0) - transformed_images = [] - - # Initialize the cumulative zoom factor - prev_amp = 0.0 - - for i in range(image.shape[0]): - img = image[i] # Get the i-th image in the batch - amp = normalized_amp[i] # Get the corresponding amplitude value - - # Incrementally increase the cumulative zoom factor - if cumulative: - prev_amp += amp - amp += prev_amp - - # Convert the image tensor from BxHxWxC to CxHxW format expected by torchvision - img = img.permute(2, 0, 1) - - # Convert PyTorch tensor to PIL Image for processing - pil_img = TF.to_pil_image(img) - - # Calculate the crop size based on the amplitude - width, height = pil_img.size - crop_size = int(min(width, height) * (1 - amp * zoom_scale)) - crop_size = max(crop_size, 1) - - # Calculate the crop box coordinates (centered crop) - left = (width - crop_size) // 2 - top = (height - crop_size) // 2 - right = (width + crop_size) // 2 - bottom = (height + crop_size) // 2 - - # Crop and resize back to original size - cropped_img = TF.crop(pil_img, top, left, crop_size, crop_size) - resized_img = TF.resize(cropped_img, (height, width)) - - # Convert back to tensor in CxHxW format - tensor_img = TF.to_tensor(resized_img) - - # Convert the tensor back to BxHxWxC format - tensor_img = tensor_img.permute(1, 2, 0) - - # Offset the image based on the amplitude - offset_amp = amp * 10 # Calculate the offset magnitude based on the amplitude - shift_x = min(x_offset * offset_amp, img.shape[1] - 1) # Calculate the shift in x direction - shift_y = min(y_offset * offset_amp, img.shape[0] - 1) # Calculate the shift in y direction - - # Apply the offset 
to the image tensor - if shift_x != 0: - tensor_img = torch.roll(tensor_img, shifts=int(shift_x), dims=1) - if shift_y != 0: - tensor_img = torch.roll(tensor_img, shifts=int(shift_y), dims=0) - - # Add to the list - transformed_images.append(tensor_img) - - # Stack all transformed images into a batch - transformed_batch = torch.stack(transformed_images) - - return (transformed_batch,) - -def parse_coordinates(coordinates_str): - coordinates = {} - pattern = r'(\d+):\((\d+),(\d+)\)' - matches = re.findall(pattern, coordinates_str) - for match in matches: - index, x, y = map(int, match) - coordinates[index] = (x, y) - return coordinates - -def interpolate_coordinates(coordinates_dict, batch_size): - sorted_coords = sorted(coordinates_dict.items()) - interpolated = {} - - for i, ((index1, (x1, y1)), (index2, (x2, y2))) in enumerate(zip(sorted_coords, sorted_coords[1:])): - distance = index2 - index1 - x_step = (x2 - x1) / distance - y_step = (y2 - y1) / distance - - for j in range(distance): - interpolated_x = round(x1 + j * x_step) - interpolated_y = round(y1 + j * y_step) - interpolated[index1 + j] = (interpolated_x, interpolated_y) - interpolated[sorted_coords[-1][0]] = sorted_coords[-1][1] - - # Ensure we have coordinates for all indices in the batch - last_index, last_coords = sorted_coords[-1] - for i in range(last_index + 1, batch_size): - interpolated[i] = last_coords - - return interpolated - -def interpolate_coordinates_with_curves(coordinates_dict, batch_size): - from scipy.interpolate import CubicSpline - sorted_coords = sorted(coordinates_dict.items()) - x_coords, y_coords = zip(*[coord for index, coord in sorted_coords]) - - # Create the spline curve functions - indices = np.array([index for index, coord in sorted_coords]) - cs_x = CubicSpline(indices, x_coords) - cs_y = CubicSpline(indices, y_coords) - - # Generate interpolated coordinates using the spline functions - interpolated_indices = np.arange(0, batch_size) - interpolated_x = cs_x(interpolated_indices) - interpolated_y = cs_y(interpolated_indices) - - # Round the interpolated coordinates and create the dictionary - interpolated = {i: (round(x), round(y)) for i, (x, y) in enumerate(zip(interpolated_x, interpolated_y))} - return interpolated - -def plot_to_tensor(coordinates_dict, interpolated_dict, height, width, box_size): - from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas - import matplotlib.patches as patches - - original_x, original_y = zip(*coordinates_dict.values()) - interpolated_x, interpolated_y = zip(*interpolated_dict.values()) - - fig, ax = plt.subplots(figsize=(width/100, height/100), dpi=100) - ax.scatter(original_x, original_y, color='blue', label='Original Points') - ax.scatter(interpolated_x, interpolated_y, color='red', alpha=0.5, label='Interpolated Points') - ax.plot(interpolated_x, interpolated_y, color='grey', linestyle='--', linewidth=0.5) - # Draw a box at each interpolated coordinate - for x, y in interpolated_dict.values(): - rect = patches.Rectangle((x - box_size/2, y - box_size/2), box_size, box_size, - linewidth=1, edgecolor='green', facecolor='none') - ax.add_patch(rect) - ax.set_title('Interpolated Coordinates') - ax.set_xlabel('X Coordinate') - ax.set_ylabel('Y Coordinate') - ax.legend() - ax.set_xlim(0, width) # Set the x-axis to match the input latent width - ax.set_ylim(height, 0) # Set the y-axis to match the input latent height, with (0,0) at top-left - - canvas = FigureCanvas(fig) - canvas.draw() - - width, height = fig.get_size_inches() * fig.get_dpi() - 
image_np = np.frombuffer(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3) - - image_tensor = torch.from_numpy(image_np).float() / 255.0 - image_tensor = image_tensor.unsqueeze(0) - - plt.close(fig) - - return image_tensor - -class GLIGENTextBoxApplyBatch: - @classmethod - def INPUT_TYPES(s): - return {"required": {"conditioning_to": ("CONDITIONING", ), - "latents": ("LATENT", ), - "clip": ("CLIP", ), - "gligen_textbox_model": ("GLIGEN", ), - "text": ("STRING", {"multiline": True}), - "width": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}), - "height": ("INT", {"default": 64, "min": 8, "max": MAX_RESOLUTION, "step": 8}), - "coordinates": ("STRING", {"multiline": True}), - "interpolation": ( - [ - 'straight', - 'CubicSpline', - ], - { - "default": 'CubicSpline' - }), - }} - RETURN_TYPES = ("CONDITIONING", "IMAGE",) - FUNCTION = "append" - CATEGORY = "KJNodes/experimental" - DESCRIPTION = """ -Experimental, does not function yet as ComfyUI base changes are needed -""" - - def append(self, latents, conditioning_to, clip, gligen_textbox_model, text, width, height, coordinates, interpolation): - - coordinates_dict = parse_coordinates(coordinates) - batch_size = sum(tensor.size(0) for tensor in latents.values()) - c = [] - cond, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True) - - # Interpolate coordinates for the entire batch - if interpolation == 'CubicSpline': - interpolated_coords = interpolate_coordinates_with_curves(coordinates_dict, batch_size) - if interpolation == 'straight': - interpolated_coords = interpolate_coordinates(coordinates_dict, batch_size) - - plot_image_tensor = plot_to_tensor(coordinates_dict, interpolated_coords, 512, 512, height) - for t in conditioning_to: - n = [t[0], t[1].copy()] - - position_params_batch = [[] for _ in range(batch_size)] # Initialize a list of empty lists for each batch item - - for i in range(batch_size): - x_position, y_position = interpolated_coords[i] - position_param = (cond_pooled, height // 8, width // 8, y_position // 8, x_position // 8) - position_params_batch[i].append(position_param) # Append position_param to the correct sublist - print("x ",x_position, "y ", y_position) - prev = [] - if "gligen" in n[1]: - prev = n[1]['gligen'][2] - else: - prev = [[] for _ in range(batch_size)] - # Concatenate prev and position_params_batch, ensuring both are lists of lists - # and each sublist corresponds to a batch item - combined_position_params = [prev_item + batch_item for prev_item, batch_item in zip(prev, position_params_batch)] - n[1]['gligen'] = ("position", gligen_textbox_model, combined_position_params) - c.append(n) - - return (c, plot_image_tensor,) - -class ImageUpscaleWithModelBatched: - @classmethod - def INPUT_TYPES(s): - return {"required": { "upscale_model": ("UPSCALE_MODEL",), - "images": ("IMAGE",), - "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), - }} - RETURN_TYPES = ("IMAGE",) - FUNCTION = "upscale" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Same as ComfyUI native model upscaling node, -but allows setting sub-batches for reduced VRAM usage. 
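The VRAM saving comes from plain sub-batching, sketched below with illustrative names (`model_fn` stands in for the loaded upscale model): only one chunk is run at a time, and each finished chunk is moved to the CPU before the next one starts.

```python
import torch

def run_in_sub_batches(model_fn, images_bchw, per_batch=16):
    chunks = []
    for start in range(0, images_bchw.shape[0], per_batch):
        out = model_fn(images_bchw[start:start + per_batch])
        chunks.append(out.cpu())   # keep only finished results in system RAM
    return torch.cat(chunks, dim=0)
```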
-""" - def upscale(self, upscale_model, images, per_batch): - - device = model_management.get_torch_device() - upscale_model.to(device) - in_img = images.movedim(-1,-3).to(device) - - steps = in_img.shape[0] - pbar = comfy.utils.ProgressBar(steps) - t = [] - - for start_idx in range(0, in_img.shape[0], per_batch): - sub_images = upscale_model(in_img[start_idx:start_idx+per_batch]) - t.append(sub_images.cpu()) - # Calculate the number of images processed in this batch - batch_count = sub_images.shape[0] - # Update the progress bar by the number of images processed in this batch - pbar.update(batch_count) - upscale_model.cpu() - - t = torch.cat(t, dim=0).permute(0, 2, 3, 1).cpu() - - return (t,) - -class ImageNormalize_Neg1_To_1: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "images": ("IMAGE",), - - }} - RETURN_TYPES = ("IMAGE",) - FUNCTION = "normalize" - CATEGORY = "KJNodes/misc" - DESCRIPTION = """ -Normalize the images to be in the range [-1, 1] -""" - - def normalize(self,images): - images = images * 2.0 - 1.0 - return (images,) - -import comfy.sample -from nodes import CLIPTextEncode -folder_paths.add_model_folder_path("intristic_loras", os.path.join(script_directory, "intristic_loras")) - -class Intrinsic_lora_sampling: - def __init__(self): - self.loaded_lora = None - - @classmethod - def INPUT_TYPES(s): - return {"required": { "model": ("MODEL",), - "lora_name": (folder_paths.get_filename_list("intristic_loras"), ), - "task": ( - [ - 'depth map', - 'surface normals', - 'albedo', - 'shading', - ], - { - "default": 'depth map' - }), - "text": ("STRING", {"multiline": True, "default": ""}), - "clip": ("CLIP", ), - "vae": ("VAE", ), - "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), - }, - "optional": { - "image": ("IMAGE",), - "optional_latent": ("LATENT",), - }, - } - - RETURN_TYPES = ("IMAGE", "LATENT",) - FUNCTION = "onestepsample" - CATEGORY = "KJNodes" - DESCRIPTION = """ -Sampler to use the intrinsic loras: -https://github.com/duxiaodan/intrinsic-lora -These LoRAs are tiny and thus included -with this node pack. 
-""" - - def onestepsample(self, model, lora_name, clip, vae, text, task, per_batch, image=None, optional_latent=None): - pbar = comfy.utils.ProgressBar(3) - - if optional_latent is None: - image_list = [] - for start_idx in range(0, image.shape[0], per_batch): - sub_pixels = vae.vae_encode_crop_pixels(image[start_idx:start_idx+per_batch]) - image_list.append(vae.encode(sub_pixels[:,:,:,:3])) - sample = torch.cat(image_list, dim=0) - else: - sample = optional_latent["samples"] - noise = torch.zeros(sample.size(), dtype=sample.dtype, layout=sample.layout, device="cpu") - prompt = task + "," + text - positive, = CLIPTextEncode.encode(self, clip, prompt) - negative = positive #negative shouldn't do anything in this scenario - - pbar.update(1) - - #custom model sampling to pass latent through as it is - class X0_PassThrough(comfy.model_sampling.EPS): - def calculate_denoised(self, sigma, model_output, model_input): - return model_output - def calculate_input(self, sigma, noise): - return noise - sampling_base = comfy.model_sampling.ModelSamplingDiscrete - sampling_type = X0_PassThrough - - class ModelSamplingAdvanced(sampling_base, sampling_type): - pass - model_sampling = ModelSamplingAdvanced(model.model.model_config) - - #load lora - model_clone = model.clone() - lora_path = folder_paths.get_full_path("intristic_loras", lora_name) - lora = comfy.utils.load_torch_file(lora_path, safe_load=True) - self.loaded_lora = (lora_path, lora) - - model_clone_with_lora = comfy.sd.load_lora_for_models(model_clone, None, lora, 1.0, 0)[0] - - model_clone_with_lora.add_object_patch("model_sampling", model_sampling) - - samples = {"samples": comfy.sample.sample(model_clone_with_lora, noise, 1, 1.0, "euler", "simple", positive, negative, sample, - denoise=1.0, disable_noise=True, start_step=0, last_step=1, - force_full_denoise=True, noise_mask=None, callback=None, disable_pbar=True, seed=None)} - pbar.update(1) - - decoded = [] - for start_idx in range(0, samples["samples"].shape[0], per_batch): - decoded.append(vae.decode(samples["samples"][start_idx:start_idx+per_batch])) - image_out = torch.cat(decoded, dim=0) - - pbar.update(1) - - if task == 'depth map': - imax = image_out.max() - imin = image_out.min() - image_out = (image_out-imin)/(imax-imin) - image_out = torch.max(image_out, dim=3, keepdim=True)[0].repeat(1, 1, 1, 3) - elif task == 'surface normals': - image_out = F.normalize(image_out * 2 - 1, dim=3) / 2 + 0.5 - image_out = 1.0 - image_out - else: - image_out = image_out.clamp(-1.,1.) - - return (image_out, samples,) - -class RemapMaskRange: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "mask": ("MASK",), - "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}), - "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}), - } - } - - RETURN_TYPES = ("MASK",) - RETURN_NAMES = ("mask",) - FUNCTION = "remap" - CATEGORY = "KJNodes/masking" - DESCRIPTION = """ -Sets new min and max values for the mask. 
-""" - - def remap(self, mask, min, max): - - # Find the maximum value in the mask - mask_max = torch.max(mask) - - # If the maximum mask value is zero, avoid division by zero by setting it to 1 - mask_max = mask_max if mask_max > 0 else 1 - - # Scale the mask values to the new range defined by min and max - # The highest pixel value in the mask will be scaled to max - scaled_mask = (mask / mask_max) * (max - min) + min - - # Clamp the values to ensure they are within [0.0, 1.0] - scaled_mask = torch.clamp(scaled_mask, min=0.0, max=1.0) - - return (scaled_mask, ) - -class LoadResAdapterNormalization: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "model": ("MODEL",), - "resadapter_path": (folder_paths.get_filename_list("checkpoints"), ) - } - } - - RETURN_TYPES = ("MODEL",) - FUNCTION = "load_res_adapter" - CATEGORY = "KJNodes/experimental" - - def load_res_adapter(self, model, resadapter_path): - print("ResAdapter: Checking ResAdapter path") - resadapter_full_path = folder_paths.get_full_path("checkpoints", resadapter_path) - if not os.path.exists(resadapter_full_path): - raise Exception("Invalid model path") - else: - print("ResAdapter: Loading ResAdapter normalization weights") - prefix_to_remove = 'diffusion_model.' - model_clone = model.clone() - norm_state_dict = comfy.utils.load_torch_file(resadapter_full_path) - new_values = {key[len(prefix_to_remove):]: value for key, value in norm_state_dict.items() if key.startswith(prefix_to_remove)} - print("ResAdapter: Attempting to add patches with ResAdapter weights") - try: - for key in model.model.diffusion_model.state_dict().keys(): - if key in new_values: - original_tensor = model.model.diffusion_model.state_dict()[key] - new_tensor = new_values[key].to(model.model.diffusion_model.dtype) - if original_tensor.shape == new_tensor.shape: - model_clone.add_object_patch(f"diffusion_model.{key}.data", new_tensor) - else: - print("ResAdapter: No match for key: ",key) - except: - raise Exception("Could not patch model, this way of patching was added to ComfyUI on March 3rd 2024, is your ComfyUI up to date?") - print("ResAdapter: Added resnet normalization patches") - return (model_clone, ) - -class Superprompt: - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "instruction_prompt": ("STRING", {"default": 'Expand the following prompt to add more detail', "multiline": True}), - "prompt": ("STRING", {"default": '', "multiline": True, "forceInput": True}), - "max_new_tokens": ("INT", {"default": 128, "min": 1, "max": 4096, "step": 1}), - } - } - - RETURN_TYPES = ("STRING",) - FUNCTION = "process" - CATEGORY = "KJNodes/text" - DESCRIPTION = """ -# SuperPrompt -A T5 model fine-tuned on the SuperPrompt dataset for -upsampling text prompts to more detailed descriptions. -Meant to be used as a pre-generation step for text-to-image -models that benefit from more detailed prompts. 
-https://huggingface.co/roborovski/superprompt-v1 -""" - - def process(self, instruction_prompt, prompt, max_new_tokens): - device = model_management.get_torch_device() - from transformers import T5Tokenizer, T5ForConditionalGeneration - - checkpoint_path = os.path.join(script_directory, "models","superprompt-v1") - tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small", legacy=False) - - model = T5ForConditionalGeneration.from_pretrained(checkpoint_path, device_map=device) - model.to(device) - input_text = instruction_prompt + ": " + prompt - print(input_text) - input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device) - outputs = model.generate(input_ids, max_new_tokens=max_new_tokens) - out = (tokenizer.decode(outputs[0])) - out = out.replace('', '') - out = out.replace('', '') - print(out) - - return (out, ) - -class RemapImageRange: - @classmethod - def INPUT_TYPES(s): - return {"required": { - "image": ("IMAGE",), - "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}), - "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}), - "clamp": ("BOOLEAN", {"default": True}), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "remap" - CATEGORY = "KJNodes/image" - DESCRIPTION = """ -Remaps the image values to the specified range. -""" - - def remap(self, image, min, max, clamp): - if image.dtype == torch.float16: - image = image.to(torch.float32) - image = min + image * (max - min) - if clamp: - image = torch.clamp(image, min=0.0, max=1.0) - return (image, ) - -class CameraPoseVisualizer: - - @classmethod - def INPUT_TYPES(s): - return {"required": { - "pose_file_path": ("STRING", {"default": 'pose file path here', "multiline": False}), - "sample_stride": ("INT", {"default": 1,"min": 0, "max": 100, "step": 1}), - "frames": ("INT", {"default": 16,"min": 0, "max": 100, "step": 1}), - "base_xval": ("FLOAT", {"default": 0.5,"min": 0, "max": 100, "step": 0.01}), - "zval": ("FLOAT", {"default": 2.0,"min": 0, "max": 100, "step": 0.01}), - "use_exact_fx": ("BOOLEAN", {"default": True}), - "relative_c2w": ("BOOLEAN", {"default": True}), - "x_min": ("FLOAT", {"default": -5.0,"min": -100, "max": 100, "step": 0.01}), - "x_max": ("FLOAT", {"default": 5.0,"min": -100, "max": 100, "step": 0.01}), - "y_min": ("FLOAT", {"default": -5.0,"min": -100, "max": 100, "step": 0.01}), - "y_max": ("FLOAT", {"default": 5.0,"min": -100, "max": 100, "step": 0.01}), - "z_min": ("FLOAT", {"default": -5.0,"min": -100, "max": 100, "step": 0.01}), - "z_max": ("FLOAT", {"default": 5.0,"min": -100, "max": 100, "step": 0.01}), - "use_viewer": ("BOOLEAN", {"default": False}), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "plot" - CATEGORY = "KJNodes/misc" - DESCRIPTION = """ -Visualizes the camera poses from a .txt file with -RealEstate camera intrinsics and coordinates in a 3D plot. 
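Roughly, each pose line is split on spaces and only two pieces are used: token 1 as fx, and tokens 7 onward as the flattened 3x4 world-to-camera matrix. A sketch with a made-up line (identity rotation, zero translation):

```python
import numpy as np

line = "0 0.98 1.74 0.5 0.5 0 0 1 0 0 0 0 1 0 0 0 0 1 0"       # hypothetical pose line
tokens = line.split()
fx = float(tokens[1])                                           # used when use_exact_fx is on
w2c = np.asarray([float(v) for v in tokens[7:]]).reshape(3, 4)  # world-to-camera, 3x4
w2c_h = np.concatenate([w2c, [[0.0, 0.0, 0.0, 1.0]]], axis=0)   # homogeneous 4x4
c2w = np.linalg.inv(w2c_h)                                      # camera-to-world, as plotted
```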
-""" - - def plot(self, pose_file_path, sample_stride, frames, base_xval, zval, use_exact_fx, relative_c2w, x_min, x_max, y_min, y_max, z_min, z_max, use_viewer): - import matplotlib as mpl - import matplotlib.pyplot as plt - import io - from torchvision.transforms import ToTensor - self.fig = plt.figure(figsize=(18, 7)) - self.ax = self.fig.add_subplot(projection='3d') - self.plotly_data = None # plotly data traces - self.ax.set_aspect("auto") - self.ax.set_xlim(x_min, x_max) - self.ax.set_ylim(y_min, y_max) - self.ax.set_zlim(z_min, z_max) - self.ax.set_xlabel('x') - self.ax.set_ylabel('y') - self.ax.set_zlabel('z') - print('initialize camera pose visualizer') - with open(pose_file_path, 'r') as f: - poses = f.readlines() - w2cs = [np.asarray([float(p) for p in pose.strip().split(' ')[7:]]).reshape(3, 4) for pose in poses[1:]] - fxs = [float(pose.strip().split(' ')[1]) for pose in poses[1:]] - - cropped_length = frames * sample_stride - total_frames = len(w2cs) - start_frame_ind = random.randint(0, max(0, total_frames - cropped_length - 1)) - end_frame_ind = min(start_frame_ind + cropped_length, total_frames) - frame_ind = np.linspace(start_frame_ind, end_frame_ind - 1, frames, dtype=int) - w2cs = [w2cs[x] for x in frame_ind] - transform_matrix = np.asarray([[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]]).reshape(4, 4) - last_row = np.zeros((1, 4)) - last_row[0, -1] = 1.0 - w2cs = [np.concatenate((w2c, last_row), axis=0) for w2c in w2cs] - c2ws = self.get_c2w(w2cs, transform_matrix, relative_c2w) - - for frame_idx, c2w in enumerate(c2ws): - self.extrinsic2pyramid(c2w, frame_idx / frames, hw_ratio=1/1, base_xval=base_xval, - zval=(fxs[frame_idx] if use_exact_fx else zval)) - - cmap = mpl.cm.rainbow - norm = mpl.colors.Normalize(vmin=0, vmax=frames) - self.fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), ax=self.ax, orientation='vertical', label='Frame Number') - plt.title('Extrinsic Parameters') - plt.draw() - buf = io.BytesIO() - plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0) - buf.seek(0) - img = Image.open(buf) - tensor_img = ToTensor()(img) - buf.close() - tensor_img = tensor_img.permute(1, 2, 0).unsqueeze(0) - if use_viewer: - time.sleep(1) - plt.show() - return (tensor_img,) - - def extrinsic2pyramid(self, extrinsic, color_map='red', hw_ratio=1/1, base_xval=1, zval=3): - from mpl_toolkits.mplot3d.art3d import Poly3DCollection - vertex_std = np.array([[0, 0, 0, 1], - [base_xval, -base_xval * hw_ratio, zval, 1], - [base_xval, base_xval * hw_ratio, zval, 1], - [-base_xval, base_xval * hw_ratio, zval, 1], - [-base_xval, -base_xval * hw_ratio, zval, 1]]) - vertex_transformed = vertex_std @ extrinsic.T - meshes = [[vertex_transformed[0, :-1], vertex_transformed[1][:-1], vertex_transformed[2, :-1]], - [vertex_transformed[0, :-1], vertex_transformed[2, :-1], vertex_transformed[3, :-1]], - [vertex_transformed[0, :-1], vertex_transformed[3, :-1], vertex_transformed[4, :-1]], - [vertex_transformed[0, :-1], vertex_transformed[4, :-1], vertex_transformed[1, :-1]], - [vertex_transformed[1, :-1], vertex_transformed[2, :-1], vertex_transformed[3, :-1], vertex_transformed[4, :-1]]] - - color = color_map if isinstance(color_map, str) else plt.cm.rainbow(color_map) - - self.ax.add_collection3d( - Poly3DCollection(meshes, facecolors=color, linewidths=0.3, edgecolors=color, alpha=0.35)) - - def customize_legend(self, list_label): - from matplotlib.patches import Patch - list_handle = [] - for idx, label in enumerate(list_label): - color = plt.cm.rainbow(idx / 
len(list_label)) - patch = Patch(color=color, label=label) - list_handle.append(patch) - plt.legend(loc='right', bbox_to_anchor=(1.8, 0.5), handles=list_handle) - - def get_c2w(self, w2cs, transform_matrix, relative_c2w): - if relative_c2w: - target_cam_c2w = np.array([ - [1, 0, 0, 0], - [0, 1, 0, 0], - [0, 0, 1, 0], - [0, 0, 0, 1] - ]) - abs2rel = target_cam_c2w @ w2cs[0] - ret_poses = [target_cam_c2w, ] + [abs2rel @ np.linalg.inv(w2c) for w2c in w2cs[1:]] - else: - ret_poses = [np.linalg.inv(w2c) for w2c in w2cs] - ret_poses = [transform_matrix @ x for x in ret_poses] - return np.array(ret_poses, dtype=np.float32) - -class ImagePadForOutpaintMasked: - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "image": ("IMAGE",), - "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), - "feathering": ("INT", {"default": 40, "min": 0, "max": MAX_RESOLUTION, "step": 1}), - }, - "optional": { - "mask": ("MASK",), - } - } - - RETURN_TYPES = ("IMAGE", "MASK") - FUNCTION = "expand_image" - - CATEGORY = "image" - - def expand_image(self, image, left, top, right, bottom, feathering, mask=None): - B, H, W, C = image.size() - - new_image = torch.ones( - (B, H + top + bottom, W + left + right, C), - dtype=torch.float32, - ) * 0.5 - - new_image[:, top:top + H, left:left + W, :] = image - - if mask is None: - new_mask = torch.ones( - (H + top + bottom, W + left + right), - dtype=torch.float32, - ) - - t = torch.zeros( - (H, W), - dtype=torch.float32 - ) - else: - # If a mask is provided, pad it to fit the new image size - mask = F.pad(mask, (left, right, top, bottom), mode='constant', value=0) - mask = 1 - mask - t = torch.zeros_like(mask) - - - - if feathering > 0 and feathering * 2 < H and feathering * 2 < W: - - for i in range(H): - for j in range(W): - dt = i if top != 0 else H - db = H - i if bottom != 0 else H - - dl = j if left != 0 else W - dr = W - j if right != 0 else W - - d = min(dt, db, dl, dr) - - if d >= feathering: - continue - - v = (feathering - d) / feathering - - if mask is None: - t[i, j] = v * v - else: - t[:, top + i, left + j] = v * v - - if mask is None: - mask = new_mask.squeeze(0) - mask[top:top + H, left:left + W] = t - mask = mask.unsqueeze(0) - - return (new_image, mask,) - -class ImageAndMaskPreview(SaveImage): - def __init__(self): - self.output_dir = folder_paths.get_temp_directory() - self.type = "temp" - self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) - self.compress_level = 4 - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "mask_opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), - "mask_color": ("STRING", {"default": "255, 255, 255"}), - "pass_through": ("BOOLEAN", {"default": False}), - }, - "optional": { - "image": ("IMAGE",), - "mask": ("MASK",), - }, - "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, - } - RETURN_TYPES = ("IMAGE",) - RETURN_NAMES = ("composite",) - FUNCTION = "execute" - CATEGORY = "KJNodes" - DESCRIPTION = """ -Preview an image or a mask, when both inputs are used -composites the mask on top of the image. 
-with pass_through on the preview is disabled and the -composite is returned from the composite slot instead, -this allows for the preview to be passed for video combine -nodes for example. -""" - - def execute(self, mask_opacity, mask_color, pass_through, filename_prefix="ComfyUI", image=None, mask=None, prompt=None, extra_pnginfo=None): - if mask is not None and image is None: - preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3) - elif mask is None and image is not None: - preview = image - elif mask is not None and image is not None: - mask_adjusted = mask * mask_opacity - mask_image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3).clone() - - color_list = list(map(int, mask_color.split(', '))) - print(color_list[0]) - mask_image[:, :, :, 0] = color_list[0] // 255 # Red channel - mask_image[:, :, :, 1] = color_list[1] // 255 # Green channel - mask_image[:, :, :, 2] = color_list[2] // 255 # Blue channel - - preview, = ImageCompositeMasked.composite(self, image, mask_image, 0, 0, True, mask_adjusted) - if pass_through: - return (preview, ) - return(self.save_images(preview, filename_prefix, prompt, extra_pnginfo)) - - -class SplineEditor: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "points_store": ("STRING", {"multiline": False}), - "coordinates": ("STRING", {"multiline": False}), - "mask_width": ("INT", {"default": 512, "min": 8, "max": MAX_RESOLUTION, "step": 8}), - "mask_height": ("INT", {"default": 512, "min": 8, "max": MAX_RESOLUTION, "step": 18}), - "points_to_sample": ("INT", {"default": 4, "min": 2, "max": 1000, "step": 1}), - "interpolation": ( - [ - 'cardinal', - 'monotone', - 'basis', - 'linear', - 'step-before', - 'step-after', - 'polar', - 'polar-reverse', - ], - { - "default": 'cardinal' - }), - "tension": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), - "segmented": ("BOOLEAN", {"default": False}), - }, - } - - RETURN_TYPES = ("MASK", "STRING", "FLOAT") - FUNCTION = "splinedata" - - CATEGORY = "KJNodes/experimental" - - def splinedata(self, mask_width, mask_height, coordinates, interpolation, points_to_sample, points_store, tension, segmented): - print(coordinates) - coordinates = json.loads(coordinates) - print(coordinates) - - normalized_y_values = [ - 1.0 - (point['y'] / 512) - for point in coordinates - ] - - # Create a color map for grayscale intensities - color_map = lambda y: torch.full((mask_height, mask_width, 3), y, dtype=torch.float32) - - # Create image tensors for each normalized y value - image_tensors = [color_map(y) for y in normalized_y_values] - - # Batch the tensors - masks_out = torch.stack(image_tensors) - masks_out = masks_out.mean(dim=-1) - print(masks_out.shape) - return (masks_out, coordinates, normalized_y_values,) - -class StabilityAPI_SD3: - - @classmethod - def INPUT_TYPES(cls): - return { - "required": { - "prompt": ("STRING", {"multiline": True}), - "n_prompt": ("STRING", {"multiline": True}), - "seed": ("INT", {"default": 123,"min": 0, "max": 4294967294, "step": 1}), - "model": ( - [ - 'sd3', - 'sd3-turbo', - ], - { - "default": 'sd3' - }), - "aspect_ratio": ( - [ - '1:1', - '16:9', - '21:9', - '2:3', - '3:2', - '4:5', - '5:4', - '9:16', - '9:21', - ], - { - "default": '1:1' - }), - "output_format": ( - [ - 'png', - 'jpeg', - ], - { - "default": 'jpeg' - }), - }, - "optional": { - "api_key": ("STRING", {"multiline": True}), - "image": ("IMAGE",), - "img2img_strength": ("FLOAT", {"default": 0.5, "min": 
0.0, "max": 1.0, "step": 0.01}), - "disable_metadata": ("BOOLEAN", {"default": True}), - }, - } - - RETURN_TYPES = ("IMAGE",) - FUNCTION = "apicall" - - CATEGORY = "KJNodes/experimental" - DESCRIPTION = """ -## Calls StabilityAI API - -Although you may have multiple keys in your account, -you should use the same key for all requests to this API. - -Get your API key here: https://platform.stability.ai/account/keys -Recommended to set the key in the config.json -file under this -node packs folder. -# WARNING: -Otherwise the API key may get saved in the image metadata even -with "disable_metadata" on if the workflow includes save nodes -separate from this node. - -sd3 requires 6.5 credits per generation -sd3-turbo requires 4 credits per generation - -If no image is provided, mode is set to text-to-image - -""" - - def apicall(self, prompt, n_prompt, model, seed, aspect_ratio, output_format, - img2img_strength=0.5, image=None, disable_metadata=True, api_key=""): - from comfy.cli_args import args - if disable_metadata: - args.disable_metadata = True - else: - args.disable_metadata = False - - import requests - from io import BytesIO - from torchvision import transforms - - data = { - "mode": "text-to-image", - "prompt": prompt, - "model": model, - "seed": seed, - "output_format": output_format - } - - if image is not None: - image = image.permute(0, 3, 1, 2).squeeze(0) - to_pil = transforms.ToPILImage() - pil_image = to_pil(image) - # Save the PIL Image to a BytesIO object - buffer = BytesIO() - pil_image.save(buffer, format='PNG') - buffer.seek(0) - files = {"image": ("image.png", buffer, "image/png")} - - data["mode"] = "image-to-image" - data["image"] = pil_image - data["strength"] = img2img_strength - else: - data["aspect_ratio"] = aspect_ratio, - files = {"none": ''} - - if model != "sd3-turbo": - data["negative_prompt"] = n_prompt - - - headers={ - "accept": "image/*" - } - - if api_key != "": - headers["authorization"] = api_key - else: - config_file_path = os.path.join(script_directory,"config.json") - with open(config_file_path, 'r') as file: - config = json.load(file) - api_key_from_config = config.get("sai_api_key") - headers["authorization"] = api_key_from_config - - response = requests.post( - f"https://api.stability.ai/v2beta/stable-image/generate/sd3", - headers=headers, - files = files, - data = data, - ) - - if response.status_code == 200: - # Convert the response content to a PIL Image - image = Image.open(BytesIO(response.content)) - # Convert the PIL Image to a PyTorch tensor - transform = transforms.ToTensor() - tensor_image = transform(image) - tensor_image = tensor_image.unsqueeze(0) - tensor_image = tensor_image.permute(0, 2, 3, 1).cpu().float() - return (tensor_image,) - else: - try: - # Attempt to parse the response as JSON - error_data = response.json() - raise Exception(f"Server error: {error_data}") - except json.JSONDecodeError: - # If the response is not valid JSON, raise a different exception - raise Exception(f"Server error: {response.text}") - - -class MaskOrImageToWeight: - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "output_type": ( - [ - 'list', - 'list of lists', - 'pandas series', - ], - { - "default": 'list' - }), - }, - "optional": { - "images": ("IMAGE",), - "masks": ("MASK",), - }, - - } - RETURN_TYPES = ("FLOAT",) - FUNCTION = "execute" - CATEGORY = "KJNodes" - DESCRIPTION = """ -Gets the mean value of mask or image -and returns it as a float value. 
-""" - - def execute(self, output_type, images=None, masks=None): - mean_values = [] - if masks is not None and images is None: - for mask in masks: - mean_values.append(mask.mean().item()) - print(mean_values) - elif masks is None and images is not None: - for image in images: - mean_values.append(image.mean().item()) - elif masks is not None and images is not None: - raise Exception("MaskOrImageToWeight: Use either mask or image input only.") - - # Convert mean_values to the specified output_type - if output_type == 'list': - return mean_values, - elif output_type == 'list of lists': - return [[value] for value in mean_values], - elif output_type == 'pandas series': - try: - import pandas as pd - except: - raise Exception("MaskOrImageToWeight: pandas is not installed. Please install pandas to use this output_type") - return pd.Series(mean_values), - else: - raise ValueError(f"Unsupported output_type: {output_type}") -class FloatToMask: - - @classmethod - def INPUT_TYPES(s): - return { - "required": { - "input_values": ("FLOAT", {"forceInput": True, "default": 0}), - "width": ("INT", {"default": 100, "min": 1}), - "height": ("INT", {"default": 100, "min": 1}), - }, - } - RETURN_TYPES = ("MASK",) - FUNCTION = "execute" - CATEGORY = "KJNodes" - DESCRIPTION = """ -Generates a batch of masks based on the input float values. -The batch size is determined by the length of the input float values. -Each mask is generated with the specified width and height. -""" - - def execute(self, input_values, width, height): - import pandas as pd - # Ensure input_values is a list - if isinstance(input_values, (float, int)): - input_values = [input_values] - elif isinstance(input_values, pd.Series): - input_values = input_values.tolist() - elif isinstance(input_values, list) and all(isinstance(item, list) for item in input_values): - input_values = [item for sublist in input_values for item in sublist] - - # Generate a batch of masks based on the input_values - masks = [] - for value in input_values: - # Assuming value is a float between 0 and 1 representing the mask's intensity - mask = torch.ones((height, width), dtype=torch.float32) * value - masks.append(mask) - masks_out = torch.stack(masks, dim=0) - print(masks_out.shape) - return(masks_out,) - - -NODE_CLASS_MAPPINGS = { - "INTConstant": INTConstant, - "FloatConstant": FloatConstant, - "ConditioningMultiCombine": ConditioningMultiCombine, - "ConditioningSetMaskAndCombine": ConditioningSetMaskAndCombine, - "ConditioningSetMaskAndCombine3": ConditioningSetMaskAndCombine3, - "ConditioningSetMaskAndCombine4": ConditioningSetMaskAndCombine4, - "ConditioningSetMaskAndCombine5": ConditioningSetMaskAndCombine5, - "GrowMaskWithBlur": GrowMaskWithBlur, - "ColorToMask": ColorToMask, - "CreateGradientMask": CreateGradientMask, - "CreateTextMask": CreateTextMask, - "CreateAudioMask": CreateAudioMask, - "CreateFadeMask": CreateFadeMask, - "CreateFadeMaskAdvanced": CreateFadeMaskAdvanced, - "CreateFluidMask" :CreateFluidMask, - "VRAM_Debug" : VRAM_Debug, - "SomethingToString" : SomethingToString, - "CrossFadeImages": CrossFadeImages, - "EmptyLatentImagePresets": EmptyLatentImagePresets, - "ColorMatch": ColorMatch, - "GetImageRangeFromBatch": GetImageRangeFromBatch, - "SaveImageWithAlpha": SaveImageWithAlpha, - "ReverseImageBatch": ReverseImageBatch, - "ImageGridComposite2x2": ImageGridComposite2x2, - "ImageGridComposite3x3": ImageGridComposite3x3, - "ImageConcanate": ImageConcanate, - "ImageBatchTestPattern": ImageBatchTestPattern, - "ReplaceImagesInBatch": 
ReplaceImagesInBatch, - "BatchCropFromMask": BatchCropFromMask, - "BatchCropFromMaskAdvanced": BatchCropFromMaskAdvanced, - "FilterZeroMasksAndCorrespondingImages": FilterZeroMasksAndCorrespondingImages, - "InsertImageBatchByIndexes": InsertImageBatchByIndexes, - "BatchUncrop": BatchUncrop, - "BatchUncropAdvanced": BatchUncropAdvanced, - "BatchCLIPSeg": BatchCLIPSeg, - "RoundMask": RoundMask, - "ResizeMask": ResizeMask, - "OffsetMask": OffsetMask, - "WidgetToString": WidgetToString, - "CreateShapeMask": CreateShapeMask, - "CreateVoronoiMask": CreateVoronoiMask, - "CreateMagicMask": CreateMagicMask, - "BboxToInt": BboxToInt, - "SplitBboxes": SplitBboxes, - "ImageGrabPIL": ImageGrabPIL, - "DummyLatentOut": DummyLatentOut, - "FlipSigmasAdjusted": FlipSigmasAdjusted, - "InjectNoiseToLatent": InjectNoiseToLatent, - "AddLabel": AddLabel, - "SoundReactive": SoundReactive, - "GenerateNoise": GenerateNoise, - "StableZero123_BatchSchedule": StableZero123_BatchSchedule, - "SV3D_BatchSchedule": SV3D_BatchSchedule, - "GetImagesFromBatchIndexed": GetImagesFromBatchIndexed, - "InsertImagesToBatchIndexed": InsertImagesToBatchIndexed, - "ImageBatchRepeatInterleaving": ImageBatchRepeatInterleaving, - "NormalizedAmplitudeToMask": NormalizedAmplitudeToMask, - "OffsetMaskByNormalizedAmplitude": OffsetMaskByNormalizedAmplitude, - "ImageTransformByNormalizedAmplitude": ImageTransformByNormalizedAmplitude, - "GetLatentsFromBatchIndexed": GetLatentsFromBatchIndexed, - "StringConstant": StringConstant, - "GLIGENTextBoxApplyBatch": GLIGENTextBoxApplyBatch, - "CondPassThrough": CondPassThrough, - "ImageUpscaleWithModelBatched": ImageUpscaleWithModelBatched, - "ScaleBatchPromptSchedule": ScaleBatchPromptSchedule, - "ImageNormalize_Neg1_To_1": ImageNormalize_Neg1_To_1, - "Intrinsic_lora_sampling": Intrinsic_lora_sampling, - "RemapMaskRange": RemapMaskRange, - "LoadResAdapterNormalization": LoadResAdapterNormalization, - "Superprompt": Superprompt, - "RemapImageRange": RemapImageRange, - "CameraPoseVisualizer": CameraPoseVisualizer, - "BboxVisualize": BboxVisualize, - "StringConstantMultiline": StringConstantMultiline, - "JoinStrings": JoinStrings, - "Sleep": Sleep, - "ImagePadForOutpaintMasked": ImagePadForOutpaintMasked, - "SplineEditor": SplineEditor, - "ImageAndMaskPreview": ImageAndMaskPreview, - "StabilityAPI_SD3": StabilityAPI_SD3, - "MaskOrImageToWeight": MaskOrImageToWeight, - "FloatToMask": FloatToMask -} -NODE_DISPLAY_NAME_MAPPINGS = { - "INTConstant": "INT Constant", - "FloatConstant": "Float Constant", - "ConditioningMultiCombine": "Conditioning Multi Combine", - "ConditioningSetMaskAndCombine": "ConditioningSetMaskAndCombine", - "ConditioningSetMaskAndCombine3": "ConditioningSetMaskAndCombine3", - "ConditioningSetMaskAndCombine4": "ConditioningSetMaskAndCombine4", - "ConditioningSetMaskAndCombine5": "ConditioningSetMaskAndCombine5", - "GrowMaskWithBlur": "GrowMaskWithBlur", - "ColorToMask": "ColorToMask", - "CreateGradientMask": "CreateGradientMask", - "CreateTextMask" : "CreateTextMask", - "CreateFadeMask" : "CreateFadeMask (Deprecated)", - "CreateFadeMaskAdvanced" : "CreateFadeMaskAdvanced", - "CreateFluidMask" : "CreateFluidMask", - "CreateAudioMask" : "CreateAudioMask (Deprecated)", - "VRAM_Debug" : "VRAM Debug", - "CrossFadeImages": "CrossFadeImages", - "SomethingToString": "SomethingToString", - "EmptyLatentImagePresets": "EmptyLatentImagePresets", - "ColorMatch": "ColorMatch", - "GetImageRangeFromBatch": "GetImageRangeFromBatch", - "InsertImagesToBatchIndexed": "InsertImagesToBatchIndexed", - 
"SaveImageWithAlpha": "SaveImageWithAlpha", - "ReverseImageBatch": "ReverseImageBatch", - "ImageGridComposite2x2": "ImageGridComposite2x2", - "ImageGridComposite3x3": "ImageGridComposite3x3", - "ImageConcanate": "ImageConcatenate", - "ImageBatchTestPattern": "ImageBatchTestPattern", - "ReplaceImagesInBatch": "ReplaceImagesInBatch", - "BatchCropFromMask": "BatchCropFromMask", - "BatchCropFromMaskAdvanced": "BatchCropFromMaskAdvanced", - "FilterZeroMasksAndCorrespondingImages": "FilterZeroMasksAndCorrespondingImages", - "InsertImageBatchByIndexes": "InsertImageBatchByIndexes", - "BatchUncrop": "BatchUncrop", - "BatchUncropAdvanced": "BatchUncropAdvanced", - "BatchCLIPSeg": "BatchCLIPSeg", - "RoundMask": "RoundMask", - "ResizeMask": "ResizeMask", - "OffsetMask": "OffsetMask", - "WidgetToString": "WidgetToString", - "CreateShapeMask": "CreateShapeMask", - "CreateVoronoiMask": "CreateVoronoiMask", - "CreateMagicMask": "CreateMagicMask", - "BboxToInt": "BboxToInt", - "SplitBboxes": "SplitBboxes", - "ImageGrabPIL": "ImageGrabPIL", - "DummyLatentOut": "DummyLatentOut", - "FlipSigmasAdjusted": "FlipSigmasAdjusted", - "InjectNoiseToLatent": "InjectNoiseToLatent", - "AddLabel": "AddLabel", - "SoundReactive": "SoundReactive", - "GenerateNoise": "GenerateNoise", - "StableZero123_BatchSchedule": "StableZero123_BatchSchedule", - "SV3D_BatchSchedule": "SV3D_BatchSchedule", - "GetImagesFromBatchIndexed": "GetImagesFromBatchIndexed", - "ImageBatchRepeatInterleaving": "ImageBatchRepeatInterleaving", - "NormalizedAmplitudeToMask": "NormalizedAmplitudeToMask", - "OffsetMaskByNormalizedAmplitude": "OffsetMaskByNormalizedAmplitude", - "ImageTransformByNormalizedAmplitude": "ImageTransformByNormalizedAmplitude", - "GetLatentsFromBatchIndexed": "GetLatentsFromBatchIndexed", - "StringConstant": "StringConstant", - "GLIGENTextBoxApplyBatch": "GLIGENTextBoxApplyBatch", - "CondPassThrough": "CondPassThrough", - "ImageUpscaleWithModelBatched": "ImageUpscaleWithModelBatched", - "ScaleBatchPromptSchedule": "ScaleBatchPromptSchedule", - "ImageNormalize_Neg1_To_1": "ImageNormalize_Neg1_To_1", - "Intrinsic_lora_sampling": "Intrinsic_lora_sampling", - "RemapMaskRange": "RemapMaskRange", - "LoadResAdapterNormalization": "LoadResAdapterNormalization", - "Superprompt": "Superprompt", - "RemapImageRange": "RemapImageRange", - "CameraPoseVisualizer": "CameraPoseVisualizer", - "BboxVisualize": "BboxVisualize", - "StringConstantMultiline": "StringConstantMultiline", - "JoinStrings": "JoinStrings", - "Sleep": "🛌 Sleep 🛌", - "ImagePadForOutpaintMasked": "Pad Image For Outpaint Masked", - "SplineEditor": "Spline Editor", - "ImageAndMaskPreview": "Image & Mask Preview", - "StabilityAPI_SD3": "Stability API SD3", - "MaskOrImageToWeight": "Mask Or Image To Weight", - "FloatToMask": "Float To Mask", -} \ No newline at end of file diff --git a/nodes/audioscheduler_nodes.py b/nodes/audioscheduler_nodes.py new file mode 100644 index 0000000..69d0422 --- /dev/null +++ b/nodes/audioscheduler_nodes.py @@ -0,0 +1,251 @@ +# to be used with https://github.com/a1lazydog/ComfyUI-AudioScheduler +import torch +from torchvision.transforms import functional as TF +from PIL import Image, ImageDraw +import numpy as np +from ..utility.utility import pil2tensor +from nodes import MAX_RESOLUTION + +class NormalizedAmplitudeToMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, 
"max": 4096, "step": 1}), + "frame_offset": ("INT", {"default": 0,"min": -255, "max": 255, "step": 1}), + "location_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "location_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "size": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + "shape": ( + [ + 'none', + 'circle', + 'square', + 'triangle', + ], + { + "default": 'none' + }), + "color": ( + [ + 'white', + 'amplitude', + ], + { + "default": 'amplitude' + }), + },} + + CATEGORY = "KJNodes/audio" + RETURN_TYPES = ("MASK",) + FUNCTION = "convert" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Creates masks based on the normalized amplitude. +""" + + def convert(self, normalized_amp, width, height, frame_offset, shape, location_x, location_y, size, color): + # Ensure normalized_amp is an array and within the range [0, 1] + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + + # Offset the amplitude values by rolling the array + normalized_amp = np.roll(normalized_amp, frame_offset) + + # Initialize an empty list to hold the image tensors + out = [] + # Iterate over each amplitude value to create an image + for amp in normalized_amp: + # Scale the amplitude value to cover the full range of grayscale values + if color == 'amplitude': + grayscale_value = int(amp * 255) + elif color == 'white': + grayscale_value = 255 + # Convert the grayscale value to an RGB format + gray_color = (grayscale_value, grayscale_value, grayscale_value) + finalsize = size * amp + + if shape == 'none': + shapeimage = Image.new("RGB", (width, height), gray_color) + else: + shapeimage = Image.new("RGB", (width, height), "black") + + draw = ImageDraw.Draw(shapeimage) + if shape == 'circle' or shape == 'square': + # Define the bounding box for the shape + left_up_point = (location_x - finalsize, location_y - finalsize) + right_down_point = (location_x + finalsize,location_y + finalsize) + two_points = [left_up_point, right_down_point] + + if shape == 'circle': + draw.ellipse(two_points, fill=gray_color) + elif shape == 'square': + draw.rectangle(two_points, fill=gray_color) + + elif shape == 'triangle': + # Define the points for the triangle + left_up_point = (location_x - finalsize, location_y + finalsize) # bottom left + right_down_point = (location_x + finalsize, location_y + finalsize) # bottom right + top_point = (location_x, location_y) # top point + draw.polygon([top_point, left_up_point, right_down_point], fill=gray_color) + + shapeimage = pil2tensor(shapeimage) + mask = shapeimage[:, :, :, 0] + out.append(mask) + + return (torch.cat(out, dim=0),) + +class NormalizedAmplitudeToFloatList: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + },} + + CATEGORY = "KJNodes/audio" + RETURN_TYPES = ("FLOAT",) + FUNCTION = "convert" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Creates a list of floats from the normalized amplitude. 
+""" + + def convert(self, normalized_amp): + # Ensure normalized_amp is an array and within the range [0, 1] + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + return (normalized_amp.tolist(),) + +class OffsetMaskByNormalizedAmplitude: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + "mask": ("MASK",), + "x": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "y": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "rotate": ("BOOLEAN", { "default": False }), + "angle_multiplier": ("FLOAT", { "default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number" }), + } + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("mask",) + FUNCTION = "offset" + CATEGORY = "KJNodes/audio" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Offsets masks based on the normalized amplitude. +""" + + def offset(self, mask, x, y, angle_multiplier, rotate, normalized_amp): + + # Ensure normalized_amp is an array and within the range [0, 1] + offsetmask = mask.clone() + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + + batch_size, height, width = mask.shape + + if rotate: + for i in range(batch_size): + rotation_amp = int(normalized_amp[i] * (360 * angle_multiplier)) + rotation_angle = rotation_amp + offsetmask[i] = TF.rotate(offsetmask[i].unsqueeze(0), rotation_angle).squeeze(0) + if x != 0 or y != 0: + for i in range(batch_size): + offset_amp = normalized_amp[i] * 10 + shift_x = min(x*offset_amp, width-1) + shift_y = min(y*offset_amp, height-1) + if shift_x != 0: + offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_x), dims=1) + if shift_y != 0: + offsetmask[i] = torch.roll(offsetmask[i], shifts=int(shift_y), dims=0) + + return offsetmask, + +class ImageTransformByNormalizedAmplitude: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "normalized_amp": ("NORMALIZED_AMPLITUDE",), + "zoom_scale": ("FLOAT", { "default": 0.0, "min": -1.0, "max": 1.0, "step": 0.001, "display": "number" }), + "x_offset": ("INT", { "default": 0, "min": (1 -MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "y_offset": ("INT", { "default": 0, "min": (1 -MAX_RESOLUTION), "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "cumulative": ("BOOLEAN", { "default": False }), + "image": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "amptransform" + CATEGORY = "KJNodes/audio" + DESCRIPTION = """ +Works as a bridge to the AudioScheduler -nodes: +https://github.com/a1lazydog/ComfyUI-AudioScheduler +Transforms image based on the normalized amplitude. 
+""" + + def amptransform(self, image, normalized_amp, zoom_scale, cumulative, x_offset, y_offset): + # Ensure normalized_amp is an array and within the range [0, 1] + normalized_amp = np.clip(normalized_amp, 0.0, 1.0) + transformed_images = [] + + # Initialize the cumulative zoom factor + prev_amp = 0.0 + + for i in range(image.shape[0]): + img = image[i] # Get the i-th image in the batch + amp = normalized_amp[i] # Get the corresponding amplitude value + + # Incrementally increase the cumulative zoom factor + if cumulative: + prev_amp += amp + amp += prev_amp + + # Convert the image tensor from BxHxWxC to CxHxW format expected by torchvision + img = img.permute(2, 0, 1) + + # Convert PyTorch tensor to PIL Image for processing + pil_img = TF.to_pil_image(img) + + # Calculate the crop size based on the amplitude + width, height = pil_img.size + crop_size = int(min(width, height) * (1 - amp * zoom_scale)) + crop_size = max(crop_size, 1) + + # Calculate the crop box coordinates (centered crop) + left = (width - crop_size) // 2 + top = (height - crop_size) // 2 + right = (width + crop_size) // 2 + bottom = (height + crop_size) // 2 + + # Crop and resize back to original size + cropped_img = TF.crop(pil_img, top, left, crop_size, crop_size) + resized_img = TF.resize(cropped_img, (height, width)) + + # Convert back to tensor in CxHxW format + tensor_img = TF.to_tensor(resized_img) + + # Convert the tensor back to BxHxWxC format + tensor_img = tensor_img.permute(1, 2, 0) + + # Offset the image based on the amplitude + offset_amp = amp * 10 # Calculate the offset magnitude based on the amplitude + shift_x = min(x_offset * offset_amp, img.shape[1] - 1) # Calculate the shift in x direction + shift_y = min(y_offset * offset_amp, img.shape[0] - 1) # Calculate the shift in y direction + + # Apply the offset to the image tensor + if shift_x != 0: + tensor_img = torch.roll(tensor_img, shifts=int(shift_x), dims=1) + if shift_y != 0: + tensor_img = torch.roll(tensor_img, shifts=int(shift_y), dims=0) + + # Add to the list + transformed_images.append(tensor_img) + + # Stack all transformed images into a batch + transformed_batch = torch.stack(transformed_images) + + return (transformed_batch,) \ No newline at end of file diff --git a/nodes/batchcrop_nodes.py b/nodes/batchcrop_nodes.py new file mode 100644 index 0000000..282dd93 --- /dev/null +++ b/nodes/batchcrop_nodes.py @@ -0,0 +1,737 @@ +from ..utility.utility import tensor2pil, pil2tensor +from PIL import Image, ImageDraw, ImageFilter +import numpy as np +import torch +from torchvision.transforms import Resize, CenterCrop, InterpolationMode +import math + +#based on nodes from mtb https://github.com/melMass/comfy_mtb + +def bbox_to_region(bbox, target_size=None): + bbox = bbox_check(bbox, target_size) + return (bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]) + +def bbox_check(bbox, target_size=None): + if not target_size: + return bbox + + new_bbox = ( + bbox[0], + bbox[1], + min(target_size[0] - bbox[0], bbox[2]), + min(target_size[1] - bbox[1], bbox[3]), + ) + return new_bbox + +class BatchCropFromMask: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "masks": ("MASK",), + "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.001}), + "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ( + "IMAGE", + "IMAGE", + "BBOX", + "INT", + "INT", + ) + RETURN_NAMES = ( + "original_images", + "cropped_images", + 
"bboxes", + "width", + "height", + ) + FUNCTION = "crop" + CATEGORY = "KJNodes/masking" + + def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha): + if alpha == 0: + return prev_bbox_size + return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size) + + def smooth_center(self, prev_center, curr_center, alpha=0.5): + if alpha == 0: + return prev_center + return ( + round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]), + round(alpha * curr_center[1] + (1 - alpha) * prev_center[1]) + ) + + def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha): + + bounding_boxes = [] + cropped_images = [] + + self.max_bbox_width = 0 + self.max_bbox_height = 0 + + # First, calculate the maximum bounding box size across all masks + curr_max_bbox_width = 0 + curr_max_bbox_height = 0 + for mask in masks: + _mask = tensor2pil(mask)[0] + non_zero_indices = np.nonzero(np.array(_mask)) + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + width = max_x - min_x + height = max_y - min_y + curr_max_bbox_width = max(curr_max_bbox_width, width) + curr_max_bbox_height = max(curr_max_bbox_height, height) + + # Smooth the changes in the bounding box size + self.max_bbox_width = self.smooth_bbox_size(self.max_bbox_width, curr_max_bbox_width, bbox_smooth_alpha) + self.max_bbox_height = self.smooth_bbox_size(self.max_bbox_height, curr_max_bbox_height, bbox_smooth_alpha) + + # Apply the crop size multiplier + self.max_bbox_width = round(self.max_bbox_width * crop_size_mult) + self.max_bbox_height = round(self.max_bbox_height * crop_size_mult) + bbox_aspect_ratio = self.max_bbox_width / self.max_bbox_height + + # Then, for each mask and corresponding image... 
+ for i, (mask, img) in enumerate(zip(masks, original_images)): + _mask = tensor2pil(mask)[0] + non_zero_indices = np.nonzero(np.array(_mask)) + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + + # Calculate center of bounding box + center_x = np.mean(non_zero_indices[1]) + center_y = np.mean(non_zero_indices[0]) + curr_center = (round(center_x), round(center_y)) + + # If this is the first frame, initialize prev_center with curr_center + if not hasattr(self, 'prev_center'): + self.prev_center = curr_center + + # Smooth the changes in the center coordinates from the second frame onwards + if i > 0: + center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha) + else: + center = curr_center + + # Update prev_center for the next frame + self.prev_center = center + + # Create bounding box using max_bbox_width and max_bbox_height + half_box_width = round(self.max_bbox_width / 2) + half_box_height = round(self.max_bbox_height / 2) + min_x = max(0, center[0] - half_box_width) + max_x = min(img.shape[1], center[0] + half_box_width) + min_y = max(0, center[1] - half_box_height) + max_y = min(img.shape[0], center[1] + half_box_height) + + # Append bounding box coordinates + bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y)) + + # Crop the image from the bounding box + cropped_img = img[min_y:max_y, min_x:max_x, :] + + # Calculate the new dimensions while maintaining the aspect ratio + new_height = min(cropped_img.shape[0], self.max_bbox_height) + new_width = round(new_height * bbox_aspect_ratio) + + # Resize the image + resize_transform = Resize((new_height, new_width)) + resized_img = resize_transform(cropped_img.permute(2, 0, 1)) + + # Perform the center crop to the desired size + crop_transform = CenterCrop((self.max_bbox_height, self.max_bbox_width)) # swap the order here if necessary + cropped_resized_img = crop_transform(resized_img) + + cropped_images.append(cropped_resized_img.permute(1, 2, 0)) + + cropped_out = torch.stack(cropped_images, dim=0) + + return (original_images, cropped_out, bounding_boxes, self.max_bbox_width, self.max_bbox_height, ) + +class BatchUncrop: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "cropped_images": ("IMAGE",), + "bboxes": ("BBOX",), + "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}, ), + "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "border_top": ("BOOLEAN", {"default": True}), + "border_bottom": ("BOOLEAN", {"default": True}), + "border_left": ("BOOLEAN", {"default": True}), + "border_right": ("BOOLEAN", {"default": True}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "uncrop" + + CATEGORY = "KJNodes/masking" + + def uncrop(self, original_images, cropped_images, bboxes, border_blending, crop_rescale, border_top, border_bottom, border_left, border_right): + def inset_border(image, border_width, border_color, border_top, border_bottom, border_left, border_right): + draw = ImageDraw.Draw(image) + width, height = image.size + if border_top: + draw.rectangle((0, 0, width, border_width), fill=border_color) + if border_bottom: + draw.rectangle((0, height - border_width, width, height), fill=border_color) + if border_left: + draw.rectangle((0, 0, border_width, height), fill=border_color) + if border_right: + draw.rectangle((width - border_width, 0, width, height), fill=border_color) + return 
image + + if len(original_images) != len(cropped_images): + raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same") + + # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images + if len(bboxes) > len(original_images): + print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}") + bboxes = bboxes[:len(original_images)] + elif len(bboxes) < len(original_images): + raise ValueError("There should be at least as many bboxes as there are original and cropped images") + + input_images = tensor2pil(original_images) + crop_imgs = tensor2pil(cropped_images) + + out_images = [] + for i in range(len(input_images)): + img = input_images[i] + crop = crop_imgs[i] + bbox = bboxes[i] + + # uncrop the image based on the bounding box + bb_x, bb_y, bb_width, bb_height = bbox + + paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) + + # scale factors + scale_x = crop_rescale + scale_y = crop_rescale + + # scaled paste_region + paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y)) + + # rescale the crop image to fit the paste_region + crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1]))) + crop_img = crop.convert("RGB") + + if border_blending > 1.0: + border_blending = 1.0 + elif border_blending < 0.0: + border_blending = 0.0 + + blend_ratio = (max(crop_img.size) / 2) * float(border_blending) + + blend = img.convert("RGBA") + mask = Image.new("L", img.size, 0) + + mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255) + mask_block = inset_border(mask_block, round(blend_ratio / 2), (0), border_top, border_bottom, border_left, border_right) + + mask.paste(mask_block, paste_region) + blend.paste(crop_img, paste_region) + + mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4)) + mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4)) + + blend.putalpha(mask) + img = Image.alpha_composite(img.convert("RGBA"), blend) + out_images.append(img.convert("RGB")) + + return (pil2tensor(out_images),) + +class BatchCropFromMaskAdvanced: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "masks": ("MASK",), + "crop_size_mult": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "bbox_smooth_alpha": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + } + + RETURN_TYPES = ( + "IMAGE", + "IMAGE", + "MASK", + "IMAGE", + "MASK", + "BBOX", + "BBOX", + "INT", + "INT", + ) + RETURN_NAMES = ( + "original_images", + "cropped_images", + "cropped_masks", + "combined_crop_image", + "combined_crop_masks", + "bboxes", + "combined_bounding_box", + "bbox_width", + "bbox_height", + ) + FUNCTION = "crop" + CATEGORY = "KJNodes/masking" + + def smooth_bbox_size(self, prev_bbox_size, curr_bbox_size, alpha): + return round(alpha * curr_bbox_size + (1 - alpha) * prev_bbox_size) + + def smooth_center(self, prev_center, curr_center, alpha=0.5): + return (round(alpha * curr_center[0] + (1 - alpha) * prev_center[0]), + round(alpha * curr_center[1] + (1 - alpha) * prev_center[1])) + + def crop(self, masks, original_images, crop_size_mult, bbox_smooth_alpha): + bounding_boxes = [] + combined_bounding_box = [] + cropped_images = [] + cropped_masks = [] + cropped_masks_out = [] + 
combined_crop_out = [] + combined_cropped_images = [] + combined_cropped_masks = [] + + def calculate_bbox(mask): + non_zero_indices = np.nonzero(np.array(mask)) + + # handle empty masks + min_x, max_x, min_y, max_y = 0, 0, 0, 0 + if len(non_zero_indices[1]) > 0 and len(non_zero_indices[0]) > 0: + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + + width = max_x - min_x + height = max_y - min_y + bbox_size = max(width, height) + return min_x, max_x, min_y, max_y, bbox_size + + combined_mask = torch.max(masks, dim=0)[0] + _mask = tensor2pil(combined_mask)[0] + new_min_x, new_max_x, new_min_y, new_max_y, combined_bbox_size = calculate_bbox(_mask) + center_x = (new_min_x + new_max_x) / 2 + center_y = (new_min_y + new_max_y) / 2 + half_box_size = round(combined_bbox_size // 2) + new_min_x = max(0, round(center_x - half_box_size)) + new_max_x = min(original_images[0].shape[1], round(center_x + half_box_size)) + new_min_y = max(0, round(center_y - half_box_size)) + new_max_y = min(original_images[0].shape[0], round(center_y + half_box_size)) + + combined_bounding_box.append((new_min_x, new_min_y, new_max_x - new_min_x, new_max_y - new_min_y)) + + self.max_bbox_size = 0 + + # First, calculate the maximum bounding box size across all masks + curr_max_bbox_size = max(calculate_bbox(tensor2pil(mask)[0])[-1] for mask in masks) + # Smooth the changes in the bounding box size + self.max_bbox_size = self.smooth_bbox_size(self.max_bbox_size, curr_max_bbox_size, bbox_smooth_alpha) + # Apply the crop size multiplier + self.max_bbox_size = round(self.max_bbox_size * crop_size_mult) + # Make sure max_bbox_size is divisible by 16, if not, round it upwards so it is + self.max_bbox_size = math.ceil(self.max_bbox_size / 16) * 16 + + if self.max_bbox_size > original_images[0].shape[0] or self.max_bbox_size > original_images[0].shape[1]: + # max_bbox_size can only be as big as our input's width or height, and it has to be even + self.max_bbox_size = math.floor(min(original_images[0].shape[0], original_images[0].shape[1]) / 2) * 2 + + # Then, for each mask and corresponding image... 
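+ # Each frame is cropped around the smoothed center of its mask using the shared max bbox size;
+ # empty masks fall back to the full frame. A second set of crops is also taken from the combined mask bbox.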
+ for i, (mask, img) in enumerate(zip(masks, original_images)): + _mask = tensor2pil(mask)[0] + non_zero_indices = np.nonzero(np.array(_mask)) + + # check for empty masks + if len(non_zero_indices[0]) > 0 and len(non_zero_indices[1]) > 0: + min_x, max_x = np.min(non_zero_indices[1]), np.max(non_zero_indices[1]) + min_y, max_y = np.min(non_zero_indices[0]), np.max(non_zero_indices[0]) + + # Calculate center of bounding box + center_x = np.mean(non_zero_indices[1]) + center_y = np.mean(non_zero_indices[0]) + curr_center = (round(center_x), round(center_y)) + + # If this is the first frame, initialize prev_center with curr_center + if not hasattr(self, 'prev_center'): + self.prev_center = curr_center + + # Smooth the changes in the center coordinates from the second frame onwards + if i > 0: + center = self.smooth_center(self.prev_center, curr_center, bbox_smooth_alpha) + else: + center = curr_center + + # Update prev_center for the next frame + self.prev_center = center + + # Create bounding box using max_bbox_size + half_box_size = self.max_bbox_size // 2 + min_x = max(0, center[0] - half_box_size) + max_x = min(img.shape[1], center[0] + half_box_size) + min_y = max(0, center[1] - half_box_size) + max_y = min(img.shape[0], center[1] + half_box_size) + + # Append bounding box coordinates + bounding_boxes.append((min_x, min_y, max_x - min_x, max_y - min_y)) + + # Crop the image from the bounding box + cropped_img = img[min_y:max_y, min_x:max_x, :] + cropped_mask = mask[min_y:max_y, min_x:max_x] + + # Resize the cropped image to a fixed size + new_size = max(cropped_img.shape[0], cropped_img.shape[1]) + resize_transform = Resize(new_size, interpolation=InterpolationMode.NEAREST, max_size=max(img.shape[0], img.shape[1])) + resized_mask = resize_transform(cropped_mask.unsqueeze(0).unsqueeze(0)).squeeze(0).squeeze(0) + resized_img = resize_transform(cropped_img.permute(2, 0, 1)) + # Perform the center crop to the desired size + # Constrain the crop to the smaller of our bbox or our image so we don't expand past the image dimensions. 
+ crop_transform = CenterCrop((min(self.max_bbox_size, resized_img.shape[1]), min(self.max_bbox_size, resized_img.shape[2]))) + + cropped_resized_img = crop_transform(resized_img) + cropped_images.append(cropped_resized_img.permute(1, 2, 0)) + + cropped_resized_mask = crop_transform(resized_mask) + cropped_masks.append(cropped_resized_mask) + + combined_cropped_img = original_images[i][new_min_y:new_max_y, new_min_x:new_max_x, :] + combined_cropped_images.append(combined_cropped_img) + + combined_cropped_mask = masks[i][new_min_y:new_max_y, new_min_x:new_max_x] + combined_cropped_masks.append(combined_cropped_mask) + else: + bounding_boxes.append((0, 0, img.shape[1], img.shape[0])) + cropped_images.append(img) + cropped_masks.append(mask) + combined_cropped_images.append(img) + combined_cropped_masks.append(mask) + + cropped_out = torch.stack(cropped_images, dim=0) + combined_crop_out = torch.stack(combined_cropped_images, dim=0) + cropped_masks_out = torch.stack(cropped_masks, dim=0) + combined_crop_mask_out = torch.stack(combined_cropped_masks, dim=0) + + return (original_images, cropped_out, cropped_masks_out, combined_crop_out, combined_crop_mask_out, bounding_boxes, combined_bounding_box, self.max_bbox_size, self.max_bbox_size) + +class FilterZeroMasksAndCorrespondingImages: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "masks": ("MASK",), + }, + "optional": { + "original_images": ("IMAGE",), + }, + } + + RETURN_TYPES = ("MASK", "IMAGE", "IMAGE", "INDEXES",) + RETURN_NAMES = ("non_zero_masks_out", "non_zero_mask_images_out", "zero_mask_images_out", "zero_mask_images_out_indexes",) + FUNCTION = "filter" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Filter out all the empty (i.e. all zero) mask in masks +Also filter out all the corresponding images in original_images by indexes if provide + +original_images (optional): If provided, need have same length as masks. 
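+If the lengths do not match, original_images is ignored and a warning is printed.
+If none of the masks are empty, the zero_mask outputs are returned as None.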
+""" + + def filter(self, masks, original_images=None): + non_zero_masks = [] + non_zero_mask_images = [] + zero_mask_images = [] + zero_mask_images_indexes = [] + + masks_num = len(masks) + also_process_images = False + if original_images is not None: + imgs_num = len(original_images) + if len(original_images) == masks_num: + also_process_images = True + else: + print(f"[WARNING] ignore input: original_images, due to number of original_images ({imgs_num}) is not equal to number of masks ({masks_num})") + + for i in range(masks_num): + non_zero_num = np.count_nonzero(np.array(masks[i])) + if non_zero_num > 0: + non_zero_masks.append(masks[i]) + if also_process_images: + non_zero_mask_images.append(original_images[i]) + else: + zero_mask_images.append(original_images[i]) + zero_mask_images_indexes.append(i) + + non_zero_masks_out = torch.stack(non_zero_masks, dim=0) + non_zero_mask_images_out = zero_mask_images_out = zero_mask_images_out_indexes = None + + if also_process_images: + non_zero_mask_images_out = torch.stack(non_zero_mask_images, dim=0) + if len(zero_mask_images) > 0: + zero_mask_images_out = torch.stack(zero_mask_images, dim=0) + zero_mask_images_out_indexes = zero_mask_images_indexes + + return (non_zero_masks_out, non_zero_mask_images_out, zero_mask_images_out, zero_mask_images_out_indexes) + +class InsertImageBatchByIndexes: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "images_to_insert": ("IMAGE",), + "insert_indexes": ("INDEXES",), + }, + } + + RETURN_TYPES = ("IMAGE", ) + RETURN_NAMES = ("images_after_insert", ) + FUNCTION = "insert" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +This node is designed to be use with node FilterZeroMasksAndCorrespondingImages +It inserts the images_to_insert into images according to insert_indexes + +Returns: + images_after_insert: updated original images with origonal sequence order +""" + + def insert(self, images, images_to_insert, insert_indexes): + images_after_insert = images + + if images_to_insert is not None and insert_indexes is not None: + images_to_insert_num = len(images_to_insert) + insert_indexes_num = len(insert_indexes) + if images_to_insert_num == insert_indexes_num: + images_after_insert = [] + + i_images = 0 + for i in range(len(images) + images_to_insert_num): + if i in insert_indexes: + images_after_insert.append(images_to_insert[insert_indexes.index(i)]) + else: + images_after_insert.append(images[i_images]) + i_images += 1 + + images_after_insert = torch.stack(images_after_insert, dim=0) + + else: + print(f"[WARNING] skip this node, due to number of images_to_insert ({images_to_insert_num}) is not equal to number of insert_indexes ({insert_indexes_num})") + + + return (images_after_insert, ) + +class BatchUncropAdvanced: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "original_images": ("IMAGE",), + "cropped_images": ("IMAGE",), + "cropped_masks": ("MASK",), + "combined_crop_mask": ("MASK",), + "bboxes": ("BBOX",), + "border_blending": ("FLOAT", {"default": 0.25, "min": 0.0, "max": 1.0, "step": 0.01}, ), + "crop_rescale": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "use_combined_mask": ("BOOLEAN", {"default": False}), + "use_square_mask": ("BOOLEAN", {"default": True}), + }, + "optional": { + "combined_bounding_box": ("BBOX", {"default": None}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "uncrop" + CATEGORY = "KJNodes/masking" + + + def uncrop(self, original_images, cropped_images, cropped_masks, 
combined_crop_mask, bboxes, border_blending, crop_rescale, use_combined_mask, use_square_mask, combined_bounding_box = None): + + def inset_border(image, border_width=20, border_color=(0)): + width, height = image.size + bordered_image = Image.new(image.mode, (width, height), border_color) + bordered_image.paste(image, (0, 0)) + draw = ImageDraw.Draw(bordered_image) + draw.rectangle((0, 0, width - 1, height - 1), outline=border_color, width=border_width) + return bordered_image + + if len(original_images) != len(cropped_images): + raise ValueError(f"The number of original_images ({len(original_images)}) and cropped_images ({len(cropped_images)}) should be the same") + + # Ensure there are enough bboxes, but drop the excess if there are more bboxes than images + if len(bboxes) > len(original_images): + print(f"Warning: Dropping excess bounding boxes. Expected {len(original_images)}, but got {len(bboxes)}") + bboxes = bboxes[:len(original_images)] + elif len(bboxes) < len(original_images): + raise ValueError("There should be at least as many bboxes as there are original and cropped images") + + crop_imgs = tensor2pil(cropped_images) + input_images = tensor2pil(original_images) + out_images = [] + + for i in range(len(input_images)): + img = input_images[i] + crop = crop_imgs[i] + bbox = bboxes[i] + + if use_combined_mask: + bb_x, bb_y, bb_width, bb_height = combined_bounding_box[0] + paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) + mask = combined_crop_mask[i] + else: + bb_x, bb_y, bb_width, bb_height = bbox + paste_region = bbox_to_region((bb_x, bb_y, bb_width, bb_height), img.size) + mask = cropped_masks[i] + + # scale paste_region + scale_x = scale_y = crop_rescale + paste_region = (round(paste_region[0]*scale_x), round(paste_region[1]*scale_y), round(paste_region[2]*scale_x), round(paste_region[3]*scale_y)) + + # rescale the crop image to fit the paste_region + crop = crop.resize((round(paste_region[2]-paste_region[0]), round(paste_region[3]-paste_region[1]))) + crop_img = crop.convert("RGB") + + #border blending + if border_blending > 1.0: + border_blending = 1.0 + elif border_blending < 0.0: + border_blending = 0.0 + + blend_ratio = (max(crop_img.size) / 2) * float(border_blending) + blend = img.convert("RGBA") + + if use_square_mask: + mask = Image.new("L", img.size, 0) + mask_block = Image.new("L", (paste_region[2]-paste_region[0], paste_region[3]-paste_region[1]), 255) + mask_block = inset_border(mask_block, round(blend_ratio / 2), (0)) + mask.paste(mask_block, paste_region) + else: + original_mask = tensor2pil(mask)[0] + original_mask = original_mask.resize((paste_region[2]-paste_region[0], paste_region[3]-paste_region[1])) + mask = Image.new("L", img.size, 0) + mask.paste(original_mask, paste_region) + + mask = mask.filter(ImageFilter.BoxBlur(radius=blend_ratio / 4)) + mask = mask.filter(ImageFilter.GaussianBlur(radius=blend_ratio / 4)) + + blend.paste(crop_img, paste_region) + blend.putalpha(mask) + + img = Image.alpha_composite(img.convert("RGBA"), blend) + out_images.append(img.convert("RGB")) + + return (pil2tensor(out_images),) + +class SplitBboxes: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "bboxes": ("BBOX",), + "index": ("INT", {"default": 0,"min": 0, "max": 99999999, "step": 1}), + }, + } + + RETURN_TYPES = ("BBOX","BBOX",) + RETURN_NAMES = ("bboxes_a","bboxes_b",) + FUNCTION = "splitbbox" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Splits the specified bbox list at the given index into two lists. 
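+For example, index 2 on a list of four bboxes returns the first two as bboxes_a and the remaining two as bboxes_b.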
+""" + + def splitbbox(self, bboxes, index): + bboxes_a = bboxes[:index] # Sub-list from the start of bboxes up to (but not including) the index + bboxes_b = bboxes[index:] # Sub-list from the index to the end of bboxes + + return (bboxes_a, bboxes_b,) + +class BboxToInt: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "bboxes": ("BBOX",), + "index": ("INT", {"default": 0,"min": 0, "max": 99999999, "step": 1}), + }, + } + + RETURN_TYPES = ("INT","INT","INT","INT","INT","INT",) + RETURN_NAMES = ("x_min","y_min","width","height", "center_x","center_y",) + FUNCTION = "bboxtoint" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Returns selected index from bounding box list as integers. +""" + def bboxtoint(self, bboxes, index): + x_min, y_min, width, height = bboxes[index] + center_x = int(x_min + width / 2) + center_y = int(y_min + height / 2) + + return (x_min, y_min, width, height, center_x, center_y,) + +class BboxVisualize: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "images": ("IMAGE",), + "bboxes": ("BBOX",), + "line_width": ("INT", {"default": 1,"min": 1, "max": 10, "step": 1}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "visualizebbox" + DESCRIPTION = """ +Visualizes the specified bbox on the image. +""" + + CATEGORY = "KJNodes/masking" + + def visualizebbox(self, bboxes, images, line_width): + image_list = [] + for image, bbox in zip(images, bboxes): + x_min, y_min, width, height = bbox + image = image.permute(2, 0, 1) + + img_with_bbox = image.clone() + + # Define the color for the bbox, e.g., red + color = torch.tensor([1, 0, 0], dtype=torch.float32) + + # Draw lines for each side of the bbox with the specified line width + for lw in range(line_width): + # Top horizontal line + img_with_bbox[:, y_min + lw, x_min:x_min + width] = color[:, None] + + # Bottom horizontal line + img_with_bbox[:, y_min + height - lw, x_min:x_min + width] = color[:, None] + + # Left vertical line + img_with_bbox[:, y_min:y_min + height, x_min + lw] = color[:, None] + + # Right vertical line + img_with_bbox[:, y_min:y_min + height, x_min + width - lw] = color[:, None] + + img_with_bbox = img_with_bbox.permute(1, 2, 0).unsqueeze(0) + image_list.append(img_with_bbox) + + return (torch.cat(image_list, dim=0),) \ No newline at end of file diff --git a/nodes/curve_nodes.py b/nodes/curve_nodes.py new file mode 100644 index 0000000..47086af --- /dev/null +++ b/nodes/curve_nodes.py @@ -0,0 +1,945 @@ +import torch +from torchvision import transforms +import json +from PIL import Image, ImageDraw, ImageFont +import numpy as np +from ..utility.utility import pil2tensor +import folder_paths + +def plot_coordinates_to_tensor(coordinates, height, width, bbox_height, bbox_width, size_multiplier, prompt): + import matplotlib + matplotlib.use('Agg') + from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas + text_color = '#999999' + bg_color = '#353535' + matplotlib.pyplot.rcParams['text.color'] = text_color + fig, ax = matplotlib.pyplot.subplots(figsize=(width/100, height/100), dpi=100) + fig.patch.set_facecolor(bg_color) + ax.set_facecolor(bg_color) + ax.grid(color=text_color, linestyle='-', linewidth=0.5) + ax.set_xlabel('x', color=text_color) + ax.set_ylabel('y', color=text_color) + for text in ax.get_xticklabels() + ax.get_yticklabels(): + text.set_color(text_color) + ax.set_title('position for: ' + prompt) + ax.set_xlabel('X Coordinate') + ax.set_ylabel('Y Coordinate') + #ax.legend().remove() + ax.set_xlim(0, 
width) # Set the x-axis to match the input latent width + ax.set_ylim(height, 0) # Set the y-axis to match the input latent height, with (0,0) at top-left + # Adjust the margins of the subplot + matplotlib.pyplot.subplots_adjust(left=0.08, right=0.95, bottom=0.05, top=0.95, wspace=0.2, hspace=0.2) + + cmap = matplotlib.pyplot.get_cmap('rainbow') + image_batch = [] + canvas = FigureCanvas(fig) + width, height = fig.get_size_inches() * fig.get_dpi() + # Draw a box at each coordinate + for i, ((x, y), size) in enumerate(zip(coordinates, size_multiplier)): + color_index = i / (len(coordinates) - 1) + color = cmap(color_index) + draw_height = bbox_height * size + draw_width = bbox_width * size + rect = matplotlib.patches.Rectangle((x - draw_width/2, y - draw_height/2), draw_width, draw_height, + linewidth=1, edgecolor=color, facecolor='none', alpha=0.5) + ax.add_patch(rect) + + # Check if there is a next coordinate to draw an arrow to + if i < len(coordinates) - 1: + x1, y1 = coordinates[i] + x2, y2 = coordinates[i + 1] + ax.annotate("", xy=(x2, y2), xytext=(x1, y1), + arrowprops=dict(arrowstyle="->", + linestyle="-", + lw=1, + color=color, + mutation_scale=20)) + canvas.draw() + image_np = np.frombuffer(canvas.tostring_rgb(), dtype='uint8').reshape(int(height), int(width), 3).copy() + image_tensor = torch.from_numpy(image_np).float() / 255.0 + image_tensor = image_tensor.unsqueeze(0) + image_batch.append(image_tensor) + + matplotlib.pyplot.close(fig) + image_batch_tensor = torch.cat(image_batch, dim=0) + + return image_batch_tensor + +class PlotCoordinates: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "coordinates": ("STRING", {"forceInput": True}), + "text": ("STRING", {"default": 'title', "multiline": False}), + "width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "bbox_width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + "bbox_height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + }, + "optional": {"size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True})}, + } + RETURN_TYPES = ("IMAGE", "INT", "INT", "INT", "INT",) + RETURN_NAMES = ("images", "width", "height", "bbox_width", "bbox_height",) + FUNCTION = "append" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Plots coordinates to sequence of images using Matplotlib. 
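+Each coordinate is drawn as a bbox_width x bbox_height box, scaled by the optional size_multiplier, with arrows showing the order of the points.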
+ +""" + + def append(self, coordinates, text, width, height, bbox_width, bbox_height, size_multiplier=[1.0]): + coordinates = json.loads(coordinates.replace("'", '"')) + coordinates = [(coord['x'], coord['y']) for coord in coordinates] + batch_size = len(coordinates) + if len(size_multiplier) != batch_size: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + + plot_image_tensor = plot_coordinates_to_tensor(coordinates, height, width, bbox_height, bbox_width, size_multiplier, text) + + return (plot_image_tensor, width, height, bbox_width, bbox_height) + +class SplineEditor: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "points_store": ("STRING", {"multiline": False}), + "coordinates": ("STRING", {"multiline": False}), + "mask_width": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "mask_height": ("INT", {"default": 512, "min": 8, "max": 4096, "step": 8}), + "points_to_sample": ("INT", {"default": 16, "min": 2, "max": 1000, "step": 1}), + "sampling_method": ( + [ + 'path', + 'time', + ], + { + "default": 'time' + }), + "interpolation": ( + [ + 'cardinal', + 'monotone', + 'basis', + 'linear', + 'step-before', + 'step-after', + 'polar', + 'polar-reverse', + ], + { + "default": 'cardinal' + }), + "tension": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "repeat_output": ("INT", {"default": 1, "min": 1, "max": 4096, "step": 1}), + "float_output_type": ( + [ + 'list', + 'pandas series', + 'tensor', + ], + { + "default": 'list' + }), + }, + "optional": { + "min_value": ("FLOAT", {"default": 0.0, "min": -10000.0, "max": 10000.0, "step": 0.01}), + "max_value": ("FLOAT", {"default": 1.0, "min": -10000.0, "max": 10000.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("MASK", "STRING", "FLOAT", "INT") + RETURN_NAMES = ("mask", "coord_str", "float", "count") + FUNCTION = "splinedata" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +# WORK IN PROGRESS +Do not count on this as part of your workflow yet, +probably contains lots of bugs and stability is not +guaranteed!! + +## Graphical editor to create values for various +## schedules and/or mask batches. + +**Shift + click** to add control point at end. +**Ctrl + click** to add control point (subdivide) between two points. +**Right click on a point** to delete it. +Note that you can't delete from start/end. + +Right click on canvas for context menu: +These are purely visual options, doesn't affect the output: + - Toggle handles visibility + - Display sample points: display the points to be returned. + +**points_to_sample** value sets the number of samples +returned from the **drawn spline itself**, this is independent from the +actual control points, so the interpolation type matters. 
+sampling_method: + - time: samples along the time axis, used for schedules + - path: samples along the path itself, useful for coordinates + +output types: + - mask batch + example compatible nodes: anything that takes masks + - list of floats + example compatible nodes: IPAdapter weights + - pandas series + example compatible nodes: anything that takes Fizz' + nodes Batch Value Schedule + - torch tensor + example compatible nodes: unknown +""" + + def splinedata(self, mask_width, mask_height, coordinates, float_output_type, interpolation, + points_to_sample, sampling_method, points_store, tension, repeat_output, min_value=0.0, max_value=1.0): + + coordinates = json.loads(coordinates) + for coord in coordinates: + coord['x'] = int(round(coord['x'])) + coord['y'] = int(round(coord['y'])) + + normalized_y_values = [ + (1.0 - (point['y'] / mask_height) - 0.0) * (max_value - min_value) + min_value + for point in coordinates + ] + if float_output_type == 'list': + out_floats = normalized_y_values * repeat_output + elif float_output_type == 'pandas series': + try: + import pandas as pd + except: + raise Exception("MaskOrImageToWeight: pandas is not installed. Please install pandas to use this output_type") + out_floats = pd.Series(normalized_y_values * repeat_output), + elif float_output_type == 'tensor': + out_floats = torch.tensor(normalized_y_values * repeat_output, dtype=torch.float32) + # Create a color map for grayscale intensities + color_map = lambda y: torch.full((mask_height, mask_width, 3), y, dtype=torch.float32) + + # Create image tensors for each normalized y value + mask_tensors = [color_map(y) for y in normalized_y_values] + masks_out = torch.stack(mask_tensors) + masks_out = masks_out.repeat(repeat_output, 1, 1, 1) + masks_out = masks_out.mean(dim=-1) + return (masks_out, str(coordinates), out_floats, len(out_floats)) + +class CreateShapeMaskOnPath: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createshapemask" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Creates a mask or batch of masks with the specified shape. +Locations are center locations. +Grow value is the amount to grow the shape on each frame, creating animated masks. 
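+The optional size_multiplier input controls how much the shape grows (or shrinks, with negative values) on each frame.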
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "shape": ( + [ 'circle', + 'square', + 'triangle', + ], + { + "default": 'circle' + }), + "coordinates": ("STRING", {"forceInput": True}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "shape_width": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + "shape_height": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + }, + "optional": { + "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}), + } + } + + def createshapemask(self, coordinates, frame_width, frame_height, shape_width, shape_height, shape, size_multiplier=[1.0]): + # Define the number of images in the batch + coordinates = coordinates.replace("'", '"') + coordinates = json.loads(coordinates) + + batch_size = len(coordinates) + out = [] + color = "white" + if len(size_multiplier) != batch_size: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + for i, coord in enumerate(coordinates): + image = Image.new("RGB", (frame_width, frame_height), "black") + draw = ImageDraw.Draw(image) + + # Calculate the size for this frame and ensure it's not less than 0 + current_width = max(0, shape_width + i * size_multiplier[i]) + current_height = max(0, shape_height + i * size_multiplier[i]) + + location_x = coord['x'] + location_y = coord['y'] + + if shape == 'circle' or shape == 'square': + # Define the bounding box for the shape + left_up_point = (location_x - current_width // 2, location_y - current_height // 2) + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) + two_points = [left_up_point, right_down_point] + + if shape == 'circle': + draw.ellipse(two_points, fill=color) + elif shape == 'square': + draw.rectangle(two_points, fill=color) + + elif shape == 'triangle': + # Define the points for the triangle + left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right + top_point = (location_x, location_y - current_height // 2) # top point + draw.polygon([top_point, left_up_point, right_down_point], fill=color) + + image = pil2tensor(image) + mask = image[:, :, :, 0] + out.append(mask) + outstack = torch.cat(out, dim=0) + return (outstack, 1.0 - outstack,) + +class MaskOrImageToWeight: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "output_type": ( + [ + 'list', + 'pandas series', + 'tensor', + 'string' + ], + { + "default": 'list' + }), + }, + "optional": { + "images": ("IMAGE",), + "masks": ("MASK",), + }, + + } + RETURN_TYPES = ("FLOAT", "STRING",) + FUNCTION = "execute" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +Gets the mean values from mask or image batch +and returns that as the selected output type. 
+""" + + def execute(self, output_type, images=None, masks=None): + mean_values = [] + if masks is not None and images is None: + for mask in masks: + mean_values.append(mask.mean().item()) + elif masks is None and images is not None: + for image in images: + mean_values.append(image.mean().item()) + elif masks is not None and images is not None: + raise Exception("MaskOrImageToWeight: Use either mask or image input only.") + + # Convert mean_values to the specified output_type + if output_type == 'list': + out = mean_values, + elif output_type == 'pandas series': + try: + import pandas as pd + except: + raise Exception("MaskOrImageToWeight: pandas is not installed. Please install pandas to use this output_type") + out = pd.Series(mean_values), + elif output_type == 'tensor': + out = torch.tensor(mean_values, dtype=torch.float32), + return (out, [str(value) for value in mean_values],) + +class WeightScheduleConvert: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_values": ("FLOAT", {"default": 0.0, "forceInput": True}), + "output_type": ( + [ + 'match_input', + 'list', + 'pandas series', + 'tensor', + ], + { + "default": 'list' + }), + "invert": ("BOOLEAN", {"default": False}), + "repeat": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + }, + "optional": { + "remap_to_frames": ("INT", {"default": 0}), + "interpolation_curve": ("FLOAT", {"forceInput": True}), + "remap_values": ("BOOLEAN", {"default": False}), + "remap_min": ("FLOAT", {"default": 0.0, "min": -100000, "max": 100000.0, "step": 0.01}), + "remap_max": ("FLOAT", {"default": 1.0, "min": -100000, "max": 100000.0, "step": 0.01}), + }, + + } + RETURN_TYPES = ("FLOAT", "STRING", "INT",) + FUNCTION = "execute" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +Converts different value lists/series to another type. 
+""" + + def detect_input_type(self, input_values): + import pandas as pd + if isinstance(input_values, list): + return 'list' + elif isinstance(input_values, pd.Series): + return 'pandas series' + elif isinstance(input_values, torch.Tensor): + return 'tensor' + else: + raise ValueError("Unsupported input type") + + def execute(self, input_values, output_type, invert, repeat, remap_to_frames=0, interpolation_curve=None, remap_min=0.0, remap_max=1.0, remap_values=False): + import pandas as pd + input_type = self.detect_input_type(input_values) + + if input_type == 'pandas series': + float_values = input_values.tolist() + elif input_type == 'tensor': + float_values = input_values + else: + float_values = input_values + + if invert: + float_values = [1 - value for value in float_values] + + if interpolation_curve is not None: + interpolated_pattern = [] + orig_float_values = float_values + for value in interpolation_curve: + min_val = min(orig_float_values) + max_val = max(orig_float_values) + # Normalize the values to [0, 1] + normalized_values = [(value - min_val) / (max_val - min_val) for value in orig_float_values] + # Interpolate the normalized values to the new frame count + remapped_float_values = np.interp(np.linspace(0, 1, int(remap_to_frames * value)), np.linspace(0, 1, len(normalized_values)), normalized_values).tolist() + interpolated_pattern.extend(remapped_float_values) + float_values = interpolated_pattern + else: + # Remap float_values to match target_frame_amount + if remap_to_frames > 0 and remap_to_frames != len(float_values): + min_val = min(float_values) + max_val = max(float_values) + # Normalize the values to [0, 1] + normalized_values = [(value - min_val) / (max_val - min_val) for value in float_values] + # Interpolate the normalized values to the new frame count + float_values = np.interp(np.linspace(0, 1, remap_to_frames), np.linspace(0, 1, len(normalized_values)), normalized_values).tolist() + + float_values = float_values * repeat + if remap_values: + float_values = self.remap_values(float_values, remap_min, remap_max) + + if output_type == 'list': + out = float_values, + elif output_type == 'pandas series': + out = pd.Series(float_values), + elif output_type == 'tensor': + if input_type == 'pandas series': + out = torch.tensor(float_values.values, dtype=torch.float32), + else: + out = torch.tensor(float_values, dtype=torch.float32), + elif output_type == 'match_input': + out = float_values, + return (out, [str(value) for value in float_values], [int(value) for value in float_values]) + + def remap_values(self, values, target_min, target_max): + # Determine the current range + current_min = min(values) + current_max = max(values) + current_range = current_max - current_min + + # Determine the target range + target_range = target_max - target_min + + # Perform the linear interpolation for each value + remapped_values = [(value - current_min) / current_range * target_range + target_min for value in values] + + return remapped_values + + +class FloatToMask: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_values": ("FLOAT", {"forceInput": True, "default": 0}), + "width": ("INT", {"default": 100, "min": 1}), + "height": ("INT", {"default": 100, "min": 1}), + }, + } + RETURN_TYPES = ("MASK",) + FUNCTION = "execute" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Generates a batch of masks based on the input float values. +The batch size is determined by the length of the input float values. 
+Each mask is generated with the specified width and height. +""" + + def execute(self, input_values, width, height): + import pandas as pd + # Ensure input_values is a list + if isinstance(input_values, (float, int)): + input_values = [input_values] + elif isinstance(input_values, pd.Series): + input_values = input_values.tolist() + elif isinstance(input_values, list) and all(isinstance(item, list) for item in input_values): + input_values = [item for sublist in input_values for item in sublist] + + # Generate a batch of masks based on the input_values + masks = [] + for value in input_values: + # Assuming value is a float between 0 and 1 representing the mask's intensity + mask = torch.ones((height, width), dtype=torch.float32) * value + masks.append(mask) + masks_out = torch.stack(masks, dim=0) + + return(masks_out,) +class WeightScheduleExtend: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_values_1": ("FLOAT", {"default": 0.0, "forceInput": True}), + "input_values_2": ("FLOAT", {"default": 0.0, "forceInput": True}), + "output_type": ( + [ + 'match_input', + 'list', + 'pandas series', + 'tensor', + ], + { + "default": 'match_input' + }), + }, + + } + RETURN_TYPES = ("FLOAT",) + FUNCTION = "execute" + CATEGORY = "KJNodes/weights" + DESCRIPTION = """ +Extends, and converts if needed, different value lists/series +""" + + def detect_input_type(self, input_values): + import pandas as pd + if isinstance(input_values, list): + return 'list' + elif isinstance(input_values, pd.Series): + return 'pandas series' + elif isinstance(input_values, torch.Tensor): + return 'tensor' + else: + raise ValueError("Unsupported input type") + + def execute(self, input_values_1, input_values_2, output_type): + import pandas as pd + input_type_1 = self.detect_input_type(input_values_1) + input_type_2 = self.detect_input_type(input_values_2) + # Convert input_values_2 to the same format as input_values_1 if they do not match + if not input_type_1 == input_type_2: + print("Converting input_values_2 to the same format as input_values_1") + if input_type_1 == 'pandas series': + # Convert input_values_2 to a pandas Series + float_values_2 = pd.Series(input_values_2) + elif input_type_1 == 'tensor': + # Convert input_values_2 to a tensor + float_values_2 = torch.tensor(input_values_2, dtype=torch.float32) + else: + print("Input types match, no conversion needed") + # If the types match, no conversion is needed + float_values_2 = input_values_2 + + float_values = input_values_1 + float_values_2 + + if output_type == 'list': + return float_values, + elif output_type == 'pandas series': + return pd.Series(float_values), + elif output_type == 'tensor': + if input_type_1 == 'pandas series': + return torch.tensor(float_values.values, dtype=torch.float32), + else: + return torch.tensor(float_values, dtype=torch.float32), + elif output_type == 'match_input': + return float_values, + else: + raise ValueError(f"Unsupported output_type: {output_type}") + +class FloatToSigmas: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "float_list": ("FLOAT", {"default": 0.0, "forceInput": True}), + } + } + RETURN_TYPES = ("SIGMAS",) + RETURN_NAMES = ("SIGMAS",) + CATEGORY = "KJNodes/noise" + FUNCTION = "customsigmas" + DESCRIPTION = """ +Creates a sigmas tensor from list of float values. 
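+The values are used as-is, in the given order, with no sorting or scaling applied.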
+ +""" + def customsigmas(self, float_list): + return torch.tensor(float_list, dtype=torch.float32), + +class GLIGENTextBoxApplyBatchCoords: + @classmethod + def INPUT_TYPES(s): + return {"required": {"conditioning_to": ("CONDITIONING", ), + "latents": ("LATENT", ), + "clip": ("CLIP", ), + "gligen_textbox_model": ("GLIGEN", ), + "coordinates": ("STRING", {"forceInput": True}), + "text": ("STRING", {"multiline": True}), + "width": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + "height": ("INT", {"default": 128, "min": 8, "max": 4096, "step": 8}), + }, + "optional": {"size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True})}, + } + RETURN_TYPES = ("CONDITIONING", "IMAGE", ) + RETURN_NAMES = ("conditioning", "coord_preview", ) + FUNCTION = "append" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +This node allows scheduling GLIGEN text box positions in a batch, +to be used with AnimateDiff-Evolved. Intended to pair with the +Spline Editor -node. + +GLIGEN model can be downloaded through the Manage's "Install Models" menu. +Or directly from here: +https://huggingface.co/comfyanonymous/GLIGEN_pruned_safetensors/tree/main + +Inputs: +- **latents** input is used to calculate batch size +- **clip** is your standard text encoder, use same as for the main prompt +- **gligen_textbox_model** connects to GLIGEN Loader +- **coordinates** takes a json string of points, directly compatible +with the spline editor node. +- **text** is the part of the prompt to set position for +- **width** and **height** are the size of the GLIGEN bounding box + +Outputs: +- **conditioning** goes between to clip text encode and the sampler +- **coord_preview** is an optional preview of the coordinates and +bounding boxes. + +""" + + def append(self, latents, coordinates, conditioning_to, clip, gligen_textbox_model, text, width, height, size_multiplier=[1.0]): + coordinates = json.loads(coordinates.replace("'", '"')) + coordinates = [(coord['x'], coord['y']) for coord in coordinates] + + batch_size = sum(tensor.size(0) for tensor in latents.values()) + if len(coordinates) != batch_size: + print("GLIGENTextBoxApplyBatchCoords WARNING: The number of coordinates does not match the number of latents") + + c = [] + _, cond_pooled = clip.encode_from_tokens(clip.tokenize(text), return_pooled=True) + + for t in conditioning_to: + n = [t[0], t[1].copy()] + + position_params_batch = [[] for _ in range(batch_size)] # Initialize a list of empty lists for each batch item + if len(size_multiplier) != batch_size: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + + for i in range(batch_size): + x_position, y_position = coordinates[i] + position_param = (cond_pooled, int((height // 8) * size_multiplier[i]), int((width // 8) * size_multiplier[i]), (y_position - height // 2) // 8, (x_position - width // 2) // 8) + position_params_batch[i].append(position_param) # Append position_param to the correct sublist + + prev = [] + if "gligen" in n[1]: + prev = n[1]['gligen'][2] + else: + prev = [[] for _ in range(batch_size)] + # Concatenate prev and position_params_batch, ensuring both are lists of lists + # and each sublist corresponds to a batch item + combined_position_params = [prev_item + batch_item for prev_item, batch_item in zip(prev, position_params_batch)] + n[1]['gligen'] = ("position_batched", gligen_textbox_model, combined_position_params) + c.append(n) + + image_height = latents['samples'].shape[-2] * 8 + image_width = 
latents['samples'].shape[-1] * 8 + plot_image_tensor = plot_coordinates_to_tensor(coordinates, image_height, image_width, height, width, size_multiplier, text) + + return (c, plot_image_tensor,) + +class CreateInstanceDiffusionTracking: + + RETURN_TYPES = ("TRACKING", "STRING", "INT", "INT", "INT", "INT",) + RETURN_NAMES = ("tracking", "prompt", "width", "height", "bbox_width", "bbox_height",) + FUNCTION = "tracking" + CATEGORY = "KJNodes/InstanceDiffusion" + DESCRIPTION = """ +Creates tracking data to be used with InstanceDiffusion: +https://github.com/logtd/ComfyUI-InstanceDiffusion + +InstanceDiffusion prompt format: +"class_id.class_name": "prompt", +for example: +"1.head": "((head))", +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "coordinates": ("STRING", {"forceInput": True}), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "bbox_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "bbox_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "class_name": ("STRING", {"default": "class_name"}), + "class_id": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "prompt": ("STRING", {"default": "prompt", "multiline": True}), + }, + "optional": { + "size_multiplier": ("FLOAT", {"default": [1.0], "forceInput": True}), + } + } + + def tracking(self, coordinates, class_name, class_id, width, height, bbox_width, bbox_height, prompt, size_multiplier=[1.0]): + # Define the number of images in the batch + coordinates = coordinates.replace("'", '"') + coordinates = json.loads(coordinates) + + tracked = {} + tracked[class_name] = {} + batch_size = len(coordinates) + # Initialize a list to hold the coordinates for the current ID + id_coordinates = [] + if len(size_multiplier) != batch_size: + size_multiplier = size_multiplier * (batch_size // len(size_multiplier)) + size_multiplier[:batch_size % len(size_multiplier)] + for i, coord in enumerate(coordinates): + x = coord['x'] + y = coord['y'] + adjusted_bbox_width = bbox_width * size_multiplier[i] + adjusted_bbox_height = bbox_height * size_multiplier[i] + # Calculate the top left and bottom right coordinates + top_left_x = x - adjusted_bbox_width // 2 + top_left_y = y - adjusted_bbox_height // 2 + bottom_right_x = x + adjusted_bbox_width // 2 + bottom_right_y = y + adjusted_bbox_height // 2 + + # Append the top left and bottom right coordinates to the list for the current ID + id_coordinates.append([top_left_x, top_left_y, bottom_right_x, bottom_right_y, width, height]) + + class_id = int(class_id) + # Assign the list of coordinates to the specified ID within the class_id dictionary + tracked[class_name][class_id] = id_coordinates + + prompt_string = "" + for class_name, class_data in tracked.items(): + for class_id in class_data.keys(): + class_id_str = str(class_id) + # Use the incoming prompt for each class name and ID + prompt_string += f'"{class_id_str}.{class_name}": "({prompt})",\n' + + # Remove the last comma and newline + prompt_string = prompt_string.rstrip(",\n") + + return (tracked, prompt_string, width, height, bbox_width, bbox_height) + +class AppendInstanceDiffusionTracking: + + RETURN_TYPES = ("TRACKING", "STRING",) + RETURN_NAMES = ("tracking", "prompt",) + FUNCTION = "append" + CATEGORY = "KJNodes/InstanceDiffusion" + DESCRIPTION = """ +Appends tracking data to be used with InstanceDiffusion: +https://github.com/logtd/ComfyUI-InstanceDiffusion + +""" + + 
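+ # Note: entries from tracking_2 are merged into a copy of tracking_1; duplicate class names have their class IDs combined, and the two prompts are joined with a comma.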
@classmethod + def INPUT_TYPES(s): + return { + "required": { + "tracking_1": ("TRACKING", {"forceInput": True}), + "tracking_2": ("TRACKING", {"forceInput": True}), + }, + "optional": { + "prompt_1": ("STRING", {"default": "", "forceInput": True}), + "prompt_2": ("STRING", {"default": "", "forceInput": True}), + } + } + + def append(self, tracking_1, tracking_2, prompt_1="", prompt_2=""): + tracking_copy = tracking_1.copy() + # Check for existing class names and class IDs, and raise an error if they exist + for class_name, class_data in tracking_2.items(): + if class_name not in tracking_copy: + tracking_copy[class_name] = class_data + else: + # If the class name exists, merge the class data from tracking_2 into tracking_copy + # This will add new class IDs under the same class name without raising an error + tracking_copy[class_name].update(class_data) + prompt_string = prompt_1 + "," + prompt_2 + return (tracking_copy, prompt_string) + +class InterpolateCoords: + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("coordinates",) + FUNCTION = "interpolate" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Interpolates coordinates based on a curve. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "coordinates": ("STRING", {"forceInput": True}), + "interpolation_curve": ("FLOAT", {"forceInput": True}), + + }, + } + + def interpolate(self, coordinates, interpolation_curve): + # Parse the JSON string to get the list of coordinates + coordinates = json.loads(coordinates.replace("'", '"')) + + # Convert the list of dictionaries to a list of (x, y) tuples for easier processing + coordinates = [(coord['x'], coord['y']) for coord in coordinates] + + # Calculate the total length of the original path + path_length = sum(np.linalg.norm(np.array(coordinates[i]) - np.array(coordinates[i-1])) for i in range(1, len(coordinates))) + + # Normalize the interpolation curve + normalized_curve = [x / path_length for x in interpolation_curve] + + # Initialize variables for interpolation + interpolated_coords = [] + current_length = 0 + current_index = 1 + + # Iterate over the normalized curve + for target_length in normalized_curve: + target_length *= path_length # Convert back to the original scale + while current_length < target_length and current_index < len(coordinates): + segment_length = np.linalg.norm(np.array(coordinates[current_index]) - np.array(coordinates[current_index-1])) + current_length += segment_length + current_index += 1 + + # Interpolate between the last two points + if current_index == 1: + interpolated_coords.append(coordinates[0]) + else: + p1, p2 = np.array(coordinates[current_index-2]), np.array(coordinates[current_index-1]) + segment_length = np.linalg.norm(p2 - p1) + if segment_length > 0: + t = (target_length - (current_length - segment_length)) / segment_length + interpolated_point = p1 + t * (p2 - p1) + interpolated_coords.append(interpolated_point.tolist()) + else: + interpolated_coords.append(p1.tolist()) + + # Convert back to string format if necessary + interpolated_coords_str = "[" + ", ".join([f"{{'x': {round(coord[0])}, 'y': {round(coord[1])}}}" for coord in interpolated_coords]) + "]" + + return (interpolated_coords_str, ) + +class DrawInstanceDiffusionTracking: + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image", ) + FUNCTION = "draw" + CATEGORY = "KJNodes/InstanceDiffusion" + DESCRIPTION = """ +Draws the tracking data from +CreateInstanceDiffusionTracking -node. 
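+Text labels are drawn with arial.ttf, which needs to be available on the system.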
+ +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE", ), + "tracking": ("TRACKING", {"forceInput": True}), + "box_line_width": ("INT", {"default": 2, "min": 1, "max": 10, "step": 1}), + "draw_text": ("BOOLEAN", {"default": True}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "font_size": ("INT", {"default": 20}), + }, + } + + def draw(self, image, tracking, box_line_width, draw_text, font, font_size): + import matplotlib.cm as cm + + modified_images = [] + + colormap = cm.get_cmap('rainbow', len(tracking)) + if draw_text: + #font = ImageFont.load_default() + font = ImageFont.truetype("arial.ttf", font_size) + + # Iterate over each image in the batch + for i in range(image.shape[0]): + # Extract the current image and convert it to a PIL image + # Adjust the tensor to (C, H, W) for ToPILImage + current_image = image[i, :, :, :].permute(2, 0, 1) + pil_image = transforms.ToPILImage()(current_image) + + draw = ImageDraw.Draw(pil_image) + + # Iterate over the bounding boxes for the current image + for j, (class_name, class_data) in enumerate(tracking.items()): + for class_id, bbox_list in class_data.items(): + # Check if the current index is within the bounds of the bbox_list + if i < len(bbox_list): + bbox = bbox_list[i] + # Ensure bbox is a list or tuple before unpacking + if isinstance(bbox, (list, tuple)): + x1, y1, x2, y2, _, _ = bbox + # Convert coordinates to integers + x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2) + # Generate a color from the rainbow colormap + color = tuple(int(255 * x) for x in colormap(j / len(tracking)))[:3] + # Draw the bounding box on the image with the generated color + draw.rectangle([x1, y1, x2, y2], outline=color, width=box_line_width) + if draw_text: + # Draw the class name and ID as text above the box with the generated color + text = f"{class_id}.{class_name}" + # Calculate the width and height of the text + _, _, text_width, text_height = draw.textbbox((0, 0), text=text, font=font) + # Position the text above the top-left corner of the box + text_position = (x1, y1 - text_height) + draw.text(text_position, text, fill=color, font=font) + else: + print(f"Unexpected data type for bbox: {type(bbox)}") + + # Convert the drawn image back to a torch tensor and adjust back to (H, W, C) + modified_image_tensor = transforms.ToTensor()(pil_image).permute(1, 2, 0) + modified_images.append(modified_image_tensor) + + # Stack the modified images back into a batch + image_tensor_batch = torch.stack(modified_images).cpu().float() + + return image_tensor_batch, \ No newline at end of file diff --git a/nodes/image_nodes.py b/nodes/image_nodes.py new file mode 100644 index 0000000..4dec4d3 --- /dev/null +++ b/nodes/image_nodes.py @@ -0,0 +1,1076 @@ +import numpy as np +import time +import torch +import torch.nn.functional as F +import random +import math +import os +import re +import json +from PIL import ImageGrab, ImageDraw, ImageFont, Image + +from nodes import MAX_RESOLUTION, SaveImage +from comfy_extras.nodes_mask import ImageCompositeMasked +from comfy.cli_args import args +from comfy.utils import ProgressBar +import folder_paths +import model_management + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +class ImagePass: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + }, + } + RETURN_TYPES = ("IMAGE",) + FUNCTION = "passthrough" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Passes the image through without modifying it. 
+""" + + def passthrough(self, image): + return image, + +class ColorMatch: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "image_ref": ("IMAGE",), + "image_target": ("IMAGE",), + "method": ( + [ + 'mkl', + 'hm', + 'reinhard', + 'mvgd', + 'hm-mvgd-hm', + 'hm-mkl-hm', + ], { + "default": 'mkl' + }), + + }, + } + + CATEGORY = "KJNodes/image" + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "colormatch" + DESCRIPTION = """ +color-matcher enables color transfer across images which comes in handy for automatic +color-grading of photographs, paintings and film sequences as well as light-field +and stopmotion corrections. + +The methods behind the mappings are based on the approach from Reinhard et al., +the Monge-Kantorovich Linearization (MKL) as proposed by Pitie et al. and our analytical solution +to a Multi-Variate Gaussian Distribution (MVGD) transfer in conjunction with classical histogram +matching. As shown below our HM-MVGD-HM compound outperforms existing methods. +https://github.com/hahnec/color-matcher/ + +""" + + def colormatch(self, image_ref, image_target, method): + try: + from color_matcher import ColorMatcher + except: + raise Exception("Can't import color-matcher, did you install requirements.txt? Manual install: pip install color-matcher") + cm = ColorMatcher() + image_ref = image_ref.cpu() + image_target = image_target.cpu() + batch_size = image_target.size(0) + out = [] + images_target = image_target.squeeze() + images_ref = image_ref.squeeze() + + image_ref_np = images_ref.numpy() + images_target_np = images_target.numpy() + + if image_ref.size(0) > 1 and image_ref.size(0) != batch_size: + raise ValueError("ColorMatch: Use either single reference image or a matching batch of reference images.") + + for i in range(batch_size): + image_target_np = images_target_np if batch_size == 1 else images_target[i].numpy() + image_ref_np_i = image_ref_np if image_ref.size(0) == 1 else images_ref[i].numpy() + try: + image_result = cm.transfer(src=image_target_np, ref=image_ref_np_i, method=method) + except BaseException as e: + print(f"Error occurred during transfer: {e}") + break + out.append(torch.from_numpy(image_result)) + return (torch.stack(out, dim=0).to(torch.float32), ) + +class SaveImageWithAlpha: + def __init__(self): + self.output_dir = folder_paths.get_output_directory() + self.type = "output" + self.prefix_append = "" + + @classmethod + def INPUT_TYPES(s): + return {"required": + {"images": ("IMAGE", ), + "mask": ("MASK", ), + "filename_prefix": ("STRING", {"default": "ComfyUI"})}, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + + RETURN_TYPES = () + FUNCTION = "save_images_alpha" + OUTPUT_NODE = True + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Saves an image and mask as .PNG with the mask as the alpha channel. 
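# --- Editor's sketch (illustrative only, not part of this diff) ---
# The ColorMatch node above wraps the color-matcher package. Standalone, on
# plain 0-1 float HWC numpy arrays (the same layout the node feeds it), the
# transfer looks roughly like this; the file names are placeholders.
import numpy as np
from PIL import Image
from color_matcher import ColorMatcher

target = np.asarray(Image.open("target.png").convert("RGB"), dtype=np.float32) / 255.0
reference = np.asarray(Image.open("reference.png").convert("RGB"), dtype=np.float32) / 255.0

cm = ColorMatcher()
# method can be 'mkl', 'hm', 'reinhard', 'mvgd', 'hm-mvgd-hm' or 'hm-mkl-hm',
# matching the options exposed by the node.
matched = cm.transfer(src=target, ref=reference, method="mkl")

Image.fromarray(np.clip(matched * 255, 0, 255).astype(np.uint8)).save("matched.png")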
+""" + + def save_images_alpha(self, images, mask, filename_prefix="ComfyUI_image_with_alpha", prompt=None, extra_pnginfo=None): + from PIL.PngImagePlugin import PngInfo + filename_prefix += self.prefix_append + full_output_folder, filename, counter, subfolder, filename_prefix = folder_paths.get_save_image_path(filename_prefix, self.output_dir, images[0].shape[1], images[0].shape[0]) + results = list() + if mask.dtype == torch.float16: + mask = mask.to(torch.float32) + def file_counter(): + max_counter = 0 + # Loop through the existing files + for existing_file in os.listdir(full_output_folder): + # Check if the file matches the expected format + match = re.fullmatch(fr"{filename}_(\d+)_?\.[a-zA-Z0-9]+", existing_file) + if match: + # Extract the numeric portion of the filename + file_counter = int(match.group(1)) + # Update the maximum counter value if necessary + if file_counter > max_counter: + max_counter = file_counter + return max_counter + + for image, alpha in zip(images, mask): + i = 255. * image.cpu().numpy() + a = 255. * alpha.cpu().numpy() + img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8)) + + # Resize the mask to match the image size + a_resized = Image.fromarray(a).resize(img.size, Image.LANCZOS) + a_resized = np.clip(a_resized, 0, 255).astype(np.uint8) + img.putalpha(Image.fromarray(a_resized, mode='L')) + metadata = None + if not args.disable_metadata: + metadata = PngInfo() + if prompt is not None: + metadata.add_text("prompt", json.dumps(prompt)) + if extra_pnginfo is not None: + for x in extra_pnginfo: + metadata.add_text(x, json.dumps(extra_pnginfo[x])) + + # Increment the counter by 1 to get the next available value + counter = file_counter() + 1 + file = f"{filename}_{counter:05}.png" + img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4) + results.append({ + "filename": file, + "subfolder": subfolder, + "type": self.type + }) + + return { "ui": { "images": results } } + +class ImageConcanate: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "direction": ( + [ 'right', + 'down', + 'left', + 'up', + ], + { + "default": 'right' + }), + "match_image_size": ("BOOLEAN", {"default": False}), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "concanate" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the image2 to image1 in the specified direction. +""" + + def concanate(self, image1, image2, direction, match_image_size): + if match_image_size: + image2 = torch.nn.functional.interpolate(image2, size=(image1.shape[2], image1.shape[3]), mode="bilinear") + if direction == 'right': + row = torch.cat((image1, image2), dim=2) + elif direction == 'down': + row = torch.cat((image1, image2), dim=1) + elif direction == 'left': + row = torch.cat((image2, image1), dim=2) + elif direction == 'up': + row = torch.cat((image2, image1), dim=1) + return (row,) + +class ImageGridComposite2x2: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "image3": ("IMAGE",), + "image4": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "compositegrid" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the 4 input images into a 2x2 grid. 
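# --- Editor's sketch (illustrative only, not part of this diff) ---
# The grid composite nodes above rely on ComfyUI images being (B, H, W, C)
# tensors, so width is dim 2 and height is dim 1. A quick standalone check of
# the same concatenation pattern on dummy data:
import torch

img = lambda v: torch.full((1, 4, 4, 3), float(v))  # dummy 4x4 RGB "images"
top = torch.cat((img(1), img(2)), dim=2)      # side by side -> width doubles
bottom = torch.cat((img(3), img(4)), dim=2)
grid = torch.cat((top, bottom), dim=1)        # stacked -> height doubles
print(grid.shape)  # torch.Size([1, 8, 8, 3])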
+""" + + def compositegrid(self, image1, image2, image3, image4): + top_row = torch.cat((image1, image2), dim=2) + bottom_row = torch.cat((image3, image4), dim=2) + grid = torch.cat((top_row, bottom_row), dim=1) + return (grid,) + +class ImageGridComposite3x3: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image1": ("IMAGE",), + "image2": ("IMAGE",), + "image3": ("IMAGE",), + "image4": ("IMAGE",), + "image5": ("IMAGE",), + "image6": ("IMAGE",), + "image7": ("IMAGE",), + "image8": ("IMAGE",), + "image9": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "compositegrid" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Concatenates the 9 input images into a 3x3 grid. +""" + + def compositegrid(self, image1, image2, image3, image4, image5, image6, image7, image8, image9): + top_row = torch.cat((image1, image2, image3), dim=2) + mid_row = torch.cat((image4, image5, image6), dim=2) + bottom_row = torch.cat((image7, image8, image9), dim=2) + grid = torch.cat((top_row, mid_row, bottom_row), dim=1) + return (grid,) + +class ImageBatchTestPattern: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "batch_size": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + "start_from": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "text_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "text_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "font_size": ("INT", {"default": 255,"min": 8, "max": 4096, "step": 1}), + }} + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "generatetestpattern" + CATEGORY = "KJNodes/text" + + def generatetestpattern(self, batch_size, font, font_size, start_from, width, height, text_x, text_y): + out = [] + # Generate the sequential numbers for each image + numbers = np.arange(start_from, start_from + batch_size) + font_path = folder_paths.get_full_path("kjnodes_fonts", font) + + for number in numbers: + # Create a black image with the number as a random color text + image = Image.new("RGB", (width, height), color='black') + draw = ImageDraw.Draw(image) + + # Generate a random color for the text + font_color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) + + font = ImageFont.truetype(font_path, font_size) + + # Get the size of the text and position it in the center + text = str(number) + + try: + draw.text((text_x, text_y), text, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, text_y), text, font=font, fill=font_color,) + + # Convert the image to a numpy array and normalize the pixel values + image_np = np.array(image).astype(np.float32) / 255.0 + image_tensor = torch.from_numpy(image_np).unsqueeze(0) + out.append(image_tensor) + out_tensor = torch.cat(out, dim=0) + + return (out_tensor,) + +class ImageGrabPIL: + + @classmethod + def IS_CHANGED(cls): + + return + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "screencap" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Captures an area specified by screen coordinates. +Can be used for realtime diffusion with autoqueue. 
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "width": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 0, "max": 4096, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": 1, "max": 255, "step": 1}), + "delay": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.01}), + }, + } + + def screencap(self, x, y, width, height, num_frames, delay): + captures = [] + bbox = (x, y, x + width, y + height) + + for _ in range(num_frames): + # Capture screen + screen_capture = ImageGrab.grab(bbox=bbox) + screen_capture_torch = torch.tensor(np.array(screen_capture), dtype=torch.float32) / 255.0 + screen_capture_torch = screen_capture_torch.unsqueeze(0) + captures.append(screen_capture_torch) + + # Wait for a short delay if more than one frame is to be captured + if num_frames > 1: + time.sleep(delay) + + return (torch.cat(captures, dim=0),) + +class AddLabel: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image":("IMAGE",), + "text_x": ("INT", {"default": 10, "min": 0, "max": 4096, "step": 1}), + "text_y": ("INT", {"default": 2, "min": 0, "max": 4096, "step": 1}), + "height": ("INT", {"default": 48, "min": 0, "max": 4096, "step": 1}), + "font_size": ("INT", {"default": 32, "min": 0, "max": 4096, "step": 1}), + "font_color": ("STRING", {"default": "white"}), + "label_color": ("STRING", {"default": "black"}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "text": ("STRING", {"default": "Text"}), + "direction": ( + [ 'up', + 'down', + 'left', + 'right', + 'overlay' + ], + { + "default": 'up' + }), + }, + "optional":{ + "caption": ("STRING", {"default": "", "forceInput": True}), + } + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "addlabel" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Creates a new with the given text, and concatenates it to +either above or below the input image. +Note that this changes the input image's height! 
+Fonts are loaded from this folder: +ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts +""" + + def addlabel(self, image, text_x, text_y, text, height, font_size, font_color, label_color, font, direction, caption=""): + batch_size = image.shape[0] + width = image.shape[2] + + font_path = os.path.join(script_directory, "fonts", "TTNorms-Black.otf") if font == "TTNorms-Black.otf" else folder_paths.get_full_path("kjnodes_fonts", font) + + def process_image(input_image, caption_text): + if direction == 'overlay': + pil_image = Image.fromarray((input_image.cpu().numpy() * 255).astype(np.uint8)) + else: + label_image = Image.new("RGB", (width, height), label_color) + pil_image = label_image + + draw = ImageDraw.Draw(pil_image) + font = ImageFont.truetype(font_path, font_size) + + words = caption_text.split() + + lines = [] + current_line = [] + current_line_width = 0 + for word in words: + word_width = font.getbbox(word)[2] + if current_line_width + word_width <= width - 2 * text_x: + current_line.append(word) + current_line_width += word_width + font.getbbox(" ")[2] # Add space width + else: + lines.append(" ".join(current_line)) + current_line = [word] + current_line_width = word_width + + if current_line: + lines.append(" ".join(current_line)) + + y_offset = text_y + for line in lines: + try: + draw.text((text_x, y_offset), line, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, y_offset), line, font=font, fill=font_color) + y_offset += font_size # Move to the next line + + processed_image = torch.from_numpy(np.array(pil_image).astype(np.float32) / 255.0).unsqueeze(0) + return processed_image + + if caption == "": + processed_images = [process_image(img, text) for img in image] + else: + assert len(caption) == batch_size, "Number of captions does not match number of images" + processed_images = [process_image(img, cap) for img, cap in zip(image, caption)] + processed_batch = torch.cat(processed_images, dim=0) + + # Combine images based on direction + if direction == 'down': + combined_images = torch.cat((image, processed_batch), dim=1) + elif direction == 'up': + combined_images = torch.cat((processed_batch, image), dim=1) + elif direction == 'left': + processed_batch = torch.rot90(processed_batch, 3, (2, 3)).permute(0, 3, 1, 2) + combined_images = torch.cat((processed_batch, image), dim=2) + elif direction == 'right': + processed_batch = torch.rot90(processed_batch, 3, (2, 3)).permute(0, 3, 1, 2) + combined_images = torch.cat((image, processed_batch), dim=2) + else: + combined_images = processed_batch + + return (combined_images,) + +class GetImageSizeAndCount: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + }} + + RETURN_TYPES = ("IMAGE","INT", "INT", "INT",) + RETURN_NAMES = ("image", "width", "height", "count",) + FUNCTION = "getsize" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Returns width, height and batch size of the image, +and passes it through unchanged. + +""" + + def getsize(self, image): + width = image.shape[2] + height = image.shape[1] + count = image.shape[0] + return {"ui": { + "text": [f"{count}x{width}x{height}"]}, + "result": (image, width, height, count) + } + +class ImageBatchRepeatInterleaving: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "repeat" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Repeats each image in a batch by the specified number of times. 
+Example batch of 5 images: 0, 1 ,2, 3, 4 +with repeats 2 becomes batch of 10 images: 0, 0, 1, 1, 2, 2, 3, 3, 4, 4 +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "repeats": ("INT", {"default": 1, "min": 1, "max": 4096}), + }, + } + + def repeat(self, images, repeats): + + repeated_images = torch.repeat_interleave(images, repeats=repeats, dim=0) + return (repeated_images, ) + +class ImageUpscaleWithModelBatched: + @classmethod + def INPUT_TYPES(s): + return {"required": { "upscale_model": ("UPSCALE_MODEL",), + "images": ("IMAGE",), + "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "upscale" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Same as ComfyUI native model upscaling node, +but allows setting sub-batches for reduced VRAM usage. +""" + def upscale(self, upscale_model, images, per_batch): + + device = model_management.get_torch_device() + upscale_model.to(device) + in_img = images.movedim(-1,-3).to(device) + + steps = in_img.shape[0] + pbar = ProgressBar(steps) + t = [] + + for start_idx in range(0, in_img.shape[0], per_batch): + sub_images = upscale_model(in_img[start_idx:start_idx+per_batch]) + t.append(sub_images.cpu()) + # Calculate the number of images processed in this batch + batch_count = sub_images.shape[0] + # Update the progress bar by the number of images processed in this batch + pbar.update(batch_count) + upscale_model.cpu() + + t = torch.cat(t, dim=0).permute(0, 2, 3, 1).cpu() + + return (t,) + +class ImageNormalize_Neg1_To_1: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "images": ("IMAGE",), + + }} + RETURN_TYPES = ("IMAGE",) + FUNCTION = "normalize" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Normalize the images to be in the range [-1, 1] +""" + + def normalize(self,images): + images = images * 2.0 - 1.0 + return (images,) + +class RemapImageRange: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}), + "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}), + "clamp": ("BOOLEAN", {"default": True}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "remap" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Remaps the image values to the specified range. +""" + + def remap(self, image, min, max, clamp): + if image.dtype == torch.float16: + image = image.to(torch.float32) + image = min + image * (max - min) + if clamp: + image = torch.clamp(image, min=0.0, max=1.0) + return (image, ) + +class SplitImageChannels: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "image": ("IMAGE",), + }, + } + + RETURN_TYPES = ("IMAGE", "IMAGE", "IMAGE", "MASK") + RETURN_NAMES = ("red", "green", "blue", "mask") + FUNCTION = "split" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Splits image channels into images where the selected channel +is repeated for all channels, and the alpha as a mask. 
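# --- Editor's sketch (illustrative only, not part of this diff) ---
# torch.repeat_interleave, as used by ImageBatchRepeatInterleaving above, keeps
# the repeats grouped per source image rather than tiling the whole batch.
# Quick standalone check of the documented ordering on a dummy index tensor:
import torch

frames = torch.arange(5)                                   # batch "0, 1, 2, 3, 4"
print(torch.repeat_interleave(frames, repeats=2, dim=0))   # tensor([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])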
+""" + + def split(self, image): + red = image[:, :, :, 0:1] # Red channel + green = image[:, :, :, 1:2] # Green channel + blue = image[:, :, :, 2:3] # Blue channel + alpha = image[:, :, :, 3:4] # Alpha channel + alpha = alpha.squeeze(-1) + + # Repeat the selected channel for all channels + red = torch.cat([red, red, red], dim=3) + green = torch.cat([green, green, green], dim=3) + blue = torch.cat([blue, blue, blue], dim=3) + return (red, green, blue, alpha) + +class MergeImageChannels: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "red": ("IMAGE",), + "green": ("IMAGE",), + "blue": ("IMAGE",), + + }, + "optional": { + "mask": ("MASK", {"default": None}), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("image",) + FUNCTION = "merge" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Merges channel data into an image. +""" + + def merge(self, red, green, blue, alpha=None): + image = torch.stack([ + red[..., 0, None], # Red channel + green[..., 1, None], # Green channel + blue[..., 2, None] # Blue channel + ], dim=-1) + image = image.squeeze(-2) + if alpha is not None: + image = torch.cat([image, alpha], dim=-1) + return (image,) + +class ImagePadForOutpaintMasked: + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "image": ("IMAGE",), + "left": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "top": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "right": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "bottom": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 8}), + "feathering": ("INT", {"default": 0, "min": 0, "max": MAX_RESOLUTION, "step": 1}), + }, + "optional": { + "mask": ("MASK",), + } + } + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "expand_image" + + CATEGORY = "image" + + def expand_image(self, image, left, top, right, bottom, feathering, mask=None): + if mask is not None: + if torch.allclose(mask, torch.zeros_like(mask)): + print("Warning: The incoming mask is fully black. 
Handling it as None.") + mask = None + B, H, W, C = image.size() + + new_image = torch.ones( + (B, H + top + bottom, W + left + right, C), + dtype=torch.float32, + ) * 0.5 + + new_image[:, top:top + H, left:left + W, :] = image + + if mask is None: + new_mask = torch.ones( + (B, H + top + bottom, W + left + right), + dtype=torch.float32, + ) + + t = torch.zeros( + (B, H, W), + dtype=torch.float32 + ) + else: + # If a mask is provided, pad it to fit the new image size + mask = F.pad(mask, (left, right, top, bottom), mode='constant', value=0) + mask = 1 - mask + t = torch.zeros_like(mask) + + if feathering > 0 and feathering * 2 < H and feathering * 2 < W: + + for i in range(H): + for j in range(W): + dt = i if top != 0 else H + db = H - i if bottom != 0 else H + + dl = j if left != 0 else W + dr = W - j if right != 0 else W + + d = min(dt, db, dl, dr) + + if d >= feathering: + continue + + v = (feathering - d) / feathering + + if mask is None: + t[:, i, j] = v * v + else: + t[:, top + i, left + j] = v * v + + if mask is None: + new_mask[:, top:top + H, left:left + W] = t + return (new_image, new_mask,) + else: + return (new_image, mask,) + +class ImageAndMaskPreview(SaveImage): + def __init__(self): + self.output_dir = folder_paths.get_temp_directory() + self.type = "temp" + self.prefix_append = "_temp_" + ''.join(random.choice("abcdefghijklmnopqrstupvxyz") for x in range(5)) + self.compress_level = 4 + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask_opacity": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "mask_color": ("STRING", {"default": "255, 255, 255"}), + "pass_through": ("BOOLEAN", {"default": False}), + }, + "optional": { + "image": ("IMAGE",), + "mask": ("MASK",), + }, + "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO"}, + } + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("composite",) + FUNCTION = "execute" + CATEGORY = "KJNodes" + DESCRIPTION = """ +Preview an image or a mask, when both inputs are used +composites the mask on top of the image. +with pass_through on the preview is disabled and the +composite is returned from the composite slot instead, +this allows for the preview to be passed for video combine +nodes for example. 
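# --- Editor's sketch (illustrative only, not part of this diff) ---
# The composite produced by ImageAndMaskPreview is conceptually a per-pixel
# lerp between the image and a solid mask color, weighted by mask * mask_opacity.
# A minimal standalone version of that idea (not the ImageCompositeMasked call
# the node actually uses) on (B, H, W, C) tensors:
import torch

def preview_composite(image, mask, color=(1.0, 1.0, 1.0), opacity=1.0):
    weight = (mask * opacity).unsqueeze(-1)           # (B, H, W, 1) blend weight
    color = torch.tensor(color).view(1, 1, 1, 3)      # solid RGB layer
    return image * (1.0 - weight) + color * weight

blended = preview_composite(torch.zeros(1, 64, 64, 3), torch.ones(1, 64, 64), color=(1.0, 0.0, 0.0))
print(blended[0, 0, 0])  # tensor([1., 0., 0.]) -> fully red where the mask is 1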
+"""
+
+    def execute(self, mask_opacity, mask_color, pass_through, filename_prefix="ComfyUI", image=None, mask=None, prompt=None, extra_pnginfo=None):
+        if mask is not None and image is None:
+            preview = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3)
+        elif mask is None and image is not None:
+            preview = image
+        elif mask is not None and image is not None:
+            mask_adjusted = mask * mask_opacity
+            mask_image = mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])).movedim(1, -1).expand(-1, -1, -1, 3).clone()
+
+            color_list = list(map(int, mask_color.split(', ')))
+            # Use true division so 0-255 color inputs map to the 0.0-1.0 range of the image tensors
+            mask_image[:, :, :, 0] = color_list[0] / 255 # Red channel
+            mask_image[:, :, :, 1] = color_list[1] / 255 # Green channel
+            mask_image[:, :, :, 2] = color_list[2] / 255 # Blue channel
+
+            preview, = ImageCompositeMasked.composite(self, image, mask_image, 0, 0, True, mask_adjusted)
+        if pass_through:
+            return (preview, )
+        return(self.save_images(preview, filename_prefix, prompt, extra_pnginfo))
+
+class CrossFadeImages:
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "crossfadeimages"
+    CATEGORY = "KJNodes/image"
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {
+            "required": {
+                "images_1": ("IMAGE",),
+                "images_2": ("IMAGE",),
+                "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out", "bounce", "elastic", "glitchy", "exponential_ease_out"],),
+                "transition_start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}),
+                "transitioning_frames": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}),
+                "start_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}),
+                "end_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}),
+            },
+        }
+
+    def crossfadeimages(self, images_1, images_2, transition_start_index, transitioning_frames, interpolation, start_level, end_level):
+
+        def crossfade(images_1, images_2, alpha):
+            crossfade = (1 - alpha) * images_1 + alpha * images_2
+            return crossfade
+        def ease_in(t):
+            return t * t
+        def ease_out(t):
+            return 1 - (1 - t) * (1 - t)
+        def ease_in_out(t):
+            return 3 * t * t - 2 * t * t * t
+        def bounce(t):
+            # Call the local easing helpers; they are not methods on the class
+            if t < 0.5:
+                return ease_out(t * 2) * 0.5
+            else:
+                return ease_in((t - 0.5) * 2) * 0.5 + 0.5
+        def elastic(t):
+            return math.sin(13 * math.pi / 2 * t) * math.pow(2, 10 * (t - 1))
+        def glitchy(t):
+            return t + 0.1 * math.sin(40 * t)
+        def exponential_ease_out(t):
+            return 1 - (1 - t) ** 4
+
+        easing_functions = {
+            "linear": lambda t: t,
+            "ease_in": ease_in,
+            "ease_out": ease_out,
+            "ease_in_out": ease_in_out,
+            "bounce": bounce,
+            "elastic": elastic,
+            "glitchy": glitchy,
+            "exponential_ease_out": exponential_ease_out,
+        }
+
+        crossfade_images = []
+
+        alphas = torch.linspace(start_level, end_level, transitioning_frames)
+        for i in range(transitioning_frames):
+            alpha = alphas[i]
+            image1 = images_1[i + transition_start_index]
+            image2 = images_2[i + transition_start_index]
+            easing_function = easing_functions.get(interpolation)
+            alpha = easing_function(alpha) # Apply the easing function to the alpha value
+
+            crossfade_image = crossfade(image1, image2, alpha)
+            crossfade_images.append(crossfade_image)
+
+        # Convert crossfade_images to tensor
+        crossfade_images = torch.stack(crossfade_images, dim=0)
+        # Get the last frame result of the interpolation
+        last_frame = crossfade_images[-1]
+        # Calculate the number of remaining frames from images_2
+        remaining_frames = len(images_2) - (transition_start_index + transitioning_frames)
+        # Crossfade the remaining frames with the last used
alpha value + for i in range(remaining_frames): + alpha = alphas[-1] + image1 = images_1[i + transition_start_index + transitioning_frames] + image2 = images_2[i + transition_start_index + transitioning_frames] + easing_function = easing_functions.get(interpolation) + alpha = easing_function(alpha) # Apply the easing function to the alpha value + + crossfade_image = crossfade(image1, image2, alpha) + crossfade_images = torch.cat([crossfade_images, crossfade_image.unsqueeze(0)], dim=0) + # Append the beginning of images_1 + beginning_images_1 = images_1[:transition_start_index] + crossfade_images = torch.cat([beginning_images_1, crossfade_images], dim=0) + return (crossfade_images, ) + +class GetImageRangeFromBatch: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "imagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates a new batch using images from the input, +batch, starting from start_index. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "start_index": ("INT", {"default": 0,"min": -1, "max": 4096, "step": 1}), + "num_frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + }, + } + + def imagesfrombatch(self, images, start_index, num_frames): + if start_index == -1: + start_index = len(images) - num_frames + if start_index < 0 or start_index >= len(images): + raise ValueError("GetImageRangeFromBatch: Start index is out of range") + end_index = start_index + num_frames + if end_index > len(images): + raise ValueError("GetImageRangeFromBatch: End index is out of range") + chosen_images = images[start_index:end_index] + return (chosen_images, ) + +class GetImagesFromBatchIndexed: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "indexedimagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Selects and returns the images at the specified indices as an image batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), + }, + } + + def indexedimagesfrombatch(self, images, indexes): + + # Parse the indexes string into a list of integers + index_list = [int(index.strip()) for index in indexes.split(',')] + + # Convert list of indices to a PyTorch tensor + indices_tensor = torch.tensor(index_list, dtype=torch.long) + + # Select the images at the specified indices + chosen_images = images[indices_tensor] + + return (chosen_images,) + +class InsertImagesToBatchIndexed: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "insertimagesfrombatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Inserts images at the specified indices into the original image batch. 
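# --- Editor's sketch (illustrative only, not part of this diff) ---
# Both the indexed select and indexed insert nodes above come down to plain
# tensor indexing on the batch dimension. Standalone, on a dummy batch:
import torch

batch = torch.arange(6).float().view(6, 1, 1, 1).expand(6, 4, 4, 3).clone()  # dummy 6-image batch
idx = torch.tensor([0, 2, 5])

picked = batch[idx]                     # GetImagesFromBatchIndexed: new batch of 3 images
batch[idx] = torch.zeros(3, 4, 4, 3)    # InsertImagesToBatchIndexed: overwrite those slots in place
print(picked.shape, batch[0].max().item())  # torch.Size([3, 4, 4, 3]) 0.0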
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "original_images": ("IMAGE",), + "images_to_insert": ("IMAGE",), + "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), + }, + } + + def insertimagesfrombatch(self, original_images, images_to_insert, indexes): + + # Parse the indexes string into a list of integers + index_list = [int(index.strip()) for index in indexes.split(',')] + + # Convert list of indices to a PyTorch tensor + indices_tensor = torch.tensor(index_list, dtype=torch.long) + + # Ensure the images_to_insert is a tensor + if not isinstance(images_to_insert, torch.Tensor): + images_to_insert = torch.tensor(images_to_insert) + + # Insert the images at the specified indices + for index, image in zip(indices_tensor, images_to_insert): + original_images[index] = image + + return (original_images,) + +class ReplaceImagesInBatch: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "replace" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Replaces the images in a batch, starting from the specified start index, +with the replacement images. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "original_images": ("IMAGE",), + "replacement_images": ("IMAGE",), + "start_index": ("INT", {"default": 1,"min": 0, "max": 4096, "step": 1}), + }, + } + + def replace(self, original_images, replacement_images, start_index): + images = None + if start_index >= len(original_images): + raise ValueError("GetImageRangeFromBatch: Start index is out of range") + end_index = start_index + len(replacement_images) + if end_index > len(original_images): + raise ValueError("GetImageRangeFromBatch: End index is out of range") + # Create a copy of the original_images tensor + original_images_copy = original_images.clone() + original_images_copy[start_index:end_index] = replacement_images + images = original_images_copy + return (images, ) + + +class ReverseImageBatch: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "reverseimagebatch" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Reverses the order of the images in a batch. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + }, + } + + def reverseimagebatch(self, images): + reversed_images = torch.flip(images, [0]) + return (reversed_images, ) + +class ImageBatchMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "image_1": ("IMAGE", ), + "image_2": ("IMAGE", ), + }, + } + + RETURN_TYPES = ("IMAGE",) + RETURN_NAMES = ("images",) + FUNCTION = "combine" + CATEGORY = "KJNodes/image" + DESCRIPTION = """ +Creates an image batch from multiple images. +You can set how many inputs the node has, +with the **inputcount** and clicking update. 
+""" + + def combine(self, inputcount, **kwargs): + from nodes import ImageBatch + image_batch_node = ImageBatch() + image = kwargs["image_1"] + for c in range(1, inputcount): + new_image = kwargs[f"image_{c + 1}"] + image, = image_batch_node.batch(image, new_image) + return (image,) diff --git a/nodes/intrinsic_lora_nodes.py b/nodes/intrinsic_lora_nodes.py new file mode 100644 index 0000000..798cf74 --- /dev/null +++ b/nodes/intrinsic_lora_nodes.py @@ -0,0 +1,115 @@ +import folder_paths +import os +import torch +import torch.nn.functional as F +from comfy.utils import ProgressBar, load_torch_file +import comfy.sample +from nodes import CLIPTextEncode + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +folder_paths.add_model_folder_path("intristic_loras", os.path.join(script_directory, "intristic_loras")) + +class Intrinsic_lora_sampling: + def __init__(self): + self.loaded_lora = None + + @classmethod + def INPUT_TYPES(s): + return {"required": { "model": ("MODEL",), + "lora_name": (folder_paths.get_filename_list("intristic_loras"), ), + "task": ( + [ + 'depth map', + 'surface normals', + 'albedo', + 'shading', + ], + { + "default": 'depth map' + }), + "text": ("STRING", {"multiline": True, "default": ""}), + "clip": ("CLIP", ), + "vae": ("VAE", ), + "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), + }, + "optional": { + "image": ("IMAGE",), + "optional_latent": ("LATENT",), + }, + } + + RETURN_TYPES = ("IMAGE", "LATENT",) + FUNCTION = "onestepsample" + CATEGORY = "KJNodes" + DESCRIPTION = """ +Sampler to use the intrinsic loras: +https://github.com/duxiaodan/intrinsic-lora +These LoRAs are tiny and thus included +with this node pack. +""" + + def onestepsample(self, model, lora_name, clip, vae, text, task, per_batch, image=None, optional_latent=None): + pbar = ProgressBar(3) + + if optional_latent is None: + image_list = [] + for start_idx in range(0, image.shape[0], per_batch): + sub_pixels = vae.vae_encode_crop_pixels(image[start_idx:start_idx+per_batch]) + image_list.append(vae.encode(sub_pixels[:,:,:,:3])) + sample = torch.cat(image_list, dim=0) + else: + sample = optional_latent["samples"] + noise = torch.zeros(sample.size(), dtype=sample.dtype, layout=sample.layout, device="cpu") + prompt = task + "," + text + positive, = CLIPTextEncode.encode(self, clip, prompt) + negative = positive #negative shouldn't do anything in this scenario + + pbar.update(1) + + #custom model sampling to pass latent through as it is + class X0_PassThrough(comfy.model_sampling.EPS): + def calculate_denoised(self, sigma, model_output, model_input): + return model_output + def calculate_input(self, sigma, noise): + return noise + sampling_base = comfy.model_sampling.ModelSamplingDiscrete + sampling_type = X0_PassThrough + + class ModelSamplingAdvanced(sampling_base, sampling_type): + pass + model_sampling = ModelSamplingAdvanced(model.model.model_config) + + #load lora + model_clone = model.clone() + lora_path = folder_paths.get_full_path("intristic_loras", lora_name) + lora = load_torch_file(lora_path, safe_load=True) + self.loaded_lora = (lora_path, lora) + + model_clone_with_lora = comfy.sd.load_lora_for_models(model_clone, None, lora, 1.0, 0)[0] + + model_clone_with_lora.add_object_patch("model_sampling", model_sampling) + + samples = {"samples": comfy.sample.sample(model_clone_with_lora, noise, 1, 1.0, "euler", "simple", positive, negative, sample, + denoise=1.0, disable_noise=True, start_step=0, last_step=1, + force_full_denoise=True, 
noise_mask=None, callback=None, disable_pbar=True, seed=None)} + pbar.update(1) + + decoded = [] + for start_idx in range(0, samples["samples"].shape[0], per_batch): + decoded.append(vae.decode(samples["samples"][start_idx:start_idx+per_batch])) + image_out = torch.cat(decoded, dim=0) + + pbar.update(1) + + if task == 'depth map': + imax = image_out.max() + imin = image_out.min() + image_out = (image_out-imin)/(imax-imin) + image_out = torch.max(image_out, dim=3, keepdim=True)[0].repeat(1, 1, 1, 3) + elif task == 'surface normals': + image_out = F.normalize(image_out * 2 - 1, dim=3) / 2 + 0.5 + image_out = 1.0 - image_out + else: + image_out = image_out.clamp(-1.,1.) + + return (image_out, samples,) \ No newline at end of file diff --git a/nodes/mask_nodes.py b/nodes/mask_nodes.py new file mode 100644 index 0000000..8b1d9f0 --- /dev/null +++ b/nodes/mask_nodes.py @@ -0,0 +1,1166 @@ +import torch +import torch.nn.functional as F +from torchvision.transforms import functional as TF +from PIL import Image, ImageDraw, ImageFilter, ImageFont +import scipy.ndimage +import numpy as np + +import matplotlib.pyplot as plt +from contextlib import nullcontext +import os + +import model_management +from comfy.utils import ProgressBar +from nodes import MAX_RESOLUTION + +import folder_paths + +from ..utility.utility import tensor2pil, pil2tensor + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +class BatchCLIPSeg: + + def __init__(self): + pass + + @classmethod + def INPUT_TYPES(s): + + return {"required": + { + "images": ("IMAGE",), + "text": ("STRING", {"multiline": False}), + "threshold": ("FLOAT", {"default": 0.1,"min": 0.0, "max": 10.0, "step": 0.001}), + "binary_mask": ("BOOLEAN", {"default": True}), + "combine_mask": ("BOOLEAN", {"default": False}), + "use_cuda": ("BOOLEAN", {"default": True}), + }, + } + + CATEGORY = "KJNodes/masking" + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("Mask",) + FUNCTION = "segment_image" + DESCRIPTION = """ +Segments an image or batch of images using CLIPSeg. 
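# --- Editor's sketch (illustrative only, not part of this diff) ---
# The BatchCLIPSeg node defined here wraps Hugging Face CLIPSeg. The core call,
# stripped of the batching, autocast and resizing the node adds; the image path
# and prompt are placeholders.
import torch
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.open("photo.png").convert("RGB")
inputs = processor(text="a cat", images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits   # low-resolution heatmap; the node later resizes it to the input size
mask = torch.sigmoid(logits)          # 0-1 probabilities, which the node then thresholds
print(mask.shape)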
+""" + + def segment_image(self, images, text, threshold, binary_mask, combine_mask, use_cuda): + from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation + out = [] + height, width, _ = images[0].shape + if use_cuda and torch.cuda.is_available(): + device = torch.device("cuda") + else: + device = torch.device("cpu") + dtype = model_management.unet_dtype() + model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined") + model.to(dtype) + model.to(device) + images = images.to(device) + processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") + pbar = ProgressBar(images.shape[0]) + autocast_condition = (dtype != torch.float32) and not model_management.is_device_mps(device) + with torch.autocast(model_management.get_autocast_device(device), dtype=dtype) if autocast_condition else nullcontext(): + for image in images: + image = (image* 255).type(torch.uint8) + prompt = text + input_prc = processor(text=prompt, images=image, return_tensors="pt") + # Move the processed input to the device + for key in input_prc: + input_prc[key] = input_prc[key].to(device) + + outputs = model(**input_prc) + + tensor = torch.sigmoid(outputs[0]) + tensor_thresholded = torch.where(tensor > threshold, tensor, torch.tensor(0, dtype=torch.float)) + tensor_normalized = (tensor_thresholded - tensor_thresholded.min()) / (tensor_thresholded.max() - tensor_thresholded.min()) + tensor = tensor_normalized + + # Resize the mask + if len(tensor.shape) == 3: + tensor = tensor.unsqueeze(0) + resized_tensor = F.interpolate(tensor, size=(height, width), mode='nearest') + + # Remove the extra dimensions + resized_tensor = resized_tensor[0, 0, :, :] + pbar.update(1) + out.append(resized_tensor) + + results = torch.stack(out).cpu().float() + + if combine_mask: + combined_results = torch.max(results, dim=0)[0] + results = combined_results.unsqueeze(0).repeat(len(images),1,1) + + if binary_mask: + results = results.round() + + return results, + +class CreateTextMask: + + RETURN_TYPES = ("IMAGE", "MASK",) + FUNCTION = "createtextmask" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Creates a text image and mask. +Looks for fonts from this folder: +ComfyUI/custom_nodes/ComfyUI-KJNodes/fonts + +If start_rotation and/or end_rotation are different values, +creates animation between them. 
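# --- Editor's sketch (illustrative only, not part of this diff) ---
# CreateTextMask (and AddLabel earlier) wrap text with a greedy loop: keep
# adding words while the measured line still fits the available width. Roughly
# the same step in isolation; the font path is a placeholder.
from PIL import ImageFont

def wrap_words(text, font, max_width):
    lines, current = [], []
    for word in text.split():
        candidate = " ".join(current + [word])
        if font.getbbox(candidate)[2] <= max_width or not current:
            current.append(word)
        else:
            lines.append(" ".join(current))
            current = [word]
    if current:
        lines.append(" ".join(current))
    return lines

font = ImageFont.truetype("fonts/FreeMono.ttf", 32)  # placeholder font file
print(wrap_words("HELLO FROM A LONG TEST STRING", font, max_width=200))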
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + "text_x": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "text_y": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + "font_size": ("INT", {"default": 32,"min": 8, "max": 4096, "step": 1}), + "font_color": ("STRING", {"default": "white"}), + "text": ("STRING", {"default": "HELLO!", "multiline": True}), + "font": (folder_paths.get_filename_list("kjnodes_fonts"), ), + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "start_rotation": ("INT", {"default": 0,"min": 0, "max": 359, "step": 1}), + "end_rotation": ("INT", {"default": 0,"min": -359, "max": 359, "step": 1}), + }, + } + + def createtextmask(self, frames, width, height, invert, text_x, text_y, text, font_size, font_color, font, start_rotation, end_rotation): + # Define the number of images in the batch + batch_size = frames + out = [] + masks = [] + rotation = start_rotation + if start_rotation != end_rotation: + rotation_increment = (end_rotation - start_rotation) / (batch_size - 1) + + font_path = folder_paths.get_full_path("kjnodes_fonts", font) + # Generate the text + for i in range(batch_size): + image = Image.new("RGB", (width, height), "black") + draw = ImageDraw.Draw(image) + font = ImageFont.truetype(font_path, font_size) + + # Split the text into words + words = text.split() + + # Initialize variables for line creation + lines = [] + current_line = [] + current_line_width = 0 + try: #new pillow + # Iterate through words to create lines + for word in words: + word_width = font.getbbox(word)[2] + if current_line_width + word_width <= width - 2 * text_x: + current_line.append(word) + current_line_width += word_width + font.getbbox(" ")[2] # Add space width + else: + lines.append(" ".join(current_line)) + current_line = [word] + current_line_width = word_width + except: #old pillow + for word in words: + word_width = font.getsize(word)[0] + if current_line_width + word_width <= width - 2 * text_x: + current_line.append(word) + current_line_width += word_width + font.getsize(" ")[0] # Add space width + else: + lines.append(" ".join(current_line)) + current_line = [word] + current_line_width = word_width + + # Add the last line if it's not empty + if current_line: + lines.append(" ".join(current_line)) + + # Draw each line of text separately + y_offset = text_y + for line in lines: + text_width = font.getlength(line) + text_height = font_size + text_center_x = text_x + text_width / 2 + text_center_y = y_offset + text_height / 2 + try: + draw.text((text_x, y_offset), line, font=font, fill=font_color, features=['-liga']) + except: + draw.text((text_x, y_offset), line, font=font, fill=font_color) + y_offset += text_height # Move to the next line + + if start_rotation != end_rotation: + image = image.rotate(rotation, center=(text_center_x, text_center_y)) + rotation += rotation_increment + + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + mask = image[:, :, :, 0] + masks.append(mask) + out.append(image) + + if invert: + return (1.0 - torch.cat(out, dim=0), 1.0 - torch.cat(masks, dim=0),) + return (torch.cat(out, dim=0),torch.cat(masks, dim=0),) + +class ColorToMask: + + RETURN_TYPES = ("MASK",) + FUNCTION = "clip" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Converts chosen RGB value 
to a mask. +With batch inputs, the **per_batch** +controls the number of images processed at once. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "images": ("IMAGE",), + "invert": ("BOOLEAN", {"default": False}), + "red": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "green": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "blue": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "threshold": ("INT", {"default": 10,"min": 0, "max": 255, "step": 1}), + "per_batch": ("INT", {"default": 16, "min": 1, "max": 4096, "step": 1}), + }, + } + + def clip(self, images, red, green, blue, threshold, invert, per_batch): + + color = torch.tensor([red, green, blue], dtype=torch.uint8) + black = torch.tensor([0, 0, 0], dtype=torch.uint8) + white = torch.tensor([255, 255, 255], dtype=torch.uint8) + + if invert: + black, white = white, black + + steps = images.shape[0] + pbar = ProgressBar(steps) + tensors_out = [] + + for start_idx in range(0, images.shape[0], per_batch): + + # Calculate color distances + color_distances = torch.norm(images[start_idx:start_idx+per_batch] * 255 - color, dim=-1) + + # Create a mask based on the threshold + mask = color_distances <= threshold + + # Apply the mask to create new images + mask_out = torch.where(mask.unsqueeze(-1), white, black).float() + mask_out = mask_out.mean(dim=-1) + + tensors_out.append(mask_out.cpu()) + batch_count = mask_out.shape[0] + pbar.update(batch_count) + + tensors_out = torch.cat(tensors_out, dim=0) + tensors_out = torch.clamp(tensors_out, min=0.0, max=1.0) + return tensors_out, + +class CreateFluidMask: + + RETURN_TYPES = ("IMAGE", "MASK") + FUNCTION = "createfluidmask" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "inflow_count": ("INT", {"default": 3,"min": 0, "max": 255, "step": 1}), + "inflow_velocity": ("INT", {"default": 1,"min": 0, "max": 255, "step": 1}), + "inflow_radius": ("INT", {"default": 8,"min": 0, "max": 255, "step": 1}), + "inflow_padding": ("INT", {"default": 50,"min": 0, "max": 255, "step": 1}), + "inflow_duration": ("INT", {"default": 60,"min": 0, "max": 255, "step": 1}), + }, + } + #using code from https://github.com/GregTJ/stable-fluids + def createfluidmask(self, frames, width, height, invert, inflow_count, inflow_velocity, inflow_radius, inflow_padding, inflow_duration): + from ..utility.fluid import Fluid + from scipy.spatial import erf + out = [] + masks = [] + RESOLUTION = width, height + DURATION = frames + + INFLOW_PADDING = inflow_padding + INFLOW_DURATION = inflow_duration + INFLOW_RADIUS = inflow_radius + INFLOW_VELOCITY = inflow_velocity + INFLOW_COUNT = inflow_count + + print('Generating fluid solver, this may take some time.') + fluid = Fluid(RESOLUTION, 'dye') + + center = np.floor_divide(RESOLUTION, 2) + r = np.min(center) - INFLOW_PADDING + + points = np.linspace(-np.pi, np.pi, INFLOW_COUNT, endpoint=False) + points = tuple(np.array((np.cos(p), np.sin(p))) for p in points) + normals = tuple(-p for p in points) + points = tuple(r * p + center for p in points) + + inflow_velocity = np.zeros_like(fluid.velocity) + inflow_dye = np.zeros(fluid.shape) + for p, n in zip(points, normals): + mask = np.linalg.norm(fluid.indices - p[:, None, 
None], axis=0) <= INFLOW_RADIUS + inflow_velocity[:, mask] += n[:, None] * INFLOW_VELOCITY + inflow_dye[mask] = 1 + + + for f in range(DURATION): + print(f'Computing frame {f + 1} of {DURATION}.') + if f <= INFLOW_DURATION: + fluid.velocity += inflow_velocity + fluid.dye += inflow_dye + + curl = fluid.step()[1] + # Using the error function to make the contrast a bit higher. + # Any other sigmoid function e.g. smoothstep would work. + curl = (erf(curl * 2) + 1) / 4 + + color = np.dstack((curl, np.ones(fluid.shape), fluid.dye)) + color = (np.clip(color, 0, 1) * 255).astype('uint8') + image = np.array(color).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + mask = image[:, :, :, 0] + masks.append(mask) + out.append(image) + + if invert: + return (1.0 - torch.cat(out, dim=0),1.0 - torch.cat(masks, dim=0),) + return (torch.cat(out, dim=0),torch.cat(masks, dim=0),) + +class CreateAudioMask: + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "createaudiomask" + CATEGORY = "KJNodes/deprecated" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 16,"min": 1, "max": 255, "step": 1}), + "scale": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 2.0, "step": 0.01}), + "audio_path": ("STRING", {"default": "audio.wav"}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + }, + } + + def createaudiomask(self, frames, width, height, invert, audio_path, scale): + try: + import librosa + except ImportError: + raise Exception("Can not import librosa. Install it with 'pip install librosa'") + batch_size = frames + out = [] + masks = [] + if audio_path == "audio.wav": #I don't know why relative path won't work otherwise... 
+ audio_path = os.path.join(script_directory, audio_path) + audio, sr = librosa.load(audio_path) + spectrogram = np.abs(librosa.stft(audio)) + + for i in range(batch_size): + image = Image.new("RGB", (width, height), "black") + draw = ImageDraw.Draw(image) + frame = spectrogram[:, i] + circle_radius = int(height * np.mean(frame)) + circle_radius *= scale + circle_center = (width // 2, height // 2) # Calculate the center of the image + + draw.ellipse([(circle_center[0] - circle_radius, circle_center[1] - circle_radius), + (circle_center[0] + circle_radius, circle_center[1] + circle_radius)], + fill='white') + + image = np.array(image).astype(np.float32) / 255.0 + image = torch.from_numpy(image)[None,] + mask = image[:, :, :, 0] + masks.append(mask) + out.append(image) + + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),torch.cat(masks, dim=0),) + +class CreateGradientMask: + + RETURN_TYPES = ("MASK",) + FUNCTION = "createmask" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 0,"min": 0, "max": 255, "step": 1}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + }, + } + def createmask(self, frames, width, height, invert): + # Define the number of images in the batch + batch_size = frames + out = [] + # Create an empty array to store the image batch + image_batch = np.zeros((batch_size, height, width), dtype=np.float32) + # Generate the black to white gradient for each image + for i in range(batch_size): + gradient = np.linspace(1.0, 0.0, width, dtype=np.float32) + time = i / frames # Calculate the time variable + offset_gradient = gradient - time # Offset the gradient values based on time + image_batch[i] = offset_gradient.reshape(1, -1) + output = torch.from_numpy(image_batch) + mask = output + out.append(mask) + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),) + +class CreateFadeMask: + + RETURN_TYPES = ("MASK",) + FUNCTION = "createfademask" + CATEGORY = "KJNodes/deprecated" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 2,"min": 2, "max": 255, "step": 1}), + "width": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 256,"min": 16, "max": 4096, "step": 1}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), + "start_level": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 1.0, "step": 0.01}), + "midpoint_level": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 1.0, "step": 0.01}), + "end_level": ("FLOAT", {"default": 0.0,"min": 0.0, "max": 1.0, "step": 0.01}), + "midpoint_frame": ("INT", {"default": 0,"min": 0, "max": 4096, "step": 1}), + }, + } + + def createfademask(self, frames, width, height, invert, interpolation, start_level, midpoint_level, end_level, midpoint_frame): + def ease_in(t): + return t * t + + def ease_out(t): + return 1 - (1 - t) * (1 - t) + + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + batch_size = frames + out = [] + image_batch = np.zeros((batch_size, height, width), dtype=np.float32) + + if midpoint_frame == 0: + midpoint_frame = batch_size // 2 + + for i in range(batch_size): + if i <= midpoint_frame: + t = i / midpoint_frame + if interpolation == "ease_in": + t = ease_in(t) + elif interpolation == 
"ease_out": + t = ease_out(t) + elif interpolation == "ease_in_out": + t = ease_in_out(t) + color = start_level - t * (start_level - midpoint_level) + else: + t = (i - midpoint_frame) / (batch_size - midpoint_frame) + if interpolation == "ease_in": + t = ease_in(t) + elif interpolation == "ease_out": + t = ease_out(t) + elif interpolation == "ease_in_out": + t = ease_in_out(t) + color = midpoint_level - t * (midpoint_level - end_level) + + color = np.clip(color, 0, 255) + image = np.full((height, width), color, dtype=np.float32) + image_batch[i] = image + + output = torch.from_numpy(image_batch) + mask = output + out.append(mask) + + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),) + +class CreateFadeMaskAdvanced: + + RETURN_TYPES = ("MASK",) + FUNCTION = "createfademask" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Create a batch of masks interpolated between given frames and values. +Uses same syntax as Fizz' BatchValueSchedule. +First value is the frame index (not that this starts from 0, not 1) +and the second value inside the brackets is the float value of the mask in range 0.0 - 1.0 + +For example the default values: +0:(0.0) +7:(1.0) +15:(0.0) + +Would create a mask batch fo 16 frames, starting from black, +interpolating with the chosen curve to fully white at the 8th frame, +and interpolating from that to fully black at the 16th frame. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "points_string": ("STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": True}), + "invert": ("BOOLEAN", {"default": False}), + "frames": ("INT", {"default": 16,"min": 2, "max": 255, "step": 1}), + "width": ("INT", {"default": 512,"min": 1, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 1, "max": 4096, "step": 1}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), + }, + } + + def createfademask(self, frames, width, height, invert, points_string, interpolation): + def ease_in(t): + return t * t + + def ease_out(t): + return 1 - (1 - t) * (1 - t) + + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + # Parse the input string into a list of tuples + points = [] + points_string = points_string.rstrip(',\n') + for point_str in points_string.split(','): + frame_str, color_str = point_str.split(':') + frame = int(frame_str.strip()) + color = float(color_str.strip()[1:-1]) # Remove parentheses around color + points.append((frame, color)) + + # Check if the last frame is already in the points + if len(points) == 0 or points[-1][0] != frames - 1: + # If not, add it with the color of the last specified frame + points.append((frames - 1, points[-1][1] if points else 0)) + + # Sort the points by frame number + points.sort(key=lambda x: x[0]) + + batch_size = frames + out = [] + image_batch = np.zeros((batch_size, height, width), dtype=np.float32) + + # Index of the next point to interpolate towards + next_point = 1 + + for i in range(batch_size): + while next_point < len(points) and i > points[next_point][0]: + next_point += 1 + + # Interpolate between the previous point and the next point + prev_point = next_point - 1 + t = (i - points[prev_point][0]) / (points[next_point][0] - points[prev_point][0]) + if interpolation == "ease_in": + t = ease_in(t) + elif interpolation == "ease_out": + t = ease_out(t) + elif interpolation == "ease_in_out": + t = ease_in_out(t) + elif interpolation == "linear": + pass # No need to modify `t` for linear interpolation + + color = 
points[prev_point][1] - t * (points[prev_point][1] - points[next_point][1]) + color = np.clip(color, 0, 255) + image = np.full((height, width), color, dtype=np.float32) + image_batch[i] = image + + output = torch.from_numpy(image_batch) + mask = output + out.append(mask) + + if invert: + return (1.0 - torch.cat(out, dim=0),) + return (torch.cat(out, dim=0),) + +class CreateMagicMask: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createmagicmask" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "frames": ("INT", {"default": 16,"min": 2, "max": 4096, "step": 1}), + "depth": ("INT", {"default": 12,"min": 1, "max": 500, "step": 1}), + "distortion": ("FLOAT", {"default": 1.5,"min": 0.0, "max": 100.0, "step": 0.01}), + "seed": ("INT", {"default": 123,"min": 0, "max": 99999999, "step": 1}), + "transitions": ("INT", {"default": 1,"min": 1, "max": 20, "step": 1}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + }, + } + + def createmagicmask(self, frames, transitions, depth, distortion, seed, frame_width, frame_height): + from ..utility.magictex import coordinate_grid, random_transform, magic + rng = np.random.default_rng(seed) + out = [] + coords = coordinate_grid((frame_width, frame_height)) + + # Calculate the number of frames for each transition + frames_per_transition = frames // transitions + + # Generate a base set of parameters + base_params = { + "coords": random_transform(coords, rng), + "depth": depth, + "distortion": distortion, + } + for t in range(transitions): + # Generate a second set of parameters that is at most max_diff away from the base parameters + params1 = base_params.copy() + params2 = base_params.copy() + + params1['coords'] = random_transform(coords, rng) + params2['coords'] = random_transform(coords, rng) + + for i in range(frames_per_transition): + # Compute the interpolation factor + alpha = i / frames_per_transition + + # Interpolate between the two sets of parameters + params = params1.copy() + params['coords'] = (1 - alpha) * params1['coords'] + alpha * params2['coords'] + + tex = magic(**params) + + dpi = frame_width / 10 + fig = plt.figure(figsize=(10, 10), dpi=dpi) + + ax = fig.add_subplot(111) + plt.subplots_adjust(left=0, right=1, bottom=0, top=1) + + ax.get_yaxis().set_ticks([]) + ax.get_xaxis().set_ticks([]) + ax.imshow(tex, aspect='auto') + + fig.canvas.draw() + img = np.array(fig.canvas.renderer._renderer) + + plt.close(fig) + + pil_img = Image.fromarray(img).convert("L") + mask = torch.tensor(np.array(pil_img)) / 255.0 + + out.append(mask) + + return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) + +class CreateShapeMask: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createshapemask" + CATEGORY = "KJNodes/masking/generate" + DESCRIPTION = """ +Creates a mask or batch of masks with the specified shape. +Locations are center locations. +Grow value is the amount to grow the shape on each frame, creating animated masks. 
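# --- Editor's sketch (illustrative only, not part of this diff) ---
# The grow parameter of CreateShapeMask simply enlarges the drawn bounding box a
# little on every frame. The core of one animated circle mask, standalone:
import torch
import numpy as np
from PIL import Image, ImageDraw

frames, size, radius, grow = 8, 256, 32, 6
masks = []
for i in range(frames):
    r = max(0, radius + i * grow)
    img = Image.new("L", (size, size), 0)
    ImageDraw.Draw(img).ellipse(
        [size // 2 - r, size // 2 - r, size // 2 + r, size // 2 + r], fill=255)
    masks.append(torch.from_numpy(np.array(img, dtype=np.float32) / 255.0))
mask_batch = torch.stack(masks)   # (8, 256, 256) mask batch, values 0.0-1.0
print(mask_batch.shape)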
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "shape": ( + [ 'circle', + 'square', + 'triangle', + ], + { + "default": 'circle' + }), + "frames": ("INT", {"default": 1,"min": 1, "max": 4096, "step": 1}), + "location_x": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "location_y": ("INT", {"default": 256,"min": 0, "max": 4096, "step": 1}), + "grow": ("INT", {"default": 0, "min": -512, "max": 512, "step": 1}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "shape_width": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + "shape_height": ("INT", {"default": 128,"min": 8, "max": 4096, "step": 1}), + }, + } + + def createshapemask(self, frames, frame_width, frame_height, location_x, location_y, shape_width, shape_height, grow, shape): + # Define the number of images in the batch + batch_size = frames + out = [] + color = "white" + for i in range(batch_size): + image = Image.new("RGB", (frame_width, frame_height), "black") + draw = ImageDraw.Draw(image) + + # Calculate the size for this frame and ensure it's not less than 0 + current_width = max(0, shape_width + i*grow) + current_height = max(0, shape_height + i*grow) + + if shape == 'circle' or shape == 'square': + # Define the bounding box for the shape + left_up_point = (location_x - current_width // 2, location_y - current_height // 2) + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) + two_points = [left_up_point, right_down_point] + + if shape == 'circle': + draw.ellipse(two_points, fill=color) + elif shape == 'square': + draw.rectangle(two_points, fill=color) + + elif shape == 'triangle': + # Define the points for the triangle + left_up_point = (location_x - current_width // 2, location_y + current_height // 2) # bottom left + right_down_point = (location_x + current_width // 2, location_y + current_height // 2) # bottom right + top_point = (location_x, location_y - current_height // 2) # top point + draw.polygon([top_point, left_up_point, right_down_point], fill=color) + + image = pil2tensor(image) + mask = image[:, :, :, 0] + out.append(mask) + outstack = torch.cat(out, dim=0) + return (outstack, 1.0 - outstack,) + +class CreateVoronoiMask: + + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "createvoronoi" + CATEGORY = "KJNodes/masking/generate" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "frames": ("INT", {"default": 16,"min": 2, "max": 4096, "step": 1}), + "num_points": ("INT", {"default": 15,"min": 1, "max": 4096, "step": 1}), + "line_width": ("INT", {"default": 4,"min": 1, "max": 4096, "step": 1}), + "speed": ("FLOAT", {"default": 0.5,"min": 0.0, "max": 1.0, "step": 0.01}), + "frame_width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "frame_height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + }, + } + + def createvoronoi(self, frames, num_points, line_width, speed, frame_width, frame_height): + from scipy.spatial import Voronoi + # Define the number of images in the batch + batch_size = frames + out = [] + + # Calculate aspect ratio + aspect_ratio = frame_width / frame_height + + # Create start and end points for each point, considering the aspect ratio + start_points = np.random.rand(num_points, 2) + start_points[:, 0] *= aspect_ratio + + end_points = np.random.rand(num_points, 2) + end_points[:, 0] *= aspect_ratio + + for i in 
range(batch_size): + # Interpolate the points' positions based on the current frame + t = (i * speed) / (batch_size - 1) # normalize to [0, 1] over the frames + t = np.clip(t, 0, 1) # ensure t is in [0, 1] + points = (1 - t) * start_points + t * end_points # lerp + + # Adjust points for aspect ratio + points[:, 0] *= aspect_ratio + + vor = Voronoi(points) + + # Create a blank image with a white background + fig, ax = plt.subplots() + plt.subplots_adjust(left=0, right=1, bottom=0, top=1) + ax.set_xlim([0, aspect_ratio]); ax.set_ylim([0, 1]) # adjust x limits + ax.axis('off') + ax.margins(0, 0) + fig.set_size_inches(aspect_ratio * frame_height/100, frame_height/100) # adjust figure size + ax.fill_between([0, 1], [0, 1], color='white') + + # Plot each Voronoi ridge + for simplex in vor.ridge_vertices: + simplex = np.asarray(simplex) + if np.all(simplex >= 0): + plt.plot(vor.vertices[simplex, 0], vor.vertices[simplex, 1], 'k-', linewidth=line_width) + + fig.canvas.draw() + img = np.array(fig.canvas.renderer._renderer) + + plt.close(fig) + + pil_img = Image.fromarray(img).convert("L") + mask = torch.tensor(np.array(pil_img)) / 255.0 + + out.append(mask) + + return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) + +class GetMaskSizeAndCount: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + }} + + RETURN_TYPES = ("MASK","INT", "INT", "INT",) + RETURN_NAMES = ("mask", "width", "height", "count",) + FUNCTION = "getsize" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Returns the width, height and batch size of the mask, +and passes it through unchanged. + +""" + + def getsize(self, mask): + width = mask.shape[2] + height = mask.shape[1] + count = mask.shape[0] + return {"ui": { + "text": [f"{count}x{width}x{height}"]}, + "result": (mask, width, height, count) + } + +class GrowMaskWithBlur: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "mask": ("MASK",), + "expand": ("INT", {"default": 0, "min": -MAX_RESOLUTION, "max": MAX_RESOLUTION, "step": 1}), + "incremental_expandrate": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 100.0, "step": 0.1}), + "tapered_corners": ("BOOLEAN", {"default": True}), + "flip_input": ("BOOLEAN", {"default": False}), + "blur_radius": ("FLOAT", { + "default": 0.0, + "min": 0.0, + "max": 100, + "step": 0.1 + }), + "lerp_alpha": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + "decay_factor": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}), + }, + "optional": { + "fill_holes": ("BOOLEAN", {"default": False}), + }, + } + + CATEGORY = "KJNodes/masking" + RETURN_TYPES = ("MASK", "MASK",) + RETURN_NAMES = ("mask", "mask_inverted",) + FUNCTION = "expand_mask" + DESCRIPTION = """ +# GrowMaskWithBlur +- mask: Input mask or mask batch +- expand: Expand or contract mask or mask batch by a given amount +- incremental_expandrate: increase expand rate by a given amount per frame +- tapered_corners: use tapered corners +- flip_input: flip input mask +- blur_radius: value higher than 0 will blur the mask +- lerp_alpha: alpha value for interpolation between frames +- decay_factor: decay value for interpolation between frames +- fill_holes: fill holes in the mask (slow)""" + + def expand_mask(self, mask, expand, tapered_corners, flip_input, blur_radius, incremental_expandrate, lerp_alpha, decay_factor, fill_holes=False): + alpha = lerp_alpha + decay = decay_factor + if flip_input: + mask = 1.0 - mask + c = 0 if tapered_corners else 1 + kernel = np.array([[c, 1, c], + [1, 1, 1], + [c, 1, 
c]]) + growmask = mask.reshape((-1, mask.shape[-2], mask.shape[-1])).cpu() + out = [] + previous_output = None + current_expand = expand + for m in growmask: + output = m.numpy() + for _ in range(abs(round(current_expand))): + if current_expand < 0: + output = scipy.ndimage.grey_erosion(output, footprint=kernel) + else: + output = scipy.ndimage.grey_dilation(output, footprint=kernel) + if current_expand < 0: + current_expand -= abs(incremental_expandrate) + else: + current_expand += abs(incremental_expandrate) + if fill_holes: + binary_mask = output > 0 + output = scipy.ndimage.binary_fill_holes(binary_mask) + output = output.astype(np.float32) * 255 + output = torch.from_numpy(output) + if alpha < 1.0 and previous_output is not None: + # Interpolate between the previous and current frame + output = alpha * output + (1 - alpha) * previous_output + if decay < 1.0 and previous_output is not None: + # Add the decayed previous output to the current frame + output += decay * previous_output + output = output / output.max() + previous_output = output + out.append(output) + + if blur_radius != 0: + # Convert the tensor list to PIL images, apply blur, and convert back + for idx, tensor in enumerate(out): + # Convert tensor to PIL image + pil_image = tensor2pil(tensor.cpu().detach())[0] + # Apply Gaussian blur + pil_image = pil_image.filter(ImageFilter.GaussianBlur(blur_radius)) + # Convert back to tensor + out[idx] = pil2tensor(pil_image) + blurred = torch.cat(out, dim=0) + return (blurred, 1.0 - blurred) + else: + return (torch.stack(out, dim=0), 1.0 - torch.stack(out, dim=0),) + +class MaskBatchMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + }, + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("masks",) + FUNCTION = "combine" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Creates an image batch from multiple masks. +You can set how many inputs the node has, +with the **inputcount** and clicking update. +""" + + def combine(self, inputcount, **kwargs): + mask = kwargs["mask_1"] + for c in range(1, inputcount): + new_mask = kwargs[f"mask_{c + 1}"] + if mask.shape[1:] != new_mask.shape[1:]: + new_mask = F.interpolate(new_mask.unsqueeze(1), size=(mask.shape[1], mask.shape[2]), mode="bicubic").squeeze(1) + mask = torch.cat((mask, new_mask), dim=0) + return (mask,) + +class OffsetMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "x": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "y": ("INT", { "default": 0, "min": -4096, "max": MAX_RESOLUTION, "step": 1, "display": "number" }), + "angle": ("INT", { "default": 0, "min": -360, "max": 360, "step": 1, "display": "number" }), + "duplication_factor": ("INT", { "default": 1, "min": 1, "max": 1000, "step": 1, "display": "number" }), + "roll": ("BOOLEAN", { "default": False }), + "incremental": ("BOOLEAN", { "default": False }), + "padding_mode": ( + [ + 'empty', + 'border', + 'reflection', + + ], { + "default": 'empty' + }), + } + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("mask",) + FUNCTION = "offset" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Offsets the mask by the specified amount. 
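+With incremental enabled, the offset and angle are multiplied by the frame number, so a mask batch animates over its frames.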
+ - mask: Input mask or mask batch + - x: Horizontal offset + - y: Vertical offset + - angle: Angle in degrees + - roll: roll edge wrapping + - duplication_factor: Number of times to duplicate the mask to form a batch + - border padding_mode: Padding mode for the mask +""" + + def offset(self, mask, x, y, angle, roll=False, incremental=False, duplication_factor=1, padding_mode="empty"): + # Create duplicates of the mask batch + mask = mask.repeat(duplication_factor, 1, 1).clone() + + batch_size, height, width = mask.shape + + if angle != 0 and incremental: + for i in range(batch_size): + rotation_angle = angle * (i+1) + mask[i] = TF.rotate(mask[i].unsqueeze(0), rotation_angle).squeeze(0) + elif angle > 0: + for i in range(batch_size): + mask[i] = TF.rotate(mask[i].unsqueeze(0), angle).squeeze(0) + + if roll: + if incremental: + for i in range(batch_size): + shift_x = min(x*(i+1), width-1) + shift_y = min(y*(i+1), height-1) + if shift_x != 0: + mask[i] = torch.roll(mask[i], shifts=shift_x, dims=1) + if shift_y != 0: + mask[i] = torch.roll(mask[i], shifts=shift_y, dims=0) + else: + shift_x = min(x, width-1) + shift_y = min(y, height-1) + if shift_x != 0: + mask = torch.roll(mask, shifts=shift_x, dims=2) + if shift_y != 0: + mask = torch.roll(mask, shifts=shift_y, dims=1) + else: + + for i in range(batch_size): + if incremental: + temp_x = min(x * (i+1), width-1) + temp_y = min(y * (i+1), height-1) + else: + temp_x = min(x, width-1) + temp_y = min(y, height-1) + if temp_x > 0: + if padding_mode == 'empty': + mask[i] = torch.cat([torch.zeros((height, temp_x)), mask[i, :, :-temp_x]], dim=1) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, :, :-temp_x], (0, temp_x), mode=padding_mode) + elif temp_x < 0: + if padding_mode == 'empty': + mask[i] = torch.cat([mask[i, :, :temp_x], torch.zeros((height, -temp_x))], dim=1) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, :, -temp_x:], (temp_x, 0), mode=padding_mode) + + if temp_y > 0: + if padding_mode == 'empty': + mask[i] = torch.cat([torch.zeros((temp_y, width)), mask[i, :-temp_y, :]], dim=0) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, :-temp_y, :], (0, temp_y), mode=padding_mode) + elif temp_y < 0: + if padding_mode == 'empty': + mask[i] = torch.cat([mask[i, :temp_y, :], torch.zeros((-temp_y, width))], dim=0) + elif padding_mode in ['replicate', 'reflect']: + mask[i] = F.pad(mask[i, -temp_y:, :], (temp_y, 0), mode=padding_mode) + + return mask, + +class RoundMask: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "mask": ("MASK",), + }} + + RETURN_TYPES = ("MASK",) + FUNCTION = "round" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Rounds the mask or batch of masks to a binary mask. +RoundMask example + +""" + + def round(self, mask): + mask = mask.round() + return (mask,) + +class ResizeMask: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "width": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, "display": "number" }), + "height": ("INT", { "default": 512, "min": 0, "max": MAX_RESOLUTION, "step": 8, "display": "number" }), + "keep_proportions": ("BOOLEAN", { "default": False }), + } + } + + RETURN_TYPES = ("MASK", "INT", "INT",) + RETURN_NAMES = ("mask", "width", "height",) + FUNCTION = "resize" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Resizes the mask or batch of masks to the specified width and height. 
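+If keep_proportions is enabled, the smaller of the two scale factors is used so the original aspect ratio is preserved.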
+""" + + def resize(self, mask, width, height, keep_proportions): + if keep_proportions: + _, oh, ow, _ = mask.shape + width = ow if width == 0 else width + height = oh if height == 0 else height + ratio = min(width / ow, height / oh) + width = round(ow*ratio) + height = round(oh*ratio) + + outputs = mask.unsqueeze(0) # Add an extra dimension for batch size + outputs = F.interpolate(outputs, size=(height, width), mode="nearest") + outputs = outputs.squeeze(0) # Remove the extra dimension after interpolation + + return(outputs, outputs.shape[2], outputs.shape[1],) + +class RemapMaskRange: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "mask": ("MASK",), + "min": ("FLOAT", {"default": 0.0,"min": -10.0, "max": 1.0, "step": 0.01}), + "max": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 10.0, "step": 0.01}), + } + } + + RETURN_TYPES = ("MASK",) + RETURN_NAMES = ("mask",) + FUNCTION = "remap" + CATEGORY = "KJNodes/masking" + DESCRIPTION = """ +Sets new min and max values for the mask. +""" + + def remap(self, mask, min, max): + + # Find the maximum value in the mask + mask_max = torch.max(mask) + + # If the maximum mask value is zero, avoid division by zero by setting it to 1 + mask_max = mask_max if mask_max > 0 else 1 + + # Scale the mask values to the new range defined by min and max + # The highest pixel value in the mask will be scaled to max + scaled_mask = (mask / mask_max) * (max - min) + min + + # Clamp the values to ensure they are within [0.0, 1.0] + scaled_mask = torch.clamp(scaled_mask, min=0.0, max=1.0) + + return (scaled_mask, ) \ No newline at end of file diff --git a/nodes/nodes.py b/nodes/nodes.py new file mode 100644 index 0000000..5d87e5b --- /dev/null +++ b/nodes/nodes.py @@ -0,0 +1,1632 @@ +import torch +import torch.nn.functional as F +import matplotlib.pyplot as plt +import numpy as np +from PIL import Image + +import json, re, os, io, time + +import model_management +import folder_paths +from nodes import MAX_RESOLUTION +from comfy.utils import common_upscale + +script_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +folder_paths.add_model_folder_path("kjnodes_fonts", os.path.join(script_directory, "fonts")) + +class AnyType(str): + """A special class that is always equal in not equal comparisons. 
Credit to pythongosssss""" + + def __ne__(self, __value: object) -> bool: + return False +any = AnyType("*") + +class INTConstant: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "value": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}), + }, + } + RETURN_TYPES = ("INT",) + RETURN_NAMES = ("value",) + FUNCTION = "get_value" + CATEGORY = "KJNodes/constants" + + def get_value(self, value): + return (value,) + +class FloatConstant: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "value": ("FLOAT", {"default": 0.0, "min": -0xffffffffffffffff, "max": 0xffffffffffffffff, "step": 0.001}), + }, + } + + RETURN_TYPES = ("FLOAT",) + RETURN_NAMES = ("value",) + FUNCTION = "get_value" + CATEGORY = "KJNodes/constants" + + def get_value(self, value): + return (value,) + +class StringConstant: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string": ("STRING", {"default": '', "multiline": False}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "passtring" + CATEGORY = "KJNodes/constants" + + def passtring(self, string): + return (string, ) + +class StringConstantMultiline: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string": ("STRING", {"default": "", "multiline": True}), + "strip_newlines": ("BOOLEAN", {"default": True}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "stringify" + CATEGORY = "KJNodes/constants" + + def stringify(self, string, strip_newlines): + new_string = [] + for line in io.StringIO(string): + if not line.strip().startswith("\n") and strip_newlines: + line = line.replace("\n", '') + new_string.append(line) + new_string = "\n".join(new_string) + + return (new_string, ) + + + +class ScaleBatchPromptSchedule: + + RETURN_TYPES = ("STRING",) + FUNCTION = "scaleschedule" + CATEGORY = "KJNodes" + DESCRIPTION = """ +Scales a batch schedule from Fizz' nodes BatchPromptSchedule +to a different frame count. +""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input_str": ("STRING", {"forceInput": True,"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n"}), + "old_frame_count": ("INT", {"forceInput": True,"default": 1,"min": 1, "max": 4096, "step": 1}), + "new_frame_count": ("INT", {"forceInput": True,"default": 1,"min": 1, "max": 4096, "step": 1}), + + }, + } + + def scaleschedule(self, old_frame_count, input_str, new_frame_count): + pattern = r'"(\d+)"\s*:\s*"(.*?)"(?:,|\Z)' + frame_strings = dict(re.findall(pattern, input_str)) + + # Calculate the scaling factor + scaling_factor = (new_frame_count - 1) / (old_frame_count - 1) + + # Initialize a dictionary to store the new frame numbers and strings + new_frame_strings = {} + + # Iterate over the frame numbers and strings + for old_frame, string in frame_strings.items(): + # Calculate the new frame number + new_frame = int(round(int(old_frame) * scaling_factor)) + + # Store the new frame number and corresponding string + new_frame_strings[new_frame] = string + + # Format the output string + output_str = ', '.join([f'"{k}":"{v}"' for k, v in sorted(new_frame_strings.items())]) + return (output_str,) + + +class GetLatentsFromBatchIndexed: + + RETURN_TYPES = ("LATENT",) + FUNCTION = "indexedlatentsfrombatch" + CATEGORY = "KJNodes" + DESCRIPTION = """ +Selects and returns the latents at the specified indices as an latent batch. 
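+For example, indexes "0, 2" returns a batch containing only the first and third latents.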
+""" + + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "latents": ("LATENT",), + "indexes": ("STRING", {"default": "0, 1, 2", "multiline": True}), + }, + } + + def indexedlatentsfrombatch(self, latents, indexes): + + samples = latents.copy() + latent_samples = samples["samples"] + + # Parse the indexes string into a list of integers + index_list = [int(index.strip()) for index in indexes.split(',')] + + # Convert list of indices to a PyTorch tensor + indices_tensor = torch.tensor(index_list, dtype=torch.long) + + # Select the latents at the specified indices + chosen_latents = latent_samples[indices_tensor] + + samples["samples"] = chosen_latents + return (samples,) + + +class ConditioningMultiCombine: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 20, "step": 1}), + "conditioning_1": ("CONDITIONING", ), + "conditioning_2": ("CONDITIONING", ), + }, + } + + RETURN_TYPES = ("CONDITIONING", "INT") + RETURN_NAMES = ("combined", "inputcount") + FUNCTION = "combine" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Combines multiple conditioning nodes into one +""" + + def combine(self, inputcount, **kwargs): + from nodes import ConditioningCombine + cond_combine_node = ConditioningCombine() + cond = kwargs["conditioning_1"] + for c in range(1, inputcount): + new_cond = kwargs[f"conditioning_{c + 1}"] + cond = cond_combine_node.combine(new_cond, cond)[0] + return (cond, inputcount,) + + +class JoinStrings: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "string1": ("STRING", {"default": '', "forceInput": True}), + "string2": ("STRING", {"default": '', "forceInput": True}), + "delimiter": ("STRING", {"default": ' ', "multiline": False}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "joinstring" + CATEGORY = "KJNodes/constants" + + def joinstring(self, string1, string2, delimiter): + joined_string = string1 + delimiter + string2 + return (joined_string, ) + +class JoinStringMulti: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "inputcount": ("INT", {"default": 2, "min": 2, "max": 1000, "step": 1}), + "string_1": ("STRING", {"default": '', "forceInput": True}), + "string_2": ("STRING", {"default": '', "forceInput": True}), + "delimiter": ("STRING", {"default": ' ', "multiline": False}), + "return_list": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("STRING",) + RETURN_NAMES = ("string",) + FUNCTION = "combine" + CATEGORY = "KJNodes" + DESCRIPTION = """ +Creates single string, or a list of strings, from +multiple input strings. +You can set how many inputs the node has, +with the **inputcount** and clicking update. 
+""" + + def combine(self, inputcount, delimiter, **kwargs): + string = kwargs["string_1"] + return_list = kwargs["return_list"] + strings = [string] # Initialize a list with the first string + for c in range(1, inputcount): + new_string = kwargs[f"string_{c + 1}"] + if return_list: + strings.append(new_string) # Add new string to the list + else: + string = string + delimiter + new_string + if return_list: + return (strings,) # Return the list of strings + else: + return (string,) # Return the combined string + +class CondPassThrough: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "positive": ("CONDITIONING", ), + "negative": ("CONDITIONING", ), + }, + } + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING",) + RETURN_NAMES = ("positive", "negative") + FUNCTION = "passthrough" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ + Simply passes through the positive and negative conditioning, + workaround for Set node not allowing bypassed inputs. +""" + + def passthrough(self, positive, negative): + return (positive, negative,) + +def append_helper(t, mask, c, set_area_to_bounds, strength): + n = [t[0], t[1].copy()] + _, h, w = mask.shape + n[1]['mask'] = mask + n[1]['set_area_to_bounds'] = set_area_to_bounds + n[1]['mask_strength'] = strength + c.append(n) + +class ConditioningSetMaskAndCombine: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, negative_2, mask_1, mask_2, set_cond_area, mask_1_strength, mask_2_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) + return (c, c2) + +class ConditioningSetMaskAndCombine3: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "positive_3": ("CONDITIONING", ), + "negative_3": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_3": ("MASK", ), + "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + 
"set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, positive_3, negative_2, negative_3, mask_1, mask_2, mask_3, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + if len(mask_3.shape) < 3: + mask_3 = mask_3.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) + for t in positive_3: + append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) + for t in negative_3: + append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) + return (c, c2) + +class ConditioningSetMaskAndCombine4: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "positive_3": ("CONDITIONING", ), + "negative_3": ("CONDITIONING", ), + "positive_4": ("CONDITIONING", ), + "negative_4": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_3": ("MASK", ), + "mask_4": ("MASK", ), + "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_4_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, positive_3, positive_4, negative_2, negative_3, negative_4, mask_1, mask_2, mask_3, mask_4, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength, mask_4_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + if len(mask_3.shape) < 3: + mask_3 = mask_3.unsqueeze(0) + if len(mask_4.shape) < 3: + mask_4 = mask_4.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) + for t in positive_3: + append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) + for t in positive_4: + append_helper(t, mask_4, c, set_area_to_bounds, 
mask_4_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) + for t in negative_3: + append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) + for t in negative_4: + append_helper(t, mask_4, c2, set_area_to_bounds, mask_4_strength) + return (c, c2) + +class ConditioningSetMaskAndCombine5: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "positive_1": ("CONDITIONING", ), + "negative_1": ("CONDITIONING", ), + "positive_2": ("CONDITIONING", ), + "negative_2": ("CONDITIONING", ), + "positive_3": ("CONDITIONING", ), + "negative_3": ("CONDITIONING", ), + "positive_4": ("CONDITIONING", ), + "negative_4": ("CONDITIONING", ), + "positive_5": ("CONDITIONING", ), + "negative_5": ("CONDITIONING", ), + "mask_1": ("MASK", ), + "mask_2": ("MASK", ), + "mask_3": ("MASK", ), + "mask_4": ("MASK", ), + "mask_5": ("MASK", ), + "mask_1_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_2_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_3_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_4_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "mask_5_strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}), + "set_cond_area": (["default", "mask bounds"],), + } + } + + RETURN_TYPES = ("CONDITIONING","CONDITIONING",) + RETURN_NAMES = ("combined_positive", "combined_negative",) + FUNCTION = "append" + CATEGORY = "KJNodes/masking/conditioning" + DESCRIPTION = """ +Bundles multiple conditioning mask and combine nodes into one,functionality is identical to ComfyUI native nodes +""" + + def append(self, positive_1, negative_1, positive_2, positive_3, positive_4, positive_5, negative_2, negative_3, negative_4, negative_5, mask_1, mask_2, mask_3, mask_4, mask_5, set_cond_area, mask_1_strength, mask_2_strength, mask_3_strength, mask_4_strength, mask_5_strength): + c = [] + c2 = [] + set_area_to_bounds = False + if set_cond_area != "default": + set_area_to_bounds = True + if len(mask_1.shape) < 3: + mask_1 = mask_1.unsqueeze(0) + if len(mask_2.shape) < 3: + mask_2 = mask_2.unsqueeze(0) + if len(mask_3.shape) < 3: + mask_3 = mask_3.unsqueeze(0) + if len(mask_4.shape) < 3: + mask_4 = mask_4.unsqueeze(0) + if len(mask_5.shape) < 3: + mask_5 = mask_5.unsqueeze(0) + for t in positive_1: + append_helper(t, mask_1, c, set_area_to_bounds, mask_1_strength) + for t in positive_2: + append_helper(t, mask_2, c, set_area_to_bounds, mask_2_strength) + for t in positive_3: + append_helper(t, mask_3, c, set_area_to_bounds, mask_3_strength) + for t in positive_4: + append_helper(t, mask_4, c, set_area_to_bounds, mask_4_strength) + for t in positive_5: + append_helper(t, mask_5, c, set_area_to_bounds, mask_5_strength) + for t in negative_1: + append_helper(t, mask_1, c2, set_area_to_bounds, mask_1_strength) + for t in negative_2: + append_helper(t, mask_2, c2, set_area_to_bounds, mask_2_strength) + for t in negative_3: + append_helper(t, mask_3, c2, set_area_to_bounds, mask_3_strength) + for t in negative_4: + append_helper(t, mask_4, c2, set_area_to_bounds, mask_4_strength) + for t in negative_5: + append_helper(t, mask_5, c2, set_area_to_bounds, mask_5_strength) + return (c, c2) + +class VRAM_Debug: + + @classmethod + + def INPUT_TYPES(s): + return { + "required": { + + "empty_cache": ("BOOLEAN", {"default": True}), 
+ "gc_collect": ("BOOLEAN", {"default": True}), + "unload_all_models": ("BOOLEAN", {"default": False}), + }, + "optional": { + "any_input": (any, {}), + "image_pass": ("IMAGE",), + "model_pass": ("MODEL",), + } + } + + RETURN_TYPES = (any, "IMAGE","MODEL","INT", "INT",) + RETURN_NAMES = ("any_output", "image_pass", "model_pass", "freemem_before", "freemem_after") + FUNCTION = "VRAMdebug" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ +Returns the inputs unchanged, they are only used as triggers, +and performs comfy model management functions and garbage collection, +reports free VRAM before and after the operations. +""" + + def VRAMdebug(self, gc_collect, empty_cache, unload_all_models, image_pass=None, model_pass=None, any_input=None): + freemem_before = model_management.get_free_memory() + print("VRAMdebug: free memory before: ", f"{freemem_before:,.0f}") + if empty_cache: + model_management.soft_empty_cache() + if unload_all_models: + model_management.unload_all_models() + if gc_collect: + import gc + gc.collect() + freemem_after = model_management.get_free_memory() + print("VRAMdebug: free memory after: ", f"{freemem_after:,.0f}") + print("VRAMdebug: freed memory: ", f"{freemem_after - freemem_before:,.0f}") + return {"ui": { + "text": [f"{freemem_before:,.0f}x{freemem_after:,.0f}"]}, + "result": (any_input, image_pass, model_pass, freemem_before, freemem_after) + } + +class SomethingToString: + @classmethod + + def INPUT_TYPES(s): + return { + "required": { + "input": (any, {}), + }, + "optional": { + "prefix": ("STRING", {"default": ""}), + "suffix": ("STRING", {"default": ""}), + } + } + RETURN_TYPES = ("STRING",) + FUNCTION = "stringify" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Converts any type to a string. +""" + + def stringify(self, input, prefix="", suffix=""): + if isinstance(input, (int, float, bool)): + stringified = str(input) + elif isinstance(input, list): + stringified = ', '.join(str(item) for item in input) + else: + return + if prefix: # Check if prefix is not empty + stringified = prefix + stringified # Add the prefix + if suffix: # Check if suffix is not empty + stringified = stringified + suffix # Add the suffix + + return (stringified,) + +class Sleep: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "input": (any, {}), + "minutes": ("INT", {"default": 0, "min": 0, "max": 1439}), + "seconds": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 59.99, "step": 0.01}), + }, + } + RETURN_TYPES = (any,) + FUNCTION = "sleepdelay" + CATEGORY = "KJNodes/misc" + DESCRIPTION = """ +Delays the execution for the input amount of time. 
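+The total delay is minutes * 60 + seconds, and the queue is blocked for that duration.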
+""" + + def sleepdelay(self, input, minutes, seconds): + total_seconds = minutes * 60 + seconds + time.sleep(total_seconds) + return input, + +class EmptyLatentImagePresets: + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "dimensions": ( + [ '512 x 512', + '768 x 512', + '960 x 512', + '1024 x 512', + '1536 x 640', + '1344 x 768', + '1216 x 832', + '1152 x 896', + '1024 x 1024', + ], + { + "default": '512 x 512' + }), + + "invert": ("BOOLEAN", {"default": False}), + "batch_size": ("INT", { + "default": 1, + "min": 1, + "max": 4096 + }), + }, + } + + RETURN_TYPES = ("LATENT", "INT", "INT") + RETURN_NAMES = ("Latent", "Width", "Height") + FUNCTION = "generate" + CATEGORY = "KJNodes" + + def generate(self, dimensions, invert, batch_size): + from nodes import EmptyLatentImage + result = [x.strip() for x in dimensions.split('x')] + + if invert: + width = int(result[1].split(' ')[0]) + height = int(result[0]) + else: + width = int(result[0]) + height = int(result[1].split(' ')[0]) + latent = EmptyLatentImage().generate(width, height, batch_size)[0] + + return (latent, int(width), int(height),) + + + +class WidgetToString: + @classmethod + def IS_CHANGED(cls, **kwargs): + return float("NaN") + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "id": ("INT", {"default": 0}), + "widget_name": ("STRING", {"multiline": False}), + "return_all": ("BOOLEAN", {"default": False}), + }, + + "hidden": {"extra_pnginfo": "EXTRA_PNGINFO", + "prompt": "PROMPT"}, + } + + RETURN_TYPES = ("STRING", ) + FUNCTION = "get_widget_value" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +Selects a node and it's specified widget and outputs the value as a string. +To see node id's, enable node id display from Manager badge menu. +""" + + def get_widget_value(self, id, widget_name, extra_pnginfo, prompt, return_all=False): + workflow = extra_pnginfo["workflow"] + results = [] + for node in workflow["nodes"]: + node_id = node["id"] + + if node_id != id: + continue + + values = prompt[str(node_id)] + if "inputs" in values: + if return_all: + results.append(', '.join(f'{k}: {str(v)}' for k, v in values["inputs"].items())) + elif widget_name in values["inputs"]: + v = str(values["inputs"][widget_name]) # Convert to string here + return (v, ) + else: + raise NameError(f"Widget not found: {id}.{widget_name}") + if not results: + raise NameError(f"Node not found: {id}") + return (', '.join(results).strip(', '), ) + +class DummyLatentOut: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "latent": ("LATENT",), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "dummy" + CATEGORY = "KJNodes/misc" + OUTPUT_NODE = True + DESCRIPTION = """ +Does nothing, used to trigger generic workflow output. +A way to get previews in the UI without saving anything to disk. 
+""" + + def dummy(self, latent): + return (latent,) + +class FlipSigmasAdjusted: + @classmethod + def INPUT_TYPES(s): + return {"required": + {"sigmas": ("SIGMAS", ), + "divide_by_last_sigma": ("BOOLEAN", {"default": False}), + "divide_by": ("FLOAT", {"default": 1,"min": 1, "max": 255, "step": 0.01}), + "offset_by": ("INT", {"default": 1,"min": -100, "max": 100, "step": 1}), + } + } + RETURN_TYPES = ("SIGMAS", "STRING",) + RETURN_NAMES = ("SIGMAS", "sigmas_string",) + CATEGORY = "KJNodes/noise" + FUNCTION = "get_sigmas_adjusted" + + def get_sigmas_adjusted(self, sigmas, divide_by_last_sigma, divide_by, offset_by): + + sigmas = sigmas.flip(0) + if sigmas[0] == 0: + sigmas[0] = 0.0001 + adjusted_sigmas = sigmas.clone() + #offset sigma + for i in range(1, len(sigmas)): + offset_index = i - offset_by + if 0 <= offset_index < len(sigmas): + adjusted_sigmas[i] = sigmas[offset_index] + else: + adjusted_sigmas[i] = 0.0001 + if adjusted_sigmas[0] == 0: + adjusted_sigmas[0] = 0.0001 + if divide_by_last_sigma: + adjusted_sigmas = adjusted_sigmas / adjusted_sigmas[-1] + + sigma_np_array = adjusted_sigmas.numpy() + array_string = np.array2string(sigma_np_array, precision=2, separator=', ', threshold=np.inf) + adjusted_sigmas = adjusted_sigmas / divide_by + return (adjusted_sigmas, array_string,) + +class CustomSigmas: + @classmethod + def INPUT_TYPES(s): + return {"required": + { + "sigmas_string" :("STRING", {"default": "14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029","multiline": True}), + "interpolate_to_steps": ("INT", {"default": 10,"min": 0, "max": 255, "step": 1}), + } + } + RETURN_TYPES = ("SIGMAS",) + RETURN_NAMES = ("SIGMAS",) + CATEGORY = "KJNodes/noise" + FUNCTION = "customsigmas" + DESCRIPTION = """ +Creates a sigmas tensor from a string of comma separated values. +Examples: + +Nvidia's optimized AYS 10 step schedule for SD 1.5: +14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029 +SDXL: +14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029 +SVD: +700.00, 54.5, 15.886, 7.977, 4.248, 1.789, 0.981, 0.403, 0.173, 0.034, 0.002 +""" + def customsigmas(self, sigmas_string, interpolate_to_steps): + sigmas_list = sigmas_string.split(', ') + sigmas_float_list = [float(sigma) for sigma in sigmas_list] + sigmas_tensor = torch.tensor(sigmas_float_list) + if len(sigmas_tensor) != interpolate_to_steps: + sigmas_tensor = self.loglinear_interp(sigmas_tensor, interpolate_to_steps) + return (sigmas_tensor,) + + def loglinear_interp(self, t_steps, num_steps): + """ + Performs log-linear interpolation of a given array of decreasing numbers. 
+ """ + t_steps_np = t_steps.numpy() + + xs = np.linspace(0, 1, len(t_steps_np)) + ys = np.log(t_steps_np[::-1]) + + new_xs = np.linspace(0, 1, num_steps) + new_ys = np.interp(new_xs, xs, ys) + + interped_ys = np.exp(new_ys)[::-1].copy() + interped_ys_tensor = torch.tensor(interped_ys) + return interped_ys_tensor + + +class InjectNoiseToLatent: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "latents":("LATENT",), + "strength": ("FLOAT", {"default": 0.1, "min": 0.0, "max": 200.0, "step": 0.0001}), + "noise": ("LATENT",), + "normalize": ("BOOLEAN", {"default": False}), + "average": ("BOOLEAN", {"default": False}), + }, + "optional":{ + "mask": ("MASK", ), + "mix_randn_amount": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1000.0, "step": 0.001}), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "injectnoise" + CATEGORY = "KJNodes/noise" + + def injectnoise(self, latents, strength, noise, normalize, average, mix_randn_amount=0, seed=None, mask=None): + samples = latents.copy() + if latents["samples"].shape != noise["samples"].shape: + raise ValueError("InjectNoiseToLatent: Latent and noise must have the same shape") + if average: + noised = (samples["samples"].clone() + noise["samples"].clone()) / 2 + else: + noised = samples["samples"].clone() + noise["samples"].clone() * strength + if normalize: + noised = noised / noised.std() + if mask is not None: + mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(noised.shape[2], noised.shape[3]), mode="bilinear") + mask = mask.expand((-1,noised.shape[1],-1,-1)) + if mask.shape[0] < noised.shape[0]: + mask = mask.repeat((noised.shape[0] -1) // mask.shape[0] + 1, 1, 1, 1)[:noised.shape[0]] + noised = mask * noised + (1-mask) * latents["samples"] + if mix_randn_amount > 0: + if seed is not None: + torch.manual_seed(seed) + rand_noise = torch.randn_like(noised) + noised = ((1 - mix_randn_amount) * noised + mix_randn_amount * + rand_noise) / ((mix_randn_amount**2 + (1-mix_randn_amount)**2) ** 0.5) + samples["samples"] = noised + return (samples,) + +class SoundReactive: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "sound_level": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 99999, "step": 0.01}), + "start_range_hz": ("INT", {"default": 150, "min": 0, "max": 9999, "step": 1}), + "end_range_hz": ("INT", {"default": 2000, "min": 0, "max": 9999, "step": 1}), + "multiplier": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 99999, "step": 0.01}), + "smoothing_factor": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "normalize": ("BOOLEAN", {"default": False}), + }, + } + + RETURN_TYPES = ("FLOAT","INT",) + RETURN_NAMES =("sound_level", "sound_level_int",) + FUNCTION = "react" + CATEGORY = "KJNodes/audio" + DESCRIPTION = """ +Reacts to the sound level of the input. +Uses your browsers sound input options and requires. +Meant to be used with realtime diffusion with autoqueue. 
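+The level is scaled by the multiplier, and normalize divides it by 255.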
+""" + + def react(self, sound_level, start_range_hz, end_range_hz, smoothing_factor, multiplier, normalize): + + sound_level *= multiplier + + if normalize: + sound_level /= 255 + + sound_level_int = int(sound_level) + return (sound_level, sound_level_int, ) + +class GenerateNoise: + @classmethod + def INPUT_TYPES(s): + return {"required": { + "width": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "height": ("INT", {"default": 512,"min": 16, "max": 4096, "step": 1}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "seed": ("INT", {"default": 123,"min": 0, "max": 0xffffffffffffffff, "step": 1}), + "multiplier": ("FLOAT", {"default": 1.0,"min": 0.0, "max": 4096, "step": 0.01}), + "constant_batch_noise": ("BOOLEAN", {"default": False}), + "normalize": ("BOOLEAN", {"default": False}), + }, + "optional": { + "model": ("MODEL", ), + "sigmas": ("SIGMAS", ), + } + } + + RETURN_TYPES = ("LATENT",) + FUNCTION = "generatenoise" + CATEGORY = "KJNodes/noise" + DESCRIPTION = """ +Generates noise for injection or to be used as empty latents on samplers with add_noise off. +""" + + def generatenoise(self, batch_size, width, height, seed, multiplier, constant_batch_noise, normalize, sigmas=None, model=None): + + generator = torch.manual_seed(seed) + noise = torch.randn([batch_size, 4, height // 8, width // 8], dtype=torch.float32, layout=torch.strided, generator=generator, device="cpu") + if sigmas is not None: + sigma = sigmas[0] - sigmas[-1] + sigma /= model.model.latent_format.scale_factor + noise *= sigma + + noise *=multiplier + + if normalize: + noise = noise / noise.std() + if constant_batch_noise: + noise = noise[0].repeat(batch_size, 1, 1, 1) + return ({"samples":noise}, ) + +def camera_embeddings(elevation, azimuth): + elevation = torch.as_tensor([elevation]) + azimuth = torch.as_tensor([azimuth]) + embeddings = torch.stack( + [ + torch.deg2rad( + (90 - elevation) - (90) + ), # Zero123 polar is 90-elevation + torch.sin(torch.deg2rad(azimuth)), + torch.cos(torch.deg2rad(azimuth)), + torch.deg2rad( + 90 - torch.full_like(elevation, 0) + ), + ], dim=-1).unsqueeze(1) + + return embeddings + +def interpolate_angle(start, end, fraction): + # Calculate the difference in angles and adjust for wraparound if necessary + diff = (end - start + 540) % 360 - 180 + # Apply fraction to the difference + interpolated = start + fraction * diff + # Normalize the result to be within the range of -180 to 180 + return (interpolated + 180) % 360 - 180 + + +class StableZero123_BatchSchedule: + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip_vision": ("CLIP_VISION",), + "init_image": ("IMAGE",), + "vae": ("VAE",), + "width": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 256, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 1, "min": 1, "max": 4096}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), + "azimuth_points_string": ("STRING", {"default": "0:(0.0),\n7:(1.0),\n15:(0.0)\n", "multiline": True}), + "elevation_points_string": ("STRING", {"default": "0:(0.0),\n7:(0.0),\n15:(0.0)\n", "multiline": True}), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + FUNCTION = "encode" + CATEGORY = "KJNodes/experimental" + + def encode(self, clip_vision, init_image, vae, width, height, batch_size, azimuth_points_string, elevation_points_string, interpolation): + output = 
clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + t = vae.encode(encode_pixels) + + def ease_in(t): + return t * t + def ease_out(t): + return 1 - (1 - t) * (1 - t) + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + # Parse the azimuth input string into a list of tuples + azimuth_points = [] + azimuth_points_string = azimuth_points_string.rstrip(',\n') + for point_str in azimuth_points_string.split(','): + frame_str, azimuth_str = point_str.split(':') + frame = int(frame_str.strip()) + azimuth = float(azimuth_str.strip()[1:-1]) + azimuth_points.append((frame, azimuth)) + # Sort the points by frame number + azimuth_points.sort(key=lambda x: x[0]) + + # Parse the elevation input string into a list of tuples + elevation_points = [] + elevation_points_string = elevation_points_string.rstrip(',\n') + for point_str in elevation_points_string.split(','): + frame_str, elevation_str = point_str.split(':') + frame = int(frame_str.strip()) + elevation_val = float(elevation_str.strip()[1:-1]) + elevation_points.append((frame, elevation_val)) + # Sort the points by frame number + elevation_points.sort(key=lambda x: x[0]) + + # Index of the next point to interpolate towards + next_point = 1 + next_elevation_point = 1 + + positive_cond_out = [] + positive_pooled_out = [] + negative_cond_out = [] + negative_pooled_out = [] + + #azimuth interpolation + for i in range(batch_size): + # Find the interpolated azimuth for the current frame + while next_point < len(azimuth_points) and i >= azimuth_points[next_point][0]: + next_point += 1 + # If next_point is equal to the length of points, we've gone past the last point + if next_point == len(azimuth_points): + next_point -= 1 # Set next_point to the last index of points + prev_point = max(next_point - 1, 0) # Ensure prev_point is not less than 0 + + # Calculate fraction + if azimuth_points[next_point][0] != azimuth_points[prev_point][0]: # Prevent division by zero + fraction = (i - azimuth_points[prev_point][0]) / (azimuth_points[next_point][0] - azimuth_points[prev_point][0]) + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + # Use the new interpolate_angle function + interpolated_azimuth = interpolate_angle(azimuth_points[prev_point][1], azimuth_points[next_point][1], fraction) + else: + interpolated_azimuth = azimuth_points[prev_point][1] + # Interpolate the elevation + next_elevation_point = 1 + while next_elevation_point < len(elevation_points) and i >= elevation_points[next_elevation_point][0]: + next_elevation_point += 1 + if next_elevation_point == len(elevation_points): + next_elevation_point -= 1 + prev_elevation_point = max(next_elevation_point - 1, 0) + + if elevation_points[next_elevation_point][0] != elevation_points[prev_elevation_point][0]: + fraction = (i - elevation_points[prev_elevation_point][0]) / (elevation_points[next_elevation_point][0] - elevation_points[prev_elevation_point][0]) + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + interpolated_elevation = interpolate_angle(elevation_points[prev_elevation_point][1], 
elevation_points[next_elevation_point][1], fraction) + else: + interpolated_elevation = elevation_points[prev_elevation_point][1] + + cam_embeds = camera_embeddings(interpolated_elevation, interpolated_azimuth) + cond = torch.cat([pooled, cam_embeds.repeat((pooled.shape[0], 1, 1))], dim=-1) + + positive_pooled_out.append(t) + positive_cond_out.append(cond) + negative_pooled_out.append(torch.zeros_like(t)) + negative_cond_out.append(torch.zeros_like(pooled)) + + # Concatenate the conditions and pooled outputs + final_positive_cond = torch.cat(positive_cond_out, dim=0) + final_positive_pooled = torch.cat(positive_pooled_out, dim=0) + final_negative_cond = torch.cat(negative_cond_out, dim=0) + final_negative_pooled = torch.cat(negative_pooled_out, dim=0) + + # Structure the final output + final_positive = [[final_positive_cond, {"concat_latent_image": final_positive_pooled}]] + final_negative = [[final_negative_cond, {"concat_latent_image": final_negative_pooled}]] + + latent = torch.zeros([batch_size, 4, height // 8, width // 8]) + return (final_positive, final_negative, {"samples": latent}) + +def linear_interpolate(start, end, fraction): + return start + (end - start) * fraction + +class SV3D_BatchSchedule: + @classmethod + def INPUT_TYPES(s): + return {"required": { "clip_vision": ("CLIP_VISION",), + "init_image": ("IMAGE",), + "vae": ("VAE",), + "width": ("INT", {"default": 576, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "height": ("INT", {"default": 576, "min": 16, "max": MAX_RESOLUTION, "step": 8}), + "batch_size": ("INT", {"default": 21, "min": 1, "max": 4096}), + "interpolation": (["linear", "ease_in", "ease_out", "ease_in_out"],), + "azimuth_points_string": ("STRING", {"default": "0:(0.0),\n9:(180.0),\n20:(360.0)\n", "multiline": True}), + "elevation_points_string": ("STRING", {"default": "0:(0.0),\n9:(0.0),\n20:(0.0)\n", "multiline": True}), + }} + + RETURN_TYPES = ("CONDITIONING", "CONDITIONING", "LATENT") + RETURN_NAMES = ("positive", "negative", "latent") + FUNCTION = "encode" + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +Allow scheduling of the azimuth and elevation conditions for SV3D. 
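+Uses the same frame:(value) point syntax as the CreateFadeMaskAdvanced node.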
+Note that SV3D is still a video model and the schedule needs to always go forward +https://huggingface.co/stabilityai/sv3d +""" + + def encode(self, clip_vision, init_image, vae, width, height, batch_size, azimuth_points_string, elevation_points_string, interpolation): + output = clip_vision.encode_image(init_image) + pooled = output.image_embeds.unsqueeze(0) + pixels = common_upscale(init_image.movedim(-1,1), width, height, "bilinear", "center").movedim(1,-1) + encode_pixels = pixels[:,:,:,:3] + t = vae.encode(encode_pixels) + + def ease_in(t): + return t * t + def ease_out(t): + return 1 - (1 - t) * (1 - t) + def ease_in_out(t): + return 3 * t * t - 2 * t * t * t + + # Parse the azimuth input string into a list of tuples + azimuth_points = [] + azimuth_points_string = azimuth_points_string.rstrip(',\n') + for point_str in azimuth_points_string.split(','): + frame_str, azimuth_str = point_str.split(':') + frame = int(frame_str.strip()) + azimuth = float(azimuth_str.strip()[1:-1]) + azimuth_points.append((frame, azimuth)) + # Sort the points by frame number + azimuth_points.sort(key=lambda x: x[0]) + + # Parse the elevation input string into a list of tuples + elevation_points = [] + elevation_points_string = elevation_points_string.rstrip(',\n') + for point_str in elevation_points_string.split(','): + frame_str, elevation_str = point_str.split(':') + frame = int(frame_str.strip()) + elevation_val = float(elevation_str.strip()[1:-1]) + elevation_points.append((frame, elevation_val)) + # Sort the points by frame number + elevation_points.sort(key=lambda x: x[0]) + + # Index of the next point to interpolate towards + next_point = 1 + next_elevation_point = 1 + elevations = [] + azimuths = [] + # For azimuth interpolation + for i in range(batch_size): + # Find the interpolated azimuth for the current frame + while next_point < len(azimuth_points) and i >= azimuth_points[next_point][0]: + next_point += 1 + if next_point == len(azimuth_points): + next_point -= 1 + prev_point = max(next_point - 1, 0) + + if azimuth_points[next_point][0] != azimuth_points[prev_point][0]: + fraction = (i - azimuth_points[prev_point][0]) / (azimuth_points[next_point][0] - azimuth_points[prev_point][0]) + # Apply the ease function to the fraction + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + interpolated_azimuth = linear_interpolate(azimuth_points[prev_point][1], azimuth_points[next_point][1], fraction) + else: + interpolated_azimuth = azimuth_points[prev_point][1] + + # Interpolate the elevation + next_elevation_point = 1 + while next_elevation_point < len(elevation_points) and i >= elevation_points[next_elevation_point][0]: + next_elevation_point += 1 + if next_elevation_point == len(elevation_points): + next_elevation_point -= 1 + prev_elevation_point = max(next_elevation_point - 1, 0) + + if elevation_points[next_elevation_point][0] != elevation_points[prev_elevation_point][0]: + fraction = (i - elevation_points[prev_elevation_point][0]) / (elevation_points[next_elevation_point][0] - elevation_points[prev_elevation_point][0]) + # Apply the ease function to the fraction + if interpolation == "ease_in": + fraction = ease_in(fraction) + elif interpolation == "ease_out": + fraction = ease_out(fraction) + elif interpolation == "ease_in_out": + fraction = ease_in_out(fraction) + + interpolated_elevation = 
linear_interpolate(elevation_points[prev_elevation_point][1], elevation_points[next_elevation_point][1], fraction) + else: + interpolated_elevation = elevation_points[prev_elevation_point][1] + + azimuths.append(interpolated_azimuth) + elevations.append(interpolated_elevation) + + #print("azimuths", azimuths) + #print("elevations", elevations) + + # Structure the final output + final_positive = [[pooled, {"concat_latent_image": t, "elevation": elevations, "azimuth": azimuths}]] + final_negative = [[torch.zeros_like(pooled), {"concat_latent_image": torch.zeros_like(t),"elevation": elevations, "azimuth": azimuths}]] + + latent = torch.zeros([batch_size, 4, height // 8, width // 8]) + return (final_positive, final_negative, {"samples": latent}) + +class LoadResAdapterNormalization: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "model": ("MODEL",), + "resadapter_path": (folder_paths.get_filename_list("checkpoints"), ) + } + } + + RETURN_TYPES = ("MODEL",) + FUNCTION = "load_res_adapter" + CATEGORY = "KJNodes/experimental" + + def load_res_adapter(self, model, resadapter_path): + print("ResAdapter: Checking ResAdapter path") + resadapter_full_path = folder_paths.get_full_path("checkpoints", resadapter_path) + if not os.path.exists(resadapter_full_path): + raise Exception("Invalid model path") + else: + print("ResAdapter: Loading ResAdapter normalization weights") + from comfy.utils import load_torch_file + prefix_to_remove = 'diffusion_model.' + model_clone = model.clone() + norm_state_dict = load_torch_file(resadapter_full_path) + new_values = {key[len(prefix_to_remove):]: value for key, value in norm_state_dict.items() if key.startswith(prefix_to_remove)} + print("ResAdapter: Attempting to add patches with ResAdapter weights") + try: + for key in model.model.diffusion_model.state_dict().keys(): + if key in new_values: + original_tensor = model.model.diffusion_model.state_dict()[key] + new_tensor = new_values[key].to(model.model.diffusion_model.dtype) + if original_tensor.shape == new_tensor.shape: + model_clone.add_object_patch(f"diffusion_model.{key}.data", new_tensor) + else: + print("ResAdapter: No match for key: ",key) + except: + raise Exception("Could not patch model, this way of patching was added to ComfyUI on March 3rd 2024, is your ComfyUI up to date?") + print("ResAdapter: Added resnet normalization patches") + return (model_clone, ) + +class Superprompt: + @classmethod + def INPUT_TYPES(s): + return { + "required": { + "instruction_prompt": ("STRING", {"default": 'Expand the following prompt to add more detail', "multiline": True}), + "prompt": ("STRING", {"default": '', "multiline": True, "forceInput": True}), + "max_new_tokens": ("INT", {"default": 128, "min": 1, "max": 4096, "step": 1}), + } + } + + RETURN_TYPES = ("STRING",) + FUNCTION = "process" + CATEGORY = "KJNodes/text" + DESCRIPTION = """ +# SuperPrompt +A T5 model fine-tuned on the SuperPrompt dataset for +upsampling text prompts to more detailed descriptions. +Meant to be used as a pre-generation step for text-to-image +models that benefit from more detailed prompts. 
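+The model weights are loaded from the models/superprompt-v1 folder of this node pack, and the tokenizer from google/flan-t5-small.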
+https://huggingface.co/roborovski/superprompt-v1
+"""
+
+    def process(self, instruction_prompt, prompt, max_new_tokens):
+        device = model_management.get_torch_device()
+        from transformers import T5Tokenizer, T5ForConditionalGeneration
+
+        checkpoint_path = os.path.join(script_directory, "models", "superprompt-v1")
+        tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small", legacy=False)
+
+        model = T5ForConditionalGeneration.from_pretrained(checkpoint_path, device_map=device)
+        model.to(device)
+        input_text = instruction_prompt + ": " + prompt
+
+        input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(device)
+        outputs = model.generate(input_ids, max_new_tokens=max_new_tokens)
+        out = tokenizer.decode(outputs[0])
+        # decode() keeps the T5 special tokens, strip them from the output
+        out = out.replace('<pad>', '')
+        out = out.replace('</s>', '')
+
+        return (out, )
+
+
+class CameraPoseVisualizer:
+
+    @classmethod
+    def INPUT_TYPES(s):
+        return {"required": {
+            "pose_file_path": ("STRING", {"default": '', "multiline": False}),
+            "base_xval": ("FLOAT", {"default": 0.2, "min": 0, "max": 100, "step": 0.01}),
+            "zval": ("FLOAT", {"default": 0.3, "min": 0, "max": 100, "step": 0.01}),
+            "scale": ("FLOAT", {"default": 1.0, "min": 0.01, "max": 10.0, "step": 0.01}),
+            "use_exact_fx": ("BOOLEAN", {"default": False}),
+            "relative_c2w": ("BOOLEAN", {"default": True}),
+            "use_viewer": ("BOOLEAN", {"default": False}),
+            },
+            "optional": {
+                "cameractrl_poses": ("CAMERACTRL_POSES", {"default": None}),
+            }
+        }
+
+    RETURN_TYPES = ("IMAGE",)
+    FUNCTION = "plot"
+    CATEGORY = "KJNodes/misc"
+    DESCRIPTION = """
+Visualizes the camera poses, from Animatediff-Evolved CameraCtrl Pose
+or a .txt file with RealEstate camera intrinsics and coordinates, in a 3D plot.
+"""
+
+    def plot(self, pose_file_path, scale, base_xval, zval, use_exact_fx, relative_c2w, use_viewer, cameractrl_poses=None):
+        import matplotlib as mpl
+        import matplotlib.pyplot as plt
+        from torchvision.transforms import ToTensor
+
+        x_min = -2.0 * scale
+        x_max = 2.0 * scale
+        y_min = -2.0 * scale
+        y_max = 2.0 * scale
+        z_min = -2.0 * scale
+        z_max = 2.0 * scale
+        plt.rcParams['text.color'] = '#999999'
+        self.fig = plt.figure(figsize=(18, 7))
+        self.fig.patch.set_facecolor('#353535')
+        self.ax = self.fig.add_subplot(projection='3d')
+        self.ax.set_facecolor('#353535') # Set the background color here
+        self.ax.grid(color='#999999', linestyle='-', linewidth=0.5)
+        self.plotly_data = None # plotly data traces
+        self.ax.set_aspect("auto")
+        self.ax.set_xlim(x_min, x_max)
+        self.ax.set_ylim(y_min, y_max)
+        self.ax.set_zlim(z_min, z_max)
+        self.ax.set_xlabel('x', color='#999999')
+        self.ax.set_ylabel('y', color='#999999')
+        self.ax.set_zlabel('z', color='#999999')
+        for text in self.ax.get_xticklabels() + self.ax.get_yticklabels() + self.ax.get_zticklabels():
+            text.set_color('#999999')
+        print('initialize camera pose visualizer')
+
+        if pose_file_path != "":
+            with open(pose_file_path, 'r') as f:
+                poses = f.readlines()
+                w2cs = [np.asarray([float(p) for p in pose.strip().split(' ')[7:]]).reshape(3, 4) for pose in poses[1:]]
+                fxs = [float(pose.strip().split(' ')[1]) for pose in poses[1:]]
+                #print(poses)
+        elif cameractrl_poses is not None:
+            poses = cameractrl_poses
+            w2cs = [np.array(pose[7:]).reshape(3, 4) for pose in cameractrl_poses]
+            fxs = [pose[1] for pose in cameractrl_poses]
+        else:
+            raise ValueError("Please provide either pose_file_path or cameractrl_poses")
+
+        total_frames = len(w2cs)
+        transform_matrix = np.asarray([[1, 0, 0, 0], [0, 0, 1, 0], [0, -1, 0, 0], [0, 0, 0, 1]]).reshape(4, 4)
+        last_row 
= np.zeros((1, 4)) + last_row[0, -1] = 1.0 + + w2cs = [np.concatenate((w2c, last_row), axis=0) for w2c in w2cs] + c2ws = self.get_c2w(w2cs, transform_matrix, relative_c2w) + + for frame_idx, c2w in enumerate(c2ws): + self.extrinsic2pyramid(c2w, frame_idx / total_frames, hw_ratio=1/1, base_xval=base_xval, + zval=(fxs[frame_idx] if use_exact_fx else zval)) + + # Create the colorbar + cmap = mpl.cm.rainbow + norm = mpl.colors.Normalize(vmin=0, vmax=total_frames) + colorbar = self.fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), ax=self.ax, orientation='vertical') + + # Change the colorbar label + colorbar.set_label('Frame', color='#999999') # Change the label and its color + + # Change the tick colors + colorbar.ax.yaxis.set_tick_params(colors='#999999') # Change the tick color + + # Change the tick frequency + # Assuming you want to set the ticks at every 10th frame + ticks = np.arange(0, total_frames, 10) + colorbar.ax.yaxis.set_ticks(ticks) + + plt.title('') + plt.draw() + buf = io.BytesIO() + plt.savefig(buf, format='png', bbox_inches='tight', pad_inches=0) + buf.seek(0) + img = Image.open(buf) + tensor_img = ToTensor()(img) + buf.close() + tensor_img = tensor_img.permute(1, 2, 0).unsqueeze(0) + if use_viewer: + time.sleep(1) + plt.show() + return (tensor_img,) + + def extrinsic2pyramid(self, extrinsic, color_map='red', hw_ratio=1/1, base_xval=1, zval=3): + from mpl_toolkits.mplot3d.art3d import Poly3DCollection + vertex_std = np.array([[0, 0, 0, 1], + [base_xval, -base_xval * hw_ratio, zval, 1], + [base_xval, base_xval * hw_ratio, zval, 1], + [-base_xval, base_xval * hw_ratio, zval, 1], + [-base_xval, -base_xval * hw_ratio, zval, 1]]) + vertex_transformed = vertex_std @ extrinsic.T + meshes = [[vertex_transformed[0, :-1], vertex_transformed[1][:-1], vertex_transformed[2, :-1]], + [vertex_transformed[0, :-1], vertex_transformed[2, :-1], vertex_transformed[3, :-1]], + [vertex_transformed[0, :-1], vertex_transformed[3, :-1], vertex_transformed[4, :-1]], + [vertex_transformed[0, :-1], vertex_transformed[4, :-1], vertex_transformed[1, :-1]], + [vertex_transformed[1, :-1], vertex_transformed[2, :-1], vertex_transformed[3, :-1], vertex_transformed[4, :-1]]] + + color = color_map if isinstance(color_map, str) else plt.cm.rainbow(color_map) + + self.ax.add_collection3d( + Poly3DCollection(meshes, facecolors=color, linewidths=0.3, edgecolors=color, alpha=0.25)) + + def customize_legend(self, list_label): + from matplotlib.patches import Patch + list_handle = [] + for idx, label in enumerate(list_label): + color = plt.cm.rainbow(idx / len(list_label)) + patch = Patch(color=color, label=label) + list_handle.append(patch) + plt.legend(loc='right', bbox_to_anchor=(1.8, 0.5), handles=list_handle) + + def get_c2w(self, w2cs, transform_matrix, relative_c2w): + if relative_c2w: + target_cam_c2w = np.array([ + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0], + [0, 0, 0, 1] + ]) + abs2rel = target_cam_c2w @ w2cs[0] + ret_poses = [target_cam_c2w, ] + [abs2rel @ np.linalg.inv(w2c) for w2c in w2cs[1:]] + else: + ret_poses = [np.linalg.inv(w2c) for w2c in w2cs] + ret_poses = [transform_matrix @ x for x in ret_poses] + return np.array(ret_poses, dtype=np.float32) + + + +class StabilityAPI_SD3: + + @classmethod + def INPUT_TYPES(cls): + return { + "required": { + "prompt": ("STRING", {"multiline": True}), + "n_prompt": ("STRING", {"multiline": True}), + "seed": ("INT", {"default": 123,"min": 0, "max": 4294967294, "step": 1}), + "model": ( + [ + 'sd3', + 'sd3-turbo', + ], + { + "default": 'sd3' + }), + 
"aspect_ratio": ( + [ + '1:1', + '16:9', + '21:9', + '2:3', + '3:2', + '4:5', + '5:4', + '9:16', + '9:21', + ], + { + "default": '1:1' + }), + "output_format": ( + [ + 'png', + 'jpeg', + ], + { + "default": 'jpeg' + }), + }, + "optional": { + "api_key": ("STRING", {"multiline": True}), + "image": ("IMAGE",), + "img2img_strength": ("FLOAT", {"default": 0.5, "min": 0.0, "max": 1.0, "step": 0.01}), + "disable_metadata": ("BOOLEAN", {"default": True}), + }, + } + + RETURN_TYPES = ("IMAGE",) + FUNCTION = "apicall" + + CATEGORY = "KJNodes/experimental" + DESCRIPTION = """ +## Calls StabilityAI API + +Although you may have multiple keys in your account, +you should use the same key for all requests to this API. + +Get your API key here: https://platform.stability.ai/account/keys +Recommended to set the key in the config.json -file under this +node packs folder. +# WARNING: +Otherwise the API key may get saved in the image metadata even +with "disable_metadata" on if the workflow includes save nodes +separate from this node. + +sd3 requires 6.5 credits per generation +sd3-turbo requires 4 credits per generation + +If no image is provided, mode is set to text-to-image + +""" + + def apicall(self, prompt, n_prompt, model, seed, aspect_ratio, output_format, + img2img_strength=0.5, image=None, disable_metadata=True, api_key=""): + from comfy.cli_args import args + if disable_metadata: + args.disable_metadata = True + else: + args.disable_metadata = False + + import requests + from torchvision import transforms + + data = { + "mode": "text-to-image", + "prompt": prompt, + "model": model, + "seed": seed, + "output_format": output_format + } + + if image is not None: + image = image.permute(0, 3, 1, 2).squeeze(0) + to_pil = transforms.ToPILImage() + pil_image = to_pil(image) + # Save the PIL Image to a BytesIO object + buffer = io.BytesIO() + pil_image.save(buffer, format='PNG') + buffer.seek(0) + files = {"image": ("image.png", buffer, "image/png")} + + data["mode"] = "image-to-image" + data["image"] = pil_image + data["strength"] = img2img_strength + else: + data["aspect_ratio"] = aspect_ratio, + files = {"none": ''} + + if model != "sd3-turbo": + data["negative_prompt"] = n_prompt + + headers={ + "accept": "image/*" + } + + if api_key != "": + headers["authorization"] = api_key + else: + config_file_path = os.path.join(script_directory,"config.json") + with open(config_file_path, 'r') as file: + config = json.load(file) + api_key_from_config = config.get("sai_api_key") + headers["authorization"] = api_key_from_config + + response = requests.post( + f"https://api.stability.ai/v2beta/stable-image/generate/sd3", + headers=headers, + files = files, + data = data, + ) + + if response.status_code == 200: + # Convert the response content to a PIL Image + image = Image.open(io.BytesIO(response.content)) + # Convert the PIL Image to a PyTorch tensor + transform = transforms.ToTensor() + tensor_image = transform(image) + tensor_image = tensor_image.unsqueeze(0) + tensor_image = tensor_image.permute(0, 2, 3, 1).cpu().float() + return (tensor_image,) + else: + try: + # Attempt to parse the response as JSON + error_data = response.json() + raise Exception(f"Server error: {error_data}") + except json.JSONDecodeError: + # If the response is not valid JSON, raise a different exception + raise Exception(f"Server error: {response.text}") \ No newline at end of file diff --git a/fluid.py b/utility/fluid.py similarity index 100% rename from fluid.py rename to utility/fluid.py diff --git a/magictex.py 
b/utility/magictex.py similarity index 100% rename from magictex.py rename to utility/magictex.py diff --git a/numerical.py b/utility/numerical.py similarity index 100% rename from numerical.py rename to utility/numerical.py diff --git a/utility.py b/utility/utility.py similarity index 100% rename from utility.py rename to utility/utility.py diff --git a/web/js/appearance.js b/web/js/appearance.js index 843a5b5..d90b4aa 100644 --- a/web/js/appearance.js +++ b/web/js/appearance.js @@ -1,27 +1,23 @@ import { app } from "../../../scripts/app.js"; - app.registerExtension({ name: "KJNodes.appearance", nodeCreated(node) { - const title = node.getTitle(); - switch (title) { - case "INT Constant": + switch (node.comfyClass) { + case "INTConstant": node.setSize([200, 58]); node.color = "#1b4669"; node.bgcolor = "#29699c"; break; - case "Float Constant": + case "FloatConstant": node.setSize([200, 58]); node.color = LGraphCanvas.node_colors.green.color; node.bgcolor = LGraphCanvas.node_colors.green.bgcolor; break; case "ConditioningMultiCombine": - node.color = LGraphCanvas.node_colors.brown.color; node.bgcolor = LGraphCanvas.node_colors.brown.bgcolor; break; - } } }); diff --git a/web/js/browserstatus.js b/web/js/browserstatus.js index c796b6f..fd377e7 100644 --- a/web/js/browserstatus.js +++ b/web/js/browserstatus.js @@ -4,6 +4,9 @@ import { app } from "../../../scripts/app.js"; app.registerExtension({ name: "KJNodes.browserstatus", setup() { + if (!app.ui.settings.getSettingValue("KJNodes.browserStatus")) { + return; + } api.addEventListener("status", ({ detail }) => { let title = "ComfyUI"; let favicon = "green"; @@ -11,7 +14,6 @@ app.registerExtension({ if (queueRemaining) { favicon = "red"; - title = `00% - ${queueRemaining} | ${title}`; } let link = document.querySelector("link[rel~='icon']"); @@ -22,9 +24,8 @@ app.registerExtension({ } link.href = new URL(`../${favicon}.png`, import.meta.url); document.title = title; - }); -//add progress to the title + //add progress to the title api.addEventListener("progress", ({ detail }) => { const { value, max } = detail; const progress = Math.floor((value / max) * 100); @@ -34,8 +35,19 @@ app.registerExtension({ const paddedProgress = String(progress).padStart(2, '0'); title = `${paddedProgress}% ${title.replace(/^\d+%\s/, '')}`; } - document.title = title; }); }, + init() { + if (!app.ui.settings.getSettingValue("KJNodes.browserStatus")) { + return; + } + const pythongossFeed = app.extensions.find( + (e) => e.name === 'pysssss.FaviconStatus', + ) + if (pythongossFeed) { + console.warn("KJNodes - Overriding pysssss.FaviconStatus") + app.extensions = app.extensions.filter(item => item !== pythongossFeed); + } + }, }); \ No newline at end of file diff --git a/web/js/contextmenu.js b/web/js/contextmenu.js index 0b05d7d..636358e 100644 --- a/web/js/contextmenu.js +++ b/web/js/contextmenu.js @@ -1,7 +1,5 @@ import { app } from "../../../scripts/app.js"; - -var nodeAutoColor = true // Adds context menu entries, code partly from pyssssscustom-scripts function addMenuHandler(nodeType, cb) { @@ -45,13 +43,9 @@ app.registerExtension({ content: "Add SetNode", callback: () => {addNode("SetNode", this, { side:"right", offset: 30 }); }, - }); }); - } - - }, async setup(app) { const onChange = (value) => { @@ -144,5 +138,15 @@ app.registerExtension({ { value: false, text: "Off", selected: value === false }, ], }); + app.ui.settings.addSetting({ + id: "KJNodes.browserStatus", + name: "🦛 KJNodes: 🟢 Stoplight browser status icon 🔴", + defaultValue: false, + type: 
"boolean", + options: (value) => [ + { value: true, text: "On", selected: value === true }, + { value: false, text: "Off", selected: value === false }, + ], + }); } }); diff --git a/web/js/help_popup.js b/web/js/help_popup.js index 9567222..b6ccf3a 100644 --- a/web/js/help_popup.js +++ b/web/js/help_popup.js @@ -45,6 +45,7 @@ loadScript('/kjweb_async/purify.min.js').catch((e) => { console.log(e) }) +const categories = ["KJNodes", "SUPIR", "VoiceCraft", "Marigold"]; app.registerExtension({ name: "KJNodes.HelpPopup", async beforeRegisterNodeDef(nodeType, nodeData) { @@ -52,13 +53,12 @@ app.registerExtension({ if (app.ui.settings.getSettingValue("KJNodes.helpPopup") === false) { return; } - - const categories = ["KJNodes", "SUPIR", "VoiceCraft", "Marigold"]; try { categories.forEach(category => { if (nodeData?.category?.startsWith(category)) { addDocumentation(nodeData, nodeType); } + else return }); } catch (error) { console.error("Error in registering KJNodes.HelpPopup", error); @@ -182,13 +182,16 @@ const create_documentation_stylesheet = () => { let startX, startY, startWidth, startHeight resizeHandle.addEventListener('mousedown', function (e) { + e.preventDefault(); e.stopPropagation(); isResizing = true; startX = e.clientX; startY = e.clientY; startWidth = parseInt(document.defaultView.getComputedStyle(docElement).width, 10); startHeight = parseInt(document.defaultView.getComputedStyle(docElement).height, 10); - }); + }, + { signal: this.docCtrl.signal }, + ); // close button const closeButton = document.createElement('div'); @@ -208,19 +211,30 @@ const create_documentation_stylesheet = () => { this.show_doc = !this.show_doc docElement.parentNode.removeChild(docElement) docElement = null - }); + if (contentWrapper) { + contentWrapper.remove() + contentWrapper = null + } + }, + { signal: this.docCtrl.signal }, + ); document.addEventListener('mousemove', function (e) { if (!isResizing) return; - const newWidth = startWidth + e.clientX - startX; - const newHeight = startHeight + e.clientY - startY; + const scale = app.canvas.ds.scale; + const newWidth = startWidth + (e.clientX - startX) / scale; + const newHeight = startHeight + (e.clientY - startY) / scale;; docElement.style.width = `${newWidth}px`; docElement.style.height = `${newHeight}px`; - }); + }, + { signal: this.docCtrl.signal }, + ); document.addEventListener('mouseup', function () { isResizing = false - }) + }, + { signal: this.docCtrl.signal }, + ) document.body.appendChild(docElement) } @@ -238,7 +252,7 @@ const create_documentation_stylesheet = () => { const transform = new DOMMatrix() .scaleSelf(scaleX, scaleY) .multiplySelf(ctx.getTransform()) - .translateSelf(this.size[0] * scaleX, 0) + .translateSelf(this.size[0] * scaleX * Math.max(1.0,window.devicePixelRatio) , 0) .translateSelf(10, -32) const scale = new DOMMatrix() @@ -283,8 +297,29 @@ const create_documentation_stylesheet = () => { } else { this.show_doc = !this.show_doc } + if (this.show_doc) { + this.docCtrl = new AbortController() + } else { + this.docCtrl.abort() + } return true; } return r; } + const onRem = nodeType.prototype.onRemoved + + nodeType.prototype.onRemoved = function () { + const r = onRem ? 
onRem.apply(this, []) : undefined + + if (docElement) { + docElement.remove() + docElement = null + } + + if (contentWrapper) { + contentWrapper.remove() + contentWrapper = null + } + return r + } } \ No newline at end of file diff --git a/web/js/jsnodes.js b/web/js/jsnodes.js index 3ab7d4c..6131289 100644 --- a/web/js/jsnodes.js +++ b/web/js/jsnodes.js @@ -3,10 +3,12 @@ import { app } from "../../../scripts/app.js"; app.registerExtension({ name: "KJNodes.jsnodes", async beforeRegisterNodeDef(nodeType, nodeData, app) { + if(!nodeData?.category?.startsWith("KJNodes")) { + return; + } switch (nodeData.name) { case "ConditioningMultiCombine": nodeType.prototype.onNodeCreated = function () { - //this.inputs_offset = nodeData.name.includes("selective")?1:0 this.cond_type = "CONDITIONING" this.inputs_offset = nodeData.name.includes("selective")?1:0 this.addWidget("button", "Update inputs", null, () => { @@ -24,9 +26,133 @@ app.registerExtension({ for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i) this.addInput(`conditioning_${i}`, this.cond_type) } - }); + }); } break; + case "ImageBatchMulti": + nodeType.prototype.onNodeCreated = function () { + this._type = "IMAGE" + this.inputs_offset = nodeData.name.includes("selective")?1:0 + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + if(target_number_of_inputs===this.inputs.length)return; // already set, do nothing + + if(target_number_of_inputs < this.inputs.length){ + for(let i = this.inputs.length; i>=this.inputs_offset+target_number_of_inputs; i--) + this.removeInput(i) + } + else{ + for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i) + this.addInput(`image_${i}`, this._type) + } + }); + } + break; + case "MaskBatchMulti": + nodeType.prototype.onNodeCreated = function () { + this._type = "MASK" + this.inputs_offset = nodeData.name.includes("selective")?1:0 + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + if(target_number_of_inputs===this.inputs.length)return; // already set, do nothing + + if(target_number_of_inputs < this.inputs.length){ + for(let i = this.inputs.length; i>=this.inputs_offset+target_number_of_inputs; i--) + this.removeInput(i) + } + else{ + for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i) + this.addInput(`mask_${i}`, this._type) + } + }); + } + break; + + case "GetMaskSizeAndCount": + const onGetMaskSizeConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + const v = onGetMaskSizeConnectInput?.(this, arguments); + targetSlot.outputs[1]["name"] = "width" + targetSlot.outputs[2]["name"] = "height" + targetSlot.outputs[3]["name"] = "count" + return v; + } + const onGetMaskSizeExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function(message) { + const r = onGetMaskSizeExecuted? 
onGetMaskSizeExecuted.apply(this,arguments): undefined + let values = message["text"].toString().split('x').map(Number); + this.outputs[1]["name"] = values[1] + " width" + this.outputs[2]["name"] = values[2] + " height" + this.outputs[3]["name"] = values[0] + " count" + return r + } + break; + + case "GetImageSizeAndCount": + const onGetImageSizeConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + const v = onGetImageSizeConnectInput?.(this, arguments); + targetSlot.outputs[1]["name"] = "width" + targetSlot.outputs[2]["name"] = "height" + targetSlot.outputs[3]["name"] = "count" + return v; + } + const onGetImageSizeExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function(message) { + const r = onGetImageSizeExecuted? onGetImageSizeExecuted.apply(this,arguments): undefined + let values = message["text"].toString().split('x').map(Number); + this.outputs[1]["name"] = values[1] + " width" + this.outputs[2]["name"] = values[2] + " height" + this.outputs[3]["name"] = values[0] + " count" + return r + } + break; + case "VRAM_Debug": + const onVRAM_DebugConnectInput = nodeType.prototype.onConnectInput; + nodeType.prototype.onConnectInput = function (targetSlot, type, output, originNode, originSlot) { + const v = onVRAM_DebugConnectInput?.(this, arguments); + targetSlot.outputs[3]["name"] = "freemem_before" + targetSlot.outputs[4]["name"] = "freemem_after" + return v; + } + const onVRAM_DebugExecuted = nodeType.prototype.onExecuted; + nodeType.prototype.onExecuted = function(message) { + const r = onVRAM_DebugExecuted? onVRAM_DebugExecuted.apply(this,arguments): undefined + let values = message["text"].toString().split('x'); + this.outputs[3]["name"] = values[0] + " freemem_before" + this.outputs[4]["name"] = values[1] + " freemem_after" + return r + } + break; + + case "JoinStringMulti": + nodeType.prototype.onNodeCreated = function () { + this._type = "STRING" + this.inputs_offset = nodeData.name.includes("selective")?1:0 + this.addWidget("button", "Update inputs", null, () => { + if (!this.inputs) { + this.inputs = []; + } + const target_number_of_inputs = this.widgets.find(w => w.name === "inputcount")["value"]; + if(target_number_of_inputs===this.inputs.length)return; // already set, do nothing + + if(target_number_of_inputs < this.inputs.length){ + for(let i = this.inputs.length; i>=this.inputs_offset+target_number_of_inputs; i--) + this.removeInput(i) + } + else{ + for(let i = this.inputs.length+1-this.inputs_offset; i <= target_number_of_inputs; ++i) + this.addInput(`string_${i}`, this._type) + } + }); + } + break; case "SoundReactive": nodeType.prototype.onNodeCreated = function () { let audioContext; @@ -130,6 +256,21 @@ app.registerExtension({ }; break; - } + } + }, + async setup() { + // to keep Set/Get node virtual connections visible when offscreen + const originalComputeVisibleNodes = LGraphCanvas.prototype.computeVisibleNodes; + LGraphCanvas.prototype.computeVisibleNodes = function () { + const visibleNodesSet = new Set(originalComputeVisibleNodes.apply(this, arguments)); + for (const node of this.graph._nodes) { + if ((node.type === "SetNode" || node.type === "GetNode") && node.drawConnection) { + visibleNodesSet.add(node); + } + } + return Array.from(visibleNodesSet); + }; + + } }); \ No newline at end of file diff --git a/web/js/plotnode.js b/web/js/plotnode.js deleted file mode 100644 index 67c519a..0000000 --- a/web/js/plotnode.js +++ /dev/null @@ -1,30 
+0,0 @@ -import { app } from "../../../scripts/app.js"; -//WIP doesn't do anything -app.registerExtension({ - name: "KJNodes.PlotNode", - async beforeRegisterNodeDef(nodeType, nodeData, app) { - switch (nodeData.name) { - case "PlotNode": - - nodeType.prototype.onNodeCreated = function () { - - this.addWidget("button", "Update", null, () => { - - console.log("start x:" + this.pos[0]) - console.log("start y:" +this.pos[1]) - console.log(this.graph.links); - const toNode = this.graph._nodes.find((otherNode) => otherNode.id == this.graph.links[1].target_id); - console.log("target x:" + toNode.pos[0]) - const a = this.pos[0] - const b = toNode.pos[0] - const distance = Math.abs(a - b); - const maxDistance = 1000 - const finalDistance = (distance - 0) / (maxDistance - 0); - - this.widgets[0].value = finalDistance; - }); - } - break; - } - }, -}); \ No newline at end of file diff --git a/web/js/setgetnodes.js b/web/js/setgetnodes.js index d3035f7..ac35f23 100644 --- a/web/js/setgetnodes.js +++ b/web/js/setgetnodes.js @@ -1,5 +1,5 @@ import { app } from "../../../scripts/app.js"; -import { ComfyWidgets } from '../../../scripts/widgets.js'; + //based on diffus3's SetGet: https://github.com/diffus3/ComfyUI-extensions // Nodes that allow you to tunnel connections for cleaner graphs @@ -21,8 +21,6 @@ function setColorAndBgColor(type) { if (colors) { this.color = colors.color; this.bgcolor = colors.bgcolor; - } else { - // Handle the default case if needed } } let isAlertShown = false; @@ -41,6 +39,12 @@ app.registerExtension({ class SetNode { defaultVisibility = true; serialize_widgets = true; + drawConnection = false; + currentGetters = null; + slotColor = "#FFF"; + canvas = app.canvas; + menuEntry = "Show connections"; + constructor() { if (!this.properties) { this.properties = { @@ -201,9 +205,11 @@ app.registerExtension({ return graph._nodes.filter(otherNode => otherNode.type === 'GetNode' && otherNode.widgets[0].value === name && name !== ''); } + // This node is purely frontend and does not impact the resulting prompt so should not be serialized this.isVirtualNode = true; } + onRemoved() { const allGetters = this.graph._nodes.filter((otherNode) => otherNode.type == "GetNode"); @@ -213,6 +219,136 @@ app.registerExtension({ } }) } + getExtraMenuOptions(_, options) { + this.menuEntry = this.drawConnection ? "Hide connections" : "Show connections"; + options.unshift( + { + content: this.menuEntry, + callback: () => { + this.currentGetters = this.findGetters(this.graph); + if (this.currentGetters.length == 0) return; + let linkType = (this.currentGetters[0].outputs[0].type); + this.slotColor = this.canvas.default_connection_color_byType[linkType] + this.menuEntry = this.drawConnection ? 
"Hide connections" : "Show connections"; + this.drawConnection = !this.drawConnection; + this.canvas.setDirty(true, true); + + }, + has_submenu: true, + submenu: { + title: "Color", + options: [ + { + content: "Highlight", + callback: () => { + this.slotColor = "orange" + this.canvas.setDirty(true, true); + } + } + ], + }, + }, + { + content: "Hide all connections", + callback: () => { + const allGetters = this.graph._nodes.filter(otherNode => otherNode.type === "GetNode" || otherNode.type === "SetNode"); + allGetters.forEach(otherNode => { + otherNode.drawConnection = false; + console.log(otherNode); + }); + + this.menuEntry = "Show connections"; + this.drawConnection = false + this.canvas.setDirty(true, true); + + }, + + }, + ); + // Dynamically add a submenu for all getters + this.currentGetters = this.findGetters(this.graph); + if (this.currentGetters) { + + let gettersSubmenu = this.currentGetters.map(getter => ({ + + content: `${getter.title} id: ${getter.id}`, + callback: () => { + this.canvas.centerOnNode(getter); + this.canvas.selectNode(getter, false); + this.canvas.setDirty(true, true); + + }, + })); + + options.unshift({ + content: "Getters", + has_submenu: true, + submenu: { + title: "GetNodes", + options: gettersSubmenu, + } + }); + } + } + + + onDrawForeground(ctx, lGraphCanvas) { + if (this.drawConnection) { + this._drawVirtualLinks(lGraphCanvas, ctx); + } + } + // onDrawCollapsed(ctx, lGraphCanvas) { + // if (this.drawConnection) { + // this._drawVirtualLinks(lGraphCanvas, ctx); + // } + // } + _drawVirtualLinks(lGraphCanvas, ctx) { + if (!this.currentGetters?.length) return; + var title = this.getTitle ? this.getTitle() : this.title; + var title_width = ctx.measureText(title).width; + if (!this.flags.collapsed) { + var start_node_slotpos = [ + this.size[0], + LiteGraph.NODE_TITLE_HEIGHT * 0.5, + ]; + } + else { + + var start_node_slotpos = [ + title_width + 55, + -15, + + ]; + } + + for (const getter of this.currentGetters) { + if (!this.flags.collapsed) { + var end_node_slotpos = this.getConnectionPos(false, 0); + end_node_slotpos = [ + getter.pos[0] - end_node_slotpos[0] + this.size[0], + getter.pos[1] - end_node_slotpos[1] + ]; + } + else { + var end_node_slotpos = this.getConnectionPos(false, 0); + end_node_slotpos = [ + getter.pos[0] - end_node_slotpos[0] + title_width + 50, + getter.pos[1] - end_node_slotpos[1] - 30 + ]; + } + lGraphCanvas.renderLink( + ctx, + start_node_slotpos, + end_node_slotpos, + null, + false, + null, + this.slotColor, + LiteGraph.RIGHT, + LiteGraph.LEFT + ); + } + } } LiteGraph.registerNodeType( @@ -233,13 +369,16 @@ app.registerExtension({ defaultVisibility = true; serialize_widgets = true; + drawConnection = false; + slotColor = "#FFF"; + currentSetter = null; + canvas = app.canvas; constructor() { if (!this.properties) { this.properties = {}; } this.properties.showOutputText = GetNode.defaultVisibility; - const node = this; this.addWidget( "combo", @@ -266,7 +405,7 @@ app.registerExtension({ ) { this.validateLinks(); } - + this.setName = function(name) { node.widgets[0].value = name; node.onRename(); @@ -315,13 +454,20 @@ app.registerExtension({ this.findSetter = function(graph) { const name = this.widgets[0].value; - return graph._nodes.find(otherNode => otherNode.type === 'SetNode' && otherNode.widgets[0].value === name && name !== ''); + const foundNode = graph._nodes.find(otherNode => otherNode.type === 'SetNode' && otherNode.widgets[0].value === name && name !== ''); + return foundNode; }; + this.goToSetter = function() { + const 
setter = this.findSetter(this.graph); + this.canvas.centerOnNode(setter); + this.canvas.selectNode(setter, false); + }; + // This node is purely frontend and does not impact the resulting prompt so should not be serialized this.isVirtualNode = true; } - + getInputLink(slot) { const setter = this.findSetter(this.graph); @@ -337,6 +483,60 @@ app.registerExtension({ } onAdded(graph) { } + getExtraMenuOptions(_, options) { + let menuEntry = this.drawConnection ? "Hide connections" : "Show connections"; + + options.unshift( + { + content: "Go to setter", + callback: () => { + this.goToSetter(); + }, + }, + { + content: menuEntry, + callback: () => { + this.currentSetter = this.findSetter(this.graph); + if (this.currentSetter.length == 0) return; + let linkType = (this.currentSetter.inputs[0].type); + this.drawConnection = !this.drawConnection; + this.slotColor = this.canvas.default_connection_color_byType[linkType] + menuEntry = this.drawConnection ? "Hide connections" : "Show connections"; + this.canvas.setDirty(true, true); + }, + }, + ); + } + + onDrawForeground(ctx, lGraphCanvas) { + if (this.drawConnection) { + this._drawVirtualLink(lGraphCanvas, ctx); + } + } + // onDrawCollapsed(ctx, lGraphCanvas) { + // if (this.drawConnection) { + // this._drawVirtualLink(lGraphCanvas, ctx); + // } + // } + _drawVirtualLink(lGraphCanvas, ctx) { + if (!this.currentSetter) return; + + let start_node_slotpos = this.currentSetter.getConnectionPos(false, 0); + start_node_slotpos = [ + start_node_slotpos[0] - this.pos[0], + start_node_slotpos[1] - this.pos[1], + ]; + let end_node_slotpos = [0, -LiteGraph.NODE_TITLE_HEIGHT * 0.5]; + lGraphCanvas.renderLink( + ctx, + start_node_slotpos, + end_node_slotpos, + null, + false, + null, + this.slotColor + ); + } } LiteGraph.registerNodeType( diff --git a/web/js/spline_editor.js b/web/js/spline_editor.js index 531aebe..8f03f66 100644 --- a/web/js/spline_editor.js +++ b/web/js/spline_editor.js @@ -101,8 +101,9 @@ app.registerExtension({ name: 'KJNodes.SplineEditor', async beforeRegisterNodeDef(nodeType, nodeData) { - if (nodeData?.name == 'SplineEditor') { + if (nodeData?.name === 'SplineEditor') { chainCallback(nodeType.prototype, "onNodeCreated", function () { + hideWidgetForGood(this, this.widgets.find(w => w.name === "coordinates")) var element = document.createElement("div"); @@ -113,8 +114,64 @@ app.registerExtension({ serialize: false, hideOnZoom: false, }); + + // context menu + this.contextMenu = document.createElement("div"); + this.contextMenu.id = "context-menu"; + this.contextMenu.style.display = "none"; + this.contextMenu.style.position = "absolute"; + this.contextMenu.style.backgroundColor = "#202020"; + this.contextMenu.style.minWidth = "100px"; + this.contextMenu.style.boxShadow = "0px 8px 16px 0px rgba(0,0,0,0.2)"; + this.contextMenu.style.zIndex = "100"; + this.contextMenu.style.padding = "5px"; + + function styleMenuItem(menuItem) { + menuItem.style.display = "block"; + menuItem.style.padding = "5px"; + menuItem.style.color = "#FFF"; + menuItem.style.fontFamily = "Arial, sans-serif"; + menuItem.style.fontSize = "16px"; + menuItem.style.textDecoration = "none"; + menuItem.style.marginBottom = "5px"; + } + this.menuItem1 = document.createElement("a"); + this.menuItem1.href = "#"; + this.menuItem1.id = "menu-item-1"; + this.menuItem1.textContent = "Toggle handles"; + styleMenuItem(this.menuItem1); + + this.menuItem2 = document.createElement("a"); + this.menuItem2.href = "#"; + this.menuItem2.id = "menu-item-2"; + this.menuItem2.textContent = "Display 
sample points"; + styleMenuItem(this.menuItem2); + + this.menuItem3 = document.createElement("a"); + this.menuItem3.href = "#"; + this.menuItem3.id = "menu-item-2"; + this.menuItem3.textContent = "Switch point shape"; + styleMenuItem(this.menuItem3); + + const menuItems = [this.menuItem1, this.menuItem2, this.menuItem3]; + + menuItems.forEach(menuItem => { + menuItem.addEventListener('mouseover', function() { + this.style.backgroundColor = "gray"; + }); + menuItem.addEventListener('mouseout', function() { + this.style.backgroundColor = "#202020"; + }); + }); + + // Append menu items to the context menu + menuItems.forEach(menuItem => { + this.contextMenu.appendChild(menuItem); + }); + + document.body.appendChild( this.contextMenu); + this.addWidget("button", "New spline", null, () => { - if (!this.properties || !("points" in this.properties)) { createSplineEditor(this) this.addProperty("points", this.constructor.type, "string"); @@ -123,22 +180,18 @@ app.registerExtension({ createSplineEditor(this, true) } }); - this.setSize([550, 800]) + + this.setSize([550, 920]); + this.resizable = false; this.splineEditor.parentEl = document.createElement("div"); this.splineEditor.parentEl.className = "spline-editor"; this.splineEditor.parentEl.id = `spline-editor-${this.uuid}` element.appendChild(this.splineEditor.parentEl); - //disable context menu on right click - document.addEventListener('contextmenu', function(e) { - if (e.button === 2) { // Right mouse button - e.preventDefault(); - e.stopPropagation(); - } - }) chainCallback(this, "onGraphConfigured", function() { - createSplineEditor(this) + createSplineEditor(this); }); + }); // onAfterGraphConfigured }//node created } //before register @@ -147,22 +200,173 @@ app.registerExtension({ function createSplineEditor(context, reset=false) { console.log("creatingSplineEditor") + + document.addEventListener('contextmenu', function(e) { + e.preventDefault(); + }); + + document.addEventListener('click', function(e) { + if (!context.contextMenu.contains(e.target)) { + context.contextMenu.style.display = 'none'; + } + }); + + context.menuItem1.addEventListener('click', function(e) { + e.preventDefault(); + if (!drawHandles) { + drawHandles = true + vis.add(pv.Line) + .data(() => points.map((point, index) => ({ + start: point, + end: [index] + }))) + .left(d => d.start.x) + .top(d => d.start.y) + .interpolate("linear") + .tension(0) // Straight lines + .strokeStyle("#ff7f0e") // Same color as control points + .lineWidth(1) + .visible(() => drawHandles); + vis.render(); + + + } else { + drawHandles = false + vis.render(); + } + context.contextMenu.style.display = 'none'; + + }); + + context.menuItem2.addEventListener('click', function(e) { + e.preventDefault(); + drawSamplePoints = !drawSamplePoints; + updatePath(); + }); + + context.menuItem3.addEventListener('click', function(e) { + e.preventDefault(); + if (dotShape == "circle"){ + dotShape = "triangle" + } + else { + dotShape = "circle" + } + console.log(dotShape) + updatePath(); +}); + var dotShape = "circle"; + var drawSamplePoints = false; + + function updatePath() { + let coords = samplePoints(pathElements[0], points_to_sample, samplingMethod, w); + + if (drawSamplePoints) { + if (pointsLayer) { + // Update the data of the existing points layer + pointsLayer.data(coords); + } else { + // Create the points layer if it doesn't exist + pointsLayer = vis.add(pv.Dot) + .data(coords) + .left(function(d) { return d.x; }) + .top(function(d) { return d.y; }) + .radius(5) // Adjust the radius as needed + 
.fillStyle("red") // Change the color as needed + .strokeStyle("black") // Change the stroke color as needed + .lineWidth(1); // Adjust the line width as needed + } + } else { + if (pointsLayer) { + // Remove the points layer + pointsLayer.data([]); + vis.render(); + } + } + let coordsString = JSON.stringify(coords); + pointsStoreWidget.value = JSON.stringify(points); + if (coordWidget) { + coordWidget.value = coordsString; + } + vis.render(); + } if (reset && context.splineEditor.element) { context.splineEditor.element.innerHTML = ''; // Clear the container - } + } const coordWidget = context.widgets.find(w => w.name === "coordinates"); const interpolationWidget = context.widgets.find(w => w.name === "interpolation"); const pointsWidget = context.widgets.find(w => w.name === "points_to_sample"); const pointsStoreWidget = context.widgets.find(w => w.name === "points_store"); const tensionWidget = context.widgets.find(w => w.name === "tension"); - const segmentedWidget = context.widgets.find(w => w.name === "segmented"); + const minValueWidget = context.widgets.find(w => w.name === "min_value"); + const maxValueWidget = context.widgets.find(w => w.name === "max_value"); + const samplingMethodWidget = context.widgets.find(w => w.name === "sampling_method"); + const widthWidget = context.widgets.find(w => w.name === "mask_width"); + const heightWidget = context.widgets.find(w => w.name === "mask_height"); + //const segmentedWidget = context.widgets.find(w => w.name === "segmented"); + + var interpolation = interpolationWidget.value + var tension = tensionWidget.value + var points_to_sample = pointsWidget.value + var rangeMin = minValueWidget.value + var rangeMax = maxValueWidget.value + var pointsLayer = null; + var samplingMethod = samplingMethodWidget.value + + if (samplingMethod == "path") { + dotShape = "triangle" + } + + interpolationWidget.callback = () => { + interpolation = interpolationWidget.value + updatePath(); + } + samplingMethodWidget.callback = () => { + samplingMethod = samplingMethodWidget.value + if (samplingMethod == "path") { + dotShape = "triangle" + } + updatePath(); + } + tensionWidget.callback = () => { + tension = tensionWidget.value + updatePath(); + } + pointsWidget.callback = () => { + points_to_sample = pointsWidget.value + updatePath(); + } + minValueWidget.callback = () => { + rangeMin = minValueWidget.value + updatePath(); + } + maxValueWidget.callback = () => { + rangeMax = maxValueWidget.value + updatePath(); + } + widthWidget.callback = () => { + w = widthWidget.value + vis.width(w) + context.setSize([w + 45, context.size[1]]); + updatePath(); + } + heightWidget.callback = () => { + h = heightWidget.value + vis.height(h) + context.setSize([context.size[0], h + 410]); + updatePath(); + } // Initialize or reset points array - var w = 512 - var h = 512 - var i = 3 + var drawHandles = false; + var hoverIndex = -1; + var isDragging = false; + var w = widthWidget.value; + var h = heightWidget.value; + var i = 3; let points = []; + if (!reset && pointsStoreWidget.value != "") { points = JSON.parse(pointsStoreWidget.value); } else { @@ -187,96 +391,246 @@ function createSplineEditor(context, reset=false) { var vis = new pv.Panel() .width(w) .height(h) - .fillStyle("var(--comfy-menu-bg)") + .fillStyle("#222") .strokeStyle("gray") .lineWidth(2) .antialias(false) .margin(10) .event("mousedown", function() { if (pv.event.shiftKey) { // Use pv.event to access the event object - i = points.push(this.mouse()) - 1; + let scaledMouse = { + x: this.mouse().x / 
app.canvas.ds.scale, + y: this.mouse().y / app.canvas.ds.scale + }; + i = points.push(scaledMouse) - 1; + updatePath(); return this; } - }) - .event("mouseup", function() { - if (this.pathElements !== null) { - let coords = samplePoints(pathElements[0], pointsWidget.value); - let coordsString = JSON.stringify(coords); - pointsStoreWidget.value = JSON.stringify(points); - if (coordWidget) { - coordWidget.value = coordsString; - } - } - }); + else if (pv.event.ctrlKey) { + // Capture the clicked location + let clickedPoint = { + x: this.mouse().x / app.canvas.ds.scale, + y: this.mouse().y / app.canvas.ds.scale + }; + // Find the two closest points to the clicked location + let { point1Index, point2Index } = findClosestPoints(points, clickedPoint); + + // Calculate the midpoint between the two closest points + let midpoint = { + x: (points[point1Index].x + points[point2Index].x) / 2, + y: (points[point1Index].y + points[point2Index].y) / 2 + }; + + // Insert the midpoint into the array + points.splice(point2Index, 0, midpoint); + i = point2Index; + updatePath(); + } + else if (pv.event.button === 2) { + context.contextMenu.style.display = 'block'; + context.contextMenu.style.left = `${pv.event.clientX}px`; + context.contextMenu.style.top = `${pv.event.clientY}px`; + } + }) + vis.add(pv.Rule) - .data(pv.range(0, 8, .5)) - .bottom(d => d * 64 + 0) + .data(pv.range(0, h, 64)) + .bottom(d => d) .strokeStyle("gray") - .lineWidth(1) + .lineWidth(3) + + // vis.add(pv.Rule) + // .data(pv.range(0, points_to_sample, 1)) + // .left(d => d * 512 / (points_to_sample - 1)) + // .strokeStyle("gray") + // .lineWidth(2) vis.add(pv.Line) .data(() => points) .left(d => d.x) .top(d => d.y) - .interpolate(() => interpolationWidget.value) - .tension(() => tensionWidget.value) - .segmented(() => segmentedWidget.value) + .interpolate(() => interpolation) + .tension(() => tension) + .segmented(() => false) .strokeStyle(pv.Colors.category10().by(pv.index)) .lineWidth(3) - + vis.add(pv.Dot) .data(() => points) .left(d => d.x) .top(d => d.y) - .radius(8) + .radius(10) + .shape(function() { + return dotShape; + }) + .angle(function() { + const index = this.index; + let angle = 0; + + if (dotShape === "triangle") { + let dxNext = 0, dyNext = 0; + if (index < points.length - 1) { + dxNext = points[index + 1].x - points[index].x; + dyNext = points[index + 1].y - points[index].y; + } + + let dxPrev = 0, dyPrev = 0; + if (index > 0) { + dxPrev = points[index].x - points[index - 1].x; + dyPrev = points[index].y - points[index - 1].y; + } + + const dx = (dxNext + dxPrev) / 2; + const dy = (dyNext + dyPrev) / 2; + + angle = Math.atan2(dy, dx); + angle -= Math.PI / 2; + angle = (angle + 2 * Math.PI) % (2 * Math.PI); + } + + return angle; + }) .cursor("move") .strokeStyle(function() { return i == this.index ? 
"#ff7f0e" : "#1f77b4"; }) - .fillStyle(function() { return "rgba(100, 100, 100, 0.2)"; }) + .fillStyle(function() { return "rgba(100, 100, 100, 0.3)"; }) .event("mousedown", pv.Behavior.drag()) .event("dragstart", function() { i = this.index; - if (pv.event.button === 2) { + hoverIndex = this.index; + isDragging = true; + if (pv.event.button === 2 && i !== 0 && i !== points.length - 1) { points.splice(i--, 1); vis.render(); } return this; }) - .event("drag", vis) - .anchor("top").add(pv.Label) - .font(d => Math.sqrt(d[2]) * 32 + "px sans-serif") - //.text(d => `(${Math.round(d.x)}, ${Math.round(d.y)})`) - .text(d => { - // Normalize y to range 0.0 to 1.0, considering the inverted y-axis - var normalizedY = 1.0 - (d.y / h); - return `${normalizedY.toFixed(2)}`; + .event("dragend", function() { + if (this.pathElements !== null) { + updatePath(); + } + isDragging = false; + }) + .event("drag", function() { + let adjustedX = this.mouse().x / app.canvas.ds.scale; // Adjust the new X position by the inverse of the scale factor + let adjustedY = this.mouse().y / app.canvas.ds.scale; // Adjust the new Y position by the inverse of the scale factor + // Determine the bounds of the vis.Panel + const panelWidth = vis.width(); + const panelHeight = vis.height(); + + // Adjust the new position if it would place the dot outside the bounds of the vis.Panel + adjustedX = Math.max(0, Math.min(panelWidth, adjustedX)); + adjustedY = Math.max(0, Math.min(panelHeight, adjustedY)); + points[this.index] = { x: adjustedX, y: adjustedY }; // Update the point's position + vis.render(); // Re-render the visualization to reflect the new position + }) + .event("mouseover", function() { + hoverIndex = this.index; // Set the hover index to the index of the hovered dot + vis.render(); // Re-render the visualization + }) + .event("mouseout", function() { + !isDragging && (hoverIndex = -1); // Reset the hover index when the mouse leaves the dot + vis.render(); // Re-render the visualization + }) + .anchor("center") + .add(pv.Label) + .visible(function() { + return hoverIndex === this.index; // Only show the label for the hovered dot + }) + .left(d => d.x < w / 2 ? d.x + 80 : d.x - 70) // Shift label to right if on left half, otherwise shift to left + .top(d => d.y < h / 2 ? 
d.y + 20 : d.y - 20) // Shift label down if on top half, otherwise shift up + .font(12 + "px sans-serif") + .text(d => { + if (samplingMethod == "path") { + return `X: ${Math.round(d.x)}, Y: ${Math.round(d.y)}`; + } else { + let frame = Math.round((d.x / w) * points_to_sample); + let normalizedY = (1.0 - (d.y / h) - 0.0) * (rangeMax - rangeMin) + rangeMin; + let normalizedX = (d.x / w); + return `F: ${frame}, X: ${normalizedX.toFixed(2)}, Y: ${normalizedY.toFixed(2)}`; + } }) .textStyle("orange") - + vis.render(); var svgElement = vis.canvas(); svgElement.style['zIndex'] = "2" svgElement.style['position'] = "relative" context.splineEditor.element.appendChild(svgElement); - var pathElements = svgElement.getElementsByTagName('path'); // Get all path elements - + var pathElements = svgElement.getElementsByTagName('path'); // Get all path elements + updatePath(); } -function samplePoints(svgPathElement, numSamples) { - var pathLength = svgPathElement.getTotalLength(); - var points = []; - for (var i = 0; i < numSamples; i++) { +function samplePoints(svgPathElement, numSamples, samplingMethod, width) { + var svgWidth = width; // Fixed width of the SVG element + var pathLength = svgPathElement.getTotalLength(); + var points = []; + + for (var i = 0; i < numSamples; i++) { + if (samplingMethod === "time") { + // Calculate the x-coordinate for the current sample based on the SVG's width + var x = (svgWidth / (numSamples - 1)) * i; + // Find the point on the path that intersects the vertical line at the calculated x-coordinate + var point = findPointAtX(svgPathElement, x, pathLength); + } + else if (samplingMethod === "path") { // Calculate the distance along the path for the current sample var distance = (pathLength / (numSamples - 1)) * i; - // Get the point at the current distance var point = svgPathElement.getPointAtLength(distance); + } - // Add the point to the array of points - points.push({ x: point.x, y: point.y }); - } - //console.log(points); - return points; + // Add the point to the array of points + points.push({ x: point.x, y: point.y }); + } + return points; +} + +function findClosestPoints(points, clickedPoint) { + // Calculate distances from clickedPoint to each point in the array + let distances = points.map(point => { + let dx = clickedPoint.x - point.x; + let dy = clickedPoint.y - point.y; + return { index: points.indexOf(point), distance: Math.sqrt(dx * dx + dy * dy) }; + }); + // Sort distances and get the indices of the two closest points + let sortedDistances = distances.sort((a, b) => a.distance - b.distance); + let closestPoint1Index = sortedDistances[0].index; + let closestPoint2Index = sortedDistances[1].index; + // Ensure point1Index is always the smaller index + if (closestPoint1Index > closestPoint2Index) { + [closestPoint1Index, closestPoint2Index] = [closestPoint2Index, closestPoint1Index]; + } + return { point1Index: closestPoint1Index, point2Index: closestPoint2Index }; +} + +function findPointAtX(svgPathElement, targetX, pathLength) { + let low = 0; + let high = pathLength; + let bestPoint = svgPathElement.getPointAtLength(0); + + while (low <= high) { + let mid = low + (high - low) / 2; + let point = svgPathElement.getPointAtLength(mid); + + if (Math.abs(point.x - targetX) < 1) { + return point; // The point is close enough to the target + } + + if (point.x < targetX) { + low = mid + 1; + } else { + high = mid - 1; + } + + // Keep track of the closest point found so far + if (Math.abs(point.x - targetX) < Math.abs(bestPoint.x - targetX)) { + bestPoint = point; 
+ } + } + + // Return the closest point found + return bestPoint; } //from melmass
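
The spline editor's "time" sampling distributes samples evenly along the x axis (one per frame, found via findPointAtX), while "path" sampling distributes them evenly along the curve length; in both cases the sampled points are written to the hidden `coordinates` widget as a JSON list of `{x, y}` pixel positions, and the on-canvas label shows the value mapping `(1.0 - y / h) * (max_value - min_value) + min_value`. Below is a minimal, hypothetical Python sketch of that mapping on the consumer side; the helper name and signature are made up for illustration and are not the node pack's actual implementation.

```python
import json

def coords_to_values(coords_json, mask_height, min_value=0.0, max_value=1.0):
    """Map spline editor sample points (pixel space) to per-frame float values.

    Hypothetical helper: mirrors the editor's label formula
    value = (1.0 - y / h) * (max - min) + min, where y grows downward.
    """
    points = json.loads(coords_json)  # e.g. '[{"x": 0, "y": 256}, ...]'
    values = []
    for p in points:
        normalized_y = 1.0 - (p["y"] / mask_height)  # invert the y axis
        values.append(normalized_y * (max_value - min_value) + min_value)
    return values

# Three "time" samples on a 512 px high canvas, remapped to the default 0..1 range
print(coords_to_values('[{"x": 0, "y": 512}, {"x": 256, "y": 256}, {"x": 512, "y": 0}]', 512))
# -> [0.0, 0.5, 1.0]
```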