From 7700bf7396e1a61c6edc8d39466d55ac89b2a48d Mon Sep 17 00:00:00 2001 From: Easymode <76738305+Easymode-ai@users.noreply.github.com> Date: Tue, 18 Feb 2025 23:44:55 +0000 Subject: [PATCH] Added BPT --- hy3dgen/shapegen/bpt/README.md | 10 + .../bpt/__pycache__/utils.cpython-312.pyc | Bin 0 -> 4322 bytes hy3dgen/shapegen/bpt/miche/LICENSE | 674 ++++++++++++++++++ hy3dgen/shapegen/bpt/miche/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 221 bytes .../miche/__pycache__/encode.cpython-312.pyc | Bin 0 -> 3885 bytes hy3dgen/shapegen/bpt/miche/encode.py | 74 ++ .../bpt/miche/michelangelo/__init__.py | 1 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 234 bytes .../miche/michelangelo/graphics/__init__.py | 1 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 243 bytes .../graphics/primitives/__init__.py | 4 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 316 bytes .../__pycache__/volume.cpython-312.pyc | Bin 0 -> 1662 bytes .../graphics/primitives/volume.py | 21 + .../bpt/miche/michelangelo/models/__init__.py | 1 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 241 bytes .../michelangelo/models/modules/__init__.py | 3 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 285 bytes .../__pycache__/checkpoint.cpython-312.pyc | Bin 0 -> 3286 bytes .../__pycache__/distributions.cpython-312.pyc | Bin 0 -> 4756 bytes .../__pycache__/embedder.cpython-312.pyc | Bin 0 -> 10551 bytes .../transformer_blocks.cpython-312.pyc | Bin 0 -> 14769 bytes .../michelangelo/models/modules/checkpoint.py | 64 ++ .../models/modules/distributions.py | 83 +++ .../michelangelo/models/modules/embedder.py | 213 ++++++ .../models/modules/transformer_blocks.py | 286 ++++++++ .../michelangelo/models/tsal/__init__.py | 1 + .../tsal/__pycache__/__init__.cpython-312.pyc | Bin 0 -> 246 bytes .../__pycache__/asl_pl_module.cpython-312.pyc | Bin 0 -> 15489 bytes .../clip_asl_module.cpython-312.pyc | Bin 0 -> 4374 bytes .../inference_utils.cpython-312.pyc | Bin 0 -> 3438 bytes .../tsal/__pycache__/loss.cpython-312.pyc | Bin 0 -> 6974 bytes .../__pycache__/sal_perceiver.cpython-312.pyc | Bin 0 -> 13507 bytes .../__pycache__/tsal_base.cpython-312.pyc | Bin 0 -> 6589 bytes .../michelangelo/models/tsal/asl_pl_module.py | 383 ++++++++++ .../models/tsal/clip_asl_module.py | 118 +++ .../models/tsal/inference_utils.py | 76 ++ .../miche/michelangelo/models/tsal/loss.py | 130 ++++ .../michelangelo/models/tsal/sal_perceiver.py | 410 +++++++++++ .../michelangelo/models/tsal/tsal_base.py | 125 ++++ .../bpt/miche/michelangelo/utils/__init__.py | 3 + .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 297 bytes .../utils/__pycache__/misc.cpython-312.pyc | Bin 0 -> 3314 bytes .../bpt/miche/michelangelo/utils/misc.py | 86 +++ hy3dgen/shapegen/bpt/model/__init__.py | 0 .../__pycache__/__init__.cpython-312.pyc | Bin 0 -> 221 bytes .../__pycache__/data_utils.cpython-312.pyc | Bin 0 -> 8680 bytes .../miche_conditioner.cpython-312.pyc | Bin 0 -> 4085 bytes .../model/__pycache__/model.cpython-312.pyc | Bin 0 -> 13320 bytes .../__pycache__/serializaiton.cpython-312.pyc | Bin 0 -> 10107 bytes hy3dgen/shapegen/bpt/model/data_utils.py | 194 +++++ .../shapegen/bpt/model/miche_conditioner.py | 90 +++ hy3dgen/shapegen/bpt/model/model.py | 382 ++++++++++ hy3dgen/shapegen/bpt/model/serializaiton.py | 241 +++++++ hy3dgen/shapegen/bpt/requirements.txt | 30 + hy3dgen/shapegen/bpt/utils.py | 86 +++ 57 files changed, 3790 insertions(+) create mode 100644 hy3dgen/shapegen/bpt/README.md create mode 100644 
hy3dgen/shapegen/bpt/__pycache__/utils.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/LICENSE create mode 100644 hy3dgen/shapegen/bpt/miche/__init__.py create mode 100644 hy3dgen/shapegen/bpt/miche/__pycache__/__init__.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/__pycache__/encode.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/encode.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/__init__.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/__pycache__/__init__.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/graphics/__init__.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/graphics/__pycache__/__init__.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__init__.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__pycache__/__init__.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__pycache__/volume.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/volume.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/__init__.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/__pycache__/__init__.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/__init__.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/__pycache__/__init__.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/__pycache__/checkpoint.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/__pycache__/distributions.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/__pycache__/embedder.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/__pycache__/transformer_blocks.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/checkpoint.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/distributions.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/embedder.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/transformer_blocks.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/__init__.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/__pycache__/__init__.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/__pycache__/asl_pl_module.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/__pycache__/clip_asl_module.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/__pycache__/inference_utils.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/__pycache__/loss.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/__pycache__/sal_perceiver.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/__pycache__/tsal_base.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/asl_pl_module.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/clip_asl_module.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/inference_utils.py create mode 100644 
hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/loss.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/sal_perceiver.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/tsal_base.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/utils/__init__.py create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/utils/__pycache__/__init__.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/utils/__pycache__/misc.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/miche/michelangelo/utils/misc.py create mode 100644 hy3dgen/shapegen/bpt/model/__init__.py create mode 100644 hy3dgen/shapegen/bpt/model/__pycache__/__init__.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/model/__pycache__/data_utils.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/model/__pycache__/miche_conditioner.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/model/__pycache__/model.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/model/__pycache__/serializaiton.cpython-312.pyc create mode 100644 hy3dgen/shapegen/bpt/model/data_utils.py create mode 100644 hy3dgen/shapegen/bpt/model/miche_conditioner.py create mode 100644 hy3dgen/shapegen/bpt/model/model.py create mode 100644 hy3dgen/shapegen/bpt/model/serializaiton.py create mode 100644 hy3dgen/shapegen/bpt/requirements.txt create mode 100644 hy3dgen/shapegen/bpt/utils.py
diff --git a/hy3dgen/shapegen/bpt/README.md b/hy3dgen/shapegen/bpt/README.md
new file mode 100644
index 0000000..589e125
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/README.md
@@ -0,0 +1,10 @@
+# BPT Installation
+
+Original repo: https://github.com/whaohan/bpt
+
+
+### Installation
+pip install -r requirements.txt
+
+### Download weights (from the main Hunyuan3D-2 directory)
+huggingface-cli download whaohan/bpt --local-dir ./weights
\ No newline at end of file
diff --git a/hy3dgen/shapegen/bpt/__pycache__/utils.cpython-312.pyc b/hy3dgen/shapegen/bpt/__pycache__/utils.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..246dccdfcaafad4abc4ac827979c9653ab5ca311
Binary files /dev/null and b/hy3dgen/shapegen/bpt/__pycache__/utils.cpython-312.pyc differ
diff --git a/hy3dgen/shapegen/bpt/miche/LICENSE b/hy3dgen/shapegen/bpt/miche/LICENSE
new file mode 100644
index 0000000..f288702
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so.
This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/hy3dgen/shapegen/bpt/miche/__init__.py b/hy3dgen/shapegen/bpt/miche/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/hy3dgen/shapegen/bpt/miche/__pycache__/__init__.cpython-312.pyc b/hy3dgen/shapegen/bpt/miche/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..97f5d22c5645839db7e94286c054b86834448cd3
Binary files /dev/null and b/hy3dgen/shapegen/bpt/miche/__pycache__/__init__.cpython-312.pyc differ
diff --git a/hy3dgen/shapegen/bpt/miche/__pycache__/encode.cpython-312.pyc b/hy3dgen/shapegen/bpt/miche/__pycache__/encode.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a44f74dde1129afb61a26ab64c2f6faf3ede9669
Binary files /dev/null and b/hy3dgen/shapegen/bpt/miche/__pycache__/encode.cpython-312.pyc differ
diff --git a/hy3dgen/shapegen/bpt/miche/encode.py b/hy3dgen/shapegen/bpt/miche/encode.py
new file mode 100644
index 0000000..f755c7b
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/encode.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+import argparse
+from omegaconf import OmegaConf
+import numpy as np
+import torch
+from .michelangelo.utils.misc import instantiate_from_config
+
+def load_surface(fp):
+
+    with np.load(fp) as input_pc:
+        surface = input_pc['points']
+        normal = input_pc['normals']
+
+    rng = np.random.default_rng()
+    ind = rng.choice(surface.shape[0], 4096, replace=False)
+    surface = torch.FloatTensor(surface[ind])
+    normal = torch.FloatTensor(normal[ind])
+
+    surface = torch.cat([surface, normal], dim=-1).unsqueeze(0).cuda()
+
+    return surface
+
+def reconstruction(args, model, bounds=(-1.25, -1.25, -1.25, 1.25, 1.25, 1.25), octree_depth=7, num_chunks=10000):
+
+    surface = load_surface(args.pointcloud_path)
+    # old_surface = surface.clone()
+
+    # surface[0,:,0]*=-1
+    # surface[0,:,1]*=-1
+    surface[0,:,2]*=-1
+
+    # encoding
+    shape_embed, shape_latents = model.model.encode_shape_embed(surface, return_latents=True)
+    shape_zq, posterior = model.model.shape_model.encode_kl_embed(shape_latents)
+
+    # decoding
+    latents = model.model.shape_model.decode(shape_zq)
+    # geometric_func = partial(model.model.shape_model.query_geometry, latents=latents)
+
+    return 0
+
+def load_model(ckpt_path="shapevae-256.ckpt", config_path="shapevae-256.yaml"):
+    model_config = OmegaConf.load(config_path)
+    print(model_config)
+    if hasattr(model_config, "model"):
+        model_config = model_config.model
+
+    model = instantiate_from_config(model_config, ckpt_path=ckpt_path)
+    model = model.eval()
+
+    return model
+if __name__ == "__main__":
+    '''
+    1. Reconstruct point cloud
+    2. Image-conditioned generation
+    3. Text-conditioned generation
+    '''
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--config_path", type=str, required=True)
+    parser.add_argument("--ckpt_path", type=str, required=True)
+    parser.add_argument("--pointcloud_path", type=str, default='./example_data/surface.npz',
+                        help='Path to the input point cloud')
+    parser.add_argument("--image_path", type=str, help='Path to the input image')
+    parser.add_argument("--text", type=str,
+                        help='Input text within a format: A 3D model of motorcar; Porsche 911.')
+    parser.add_argument("--output_dir", type=str, default='./output')
+    parser.add_argument("-s", "--seed", type=int, default=0)
+    args = parser.parse_args()
+
+    print(f'-----------------------------------------------------------------------------')
+    print(f'>>> Output directory: {args.output_dir}')
+    print(f'-----------------------------------------------------------------------------')
+
+    reconstruction(args, load_model(ckpt_path=args.ckpt_path, config_path=args.config_path))
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/__init__.py b/hy3dgen/shapegen/bpt/miche/michelangelo/__init__.py
new file mode 100644
index 0000000..40a96af
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/michelangelo/__init__.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/__pycache__/__init__.cpython-312.pyc b/hy3dgen/shapegen/bpt/miche/michelangelo/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dbe81bb660dc6ca3fe289cbea2b872350bfac930
Binary files /dev/null and b/hy3dgen/shapegen/bpt/miche/michelangelo/__pycache__/__init__.cpython-312.pyc differ
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/__init__.py b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/__init__.py
new file mode 100644
index 0000000..40a96af
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/__init__.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/__pycache__/__init__.cpython-312.pyc b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a6568861580657d9ef578c32c05ad90959a623f4
Binary files /dev/null and b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/__pycache__/__init__.cpython-312.pyc differ
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__init__.py b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__init__.py
new file mode 100644
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+
+from .volume import generate_dense_grid_points
+
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__pycache__/__init__.cpython-312.pyc b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__pycache__/__init__.cpython-312.pyc
new file mode 100644
Binary files /dev/null and b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__pycache__/__init__.cpython-312.pyc differ
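For reference, encode.py above can be exercised programmatically as well as through its CLI. Below is a minimal sketch, not part of the patch: it assumes the hy3dgen package root is importable, a CUDA device is available (load_surface calls .cuda()), and hypothetical local paths for the ShapeVAE checkpoint/config pair that load_model expects.

    from types import SimpleNamespace
    from hy3dgen.shapegen.bpt.miche.encode import load_model, reconstruction

    # Hypothetical paths; point these at the downloaded Michelangelo ShapeVAE files.
    model = load_model(ckpt_path='weights/shapevae-256.ckpt',
                       config_path='weights/shapevae-256.yaml')

    # reconstruction() only reads args.pointcloud_path, so a bare namespace suffices;
    # the .npz file must contain 'points' and 'normals' arrays (see load_surface).
    args = SimpleNamespace(pointcloud_path='./example_data/surface.npz')
    reconstruction(args, model)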
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__pycache__/volume.cpython-312.pyc b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__pycache__/volume.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25d8c4b11480e972968b50d492e4f925d6b3ce20
Binary files /dev/null and b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/__pycache__/volume.cpython-312.pyc differ
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/volume.py b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/volume.py
new file mode 100644
index 0000000..9c98418
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/michelangelo/graphics/primitives/volume.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+
+# produce dense points
+def generate_dense_grid_points(bbox_min: np.ndarray,
+                               bbox_max: np.ndarray,
+                               octree_depth: int,
+                               indexing: str = "ij"):
+    length = bbox_max - bbox_min
+    num_cells = np.exp2(octree_depth)
+    x = np.linspace(bbox_min[0], bbox_max[0], int(num_cells) + 1, dtype=np.float32)
+    y = np.linspace(bbox_min[1], bbox_max[1], int(num_cells) + 1, dtype=np.float32)
+    z = np.linspace(bbox_min[2], bbox_max[2], int(num_cells) + 1, dtype=np.float32)
+    [xs, ys, zs] = np.meshgrid(x, y, z, indexing=indexing)
+    xyz = np.stack((xs, ys, zs), axis=-1)
+    xyz = xyz.reshape(-1, 3)
+    grid_size = [int(num_cells) + 1, int(num_cells) + 1, int(num_cells) + 1]
+
+    return xyz, grid_size, length
+
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/__init__.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/__init__.py
new file mode 100644
index 0000000..40a96af
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/__init__.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/__pycache__/__init__.cpython-312.pyc b/hy3dgen/shapegen/bpt/miche/michelangelo/models/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..db553e82eb6b813bfd542c2263a129018e3d25a7
Binary files /dev/null and b/hy3dgen/shapegen/bpt/miche/michelangelo/models/__pycache__/__init__.cpython-312.pyc differ
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/__init__.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/__init__.py
new file mode 100644
index 0000000..0729b49
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+from .checkpoint import checkpoint
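A quick sanity check for generate_dense_grid_points above (again illustrative, not part of the patch): at octree depth d the helper returns 2**d + 1 samples per axis, so the default depth of 7 used by reconstruction() in encode.py yields a 129 x 129 x 129 query grid over the given bounding box. The import below goes through the primitives package __init__; importing directly from .volume works as well.

    import numpy as np
    from hy3dgen.shapegen.bpt.miche.michelangelo.graphics.primitives import generate_dense_grid_points

    # Bounds match the defaults of reconstruction() in encode.py.
    xyz, grid_size, length = generate_dense_grid_points(
        bbox_min=np.array([-1.25, -1.25, -1.25]),
        bbox_max=np.array([1.25, 1.25, 1.25]),
        octree_depth=7,
    )
    assert grid_size == [129, 129, 129]   # 2**7 + 1 points per axis
    assert xyz.shape == (129 ** 3, 3)     # flattened (x, y, z) query points
    assert np.allclose(length, 2.5)       # bbox extent per axis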
ziIxt3uzR|C=ahdZg;?}R7!-!XqxB>8rF|c)o37q5<=hZ zBvSnF&V%(Bnmr?X#x{?I$MPS)IsWGK!#}>i?b9RYr;ePT4qliPE->MWR#h-#%z`mn zMve)yWwPtVR94D~tnaHBwu@AO$XWcP5(q_@es0rt5QLd));o{!rZ|&jjX;xSyU9SfQprtjK2_Q4&CjCforphUx zrkg-bSW$qPDUWJN1F)oC*h_4QFnj2SJq*fgbX?-7n3E*kDmFKb+htcamW=1)vX>~3 zXh53fRxyLui?pmn>PSVaNNRu-%EG^6iF`M!$Use98Nnhh343n zib)s=kGK^?wgX<39f{bL9tOV24d&QmsURy59gV>FGV@{Dr^$B+-RkY{9S* zVPn5d(AYs$i1gqm-2_o=>*npV+kOCimdtAda90DxrJP}0Dd**C>W4#vfq5V0{9I)H z@C$=y=L3{eEG%(~fhA6{uf!?ll{m$^g8S+Tr-tiNw-Wg2NmFbKG<9Y66LBYEPulX+ zmA77RRqLt7okGe*nnuNUJuKq%!cH2SfIg8Nqo(GlO|gGWan#`!16~xXM=DID2YMJt zo-oY|{TLoyX7Iib&PzLdx~I*|9xX(cdR6av6{MH-pXpp{q*woz)sWt;D=S@H{e6fY zN&7)a{Xx$dJ4}}9&va7*d};8sr0bf0f<0Lk@CDr4XD`@DjLH*(NS%GIJ~v`&OZVIa z2n*j65EeA8rd=5z>{$U2cCG|bn*m{bm6fI=jKK11Om%G);&@v@SOO+vl>5AWKD2AG z8Cwxb<_q3}PX|x^1vs3h_gnZ5fRg!whxt!l<*L^X6atn|a+Rq?D=0bey`bbOZPxk% zruSI&+*Jq|-)adZV{T1QG8+wol5-GGks?y@DI!l3IZWgTk)t5dpkk9=0598s-ty|! z^K(>!tz_mWJx|$lL|!Cvp2!6vULr4nNI?(-#$cSuu5DRmJ*%eeuc?(?aNS|r>edWh z9=zP|1Il_}VDs#{4WlPVPU27pK;5Q+^*0SXI?xTbremaIY|FHN56od8 zc?MPhV+=O3tzpMjKzdk=h(N#FX}`VGZreiz=QBECBA3&d9keM4Rq z^sTc5eN~9JVWR2&_L=ZW7VUjUAXw}{2NSv&Aw$E%1om&i{PAco90lDcMMG*bLOTsc zFrme|U}>dkYs1pP(L*`SC~K`~XsFYK7PASaxfS0e(hXt}$!%S2N>!8hTdH~j#1b+d zbA0R>_e^ZN8=S6wV#@yn1#sU2JYEi&(&$@m{~e$Q%rI;ruARur8-acFn8@GbCw&aE zJf_>ELZ>Txt_@;u}<~{755#X_`5#h0{65=sdiSZb#1bK{A zqCCbbVIE_T@r0r>&~%PZXi7HCN?HA2j|=-NZC!7<}v zTh;Wy+#qZe+61=M=)CF2=}*^^z|XB{39>axsK?Y0DIYz`b0DJ!CQmI__=wb2JGp6W z_E|-*Favg+O~I;CRWiK|$sGMvYC)}UU)F+Q4)j%M<>=p4UP9hty9okyw zmzK5g0t%+GQ2zGj&*>wXO2@&f&BQa&XG2dtRWLxne{Bf`{&So-e&)RFz!~OlMvTS8y}qx?w=I)D?3NayyDf>AQOX?l^-+cL_VW_90eieXY)Dt%+_yKPC-o7KRj>8 zjR$Ktl_GZ*n3W3EYOQUwXiO)xbL+Qxp=lxVsc>z$2b>3J31ch=m^-+5!%8`VW~ShEBL&&Wck?#-;2h9 z{wA>n9XFgT*_ENqb9L`<)jfJ$^O4deV4Vr(9GEhE%fC$X5o)E2HQU0Wx-8LIIO80n zDcrWu7qjU6N;t6Lqq?Vqws+S$P^ZtQ zwS!*hu4^xz@a*X|E^2Fznw`>BhluUKE>T`a@t>k!%6SFvS@w?>XVMam;9_bnr`T4f z>=9Fv^8Xg(b@Ie zDZ6#wS0QgrE%4uloxi7A3-%!GNW53|Utidel}JnfBj0!!$48c(Umz2~wd6 zrC~loTpewiuOV(N?U}Q;HW}WY$;G3-MkC^JT$FQ<0y~<%H>)1z32E8AZQa>~@=s>6 z*Wkrds42T}$T6M0EYc{-ht>ac+N=Ga(_Z|a(_Z#}P6}23&uOpne@=9v|8v@FI`c(h z#asy4r7TW%!kitgV{4P`moVgodD)70$i{v*)g+z6q(v5Y4(#GH{02T$s=ZTx8*PAHJi_nbYf4-0 nL~lo_P7SIA`NxV|ZbkcX1MK3qoqWx}mZ9ii^h*vb`;h+y5_QYG literal 0 HcmV?d00001 diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/checkpoint.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/checkpoint.py new file mode 100644 index 0000000..55775b0 --- /dev/null +++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/checkpoint.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- + +import torch +from typing import Callable, Iterable, Sequence, Union + + +def checkpoint( + func: Callable[..., Union[torch.Tensor, Sequence[torch.Tensor]]], + inputs: Sequence[torch.Tensor], + params: Iterable[torch.Tensor], + flag: bool, + use_deepspeed: bool = False +): + # Evaluate a function without caching intermediate activations, allowing for + # reduced memory at the expense of extra compute in the backward pass. + # :param func: the function to evaluate. + # :param inputs: the argument sequence to pass to `func`. + # :param params: a sequence of parameters `func` depends on but does not + # explicitly take as arguments. + # :param flag: if False, disable gradient checkpointing. 
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/distributions.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/distributions.py
new file mode 100644
index 0000000..1115dcb
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/distributions.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+
+import torch
+import numpy as np
+from typing import Union, List
+
+
+class DiagonalGaussianDistribution(object):
+    # Diagonal Gaussian distribution parameterized by a mean and a log-variance
+    def __init__(self, parameters: Union[torch.Tensor, List[torch.Tensor]], deterministic=False, feat_dim=1):
+        self.feat_dim = feat_dim
+        self.parameters = parameters
+
+        if isinstance(parameters, list):
+            self.mean = parameters[0]
+            self.logvar = parameters[1]
+        else:
+            self.mean, self.logvar = torch.chunk(parameters, 2, dim=feat_dim)
+
+        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
+        self.deterministic = deterministic
+        self.std = torch.exp(0.5 * self.logvar)
+        self.var = torch.exp(self.logvar)
+        if self.deterministic:
+            self.var = self.std = torch.zeros_like(self.mean)
+
+    # draw a reparameterized sample from the Gaussian distribution
+    def sample(self):
+        x = self.mean + self.std * torch.randn_like(self.mean)
+        return x
+
+    def kl(self, other=None, dims=(1, 2, 3)):
+        if self.deterministic:
+            return torch.Tensor([0.])
+        else:
+            if other is None:
+                return 0.5 * torch.mean(torch.pow(self.mean, 2)
+                                        + self.var - 1.0 - self.logvar,
+                                        dim=dims)
+            else:
+                return 0.5 * torch.mean(
+                    torch.pow(self.mean - other.mean, 2) / other.var
+                    + self.var / other.var - 1.0 - self.logvar + other.logvar,
+                    dim=dims)
+
+    def nll(self, sample, dims=(1, 2, 3)):
+        if self.deterministic:
+            return torch.Tensor([0.])
+        logtwopi = np.log(2.0 * np.pi)
+        return 0.5 * torch.sum(
+            logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
+            dim=dims)
+
+    def mode(self):
+        return self.mean
+
+
+def normal_kl(mean1, logvar1, mean2, logvar2):
+    # Compute the KL divergence between two Gaussians.
+    # Shapes are automatically broadcast, so batches can be compared to
+    # scalars, among other use cases.
+    tensor = None
+    for obj in (mean1, logvar1, mean2, logvar2):
+        if isinstance(obj, torch.Tensor):
+            tensor = obj
+            break
+    assert tensor is not None, "at least one argument must be a Tensor"
+
+    # Force variances to be Tensors. Broadcasting helps convert scalars to
+    # Tensors, but it does not work for torch.exp().
+    logvar1, logvar2 = [
+        x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
+        for x in (logvar1, logvar2)
+    ]
+
+    return 0.5 * (
+        -1.0
+        + logvar2
+        - logvar1
+        + torch.exp(logvar1 - logvar2)
+        + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
+    )
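A small sketch of the posterior class above, assuming the mean and log-variance are stacked along dim 1 as the shape VAE does:

    import torch
    from hy3dgen.shapegen.bpt.miche.michelangelo.models.modules.distributions import DiagonalGaussianDistribution

    params = torch.randn(4, 2 * 16, 32)   # [bs, 2 * latent_dim, n] -> chunked into mean / logvar
    posterior = DiagonalGaussianDistribution(params, feat_dim=1)
    z = posterior.sample()                # [4, 16, 32], reparameterized sample
    kl = posterior.kl(dims=(1, 2))        # KL against a standard normal, one value per batch element
    print(z.shape, kl.shape)              # torch.Size([4, 16, 32]) torch.Size([4])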
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/embedder.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/embedder.py
new file mode 100644
index 0000000..223de82
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/embedder.py
@@ -0,0 +1,213 @@
+# -*- coding: utf-8 -*-
+
+import numpy as np
+import torch
+import torch.nn as nn
+import math
+
+VALID_EMBED_TYPES = ["identity", "fourier", "hashgrid", "sphere_harmonic", "triplane_fourier"]
+
+
+class FourierEmbedder(nn.Module):
+    """The sin/cosine positional embedding. Given an input tensor `x` of shape [n_batch, ..., c_dim], it converts
+    each feature dimension of `x[..., i]` into:
+        [
+            sin(x[..., i]),
+            sin(f_1*x[..., i]),
+            sin(f_2*x[..., i]),
+            ...
+            sin(f_N * x[..., i]),
+            cos(x[..., i]),
+            cos(f_1*x[..., i]),
+            cos(f_2*x[..., i]),
+            ...
+            cos(f_N * x[..., i]),
+            x[..., i]     # only present if include_input is True.
+        ], here f_i is the i-th frequency.
+
+    If logspace is True, the frequencies are powers of two, f_i = 2^i for i in [0, num_freqs);
+    otherwise, they are linearly spaced between 1.0 and 2^(num_freqs - 1).
+
+    Args:
+        num_freqs (int): the number of frequencies, default is 6;
+        logspace (bool): if True, the frequencies are f_i = 2^i, otherwise they are linearly
+            spaced between 1.0 and 2^(num_freqs - 1);
+        input_dim (int): the input dimension, default is 3;
+        include_input (bool): include the input tensor or not, default is True.
+
+    Attributes:
+        frequencies (torch.Tensor): the frequency tensor described above;
+
+        out_dim (int): the embedding size; if include_input is True, it is input_dim * (num_freqs * 2 + 1),
+            otherwise, it is input_dim * num_freqs * 2.
+
+    """
+
+    def __init__(self,
+                 num_freqs: int = 6,
+                 logspace: bool = True,
+                 input_dim: int = 3,
+                 include_input: bool = True,
+                 include_pi: bool = True) -> None:
+
+        """The initialization"""
+
+        super().__init__()
+
+        if logspace:
+            frequencies = 2.0 ** torch.arange(
+                num_freqs,
+                dtype=torch.float32
+            )
+        else:
+            frequencies = torch.linspace(
+                1.0,
+                2.0 ** (num_freqs - 1),
+                num_freqs,
+                dtype=torch.float32
+            )
+
+        if include_pi:
+            frequencies *= torch.pi
+
+        self.register_buffer("frequencies", frequencies, persistent=False)
+        self.include_input = include_input
+        self.num_freqs = num_freqs
+
+        self.out_dim = self.get_dims(input_dim)
+
+    def get_dims(self, input_dim):
+        temp = 1 if self.include_input or self.num_freqs == 0 else 0
+        out_dim = input_dim * (self.num_freqs * 2 + temp)
+
+        return out_dim
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """ Forward process.
+
+        Args:
+            x: tensor of shape [..., dim]
+
+        Returns:
+            embedding: an embedding of `x` of shape [..., dim * (num_freqs * 2 + temp)]
+                where temp is 1 if include_input is True and 0 otherwise.
+        """
+
+        if self.num_freqs > 0:
+            embed = (x[..., None].contiguous() * self.frequencies).view(*x.shape[:-1], -1)
+            if self.include_input:
+                return torch.cat((x, embed.sin(), embed.cos()), dim=-1)
+            else:
+                return torch.cat((embed.sin(), embed.cos()), dim=-1)
+        else:
+            return x
+
+
+class LearnedFourierEmbedder(nn.Module):
+    """Learned sinusoidal positional embedding, following @crowsonkb's lead:
+    https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8
+    """
+
+    def __init__(self, in_channels, dim):
+        super().__init__()
+        assert (dim % 2) == 0
+        half_dim = dim // 2
+        per_channel_dim = half_dim // in_channels
+        self.weights = nn.Parameter(torch.randn(per_channel_dim))
+
+    def forward(self, x):
+        """
+
+        Args:
+            x (torch.FloatTensor): [..., c]
+
+        Returns:
+            x (torch.FloatTensor): [..., d]
+        """
+
+        # [b, t, c, 1] * [1, d] = [b, t, c, d] -> [b, t, c * d]
+        freqs = (x[..., None] * self.weights[None] * 2 * np.pi).view(*x.shape[:-1], -1)
+        fouriered = torch.cat((x, freqs.sin(), freqs.cos()), dim=-1)
+        return fouriered
+
+
+class TriplaneLearnedFourierEmbedder(nn.Module):
+    def __init__(self, in_channels, dim):
+        super().__init__()
+
+        self.yz_plane_embedder = LearnedFourierEmbedder(in_channels, dim)
+        self.xz_plane_embedder = LearnedFourierEmbedder(in_channels, dim)
+        self.xy_plane_embedder = LearnedFourierEmbedder(in_channels, dim)
+
+        self.out_dim = in_channels + dim
+
+    def forward(self, x):
+
+        yz_embed = self.yz_plane_embedder(x)
+        xz_embed = self.xz_plane_embedder(x)
+        xy_embed = self.xy_plane_embedder(x)
+
+        embed = yz_embed + xz_embed + xy_embed
+
+        return embed
+
+
+def sequential_pos_embed(num_len, embed_dim):
+    assert embed_dim % 2 == 0
+
+    pos = torch.arange(num_len, dtype=torch.float32)
+    omega = torch.arange(embed_dim // 2, dtype=torch.float32)
+    omega /= embed_dim / 2.
+    omega = 1. / 10000 ** omega  # (D/2,)
+
+    pos = pos.reshape(-1)  # (M,)
+    out = torch.einsum("m,d->md", pos, omega)  # (M, D/2), outer product
+
+    emb_sin = torch.sin(out)  # (M, D/2)
+    emb_cos = torch.cos(out)  # (M, D/2)
+
+    embeddings = torch.cat([emb_sin, emb_cos], dim=1)  # (M, D)
+
+    return embeddings
+
+
+def timestep_embedding(timesteps, dim, max_period=10000):
+    """
+    Create sinusoidal timestep embeddings.
+    :param timesteps: a 1-D Tensor of N indices, one per batch element.
+                      These may be fractional.
+    :param dim: the dimension of the output.
+    :param max_period: controls the minimum frequency of the embeddings.
+    :return: an [N x dim] Tensor of positional embeddings.
+    """
+ """ + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].to(timesteps.dtype) * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + return embedding + + +def get_embedder(embed_type="fourier", num_freqs=-1, input_dim=3, degree=4, + num_levels=16, level_dim=2, per_level_scale=2, base_resolution=16, + log2_hashmap_size=19, desired_resolution=None): + if embed_type == "identity" or (embed_type == "fourier" and num_freqs == -1): + return nn.Identity(), input_dim + + elif embed_type == "fourier": + embedder_obj = FourierEmbedder(num_freqs=num_freqs, input_dim=input_dim, + logspace=True, include_input=True) + return embedder_obj, embedder_obj.out_dim + + elif embed_type == "hashgrid": + raise NotImplementedError + + elif embed_type == "sphere_harmonic": + raise NotImplementedError + + else: + raise ValueError(f"{embed_type} is not valid. Currently only supprts {VALID_EMBED_TYPES}") diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/transformer_blocks.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/transformer_blocks.py new file mode 100644 index 0000000..8aaabd7 --- /dev/null +++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/modules/transformer_blocks.py @@ -0,0 +1,286 @@ +# -*- coding: utf-8 -*- + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from typing import Optional + +from hy3dgen.shapegen.bpt.miche.michelangelo.models.modules.checkpoint import checkpoint + +# Initialize linear layers with normal distribution weights and zero biases +def init_linear(l, stddev): + nn.init.normal_(l.weight, std=stddev) + if l.bias is not None: + nn.init.constant_(l.bias, 0.0) + +# Multihead attention module +class MultiheadAttention(nn.Module): + def __init__( + self, + *, + device: torch.device, + dtype: torch.dtype, + n_ctx: int, # Context size + width: int, # Width of the input tensor + heads: int, # Number of attention heads + init_scale: float, # Initialization scale for weights + qkv_bias: bool, # Whether to use bias in QKV layers + flash: bool = False # Whether to use flash attention + ): + super().__init__() + self.n_ctx = n_ctx + self.width = width + self.heads = heads + self.c_qkv = nn.Linear(width, width * 3, bias=qkv_bias, device=device, dtype=dtype) + self.c_proj = nn.Linear(width, width, device=device, dtype=dtype) + self.attention = QKVMultiheadAttention(device=device, dtype=dtype, heads=heads, n_ctx=n_ctx, flash=flash) + init_linear(self.c_qkv, init_scale) + init_linear(self.c_proj, init_scale) + + def forward(self, x): + x = self.c_qkv(x) + x = checkpoint(self.attention, (x,), (), True) + x = self.c_proj(x) + return x + +# QKV multihead attention module +class QKVMultiheadAttention(nn.Module): + def __init__(self, *, device: torch.device, dtype: torch.dtype, heads: int, n_ctx: int, flash: bool = False): + super().__init__() + self.device = device + self.dtype = dtype + self.heads = heads + self.n_ctx = n_ctx + self.flash = flash + + def forward(self, qkv): + bs, n_ctx, width = qkv.shape + attn_ch = width // self.heads // 3 + scale = 1 / math.sqrt(math.sqrt(attn_ch)) + qkv = qkv.view(bs, n_ctx, self.heads, -1) + q, k, v = torch.split(qkv, attn_ch, dim=-1) + + if self.flash: + out = F.scaled_dot_product_attention(q, k, v) + else: + weight = torch.einsum( + 
"bthc,bshc->bhts", q * scale, k * scale + ) # More stable with f16 than dividing afterwards + wdtype = weight.dtype + weight = torch.softmax(weight.float(), dim=-1).type(wdtype) + out = torch.einsum("bhts,bshc->bthc", weight, v).reshape(bs, n_ctx, -1) + + return out + +# Residual attention block module +class ResidualAttentionBlock(nn.Module): + def __init__( + self, + *, + device: torch.device, + dtype: torch.dtype, + use_checkpoint: bool = False, + n_ctx: int, # Context size + width: int, # Width of the input tensor + heads: int, # Number of attention heads + init_scale: float, # Initialization scale for weights + qkv_bias: bool, # Whether to use bias in QKV layers + flash: bool = False # Whether to use flash attention + ): + super().__init__() + + self.use_checkpoint = use_checkpoint + + self.attn = MultiheadAttention( + device=device, + dtype=dtype, + n_ctx=n_ctx, + width=width, + heads=heads, + init_scale=init_scale, + qkv_bias=qkv_bias, + flash=flash + ) + self.ln_1 = nn.LayerNorm(width, device=device, dtype=dtype) + self.mlp = MLP(device=device, dtype=dtype, width=width, init_scale=init_scale) + self.ln_2 = nn.LayerNorm(width, device=device, dtype=dtype) + + def _forward(self, x: torch.Tensor): + x = x + self.attn(self.ln_1(x)) + x = x + self.mlp(self.ln_2(x)) + return x + + def forward(self, x: torch.Tensor): + return checkpoint(self._forward, (x,), self.parameters(), self.use_checkpoint) + +# Multihead cross attention module +class MultiheadCrossAttention(nn.Module): + def __init__( + self, + *, + device: torch.device, + dtype: torch.dtype, + n_data: Optional[int] = None, + data_width: Optional[int] = None, + width: int, # Width of the input tensor + heads: int, # Number of attention heads + init_scale: float, # Initialization scale for weights + qkv_bias: bool, # Whether to use bias in QKV layers + flash: bool = False # Whether to use flash attention + ): + super().__init__() + self.n_data = n_data + self.width = width + self.heads = heads + self.data_width = width if data_width is None else data_width + self.c_q = nn.Linear(width, width, bias=qkv_bias, device=device, dtype=dtype) + self.c_kv = nn.Linear(self.data_width, width * 2, bias=qkv_bias, device=device, dtype=dtype) + self.c_proj = nn.Linear(width, width, device=device, dtype=dtype) + self.attention = QKVMultiheadCrossAttention( + device=device, dtype=dtype, heads=heads, n_data=n_data, flash=flash + ) + init_linear(self.c_q, init_scale) + init_linear(self.c_kv, init_scale) + init_linear(self.c_proj, init_scale) + + def forward(self, x, data): + x = self.c_q(x) + data = self.c_kv(data) + x = checkpoint(self.attention, (x, data), (), True) + x = self.c_proj(x) + return x + +# QKV multihead cross attention module +class QKVMultiheadCrossAttention(nn.Module): + def __init__(self, *, device: torch.device, dtype: torch.dtype, heads: int, + flash: bool = False, n_data: Optional[int] = None): + + super().__init__() + self.device = device + self.dtype = dtype + self.heads = heads + self.n_data = n_data + self.flash = flash + + def forward(self, q, kv): + _, n_ctx, _ = q.shape + bs, n_data, width = kv.shape + attn_ch = width // self.heads // 2 + scale = 1 / math.sqrt(math.sqrt(attn_ch)) + q = q.view(bs, n_ctx, self.heads, -1) + kv = kv.view(bs, n_data, self.heads, -1) + k, v = torch.split(kv, attn_ch, dim=-1) + + if self.flash: + out = F.scaled_dot_product_attention(q, k, v) + else: + weight = torch.einsum( + "bthc,bshc->bhts", q * scale, k * scale + ) # More stable with f16 than dividing afterwards + wdtype = weight.dtype + weight 
= torch.softmax(weight.float(), dim=-1).type(wdtype)
+            out = torch.einsum("bhts,bshc->bthc", weight, v).reshape(bs, n_ctx, -1)
+
+        return out
+
+
+# Residual cross-attention block module
+class ResidualCrossAttentionBlock(nn.Module):
+    def __init__(
+            self,
+            *,
+            device: Optional[torch.device],
+            dtype: Optional[torch.dtype],
+            n_data: Optional[int] = None,
+            data_width: Optional[int] = None,
+            width: int,           # Width of the input tensor
+            heads: int,           # Number of attention heads
+            init_scale: float,    # Initialization scale for weights
+            qkv_bias: bool,       # Whether to use bias in the QKV projections
+            flash: bool = False   # Whether to use flash attention
+    ):
+        super().__init__()
+
+        if data_width is None:
+            data_width = width
+
+        self.attn = MultiheadCrossAttention(
+            device=device,
+            dtype=dtype,
+            n_data=n_data,
+            width=width,
+            heads=heads,
+            data_width=data_width,
+            init_scale=init_scale,
+            qkv_bias=qkv_bias,
+            flash=flash,
+        )
+        self.ln_1 = nn.LayerNorm(width, device=device, dtype=dtype)
+        self.ln_2 = nn.LayerNorm(data_width, device=device, dtype=dtype)
+        self.mlp = MLP(device=device, dtype=dtype, width=width, init_scale=init_scale)
+        self.ln_3 = nn.LayerNorm(width, device=device, dtype=dtype)
+
+    def forward(self, x: torch.Tensor, data: torch.Tensor):
+        x = x + self.attn(self.ln_1(x), self.ln_2(data))
+        x = x + self.mlp(self.ln_3(x))
+        return x
+
+
+# MLP module
+class MLP(nn.Module):
+    def __init__(self, *,
+                 device: Optional[torch.device],
+                 dtype: Optional[torch.dtype],
+                 width: int,
+                 init_scale: float):
+        super().__init__()
+        self.width = width
+        self.c_fc = nn.Linear(width, width * 4, device=device, dtype=dtype)
+        self.c_proj = nn.Linear(width * 4, width, device=device, dtype=dtype)
+        self.gelu = nn.GELU()
+        init_linear(self.c_fc, init_scale)
+        init_linear(self.c_proj, init_scale)
+
+    def forward(self, x):
+        return self.c_proj(self.gelu(self.c_fc(x)))
+
+
+# Transformer module: a stack of residual self-attention blocks
+class Transformer(nn.Module):
+    def __init__(
+            self,
+            *,
+            device: Optional[torch.device],
+            dtype: Optional[torch.dtype],
+            layers: int,
+            use_checkpoint: bool = False,
+            n_ctx: int,           # Context size
+            width: int,           # Width of the input tensor
+            heads: int,           # Number of attention heads
+            init_scale: float,    # Initialization scale for weights
+            qkv_bias: bool,       # Whether to use bias in the QKV projections
+            flash: bool = False   # Whether to use flash attention
+    ):
+        super().__init__()
+        self.n_ctx = n_ctx
+        self.width = width
+        self.layers = layers
+        self.resblocks = nn.ModuleList(
+            [
+                ResidualAttentionBlock(
+                    device=device,
+                    dtype=dtype,
+                    n_ctx=n_ctx,
+                    width=width,
+                    heads=heads,
+                    init_scale=init_scale,
+                    qkv_bias=qkv_bias,
+                    flash=flash,
+                    use_checkpoint=use_checkpoint
+                )
+                for _ in range(layers)
+            ]
+        )
+
+    def forward(self, x: torch.Tensor):
+        for block in self.resblocks:
+            x = block(x)
+        return x
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/__init__.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/__init__.py
new file mode 100644
index 0000000..40a96af
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/__init__.py
@@ -0,0 +1 @@
+# -*- coding: utf-8 -*-
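To make the pieces above concrete, a minimal sketch that stacks two residual self-attention blocks from the Transformer added in transformer_blocks.py (hyperparameters are illustrative only; the eager einsum path is exercised since flash defaults to False):

    import torch
    from hy3dgen.shapegen.bpt.miche.michelangelo.models.modules.transformer_blocks import Transformer

    model = Transformer(device=torch.device("cpu"), dtype=torch.float32,
                        layers=2, n_ctx=16, width=64, heads=4,
                        init_scale=0.25, qkv_bias=True)
    x = torch.randn(1, 16, 64)   # [batch, n_ctx, width]
    out = model(x)               # same shape; each block applies attention + MLP with residuals
    print(out.shape)             # torch.Size([1, 16, 64])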
zA}&KO)ikfk^QWDn_#O^mEIzG)X=i0e|KQvc^+Xi_HA8?R@gGE$gblve9X8it^gB0Ui&|or0ngc>%ITZTrX##&M~xv>D}tpgF=P}N z5(?QSH(DCB2Q2s(B$Fhb90Z=!gXwP0t<|x~7F2ek9g%)e?iA=QaS22{)?2x;Ypl0X z4GNgt=k!mn}FNv(pJ zxiu#c<`uwvls;rt`jCwg{xzzNya3OxG$I^{5 zxB$!c!1y;nmn8C3uf=7gd${zW0$UJ`_gq@C_^rcV8tc;X*zBENQnXMOn*jWsm=K^15cPy#=A1?3c~94I%CN#RfJ zKwos|@1q|IOi)-$ds`sBj4F!qwZ@c*f3Y)<*_o;qQzrj-tIFV39giwAJ6e^&tvakL zi`a%+_1KV-+PVDE(uYe`2F+>;pHm;DKTOx2N0roH79L)BaG}beSv@lj&*KlL9!ymk VG^=Mv;5qd${UH4{gC=q9{{UPJIr9Jj literal 0 HcmV?d00001 diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/asl_pl_module.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/asl_pl_module.py new file mode 100644 index 0000000..9b84bf0 --- /dev/null +++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/asl_pl_module.py @@ -0,0 +1,383 @@ +# -*- coding: utf-8 -*- + +from typing import List, Tuple, Dict, Optional +from omegaconf import DictConfig + +import torch +import torch.nn.functional as F +from torch import nn +from torch.optim import lr_scheduler +from typing import Union +from functools import partial + +from .....miche.michelangelo.utils import instantiate_from_config + +from .tsal_base import ( + AlignedShapeAsLatentModule, + ShapeAsLatentModule, + Latent2MeshOutput, + AlignedMeshOutput +) +from .....miche.michelangelo.models.tsal.inference_utils import extract_geometry +import trimesh + +class AlignedShapeAsLatentPLModule(nn.Module): + def __init__(self, *, + shape_module_cfg, + aligned_module_cfg, + loss_cfg, + optimizer_cfg: Optional[DictConfig] = None, + ckpt_path: Optional[str] = None, + ignore_keys: Union[Tuple[str], List[str]] = ()): + + super().__init__() + + shape_model: ShapeAsLatentModule = instantiate_from_config( + shape_module_cfg, device=None, dtype=None + ) + self.model: AlignedShapeAsLatentModule = instantiate_from_config( + aligned_module_cfg, shape_model=shape_model + ) + + self.loss = instantiate_from_config(loss_cfg) + + self.optimizer_cfg = optimizer_cfg + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + + def set_shape_model_only(self): + self.model.set_shape_model_only() + + @property + def latent_shape(self): + return self.model.shape_model.latent_shape + + @property + def zero_rank(self): + if self._trainer: + zero_rank = self.trainer.local_rank == 0 + else: + zero_rank = True + + return zero_rank + + def init_from_ckpt(self, path, ignore_keys=()): + state_dict = torch.load(path, map_location="cpu")["state_dict"] + + keys = list(state_dict.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del state_dict[k] + + missing, unexpected = self.load_state_dict(state_dict, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + print(f"Unexpected Keys: {unexpected}") + + def configure_optimizers(self) -> Tuple[List, List]: + lr = self.learning_rate + + trainable_parameters = list(self.model.parameters()) + + if self.optimizer_cfg is None: + optimizers = [torch.optim.AdamW(trainable_parameters, lr=lr, betas=(0.9, 0.99), weight_decay=1e-3)] + schedulers = [] + else: + optimizer = instantiate_from_config(self.optimizer_cfg.optimizer, params=trainable_parameters) + scheduler_func = instantiate_from_config( + self.optimizer_cfg.scheduler, + max_decay_steps=self.trainer.max_steps, + lr_max=lr + ) + scheduler = { + "scheduler": lr_scheduler.LambdaLR(optimizer, 
lr_lambda=scheduler_func.schedule), + "interval": "step", + "frequency": 1 + } + optimizers = [optimizer] + schedulers = [scheduler] + + return optimizers, schedulers + + def forward(self, + surface: torch.FloatTensor, + image: torch.FloatTensor, + text: torch.FloatTensor, + volume_queries: torch.FloatTensor): + # Args: + # surface (torch.FloatTensor): + # image (torch.FloatTensor): + # text (torch.FloatTensor): + # volume_queries (torch.FloatTensor): + # + # Returns: + + embed_outputs, shape_z = self.model(surface, image, text) + + shape_zq, posterior = self.model.shape_model.encode_kl_embed(shape_z) + latents = self.model.shape_model.decode(shape_zq) + logits = self.model.shape_model.query_geometry(volume_queries, latents) + + return embed_outputs, logits, posterior + + def encode(self, surface: torch.FloatTensor, sample_posterior=True): + + pc = surface[..., 0:3] + feats = surface[..., 3:6] + + shape_embed, shape_zq, posterior = self.model.shape_model.encode( + pc=pc, feats=feats, sample_posterior=sample_posterior + ) + + return shape_zq + + def encode_latents(self, surface: torch.FloatTensor): + + pc = surface[..., 0:3] + feats = surface[..., 3:6] + + shape_embed, shape_latents = self.model.shape_model.encode_latents( + pc=pc, feats=feats + ) + shape_embed = shape_embed.unsqueeze(1) + assert shape_embed.shape[1] == 1 and shape_latents.shape[1] == 256 + cat_latents = torch.cat([shape_embed, shape_latents], dim=1) + + return cat_latents + + def recon(self, surface): + cat_latents = self.encode_latents(surface) + shape_latents = cat_latents[:, 1:] + shape_zq, posterior = self.model.shape_model.encode_kl_embed(shape_latents) + + # decoding + latents = self.model.shape_model.decode(shape_zq) + geometric_func = partial(self.model.shape_model.query_geometry, latents=latents) + + # reconstruction + mesh_v_f, has_surface = extract_geometry( + geometric_func=geometric_func, + device=surface.device, + batch_size=surface.shape[0], + bounds=(-1.25, -1.25, -1.25, 1.25, 1.25, 1.25), + octree_depth=7, + num_chunks=10000, + ) + recon_mesh = trimesh.Trimesh(mesh_v_f[0][0], mesh_v_f[0][1]) + + return recon_mesh + + + def to_shape_latents(self, latents): + + shape_zq, posterior = self.model.shape_model.encode_kl_embed(latents, sample_posterior = False) + return self.model.shape_model.decode(shape_zq) + + def decode(self, + z_q, + bounds: Union[Tuple[float], List[float], float] = 1.1, + octree_depth: int = 7, + num_chunks: int = 10000) -> List[Latent2MeshOutput]: + + latents = self.model.shape_model.decode(z_q) # latents: [bs, num_latents, dim] + outputs = self.latent2mesh(latents, bounds=bounds, octree_depth=octree_depth, num_chunks=num_chunks) + + return outputs + + def training_step(self, batch: Dict[str, torch.FloatTensor], + batch_idx: int, optimizer_idx: int = 0) -> torch.FloatTensor: + #Args: + # batch (dict): the batch sample, and it contains: + # - surface (torch.FloatTensor): [bs, n_surface, (3 + input_dim)] + # - image (torch.FloatTensor): [bs, 3, 224, 224] + # - text (torch.FloatTensor): [bs, num_templates, 77] + # - geo_points (torch.FloatTensor): [bs, n_pts, (3 + 1)] + # + # batch_idx (int): + # + # optimizer_idx (int): + # + # Returns: + # loss (torch.FloatTensor): + + surface = batch["surface"] + image = batch["image"] + text = batch["text"] + + volume_queries = batch["geo_points"][..., 0:3] + shape_labels = batch["geo_points"][..., -1] + + embed_outputs, shape_logits, posteriors = self(surface, image, text, volume_queries) + + aeloss, log_dict_ae = self.loss( + **embed_outputs, + 
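+            # the configured loss (e.g. ContrastKLNearFar from loss.py) combines
+            # contrastive, near/far occupancy BCE and KL terms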
posteriors=posteriors, + shape_logits=shape_logits, + shape_labels=shape_labels, + split="train" + ) + + self.log_dict(log_dict_ae, prog_bar=True, logger=True, batch_size=shape_logits.shape[0], + sync_dist=False, rank_zero_only=True) + + return aeloss + + def validation_step(self, batch: Dict[str, torch.FloatTensor], batch_idx: int) -> torch.FloatTensor: + + surface = batch["surface"] + image = batch["image"] + text = batch["text"] + + volume_queries = batch["geo_points"][..., 0:3] + shape_labels = batch["geo_points"][..., -1] + + embed_outputs, shape_logits, posteriors = self(surface, image, text, volume_queries) + + aeloss, log_dict_ae = self.loss( + **embed_outputs, + posteriors=posteriors, + shape_logits=shape_logits, + shape_labels=shape_labels, + split="val" + ) + self.log_dict(log_dict_ae, prog_bar=True, logger=True, batch_size=shape_logits.shape[0], + sync_dist=False, rank_zero_only=True) + + return aeloss + + def visual_alignment(self, + surface: torch.FloatTensor, + image: torch.FloatTensor, + text: torch.FloatTensor, + description: Optional[List[str]] = None, + bounds: Union[Tuple[float], List[float]] = (-1.25, -1.25, -1.25, 1.25, 1.25, 1.25), + octree_depth: int = 7, + num_chunks: int = 10000) -> List[AlignedMeshOutput]: + + """ + + Args: + surface: + image: + text: + description: + bounds: + octree_depth: + num_chunks: + + Returns: + mesh_outputs (List[AlignedMeshOutput]): the mesh outputs list. + + """ + + outputs = [] + + device = surface.device + bs = surface.shape[0] + + embed_outputs, shape_z = self.model(surface, image, text) + + # calculate the similarity + image_embed = embed_outputs["image_embed"] + text_embed = embed_outputs["text_embed"] + shape_embed = embed_outputs["shape_embed"] + + # normalized features + shape_embed = F.normalize(shape_embed, dim=-1, p=2) + text_embed = F.normalize(text_embed, dim=-1, p=2) + image_embed = F.normalize(image_embed, dim=-1, p=2) + + # B x B + shape_text_similarity = (100.0 * shape_embed @ text_embed.T).softmax(dim=-1) + + # B x B + shape_image_similarity = (100.0 * shape_embed @ image_embed.T).softmax(dim=-1) + + # shape reconstruction + shape_zq, posterior = self.model.shape_model.encode_kl_embed(shape_z) + latents = self.model.shape_model.decode(shape_zq) + geometric_func = partial(self.model.shape_model.query_geometry, latents=latents) + + # 2. decode geometry + mesh_v_f, has_surface = extract_geometry( + geometric_func=geometric_func, + device=device, + batch_size=bs, + bounds=bounds, + octree_depth=octree_depth, + num_chunks=num_chunks, + disable=not self.zero_rank + ) + + # 3. decode texture + for i, ((mesh_v, mesh_f), is_surface) in enumerate(zip(mesh_v_f, has_surface)): + if not is_surface: + outputs.append(None) + continue + + out = AlignedMeshOutput() + out.mesh_v = mesh_v + out.mesh_f = mesh_f + out.surface = surface[i].cpu().numpy() + out.image = image[i].cpu().numpy() + if description is not None: + out.text = description[i] + out.shape_text_similarity = shape_text_similarity[i, i] + out.shape_image_similarity = shape_image_similarity[i, i] + + outputs.append(out) + + return outputs + + def latent2mesh(self, + latents: torch.FloatTensor, + bounds: Union[Tuple[float], List[float], float] = 1.1, + octree_depth: int = 7, + num_chunks: int = 10000) -> List[Latent2MeshOutput]: + + """ + + Args: + latents: [bs, num_latents, dim] + bounds: + octree_depth: + num_chunks: + + Returns: + mesh_outputs (List[MeshOutput]): the mesh outputs list. 
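+            Entries are None for batch elements where marching cubes finds no surface.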
+ + """ + + outputs = [] + + geometric_func = partial(self.model.shape_model.query_geometry, latents=latents) + + # 2. decode geometry + device = latents.device + mesh_v_f, has_surface = extract_geometry( + geometric_func=geometric_func, + device=device, + batch_size=len(latents), + bounds=bounds, + octree_depth=octree_depth, + num_chunks=num_chunks, + disable=not self.zero_rank + ) + + # 3. decode texture + for i, ((mesh_v, mesh_f), is_surface) in enumerate(zip(mesh_v_f, has_surface)): + if not is_surface: + outputs.append(None) + continue + + out = Latent2MeshOutput() + out.mesh_v = mesh_v + out.mesh_f = mesh_f + + outputs.append(out) + + return outputs diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/clip_asl_module.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/clip_asl_module.py new file mode 100644 index 0000000..a5c9562 --- /dev/null +++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/clip_asl_module.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- + +import torch +from torch import nn +from einops import rearrange +from transformers import CLIPModel + +from hy3dgen.shapegen.bpt.miche.michelangelo.models.tsal.tsal_base import AlignedShapeAsLatentModule + + +class CLIPAlignedShapeAsLatentModule(AlignedShapeAsLatentModule): + + def __init__(self, *, + shape_model, + clip_model_version: str = "openai/clip-vit-large-patch14"): + + super().__init__() + + # self.clip_model: CLIPModel = CLIPModel.from_pretrained(clip_model_version) + # for params in self.clip_model.parameters(): + # params.requires_grad = False + self.clip_model = None + self.shape_model = shape_model + self.shape_projection = nn.Parameter(torch.empty(self.shape_model.width, self.shape_model.width)) + # nn.init.normal_(self.shape_projection, std=self.shape_model.width ** -0.5) + + def set_shape_model_only(self): + self.clip_model = None + + def encode_shape_embed(self, surface, return_latents: bool = False): + """ + + Args: + surface (torch.FloatTensor): [bs, n, 3 + c] + return_latents (bool): + + Returns: + x (torch.FloatTensor): [bs, projection_dim] + shape_latents (torch.FloatTensor): [bs, m, d] + """ + + pc = surface[..., 0:3] + feats = surface[..., 3:] + + shape_embed, shape_latents = self.shape_model.encode_latents(pc, feats) + x = shape_embed @ self.shape_projection + + if return_latents: + return x, shape_latents + else: + return x + + def encode_image_embed(self, image): + """ + + Args: + image (torch.FloatTensor): [bs, 3, h, w] + + Returns: + x (torch.FloatTensor): [bs, projection_dim] + """ + + x = self.clip_model.get_image_features(image) + + return x + + def encode_text_embed(self, text): + x = self.clip_model.get_text_features(text) + return x + + def forward(self, surface, image, text): + """ + + Args: + surface (torch.FloatTensor): + image (torch.FloatTensor): [bs, 3, 224, 224] + text (torch.LongTensor): [bs, num_templates, 77] + + Returns: + embed_outputs (dict): the embedding outputs, and it contains: + - image_embed (torch.FloatTensor): + - text_embed (torch.FloatTensor): + - shape_embed (torch.FloatTensor): + - logit_scale (float): + """ + + # # text embedding + # text_embed_all = [] + # for i in range(text.shape[0]): + # text_for_one_sample = text[i] + # text_embed = self.encode_text_embed(text_for_one_sample) + # text_embed = text_embed / text_embed.norm(dim=-1, keepdim=True) + # text_embed = text_embed.mean(dim=0) + # text_embed = text_embed / text_embed.norm(dim=-1, keepdim=True) + # text_embed_all.append(text_embed) + # text_embed_all = torch.stack(text_embed_all) + + b 
= text.shape[0] + text_tokens = rearrange(text, "b t l -> (b t) l") + text_embed = self.encode_text_embed(text_tokens) + text_embed = rearrange(text_embed, "(b t) d -> b t d", b=b) + text_embed = text_embed.mean(dim=1) + text_embed = text_embed / text_embed.norm(dim=-1, keepdim=True) + + # image embedding + image_embed = self.encode_image_embed(image) + + # shape embedding + shape_embed, shape_latents = self.encode_shape_embed(surface, return_latents=True) + + embed_outputs = { + "image_embed": image_embed, + "text_embed": text_embed, + "shape_embed": shape_embed, + # "logit_scale": self.clip_model.logit_scale.exp() + } + + return embed_outputs, shape_latents diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/inference_utils.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/inference_utils.py new file mode 100644 index 0000000..1086a95 --- /dev/null +++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/inference_utils.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- + +import torch +from tqdm import tqdm +from einops import repeat +import numpy as np +from typing import Callable, Tuple, List, Union, Optional +from skimage import measure + +from .....miche.michelangelo.graphics.primitives import generate_dense_grid_points + + +@torch.no_grad() +def extract_geometry(geometric_func: Callable, + device: torch.device, + batch_size: int = 1, + bounds: Union[Tuple[float], List[float], float] = (-1.25, -1.25, -1.25, 1.25, 1.25, 1.25), + octree_depth: int = 7, + num_chunks: int = 10000, + disable: bool = True): + + # Args: + # geometric_func: + # device: + # bounds: + # octree_depth: + # batch_size: + # num_chunks: + # disable: + # Returns: + + if isinstance(bounds, float): + bounds = [-bounds, -bounds, -bounds, bounds, bounds, bounds] + + bbox_min = np.array(bounds[0:3]) + bbox_max = np.array(bounds[3:6]) + bbox_size = bbox_max - bbox_min + + xyz_samples, grid_size, length = generate_dense_grid_points( + bbox_min=bbox_min, + bbox_max=bbox_max, + octree_depth=octree_depth, + indexing="ij" + ) + xyz_samples = torch.FloatTensor(xyz_samples) + + batch_logits = [] + for start in tqdm(range(0, xyz_samples.shape[0], num_chunks), + desc="Implicit Function:", disable=disable, leave=False): + queries = xyz_samples[start: start + num_chunks, :].to(device) + batch_queries = repeat(queries, "p c -> b p c", b=batch_size) + + logits = geometric_func(batch_queries) + batch_logits.append(logits.cpu()) + + grid_logits = torch.cat(batch_logits, dim=1).view((batch_size, grid_size[0], grid_size[1], grid_size[2])).numpy() + + mesh_v_f = [] + has_surface = np.zeros((batch_size,), dtype=np.bool_) + for i in range(batch_size): + try: + vertices, faces, normals, _ = measure.marching_cubes(grid_logits[i], 0, method="lewiner") + vertices = vertices / grid_size * bbox_size + bbox_min + # vertices[:, [0, 1]] = vertices[:, [1, 0]] + mesh_v_f.append((vertices.astype(np.float32), np.ascontiguousarray(faces))) + has_surface[i] = True + + except ValueError: + mesh_v_f.append((None, None)) + has_surface[i] = False + + except RuntimeError: + mesh_v_f.append((None, None)) + has_surface[i] = False + + return mesh_v_f, has_surface diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/loss.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/loss.py new file mode 100644 index 0000000..a2aa24c --- /dev/null +++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/loss.py @@ -0,0 +1,130 @@ +# -*- coding: utf-8 -*- +import torch +import torch.nn as nn +import torch.nn.functional as F + +from typing 
import Optional + +from hy3dgen.shapegen.bpt.miche.michelangelo.models.modules.distributions import DiagonalGaussianDistribution +from hy3dgen.shapegen.bpt.miche.michelangelo.utils import misc + + +class ContrastKLNearFar(nn.Module): + def __init__(self, + contrast_weight: float = 1.0, + near_weight: float = 0.1, + kl_weight: float = 1.0, + num_near_samples: Optional[int] = None): + + super().__init__() + + self.labels = None + self.last_local_batch_size = None + + self.contrast_weight = contrast_weight + self.near_weight = near_weight + self.kl_weight = kl_weight + self.num_near_samples = num_near_samples + self.geo_criterion = nn.BCEWithLogitsLoss() + + def forward(self, + shape_embed: torch.FloatTensor, + text_embed: torch.FloatTensor, + image_embed: torch.FloatTensor, + logit_scale: torch.FloatTensor, + posteriors: Optional[DiagonalGaussianDistribution], + shape_logits: torch.FloatTensor, + shape_labels: torch.FloatTensor, + split: Optional[str] = "train", **kwargs): + + # shape_embed: torch.FloatTensor + # text_embed: torch.FloatTensor + # image_embed: torch.FloatTensor + # logit_scale: torch.FloatTensor + # posteriors: Optional[DiagonalGaussianDistribution] + # shape_logits: torch.FloatTensor + # shape_labels: torch.FloatTensor + + local_batch_size = shape_embed.size(0) + + if local_batch_size != self.last_local_batch_size: + self.labels = local_batch_size * misc.get_rank() + torch.arange( + local_batch_size, device=shape_embed.device + ).long() + self.last_local_batch_size = local_batch_size + + # normalized features + shape_embed = F.normalize(shape_embed, dim=-1, p=2) + text_embed = F.normalize(text_embed, dim=-1, p=2) + image_embed = F.normalize(image_embed, dim=-1, p=2) + + # gather features from all GPUs + shape_embed_all, text_embed_all, image_embed_all = misc.all_gather_batch( + [shape_embed, text_embed, image_embed] + ) + + # cosine similarity as logits + logits_per_shape_text = logit_scale * shape_embed @ text_embed_all.t() + logits_per_text_shape = logit_scale * text_embed @ shape_embed_all.t() + logits_per_shape_image = logit_scale * shape_embed @ image_embed_all.t() + logits_per_image_shape = logit_scale * image_embed @ shape_embed_all.t() + contrast_loss = (F.cross_entropy(logits_per_shape_text, self.labels) + + F.cross_entropy(logits_per_text_shape, self.labels)) / 2 + \ + (F.cross_entropy(logits_per_shape_image, self.labels) + + F.cross_entropy(logits_per_image_shape, self.labels)) / 2 + + # shape reconstruction + if self.num_near_samples is None: + num_vol = shape_logits.shape[1] // 2 + else: + num_vol = shape_logits.shape[1] - self.num_near_samples + + vol_logits = shape_logits[:, 0:num_vol] + vol_labels = shape_labels[:, 0:num_vol] + + near_logits = shape_logits[:, num_vol:] + near_labels = shape_labels[:, num_vol:] + + # occupancy loss + vol_bce = self.geo_criterion(vol_logits.float(), vol_labels.float()) + near_bce = self.geo_criterion(near_logits.float(), near_labels.float()) + + if posteriors is None: + kl_loss = torch.tensor(0.0, dtype=vol_logits.dtype, device=vol_logits.device) + else: + kl_loss = posteriors.kl(dims=(1, 2)) + kl_loss = torch.mean(kl_loss) + + loss = vol_bce + near_bce * self.near_weight + kl_loss * self.kl_weight + contrast_loss * self.contrast_weight + + # compute accuracy + with torch.no_grad(): + pred = torch.argmax(logits_per_shape_text, dim=-1) + correct = pred.eq(self.labels).sum() + shape_text_acc = 100 * correct / local_batch_size + + pred = torch.argmax(logits_per_shape_image, dim=-1) + correct = pred.eq(self.labels).sum() + 
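+            # top-1 shape-to-image retrieval accuracy; self.labels marks each
+            # sample's own pair within the gathered batch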
shape_image_acc = 100 * correct / local_batch_size + + preds = shape_logits >= 0 + accuracy = (preds == shape_labels).float() + accuracy = accuracy.mean() + + log = { + "{}/contrast".format(split): contrast_loss.clone().detach(), + "{}/near".format(split): near_bce.detach(), + "{}/far".format(split): vol_bce.detach(), + "{}/kl".format(split): kl_loss.detach(), + "{}/shape_text_acc".format(split): shape_text_acc, + "{}/shape_image_acc".format(split): shape_image_acc, + "{}/total_loss".format(split): loss.clone().detach(), + "{}/accuracy".format(split): accuracy, + } + + if posteriors is not None: + log[f"{split}/mean"] = posteriors.mean.mean().detach() + log[f"{split}/std_mean"] = posteriors.std.mean().detach() + log[f"{split}/std_max"] = posteriors.std.max().detach() + + return loss, log diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/sal_perceiver.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/sal_perceiver.py new file mode 100644 index 0000000..82fe326 --- /dev/null +++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/sal_perceiver.py @@ -0,0 +1,410 @@ +# -*- coding: utf-8 -*- + +import torch +import torch.nn as nn +from typing import Optional +from einops import repeat +import math + +from hy3dgen.shapegen.bpt.miche.michelangelo.models.modules import checkpoint +from hy3dgen.shapegen.bpt.miche.michelangelo.models.modules.embedder import FourierEmbedder +from hy3dgen.shapegen.bpt.miche.michelangelo.models.modules.distributions import DiagonalGaussianDistribution +from hy3dgen.shapegen.bpt.miche.michelangelo.models.modules.transformer_blocks import ( + ResidualCrossAttentionBlock, + Transformer +) + +from .tsal_base import ShapeAsLatentModule + + +class CrossAttentionEncoder(nn.Module): + + def __init__(self, *, + device: Optional[torch.device], + dtype: Optional[torch.dtype], + num_latents: int, + fourier_embedder: FourierEmbedder, + point_feats: int, + width: int, + heads: int, + layers: int, + init_scale: float = 0.25, + qkv_bias: bool = True, + flash: bool = False, + use_ln_post: bool = False, + use_checkpoint: bool = False): + + super().__init__() + + self.use_checkpoint = use_checkpoint + self.num_latents = num_latents + + self.query = nn.Parameter(torch.randn((num_latents, width), device=device, dtype=dtype) * 0.02) + + self.fourier_embedder = fourier_embedder + self.input_proj = nn.Linear(self.fourier_embedder.out_dim + point_feats, width, device=device, dtype=dtype) + self.cross_attn = ResidualCrossAttentionBlock( + device=device, + dtype=dtype, + width=width, + heads=heads, + init_scale=init_scale, + qkv_bias=qkv_bias, + flash=flash, + ) + + self.self_attn = Transformer( + device=device, + dtype=dtype, + n_ctx=num_latents, + width=width, + layers=layers, + heads=heads, + init_scale=init_scale, + qkv_bias=qkv_bias, + flash=flash, + use_checkpoint=False + ) + + if use_ln_post: + self.ln_post = nn.LayerNorm(width, dtype=dtype, device=device) + else: + self.ln_post = None + + def _forward(self, pc, feats): + + # Args: + # pc (torch.FloatTensor): [B, N, 3] + # feats (torch.FloatTensor or None): [B, N, C] + + bs = pc.shape[0] + + data = self.fourier_embedder(pc) + if feats is not None: + data = torch.cat([data, feats], dim=-1) + data = self.input_proj(data) + + query = repeat(self.query, "m c -> b m c", b=bs) + latents = self.cross_attn(query, data) + latents = self.self_attn(latents) + + if self.ln_post is not None: + latents = self.ln_post(latents) + + return latents, pc + + def forward(self, pc: torch.FloatTensor, feats: Optional[torch.FloatTensor] = 
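+                # typically the surface-normal channels of the sampled points;
+                # None disables the extra per-point features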
None): + + # Args: + # pc (torch.FloatTensor): [B, N, 3] + # feats (torch.FloatTensor or None): [B, N, C] + + + return checkpoint(self._forward, (pc, feats), self.parameters(), self.use_checkpoint) + + +class CrossAttentionDecoder(nn.Module): + + def __init__(self, *, + device: Optional[torch.device], + dtype: Optional[torch.dtype], + num_latents: int, + out_channels: int, + fourier_embedder: FourierEmbedder, + width: int, + heads: int, + init_scale: float = 0.25, + qkv_bias: bool = True, + flash: bool = False, + use_checkpoint: bool = False): + + super().__init__() + + self.use_checkpoint = use_checkpoint + self.fourier_embedder = fourier_embedder + + self.query_proj = nn.Linear(self.fourier_embedder.out_dim, width, device=device, dtype=dtype) + + self.cross_attn_decoder = ResidualCrossAttentionBlock( + device=device, + dtype=dtype, + n_data=num_latents, + width=width, + heads=heads, + init_scale=init_scale, + qkv_bias=qkv_bias, + flash=flash + ) + + self.ln_post = nn.LayerNorm(width, device=device, dtype=dtype) + self.output_proj = nn.Linear(width, out_channels, device=device, dtype=dtype) + + def _forward(self, queries: torch.FloatTensor, latents: torch.FloatTensor): + queries = self.query_proj(self.fourier_embedder(queries)) + x = self.cross_attn_decoder(queries, latents) + x = self.ln_post(x) + x = self.output_proj(x) + return x + + def forward(self, queries: torch.FloatTensor, latents: torch.FloatTensor): + return checkpoint(self._forward, (queries, latents), self.parameters(), self.use_checkpoint) + + +class ShapeAsLatentPerceiver(ShapeAsLatentModule): + def __init__(self, *, + device: Optional[torch.device], + dtype: Optional[torch.dtype], + num_latents: int, + point_feats: int = 0, + embed_dim: int = 0, + num_freqs: int = 8, + include_pi: bool = True, + width: int, + heads: int, + num_encoder_layers: int, + num_decoder_layers: int, + init_scale: float = 0.25, + qkv_bias: bool = True, + flash: bool = False, + use_ln_post: bool = False, + use_checkpoint: bool = False): + + super().__init__() + + self.use_checkpoint = use_checkpoint + + self.num_latents = num_latents + self.fourier_embedder = FourierEmbedder(num_freqs=num_freqs, include_pi=include_pi) + + init_scale = init_scale * math.sqrt(1.0 / width) + self.encoder = CrossAttentionEncoder( + device=device, + dtype=dtype, + fourier_embedder=self.fourier_embedder, + num_latents=num_latents, + point_feats=point_feats, + width=width, + heads=heads, + layers=num_encoder_layers, + init_scale=init_scale, + qkv_bias=qkv_bias, + flash=flash, + use_ln_post=use_ln_post, + use_checkpoint=use_checkpoint + ) + + self.embed_dim = embed_dim + if embed_dim > 0: + # VAE embed + self.pre_kl = nn.Linear(width, embed_dim * 2, device=device, dtype=dtype) + self.post_kl = nn.Linear(embed_dim, width, device=device, dtype=dtype) + self.latent_shape = (num_latents, embed_dim) + else: + self.latent_shape = (num_latents, width) + + self.transformer = Transformer( + device=device, + dtype=dtype, + n_ctx=num_latents, + width=width, + layers=num_decoder_layers, + heads=heads, + init_scale=init_scale, + qkv_bias=qkv_bias, + flash=flash, + use_checkpoint=use_checkpoint + ) + + # geometry decoder + self.geo_decoder = CrossAttentionDecoder( + device=device, + dtype=dtype, + fourier_embedder=self.fourier_embedder, + out_channels=1, + num_latents=num_latents, + width=width, + heads=heads, + init_scale=init_scale, + qkv_bias=qkv_bias, + flash=flash, + use_checkpoint=use_checkpoint + ) + + def encode(self, + pc: torch.FloatTensor, + feats: Optional[torch.FloatTensor] = 
None, + sample_posterior: bool = True): + + + # Args: + # pc (torch.FloatTensor): [B, N, 3] + # feats (torch.FloatTensor or None): [B, N, C] + # sample_posterior (bool): + + # Returns: + # latents (torch.FloatTensor) + # center_pos (torch.FloatTensor or None): + # posterior (DiagonalGaussianDistribution or None): + + + latents, center_pos = self.encoder(pc, feats) + + posterior = None + if self.embed_dim > 0: + moments = self.pre_kl(latents) + posterior = DiagonalGaussianDistribution(moments, feat_dim=-1) + + if sample_posterior: + latents = posterior.sample() + else: + latents = posterior.mode() + + return latents, center_pos, posterior + + def decode(self, latents: torch.FloatTensor): + latents = self.post_kl(latents) + return self.transformer(latents) + + def query_geometry(self, queries: torch.FloatTensor, latents: torch.FloatTensor): + logits = self.geo_decoder(queries, latents).squeeze(-1) + return logits + + def forward(self, + pc: torch.FloatTensor, + feats: torch.FloatTensor, + volume_queries: torch.FloatTensor, + sample_posterior: bool = True): + + # Args: + # pc (torch.FloatTensor): [B, N, 3] + # feats (torch.FloatTensor or None): [B, N, C] + # volume_queries (torch.FloatTensor): [B, P, 3] + # sample_posterior (bool): + + # Returns: + # logits (torch.FloatTensor): [B, P] + # center_pos (torch.FloatTensor): [B, M, 3] + # posterior (DiagonalGaussianDistribution or None). + + + + latents, center_pos, posterior = self.encode(pc, feats, sample_posterior=sample_posterior) + + latents = self.decode(latents) + logits = self.query_geometry(volume_queries, latents) + + return logits, center_pos, posterior + + +class AlignedShapeLatentPerceiver(ShapeAsLatentPerceiver): + + def __init__(self, *, + device: Optional[torch.device], + dtype: Optional[torch.dtype], + num_latents: int, + point_feats: int = 0, + embed_dim: int = 0, + num_freqs: int = 8, + include_pi: bool = True, + width: int, + heads: int, + num_encoder_layers: int, + num_decoder_layers: int, + init_scale: float = 0.25, + qkv_bias: bool = True, + flash: bool = False, + use_ln_post: bool = False, + use_checkpoint: bool = False): + + super().__init__( + device=device, + dtype=dtype, + num_latents=1 + num_latents, + point_feats=point_feats, + embed_dim=embed_dim, + num_freqs=num_freqs, + include_pi=include_pi, + width=width, + heads=heads, + num_encoder_layers=num_encoder_layers, + num_decoder_layers=num_decoder_layers, + init_scale=init_scale, + qkv_bias=qkv_bias, + flash=flash, + use_ln_post=use_ln_post, + use_checkpoint=use_checkpoint + ) + + self.width = width + + def encode(self, + pc: torch.FloatTensor, + feats: Optional[torch.FloatTensor] = None, + sample_posterior: bool = True): + + # Args: + # pc (torch.FloatTensor): [B, N, 3] + # feats (torch.FloatTensor or None): [B, N, c] + # sample_posterior (bool): + + # Returns: + # shape_embed (torch.FloatTensor) + # kl_embed (torch.FloatTensor): + # posterior (DiagonalGaussianDistribution or None): + + + shape_embed, latents = self.encode_latents(pc, feats) + kl_embed, posterior = self.encode_kl_embed(latents, sample_posterior) + + return shape_embed, kl_embed, posterior + + def encode_latents(self, + pc: torch.FloatTensor, + feats: Optional[torch.FloatTensor] = None): + + x, _ = self.encoder(pc, feats) + + shape_embed = x[:, 0] + latents = x[:, 1:] + + return shape_embed, latents + + def encode_kl_embed(self, latents: torch.FloatTensor, sample_posterior: bool = True): + posterior = None + if self.embed_dim > 0: + moments = self.pre_kl(latents) + posterior = 
DiagonalGaussianDistribution(moments, feat_dim=-1) + + if sample_posterior: + kl_embed = posterior.sample() + else: + kl_embed = posterior.mode() + else: + kl_embed = latents + + return kl_embed, posterior + + def forward(self, + pc: torch.FloatTensor, + feats: torch.FloatTensor, + volume_queries: torch.FloatTensor, + sample_posterior: bool = True): + + # Args: + # pc (torch.FloatTensor): [B, N, 3] + # feats (torch.FloatTensor or None): [B, N, C] + # volume_queries (torch.FloatTensor): [B, P, 3] + # sample_posterior (bool): + + # Returns: + # shape_embed (torch.FloatTensor): [B, projection_dim] + # logits (torch.FloatTensor): [B, M] + # posterior (DiagonalGaussianDistribution or None). + + + shape_embed, kl_embed, posterior = self.encode(pc, feats, sample_posterior=sample_posterior) + + latents = self.decode(kl_embed) + logits = self.query_geometry(volume_queries, latents) + + return shape_embed, logits, posterior diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/tsal_base.py b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/tsal_base.py new file mode 100644 index 0000000..0de0859 --- /dev/null +++ b/hy3dgen/shapegen/bpt/miche/michelangelo/models/tsal/tsal_base.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- + +import torch.nn as nn +from typing import Tuple, List, Optional + +# Base class for output of Point to Mesh transformation +class Point2MeshOutput(object): + def __init__(self): + self.mesh_v = None # Vertices of the mesh + self.mesh_f = None # Faces of the mesh + self.center = None # Center of the mesh + self.pc = None # Point cloud data + + +# Base class for output of Latent to Mesh transformation +class Latent2MeshOutput(object): + def __init__(self): + self.mesh_v = None # Vertices of the mesh + self.mesh_f = None # Faces of the mesh + + +# Base class for output of Aligned Mesh transformation +class AlignedMeshOutput(object): + def __init__(self): + self.mesh_v = None # Vertices of the mesh + self.mesh_f = None # Faces of the mesh + self.surface = None # Surface data + self.image = None # Aligned image data + self.text: Optional[str] = None # Aligned text data + self.shape_text_similarity: Optional[float] = None # Similarity between shape and text + self.shape_image_similarity: Optional[float] = None # Similarity between shape and image + + +# Base class for Shape as Latent with Point to Mesh transformation module +class ShapeAsLatentPLModule(nn.Module): + latent_shape: Tuple[int] # Shape of the latent space + + def encode(self, surface, *args, **kwargs): + raise NotImplementedError + + def decode(self, z_q, *args, **kwargs): + raise NotImplementedError + + def latent2mesh(self, latents, *args, **kwargs) -> List[Latent2MeshOutput]: + raise NotImplementedError + + def point2mesh(self, *args, **kwargs) -> List[Point2MeshOutput]: + raise NotImplementedError + + +# Base class for Shape as Latent module +class ShapeAsLatentModule(nn.Module): + latent_shape: Tuple[int, int] # Shape of the latent space + + def __init__(self, *args, **kwargs): + super().__init__() + + def encode(self, *args, **kwargs): + raise NotImplementedError + + def decode(self, *args, **kwargs): + raise NotImplementedError + + def query_geometry(self, *args, **kwargs): + raise NotImplementedError + + +# Base class for Aligned Shape as Latent with Point to Mesh transformation module +class AlignedShapeAsLatentPLModule(nn.Module): + latent_shape: Tuple[int] # Shape of the latent space + + def set_shape_model_only(self): + raise NotImplementedError + + def encode(self, surface, *args, **kwargs): + 
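+        # implementations encode a sampled surface point cloud into latent codes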
raise NotImplementedError
+
+    def decode(self, z_q, *args, **kwargs):
+        raise NotImplementedError
+
+    def latent2mesh(self, latents, *args, **kwargs) -> List[Latent2MeshOutput]:
+        raise NotImplementedError
+
+    def point2mesh(self, *args, **kwargs) -> List[Point2MeshOutput]:
+        raise NotImplementedError
+
+
+# Base class for Aligned Shape as Latent module
+class AlignedShapeAsLatentModule(nn.Module):
+    shape_model: ShapeAsLatentModule  # Shape model module
+    latent_shape: Tuple[int, int]  # Shape of the latent space
+
+    def __init__(self, *args, **kwargs):
+        super().__init__()
+
+    def set_shape_model_only(self):
+        raise NotImplementedError
+
+    def encode_image_embed(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def encode_text_embed(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def encode_shape_embed(self, *args, **kwargs):
+        raise NotImplementedError
+
+
+# Base class for Textured Shape as Latent module
+class TexturedShapeAsLatentModule(nn.Module):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__()
+
+    def encode(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def decode(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def query_geometry(self, *args, **kwargs):
+        raise NotImplementedError
+
+    def query_color(self, *args, **kwargs):
+        raise NotImplementedError
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/utils/__init__.py b/hy3dgen/shapegen/bpt/miche/michelangelo/utils/__init__.py
new file mode 100644
index 0000000..6e9efc9
--- /dev/null
+++ b/hy3dgen/shapegen/bpt/miche/michelangelo/utils/__init__.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+from .misc import instantiate_from_config
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/utils/__pycache__/__init__.cpython-312.pyc b/hy3dgen/shapegen/bpt/miche/michelangelo/utils/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8858c3124d0708d179dec4abd268b3bf70ac8b72
GIT binary patch literal 297
[binary .pyc data omitted]
diff --git a/hy3dgen/shapegen/bpt/miche/michelangelo/utils/__pycache__/misc.cpython-312.pyc b/hy3dgen/shapegen/bpt/miche/michelangelo/utils/__pycache__/misc.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..584d7c4d89b17e0d3ac5b344c6d5f9ac3dd8d6b1
GIT binary patch literal 3314
[binary .pyc data omitted]
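The source diff for utils/misc.py is not legible in this part of the patch (only its compiled blob appears above), yet every module in this series builds its components through the instantiate_from_config it exports. For orientation only, here is a minimal sketch of the conventional latent-diffusion-style helper, assuming the usual target/params config layout; the body below is an assumption, not the patch's code:

import importlib

def instantiate_from_config(config, **extra_kwargs):
    # Resolve the dotted class path named under config["target"] and build it
    # with config["params"] merged with any call-site overrides.
    module_path, cls_name = config["target"].rsplit(".", 1)
    cls = getattr(importlib.import_module(module_path), cls_name)
    params = dict(config.get("params", {}))
    params.update(extra_kwargs)
    return cls(**params)

This matches how asl_pl_module.py calls it: the config names the class, and keyword overrides such as shape_model=... are passed straight through to the constructor.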
[binary and garbled .pyc patch data omitted]
diff --git a/hy3dgen/shapegen/bpt/model/__pycache__/model.cpython-312.pyc b/hy3dgen/shapegen/bpt/model/__pycache__/model.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a594f4b4a6319a3148d9747c04e254d33d34a766
GIT binary patch literal 13320
[binary .pyc data omitted]
zK`z=0r$-Z=FkS#hQmuGGR^o^A5PuP;+6`>M$x^m;zT3IeB6E;ybG%`8M6P;~D*}LG z-y!v#-fgHxDY;yYeCh_y_69gIWaY(Y9C?73p6&DZP(I}@cXP0cV5Pt+$*vyx)&sOE z!nmw@b)0g$Omy4EJUTnH(*iJ*X;5Cr>37wo zQFg;H)?+)yYJO>;kVW7W6Vm6Mgz05I2;Rcl*Ja%z(%5|EwW}V+@AHoigKG`A zCgC`Vc1eE31;-v$4B8r^Gi5p;1?yfcKzb6LX}z8~i_)Sql)U3K1HMx9u99Okiswn- z+9rKL<_sc6FumTCb(491aC_&q0Yyd+jz{uB8J&3gk-0D8eH#aJ}0 zd5$c*6HOG2oS(#o83CGKA>vbDT*4R_Iyk~GlM!XQpfIA}EG~xtZQsXJUl#92VY`!W zS_tt!j-On9S9Coq#loRe>z2ON@#I^5e>?Tq(w8RfONK=Q-xHfl=!Ct;lBI2r$v4v0 zQsg>f#qoVY)sb~;YhvcHwKZL~CpI9IH7@GECJU2}mX+ew)<@k!$K}W5l`R5u9(ZBK z3LHzVi>-Xey%Q1L*Cl258YX#IDS>I3`A65Wuj+e_~HdYR~6Bvb=K&$Sk>ZnL01#e zr*-DL!~zke9_vaW^fVX2=>mA~>)=be;mj6vHU@z7N5D#mL4*XA0pja-vvtyGxCrD+ zpoNyxm!NPd7a~F+rQ~o<@lYk-4+D3-bQT}d`rijQ{+?)e#J)s}RRU|x2xZye747Z` z7$R%<$C~5 zlYiF#eVTwEJZlY6_$b>DxbJx+_YYqsg&~c(^7IH)rF zaK`bRRhP@q9m#eW4>M;%9_okCNqYMs)I07UdsSEzzXK zR?S5z6hU@ayc+<6=|$;n)7AmmdTpN7L`F%9Bf3xubtsxi2vMq0db_E}$%V=g9%0@@ zuVd&{fnLZILU=HH1CYlIga?4xX(oe@v&c5*TgI$uv>=cIaY6Q^m}Vw`?(g8ou87tV zEY1JK_G!`z=vX`}mhtI^h)?0XQ2WHW!NskELk>sV(s5Wh{jTa@XpO&oK2a-p-AGHDJFHQ(Y2Oir8(gmgb z$$Mv$)y+ad^S!g1C3Ue|D{aZQhLYp6>m@g#aanm(|23Q!-}=SzbVY5C7&GOZ+(0`UL}+^d|9|ZimdaJ1>z9LuPYu@B=AI2s_ON`t(BS2=Qmn< zgqEI1RgZd8EtitL1If1sk;>j45_*R>dTF7TPL7NwedDR#iKJ`tF*)_E5_24op!I&t zWWL)DF~PpLb=}yIR5ge;Iiqt!km&Qf-SFot*2#c}a!)e>xZGjIv;T}X7!VWlwu5jj zFm^B)ICwiS%N#@(1JBT&tj9Cf8MttOzj+bkKKBh@kYy0=5Z4-1>w^#lvv4Is&ud15 zje#5E5=uZGpbMb#416;ts1M3r&s!Pr#eO!(%%V#h)CA~IFo{t2Fyeh3WwJ%e{_V1Q zCy31Wka!t2+}KRtmZEn9+>D{i84=#2LxJ4U1iEy~od9H1H+}RiQ8k}pqVlB>KzAH2 zVrD1lcII8+j$#?s0Pig&j^kfp=C3f@KVgTy(Y%G@75v`&2bT{%$547}T7eZ5rVAVp z3c%$G+(kSmTA(D5zJjWpUysQ$pJ@3TJ@cmoun93BHUsF2&UG@4`D)I@O z-Ih14_O2Vxgges(Wzkbh-HY9Pe{ApaaH^mo+!-l)|H2caE!xZ*1ba=aKfd?D@CvnV z>;Q6=?q}!TKNr=ltDH|vPJU0yR2e?~#8wrnP1*K^&!tVpk<((pXI<`y8B=BZ<1<3p wp_Hj 2: + d = np.argmin(f) + sub_faces.append([f[(d + i) % c_length] for i in range(c_length)]) + + faces = sub_faces + + # Sort faces by lowest vertex indices. If two faces have the same lowest + # index then sort by next lowest and so on. + faces.sort(key=lambda f: tuple(sorted(f))) + num_verts = vertices.shape[0] + vert_connected = np.equal( + np.arange(num_verts)[:, None], np.hstack(faces)[None] + ).any(axis=-1) + vertices = vertices[vert_connected] + + # Re-index faces to re-ordered vertices. + vert_indices = np.arange(num_verts) - np.cumsum(1 - vert_connected.astype("int")) + faces = [vert_indices[f].tolist() for f in faces] + + return vertices, faces + + +def process_mesh(vertices, faces, quantization_bits=8, augment=True, augment_dict=None): + """Process mesh vertices and faces.""" + + # Transpose so that z-axis is vertical. + vertices = vertices[:, [2, 0, 1]] + + # Translate the vertices so that bounding box is centered at zero. + vertices = center_vertices(vertices) + + if augment: + vertices = augment_mesh(vertices, **augment_dict) + + # Scale the vertices so that the long diagonal of the bounding box is equal + # to one. + vertices = normalize_vertices_scale(vertices) + + # Quantize and sort vertices, remove resulting duplicates, sort and reindex + # faces. + vertices, faces = quantize_process_mesh( + vertices, faces, quantization_bits=quantization_bits + ) + vertices = undiscretize(vertices, num_discrete=2**quantization_bits) + + + # Discard degenerate meshes without faces. 
+
+
+def process_mesh(vertices, faces, quantization_bits=8, augment=True, augment_dict=None):
+    """Process mesh vertices and faces."""
+
+    # Transpose so that z-axis is vertical.
+    vertices = vertices[:, [2, 0, 1]]
+
+    # Translate the vertices so that bounding box is centered at zero.
+    vertices = center_vertices(vertices)
+
+    if augment:
+        vertices = augment_mesh(vertices, **augment_dict)
+
+    # Scale the vertices so that the long diagonal of the bounding box is equal
+    # to one.
+    vertices = normalize_vertices_scale(vertices)
+
+    # Quantize and sort vertices, remove resulting duplicates, sort and reindex
+    # faces.
+    vertices, faces = quantize_process_mesh(
+        vertices, faces, quantization_bits=quantization_bits
+    )
+    vertices = undiscretize(vertices, num_discrete=2**quantization_bits)
+
+    # Discard degenerate meshes without faces.
+    return {
+        "vertices": vertices,
+        "faces": faces,
+    }
+
+
+def load_process_mesh(mesh_obj_path, quantization_bits=8, augment=False, augment_dict=None):
+    """Load obj file and process."""
+    # Load mesh
+    mesh = trimesh.load(mesh_obj_path, force='mesh', process=False)
+    return process_mesh(mesh.vertices, mesh.faces, quantization_bits, augment=augment, augment_dict=augment_dict)
+
+
+def augment_mesh(vertices, scale_min=0.95, scale_max=1.05, rotation=0., jitter_strength=0.):
+    '''Randomly scale (and optionally rotate/jitter) vertices; each axis is
+    scaled by an independent factor drawn from [scale_min, scale_max].'''
+
+    # vertices [nv, 3]
+    for i in range(3):
+        # Generate a random scale factor
+        scale = random.uniform(scale_min, scale_max)
+
+        # independently applied scaling across each axis of vertices
+        vertices[:, i] *= scale
+
+    if rotation != 0.:
+        axis = [random.uniform(-1, 1), random.uniform(-1, 1), random.uniform(-1, 1)]
+        radian = np.pi / 180 * rotation
+        rotation = Rotation.from_rotvec(radian * np.array(axis))
+        vertices = rotation.apply(vertices)
+
+    if jitter_strength != 0.:
+        jitter_amount = np.random.uniform(-jitter_strength, jitter_strength)
+        vertices += jitter_amount
+
+    return vertices
+
+
+def discretize(
+    t,
+    continuous_range = (-1, 1),
+    num_discrete: int = 128
+):
+    lo, hi = continuous_range
+    assert hi > lo
+
+    t = (t - lo) / (hi - lo)
+    t *= num_discrete
+    t -= 0.5
+
+    return t.round().astype(np.int32).clip(min = 0, max = num_discrete - 1)
+
+
+def undiscretize(
+    t,
+    continuous_range = (-1, 1),
+    num_discrete: int = 128
+):
+    lo, hi = continuous_range
+    assert hi > lo
+
+    t = t.astype(np.float32)
+
+    t += 0.5
+    t /= num_discrete
+    return t * (hi - lo) + lo
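A quick round-trip illustration of the two helpers above (a usage sketch, not part of the patch; printed values assume NumPy's round-half-to-even):

import numpy as np

coords = np.array([-1.0, -0.3333, 0.0, 0.9999])
q = discretize(coords, continuous_range=(-1, 1), num_discrete=256)
recon = undiscretize(q, continuous_range=(-1, 1), num_discrete=256)
print(q)      # [  0  85 128 255]
print(recon)  # bin centers, each within half a bin (bins are 2/256 wide) of the input

process_mesh uses exactly this pair with num_discrete=2**quantization_bits, so exported vertices always land on bin centers.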
diff --git a/hy3dgen/shapegen/bpt/model/miche_conditioner.py b/hy3dgen/shapegen/bpt/model/miche_conditioner.py new file mode 100644 index 0000000..1a744d5 --- /dev/null +++ b/hy3dgen/shapegen/bpt/model/miche_conditioner.py @@ -0,0 +1,90 @@ +import torch +import os +from torch import nn +from beartype import beartype +from ..miche.encode import load_model +from ..miche.michelangelo.models.tsal import asl_pl_module + +# helper functions + +def exists(val): + return val is not None + +def default(*values): + for value in values: + if exists(value): + return value + return None + + +# point-cloud encoder from Michelangelo +@beartype +class PointConditioner(torch.nn.Module): + def __init__( + self, + *, + dim_latent = None, + model_name = 'miche-256-feature', + cond_dim = 768, + freeze = True, + ): + super().__init__() + + # open-source version of miche + if model_name == 'miche-256-feature': + ckpt_path = None + cur_dir = os.path.dirname(os.path.abspath(__file__)) + model_path = os.path.join(cur_dir, '..', 'shapevae-256.yaml') + config_path = model_path + + self.feature_dim = 1024 # embedding dimension + self.cond_length = 257 # length of embedding + self.point_encoder = load_model(ckpt_path=ckpt_path, config_path=config_path) + + # additional layers to connect miche and GPT + self.cond_head_proj = nn.Linear(cond_dim, self.feature_dim) + self.cond_proj = nn.Linear(cond_dim, self.feature_dim) + + else: + raise NotImplementedError + + # whether to finetune the point-cloud encoder + if freeze: + for parameter in self.point_encoder.parameters(): + parameter.requires_grad = False + + self.freeze = freeze + self.model_name = model_name + self.dim_latent = default(dim_latent, self.feature_dim) + + self.register_buffer('_device_param', torch.tensor(0.), persistent = False) + + + @property + def device(self): + return next(self.buffers()).device + + + def embed_pc(self, pc_normal): + # encode point cloud to embeddings + if self.model_name == 'miche-256-feature': + point_feature = self.point_encoder.encode_latents(pc_normal) + pc_embed_head = self.cond_head_proj(point_feature[:, 0:1]) + pc_embed = self.cond_proj(point_feature[:, 1:]) + pc_embed = torch.cat([pc_embed_head, pc_embed], dim=1) + + return pc_embed + + + def forward( + self, + pc = None, + pc_embeds = None, + ): + if pc_embeds is None: + pc_embeds = self.embed_pc(pc.to(next(self.buffers()).dtype)) + + assert not torch.any(torch.isnan(pc_embeds)), 'NaN values in pc embeddings' + + return pc_embeds +
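A hedged sanity check of the conditioner's contract, assuming the shapevae-256.yaml config and its weights resolve at load time; the input layout (4096 points, xyz plus normal) follows the dataset utilities later in this patch:

import torch

conditioner = PointConditioner(model_name='miche-256-feature').eval()
pc_normal = torch.randn(2, 4096, 6)  # (batch, points, xyz + normal)
with torch.no_grad():
    embeds = conditioner(pc=pc_normal)
# per the constants above, embeds should have shape (2, cond_length=257, feature_dim=1024)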
diff --git a/hy3dgen/shapegen/bpt/model/model.py b/hy3dgen/shapegen/bpt/model/model.py new file mode 100644 index 0000000..8ec2d4d --- /dev/null +++ b/hy3dgen/shapegen/bpt/model/model.py @@ -0,0 +1,382 @@ +import math +import torch +from torch import nn, Tensor +from torch.nn import Module +import torch.nn.functional as F +from einops import rearrange, repeat, pack +from pytorch_custom_utils import save_load +from beartype import beartype +from beartype.typing import Union, Tuple, Callable, Optional, Any +from x_transformers import Decoder +from x_transformers.x_transformers import LayerIntermediates +from x_transformers.autoregressive_wrapper import ( + eval_decorator, + top_k, +) +from .miche_conditioner import PointConditioner +from functools import partial +from tqdm import tqdm +from .data_utils import discretize + +# helper functions + +def exists(v): + return v is not None + +def default(v, d): + return v if exists(v) else d + +def first(it): + return it[0] + +def divisible_by(num, den): + return (num % den) == 0 + +def pad_at_dim(t, padding, dim = -1, value = 0): + ndim = t.ndim + right_dims = (ndim - dim - 1) if dim >= 0 else (-dim - 1) + zeros = (0, 0) * right_dims + return F.pad(t, (*zeros, *padding), value = value) + + +# main class of auto-regressive Transformer +@save_load() +class MeshTransformer(Module): + @beartype + def __init__( + self, + *, + dim: Union[int, Tuple[int, int]] = 1024, # hidden size of Transformer + max_seq_len = 10000, # max sequence length + flash_attn = True, # whether to use flash attention + attn_depth = 24, # number of layers + attn_dim_head = 64, # dim for each head + attn_heads = 16, # number of heads + attn_kwargs: dict = dict( + ff_glu = True, + num_mem_kv = 4, + attn_qk_norm = True, + ), + dropout = 0.0, + pad_id = -1, + coor_continuous_range = (-1., 1.), + num_discrete_coors = 2**7, + block_size = 8, + offset_size = 16, + mode = 'vertices', + special_token = -2, + use_special_block = True, + conditioned_on_pc = True, + encoder_name = 'miche-256-feature', + encoder_freeze = False, + ): + super().__init__() + + if use_special_block: + # block_ids, offset_ids, special_block_ids + vocab_size = block_size**3 + offset_size**3 + block_size**3 + self.sp_block_embed = nn.Parameter(torch.randn(1, dim)) + else: + # block_ids, offset_ids, special_token + vocab_size = block_size**3 + offset_size**3 + 1 + self.special_token = special_token + self.special_token_cb = block_size**3 + offset_size**3 + + self.use_special_block = use_special_block + + self.sos_token = nn.Parameter(torch.randn(dim)) + self.eos_token_id = vocab_size + self.mode = mode + self.token_embed = nn.Embedding(vocab_size + 1, dim) + self.num_discrete_coors = num_discrete_coors + self.coor_continuous_range = coor_continuous_range + self.block_size = block_size + self.offset_size = offset_size + self.abs_pos_emb = nn.Embedding(max_seq_len, dim) + self.max_seq_len = max_seq_len + self.conditioner = None + self.conditioned_on_pc = conditioned_on_pc + cross_attn_dim_context = None + + self.block_embed = nn.Parameter(torch.randn(1, dim)) + self.offset_embed = nn.Parameter(torch.randn(1, dim)) + + assert self.block_size * self.offset_size == self.num_discrete_coors + + # load point_cloud encoder + if conditioned_on_pc: + print(f'Point cloud encoder: {encoder_name} | freeze: {encoder_freeze}') + self.conditioner = PointConditioner(model_name=encoder_name, freeze=encoder_freeze) + cross_attn_dim_context = self.conditioner.dim_latent + else: + raise NotImplementedError + + # main autoregressive attention network + self.decoder = Decoder( + dim = dim, + depth = attn_depth, + dim_head = attn_dim_head, + heads = attn_heads, + attn_flash = flash_attn, + attn_dropout = dropout, + ff_dropout = dropout, + cross_attend = conditioned_on_pc, + cross_attn_dim_context = cross_attn_dim_context, + cross_attn_num_mem_kv = 4, # helps prevent NaNs when the condition is dropped out + **attn_kwargs + ) + + self.to_logits = nn.Linear(dim, vocab_size + 1) + self.pad_id = pad_id + self.discretize_face_coords = partial( + discretize, + num_discrete = num_discrete_coors, + continuous_range = coor_continuous_range + ) + + @property + def device(self): + return next(self.parameters()).device + + + @eval_decorator + @torch.no_grad() + @beartype + def generate( + self, + prompt: Optional[Tensor] = None, + pc: Optional[Tensor] = None, + cond_embeds: Optional[Tensor] = None, + batch_size: Optional[int] = None, # inferred from the prompt if one is given + filter_logits_fn: Callable = top_k, + filter_kwargs: dict = dict(), + temperature = 0.5, + return_codes = False, + cache_kv = True, + max_seq_len = None, + face_coords_to_file: Optional[Callable[[Tensor], Any]] = None, + tqdm_position = 0, + ): + max_seq_len = default(max_seq_len, self.max_seq_len) + + if exists(prompt): + assert not exists(batch_size), 'batch_size is inferred from the prompt' + + prompt = rearrange(prompt, 'b ... -> b (...)') + assert prompt.shape[-1] <= self.max_seq_len + + batch_size = prompt.shape[0] + + # encode point cloud + if cond_embeds is None: + if self.conditioned_on_pc: + cond_embeds = self.conditioner(pc = pc) + + batch_size = default(batch_size, 1) + + codes = default(prompt, torch.empty((batch_size, 0), dtype = torch.long, device = self.device)) + + curr_length = codes.shape[-1] + + cache = None + eos_iter = None + # predict tokens auto-regressively + for i in tqdm(range(curr_length, max_seq_len), position=tqdm_position, + desc=f'Process: {tqdm_position}', dynamic_ncols=True, leave=False): + + output = self.forward_on_codes( + codes, + return_loss = False, + return_cache = cache_kv, + append_eos = False, + cond_embeds = cond_embeds, + cache = cache + ) + + if cache_kv: + logits, cache = output + else: + logits = output + + # sample code from logits + logits = logits[:, -1] + filtered_logits = filter_logits_fn(logits, **filter_kwargs) + probs = F.softmax(filtered_logits / temperature, dim=-1) + sample = torch.multinomial(probs, 1) + codes, _ = pack([codes, sample], 'b *') + + # Check if all sequences have encountered EOS at least once + is_eos_codes = (codes == self.eos_token_id) + if is_eos_codes.any(dim=-1).all(): + # Record the current sequence length when EOS is first detected in all sequences + if eos_iter is None: + eos_iter = codes.shape[-1] + # Once we've generated 20% more tokens than eos_iter, break out of the loop + if codes.shape[-1] >= int(eos_iter * 1.2): + break + + # mask out everything after the first eos as padding + mask = is_eos_codes.float().cumsum(dim = -1) >= 1 + codes = codes.masked_fill(mask, self.pad_id) + + # early return of the raw token codes + if return_codes: + # codes = rearrange(codes, 'b (n q) -> b n q', q = 2) + if not self.use_special_block: + codes[codes == self.special_token_cb] = self.special_token + return codes + + face_coords, face_mask = self.decode_codes(codes) + + if not exists(face_coords_to_file): + return face_coords, face_mask + + files = [face_coords_to_file(coords[mask]) for coords, mask in zip(face_coords, face_mask)] + return files +
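A hedged sketch of conditional generation with the method above; the checkpoint path is hypothetical, and @save_load() from pytorch_custom_utils is expected to supply the matching load() helper:

import torch

model = MeshTransformer()              # defaults wire in the miche-256-feature conditioner
# model.load('/path/to/checkpoint.pt') # hypothetical checkpoint path
pc_normal = torch.randn(1, 4096, 6)    # xyz + normal per point, as produced by the dataset utilities
codes = model.generate(pc=pc_normal, temperature=0.5, return_codes=True)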
+ def forward( + self, + *, + codes: Optional[Tensor] = None, + cache: Optional[LayerIntermediates] = None, + **kwargs + ): + # convert special tokens + if not self.use_special_block: + codes[codes == self.special_token] = self.special_token_cb + + return self.forward_on_codes(codes, cache = cache, **kwargs) + + + def forward_on_codes( + self, + codes = None, + return_loss = True, + return_cache = False, + append_eos = True, + cache = None, + pc = None, + cond_embeds = None, + ): + # handle conditions + attn_context_kwargs = dict() + + if self.conditioned_on_pc: + assert exists(pc) ^ exists(cond_embeds), 'exactly one of pc or cond_embeds should be given' + + # encode the point cloud if its embedding is not provided + if not exists(cond_embeds): + cond_embeds = self.conditioner( + pc = pc, + pc_embeds = cond_embeds, + ) + + attn_context_kwargs = dict( + context = cond_embeds, + context_mask = None, + ) + + # take care of codes that may be flattened + if codes.ndim > 2: + codes = rearrange(codes, 'b ... -> b (...)') + + # prepare mask for position embedding of block and offset tokens + block_mask = (0 <= codes) & (codes < self.block_size**3) + offset_mask = (self.block_size**3 <= codes) & (codes < self.block_size**3 + self.offset_size**3) + if self.use_special_block: + sp_block_mask = ( + self.block_size**3 + self.offset_size**3 <= codes + ) & ( + codes < self.block_size**3 + self.offset_size**3 + self.block_size**3 + ) + + # get some variables + batch, seq_len, device = *codes.shape, codes.device + + assert seq_len <= self.max_seq_len, \ + f'received codes of length {seq_len} but needs to be at most {self.max_seq_len}' + + # auto append eos token + if append_eos: + assert exists(codes) + + code_lens = ((codes == self.pad_id).cumsum(dim = -1) == 0).sum(dim = -1) + + codes = F.pad(codes, (0, 1), value = 0) # reserve one slot for the eos token + + batch_arange = torch.arange(batch, device = device) + + batch_arange = rearrange(batch_arange, '... -> ... 1') + code_lens = rearrange(code_lens, '... -> ... 1') + + codes[batch_arange, code_lens] = self.eos_token_id + + # if returning loss, save the labels for cross entropy + if return_loss: + assert seq_len > 0 + codes, labels = codes[:, :-1], codes + + # token embed + codes = codes.masked_fill(codes == self.pad_id, 0) + codes = self.token_embed(codes) + + # token embed + absolute positions + seq_arange = torch.arange(codes.shape[-2], device = device) + codes = codes + self.abs_pos_emb(seq_arange) + + # add positional embedding for block and offset tokens + block_embed = repeat(self.block_embed, '1 d -> b n d', n = seq_len, b = batch) + offset_embed = repeat(self.offset_embed, '1 d -> b n d', n = seq_len, b = batch) + codes[block_mask] += block_embed[block_mask] + codes[offset_mask] += offset_embed[offset_mask] + + if self.use_special_block: + sp_block_embed = repeat(self.sp_block_embed, '1 d -> b n d', n = seq_len, b = batch) + codes[sp_block_mask] += sp_block_embed[sp_block_mask] + + # auto prepend sos token + sos = repeat(self.sos_token, 'd -> b d', b = batch) + codes, _ = pack([sos, codes], 'b * d') + + # attention + attended, intermediates_with_cache = self.decoder( + codes, + cache = cache, + return_hiddens = True, + **attn_context_kwargs + ) + + # logits + logits = self.to_logits(attended) + + if not return_loss: + if not return_cache: + return logits + + return logits, intermediates_with_cache + + # loss + ce_loss = F.cross_entropy( + rearrange(logits, 'b n c -> b c n'), + labels, + ignore_index = self.pad_id + ) + + return ce_loss diff --git a/hy3dgen/shapegen/bpt/model/serializaiton.py b/hy3dgen/shapegen/bpt/model/serializaiton.py new file mode 100644 index 0000000..97c359d --- /dev/null +++ b/hy3dgen/shapegen/bpt/model/serializaiton.py @@ -0,0 +1,241 @@ +import trimesh +import numpy as np +from .data_utils import discretize, undiscretize + + +def patchified_mesh(mesh: trimesh.Trimesh, special_token = -2, fix_orient=True): + sequence = [] + unvisited = np.full(len(mesh.faces), True) + degrees = mesh.vertex_degree.copy() + + # with fix_orient=True, the face normals stay correct, + # but this may make the sequences harder to learn. + if fix_orient: + face_orient = {} + for ind, face in enumerate(mesh.faces): + v0, v1, v2 = face[0], face[1], face[2] + face_orient['{}-{}-{}'.format(v0, v1, v2)] = True + face_orient['{}-{}-{}'.format(v1, v2, v0)] = True + face_orient['{}-{}-{}'.format(v2, v0, v1)] = True + face_orient['{}-{}-{}'.format(v2, v1, v0)] = False + face_orient['{}-{}-{}'.format(v1, v0, v2)] = False + face_orient['{}-{}-{}'.format(v0, v2, v1)] = False + + while sum(unvisited): + unvisited_faces = mesh.faces[unvisited] + + # select the patch center + cur_face = unvisited_faces[0] + max_deg_vertex_id = np.argmax(degrees[cur_face]) + max_deg_vertex = cur_face[max_deg_vertex_id] + + # find all connected faces + selected_faces = [] + for face_idx in mesh.vertex_faces[max_deg_vertex]: + if face_idx != -1 and unvisited[face_idx]: + face = mesh.faces[face_idx] + u, v = sorted([vertex for vertex in face if vertex != max_deg_vertex]) + selected_faces.append([u, v, face_idx]) + + face_patch = set() + selected_faces = sorted(selected_faces) + + # select the start vertex: prefer a vertex that appears only once + # (an endpoint of the fan), otherwise fall back to the lowest index + cnt = {} + for u, v, _ in selected_faces: + cnt[u] = cnt.get(u, 0) + 1 + cnt[v] = cnt.get(v, 0) + 1 + starts = [] + for vertex, num in cnt.items(): + if num == 1: + starts.append(vertex) + start_idx = min(starts) if len(starts) else selected_faces[0][0] + + res = [start_idx] + while len(res) <= len(selected_faces): + vertex = res[-1] + for u_i, v_i, face_idx_i in selected_faces: + if face_idx_i not in face_patch and vertex in (u_i, v_i): + u_i, v_i = (u_i, v_i) if vertex == u_i else (v_i, u_i) + res.append(v_i) + face_patch.add(face_idx_i) + break + + if res[-1] == vertex: + break + + if fix_orient and len(res) >= 2 and not face_orient['{}-{}-{}'.format(max_deg_vertex, res[0], res[1])]: + res = res[::-1] + + # reduce the degree of related vertices and mark the visited faces + degrees[max_deg_vertex] = len(selected_faces) - len(res) + 1 + for pos_idx, vertex in enumerate(res): + if pos_idx in [0, len(res) - 1]: + degrees[vertex] -= 1 + else: + degrees[vertex] -= 2 + for face_idx in face_patch: + unvisited[face_idx] = False + sequence.extend( + [mesh.vertices[max_deg_vertex]] + + [mesh.vertices[vertex_idx] for vertex_idx in res] + + [[special_token] * 3] + ) + + assert sum(degrees) == 0, 'All degrees should be zero' + + return np.array(sequence) + + +def get_block_representation( + sequence, + block_size=8, + offset_size=16, + block_compressed=True, + special_token=-2, + use_special_block=True + ): + ''' + convert coordinates from the Cartesian system to block indexes. + ''' + special_block_base = block_size**3 + offset_size**3 + # prepare coordinates + sp_mask = sequence != special_token + sp_mask = np.all(sp_mask, axis=1) + coords = sequence[sp_mask].reshape(-1, 3) + coords = discretize(coords) + + # convert [x, y, z] to [block_id, offset_id] + block_id = coords // offset_size + block_id = block_id[:, 0] * block_size**2 + block_id[:, 1] * block_size + block_id[:, 2] + offset_id = coords % offset_size + offset_id = offset_id[:, 0] * offset_size**2 + offset_id[:, 1] * offset_size + offset_id[:, 2] + offset_id += block_size**3 + block_coords = np.concatenate([block_id[..., None], offset_id[..., None]], axis=-1).astype(np.int64) + sequence[:, :2][sp_mask] = block_coords + sequence = sequence[:, :2] + + # convert to codes + codes = [] + cur_block_id = sequence[0, 0] + codes.append(cur_block_id) + for i in range(len(sequence)): + if sequence[i, 0] == special_token: + if not use_special_block: + codes.append(special_token) + cur_block_id = special_token + + elif sequence[i, 0] == cur_block_id: + if block_compressed: + codes.append(sequence[i, 1]) + else: + codes.extend([sequence[i, 0], sequence[i, 1]]) + + else: + if use_special_block and cur_block_id == special_token: + block_id = sequence[i, 0] + special_block_base + else: + block_id = sequence[i, 0] + codes.extend([block_id, sequence[i, 1]]) + cur_block_id = block_id + + codes = np.array(codes).astype(np.int64) + sequence = codes + + return sequence.flatten()
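To make the block representation concrete, here is the index arithmetic above worked by hand for one quantized vertex, using the module defaults block_size=8 and offset_size=16 (8 blocks of 16 offsets give the 128 positions per axis):

# e.g. a vertex quantized to x=100, y=37, z=5 (7-bit coordinates in [0, 127])
x, y, z = 100, 37, 5
bx, by, bz = x // 16, y // 16, z // 16   # per-axis block index: 6, 2, 0
ox, oy, oz = x % 16, y % 16, z % 16      # per-axis offset inside the block: 4, 5, 5
block_id = bx * 8**2 + by * 8 + bz       # 6*64 + 2*8 + 0 = 400, in [0, 512)
offset_id = ox * 16**2 + oy * 16 + oz    # 4*256 + 5*16 + 5 = 1109, in [0, 4096)
offset_token = offset_id + 8**3          # 1621; offset ids are shifted past the 512 block ids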
+ + +def BPT_serialize(mesh: trimesh.Trimesh): + # serialize a mesh with BPT + + # 1. patchify faces into patches + sequence = patchified_mesh(mesh, special_token=-2) + + # 2. convert coordinates to block-wise indexes + codes = get_block_representation( + sequence, block_size=8, offset_size=16, + block_compressed=True, special_token=-2, use_special_block=True + ) + return codes + + +def decode_block(sequence, compressed=True, block_size=8, offset_size=16): + + # decode from the compressed representation + if compressed: + res = [] + res_block = 0 + for token_id in range(len(sequence)): + if block_size**3 + offset_size**3 > sequence[token_id] >= block_size**3: + res.append([res_block, sequence[token_id]]) + elif block_size**3 > sequence[token_id] >= 0: + res_block = sequence[token_id] + else: + print('[Warning] too large offset idx!', token_id, sequence[token_id]) + sequence = np.array(res) + + block_id, offset_id = np.array_split(sequence, 2, axis=-1) + + # from flattened block/offset ids back to xyz + coords = [] + offset_id -= block_size**3 + for i in [2, 1, 0]: + axis = (block_id // block_size**i) * offset_size + (offset_id // offset_size**i) + block_id %= block_size**i + offset_id %= offset_size**i + coords.append(axis) + + coords = np.concatenate(coords, axis=-1) # (nf 3) + + # back to continuous space + coords = undiscretize(coords) + + return coords + + +def BPT_deserialize(sequence, block_size=8, offset_size=16, compressed=True, special_token=-2, use_special_block=True): + # decode codes back to coordinates + special_block_base = block_size**3 + offset_size**3 + start_idx = 0 + vertices = [] + for i in range(len(sequence)): + sub_seq = [] + if not use_special_block and (sequence[i] == special_token or i == len(sequence) - 1): + sub_seq = sequence[start_idx:i] + sub_seq = decode_block(sub_seq, compressed=compressed, block_size=block_size, offset_size=offset_size) + start_idx = i + 1 + + elif use_special_block and \ + (special_block_base <= sequence[i] < special_block_base + block_size**3 or i == len(sequence)-1): + if i != 0: + sub_seq = sequence[start_idx:i]
if i != len(sequence) - 1 else sequence[start_idx: i+1] + if special_block_base <= sub_seq[0] < special_block_base + block_size**3: + sub_seq[0] -= special_block_base + sub_seq = decode_block(sub_seq, compressed=compressed, block_size=block_size, offset_size=offset_size) + start_idx = i + + if len(sub_seq): + center, sub_seq = sub_seq[0], sub_seq[1:] + for j in range(len(sub_seq) - 1): + vertices.extend([center.reshape(1, 3), sub_seq[j].reshape(1, 3), sub_seq[j+1].reshape(1, 3)]) + + # (nf, 3) + return np.concatenate(vertices, axis=0) + + +if __name__ == '__main__': + # a simple demo for serialize and deserialize mesh with bpt + from data_utils import load_process_mesh, to_mesh + import torch + mesh = load_process_mesh('/path/to/your/mesh', quantization_bits=7) + mesh['faces'] = np.array(mesh['faces']) + mesh = to_mesh(mesh['vertices'], mesh['faces'], transpose=True) + mesh.export('gt.obj') + codes = BPT_serialize(mesh) + coordinates = BPT_deserialize(codes) + faces = torch.arange(1, len(coordinates) + 1).view(-1, 3) + mesh = to_mesh(coordinates, faces, transpose=False, post_process=False) + mesh.export('reconstructed.obj') diff --git a/hy3dgen/shapegen/bpt/requirements.txt b/hy3dgen/shapegen/bpt/requirements.txt new file mode 100644 index 0000000..3769a05 --- /dev/null +++ b/hy3dgen/shapegen/bpt/requirements.txt @@ -0,0 +1,30 @@ +meshgpt_pytorch==0.6.7 +pytorch-custom-utils==0.0.21 +accelerate>=0.25.0 +beartype +classifier-free-guidance-pytorch==0.5.1 +einops>=0.7.0 +ema-pytorch +pytorch-warmup +torch_geometric +torchtyping +vector-quantize-pytorch==1.12.8 +x-transformers==1.26.6 +tqdm +matplotlib +wandb +pyrr +trimesh +opencv-python +pyrender +open3d-python +easydict +chardet +deepspeed +omegaconf +scikit-image +setuptools +pytorch_lightning +mesh2sdf +numpy +point-cloud-utils \ No newline at end of file diff --git a/hy3dgen/shapegen/bpt/utils.py b/hy3dgen/shapegen/bpt/utils.py new file mode 100644 index 0000000..48a5101 --- /dev/null +++ b/hy3dgen/shapegen/bpt/utils.py @@ -0,0 +1,86 @@ +import trimesh +import numpy as np +from x_transformers.autoregressive_wrapper import top_p, top_k + + +class Dataset: + ''' + A toy dataset for inference + ''' + def __init__(self, input_type, input_list): + super().__init__() + self.data = [] + if input_type == 'pc_normal': + for input_path in input_list: + # load npy + cur_data = np.load(input_path) + # sample 4096 + assert cur_data.shape[0] >= 4096, "input pc_normal should have at least 4096 points" + idx = np.random.choice(cur_data.shape[0], 4096, replace=False) + cur_data = cur_data[idx] + self.data.append({'pc_normal': cur_data, 'uid': input_path.split('/')[-1].split('.')[0]}) + + elif input_type == 'mesh': + mesh_list, pc_list = [], [] + for input_path in input_list: + # sample point cloud and normal from mesh + cur_data = trimesh.load(input_path, force='mesh') + cur_data = apply_normalize(cur_data) + mesh_list.append(cur_data) + pc_list.append(sample_pc(cur_data, pc_num=4096, with_normal=True)) + + for input_path, cur_data in zip(input_list, pc_list): + self.data.append({'pc_normal': cur_data, 'uid': input_path.split('/')[-1].split('.')[0]}) + + print(f"dataset total data samples: {len(self.data)}") + + def __len__(self): + return len(self.data) + + def __getitem__(self, idx): + data_dict = {} + data_dict['pc_normal'] = self.data[idx]['pc_normal'] + data_dict['uid'] = self.data[idx]['uid'] + + return data_dict + + +def joint_filter(logits, k = 50, p=0.95): + logits = top_k(logits, k = k) + logits = top_p(logits, thres = p) + return logits + + 
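joint_filter chains the two standard truncation heuristics: logits outside the top-k are masked first, then nucleus (top-p) filtering is applied to the survivors. It matches how MeshTransformer.generate invokes filter_logits_fn(logits, **filter_kwargs) on the last-step logits before sampling; a sketch reusing the model and pc_normal names from the earlier sketches:

codes = model.generate(
    pc=pc_normal,
    filter_logits_fn=joint_filter,       # top-k, then top-p
    filter_kwargs=dict(k=50, p=0.95),
    temperature=0.5,
    return_codes=True,
)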
+ +def apply_normalize(mesh): + ''' + normalize the mesh so its longest side fits within [-1, 1], with a 0.95 margin + ''' + bbox = mesh.bounds + center = (bbox[1] + bbox[0]) / 2 + scale = (bbox[1] - bbox[0]).max() + + mesh.apply_translation(-center) + mesh.apply_scale(1 / scale * 2 * 0.95) + + return mesh + + +def sample_pc(input_mesh, pc_num, with_normal=False): + mesh = apply_normalize(input_mesh) + + if not with_normal: + points, _ = mesh.sample(pc_num, return_index=True) + return points + + # oversample, then attach the face normal of each sampled point + points, face_idx = mesh.sample(50000, return_index=True) + normals = mesh.face_normals[face_idx] + pc_normal = np.concatenate([points, normals], axis=-1, dtype=np.float16) + + # randomly subsample the point cloud down to pc_num points + ind = np.random.choice(pc_normal.shape[0], pc_num, replace=False) + pc_normal = pc_normal[ind] + + return pc_normal
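Putting the pieces of this patch together, a hedged end-to-end inference sketch; the input path is hypothetical, the imports assume the bpt package layout above, and the pad cleanup mirrors what generate() leaves in the code tensor:

import torch
from model.model import MeshTransformer          # assumed layout: hy3dgen/shapegen/bpt/model
from model.serializaiton import BPT_deserialize
from utils import Dataset, joint_filter

dataset = Dataset(input_type='mesh', input_list=['/path/to/input.obj'])  # hypothetical path
model = MeshTransformer().eval()                 # load trained weights in practice
pc = torch.from_numpy(dataset[0]['pc_normal']).float().unsqueeze(0)      # (1, 4096, 6)
with torch.no_grad():
    codes = model.generate(pc=pc, filter_logits_fn=joint_filter,
                           filter_kwargs=dict(k=50, p=0.95), return_codes=True)
seq = codes[0].cpu().numpy()
seq = seq[seq >= 0]                              # drop padding; eos was already masked to pad_id
coords = BPT_deserialize(seq)                    # (num_faces * 3, 3) continuous coordinates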