Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/extern
diff options
context:
space:
mode:
Diffstat (limited to 'extern')
-rw-r--r--extern/mantaflow/LICENSE222
-rw-r--r--extern/mantaflow/README.md11
-rw-r--r--extern/mantaflow/UPDATE.sh102
-rw-r--r--extern/mantaflow/dependencies/cnpy/LICENSE21
-rw-r--r--extern/mantaflow/dependencies/cnpy/cnpy.cpp385
-rw-r--r--extern/mantaflow/dependencies/cnpy/cnpy.h310
-rw-r--r--extern/mantaflow/helper/pwrapper/manta.h31
-rw-r--r--extern/mantaflow/helper/pwrapper/numpyWrap.cpp132
-rw-r--r--extern/mantaflow/helper/pwrapper/numpyWrap.h86
-rw-r--r--extern/mantaflow/helper/pwrapper/pclass.cpp220
-rw-r--r--extern/mantaflow/helper/pwrapper/pclass.h126
-rw-r--r--extern/mantaflow/helper/pwrapper/pconvert.cpp568
-rw-r--r--extern/mantaflow/helper/pwrapper/pconvert.h251
-rw-r--r--extern/mantaflow/helper/pwrapper/pvec3.cpp414
-rw-r--r--extern/mantaflow/helper/pwrapper/pythonInclude.h48
-rw-r--r--extern/mantaflow/helper/pwrapper/registry.cpp784
-rw-r--r--extern/mantaflow/helper/pwrapper/registry.h106
-rw-r--r--extern/mantaflow/helper/util/integrator.h79
-rw-r--r--extern/mantaflow/helper/util/interpol.h324
-rw-r--r--extern/mantaflow/helper/util/interpolHigh.h204
-rw-r--r--extern/mantaflow/helper/util/matrixbase.h394
-rw-r--r--extern/mantaflow/helper/util/mcubes.h308
-rw-r--r--extern/mantaflow/helper/util/quaternion.h103
-rw-r--r--extern/mantaflow/helper/util/randomstream.h429
-rw-r--r--extern/mantaflow/helper/util/rcmatrix.h1112
-rw-r--r--extern/mantaflow/helper/util/simpleimage.cpp312
-rw-r--r--extern/mantaflow/helper/util/simpleimage.h205
-rw-r--r--extern/mantaflow/helper/util/solvana.h214
-rw-r--r--extern/mantaflow/helper/util/vector4d.cpp50
-rw-r--r--extern/mantaflow/helper/util/vector4d.h515
-rw-r--r--extern/mantaflow/helper/util/vectorbase.cpp49
-rw-r--r--extern/mantaflow/helper/util/vectorbase.h679
-rw-r--r--extern/mantaflow/preprocessed/commonkernels.h1300
-rw-r--r--extern/mantaflow/preprocessed/commonkernels.h.reg.cpp13
-rw-r--r--extern/mantaflow/preprocessed/conjugategrad.cpp719
-rw-r--r--extern/mantaflow/preprocessed/conjugategrad.h479
-rw-r--r--extern/mantaflow/preprocessed/conjugategrad.h.reg.cpp13
-rw-r--r--extern/mantaflow/preprocessed/edgecollapse.cpp700
-rw-r--r--extern/mantaflow/preprocessed/edgecollapse.h51
-rw-r--r--extern/mantaflow/preprocessed/edgecollapse.h.reg.cpp13
-rw-r--r--extern/mantaflow/preprocessed/fastmarch.cpp1200
-rw-r--r--extern/mantaflow/preprocessed/fastmarch.h241
-rw-r--r--extern/mantaflow/preprocessed/fastmarch.h.reg.cpp13
-rw-r--r--extern/mantaflow/preprocessed/fileio/iogrids.cpp1524
-rw-r--r--extern/mantaflow/preprocessed/fileio/iomeshes.cpp490
-rw-r--r--extern/mantaflow/preprocessed/fileio/ioparticles.cpp342
-rw-r--r--extern/mantaflow/preprocessed/fileio/mantaio.h81
-rw-r--r--extern/mantaflow/preprocessed/fileio/mantaio.h.reg.cpp13
-rw-r--r--extern/mantaflow/preprocessed/fluidsolver.cpp397
-rw-r--r--extern/mantaflow/preprocessed/fluidsolver.h395
-rw-r--r--extern/mantaflow/preprocessed/fluidsolver.h.reg.cpp70
-rw-r--r--extern/mantaflow/preprocessed/general.cpp167
-rw-r--r--extern/mantaflow/preprocessed/general.h247
-rw-r--r--extern/mantaflow/preprocessed/general.h.reg.cpp13
-rw-r--r--extern/mantaflow/preprocessed/gitinfo.h3
-rw-r--r--extern/mantaflow/preprocessed/grid.cpp2939
-rw-r--r--extern/mantaflow/preprocessed/grid.h2260
-rw-r--r--extern/mantaflow/preprocessed/grid.h.reg.cpp246
-rw-r--r--extern/mantaflow/preprocessed/grid4d.cpp1798
-rw-r--r--extern/mantaflow/preprocessed/grid4d.h1558
-rw-r--r--extern/mantaflow/preprocessed/grid4d.h.reg.cpp204
-rw-r--r--extern/mantaflow/preprocessed/kernel.cpp61
-rw-r--r--extern/mantaflow/preprocessed/kernel.h99
-rw-r--r--extern/mantaflow/preprocessed/kernel.h.reg.cpp13
-rw-r--r--extern/mantaflow/preprocessed/levelset.cpp876
-rw-r--r--extern/mantaflow/preprocessed/levelset.h245
-rw-r--r--extern/mantaflow/preprocessed/levelset.h.reg.cpp32
-rw-r--r--extern/mantaflow/preprocessed/mesh.cpp2733
-rw-r--r--extern/mantaflow/preprocessed/mesh.h1690
-rw-r--r--extern/mantaflow/preprocessed/mesh.h.reg.cpp239
-rw-r--r--extern/mantaflow/preprocessed/movingobs.cpp112
-rw-r--r--extern/mantaflow/preprocessed/movingobs.h164
-rw-r--r--extern/mantaflow/preprocessed/movingobs.h.reg.cpp26
-rw-r--r--extern/mantaflow/preprocessed/multigrid.cpp1857
-rw-r--r--extern/mantaflow/preprocessed/multigrid.h186
-rw-r--r--extern/mantaflow/preprocessed/multigrid.h.reg.cpp13
-rw-r--r--extern/mantaflow/preprocessed/noisefield.cpp325
-rw-r--r--extern/mantaflow/preprocessed/noisefield.h635
-rw-r--r--extern/mantaflow/preprocessed/noisefield.h.reg.cpp60
-rw-r--r--extern/mantaflow/preprocessed/particle.cpp1620
-rw-r--r--extern/mantaflow/preprocessed/particle.h2582
-rw-r--r--extern/mantaflow/preprocessed/particle.h.reg.cpp437
-rw-r--r--extern/mantaflow/preprocessed/plugin/advection.cpp1521
-rw-r--r--extern/mantaflow/preprocessed/plugin/apic.cpp496
-rw-r--r--extern/mantaflow/preprocessed/plugin/extforces.cpp1559
-rw-r--r--extern/mantaflow/preprocessed/plugin/fire.cpp435
-rw-r--r--extern/mantaflow/preprocessed/plugin/flip.cpp2819
-rw-r--r--extern/mantaflow/preprocessed/plugin/fluidguiding.cpp802
-rw-r--r--extern/mantaflow/preprocessed/plugin/initplugins.cpp2317
-rw-r--r--extern/mantaflow/preprocessed/plugin/kepsilon.cpp578
-rw-r--r--extern/mantaflow/preprocessed/plugin/meshplugins.cpp780
-rw-r--r--extern/mantaflow/preprocessed/plugin/pressure.cpp1511
-rw-r--r--extern/mantaflow/preprocessed/plugin/ptsplugins.cpp502
-rw-r--r--extern/mantaflow/preprocessed/plugin/secondaryparticles.cpp3065
-rw-r--r--extern/mantaflow/preprocessed/plugin/surfaceturbulence.cpp2189
-rw-r--r--extern/mantaflow/preprocessed/plugin/vortexplugins.cpp695
-rw-r--r--extern/mantaflow/preprocessed/plugin/waveletturbulence.cpp1292
-rw-r--r--extern/mantaflow/preprocessed/plugin/waves.cpp483
-rw-r--r--extern/mantaflow/preprocessed/python/defines.py11
-rw-r--r--extern/mantaflow/preprocessed/python/defines.py.reg.cpp24
-rw-r--r--extern/mantaflow/preprocessed/registration.cpp382
-rw-r--r--extern/mantaflow/preprocessed/shapes.cpp1010
-rw-r--r--extern/mantaflow/preprocessed/shapes.h665
-rw-r--r--extern/mantaflow/preprocessed/shapes.h.reg.cpp73
-rw-r--r--extern/mantaflow/preprocessed/test.cpp133
-rw-r--r--extern/mantaflow/preprocessed/timing.cpp128
-rw-r--r--extern/mantaflow/preprocessed/timing.h157
-rw-r--r--extern/mantaflow/preprocessed/timing.h.reg.cpp24
-rw-r--r--extern/mantaflow/preprocessed/turbulencepart.cpp288
-rw-r--r--extern/mantaflow/preprocessed/turbulencepart.h210
-rw-r--r--extern/mantaflow/preprocessed/turbulencepart.h.reg.cpp89
-rw-r--r--extern/mantaflow/preprocessed/vortexpart.cpp251
-rw-r--r--extern/mantaflow/preprocessed/vortexpart.h138
-rw-r--r--extern/mantaflow/preprocessed/vortexpart.h.reg.cpp76
-rw-r--r--extern/mantaflow/preprocessed/vortexsheet.cpp116
-rw-r--r--extern/mantaflow/preprocessed/vortexsheet.h251
-rw-r--r--extern/mantaflow/preprocessed/vortexsheet.h.reg.cpp26
117 files changed, 65384 insertions, 0 deletions
diff --git a/extern/mantaflow/LICENSE b/extern/mantaflow/LICENSE
new file mode 100644
index 00000000000..a5e3b509f7d
--- /dev/null
+++ b/extern/mantaflow/LICENSE
@@ -0,0 +1,222 @@
+
+Copyright 2018, the mantaflow team. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+-------------------------------------------------------------------------
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+-------------------------------------------------------------------------
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+------------------------------------------------------------------------------
+
+APPENDIX: HOW TO APPLY THE APACHE LICENSE TO YOUR WORK
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification
+within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
diff --git a/extern/mantaflow/README.md b/extern/mantaflow/README.md
new file mode 100644
index 00000000000..d5753d07689
--- /dev/null
+++ b/extern/mantaflow/README.md
@@ -0,0 +1,11 @@
+# Mantaflow #
+
+Mantaflow is an open-source framework targeted at fluid simulation research in Computer Graphics.
+Its parallelized C++ solver core, python scene definition interface and plugin system allow for quickly prototyping and testing new algorithms.
+
+In addition, it provides a toolbox of examples for deep learning experiments with fluids. E.g., it contains examples
+how to build convolutional neural network setups in conjunction with the [tensorflow framework](https://www.tensorflow.org).
+
+For more information on how to install, run and code with Mantaflow, please head over to our home page at
+[http://mantaflow.com](http://mantaflow.com)
+
diff --git a/extern/mantaflow/UPDATE.sh b/extern/mantaflow/UPDATE.sh
new file mode 100644
index 00000000000..28d96cdf6d8
--- /dev/null
+++ b/extern/mantaflow/UPDATE.sh
@@ -0,0 +1,102 @@
+#!/bin/bash
+#
+# ========================================================================================
+# UPDATING MANTAFLOW INSIDE BLENDER
+# ========================================================================================
+
+# ==================== 1) ENVIRONMENT SETUP =============================================
+
+# YOUR INSTALLATION PATHS GO HERE:
+MANTA_INSTALLATION=/Users/sebbas/Developer/Mantaflow/mantaflowDevelop
+BLENDER_INSTALLATION=/Users/sebbas/Developer/Blender/fluid-mantaflow
+
+# Try to check out Mantaflow repository before building?
+CLEAN_REPOSITORY=0
+
+# Choose which multithreading platform to use for Mantaflow preprocessing
+USE_OMP=0
+USE_TBB=1
+
+if [[ "$USE_OMP" -eq "1" && "$USE_TBB" -eq "1" ]]; then
+ echo "Cannot build Mantaflow for OpenMP and TBB at the same time"
+ exit 1
+elif [[ "$USE_OMP" -eq "0" && "$USE_TBB" -eq "0" ]]; then
+ echo "WARNING: Building Mantaflow without multithreading"
+else
+ if [[ "$USE_OMP" -eq "1" ]]; then
+ echo "Building Mantaflow with OpenMP multithreading"
+ elif [[ "$USE_TBB" -eq "1" ]]; then
+ echo "Building Mantaflow with TBB multithreading"
+ fi
+fi
+
+# ==================== 2) BUILD MANTAFLOW ================================================
+
+# For OpenMP, we need a non-default compiler to build Mantaflow on OSX
+if [[ "$USE_OMP" -eq "1" && "$OSTYPE" == "darwin"* ]]; then
+ export CC=/usr/local/opt/llvm/bin/clang
+ export CXX=/usr/local/opt/llvm/bin/clang++
+ export LDFLAGS=-L/usr/local/opt/llvm/lib
+fi
+
+cd $MANTA_INSTALLATION
+
+# Check-out manta repo from git?
+if [[ "$CLEAN_REPOSITORY" -eq "1" ]]; then
+ if cd mantaflowgit/; then git pull; else git clone git@bitbucket.org:thunil/mantaflowgit.git; cd mantaflowgit; fi
+ git checkout develop
+fi
+
+MANTA_BUILD_PATH=$MANTA_INSTALLATION/mantaflowgit/build_blender/
+mkdir -p $MANTA_BUILD_PATH
+cd $MANTA_BUILD_PATH
+cmake .. -DGUI=OFF -DOPENMP=$USE_OMP -DTBB=$USE_TBB -DBLENDER=ON -DPREPDEBUG=ON && make -j8
+
+# ==================== 3) COPY MANTAFLOW FILES TO BLENDER ROOT ===========================
+
+mkdir -p $BLENDER_INSTALLATION/blender/tmp/dependencies/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/dependencies/cnpy "$_"
+mkdir -p $BLENDER_INSTALLATION/blender/tmp/helper/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/source/util "$_"
+mkdir -p $BLENDER_INSTALLATION/blender/tmp/helper/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/source/pwrapper "$_"
+mkdir -p $BLENDER_INSTALLATION/blender/tmp/preprocessed/ && cp -Rf $MANTA_INSTALLATION/mantaflowgit/build_blender/pp/source/. "$_"
+
+# Remove some files that are not needed in Blender
+rm $BLENDER_INSTALLATION/blender/tmp/dependencies/cnpy/example1.cpp
+rm $BLENDER_INSTALLATION/blender/tmp/helper/pwrapper/pymain.cpp
+rm $BLENDER_INSTALLATION/blender/tmp/preprocessed/*.reg
+rm $BLENDER_INSTALLATION/blender/tmp/preprocessed/python/*.reg
+rm $BLENDER_INSTALLATION/blender/tmp/preprocessed/fileio/*.reg
+
+# ==================== 4) CLANG-FORMAT ===================================================
+
+cd $BLENDER_INSTALLATION/blender/tmp/
+
+echo "Applying clang format to Mantaflow source files"
+find . -iname *.h -o -iname *.cpp | xargs clang-format --verbose -i -style=file
+find . -iname *.h -o -iname *.cpp | xargs dos2unix --verbose
+
+# ==================== 5) MOVE MANTAFLOW FILES TO EXTERN/ ================================
+
+BLENDER_MANTA_EXTERN=$BLENDER_INSTALLATION/blender/extern/mantaflow/
+BLENDER_TMP=$BLENDER_INSTALLATION/blender/tmp
+BLENDER_TMP_DEP=$BLENDER_TMP/dependencies
+BLENDER_TMP_HLP=$BLENDER_TMP/helper
+BLENDER_TMP_PP=$BLENDER_TMP/preprocessed
+
+# Move files from tmp dir to extern/
+cp -Rf $BLENDER_TMP_DEP $BLENDER_MANTA_EXTERN
+cp -Rf $BLENDER_TMP_HLP $BLENDER_MANTA_EXTERN
+cp -Rf $BLENDER_TMP_PP $BLENDER_MANTA_EXTERN
+
+# Copy the Mantaflow license and readme files as well
+cp -Rf $MANTA_INSTALLATION/mantaflowgit/LICENSE $BLENDER_MANTA_EXTERN
+cp -Rf $MANTA_INSTALLATION/mantaflowgit/README.md $BLENDER_MANTA_EXTERN
+
+# Cleanup left over dir
+rm -r $BLENDER_TMP
+
+echo "Successfully copied new Mantaflow files to" $BLENDER_INSTALLATION/blender/extern/mantaflow/
+
+# ==================== 6) CHECK CMAKE SETUP ==============================================
+
+# Make sure that all files copied from Mantaflow are listed in intern/mantaflow/CMakeLists.txt
+# Especially if new source files / plugins were added to Mantaflow.
diff --git a/extern/mantaflow/dependencies/cnpy/LICENSE b/extern/mantaflow/dependencies/cnpy/LICENSE
new file mode 100644
index 00000000000..e60eadbccb3
--- /dev/null
+++ b/extern/mantaflow/dependencies/cnpy/LICENSE
@@ -0,0 +1,21 @@
+The MIT License
+
+Copyright (c) Carl Rogers, 2011
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/extern/mantaflow/dependencies/cnpy/cnpy.cpp b/extern/mantaflow/dependencies/cnpy/cnpy.cpp
new file mode 100644
index 00000000000..7f0ce21ece8
--- /dev/null
+++ b/extern/mantaflow/dependencies/cnpy/cnpy.cpp
@@ -0,0 +1,385 @@
+// Copyright (C) 2011 Carl Rogers
+// Released under MIT License
+// license available in LICENSE file, or at http://www.opensource.org/licenses/mit-license.php
+
+#include "cnpy.h"
+#include <complex>
+#include <cstdlib>
+#include <algorithm>
+#include <cstring>
+#include <iomanip>
+#include <stdint.h>
+#include <stdexcept>
+#include <regex>
+
+char cnpy::BigEndianTest()
+{
+ int x = 1;
+ return (((char *)&x)[0]) ? '<' : '>';
+}
+
+char cnpy::map_type(const std::type_info &t)
+{
+ if (t == typeid(float))
+ return 'f';
+ if (t == typeid(double))
+ return 'f';
+ if (t == typeid(long double))
+ return 'f';
+
+ if (t == typeid(int))
+ return 'i';
+ if (t == typeid(char))
+ return 'i';
+ if (t == typeid(short))
+ return 'i';
+ if (t == typeid(long))
+ return 'i';
+ if (t == typeid(long long))
+ return 'i';
+
+ if (t == typeid(unsigned char))
+ return 'u';
+ if (t == typeid(unsigned short))
+ return 'u';
+ if (t == typeid(unsigned long))
+ return 'u';
+ if (t == typeid(unsigned long long))
+ return 'u';
+ if (t == typeid(unsigned int))
+ return 'u';
+
+ if (t == typeid(bool))
+ return 'b';
+
+ if (t == typeid(std::complex<float>))
+ return 'c';
+ if (t == typeid(std::complex<double>))
+ return 'c';
+ if (t == typeid(std::complex<long double>))
+ return 'c';
+
+ else
+ return '?';
+}
+
+template<> std::vector<char> &cnpy::operator+=(std::vector<char> &lhs, const std::string rhs)
+{
+ lhs.insert(lhs.end(), rhs.begin(), rhs.end());
+ return lhs;
+}
+
+template<> std::vector<char> &cnpy::operator+=(std::vector<char> &lhs, const char *rhs)
+{
+ // write in little endian
+ size_t len = strlen(rhs);
+ lhs.reserve(len);
+ for (size_t byte = 0; byte < len; byte++) {
+ lhs.push_back(rhs[byte]);
+ }
+ return lhs;
+}
+
+void cnpy::parse_npy_header(unsigned char *buffer,
+ size_t &word_size,
+ std::vector<size_t> &shape,
+ bool &fortran_order)
+{
+ // std::string magic_string(buffer,6);
+ uint8_t major_version = *reinterpret_cast<uint8_t *>(buffer + 6);
+ uint8_t minor_version = *reinterpret_cast<uint8_t *>(buffer + 7);
+ uint16_t header_len = *reinterpret_cast<uint16_t *>(buffer + 8);
+ std::string header(reinterpret_cast<char *>(buffer + 9), header_len);
+
+ size_t loc1, loc2;
+
+ // fortran order
+ loc1 = header.find("fortran_order") + 16;
+ fortran_order = (header.substr(loc1, 4) == "True" ? true : false);
+
+ // shape
+ loc1 = header.find("(");
+ loc2 = header.find(")");
+
+ std::regex num_regex("[0-9][0-9]*");
+ std::smatch sm;
+ shape.clear();
+
+ std::string str_shape = header.substr(loc1 + 1, loc2 - loc1 - 1);
+ while (std::regex_search(str_shape, sm, num_regex)) {
+ shape.push_back(std::stoi(sm[0].str()));
+ str_shape = sm.suffix().str();
+ }
+
+ // endian, word size, data type
+ // byte order code | stands for not applicable.
+ // not sure when this applies except for byte array
+ loc1 = header.find("descr") + 9;
+ bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
+ assert(littleEndian);
+
+ // char type = header[loc1+1];
+ // assert(type == map_type(T));
+
+ std::string str_ws = header.substr(loc1 + 2);
+ loc2 = str_ws.find("'");
+ word_size = atoi(str_ws.substr(0, loc2).c_str());
+}
+
+void cnpy::parse_npy_header(FILE *fp,
+ size_t &word_size,
+ std::vector<size_t> &shape,
+ bool &fortran_order)
+{
+ char buffer[256];
+ size_t res = fread(buffer, sizeof(char), 11, fp);
+ if (res != 11)
+ throw std::runtime_error("parse_npy_header: failed fread");
+ std::string header = fgets(buffer, 256, fp);
+ assert(header[header.size() - 1] == '\n');
+
+ size_t loc1, loc2;
+
+ // fortran order
+ loc1 = header.find("fortran_order");
+ if (loc1 == std::string::npos)
+ throw std::runtime_error("parse_npy_header: failed to find header keyword: 'fortran_order'");
+ loc1 += 16;
+ fortran_order = (header.substr(loc1, 4) == "True" ? true : false);
+
+ // shape
+ loc1 = header.find("(");
+ loc2 = header.find(")");
+ if (loc1 == std::string::npos || loc2 == std::string::npos)
+ throw std::runtime_error("parse_npy_header: failed to find header keyword: '(' or ')'");
+
+ std::regex num_regex("[0-9][0-9]*");
+ std::smatch sm;
+ shape.clear();
+
+ std::string str_shape = header.substr(loc1 + 1, loc2 - loc1 - 1);
+ while (std::regex_search(str_shape, sm, num_regex)) {
+ shape.push_back(std::stoi(sm[0].str()));
+ str_shape = sm.suffix().str();
+ }
+
+ // endian, word size, data type
+ // byte order code | stands for not applicable.
+ // not sure when this applies except for byte array
+ loc1 = header.find("descr");
+ if (loc1 == std::string::npos)
+ throw std::runtime_error("parse_npy_header: failed to find header keyword: 'descr'");
+ loc1 += 9;
+ bool littleEndian = (header[loc1] == '<' || header[loc1] == '|' ? true : false);
+ assert(littleEndian);
+
+ // char type = header[loc1+1];
+ // assert(type == map_type(T));
+
+ std::string str_ws = header.substr(loc1 + 2);
+ loc2 = str_ws.find("'");
+ word_size = atoi(str_ws.substr(0, loc2).c_str());
+}
+
+void cnpy::parse_zip_footer(FILE *fp,
+ uint16_t &nrecs,
+ size_t &global_header_size,
+ size_t &global_header_offset)
+{
+ std::vector<char> footer(22);
+ fseek(fp, -22, SEEK_END);
+ size_t res = fread(&footer[0], sizeof(char), 22, fp);
+ if (res != 22)
+ throw std::runtime_error("parse_zip_footer: failed fread");
+
+ uint16_t disk_no, disk_start, nrecs_on_disk, comment_len;
+ disk_no = *(uint16_t *)&footer[4];
+ disk_start = *(uint16_t *)&footer[6];
+ nrecs_on_disk = *(uint16_t *)&footer[8];
+ nrecs = *(uint16_t *)&footer[10];
+ global_header_size = *(uint32_t *)&footer[12];
+ global_header_offset = *(uint32_t *)&footer[16];
+ comment_len = *(uint16_t *)&footer[20];
+
+ assert(disk_no == 0);
+ assert(disk_start == 0);
+ assert(nrecs_on_disk == nrecs);
+ assert(comment_len == 0);
+}
+
+cnpy::NpyArray load_the_npy_file(FILE *fp)
+{
+ std::vector<size_t> shape;
+ size_t word_size;
+ bool fortran_order;
+ cnpy::parse_npy_header(fp, word_size, shape, fortran_order);
+
+ cnpy::NpyArray arr(shape, word_size, fortran_order);
+ size_t nread = fread(arr.data<char>(), 1, arr.num_bytes(), fp);
+ if (nread != arr.num_bytes())
+ throw std::runtime_error("load_the_npy_file: failed fread");
+ return arr;
+}
+
+// Read one deflate-compressed .npy entry: inflate 'compr_bytes' from the
+// current file position into an 'uncompr_bytes' buffer, then parse the
+// embedded npy header and copy out the trailing array payload.
+cnpy::NpyArray load_the_npz_array(FILE *fp, uint32_t compr_bytes, uint32_t uncompr_bytes)
+{
+
+  std::vector<unsigned char> buffer_compr(compr_bytes);
+  std::vector<unsigned char> buffer_uncompr(uncompr_bytes);
+  size_t nread = fread(&buffer_compr[0], 1, compr_bytes, fp);
+  // NOTE(review): error message says "load_the_npy_file" -- copy/paste slip.
+  if (nread != compr_bytes)
+    throw std::runtime_error("load_the_npy_file: failed fread");
+
+  int err;
+  z_stream d_stream;
+
+  d_stream.zalloc = Z_NULL;
+  d_stream.zfree = Z_NULL;
+  d_stream.opaque = Z_NULL;
+  d_stream.avail_in = 0;
+  d_stream.next_in = Z_NULL;
+  // Negative window bits = raw deflate stream (zip entries carry no zlib header).
+  err = inflateInit2(&d_stream, -MAX_WBITS);
+
+  d_stream.avail_in = compr_bytes;
+  d_stream.next_in = &buffer_compr[0];
+  d_stream.avail_out = uncompr_bytes;
+  d_stream.next_out = &buffer_uncompr[0];
+
+  // NOTE(review): the zlib return codes stored in 'err' are never checked, so
+  // a corrupt stream would be silently accepted -- consider validating them.
+  err = inflate(&d_stream, Z_FINISH);
+  err = inflateEnd(&d_stream);
+
+  std::vector<size_t> shape;
+  size_t word_size;
+  bool fortran_order;
+  cnpy::parse_npy_header(&buffer_uncompr[0], word_size, shape, fortran_order);
+
+  cnpy::NpyArray array(shape, word_size, fortran_order);
+
+  // The npy header sits at the front of the buffer; the payload is the tail.
+  size_t offset = uncompr_bytes - array.num_bytes();
+  memcpy(array.data<unsigned char>(), &buffer_uncompr[0] + offset, array.num_bytes());
+
+  return array;
+}
+
+// Load every array of an .npz (zip) archive into a name -> NpyArray map.
+// Walks the local file headers sequentially until the central directory
+// signature is reached.
+cnpy::npz_t cnpy::npz_load(std::string fname)
+{
+  FILE *fp = fopen(fname.c_str(), "rb");
+
+  if (!fp) {
+    throw std::runtime_error("npz_load: Error! Unable to open file " + fname + "!");
+  }
+
+  cnpy::npz_t arrays;
+
+  while (1) {
+    std::vector<char> local_header(30);
+    size_t headerres = fread(&local_header[0], sizeof(char), 30, fp);
+    if (headerres != 30)
+      throw std::runtime_error("npz_load: failed fread");
+
+    // if we've reached the global header, stop reading
+    if (local_header[2] != 0x03 || local_header[3] != 0x04)
+      break;
+
+    // read in the variable name
+    uint16_t name_len = *(uint16_t *)&local_header[26];
+    std::string varname(name_len, ' ');
+    size_t vname_res = fread(&varname[0], sizeof(char), name_len, fp);
+    if (vname_res != name_len)
+      throw std::runtime_error("npz_load: failed fread");
+
+    // erase the lagging .npy
+    // NOTE(review): assumes every entry name ends in ".npy"; a shorter name
+    // would make this erase invalid.
+    varname.erase(varname.end() - 4, varname.end());
+
+    // read in the extra field
+    uint16_t extra_field_len = *(uint16_t *)&local_header[28];
+    if (extra_field_len > 0) {
+      std::vector<char> buff(extra_field_len);
+      size_t efield_res = fread(&buff[0], sizeof(char), extra_field_len, fp);
+      if (efield_res != extra_field_len)
+        throw std::runtime_error("npz_load: failed fread");
+    }
+
+    uint16_t compr_method = *reinterpret_cast<uint16_t *>(&local_header[0] + 8);
+    uint32_t compr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 18);
+    uint32_t uncompr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 22);
+
+    // Method 0 = stored (uncompressed); anything else is treated as deflate.
+    if (compr_method == 0) {
+      arrays[varname] = load_the_npy_file(fp);
+    }
+    else {
+      arrays[varname] = load_the_npz_array(fp, compr_bytes, uncompr_bytes);
+    }
+  }
+
+  fclose(fp);
+  return arrays;
+}
+
+// Load a single named array from an .npz archive. Entries are scanned in
+// file order; non-matching entries are skipped using their compressed size.
+// Throws if the variable is not present.
+cnpy::NpyArray cnpy::npz_load(std::string fname, std::string varname)
+{
+  FILE *fp = fopen(fname.c_str(), "rb");
+
+  if (!fp)
+    throw std::runtime_error("npz_load: Unable to open file " + fname);
+
+  while (1) {
+    std::vector<char> local_header(30);
+    size_t header_res = fread(&local_header[0], sizeof(char), 30, fp);
+    if (header_res != 30)
+      throw std::runtime_error("npz_load: failed fread");
+
+    // if we've reached the global header, stop reading
+    if (local_header[2] != 0x03 || local_header[3] != 0x04)
+      break;
+
+    // read in the variable name
+    uint16_t name_len = *(uint16_t *)&local_header[26];
+    std::string vname(name_len, ' ');
+    size_t vname_res = fread(&vname[0], sizeof(char), name_len, fp);
+    if (vname_res != name_len)
+      throw std::runtime_error("npz_load: failed fread");
+    vname.erase(vname.end() - 4, vname.end());  // erase the lagging .npy
+
+    // read in the extra field
+    uint16_t extra_field_len = *(uint16_t *)&local_header[28];
+    fseek(fp, extra_field_len, SEEK_CUR);  // skip past the extra field
+
+    uint16_t compr_method = *reinterpret_cast<uint16_t *>(&local_header[0] + 8);
+    uint32_t compr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 18);
+    uint32_t uncompr_bytes = *reinterpret_cast<uint32_t *>(&local_header[0] + 22);
+
+    if (vname == varname) {
+      NpyArray array = (compr_method == 0) ? load_the_npy_file(fp) :
+                                             load_the_npz_array(fp, compr_bytes, uncompr_bytes);
+      fclose(fp);
+      return array;
+    }
+    else {
+      // skip past the data
+      // uint32_t size = *(uint32_t*) &local_header[22];
+      uint32_t size = *(uint32_t *)&local_header[18];  // using index 18 instead of 22 enables
+                                                       // support for compressed data
+      fseek(fp, size, SEEK_CUR);
+    }
+  }
+
+  fclose(fp);
+
+  // if we get here, we haven't found the variable in the file
+  throw std::runtime_error("npz_load: Variable name " + varname + " not found in " + fname);
+}
+
+// Load a standalone .npy file into an NpyArray. Throws if the file cannot
+// be opened or the payload read fails (via load_the_npy_file).
+cnpy::NpyArray cnpy::npy_load(std::string fname)
+{
+
+  FILE *fp = fopen(fname.c_str(), "rb");
+
+  if (!fp)
+    throw std::runtime_error("npy_load: Unable to open file " + fname);
+
+  NpyArray arr = load_the_npy_file(fp);
+
+  fclose(fp);
+  return arr;
+}
diff --git a/extern/mantaflow/dependencies/cnpy/cnpy.h b/extern/mantaflow/dependencies/cnpy/cnpy.h
new file mode 100644
index 00000000000..e4b6365cb6f
--- /dev/null
+++ b/extern/mantaflow/dependencies/cnpy/cnpy.h
@@ -0,0 +1,310 @@
+// Copyright (C) 2011 Carl Rogers
+// Released under MIT License
+// license available in LICENSE file, or at http://www.opensource.org/licenses/mit-license.php
+
+#ifndef LIBCNPY_H_
+#define LIBCNPY_H_
+
+#include <string>
+#include <stdexcept>
+#include <sstream>
+#include <vector>
+#include <cstdio>
+#include <typeinfo>
+#include <iostream>
+#include <cassert>
+#include <zlib.h>
+#include <map>
+#include <memory>
+#include <stdint.h>
+#include <numeric>
+
+namespace cnpy {
+
+// In-memory view of a numpy array: a shared, ref-counted byte buffer plus
+// the metadata (shape, element size, memory order) parsed from the header.
+struct NpyArray {
+  NpyArray(const std::vector<size_t> &_shape, size_t _word_size, bool _fortran_order)
+      : shape(_shape), word_size(_word_size), fortran_order(_fortran_order)
+  {
+    // Allocate num_elements * word_size bytes up front.
+    num_vals = 1;
+    for (size_t i = 0; i < shape.size(); i++)
+      num_vals *= shape[i];
+    data_holder = std::shared_ptr<std::vector<char>>(new std::vector<char>(num_vals * word_size));
+  }
+
+  // Empty array; data_holder stays null until assigned from a loaded array.
+  NpyArray() : shape(0), word_size(0), fortran_order(0), num_vals(0)
+  {
+  }
+
+  // Reinterpret the raw byte buffer as T. Caller must know the stored dtype.
+  template<typename T> T *data()
+  {
+    return reinterpret_cast<T *>(&(*data_holder)[0]);
+  }
+
+  template<typename T> const T *data() const
+  {
+    return reinterpret_cast<T *>(&(*data_holder)[0]);
+  }
+
+  // Copy the buffer out into a typed vector of num_vals elements.
+  template<typename T> std::vector<T> as_vec() const
+  {
+    const T *p = data<T>();
+    return std::vector<T>(p, p + num_vals);
+  }
+
+  size_t num_bytes() const
+  {
+    return data_holder->size();
+  }
+
+  std::shared_ptr<std::vector<char>> data_holder;  // shared so copies stay cheap
+  std::vector<size_t> shape;                       // dimensions, outermost first
+  size_t word_size;                                // bytes per element
+  bool fortran_order;                              // column-major if true
+  size_t num_vals;                                 // product of shape
+};
+
+// An .npz archive maps entry names to their arrays.
+using npz_t = std::map<std::string, NpyArray>;
+
+char BigEndianTest();
+char map_type(const std::type_info &t);
+template<typename T> std::vector<char> create_npy_header(const std::vector<size_t> &shape);
+void parse_npy_header(FILE *fp,
+                      size_t &word_size,
+                      std::vector<size_t> &shape,
+                      bool &fortran_order);
+void parse_npy_header(unsigned char *buffer,
+                      size_t &word_size,
+                      std::vector<size_t> &shape,
+                      bool &fortran_order);
+void parse_zip_footer(FILE *fp,
+                      uint16_t &nrecs,
+                      size_t &global_header_size,
+                      size_t &global_header_offset);
+npz_t npz_load(std::string fname);
+NpyArray npz_load(std::string fname, std::string varname);
+NpyArray npy_load(std::string fname);
+
+// Append the raw bytes of a POD value to a byte buffer (used to assemble
+// zip/npy headers).
+template<typename T> std::vector<char> &operator+=(std::vector<char> &lhs, const T rhs)
+{
+  // write in little endian
+  for (size_t byte = 0; byte < sizeof(T); byte++) {
+    char val = *((char *)&rhs + byte);
+    lhs.push_back(val);
+  }
+  return lhs;
+}
+
+// String specializations append characters, not object bytes (see cnpy.cpp).
+template<> std::vector<char> &operator+=(std::vector<char> &lhs, const std::string rhs);
+template<> std::vector<char> &operator+=(std::vector<char> &lhs, const char *rhs);
+
+// Save 'data' with the given shape to an .npy file. Mode "w" overwrites;
+// mode "a" appends along the first axis to an existing, compatible file
+// (the header is rewritten in place with the grown shape).
+template<typename T>
+void npy_save(std::string fname,
+              const T *data,
+              const std::vector<size_t> shape,
+              std::string mode = "w")
+{
+  FILE *fp = NULL;
+  std::vector<size_t> true_data_shape;  // if appending, the shape of existing + new data
+
+  if (mode == "a")
+    fp = fopen(fname.c_str(), "r+b");
+
+  if (fp) {
+    // file exists. we need to append to it. read the header, modify the array size
+    size_t word_size;
+    bool fortran_order;
+    parse_npy_header(fp, word_size, true_data_shape, fortran_order);
+    assert(!fortran_order);
+
+    if (word_size != sizeof(T)) {
+      std::cout << "libnpy error: " << fname << " has word size " << word_size
+                << " but npy_save appending data sized " << sizeof(T) << "\n";
+      assert(word_size == sizeof(T));
+    }
+    if (true_data_shape.size() != shape.size()) {
+      std::cout << "libnpy error: npy_save attempting to append misdimensioned data to " << fname
+                << "\n";
+      // NOTE(review): this assert condition is inverted -- inside this branch
+      // the inequality is true, so the assert always passes and the mismatch
+      // is never aborted on; it should read '=='.
+      assert(true_data_shape.size() != shape.size());
+    }
+
+    // Grow the outermost dimension by the appended slab.
+    for (size_t i = 1; i < shape.size(); i++) {
+      if (shape[i] != true_data_shape[i]) {
+        std::cout << "libnpy error: npy_save attempting to append misshaped data to " << fname
+                  << "\n";
+        assert(shape[i] == true_data_shape[i]);
+      }
+    }
+    true_data_shape[0] += shape[0];
+  }
+  else {
+    fp = fopen(fname.c_str(), "wb");
+    true_data_shape = shape;
+  }
+
+  // Rewrite the header at the file start, then append the payload at the end.
+  std::vector<char> header = create_npy_header<T>(true_data_shape);
+  size_t nels = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<size_t>());
+
+  fseek(fp, 0, SEEK_SET);
+  fwrite(&header[0], sizeof(char), header.size(), fp);
+  fseek(fp, 0, SEEK_END);
+  fwrite(data, sizeof(T), nels, fp);
+  fclose(fp);
+}
+
+// Add one array as an uncompressed entry of an .npz (zip) archive. Mode "w"
+// creates a fresh archive; mode "a" inserts a new entry before the existing
+// central directory, then rewrites the global header and footer after it.
+template<typename T>
+void npz_save(std::string zipname,
+              std::string fname,
+              const T *data,
+              const std::vector<size_t> &shape,
+              std::string mode = "w")
+{
+  // first, append a .npy to the fname
+  fname += ".npy";
+
+  // now, on with the show
+  FILE *fp = NULL;
+  uint16_t nrecs = 0;
+  size_t global_header_offset = 0;
+  std::vector<char> global_header;
+
+  if (mode == "a")
+    fp = fopen(zipname.c_str(), "r+b");
+
+  if (fp) {
+    // zip file exists. we need to add a new npy file to it.
+    // first read the footer. this gives us the offset and size of the global header
+    // then read and store the global header.
+    // below, we will write the the new data at the start of the global header then append the
+    // global header and footer below it
+    size_t global_header_size;
+    parse_zip_footer(fp, nrecs, global_header_size, global_header_offset);
+    fseek(fp, global_header_offset, SEEK_SET);
+    global_header.resize(global_header_size);
+    size_t res = fread(&global_header[0], sizeof(char), global_header_size, fp);
+    if (res != global_header_size) {
+      throw std::runtime_error("npz_save: header read error while adding to existing zip");
+    }
+    // Position back so the new entry overwrites the old central directory.
+    fseek(fp, global_header_offset, SEEK_SET);
+  }
+  else {
+    fp = fopen(zipname.c_str(), "wb");
+  }
+
+  std::vector<char> npy_header = create_npy_header<T>(shape);
+
+  size_t nels = std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<size_t>());
+  size_t nbytes = nels * sizeof(T) + npy_header.size();
+
+  // get the CRC of the data to be added
+  uint32_t crc = crc32(0L, (uint8_t *)&npy_header[0], npy_header.size());
+  crc = crc32(crc, (uint8_t *)data, nels * sizeof(T));
+
+  // build the local header
+  std::vector<char> local_header;
+  local_header += "PK";                   // first part of sig
+  local_header += (uint16_t)0x0403;       // second part of sig
+  local_header += (uint16_t)20;           // min version to extract
+  local_header += (uint16_t)0;            // general purpose bit flag
+  local_header += (uint16_t)0;            // compression method
+  local_header += (uint16_t)0;            // file last mod time
+  local_header += (uint16_t)0;            // file last mod date
+  local_header += (uint32_t)crc;          // crc
+  local_header += (uint32_t)nbytes;       // compressed size
+  local_header += (uint32_t)nbytes;       // uncompressed size
+  local_header += (uint16_t)fname.size();  // fname length
+  local_header += (uint16_t)0;            // extra field length
+  local_header += fname;
+
+  // build global header
+  global_header += "PK";             // first part of sig
+  global_header += (uint16_t)0x0201;  // second part of sig
+  global_header += (uint16_t)20;     // version made by
+  // Central-directory entry shares bytes 4..29 with the local header.
+  global_header.insert(global_header.end(), local_header.begin() + 4, local_header.begin() + 30);
+  global_header += (uint16_t)0;  // file comment length
+  global_header += (uint16_t)0;  // disk number where file starts
+  global_header += (uint16_t)0;  // internal file attributes
+  global_header += (uint32_t)0;  // external file attributes
+  global_header += (uint32_t)
+      global_header_offset;  // relative offset of local file header, since it begins where the
+                             // global header used to begin
+  global_header += fname;
+
+  // build footer
+  std::vector<char> footer;
+  footer += "PK";                           // first part of sig
+  footer += (uint16_t)0x0605;               // second part of sig
+  footer += (uint16_t)0;                    // number of this disk
+  footer += (uint16_t)0;                    // disk where footer starts
+  footer += (uint16_t)(nrecs + 1);          // number of records on this disk
+  footer += (uint16_t)(nrecs + 1);          // total number of records
+  footer += (uint32_t)global_header.size();  // nbytes of global headers
+  footer += (uint32_t)(global_header_offset + nbytes +
+                       local_header.size());  // offset of start of global headers, since global
+                                              // header now starts after newly written array
+  footer += (uint16_t)0;  // zip file comment length
+
+  // write everything
+  fwrite(&local_header[0], sizeof(char), local_header.size(), fp);
+  fwrite(&npy_header[0], sizeof(char), npy_header.size(), fp);
+  fwrite(data, sizeof(T), nels, fp);
+  fwrite(&global_header[0], sizeof(char), global_header.size(), fp);
+  fwrite(&footer[0], sizeof(char), footer.size(), fp);
+  fclose(fp);
+}
+
+// Convenience overload: save a 1-D vector to an .npy file.
+template<typename T>
+void npy_save(std::string fname, const std::vector<T> data, std::string mode = "w")
+{
+  std::vector<size_t> shape;
+  shape.push_back(data.size());
+  npy_save(fname, &data[0], shape, mode);
+}
+
+// Convenience overload: save a 1-D vector as one entry of an .npz archive.
+template<typename T>
+void npz_save(std::string zipname,
+              std::string fname,
+              const std::vector<T> data,
+              std::string mode = "w")
+{
+  std::vector<size_t> shape;
+  shape.push_back(data.size());
+  npz_save(zipname, fname, &data[0], shape, mode);
+}
+
+// Build the binary .npy v1.0 header for an array of T with the given shape:
+// magic string, version, and a 16-byte-aligned Python dict literal describing
+// dtype (endianness + type char + size), memory order, and shape.
+template<typename T> std::vector<char> create_npy_header(const std::vector<size_t> &shape)
+{
+
+  std::vector<char> dict;
+  dict += "{'descr': '";
+  dict += BigEndianTest();
+  dict += map_type(typeid(T));
+  dict += std::to_string(sizeof(T));
+  dict += "', 'fortran_order': False, 'shape': (";
+  dict += std::to_string(shape[0]);
+  for (size_t i = 1; i < shape.size(); i++) {
+    dict += ", ";
+    dict += std::to_string(shape[i]);
+  }
+  // A 1-tuple needs a trailing comma to stay a tuple in Python syntax.
+  if (shape.size() == 1)
+    dict += ",";
+  dict += "), }";
+  // pad with spaces so that preamble+dict is modulo 16 bytes. preamble is 10 bytes. dict needs to
+  // end with \n
+  int remainder = 16 - (10 + dict.size()) % 16;
+  dict.insert(dict.end(), remainder, ' ');
+  dict.back() = '\n';
+
+  std::vector<char> header;
+  header += (char)0x93;
+  header += "NUMPY";
+  header += (char)0x01;  // major version of numpy format
+  header += (char)0x00;  // minor version of numpy format
+  header += (uint16_t)dict.size();
+  header.insert(header.end(), dict.begin(), dict.end());
+
+  return header;
+}
+
+} // namespace cnpy
+
+#endif
diff --git a/extern/mantaflow/helper/pwrapper/manta.h b/extern/mantaflow/helper/pwrapper/manta.h
new file mode 100644
index 00000000000..efbca6cc493
--- /dev/null
+++ b/extern/mantaflow/helper/pwrapper/manta.h
@@ -0,0 +1,31 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2014 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Include pwrapper headers
+ *
+ ******************************************************************************/
+
+#ifndef _MANTA_H
+#define _MANTA_H
+
+// Neutralize the code-generator's preprocessor keywords so they won't
+// interfere with autocompletion, syntax checking, etc. in plain C++ tooling.
+#define KERNEL(...) extern int i, j, k, idx, X, Y, Z;
+#define PYTHON(...)
+#define returns(X) extern X;
+#define alias typedef
+
+#include "general.h"
+#include "vectorbase.h"
+#include "vector4d.h"
+#include "registry.h"
+#include "pclass.h"
+#include "pconvert.h"
+#include "fluidsolver.h"
+
+#endif
diff --git a/extern/mantaflow/helper/pwrapper/numpyWrap.cpp b/extern/mantaflow/helper/pwrapper/numpyWrap.cpp
new file mode 100644
index 00000000000..d2ddb21be70
--- /dev/null
+++ b/extern/mantaflow/helper/pwrapper/numpyWrap.cpp
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2017-2018 Steffen Wiewel, Moritz Becher, Rachel Chu
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Convert mantaflow grids to/from numpy arrays
+ *
+ ******************************************************************************/
+
+#include "manta.h"
+#include "pythonInclude.h"
+
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+#include "numpy/arrayobject.h"
+
+namespace Manta {
+
+#if PY_VERSION_HEX < 0x03000000
+// Python 2 variant of the numpy C-API initializer (import_array() returns
+// void there, so it needs this wrapper).
+PyMODINIT_FUNC initNumpy()
+{
+  import_array();
+}
+#endif
+
+// ------------------------------------------------------------------------
+// Class Functions
+// ------------------------------------------------------------------------
+// Adopts the given numpy array object: no INCREF here but the destructor
+// DECREFs, so this presumably takes over a reference the caller owns --
+// TODO confirm against fromPy<PyArrayContainer> below.
+PyArrayContainer::PyArrayContainer(void *_pParentPyArray) : pParentPyArray(_pParentPyArray)
+{
+  ExtractData(pParentPyArray);
+}
+// ------------------------------------------------------------------------
+// Copy shares the same underlying numpy array and adds a reference.
+PyArrayContainer::PyArrayContainer(const PyArrayContainer &_Other)
+    : pParentPyArray(_Other.pParentPyArray)
+{
+  ExtractData(pParentPyArray);
+  Py_INCREF(pParentPyArray);
+}
+// ------------------------------------------------------------------------
+PyArrayContainer::~PyArrayContainer()
+{
+  Py_DECREF(pParentPyArray);
+}
+// ------------------------------------------------------------------------
+// Release the old array, share the new one (self-assignment safe).
+PyArrayContainer &PyArrayContainer::operator=(const PyArrayContainer &_Other)
+{
+  if (this != &_Other) {
+    // DecRef the existing resource
+    Py_DECREF(pParentPyArray);
+
+    // Relink new data
+    pParentPyArray = _Other.pParentPyArray;
+    ExtractData(pParentPyArray);
+    Py_INCREF(pParentPyArray);
+  }
+  return *this;
+}
+// ------------------------------------------------------------------------
+// Cache the array's data pointer, element count, dimensions, and dtype into
+// the container's public members.
+// NOTE(review): the '_pParentPyArray' parameter is unused -- the member
+// 'pParentPyArray' is read instead (callers set it first, so behavior is the
+// same, but the signature is misleading).
+void PyArrayContainer::ExtractData(void *_pParentPyArray)
+{
+  PyArrayObject *pParent = reinterpret_cast<PyArrayObject *>(pParentPyArray);
+
+  int numDims = PyArray_NDIM(pParent);
+  // NOTE(review): casts npy_intp* to long* -- sizes differ on LLP64 (Windows
+  // 64-bit); also TotalSize is an unsigned int and may truncate huge arrays.
+  long *pDims = (long *)PyArray_DIMS(pParent);
+
+  pData = PyArray_DATA(pParent);
+  TotalSize = PyArray_SIZE(pParent);
+  Dims = std::vector<long>(&pDims[0], &pDims[numDims]);
+
+  // Only float32/float64/int arrays are accepted; anything else errors out.
+  int iDataType = PyArray_TYPE(pParent);
+  switch (iDataType) {
+    case NPY_FLOAT:
+      DataType = N_FLOAT;
+      break;
+    case NPY_DOUBLE:
+      DataType = N_DOUBLE;
+      break;
+    case NPY_INT:
+      DataType = N_INT;
+      break;
+    default:
+      errMsg("unknown type of Numpy array");
+      break;
+  }
+}
+
+// ------------------------------------------------------------------------
+// Conversion Functions
+// ------------------------------------------------------------------------
+
+// Convert a Python object into a PyArrayContainer, lazily initializing the
+// numpy C-API on first use. The object must already be a numpy array; it is
+// normalized to a C-contiguous, native-byte-order array view.
+template<> PyArrayContainer fromPy<PyArrayContainer>(PyObject *obj)
+{
+  if (PyArray_API == NULL) {
+    // python 3 uses the return value
+#if PY_VERSION_HEX >= 0x03000000
+    import_array();
+#else
+    initNumpy();
+#endif
+  }
+
+  if (!PyArray_Check(obj)) {
+    errMsg("argument is not an numpy array");
+  }
+
+  // PyArray_CheckFromAny returns a new reference; the container's destructor
+  // DECREFs it, so ownership transfers to the container here.
+  PyArrayObject *obj_p = reinterpret_cast<PyArrayObject *>(
+      PyArray_CheckFromAny(obj,
+                           NULL,
+                           0,
+                           0,
+                           /*NPY_ARRAY_ENSURECOPY*/ NPY_ARRAY_C_CONTIGUOUS |
+                               NPY_ARRAY_ENSUREARRAY | NPY_ARRAY_NOTSWAPPED,
+                           NULL));
+  PyArrayContainer container = PyArrayContainer(obj_p);
+
+  return container;
+}
+
+// template<> PyArrayContainer* fromPyPtr<PyArrayContainer>(PyObject* obj, std::vector<void*>* tmp)
+// {
+// if (!tmp) throw Error("dynamic de-ref not supported for this type");
+// void* ptr = malloc(sizeof(PyArrayContainer));
+// tmp->push_back(ptr);
+
+// *((PyArrayContainer*) ptr) = fromPy<PyArrayContainer>(obj);
+// return (PyArrayContainer*) ptr;
+// }
+} // namespace Manta
diff --git a/extern/mantaflow/helper/pwrapper/numpyWrap.h b/extern/mantaflow/helper/pwrapper/numpyWrap.h
new file mode 100644
index 00000000000..c92a2eaaa97
--- /dev/null
+++ b/extern/mantaflow/helper/pwrapper/numpyWrap.h
@@ -0,0 +1,86 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2017 Steffen Wiewel, Moritz Baecher, Rachel Chu
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Convert mantaflow grids to/from numpy arrays
+ *
+ ******************************************************************************/
+
+#ifdef _PCONVERT_H
+# ifndef _NUMPYCONVERT_H
+# define _NUMPYCONVERT_H
+
+// Numeric type tags for numpy arrays. The values appear to mirror numpy's
+// NPY_TYPES enum ordering -- keep in sync with the numpy headers if updated.
+enum NumpyTypes {
+  N_BOOL = 0,
+  N_BYTE,
+  N_UBYTE,
+  N_SHORT,
+  N_USHORT,
+  N_INT,
+  N_UINT,
+  N_LONG,
+  N_ULONG,
+  N_LONGLONG,
+  N_ULONGLONG,
+  N_FLOAT,
+  N_DOUBLE,
+  N_LONGDOUBLE,
+  N_CFLOAT,
+  N_CDOUBLE,
+  N_CLONGDOUBLE,
+  N_OBJECT = 17,
+  N_STRING,
+  N_UNICODE,
+  N_VOID,
+  /*
+   * New 1.6 types appended, may be integrated
+   * into the above in 2.0.
+   */
+  N_DATETIME,
+  N_TIMEDELTA,
+  N_HALF,
+
+  N_NTYPES,
+  N_NOTYPE,
+  N_CHAR,          /* special flag */
+  N_USERDEF = 256, /* leave room for characters */
+
+  /* The number of types not including the new 1.6 types */
+  N_NTYPES_ABI_COMPATIBLE = 21
+};
+
+namespace Manta {
+// Lightweight, ref-counting wrapper around a numpy array object, exposing
+// its raw data pointer and metadata to C++ code (see numpyWrap.cpp).
+class PyArrayContainer {
+ public:
+  /// Constructors
+  PyArrayContainer(void *_pParentPyArray);
+  PyArrayContainer(const PyArrayContainer &_Other);
+  ~PyArrayContainer();
+  /// Operators
+  PyArrayContainer &operator=(const PyArrayContainer &_Other);
+
+ private:
+  void ExtractData(void *_pParentPyArray);
+
+ public:
+  void *pData;             // raw pointer into the numpy array's storage
+  NumpyTypes DataType;     // element type tag (N_FLOAT/N_DOUBLE/N_INT)
+  unsigned int TotalSize;  // total number of elements
+  std::vector<long> Dims;  // per-dimension sizes
+
+ private:
+  void *pParentPyArray;  // owning reference to the underlying PyArrayObject
+};
+
+// template<> PyArrayContainer* fromPyPtr<PyArrayContainer>(PyObject* obj, std::vector<void*>*
+// tmp);
+template<> PyArrayContainer fromPy<PyArrayContainer>(PyObject *obj);
+}  // namespace Manta
+
+# endif
+#endif
diff --git a/extern/mantaflow/helper/pwrapper/pclass.cpp b/extern/mantaflow/helper/pwrapper/pclass.cpp
new file mode 100644
index 00000000000..a95254ebe11
--- /dev/null
+++ b/extern/mantaflow/helper/pwrapper/pclass.cpp
@@ -0,0 +1,220 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Functions for property setting/getting via python
+ *
+ ******************************************************************************/
+
+#include "pythonInclude.h"
+#include "structmember.h"
+#include "manta.h"
+#include "general.h"
+#include "timing.h"
+
+#ifdef GUI
+# include <QMutex>
+#else
+// No-op stand-in for Qt's QMutex when building without the GUI: locking is
+// only effective in GUI builds.
+class QMutex {
+ public:
+  void lock(){};
+  void unlock(){};
+  bool tryLock()
+  {
+    return true;
+  };
+};
+#endif
+
+using namespace std;
+namespace Manta {
+
+//******************************************************************************
+// Free functions
+
+// Called before a plugin runs: optionally start its timing measurement.
+void pbPreparePlugin(FluidSolver *parent, const string &name, bool doTime)
+{
+  if (doTime)
+    TimingData::instance().start(parent, name);
+}
+
+// Called after a plugin ran: stop timing, refresh the GUI status line, and
+// give unnamed PbClass objects their Python variable names.
+void pbFinalizePlugin(FluidSolver *parent, const string &name, bool doTime)
+{
+  if (doTime)
+    TimingData::instance().stop(parent, name);
+
+  // GUI update, also print name of parent if there's more than one
+  std::ostringstream msg;
+  if (name != "FluidSolver::step") {
+    if (parent && (parent->getNumInstances() > 0))
+      msg << parent->getName() << string(".");
+    msg << name;
+  }
+  updateQtGui(false, 0, 0., msg.str());
+
+  debMsg(name << " done", 3);
+  // name unnamed PbClass Objects from var name
+  PbClass::renameObjects();
+}
+
+// Report an error from wrapper code: log it and raise a Python RuntimeError
+// (only when a non-empty message is given).
+void pbSetError(const string &fn, const string &ex)
+{
+  debMsg("Error in " << fn, 1);
+  if (!ex.empty())
+    PyErr_SetString(PyExc_RuntimeError, ex.c_str());
+}
+
+//******************************************************************************
+// Helpers
+
+// Format a template-argument list as "<A,B,...>"; empty when no args.
+string PbTypeVec::str() const
+{
+  if (T.empty())
+    return "";
+  string s = "<";
+  for (int i = 0; i < (int)T.size(); i++) {
+    s += T[i].str();
+    s += (i != (int)T.size() - 1) ? ',' : '>';
+  }
+  return s;
+}
+// Map Python-side type names onto the C++/manta names; others pass through.
+string PbType::str() const
+{
+  if (S == "float")
+    return "Real";
+  if (S == "manta.vec3")
+    return "Vec3";
+  return S;
+}
+
+//******************************************************************************
+// PbClass
+
+vector<PbClass *> PbClass::mInstances;
+
+PbClass::PbClass(FluidSolver *parent, const string &name, PyObject *obj)
+    : mMutex(NULL), mParent(parent), mPyObject(obj), mName(name), mHidden(false)
+{
+  mMutex = new QMutex();
+}
+
+// Copies share the parent but get a fresh mutex, no Python object, and an
+// "_unnamed" identity (renameObjects() may assign a real name later).
+PbClass::PbClass(const PbClass &a)
+    : mMutex(NULL), mParent(a.mParent), mPyObject(0), mName("_unnamed"), mHidden(false)
+{
+  mMutex = new QMutex();
+}
+
+// Unregister this object from the global instance list before destruction.
+PbClass::~PbClass()
+{
+  for (vector<PbClass *>::iterator it = mInstances.begin(); it != mInstances.end(); ++it) {
+    if (*it == this) {
+      mInstances.erase(it);
+      break;
+    }
+  }
+  delete mMutex;
+}
+
+// Per-object locking (no-ops in non-GUI builds, see the QMutex stub above).
+void PbClass::lock()
+{
+  mMutex->lock();
+}
+void PbClass::unlock()
+{
+  mMutex->unlock();
+}
+bool PbClass::tryLock()
+{
+  return mMutex->tryLock();
+}
+
+// Look up a registered instance by index.
+// NOTE(review): the upper bound check looks off by one -- 'idx > size' lets
+// idx == size through and indexes one past the end; should be '>='.
+PbClass *PbClass::getInstance(int idx)
+{
+  if (idx < 0 || idx > (int)mInstances.size())
+    errMsg("PbClass::getInstance(): invalid index");
+  return mInstances[idx];
+}
+
+int PbClass::getNumInstances()
+{
+  return mInstances.size();
+}
+
+// True if the Python object is the integer 0 (used as a null reference).
+bool PbClass::isNullRef(PyObject *obj)
+{
+  return PyLong_Check(obj) && PyLong_AsDouble(obj) == 0;
+}
+
+bool PbClass::isNoneRef(PyObject *obj)
+{
+  return (obj == Py_None);
+}
+
+// Cross-link this C++ object with its Python counterpart, add it to the
+// global instance list, and apply an optional name="..." keyword argument.
+void PbClass::registerObject(PyObject *obj, PbArgs *args)
+{
+  // cross link
+  Pb::setReference(this, obj);
+  mPyObject = obj;
+
+  mInstances.push_back(this);
+
+  if (args) {
+    string _name = args->getOpt<std::string>("name", -1, "");
+    if (!_name.empty())
+      setName(_name);
+  }
+}
+
+// Instantiate a registered Python-exposed class by name (see registry.cpp).
+PbClass *PbClass::createPyObject(const string &classname,
+                                 const string &name,
+                                 PbArgs &args,
+                                 PbClass *parent)
+{
+  return Pb::createPy(classname, name, args, parent);
+}
+
+// Abort with a clear message when a solver object was built without parent=.
+void PbClass::checkParent()
+{
+  if (getParent() == NULL) {
+    errMsg("New class " + mName + ": no parent given -- specify using parent=xxx !");
+  }
+}
+//! Assign unnamed PbClass objects their Python variable name
+// Scans __main__'s dictionary for variables bound to each unnamed instance's
+// Python object and adopts the variable name. Silently returns if __main__
+// or its dict cannot be obtained.
+void PbClass::renameObjects()
+{
+  PyObject *sys_mod_dict = PyImport_GetModuleDict();
+  PyObject *loc_mod = PyMapping_GetItemString(sys_mod_dict, (char *)"__main__");
+  if (!loc_mod)
+    return;
+  PyObject *locdict = PyObject_GetAttrString(loc_mod, "__dict__");
+  if (!locdict)
+    return;
+
+  // iterate all PbClass instances
+  for (size_t i = 0; i < mInstances.size(); i++) {
+    PbClass *obj = mInstances[i];
+    if (obj->getName().empty()) {
+      // empty, try to find instance in module local dictionary
+
+      PyObject *lkey, *lvalue;
+      Py_ssize_t lpos = 0;
+      while (PyDict_Next(locdict, &lpos, &lkey, &lvalue)) {
+        if (lvalue == obj->mPyObject) {
+          // NOTE(review): PyObject_Str returns a new reference that is never
+          // released here -- presumably a small leak; verify.
+          string varName = fromPy<string>(PyObject_Str(lkey));
+          obj->setName(varName);
+          // cout << "assigning variable name '" << varName << "' to unnamed instance" << endl;
+          break;
+        }
+      }
+    }
+  }
+  Py_DECREF(locdict);
+  Py_DECREF(loc_mod);
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/helper/pwrapper/pclass.h b/extern/mantaflow/helper/pwrapper/pclass.h
new file mode 100644
index 00000000000..b34103ca9a7
--- /dev/null
+++ b/extern/mantaflow/helper/pwrapper/pclass.h
@@ -0,0 +1,126 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2014 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Base class for all Python-exposed classes
+ *
+ ******************************************************************************/
+
+// -----------------------------------------------------------------
+// NOTE:
+// Do not include this file in user code, include "manta.h" instead
+// -----------------------------------------------------------------
+
+#ifdef _MANTA_H
+# ifndef _PTYPE_H
+# define _PTYPE_H
+
+# include <string>
+# include <vector>
+# include <map>
+
+class QMutex;
+
+namespace Manta {
+struct PbClassData;
+class FluidSolver;
+class PbArgs;
+
+// A single Python-side type name (see PbType::str() for the C++ mapping).
+struct PbType {
+  std::string S;
+  std::string str() const;
+};
+// An ordered list of template-argument types, printable as "<A,B,...>".
+struct PbTypeVec {
+  std::vector<PbType> T;
+  std::string str() const;
+};
+
+//! Base class for all classes exposed to Python
+class PbClass {
+ public:
+  PbClass(FluidSolver *parent, const std::string &name = "", PyObject *obj = NULL);
+  PbClass(const PbClass &a);
+  virtual ~PbClass();
+
+  // basic property setter/getters
+  void setName(const std::string &name)
+  {
+    mName = name;
+  }
+  std::string getName() const
+  {
+    return mName;
+  }
+  PyObject *getPyObject() const
+  {
+    return mPyObject;
+  }
+  void registerObject(PyObject *obj, PbArgs *args);
+  FluidSolver *getParent() const
+  {
+    return mParent;
+  }
+  void setParent(FluidSolver *v)
+  {
+    mParent = v;
+  }
+  void checkParent();
+
+  // hidden flag for GUI, debug output
+  inline bool isHidden()
+  {
+    return mHidden;
+  }
+  inline void setHidden(bool v)
+  {
+    mHidden = v;
+  }
+
+  // per-object mutex (no-op without GUI, see pclass.cpp)
+  void lock();
+  void unlock();
+  bool tryLock();
+
+  // PbClass instance registry
+  static int getNumInstances();
+  static PbClass *getInstance(int index);
+  static void renameObjects();
+
+  // converters
+  static bool isNullRef(PyObject *o);
+  static bool isNoneRef(PyObject *o);
+  static PbClass *createPyObject(const std::string &classname,
+                                 const std::string &name,
+                                 PbArgs &args,
+                                 PbClass *parent);
+  inline bool canConvertTo(const std::string &classname)
+  {
+    return Pb::canConvert(mPyObject, classname);
+  }
+
+ protected:
+  QMutex *mMutex;        // guards lock()/unlock()/tryLock()
+  FluidSolver *mParent;  // owning solver, may be NULL until checkParent()
+  PyObject *mPyObject;   // linked Python-side object
+  std::string mName;     // display/script name; "" until renameObjects()
+  bool mHidden;          // hide from GUI/debug listings
+
+  static std::vector<PbClass *> mInstances;  // registry of live instances
+};
+
+//!\cond Register
+
+void pbFinalizePlugin(FluidSolver *parent, const std::string &name, bool doTime = true);
+void pbPreparePlugin(FluidSolver *parent, const std::string &name, bool doTime = true);
+void pbSetError(const std::string &fn, const std::string &ex);
+
+//!\endcond
+
+} // namespace Manta
+
+# endif
+#endif
diff --git a/extern/mantaflow/helper/pwrapper/pconvert.cpp b/extern/mantaflow/helper/pwrapper/pconvert.cpp
new file mode 100644
index 00000000000..c8c92cbf585
--- /dev/null
+++ b/extern/mantaflow/helper/pwrapper/pconvert.cpp
@@ -0,0 +1,568 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Python argument wrappers and conversion tools
+ *
+ ******************************************************************************/
+
+#include "pythonInclude.h"
+#include <sstream>
+#include <algorithm>
+#include "vectorbase.h"
+#include "manta.h"
+
+using namespace std;
+
+//******************************************************************************
+// Explicit definition and instantiation of python object converters
+
+namespace Manta {
+
+extern PyTypeObject PbVec3Type;
+extern PyTypeObject PbVec4Type;
+
+struct PbVec3 {
+ PyObject_HEAD float data[3];
+};
+
+struct PbVec4 {
+ PyObject_HEAD float data[4];
+};
+
+PyObject *getPyNone()
+{
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+PyObject *incref(PyObject *obj)
+{
+ Py_INCREF(obj);
+ return obj;
+}
+
+/*template<> PyObject* toPy<PyObject*>(PyObject* obj) {
+ return obj;
+}*/
+template<> PyObject *toPy<int>(const int &v)
+{
+ return PyLong_FromLong(v);
+}
+/*template<> PyObject* toPy<char*>(const (char*) & val) {
+ return PyUnicode_DecodeLatin1(val,strlen(val),"replace");
+}*/
+template<> PyObject *toPy<string>(const string &val)
+{
+ return PyUnicode_DecodeLatin1(val.c_str(), val.length(), "replace");
+}
+template<> PyObject *toPy<float>(const float &v)
+{
+ return PyFloat_FromDouble(v);
+}
+template<> PyObject *toPy<double>(const double &v)
+{
+ return PyFloat_FromDouble(v);
+}
+template<> PyObject *toPy<bool>(const bool &v)
+{
+ return PyBool_FromLong(v);
+}
+template<> PyObject *toPy<Vec3i>(const Vec3i &v)
+{
+ float x = (float)v.x, y = (float)v.y, z = (float)v.z;
+ return PyObject_CallFunction((PyObject *)&PbVec3Type, (char *)"fff", x, y, z);
+}
+template<> PyObject *toPy<Vec3>(const Vec3 &v)
+{
+ float x = (float)v.x, y = (float)v.y, z = (float)v.z;
+ return PyObject_CallFunction((PyObject *)&PbVec3Type, (char *)"fff", x, y, z);
+}
+template<> PyObject *toPy<Vec4i>(const Vec4i &v)
+{
+ float x = (float)v.x, y = (float)v.y, z = (float)v.z;
+ return PyObject_CallFunction((PyObject *)&PbVec4Type, (char *)"ffff", x, y, z);
+}
+template<> PyObject *toPy<Vec4>(const Vec4 &v)
+{
+ float x = (float)v.x, y = (float)v.y, z = (float)v.z;
+ return PyObject_CallFunction((PyObject *)&PbVec4Type, (char *)"ffff", x, y, z);
+}
+template<> PyObject *toPy<PbClass *>(const PbClass_Ptr &obj)
+{
+ return obj->getPyObject();
+}
+
+template<> float fromPy<float>(PyObject *obj)
+{
+#if PY_MAJOR_VERSION <= 2
+ if (PyInt_Check(obj))
+ return PyInt_AsLong(obj);
+#endif
+ if (PyFloat_Check(obj))
+ return PyFloat_AsDouble(obj);
+ if (PyLong_Check(obj))
+ return PyLong_AsDouble(obj);
+ errMsg("argument is not a float");
+}
+template<> double fromPy<double>(PyObject *obj)
+{
+#if PY_MAJOR_VERSION <= 2
+ if (PyInt_Check(obj))
+ return PyInt_AsLong(obj);
+#endif
+ if (PyFloat_Check(obj))
+ return PyFloat_AsDouble(obj);
+ if (PyLong_Check(obj))
+ return PyLong_AsDouble(obj);
+ errMsg("argument is not a double");
+}
+template<> PyObject *fromPy<PyObject *>(PyObject *obj)
+{
+ return obj;
+}
+template<> int fromPy<int>(PyObject *obj)
+{
+#if PY_MAJOR_VERSION <= 2
+ if (PyInt_Check(obj))
+ return PyInt_AsLong(obj);
+#endif
+ if (PyLong_Check(obj))
+ return PyLong_AsDouble(obj);
+ if (PyFloat_Check(obj)) {
+ double a = PyFloat_AsDouble(obj);
+ if (fabs(a - floor(a + 0.5)) > 1e-5)
+ errMsg("argument is not an int");
+ return (int)(a + 0.5);
+ }
+ errMsg("argument is not an int");
+}
+template<> string fromPy<string>(PyObject *obj)
+{
+ if (PyUnicode_Check(obj))
+ return PyBytes_AsString(PyUnicode_AsLatin1String(obj));
+#if PY_MAJOR_VERSION <= 2
+ else if (PyString_Check(obj))
+ return PyString_AsString(obj);
+#endif
+ else
+ errMsg("argument is not a string");
+}
+template<> const char *fromPy<const char *>(PyObject *obj)
+{
+ if (PyUnicode_Check(obj))
+ return PyBytes_AsString(PyUnicode_AsLatin1String(obj));
+#if PY_MAJOR_VERSION <= 2
+ else if (PyString_Check(obj))
+ return PyString_AsString(obj);
+#endif
+ else
+ errMsg("argument is not a string");
+}
+template<> bool fromPy<bool>(PyObject *obj)
+{
+ if (!PyBool_Check(obj))
+ errMsg("argument is not a boolean");
+ return PyLong_AsLong(obj) != 0;
+}
+template<> Vec3 fromPy<Vec3>(PyObject *obj)
+{
+ if (PyObject_IsInstance(obj, (PyObject *)&PbVec3Type)) {
+ return Vec3(((PbVec3 *)obj)->data);
+ }
+ else if (PyTuple_Check(obj) && PyTuple_Size(obj) == 3) {
+ return Vec3(fromPy<Real>(PyTuple_GetItem(obj, 0)),
+ fromPy<Real>(PyTuple_GetItem(obj, 1)),
+ fromPy<Real>(PyTuple_GetItem(obj, 2)));
+ }
+ errMsg("argument is not a Vec3");
+}
+template<> Vec3i fromPy<Vec3i>(PyObject *obj)
+{
+ if (PyObject_IsInstance(obj, (PyObject *)&PbVec3Type)) {
+ return toVec3iChecked(((PbVec3 *)obj)->data);
+ }
+ else if (PyTuple_Check(obj) && PyTuple_Size(obj) == 3) {
+ return Vec3i(fromPy<int>(PyTuple_GetItem(obj, 0)),
+ fromPy<int>(PyTuple_GetItem(obj, 1)),
+ fromPy<int>(PyTuple_GetItem(obj, 2)));
+ }
+ errMsg("argument is not a Vec3i");
+}
+template<> Vec4 fromPy<Vec4>(PyObject *obj)
+{
+ if (PyObject_IsInstance(obj, (PyObject *)&PbVec4Type)) {
+ return Vec4(((PbVec4 *)obj)->data);
+ }
+ else if (PyTuple_Check(obj) && PyTuple_Size(obj) == 4) {
+ return Vec4(fromPy<Real>(PyTuple_GetItem(obj, 0)),
+ fromPy<Real>(PyTuple_GetItem(obj, 1)),
+ fromPy<Real>(PyTuple_GetItem(obj, 2)),
+ fromPy<Real>(PyTuple_GetItem(obj, 3)));
+ }
+ errMsg("argument is not a Vec4");
+}
+template<> Vec4i fromPy<Vec4i>(PyObject *obj)
+{
+ if (PyObject_IsInstance(obj, (PyObject *)&PbVec4Type)) {
+ return toVec4i(((PbVec4 *)obj)->data);
+ }
+ else if (PyTuple_Check(obj) && PyTuple_Size(obj) == 4) {
+ return Vec4i(fromPy<int>(PyTuple_GetItem(obj, 0)),
+ fromPy<int>(PyTuple_GetItem(obj, 1)),
+ fromPy<int>(PyTuple_GetItem(obj, 2)),
+ fromPy<int>(PyTuple_GetItem(obj, 3)));
+ }
+ errMsg("argument is not a Vec4i");
+}
+template<> PbType fromPy<PbType>(PyObject *obj)
+{
+ PbType pb = {""};
+ if (!PyType_Check(obj))
+ return pb;
+
+ const char *tname = ((PyTypeObject *)obj)->tp_name;
+ pb.S = tname;
+ return pb;
+}
+template<> PbTypeVec fromPy<PbTypeVec>(PyObject *obj)
+{
+ PbTypeVec vec;
+ if (PyType_Check(obj)) {
+ vec.T.push_back(fromPy<PbType>(obj));
+ }
+ else if (PyTuple_Check(obj)) {
+ int sz = PyTuple_Size(obj);
+ for (int i = 0; i < sz; i++)
+ vec.T.push_back(fromPy<PbType>(PyTuple_GetItem(obj, i)));
+ }
+ else
+ errMsg("argument is not a type tuple");
+ return vec;
+}
+
+template<class T> T *tmpAlloc(PyObject *obj, std::vector<void *> *tmp)
+{
+ if (!tmp)
+ throw Error("dynamic de-ref not supported for this type");
+ void *ptr = malloc(sizeof(T));
+ tmp->push_back(ptr);
+
+ *((T *)ptr) = fromPy<T>(obj);
+ return (T *)ptr;
+}
+template<> float *fromPyPtr<float>(PyObject *obj, std::vector<void *> *tmp)
+{
+ return tmpAlloc<float>(obj, tmp);
+}
+template<> double *fromPyPtr<double>(PyObject *obj, std::vector<void *> *tmp)
+{
+ return tmpAlloc<double>(obj, tmp);
+}
+template<> int *fromPyPtr<int>(PyObject *obj, std::vector<void *> *tmp)
+{
+ return tmpAlloc<int>(obj, tmp);
+}
+template<> std::string *fromPyPtr<std::string>(PyObject *obj, std::vector<void *> *tmp)
+{
+ return tmpAlloc<std::string>(obj, tmp);
+}
+template<> bool *fromPyPtr<bool>(PyObject *obj, std::vector<void *> *tmp)
+{
+ return tmpAlloc<bool>(obj, tmp);
+}
+template<> Vec3 *fromPyPtr<Vec3>(PyObject *obj, std::vector<void *> *tmp)
+{
+ return tmpAlloc<Vec3>(obj, tmp);
+}
+template<> Vec3i *fromPyPtr<Vec3i>(PyObject *obj, std::vector<void *> *tmp)
+{
+ return tmpAlloc<Vec3i>(obj, tmp);
+}
+template<> Vec4 *fromPyPtr<Vec4>(PyObject *obj, std::vector<void *> *tmp)
+{
+ return tmpAlloc<Vec4>(obj, tmp);
+}
+template<> Vec4i *fromPyPtr<Vec4i>(PyObject *obj, std::vector<void *> *tmp)
+{
+ return tmpAlloc<Vec4i>(obj, tmp);
+}
+
+template<> bool isPy<float>(PyObject *obj)
+{
+#if PY_MAJOR_VERSION <= 2
+ if (PyInt_Check(obj))
+ return true;
+#endif
+ return PyFloat_Check(obj) || PyLong_Check(obj);
+}
+template<> bool isPy<double>(PyObject *obj)
+{
+#if PY_MAJOR_VERSION <= 2
+ if (PyInt_Check(obj))
+ return true;
+#endif
+ return PyFloat_Check(obj) || PyLong_Check(obj);
+}
+template<> bool isPy<PyObject *>(PyObject *obj)
+{
+ return true;
+}
+template<> bool isPy<int>(PyObject *obj)
+{
+#if PY_MAJOR_VERSION <= 2
+ if (PyInt_Check(obj))
+ return true;
+#endif
+ if (PyLong_Check(obj))
+ return true;
+ if (PyFloat_Check(obj)) {
+ double a = PyFloat_AsDouble(obj);
+ return fabs(a - floor(a + 0.5)) < 1e-5;
+ }
+ return false;
+}
+template<> bool isPy<string>(PyObject *obj)
+{
+ if (PyUnicode_Check(obj))
+ return true;
+#if PY_MAJOR_VERSION <= 2
+ if (PyString_Check(obj))
+ return true;
+#endif
+ return false;
+}
+template<> bool isPy<const char *>(PyObject *obj)
+{
+ if (PyUnicode_Check(obj))
+ return true;
+#if PY_MAJOR_VERSION <= 2
+ if (PyString_Check(obj))
+ return true;
+#endif
+ return false;
+}
+template<> bool isPy<bool>(PyObject *obj)
+{
+ return PyBool_Check(obj);
+}
+template<> bool isPy<Vec3>(PyObject *obj)
+{
+ if (PyObject_IsInstance(obj, (PyObject *)&PbVec3Type))
+ return true;
+ if (PyTuple_Check(obj) && PyTuple_Size(obj) == 3) {
+ return isPy<Real>(PyTuple_GetItem(obj, 0)) && isPy<Real>(PyTuple_GetItem(obj, 1)) &&
+ isPy<Real>(PyTuple_GetItem(obj, 2));
+ }
+ return false;
+}
+template<> bool isPy<Vec3i>(PyObject *obj)
+{
+ if (PyObject_IsInstance(obj, (PyObject *)&PbVec3Type))
+ return true;
+ if (PyTuple_Check(obj) && PyTuple_Size(obj) == 3) {
+ return isPy<int>(PyTuple_GetItem(obj, 0)) && isPy<int>(PyTuple_GetItem(obj, 1)) &&
+ isPy<int>(PyTuple_GetItem(obj, 2));
+ }
+ return false;
+}
+template<> bool isPy<Vec4>(PyObject *obj)
+{
+ if (PyObject_IsInstance(obj, (PyObject *)&PbVec4Type))
+ return true;
+ if (PyTuple_Check(obj) && PyTuple_Size(obj) == 4) {
+ return isPy<Real>(PyTuple_GetItem(obj, 0)) && isPy<Real>(PyTuple_GetItem(obj, 1)) &&
+ isPy<Real>(PyTuple_GetItem(obj, 2)) && isPy<Real>(PyTuple_GetItem(obj, 3));
+ }
+ return false;
+}
+template<> bool isPy<Vec4i>(PyObject *obj)
+{
+ if (PyObject_IsInstance(obj, (PyObject *)&PbVec4Type))
+ return true;
+ if (PyTuple_Check(obj) && PyTuple_Size(obj) == 4) {
+ return isPy<int>(PyTuple_GetItem(obj, 0)) && isPy<int>(PyTuple_GetItem(obj, 1)) &&
+ isPy<int>(PyTuple_GetItem(obj, 2)) && isPy<int>(PyTuple_GetItem(obj, 3));
+ }
+ return false;
+}
+template<> bool isPy<PbType>(PyObject *obj)
+{
+ return PyType_Check(obj);
+}
+
+//******************************************************************************
+// PbArgs class defs
+
+PbArgs PbArgs::EMPTY(NULL, NULL);
+
+PbArgs::PbArgs(PyObject *linarg, PyObject *dict) : mLinArgs(0), mKwds(0)
+{
+ setup(linarg, dict);
+}
+PbArgs::~PbArgs()
+{
+ for (int i = 0; i < (int)mTmpStorage.size(); i++)
+ free(mTmpStorage[i]);
+ mTmpStorage.clear();
+}
+
+void PbArgs::copy(PbArgs &a)
+{
+ mKwds = a.mKwds;
+ mData = a.mData;
+ mLinData = a.mLinData;
+ mLinArgs = a.mLinArgs;
+}
+void PbArgs::clear()
+{
+ mLinArgs = 0;
+ mKwds = 0;
+ mData.clear();
+ mLinData.clear();
+}
+
+PbArgs &PbArgs::operator=(const PbArgs &a)
+{
+ // mLinArgs = 0;
+ // mKwds = 0;
+ return *this;
+}
+
+void PbArgs::setup(PyObject *linarg, PyObject *dict)
+{
+ if (dict) {
+ PyObject *key, *value;
+ Py_ssize_t pos = 0;
+ while (PyDict_Next(dict, &pos, &key, &value)) {
+ DataElement el;
+ el.obj = value;
+ el.visited = false;
+ mData[fromPy<string>(key)] = el;
+ }
+ mKwds = dict;
+ }
+ if (linarg) {
+ size_t len = PyTuple_Size(linarg);
+ for (size_t i = 0; i < len; i++) {
+ DataElement el;
+ el.obj = PyTuple_GetItem(linarg, i);
+ el.visited = false;
+ mLinData.push_back(el);
+ }
+ mLinArgs = linarg;
+ }
+}
+
+void PbArgs::addLinArg(PyObject *obj)
+{
+ DataElement el = {obj, false};
+ mLinData.push_back(el);
+}
+
+void PbArgs::check()
+{
+ if (has("nocheck"))
+ return;
+
+ for (map<string, DataElement>::iterator it = mData.begin(); it != mData.end(); it++) {
+ if (!it->second.visited)
+ errMsg("Argument '" + it->first + "' unknown");
+ }
+ for (size_t i = 0; i < mLinData.size(); i++) {
+ if (!mLinData[i].visited) {
+ stringstream s;
+ s << "Function does not read argument number #" << i;
+ errMsg(s.str());
+ }
+ }
+}
+
+FluidSolver *PbArgs::obtainParent()
+{
+ FluidSolver *solver = getPtrOpt<FluidSolver>("solver", -1, NULL);
+ if (solver != 0)
+ return solver;
+
+ for (map<string, DataElement>::iterator it = mData.begin(); it != mData.end(); it++) {
+ PbClass *obj = Pb::objFromPy(it->second.obj);
+
+ if (obj) {
+ if (solver == NULL)
+ solver = obj->getParent();
+ }
+ }
+ for (vector<DataElement>::iterator it = mLinData.begin(); it != mLinData.end(); it++) {
+ PbClass *obj = Pb::objFromPy(it->obj);
+
+ if (obj) {
+ if (solver == NULL)
+ solver = obj->getParent();
+ }
+ }
+
+ return solver;
+}
+
+void PbArgs::visit(int number, const string &key)
+{
+ if (number >= 0 && number < (int)mLinData.size())
+ mLinData[number].visited = true;
+ map<string, DataElement>::iterator lu = mData.find(key);
+ if (lu != mData.end())
+ lu->second.visited = true;
+}
+
+PyObject *PbArgs::getItem(const std::string &key, bool strict, ArgLocker *lk)
+{
+ map<string, DataElement>::iterator lu = mData.find(key);
+ if (lu == mData.end()) {
+ if (strict)
+ errMsg("Argument '" + key + "' is not defined.");
+ return NULL;
+ }
+ PbClass *pbo = Pb::objFromPy(lu->second.obj);
+ // try to lock
+ if (pbo && lk)
+ lk->add(pbo);
+ return lu->second.obj;
+}
+
+PyObject *PbArgs::getItem(size_t number, bool strict, ArgLocker *lk)
+{
+ if (number >= mLinData.size()) {
+ if (!strict)
+ return NULL;
+ stringstream s;
+ s << "Argument number #" << number << " not specified.";
+ errMsg(s.str());
+ }
+ PbClass *pbo = Pb::objFromPy(mLinData[number].obj);
+ // try to lock
+ if (pbo && lk)
+ lk->add(pbo);
+ return mLinData[number].obj;
+}
+
+//******************************************************************************
+// ArgLocker class defs
+
+void ArgLocker::add(PbClass *p)
+{
+ if (find(locks.begin(), locks.end(), p) == locks.end()) {
+ locks.push_back(p);
+ p->lock();
+ }
+}
+ArgLocker::~ArgLocker()
+{
+ for (size_t i = 0; i < locks.size(); i++)
+ locks[i]->unlock();
+ locks.clear();
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/helper/pwrapper/pconvert.h b/extern/mantaflow/helper/pwrapper/pconvert.h
new file mode 100644
index 00000000000..9c72b8b57b9
--- /dev/null
+++ b/extern/mantaflow/helper/pwrapper/pconvert.h
@@ -0,0 +1,251 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Python argument wrappers and conversion tools
+ *
+ ******************************************************************************/
+
+// -----------------------------------------------------------------
+// NOTE:
+// Do not include this file in user code, include "manta.h" instead
+// -----------------------------------------------------------------
+
+#ifdef _MANTA_H
+# ifndef _PCONVERT_H
+# define _PCONVERT_H
+
+# include <string>
+# include <map>
+# include <vector>
+
+namespace Manta {
+template<class T> class Grid;
+
+//! Locks the given PbClass Arguments until ArgLocker goes out of scope
+struct ArgLocker {
+ void add(PbClass *p);
+ ~ArgLocker();
+ std::vector<PbClass *> locks;
+};
+
+PyObject *getPyNone();
+
+// for PbClass-derived classes
+template<class T> T *fromPyPtr(PyObject *obj, std::vector<void *> *tmp)
+{
+ if (PbClass::isNullRef(obj) || PbClass::isNoneRef(obj))
+ return 0;
+ PbClass *pbo = Pb::objFromPy(obj);
+ const std::string &type = Namify<T>::S;
+ if (!pbo || !(pbo->canConvertTo(type)))
+ throw Error("can't convert argument to " + type + "*");
+ return (T *)(pbo);
+}
+
+template<> float *fromPyPtr<float>(PyObject *obj, std::vector<void *> *tmp);
+template<> double *fromPyPtr<double>(PyObject *obj, std::vector<void *> *tmp);
+template<> int *fromPyPtr<int>(PyObject *obj, std::vector<void *> *tmp);
+template<> std::string *fromPyPtr<std::string>(PyObject *obj, std::vector<void *> *tmp);
+template<> bool *fromPyPtr<bool>(PyObject *obj, std::vector<void *> *tmp);
+template<> Vec3 *fromPyPtr<Vec3>(PyObject *obj, std::vector<void *> *tmp);
+template<> Vec3i *fromPyPtr<Vec3i>(PyObject *obj, std::vector<void *> *tmp);
+template<> Vec4 *fromPyPtr<Vec4>(PyObject *obj, std::vector<void *> *tmp);
+template<> Vec4i *fromPyPtr<Vec4i>(PyObject *obj, std::vector<void *> *tmp);
+
+PyObject *incref(PyObject *obj);
+template<class T> PyObject *toPy(const T &v)
+{
+ PyObject *obj = v.getPyObject();
+ if (obj) {
+ return incref(obj);
+ }
+ T *co = new T(v);
+ const std::string &type = Namify<typename remove_pointers<T>::type>::S;
+ return Pb::copyObject(co, type);
+}
+template<class T> bool isPy(PyObject *obj)
+{
+ if (PbClass::isNullRef(obj) || PbClass::isNoneRef(obj))
+ return false;
+ PbClass *pbo = Pb::objFromPy(obj);
+ const std::string &type = Namify<typename remove_pointers<T>::type>::S;
+ return pbo && pbo->canConvertTo(type);
+}
+
+template<class T> T fromPy(PyObject *obj)
+{
+ throw Error(
+ "Unknown type conversion. Did you pass a PbClass by value? Instead always pass "
+ "grids/particlesystems/etc. by reference or using a pointer.");
+}
+
+// builtin types
+template<> float fromPy<float>(PyObject *obj);
+template<> double fromPy<double>(PyObject *obj);
+template<> int fromPy<int>(PyObject *obj);
+template<> PyObject *fromPy<PyObject *>(PyObject *obj);
+template<> std::string fromPy<std::string>(PyObject *obj);
+template<> const char *fromPy<const char *>(PyObject *obj);
+template<> bool fromPy<bool>(PyObject *obj);
+template<> Vec3 fromPy<Vec3>(PyObject *obj);
+template<> Vec3i fromPy<Vec3i>(PyObject *obj);
+template<> Vec4 fromPy<Vec4>(PyObject *obj);
+template<> Vec4i fromPy<Vec4i>(PyObject *obj);
+template<> PbType fromPy<PbType>(PyObject *obj);
+template<> PbTypeVec fromPy<PbTypeVec>(PyObject *obj);
+
+template<> PyObject *toPy<int>(const int &v);
+template<> PyObject *toPy<std::string>(const std::string &val);
+template<> PyObject *toPy<float>(const float &v);
+template<> PyObject *toPy<double>(const double &v);
+template<> PyObject *toPy<bool>(const bool &v);
+template<> PyObject *toPy<Vec3i>(const Vec3i &v);
+template<> PyObject *toPy<Vec3>(const Vec3 &v);
+template<> PyObject *toPy<Vec4i>(const Vec4i &v);
+template<> PyObject *toPy<Vec4>(const Vec4 &v);
+typedef PbClass *PbClass_Ptr;
+template<> PyObject *toPy<PbClass *>(const PbClass_Ptr &obj);
+
+template<> bool isPy<float>(PyObject *obj);
+template<> bool isPy<double>(PyObject *obj);
+template<> bool isPy<int>(PyObject *obj);
+template<> bool isPy<PyObject *>(PyObject *obj);
+template<> bool isPy<std::string>(PyObject *obj);
+template<> bool isPy<const char *>(PyObject *obj);
+template<> bool isPy<bool>(PyObject *obj);
+template<> bool isPy<Vec3>(PyObject *obj);
+template<> bool isPy<Vec3i>(PyObject *obj);
+template<> bool isPy<Vec4>(PyObject *obj);
+template<> bool isPy<Vec4i>(PyObject *obj);
+template<> bool isPy<PbType>(PyObject *obj);
+
+//! Encapsulation of python arguments
+class PbArgs {
+ public:
+ PbArgs(PyObject *linargs = NULL, PyObject *dict = NULL);
+ ~PbArgs();
+ void setup(PyObject *linargs = NULL, PyObject *dict = NULL);
+
+ void check();
+ FluidSolver *obtainParent();
+
+ inline int numLinArgs()
+ {
+ return mLinData.size();
+ }
+
+ inline bool has(const std::string &key)
+ {
+ return getItem(key, false) != NULL;
+ }
+ inline void deleteItem(const std::string &key)
+ {
+ if (mData.find(key) != mData.end())
+ mData.erase(mData.find(key));
+ }
+
+ inline PyObject *linArgs()
+ {
+ return mLinArgs;
+ }
+ inline PyObject *kwds()
+ {
+ return mKwds;
+ }
+
+ void addLinArg(PyObject *obj);
+
+ template<class T> inline void add(const std::string &key, T arg)
+ {
+ DataElement el = {toPy(arg), false};
+ mData[key] = el;
+ }
+ template<class T> inline T get(const std::string &key, int number = -1, ArgLocker *lk = NULL)
+ {
+ visit(number, key);
+ PyObject *o = getItem(key, false, lk);
+ if (o)
+ return fromPy<T>(o);
+ o = getItem(number, false, lk);
+ if (o)
+ return fromPy<T>(o);
+ errMsg("Argument '" + key + "' is not defined.");
+ }
+ template<class T>
+ inline T getOpt(const std::string &key, int number, T defarg, ArgLocker *lk = NULL)
+ {
+ visit(number, key);
+ PyObject *o = getItem(key, false, lk);
+ if (o)
+ return fromPy<T>(o);
+ if (number >= 0)
+ o = getItem(number, false, lk);
+ return (o) ? fromPy<T>(o) : defarg;
+ }
+ template<class T>
+ inline T *getPtrOpt(const std::string &key, int number, T *defarg, ArgLocker *lk = NULL)
+ {
+ visit(number, key);
+ PyObject *o = getItem(key, false, lk);
+ if (o)
+ return fromPyPtr<T>(o, &mTmpStorage);
+ if (number >= 0)
+ o = getItem(number, false, lk);
+ return o ? fromPyPtr<T>(o, &mTmpStorage) : defarg;
+ }
+ template<class T> inline T *getPtr(const std::string &key, int number = -1, ArgLocker *lk = NULL)
+ {
+ visit(number, key);
+ PyObject *o = getItem(key, false, lk);
+ if (o)
+ return fromPyPtr<T>(o, &mTmpStorage);
+ o = getItem(number, false, lk);
+ if (o)
+ return fromPyPtr<T>(o, &mTmpStorage);
+ errMsg("Argument '" + key + "' is not defined.");
+ }
+
+ // automatic template type deduction
+ template<class T> bool typeCheck(int num, const std::string &name)
+ {
+ PyObject *o = getItem(name, false, 0);
+ if (!o)
+ o = getItem(num, false, 0);
+ return o ? isPy<typename remove_pointers<T>::type>(o) : false;
+ }
+
+ PbArgs &operator=(const PbArgs &a); // dummy
+ void copy(PbArgs &a);
+ void clear();
+ void visit(int num, const std::string &key);
+
+ static PbArgs EMPTY;
+
+ protected:
+ PyObject *getItem(const std::string &key, bool strict, ArgLocker *lk = NULL);
+ PyObject *getItem(size_t number, bool strict, ArgLocker *lk = NULL);
+
+ struct DataElement {
+ PyObject *obj;
+ bool visited;
+ };
+ std::map<std::string, DataElement> mData;
+ std::vector<DataElement> mLinData;
+ PyObject *mLinArgs, *mKwds;
+ std::vector<void *> mTmpStorage;
+};
+
+} // namespace Manta
+
+# if NUMPY == 1
+# include "numpyWrap.h"
+# endif
+
+# endif
+#endif
diff --git a/extern/mantaflow/helper/pwrapper/pvec3.cpp b/extern/mantaflow/helper/pwrapper/pvec3.cpp
new file mode 100644
index 00000000000..69bde2a2ad0
--- /dev/null
+++ b/extern/mantaflow/helper/pwrapper/pvec3.cpp
@@ -0,0 +1,414 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Vec3 class extension for python
+ *
+ ******************************************************************************/
+
+#include "pythonInclude.h"
+#include <string>
+#include <sstream>
+#include "vectorbase.h"
+#include "structmember.h"
+#include "manta.h"
+
+using namespace std;
+
+namespace Manta {
+
+extern PyTypeObject PbVec3Type;
+
+struct PbVec3 {
+ PyObject_HEAD float data[3];
+};
+
+static void PbVec3Dealloc(PbVec3 *self)
+{
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+static PyObject *PbVec3New(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ return type->tp_alloc(type, 0);
+}
+
+static int PbVec3Init(PbVec3 *self, PyObject *args, PyObject *kwds)
+{
+
+ float x1 = numeric_limits<float>::quiet_NaN(), x2 = x1, x3 = x1;
+ if (!PyArg_ParseTuple(args, "|fff", &x1, &x2, &x3))
+ return -1;
+
+ if (!c_isnan(x1)) {
+ self->data[0] = x1;
+ if (!c_isnan(x2) && !c_isnan(x3)) {
+ self->data[1] = x2;
+ self->data[2] = x3;
+ }
+ else {
+ if (!c_isnan(x2) || !c_isnan(x3)) {
+ errMsg("Invalid partial init of vec3");
+ }
+ self->data[1] = x1;
+ self->data[2] = x1;
+ }
+ }
+ else {
+ self->data[0] = 0;
+ self->data[1] = 0;
+ self->data[2] = 0;
+ }
+ return 0;
+}
+
+static PyObject *PbVec3Repr(PbVec3 *self)
+{
+ Manta::Vec3 v(self->data[0], self->data[1], self->data[2]);
+ return PyUnicode_FromFormat(v.toString().c_str());
+}
+
+static PyMemberDef PbVec3Members[] = {
+ {(char *)"x", T_FLOAT, offsetof(PbVec3, data), 0, (char *)"X"},
+ {(char *)"y", T_FLOAT, offsetof(PbVec3, data) + sizeof(float), 0, (char *)"Y"},
+ {(char *)"z", T_FLOAT, offsetof(PbVec3, data) + sizeof(float) * 2, 0, (char *)"Z"},
+ {NULL} // Sentinel
+};
+
+static PyMethodDef PbVec3Methods[] = {
+ //{"name", (PyCFunction)Noddy_name, METH_NOARGS, "Return the name, combining the first and last
+ //name" },
+ {NULL} // Sentinel
+};
+
+// operator overloads
+
+inline PyObject *PbNew(const Vec3 &a)
+{
+ PbVec3 *obj = (PbVec3 *)PbVec3New(&PbVec3Type, 0, 0);
+ obj->data[0] = a.x;
+ obj->data[1] = a.y;
+ obj->data[2] = a.z;
+ return (PyObject *)obj;
+}
+
+#define CONVERTVEC(obj) \
+ Vec3 v##obj; \
+ if (PyObject_TypeCheck(obj, &PbVec3Type)) \
+ v##obj = Vec3(&(((PbVec3 *)obj)->data[0])); \
+ else if (PyFloat_Check(obj)) \
+ v##obj = Vec3(PyFloat_AsDouble(obj)); \
+ else if (PyLong_Check(obj)) \
+ v##obj = Vec3(PyLong_AsDouble(obj)); \
+ else { \
+ Py_INCREF(Py_NotImplemented); \
+ return Py_NotImplemented; \
+ }
+
+#define OPHEADER \
+ if (!PyObject_TypeCheck(a, &PbVec3Type) && !PyObject_TypeCheck(b, &PbVec3Type)) { \
+ Py_INCREF(Py_NotImplemented); \
+ return Py_NotImplemented; \
+ } \
+ CONVERTVEC(a) \
+ CONVERTVEC(b)
+
+#define OPHEADER1 \
+ if (!PyObject_TypeCheck(a, &PbVec3Type)) { \
+ Py_INCREF(Py_NotImplemented); \
+ return Py_NotImplemented; \
+ } \
+ CONVERTVEC(a)
+
+PyObject *PbVec3Add(PyObject *a, PyObject *b)
+{
+ OPHEADER
+ return PbNew(va + vb);
+}
+
+PyObject *PbVec3Sub(PyObject *a, PyObject *b)
+{
+ OPHEADER
+ return PbNew(va - vb);
+}
+
+PyObject *PbVec3Mult(PyObject *a, PyObject *b)
+{
+ OPHEADER
+ return PbNew(va * vb);
+}
+
+PyObject *PbVec3Div(PyObject *a, PyObject *b)
+{
+ OPHEADER
+ return PbNew(va / vb);
+}
+
+PyObject *PbVec3Negative(PyObject *a)
+{
+ OPHEADER1
+ return PbNew(-va);
+}
+
+// numbers are defined subtely different in Py3 (WTF?)
+#if PY_MAJOR_VERSION >= 3
+static PyNumberMethods PbVec3NumberMethods = {
+ (binaryfunc)PbVec3Add, // binaryfunc nb_add;
+ (binaryfunc)PbVec3Sub, // binaryfunc nb_sub;
+ (binaryfunc)PbVec3Mult, // binaryfunc nb_mult;
+ 0, // binaryfunc nb_remainder;
+ 0, // binaryfunc nb_divmod;
+ 0, // ternaryfunc nb_power;
+ (unaryfunc)PbVec3Negative, // unaryfunc nb_negative;
+ 0, // unaryfunc nb_positive;
+ 0, // unaryfunc nb_absolute;
+ 0, // inquiry nb_bool;
+ 0, // unaryfunc nb_invert;
+ 0, // binaryfunc nb_lshift;
+ 0, // binaryfunc nb_rshift;
+ 0, // binaryfunc nb_and;
+ 0, // binaryfunc nb_xor;
+ 0, // binaryfunc nb_or;
+ 0, // unaryfunc nb_int;
+ 0, // void *nb_reserved;
+ 0, // unaryfunc nb_float;
+ 0, // binaryfunc nb_inplace_add;
+ 0, // binaryfunc nb_inplace_subtract;
+ 0, // binaryfunc nb_inplace_multiply;
+ 0, // binaryfunc nb_inplace_remainder;
+ 0, // ternaryfunc nb_inplace_power;
+ 0, // binaryfunc nb_inplace_lshift;
+ 0, // binaryfunc nb_inplace_rshift;
+ 0, // binaryfunc nb_inplace_and;
+ 0, // binaryfunc nb_inplace_xor;
+ 0, // binaryfunc nb_inplace_or;
+
+ 0, // binaryfunc nb_floor_divide;
+ (binaryfunc)PbVec3Div, // binaryfunc nb_true_divide;
+ 0, // binaryfunc nb_inplace_floor_divide;
+ 0, // binaryfunc nb_inplace_true_divide;
+
+ 0 // unaryfunc nb_index;
+};
+#else
+static PyNumberMethods PbVec3NumberMethods = {
+ (binaryfunc)PbVec3Add, // binaryfunc nb_add;
+ (binaryfunc)PbVec3Sub, // binaryfunc nb_sub;
+ (binaryfunc)PbVec3Mult, // binaryfunc nb_mult;
+ 0, // binaryfunc nb_divide;
+ 0, // binaryfunc nb_remainder;
+ 0, // binaryfunc nb_divmod;
+ 0, // ternaryfunc nb_power;
+ (unaryfunc)PbVec3Negative, // unaryfunc nb_negative;
+ 0, // unaryfunc nb_positive;
+ 0, // unaryfunc nb_absolute;
+ 0, // inquiry nb_nonzero;
+ 0, // unaryfunc nb_invert;
+ 0, // binaryfunc nb_lshift;
+ 0, // binaryfunc nb_rshift;
+ 0, // binaryfunc nb_and;
+ 0, // binaryfunc nb_xor;
+ 0, // binaryfunc nb_or;
+ 0, // coercion nb_coerce;
+ 0, // unaryfunc nb_int;
+ 0, // unaryfunc nb_long;
+ 0, // unaryfunc nb_float;
+ 0, // unaryfunc nb_oct;
+ 0, // unaryfunc nb_hex;
+ 0, // binaryfunc nb_inplace_add;
+ 0, // binaryfunc nb_inplace_subtract;
+ 0, // binaryfunc nb_inplace_multiply;
+ 0, // binaryfunc nb_inplace_divide;
+ 0, // binaryfunc nb_inplace_remainder;
+ 0, // ternaryfunc nb_inplace_power;
+ 0, // binaryfunc nb_inplace_lshift;
+ 0, // binaryfunc nb_inplace_rshift;
+ 0, // binaryfunc nb_inplace_and;
+ 0, // binaryfunc nb_inplace_xor;
+ 0, // binaryfunc nb_inplace_or;
+ 0, // binaryfunc nb_floor_divide;
+ (binaryfunc)PbVec3Div, // binaryfunc nb_true_divide;
+ 0, // binaryfunc nb_inplace_floor_divide;
+ 0, // binaryfunc nb_inplace_true_divide;
+ 0, // unaryfunc nb_index;
+};
+#endif
+
+PyTypeObject PbVec3Type = {
+ PyVarObject_HEAD_INIT(NULL, 0) "manta.vec3", /* tp_name */
+ sizeof(PbVec3), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor)PbVec3Dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_reserved */
+ (reprfunc)PbVec3Repr, /* tp_repr */
+ &PbVec3NumberMethods, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+#if PY_MAJOR_VERSION >= 3
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+#else
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_CHECKTYPES, /* tp_flags */
+#endif
+ "float vector type", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ PbVec3Methods, /* tp_methods */
+ PbVec3Members, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)PbVec3Init, /* tp_init */
+ 0, /* tp_alloc */
+ PbVec3New, /* tp_new */
+};
+
+inline PyObject *castPy(PyTypeObject *p)
+{
+ return reinterpret_cast<PyObject *>(static_cast<void *>(p));
+}
+
+// 4d vector
+
+extern PyTypeObject PbVec4Type;
+
+struct PbVec4 {
+ PyObject_HEAD float data[4];
+};
+
+static PyMethodDef PbVec4Methods[] = {
+ {NULL} // Sentinel
+};
+
+static PyMemberDef PbVec4Members[] = {
+ {(char *)"x", T_FLOAT, offsetof(PbVec4, data), 0, (char *)"X"},
+ {(char *)"y", T_FLOAT, offsetof(PbVec4, data) + sizeof(float) * 1, 0, (char *)"Y"},
+ {(char *)"z", T_FLOAT, offsetof(PbVec4, data) + sizeof(float) * 2, 0, (char *)"Z"},
+ {(char *)"t", T_FLOAT, offsetof(PbVec4, data) + sizeof(float) * 3, 0, (char *)"T"},
+ {NULL} // Sentinel
+};
+
+static void PbVec4Dealloc(PbVec4 *self)
+{
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+static PyObject *PbVec4New(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ return type->tp_alloc(type, 0);
+}
+
+static int PbVec4Init(PbVec4 *self, PyObject *args, PyObject *kwds)
+{
+
+ float x1 = numeric_limits<float>::quiet_NaN(), x2 = x1, x3 = x1, x4 = x1;
+ if (!PyArg_ParseTuple(args, "|ffff", &x1, &x2, &x3, &x4))
+ return -1;
+
+ if (!c_isnan(x1)) {
+ self->data[0] = x1;
+ if (!c_isnan(x2) && !c_isnan(x3) && !c_isnan(x4)) {
+ self->data[1] = x2;
+ self->data[2] = x3;
+ self->data[3] = x4;
+ }
+ else {
+ if (!c_isnan(x2) || !c_isnan(x3) || !c_isnan(x4)) {
+ errMsg("Invalid partial init of vec4");
+ }
+ self->data[1] = self->data[2] = self->data[3] = x1;
+ }
+ }
+ else {
+ self->data[0] = self->data[1] = self->data[2] = self->data[3] = 0;
+ }
+ return 0;
+}
+
+static PyObject *PbVec4Repr(PbVec4 *self)
+{
+ Manta::Vec4 v(self->data[0], self->data[1], self->data[2], self->data[3]);
+ return PyUnicode_FromFormat(v.toString().c_str());
+}
+
+PyTypeObject PbVec4Type = {
+ PyVarObject_HEAD_INIT(NULL, 0) "manta.vec4", /* tp_name */
+ sizeof(PbVec4), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor)PbVec4Dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_reserved */
+ (reprfunc)PbVec4Repr, /* tp_repr */
+ NULL, // &PbVec4NumberMethods, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+#if PY_MAJOR_VERSION >= 3
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */
+#else
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_CHECKTYPES, /* tp_flags */
+#endif
+ "float vector type", /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ PbVec4Methods, /* tp_methods */
+ PbVec4Members, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)PbVec4Init, /* tp_init */
+ 0, /* tp_alloc */
+ PbVec4New, /* tp_new */
+};
+
+// register
+
+void PbVecInitialize(PyObject *module)
+{
+ if (PyType_Ready(&PbVec3Type) < 0)
+ errMsg("can't initialize Vec3 type");
+ Py_INCREF(castPy(&PbVec3Type));
+ PyModule_AddObject(module, "vec3", (PyObject *)&PbVec3Type);
+
+ if (PyType_Ready(&PbVec4Type) < 0)
+ errMsg("can't initialize Vec4 type");
+ Py_INCREF(castPy(&PbVec4Type));
+ PyModule_AddObject(module, "vec4", (PyObject *)&PbVec4Type);
+}
+const static Pb::Register _REG(PbVecInitialize);
+
+} // namespace Manta
diff --git a/extern/mantaflow/helper/pwrapper/pythonInclude.h b/extern/mantaflow/helper/pwrapper/pythonInclude.h
new file mode 100644
index 00000000000..0f78c6641d2
--- /dev/null
+++ b/extern/mantaflow/helper/pwrapper/pythonInclude.h
@@ -0,0 +1,48 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Wrapper to include the Python (and optionally NumPy) headers
+ *
+ ******************************************************************************/
+
+#ifndef _PYTHONINCLUDE_H
+#define _PYTHONINCLUDE_H
+
+#if defined(WIN32) || defined(_WIN32)
+
+// note - we have to include these first!
+# include <string>
+# include <vector>
+# include <iostream>
+
+#endif
+
+// the DEBUG_PYTHON_WITH_RELEASE define links against the release python libraries in debug builds
+#if (defined(_DEBUG) || (DEBUG == 1)) && defined(DEBUG_PYTHON_WITH_RELEASE)
+
+// special handling, disable linking with debug version of python libs
+# undef _DEBUG
+# define NDEBUG
+# include <Python.h>
+# if NUMPY == 1
+# define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+# include "numpy/arrayobject.h"
+# endif
+# define _DEBUG
+# undef NDEBUG
+
+#else
+# include <Python.h>
+# if NUMPY == 1
+# define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
+# include "numpy/arrayobject.h"
+# endif
+#endif
+
+#endif
diff --git a/extern/mantaflow/helper/pwrapper/registry.cpp b/extern/mantaflow/helper/pwrapper/registry.cpp
new file mode 100644
index 00000000000..332b9e158ac
--- /dev/null
+++ b/extern/mantaflow/helper/pwrapper/registry.cpp
@@ -0,0 +1,784 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2014 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Auto python registry
+ *
+ ******************************************************************************/
+
+#include <string.h>
+#include "pythonInclude.h"
+#include "structmember.h"
+#include "manta.h"
+
+using namespace std;
+
+const string gDefaultModuleName = "manta";
+
+namespace Pb {
+
+//******************************************************************************
+// Custom object definition
+
+struct Method {
+ Method(const string &n, const string &d, GenericFunction f) : name(n), doc(d), func(f)
+ {
+ }
+ string name, doc;
+ GenericFunction func;
+
+ PyMethodDef def()
+ {
+ PyMethodDef def = {&name[0], (PyCFunction)func, METH_VARARGS | METH_KEYWORDS, &doc[0]};
+ return def;
+ }
+};
+struct GetSet {
+ GetSet() : getter(0), setter(0)
+ {
+ }
+ GetSet(const string &n, const string &d, Getter g, Setter s)
+ : name(n), doc(d), getter(g), setter(s)
+ {
+ }
+ string name, doc;
+ Getter getter;
+ Setter setter;
+
+ PyGetSetDef def()
+ {
+ PyGetSetDef def = {&name[0], getter, setter, &doc[0], NULL};
+ return def;
+ }
+};
+
+struct ClassData {
+ string cName, pyName;
+ string cPureName, cTemplate;
+ InitFunc init;
+ PyTypeObject typeInfo;
+ PyNumberMethods numInfo;
+ // PySequenceMethods seqInfo;
+ vector<Method> methods;
+ map<string, GetSet> getset;
+ map<string, OperatorFunction> ops;
+ ClassData *baseclass;
+ string baseclassName;
+ Constructor constructor;
+
+ vector<PyMethodDef> genMethods;
+ vector<PyGetSetDef> genGetSet;
+};
+
+struct PbObject {
+ PyObject_HEAD Manta::PbClass *instance;
+ ClassData *classdef;
+};
+
+//******************************************************
+// Internal wrapper class
+
+//! Registers all classes and methods exposed to Python.
+/*! This class is only used internally by the Pb:: framework.
+ * Please use the functionality of PbClass to lookup and translate pointers. */
+class WrapperRegistry {
+ public:
+ static WrapperRegistry &instance();
+ void addClass(const std::string &name,
+ const std::string &internalName,
+ const std::string &baseclass);
+ void addEnumEntry(const std::string &name, int value);
+ void addExternalInitializer(InitFunc func);
+ void addMethod(const std::string &classname,
+ const std::string &methodname,
+ GenericFunction method);
+ void addOperator(const std::string &classname,
+ const std::string &methodname,
+ OperatorFunction method);
+ void addConstructor(const std::string &classname, Constructor method);
+ void addGetSet(const std::string &classname,
+ const std::string &property,
+ Getter getfunc,
+ Setter setfunc);
+ void addPythonPath(const std::string &path);
+ void addPythonCode(const std::string &file, const std::string &code);
+ PyObject *createPyObject(const std::string &classname,
+ const std::string &name,
+ Manta::PbArgs &args,
+ Manta::PbClass *parent);
+ void construct(const std::string &scriptname, const vector<string> &args);
+ void cleanup();
+ void renameObjects();
+ void runPreInit();
+ PyObject *initModule();
+ ClassData *lookup(const std::string &name);
+ bool canConvert(ClassData *from, ClassData *to);
+
+ private:
+ ClassData *getOrConstructClass(const string &name);
+ void registerBaseclasses();
+ void registerDummyTypes();
+ void registerMeta();
+ void addConstants(PyObject *module);
+ void registerOperators(ClassData *cls);
+ void addParentMethods(ClassData *cls, ClassData *base);
+ WrapperRegistry();
+ std::map<std::string, ClassData *> mClasses;
+ std::vector<ClassData *> mClassList;
+ std::vector<InitFunc> mExtInitializers;
+ std::vector<std::string> mPaths;
+ std::string mCode, mScriptName;
+ std::vector<std::string> args;
+ std::map<std::string, int> mEnumValues;
+};
+
+//******************************************************************************
+// Callback functions
+
+PyObject *cbGetClass(PbObject *self, void *cl)
+{
+ return Manta::toPy(self->classdef->cPureName);
+}
+
+PyObject *cbGetTemplate(PbObject *self, void *cl)
+{
+ return Manta::toPy(self->classdef->cTemplate);
+}
+
+PyObject *cbGetCName(PbObject *self, void *cl)
+{
+ return Manta::toPy(self->classdef->cName);
+}
+
+void cbDealloc(PbObject *self)
+{
+ // cout << "dealloc " << self->instance->getName() << " " << self->classdef->cName << endl;
+ if (self->instance) {
+#ifndef BLENDER
+ // don't delete top-level objects
+ if (self->instance->getParent() != self->instance)
+ delete self->instance;
+#else
+ // in Blender we *have* to delete all objects
+ delete self->instance;
+#endif
+ }
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+PyObject *cbNew(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ PbObject *self = (PbObject *)type->tp_alloc(type, 0);
+ if (self != NULL) {
+ // lookup and link classdef
+ self->classdef = WrapperRegistry::instance().lookup(type->tp_name);
+ self->instance = NULL;
+ // cout << "creating " << self->classdef->cName << endl;
+ }
+ else
+ errMsg("can't allocate new python class object");
+ return (PyObject *)self;
+}
+
+int cbDisableConstructor(PyObject *self, PyObject *args, PyObject *kwds)
+{
+ errMsg("Can't instantiate a class template without template arguments");
+ return -1;
+}
+
+PyMODINIT_FUNC PyInit_Main(void)
+{
+ MantaEnsureRegistration();
+#if PY_MAJOR_VERSION >= 3
+ return WrapperRegistry::instance().initModule();
+#else
+ WrapperRegistry::instance().initModule();
+#endif
+}
+
+//******************************************************
+// WrapperRegistry
+
+WrapperRegistry::WrapperRegistry()
+{
+ addClass("__modclass__", "__modclass__", "");
+ addClass("PbClass", "PbClass", "");
+}
+
+ClassData *WrapperRegistry::getOrConstructClass(const string &classname)
+{
+ map<string, ClassData *>::iterator it = mClasses.find(classname);
+
+ if (it != mClasses.end())
+ return it->second;
+ ClassData *data = new ClassData;
+ data->cName = classname;
+ data->cPureName = classname;
+ data->cTemplate = "";
+ size_t tplIdx = classname.find('<');
+ if (tplIdx != string::npos) {
+ data->cPureName = classname.substr(0, tplIdx);
+ data->cTemplate = classname.substr(tplIdx + 1, classname.find('>') - tplIdx - 1);
+ }
+ data->baseclass = NULL;
+ data->constructor = cbDisableConstructor;
+ mClasses[classname] = data;
+ mClassList.push_back(data);
+ return data;
+}
+
+void replaceAll(string &source, string const &find, string const &replace)
+{
+ for (string::size_type i = 0; (i = source.find(find, i)) != std::string::npos;) {
+ source.replace(i, find.length(), replace);
+ i += replace.length() - find.length() + 1;
+ }
+}
+
+void WrapperRegistry::addClass(const string &pyName,
+ const string &internalName,
+ const string &baseclass)
+{
+ ClassData *data = getOrConstructClass(internalName);
+
+ // regularize python name
+ string pythonName = pyName;
+ replaceAll(pythonName, "<", "_");
+ replaceAll(pythonName, ">", "");
+ replaceAll(pythonName, ",", "_");
+
+ if (data->pyName.empty())
+ data->pyName = pythonName;
+ mClasses[pythonName] = data;
+ if (!baseclass.empty())
+ data->baseclassName = baseclass;
+}
+
+void WrapperRegistry::addEnumEntry(const string &name, int value)
+{
+ /// Gather static definitions to add them as static python objects afterwards
+ if (mEnumValues.insert(std::make_pair(name, value)).second == false) {
+ errMsg("Enum entry '" + name + "' already existing...");
+ }
+}
+
+void WrapperRegistry::addExternalInitializer(InitFunc func)
+{
+ mExtInitializers.push_back(func);
+}
+
+void WrapperRegistry::addPythonPath(const string &path)
+{
+ mPaths.push_back(path);
+}
+
+void WrapperRegistry::addPythonCode(const string &file, const string &code)
+{
+ mCode += code + "\n";
+}
+
+void WrapperRegistry::addGetSet(const string &classname,
+ const string &property,
+ Getter getfunc,
+ Setter setfunc)
+{
+ ClassData *classdef = getOrConstructClass(classname);
+ GetSet &def = classdef->getset[property];
+ if (def.name.empty()) {
+ def.name = property;
+ def.doc = property;
+ }
+ if (getfunc)
+ def.getter = getfunc;
+ if (setfunc)
+ def.setter = setfunc;
+}
+
+void WrapperRegistry::addMethod(const string &classname,
+ const string &methodname,
+ GenericFunction func)
+{
+ string aclass = classname;
+ if (aclass.empty())
+ aclass = "__modclass__";
+
+ ClassData *classdef = getOrConstructClass(aclass);
+ for (int i = 0; i < (int)classdef->methods.size(); i++)
+ if (classdef->methods[i].name == methodname)
+ return; // avoid duplicates
+ classdef->methods.push_back(Method(methodname, methodname, func));
+}
+
+void WrapperRegistry::addOperator(const string &classname,
+ const string &methodname,
+ OperatorFunction func)
+{
+ if (classname.empty())
+ errMsg("PYTHON operators have to be defined within classes.");
+
+ string op = methodname.substr(8);
+ ClassData *classdef = getOrConstructClass(classname);
+ classdef->ops[op] = func;
+}
+
+void WrapperRegistry::addConstructor(const string &classname, Constructor func)
+{
+ ClassData *classdef = getOrConstructClass(classname);
+ classdef->constructor = func;
+}
+
+void WrapperRegistry::addParentMethods(ClassData *cur, ClassData *base)
+{
+ if (base == 0)
+ return;
+
+ for (vector<Method>::iterator it = base->methods.begin(); it != base->methods.end(); ++it)
+ addMethod(cur->cName, it->name, it->func);
+
+ for (map<string, GetSet>::iterator it = base->getset.begin(); it != base->getset.end(); ++it)
+ addGetSet(cur->cName, it->first, it->second.getter, it->second.setter);
+
+ for (map<string, OperatorFunction>::iterator it = base->ops.begin(); it != base->ops.end(); ++it)
+ cur->ops[it->first] = it->second;
+
+ addParentMethods(cur, base->baseclass);
+}
+
+void WrapperRegistry::registerBaseclasses()
+{
+ for (int i = 0; i < (int)mClassList.size(); i++) {
+ string bname = mClassList[i]->baseclassName;
+ if (!bname.empty()) {
+ mClassList[i]->baseclass = lookup(bname);
+ if (!mClassList[i]->baseclass)
+ errMsg("Registering class '" + mClassList[i]->cName + "' : Base class '" + bname +
+ "' not found");
+ }
+ }
+
+ for (int i = 0; i < (int)mClassList.size(); i++) {
+ addParentMethods(mClassList[i], mClassList[i]->baseclass);
+ }
+}
+
+void WrapperRegistry::registerMeta()
+{
+ for (int i = 0; i < (int)mClassList.size(); i++) {
+ mClassList[i]->getset["_class"] = GetSet("_class", "C class name", (Getter)cbGetClass, 0);
+ mClassList[i]->getset["_cname"] = GetSet("_cname", "Full C name", (Getter)cbGetCName, 0);
+ mClassList[i]->getset["_T"] = GetSet("_T", "C template argument", (Getter)cbGetTemplate, 0);
+ }
+}
+
+void WrapperRegistry::registerOperators(ClassData *cls)
+{
+ PyNumberMethods &num = cls->numInfo;
+ for (map<string, OperatorFunction>::iterator it = cls->ops.begin(); it != cls->ops.end(); it++) {
+ const string &op = it->first;
+ OperatorFunction func = it->second;
+ if (op == "+=")
+ num.nb_inplace_add = func;
+ else if (op == "-=")
+ num.nb_inplace_subtract = func;
+ else if (op == "*=")
+ num.nb_inplace_multiply = func;
+ else if (op == "+")
+ num.nb_add = func;
+ else if (op == "-")
+ num.nb_subtract = func;
+ else if (op == "*")
+ num.nb_multiply = func;
+#if PY_MAJOR_VERSION < 3
+ else if (op == "/=")
+ num.nb_inplace_divide = func;
+ else if (op == "/")
+ num.nb_divide = func;
+#else
+ else if (op == "/=")
+ num.nb_inplace_true_divide = func;
+ else if (op == "/")
+ num.nb_true_divide = func;
+#endif
+ else
+ errMsg("PYTHON operator " + op + " not supported");
+ }
+}
+
+void WrapperRegistry::registerDummyTypes()
+{
+ vector<string> add;
+ for (vector<ClassData *>::iterator it = mClassList.begin(); it != mClassList.end(); ++it) {
+ string cName = (*it)->cName;
+ if (cName.find('<') != string::npos)
+ add.push_back(cName.substr(0, cName.find('<')));
+ }
+ for (int i = 0; i < (int)add.size(); i++)
+ addClass(add[i], add[i], "");
+}
+
+ClassData *WrapperRegistry::lookup(const string &name)
+{
+ for (map<string, ClassData *>::iterator it = mClasses.begin(); it != mClasses.end(); ++it) {
+ if (it->first == name || it->second->cName == name)
+ return it->second;
+ }
+ return NULL;
+}
+
+void WrapperRegistry::cleanup()
+{
+ for (vector<ClassData *>::iterator it = mClassList.begin(); it != mClassList.end(); ++it) {
+ delete *it;
+ }
+ mClasses.clear();
+ mClassList.clear();
+}
+
+WrapperRegistry &WrapperRegistry::instance()
+{
+ static WrapperRegistry inst;
+ return inst;
+}
+
+bool WrapperRegistry::canConvert(ClassData *from, ClassData *to)
+{
+ if (from == to)
+ return true;
+ if (from->baseclass)
+ return canConvert(from->baseclass, to);
+ return false;
+}
+
+void WrapperRegistry::addConstants(PyObject *module)
+{
+ // expose arguments
+ PyObject *list = PyList_New(args.size());
+ for (int i = 0; i < (int)args.size(); i++)
+ PyList_SET_ITEM(list, i, Manta::toPy(args[i]));
+ PyModule_AddObject(module, "args", list);
+ PyModule_AddObject(module, "SCENEFILE", Manta::toPy(mScriptName));
+
+ // expose compile flags
+#ifdef DEBUG
+ PyModule_AddObject(module, "DEBUG", Manta::toPy<bool>(true));
+#else
+ PyModule_AddObject(module, "DEBUG", Manta::toPy<bool>(false));
+#endif
+#ifdef MANTA_MT
+ PyModule_AddObject(module, "MT", Manta::toPy<bool>(true));
+#else
+ PyModule_AddObject(module, "MT", Manta::toPy<bool>(false));
+#endif
+#ifdef GUI
+ PyModule_AddObject(module, "GUI", Manta::toPy<bool>(true));
+#else
+ PyModule_AddObject(module, "GUI", Manta::toPy<bool>(false));
+#endif
+#if FLOATINGPOINT_PRECISION == 2
+ PyModule_AddObject(module, "DOUBLEPRECISION", Manta::toPy<bool>(true));
+#else
+ PyModule_AddObject(module, "DOUBLEPRECISION", Manta::toPy<bool>(false));
+#endif
+ // cuda off for now
+ PyModule_AddObject(module, "CUDA", Manta::toPy<bool>(false));
+
+ // expose enum entries
+ std::map<std::string, int>::iterator it;
+ for (it = mEnumValues.begin(); it != mEnumValues.end(); it++) {
+ PyModule_AddObject(module, it->first.c_str(), Manta::toPy(it->second));
+ // Alternative would be:
+ // e.g. PyModule_AddIntConstant(module, "FlagFluid", 1);
+ }
+}
+
+void WrapperRegistry::runPreInit()
+{
+ // add python directories to path
+ PyObject *sys_path = PySys_GetObject((char *)"path");
+ for (size_t i = 0; i < mPaths.size(); i++) {
+ PyObject *path = Manta::toPy(mPaths[i]);
+ if (sys_path == NULL || path == NULL || PyList_Append(sys_path, path) < 0) {
+ errMsg("unable to set python path");
+ }
+ Py_DECREF(path);
+ }
+ if (!mCode.empty()) {
+ mCode = "from manta import *\n" + mCode;
+ PyRun_SimpleString(mCode.c_str());
+ }
+}
+
+PyObject *WrapperRegistry::createPyObject(const string &classname,
+ const string &name,
+ Manta::PbArgs &args,
+ Manta::PbClass *parent)
+{
+ ClassData *classdef = lookup(classname);
+ if (!classdef)
+ errMsg("Class " + classname + " doesn't exist.");
+
+ // create object
+ PyObject *obj = cbNew(&classdef->typeInfo, NULL, NULL);
+ PbObject *self = (PbObject *)obj;
+ PyObject *nkw = 0;
+
+ if (args.kwds())
+ nkw = PyDict_Copy(args.kwds());
+ else
+ nkw = PyDict_New();
+
+ PyObject *nocheck = Py_BuildValue("s", "yes");
+ PyDict_SetItemString(nkw, "nocheck", nocheck);
+ if (parent)
+ PyDict_SetItemString(nkw, "parent", parent->getPyObject());
+
+ // create instance
+ if (self->classdef->constructor(obj, args.linArgs(), nkw) < 0)
+ errMsg("error raised in constructor"); // assume condition is already set
+
+ Py_DECREF(nkw);
+ Py_DECREF(nocheck);
+ self->instance->setName(name);
+
+ return obj;
+}
+
+// prepare typeinfo and register python module
+void WrapperRegistry::construct(const string &scriptname, const vector<string> &args)
+{
+ mScriptName = scriptname;
+ this->args = args;
+
+ registerBaseclasses();
+ registerMeta();
+ registerDummyTypes();
+
+ // work around for certain gcc versions, cast to char*
+ PyImport_AppendInittab((char *)gDefaultModuleName.c_str(), PyInit_Main);
+}
+
+inline PyObject *castPy(PyTypeObject *p)
+{
+ return reinterpret_cast<PyObject *>(static_cast<void *>(p));
+}
+
+PyObject *WrapperRegistry::initModule()
+{
+ // generate and terminate all method lists
+ PyMethodDef sentinelFunc = {NULL, NULL, 0, NULL};
+ PyGetSetDef sentinelGetSet = {NULL, NULL, NULL, NULL, NULL};
+ for (int i = 0; i < (int)mClassList.size(); i++) {
+ ClassData *cls = mClassList[i];
+ cls->genMethods.clear();
+ cls->genGetSet.clear();
+ for (vector<Method>::iterator i2 = cls->methods.begin(); i2 != cls->methods.end(); ++i2)
+ cls->genMethods.push_back(i2->def());
+ for (map<string, GetSet>::iterator i2 = cls->getset.begin(); i2 != cls->getset.end(); ++i2)
+ cls->genGetSet.push_back(i2->second.def());
+
+ cls->genMethods.push_back(sentinelFunc);
+ cls->genGetSet.push_back(sentinelGetSet);
+ }
+
+ // prepare module info
+#if PY_MAJOR_VERSION >= 3
+ static PyModuleDef MainModule = {PyModuleDef_HEAD_INIT,
+ gDefaultModuleName.c_str(),
+ "Bridge module to the C++ solver",
+ -1,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL};
+ // get generic methods (plugin functions)
+ MainModule.m_methods = &mClasses["__modclass__"]->genMethods[0];
+
+ // create module
+ PyObject *module = PyModule_Create(&MainModule);
+#else
+ PyObject *module = Py_InitModule(gDefaultModuleName.c_str(),
+ &mClasses["__modclass__"]->genMethods[0]);
+#endif
+ if (module == NULL)
+ return NULL;
+
+ // load classes
+ for (vector<ClassData *>::iterator it = mClassList.begin(); it != mClassList.end(); ++it) {
+ ClassData &data = **it;
+ char *nameptr = (char *)data.pyName.c_str();
+
+ // define numeric substruct
+ PyNumberMethods *num = 0;
+ if (!data.ops.empty()) {
+ num = &data.numInfo;
+ memset(num, 0, sizeof(PyNumberMethods));
+ registerOperators(&data);
+ }
+
+ // define python classinfo
+ PyTypeObject t = {
+ PyVarObject_HEAD_INIT(NULL, 0)(char *) data.pyName.c_str(), // tp_name
+ sizeof(PbObject), // tp_basicsize
+ 0, // tp_itemsize
+ (destructor)cbDealloc, // tp_dealloc
+ 0, // tp_print
+ 0, // tp_getattr
+ 0, // tp_setattr
+ 0, // tp_reserved
+ 0, // tp_repr
+ num, // tp_as_number
+ 0, // tp_as_sequence
+ 0, // tp_as_mapping
+ 0, // tp_hash
+ 0, // tp_call
+ 0, // tp_str
+ 0, // tp_getattro
+ 0, // tp_setattro
+ 0, // tp_as_buffer
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, // tp_flags
+ nameptr, // tp_doc
+ 0, // tp_traverse
+ 0, // tp_clear
+ 0, // tp_richcompare
+ 0, // tp_weaklistoffset
+ 0, // tp_iter
+ 0, // tp_iternext
+ &data.genMethods[0], // tp_methods
+ 0, // tp_members
+ &data.genGetSet[0], // tp_getset
+ 0, // tp_base
+ 0, // tp_dict
+ 0, // tp_descr_get
+ 0, // tp_descr_set
+ 0, // tp_dictoffset
+ (initproc)(data.constructor), // tp_init
+ 0, // tp_alloc
+ cbNew // tp_new
+ };
+ data.typeInfo = t;
+
+ if (PyType_Ready(&data.typeInfo) < 0)
+ continue;
+
+ for (map<string, ClassData *>::iterator i2 = mClasses.begin(); i2 != mClasses.end(); ++i2) {
+ if (*it != i2->second)
+ continue;
+ // register all aliases
+ Py_INCREF(castPy(&data.typeInfo));
+ PyModule_AddObject(module, (char *)i2->first.c_str(), (PyObject *)&data.typeInfo);
+ }
+ }
+
+ // externals
+ for (vector<InitFunc>::iterator it = mExtInitializers.begin(); it != mExtInitializers.end();
+ ++it) {
+ (*it)(module);
+ }
+
+ addConstants(module);
+
+ return module;
+}
+
+//******************************************************
+// Register members and exposed functions
+
+void setup(const std::string &filename, const std::vector<std::string> &args)
+{
+ WrapperRegistry::instance().construct(filename, args);
+ Py_Initialize();
+ WrapperRegistry::instance().runPreInit();
+}
+
+void finalize()
+{
+ Py_Finalize();
+ WrapperRegistry::instance().cleanup();
+}
+
+bool canConvert(PyObject *obj, const string &classname)
+{
+ ClassData *from = ((PbObject *)obj)->classdef;
+ ClassData *dest = WrapperRegistry::instance().lookup(classname);
+ if (!dest)
+ errMsg("Classname '" + classname + "' is not registered.");
+ return WrapperRegistry::instance().canConvert(from, dest);
+}
+
+Manta::PbClass *objFromPy(PyObject *obj)
+{
+ if (Py_TYPE(obj)->tp_dealloc != (destructor)cbDealloc) // not a manta object
+ return NULL;
+
+ return ((PbObject *)obj)->instance;
+}
+
+PyObject *copyObject(Manta::PbClass *cls, const string &classname)
+{
+ ClassData *classdef = WrapperRegistry::instance().lookup(classname);
+ assertMsg(classdef, "python class " + classname + " does not exist.");
+
+ // allocate new object
+ PbObject *obj = (PbObject *)classdef->typeInfo.tp_alloc(&(classdef->typeInfo), 0);
+ assertMsg(obj, "cannot allocate new python object");
+
+ obj->classdef = classdef;
+ cls->registerObject((PyObject *)obj, 0);
+
+ return cls->getPyObject();
+}
+
+Manta::PbClass *createPy(const std::string &classname,
+ const std::string &name,
+ Manta::PbArgs &args,
+ Manta::PbClass *parent)
+{
+ PyObject *obj = WrapperRegistry::instance().createPyObject(classname, name, args, parent);
+ return ((PbObject *)obj)->instance;
+}
+
+void setReference(Manta::PbClass *cls, PyObject *obj)
+{
+ ((PbObject *)obj)->instance = cls;
+}
+
+Register::Register(const string &className, const string &funcName, GenericFunction func)
+{
+ WrapperRegistry::instance().addMethod(className, funcName, func);
+}
+Register::Register(const string &className, const string &funcName, OperatorFunction func)
+{
+ WrapperRegistry::instance().addOperator(className, funcName, func);
+}
+Register::Register(const string &className, const string &funcName, Constructor func)
+{
+ WrapperRegistry::instance().addConstructor(className, func);
+}
+Register::Register(const string &className, const string &property, Getter getter, Setter setter)
+{
+ WrapperRegistry::instance().addGetSet(className, property, getter, setter);
+}
+Register::Register(const string &className, const string &pyName, const string &baseClass)
+{
+ WrapperRegistry::instance().addClass(pyName, className, baseClass);
+}
+Register::Register(const string &name, const int value)
+{
+ WrapperRegistry::instance().addEnumEntry(name, value);
+}
+Register::Register(const string &file, const string &pythonCode)
+{
+ WrapperRegistry::instance().addPythonCode(file, pythonCode);
+}
+Register::Register(InitFunc func)
+{
+ WrapperRegistry::instance().addExternalInitializer(func);
+}
+
+} // namespace Pb
diff --git a/extern/mantaflow/helper/pwrapper/registry.h b/extern/mantaflow/helper/pwrapper/registry.h
new file mode 100644
index 00000000000..139863df85d
--- /dev/null
+++ b/extern/mantaflow/helper/pwrapper/registry.h
@@ -0,0 +1,106 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2014 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Auto python registry
+ *
+ ******************************************************************************/
+
+#ifndef _REGISTRY_H
+#define _REGISTRY_H
+
+#include <string>
+#include <vector>
+
+// forward declaration to minimize Python.h includes
+#ifndef PyObject_HEAD
+# ifndef PyObject_Fake
+struct _object;
+typedef _object PyObject;
+# define PyObject_Fake
+# endif
+#endif
+
+namespace Manta {
+class PbClass;
+class PbArgs;
+} // namespace Manta
+
+// **************************************************
+// NOTE
+// Everything in this file is intended only for internal
+// use by the generated wrappers or pclass/pconvert.
+// For user code, use the functionality exposed in
+// pclass.h / pconvert.h instead.
+// **************************************************
+
+// Used to turn names into strings
+namespace Manta {
+template<class T> struct Namify {
+ static const char *S;
+};
+} // namespace Manta
+namespace Pb {
+
+// internal registry access
+void setup(const std::string &filename, const std::vector<std::string> &args);
+void finalize();
+bool canConvert(PyObject *obj, const std::string &to);
+Manta::PbClass *objFromPy(PyObject *obj);
+Manta::PbClass *createPy(const std::string &classname,
+ const std::string &name,
+ Manta::PbArgs &args,
+ Manta::PbClass *parent);
+void setReference(Manta::PbClass *cls, PyObject *obj);
+PyObject *copyObject(Manta::PbClass *cls, const std::string &classname);
+void MantaEnsureRegistration();
+
+#ifdef BLENDER
+# ifdef PyMODINIT_FUNC
+PyMODINIT_FUNC PyInit_Main(void);
+# endif
+#endif
+
+// callback type
+typedef void (*InitFunc)(PyObject *);
+typedef PyObject *(*GenericFunction)(PyObject *self, PyObject *args, PyObject *kwds);
+typedef PyObject *(*OperatorFunction)(PyObject *self, PyObject *o);
+typedef int (*Constructor)(PyObject *self, PyObject *args, PyObject *kwds);
+typedef PyObject *(*Getter)(PyObject *self, void *closure);
+typedef int (*Setter)(PyObject *self, PyObject *value, void *closure);
+
+//! Auto registry of python methods and classes
+struct Register {
+ //! register method
+ Register(const std::string &className, const std::string &funcName, GenericFunction func);
+ //! register operator
+ Register(const std::string &className, const std::string &funcName, OperatorFunction func);
+ //! register constructor
+ Register(const std::string &className, const std::string &funcName, Constructor func);
+ //! register getter/setter
+ Register(const std::string &className,
+ const std::string &property,
+ Getter getter,
+ Setter setter);
+ //! register class
+ Register(const std::string &className, const std::string &pyName, const std::string &baseClass);
+ //! register enum entry
+ Register(const std::string &name, const int value);
+ //! register python code
+ Register(const std::string &file, const std::string &pythonCode);
+ //! register external code
+ Register(InitFunc func);
+};
+
+#define KEEP_UNUSED(var) \
+ do { \
+ (void)var; \
+ } while (false);
+
+} // namespace Pb
+#endif
diff --git a/extern/mantaflow/helper/util/integrator.h b/extern/mantaflow/helper/util/integrator.h
new file mode 100644
index 00000000000..5b1b02a5197
--- /dev/null
+++ b/extern/mantaflow/helper/util/integrator.h
@@ -0,0 +1,79 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Helper functions for simple integration
+ *
+ ******************************************************************************/
+
+#ifndef _INTEGRATE_H
+#define _INTEGRATE_H
+
+#include <vector>
+#include "vectorbase.h"
+#include "kernel.h"
+
+namespace Manta {
+
+enum IntegrationMode { IntEuler = 0, IntRK2, IntRK4 };
+
+//! Integrate a particle set with a given velocity kernel
+template<class VelKernel> void integratePointSet(VelKernel &k, int mode)
+{
+ typedef typename VelKernel::type0 PosType;
+ PosType &x = k.getArg0();
+ const std::vector<Vec3> &u = k.getRet();
+ const int N = x.size();
+
+ if (mode == IntEuler) {
+ for (int i = 0; i < N; i++)
+ x[i].pos += u[i];
+ }
+ else if (mode == IntRK2) {
+ PosType x0(x);
+
+ for (int i = 0; i < N; i++)
+ x[i].pos = x0[i].pos + 0.5 * u[i];
+
+ k.run();
+ for (int i = 0; i < N; i++)
+ x[i].pos = x0[i].pos + u[i];
+ }
+ else if (mode == IntRK4) {
+ PosType x0(x);
+ std::vector<Vec3> uTotal(u);
+
+ for (int i = 0; i < N; i++)
+ x[i].pos = x0[i].pos + 0.5 * u[i];
+
+ k.run();
+ for (int i = 0; i < N; i++) {
+ x[i].pos = x0[i].pos + 0.5 * u[i];
+ uTotal[i] += 2 * u[i];
+ }
+
+ k.run();
+ for (int i = 0; i < N; i++) {
+ x[i].pos = x0[i].pos + u[i];
+ uTotal[i] += 2 * u[i];
+ }
+
+ k.run();
+ for (int i = 0; i < N; i++)
+ x[i].pos = x0[i].pos + (Real)(1. / 6.) * (uTotal[i] + u[i]);
+ }
+ else
+ errMsg("unknown integration type");
+
+ // for(int i=0; i<N; i++) std::cout << x[i].pos.y-x[0].pos.y << std::endl;
+ // std::cout << "<><><>" << std::endl;
+}
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/helper/util/interpol.h b/extern/mantaflow/helper/util/interpol.h
new file mode 100644
index 00000000000..24d5d2ada06
--- /dev/null
+++ b/extern/mantaflow/helper/util/interpol.h
@@ -0,0 +1,324 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Helper functions for interpolation
+ *
+ ******************************************************************************/
+
+#ifndef _INTERPOL_H
+#define _INTERPOL_H
+
+#include "vectorbase.h"
+
+// Grid values are stored at i+0.5, j+0.5, k+0.5
+// MAC grid values are stored at i,j+0.5,k+0.5 (for x) ...
+
+namespace Manta {
+
+inline Vec3 fdTangent(const Vec3 &p0, const Vec3 &p1, const Vec3 &p2)
+{
+ return 0.5 * (getNormalized(p2 - p1) + getNormalized(p1 - p0));
+}
+
+inline Vec3 crTangent(const Vec3 &p0, const Vec3 &p1, const Vec3 &p2)
+{
+ return 0.5 * (p2 - p0);
+}
+
+inline Vec3 hermiteSpline(const Vec3 &p0, const Vec3 &p1, const Vec3 &m0, const Vec3 &m1, Real t)
+{
+ const Real t2 = t * t, t3 = t2 * t;
+ return (2.0 * t3 - 3.0 * t2 + 1.0) * p0 + (t3 - 2.0 * t2 + t) * m0 +
+ (-2.0 * t3 + 3.0 * t2) * p1 + (t3 - t2) * m1;
+}
+
+static inline void checkIndexInterpol(const Vec3i &size, IndexInt idx)
+{
+ if (idx < 0 || idx > (IndexInt)size.x * size.y * size.z) {
+ std::ostringstream s;
+ s << "Grid interpol dim " << size << " : index " << idx << " out of bound ";
+ errMsg(s.str());
+ }
+}
+
+// ----------------------------------------------------------------------
+// Grid interpolators
+// ----------------------------------------------------------------------
+
+#define BUILD_INDEX \
+ Real px = pos.x - 0.5f, py = pos.y - 0.5f, pz = pos.z - 0.5f; \
+ int xi = (int)px; \
+ int yi = (int)py; \
+ int zi = (int)pz; \
+ Real s1 = px - (Real)xi, s0 = 1. - s1; \
+ Real t1 = py - (Real)yi, t0 = 1. - t1; \
+ Real f1 = pz - (Real)zi, f0 = 1. - f1; \
+ /* clamp to border */ \
+ if (px < 0.) { \
+ xi = 0; \
+ s0 = 1.0; \
+ s1 = 0.0; \
+ } \
+ if (py < 0.) { \
+ yi = 0; \
+ t0 = 1.0; \
+ t1 = 0.0; \
+ } \
+ if (pz < 0.) { \
+ zi = 0; \
+ f0 = 1.0; \
+ f1 = 0.0; \
+ } \
+ if (xi >= size.x - 1) { \
+ xi = size.x - 2; \
+ s0 = 0.0; \
+ s1 = 1.0; \
+ } \
+ if (yi >= size.y - 1) { \
+ yi = size.y - 2; \
+ t0 = 0.0; \
+ t1 = 1.0; \
+ } \
+ if (size.z > 1) { \
+ if (zi >= size.z - 1) { \
+ zi = size.z - 2; \
+ f0 = 0.0; \
+ f1 = 1.0; \
+ } \
+ } \
+ const int X = 1; \
+ const int Y = size.x;
+
+template<class T> inline T interpol(const T *data, const Vec3i &size, const int Z, const Vec3 &pos)
+{
+ BUILD_INDEX
+ IndexInt idx = (IndexInt)xi + (IndexInt)Y * yi + (IndexInt)Z * zi;
+ DEBUG_ONLY(checkIndexInterpol(size, idx));
+ DEBUG_ONLY(checkIndexInterpol(size, idx + X + Y + Z));
+
+ return ((data[idx] * t0 + data[idx + Y] * t1) * s0 +
+ (data[idx + X] * t0 + data[idx + X + Y] * t1) * s1) *
+ f0 +
+ ((data[idx + Z] * t0 + data[idx + Y + Z] * t1) * s0 +
+ (data[idx + X + Z] * t0 + data[idx + X + Y + Z] * t1) * s1) *
+ f1;
+}
+
+template<int c>
+inline Real interpolComponent(const Vec3 *data, const Vec3i &size, const int Z, const Vec3 &pos)
+{
+ BUILD_INDEX
+ IndexInt idx = (IndexInt)xi + (IndexInt)Y * yi + (IndexInt)Z * zi;
+ DEBUG_ONLY(checkIndexInterpol(size, idx));
+ DEBUG_ONLY(checkIndexInterpol(size, idx + X + Y + Z));
+
+ return ((data[idx][c] * t0 + data[idx + Y][c] * t1) * s0 +
+ (data[idx + X][c] * t0 + data[idx + X + Y][c] * t1) * s1) *
+ f0 +
+ ((data[idx + Z][c] * t0 + data[idx + Y + Z][c] * t1) * s0 +
+ (data[idx + X + Z][c] * t0 + data[idx + X + Y + Z][c] * t1) * s1) *
+ f1;
+}
+
+template<class T>
+inline void setInterpol(
+ T *data, const Vec3i &size, const int Z, const Vec3 &pos, const T &v, Real *sumBuffer)
+{
+ BUILD_INDEX
+ IndexInt idx = (IndexInt)xi + (IndexInt)Y * yi + (IndexInt)Z * zi;
+ DEBUG_ONLY(checkIndexInterpol(size, idx));
+ DEBUG_ONLY(checkIndexInterpol(size, idx + X + Y + Z));
+
+ T *ref = &data[idx];
+ Real *sum = &sumBuffer[idx];
+ Real s0f0 = s0 * f0, s1f0 = s1 * f0, s0f1 = s0 * f1, s1f1 = s1 * f1;
+ Real w0 = t0 * s0f0, wx = t0 * s1f0, wy = t1 * s0f0, wxy = t1 * s1f0;
+ Real wz = t0 * s0f1, wxz = t0 * s1f1, wyz = t1 * s0f1, wxyz = t1 * s1f1;
+
+ sum[Z] += wz;
+ sum[X + Z] += wxz;
+ sum[Y + Z] += wyz;
+ sum[X + Y + Z] += wxyz;
+ ref[Z] += wz * v;
+ ref[X + Z] += wxz * v;
+ ref[Y + Z] += wyz * v;
+ ref[X + Y + Z] += wxyz * v;
+ sum[0] += w0;
+ sum[X] += wx;
+ sum[Y] += wy;
+ sum[X + Y] += wxy;
+ ref[0] += w0 * v;
+ ref[X] += wx * v;
+ ref[Y] += wy * v;
+ ref[X + Y] += wxy * v;
+}
+
+#define BUILD_INDEX_SHIFT \
+ BUILD_INDEX \
+ /* shifted coords */ \
+ int s_xi = (int)pos.x, s_yi = (int)pos.y, s_zi = (int)pos.z; \
+ Real s_s1 = pos.x - (Real)s_xi, s_s0 = 1. - s_s1; \
+ Real s_t1 = pos.y - (Real)s_yi, s_t0 = 1. - s_t1; \
+ Real s_f1 = pos.z - (Real)s_zi, s_f0 = 1. - s_f1; \
+ /* clamp to border */ \
+ if (pos.x < 0) { \
+ s_xi = 0; \
+ s_s0 = 1.0; \
+ s_s1 = 0.0; \
+ } \
+ if (pos.y < 0) { \
+ s_yi = 0; \
+ s_t0 = 1.0; \
+ s_t1 = 0.0; \
+ } \
+ if (pos.z < 0) { \
+ s_zi = 0; \
+ s_f0 = 1.0; \
+ s_f1 = 0.0; \
+ } \
+ if (s_xi >= size.x - 1) { \
+ s_xi = size.x - 2; \
+ s_s0 = 0.0; \
+ s_s1 = 1.0; \
+ } \
+ if (s_yi >= size.y - 1) { \
+ s_yi = size.y - 2; \
+ s_t0 = 0.0; \
+ s_t1 = 1.0; \
+ } \
+ if (size.z > 1) { \
+ if (s_zi >= size.z - 1) { \
+ s_zi = size.z - 2; \
+ s_f0 = 0.0; \
+ s_f1 = 1.0; \
+ } \
+ }
+
+inline Vec3 interpolMAC(const Vec3 *data, const Vec3i &size, const int Z, const Vec3 &pos)
+{
+ BUILD_INDEX_SHIFT;
+ DEBUG_ONLY(checkIndexInterpol(size, (zi * (IndexInt)size.y + yi) * (IndexInt)size.x + xi));
+ DEBUG_ONLY(checkIndexInterpol(
+ size, (s_zi * (IndexInt)size.y + s_yi) * (IndexInt)size.x + s_xi + X + Y + Z));
+
+ // process individual components
+ Vec3 ret(0.);
+ { // X
+ const Vec3 *ref = &data[((zi * size.y + yi) * size.x + s_xi)];
+ ret.x = f0 * ((ref[0].x * t0 + ref[Y].x * t1) * s_s0 +
+ (ref[X].x * t0 + ref[X + Y].x * t1) * s_s1) +
+ f1 * ((ref[Z].x * t0 + ref[Z + Y].x * t1) * s_s0 +
+ (ref[X + Z].x * t0 + ref[X + Y + Z].x * t1) * s_s1);
+ }
+ { // Y
+ const Vec3 *ref = &data[((zi * size.y + s_yi) * size.x + xi)];
+ ret.y = f0 * ((ref[0].y * s_t0 + ref[Y].y * s_t1) * s0 +
+ (ref[X].y * s_t0 + ref[X + Y].y * s_t1) * s1) +
+ f1 * ((ref[Z].y * s_t0 + ref[Z + Y].y * s_t1) * s0 +
+ (ref[X + Z].y * s_t0 + ref[X + Y + Z].y * s_t1) * s1);
+ }
+ { // Z
+ const Vec3 *ref = &data[((s_zi * size.y + yi) * size.x + xi)];
+ ret.z = s_f0 *
+ ((ref[0].z * t0 + ref[Y].z * t1) * s0 + (ref[X].z * t0 + ref[X + Y].z * t1) * s1) +
+ s_f1 * ((ref[Z].z * t0 + ref[Z + Y].z * t1) * s0 +
+ (ref[X + Z].z * t0 + ref[X + Y + Z].z * t1) * s1);
+ }
+ return ret;
+}
+
+inline void setInterpolMAC(
+ Vec3 *data, const Vec3i &size, const int Z, const Vec3 &pos, const Vec3 &val, Vec3 *sumBuffer)
+{
+ BUILD_INDEX_SHIFT;
+ DEBUG_ONLY(checkIndexInterpol(size, (zi * (IndexInt)size.y + yi) * (IndexInt)size.x + xi));
+ DEBUG_ONLY(checkIndexInterpol(
+ size, (s_zi * (IndexInt)size.y + s_yi) * (IndexInt)size.x + s_xi + X + Y + Z));
+
+ // process individual components
+ { // X
+ const IndexInt idx = (IndexInt)(zi * size.y + yi) * size.x + s_xi;
+ Vec3 *ref = &data[idx], *sum = &sumBuffer[idx];
+ Real s0f0 = s_s0 * f0, s1f0 = s_s1 * f0, s0f1 = s_s0 * f1, s1f1 = s_s1 * f1;
+ Real w0 = t0 * s0f0, wx = t0 * s1f0, wy = t1 * s0f0, wxy = t1 * s1f0;
+ Real wz = t0 * s0f1, wxz = t0 * s1f1, wyz = t1 * s0f1, wxyz = t1 * s1f1;
+
+ sum[Z].x += wz;
+ sum[X + Z].x += wxz;
+ sum[Y + Z].x += wyz;
+ sum[X + Y + Z].x += wxyz;
+ ref[Z].x += wz * val.x;
+ ref[X + Z].x += wxz * val.x;
+ ref[Y + Z].x += wyz * val.x;
+ ref[X + Y + Z].x += wxyz * val.x;
+ sum[0].x += w0;
+ sum[X].x += wx;
+ sum[Y].x += wy;
+ sum[X + Y].x += wxy;
+ ref[0].x += w0 * val.x;
+ ref[X].x += wx * val.x;
+ ref[Y].x += wy * val.x;
+ ref[X + Y].x += wxy * val.x;
+ }
+ { // Y
+ const IndexInt idx = (IndexInt)(zi * size.y + s_yi) * size.x + xi;
+ Vec3 *ref = &data[idx], *sum = &sumBuffer[idx];
+ Real s0f0 = s0 * f0, s1f0 = s1 * f0, s0f1 = s0 * f1, s1f1 = s1 * f1;
+ Real w0 = s_t0 * s0f0, wx = s_t0 * s1f0, wy = s_t1 * s0f0, wxy = s_t1 * s1f0;
+ Real wz = s_t0 * s0f1, wxz = s_t0 * s1f1, wyz = s_t1 * s0f1, wxyz = s_t1 * s1f1;
+
+ sum[Z].y += wz;
+ sum[X + Z].y += wxz;
+ sum[Y + Z].y += wyz;
+ sum[X + Y + Z].y += wxyz;
+ ref[Z].y += wz * val.y;
+ ref[X + Z].y += wxz * val.y;
+ ref[Y + Z].y += wyz * val.y;
+ ref[X + Y + Z].y += wxyz * val.y;
+ sum[0].y += w0;
+ sum[X].y += wx;
+ sum[Y].y += wy;
+ sum[X + Y].y += wxy;
+ ref[0].y += w0 * val.y;
+ ref[X].y += wx * val.y;
+ ref[Y].y += wy * val.y;
+ ref[X + Y].y += wxy * val.y;
+ }
+ { // Z
+ const IndexInt idx = (IndexInt)(s_zi * size.y + yi) * size.x + xi;
+ Vec3 *ref = &data[idx], *sum = &sumBuffer[idx];
+ Real s0f0 = s0 * s_f0, s1f0 = s1 * s_f0, s0f1 = s0 * s_f1, s1f1 = s1 * s_f1;
+ Real w0 = t0 * s0f0, wx = t0 * s1f0, wy = t1 * s0f0, wxy = t1 * s1f0;
+ Real wz = t0 * s0f1, wxz = t0 * s1f1, wyz = t1 * s0f1, wxyz = t1 * s1f1;
+
+ sum[0].z += w0;
+ sum[X].z += wx;
+ sum[Y].z += wy;
+ sum[X + Y].z += wxy;
+ sum[Z].z += wz;
+ sum[X + Z].z += wxz;
+ sum[Y + Z].z += wyz;
+ sum[X + Y + Z].z += wxyz;
+ ref[0].z += w0 * val.z;
+ ref[X].z += wx * val.z;
+ ref[Y].z += wy * val.z;
+ ref[X + Y].z += wxy * val.z;
+ ref[Z].z += wz * val.z;
+ ref[X + Z].z += wxz * val.z;
+ ref[Y + Z].z += wyz * val.z;
+ ref[X + Y + Z].z += wxyz * val.z;
+ }
+}
+
+#undef BUILD_INDEX
+#undef BUILD_INDEX_SHIFT
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/helper/util/interpolHigh.h b/extern/mantaflow/helper/util/interpolHigh.h
new file mode 100644
index 00000000000..c2a77442b9c
--- /dev/null
+++ b/extern/mantaflow/helper/util/interpolHigh.h
@@ -0,0 +1,204 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2014 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Helper functions for higher order interpolation
+ *
+ ******************************************************************************/
+
+#ifndef _INTERPOLHIGH_H
+#define _INTERPOLHIGH_H
+
+#include "vectorbase.h"
+
+namespace Manta {
+
+template<class T> inline T cubicInterp(const Real interp, const T *points)
+{
+ T d0 = (points[2] - points[0]) * 0.5;
+ T d1 = (points[3] - points[1]) * 0.5;
+ T deltak = (points[2] - points[1]);
+
+ // disabled: if (deltak * d0 < 0.0) d0 = 0;
+ // disabled: if (deltak * d1 < 0.0) d1 = 0;
+
+ T a0 = points[1];
+ T a1 = d0;
+ T a2 = 3.0 * deltak - 2.0 * d0 - d1;
+ T a3 = -2.0 * deltak + d0 + d1;
+
+ Real squared = interp * interp;
+ Real cubed = squared * interp;
+ return a3 * cubed + a2 * squared + a1 * interp + a0;
+}
+
+template<class T> inline T interpolCubic2D(const T *data, const Vec3i &size, const Vec3 &pos)
+{
+ const Real px = pos.x - 0.5f, py = pos.y - 0.5f;
+
+ const int x1 = (int)px;
+ const int x2 = x1 + 1;
+ const int x3 = x1 + 2;
+ const int x0 = x1 - 1;
+
+ const int y1 = (int)py;
+ const int y2 = y1 + 1;
+ const int y3 = y1 + 2;
+ const int y0 = y1 - 1;
+
+ if (x0 < 0 || y0 < 0 || x3 >= size[0] || y3 >= size[1]) {
+ return interpol(data, size, 0, pos);
+ }
+
+ const Real xInterp = px - x1;
+ const Real yInterp = py - y1;
+
+ const int y0x = y0 * size[0];
+ const int y1x = y1 * size[0];
+ const int y2x = y2 * size[0];
+ const int y3x = y3 * size[0];
+
+ const T p0[] = {data[x0 + y0x], data[x1 + y0x], data[x2 + y0x], data[x3 + y0x]};
+ const T p1[] = {data[x0 + y1x], data[x1 + y1x], data[x2 + y1x], data[x3 + y1x]};
+ const T p2[] = {data[x0 + y2x], data[x1 + y2x], data[x2 + y2x], data[x3 + y2x]};
+ const T p3[] = {data[x0 + y3x], data[x1 + y3x], data[x2 + y3x], data[x3 + y3x]};
+
+ const T finalPoints[] = {cubicInterp(xInterp, p0),
+ cubicInterp(xInterp, p1),
+ cubicInterp(xInterp, p2),
+ cubicInterp(xInterp, p3)};
+
+ return cubicInterp(yInterp, finalPoints);
+}
+
+template<class T>
+inline T interpolCubic(const T *data, const Vec3i &size, const int Z, const Vec3 &pos)
+{
+ if (Z == 0)
+ return interpolCubic2D(data, size, pos);
+
+ const Real px = pos.x - 0.5f, py = pos.y - 0.5f, pz = pos.z - 0.5f;
+
+ const int x1 = (int)px;
+ const int x2 = x1 + 1;
+ const int x3 = x1 + 2;
+ const int x0 = x1 - 1;
+
+ const int y1 = (int)py;
+ const int y2 = y1 + 1;
+ const int y3 = y1 + 2;
+ const int y0 = y1 - 1;
+
+ const int z1 = (int)pz;
+ const int z2 = z1 + 1;
+ const int z3 = z1 + 2;
+ const int z0 = z1 - 1;
+
+ if (x0 < 0 || y0 < 0 || z0 < 0 || x3 >= size[0] || y3 >= size[1] || z3 >= size[2]) {
+ return interpol(data, size, Z, pos);
+ }
+
+ const Real xInterp = px - x1;
+ const Real yInterp = py - y1;
+ const Real zInterp = pz - z1;
+
+ const int slabsize = size[0] * size[1];
+ const int z0Slab = z0 * slabsize;
+ const int z1Slab = z1 * slabsize;
+ const int z2Slab = z2 * slabsize;
+ const int z3Slab = z3 * slabsize;
+
+ const int y0x = y0 * size[0];
+ const int y1x = y1 * size[0];
+ const int y2x = y2 * size[0];
+ const int y3x = y3 * size[0];
+
+ const int y0z0 = y0x + z0Slab;
+ const int y1z0 = y1x + z0Slab;
+ const int y2z0 = y2x + z0Slab;
+ const int y3z0 = y3x + z0Slab;
+
+ const int y0z1 = y0x + z1Slab;
+ const int y1z1 = y1x + z1Slab;
+ const int y2z1 = y2x + z1Slab;
+ const int y3z1 = y3x + z1Slab;
+
+ const int y0z2 = y0x + z2Slab;
+ const int y1z2 = y1x + z2Slab;
+ const int y2z2 = y2x + z2Slab;
+ const int y3z2 = y3x + z2Slab;
+
+ const int y0z3 = y0x + z3Slab;
+ const int y1z3 = y1x + z3Slab;
+ const int y2z3 = y2x + z3Slab;
+ const int y3z3 = y3x + z3Slab;
+
+ // get the z0 slice
+ const T p0[] = {data[x0 + y0z0], data[x1 + y0z0], data[x2 + y0z0], data[x3 + y0z0]};
+ const T p1[] = {data[x0 + y1z0], data[x1 + y1z0], data[x2 + y1z0], data[x3 + y1z0]};
+ const T p2[] = {data[x0 + y2z0], data[x1 + y2z0], data[x2 + y2z0], data[x3 + y2z0]};
+ const T p3[] = {data[x0 + y3z0], data[x1 + y3z0], data[x2 + y3z0], data[x3 + y3z0]};
+
+ // get the z1 slice
+ const T p4[] = {data[x0 + y0z1], data[x1 + y0z1], data[x2 + y0z1], data[x3 + y0z1]};
+ const T p5[] = {data[x0 + y1z1], data[x1 + y1z1], data[x2 + y1z1], data[x3 + y1z1]};
+ const T p6[] = {data[x0 + y2z1], data[x1 + y2z1], data[x2 + y2z1], data[x3 + y2z1]};
+ const T p7[] = {data[x0 + y3z1], data[x1 + y3z1], data[x2 + y3z1], data[x3 + y3z1]};
+
+ // get the z2 slice
+ const T p8[] = {data[x0 + y0z2], data[x1 + y0z2], data[x2 + y0z2], data[x3 + y0z2]};
+ const T p9[] = {data[x0 + y1z2], data[x1 + y1z2], data[x2 + y1z2], data[x3 + y1z2]};
+ const T p10[] = {data[x0 + y2z2], data[x1 + y2z2], data[x2 + y2z2], data[x3 + y2z2]};
+ const T p11[] = {data[x0 + y3z2], data[x1 + y3z2], data[x2 + y3z2], data[x3 + y3z2]};
+
+ // get the z3 slice
+ const T p12[] = {data[x0 + y0z3], data[x1 + y0z3], data[x2 + y0z3], data[x3 + y0z3]};
+ const T p13[] = {data[x0 + y1z3], data[x1 + y1z3], data[x2 + y1z3], data[x3 + y1z3]};
+ const T p14[] = {data[x0 + y2z3], data[x1 + y2z3], data[x2 + y2z3], data[x3 + y2z3]};
+ const T p15[] = {data[x0 + y3z3], data[x1 + y3z3], data[x2 + y3z3], data[x3 + y3z3]};
+
+ // interpolate
+ const T z0Points[] = {cubicInterp(xInterp, p0),
+ cubicInterp(xInterp, p1),
+ cubicInterp(xInterp, p2),
+ cubicInterp(xInterp, p3)};
+ const T z1Points[] = {cubicInterp(xInterp, p4),
+ cubicInterp(xInterp, p5),
+ cubicInterp(xInterp, p6),
+ cubicInterp(xInterp, p7)};
+ const T z2Points[] = {cubicInterp(xInterp, p8),
+ cubicInterp(xInterp, p9),
+ cubicInterp(xInterp, p10),
+ cubicInterp(xInterp, p11)};
+ const T z3Points[] = {cubicInterp(xInterp, p12),
+ cubicInterp(xInterp, p13),
+ cubicInterp(xInterp, p14),
+ cubicInterp(xInterp, p15)};
+
+ const T finalPoints[] = {cubicInterp(yInterp, z0Points),
+ cubicInterp(yInterp, z1Points),
+ cubicInterp(yInterp, z2Points),
+ cubicInterp(yInterp, z3Points)};
+
+ return cubicInterp(zInterp, finalPoints);
+}
+
+inline Vec3 interpolCubicMAC(const Vec3 *data, const Vec3i &size, const int Z, const Vec3 &pos)
+{
+ // warning - not yet optimized...
+ Real vx = interpolCubic<Vec3>(data, size, Z, pos + Vec3(0.5, 0, 0))[0];
+ Real vy = interpolCubic<Vec3>(data, size, Z, pos + Vec3(0, 0.5, 0))[1];
+ Real vz = 0.f;
+ if (Z != 0)
+ vz = interpolCubic<Vec3>(data, size, Z, pos + Vec3(0, 0, 0.5))[2];
+ return Vec3(vx, vy, vz);
+}
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/helper/util/matrixbase.h b/extern/mantaflow/helper/util/matrixbase.h
new file mode 100644
index 00000000000..9875998f0be
--- /dev/null
+++ b/extern/mantaflow/helper/util/matrixbase.h
@@ -0,0 +1,394 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2015 Kiwon Um, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * GNU General Public License (GPL)
+ * http://www.gnu.org/licenses
+ *
+ * Matrix (3x3) class
+ *
+ ******************************************************************************/
+
+#ifndef MATRIXBASE_H
+#define MATRIXBASE_H
+
+#include "vectorbase.h"
+
+namespace Manta {
+
+template<typename T> class Matrix3x3 {
+ public:
+ // NOTE: default is the identity matrix!
+ explicit Matrix3x3(const T &p00 = 1,
+ const T &p01 = 0,
+ const T &p02 = 0,
+ const T &p10 = 0,
+ const T &p11 = 1,
+ const T &p12 = 0,
+ const T &p20 = 0,
+ const T &p21 = 0,
+ const T &p22 = 1)
+ {
+ v[0][0] = p00;
+ v[0][1] = p01;
+ v[0][2] = p02;
+ v[1][0] = p10;
+ v[1][1] = p11;
+ v[1][2] = p12;
+ v[2][0] = p20;
+ v[2][1] = p21;
+ v[2][2] = p22;
+ }
+
+ explicit Matrix3x3(const Vector3D<T> &diag)
+ {
+ v[0][0] = diag.x;
+ v[0][1] = 0;
+ v[0][2] = 0;
+ v[1][0] = 0;
+ v[1][1] = diag.y;
+ v[1][2] = 0;
+ v[2][0] = 0;
+ v[2][1] = 0;
+ v[2][2] = diag.z;
+ }
+
+ Matrix3x3(const Vector3D<T> &c0, const Vector3D<T> &c1, const Vector3D<T> &c2)
+ {
+ v[0][0] = c0.x;
+ v[0][1] = c1.x;
+ v[0][2] = c2.x;
+ v[1][0] = c0.y;
+ v[1][1] = c1.y;
+ v[1][2] = c2.y;
+ v[2][0] = c0.z;
+ v[2][1] = c1.z;
+ v[2][2] = c2.z;
+ }
+
+ // assignment operators
+ Matrix3x3 &operator+=(const Matrix3x3 &m)
+ {
+ v00 += m.v00;
+ v01 += m.v01;
+ v02 += m.v02;
+ v10 += m.v10;
+ v11 += m.v11;
+ v12 += m.v12;
+ v20 += m.v20;
+ v21 += m.v21;
+ v22 += m.v22;
+ return *this;
+ }
+ Matrix3x3 &operator-=(const Matrix3x3 &m)
+ {
+ v00 -= m.v00;
+ v01 -= m.v01;
+ v02 -= m.v02;
+ v10 -= m.v10;
+ v11 -= m.v11;
+ v12 -= m.v12;
+ v20 -= m.v20;
+ v21 -= m.v21;
+ v22 -= m.v22;
+ return *this;
+ }
+ Matrix3x3 &operator*=(const T s)
+ {
+ v00 *= s;
+ v01 *= s;
+ v02 *= s;
+ v10 *= s;
+ v11 *= s;
+ v12 *= s;
+ v20 *= s;
+ v21 *= s;
+ v22 *= s;
+ return *this;
+ }
+ Matrix3x3 &operator/=(const T s)
+ {
+ v00 /= s;
+ v01 /= s;
+ v02 /= s;
+ v10 /= s;
+ v11 /= s;
+ v12 /= s;
+ v20 /= s;
+ v21 /= s;
+ v22 /= s;
+ return *this;
+ }
+
+ // binary operators
+ Matrix3x3 operator+(const Matrix3x3 &m) const
+ {
+ return Matrix3x3(*this) += m;
+ }
+ Matrix3x3 operator-(const Matrix3x3 &m) const
+ {
+ return Matrix3x3(*this) -= m;
+ }
+ Matrix3x3 operator*(const Matrix3x3 &m) const
+ {
+ return Matrix3x3(v00 * m.v00 + v01 * m.v10 + v02 * m.v20,
+ v00 * m.v01 + v01 * m.v11 + v02 * m.v21,
+ v00 * m.v02 + v01 * m.v12 + v02 * m.v22,
+
+ v10 * m.v00 + v11 * m.v10 + v12 * m.v20,
+ v10 * m.v01 + v11 * m.v11 + v12 * m.v21,
+ v10 * m.v02 + v11 * m.v12 + v12 * m.v22,
+
+ v20 * m.v00 + v21 * m.v10 + v22 * m.v20,
+ v20 * m.v01 + v21 * m.v11 + v22 * m.v21,
+ v20 * m.v02 + v21 * m.v12 + v22 * m.v22);
+ }
+ Matrix3x3 operator*(const T s) const
+ {
+ return Matrix3x3(*this) *= s;
+ }
+ Vector3D<T> operator*(const Vector3D<T> &v) const
+ {
+ return Vector3D<T>(v00 * v.x + v01 * v.y + v02 * v.z,
+ v10 * v.x + v11 * v.y + v12 * v.z,
+ v20 * v.x + v21 * v.y + v22 * v.z);
+ }
+ Vector3D<T> transposedMul(const Vector3D<T> &v) const
+ {
+ // M^T*v
+ return Vector3D<T>(v00 * v.x + v10 * v.y + v20 * v.z,
+ v01 * v.x + v11 * v.y + v21 * v.z,
+ v02 * v.x + v12 * v.y + v22 * v.z);
+ }
+ Matrix3x3 transposedMul(const Matrix3x3 &m) const
+ {
+ // M^T*M
+ return Matrix3x3(v00 * m.v00 + v10 * m.v10 + v20 * m.v20,
+ v00 * m.v01 + v10 * m.v11 + v20 * m.v21,
+ v00 * m.v02 + v10 * m.v12 + v20 * m.v22,
+
+ v01 * m.v00 + v11 * m.v10 + v21 * m.v20,
+ v01 * m.v01 + v11 * m.v11 + v21 * m.v21,
+ v01 * m.v02 + v11 * m.v12 + v21 * m.v22,
+
+ v02 * m.v00 + v12 * m.v10 + v22 * m.v20,
+ v02 * m.v01 + v12 * m.v11 + v22 * m.v21,
+ v02 * m.v02 + v12 * m.v12 + v22 * m.v22);
+ }
+ Matrix3x3 mulTranspose(const Matrix3x3 &m) const
+ {
+ // M*m^T
+ return Matrix3x3(v00 * m.v00 + v01 * m.v01 + v02 * m.v02,
+ v00 * m.v10 + v01 * m.v11 + v02 * m.v12,
+ v00 * m.v20 + v01 * m.v21 + v02 * m.v22,
+
+ v10 * m.v00 + v11 * m.v01 + v12 * m.v02,
+ v10 * m.v10 + v11 * m.v11 + v12 * m.v12,
+ v10 * m.v20 + v11 * m.v21 + v12 * m.v22,
+
+ v20 * m.v00 + v21 * m.v01 + v22 * m.v02,
+ v20 * m.v10 + v21 * m.v11 + v22 * m.v12,
+ v20 * m.v20 + v21 * m.v21 + v22 * m.v22);
+ }
+
+ bool operator==(const Matrix3x3 &m) const
+ {
+ return (v00 == m.v00 && v01 == m.v01 && v02 == m.v02 && v10 == m.v10 && v11 == m.v11 &&
+ v12 == m.v12 && v20 == m.v20 && v21 == m.v21 && v22 == m.v22);
+ }
+
+ const T &operator()(const int r, const int c) const
+ {
+ return v[r][c];
+ }
+ T &operator()(const int r, const int c)
+ {
+ return const_cast<T &>(const_cast<const Matrix3x3 &>(*this)(r, c));
+ }
+
+ T trace() const
+ {
+ return v00 + v11 + v22;
+ }
+ T sumSqr() const
+ {
+ return (v00 * v00 + v01 * v01 + v02 * v02 + v10 * v10 + v11 * v11 + v12 * v12 + v20 * v20 +
+ v21 * v21 + v22 * v22);
+ }
+
+ Real determinant() const
+ {
+ return (v00 * v11 * v22 - v00 * v12 * v21 + v01 * v12 * v20 - v01 * v10 * v22 +
+ v02 * v10 * v21 - v02 * v11 * v20);
+ }
+ Matrix3x3 &transpose()
+ {
+ return *this = transposed();
+ }
+ Matrix3x3 transposed() const
+ {
+ return Matrix3x3(v00, v10, v20, v01, v11, v21, v02, v12, v22);
+ }
+ Matrix3x3 &invert()
+ {
+ return *this = inverse();
+ }
+ Matrix3x3 inverse() const
+ {
+ const Real det = determinant(); // FIXME: assert(det);
+ const Real idet = 1e0 / det;
+ return Matrix3x3(idet * (v11 * v22 - v12 * v21),
+ idet * (v02 * v21 - v01 * v22),
+ idet * (v01 * v12 - v02 * v11),
+ idet * (v12 * v20 - v10 * v22),
+ idet * (v00 * v22 - v02 * v20),
+ idet * (v02 * v10 - v00 * v12),
+ idet * (v10 * v21 - v11 * v20),
+ idet * (v01 * v20 - v00 * v21),
+ idet * (v00 * v11 - v01 * v10));
+ }
+ bool getInverse(Matrix3x3 &inv) const
+ {
+ const Real det = determinant();
+ if (det == 0e0)
+ return false; // FIXME: is it likely to happen the floating error?
+
+ const Real idet = 1e0 / det;
+ inv.v00 = idet * (v11 * v22 - v12 * v21);
+ inv.v01 = idet * (v02 * v21 - v01 * v22);
+ inv.v02 = idet * (v01 * v12 - v02 * v11);
+
+ inv.v10 = idet * (v12 * v20 - v10 * v22);
+ inv.v11 = idet * (v00 * v22 - v02 * v20);
+ inv.v12 = idet * (v02 * v10 - v00 * v12);
+
+ inv.v20 = idet * (v10 * v21 - v11 * v20);
+ inv.v21 = idet * (v01 * v20 - v00 * v21);
+ inv.v22 = idet * (v00 * v11 - v01 * v10);
+
+ return true;
+ }
+
+ Real normOne() const
+ {
+ // the maximum absolute column sum of the matrix
+ return max(std::fabs(v00) + std::fabs(v10) + std::fabs(v20),
+ std::fabs(v01) + std::fabs(v11) + std::fabs(v21),
+ std::fabs(v02) + std::fabs(v12) + std::fabs(v22));
+ }
+ Real normInf() const
+ {
+ // the maximum absolute row sum of the matrix
+ return max(std::fabs(v00) + std::fabs(v01) + std::fabs(v02),
+ std::fabs(v10) + std::fabs(v11) + std::fabs(v12),
+ std::fabs(v20) + std::fabs(v21) + std::fabs(v22));
+ }
+
+ Vector3D<T> eigenvalues() const
+ {
+ Vector3D<T> eigen;
+
+ const Real b = -v00 - v11 - v22;
+ const Real c = v00 * (v11 + v22) + v11 * v22 - v12 * v21 - v01 * v10 - v02 * v20;
+ Real d = -v00 * (v11 * v22 - v12 * v21) - v20 * (v01 * v12 - v11 * v02) -
+ v10 * (v02 * v21 - v22 * v01);
+ const Real f = (3.0 * c - b * b) / 3.0;
+ const Real g = (2.0 * b * b * b - 9.0 * b * c + 27.0 * d) / 27.0;
+ const Real h = g * g / 4.0 + f * f * f / 27.0;
+
+ Real sign;
+ if (h > 0) {
+ Real r = -g / 2.0 + std::sqrt(h);
+ if (r < 0) {
+ r = -r;
+ sign = -1.0;
+ }
+ else
+ sign = 1.0;
+ Real s = sign * std::pow(r, 1.0 / 3.0);
+ Real t = -g / 2.0 - std::sqrt(h);
+ if (t < 0) {
+ t = -t;
+ sign = -1.0;
+ }
+ else
+ sign = 1.0;
+ Real u = sign * std::pow(t, 1.0 / 3.0);
+ eigen[0] = (s + u) - b / 3.0;
+ eigen[1] = eigen[2] = 0;
+ }
+ else if (h == 0) {
+ if (d < 0) {
+ d = -d;
+ sign = -1.0;
+ }
+ sign = 1.0;
+ eigen[0] = -1.0 * sign * std::pow(d, 1.0 / 3.0);
+ eigen[1] = eigen[2] = 0;
+ }
+ else {
+ const Real i = std::sqrt(g * g / 4.0 - h);
+ const Real j = std::pow(i, 1.0 / 3.0);
+ const Real k = std::acos(-g / (2.0 * i));
+ const Real l = -j;
+ const Real m = std::cos(k / 3.0);
+ const Real n = std::sqrt(3.0) * std::sin(k / 3.0);
+ const Real p = -b / 3.0;
+ eigen[0] = 2e0 * j * m + p;
+ eigen[1] = l * (m + n) + p;
+ eigen[2] = l * (m - n) + p;
+ }
+
+ return eigen;
+ }
+
+ static Matrix3x3 I()
+ {
+ return Matrix3x3(1, 0, 0, 0, 1, 0, 0, 0, 1);
+ }
+
+#ifdef _WIN32
+# pragma warning(disable : 4201)
+#endif
+ union {
+ struct {
+ T v00, v01, v02, v10, v11, v12, v20, v21, v22;
+ };
+ T v[3][3];
+ T v1[9];
+ };
+#ifdef _WIN32
+# pragma warning(default : 4201)
+#endif
+};
+
+template<typename T1, typename T> inline Matrix3x3<T> operator*(const T1 s, const Matrix3x3<T> &m)
+{
+ return m * static_cast<T>(s);
+}
+
+template<typename T> inline Matrix3x3<T> crossProductMatrix(const Vector3D<T> &v)
+{
+ return Matrix3x3<T>(0, -v.z, v.y, v.z, 0, -v.x, -v.y, v.x, 0);
+}
+
+template<typename T> inline Matrix3x3<T> outerProduct(const Vector3D<T> &a, const Vector3D<T> &b)
+{
+ return Matrix3x3<T>(a.x * b.x,
+ a.x * b.y,
+ a.x * b.z,
+ a.y * b.x,
+ a.y * b.y,
+ a.y * b.z,
+ a.z * b.x,
+ a.z * b.y,
+ a.z * b.z);
+}
+
+typedef Matrix3x3<Real> Matrix3x3f;
+
+} // namespace Manta
+
+#endif /* MATRIXBASE_H */
diff --git a/extern/mantaflow/helper/util/mcubes.h b/extern/mantaflow/helper/util/mcubes.h
new file mode 100644
index 00000000000..bd1c780e932
--- /dev/null
+++ b/extern/mantaflow/helper/util/mcubes.h
@@ -0,0 +1,308 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Marching cubes lookup indices
+ *
+ ******************************************************************************/
+
+#ifndef _MCUBES_H_
+#define _MCUBES_H_
+
+static const int mcEdges[24] = {0, 1, 1, 2, 2, 3, 3, 0, 4, 5, 5, 6,
+ 6, 7, 7, 4, 0, 4, 1, 5, 2, 6, 3, 7};
+
+static const int cubieOffsetX[8] = {0, 1, 1, 0, 0, 1, 1, 0};
+static const int cubieOffsetY[8] = {0, 0, 1, 1, 0, 0, 1, 1};
+static const int cubieOffsetZ[8] = {0, 0, 0, 0, 1, 1, 1, 1};
+
+/* which edges are needed ? */
+/* cf. http://astronomy.swin.edu.au/~pbourke/modelling/polygonise/ */
+static const short mcEdgeTable[256] = {
+ 0x0, 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c, 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a,
+ 0xd03, 0xe09, 0xf00, 0x190, 0x99, 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c, 0x99c, 0x895,
+ 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90, 0x230, 0x339, 0x33, 0x13a, 0x636, 0x73f, 0x435,
+ 0x53c, 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30, 0x3a0, 0x2a9, 0x1a3, 0xaa,
+ 0x7a6, 0x6af, 0x5a5, 0x4ac, 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0, 0x460,
+ 0x569, 0x663, 0x76a, 0x66, 0x16f, 0x265, 0x36c, 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963,
+ 0xa69, 0xb60, 0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff, 0x3f5, 0x2fc, 0xdfc, 0xcf5, 0xfff,
+ 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0, 0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55, 0x15c,
+ 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950, 0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6,
+ 0x2cf, 0x1c5, 0xcc, 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0, 0x8c0, 0x9c9,
+ 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc, 0xcc, 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9,
+ 0x7c0, 0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c, 0x15c, 0x55, 0x35f, 0x256,
+ 0x55a, 0x453, 0x759, 0x650, 0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc, 0x2fc,
+ 0x3f5, 0xff, 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0, 0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f,
+ 0xd65, 0xc6c, 0x36c, 0x265, 0x16f, 0x66, 0x76a, 0x663, 0x569, 0x460, 0xca0, 0xda9, 0xea3,
+ 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac, 0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa, 0x1a3, 0x2a9, 0x3a0,
+ 0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c, 0x53c, 0x435, 0x73f, 0x636, 0x13a,
+ 0x33, 0x339, 0x230, 0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c, 0x69c, 0x795,
+ 0x49f, 0x596, 0x29a, 0x393, 0x99, 0x190, 0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905,
+ 0x80c, 0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0};
+
+/* triangles for the 256 intersection possibilities */
+/* cf. http://astronomy.swin.edu.au/~pbourke/modelling/polygonise/ */
+static const short mcTriTable[256][16] = {
+ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
+ {3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
+ {3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
+ {3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
+ {9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
+ {9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
+ {2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
+ {8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
+ {9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
+ {4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
+ {3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
+ {1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
+ {4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
+ {4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
+ {9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
+ {5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
+ {2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
+ {9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
+ {0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
+ {2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
+ {10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
+ {4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
+ {5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
+ {5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
+ {9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
+ {0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
+ {1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
+ {10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
+ {8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
+ {2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
+ {7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
+ {9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
+ {2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
+ {11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
+ {9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
+ {5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
+ {11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
+ {11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
+ {1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
+ {9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
+ {5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
+ {2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
+ {0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
+ {5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
+ {6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
+ {3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
+ {6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
+ {5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
+ {1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
+ {10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
+ {6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
+ {8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
+ {7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
+ {3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
+ {5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
+ {0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
+ {9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
+ {8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
+ {5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
+ {0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
+ {6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
+ {10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
+ {10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
+ {8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
+ {1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
+ {3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
+ {0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
+ {10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
+ {3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
+ {6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
+ {9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
+ {8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
+ {3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
+ {6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
+ {0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
+ {10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
+ {10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
+ {2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
+ {7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
+ {7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
+ {2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
+ {1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
+ {11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
+ {8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
+ {0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
+ {7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
+ {10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
+ {2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
+ {6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
+ {7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
+ {2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
+ {1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
+ {10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
+ {10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
+ {0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
+ {7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
+ {6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
+ {8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
+ {6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
+ {4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
+ {10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
+ {8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
+ {0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
+ {1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
+ {8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
+ {10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
+ {4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
+ {10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
+ {5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
+ {11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
+ {9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
+ {6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
+ {7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
+ {3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
+ {7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
+ {9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
+ {3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
+ {6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
+ {9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
+ {1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
+ {4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
+ {7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
+ {6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
+ {3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
+ {0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
+ {6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
+ {0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
+ {11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
+ {6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
+ {5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
+ {9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
+ {1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
+ {1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
+ {10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
+ {0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
+ {5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
+ {10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
+ {11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
+ {9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
+ {7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
+ {2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
+ {8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
+ {9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
+ {9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
+ {1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
+ {9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
+ {9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
+ {5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
+ {0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
+ {10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
+ {2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
+ {0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
+ {0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
+ {9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
+ {5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
+ {3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
+ {5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
+ {8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
+ {9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
+ {0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
+ {1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
+ {3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
+ {4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
+ {9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
+ {11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
+ {11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
+ {2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
+ {9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
+ {3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
+ {1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
+ {4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
+ {4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
+ {0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
+ {3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
+ {3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
+ {0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
+ {9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
+ {1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
+ {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}};
+
+#endif \ No newline at end of file
diff --git a/extern/mantaflow/helper/util/quaternion.h b/extern/mantaflow/helper/util/quaternion.h
new file mode 100644
index 00000000000..c4e161baee2
--- /dev/null
+++ b/extern/mantaflow/helper/util/quaternion.h
@@ -0,0 +1,103 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Basic quaternion class
+ *
+ ******************************************************************************/
+
+#ifndef _QUATERNION_H
+#define _QUATERNION_H
+
+#include "vectorbase.h"
+
+namespace Manta {
+
+//! Very basic quaternion class
+class Quaternion {
+ public:
+ //! default constructor
+ Quaternion() : x(0), y(0), z(0), w(0)
+ {
+ }
+
+ //! copy constructor
+ Quaternion(const Quaternion &q) : x(q.x), y(q.y), z(q.z), w(q.w)
+ {
+ }
+
+ //! construct a quaternion from members
+ Quaternion(Real _x, Real _y, Real _z, Real _w) : x(_x), y(_y), z(_z), w(_w)
+ {
+ }
+
+ //! construct a quaternion from imag/real parts
+ Quaternion(Vec3 i, Real r) : x(i.x), y(i.y), z(i.z), w(r)
+ {
+ }
+
+ //! Assign operator
+ inline Quaternion &operator=(const Quaternion &q)
+ {
+ x = q.x;
+ y = q.y;
+ z = q.z;
+ w = q.w;
+ return *this;
+ }
+
+ //! Assign multiplication operator
+ inline Quaternion &operator*=(const Real a)
+ {
+ x *= a;
+ y *= a;
+ z *= a;
+ w *= a;
+ return *this;
+ }
+
+ //! return inverse quaternion
+ inline Quaternion inverse() const
+ {
+ Real mag = 1.0 / (x * x + y * y + z * z + w * w);
+ return Quaternion(-x * mag, -y * mag, -z * mag, w * mag);
+ }
+
+ //! imaginary part accessor
+ inline Vec3 imag()
+ {
+ return Vec3(x, y, z);
+ }
+
+ // imaginary part
+ Real x;
+ Real y;
+ Real z;
+
+ // real part
+ Real w;
+};
+
+//! Multiplication operator
+inline Quaternion operator*(const Quaternion &q1, const Quaternion &q2)
+{
+ return Quaternion(q2.w * q1.x + q2.x * q1.w + q2.y * q1.z - q2.z * q1.y,
+ q2.w * q1.y + q2.y * q1.w + q2.z * q1.x - q2.x * q1.z,
+ q2.w * q1.z + q2.z * q1.w + q2.x * q1.y - q2.y * q1.x,
+ q2.w * q1.w - q2.x * q1.x - q2.y * q1.y - q2.z * q1.z);
+}
+
+//! Multiplication operator
+inline Quaternion operator*(const Quaternion &q, const Real a)
+{
+ return Quaternion(q.x * a, q.y * a, q.z * a, q.w * a);
+}
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/helper/util/randomstream.h b/extern/mantaflow/helper/util/randomstream.h
new file mode 100644
index 00000000000..35b9c7d8858
--- /dev/null
+++ b/extern/mantaflow/helper/util/randomstream.h
@@ -0,0 +1,429 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Random numbers
+ *
+ * Based on an example by Makoto Matsumoto, Takuji Nishimura, Shawn Cokus, and Richard J. Wagner
+ *
+ ******************************************************************************/
+
+#ifndef _RANDOMSTREAM_H
+#define _RANDOMSTREAM_H
+
+namespace Manta {
+
+#include <iostream>
+#include <stdio.h>
+#include <time.h>
+#include "vectorbase.h"
+
+class MTRand {
+ // Data
+ public:
+ typedef unsigned long uint32; // unsigned integer type, at least 32 bits
+
+ enum { N = 624 }; // length of state vector
+ enum { SAVE = N + 1 }; // length of array for save()
+
+ protected:
+ enum { M = 397 }; // period parameter
+
+ uint32 state[N]; // internal state
+ uint32 *pNext; // next value to get from state
+ int left; // number of values left before reload needed
+
+ // Methods
+ public:
+ MTRand(const uint32 &oneSeed); // initialize with a simple uint32
+ MTRand(uint32 *const bigSeed, uint32 const seedLength = N); // or an array
+ MTRand(); // auto-initialize with /dev/urandom or time() and clock()
+
+ // Do NOT use for CRYPTOGRAPHY without securely hashing several returned
+ // values together, otherwise the generator state can be learned after
+ // reading 624 consecutive values.
+
+ // Access to 32-bit random numbers
+ double rand(); // real number in [0,1]
+ double rand(const double &n); // real number in [0,n]
+ double randExc(); // real number in [0,1)
+ double randExc(const double &n); // real number in [0,n)
+ double randDblExc(); // real number in (0,1)
+ double randDblExc(const double &n); // real number in (0,n)
+ uint32 randInt(); // integer in [0,2^32-1]
+ uint32 randInt(const uint32 &n); // integer in [0,n] for n < 2^32
+ double operator()()
+ {
+ return rand();
+ } // same as rand()
+
+ // Access to 53-bit random numbers (capacity of IEEE double precision)
+ double rand53(); // real number in [0,1)
+
+ // Access to nonuniform random number distributions
+ double randNorm(const double &mean = 0.0, const double &variance = 1.0);
+
+ // Re-seeding functions with same behavior as initializers
+ void seed(const uint32 oneSeed);
+ void seed(uint32 *const bigSeed, const uint32 seedLength = N);
+ void seed();
+
+ // Saving and loading generator state
+ void save(uint32 *saveArray) const; // to array of size SAVE
+ void load(uint32 *const loadArray); // from such array
+ friend std::ostream &operator<<(std::ostream &os, const MTRand &mtrand);
+ friend std::istream &operator>>(std::istream &is, MTRand &mtrand);
+
+ protected:
+ void initialize(const uint32 oneSeed);
+ void reload();
+ uint32 hiBit(const uint32 &u) const
+ {
+ return u & 0x80000000UL;
+ }
+ uint32 loBit(const uint32 &u) const
+ {
+ return u & 0x00000001UL;
+ }
+ uint32 loBits(const uint32 &u) const
+ {
+ return u & 0x7fffffffUL;
+ }
+ uint32 mixBits(const uint32 &u, const uint32 &v) const
+ {
+ return hiBit(u) | loBits(v);
+ }
+ uint32 twist(const uint32 &m, const uint32 &s0, const uint32 &s1) const
+ {
+ return m ^ (mixBits(s0, s1) >> 1) ^ (-loBit(s1) & 0x9908b0dfUL);
+ }
+ static uint32 hash(time_t t, clock_t c);
+};
+
+inline MTRand::MTRand(const uint32 &oneSeed)
+{
+ seed(oneSeed);
+}
+
+inline MTRand::MTRand(uint32 *const bigSeed, const uint32 seedLength)
+{
+ seed(bigSeed, seedLength);
+}
+
+inline MTRand::MTRand()
+{
+ seed();
+}
+
+inline double MTRand::rand()
+{
+ return double(randInt()) * (1.0 / 4294967295.0);
+}
+
+inline double MTRand::rand(const double &n)
+{
+ return rand() * n;
+}
+
+inline double MTRand::randExc()
+{
+ return double(randInt()) * (1.0 / 4294967296.0);
+}
+
+inline double MTRand::randExc(const double &n)
+{
+ return randExc() * n;
+}
+
+inline double MTRand::randDblExc()
+{
+ return (double(randInt()) + 0.5) * (1.0 / 4294967296.0);
+}
+
+inline double MTRand::randDblExc(const double &n)
+{
+ return randDblExc() * n;
+}
+
+inline double MTRand::rand53()
+{
+ uint32 a = randInt() >> 5, b = randInt() >> 6;
+ return (a * 67108864.0 + b) * (1.0 / 9007199254740992.0); // by Isaku Wada
+}
+
+inline double MTRand::randNorm(const double &mean, const double &variance)
+{
+ // Return a real number from a normal (Gaussian) distribution with given
+ // mean and variance by Box-Muller method
+ double r = sqrt(-2.0 * log(1.0 - randDblExc())) * variance;
+ double phi = 2.0 * 3.14159265358979323846264338328 * randExc();
+ return mean + r * cos(phi);
+}
+
+inline MTRand::uint32 MTRand::randInt()
+{
+ // Pull a 32-bit integer from the generator state
+ // Every other access function simply transforms the numbers extracted here
+
+ if (left == 0)
+ reload();
+ --left;
+
+ uint32 s1;
+ s1 = *pNext++;
+ s1 ^= (s1 >> 11);
+ s1 ^= (s1 << 7) & 0x9d2c5680UL;
+ s1 ^= (s1 << 15) & 0xefc60000UL;
+ return (s1 ^ (s1 >> 18));
+}
+
+inline MTRand::uint32 MTRand::randInt(const uint32 &n)
+{
+ // Find which bits are used in n
+ // Optimized by Magnus Jonsson (magnus@smartelectronix.com)
+ uint32 used = n;
+ used |= used >> 1;
+ used |= used >> 2;
+ used |= used >> 4;
+ used |= used >> 8;
+ used |= used >> 16;
+
+ // Draw numbers until one is found in [0,n]
+ uint32 i;
+ do
+ i = randInt() & used; // toss unused bits to shorten search
+ while (i > n);
+ return i;
+}
+
+inline void MTRand::seed(const uint32 oneSeed)
+{
+ // Seed the generator with a simple uint32
+ initialize(oneSeed);
+ reload();
+}
+
+inline void MTRand::seed(uint32 *const bigSeed, const uint32 seedLength)
+{
+ // Seed the generator with an array of uint32's
+ // There are 2^19937-1 possible initial states. This function allows
+ // all of those to be accessed by providing at least 19937 bits (with a
+ // default seed length of N = 624 uint32's). Any bits above the lower 32
+ // in each element are discarded.
+ // Just call seed() if you want to get array from /dev/urandom
+ initialize(19650218UL);
+ const unsigned int Nenum = N;
+ int i = 1;
+ uint32 j = 0;
+ int k = (Nenum > seedLength ? Nenum : seedLength);
+ for (; k; --k) {
+ state[i] = state[i] ^ ((state[i - 1] ^ (state[i - 1] >> 30)) * 1664525UL);
+ state[i] += (bigSeed[j] & 0xffffffffUL) + j;
+ state[i] &= 0xffffffffUL;
+ ++i;
+ ++j;
+ if (i >= N) {
+ state[0] = state[N - 1];
+ i = 1;
+ }
+ if (j >= seedLength)
+ j = 0;
+ }
+ for (k = N - 1; k; --k) {
+ state[i] = state[i] ^ ((state[i - 1] ^ (state[i - 1] >> 30)) * 1566083941UL);
+ state[i] -= i;
+ state[i] &= 0xffffffffUL;
+ ++i;
+ if (i >= N) {
+ state[0] = state[N - 1];
+ i = 1;
+ }
+ }
+ state[0] = 0x80000000UL; // MSB is 1, assuring non-zero initial array
+ reload();
+}
+
+inline void MTRand::seed()
+{
+ // Seed the generator with an array from /dev/urandom if available
+ // Otherwise use a hash of time() and clock() values
+
+ // First try getting an array from /dev/urandom
+ FILE *urandom = fopen("/dev/urandom", "rb");
+ if (urandom) {
+ uint32 bigSeed[N];
+ uint32 *s = bigSeed;
+ int i = N;
+ bool success = true;
+ while (success && i--)
+ success = fread(s++, sizeof(uint32), 1, urandom);
+ fclose(urandom);
+ if (success) {
+ seed(bigSeed, N);
+ return;
+ }
+ }
+
+ // Was not successful, so use time() and clock() instead
+ seed(hash(time(NULL), clock()));
+}
+
+inline void MTRand::initialize(const uint32 intseed)
+{
+ // Initialize generator state with seed
+ // See Knuth TAOCP Vol 2, 3rd Ed, p.106 for multiplier.
+ // In previous versions, most significant bits (MSBs) of the seed affect
+ // only MSBs of the state array. Modified 9 Jan 2002 by Makoto Matsumoto.
+ uint32 *s = state;
+ uint32 *r = state;
+ int i = 1;
+ *s++ = intseed & 0xffffffffUL;
+ for (; i < N; ++i) {
+ *s++ = (1812433253UL * (*r ^ (*r >> 30)) + i) & 0xffffffffUL;
+ r++;
+ }
+}
+
+inline void MTRand::reload()
+{
+ // Generate N new values in state
+ // Made clearer and faster by Matthew Bellew (matthew.bellew@home.com)
+ uint32 *p = state;
+ int i;
+ for (i = N - M; i--; ++p)
+ *p = twist(p[M], p[0], p[1]);
+ for (i = M; --i; ++p)
+ *p = twist(p[M - N], p[0], p[1]);
+ *p = twist(p[M - N], p[0], state[0]);
+
+ left = N, pNext = state;
+}
+
+inline MTRand::uint32 MTRand::hash(time_t t, clock_t c)
+{
+ // Get a uint32 from t and c
+ // Better than uint32(x) in case x is floating point in [0,1]
+ // Based on code by Lawrence Kirby (fred@genesis.demon.co.uk)
+
+ static uint32 differ = 0; // guarantee time-based seeds will change
+
+ uint32 h1 = 0;
+ unsigned char *p = (unsigned char *)&t;
+ for (size_t i = 0; i < sizeof(t); ++i) {
+ h1 *= std::numeric_limits<unsigned char>::max() + 2U;
+ h1 += p[i];
+ }
+ uint32 h2 = 0;
+ p = (unsigned char *)&c;
+ for (size_t j = 0; j < sizeof(c); ++j) {
+ h2 *= std::numeric_limits<unsigned char>::max() + 2U;
+ h2 += p[j];
+ }
+ return (h1 + differ++) ^ h2;
+}
+
+inline void MTRand::save(uint32 *saveArray) const
+{
+ uint32 *sa = saveArray;
+ const uint32 *s = state;
+ int i = N;
+ for (; i--; *sa++ = *s++) {
+ }
+ *sa = left;
+}
+
+inline void MTRand::load(uint32 *const loadArray)
+{
+ uint32 *s = state;
+ uint32 *la = loadArray;
+ int i = N;
+ for (; i--; *s++ = *la++) {
+ }
+ left = *la;
+ pNext = &state[N - left];
+}
+
+inline std::ostream &operator<<(std::ostream &os, const MTRand &mtrand)
+{
+ const MTRand::uint32 *s = mtrand.state;
+ int i = mtrand.N;
+ for (; i--; os << *s++ << "\t") {
+ }
+ return os << mtrand.left;
+}
+
+inline std::istream &operator>>(std::istream &is, MTRand &mtrand)
+{
+ MTRand::uint32 *s = mtrand.state;
+ int i = mtrand.N;
+ for (; i--; is >> *s++) {
+ }
+ is >> mtrand.left;
+ mtrand.pNext = &mtrand.state[mtrand.N - mtrand.left];
+ return is;
+}
+
+// simple interface to mersenne twister
+class RandomStream {
+ public:
+ inline RandomStream(long seed) : mtr(seed){};
+ ~RandomStream()
+ {
+ }
+
+ /*! get a random number from the stream */
+ inline double getDouble(void)
+ {
+ return mtr.rand();
+ };
+ inline float getFloat(void)
+ {
+ return (float)mtr.rand();
+ };
+
+ inline float getFloat(float min, float max)
+ {
+ return mtr.rand(max - min) + min;
+ };
+ inline float getRandNorm(float mean, float var)
+ {
+ return mtr.randNorm(mean, var);
+ };
+
+#if FLOATINGPOINT_PRECISION == 1
+ inline Real getReal()
+ {
+ return getFloat();
+ }
+
+#else
+ inline Real getReal()
+ {
+ return getDouble();
+ }
+#endif
+
+ inline Vec3 getVec3()
+ {
+ Real a = getReal(), b = getReal(), c = getReal();
+ return Vec3(a, b, c);
+ }
+ inline Vec3 getVec3Norm()
+ {
+ Vec3 a = getVec3();
+ normalize(a);
+ return a;
+ }
+
+ private:
+ MTRand mtr;
+};
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/helper/util/rcmatrix.h b/extern/mantaflow/helper/util/rcmatrix.h
new file mode 100644
index 00000000000..39951cece2e
--- /dev/null
+++ b/extern/mantaflow/helper/util/rcmatrix.h
@@ -0,0 +1,1112 @@
+//
+// Helper matrix class, RCMatrix.h
+// Required for PD optimizations (guiding)
+// Thanks to Ryoichi Ando, and Robert Bridson
+//
+
+#ifndef RCMATRIX3_H
+#define RCMATRIX3_H
+
+#include <iterator>
+#include <cassert>
+#include <vector>
+#include <fstream>
+
+// index type
+#define int_index long long
+
+// link to omp & tbb for now
+#if OPENMP == 1 || TBB == 1
+# define MANTA_ENABLE_PARALLEL 0
+// allow the preconditioner to be computed in parallel? (can lead to slightly non-deterministic
+// results)
+# define MANTA_ENABLE_PARALLEL_PC 0
+// use c++11 code?
+# define MANTA_USE_CPP11 1
+#else
+# define MANTA_ENABLE_PARALLEL 0
+# define MANTA_ENABLE_PARALLEL_PC 0
+# define MANTA_USE_CPP11 0
+#endif
+
+#if MANTA_ENABLE_PARALLEL == 1
+# include <thread>
+# include <algorithm>
+
+static const int manta_num_threads = std::thread::hardware_concurrency();
+
+// For clang
+# define parallel_for(total_size) \
+ { \
+ int_index parallel_array_size = (total_size); \
+ std::vector<std::thread> threads(manta_num_threads); \
+ for (int thread_number = 0; thread_number < manta_num_threads; thread_number++) { \
+ threads[thread_number] = std::thread([&](int_index parallel_array_size, int_index thread_number ) { \
+ for( int_index parallel_index=thread_number; parallel_index < parallel_array_size; parallel_index += manta_num_threads ) {
+
+# define parallel_end \
+ } \
+ },parallel_array_size,thread_number); \
+ } \
+ for (auto &thread : threads) \
+ thread.join(); \
+ }
+
+# define parallel_block \
+ { \
+ std::vector<std::thread> threads; \
+ {
+
+# define do_parallel threads.push_back( std::thread([&]() {
+# define do_end \
+ } ) );
+
+# define block_end \
+ } \
+ for (auto &thread : threads) { \
+ thread.join(); \
+ } \
+ }
+
+#else
+
+# define parallel_for(size) \
+ { \
+ int thread_number = 0; \
+ int_index parallel_index = 0; \
+ for (int_index parallel_index = 0; parallel_index < (int_index)size; parallel_index++) {
+# define parallel_end \
+ } \
+ thread_number = parallel_index = 0; \
+ }
+
+# define parallel_block
+# define do_parallel
+# define do_end
+# define block_end
+
+#endif
+
+#include "vectorbase.h"
+
+namespace Manta {
+
+static const unsigned default_expected_none_zeros = 7;
+
+template<class N, class T> struct RCMatrix {
+ struct RowEntry {
+ std::vector<N> index;
+ std::vector<T> value;
+ };
+ RCMatrix() : n(0), expected_none_zeros(default_expected_none_zeros)
+ {
+ }
+ RCMatrix(N size, N expected_none_zeros = default_expected_none_zeros)
+ : n(0), expected_none_zeros(expected_none_zeros)
+ {
+ resize(size);
+ }
+ RCMatrix(const RCMatrix &m) : n(0), expected_none_zeros(default_expected_none_zeros)
+ {
+ init(m);
+ }
+ RCMatrix &operator=(const RCMatrix &m)
+ {
+ expected_none_zeros = m.expected_none_zeros;
+ init(m);
+ return *this;
+ }
+ RCMatrix &operator=(RCMatrix &&m)
+ {
+ matrix = m.matrix;
+ offsets = m.offsets;
+ expected_none_zeros = m.expected_none_zeros;
+ n = m.n;
+ m.n = 0;
+ m.matrix.clear();
+ m.offsets.clear();
+ return *this;
+ }
+ RCMatrix(RCMatrix &&m)
+ : n(m.n), expected_none_zeros(m.expected_none_zeros), matrix(m.matrix), offsets(m.offsets)
+ {
+ m.n = 0;
+ m.matrix.clear();
+ m.offsets.clear();
+ }
+ void init(const RCMatrix &m)
+ {
+ expected_none_zeros = m.expected_none_zeros;
+ resize(m.n);
+ parallel_for(n)
+ {
+ N i = parallel_index;
+ if (m.matrix[i]) {
+ alloc_row(i);
+ matrix[i]->index = m.matrix[i]->index;
+ matrix[i]->value = m.matrix[i]->value;
+ }
+ else {
+ dealloc_row(i);
+ }
+ }
+ parallel_end
+ }
+ ~RCMatrix()
+ {
+ clear();
+ }
+ void clear()
+ {
+ for (N i = 0; i < n; i++) {
+ dealloc_row(i);
+ matrix[i] = NULL;
+ if (offsets.size())
+ offsets[i] = 0;
+ }
+ };
+ bool empty(N i) const
+ {
+ return matrix[i] == NULL;
+ }
+ N row_nonzero_size(N i) const
+ {
+ return matrix[i] == NULL ? 0 : matrix[i]->index.size();
+ }
+ void resize(N size, N expected_none_zeros = 0)
+ {
+ if (!expected_none_zeros) {
+ expected_none_zeros = this->expected_none_zeros;
+ }
+ if (n > size) {
+ // Shrinking
+ for (N i = size ? size - 1 : 0; i < n; i++)
+ dealloc_row(i);
+ matrix.resize(size);
+ }
+ else if (n < size) {
+ // Expanding
+ matrix.resize(size);
+ for (N i = n; i < size; i++) {
+ matrix[i] = NULL;
+ if (offsets.size())
+ offsets[i] = 0;
+ }
+ }
+ n = size;
+ }
+ void alloc_row(N i)
+ {
+ assert(i < n);
+ if (!matrix[i]) {
+ matrix[i] = new RowEntry;
+ matrix[i]->index.reserve(expected_none_zeros);
+ matrix[i]->value.reserve(expected_none_zeros);
+ if (offsets.size())
+ offsets[i] = 0;
+ }
+ }
+ void dealloc_row(N i)
+ {
+ assert(i < n);
+ if (matrix[i]) {
+ if (offsets.empty() || !offsets[i])
+ delete matrix[i];
+ matrix[i] = NULL;
+ if (offsets.size())
+ offsets[i] = 0;
+ }
+ }
+ T operator()(N i, N j) const
+ {
+ assert(i < n);
+ for (Iterator it = row_begin(i); it; ++it) {
+ if (it.index() == j)
+ return it.value();
+ }
+ return T(0.0);
+ }
+ void add_to_element_checked(N i, N j, T val)
+ {
+ if ((i < 0) || (j < 0) || (i >= n) || (j >= n))
+ return;
+ add_to_element(i, j, val);
+ }
+ void add_to_element(N i, N j, T increment_value)
+ {
+ if (std::abs(increment_value) > VECTOR_EPSILON) {
+ assert(i < n);
+ assert(offsets.empty() || offsets[i] == 0);
+ alloc_row(i);
+ std::vector<N> &index = matrix[i]->index;
+ std::vector<T> &value = matrix[i]->value;
+ for (N k = 0; k < (N)index.size(); ++k) {
+ if (index[k] == j) {
+ value[k] += increment_value;
+ return;
+ }
+ else if (index[k] > j) {
+ index.insert(index.begin() + k, j);
+ value.insert(value.begin() + k, increment_value);
+ return;
+ }
+ }
+ index.push_back(j);
+ value.push_back(increment_value);
+ }
+ }
+
+ void set_element(N i, N j, T v)
+ {
+ if (std::abs(v) > VECTOR_EPSILON) {
+ assert(i < n);
+ assert(offsets.empty() || offsets[i] == 0);
+ alloc_row(i);
+ std::vector<N> &index = matrix[i]->index;
+ std::vector<T> &value = matrix[i]->value;
+ for (N k = 0; k < (N)index.size(); ++k) {
+ if (index[k] == j) {
+ value[k] = v;
+ return;
+ }
+ else if (index[k] > j) {
+ index.insert(index.begin() + k, j);
+ value.insert(value.begin() + k, v);
+ return;
+ }
+ }
+ index.push_back(j);
+ value.push_back(v);
+ }
+ }
+
+ // Make sure that j is the biggest column in the row, no duplication allowed
+ void fix_element(N i, N j, T v)
+ {
+ if (std::abs(v) > VECTOR_EPSILON) {
+ assert(i < n);
+ assert(offsets.empty() || offsets[i] == 0);
+ alloc_row(i);
+ std::vector<N> &index = matrix[i]->index;
+ std::vector<T> &value = matrix[i]->value;
+ index.push_back(j);
+ value.push_back(v);
+ }
+ }
+ int_index trim_zero_entries(double e = VECTOR_EPSILON)
+ {
+ std::vector<int_index> deleted_entries(n, 0);
+ parallel_for(n)
+ {
+ N i = parallel_index;
+ if (matrix[i]) {
+ std::vector<N> &index = matrix[i]->index;
+ std::vector<T> &value = matrix[i]->value;
+ N head = 0;
+ N k = 0;
+ for (k = 0; k < index.size(); ++k) {
+ if (std::abs(value[k]) > e) {
+ index[head] = index[k];
+ value[head] = value[k];
+ ++head;
+ }
+ }
+ if (head != k) {
+ index.erase(index.begin() + head, index.end());
+ value.erase(value.begin() + head, value.end());
+ deleted_entries[i] += k - head;
+ }
+ if (!offsets.size() && !head) {
+ remove_row(i);
+ }
+ }
+ }
+ parallel_end
+ //
+ int_index sum_deleted(0);
+ for (int_index i = 0; i < n; i++)
+ sum_deleted += deleted_entries[i];
+ return sum_deleted;
+ }
+ void remove_reference(N i)
+ {
+ if (offsets.size() && offsets[i] && matrix[i]) {
+ RowEntry *save = matrix[i];
+ matrix[i] = new RowEntry;
+ *matrix[i] = *save;
+ for (N &index : matrix[i]->index)
+ index += offsets[i];
+ offsets[i] = 0;
+ }
+ }
  //! Remove all entries of row i (the row becomes empty/unallocated).
  void remove_row(N i)
  {
    dealloc_row(i);
  }
+ bool is_symmetric(double e = VECTOR_EPSILON) const
+ {
+ std::vector<bool> flags(n, true);
+ parallel_for(n)
+ {
+ N i = parallel_index;
+ bool flag = true;
+ for (Iterator it = row_begin(i); it; ++it) {
+ N index = it.index();
+ T value = it.value();
+ if (std::abs(value) > e) {
+ bool found_entry = false;
+ for (Iterator it_i = row_begin(index); it_i; ++it_i) {
+ if (it_i.index() == i) {
+ found_entry = true;
+ if (std::abs(value - it_i.value()) > e) {
+ flag = false;
+ break;
+ }
+ }
+ }
+ if (!found_entry)
+ flag = false;
+ if (!flag)
+ break;
+ }
+ }
+ flags[i] = flag;
+ }
+ parallel_end for (N i = 0; i < matrix.size(); ++i)
+ {
+ if (!flags[i])
+ return false;
+ }
+ return true;
+ }
+
+ void expand()
+ {
+ if (offsets.empty())
+ return;
+ for (N i = 1; i < n; i++) {
+ if (offsets[i]) {
+ RowEntry *ref = matrix[i];
+ matrix[i] = new RowEntry;
+ *matrix[i] = *ref;
+ for (N j = 0; j < (N)matrix[i]->index.size(); j++) {
+ matrix[i]->index[j] += offsets[i];
+ }
+ }
+ }
+ offsets.resize(0);
+ }
+
+ N column(N i) const
+ {
+ return empty(i) ? 0 : row_begin(i, row_nonzero_size(i) - 1).index();
+ }
+ N getColumnSize() const
+ {
+ N max_column(0);
+ auto column = [&](N i) {
+ N max_column(0);
+ for (Iterator it = row_begin(i); it; ++it)
+ max_column = std::max(max_column, it.index());
+ return max_column + 1;
+ };
+ for (N i = 0; i < n; i++)
+ max_column = std::max(max_column, column(i));
+ return max_column;
+ }
+ N getNonzeroSize() const
+ {
+ N nonzeros(0);
+ for (N i = 0; i < n; ++i) {
+ nonzeros += row_nonzero_size(i);
+ }
+ return nonzeros;
+ }
+ class Iterator : std::iterator<std::input_iterator_tag, T> {
+ public:
+ Iterator(const RowEntry *rowEntry, N k, N offset) : rowEntry(rowEntry), k(k), offset(offset)
+ {
+ }
+ operator bool() const
+ {
+ return rowEntry != NULL && k < (N)rowEntry->index.size();
+ }
+ Iterator &operator++()
+ {
+ ++k;
+ return *this;
+ }
+ T value() const
+ {
+ return rowEntry->value[k];
+ }
+ N index() const
+ {
+ return rowEntry->index[k] + offset;
+ }
+ N index_raw() const
+ {
+ return rowEntry->index[k];
+ }
+ N size() const
+ {
+ return rowEntry == NULL ? 0 : rowEntry->index.size();
+ }
+
+ protected:
+ const RowEntry *rowEntry;
+ N k, offset;
+ };
+ Iterator row_begin(N n, N k = 0) const
+ {
+ return Iterator(matrix[n], k, offsets.size() ? offsets[n] : 0);
+ }
+ class DynamicIterator : public Iterator {
+ public:
+ DynamicIterator(RowEntry *rowEntry, N k, N offset)
+ : rowEntry(rowEntry), Iterator(rowEntry, k, offset)
+ {
+ }
+ void setValue(T value)
+ {
+ rowEntry->value[Iterator::k] = value;
+ }
+ void setIndex(N index)
+ {
+ rowEntry->index[Iterator::k] = index;
+ }
+
+ protected:
+ RowEntry *rowEntry;
+ };
+ DynamicIterator dynamic_row_begin(N n, N k = 0)
+ {
+ N offset = offsets.size() ? offsets[n] : 0;
+ if (offset) {
+ printf("---- Warning ----\n");
+ printf("Dynamic iterator is not allowed for referenced rows.\n");
+ printf("You should be very careful otherwise this causes some bugs.\n");
+ printf(
+ "We encourage you that you convert this row into a raw format, then loop over it...\n");
+ printf("-----------------\n");
+ exit(0);
+ }
+ return DynamicIterator(matrix[n], k, offset);
+ }
+ RCMatrix transpose(N rowsize = 0,
+ unsigned expected_none_zeros = default_expected_none_zeros) const
+ {
+ if (!rowsize)
+ rowsize = getColumnSize();
+ RCMatrix result(rowsize, expected_none_zeros);
+ for (N i = 0; i < n; i++) {
+ for (Iterator it = row_begin(i); it; ++it)
+ result.fix_element(it.index(), i, it.value());
+ }
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+
+ RCMatrix getKtK() const
+ {
+ RCMatrix m = transpose();
+ RCMatrix result(n, expected_none_zeros);
+ // Run in parallel
+ parallel_for(result.n)
+ {
+ N i = parallel_index;
+ for (Iterator it_A = m.row_begin(i); it_A; ++it_A) {
+ N j = it_A.index();
+ assert(j < n);
+ T a = it_A.value();
+ if (std::abs(a) > VECTOR_EPSILON) {
+ for (Iterator it_B = row_begin(j); it_B; ++it_B) {
+ // result.add_to_element(i,it_B.index(),it_B.value()*a);
+ double value = it_B.value() * a;
+ if (std::abs(value) > VECTOR_EPSILON)
+ result.add_to_element(i, it_B.index(), value);
+ }
+ }
+ }
+ }
+ parallel_end
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+
+ RCMatrix operator*(const RCMatrix &m) const
+ {
+ RCMatrix result(n, expected_none_zeros);
+ // Run in parallel
+ parallel_for(result.n)
+ {
+ N i = parallel_index;
+ for (Iterator it_A = row_begin(i); it_A; ++it_A) {
+ N j = it_A.index();
+ assert(j < m.n);
+ T a = it_A.value();
+ if (std::abs(a) > VECTOR_EPSILON) {
+ for (Iterator it_B = m.row_begin(j); it_B; ++it_B) {
+ // result.add_to_element(i,it_B.index(),it_B.value()*a);
+ double value = it_B.value() * a;
+ if (std::abs(value) > VECTOR_EPSILON)
+ result.add_to_element(i, it_B.index(), value);
+ }
+ }
+ }
+ }
+ parallel_end
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+
+ RCMatrix sqrt() const
+ {
+ RCMatrix result(n, expected_none_zeros);
+ // Run in parallel
+ parallel_for(result.n)
+ {
+ N i = parallel_index;
+ for (Iterator it_A = row_begin(i); it_A; ++it_A) {
+ N j = it_A.index();
+ result.set_element(i, j, std::sqrt(it_A.value()));
+ }
+ }
+ parallel_end
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+
+ RCMatrix operator*(const double k) const
+ {
+ RCMatrix result(n, expected_none_zeros);
+ // Run in parallel
+ parallel_for(result.n)
+ {
+ N i = parallel_index;
+ for (Iterator it_A = row_begin(i); it_A; ++it_A) {
+ N j = it_A.index();
+ result.add_to_element(i, j, it_A.value() * k);
+ }
+ }
+ parallel_end
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+
+ RCMatrix applyKernel(const RCMatrix &kernel, const int nx, const int ny) const
+ {
+ RCMatrix result(n, expected_none_zeros);
+ // find center position of kernel (half of kernel size)
+ int kCols = kernel.n, kRows = kernel.n, rows = nx, cols = ny;
+ int kCenterX = kCols / 2;
+ int kCenterY = kRows / 2;
+ // Run in parallel
+ parallel_for(result.n)
+ {
+ N i = parallel_index;
+ if (i >= rows)
+ break;
+ for (Iterator it_A = row_begin(i); it_A; ++it_A) {
+ N j = it_A.index();
+ if (j >= cols)
+ break;
+ for (int m = 0; m < kRows; ++m) { // kernel rows
+ int mm = kRows - 1 - m; // row index of flipped kernel
+ for (int n = 0; n < kCols; ++n) { // kernel columns
+ int nn = kCols - 1 - n; // column index of flipped kernel
+ // index of input signal, used for checking boundary
+ int ii = i + (m - kCenterY);
+ int jj = j + (n - kCenterX);
+ // ignore input samples which are out of bound
+ if (ii >= 0 && ii < rows && jj >= 0 && jj < cols)
+ result.add_to_element(i, j, (*this)(ii, jj) * kernel(mm, nn));
+ }
+ }
+ }
+ }
+ parallel_end
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+
+ RCMatrix applyHorizontalKernel(const RCMatrix &kernel, const int nx, const int ny) const
+ {
+ RCMatrix result(n, expected_none_zeros);
+ // find center position of kernel (half of kernel size)
+ int kCols = kernel.n, kRows = 1, rows = nx, cols = ny;
+ int kCenterX = kCols / 2;
+ int kCenterY = kRows / 2;
+ // Run in parallel
+ parallel_for(result.n)
+ {
+ N i = parallel_index;
+ if (i >= rows)
+ break;
+ for (Iterator it_A = row_begin(i); it_A; ++it_A) {
+ N j = it_A.index();
+ if (j >= cols)
+ break;
+ for (int m = 0; m < kRows; ++m) { // kernel rows
+ int mm = kRows - 1 - m; // row index of flipped kernel
+ for (int n = 0; n < kCols; ++n) { // kernel columns
+ int nn = kCols - 1 - n; // column index of flipped kernel
+ // index of input signal, used for checking boundary
+ int ii = i + (m - kCenterY);
+ int jj = j + (n - kCenterX);
+ // ignore input samples which are out of bound
+ if (ii >= 0 && ii < rows && jj >= 0 && jj < cols)
+ result.add_to_element(i, j, (*this)(ii, jj) * kernel(mm, nn));
+ }
+ }
+ }
+ }
+ parallel_end
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+
+ RCMatrix applyVerticalKernel(const RCMatrix &kernel, const int nx, const int ny) const
+ {
+ RCMatrix result(n, expected_none_zeros);
+ // find center position of kernel (half of kernel size)
+ int kCols = 1, kRows = kernel.n, rows = nx, cols = ny;
+ int kCenterX = kCols / 2;
+ int kCenterY = kRows / 2;
+ // Run in parallel
+ parallel_for(result.n)
+ {
+ N i = parallel_index;
+ if (i >= rows)
+ break;
+ for (Iterator it_A = row_begin(i); it_A; ++it_A) {
+ N j = it_A.index();
+ if (j >= cols)
+ break;
+ for (int m = 0; m < kRows; ++m) { // kernel rows
+ int mm = kRows - 1 - m; // row index of flipped kernel
+ for (int n = 0; n < kCols; ++n) { // kernel columns
+ int nn = kCols - 1 - n; // column index of flipped kernel
+ // index of input signal, used for checking boundary
+ int ii = i + (m - kCenterY);
+ int jj = j + (n - kCenterX);
+ // ignore input samples which are out of bound
+ if (ii >= 0 && ii < rows && jj >= 0 && jj < cols)
+ result.add_to_element(i, j, (*this)(ii, jj) * kernel(mm, nn));
+ }
+ }
+ }
+ }
+ parallel_end
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+
+ RCMatrix applySeparableKernel(const RCMatrix &kernelH,
+ const RCMatrix &kernelV,
+ const int nx,
+ const int ny) const
+ {
+ return applyHorizontalKernel(kernelH, nx, ny).applyVerticalKernel(kernelV, nx, ny);
+ }
+
+ RCMatrix applySeparableKernelTwice(const RCMatrix &kernelH,
+ const RCMatrix &kernelV,
+ const int nx,
+ const int ny) const
+ {
+ return applySeparableKernel(kernelH, kernelV, nx, ny)
+ .applySeparableKernel(kernelH, kernelV, nx, ny);
+ }
+
+ std::vector<T> operator*(const std::vector<T> &rhs) const
+ {
+ std::vector<T> result(n, 0.0);
+ multiply(rhs, result);
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+ void multiply(const std::vector<T> &rhs, std::vector<T> &result) const
+ {
+ result.resize(n);
+ for (N i = 0; i < n; i++) {
+ T new_value = 0.0;
+ for (Iterator it = row_begin(i); it; ++it) {
+ N j_index = it.index();
+ assert(j_index < rhs.size());
+ new_value += rhs[j_index] * it.value();
+ }
+ result[i] = new_value;
+ }
+ }
+ RCMatrix operator+(const RCMatrix &m) const
+ {
+ RCMatrix A(*this);
+ return std::move(A.add(m));
+ }
+ RCMatrix &add(const RCMatrix &m)
+ {
+ if (m.n > n)
+ resize(m.n);
+ parallel_for(m.n)
+ {
+ N i = parallel_index;
+ for (Iterator it = m.row_begin(i); it; ++it) {
+ add_to_element(i, it.index(), it.value());
+ }
+ }
+ parallel_end return *this;
+ }
+ RCMatrix operator-(const RCMatrix &m) const
+ {
+ RCMatrix A(*this);
+ return std::move(A.sub(m));
+ }
+ RCMatrix &sub(const RCMatrix &m)
+ {
+ if (m.n > n)
+ resize(m.n);
+ parallel_for(m.n)
+ {
+ N i = parallel_index;
+ for (Iterator it = m.row_begin(i); it; ++it) {
+ add_to_element(i, it.index(), -it.value());
+ }
+ }
+ parallel_end return *this;
+ }
+ RCMatrix &replace(const RCMatrix &m, int rowInd, int colInd)
+ {
+ if (m.n > n)
+ resize(m.n);
+ parallel_for(m.n)
+ {
+ N i = parallel_index;
+ for (Iterator it = m.row_begin(i); it; ++it) {
+ set_element(i + rowInd, it.index() + colInd, it.value());
+ }
+ }
+ parallel_end return *this;
+ }
+ Real max_residual(const std::vector<T> &lhs, const std::vector<T> &rhs) const
+ {
+ std::vector<T> r = operator*(lhs);
+ Real max_residual = 0.0;
+ for (N i = 0; i < rhs.size(); i++) {
+ if (!empty(i))
+ max_residual = std::max(max_residual, std::abs(r[i] - rhs[i]));
+ }
+ return max_residual;
+ }
+ std::vector<T> residual_vector(const std::vector<T> &lhs, const std::vector<T> &rhs) const
+ {
+ std::vector<T> result = operator*(lhs);
+ assert(result.size() == rhs.size());
+ for (N i = 0; i < result.size(); i++) {
+ result[i] = std::abs(result[i] - rhs[i]);
+ }
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+ T norm() const
+ {
+ T result(0.0);
+ for (N i = 0; i < n; ++i) {
+ for (Iterator it = row_begin(i); it; ++it) {
+ result = std::max(result, std::abs(it.value()));
+ }
+ }
+ return result;
+ }
+
+ T norm_L2_sqr() const
+ {
+ T result(0.0);
+ for (N i = 0; i < n; ++i) {
+ for (Iterator it = row_begin(i); it; ++it) {
+ result += (it.value()) * (it.value());
+ }
+ }
+ return result;
+ }
+
+ void write_matlab(std::ostream &output,
+ unsigned int rows,
+ unsigned int columns,
+ const char *variable_name)
+ {
+ output << variable_name << "=sparse([";
+ for (N i = 0; i < n; ++i) {
+ if (matrix[i]) {
+ const std::vector<N> &index = matrix[i]->index;
+ for (N j = 0; j < (N)index.size(); ++j) {
+ output << i + 1 << " ";
+ }
+ }
+ }
+ output << "],...\n [";
+ for (N i = 0; i < n; ++i) {
+ if (matrix[i]) {
+ const std::vector<N> &index = matrix[i]->index;
+ for (N j = 0; j < (N)index.size(); ++j) {
+ output << index[j] + (offsets.empty() ? 0 : offsets[i]) + 1 << " ";
+ }
+ }
+ }
+ output << "],...\n [";
+ for (N i = 0; i < n; ++i) {
+ if (matrix[i]) {
+ const std::vector<T> &value = matrix[i]->value;
+ for (N j = 0; j < value.size(); ++j) {
+ output << value[j] << " ";
+ }
+ }
+ }
+ output << "], " << rows << ", " << columns << ");" << std::endl;
+ };
+ void export_matlab(std::string filename, std::string name)
+ {
+ // Export this matrix
+ std::ofstream file;
+ file.open(filename.c_str());
+ write_matlab(file, n, getColumnSize(), name.c_str());
+ file.close();
+ }
+ void print_readable(std::string name, bool printNonZero = true)
+ {
+ std::cout << name << " \n";
+ for (int i = 0; i < n; ++i) {
+ for (int j = 0; j < n; ++j) {
+ if (printNonZero) {
+ if ((*this)(i, j) == 0) {
+ std::cout << " .";
+ continue;
+ }
+ }
+ else {
+ if ((*this)(i, j) == 0) {
+ continue;
+ }
+ }
+
+ if ((*this)(i, j) >= 0)
+ std::cout << " ";
+ std::cout << " " << (*this)(i, j);
+ }
+ std::cout << " \n";
+ }
+ }
+ ///
+ N n;
+ N expected_none_zeros;
+ std::vector<RowEntry *> matrix;
+ std::vector<int> offsets;
+};
+
+template<class N, class T>
+static inline RCMatrix<N, T> operator*(const std::vector<T> &diagonal, const RCMatrix<N, T> &A)
+{
+ RCMatrix<N, T> result(A);
+ parallel_for(result.n)
+ {
+ N row(parallel_index);
+ for (auto it = result.dynamic_row_begin(row); it; ++it) {
+ it.setValue(it.value() * diagonal[row]);
+ }
+ }
+ parallel_end return std::move(result);
+}
+
+template<class N, class T> struct RCFixedMatrix {
+ std::vector<N> rowstart;
+ std::vector<N> index;
+ std::vector<T> value;
+ N n;
+ N max_rowlength;
+ //
+ RCFixedMatrix() : n(0), max_rowlength(0)
+ {
+ }
+ RCFixedMatrix(const RCMatrix<N, T> &matrix)
+ {
+ n = matrix.n;
+ rowstart.resize(n + 1);
+ rowstart[0] = 0;
+ max_rowlength = 0;
+ for (N i = 0; i < n; i++) {
+ if (!matrix.empty(i)) {
+ rowstart[i + 1] = rowstart[i] + matrix.row_nonzero_size(i);
+ max_rowlength = std::max(max_rowlength, rowstart[i + 1] - rowstart[i]);
+ }
+ else {
+ rowstart[i + 1] = rowstart[i];
+ }
+ }
+ value.resize(rowstart[n]);
+ index.resize(rowstart[n]);
+ N j = 0;
+ for (N i = 0; i < n; i++) {
+ for (typename RCMatrix<N, T>::Iterator it = matrix.row_begin(i); it; ++it) {
+ value[j] = it.value();
+ index[j] = it.index();
+ ++j;
+ }
+ }
+ }
+ class Iterator : std::iterator<std::input_iterator_tag, T> {
+ public:
+ Iterator(N start, N end, const std::vector<N> &index, const std::vector<T> &value)
+ : index_array(index), value_array(value), k(start), start(start), end(end)
+ {
+ }
+ operator bool() const
+ {
+ return k < end;
+ }
+ Iterator &operator++()
+ {
+ ++k;
+ return *this;
+ }
+ T value() const
+ {
+ return value_array[k];
+ }
+ N index() const
+ {
+ return index_array[k];
+ }
+ N size() const
+ {
+ return end - start;
+ }
+
+ private:
+ const std::vector<N> &index_array;
+ const std::vector<T> &value_array;
+ N k, start, end;
+ };
+ Iterator row_begin(N n) const
+ {
+ return Iterator(rowstart[n], rowstart[n + 1], index, value);
+ }
+ std::vector<T> operator*(const std::vector<T> &rhs) const
+ {
+ std::vector<T> result(n, 0.0);
+ multiply(rhs, result);
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+ void multiply(const std::vector<T> &rhs, std::vector<T> &result) const
+ {
+ result.resize(n);
+ parallel_for(n)
+ {
+ N i = parallel_index;
+ T new_value = 0.0;
+ for (Iterator it = row_begin(i); it; ++it) {
+ N j_index = it.index();
+ assert(j_index < rhs.size());
+ new_value += rhs[j_index] * it.value();
+ }
+ result[i] = new_value;
+ }
+ parallel_end
+ }
+ RCMatrix<N, T> operator*(const RCFixedMatrix &m) const
+ {
+ RCMatrix<N, T> result(n, max_rowlength);
+ // Run in parallel
+ parallel_for(result.n)
+ {
+ N i = parallel_index;
+ for (Iterator it_A = row_begin(i); it_A; ++it_A) {
+ N j = it_A.index();
+ assert(j < m.n);
+ T a = it_A.value();
+ if (std::abs(a) > VECTOR_EPSILON) {
+ for (Iterator it_B = m.row_begin(j); it_B; ++it_B) {
+ result.add_to_element(i, it_B.index(), it_B.value() * a);
+ }
+ }
+ }
+ }
+ parallel_end
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+
+ RCMatrix<N, T> toRCMatrix() const
+ {
+ RCMatrix<N, T> result(n, 0);
+ parallel_for(n)
+ {
+ N i = parallel_index;
+ N size = rowstart[i + 1] - rowstart[i];
+ result.matrix[i] = new typename RCMatrix<N, T>::RowEntry;
+ result.matrix[i]->index.resize(size);
+ result.matrix[i]->value.resize(size);
+ for (N j = 0; j < size; j++) {
+ result.matrix[i]->index[j] = index[rowstart[i] + j];
+ result.matrix[i]->value[j] = value[rowstart[i] + j];
+ }
+ }
+ parallel_end
+#if MANTA_USE_CPP11 == 1
+ return std::move(result);
+#else
+ return result;
+#endif
+ }
+};
+
+typedef RCMatrix<int, Real> Matrix;
+typedef RCFixedMatrix<int, Real> FixedMatrix;
+
+} // namespace Manta
+
+#undef parallel_for
+#undef parallel_end
+
+#undef parallel_block
+#undef do_parallel
+#undef do_end
+#undef block_end
+
+#endif
diff --git a/extern/mantaflow/helper/util/simpleimage.cpp b/extern/mantaflow/helper/util/simpleimage.cpp
new file mode 100644
index 00000000000..9846fa5bd96
--- /dev/null
+++ b/extern/mantaflow/helper/util/simpleimage.cpp
@@ -0,0 +1,312 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2014 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Simple image IO
+ *
+ ******************************************************************************/
+
+#include "vectorbase.h"
+#include "simpleimage.h"
+
+namespace Manta {
+
+// write rectangle to ppm
+bool SimpleImage::writePpm(
+ std::string filename, int minx, int miny, int maxx, int maxy, bool invertXY)
+{
+ int w = maxx - minx;
+ int h = maxy - miny;
+
+ if (w <= 0 || h <= 0 || w > mSize[0] || h > mSize[1]) {
+ errMsg("SimpleImage::WritePPM Invalid rect: w="
+ << w << ", h=" << h << ", size=" << mSize[0] << "," << mSize[1] << " min/max: " << minx
+ << "," << miny << " to " << maxx << "," << maxy << ", resetting... ");
+ minx = miny = 0;
+ maxx = mSize[0] - 1;
+ maxy = mSize[1] - 1;
+ w = mSize[0] - 1;
+ h = mSize[1] - 1;
+ }
+
+ FILE *fp = fopen(filename.c_str(), "wb");
+ if (fp == NULL) {
+ errMsg("SimpleImage::WritePPM Unable to open '" << filename << "' for writing");
+ return false;
+ }
+ fprintf(fp, "P6\n%d %d\n255\n", w, h);
+
+ int pixCnt = 0;
+ for (int j = maxy - 1; j >= miny; j--)
+ for (int i = minx; i < maxx; i++) {
+ unsigned char col[3];
+ for (int l = 0; l < 3; l++) {
+ float val;
+ if (invertXY)
+ val = (float)get(j, i)[l];
+ else
+ val = (float)get(i, j)[l];
+
+ val = clamp(val, (float)0., (float)1.);
+ col[l] = (unsigned char)(255. * val);
+ }
+ // col[1] = col[2] = col[0];
+ // if (fwrite(col,1,3, fp) != 3) errMsg("SimpleImage::writePpm fwrite failed");
+ fwrite(col, 1, 3, fp);
+ pixCnt++;
+ // fprintf(stderr,"%d %d %d \n",col[0],i,j);
+ }
+
+ fclose(fp);
+ // debMsg("WritePPM Wrote '"<<filename<<"', region="<<minx<<","<<miny<<" to
+ // "<<maxx<<","<<maxy<<"; "<<pixCnt, 1);
+
+ return true;
+}
+
+bool SimpleImage::writePpm(std::string filename)
+{
+ return writePpm(filename, 0, 0, getSize()[0], getSize()[1]);
+}
+
+// read in a ppm file, and init the image accordingly
+bool SimpleImage::initFromPpm(std::string filename)
+{
+ // maximum length of a line of text
+ const int MAXLINE = 1024;
+
+ int filetype = 0;
+ enum { PGM, PPM }; // possible file types
+
+ FILE *fp;
+ char line[MAXLINE];
+ int size, rowsize;
+
+ // Read in file type
+ fp = fopen(filename.c_str(), "rb");
+ if (!fp) {
+ if (mAbortOnError)
+ debMsg("SimpleImage Error - unable to open file '" << filename << "' for reading", 1);
+ return 0;
+ }
+
+ // 1st line: PPM or PGM
+ if (fgets(line, MAXLINE, fp) == NULL) {
+ if (mAbortOnError)
+ debMsg("SimpleImage::initFromPpm fgets failed", 1);
+ return 0;
+ }
+
+ if (line[1] == '5')
+ filetype = PGM;
+ else if (line[1] == '6')
+ filetype = PPM;
+ else {
+ if (mAbortOnError)
+ debMsg("SimpleImage Error: need PPM or PGM file as input!", 1);
+ return 0;
+ }
+
+ // Read in width and height, & allocate space
+ // 2nd line: width height
+ if (fgets(line, MAXLINE, fp) == NULL) {
+ if (mAbortOnError)
+ errMsg("SimpleImage::initFromPpm fgets failed");
+ return 0;
+ }
+ int windW = 0, windH = 0; // size of the window on the screen
+ int intsFound = sscanf(line, "%d %d", &windW, &windH);
+ if (intsFound == 1) {
+ // only X found, search on next line as well for Y...
+ if (sscanf(line, "%d", &windH) != 1) {
+ if (mAbortOnError)
+ errMsg("initFromPpm Ppm dimensions not found!" << windW << "," << windH);
+ return 0;
+ }
+ else {
+ // ok, found 2 lines
+ // debMsg("initFromPpm Ppm dimensions found!"<<windW<<","<<windH, 1);
+ }
+ }
+ else if (intsFound == 2) {
+ // ok!
+ }
+ else {
+ if (mAbortOnError)
+ errMsg("initFromPpm Ppm dimensions not found at all!" << windW << "," << windH);
+ return 0;
+ }
+
+ if (filetype == PGM) {
+ size = windH * windW; // greymap: 1 byte per pixel
+ rowsize = windW;
+ }
+ else {
+ // filetype == PPM
+ size = windH * windW * 3; // pixmap: 3 bytes per pixel
+ rowsize = windW * 3;
+ }
+
+ unsigned char *pic = new unsigned char[size]; // (GLubyte *)malloc (size);
+
+ // Read in maximum value (ignore) , could be scanned with sscanf as well, but this should be
+ // 255... 3rd line
+ if (fgets(line, MAXLINE, fp) == NULL) {
+ if (mAbortOnError)
+ errMsg("SimpleImage::initFromPpm fgets failed");
+ return 0;
+ }
+
+ // Read in the pixel array row-by-row: 1st row = top scanline */
+ unsigned char *ptr = NULL;
+ ptr = &pic[(windH - 1) * rowsize];
+ for (int i = windH; i > 0; i--) {
+ assertMsg(fread((void *)ptr, 1, rowsize, fp) == rowsize,
+ "SimpleImage::initFromPpm couldn't read data");
+ ptr -= rowsize;
+ }
+
+ // init image
+ this->init(windW, windH);
+ if (filetype == PGM) {
+ // grayscale
+ for (int i = 0; i < windW; i++) {
+ for (int j = 0; j < windH; j++) {
+ double r = (double)pic[(j * windW + i) * 1 + 0] / 255.;
+ (*this)(i, j) = Vec3(r, r, r);
+ }
+ }
+ }
+ else {
+ // convert grid to RGB vec's
+ for (int i = 0; i < windW; i++) {
+ for (int j = 0; j < windH; j++) {
+ // return mpData[y*mSize[0]+x];
+ double r = (double)pic[(j * windW + i) * 3 + 0] / 255.;
+ double g = (double)pic[(j * windW + i) * 3 + 1] / 255.;
+ double b = (double)pic[(j * windW + i) * 3 + 2] / 255.;
+
+ //(*this)(i,j) = Vec3(r,g,b);
+
+ // RGB values have to be rotated to get the right colors!?
+ // this might also be an artifact of photoshop export...?
+ (*this)(i, j) = Vec3(g, b, r);
+ }
+ }
+ }
+
+ delete[] pic;
+ fclose(fp);
+ return 1;
+}
+
+// check index is valid
+bool SimpleImage::indexIsValid(int i, int j)
+{
+ if (i < 0)
+ return false;
+ if (j < 0)
+ return false;
+ if (i >= mSize[0])
+ return false;
+ if (j >= mSize[1])
+ return false;
+ return true;
+}
+
+}; // namespace Manta
+
+//*****************************************************************************
+
+#include "grid.h"
+namespace Manta {
+
+// simple shaded output , note requires grid functionality!
+static void gridPrecompLight(const Grid<Real> &density, Grid<Real> &L, Vec3 light = Vec3(1, 1, 1))
+{
+ FOR_IJK(density)
+ {
+ Vec3 n = getGradient(density, i, j, k) * -1.;
+ normalize(n);
+
+ Real d = dot(light, n);
+ L(i, j, k) = d;
+ }
+}
+
+// simple shading with pre-computed gradient
+static inline void shadeCell(
+ Vec3 &dst, int shadeMode, Real src, Real light, int depthPos, Real depthInv)
+{
+ switch (shadeMode) {
+
+ case 1: {
+ // surfaces
+ Vec3 ambient = Vec3(0.1, 0.1, 0.1);
+ Vec3 diffuse = Vec3(0.9, 0.9, 0.9);
+ Real alpha = src;
+
+ // different color for depth?
+ diffuse[0] *= ((Real)depthPos * depthInv) * 0.7 + 0.3;
+ diffuse[1] *= ((Real)depthPos * depthInv) * 0.7 + 0.3;
+
+ Vec3 col = ambient + diffuse * light;
+
+ // img( 0+i, j ) = (1.-alpha) * img( 0+i, j ) + alpha * col;
+ dst = (1. - alpha) * dst + alpha * col;
+ } break;
+
+ default: {
+ // volumetrics / smoke
+ dst += depthInv * Vec3(src, src, src);
+ } break;
+ }
+}
+
//! helper to project a grid into an image (used for ppm export and GUI display)
//! shadeMode 1 renders shaded surfaces, otherwise volumetric accumulation;
//! scale divides the final pixel values (via mapRange)
void projectImg(SimpleImage &img, const Grid<Real> &val, int shadeMode = 0, Real scale = 1.)
{
  Vec3i s = val.getSize();
  // reciprocal of each grid dimension, used as depth normalization below
  Vec3 si = Vec3(1. / (Real)s[0], 1. / (Real)s[1], 1. / (Real)s[2]);

  // init image size
  int imgSx = s[0];
  if (val.is3D())
    imgSx += s[2] + s[0];  // mult views in 3D
  img.init(imgSx, std::max(s[0], std::max(s[1], s[2])));

  // precompute lighting
  Grid<Real> L(val);
  gridPrecompLight(val, L, Vec3(1, 1, 1));

  // first view: project along the k axis
  FOR_IJK(val)
  {
    Vec3i idx(i, j, k);
    shadeCell(img(0 + i, j), shadeMode, val(idx), L(idx), k, si[2]);
  }

  if (val.is3D()) {

    // second view: project along the i axis
    FOR_IJK(val)
    {
      Vec3i idx(i, j, k);
      shadeCell(img(s[0] + k, j), shadeMode, val(idx), L(idx), i, si[0]);
    }

    // third view: project along the j axis
    FOR_IJK(val)
    {
      Vec3i idx(i, j, k);
      shadeCell(img(s[0] + s[2] + i, k), shadeMode, val(idx), L(idx), j, si[1]);
    }

  }  // 3d

  img.mapRange(1. / scale);
}
+
+}; // namespace Manta
diff --git a/extern/mantaflow/helper/util/simpleimage.h b/extern/mantaflow/helper/util/simpleimage.h
new file mode 100644
index 00000000000..d7e88b83f74
--- /dev/null
+++ b/extern/mantaflow/helper/util/simpleimage.h
@@ -0,0 +1,205 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2014 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Simple image IO
+ *
+ ******************************************************************************/
+
+#ifndef MANTA_SIMPLEIMAGE_H
+#define MANTA_SIMPLEIMAGE_H
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "manta.h"
+#include "vectorbase.h"
+
+namespace Manta {
+
+//*****************************************************************************
+// simple 2d image class
+// template<class Scalar>
+class SimpleImage {
+ public:
+ // cons/des
+ SimpleImage() : mSize(-1), mpData(NULL), mAbortOnError(true){};
+ virtual ~SimpleImage()
+ {
+ if (mpData)
+ delete[] mpData;
+ };
+
+ //! set to constant
+ void reset(Real val = 0.)
+ {
+ const Vec3 v = Vec3(val);
+ for (int i = 0; i < mSize[0] * mSize[1]; i++)
+ mpData[i] = v;
+ }
+ //! init memory & reset to zero
+ void init(int x, int y)
+ {
+ mSize = Vec3i(x, y, 0);
+ mpData = new Vec3[x * y];
+ reset();
+ };
+
+ inline bool checkIndex(int x, int y)
+ {
+ if ((x < 0) || (y < 0) || (x > mSize[0] - 1) || (y > mSize[1] - 1)) {
+ errMsg("SimpleImage::operator() Invalid access to " << x << "," << y << ", size=" << mSize);
+ return false;
+ }
+ return true;
+ }
+
+ // access element
+ inline Vec3 &operator()(int x, int y)
+ {
+ DEBUG_ONLY(checkIndex(x, y));
+ return mpData[y * mSize[0] + x];
+ };
+ inline Vec3 &get(int x, int y)
+ {
+ return (*this)(x, y);
+ }
+ inline Vec3 &getMap(int x, int y, int z, int axis)
+ {
+ int i = x, j = y;
+ if (axis == 1)
+ j = z;
+ if (axis == 0) {
+ i = y;
+ j = z;
+ }
+ return get(i, j);
+ }
+
+ // output as string, debug
+ std::string toString()
+ {
+ std::ostringstream out;
+
+ for (int j = 0; j < mSize[1]; j++) {
+ for (int i = 0; i < mSize[0]; i++) {
+        // normal zyx order
+ out << (*this)(i, j);
+ out << " ";
+ }
+ // if (format)
+ out << std::endl;
+ }
+
+ return out.str();
+ }
+
+  // add f to all values
+ void add(Vec3 f)
+ {
+ for (int j = 0; j < mSize[1]; j++)
+ for (int i = 0; i < mSize[0]; i++) {
+ get(i, j) += f;
+ }
+ }
+ // multiply all values by f
+ void multiply(Real f)
+ {
+ for (int j = 0; j < mSize[1]; j++)
+ for (int i = 0; i < mSize[0]; i++) {
+ get(i, j) *= f;
+ }
+ }
+ // map 0-f to 0-1 range, clamp
+ void mapRange(Real f)
+ {
+ for (int j = 0; j < mSize[1]; j++)
+ for (int i = 0; i < mSize[0]; i++) {
+ get(i, j) /= f;
+ for (int c = 0; c < 3; ++c)
+ get(i, j)[c] = clamp(get(i, j)[c], (Real)0., (Real)1.);
+ }
+ }
+
+ // normalize max values
+ void normalizeMax()
+ {
+ Real max = normSquare(get(0, 0));
+ for (int j = 0; j < mSize[1]; j++)
+ for (int i = 0; i < mSize[0]; i++) {
+ if (normSquare(get(i, j)) > max)
+ max = normSquare(get(i, j));
+ }
+ max = sqrt(max);
+ Real invMax = 1. / max;
+ for (int j = 0; j < mSize[1]; j++)
+ for (int i = 0; i < mSize[0]; i++) {
+ get(i, j) *= invMax;
+ }
+ };
+
+ // normalize min and max values
+ void normalizeMinMax()
+ {
+ Real max = normSquare(get(0, 0));
+ Real min = max;
+ for (int j = 0; j < mSize[1]; j++)
+ for (int i = 0; i < mSize[0]; i++) {
+ if (normSquare(get(i, j)) > max)
+ max = normSquare(get(i, j));
+ if (normSquare(get(i, j)) < min)
+ min = normSquare(get(i, j));
+ }
+ max = sqrt(max);
+ min = sqrt(min);
+ Real factor = 1. / (max - min);
+ for (int j = 0; j < mSize[1]; j++)
+ for (int i = 0; i < mSize[0]; i++) {
+ get(i, j) -= min;
+ get(i, j) *= factor;
+ }
+ };
+
+ void setAbortOnError(bool set)
+ {
+ mAbortOnError = set;
+ }
+
+ // ppm in/output
+
+ // write whole image
+ bool writePpm(std::string filename);
+ // write rectangle to ppm
+ bool writePpm(
+ std::string filename, int minx, int miny, int maxx, int maxy, bool invertXY = false);
+ // read in a ppm file, and init the image accordingly
+ bool initFromPpm(std::string filename);
+
+ // check index is valid
+ bool indexIsValid(int i, int j);
+
+ //! access
+ inline Vec3i getSize() const
+ {
+ return mSize;
+ }
+
+ protected:
+ //! size
+ Vec3i mSize;
+ //! data
+ Vec3 *mpData;
+ // make errors fatal, or continue?
+ bool mAbortOnError;
+
+}; // SimpleImage
+
+}; // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/helper/util/solvana.h b/extern/mantaflow/helper/util/solvana.h
new file mode 100644
index 00000000000..9dc1ec83654
--- /dev/null
+++ b/extern/mantaflow/helper/util/solvana.h
@@ -0,0 +1,214 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Analytical solutions to some problems
+ * generated using MATLAB symbolic math ccode
+ *
+ ******************************************************************************/
+
+#ifndef _SOLVANA_H
+#define _SOLVANA_H
+
+//! solves the equation [e1 e2 e3; 1 1 1]*x = g using least squares
+inline void SolveOverconstraint34(float e1x,
+ float e1y,
+ float e1z,
+ float e2x,
+ float e2y,
+ float e2z,
+ float e3x,
+ float e3y,
+ float e3z,
+ float g1,
+ float g2,
+ float g3,
+ float &x1,
+ float &x2,
+ float &x3)
+{
+ float e1x2 = e1x * e1x, e1y2 = e1y * e1y, e1z2 = e1z * e1z;
+ float e2x2 = e2x * e2x, e2y2 = e2y * e2y, e2z2 = e2z * e2z;
+ float e3x2 = e3x * e3x, e3y2 = e3y * e3y, e3z2 = e3z * e3z;
+ float e1xy = e1x * e1y, e1xz = e1x * e1z, e1yz = e1y * e1z;
+ float e2xy = e2x * e2y, e2xz = e2x * e2z, e2yz = e2y * e2z;
+ float e3xy = e3x * e3y, e3xz = e3x * e3z, e3yz = e3y * e3z;
+ float e12x = e1x * e2x, e12y = e1y * e2y, e12z = e1z * e2z;
+ float e13x = e1x * e3x, e13y = e1y * e3y, e13z = e1z * e3z;
+ float e23x = e2x * e3x, e23y = e2y * e3y, e23z = e2z * e3z;
+ float t1543 = e3y2 * e2x2;
+ float t1544 = e3x2 * e2y2;
+ float t1545 = e3z2 * e2x2;
+ float t1546 = e3x2 * e2z2;
+ float t1547 = e3z2 * e2y2;
+ float t1548 = e3y2 * e2z2;
+ float t1549 = e2y2 * e1x2;
+ float t1550 = e2x2 * e1y2;
+ float t1551 = e2z2 * e1x2;
+ float t1552 = e2x2 * e1z2;
+ float t1553 = e2z2 * e1y2;
+ float t1554 = e2y2 * e1z2;
+ float t1555 = e3y2 * e1x2;
+ float t1556 = e3x2 * e1y2;
+ float t1557 = e3z2 * e1x2;
+ float t1558 = e3x2 * e1z2;
+ float t1559 = e3z2 * e1y2;
+ float t1560 = e3y2 * e1z2;
+ float t1561 = e3z2 * e2y2 * e1x2;
+ float t1562 = e3y2 * e2z2 * e1x2;
+ float t1563 = e3z2 * e2x2 * e1y2;
+ float t1564 = e3x2 * e2z2 * e1y2;
+ float t1565 = e3y2 * e2x2 * e1z2;
+ float t1566 = e3x2 * e2y2 * e1z2;
+ float t1567 = e1xy * e2x * e3y * 2.0;
+ float t1568 = e1xy * e2y * e3x * 2.0;
+ float t1569 = e1xz * e2x * e3z * 2.0;
+ float t1570 = e1xz * e2z * e3x * 2.0;
+ float t1571 = e1yz * e2y * e3z * 2.0;
+ float t1572 = e1yz * e2z * e3y * 2.0;
+ float t1573 = e1x * e2xy * e3y * 2.0;
+ float t1574 = e1y * e2xy * e3x * 2.0;
+ float t1575 = e1x * e2xz * e3z * 2.0;
+ float t1576 = e1z * e2xz * e3x * 2.0;
+ float t1577 = e1y * e2yz * e3z * 2.0;
+ float t1578 = e1z * e2yz * e3y * 2.0;
+ float t1579 = e1x * e2y * e3xy * 2.0;
+ float t1580 = e1y * e2x * e3xy * 2.0;
+ float t1581 = e1x * e2z * e3xz * 2.0;
+ float t1582 = e1z * e2x * e3xz * 2.0;
+ float t1583 = e1y * e2z * e3yz * 2.0;
+ float t1584 = e1z * e2y * e3yz * 2.0;
+ float t1585 = e1xy * e2xz * e3yz * 2.0;
+ float t1586 = e1xy * e2yz * e3xz * 2.0;
+ float t1587 = e1xz * e2xy * e3yz * 2.0;
+ float t1588 = e1xz * e2yz * e3xy * 2.0;
+ float t1589 = e1yz * e2xy * e3xz * 2.0;
+ float t1590 = e1yz * e2xz * e3xy * 2.0;
+ float t1596 = e12x * e3y2 * 2.0;
+ float t1597 = e13x * e2y2 * 2.0;
+ float t1598 = e23x * e1y2 * 2.0;
+ float t1599 = e12x * e3z2 * 2.0;
+ float t1600 = e13x * e2z2 * 2.0;
+ float t1601 = e12y * e3x2 * 2.0;
+ float t1602 = e13y * e2x2 * 2.0;
+ float t1603 = e23y * e1x2 * 2.0;
+ float t1604 = e23x * e1z2 * 2.0;
+ float t1605 = e12y * e3z2 * 2.0;
+ float t1606 = e13y * e2z2 * 2.0;
+ float t1607 = e12z * e3x2 * 2.0;
+ float t1608 = e13z * e2x2 * 2.0;
+ float t1609 = e23z * e1x2 * 2.0;
+ float t1610 = e23y * e1z2 * 2.0;
+ float t1611 = e12z * e3y2 * 2.0;
+ float t1612 = e13z * e2y2 * 2.0;
+ float t1613 = e23z * e1y2 * 2.0;
+ float t1614 = e1xy * e2xy * 2.0;
+ float t1615 = e1xz * e2xz * 2.0;
+ float t1616 = e1yz * e2yz * 2.0;
+ float t1617 = e1xy * e3xy * 2.0;
+ float t1618 = e1xz * e3xz * 2.0;
+ float t1619 = e1yz * e3yz * 2.0;
+ float t1620 = e2xy * e3xy * 2.0;
+ float t1621 = e2xz * e3xz * 2.0;
+ float t1622 = e2yz * e3yz * 2.0;
+ float t1623 = e1xy * e2xy * e3z2 * 2.0;
+ float t1624 = e1xz * e2xz * e3y2 * 2.0;
+ float t1625 = e1yz * e2yz * e3x2 * 2.0;
+ float t1626 = e1xy * e3xy * e2z2 * 2.0;
+ float t1627 = e1xz * e3xz * e2y2 * 2.0;
+ float t1628 = e1yz * e3yz * e2x2 * 2.0;
+ float t1629 = e2xy * e3xy * e1z2 * 2.0;
+ float t1630 = e2xz * e3xz * e1y2 * 2.0;
+ float t1631 = e2yz * e3yz * e1x2 * 2.0;
+ float t1591 = t1550 + t1551 + t1560 + t1543 + t1552 + t1561 + t1570 + t1544 + t1553 + t1562 +
+ t1571 + t1580 + t1545 + t1554 + t1563 + t1572 + t1581 + t1590 + t1546 + t1555 +
+ t1564 + t1573 + t1582 + t1547 + t1556 + t1565 + t1574 + t1583 + t1548 + t1557 +
+ t1566 + t1575 + t1584 + t1549 + t1558 + t1567 + t1576 + t1585 + t1559 + t1568 +
+ t1577 + t1586 + t1569 + t1578 + t1587 - t1596 + t1579 + t1588 - t1597 + t1589 -
+ t1598 - t1599 - t1600 - t1601 - t1610 - t1602 - t1611 - t1620 - t1603 - t1612 -
+ t1621 - t1630 - t1604 - t1613 - t1622 - t1631 - t1605 - t1614 - t1623 - t1606 -
+ t1615 - t1624 - t1607 - t1616 - t1625 - t1608 - t1617 - t1626 - t1609 - t1618 -
+ t1627 - t1619 - t1628 - t1629;
+ float t1592 = 1.0 / t1591;
+ float t1635 = e13x * e2y2;
+ float t1636 = e13x * e2z2;
+ float t1637 = e13y * e2x2;
+ float t1638 = e13y * e2z2;
+ float t1639 = e13z * e2x2;
+ float t1640 = e13z * e2y2;
+ float t1653 = e23x * 2.0;
+ float t1654 = e23y * 2.0;
+ float t1655 = e23z * 2.0;
+ float t1641 = e3x2 + e3z2 + e3y2 + e2y2 + t1543 + e2z2 + t1544 + e2x2 + t1545 + t1546 + t1547 +
+ t1548 - t1620 - t1621 - t1622 - t1653 - t1654 - t1655;
+ float t1642 = e12x * e3y2;
+ float t1643 = e12x * e3z2;
+ float t1644 = e12y * e3x2;
+ float t1645 = e12y * e3z2;
+ float t1646 = e12z * e3x2;
+ float t1647 = e12z * e3y2;
+ float t1656 = e1x * e2y * e3xy;
+ float t1657 = e1y * e2x * e3xy;
+ float t1658 = e1x * e2z * e3xz;
+ float t1659 = e1z * e2x * e3xz;
+ float t1660 = e1y * e2z * e3yz;
+ float t1661 = e1z * e2y * e3yz;
+ float t1648 = e3x2 + e3z2 + e3y2 - e13x - e13y - e13z + e12x - e23y + e12y + t1642 - e23z -
+ t1660 + e12z + t1643 - t1661 + t1644 + t1645 + t1646 + t1647 - t1656 - t1657 -
+ e23x - t1658 - t1659;
+ float t1679 = e1x * e2xy * e3y;
+ float t1680 = e1y * e2xy * e3x;
+ float t1681 = e1x * e2xz * e3z;
+ float t1682 = e1z * e2xz * e3x;
+ float t1683 = e1y * e2yz * e3z;
+ float t1684 = e1z * e2yz * e3y;
+ float t1652 = e2y2 + e2z2 + e2x2 + e13x + e13y + e13z + t1640 - e12x - e23y - e12y - e23z -
+ e12z + t1635 - t1680 + t1636 - t1681 + t1637 - t1682 + t1638 - t1683 + t1639 -
+ t1684 - e23x - t1679;
+ float t1662 = e23x * e1y2;
+ float t1663 = e23y * e1x2;
+ float t1664 = e23x * e1z2;
+ float t1665 = e23z * e1x2;
+ float t1666 = e23y * e1z2;
+ float t1667 = e23z * e1y2;
+ float t1670 = e1xy * e2x * e3y;
+ float t1671 = e1xy * e2y * e3x;
+ float t1672 = e1xz * e2x * e3z;
+ float t1673 = e1xz * e2z * e3x;
+ float t1674 = e1yz * e2y * e3z;
+ float t1675 = e1yz * e2z * e3y;
+ float t1668 = e1x2 + e1y2 + e1z2 - e13x - e13y - e13z - e12x + e23y - e12y + e23z - e12z -
+ t1670 + t1662 - t1671 + t1663 - t1672 + t1664 - t1673 + t1665 - t1674 + t1666 -
+ t1675 + e23x + t1667;
+ float t1676 = e13x * 2.0;
+ float t1677 = e13y * 2.0;
+ float t1678 = e13z * 2.0;
+ float t1669 = e3x2 + e3z2 + e3y2 + t1560 + e1x2 + t1555 + e1y2 + t1556 + e1z2 + t1557 + t1558 +
+ t1559 - t1617 - t1618 - t1619 - t1676 - t1677 - t1678;
+ float t1686 = e12x * 2.0;
+ float t1687 = e12y * 2.0;
+ float t1688 = e12z * 2.0;
+ float t1685 = t1550 + t1551 + e2y2 + t1552 + e2z2 + t1553 + e2x2 + t1554 + e1x2 + e1y2 + e1z2 +
+ t1549 - t1614 - t1615 - t1616 - t1686 - t1687 - t1688;
+ x1 = -g2 * (-e1y * t1592 * t1641 + e2y * t1592 * t1648 + e3y * t1592 * t1652) -
+ g3 * (-e1z * t1592 * t1641 + e2z * t1592 * t1648 + e3z * t1592 * t1652) -
+ g1 * (-e1x * t1592 * t1641 + e2x * t1592 * t1648 +
+ e3x * t1592 *
+ (e2y2 + e2z2 + e2x2 + e13x + e13y + e13z + t1640 + t1635 + t1636 + t1637 + t1638 +
+ t1639 - e12x - e12y - e12z - e23x - e23y - e23z - e1x * e2xy * e3y -
+ e1y * e2xy * e3x - e1x * e2xz * e3z - e1z * e2xz * e3x - e1y * e2yz * e3z -
+ e1z * e2yz * e3y));
+ x2 = -g1 * (e1x * t1592 * t1648 - e2x * t1592 * t1669 + e3x * t1592 * t1668) -
+ g2 * (e1y * t1592 * t1648 - e2y * t1592 * t1669 + e3y * t1592 * t1668) -
+ g3 * (e1z * t1592 * t1648 - e2z * t1592 * t1669 + e3z * t1592 * t1668);
+ x3 = -g1 * (e1x * t1592 * t1652 + e2x * t1592 * t1668 - e3x * t1592 * t1685) -
+ g2 * (e1y * t1592 * t1652 + e2y * t1592 * t1668 - e3y * t1592 * t1685) -
+ g3 * (e1z * t1592 * t1652 + e2z * t1592 * t1668 - e3z * t1592 * t1685);
+}
+
+#endif \ No newline at end of file
diff --git a/extern/mantaflow/helper/util/vector4d.cpp b/extern/mantaflow/helper/util/vector4d.cpp
new file mode 100644
index 00000000000..d342df607f5
--- /dev/null
+++ b/extern/mantaflow/helper/util/vector4d.cpp
@@ -0,0 +1,50 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Basic vector class
+ *
+ ******************************************************************************/
+
+#include "vector4d.h"
+
+using namespace std;
+
+namespace Manta {
+
+template<> const Vector4D<int> Vector4D<int>::Zero(0, 0, 0, 0);
+template<> const Vector4D<float> Vector4D<float>::Zero(0.f, 0.f, 0.f, 0.f);
+template<> const Vector4D<double> Vector4D<double>::Zero(0., 0., 0., 0.);
+template<>
+const Vector4D<float> Vector4D<float>::Invalid(numeric_limits<float>::quiet_NaN(),
+ numeric_limits<float>::quiet_NaN(),
+ numeric_limits<float>::quiet_NaN(),
+ numeric_limits<float>::quiet_NaN());
+template<>
+const Vector4D<double> Vector4D<double>::Invalid(numeric_limits<double>::quiet_NaN(),
+ numeric_limits<double>::quiet_NaN(),
+ numeric_limits<double>::quiet_NaN(),
+ numeric_limits<double>::quiet_NaN());
+template<> bool Vector4D<float>::isValid() const
+{
+ return !c_isnan(x) && !c_isnan(y) && !c_isnan(z) && !c_isnan(t);
+}
+template<> bool Vector4D<double>::isValid() const
+{
+ return !c_isnan(x) && !c_isnan(y) && !c_isnan(z) && !c_isnan(t);
+}
+
+//! Specialization for readable ints
+template<> std::string Vector4D<int>::toString() const
+{
+ char buf[256];
+ snprintf(buf, 256, "[%d,%d,%d,%d]", (*this)[0], (*this)[1], (*this)[2], (*this)[3]);
+ return std::string(buf);
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/helper/util/vector4d.h b/extern/mantaflow/helper/util/vector4d.h
new file mode 100644
index 00000000000..c3d72ac8aff
--- /dev/null
+++ b/extern/mantaflow/helper/util/vector4d.h
@@ -0,0 +1,515 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * 4D vector class
+ *
+ ******************************************************************************/
+
+#ifndef _VECTOR4D_H
+#define _VECTOR4D_H
+
+#include "vectorbase.h"
+
+namespace Manta {
+
+//! Basic inlined vector class
+template<class S> class Vector4D {
+ public:
+ //! Constructor
+ inline Vector4D() : x(0), y(0), z(0), t(0)
+ {
+ }
+
+ //! Copy-Constructor
+ inline Vector4D(const Vector4D<S> &v) : x(v.x), y(v.y), z(v.z), t(v.t)
+ {
+ }
+
+  //! Construct from float array
+ inline Vector4D(const float *v) : x((S)v[0]), y((S)v[1]), z((S)v[2]), t((S)v[3])
+ {
+ }
+
+  //! Construct from double array
+ inline Vector4D(const double *v) : x((S)v[0]), y((S)v[1]), z((S)v[2]), t((S)v[3])
+ {
+ }
+
+ //! Construct a vector from one S
+ inline Vector4D(S v) : x(v), y(v), z(v), t(v)
+ {
+ }
+
+  //! Construct a vector from four Ss
+ inline Vector4D(S vx, S vy, S vz, S vw) : x(vx), y(vy), z(vz), t(vw)
+ {
+ }
+
+ // Operators
+
+ //! Assignment operator
+ inline const Vector4D<S> &operator=(const Vector4D<S> &v)
+ {
+ x = v.x;
+ y = v.y;
+ z = v.z;
+ t = v.t;
+ return *this;
+ }
+ //! Assignment operator
+ inline const Vector4D<S> &operator=(S s)
+ {
+ x = y = z = t = s;
+ return *this;
+ }
+ //! Assign and add operator
+ inline const Vector4D<S> &operator+=(const Vector4D<S> &v)
+ {
+ x += v.x;
+ y += v.y;
+ z += v.z;
+ t += v.t;
+ return *this;
+ }
+ //! Assign and add operator
+ inline const Vector4D<S> &operator+=(S s)
+ {
+ x += s;
+ y += s;
+ z += s;
+ t += s;
+ return *this;
+ }
+ //! Assign and sub operator
+ inline const Vector4D<S> &operator-=(const Vector4D<S> &v)
+ {
+ x -= v.x;
+ y -= v.y;
+ z -= v.z;
+ t -= v.t;
+ return *this;
+ }
+ //! Assign and sub operator
+ inline const Vector4D<S> &operator-=(S s)
+ {
+ x -= s;
+ y -= s;
+ z -= s;
+ t -= s;
+ return *this;
+ }
+ //! Assign and mult operator
+ inline const Vector4D<S> &operator*=(const Vector4D<S> &v)
+ {
+ x *= v.x;
+ y *= v.y;
+ z *= v.z;
+ t *= v.t;
+ return *this;
+ }
+ //! Assign and mult operator
+ inline const Vector4D<S> &operator*=(S s)
+ {
+ x *= s;
+ y *= s;
+ z *= s;
+ t *= s;
+ return *this;
+ }
+ //! Assign and div operator
+ inline const Vector4D<S> &operator/=(const Vector4D<S> &v)
+ {
+ x /= v.x;
+ y /= v.y;
+ z /= v.z;
+ t /= v.t;
+ return *this;
+ }
+ //! Assign and div operator
+ inline const Vector4D<S> &operator/=(S s)
+ {
+ x /= s;
+ y /= s;
+ z /= s;
+ t /= s;
+ return *this;
+ }
+ //! Negation operator
+ inline Vector4D<S> operator-() const
+ {
+ return Vector4D<S>(-x, -y, -z, -t);
+ }
+
+ //! Get smallest component
+ // inline S min() const { return ( x<y ) ? ( ( x<z ) ? x:z ) : ( ( y<z ) ? y:z ); }
+ //! Get biggest component
+ // inline S max() const { return ( x>y ) ? ( ( x>z ) ? x:z ) : ( ( y>z ) ? y:z ); }
+
+ //! Test if all components are zero
+ inline bool empty()
+ {
+ return x == 0 && y == 0 && z == 0 && t == 0;
+ }
+
+ //! access operator
+ inline S &operator[](unsigned int i)
+ {
+ return value[i];
+ }
+ //! constant access operator
+ inline const S &operator[](unsigned int i) const
+ {
+ return value[i];
+ }
+
+ //! debug output vector to a string
+ std::string toString() const;
+
+ //! test if nans are present
+ bool isValid() const;
+
+ //! actual values
+ union {
+ S value[4];
+ struct {
+ S x;
+ S y;
+ S z;
+ S t;
+ };
+ struct {
+ S X;
+ S Y;
+ S Z;
+ S T;
+ };
+ };
+
+ // zero element
+ static const Vector4D<S> Zero, Invalid;
+
+ protected:
+};
+
+//************************************************************************
+// Additional operators
+//************************************************************************
+
+//! Addition operator
+template<class S> inline Vector4D<S> operator+(const Vector4D<S> &v1, const Vector4D<S> &v2)
+{
+ return Vector4D<S>(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z, v1.t + v2.t);
+}
+//! Addition operator
+template<class S, class S2> inline Vector4D<S> operator+(const Vector4D<S> &v, S2 s)
+{
+ return Vector4D<S>(v.x + s, v.y + s, v.z + s, v.t + s);
+}
+//! Addition operator
+template<class S, class S2> inline Vector4D<S> operator+(S2 s, const Vector4D<S> &v)
+{
+ return Vector4D<S>(v.x + s, v.y + s, v.z + s, v.t + s);
+}
+
+//! Subtraction operator
+template<class S> inline Vector4D<S> operator-(const Vector4D<S> &v1, const Vector4D<S> &v2)
+{
+ return Vector4D<S>(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z, v1.t - v2.t);
+}
+//! Subtraction operator
+template<class S, class S2> inline Vector4D<S> operator-(const Vector4D<S> &v, S2 s)
+{
+ return Vector4D<S>(v.x - s, v.y - s, v.z - s, v.t - s);
+}
+//! Subtraction operator
+template<class S, class S2> inline Vector4D<S> operator-(S2 s, const Vector4D<S> &v)
+{
+ return Vector4D<S>(s - v.x, s - v.y, s - v.z, s - v.t);
+}
+
+//! Multiplication operator
+template<class S> inline Vector4D<S> operator*(const Vector4D<S> &v1, const Vector4D<S> &v2)
+{
+ return Vector4D<S>(v1.x * v2.x, v1.y * v2.y, v1.z * v2.z, v1.t * v2.t);
+}
+//! Multiplication operator
+template<class S, class S2> inline Vector4D<S> operator*(const Vector4D<S> &v, S2 s)
+{
+ return Vector4D<S>(v.x * s, v.y * s, v.z * s, v.t * s);
+}
+//! Multiplication operator
+template<class S, class S2> inline Vector4D<S> operator*(S2 s, const Vector4D<S> &v)
+{
+ return Vector4D<S>(s * v.x, s * v.y, s * v.z, s * v.t);
+}
+
+//! Division operator
+template<class S> inline Vector4D<S> operator/(const Vector4D<S> &v1, const Vector4D<S> &v2)
+{
+ return Vector4D<S>(v1.x / v2.x, v1.y / v2.y, v1.z / v2.z, v1.t / v2.t);
+}
+//! Division operator
+template<class S, class S2> inline Vector4D<S> operator/(const Vector4D<S> &v, S2 s)
+{
+ return Vector4D<S>(v.x / s, v.y / s, v.z / s, v.t / s);
+}
+//! Division operator
+template<class S, class S2> inline Vector4D<S> operator/(S2 s, const Vector4D<S> &v)
+{
+ return Vector4D<S>(s / v.x, s / v.y, s / v.z, s / v.t);
+}
+
+//! Comparison operator
+template<class S> inline bool operator==(const Vector4D<S> &s1, const Vector4D<S> &s2)
+{
+ return s1.x == s2.x && s1.y == s2.y && s1.z == s2.z && s1.t == s2.t;
+}
+
+//! Comparison operator
+template<class S> inline bool operator!=(const Vector4D<S> &s1, const Vector4D<S> &s2)
+{
+ return s1.x != s2.x || s1.y != s2.y || s1.z != s2.z || s1.t != s2.t;
+}
+
+//************************************************************************
+// External functions
+//************************************************************************
+
+//! Dot product
+template<class S> inline S dot(const Vector4D<S> &t, const Vector4D<S> &v)
+{
+ return t.x * v.x + t.y * v.y + t.z * v.z + t.t * v.t;
+}
+
+//! Cross product
+/*template<class S>
+inline Vector4D<S> cross ( const Vector4D<S> &t, const Vector4D<S> &v ) {
+ NYI Vector4D<S> cp (
+ ( ( t.y*v.z ) - ( t.z*v.y ) ),
+ ( ( t.z*v.x ) - ( t.x*v.z ) ),
+ ( ( t.x*v.y ) - ( t.y*v.x ) ) );
+ return cp;
+}*/
+
+//! Compute the magnitude (length) of the vector
+template<class S> inline S norm(const Vector4D<S> &v)
+{
+ S l = v.x * v.x + v.y * v.y + v.z * v.z + v.t * v.t;
+ return (fabs(l - 1.) < VECTOR_EPSILON * VECTOR_EPSILON) ? 1. : sqrt(l);
+}
+
+//! Compute squared magnitude
+template<class S> inline S normSquare(const Vector4D<S> &v)
+{
+ return v.x * v.x + v.y * v.y + v.z * v.z + v.t * v.t;
+}
+
+//! Returns a normalized vector
+template<class S> inline Vector4D<S> getNormalized(const Vector4D<S> &v)
+{
+ S l = v.x * v.x + v.y * v.y + v.z * v.z + v.t * v.t;
+ if (fabs(l - 1.) < VECTOR_EPSILON * VECTOR_EPSILON)
+ return v; /* normalized "enough"... */
+ else if (l > VECTOR_EPSILON * VECTOR_EPSILON) {
+ S fac = 1. / sqrt(l);
+ return Vector4D<S>(v.x * fac, v.y * fac, v.z * fac, v.t * fac);
+ }
+ else
+ return Vector4D<S>((S)0);
+}
+
+//! Compute the norm of the vector and normalize it.
+/*! \return The value of the norm */
+template<class S> inline S normalize(Vector4D<S> &v)
+{
+ S norm;
+ S l = v.x * v.x + v.y * v.y + v.z * v.z + v.t * v.t;
+ if (fabs(l - 1.) < VECTOR_EPSILON * VECTOR_EPSILON) {
+ norm = 1.;
+ }
+ else if (l > VECTOR_EPSILON * VECTOR_EPSILON) {
+ norm = sqrt(l);
+ v *= 1. / norm;
+ }
+ else {
+ v = Vector4D<S>::Zero;
+ norm = 0.;
+ }
+ return (S)norm;
+}
+
+//! Outputs the object in human readable form as string
+template<class S> std::string Vector4D<S>::toString() const
+{
+ char buf[256];
+ snprintf(buf,
+ 256,
+ "[%+4.6f,%+4.6f,%+4.6f,%+4.6f]",
+ (double)(*this)[0],
+ (double)(*this)[1],
+ (double)(*this)[2],
+ (double)(*this)[3]);
+ // for debugging, optionally increase precision:
+ // snprintf ( buf,256,"[%+4.16f,%+4.16f,%+4.16f,%+4.16f]", ( double ) ( *this ) [0], ( double ) (
+ // *this ) [1], ( double ) ( *this ) [2], ( double ) ( *this ) [3] );
+ return std::string(buf);
+}
+
+template<> std::string Vector4D<int>::toString() const;
+
+//! Outputs the object in human readable form to stream
+template<class S> std::ostream &operator<<(std::ostream &os, const Vector4D<S> &i)
+{
+ os << i.toString();
+ return os;
+}
+
+//! Reads the contents of the object from a stream
+template<class S> std::istream &operator>>(std::istream &is, Vector4D<S> &i)
+{
+ char c;
+ char dummy[4];
+ is >> c >> i[0] >> dummy >> i[1] >> dummy >> i[2] >> dummy >> i[3] >> c;
+ return is;
+}
+
+/**************************************************************************/
+// Define default vector alias
+/**************************************************************************/
+
+//! 4D vector class of type Real (typically float)
+typedef Vector4D<Real> Vec4;
+
+//! 4D vector class of type int
+typedef Vector4D<int> Vec4i;
+
+//! convert to Real Vector
+template<class T> inline Vec4 toVec4(T v)
+{
+ return Vec4(v[0], v[1], v[2], v[3]);
+}
+template<class T> inline Vec4i toVec4i(T v)
+{
+ return Vec4i(v[0], v[1], v[2], v[3]);
+}
+
+/**************************************************************************/
+// Specializations for common math functions
+/**************************************************************************/
+
+template<> inline Vec4 clamp<Vec4>(const Vec4 &a, const Vec4 &b, const Vec4 &c)
+{
+ return Vec4(
+ clamp(a.x, b.x, c.x), clamp(a.y, b.y, c.y), clamp(a.z, b.z, c.z), clamp(a.t, b.t, c.t));
+}
+template<> inline Vec4 safeDivide<Vec4>(const Vec4 &a, const Vec4 &b)
+{
+ return Vec4(
+ safeDivide(a.x, b.x), safeDivide(a.y, b.y), safeDivide(a.z, b.z), safeDivide(a.t, b.t));
+}
+template<> inline Vec4 nmod<Vec4>(const Vec4 &a, const Vec4 &b)
+{
+ return Vec4(nmod(a.x, b.x), nmod(a.y, b.y), nmod(a.z, b.z), nmod(a.t, b.t));
+}
+
+/**************************************************************************/
+// 4d interpolation (note only 4d here, 2d/3d interpolations are in interpol.h)
+/**************************************************************************/
+
+#define BUILD_INDEX_4D \
+ Real px = pos.x - 0.5f, py = pos.y - 0.5f, pz = pos.z - 0.5f, pt = pos.t - 0.5f; \
+ int xi = (int)px; \
+ int yi = (int)py; \
+ int zi = (int)pz; \
+ int ti = (int)pt; \
+ Real s1 = px - (Real)xi, s0 = 1. - s1; \
+ Real t1 = py - (Real)yi, t0 = 1. - t1; \
+ Real f1 = pz - (Real)zi, f0 = 1. - f1; \
+ Real g1 = pt - (Real)ti, g0 = 1. - g1; \
+ /* clamp to border */ \
+ if (px < 0.) { \
+ xi = 0; \
+ s0 = 1.0; \
+ s1 = 0.0; \
+ } \
+ if (py < 0.) { \
+ yi = 0; \
+ t0 = 1.0; \
+ t1 = 0.0; \
+ } \
+ if (pz < 0.) { \
+ zi = 0; \
+ f0 = 1.0; \
+ f1 = 0.0; \
+ } \
+ if (pt < 0.) { \
+ ti = 0; \
+ g0 = 1.0; \
+ g1 = 0.0; \
+ } \
+ if (xi >= size.x - 1) { \
+ xi = size.x - 2; \
+ s0 = 0.0; \
+ s1 = 1.0; \
+ } \
+ if (yi >= size.y - 1) { \
+ yi = size.y - 2; \
+ t0 = 0.0; \
+ t1 = 1.0; \
+ } \
+ if (zi >= size.z - 1) { \
+ zi = size.z - 2; \
+ f0 = 0.0; \
+ f1 = 1.0; \
+ } \
+ if (ti >= size.t - 1) { \
+ ti = size.t - 2; \
+ g0 = 0.0; \
+ g1 = 1.0; \
+ } \
+ const int sX = 1; \
+ const int sY = size.x;
+
+static inline void checkIndexInterpol4d(const Vec4i &size, int idx)
+{
+ if (idx < 0 || idx > size.x * size.y * size.z * size.t) {
+ std::ostringstream s;
+ s << "Grid interpol4d dim " << size << " : index " << idx << " out of bound ";
+ errMsg(s.str());
+ }
+}
+
+template<class T>
+inline T interpol4d(
+ const T *data, const Vec4i &size, const IndexInt sZ, const IndexInt sT, const Vec4 &pos)
+{
+ BUILD_INDEX_4D
+ IndexInt idx = (IndexInt)xi + sY * (IndexInt)yi + sZ * (IndexInt)zi + sT * (IndexInt)ti;
+ DEBUG_ONLY(checkIndexInterpol4d(size, idx));
+ DEBUG_ONLY(checkIndexInterpol4d(size, idx + sX + sY + sZ + sT));
+
+ return (((data[idx] * t0 + data[idx + sY] * t1) * s0 +
+ (data[idx + sX] * t0 + data[idx + sX + sY] * t1) * s1) *
+ f0 +
+ ((data[idx + sZ] * t0 + data[idx + sY + sZ] * t1) * s0 +
+ (data[idx + sX + sZ] * t0 + data[idx + sX + sY + sZ] * t1) * s1) *
+ f1) *
+ g0 +
+ (((data[idx + sT] * t0 + data[idx + sT + sY] * t1) * s0 +
+ (data[idx + sT + sX] * t0 + data[idx + sT + sX + sY] * t1) * s1) *
+ f0 +
+ ((data[idx + sT + sZ] * t0 + data[idx + sT + sY + sZ] * t1) * s0 +
+ (data[idx + sT + sX + sZ] * t0 + data[idx + sT + sX + sY + sZ] * t1) * s1) *
+ f1) *
+ g1;
+}
+
+}; // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/helper/util/vectorbase.cpp b/extern/mantaflow/helper/util/vectorbase.cpp
new file mode 100644
index 00000000000..413ae086d1c
--- /dev/null
+++ b/extern/mantaflow/helper/util/vectorbase.cpp
@@ -0,0 +1,49 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Basic vector class
+ *
+ ******************************************************************************/
+
+#include "vectorbase.h"
+
+using namespace std;
+
+namespace Manta {
+
+template<> const Vector3D<int> Vector3D<int>::Zero(0, 0, 0);
+template<> const Vector3D<float> Vector3D<float>::Zero(0.f, 0.f, 0.f);
+template<> const Vector3D<double> Vector3D<double>::Zero(0., 0., 0.);
+template<>
+const Vector3D<float> Vector3D<float>::Invalid(numeric_limits<float>::quiet_NaN(),
+ numeric_limits<float>::quiet_NaN(),
+ numeric_limits<float>::quiet_NaN());
+template<>
+const Vector3D<double> Vector3D<double>::Invalid(numeric_limits<double>::quiet_NaN(),
+ numeric_limits<double>::quiet_NaN(),
+ numeric_limits<double>::quiet_NaN());
+
+template<> bool Vector3D<float>::isValid() const
+{
+ return !c_isnan(x) && !c_isnan(y) && !c_isnan(z);
+}
+template<> bool Vector3D<double>::isValid() const
+{
+ return !c_isnan(x) && !c_isnan(y) && !c_isnan(z);
+}
+
+//! Specialization for readable ints
+template<> std::string Vector3D<int>::toString() const
+{
+ char buf[256];
+ snprintf(buf, 256, "[%d,%d,%d]", (*this)[0], (*this)[1], (*this)[2]);
+ return std::string(buf);
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/helper/util/vectorbase.h b/extern/mantaflow/helper/util/vectorbase.h
new file mode 100644
index 00000000000..a3135431eb3
--- /dev/null
+++ b/extern/mantaflow/helper/util/vectorbase.h
@@ -0,0 +1,679 @@
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2016 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Basic vector class
+ *
+ ******************************************************************************/
+
+#ifndef _VECTORBASE_H
+#define _VECTORBASE_H
+
+// get rid of windows min/max defines
+#if defined(WIN32) || defined(_WIN32)
+# define NOMINMAX
+#endif
+
+#include <stdio.h>
+#include <string>
+#include <limits>
+#include <iostream>
+#include "general.h"
+
+// if min/max are still around...
+#if defined(WIN32) || defined(_WIN32)
+# undef min
+# undef max
+#endif
+
+// redefine usage of some windows functions
+#if defined(WIN32) || defined(_WIN32)
+# ifndef snprintf
+# define snprintf _snprintf
+# endif
+#endif
+
+// use which fp-precision? 1=float, 2=double
+#ifndef FLOATINGPOINT_PRECISION
+# define FLOATINGPOINT_PRECISION 1
+#endif
+
+// VECTOR_EPSILON is the minimal vector length
+// In order to be able to discriminate floating point values near zero, and
+// to be sure not to fail a comparison because of roundoff errors, use this
+// value as a threshold.
+#if FLOATINGPOINT_PRECISION == 1
+typedef float Real;
+# define VECTOR_EPSILON (1e-6f)
+#else
+typedef double Real;
+# define VECTOR_EPSILON (1e-10)
+#endif
+
+#ifndef M_PI
+# define M_PI 3.1415926536
+#endif
+#ifndef M_E
+# define M_E 2.7182818284
+#endif
+
+namespace Manta {
+
//! Basic inlined vector class.
//! Components are stored in a union so they are addressable as x/y/z, X/Y/Z,
//! or by index through value[0..2] (see operator[]).
template<class S> class Vector3D {
 public:
  //! Constructor — zero-initializes all components
  inline Vector3D() : x(0), y(0), z(0)
  {
  }

  //! Copy-Constructor
  inline Vector3D(const Vector3D<S> &v) : x(v.x), y(v.y), z(v.z)
  {
  }

  //! Copy-Constructor from a raw int array (must hold at least 3 elements)
  inline Vector3D(const int *v) : x((S)v[0]), y((S)v[1]), z((S)v[2])
  {
  }

  //! Copy-Constructor from a raw float array (must hold at least 3 elements)
  inline Vector3D(const float *v) : x((S)v[0]), y((S)v[1]), z((S)v[2])
  {
  }

  //! Copy-Constructor from a raw double array (must hold at least 3 elements)
  inline Vector3D(const double *v) : x((S)v[0]), y((S)v[1]), z((S)v[2])
  {
  }

  //! Construct a vector from one S (splat: all three components set to v)
  inline Vector3D(S v) : x(v), y(v), z(v)
  {
  }

  //! Construct a vector from three Ss
  inline Vector3D(S vx, S vy, S vz) : x(vx), y(vy), z(vz)
  {
  }

  // Operators

  //! Assignment operator
  inline const Vector3D<S> &operator=(const Vector3D<S> &v)
  {
    x = v.x;
    y = v.y;
    z = v.z;
    return *this;
  }
  //! Assignment operator (splat: sets all components to s)
  inline const Vector3D<S> &operator=(S s)
  {
    x = y = z = s;
    return *this;
  }
  //! Assign and add operator (component-wise)
  inline const Vector3D<S> &operator+=(const Vector3D<S> &v)
  {
    x += v.x;
    y += v.y;
    z += v.z;
    return *this;
  }
  //! Assign and add operator (adds s to every component)
  inline const Vector3D<S> &operator+=(S s)
  {
    x += s;
    y += s;
    z += s;
    return *this;
  }
  //! Assign and sub operator (component-wise)
  inline const Vector3D<S> &operator-=(const Vector3D<S> &v)
  {
    x -= v.x;
    y -= v.y;
    z -= v.z;
    return *this;
  }
  //! Assign and sub operator (subtracts s from every component)
  inline const Vector3D<S> &operator-=(S s)
  {
    x -= s;
    y -= s;
    z -= s;
    return *this;
  }
  //! Assign and mult operator (component-wise, not a dot product)
  inline const Vector3D<S> &operator*=(const Vector3D<S> &v)
  {
    x *= v.x;
    y *= v.y;
    z *= v.z;
    return *this;
  }
  //! Assign and mult operator (scales every component by s)
  inline const Vector3D<S> &operator*=(S s)
  {
    x *= s;
    y *= s;
    z *= s;
    return *this;
  }
  //! Assign and div operator (component-wise; no zero check)
  inline const Vector3D<S> &operator/=(const Vector3D<S> &v)
  {
    x /= v.x;
    y /= v.y;
    z /= v.z;
    return *this;
  }
  //! Assign and div operator (divides every component by s; no zero check)
  inline const Vector3D<S> &operator/=(S s)
  {
    x /= s;
    y /= s;
    z /= s;
    return *this;
  }
  //! Negation operator
  inline Vector3D<S> operator-() const
  {
    return Vector3D<S>(-x, -y, -z);
  }

  //! Get smallest component
  inline S min() const
  {
    return (x < y) ? ((x < z) ? x : z) : ((y < z) ? y : z);
  }
  //! Get biggest component
  inline S max() const
  {
    return (x > y) ? ((x > z) ? x : z) : ((y > z) ? y : z);
  }

  //! Test if all components are zero (exact comparison, no epsilon)
  inline bool empty()
  {
    return x == 0 && y == 0 && z == 0;
  }

  //! access operator — unchecked, i must be in [0,2]
  inline S &operator[](unsigned int i)
  {
    return value[i];
  }
  //! constant access operator — unchecked, i must be in [0,2]
  inline const S &operator[](unsigned int i) const
  {
    return value[i];
  }

  //! debug output vector to a string
  std::string toString() const;

  //! test if nans are present (specialized for float/double in vectorbase.cpp)
  bool isValid() const;

  //! actual values; value[], x/y/z and X/Y/Z all alias the same storage
  union {
    S value[3];
    struct {
      S x;
      S y;
      S z;
    };
    struct {
      S X;
      S Y;
      S Z;
    };
  };

  //! zero element and all-NaN marker (defined in vectorbase.cpp)
  static const Vector3D<S> Zero, Invalid;

  //! For compatibility with 4d vectors (discards 4th comp)
  inline Vector3D(S vx, S vy, S vz, S vDummy) : x(vx), y(vy), z(vz)
  {
  }

 protected:
};
+
+//! helper to check whether float/double value is non-zero
+inline bool notZero(Real f)
+{
+ if (std::abs(f) > VECTOR_EPSILON)
+ return true;
+ return false;
+}
+
//************************************************************************
// Additional operators
// Note: the scalar overloads take a second template type S2, so mixed
// precision (e.g. Vec3f + double) computes in the promoted type and then
// narrows to S on construction.
//************************************************************************

//! Addition operator (component-wise)
template<class S> inline Vector3D<S> operator+(const Vector3D<S> &v1, const Vector3D<S> &v2)
{
  return Vector3D<S>(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z);
}
//! Addition operator (adds scalar s to every component)
template<class S, class S2> inline Vector3D<S> operator+(const Vector3D<S> &v, S2 s)
{
  return Vector3D<S>(v.x + s, v.y + s, v.z + s);
}
//! Addition operator (scalar on the left, commutative)
template<class S, class S2> inline Vector3D<S> operator+(S2 s, const Vector3D<S> &v)
{
  return Vector3D<S>(v.x + s, v.y + s, v.z + s);
}

//! Subtraction operator (component-wise)
template<class S> inline Vector3D<S> operator-(const Vector3D<S> &v1, const Vector3D<S> &v2)
{
  return Vector3D<S>(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z);
}
//! Subtraction operator (subtracts scalar s from every component)
template<class S, class S2> inline Vector3D<S> operator-(const Vector3D<S> &v, S2 s)
{
  return Vector3D<S>(v.x - s, v.y - s, v.z - s);
}
//! Subtraction operator (scalar minus vector, NOT commutative)
template<class S, class S2> inline Vector3D<S> operator-(S2 s, const Vector3D<S> &v)
{
  return Vector3D<S>(s - v.x, s - v.y, s - v.z);
}

//! Multiplication operator (component-wise, not a dot product)
template<class S> inline Vector3D<S> operator*(const Vector3D<S> &v1, const Vector3D<S> &v2)
{
  return Vector3D<S>(v1.x * v2.x, v1.y * v2.y, v1.z * v2.z);
}
//! Multiplication operator (scales every component by s)
template<class S, class S2> inline Vector3D<S> operator*(const Vector3D<S> &v, S2 s)
{
  return Vector3D<S>(v.x * s, v.y * s, v.z * s);
}
//! Multiplication operator (scalar on the left, commutative)
template<class S, class S2> inline Vector3D<S> operator*(S2 s, const Vector3D<S> &v)
{
  return Vector3D<S>(s * v.x, s * v.y, s * v.z);
}

//! Division operator (component-wise; no zero check)
template<class S> inline Vector3D<S> operator/(const Vector3D<S> &v1, const Vector3D<S> &v2)
{
  return Vector3D<S>(v1.x / v2.x, v1.y / v2.y, v1.z / v2.z);
}
//! Division operator (divides every component by s; no zero check)
template<class S, class S2> inline Vector3D<S> operator/(const Vector3D<S> &v, S2 s)
{
  return Vector3D<S>(v.x / s, v.y / s, v.z / s);
}
//! Division operator (scalar divided by each component, NOT commutative)
template<class S, class S2> inline Vector3D<S> operator/(S2 s, const Vector3D<S> &v)
{
  return Vector3D<S>(s / v.x, s / v.y, s / v.z);
}

//! Comparison operator — exact equality of all components (no epsilon)
template<class S> inline bool operator==(const Vector3D<S> &s1, const Vector3D<S> &s2)
{
  return s1.x == s2.x && s1.y == s2.y && s1.z == s2.z;
}

//! Comparison operator — true if any component differs
template<class S> inline bool operator!=(const Vector3D<S> &s1, const Vector3D<S> &s2)
{
  return s1.x != s2.x || s1.y != s2.y || s1.z != s2.z;
}
+
//************************************************************************
// External functions
//************************************************************************

//! Min operator (component-wise minimum of two vectors)
template<class S> inline Vector3D<S> vmin(const Vector3D<S> &s1, const Vector3D<S> &s2)
{
  return Vector3D<S>(std::min(s1.x, s2.x), std::min(s1.y, s2.y), std::min(s1.z, s2.z));
}

//! Min operator (clamp each component against scalar s2; S2 must be
//! comparable with S for std::min deduction)
template<class S, class S2> inline Vector3D<S> vmin(const Vector3D<S> &s1, S2 s2)
{
  return Vector3D<S>(std::min(s1.x, s2), std::min(s1.y, s2), std::min(s1.z, s2));
}

//! Min operator (scalar on the left)
template<class S1, class S> inline Vector3D<S> vmin(S1 s1, const Vector3D<S> &s2)
{
  return Vector3D<S>(std::min(s1, s2.x), std::min(s1, s2.y), std::min(s1, s2.z));
}

//! Max operator (component-wise maximum of two vectors)
template<class S> inline Vector3D<S> vmax(const Vector3D<S> &s1, const Vector3D<S> &s2)
{
  return Vector3D<S>(std::max(s1.x, s2.x), std::max(s1.y, s2.y), std::max(s1.z, s2.z));
}

//! Max operator (clamp each component against scalar s2)
template<class S, class S2> inline Vector3D<S> vmax(const Vector3D<S> &s1, S2 s2)
{
  return Vector3D<S>(std::max(s1.x, s2), std::max(s1.y, s2), std::max(s1.z, s2));
}

//! Max operator (scalar on the left)
template<class S1, class S> inline Vector3D<S> vmax(S1 s1, const Vector3D<S> &s2)
{
  return Vector3D<S>(std::max(s1, s2.x), std::max(s1, s2.y), std::max(s1, s2.z));
}
+
+//! Dot product
+template<class S> inline S dot(const Vector3D<S> &t, const Vector3D<S> &v)
+{
+ return t.x * v.x + t.y * v.y + t.z * v.z;
+}
+
+//! Cross product
+template<class S> inline Vector3D<S> cross(const Vector3D<S> &t, const Vector3D<S> &v)
+{
+ Vector3D<S> cp(
+ ((t.y * v.z) - (t.z * v.y)), ((t.z * v.x) - (t.x * v.z)), ((t.x * v.y) - (t.y * v.x)));
+ return cp;
+}
+
+//! Project a vector into a plane, defined by its normal
+/*! Projects a vector into a plane normal to the given vector, which must
+ have unit length. Self is modified.
+ \param v The vector to project
+ \param n The plane normal
+ \return The projected vector */
+template<class S>
+inline const Vector3D<S> &projectNormalTo(const Vector3D<S> &v, const Vector3D<S> &n)
+{
+ S sprod = dot(v, n);
+ return v - n * dot(v, n);
+}
+
+//! Compute the magnitude (length) of the vector
+//! (clamps to 0 and 1 with VECTOR_EPSILON)
+template<class S> inline S norm(const Vector3D<S> &v)
+{
+ S l = v.x * v.x + v.y * v.y + v.z * v.z;
+ if (l <= VECTOR_EPSILON * VECTOR_EPSILON)
+ return (0.);
+ return (fabs(l - 1.) < VECTOR_EPSILON * VECTOR_EPSILON) ? 1. : sqrt(l);
+}
+
+//! Compute squared magnitude
+template<class S> inline S normSquare(const Vector3D<S> &v)
+{
+ return v.x * v.x + v.y * v.y + v.z * v.z;
+}
+
//! compatibility, allow use of int, Real and Vec inputs with norm/normSquare.
//! Note: fabs/abs keep NaN and -0.0 semantics consistent with the vector
//! versions; square() comes from general.h.
inline Real norm(const Real v)
{
  return fabs(v);
}
inline Real normSquare(const Real v)
{
  return square(v);
}
inline Real norm(const int v)
{
  return abs(v);
}
inline Real normSquare(const int v)
{
  return square(v);
}
+
+//! Returns a normalized vector
+template<class S> inline Vector3D<S> getNormalized(const Vector3D<S> &v)
+{
+ S l = v.x * v.x + v.y * v.y + v.z * v.z;
+ if (fabs(l - 1.) < VECTOR_EPSILON * VECTOR_EPSILON)
+ return v; /* normalized "enough"... */
+ else if (l > VECTOR_EPSILON * VECTOR_EPSILON) {
+ S fac = 1. / sqrt(l);
+ return Vector3D<S>(v.x * fac, v.y * fac, v.z * fac);
+ }
+ else
+ return Vector3D<S>((S)0);
+}
+
+//! Compute the norm of the vector and normalize it.
+/*! \return The value of the norm */
+template<class S> inline S normalize(Vector3D<S> &v)
+{
+ S norm;
+ S l = v.x * v.x + v.y * v.y + v.z * v.z;
+ if (fabs(l - 1.) < VECTOR_EPSILON * VECTOR_EPSILON) {
+ norm = 1.;
+ }
+ else if (l > VECTOR_EPSILON * VECTOR_EPSILON) {
+ norm = sqrt(l);
+ v *= 1. / norm;
+ }
+ else {
+ v = Vector3D<S>::Zero;
+ norm = 0.;
+ }
+ return (S)norm;
+}
+
+//! Obtain an orthogonal vector
+/*! Compute a vector that is orthonormal to the given vector.
+ * Nothing else can be assumed for the direction of the new vector.
+ * \return The orthonormal vector */
+template<class S> Vector3D<S> getOrthogonalVector(const Vector3D<S> &v)
+{
+ // Determine the component with max. absolute value
+ int maxIndex = (fabs(v.x) > fabs(v.y)) ? 0 : 1;
+ maxIndex = (fabs(v[maxIndex]) > fabs(v.z)) ? maxIndex : 2;
+
+ // Choose another axis than the one with max. component and project
+ // orthogonal to self
+ Vector3D<S> o(0.0);
+ o[(maxIndex + 1) % 3] = 1;
+
+ Vector3D<S> c = cross(v, o);
+ normalize(c);
+ return c;
+}
+
//! Convert vector to polar coordinates
/*! Stable vector to angle conversion.
    Note: the branches below deliberately compute phi and then conditionally
    overwrite it; the statement order is significant.
    \param v vector to convert
    \param phi unique angle [0,2PI]
    \param theta unique angle [0,PI]
 */
template<class S> inline void vecToAngle(const Vector3D<S> &v, S &phi, S &theta)
{
  // theta: inclination measured from the +y axis
  if (fabs(v.y) < VECTOR_EPSILON)
    theta = M_PI / 2;
  else if (fabs(v.x) < VECTOR_EPSILON && fabs(v.z) < VECTOR_EPSILON)
    theta = (v.y >= 0) ? 0 : M_PI;
  else
    theta = atan(sqrt(v.x * v.x + v.z * v.z) / v.y);
  if (theta < 0)
    theta += M_PI;

  // phi: azimuth in the x/z plane, disambiguated by the signs of x and z
  if (fabs(v.x) < VECTOR_EPSILON)
    phi = M_PI / 2;
  else
    phi = atan(v.z / v.x);
  if (phi < 0)
    phi += M_PI;
  if (fabs(v.z) < VECTOR_EPSILON)
    phi = (v.x >= 0) ? 0 : M_PI;
  else if (v.z < 0)
    phi += M_PI;
}
+
+//! Compute vector reflected at a surface
+/*! Compute a vector, that is self (as an incoming vector)
+ * reflected at a surface with a distinct normal vector.
+ * Note that the normal is reversed, if the scalar product with it is positive.
+ \param t The incoming vector
+ \param n The surface normal
+ \return The new reflected vector
+ */
+template<class S> inline Vector3D<S> reflectVector(const Vector3D<S> &t, const Vector3D<S> &n)
+{
+ Vector3D<S> nn = (dot(t, n) > 0.0) ? (n * -1.0) : n;
+ return (t - nn * (2.0 * dot(nn, t)));
+}
+
+//! Compute vector refracted at a surface
+/*! \param t The incoming vector
+ * \param n The surface normal
+ * \param nt The "inside" refraction index
+ * \param nair The "outside" refraction index
+ * \param refRefl Set to 1 on total reflection
+ * \return The refracted vector
+ */
+template<class S>
+inline Vector3D<S> refractVector(
+ const Vector3D<S> &t, const Vector3D<S> &normal, S nt, S nair, int &refRefl)
+{
+ // from Glassner's book, section 5.2 (Heckberts method)
+ S eta = nair / nt;
+ S n = -dot(t, normal);
+ S tt = 1.0 + eta * eta * (n * n - 1.0);
+ if (tt < 0.0) {
+ // we have total reflection!
+ refRefl = 1;
+ }
+ else {
+ // normal reflection
+ tt = eta * n - sqrt(tt);
+ return (t * eta + normal * tt);
+ }
+ return t;
+}
+
//! Outputs the object in human readable form as string
//! Format: "[x,y,z]" with fixed precision; components are widened to double
//! for the printf conversion.
template<class S> std::string Vector3D<S>::toString() const
{
  char buf[256];
  snprintf(buf,
           256,
           "[%+4.6f,%+4.6f,%+4.6f]",
           (double)(*this)[0],
           (double)(*this)[1],
           (double)(*this)[2]);
  // for debugging, optionally increase precision:
  // snprintf ( buf,256,"[%+4.16f,%+4.16f,%+4.16f]", ( double ) ( *this ) [0], ( double ) ( *this )
  // [1], ( double ) ( *this ) [2] );
  return std::string(buf);
}

// int specialization prints plain integers, see vectorbase.cpp
template<> std::string Vector3D<int>::toString() const;
+
+//! Outputs the object in human readable form to stream
+/*! Output format [x,y,z] */
+template<class S> std::ostream &operator<<(std::ostream &os, const Vector3D<S> &i)
+{
+ os << i.toString();
+ return os;
+}
+
+//! Reads the contents of the object from a stream
+/*! Input format [x,y,z] */
+template<class S> std::istream &operator>>(std::istream &is, Vector3D<S> &i)
+{
+ char c;
+ char dummy[3];
+ is >> c >> i[0] >> dummy >> i[1] >> dummy >> i[2] >> c;
+ return is;
+}
+
/**************************************************************************/
// Define default vector alias
/**************************************************************************/

//! 3D vector class of type Real (typically float, see FLOATINGPOINT_PRECISION)
typedef Vector3D<Real> Vec3;

//! 3D vector class of type int
typedef Vector3D<int> Vec3i;

//! convert to Real Vector (T can be any indexable 3-component type)
template<class T> inline Vec3 toVec3(T v)
{
  return Vec3(v[0], v[1], v[2]);
}

//! convert to int Vector (components are truncated, not rounded)
template<class T> inline Vec3i toVec3i(T v)
{
  return Vec3i((int)v[0], (int)v[1], (int)v[2]);
}

//! convert three scalars to int Vector (components are truncated, not rounded)
template<class T> inline Vec3i toVec3i(T v0, T v1, T v2)
{
  return Vec3i((int)v0, (int)v1, (int)v2);
}

//! round, and convert to int Vector
template<class T> inline Vec3i toVec3iRound(T v)
{
  return Vec3i((int)round(v[0]), (int)round(v[1]), (int)round(v[2]));
}

//! convert to int Vector if values are close enough to an int;
//! calls errMsg (general.h) if any component is off by more than 1e-5
template<class T> inline Vec3i toVec3iChecked(T v)
{
  Vec3i ret;
  for (size_t i = 0; i < 3; i++) {
    Real a = v[i];
    if (fabs(a - floor(a + 0.5)) > 1e-5)
      errMsg("argument is not an int, cannot convert");
    ret[i] = (int)(a + 0.5);
  }
  return ret;
}

//! convert to double Vector
template<class T> inline Vector3D<double> toVec3d(T v)
{
  return Vector3D<double>(v[0], v[1], v[2]);
}

//! convert to float Vector
template<class T> inline Vector3D<float> toVec3f(T v)
{
  return Vector3D<float>(v[0], v[1], v[2]);
}
+
/**************************************************************************/
// Specializations for common math functions
// (component-wise versions of the scalar helpers declared in general.h)
/**************************************************************************/

template<> inline Vec3 clamp<Vec3>(const Vec3 &a, const Vec3 &b, const Vec3 &c)
{
  return Vec3(clamp(a.x, b.x, c.x), clamp(a.y, b.y, c.y), clamp(a.z, b.z, c.z));
}
template<> inline Vec3 safeDivide<Vec3>(const Vec3 &a, const Vec3 &b)
{
  return Vec3(safeDivide(a.x, b.x), safeDivide(a.y, b.y), safeDivide(a.z, b.z));
}
template<> inline Vec3 nmod<Vec3>(const Vec3 &a, const Vec3 &b)
{
  return Vec3(nmod(a.x, b.x), nmod(a.y, b.y), nmod(a.z, b.z));
}
+
+}; // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/commonkernels.h b/extern/mantaflow/preprocessed/commonkernels.h
new file mode 100644
index 00000000000..7fa6f185146
--- /dev/null
+++ b/extern/mantaflow/preprocessed/commonkernels.h
@@ -0,0 +1,1300 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Common grid kernels
+ *
+ ******************************************************************************/
+
+#ifndef _COMMONKERNELS_H
+#define _COMMONKERNELS_H
+
+#include "general.h"
+#include "kernel.h"
+#include "grid.h"
+
+namespace Manta {
+
//! Kernel: Invert real values, if positive and fluid
// NOTE(review): this struct (like all kernels in this file) is emitted by the
// MantaFlow preprocessor ("prep generate", see file header) — regenerate
// instead of hand-editing. The constructor immediately runs the kernel over
// all cells via tbb::parallel_for.

struct InvertCheckFluid : public KernelBase {
  InvertCheckFluid(const FlagGrid &flags, Grid<Real> &grid)
      : KernelBase(&flags, 0), flags(flags), grid(grid)
  {
    runMessage();
    run();
  }
  // per-cell operation: replace positive fluid-cell values by their reciprocal
  inline void op(IndexInt idx, const FlagGrid &flags, Grid<Real> &grid) const
  {
    if (flags.isFluid(idx) && grid[idx] > 0)
      grid[idx] = 1.0 / grid[idx];
  }
  inline const FlagGrid &getArg0()
  {
    return flags;
  }
  typedef FlagGrid type0;
  inline Grid<Real> &getArg1()
  {
    return grid;
  }
  typedef Grid<Real> type1;
  void runMessage()
  {
    debMsg("Executing kernel InvertCheckFluid ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, flags, grid);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  const FlagGrid &flags;
  Grid<Real> &grid;
};

//! Kernel: Squared sum over grid
// NOTE(review): generated reduction kernel — uses tbb::parallel_reduce with
// the split-constructor/join protocol to accumulate `sum` across threads.

struct GridSumSqr : public KernelBase {
  GridSumSqr(const Grid<Real> &grid) : KernelBase(&grid, 0), grid(grid), sum(0)
  {
    runMessage();
    run();
  }
  // per-cell operation: accumulate the squared cell value
  inline void op(IndexInt idx, const Grid<Real> &grid, double &sum)
  {
    sum += square((double)grid[idx]);
  }
  inline operator double()
  {
    return sum;
  }
  inline double &getRet()
  {
    return sum;
  }
  inline const Grid<Real> &getArg0()
  {
    return grid;
  }
  typedef Grid<Real> type0;
  void runMessage()
  {
    debMsg("Executing kernel GridSumSqr ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r)
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, grid, sum);
  }
  void run()
  {
    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  // splitting constructor for tbb::parallel_reduce (fresh partial sum)
  GridSumSqr(GridSumSqr &o, tbb::split) : KernelBase(o), grid(o.grid), sum(0)
  {
  }
  void join(const GridSumSqr &o)
  {
    sum += o.sum;
  }
  const Grid<Real> &grid;
  double sum;
};
+
//! Kernel: rotation operator \nabla x v for centered vector fields
// NOTE(review): generated kernel (see file header); central differences with
// a 1-cell boundary (KernelBase border = 1). In 2D only the z-component of
// the curl is computed.

struct CurlOp : public KernelBase {
  CurlOp(const Grid<Vec3> &grid, Grid<Vec3> &dst) : KernelBase(&grid, 1), grid(grid), dst(dst)
  {
    runMessage();
    run();
  }
  inline void op(int i, int j, int k, const Grid<Vec3> &grid, Grid<Vec3> &dst) const
  {
    Vec3 v = Vec3(0.,
                  0.,
                  0.5 * ((grid(i + 1, j, k).y - grid(i - 1, j, k).y) -
                         (grid(i, j + 1, k).x - grid(i, j - 1, k).x)));
    if (dst.is3D()) {
      v[0] = 0.5 * ((grid(i, j + 1, k).z - grid(i, j - 1, k).z) -
                    (grid(i, j, k + 1).y - grid(i, j, k - 1).y));
      v[1] = 0.5 * ((grid(i, j, k + 1).x - grid(i, j, k - 1).x) -
                    (grid(i + 1, j, k).z - grid(i - 1, j, k).z));
    }
    dst(i, j, k) = v;
  }
  inline const Grid<Vec3> &getArg0()
  {
    return grid;
  }
  typedef Grid<Vec3> type0;
  inline Grid<Vec3> &getArg1()
  {
    return dst;
  }
  typedef Grid<Vec3> type1;
  void runMessage()
  {
    debMsg("Executing kernel CurlOp ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // parallelized over z-slices in 3D, over rows in 2D
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i, j, k, grid, dst);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, grid, dst);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  const Grid<Vec3> &grid;
  Grid<Vec3> &dst;
};
;
+
//! Kernel: divergence operator (from MAC grid)
// NOTE(review): generated kernel (see file header); forward differences on
// the staggered (MAC) velocity samples.

struct DivergenceOpMAC : public KernelBase {
  DivergenceOpMAC(Grid<Real> &div, const MACGrid &grid) : KernelBase(&div, 1), div(div), grid(grid)
  {
    runMessage();
    run();
  }
  inline void op(int i, int j, int k, Grid<Real> &div, const MACGrid &grid) const
  {
    Vec3 del = Vec3(grid(i + 1, j, k).x, grid(i, j + 1, k).y, 0.) - grid(i, j, k);
    if (grid.is3D())
      del[2] += grid(i, j, k + 1).z;
    else
      del[2] = 0.;
    div(i, j, k) = del.x + del.y + del.z;
  }
  inline Grid<Real> &getArg0()
  {
    return div;
  }
  typedef Grid<Real> type0;
  inline const MACGrid &getArg1()
  {
    return grid;
  }
  typedef MACGrid type1;
  void runMessage()
  {
    debMsg("Executing kernel DivergenceOpMAC ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i, j, k, div, grid);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, div, grid);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  Grid<Real> &div;
  const MACGrid &grid;
};
+
//! Kernel: gradient operator for MAC grid
// NOTE(review): generated kernel (see file header); backward differences,
// producing staggered gradient samples.
struct GradientOpMAC : public KernelBase {
  GradientOpMAC(MACGrid &gradient, const Grid<Real> &grid)
      : KernelBase(&gradient, 1), gradient(gradient), grid(grid)
  {
    runMessage();
    run();
  }
  inline void op(int i, int j, int k, MACGrid &gradient, const Grid<Real> &grid) const
  {
    // Vec3(scalar) splats grid(i,j,k) into all three components
    Vec3 grad = (Vec3(grid(i, j, k)) - Vec3(grid(i - 1, j, k), grid(i, j - 1, k), 0.));
    if (grid.is3D())
      grad[2] -= grid(i, j, k - 1);
    else
      grad[2] = 0.;
    gradient(i, j, k) = grad;
  }
  inline MACGrid &getArg0()
  {
    return gradient;
  }
  typedef MACGrid type0;
  inline const Grid<Real> &getArg1()
  {
    return grid;
  }
  typedef Grid<Real> type1;
  void runMessage()
  {
    debMsg("Executing kernel GradientOpMAC ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i, j, k, gradient, grid);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, gradient, grid);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  MACGrid &gradient;
  const Grid<Real> &grid;
};
+
//! Kernel: centered gradient operator
// NOTE(review): generated kernel (see file header); central differences on
// cell centers.
struct GradientOp : public KernelBase {
  GradientOp(Grid<Vec3> &gradient, const Grid<Real> &grid)
      : KernelBase(&gradient, 1), gradient(gradient), grid(grid)
  {
    runMessage();
    run();
  }
  inline void op(int i, int j, int k, Grid<Vec3> &gradient, const Grid<Real> &grid) const
  {
    Vec3 grad = 0.5 * Vec3(grid(i + 1, j, k) - grid(i - 1, j, k),
                           grid(i, j + 1, k) - grid(i, j - 1, k),
                           0.);
    if (grid.is3D())
      grad[2] = 0.5 * (grid(i, j, k + 1) - grid(i, j, k - 1));
    gradient(i, j, k) = grad;
  }
  inline Grid<Vec3> &getArg0()
  {
    return gradient;
  }
  typedef Grid<Vec3> type0;
  inline const Grid<Real> &getArg1()
  {
    return grid;
  }
  typedef Grid<Real> type1;
  void runMessage()
  {
    debMsg("Executing kernel GradientOp ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i, j, k, gradient, grid);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, gradient, grid);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  Grid<Vec3> &gradient;
  const Grid<Real> &grid;
};
+
//! Kernel: Laplace operator
// NOTE(review): generated kernel (see file header); standard 5-point (2D) /
// 7-point (3D) stencil, grid spacing assumed 1.
struct LaplaceOp : public KernelBase {
  LaplaceOp(Grid<Real> &laplace, const Grid<Real> &grid)
      : KernelBase(&laplace, 1), laplace(laplace), grid(grid)
  {
    runMessage();
    run();
  }
  inline void op(int i, int j, int k, Grid<Real> &laplace, const Grid<Real> &grid) const
  {
    laplace(i, j, k) = grid(i + 1, j, k) - 2.0 * grid(i, j, k) + grid(i - 1, j, k);
    laplace(i, j, k) += grid(i, j + 1, k) - 2.0 * grid(i, j, k) + grid(i, j - 1, k);
    if (grid.is3D()) {
      laplace(i, j, k) += grid(i, j, k + 1) - 2.0 * grid(i, j, k) + grid(i, j, k - 1);
    }
  }
  inline Grid<Real> &getArg0()
  {
    return laplace;
  }
  typedef Grid<Real> type0;
  inline const Grid<Real> &getArg1()
  {
    return grid;
  }
  typedef Grid<Real> type1;
  void runMessage()
  {
    debMsg("Executing kernel LaplaceOp ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i, j, k, laplace, grid);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, laplace, grid);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  Grid<Real> &laplace;
  const Grid<Real> &grid;
};
+
//! Kernel: Curvature operator
// NOTE(review): generated kernel (see file header); curvature from first and
// second central differences of a scalar field (e.g. a level set), with cell
// size h. The denominator is clamped with VECTOR_EPSILON to avoid division
// by zero in flat regions.
struct CurvatureOp : public KernelBase {
  CurvatureOp(Grid<Real> &curv, const Grid<Real> &grid, const Real h)
      : KernelBase(&curv, 1), curv(curv), grid(grid), h(h)
  {
    runMessage();
    run();
  }
  inline void op(int i, int j, int k, Grid<Real> &curv, const Grid<Real> &grid, const Real h) const
  {
    const Real over_h = 1.0 / h;
    const Real x = 0.5 * (grid(i + 1, j, k) - grid(i - 1, j, k)) * over_h;
    const Real y = 0.5 * (grid(i, j + 1, k) - grid(i, j - 1, k)) * over_h;
    const Real xx = (grid(i + 1, j, k) - 2.0 * grid(i, j, k) + grid(i - 1, j, k)) * over_h *
                    over_h;
    const Real yy = (grid(i, j + 1, k) - 2.0 * grid(i, j, k) + grid(i, j - 1, k)) * over_h *
                    over_h;
    const Real xy = 0.25 *
                    (grid(i + 1, j + 1, k) + grid(i - 1, j - 1, k) - grid(i - 1, j + 1, k) -
                     grid(i + 1, j - 1, k)) *
                    over_h * over_h;
    curv(i, j, k) = x * x * yy + y * y * xx - 2.0 * x * y * xy;
    Real denom = x * x + y * y;
    if (grid.is3D()) {
      const Real z = 0.5 * (grid(i, j, k + 1) - grid(i, j, k - 1)) * over_h;
      const Real zz = (grid(i, j, k + 1) - 2.0 * grid(i, j, k) + grid(i, j, k - 1)) * over_h *
                      over_h;
      const Real xz = 0.25 *
                      (grid(i + 1, j, k + 1) + grid(i - 1, j, k - 1) - grid(i - 1, j, k + 1) -
                       grid(i + 1, j, k - 1)) *
                      over_h * over_h;
      const Real yz = 0.25 *
                      (grid(i, j + 1, k + 1) + grid(i, j - 1, k - 1) - grid(i, j + 1, k - 1) -
                       grid(i, j - 1, k + 1)) *
                      over_h * over_h;
      curv(i, j, k) += x * x * zz + z * z * xx + y * y * zz + z * z * yy -
                       2.0 * (x * z * xz + y * z * yz);
      denom += z * z;
    }
    curv(i, j, k) /= std::pow(std::max(denom, VECTOR_EPSILON), 1.5);
  }
  inline Grid<Real> &getArg0()
  {
    return curv;
  }
  typedef Grid<Real> type0;
  inline const Grid<Real> &getArg1()
  {
    return grid;
  }
  typedef Grid<Real> type1;
  inline const Real &getArg2()
  {
    return h;
  }
  typedef Real type2;
  void runMessage()
  {
    debMsg("Executing kernel CurvatureOp ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i, j, k, curv, grid, h);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, curv, grid, h);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  Grid<Real> &curv;
  const Grid<Real> &grid;
  const Real h;
};
+
//! Kernel: get component at MAC positions
// NOTE(review): generated kernel (see file header); averages a centered
// vector component with its lower neighbor along dim to produce the
// staggered (face-centered) sample.
struct GetShiftedComponent : public KernelBase {
  GetShiftedComponent(const Grid<Vec3> &grid, Grid<Real> &comp, int dim)
      : KernelBase(&grid, 1), grid(grid), comp(comp), dim(dim)
  {
    runMessage();
    run();
  }
  inline void op(int i, int j, int k, const Grid<Vec3> &grid, Grid<Real> &comp, int dim) const
  {
    Vec3i ishift(i, j, k);
    ishift[dim]--;
    comp(i, j, k) = 0.5 * (grid(i, j, k)[dim] + grid(ishift)[dim]);
  }
  inline const Grid<Vec3> &getArg0()
  {
    return grid;
  }
  typedef Grid<Vec3> type0;
  inline Grid<Real> &getArg1()
  {
    return comp;
  }
  typedef Grid<Real> type1;
  inline int &getArg2()
  {
    return dim;
  }
  typedef int type2;
  void runMessage()
  {
    debMsg("Executing kernel GetShiftedComponent ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i, j, k, grid, comp, dim);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, grid, comp, dim);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  const Grid<Vec3> &grid;
  Grid<Real> &comp;
  int dim;
};
;

//! Kernel: get component (not shifted)
// NOTE(review): generated kernel (see file header); plain per-cell copy of
// one vector component into a scalar grid.
struct GetComponent : public KernelBase {
  GetComponent(const Grid<Vec3> &grid, Grid<Real> &comp, int dim)
      : KernelBase(&grid, 0), grid(grid), comp(comp), dim(dim)
  {
    runMessage();
    run();
  }
  inline void op(IndexInt idx, const Grid<Vec3> &grid, Grid<Real> &comp, int dim) const
  {
    comp[idx] = grid[idx][dim];
  }
  inline const Grid<Vec3> &getArg0()
  {
    return grid;
  }
  typedef Grid<Vec3> type0;
  inline Grid<Real> &getArg1()
  {
    return comp;
  }
  typedef Grid<Real> type1;
  inline int &getArg2()
  {
    return dim;
  }
  typedef int type2;
  void runMessage()
  {
    debMsg("Executing kernel GetComponent ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, grid, comp, dim);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  const Grid<Vec3> &grid;
  Grid<Real> &comp;
  int dim;
};
;
+
+//! Kernel: get norm of centered grid
+struct GridNorm : public KernelBase {
+ GridNorm(Grid<Real> &n, const Grid<Vec3> &grid) : KernelBase(&n, 0), n(n), grid(grid)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<Real> &n, const Grid<Vec3> &grid) const
+ {
+ n[idx] = norm(grid[idx]);
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return n;
+ }
+ typedef Grid<Real> type0;
+ inline const Grid<Vec3> &getArg1()
+ {
+ return grid;
+ }
+ typedef Grid<Vec3> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel GridNorm ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, n, grid);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<Real> &n;
+ const Grid<Vec3> &grid;
+};
+;
+
+//! Kernel: set component (not shifted)
+struct SetComponent : public KernelBase {
+ SetComponent(Grid<Vec3> &grid, const Grid<Real> &comp, int dim)
+ : KernelBase(&grid, 0), grid(grid), comp(comp), dim(dim)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<Vec3> &grid, const Grid<Real> &comp, int dim) const
+ {
+ grid[idx][dim] = comp[idx];
+ }
+ inline Grid<Vec3> &getArg0()
+ {
+ return grid;
+ }
+ typedef Grid<Vec3> type0;
+ inline const Grid<Real> &getArg1()
+ {
+ return comp;
+ }
+ typedef Grid<Real> type1;
+ inline int &getArg2()
+ {
+ return dim;
+ }
+ typedef int type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel SetComponent ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, grid, comp, dim);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<Vec3> &grid;
+ const Grid<Real> &comp;
+ int dim;
+};
+;
+
+//! Kernel: compute centered velocity field from MAC
+struct GetCentered : public KernelBase {
+ GetCentered(Grid<Vec3> &center, const MACGrid &vel)
+ : KernelBase(&center, 1), center(center), vel(vel)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, Grid<Vec3> &center, const MACGrid &vel) const
+ {
+ Vec3 v = 0.5 * (vel(i, j, k) + Vec3(vel(i + 1, j, k).x, vel(i, j + 1, k).y, 0.));
+ if (vel.is3D())
+ v[2] += 0.5 * vel(i, j, k + 1).z;
+ else
+ v[2] = 0.;
+ center(i, j, k) = v;
+ }
+ inline Grid<Vec3> &getArg0()
+ {
+ return center;
+ }
+ typedef Grid<Vec3> type0;
+ inline const MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel GetCentered ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, center, vel);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, center, vel);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ Grid<Vec3> &center;
+ const MACGrid &vel;
+};
+;
+
+//! Kernel: compute MAC from centered velocity field
+struct GetMAC : public KernelBase {
+ GetMAC(MACGrid &vel, const Grid<Vec3> &center) : KernelBase(&vel, 1), vel(vel), center(center)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, MACGrid &vel, const Grid<Vec3> &center) const
+ {
+ Vec3 v = 0.5 * (center(i, j, k) + Vec3(center(i - 1, j, k).x, center(i, j - 1, k).y, 0.));
+ if (vel.is3D())
+ v[2] += 0.5 * center(i, j, k - 1).z;
+ else
+ v[2] = 0.;
+ vel(i, j, k) = v;
+ }
+ inline MACGrid &getArg0()
+ {
+ return vel;
+ }
+ typedef MACGrid type0;
+ inline const Grid<Vec3> &getArg1()
+ {
+ return center;
+ }
+ typedef Grid<Vec3> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel GetMAC ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, vel, center);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, vel, center);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ MACGrid &vel;
+ const Grid<Vec3> &center;
+};
+;
+
+//! Fill in the domain boundary cells (i,j,k=0/size-1) from the neighboring cells
+struct FillInBoundary : public KernelBase {
+ FillInBoundary(Grid<Vec3> &grid, int g) : KernelBase(&grid, 0), grid(grid), g(g)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, Grid<Vec3> &grid, int g) const
+ {
+ if (i == 0)
+ grid(i, j, k) = grid(i + 1, j, k);
+ if (j == 0)
+ grid(i, j, k) = grid(i, j + 1, k);
+ if (k == 0)
+ grid(i, j, k) = grid(i, j, k + 1);
+ if (i == grid.getSizeX() - 1)
+ grid(i, j, k) = grid(i - 1, j, k);
+ if (j == grid.getSizeY() - 1)
+ grid(i, j, k) = grid(i, j - 1, k);
+ if (k == grid.getSizeZ() - 1)
+ grid(i, j, k) = grid(i, j, k - 1);
+ }
+ inline Grid<Vec3> &getArg0()
+ {
+ return grid;
+ }
+ typedef Grid<Vec3> type0;
+ inline int &getArg1()
+ {
+ return g;
+ }
+ typedef int type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel FillInBoundary ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, grid, g);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, grid, g);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ Grid<Vec3> &grid;
+ int g;
+};
+
+// ****************************************************************************
+
+// helper functions for converting mex data to manta grids and back (for matlab integration)
+
+// MAC grids
+
+struct kn_conv_mex_in_to_MAC : public KernelBase {
+ kn_conv_mex_in_to_MAC(const double *p_lin_array, MACGrid *p_result)
+ : KernelBase(p_result, 0), p_lin_array(p_lin_array), p_result(p_result)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const double *p_lin_array, MACGrid *p_result) const
+ {
+ int ijk = i + j * p_result->getSizeX() + k * p_result->getSizeX() * p_result->getSizeY();
+ const int n = p_result->getSizeX() * p_result->getSizeY() * p_result->getSizeZ();
+
+ p_result->get(i, j, k).x = p_lin_array[ijk];
+ p_result->get(i, j, k).y = p_lin_array[ijk + n];
+ p_result->get(i, j, k).z = p_lin_array[ijk + 2 * n];
+ }
+ inline const double *getArg0()
+ {
+ return p_lin_array;
+ }
+ typedef double type0;
+ inline MACGrid *getArg1()
+ {
+ return p_result;
+ }
+ typedef MACGrid type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn_conv_mex_in_to_MAC ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_lin_array, p_result);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_lin_array, p_result);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const double *p_lin_array;
+ MACGrid *p_result;
+};
+
+struct kn_conv_MAC_to_mex_out : public KernelBase {
+ kn_conv_MAC_to_mex_out(const MACGrid *p_mac, double *p_result)
+ : KernelBase(p_mac, 0), p_mac(p_mac), p_result(p_result)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const MACGrid *p_mac, double *p_result) const
+ {
+ int ijk = i + j * p_mac->getSizeX() + k * p_mac->getSizeX() * p_mac->getSizeY();
+ const int n = p_mac->getSizeX() * p_mac->getSizeY() * p_mac->getSizeZ();
+
+ p_result[ijk] = p_mac->get(i, j, k).x;
+ p_result[ijk + n] = p_mac->get(i, j, k).y;
+ p_result[ijk + 2 * n] = p_mac->get(i, j, k).z;
+ }
+ inline const MACGrid *getArg0()
+ {
+ return p_mac;
+ }
+ typedef MACGrid type0;
+ inline double *getArg1()
+ {
+ return p_result;
+ }
+ typedef double type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn_conv_MAC_to_mex_out ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_mac, p_result);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_mac, p_result);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const MACGrid *p_mac;
+ double *p_result;
+};
+
+// Vec3 Grids
+
+struct kn_conv_mex_in_to_Vec3 : public KernelBase {
+ kn_conv_mex_in_to_Vec3(const double *p_lin_array, Grid<Vec3> *p_result)
+ : KernelBase(p_result, 0), p_lin_array(p_lin_array), p_result(p_result)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const double *p_lin_array, Grid<Vec3> *p_result) const
+ {
+ int ijk = i + j * p_result->getSizeX() + k * p_result->getSizeX() * p_result->getSizeY();
+ const int n = p_result->getSizeX() * p_result->getSizeY() * p_result->getSizeZ();
+
+ p_result->get(i, j, k).x = p_lin_array[ijk];
+ p_result->get(i, j, k).y = p_lin_array[ijk + n];
+ p_result->get(i, j, k).z = p_lin_array[ijk + 2 * n];
+ }
+ inline const double *getArg0()
+ {
+ return p_lin_array;
+ }
+ typedef double type0;
+ inline Grid<Vec3> *getArg1()
+ {
+ return p_result;
+ }
+ typedef Grid<Vec3> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn_conv_mex_in_to_Vec3 ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_lin_array, p_result);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_lin_array, p_result);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const double *p_lin_array;
+ Grid<Vec3> *p_result;
+};
+
+struct kn_conv_Vec3_to_mex_out : public KernelBase {
+ kn_conv_Vec3_to_mex_out(const Grid<Vec3> *p_Vec3, double *p_result)
+ : KernelBase(p_Vec3, 0), p_Vec3(p_Vec3), p_result(p_result)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const Grid<Vec3> *p_Vec3, double *p_result) const
+ {
+ int ijk = i + j * p_Vec3->getSizeX() + k * p_Vec3->getSizeX() * p_Vec3->getSizeY();
+ const int n = p_Vec3->getSizeX() * p_Vec3->getSizeY() * p_Vec3->getSizeZ();
+
+ p_result[ijk] = p_Vec3->get(i, j, k).x;
+ p_result[ijk + n] = p_Vec3->get(i, j, k).y;
+ p_result[ijk + 2 * n] = p_Vec3->get(i, j, k).z;
+ }
+ inline const Grid<Vec3> *getArg0()
+ {
+ return p_Vec3;
+ }
+ typedef Grid<Vec3> type0;
+ inline double *getArg1()
+ {
+ return p_result;
+ }
+ typedef double type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn_conv_Vec3_to_mex_out ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_Vec3, p_result);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_Vec3, p_result);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const Grid<Vec3> *p_Vec3;
+ double *p_result;
+};
+
+// Real Grids
+
+struct kn_conv_mex_in_to_Real : public KernelBase {
+ kn_conv_mex_in_to_Real(const double *p_lin_array, Grid<Real> *p_result)
+ : KernelBase(p_result, 0), p_lin_array(p_lin_array), p_result(p_result)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const double *p_lin_array, Grid<Real> *p_result) const
+ {
+ int ijk = i + j * p_result->getSizeX() + k * p_result->getSizeX() * p_result->getSizeY();
+
+ p_result->get(i, j, k) = p_lin_array[ijk];
+ }
+ inline const double *getArg0()
+ {
+ return p_lin_array;
+ }
+ typedef double type0;
+ inline Grid<Real> *getArg1()
+ {
+ return p_result;
+ }
+ typedef Grid<Real> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn_conv_mex_in_to_Real ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_lin_array, p_result);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_lin_array, p_result);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const double *p_lin_array;
+ Grid<Real> *p_result;
+};
+
+struct kn_conv_Real_to_mex_out : public KernelBase {
+ kn_conv_Real_to_mex_out(const Grid<Real> *p_grid, double *p_result)
+ : KernelBase(p_grid, 0), p_grid(p_grid), p_result(p_result)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const Grid<Real> *p_grid, double *p_result) const
+ {
+ int ijk = i + j * p_grid->getSizeX() + k * p_grid->getSizeX() * p_grid->getSizeY();
+
+ p_result[ijk] = p_grid->get(i, j, k);
+ }
+ inline const Grid<Real> *getArg0()
+ {
+ return p_grid;
+ }
+ typedef Grid<Real> type0;
+ inline double *getArg1()
+ {
+ return p_result;
+ }
+ typedef double type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn_conv_Real_to_mex_out ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_grid, p_result);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, p_grid, p_result);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const Grid<Real> *p_grid;
+ double *p_result;
+};
+
+} // namespace Manta
+#endif
diff --git a/extern/mantaflow/preprocessed/commonkernels.h.reg.cpp b/extern/mantaflow/preprocessed/commonkernels.h.reg.cpp
new file mode 100644
index 00000000000..0a7a55b7147
--- /dev/null
+++ b/extern/mantaflow/preprocessed/commonkernels.h.reg.cpp
@@ -0,0 +1,13 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "commonkernels.h"
+namespace Manta {
+extern "C" {
+void PbRegister_file_2()
+{
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/conjugategrad.cpp b/extern/mantaflow/preprocessed/conjugategrad.cpp
new file mode 100644
index 00000000000..ac317402a49
--- /dev/null
+++ b/extern/mantaflow/preprocessed/conjugategrad.cpp
@@ -0,0 +1,719 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Conjugate gradient solver, for pressure and viscosity
+ *
+ ******************************************************************************/
+
+#include "conjugategrad.h"
+#include "commonkernels.h"
+
+using namespace std;
+namespace Manta {
+
+const int CG_DEBUGLEVEL = 3;
+
+//*****************************************************************************
+// Precondition helpers
+
+//! Preconditioning a la Wavelet Turbulence (needs 4 add. grids)
+void InitPreconditionIncompCholesky(const FlagGrid &flags,
+ Grid<Real> &A0,
+ Grid<Real> &Ai,
+ Grid<Real> &Aj,
+ Grid<Real> &Ak,
+ Grid<Real> &orgA0,
+ Grid<Real> &orgAi,
+ Grid<Real> &orgAj,
+ Grid<Real> &orgAk)
+{
+ // compute IC according to Golub and Van Loan
+ A0.copyFrom(orgA0);
+ Ai.copyFrom(orgAi);
+ Aj.copyFrom(orgAj);
+ Ak.copyFrom(orgAk);
+
+ FOR_IJK(A0)
+ {
+ if (flags.isFluid(i, j, k)) {
+ const IndexInt idx = A0.index(i, j, k);
+ A0[idx] = sqrt(A0[idx]);
+
+ // correct left and top stencil in other entries
+ // for i = k+1:n
+ // if (A(i,k) != 0)
+ // A(i,k) = A(i,k) / A(k,k)
+ Real invDiagonal = 1.0f / A0[idx];
+ Ai[idx] *= invDiagonal;
+ Aj[idx] *= invDiagonal;
+ Ak[idx] *= invDiagonal;
+
+ // correct the right and bottom stencil in other entries
+ // for j = k+1:n
+ // for i = j:n
+ // if (A(i,j) != 0)
+ // A(i,j) = A(i,j) - A(i,k) * A(j,k)
+ A0(i + 1, j, k) -= square(Ai[idx]);
+ A0(i, j + 1, k) -= square(Aj[idx]);
+ A0(i, j, k + 1) -= square(Ak[idx]);
+ }
+ }
+
+ // invert A0 for faster computation later
+ InvertCheckFluid(flags, A0);
+};
+
+//! Preconditioning using modified IC ala Bridson (needs 1 add. grid)
+void InitPreconditionModifiedIncompCholesky2(const FlagGrid &flags,
+ Grid<Real> &Aprecond,
+ Grid<Real> &A0,
+ Grid<Real> &Ai,
+ Grid<Real> &Aj,
+ Grid<Real> &Ak)
+{
+ // compute IC according to Golub and Van Loan
+ Aprecond.clear();
+
+ FOR_IJK(flags)
+ {
+ if (!flags.isFluid(i, j, k))
+ continue;
+
+ const Real tau = 0.97;
+ const Real sigma = 0.25;
+
+ // compute modified incomplete cholesky
+ Real e = 0.;
+ e = A0(i, j, k) - square(Ai(i - 1, j, k) * Aprecond(i - 1, j, k)) -
+ square(Aj(i, j - 1, k) * Aprecond(i, j - 1, k)) -
+ square(Ak(i, j, k - 1) * Aprecond(i, j, k - 1));
+ e -= tau *
+ (Ai(i - 1, j, k) * (Aj(i - 1, j, k) + Ak(i - 1, j, k)) * square(Aprecond(i - 1, j, k)) +
+ Aj(i, j - 1, k) * (Ai(i, j - 1, k) + Ak(i, j - 1, k)) * square(Aprecond(i, j - 1, k)) +
+ Ak(i, j, k - 1) * (Ai(i, j, k - 1) + Aj(i, j, k - 1)) * square(Aprecond(i, j, k - 1)) +
+ 0.);
+
+ // stability cutoff
+ if (e < sigma * A0(i, j, k))
+ e = A0(i, j, k);
+
+ Aprecond(i, j, k) = 1. / sqrt(e);
+ }
+};
+
+//! Preconditioning using multigrid ala Dick et al.
+void InitPreconditionMultigrid(
+ GridMg *MG, Grid<Real> &A0, Grid<Real> &Ai, Grid<Real> &Aj, Grid<Real> &Ak, Real mAccuracy)
+{
+ // build multigrid hierarchy if necessary
+ if (!MG->isASet())
+ MG->setA(&A0, &Ai, &Aj, &Ak);
+ MG->setCoarsestLevelAccuracy(mAccuracy * 1E-4);
+ MG->setSmoothing(1, 1);
+};
+
+//! Apply WT-style ICP
+void ApplyPreconditionIncompCholesky(Grid<Real> &dst,
+ Grid<Real> &Var1,
+ const FlagGrid &flags,
+ Grid<Real> &A0,
+ Grid<Real> &Ai,
+ Grid<Real> &Aj,
+ Grid<Real> &Ak,
+ Grid<Real> &orgA0,
+ Grid<Real> &orgAi,
+ Grid<Real> &orgAj,
+ Grid<Real> &orgAk)
+{
+
+ // forward substitution
+ FOR_IJK(dst)
+ {
+ if (!flags.isFluid(i, j, k))
+ continue;
+ dst(i, j, k) = A0(i, j, k) *
+ (Var1(i, j, k) - dst(i - 1, j, k) * Ai(i - 1, j, k) -
+ dst(i, j - 1, k) * Aj(i, j - 1, k) - dst(i, j, k - 1) * Ak(i, j, k - 1));
+ }
+
+ // backward substitution
+ FOR_IJK_REVERSE(dst)
+ {
+ const IndexInt idx = A0.index(i, j, k);
+ if (!flags.isFluid(idx))
+ continue;
+ dst[idx] = A0[idx] * (dst[idx] - dst(i + 1, j, k) * Ai[idx] - dst(i, j + 1, k) * Aj[idx] -
+ dst(i, j, k + 1) * Ak[idx]);
+ }
+}
+
+//! Apply Bridson-style mICP
+void ApplyPreconditionModifiedIncompCholesky2(Grid<Real> &dst,
+ Grid<Real> &Var1,
+ const FlagGrid &flags,
+ Grid<Real> &Aprecond,
+ Grid<Real> &A0,
+ Grid<Real> &Ai,
+ Grid<Real> &Aj,
+ Grid<Real> &Ak)
+{
+ // forward substitution
+ FOR_IJK(dst)
+ {
+ if (!flags.isFluid(i, j, k))
+ continue;
+ const Real p = Aprecond(i, j, k);
+ dst(i, j, k) = p *
+ (Var1(i, j, k) - dst(i - 1, j, k) * Ai(i - 1, j, k) * Aprecond(i - 1, j, k) -
+ dst(i, j - 1, k) * Aj(i, j - 1, k) * Aprecond(i, j - 1, k) -
+ dst(i, j, k - 1) * Ak(i, j, k - 1) * Aprecond(i, j, k - 1));
+ }
+
+ // backward substitution
+ FOR_IJK_REVERSE(dst)
+ {
+ const IndexInt idx = A0.index(i, j, k);
+ if (!flags.isFluid(idx))
+ continue;
+ const Real p = Aprecond[idx];
+ dst[idx] = p * (dst[idx] - dst(i + 1, j, k) * Ai[idx] * p - dst(i, j + 1, k) * Aj[idx] * p -
+ dst(i, j, k + 1) * Ak[idx] * p);
+ }
+}
+
+//! Perform one Multigrid VCycle
+void ApplyPreconditionMultigrid(GridMg *pMG, Grid<Real> &dst, Grid<Real> &Var1)
+{
+ // one VCycle on "A*dst = Var1" with initial guess dst=0
+ pMG->setRhs(Var1);
+ pMG->doVCycle(dst);
+}
+
+//*****************************************************************************
+// Kernels
+
+//! Kernel: Compute the dot product between two Real grids
+/*! Uses double precision internally */
+
+struct GridDotProduct : public KernelBase {
+ GridDotProduct(const Grid<Real> &a, const Grid<Real> &b)
+ : KernelBase(&a, 0), a(a), b(b), result(0.0)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, const Grid<Real> &a, const Grid<Real> &b, double &result)
+ {
+ result += (a[idx] * b[idx]);
+ }
+ inline operator double()
+ {
+ return result;
+ }
+ inline double &getRet()
+ {
+ return result;
+ }
+ inline const Grid<Real> &getArg0()
+ {
+ return a;
+ }
+ typedef Grid<Real> type0;
+ inline const Grid<Real> &getArg1()
+ {
+ return b;
+ }
+ typedef Grid<Real> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel GridDotProduct ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, a, b, result);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ GridDotProduct(GridDotProduct &o, tbb::split) : KernelBase(o), a(o.a), b(o.b), result(0.0)
+ {
+ }
+ void join(const GridDotProduct &o)
+ {
+ result += o.result;
+ }
+ const Grid<Real> &a;
+ const Grid<Real> &b;
+ double result;
+};
+;
+
+//! Kernel: compute residual (init) and add to sigma
+
+struct InitSigma : public KernelBase {
+ InitSigma(const FlagGrid &flags, Grid<Real> &dst, Grid<Real> &rhs, Grid<Real> &temp)
+ : KernelBase(&flags, 0), flags(flags), dst(dst), rhs(rhs), temp(temp), sigma(0)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const FlagGrid &flags,
+ Grid<Real> &dst,
+ Grid<Real> &rhs,
+ Grid<Real> &temp,
+ double &sigma)
+ {
+ const double res = rhs[idx] - temp[idx];
+ dst[idx] = (Real)res;
+
+ // only compute residual in fluid region
+ if (flags.isFluid(idx))
+ sigma += res * res;
+ }
+ inline operator double()
+ {
+ return sigma;
+ }
+ inline double &getRet()
+ {
+ return sigma;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<Real> &getArg1()
+ {
+ return dst;
+ }
+ typedef Grid<Real> type1;
+ inline Grid<Real> &getArg2()
+ {
+ return rhs;
+ }
+ typedef Grid<Real> type2;
+ inline Grid<Real> &getArg3()
+ {
+ return temp;
+ }
+ typedef Grid<Real> type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel InitSigma ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, flags, dst, rhs, temp, sigma);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ InitSigma(InitSigma &o, tbb::split)
+ : KernelBase(o), flags(o.flags), dst(o.dst), rhs(o.rhs), temp(o.temp), sigma(0)
+ {
+ }
+ void join(const InitSigma &o)
+ {
+ sigma += o.sigma;
+ }
+ const FlagGrid &flags;
+ Grid<Real> &dst;
+ Grid<Real> &rhs;
+ Grid<Real> &temp;
+ double sigma;
+};
+;
+
+//! Kernel: update search vector
+
+struct UpdateSearchVec : public KernelBase {
+ UpdateSearchVec(Grid<Real> &dst, Grid<Real> &src, Real factor)
+ : KernelBase(&dst, 0), dst(dst), src(src), factor(factor)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<Real> &dst, Grid<Real> &src, Real factor) const
+ {
+ dst[idx] = src[idx] + factor * dst[idx];
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return dst;
+ }
+ typedef Grid<Real> type0;
+ inline Grid<Real> &getArg1()
+ {
+ return src;
+ }
+ typedef Grid<Real> type1;
+ inline Real &getArg2()
+ {
+ return factor;
+ }
+ typedef Real type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel UpdateSearchVec ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, dst, src, factor);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<Real> &dst;
+ Grid<Real> &src;
+ Real factor;
+};
+
+//*****************************************************************************
+// CG class
+
+template<class APPLYMAT>
+GridCg<APPLYMAT>::GridCg(Grid<Real> &dst,
+ Grid<Real> &rhs,
+ Grid<Real> &residual,
+ Grid<Real> &search,
+ const FlagGrid &flags,
+ Grid<Real> &tmp,
+ Grid<Real> *pA0,
+ Grid<Real> *pAi,
+ Grid<Real> *pAj,
+ Grid<Real> *pAk)
+ : GridCgInterface(),
+ mInited(false),
+ mIterations(0),
+ mDst(dst),
+ mRhs(rhs),
+ mResidual(residual),
+ mSearch(search),
+ mFlags(flags),
+ mTmp(tmp),
+ mpA0(pA0),
+ mpAi(pAi),
+ mpAj(pAj),
+ mpAk(pAk),
+ mPcMethod(PC_None),
+ mpPCA0(nullptr),
+ mpPCAi(nullptr),
+ mpPCAj(nullptr),
+ mpPCAk(nullptr),
+ mMG(nullptr),
+ mSigma(0.),
+ mAccuracy(VECTOR_EPSILON),
+ mResNorm(1e20)
+{
+}
+
+template<class APPLYMAT> void GridCg<APPLYMAT>::doInit()
+{
+ mInited = true;
+ mIterations = 0;
+
+ mDst.clear();
+ mResidual.copyFrom(mRhs); // p=0, residual = b
+
+ if (mPcMethod == PC_ICP) {
+ assertMsg(mDst.is3D(), "ICP only supports 3D grids so far");
+ InitPreconditionIncompCholesky(
+ mFlags, *mpPCA0, *mpPCAi, *mpPCAj, *mpPCAk, *mpA0, *mpAi, *mpAj, *mpAk);
+ ApplyPreconditionIncompCholesky(
+ mTmp, mResidual, mFlags, *mpPCA0, *mpPCAi, *mpPCAj, *mpPCAk, *mpA0, *mpAi, *mpAj, *mpAk);
+ }
+ else if (mPcMethod == PC_mICP) {
+ assertMsg(mDst.is3D(), "mICP only supports 3D grids so far");
+ InitPreconditionModifiedIncompCholesky2(mFlags, *mpPCA0, *mpA0, *mpAi, *mpAj, *mpAk);
+ ApplyPreconditionModifiedIncompCholesky2(
+ mTmp, mResidual, mFlags, *mpPCA0, *mpA0, *mpAi, *mpAj, *mpAk);
+ }
+ else if (mPcMethod == PC_MGP) {
+ InitPreconditionMultigrid(mMG, *mpA0, *mpAi, *mpAj, *mpAk, mAccuracy);
+ ApplyPreconditionMultigrid(mMG, mTmp, mResidual);
+ }
+ else {
+ mTmp.copyFrom(mResidual);
+ }
+
+ mSearch.copyFrom(mTmp);
+
+ mSigma = GridDotProduct(mTmp, mResidual);
+}
+
+template<class APPLYMAT> bool GridCg<APPLYMAT>::iterate()
+{
+ if (!mInited)
+ doInit();
+
+ mIterations++;
+
+ // create matrix application operator passed as template argument,
+ // this could reinterpret the mpA pointers (not so clean right now)
+ // tmp = applyMat(search)
+
+ APPLYMAT(mFlags, mTmp, mSearch, *mpA0, *mpAi, *mpAj, *mpAk);
+
+ // alpha = sigma/dot(tmp, search)
+ Real dp = GridDotProduct(mTmp, mSearch);
+ Real alpha = 0.;
+ if (fabs(dp) > 0.)
+ alpha = mSigma / (Real)dp;
+
+ gridScaledAdd<Real, Real>(mDst, mSearch, alpha); // dst += search * alpha
+ gridScaledAdd<Real, Real>(mResidual, mTmp, -alpha); // residual += tmp * -alpha
+
+ if (mPcMethod == PC_ICP)
+ ApplyPreconditionIncompCholesky(
+ mTmp, mResidual, mFlags, *mpPCA0, *mpPCAi, *mpPCAj, *mpPCAk, *mpA0, *mpAi, *mpAj, *mpAk);
+ else if (mPcMethod == PC_mICP)
+ ApplyPreconditionModifiedIncompCholesky2(
+ mTmp, mResidual, mFlags, *mpPCA0, *mpA0, *mpAi, *mpAj, *mpAk);
+ else if (mPcMethod == PC_MGP)
+ ApplyPreconditionMultigrid(mMG, mTmp, mResidual);
+ else
+ mTmp.copyFrom(mResidual);
+
+ // use the l2 norm of the residual for convergence check? (usually max norm is recommended
+ // instead)
+ if (this->mUseL2Norm) {
+ mResNorm = GridSumSqr(mResidual).sum;
+ }
+ else {
+ mResNorm = mResidual.getMaxAbs();
+ }
+
+ // abort here to safe some work...
+ if (mResNorm < mAccuracy) {
+ mSigma = mResNorm; // this will be returned later on to the caller...
+ return false;
+ }
+
+ Real sigmaNew = GridDotProduct(mTmp, mResidual);
+ Real beta = sigmaNew / mSigma;
+
+ // search = tmp + beta * search
+ UpdateSearchVec(mSearch, mTmp, beta);
+
+ debMsg("GridCg::iterate i=" << mIterations << " sigmaNew=" << sigmaNew << " sigmaLast=" << mSigma
+ << " alpha=" << alpha << " beta=" << beta << " ",
+ CG_DEBUGLEVEL);
+ mSigma = sigmaNew;
+
+ if (!(mResNorm < 1e35)) {
+ if (mPcMethod == PC_MGP) {
+ // diverging solves can be caused by the static multigrid mode, we cannot detect this here,
+ // though only the pressure solve call "knows" whether the MG is static or dynamics...
+ debMsg(
+ "GridCg::iterate: Warning - this diverging solve can be caused by the 'static' mode of "
+ "the MG preconditioner. If the static mode is active, try switching to dynamic.",
+ 1);
+ }
+ errMsg("GridCg::iterate: The CG solver diverged, residual norm > 1e30, stopping.");
+ }
+
+ // debMsg("PB-CG-Norms::p"<<sqrt( GridOpNormNosqrt(mpDst, mpFlags).getValue() ) <<"
+ // search"<<sqrt( GridOpNormNosqrt(mpSearch, mpFlags).getValue(), CG_DEBUGLEVEL ) <<" res"<<sqrt(
+ // GridOpNormNosqrt(mpResidual, mpFlags).getValue() ) <<" tmp"<<sqrt( GridOpNormNosqrt(mpTmp,
+ // mpFlags).getValue() ), CG_DEBUGLEVEL ); // debug
+ return true;
+}
+
+template<class APPLYMAT> void GridCg<APPLYMAT>::solve(int maxIter)
+{
+ for (int iter = 0; iter < maxIter; iter++) {
+ if (!iterate())
+ iter = maxIter;
+ }
+ return;
+}
+
+static bool gPrint2dWarning = true;
+template<class APPLYMAT>
+void GridCg<APPLYMAT>::setICPreconditioner(
+ PreconditionType method, Grid<Real> *A0, Grid<Real> *Ai, Grid<Real> *Aj, Grid<Real> *Ak)
+{
+ assertMsg(method == PC_ICP || method == PC_mICP,
+ "GridCg<APPLYMAT>::setICPreconditioner: Invalid method specified.");
+
+ mPcMethod = method;
+ if ((!A0->is3D())) {
+ if (gPrint2dWarning) {
+ debMsg("ICP/mICP pre-conditioning only supported in 3D for now, disabling it.", 1);
+ gPrint2dWarning = false;
+ }
+ mPcMethod = PC_None;
+ }
+ mpPCA0 = A0;
+ mpPCAi = Ai;
+ mpPCAj = Aj;
+ mpPCAk = Ak;
+}
+
+template<class APPLYMAT>
+void GridCg<APPLYMAT>::setMGPreconditioner(PreconditionType method, GridMg *MG)
+{
+ assertMsg(method == PC_MGP, "GridCg<APPLYMAT>::setMGPreconditioner: Invalid method specified.");
+
+ mPcMethod = method;
+
+ mMG = MG;
+}
+
+// explicit instantiation
+template class GridCg<ApplyMatrix>;
+template class GridCg<ApplyMatrix2D>;
+
+//*****************************************************************************
+// diffusion for real and vec grids, e.g. for viscosity
+
+//! do a CG solve for diffusion; note: diffusion coefficient alpha given in grid space,
+// rescale in python file for discretization independence (or physical correspondence)
+// see lidDrivenCavity.py for an example
+
+void cgSolveDiffusion(const FlagGrid &flags,
+ GridBase &grid,
+ Real alpha = 0.25,
+ Real cgMaxIterFac = 1.0,
+ Real cgAccuracy = 1e-4)
+{
+ // reserve temp grids
+ FluidSolver *parent = flags.getParent();
+ Grid<Real> rhs(parent);
+ Grid<Real> residual(parent), search(parent), tmp(parent);
+ Grid<Real> A0(parent), Ai(parent), Aj(parent), Ak(parent);
+
+ // setup matrix and boundaries
+ FlagGrid flagsDummy(parent);
+ flagsDummy.setConst(FlagGrid::TypeFluid);
+ MakeLaplaceMatrix(flagsDummy, A0, Ai, Aj, Ak);
+
+ FOR_IJK(flags)
+ {
+ if (flags.isObstacle(i, j, k)) {
+ Ai(i, j, k) = Aj(i, j, k) = Ak(i, j, k) = 0.0;
+ A0(i, j, k) = 1.0;
+ }
+ else {
+ Ai(i, j, k) *= alpha;
+ Aj(i, j, k) *= alpha;
+ Ak(i, j, k) *= alpha;
+ A0(i, j, k) *= alpha;
+ A0(i, j, k) += 1.;
+ }
+ }
+
+ GridCgInterface *gcg;
+ // note , no preconditioning for now...
+ const int maxIter = (int)(cgMaxIterFac * flags.getSize().max()) * (flags.is3D() ? 1 : 4);
+
+ if (grid.getType() & GridBase::TypeReal) {
+ Grid<Real> &u = ((Grid<Real> &)grid);
+ rhs.copyFrom(u);
+ if (flags.is3D())
+ gcg = new GridCg<ApplyMatrix>(u, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
+ else
+ gcg = new GridCg<ApplyMatrix2D>(u, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
+
+ gcg->setAccuracy(cgAccuracy);
+ gcg->solve(maxIter);
+
+ debMsg("FluidSolver::solveDiffusion iterations:" << gcg->getIterations()
+ << ", res:" << gcg->getSigma(),
+ CG_DEBUGLEVEL);
+ }
+ else if ((grid.getType() & GridBase::TypeVec3) || (grid.getType() & GridBase::TypeVec3)) {
+ Grid<Vec3> &vec = ((Grid<Vec3> &)grid);
+ Grid<Real> u(parent);
+
+ // core solve is same as for a regular real grid
+ if (flags.is3D())
+ gcg = new GridCg<ApplyMatrix>(u, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
+ else
+ gcg = new GridCg<ApplyMatrix2D>(u, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
+ gcg->setAccuracy(cgAccuracy);
+
+ // diffuse every component separately
+ for (int component = 0; component < (grid.is3D() ? 3 : 2); ++component) {
+ getComponent(vec, u, component);
+ gcg->forceReinit();
+
+ rhs.copyFrom(u);
+ gcg->solve(maxIter);
+ debMsg("FluidSolver::solveDiffusion vec3, iterations:" << gcg->getIterations()
+ << ", res:" << gcg->getSigma(),
+ CG_DEBUGLEVEL);
+
+ setComponent(u, vec, component);
+ }
+ }
+ else {
+ errMsg("cgSolveDiffusion: Grid Type is not supported (only Real, Vec3, MAC, or Levelset)");
+ }
+
+ delete gcg;
+}
//! Auto-generated Python binding for cgSolveDiffusion(): unpacks the
//! positional/keyword arguments, calls the plugin, and returns Python None.
//! C++ exceptions are converted into Python errors.
static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "cgSolveDiffusion", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      // argument unpacking mirrors the cgSolveDiffusion() signature,
      // including its default values
      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
      GridBase &grid = *_args.getPtr<GridBase>("grid", 1, &_lock);
      Real alpha = _args.getOpt<Real>("alpha", 2, 0.25, &_lock);
      Real cgMaxIterFac = _args.getOpt<Real>("cgMaxIterFac", 3, 1.0, &_lock);
      Real cgAccuracy = _args.getOpt<Real>("cgAccuracy", 4, 1e-4, &_lock);
      _retval = getPyNone();
      cgSolveDiffusion(flags, grid, alpha, cgMaxIterFac, cgAccuracy);
      _args.check();  // final argument validation (generated code)
    }
    pbFinalizePlugin(parent, "cgSolveDiffusion", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    // forward the C++ exception to the Python error machinery
    pbSetError("cgSolveDiffusion", e.what());
    return 0;
  }
}
// register the wrapper in the mantaflow <-> Python function registry
static const Pb::Register _RP_cgSolveDiffusion("", "cgSolveDiffusion", _W_0);
extern "C" {
void PbRegister_cgSolveDiffusion()
{
  KEEP_UNUSED(_RP_cgSolveDiffusion);
}
}
+
+}; // namespace Manta
diff --git a/extern/mantaflow/preprocessed/conjugategrad.h b/extern/mantaflow/preprocessed/conjugategrad.h
new file mode 100644
index 00000000000..58ccff28179
--- /dev/null
+++ b/extern/mantaflow/preprocessed/conjugategrad.h
@@ -0,0 +1,479 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Conjugate gradient solver
+ *
+ ******************************************************************************/
+
+#ifndef _CONJUGATEGRADIENT_H
+#define _CONJUGATEGRADIENT_H
+
+#include "vectorbase.h"
+#include "grid.h"
+#include "kernel.h"
+#include "multigrid.h"
+
+namespace Manta {
+
+static const bool CG_DEBUG = false;
+
+//! Basic CG interface
class GridCgInterface {
 public:
  //! available preconditioners: none, (modified) incomplete Cholesky, multigrid
  enum PreconditionType { PC_None = 0, PC_ICP, PC_mICP, PC_MGP };

  GridCgInterface() : mUseL2Norm(true){};
  virtual ~GridCgInterface(){};

  // solving functions
  //! perform a single CG step; returns false once the solve has finished
  virtual bool iterate() = 0;
  //! iterate until convergence or maxIter steps were taken
  virtual void solve(int maxIter) = 0;

  // precond
  virtual void setICPreconditioner(
      PreconditionType method, Grid<Real> *A0, Grid<Real> *Ai, Grid<Real> *Aj, Grid<Real> *Ak) = 0;
  virtual void setMGPreconditioner(PreconditionType method, GridMg *MG) = 0;

  // access
  virtual Real getSigma() const = 0;
  virtual Real getIterations() const = 0;
  virtual Real getResNorm() const = 0;
  virtual void setAccuracy(Real set) = 0;
  virtual Real getAccuracy() const = 0;

  //! force reinit upon next iterate() call, can be used for doing multiple solves
  virtual void forceReinit() = 0;

  //! toggle between L2 and max norm for the convergence check
  void setUseL2Norm(bool set)
  {
    mUseL2Norm = set;
  }

 protected:
  // use l2 norm of residual for threshold? (otherwise uses max norm)
  bool mUseL2Norm;
};
+
+//! Run single iteration of the cg solver
+/*! the template argument determines the type of matrix multiplication,
+ typically a ApplyMatrix kernel, another one is needed e.g. for the
+ mesh-based wave equation solver */
template<class APPLYMAT> class GridCg : public GridCgInterface {
 public:
  //! constructor; all grids are externally owned working storage,
  //! the A* pointers hold the (symmetric) system matrix entries
  GridCg(Grid<Real> &dst,
         Grid<Real> &rhs,
         Grid<Real> &residual,
         Grid<Real> &search,
         const FlagGrid &flags,
         Grid<Real> &tmp,
         Grid<Real> *A0,
         Grid<Real> *pAi,
         Grid<Real> *pAj,
         Grid<Real> *pAk);
  ~GridCg()
  {
  }

  //! reset solver state (x = 0, residual = rhs); called lazily by iterate()
  void doInit();
  //! perform one CG step; returns false once the solve has finished
  bool iterate();
  //! run up to maxIter iterations, stopping early on convergence
  void solve(int maxIter);
  //! init pointers, and copy values from "normal" matrix
  void setICPreconditioner(
      PreconditionType method, Grid<Real> *A0, Grid<Real> *Ai, Grid<Real> *Aj, Grid<Real> *Ak);
  void setMGPreconditioner(PreconditionType method, GridMg *MG);
  //! clear the lazy-init flag so the next iterate() restarts the solve
  void forceReinit()
  {
    mInited = false;
  }

  // Accessors
  Real getSigma() const
  {
    return mSigma;
  }
  Real getIterations() const
  {
    return mIterations;
  }

  Real getResNorm() const
  {
    return mResNorm;
  }

  void setAccuracy(Real set)
  {
    mAccuracy = set;
  }
  Real getAccuracy() const
  {
    return mAccuracy;
  }

 protected:
  bool mInited;     //!< lazy-init flag, cleared by forceReinit()
  int mIterations;  //!< iterations performed so far
  // grids (externally owned working storage)
  Grid<Real> &mDst;       //!< solution vector x (accumulated by iterate())
  Grid<Real> &mRhs;       //!< right-hand side b
  Grid<Real> &mResidual;  //!< residual r
  Grid<Real> &mSearch;    //!< search direction p
  const FlagGrid &mFlags;
  Grid<Real> &mTmp;  //!< scratch grid (A*p / preconditioned residual)

  //! system matrix entries, diagonal and off-diagonals (not owned)
  Grid<Real> *mpA0, *mpAi, *mpAj, *mpAk;

  PreconditionType mPcMethod;
  //! preconditioning grids
  Grid<Real> *mpPCA0, *mpPCAi, *mpPCAj, *mpPCAk;
  //! multigrid preconditioner object (not owned)
  GridMg *mMG;

  //! sigma / residual
  Real mSigma;
  //! accuracy of solver (max. residuum)
  Real mAccuracy;
  //! norm of the residual
  Real mResNorm;
};  // GridCg
+
+//! Kernel: Apply symmetric stored Matrix
+
struct ApplyMatrix : public KernelBase {
  //! ctor immediately runs the kernel over all cells (generated kernel pattern)
  ApplyMatrix(const FlagGrid &flags,
              Grid<Real> &dst,
              const Grid<Real> &src,
              Grid<Real> &A0,
              Grid<Real> &Ai,
              Grid<Real> &Aj,
              Grid<Real> &Ak)
      : KernelBase(&flags, 0), flags(flags), dst(dst), src(src), A0(A0), Ai(Ai), Aj(Aj), Ak(Ak)
  {
    runMessage();
    run();
  }
  //! per-cell operation: dst = A * src for fluid cells, pass-through
  //! (dst = src) everywhere else
  inline void op(IndexInt idx,
                 const FlagGrid &flags,
                 Grid<Real> &dst,
                 const Grid<Real> &src,
                 Grid<Real> &A0,
                 Grid<Real> &Ai,
                 Grid<Real> &Aj,
                 Grid<Real> &Ak) const
  {
    if (!flags.isFluid(idx)) {
      dst[idx] = src[idx];
      return;
    }

    // 7-point stencil: diagonal A0 plus symmetric off-diagonal
    // entries Ai/Aj/Ak in x/y/z direction
    dst[idx] = src[idx] * A0[idx] + src[idx - X] * Ai[idx - X] + src[idx + X] * Ai[idx] +
               src[idx - Y] * Aj[idx - Y] + src[idx + Y] * Aj[idx] + src[idx - Z] * Ak[idx - Z] +
               src[idx + Z] * Ak[idx];
  }
  // generated argument accessors, used by the kernel machinery
  inline const FlagGrid &getArg0()
  {
    return flags;
  }
  typedef FlagGrid type0;
  inline Grid<Real> &getArg1()
  {
    return dst;
  }
  typedef Grid<Real> type1;
  inline const Grid<Real> &getArg2()
  {
    return src;
  }
  typedef Grid<Real> type2;
  inline Grid<Real> &getArg3()
  {
    return A0;
  }
  typedef Grid<Real> type3;
  inline Grid<Real> &getArg4()
  {
    return Ai;
  }
  typedef Grid<Real> type4;
  inline Grid<Real> &getArg5()
  {
    return Aj;
  }
  typedef Grid<Real> type5;
  inline Grid<Real> &getArg6()
  {
    return Ak;
  }
  typedef Grid<Real> type6;
  void runMessage()
  {
    debMsg("Executing kernel ApplyMatrix ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  //! tbb worker: process a linear sub-range of cell indices
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, flags, dst, src, A0, Ai, Aj, Ak);
  }
  //! launch the kernel in parallel over all cells
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  const FlagGrid &flags;
  Grid<Real> &dst;
  const Grid<Real> &src;
  Grid<Real> &A0;
  Grid<Real> &Ai;
  Grid<Real> &Aj;
  Grid<Real> &Ak;
};
+
+//! Kernel: Apply symmetric stored Matrix. 2D version
+
struct ApplyMatrix2D : public KernelBase {
  //! ctor immediately runs the kernel over all cells (generated kernel pattern)
  ApplyMatrix2D(const FlagGrid &flags,
                Grid<Real> &dst,
                const Grid<Real> &src,
                Grid<Real> &A0,
                Grid<Real> &Ai,
                Grid<Real> &Aj,
                Grid<Real> &Ak)
      : KernelBase(&flags, 0), flags(flags), dst(dst), src(src), A0(A0), Ai(Ai), Aj(Aj), Ak(Ak)
  {
    runMessage();
    run();
  }
  //! per-cell operation: 2D (5-point) version of ApplyMatrix::op;
  //! Ak is accepted but unused
  inline void op(IndexInt idx,
                 const FlagGrid &flags,
                 Grid<Real> &dst,
                 const Grid<Real> &src,
                 Grid<Real> &A0,
                 Grid<Real> &Ai,
                 Grid<Real> &Aj,
                 Grid<Real> &Ak) const
  {
    unusedParameter(Ak);  // only there for parameter compatibility with ApplyMatrix

    if (!flags.isFluid(idx)) {
      dst[idx] = src[idx];
      return;
    }

    // 5-point stencil: diagonal A0 plus symmetric x/y off-diagonals
    dst[idx] = src[idx] * A0[idx] + src[idx - X] * Ai[idx - X] + src[idx + X] * Ai[idx] +
               src[idx - Y] * Aj[idx - Y] + src[idx + Y] * Aj[idx];
  }
  // generated argument accessors, used by the kernel machinery
  inline const FlagGrid &getArg0()
  {
    return flags;
  }
  typedef FlagGrid type0;
  inline Grid<Real> &getArg1()
  {
    return dst;
  }
  typedef Grid<Real> type1;
  inline const Grid<Real> &getArg2()
  {
    return src;
  }
  typedef Grid<Real> type2;
  inline Grid<Real> &getArg3()
  {
    return A0;
  }
  typedef Grid<Real> type3;
  inline Grid<Real> &getArg4()
  {
    return Ai;
  }
  typedef Grid<Real> type4;
  inline Grid<Real> &getArg5()
  {
    return Aj;
  }
  typedef Grid<Real> type5;
  inline Grid<Real> &getArg6()
  {
    return Ak;
  }
  typedef Grid<Real> type6;
  void runMessage()
  {
    debMsg("Executing kernel ApplyMatrix2D ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  //! tbb worker: process a linear sub-range of cell indices
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, flags, dst, src, A0, Ai, Aj, Ak);
  }
  //! launch the kernel in parallel over all cells
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  const FlagGrid &flags;
  Grid<Real> &dst;
  const Grid<Real> &src;
  Grid<Real> &A0;
  Grid<Real> &Ai;
  Grid<Real> &Aj;
  Grid<Real> &Ak;
};
+
+//! Kernel: Construct the matrix for the poisson equation
+
struct MakeLaplaceMatrix : public KernelBase {
  //! ctor immediately runs the kernel with a border of 1 cell
  //! (generated kernel pattern)
  MakeLaplaceMatrix(const FlagGrid &flags,
                    Grid<Real> &A0,
                    Grid<Real> &Ai,
                    Grid<Real> &Aj,
                    Grid<Real> &Ak,
                    const MACGrid *fractions = 0)
      : KernelBase(&flags, 1), flags(flags), A0(A0), Ai(Ai), Aj(Aj), Ak(Ak), fractions(fractions)
  {
    runMessage();
    run();
  }
  //! per-cell operation: fill the diagonal (A0) and the +x/+y/+z
  //! off-diagonals (Ai/Aj/Ak) of the Poisson matrix for fluid cells;
  //! if a fractions grid is given, face fractions weight the entries
  //! instead of the plain obstacle/fluid counts
  inline void op(int i,
                 int j,
                 int k,
                 const FlagGrid &flags,
                 Grid<Real> &A0,
                 Grid<Real> &Ai,
                 Grid<Real> &Aj,
                 Grid<Real> &Ak,
                 const MACGrid *fractions = 0) const
  {
    if (!flags.isFluid(i, j, k))
      return;

    if (!fractions) {
      // diagonal, A0: one unit per non-obstacle neighbor
      if (!flags.isObstacle(i - 1, j, k))
        A0(i, j, k) += 1.;
      if (!flags.isObstacle(i + 1, j, k))
        A0(i, j, k) += 1.;
      if (!flags.isObstacle(i, j - 1, k))
        A0(i, j, k) += 1.;
      if (!flags.isObstacle(i, j + 1, k))
        A0(i, j, k) += 1.;
      if (flags.is3D() && !flags.isObstacle(i, j, k - 1))
        A0(i, j, k) += 1.;
      if (flags.is3D() && !flags.isObstacle(i, j, k + 1))
        A0(i, j, k) += 1.;

      // off-diagonal entries (only towards fluid neighbors in +x/+y/+z;
      // the matrix is symmetric, so the other directions are implicit)
      if (flags.isFluid(i + 1, j, k))
        Ai(i, j, k) = -1.;
      if (flags.isFluid(i, j + 1, k))
        Aj(i, j, k) = -1.;
      if (flags.is3D() && flags.isFluid(i, j, k + 1))
        Ak(i, j, k) = -1.;
    }
    else {
      // diagonal: sum of the face fractions of all cell faces
      A0(i, j, k) += fractions->get(i, j, k).x;
      A0(i, j, k) += fractions->get(i + 1, j, k).x;
      A0(i, j, k) += fractions->get(i, j, k).y;
      A0(i, j, k) += fractions->get(i, j + 1, k).y;
      if (flags.is3D())
        A0(i, j, k) += fractions->get(i, j, k).z;
      if (flags.is3D())
        A0(i, j, k) += fractions->get(i, j, k + 1).z;

      // off-diagonal entries, weighted by the shared face fraction
      if (flags.isFluid(i + 1, j, k))
        Ai(i, j, k) = -fractions->get(i + 1, j, k).x;
      if (flags.isFluid(i, j + 1, k))
        Aj(i, j, k) = -fractions->get(i, j + 1, k).y;
      if (flags.is3D() && flags.isFluid(i, j, k + 1))
        Ak(i, j, k) = -fractions->get(i, j, k + 1).z;
    }
  }
  // generated argument accessors, used by the kernel machinery
  inline const FlagGrid &getArg0()
  {
    return flags;
  }
  typedef FlagGrid type0;
  inline Grid<Real> &getArg1()
  {
    return A0;
  }
  typedef Grid<Real> type1;
  inline Grid<Real> &getArg2()
  {
    return Ai;
  }
  typedef Grid<Real> type2;
  inline Grid<Real> &getArg3()
  {
    return Aj;
  }
  typedef Grid<Real> type3;
  inline Grid<Real> &getArg4()
  {
    return Ak;
  }
  typedef Grid<Real> type4;
  inline const MACGrid *getArg5()
  {
    return fractions;
  }
  typedef MACGrid type5;
  void runMessage()
  {
    debMsg("Executing kernel MakeLaplaceMatrix ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  //! tbb worker: parallelized over z slices in 3D, over y rows in 2D,
  //! skipping the 1-cell border in the inner loops
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i, j, k, flags, A0, Ai, Aj, Ak, fractions);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, flags, A0, Ai, Aj, Ak, fractions);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  const FlagGrid &flags;
  Grid<Real> &A0;
  Grid<Real> &Ai;
  Grid<Real> &Aj;
  Grid<Real> &Ak;
  const MACGrid *fractions;
};
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/conjugategrad.h.reg.cpp b/extern/mantaflow/preprocessed/conjugategrad.h.reg.cpp
new file mode 100644
index 00000000000..d152fe8f113
--- /dev/null
+++ b/extern/mantaflow/preprocessed/conjugategrad.h.reg.cpp
@@ -0,0 +1,13 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "conjugategrad.h"
+namespace Manta {
+extern "C" {
+void PbRegister_file_3()
+{
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/edgecollapse.cpp b/extern/mantaflow/preprocessed/edgecollapse.cpp
new file mode 100644
index 00000000000..72c76ca9200
--- /dev/null
+++ b/extern/mantaflow/preprocessed/edgecollapse.cpp
@@ -0,0 +1,700 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Mesh edge collapse and subdivision
+ *
+ ******************************************************************************/
+
+/******************************************************************************/
+// Copyright note:
+//
+// These functions (C) Chris Wojtan
+// Long-term goal is to unify with his split&merge codebase
+//
+/******************************************************************************/
+
+#include "edgecollapse.h"
+#include <queue>
+
+using namespace std;
+
+namespace Manta {
+
+// 8-point butterfly subdivision scheme (as described by Brochu&Bridson 2009)
+Vec3 ButterflySubdivision(Mesh &m, const Corner &ca, const Corner &cb)
+{
+ Vec3 p = m.nodes(m.corners(ca.prev).node).pos + m.nodes(m.corners(ca.next).node).pos;
+ Vec3 q = m.nodes(ca.node).pos + m.nodes(cb.node).pos;
+ Vec3 r = m.nodes(m.corners(m.corners(ca.next).opposite).node).pos +
+ m.nodes(m.corners(m.corners(ca.prev).opposite).node).pos +
+ m.nodes(m.corners(m.corners(cb.next).opposite).node).pos +
+ m.nodes(m.corners(m.corners(cb.prev).opposite).node).pos;
+ return (8 * p + 2 * q - r) / 16.0;
+}
+
+// Modified Butterfly Subdivision Scheme from:
+// Interpolating Subdivision for Meshes with Arbitrary Topology
+// Denis Zorin, Peter Schroder, and Wim Sweldens
+// input the Corner that satisfies the following:
+// c.prev.node is the extraordinary vertex,
+// and c.next.node is the other vertex involved in the subdivision
Vec3 OneSidedButterflySubdivision(Mesh &m, const int valence, const Corner &c)
{
  Vec3 out;
  // p0: the extraordinary vertex, p1: the other endpoint of the split edge
  Vec3 p0 = m.nodes(m.corners(c.prev).node).pos;
  Vec3 p1 = m.nodes(m.corners(c.next).node).pos;

  if (valence == 3) {
    // fixed stencil for valence 3: weights 5/12, -1/12, -1/12 plus 3/4 for p0
    Vec3 p2 = m.nodes(c.node).pos;
    Vec3 p3 = m.nodes(m.corners(m.corners(c.next).opposite).node).pos;
    out = (5.0 / 12.0) * p1 - (1.0 / 12.0) * (p2 + p3) + 0.75 * p0;
  }
  else if (valence == 4) {
    // fixed stencil for valence 4: weights 3/8, -1/8 plus 3/4 for p0
    Vec3 p2 = m.nodes(m.corners(m.corners(c.next).opposite).node).pos;
    out = 0.375 * p1 - 0.125 * p2 + 0.75 * p0;
  }
  else {
    // rotate around extraordinary vertex,
    // calculate subdivision weights,
    // and interpolate vertex position
    double rv = 1.0 / (double)valence;
    out = 0.0;
    int current = c.prev;
    for (int j = 0; j < valence; j++) {
      // cosine-based weight s_j for the j-th one-ring vertex
      double s = (0.25 + cos(2 * M_PI * j * rv) + 0.5 * cos(4 * M_PI * j * rv)) * rv;
      Vec3 p = m.nodes(m.corners(m.corners(current).prev).node).pos;

      out += s * p;
      // advance to the next corner around the extraordinary vertex
      current = m.corners(m.corners(m.corners(current).next).opposite).next;
    }
    out += 0.75 * m.nodes(m.corners(c.prev).node).pos;
  }
  return out;
}
+
+// Modified Butterfly Subdivision Scheme from:
+// Interpolating Subdivision for Meshes with Arbitrary Topology
+// Denis Zorin, Peter Schroder, and Wim Sweldens
+Vec3 ModifiedButterflySubdivision(Mesh &m,
+ const Corner &ca,
+ const Corner &cb,
+ const Vec3 &fallback)
+{
+ // calculate the valence of the two parent vertices
+ int start = ca.prev;
+ int current = start;
+ int valenceA = 0;
+ do {
+ valenceA++;
+ int op = m.corners(m.corners(current).next).opposite;
+ if (op < 0)
+ return fallback;
+ current = m.corners(op).next;
+ } while (current != start);
+ start = ca.next;
+ current = start;
+ int valenceB = 0;
+ do {
+ valenceB++;
+ int op = m.corners(m.corners(current).next).opposite;
+ if (op < 0)
+ return fallback;
+ current = m.corners(op).next;
+ } while (current != start);
+
+ // if both vertices have valence 6, use butterfly subdivision
+ if (valenceA == 6 && valenceB == 6) {
+ return ButterflySubdivision(m, ca, cb);
+ }
+ else if (valenceA == 6) // use a one-sided scheme
+ {
+ return OneSidedButterflySubdivision(m, valenceB, cb);
+ }
+ else if (valenceB == 6) // use a one-sided scheme
+ {
+ return OneSidedButterflySubdivision(m, valenceA, ca);
+ }
+ else // average the results from two one-sided schemes
+ {
+ return 0.5 * (OneSidedButterflySubdivision(m, valenceA, ca) +
+ OneSidedButterflySubdivision(m, valenceB, cb));
+ }
+}
+
+bool gAbort = false;
+
+// collapse an edge on triangle "trinum".
+// "which" is 0,1, or 2,
+// where which==0 is the triangle edge from p0 to p1,
+// which==1 is the triangle edge from p1 to p2,
+// and which==2 is the triangle edge from p2 to p0,
+void CollapseEdge(Mesh &m,
+ const int trinum,
+ const int which,
+ const Vec3 &edgevect,
+ const Vec3 &endpoint,
+ vector<int> &deletedNodes,
+ std::map<int, bool> &taintedTris,
+ int &numCollapses,
+ bool doTubeCutting)
+{
+ if (gAbort)
+ return;
+ // I wanted to draw a pretty picture of an edge collapse,
+ // but I don't know how to make wacky angled lines in ASCII.
+ // Instead, I will show the before case and tell you what needs to be done.
+
+ // BEFORE:
+ // *
+ // / \.
+ // /C0 \.
+ // / \.
+ // / \.
+ // / B \.
+ // / \.
+ // /C1 C2 \.
+ // P0 *---------------* P1
+ // \C2 C1 /
+ // \ /
+ // \ A /
+ // \ /
+ // \ /
+ // \C0 /
+ // \ /
+ // *
+ //
+ // We are going to collapse the edge between P0 and P1
+ // by deleting P1,
+ // and taking all references to P1,
+ // and rerouting them to P0 instead
+ //
+ // What we need to do:
+ // Move position of P0
+ // Preserve connectivity in both triangles:
+ // (C1.opposite).opposite = C2.o
+ // (C2.opposite).opposite = C1.o
+ // Delete references to Corners of deleted triangles in both P0 and P1's Corner list
+ // Reassign references to P1:
+ // loop through P1 triangles:
+ // rename P1 references to P0 in p lists.
+ // rename Corner.v references
+ // Copy P1's list of Corners over to P0's list of Corners
+ // Delete P1
+
+ Corner ca_old[3], cb_old[3];
+ ca_old[0] = m.corners(trinum, which);
+ ca_old[1] = m.corners(ca_old[0].next);
+ ca_old[2] = m.corners(ca_old[0].prev);
+ bool haveB = false;
+ if (ca_old[0].opposite >= 0) {
+ cb_old[0] = m.corners(ca_old[0].opposite);
+ cb_old[1] = m.corners(cb_old[0].next);
+ cb_old[2] = m.corners(cb_old[0].prev);
+ haveB = true;
+ }
+ if (!haveB) {
+ // for now, don't collapse
+ return;
+ }
+
+ int P0 = ca_old[2].node;
+ int P1 = ca_old[1].node;
+
+ ///////////////
+ // avoid creating nonmanifold edges
+ bool nonmanifold = false;
+ bool nonmanifold2 = false;
+
+ set<int> &ring0 = m.get1Ring(P0).nodes;
+ set<int> &ring1 = m.get1Ring(P1).nodes;
+
+ // check for intersections of the 1-rings of P0,P1
+ int cl = 0, commonVert = -1;
+ for (set<int>::iterator it = ring1.begin(); it != ring1.end(); ++it)
+ if (ring0.find(*it) != ring0.end()) {
+ cl++;
+ if (*it != ca_old[0].node && *it != cb_old[0].node)
+ commonVert = *it;
+ }
+
+ nonmanifold = cl > 2;
+ nonmanifold2 = cl > 3;
+
+ if (nonmanifold && ca_old[1].opposite >= 0 && cb_old[1].opposite >= 0 &&
+ ca_old[2].opposite >= 0 &&
+ cb_old[2].opposite >= 0) // collapsing this edge would create a non-manifold edge
+ {
+ if (nonmanifold2)
+ return;
+
+ bool topTet = false;
+ bool botTet = false;
+ // check if collapsing this edge will collapse a tet.
+ if (m.corners(ca_old[1].opposite).node == m.corners(ca_old[2].opposite).node)
+ botTet = true;
+
+ if (m.corners(cb_old[1].opposite).node == m.corners(cb_old[2].opposite).node)
+ topTet = true;
+
+ if (topTet ^ botTet) {
+
+ // safe pyramid case.
+ // collapse the whole tet!
+ // First collapse the top of the pyramid,
+ // then carry on collapsing the original verts.
+ Corner cc_old[3], cd_old[3];
+ if (botTet)
+ cc_old[0] = m.corners(ca_old[1].opposite);
+ else // topTet
+ cc_old[0] = cb_old[2];
+ cc_old[1] = m.corners(cc_old[0].next);
+ cc_old[2] = m.corners(cc_old[0].prev);
+ if (cc_old[0].opposite < 0)
+ return;
+ cd_old[0] = m.corners(cc_old[0].opposite);
+ cd_old[1] = m.corners(cd_old[0].next);
+ cd_old[2] = m.corners(cd_old[0].prev);
+ int P2 = cc_old[2].node;
+ int P3 = cc_old[1].node;
+
+ // update tri props of all adjacent triangles of P0,P1 (do before CT updates!)
+ for (int i = 0; i < m.numTriChannels(); i++) {
+ }; // TODO: handleTriPropertyEdgeCollapse(trinum, P2,P3, cc_old[0], cd_old[0]);
+
+ m.mergeNode(P2, P3);
+
+ // Preserve connectivity in both triangles
+ if (cc_old[1].opposite >= 0)
+ m.corners(cc_old[1].opposite).opposite = cc_old[2].opposite;
+ if (cc_old[2].opposite >= 0)
+ m.corners(cc_old[2].opposite).opposite = cc_old[1].opposite;
+ if (cd_old[1].opposite >= 0)
+ m.corners(cd_old[1].opposite).opposite = cd_old[2].opposite;
+ if (cd_old[2].opposite >= 0)
+ m.corners(cd_old[2].opposite).opposite = cd_old[1].opposite;
+
+ ////////////////////
+ // mark the two triangles and the one node for deletion
+ int tmpTrinum = cc_old[0].tri;
+ int tmpOthertri = cd_old[0].tri;
+ m.removeTriFromLookup(tmpTrinum);
+ m.removeTriFromLookup(tmpOthertri);
+ taintedTris[tmpTrinum] = true;
+ taintedTris[tmpOthertri] = true;
+ deletedNodes.push_back(P3);
+
+ numCollapses++;
+
+ // recompute Corners for triangles A and B
+ if (botTet)
+ ca_old[0] = m.corners(ca_old[2].opposite);
+ else
+ ca_old[0] = m.corners(ca_old[1].prev);
+ ca_old[1] = m.corners(ca_old[0].next);
+ ca_old[2] = m.corners(ca_old[0].prev);
+ cb_old[0] = m.corners(ca_old[0].opposite);
+ cb_old[1] = m.corners(cb_old[0].next);
+ cb_old[2] = m.corners(cb_old[0].prev);
+
+ ///////////////
+ // avoid creating nonmanifold edges... again
+ ring0 = m.get1Ring(ca_old[2].node).nodes;
+ ring1 = m.get1Ring(ca_old[1].node).nodes;
+
+ // check for intersections of the 1-rings of P0,P1
+ cl = 0;
+ for (set<int>::iterator it = ring1.begin(); it != ring1.end(); ++it)
+ if (*it != ca_old[0].node && ring0.find(*it) != ring0.end())
+ cl++;
+
+ if (cl > 2) { // nonmanifold
+ // this can happen if collapsing the first tet leads to another similar collapse that
+ // requires the collapse of a tet. for now, just move on and pick this up later.
+
+ // if the original component was very small, this first collapse could have led to a tiny
+ // piece of nonmanifold geometry. in this case, just delete everything that remains.
+ if (m.corners(ca_old[0].opposite).tri == cb_old[0].tri &&
+ m.corners(ca_old[1].opposite).tri == cb_old[0].tri &&
+ m.corners(ca_old[2].opposite).tri == cb_old[0].tri) {
+ taintedTris[ca_old[0].tri] = true;
+ taintedTris[cb_old[0].tri] = true;
+ m.removeTriFromLookup(ca_old[0].tri);
+ m.removeTriFromLookup(cb_old[0].tri);
+ deletedNodes.push_back(ca_old[0].node);
+ deletedNodes.push_back(ca_old[1].node);
+ deletedNodes.push_back(ca_old[2].node);
+ }
+ return;
+ }
+ }
+ else if (topTet && botTet && ca_old[1].opposite >= 0 && ca_old[2].opposite >= 0 &&
+ cb_old[1].opposite >= 0 && cb_old[2].opposite >= 0) {
+ if (!(m.corners(ca_old[1].opposite).node == m.corners(ca_old[2].opposite).node &&
+ m.corners(cb_old[1].opposite).node == m.corners(cb_old[2].opposite).node &&
+ (m.corners(ca_old[1].opposite).node == m.corners(cb_old[1].opposite).node ||
+ (m.corners(ca_old[1].opposite).node == cb_old[0].node &&
+ m.corners(cb_old[1].opposite).node == ca_old[0].node)))) {
+ // just collapse one for now.
+
+ // collapse the whole tet!
+ // First collapse the top of the pyramid,
+ // then carry on collapsing the original verts.
+ Corner cc_old[3], cd_old[3];
+
+ // collapse top
+ {
+ cc_old[0] = m.corners(ca_old[1].opposite);
+ cc_old[1] = m.corners(cc_old[0].next);
+ cc_old[2] = m.corners(cc_old[0].prev);
+ if (cc_old[0].opposite < 0)
+ return;
+ cd_old[0] = m.corners(cc_old[0].opposite);
+ cd_old[1] = m.corners(cd_old[0].next);
+ cd_old[2] = m.corners(cd_old[0].prev);
+ int P2 = cc_old[2].node;
+ int P3 = cc_old[1].node;
+
+ // update tri props of all adjacent triangles of P0,P1 (do before CT updates!)
+ // TODO: handleTriPropertyEdgeCollapse(trinum, P2,P3, cc_old[0], cd_old[0]);
+
+ m.mergeNode(P2, P3);
+
+ // Preserve connectivity in both triangles
+ if (cc_old[1].opposite >= 0)
+ m.corners(cc_old[1].opposite).opposite = cc_old[2].opposite;
+ if (cc_old[2].opposite >= 0)
+ m.corners(cc_old[2].opposite).opposite = cc_old[1].opposite;
+ if (cd_old[1].opposite >= 0)
+ m.corners(cd_old[1].opposite).opposite = cd_old[2].opposite;
+ if (cd_old[2].opposite >= 0)
+ m.corners(cd_old[2].opposite).opposite = cd_old[1].opposite;
+
+ ////////////////////
+ // mark the two triangles and the one node for deletion
+ int tmpTrinum = cc_old[0].tri;
+ int tmpOthertri = cd_old[0].tri;
+ taintedTris[tmpTrinum] = true;
+ taintedTris[tmpOthertri] = true;
+ m.removeTriFromLookup(tmpTrinum);
+ m.removeTriFromLookup(tmpOthertri);
+ deletedNodes.push_back(P3);
+
+ numCollapses++;
+ }
+ // then collapse bottom
+ {
+ // cc_old[0] = [ca_old[1].opposite;
+ cc_old[0] = cb_old[2];
+ cc_old[1] = m.corners(cc_old[0].next);
+ cc_old[2] = m.corners(cc_old[0].prev);
+ if (cc_old[0].opposite < 0)
+ return;
+ cd_old[0] = m.corners(cc_old[0].opposite);
+ cd_old[1] = m.corners(cd_old[0].next);
+ cd_old[2] = m.corners(cd_old[0].prev);
+ int P2 = cc_old[2].node;
+ int P3 = cc_old[1].node;
+
+ // update tri props of all adjacent triangles of P0,P1 (do before CT updates!)
+ // TODO: handleTriPropertyEdgeCollapse(trinum, P2,P3, cc_old[0], cd_old[0]);
+
+ m.mergeNode(P2, P3);
+
+ // Preserve connectivity in both triangles
+ if (cc_old[1].opposite >= 0)
+ m.corners(cc_old[1].opposite).opposite = cc_old[2].opposite;
+ if (cc_old[2].opposite >= 0)
+ m.corners(cc_old[2].opposite).opposite = cc_old[1].opposite;
+ if (cd_old[1].opposite >= 0)
+ m.corners(cd_old[1].opposite).opposite = cd_old[2].opposite;
+ if (cd_old[2].opposite >= 0)
+ m.corners(cd_old[2].opposite).opposite = cd_old[1].opposite;
+
+ ////////////////////
+ // mark the two triangles and the one node for deletion
+ int tmpTrinum = cc_old[0].tri;
+ int tmpOthertri = cd_old[0].tri;
+ taintedTris[tmpTrinum] = true;
+ taintedTris[tmpOthertri] = true;
+ deletedNodes.push_back(P3);
+
+ numCollapses++;
+ }
+
+ // Though we've collapsed a lot of stuff, we still haven't collapsed the original edge.
+ // At this point we still haven't guaranteed that this original collapse will be safe.
+ // quit for now, and we'll catch the remaining short edges the next time this function is
+ // called.
+ return;
+ }
+ }
+ else if (doTubeCutting) {
+ // tube case
+ // cout<<"CollapseEdge:tube case" << endl;
+
+ // find the edges that touch the common vert
+ int P2 = commonVert;
+ int P1P2 = -1, P2P1, P2P0 = -1, P0P2 = -1; // corners across from the cutting seam
+ int start = ca_old[0].next;
+ int end = cb_old[0].prev;
+ int current = start;
+ do {
+ // rotate around vertex P1 counter-clockwise
+ int op = m.corners(m.corners(current).next).opposite;
+ if (op < 0)
+ errMsg("tube cutting failed, no opposite");
+ current = m.corners(op).next;
+
+ if (m.corners(m.corners(current).prev).node == commonVert)
+ P1P2 = m.corners(current).next;
+ } while (current != end);
+
+ start = ca_old[0].prev;
+ end = cb_old[0].next;
+ current = start;
+ do {
+ // rotate around vertex P0 clockwise
+ int op = m.corners(m.corners(current).prev).opposite;
+ if (op < 0)
+ errMsg("tube cutting failed, no opposite");
+
+ current = m.corners(op).prev;
+ if (m.corners(m.corners(current).next).node == commonVert)
+ P2P0 = m.corners(current).prev;
+ } while (current != end);
+
+ if (P1P2 < 0 || P2P0 < 0)
+ errMsg("tube cutting failed, ill geometry");
+
+ P2P1 = m.corners(P1P2).opposite;
+ P0P2 = m.corners(P2P0).opposite;
+
+ // duplicate vertices on the top half of the cut,
+ // and use them to split the tube at this seam
+ int P0b = m.addNode(Node(m.nodes(P0).pos));
+ int P1b = m.addNode(Node(m.nodes(P1).pos));
+ int P2b = m.addNode(Node(m.nodes(P2).pos));
+ for (int i = 0; i < m.numNodeChannels(); i++) {
+ m.nodeChannel(i)->addInterpol(P0, P0, 0.5);
+ m.nodeChannel(i)->addInterpol(P1, P1, 0.5);
+ m.nodeChannel(i)->addInterpol(P2, P2, 0.5);
+ }
+
+ // offset the verts in the normal directions to avoid self intersections
+ Vec3 offsetVec = cross(m.nodes(P1).pos - m.nodes(P0).pos, m.nodes(P2).pos - m.nodes(P0).pos);
+ normalize(offsetVec);
+ offsetVec *= 0.01; // HACK:
+ m.nodes(P0).pos -= offsetVec;
+ m.nodes(P1).pos -= offsetVec;
+ m.nodes(P2).pos -= offsetVec;
+ m.nodes(P0b).pos += offsetVec;
+ m.nodes(P1b).pos += offsetVec;
+ m.nodes(P2b).pos += offsetVec;
+
+ // create a list of all triangles which touch P0, P1, and P2 from the top,
+ map<int, bool> topTris;
+ start = cb_old[0].next;
+ end = m.corners(P0P2).prev;
+ current = start;
+ topTris[start / 3] = true;
+ do {
+ // rotate around vertex P0 counter-clockwise
+ current = m.corners(m.corners(m.corners(current).next).opposite).next;
+ topTris[current / 3] = true;
+ } while (current != end);
+ start = m.corners(P0P2).next;
+ end = m.corners(P2P1).prev;
+ current = start;
+ topTris[start / 3] = true;
+ do {
+ // rotate around vertex P0 counter-clockwise
+ current = m.corners(m.corners(m.corners(current).next).opposite).next;
+ topTris[current / 3] = true;
+ } while (current != end);
+ start = m.corners(P2P1).next;
+ end = cb_old[0].prev;
+ current = start;
+ topTris[start / 3] = true;
+ do {
+ // rotate around vertex P0 counter-clockwise
+ current = m.corners(m.corners(m.corners(current).next).opposite).next;
+ topTris[current / 3] = true;
+ } while (current != end);
+
+ // create two new triangles,
+ int Ta = m.addTri(Triangle(P0, P1, P2));
+ int Tb = m.addTri(Triangle(P1b, P0b, P2b));
+ for (int i = 0; i < m.numTriChannels(); i++) {
+ m.triChannel(i)->addNew();
+ m.triChannel(i)->addNew();
+ }
+
+ // sew the tris to close the cut on each side
+ for (int c = 0; c < 3; c++)
+ m.addCorner(Corner(Ta, m.tris(Ta).c[c]));
+ for (int c = 0; c < 3; c++)
+ m.addCorner(Corner(Tb, m.tris(Tb).c[c]));
+ for (int c = 0; c < 3; c++) {
+ m.corners(Ta, c).next = 3 * Ta + ((c + 1) % 3);
+ m.corners(Ta, c).prev = 3 * Ta + ((c + 2) % 3);
+ m.corners(Tb, c).next = 3 * Tb + ((c + 1) % 3);
+ m.corners(Tb, c).prev = 3 * Tb + ((c + 2) % 3);
+ }
+ m.corners(Ta, 0).opposite = P1P2;
+ m.corners(Ta, 1).opposite = P2P0;
+ m.corners(Ta, 2).opposite = ca_old[1].prev;
+ m.corners(Tb, 0).opposite = P0P2;
+ m.corners(Tb, 1).opposite = P2P1;
+ m.corners(Tb, 2).opposite = cb_old[1].prev;
+ for (int c = 0; c < 3; c++) {
+ m.corners(m.corners(Ta, c).opposite).opposite = 3 * Ta + c;
+ m.corners(m.corners(Tb, c).opposite).opposite = 3 * Tb + c;
+ }
+ // replace P0,P1,P2 on the top with P0b,P1b,P2b.
+ for (map<int, bool>::iterator tti = topTris.begin(); tti != topTris.end(); tti++) {
+ // cout << "H " << tti->first << " : " << m.tris(tti->first).c[0] << " " <<
+ // m.tris(tti->first).c[1] << " " << m.tris(tti->first).c[2] << " " << endl;
+ for (int i = 0; i < 3; i++) {
+ int cn = m.tris(tti->first).c[i];
+ set<int> &ring = m.get1Ring(cn).nodes;
+
+ if (ring.find(P0) != ring.end() && cn != P0 && cn != P1 && cn != P2 && cn != P0b &&
+ cn != P1b && cn != P2b) {
+ ring.erase(P0);
+ ring.insert(P0b);
+ m.get1Ring(P0).nodes.erase(cn);
+ m.get1Ring(P0b).nodes.insert(cn);
+ }
+ if (ring.find(P1) != ring.end() && cn != P0 && cn != P1 && cn != P2 && cn != P0b &&
+ cn != P1b && cn != P2b) {
+ ring.erase(P1);
+ ring.insert(P1b);
+ m.get1Ring(P1).nodes.erase(cn);
+ m.get1Ring(P1b).nodes.insert(cn);
+ }
+ if (ring.find(P2) != ring.end() && cn != P0 && cn != P1 && cn != P2 && cn != P0b &&
+ cn != P1b && cn != P2b) {
+ ring.erase(P2);
+ ring.insert(P2b);
+ m.get1Ring(P2).nodes.erase(cn);
+ m.get1Ring(P2b).nodes.insert(cn);
+ }
+ if (cn == P0) {
+ m.tris(tti->first).c[i] = P0b;
+ m.corners(tti->first, i).node = P0b;
+ m.get1Ring(P0).tris.erase(tti->first);
+ m.get1Ring(P0b).tris.insert(tti->first);
+ }
+ else if (cn == P1) {
+ m.tris(tti->first).c[i] = P1b;
+ m.corners(tti->first, i).node = P1b;
+ m.get1Ring(P1).tris.erase(tti->first);
+ m.get1Ring(P1b).tris.insert(tti->first);
+ }
+ else if (cn == P2) {
+ m.tris(tti->first).c[i] = P2b;
+ m.corners(tti->first, i).node = P2b;
+ m.get1Ring(P2).tris.erase(tti->first);
+ m.get1Ring(P2b).tris.insert(tti->first);
+ }
+ }
+ }
+
+ // m.sanityCheck(true, &deletedNodes, &taintedTris);
+
+ return;
+ }
+ return;
+ }
+ if (ca_old[1].opposite >= 0 && ca_old[2].opposite >= 0 && cb_old[1].opposite >= 0 &&
+ cb_old[2].opposite >= 0 && ca_old[0].opposite >= 0 && cb_old[0].opposite >= 0 &&
+ ((m.corners(ca_old[1].opposite).node ==
+ m.corners(ca_old[2].opposite).node && // two-pyramid tubey case (6 tris, 5 verts)
+ m.corners(cb_old[1].opposite).node == m.corners(cb_old[2].opposite).node &&
+ (m.corners(ca_old[1].opposite).node == m.corners(cb_old[1].opposite).node ||
+ (m.corners(ca_old[1].opposite).node == cb_old[0].node && // single tetrahedron case
+ m.corners(cb_old[1].opposite).node == ca_old[0].node))) ||
+ (m.corners(ca_old[0].opposite).tri == m.corners(cb_old[0].opposite).tri &&
+ m.corners(ca_old[1].opposite).tri == m.corners(cb_old[0].opposite).tri &&
+ m.corners(ca_old[2].opposite).tri ==
+ m.corners(cb_old[0].opposite).tri // nonmanifold: 2 tris, 3 verts
+ && m.corners(cb_old[0].opposite).tri == m.corners(ca_old[0].opposite).tri &&
+ m.corners(cb_old[1].opposite).tri == m.corners(ca_old[0].opposite).tri &&
+ m.corners(cb_old[2].opposite).tri == m.corners(ca_old[0].opposite).tri))) {
+ // both top and bottom are closed pyramid caps, or it is a single tet
+ // delete the whole component!
+ // flood fill to mark all triangles in the component
+ map<int, bool> markedTris;
+ queue<int> triQ;
+ triQ.push(trinum);
+ markedTris[trinum] = true;
+ int iters = 0;
+ while (!triQ.empty()) {
+ int trival = triQ.front();
+ triQ.pop();
+ for (int i = 0; i < 3; i++) {
+ int newtri = m.corners(m.corners(trival, i).opposite).tri;
+ if (markedTris.find(newtri) == markedTris.end()) {
+ triQ.push(newtri);
+ markedTris[newtri] = true;
+ }
+ }
+ iters++;
+ }
+ map<int, bool> markedverts;
+ for (map<int, bool>::iterator mit = markedTris.begin(); mit != markedTris.end(); mit++) {
+ taintedTris[mit->first] = true;
+ markedverts[m.tris(mit->first).c[0]] = true;
+ markedverts[m.tris(mit->first).c[1]] = true;
+ markedverts[m.tris(mit->first).c[2]] = true;
+ }
+ for (map<int, bool>::iterator mit = markedverts.begin(); mit != markedverts.end(); mit++)
+ deletedNodes.push_back(mit->first);
+ return;
+ }
+
+ //////////////////////////
+ // begin original edge collapse
+
+ // update tri props of all adjacent triangles of P0,P1 (do before CT updates!)
+ // TODO: handleTriPropertyEdgeCollapse(trinum, P0,P1, ca_old[0], cb_old[0]);
+
+ m.mergeNode(P0, P1);
+
+ // Move position of P0
+ m.nodes(P0).pos = endpoint + 0.5 * edgevect;
+
+ // Preserve connectivity in both triangles
+ if (ca_old[1].opposite >= 0)
+ m.corners(ca_old[1].opposite).opposite = ca_old[2].opposite;
+ if (ca_old[2].opposite >= 0)
+ m.corners(ca_old[2].opposite).opposite = ca_old[1].opposite;
+ if (haveB && cb_old[1].opposite >= 0)
+ m.corners(cb_old[1].opposite).opposite = cb_old[2].opposite;
+ if (haveB && cb_old[2].opposite >= 0)
+ m.corners(cb_old[2].opposite).opposite = cb_old[1].opposite;
+
+ ////////////////////
+ // mark the two triangles and the one node for deletion
+ taintedTris[ca_old[0].tri] = true;
+ m.removeTriFromLookup(ca_old[0].tri);
+ if (haveB) {
+ taintedTris[cb_old[0].tri] = true;
+ m.removeTriFromLookup(cb_old[0].tri);
+ }
+ deletedNodes.push_back(P1);
+ numCollapses++;
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/edgecollapse.h b/extern/mantaflow/preprocessed/edgecollapse.h
new file mode 100644
index 00000000000..c482255c6ce
--- /dev/null
+++ b/extern/mantaflow/preprocessed/edgecollapse.h
@@ -0,0 +1,51 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Mesh edge collapse and subdivision
+ *
+ ******************************************************************************/
+
+/******************************************************************************/
+// Copyright note:
+//
+// These functions (C) Chris Wojtan
+// Long-term goal is to unify with his split&merge codebase
+//
+/******************************************************************************/
+
+#ifndef _EDGECOLLAPSE_H
+#define _EDGECOLLAPSE_H
+
+#include "mesh.h"
+
+namespace Manta {
+
+void CollapseEdge(Mesh &mesh,
+ const int trinum,
+ const int which,
+ const Vec3 &edgevect,
+ const Vec3 &endpoint,
+ std::vector<int> &deletedNodes,
+ std::map<int, bool> &taintedTris,
+ int &numCollapses,
+ bool doTubeCutting);
+
+Vec3 ModifiedButterflySubdivision(Mesh &mesh,
+ const Corner &ca,
+ const Corner &cb,
+ const Vec3 &fallback);
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/edgecollapse.h.reg.cpp b/extern/mantaflow/preprocessed/edgecollapse.h.reg.cpp
new file mode 100644
index 00000000000..002756b3a9c
--- /dev/null
+++ b/extern/mantaflow/preprocessed/edgecollapse.h.reg.cpp
@@ -0,0 +1,13 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "edgecollapse.h"
+namespace Manta {
+extern "C" {
+// Empty per-file registration hook: edgecollapse.h exposes no Python-visible
+// symbols, but the MantaFlow preprocessor emits a registry entry regardless.
+void PbRegister_file_19()
+{
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/fastmarch.cpp b/extern/mantaflow/preprocessed/fastmarch.cpp
new file mode 100644
index 00000000000..7792ddafe6a
--- /dev/null
+++ b/extern/mantaflow/preprocessed/fastmarch.cpp
@@ -0,0 +1,1200 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Fast marching and extrapolation
+ *
+ ******************************************************************************/
+
+#include "fastmarch.h"
+#include "levelset.h"
+#include "kernel.h"
+#include <algorithm>
+
+using namespace std;
+
+namespace Manta {
+
+// Constructor: bind the marcher to its flag/levelset grids. TDIR (+1 or -1,
+// see the explicit instantiations below) selects the marching direction;
+// mMaxTime is stored pre-multiplied by TDIR so one comparison handles both.
+// If velTransport is given, velocity extrapolation runs alongside the march.
+template<class COMP, int TDIR>
+FastMarch<COMP, TDIR>::FastMarch(const FlagGrid &flags,
+ Grid<int> &fmFlags,
+ Grid<Real> &levelset,
+ Real maxTime,
+ MACGrid *velTransport)
+ : mLevelset(levelset), mFlags(flags), mFmFlags(fmFlags)
+{
+ if (velTransport)
+ mVelTransport.initMarching(velTransport, &flags);
+
+ mMaxTime = maxTime * TDIR;
+}
+
+// helper for individual components to calculateDistance:
+// inspect the two neighbors of 'idx' along axis C. If one already holds a
+// valid (FlagInited) levelset value, append it to v[] (advancing okcnt) and
+// set the matching transport weight; otherwise bump invcnt for this axis.
+// Returns the neighbor value that was used (0 if neither was valid).
+template<class COMP, int TDIR>
+template<int C>
+Real FastMarch<COMP, TDIR>::calcWeights(int &okcnt, int &invcnt, Real *v, const Vec3i &idx)
+{
+ Real val = 0.;
+ Vec3i idxPlus(idx), idxMinus(idx);
+ idxPlus[C]++;
+ idxMinus[C]--;
+
+ mWeights[C * 2] = mWeights[C * 2 + 1] = 0.;
+ if (mFmFlags(idxPlus) == FlagInited) {
+ // somewhat arbitrary - choose +1 value over -1 ...
+ val = mLevelset(idxPlus);
+ v[okcnt] = val;
+ okcnt++;
+ mWeights[C * 2] = 1.;
+ }
+ else if (mFmFlags(idxMinus) == FlagInited) {
+ val = mLevelset(idxMinus);
+ v[okcnt] = val;
+ okcnt++;
+ mWeights[C * 2 + 1] = 1.;
+ }
+ else {
+ invcnt++;
+ }
+ return val;
+}
+
+// Compute the tentative arrival value for cell 'idx' from the already
+// initialized neighbor values gathered by calcWeights. The branch depends on
+// how many axes had no valid neighbor (invcnt): 0 -> three-value quadratic
+// update, 1 -> two-value quadratic update, 2 -> single value offset by TDIR.
+// Along the way the mWeights entries are scaled and normalized so the
+// optional velocity transport (transpTouch) can blend neighbor velocities.
+template<class COMP, int TDIR>
+inline Real FastMarch<COMP, TDIR>::calculateDistance(const Vec3i &idx)
+{
+ // int invflag = 0;
+ int invcnt = 0;
+ Real v[3];
+ int okcnt = 0;
+
+ // collect valid neighbor values per axis; 2D counts z as invalid
+ Real aVal = calcWeights<0>(okcnt, invcnt, v, idx);
+ Real bVal = calcWeights<1>(okcnt, invcnt, v, idx);
+ Real cVal = 0.;
+ if (mLevelset.is3D())
+ cVal = calcWeights<2>(okcnt, invcnt, v, idx);
+ else {
+ invcnt++;
+ mWeights[4] = mWeights[5] = 0.;
+ }
+
+ Real ret = InvalidTime();
+ switch (invcnt) {
+ case 0: {
+ // take all values
+ const Real ca = v[0], cb = v[1], cc = v[2];
+ const Real csqrt = max(0.,
+ -2. * (ca * ca + cb * cb - cb * cc + cc * cc - ca * (cb + cc)) + 3);
+ // clamp to make sure the sqrt is valid
+ ret = 0.333333 * (ca + cb + cc + TDIR * sqrt(csqrt));
+
+ // weights needed for transport (transpTouch)
+ mWeights[0] *= fabs(ret - ca);
+ mWeights[1] *= fabs(ret - ca);
+ mWeights[2] *= fabs(ret - cb);
+ mWeights[3] *= fabs(ret - cb);
+ mWeights[4] *= fabs(ret - cc);
+ mWeights[5] *= fabs(ret - cc);
+
+ Real norm = 0.0; // try to force normalization
+ for (int i = 0; i < 6; i++) {
+ norm += mWeights[i];
+ }
+ norm = 1.0 / norm;
+ for (int i = 0; i < 6; i++) {
+ mWeights[i] *= norm;
+ }
+
+ } break;
+ case 1: {
+ // take just the 2 ok values
+ // t=0.5*( a+b+ (2*g*g-(b-a)*(b-a))^0.5)
+ const Real csqrt = max(0., 2. - (v[1] - v[0]) * (v[1] - v[0]));
+ // clamp to make sure the sqrt is valid
+ ret = 0.5 * (v[0] + v[1] + TDIR * sqrt(csqrt));
+
+ // weights needed for transport (transpTouch)
+ mWeights[0] *= fabs(ret - aVal);
+ mWeights[1] *= fabs(ret - aVal);
+ mWeights[2] *= fabs(ret - bVal);
+ mWeights[3] *= fabs(ret - bVal);
+ mWeights[4] *= fabs(ret - cVal);
+ mWeights[5] *= fabs(ret - cVal);
+
+ Real norm = 0.0; // try to force normalization
+ for (int i = 0; i < 6; i++) {
+ norm += mWeights[i];
+ }
+ norm = 1.0 / norm;
+ for (int i = 0; i < 6; i++) {
+ mWeights[i] *= norm;
+ }
+ // */
+
+ } break;
+ case 2: {
+ // just use the one remaining value
+ ret = v[0] + (Real)(TDIR); // direction = +- 1
+ } break;
+ default:
+ errMsg("FastMarch :: Invalid invcnt");
+ break;
+ }
+ return ret;
+}
+
+// Relax neighbor cell 'p' from the finalized cell 'src': compute a tentative
+// arrival value, keep the better value if 'p' is already on the heap, update
+// levelset/flags, optionally advance velocity transport, and push a new heap
+// entry when the cell was reached for the first time.
+template<class COMP, int TDIR>
+void FastMarch<COMP, TDIR>::addToList(const Vec3i &p, const Vec3i &src)
+{
+ if (!mLevelset.isInBounds(p, 1))
+ return;
+ const IndexInt idx = mLevelset.index(p);
+
+ // already known value, value already set to valid value? skip cell...
+ if (mFmFlags[idx] == FlagInited)
+ return;
+
+ // discard by source time now , TODO do instead before calling all addtolists?
+ Real srct = mLevelset(src);
+ if (COMP::compare(srct, mMaxTime))
+ return;
+
+ Real ttime = calculateDistance(p);
+
+ // remove old entry if larger
+ bool found = false;
+
+ Real oldt = mLevelset[idx];
+ if (mFmFlags[idx] == FlagIsOnHeap) {
+ found = true;
+ // is old time better?
+ if (COMP::compare(ttime, oldt))
+ return;
+ }
+
+ // update field
+ mFmFlags[idx] = FlagIsOnHeap;
+ mLevelset[idx] = ttime;
+ // debug info std::cout<<"set "<< idx <<","<< ttime <<"\n";
+
+ if (mVelTransport.isInitialized())
+ mVelTransport.transpTouch(p.x, p.y, p.z, mWeights, ttime);
+
+ // the following adds entries to the heap of active cells
+ // current: (!found) , previous: always add, might lead to duplicate
+ // entries, but the earlier will be handled earlier, the second one will skip to the
+ // FlagInited check above
+ if (!found) {
+ // add list entry with source value
+ COMP entry;
+ entry.p = p;
+ entry.time = mLevelset[idx];
+
+ mHeap.push(entry);
+ // debug info std::cout<<"push "<< entry.p <<","<< entry.time <<"\n";
+ }
+}
+
+//! Enforce delta_phi = 0 on boundaries
+
+// Kernel: copy the nearest interior value into every domain-boundary cell of
+// 'phi' (zero gradient across the border; z faces only in 3D). Note run() is
+// a plain serial triple loop, not a tbb parallel_for like the other kernels.
+struct SetLevelsetBoundaries : public KernelBase {
+ SetLevelsetBoundaries(Grid<Real> &phi) : KernelBase(&phi, 0), phi(phi)
+ {
+ runMessage();
+ run();
+ }
+ // per-cell op: only cells on a face of the grid are modified
+ inline void op(int i, int j, int k, Grid<Real> &phi)
+ {
+ if (i == 0)
+ phi(i, j, k) = phi(1, j, k);
+ if (i == maxX - 1)
+ phi(i, j, k) = phi(i - 1, j, k);
+
+ if (j == 0)
+ phi(i, j, k) = phi(i, 1, k);
+ if (j == maxY - 1)
+ phi(i, j, k) = phi(i, j - 1, k);
+
+ if (phi.is3D()) {
+ if (k == 0)
+ phi(i, j, k) = phi(i, j, 1);
+ if (k == maxZ - 1)
+ phi(i, j, k) = phi(i, j, k - 1);
+ }
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return phi;
+ }
+ typedef Grid<Real> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel SetLevelsetBoundaries ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void run()
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ for (int k = minZ; k < maxZ; k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, phi);
+ }
+ Grid<Real> &phi;
+};
+
+/*****************************************************************************/
+//! Walk...
+// Main marching loop: repeatedly pop the best cell from the heap, finalize it
+// (FlagInited), and relax its 4 (2D) / 6 (3D) axis neighbors via addToList().
+// Afterwards the levelset boundary layer is refreshed with zero-gradient values.
+template<class COMP, int TDIR> void FastMarch<COMP, TDIR>::performMarching()
+{
+ mReheapVal = 0.0;
+ while (mHeap.size() > 0) {
+
+ const COMP &ce = mHeap.top();
+ Vec3i p = ce.p;
+ mFmFlags(p) = FlagInited;
+ mHeap.pop();
+ // debug info std::cout<<"pop "<< ce.p <<","<< ce.time <<"\n";
+
+ addToList(Vec3i(p.x - 1, p.y, p.z), p);
+ addToList(Vec3i(p.x + 1, p.y, p.z), p);
+ addToList(Vec3i(p.x, p.y - 1, p.z), p);
+ addToList(Vec3i(p.x, p.y + 1, p.z), p);
+ if (mLevelset.is3D()) {
+ addToList(Vec3i(p.x, p.y, p.z - 1), p);
+ addToList(Vec3i(p.x, p.y, p.z + 1), p);
+ }
+ }
+
+ // set boundary for plain array
+ SetLevelsetBoundaries setls(mLevelset);
+ setls.getArg0(); // get rid of compiler warning...
+}
+
+// explicit instantiation
+template class FastMarch<FmHeapEntryIn, -1>;
+template class FastMarch<FmHeapEntryOut, +1>;
+
+/*****************************************************************************/
+// simpler extrapolation functions (primarily for FLIP)
+
+// Kernel: one breadth-first extrapolation layer for MAC component c. A cell
+// whose tmp marker is still 0 receives the average component value of all
+// axis neighbors marked with the current layer distance d, and is marked d+1.
+// Called once per layer by extrapolateMACSimple().
+struct knExtrapolateMACSimple : public KernelBase {
+ knExtrapolateMACSimple(MACGrid &vel, int distance, Grid<int> &tmp, const int d, const int c)
+ : KernelBase(&vel, 1), vel(vel), distance(distance), tmp(tmp), d(d), c(c)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ MACGrid &vel,
+ int distance,
+ Grid<int> &tmp,
+ const int d,
+ const int c) const
+ {
+ // 6-neighborhood offsets; only the first 2*dim are used in 2D
+ static const Vec3i nb[6] = {Vec3i(1, 0, 0),
+ Vec3i(-1, 0, 0),
+ Vec3i(0, 1, 0),
+ Vec3i(0, -1, 0),
+ Vec3i(0, 0, 1),
+ Vec3i(0, 0, -1)};
+ const int dim = (vel.is3D() ? 3 : 2);
+
+ // skip cells that already carry a value from an earlier layer
+ if (tmp(i, j, k) != 0)
+ return;
+
+ // copy from initialized neighbors
+ Vec3i p(i, j, k);
+ int nbs = 0;
+ Real avgVel = 0.;
+ for (int n = 0; n < 2 * dim; ++n) {
+ if (tmp(p + nb[n]) == d) {
+ // vel(p)[c] = (c+1.)*0.1;
+ avgVel += vel(p + nb[n])[c];
+ nbs++;
+ }
+ }
+
+ if (nbs > 0) {
+ tmp(p) = d + 1;
+ vel(p)[c] = avgVel / nbs;
+ }
+ }
+ inline MACGrid &getArg0()
+ {
+ return vel;
+ }
+ typedef MACGrid type0;
+ inline int &getArg1()
+ {
+ return distance;
+ }
+ typedef int type1;
+ inline Grid<int> &getArg2()
+ {
+ return tmp;
+ }
+ typedef Grid<int> type2;
+ inline const int &getArg3()
+ {
+ return d;
+ }
+ typedef int type3;
+ inline const int &getArg4()
+ {
+ return c;
+ }
+ typedef int type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel knExtrapolateMACSimple ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, vel, distance, tmp, d, c);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, vel, distance, tmp, d, c);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ MACGrid &vel;
+ int distance;
+ Grid<int> &tmp;
+ const int d;
+ const int c;
+};
+//! copy velocity into domain side, note - don't read & write same grid, hence velTmp copy
+
+// Kernel: for every cell on a domain face, copy the velocity of the adjacent
+// interior cell from velTmp (averaging when the cell sits on an edge/corner,
+// c > 1). At obstacle cells the component pointing out of the domain on that
+// face is clamped to zero. Reads velTmp, writes vel - never the same grid.
+struct knExtrapolateIntoBnd : public KernelBase {
+ knExtrapolateIntoBnd(FlagGrid &flags, MACGrid &vel, const MACGrid &velTmp)
+ : KernelBase(&flags, 0), flags(flags), vel(vel), velTmp(velTmp)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, FlagGrid &flags, MACGrid &vel, const MACGrid &velTmp) const
+ {
+ int c = 0;
+ Vec3 v(0, 0, 0);
+ const bool isObs = flags.isObstacle(i, j, k);
+ if (i == 0) {
+ v = velTmp(i + 1, j, k);
+ if (isObs && v[0] < 0.)
+ v[0] = 0.;
+ c++;
+ }
+ else if (i == (flags.getSizeX() - 1)) {
+ v = velTmp(i - 1, j, k);
+ if (isObs && v[0] > 0.)
+ v[0] = 0.;
+ c++;
+ }
+ if (j == 0) {
+ v = velTmp(i, j + 1, k);
+ if (isObs && v[1] < 0.)
+ v[1] = 0.;
+ c++;
+ }
+ else if (j == (flags.getSizeY() - 1)) {
+ v = velTmp(i, j - 1, k);
+ if (isObs && v[1] > 0.)
+ v[1] = 0.;
+ c++;
+ }
+ if (flags.is3D()) {
+ if (k == 0) {
+ v = velTmp(i, j, k + 1);
+ if (isObs && v[2] < 0.)
+ v[2] = 0.;
+ c++;
+ }
+ else if (k == (flags.getSizeZ() - 1)) {
+ v = velTmp(i, j, k - 1);
+ if (isObs && v[2] > 0.)
+ v[2] = 0.;
+ c++;
+ }
+ }
+ // c counts how many faces this cell touches; interior cells (c==0) untouched
+ if (c > 0) {
+ vel(i, j, k) = v / (Real)c;
+ }
+ }
+ inline FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline const MACGrid &getArg2()
+ {
+ return velTmp;
+ }
+ typedef MACGrid type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knExtrapolateIntoBnd ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, vel, velTmp);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, vel, velTmp);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ FlagGrid &flags;
+ MACGrid &vel;
+ const MACGrid &velTmp;
+};
+
+// todo - use getGradient instead?
+// Central-difference gradient of 'data' at (i,j,k); the indices are clamped
+// to the valid interior first. In 2D kd == 0 makes the z component collapse
+// to data(i,j,k) - data(i,j,k) == 0. The result is NOT normalized.
+inline Vec3 getNormal(const Grid<Real> &data, int i, int j, int k)
+{
+ if (i > data.getSizeX() - 2)
+ i = data.getSizeX() - 2;
+ if (i < 1)
+ i = 1;
+ if (j > data.getSizeY() - 2)
+ j = data.getSizeY() - 2;
+ if (j < 1)
+ j = 1;
+
+ int kd = 1;
+ if (data.is3D()) {
+ if (k > data.getSizeZ() - 2)
+ k = data.getSizeZ() - 2;
+ if (k < 1)
+ k = 1;
+ }
+ else {
+ kd = 0;
+ }
+
+ return Vec3(data(i + 1, j, k) - data(i - 1, j, k),
+ data(i, j + 1, k) - data(i, j - 1, k),
+ data(i, j, k + kd) - data(i, j, k - kd));
+}
+
+// Kernel: inside a band of width maxDist below the surface (phi in
+// [-maxDist, 0]), remove the component of the velocity along the levelset
+// gradient when it points against it (dot(n, v) < 0), keeping only the
+// remaining (tangential) motion near the obstacle surface.
+struct knUnprojectNormalComp : public KernelBase {
+ knUnprojectNormalComp(FlagGrid &flags, MACGrid &vel, Grid<Real> &phi, Real maxDist)
+ : KernelBase(&flags, 1), flags(flags), vel(vel), phi(phi), maxDist(maxDist)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(
+ int i, int j, int k, FlagGrid &flags, MACGrid &vel, Grid<Real> &phi, Real maxDist) const
+ {
+ // apply inside, within range near obstacle surface
+ if (phi(i, j, k) > 0. || phi(i, j, k) < -maxDist)
+ return;
+
+ Vec3 n = getNormal(phi, i, j, k);
+ Vec3 v = vel(i, j, k);
+ if (dot(n, v) < 0.) {
+ normalize(n);
+ Real l = dot(n, v);
+ vel(i, j, k) -= n * l;
+ }
+ }
+ inline FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline Grid<Real> &getArg2()
+ {
+ return phi;
+ }
+ typedef Grid<Real> type2;
+ inline Real &getArg3()
+ {
+ return maxDist;
+ }
+ typedef Real type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel knUnprojectNormalComp ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, phi, maxDist);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, phi, maxDist);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ FlagGrid &flags;
+ MACGrid &vel;
+ Grid<Real> &phi;
+ Real maxDist;
+};
+// a simple extrapolation step , used for cases where there's no levelset
+// (note, less accurate than fast marching extrapolation.)
+// into obstacle is a special mode for second order obstacle boundaries (extrapolating
+// only fluid velocities, not those at obstacles)
+
+// Layered extrapolation of MAC velocities into empty cells (see comment
+// block above). Per component: seed marker grid 'tmp' at faces adjacent to
+// fluid (optionally excluding obstacles when intoObs is set), then run
+// knExtrapolateMACSimple once per layer up to 'distance'. If phiObs is given,
+// normal components near the obstacle are removed; finally velocities are
+// copied into the domain-side boundary cells.
+void extrapolateMACSimple(FlagGrid &flags,
+ MACGrid &vel,
+ int distance = 4,
+ LevelsetGrid *phiObs = NULL,
+ bool intoObs = false)
+{
+ Grid<int> tmp(flags.getParent());
+ int dim = (flags.is3D() ? 3 : 2);
+
+ for (int c = 0; c < dim; ++c) {
+ Vec3i dir = 0;
+ dir[c] = 1;
+ tmp.clear();
+
+ // remove all fluid cells (not touching obstacles)
+ FOR_IJK_BND(flags, 1)
+ {
+ Vec3i p(i, j, k);
+ bool mark = false;
+ if (!intoObs) {
+ if (flags.isFluid(p) || flags.isFluid(p - dir))
+ mark = true;
+ }
+ else {
+ if ((flags.isFluid(p) || flags.isFluid(p - dir)) && (!flags.isObstacle(p)) &&
+ (!flags.isObstacle(p - dir)))
+ mark = true;
+ }
+
+ if (mark)
+ tmp(p) = 1;
+ }
+
+ // extrapolate for distance
+ for (int d = 1; d < 1 + distance; ++d) {
+ knExtrapolateMACSimple(vel, distance, tmp, d, c);
+ } // d
+ }
+
+ if (phiObs) {
+ knUnprojectNormalComp(flags, vel, *phiObs, distance);
+ }
+
+ // copy tangential values into sides of domain
+ MACGrid velTmp(flags.getParent());
+ velTmp.copyFrom(vel);
+ knExtrapolateIntoBnd(flags, vel, velTmp);
+}
+// Auto-generated Python wrapper for extrapolateMACSimple(): unpacks keyword
+// arguments (defaults matching the C++ signature), invokes the function, and
+// returns Python None; exceptions are reported back via pbSetError. The
+// Pb::Register object below exposes the wrapper in the script registry.
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "extrapolateMACSimple", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ int distance = _args.getOpt<int>("distance", 2, 4, &_lock);
+ LevelsetGrid *phiObs = _args.getPtrOpt<LevelsetGrid>("phiObs", 3, NULL, &_lock);
+ bool intoObs = _args.getOpt<bool>("intoObs", 4, false, &_lock);
+ _retval = getPyNone();
+ extrapolateMACSimple(flags, vel, distance, phiObs, intoObs);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "extrapolateMACSimple", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("extrapolateMACSimple", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_extrapolateMACSimple("", "extrapolateMACSimple", _W_0);
+extern "C" {
+void PbRegister_extrapolateMACSimple()
+{
+ KEEP_UNUSED(_RP_extrapolateMACSimple);
+}
+}
+
+// Kernel: same layered scheme as knExtrapolateMACSimple, but the per-component
+// marker lives in weight(p)[c] instead of a separate int grid: a cell with
+// weight 0 takes the average of neighbors whose weight equals the current
+// layer d and is marked d+1 (the weight values are destroyed in the process).
+struct knExtrapolateMACFromWeight : public KernelBase {
+ knExtrapolateMACFromWeight(
+ MACGrid &vel, Grid<Vec3> &weight, int distance, const int d, const int c)
+ : KernelBase(&vel, 1), vel(vel), weight(weight), distance(distance), d(d), c(c)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ MACGrid &vel,
+ Grid<Vec3> &weight,
+ int distance,
+ const int d,
+ const int c) const
+ {
+ // 6-neighborhood offsets; only the first 2*dim are used in 2D
+ static const Vec3i nb[6] = {Vec3i(1, 0, 0),
+ Vec3i(-1, 0, 0),
+ Vec3i(0, 1, 0),
+ Vec3i(0, -1, 0),
+ Vec3i(0, 0, 1),
+ Vec3i(0, 0, -1)};
+ const int dim = (vel.is3D() ? 3 : 2);
+
+ if (weight(i, j, k)[c] != 0)
+ return;
+
+ // copy from initialized neighbors
+ Vec3i p(i, j, k);
+ int nbs = 0;
+ Real avgVel = 0.;
+ for (int n = 0; n < 2 * dim; ++n) {
+ if (weight(p + nb[n])[c] == d) {
+ avgVel += vel(p + nb[n])[c];
+ nbs++;
+ }
+ }
+
+ if (nbs > 0) {
+ weight(p)[c] = d + 1;
+ vel(p)[c] = avgVel / nbs;
+ }
+ }
+ inline MACGrid &getArg0()
+ {
+ return vel;
+ }
+ typedef MACGrid type0;
+ inline Grid<Vec3> &getArg1()
+ {
+ return weight;
+ }
+ typedef Grid<Vec3> type1;
+ inline int &getArg2()
+ {
+ return distance;
+ }
+ typedef int type2;
+ inline const int &getArg3()
+ {
+ return d;
+ }
+ typedef int type3;
+ inline const int &getArg4()
+ {
+ return c;
+ }
+ typedef int type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel knExtrapolateMACFromWeight ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, vel, weight, distance, d, c);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, vel, weight, distance, d, c);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ MACGrid &vel;
+ Grid<Vec3> &weight;
+ int distance;
+ const int d;
+ const int c;
+};
+
+// same as extrapolateMACSimple, but uses weight vec3 grid instead of flags to check
+// for valid values (to be used in combination with mapPartsToMAC)
+// note - the weight grid values are destroyed! the function is necessary due to discrepancies
+// between velocity mapping on surface-levelset / fluid-flag creation. With this
+// extrapolation we make sure the fluid region is covered by initial velocities
+
+// Layered extrapolation driven by a weight grid (see comment block above):
+// per component, clamp positive weights to 1 as the initialized seed, then
+// run knExtrapolateMACFromWeight once per layer up to 'distance'. The weight
+// grid is consumed as the layer marker and is not restored afterwards.
+void extrapolateMACFromWeight(MACGrid &vel, Grid<Vec3> &weight, int distance = 2)
+{
+ const int dim = (vel.is3D() ? 3 : 2);
+
+ for (int c = 0; c < dim; ++c) {
+ Vec3i dir = 0;
+ dir[c] = 1;
+
+ // reset weight values to 0 (uninitialized), and 1 (initialized inner values)
+ FOR_IJK_BND(vel, 1)
+ {
+ Vec3i p(i, j, k);
+ if (weight(p)[c] > 0.)
+ weight(p)[c] = 1.0;
+ }
+
+ // extrapolate for distance
+ for (int d = 1; d < 1 + distance; ++d) {
+ knExtrapolateMACFromWeight(vel, weight, distance, d, c);
+ } // d
+ }
+}
+//! Python binding wrapper generated by the MantaFlow preprocessor.
+//! Unpacks the Python arguments (vel, weight, optional distance=2), runs
+//! extrapolateMACFromWeight, and returns Python None. Exceptions are converted
+//! into a Python-side error via pbSetError.
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "extrapolateMACFromWeight", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      MACGrid &vel = *_args.getPtr<MACGrid>("vel", 0, &_lock);
+      Grid<Vec3> &weight = *_args.getPtr<Grid<Vec3>>("weight", 1, &_lock);
+      int distance = _args.getOpt<int>("distance", 2, 2, &_lock);
+      _retval = getPyNone();
+      extrapolateMACFromWeight(vel, weight, distance);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "extrapolateMACFromWeight", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("extrapolateMACFromWeight", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Pb registry so Python scripts can call it.
+static const Pb::Register _RP_extrapolateMACFromWeight("", "extrapolateMACFromWeight", _W_1);
+extern "C" {
+void PbRegister_extrapolateMACFromWeight()
+{
+  KEEP_UNUSED(_RP_extrapolateMACFromWeight);
+}
+}
+
+// simple extrapolation functions for levelsets
+
+// 6-connected neighbor offsets (+x, -x, +y, -y, +z, -z); for 2D grids only the
+// first 2*dim entries are used by the extrapolation kernels below.
+static const Vec3i nb[6] = {Vec3i(1, 0, 0),
+                            Vec3i(-1, 0, 0),
+                            Vec3i(0, 1, 0),
+                            Vec3i(0, -1, 0),
+                            Vec3i(0, 0, 1),
+                            Vec3i(0, 0, -1)};
+
+//! TBB kernel (generated from a KERNEL macro): extrapolates grid values by one
+//! layer. Cells with tmp==0 that touch a neighbor with tmp==d receive the
+//! neighbor average plus `direction`, and are marked tmp=d+1. The constructor
+//! immediately runs the kernel over the interior (1-cell boundary excluded).
+template<class S> struct knExtrapolateLsSimple : public KernelBase {
+  knExtrapolateLsSimple(Grid<S> &val, int distance, Grid<int> &tmp, const int d, S direction)
+      : KernelBase(&val, 1), val(val), distance(distance), tmp(tmp), d(d), direction(direction)
+  {
+    runMessage();
+    run();
+  }
+  // Per-cell operation; NOTE(review): the `distance` parameter is carried but
+  // never read here — it only mirrors the caller's loop bound.
+  inline void op(int i,
+                 int j,
+                 int k,
+                 Grid<S> &val,
+                 int distance,
+                 Grid<int> &tmp,
+                 const int d,
+                 S direction) const
+  {
+    const int dim = (val.is3D() ? 3 : 2);
+    // skip cells that are already initialized or were touched by a prior layer
+    if (tmp(i, j, k) != 0)
+      return;
+
+    // copy from initialized neighbors
+    Vec3i p(i, j, k);
+    int nbs = 0;
+    S avg(0.);
+    for (int n = 0; n < 2 * dim; ++n) {
+      if (tmp(p + nb[n]) == d) {
+        avg += val(p + nb[n]);
+        nbs++;
+      }
+    }
+
+    // average the valid neighbors and offset by `direction`
+    // (levelsets pass +/-1 per layer; vec3 extrapolation passes Vec3(0.))
+    if (nbs > 0) {
+      tmp(p) = d + 1;
+      val(p) = avg / nbs + direction;
+    }
+  }
+  // Generated argument accessors (used by the MantaFlow registry/introspection).
+  inline Grid<S> &getArg0()
+  {
+    return val;
+  }
+  typedef Grid<S> type0;
+  inline int &getArg1()
+  {
+    return distance;
+  }
+  typedef int type1;
+  inline Grid<int> &getArg2()
+  {
+    return tmp;
+  }
+  typedef Grid<int> type2;
+  inline const int &getArg3()
+  {
+    return d;
+  }
+  typedef int type3;
+  inline S &getArg4()
+  {
+    return direction;
+  }
+  typedef S type4;
+  void runMessage()
+  {
+    debMsg("Executing kernel knExtrapolateLsSimple ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB body: 3D grids parallelize over k slices, 2D grids over j rows.
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    const int _maxX = maxX;
+    const int _maxY = maxY;
+    if (maxZ > 1) {
+      for (int k = __r.begin(); k != (int)__r.end(); k++)
+        for (int j = 1; j < _maxY; j++)
+          for (int i = 1; i < _maxX; i++)
+            op(i, j, k, val, distance, tmp, d, direction);
+    }
+    else {
+      const int k = 0;
+      for (int j = __r.begin(); j != (int)__r.end(); j++)
+        for (int i = 1; i < _maxX; i++)
+          op(i, j, k, val, distance, tmp, d, direction);
+    }
+  }
+  void run()
+  {
+    if (maxZ > 1)
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+    else
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+  }
+  Grid<S> &val;
+  int distance;
+  Grid<int> &tmp;
+  const int d;
+  S direction;
+};
+
+//! TBB kernel: assigns `distance` to every interior cell that was never
+//! reached by the extrapolation (tmp==0). Used as a final "fill" pass after
+//! the layered knExtrapolateLsSimple sweeps.
+template<class S> struct knSetRemaining : public KernelBase {
+  knSetRemaining(Grid<S> &phi, Grid<int> &tmp, S distance)
+      : KernelBase(&phi, 1), phi(phi), tmp(tmp), distance(distance)
+  {
+    runMessage();
+    run();
+  }
+  inline void op(int i, int j, int k, Grid<S> &phi, Grid<int> &tmp, S distance) const
+  {
+    // only untouched cells are overwritten
+    if (tmp(i, j, k) != 0)
+      return;
+    phi(i, j, k) = distance;
+  }
+  // Generated argument accessors (used by the MantaFlow registry/introspection).
+  inline Grid<S> &getArg0()
+  {
+    return phi;
+  }
+  typedef Grid<S> type0;
+  inline Grid<int> &getArg1()
+  {
+    return tmp;
+  }
+  typedef Grid<int> type1;
+  inline S &getArg2()
+  {
+    return distance;
+  }
+  typedef S type2;
+  void runMessage()
+  {
+    debMsg("Executing kernel knSetRemaining ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB body: 3D grids parallelize over k slices, 2D grids over j rows.
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    const int _maxX = maxX;
+    const int _maxY = maxY;
+    if (maxZ > 1) {
+      for (int k = __r.begin(); k != (int)__r.end(); k++)
+        for (int j = 1; j < _maxY; j++)
+          for (int i = 1; i < _maxX; i++)
+            op(i, j, k, phi, tmp, distance);
+    }
+    else {
+      const int k = 0;
+      for (int j = __r.begin(); j != (int)__r.end(); j++)
+        for (int i = 1; i < _maxX; i++)
+          op(i, j, k, phi, tmp, distance);
+    }
+  }
+  void run()
+  {
+    if (maxZ > 1)
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+    else
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+  }
+  Grid<S> &phi;
+  Grid<int> &tmp;
+  S distance;
+};
+
+//! Simple levelset extrapolation: marches phi values outward (default) or
+//! inward (inside=true) for `distance` cells, layer by layer. The tmp grid
+//! tracks layer indices (1 = seed, 2 = first ring, d+1 = ring d); any cell
+//! never reached is clamped to +/-(distance + 2).
+void extrapolateLsSimple(Grid<Real> &phi, int distance = 4, bool inside = false)
+{
+  Grid<int> tmp(phi.getParent());
+  tmp.clear();
+  const int dim = (phi.is3D() ? 3 : 2);
+
+  // by default, march outside: seed from the phi<0 (inside) region and grow
+  // values by +1 per layer
+  Real direction = 1.;
+  if (!inside) {
+    // mark all inside
+    FOR_IJK_BND(phi, 1)
+    {
+      if (phi(i, j, k) < 0.) {
+        tmp(i, j, k) = 1;
+      }
+    }
+  }
+  else {
+    // march inward: seed from phi>0 and decrease values per layer
+    direction = -1.;
+    FOR_IJK_BND(phi, 1)
+    {
+      if (phi(i, j, k) > 0.) {
+        tmp(i, j, k) = 1;
+      }
+    }
+  }
+  // + first layer around the seed region (marked 2); `n = 2 * dim` breaks out
+  // of the neighbor loop as soon as one seeded neighbor is found
+  FOR_IJK_BND(phi, 1)
+  {
+    Vec3i p(i, j, k);
+    if (tmp(p))
+      continue;
+    for (int n = 0; n < 2 * dim; ++n) {
+      if (tmp(p + nb[n]) == 1) {
+        tmp(i, j, k) = 2;
+        n = 2 * dim;
+      }
+    }
+  }
+
+  // extrapolate for distance, one ring per iteration
+  for (int d = 2; d < 1 + distance; ++d) {
+    knExtrapolateLsSimple<Real>(phi, distance, tmp, d, direction);
+  }
+
+  // set all remaining cells to max
+  knSetRemaining<Real>(phi, tmp, Real(direction * (distance + 2)));
+}
+//! Python binding wrapper generated by the MantaFlow preprocessor.
+//! Unpacks (phi, optional distance=4, optional inside=false), runs
+//! extrapolateLsSimple, returns Python None; errors propagate via pbSetError.
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "extrapolateLsSimple", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      Grid<Real> &phi = *_args.getPtr<Grid<Real>>("phi", 0, &_lock);
+      int distance = _args.getOpt<int>("distance", 1, 4, &_lock);
+      bool inside = _args.getOpt<bool>("inside", 2, false, &_lock);
+      _retval = getPyNone();
+      extrapolateLsSimple(phi, distance, inside);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "extrapolateLsSimple", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("extrapolateLsSimple", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Pb registry so Python scripts can call it.
+static const Pb::Register _RP_extrapolateLsSimple("", "extrapolateLsSimple", _W_2);
+extern "C" {
+void PbRegister_extrapolateLsSimple()
+{
+  KEEP_UNUSED(_RP_extrapolateLsSimple);
+}
+}
+
+// extrapolate centered vec3 values from marked fluid cells
+
+//! Extrapolate centered Vec3 values outward (or inward) from the fluid region
+//! defined by the phi levelset. Reuses knExtrapolateLsSimple with a zero
+//! direction offset (pure neighbor averaging); unreached cells are zeroed.
+void extrapolateVec3Simple(Grid<Vec3> &vel, Grid<Real> &phi, int distance = 4, bool inside = false)
+{
+  Grid<int> tmp(vel.getParent());
+  tmp.clear();
+  const int dim = (vel.is3D() ? 3 : 2);
+
+  // mark initial cells, by default, march outside
+  if (!inside) {
+    // mark all inside
+    FOR_IJK_BND(phi, 1)
+    {
+      if (phi(i, j, k) < 0.) {
+        tmp(i, j, k) = 1;
+      }
+    }
+  }
+  else {
+    FOR_IJK_BND(phi, 1)
+    {
+      if (phi(i, j, k) > 0.) {
+        tmp(i, j, k) = 1;
+      }
+    }
+  }
+  // + first layer next to initial cells (marked 2); `n = 2 * dim` exits the
+  // neighbor loop early once a seeded neighbor is found
+  FOR_IJK_BND(vel, 1)
+  {
+    Vec3i p(i, j, k);
+    if (tmp(p))
+      continue;
+    for (int n = 0; n < 2 * dim; ++n) {
+      if (tmp(p + nb[n]) == 1) {
+        tmp(i, j, k) = 2;
+        n = 2 * dim;
+      }
+    }
+  }
+
+  // grow one ring per iteration, averaging valid neighbors
+  for (int d = 2; d < 1 + distance; ++d) {
+    knExtrapolateLsSimple<Vec3>(vel, distance, tmp, d, Vec3(0.));
+  }
+  // zero out everything that was never reached
+  knSetRemaining<Vec3>(vel, tmp, Vec3(0.));
+}
+//! Python binding wrapper generated by the MantaFlow preprocessor.
+//! Unpacks (vel, phi, optional distance=4, optional inside=false), runs
+//! extrapolateVec3Simple, returns Python None; errors propagate via pbSetError.
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "extrapolateVec3Simple", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      Grid<Vec3> &vel = *_args.getPtr<Grid<Vec3>>("vel", 0, &_lock);
+      Grid<Real> &phi = *_args.getPtr<Grid<Real>>("phi", 1, &_lock);
+      int distance = _args.getOpt<int>("distance", 2, 4, &_lock);
+      bool inside = _args.getOpt<bool>("inside", 3, false, &_lock);
+      _retval = getPyNone();
+      extrapolateVec3Simple(vel, phi, distance, inside);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "extrapolateVec3Simple", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("extrapolateVec3Simple", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Pb registry so Python scripts can call it.
+static const Pb::Register _RP_extrapolateVec3Simple("", "extrapolateVec3Simple", _W_3);
+extern "C" {
+void PbRegister_extrapolateVec3Simple()
+{
+  KEEP_UNUSED(_RP_extrapolateVec3Simple);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/fastmarch.h b/extern/mantaflow/preprocessed/fastmarch.h
new file mode 100644
index 00000000000..8d0511cc06d
--- /dev/null
+++ b/extern/mantaflow/preprocessed/fastmarch.h
@@ -0,0 +1,241 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Fast marching
+ *
+ ******************************************************************************/
+
+#ifndef _FASTMARCH_H
+#define _FASTMARCH_H
+
+#include <queue>
+#include "levelset.h"
+
+namespace Manta {
+
+//! Fast marching. Transport certain values
+// This class exists in two versions: for scalar, and for vector values - the only difference are
+// flag checks i transpTouch (for simplicity in separate classes)
+
+//! Weighted average of the (up to 6) face neighbors of cell (x,y,z).
+//! `weights` must hold 6 entries in +x,-x,+y,-y,+z,-z order; only entries > 0
+//! contribute, and the z pair is consulted only for 3D grids. Callers are
+//! expected to pass weights that sum to 1 — not checked here.
+template<class GRID, class T>
+inline T fmInterpolateNeighbors(GRID *mpVal, int x, int y, int z, Real *weights)
+{
+  T val(0.);
+  if (weights[0] > 0.0)
+    val += mpVal->get(x + 1, y + 0, z + 0) * weights[0];
+  if (weights[1] > 0.0)
+    val += mpVal->get(x - 1, y + 0, z + 0) * weights[1];
+  if (weights[2] > 0.0)
+    val += mpVal->get(x + 0, y + 1, z + 0) * weights[2];
+  if (weights[3] > 0.0)
+    val += mpVal->get(x + 0, y - 1, z + 0) * weights[3];
+  if (mpVal->is3D()) {
+    if (weights[4] > 0.0)
+      val += mpVal->get(x + 0, y + 0, z + 1) * weights[4];
+    if (weights[5] > 0.0)
+      val += mpVal->get(x + 0, y + 0, z - 1) * weights[5];
+  }
+  return val;
+}
+
+//! Transports scalar values alongside the fast march.
+//! Uninitialized (mpVal == 0) instances are inert: transpTouch is a no-op.
+template<class GRID, class T> class FmValueTransportScalar {
+ public:
+  FmValueTransportScalar() : mpVal(0), mpFlags(0){};
+  ~FmValueTransportScalar(){};
+  //! Attach the value grid and flag grid; enables transpTouch.
+  void initMarching(GRID *val, FlagGrid *flags)
+  {
+    mpVal = val;
+    mpFlags = flags;
+  }
+  inline bool isInitialized()
+  {
+    return mpVal != 0;
+  }
+
+  //! cell is touched by marching from source cell
+  //! Writes the neighbor-interpolated value into empty cells only; `time` is
+  //! accepted for interface symmetry but not used here.
+  inline void transpTouch(int x, int y, int z, Real *weights, Real time)
+  {
+    if (!mpVal || !mpFlags->isEmpty(x, y, z))
+      return;
+    T val = fmInterpolateNeighbors<GRID, T>(mpVal, x, y, z, weights);
+    (*mpVal)(x, y, z) = val;
+  };
+
+ protected:
+  GRID *mpVal;    // transported value grid (non-owning)
+  FlagGrid *mpFlags;  // cell flags used to restrict writes to empty cells (non-owning)
+};
+
+//! Transports Vec3 (MAC-style) values alongside the fast march.
+//! Differs from the scalar variant by writing each velocity component only
+//! when the component's adjacent lower cell is also empty (MAC face layout).
+template<class GRID, class T> class FmValueTransportVec3 {
+ public:
+  FmValueTransportVec3() : mpVal(0), mpFlags(0){};
+  ~FmValueTransportVec3(){};
+  inline bool isInitialized()
+  {
+    return mpVal != 0;
+  }
+  //! Attach the value grid and flag grid; enables transpTouch.
+  void initMarching(GRID *val, const FlagGrid *flags)
+  {
+    mpVal = val;
+    mpFlags = flags;
+  }
+
+  //! cell is touched by marching from source cell
+  inline void transpTouch(int x, int y, int z, Real *weights, Real time)
+  {
+    if (!mpVal || !mpFlags->isEmpty(x, y, z))
+      return;
+
+    T val = fmInterpolateNeighbors<GRID, T>(mpVal, x, y, z, weights);
+
+    // set velocity components if adjacent is empty
+    if (mpFlags->isEmpty(x - 1, y, z))
+      (*mpVal)(x, y, z).x = val.x;
+    if (mpFlags->isEmpty(x, y - 1, z))
+      (*mpVal)(x, y, z).y = val.y;
+    if (mpVal->is3D()) {
+      if (mpFlags->isEmpty(x, y, z - 1))
+        (*mpVal)(x, y, z).z = val.z;
+    }
+  };
+
+ protected:
+  GRID *mpVal;              // transported velocity grid (non-owning)
+  const FlagGrid *mpFlags;  // cell flags, read-only (non-owning)
+};
+
+//! Heap entry for outward fast marching (smallest time first).
+//! operator< is inverted (returns "greater") because std::priority_queue is a
+//! max-heap; position components break ties to keep ordering deterministic.
+class FmHeapEntryOut {
+ public:
+  Vec3i p;
+  // quick time access for sorting
+  Real time;
+  static inline bool compare(const Real x, const Real y)
+  {
+    return x > y;
+  }
+
+  inline bool operator<(const FmHeapEntryOut &o) const
+  {
+    const Real d = fabs((time) - ((o.time)));
+    if (d > 0.)
+      return (time) > ((o.time));
+    if (p.z != o.p.z)
+      return p.z > o.p.z;
+    if (p.y != o.p.y)
+      return p.y > o.p.y;
+    return p.x > o.p.x;
+  };
+};
+
+//! Heap entry for inward fast marching — mirror of FmHeapEntryOut with the
+//! comparison direction reversed (largest time first out of the max-heap).
+class FmHeapEntryIn {
+ public:
+  Vec3i p;
+  // quick time access for sorting
+  Real time;
+  static inline bool compare(const Real x, const Real y)
+  {
+    return x < y;
+  }
+
+  inline bool operator<(const FmHeapEntryIn &o) const
+  {
+    const Real d = fabs((time) - ((o.time)));
+    if (d > 0.)
+      return (time) < ((o.time));
+    if (p.z != o.p.z)
+      return p.z < o.p.z;
+    if (p.y != o.p.y)
+      return p.y < o.p.y;
+    return p.x < o.p.x;
+  };
+};
+
+//! fast marching algorithm wrapper class
+//! T is the heap entry type (FmHeapEntryOut / FmHeapEntryIn), TDIR the march
+//! direction. Optionally extrapolates a MAC velocity grid along the way via
+//! mVelTransport. Implementations of the declared methods live in the
+//! corresponding .cpp (not visible in this header).
+template<class T, int TDIR> class FastMarch {
+
+ public:
+  // MSVC doesn't allow static const variables in template classes
+  //! Sentinel marking a cell with no valid arrival time yet.
+  static inline Real InvalidTime()
+  {
+    return -1000;
+  }
+  //! Offset used by the phi<->time conversions below.
+  static inline Real InvtOffset()
+  {
+    return 500;
+  }
+
+  enum SpecialValues { FlagInited = 1, FlagIsOnHeap = 2 };
+
+  FastMarch(const FlagGrid &flags,
+            Grid<int> &fmFlags,
+            Grid<Real> &levelset,
+            Real maxTime,
+            MACGrid *velTransport = NULL);
+  ~FastMarch()
+  {
+  }
+
+  //! advect level set function with given velocity */
+  void performMarching();
+
+  //! test value for invalidity
+  inline bool isInvalid(Real v) const
+  {
+    return (v <= InvalidTime());
+  }
+
+  void addToList(const Vec3i &p, const Vec3i &src);
+
+  //! convert phi to time value
+  inline Real phi2time(Real phival)
+  {
+    return (phival - InvalidTime() + InvtOffset()) * -1.0;
+  }
+
+  //! ... and back
+  inline Real time2phi(Real tval)
+  {
+    return (InvalidTime() - InvtOffset() - tval);
+  }
+
+  //! direct levelset accessor (no bounds handling beyond the grid's own)
+  inline Real _phi(int i, int j, int k)
+  {
+    return mLevelset(i, j, k);
+  }
+
+ protected:
+  Grid<Real> &mLevelset;
+  const FlagGrid &mFlags;
+  Grid<int> &mFmFlags;
+
+  //! velocity extrpolation
+  FmValueTransportVec3<MACGrid, Vec3> mVelTransport;
+
+  //! maximal time to march for
+  Real mMaxTime;
+
+  //! fast marching list
+  std::priority_queue<T, std::vector<T>, std::less<T>> mHeap;
+  Real mReheapVal;
+
+  //! weights for touching points
+  Real mWeights[6];
+
+  template<int C> inline Real calcWeights(int &okCnt, int &invcnt, Real *v, const Vec3i &idx);
+
+  inline Real calculateDistance(const Vec3i &pos);
+};
+
+} // namespace Manta
+#endif
diff --git a/extern/mantaflow/preprocessed/fastmarch.h.reg.cpp b/extern/mantaflow/preprocessed/fastmarch.h.reg.cpp
new file mode 100644
index 00000000000..903637af502
--- /dev/null
+++ b/extern/mantaflow/preprocessed/fastmarch.h.reg.cpp
@@ -0,0 +1,13 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "fastmarch.h"
+namespace Manta {
+extern "C" {
+// Empty registration hook generated by "prep link": fastmarch.h declares no
+// Python-exposed symbols, but the registry still expects a per-file entry.
+void PbRegister_file_5()
+{
+}
+}
+} // namespace Manta
\ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/fileio/iogrids.cpp b/extern/mantaflow/preprocessed/fileio/iogrids.cpp
new file mode 100644
index 00000000000..2f6cdaa6209
--- /dev/null
+++ b/extern/mantaflow/preprocessed/fileio/iogrids.cpp
@@ -0,0 +1,1524 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2016 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Loading and writing grids and meshes to disk
+ *
+ ******************************************************************************/
+
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <cstring>
+
+#if NO_ZLIB != 1
+extern "C" {
+# include <zlib.h>
+}
+#endif
+
+#if OPENVDB == 1
+# include "openvdb/openvdb.h"
+#endif
+
+#include "cnpy.h"
+#include "mantaio.h"
+#include "grid.h"
+#include "vector4d.h"
+#include "grid4d.h"
+
+using namespace std;
+
+namespace Manta {
+
+static const int STR_LEN_GRID = 252;
+
+//! uni file header, v4
+typedef struct {
+  int dimX, dimY, dimZ;                        // grid size
+  int gridType, elementType, bytesPerElement;  // data type info
+  char info[STR_LEN_GRID];                     // mantaflow build information
+  int dimT;                                    // optionally store forth dimension for 4d grids
+  unsigned long long timestamp;                // creation time
+} UniHeader;
+
+// note: header v4 only uses 4 bytes of the info string to store the fourth dimension, not needed
+// for pdata
+
+//*****************************************************************************
+// conversion functions for double precision
+// (note - uni files always store single prec. values)
+//*****************************************************************************
+
+#if NO_ZLIB != 1
+//! Write header + grid payload to a gz stream, converting double-precision
+//! builds down to the single-precision on-disk format. The generic template is
+//! an error trap; `ptr` points to a caller-provided scratch buffer large
+//! enough to hold the float-converted data.
+template<class GRIDT> void gridConvertWrite(gzFile &gzf, GRIDT &grid, void *ptr, UniHeader &head)
+{
+  errMsg("gridConvertWrite: unknown type, not yet supported");
+}
+
+// int grids need no conversion — written directly from grid storage
+template<> void gridConvertWrite(gzFile &gzf, Grid<int> &grid, void *ptr, UniHeader &head)
+{
+  gzwrite(gzf, &head, sizeof(UniHeader));
+  gzwrite(gzf, &grid[0], sizeof(int) * head.dimX * head.dimY * head.dimZ);
+}
+// double -> float: downcast element-wise into the scratch buffer, then write
+template<> void gridConvertWrite(gzFile &gzf, Grid<double> &grid, void *ptr, UniHeader &head)
+{
+  head.bytesPerElement = sizeof(float);
+  gzwrite(gzf, &head, sizeof(UniHeader));
+  float *ptrf = (float *)ptr;
+  for (int i = 0; i < grid.getSizeX() * grid.getSizeY() * grid.getSizeZ(); ++i, ++ptrf) {
+    *ptrf = (float)grid[i];
+  }
+  gzwrite(gzf, ptr, sizeof(float) * head.dimX * head.dimY * head.dimZ);
+}
+// Vec3<double> -> Vec3<float>, component-wise
+template<>
+void gridConvertWrite(gzFile &gzf, Grid<Vector3D<double>> &grid, void *ptr, UniHeader &head)
+{
+  head.bytesPerElement = sizeof(Vector3D<float>);
+  gzwrite(gzf, &head, sizeof(UniHeader));
+  float *ptrf = (float *)ptr;
+  for (int i = 0; i < grid.getSizeX() * grid.getSizeY() * grid.getSizeZ(); ++i) {
+    for (int c = 0; c < 3; ++c) {
+      *ptrf = (float)grid[i][c];
+      ptrf++;
+    }
+  }
+  gzwrite(gzf, ptr, sizeof(Vector3D<float>) * head.dimX * head.dimY * head.dimZ);
+}
+
+// 4D variants below use strideT * sizeT as the total element count
+template<> void gridConvertWrite(gzFile &gzf, Grid4d<int> &grid, void *ptr, UniHeader &head)
+{
+  gzwrite(gzf, &head, sizeof(UniHeader));
+  gzwrite(gzf, &grid[0], sizeof(int) * head.dimX * head.dimY * head.dimZ * head.dimT);
+}
+template<> void gridConvertWrite(gzFile &gzf, Grid4d<double> &grid, void *ptr, UniHeader &head)
+{
+  head.bytesPerElement = sizeof(float);
+  gzwrite(gzf, &head, sizeof(UniHeader));
+  float *ptrf = (float *)ptr;
+  IndexInt s = grid.getStrideT() * grid.getSizeT();
+  for (IndexInt i = 0; i < s; ++i, ++ptrf) {
+    *ptrf = (float)grid[i];
+  }
+  gzwrite(gzf, ptr, sizeof(float) * s);
+}
+template<>
+void gridConvertWrite(gzFile &gzf, Grid4d<Vector3D<double>> &grid, void *ptr, UniHeader &head)
+{
+  head.bytesPerElement = sizeof(Vector3D<float>);
+  gzwrite(gzf, &head, sizeof(UniHeader));
+  float *ptrf = (float *)ptr;
+  IndexInt s = grid.getStrideT() * grid.getSizeT();
+  for (IndexInt i = 0; i < s; ++i) {
+    for (int c = 0; c < 3; ++c) {
+      *ptrf = (float)grid[i][c];
+      ptrf++;
+    }
+  }
+  gzwrite(gzf, ptr, sizeof(Vector3D<float>) * s);
+}
+template<>
+void gridConvertWrite(gzFile &gzf, Grid4d<Vector4D<double>> &grid, void *ptr, UniHeader &head)
+{
+  head.bytesPerElement = sizeof(Vector4D<float>);
+  gzwrite(gzf, &head, sizeof(UniHeader));
+  float *ptrf = (float *)ptr;
+  IndexInt s = grid.getStrideT() * grid.getSizeT();
+  for (IndexInt i = 0; i < s; ++i) {
+    for (int c = 0; c < 4; ++c) {
+      *ptrf = (float)grid[i][c];
+      ptrf++;
+    }
+  }
+  gzwrite(gzf, ptr, sizeof(Vector4D<float>) * s);
+}
+
+//! Read a grid payload from a gz stream into `grid`, converting the on-disk
+//! single-precision values up to the build's element type. `ptr` is a scratch
+//! buffer sized for the raw file data; bytesPerElement comes from the header
+//! and is validated against the expected on-disk element size.
+template<class T> void gridReadConvert(gzFile &gzf, Grid<T> &grid, void *ptr, int bytesPerElement)
+{
+  errMsg("gridReadConvert: unknown type, not yet supported");
+}
+
+template<> void gridReadConvert<int>(gzFile &gzf, Grid<int> &grid, void *ptr, int bytesPerElement)
+{
+  gzread(gzf, ptr, sizeof(int) * grid.getSizeX() * grid.getSizeY() * grid.getSizeZ());
+  assertMsg(bytesPerElement == sizeof(int),
+            "grid element size doesn't match " << bytesPerElement << " vs " << sizeof(int));
+  // easy, nothing to do for ints
+  memcpy(&(grid[0]), ptr, sizeof(int) * grid.getSizeX() * grid.getSizeY() * grid.getSizeZ());
+}
+
+// float on disk -> double in memory
+template<>
+void gridReadConvert<double>(gzFile &gzf, Grid<double> &grid, void *ptr, int bytesPerElement)
+{
+  gzread(gzf, ptr, sizeof(float) * grid.getSizeX() * grid.getSizeY() * grid.getSizeZ());
+  assertMsg(bytesPerElement == sizeof(float),
+            "grid element size doesn't match " << bytesPerElement << " vs " << sizeof(float));
+  float *ptrf = (float *)ptr;
+  for (int i = 0; i < grid.getSizeX() * grid.getSizeY() * grid.getSizeZ(); ++i, ++ptrf) {
+    grid[i] = (double)(*ptrf);
+  }
+}
+
+// Vec3<float> on disk -> Vec3 (build precision) in memory, component-wise
+template<>
+void gridReadConvert<Vec3>(gzFile &gzf, Grid<Vec3> &grid, void *ptr, int bytesPerElement)
+{
+  gzread(gzf, ptr, sizeof(Vector3D<float>) * grid.getSizeX() * grid.getSizeY() * grid.getSizeZ());
+  assertMsg(bytesPerElement == sizeof(Vector3D<float>),
+            "grid element size doesn't match " << bytesPerElement << " vs "
+                                               << sizeof(Vector3D<float>));
+  float *ptrf = (float *)ptr;
+  for (int i = 0; i < grid.getSizeX() * grid.getSizeY() * grid.getSizeZ(); ++i) {
+    Vec3 v;
+    for (int c = 0; c < 3; ++c) {
+      v[c] = double(*ptrf);
+      ptrf++;
+    }
+    grid[i] = v;
+  }
+}
+
+//! 4D variant of gridReadConvert: reads one 3D time-slice of a Grid4d from
+//! the gz stream and stores it at slice index `t` (offset = sizeX*sizeY*sizeZ*t).
+template<class T>
+void gridReadConvert4d(gzFile &gzf, Grid4d<T> &grid, void *ptr, int bytesPerElement, int t)
+{
+  errMsg("gridReadConvert4d: unknown type, not yet supported");
+}
+
+template<>
+void gridReadConvert4d<int>(gzFile &gzf, Grid4d<int> &grid, void *ptr, int bytesPerElement, int t)
+{
+  gzread(gzf, ptr, sizeof(int) * grid.getSizeX() * grid.getSizeY() * grid.getSizeZ());
+  assertMsg(bytesPerElement == sizeof(int),
+            "grid element size doesn't match " << bytesPerElement << " vs " << sizeof(int));
+  // nothing to do for ints
+  memcpy(&(grid[grid.getSizeX() * grid.getSizeY() * grid.getSizeZ() * t]),
+         ptr,
+         sizeof(int) * grid.getSizeX() * grid.getSizeY() * grid.getSizeZ());
+}
+
+// float on disk -> double in memory, per slice
+template<>
+void gridReadConvert4d<double>(
+    gzFile &gzf, Grid4d<double> &grid, void *ptr, int bytesPerElement, int t)
+{
+  assertMsg(bytesPerElement == sizeof(float),
+            "grid element size doesn't match " << bytesPerElement << " vs " << sizeof(float));
+
+  float *ptrf = (float *)ptr;
+  gzread(gzf, ptr, sizeof(float) * grid.getSizeX() * grid.getSizeY() * grid.getSizeZ());
+  for (IndexInt i = 0; i < grid.getSizeX() * grid.getSizeY() * grid.getSizeZ(); ++i, ++ptrf) {
+    grid[grid.getSizeX() * grid.getSizeY() * grid.getSizeZ() * t + i] = (double)(*ptrf);
+  }
+}
+
+template<>
+void gridReadConvert4d<Vec3>(
+    gzFile &gzf, Grid4d<Vec3> &grid, void *ptr, int bytesPerElement, int t)
+{
+  // NOTE(review): the mismatch message prints sizeof(float) but the check uses
+  // sizeof(Vector3D<float>) — message is misleading on failure (generated code).
+  assertMsg(bytesPerElement == sizeof(Vector3D<float>),
+            "grid element size doesn't match " << bytesPerElement << " vs " << sizeof(float));
+
+  gzread(gzf, ptr, sizeof(Vector3D<float>) * grid.getSizeX() * grid.getSizeY() * grid.getSizeZ());
+  float *ptrf = (float *)ptr;
+  for (IndexInt i = 0; i < grid.getSizeX() * grid.getSizeY() * grid.getSizeZ(); ++i) {
+    Vec3 v;
+    for (int c = 0; c < 3; ++c) {
+      v[c] = double(*ptrf);
+      ptrf++;
+    }
+    grid[grid.getSizeX() * grid.getSizeY() * grid.getSizeZ() * t + i] = v;
+  }
+}
+
+template<>
+void gridReadConvert4d<Vec4>(
+    gzFile &gzf, Grid4d<Vec4> &grid, void *ptr, int bytesPerElement, int t)
+{
+  // NOTE(review): same misleading sizeof(float) in the message as the Vec3 case.
+  assertMsg(bytesPerElement == sizeof(Vector4D<float>),
+            "grid element size doesn't match " << bytesPerElement << " vs " << sizeof(float));
+
+  gzread(gzf, ptr, sizeof(Vector4D<float>) * grid.getSizeX() * grid.getSizeY() * grid.getSizeZ());
+  float *ptrf = (float *)ptr;
+  for (IndexInt i = 0; i < grid.getSizeX() * grid.getSizeY() * grid.getSizeZ(); ++i) {
+    Vec4 v;
+    for (int c = 0; c < 4; ++c) {
+      v[c] = double(*ptrf);
+      ptrf++;
+    }
+    grid[grid.getSizeX() * grid.getSizeY() * grid.getSizeZ() * t + i] = v;
+  }
+}
+
+// make sure compatible grid types dont lead to errors...
+static int unifyGridType(int type)
+{
+ // real <> levelset
+ if (type & GridBase::TypeReal)
+ type |= GridBase::TypeLevelset;
+ if (type & GridBase::TypeLevelset)
+ type |= GridBase::TypeReal;
+ // vec3 <> mac
+ if (type & GridBase::TypeVec3)
+ type |= GridBase::TypeMAC;
+ if (type & GridBase::TypeMAC)
+ type |= GridBase::TypeVec3;
+ return type;
+}
+
+#endif // NO_ZLIB!=1
+
+//*****************************************************************************
+// grid data
+//*****************************************************************************
+
+//! Dump a grid as human-readable text, one "(i,j,k) = value" line per cell.
+//! Debug/inspection aid only — there is no matching text reader here.
+template<class T> void writeGridTxt(const string &name, Grid<T> *grid)
+{
+  debMsg("writing grid " << grid->getName() << " to text file " << name, 1);
+
+  ofstream ofs(name.c_str());
+  if (!ofs.good())
+    errMsg("writeGridTxt: can't open file " << name);
+  FOR_IJK(*grid)
+  {
+    ofs << Vec3i(i, j, k) << " = " << (*grid)(i, j, k) << "\n";
+  }
+  ofs.close();
+}
+
+//! Write the raw grid memory (no header) gz-compressed to disk.
+//! Element layout is whatever sizeof(T) is in this build — raw files are not
+//! portable across single/double-precision builds.
+template<class T> void writeGridRaw(const string &name, Grid<T> *grid)
+{
+  debMsg("writing grid " << grid->getName() << " to raw file " << name, 1);
+
+#if NO_ZLIB != 1
+  gzFile gzf = gzopen(name.c_str(), "wb1");  // do some compression
+  if (!gzf)
+    errMsg("writeGridRaw: can't open file " << name);
+  gzwrite(gzf, &((*grid)[0]), sizeof(T) * grid->getSizeX() * grid->getSizeY() * grid->getSizeZ());
+  gzclose(gzf);
+#else
+  debMsg("file format not supported without zlib", 1);
+#endif
+}
+
+//! Counterpart to writeGridRaw: reads raw gz-compressed grid memory and
+//! asserts that the decompressed byte count matches the grid size exactly.
+template<class T> void readGridRaw(const string &name, Grid<T> *grid)
+{
+  debMsg("reading grid " << grid->getName() << " from raw file " << name, 1);
+
+#if NO_ZLIB != 1
+  gzFile gzf = gzopen(name.c_str(), "rb");
+  if (!gzf)
+    errMsg("readGridRaw: can't open file " << name);
+
+  IndexInt bytes = sizeof(T) * grid->getSizeX() * grid->getSizeY() * grid->getSizeZ();
+  IndexInt readBytes = gzread(gzf, &((*grid)[0]), bytes);
+  assertMsg(bytes == readBytes,
+            "can't read raw file, stream length does not match, " << bytes << " vs " << readBytes);
+  gzclose(gzf);
+#else
+  debMsg("file format not supported without zlib", 1);
+#endif
+}
+
+//! legacy headers for reading old files
+//! legacy headers for reading old files
+// v1 ("DDF2" files)
+typedef struct {
+  int dimX, dimY, dimZ;
+  int frames, elements, elementType, bytesPerElement, bytesPerFrame;
+} UniLegacyHeader;
+
+// v2 ("MNT1" files)
+typedef struct {
+  int dimX, dimY, dimZ;
+  int gridType, elementType, bytesPerElement;
+} UniLegacyHeader2;
+
+// v3 ("MNT2"/"M4T2" files); note info is 256 chars here vs STR_LEN_GRID in v4
+typedef struct {
+  int dimX, dimY, dimZ;
+  int gridType, elementType, bytesPerElement;
+  char info[256];
+  unsigned long long timestamp;
+} UniLegacyHeader3;
+
+//! for auto-init & check of results of test runs , optionally returns info string of header
+//! for auto-init & check of results of test runs , optionally returns info string of header
+//! Parses v3 ("MNT2"/"M4T2") and v4 ("MNT3"/"M4T3") uni headers; x/y/z are
+//! zeroed first and stay 0 if the file is missing or has an unknown ID.
+//! `t` (fourth dimension) and `info` are optional outputs — presumably
+//! defaulted to NULL in the header declaration, since the 4-arg overload below
+//! omits them (declaration not visible here).
+void getUniFileSize(const string &name, int &x, int &y, int &z, int *t, std::string *info)
+{
+  x = y = z = 0;
+#if NO_ZLIB != 1
+  gzFile gzf = gzopen(name.c_str(), "rb");
+  if (gzf) {
+    char ID[5] = {0, 0, 0, 0, 0};
+    gzread(gzf, ID, 4);
+
+    // v3
+    if ((!strcmp(ID, "MNT2")) || (!strcmp(ID, "M4T2"))) {
+      UniLegacyHeader3 head;
+      assertMsg(gzread(gzf, &head, sizeof(UniLegacyHeader3)) == sizeof(UniLegacyHeader3),
+                "can't read file, no header present");
+      x = head.dimX;
+      y = head.dimY;
+      z = head.dimZ;
+
+      // optionally , read fourth dim
+      if ((!strcmp(ID, "M4T2")) && t) {
+        int dimT = 0;
+        gzread(gzf, &dimT, sizeof(int));
+        (*t) = dimT;
+      }
+    }
+
+    // v4
+    if ((!strcmp(ID, "MNT3")) || (!strcmp(ID, "M4T3"))) {
+      UniHeader head;
+      assertMsg(gzread(gzf, &head, sizeof(UniHeader)) == sizeof(UniHeader),
+                "can't read file, no header present");
+      x = head.dimX;
+      y = head.dimY;
+      z = head.dimZ;
+      if (t)
+        (*t) = head.dimT;
+    }
+
+    gzclose(gzf);
+  }
+#endif
+  // build "x,y,z[,t]" summary string if requested (also without zlib)
+  if (info) {
+    std::ostringstream out;
+    out << x << "," << y << "," << z;
+    if (t && (*t) > 0)
+      out << "," << (*t);
+    *info = out.str();
+  }
+}
+//! Convenience overload for Python: returns the 3D size as a Vec3
+//! (components 0 when the file can't be parsed).
+Vec3 getUniFileSize(const string &name)
+{
+  int x, y, z;
+  getUniFileSize(name, x, y, z);
+  return Vec3(Real(x), Real(y), Real(z));
+}
+//! Python binding wrapper generated by the MantaFlow preprocessor.
+//! Calls the Vec3 overload of getUniFileSize(name) and converts the result to
+//! a Python object via toPy.
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "getUniFileSize", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      const string &name = _args.get<string>("name", 0, &_lock);
+      _retval = toPy(getUniFileSize(name));
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "getUniFileSize", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("getUniFileSize", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Pb registry so Python scripts can call it.
+static const Pb::Register _RP_getUniFileSize("", "getUniFileSize", _W_0);
+extern "C" {
+void PbRegister_getUniFileSize()
+{
+  KEEP_UNUSED(_RP_getUniFileSize);
+}
+}
+
+//! for test run debugging
+//! Logs "name info: x,y,z[,t]" at debug level 1; prints "<file not found>"
+//! only if getUniFileSize leaves the info string untouched.
+void printUniFileInfoString(const string &name)
+{
+  std::string info("<file not found>");
+  int x = -1, y = -1, z = -1, t = -1;
+  // use getUniFileSize to parse the different headers
+  getUniFileSize(name, x, y, z, &t, &info);
+  debMsg("File '" << name << "' info: " << info, 1);
+}
+//! Python binding wrapper generated by the MantaFlow preprocessor.
+//! Runs printUniFileInfoString(name) and returns Python None.
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "printUniFileInfoString", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      const string &name = _args.get<string>("name", 0, &_lock);
+      _retval = getPyNone();
+      printUniFileInfoString(name);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "printUniFileInfoString", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("printUniFileInfoString", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Pb registry so Python scripts can call it.
+static const Pb::Register _RP_printUniFileInfoString("", "printUniFileInfoString", _W_1);
+extern "C" {
+void PbRegister_printUniFileInfoString()
+{
+  KEEP_UNUSED(_RP_printUniFileInfoString);
+}
+}
+
+// actual read/write functions
+
+//! Write a 3D grid in .uni v4 format ("MNT3" ID + UniHeader + payload).
+//! On-disk values are always single precision: double builds route through
+//! gridConvertWrite using a temp grid's storage as the float scratch buffer.
+template<class T> void writeGridUni(const string &name, Grid<T> *grid)
+{
+  debMsg("Writing grid " << grid->getName() << " to uni file " << name, 1);
+
+#if NO_ZLIB != 1
+  char ID[5] = "MNT3";
+  UniHeader head;
+  head.dimX = grid->getSizeX();
+  head.dimY = grid->getSizeY();
+  head.dimZ = grid->getSizeZ();
+  head.dimT = 0;  // 3D grid, no fourth dimension
+  head.gridType = grid->getType();
+  head.bytesPerElement = sizeof(T);
+  snprintf(head.info, STR_LEN_GRID, "%s", buildInfoString().c_str());
+  MuTime stamp;
+  head.timestamp = stamp.time;
+
+  // map grid type bits to the on-disk elementType enum (0=int, 1=real, 2=vec3)
+  if (grid->getType() & GridBase::TypeInt)
+    head.elementType = 0;
+  else if (grid->getType() & GridBase::TypeReal)
+    head.elementType = 1;
+  else if (grid->getType() & GridBase::TypeVec3)
+    head.elementType = 2;
+  else
+    errMsg("writeGridUni: unknown element type");
+
+  gzFile gzf = gzopen(name.c_str(), "wb1");  // do some compression
+  if (!gzf)
+    errMsg("writeGridUni: can't open file " << name);
+
+  gzwrite(gzf, ID, 4);
+#  if FLOATINGPOINT_PRECISION != 1
+  // always write float values, even if compiled with double precision...
+  Grid<T> temp(grid->getParent());
+  // "misuse" temp grid as storage for floating point values (we have double, so it will always
+  // fit)
+  gridConvertWrite(gzf, *grid, &(temp[0]), head);
+#  else
+  // single-precision build: header + raw memory can be written directly
+  void *ptr = &((*grid)[0]);
+  gzwrite(gzf, &head, sizeof(UniHeader));
+  gzwrite(gzf, ptr, sizeof(T) * head.dimX * head.dimY * head.dimZ);
+#  endif
+  gzclose(gzf);
+
+#else
+  debMsg("file format not supported without zlib", 1);
+#endif
+};
+
+template<class T> void readGridUni(const string &name, Grid<T> *grid)
+{
+ debMsg("Reading grid " << grid->getName() << " from uni file " << name, 1);
+
+#if NO_ZLIB != 1
+ gzFile gzf = gzopen(name.c_str(), "rb");
+ if (!gzf)
+ errMsg("readGridUni: can't open file " << name);
+
+ char ID[5] = {0, 0, 0, 0, 0};
+ gzread(gzf, ID, 4);
+
+ if (!strcmp(ID, "DDF2")) {
+ // legacy file format
+ UniLegacyHeader head;
+ assertMsg(gzread(gzf, &head, sizeof(UniLegacyHeader)) == sizeof(UniLegacyHeader),
+ "can't read file, no header present");
+ assertMsg(head.dimX == grid->getSizeX() && head.dimY == grid->getSizeY() &&
+ head.dimZ == grid->getSizeZ(),
+ "grid dim doesn't match");
+ assertMsg(head.bytesPerElement * head.elements == sizeof(T), "grid type doesn't match");
+ // skip flags
+ int numEl = head.dimX * head.dimY * head.dimZ;
+ gzseek(gzf, numEl, SEEK_CUR);
+ // actual grid read
+ gzread(gzf, &((*grid)[0]), sizeof(T) * numEl);
+ }
+ else if (!strcmp(ID, "MNT1")) {
+ // legacy file format 2
+ UniLegacyHeader2 head;
+ assertMsg(gzread(gzf, &head, sizeof(UniLegacyHeader2)) == sizeof(UniLegacyHeader2),
+ "can't read file, no header present");
+ assertMsg(head.dimX == grid->getSizeX() && head.dimY == grid->getSizeY() &&
+ head.dimZ == grid->getSizeZ(),
+ "grid dim doesn't match, " << Vec3(head.dimX, head.dimY, head.dimZ) << " vs "
+ << grid->getSize());
+ assertMsg(head.gridType == grid->getType(),
+ "grid type doesn't match " << head.gridType << " vs " << grid->getType());
+ assertMsg(head.bytesPerElement == sizeof(T),
+ "grid element size doesn't match " << head.bytesPerElement << " vs " << sizeof(T));
+ gzread(gzf, &((*grid)[0]), sizeof(T) * head.dimX * head.dimY * head.dimZ);
+ }
+ else if (!strcmp(ID, "MNT2")) {
+ // a bit ugly, almost identical to MNT3
+ UniLegacyHeader3 head;
+ assertMsg(gzread(gzf, &head, sizeof(UniLegacyHeader3)) == sizeof(UniLegacyHeader3),
+ "can't read file, no header present");
+ assertMsg(head.dimX == grid->getSizeX() && head.dimY == grid->getSizeY() &&
+ head.dimZ == grid->getSizeZ(),
+ "grid dim doesn't match, " << Vec3(head.dimX, head.dimY, head.dimZ) << " vs "
+ << grid->getSize());
+ assertMsg(unifyGridType(head.gridType) == unifyGridType(grid->getType()),
+ "grid type doesn't match " << head.gridType << " vs " << grid->getType());
+# if FLOATINGPOINT_PRECISION != 1
+ Grid<T> temp(grid->getParent());
+ void *ptr = &(temp[0]);
+ gridReadConvert<T>(gzf, *grid, ptr, head.bytesPerElement);
+# else
+ assertMsg(head.bytesPerElement == sizeof(T),
+ "grid element size doesn't match " << head.bytesPerElement << " vs " << sizeof(T));
+ gzread(gzf, &((*grid)[0]), sizeof(T) * head.dimX * head.dimY * head.dimZ);
+# endif
+ }
+ else if (!strcmp(ID, "MNT3")) {
+ // current file format
+ UniHeader head;
+ assertMsg(gzread(gzf, &head, sizeof(UniHeader)) == sizeof(UniHeader),
+ "can't read file, no header present");
+ assertMsg(head.dimX == grid->getSizeX() && head.dimY == grid->getSizeY() &&
+ head.dimZ == grid->getSizeZ(),
+ "grid dim doesn't match, " << Vec3(head.dimX, head.dimY, head.dimZ) << " vs "
+ << grid->getSize());
+ assertMsg(unifyGridType(head.gridType) == unifyGridType(grid->getType()),
+ "grid type doesn't match " << head.gridType << " vs " << grid->getType());
+# if FLOATINGPOINT_PRECISION != 1
+ // convert float to double
+ Grid<T> temp(grid->getParent());
+ void *ptr = &(temp[0]);
+ gridReadConvert<T>(gzf, *grid, ptr, head.bytesPerElement);
+# else
+ assertMsg(head.bytesPerElement == sizeof(T),
+ "grid element size doesn't match " << head.bytesPerElement << " vs " << sizeof(T));
+ gzread(gzf, &((*grid)[0]), sizeof(T) * head.dimX * head.dimY * head.dimZ);
+# endif
+ }
+ else {
+ errMsg("readGridUni: Unknown header '" << ID << "' ");
+ }
+ gzclose(gzf);
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+};
+
+template<class T> void writeGridVol(const string &name, Grid<T> *grid)
+{
+ debMsg("writing grid " << grid->getName() << " to vol file " << name, 1);
+ errMsg("writeGridVol: Type not yet supported!");
+}
+
+struct volHeader {
+ char ID[3];
+ char version;
+ int encoding;
+ int dimX, dimY, dimZ;
+ int channels;
+ Vec3 bboxMin, bboxMax;
+};
+
+template<> void writeGridVol<Real>(const string &name, Grid<Real> *grid)
+{
+ debMsg("writing real grid " << grid->getName() << " to vol file " << name, 1);
+
+ volHeader header;
+ header.ID[0] = 'V';
+ header.ID[1] = 'O';
+ header.ID[2] = 'L';
+ header.version = 3;
+ header.encoding = 1; // float32 precision
+ header.dimX = grid->getSizeX();
+ header.dimY = grid->getSizeY();
+ header.dimZ = grid->getSizeZ();
+ header.channels = 1; // only 1 channel
+ header.bboxMin = Vec3(-0.5);
+ header.bboxMax = Vec3(0.5);
+
+ FILE *fp = fopen(name.c_str(), "wb");
+ if (fp == NULL) {
+ errMsg("writeGridVol: Cannot open '" << name << "'");
+ return;
+ }
+
+ fwrite(&header, sizeof(volHeader), 1, fp);
+
+#if FLOATINGPOINT_PRECISION == 1
+ // for float, write one big chunk
+ fwrite(&(*grid)[0], sizeof(float), grid->getSizeX() * grid->getSizeY() * grid->getSizeZ(), fp);
+#else
+ // explicitly convert each entry to float - we might have double precision in mantaflow
+ FOR_IDX(*grid)
+ {
+ float value = (*grid)[idx];
+ fwrite(&value, sizeof(float), 1, fp);
+ }
+#endif
+
+ fclose(fp);
+};
+
+template<class T> void readGridVol(const string &name, Grid<T> *grid)
+{
+ debMsg("writing grid " << grid->getName() << " to vol file " << name, 1);
+ errMsg("readGridVol: Type not yet supported!");
+}
+
+template<> void readGridVol<Real>(const string &name, Grid<Real> *grid)
+{
+ debMsg("reading real grid " << grid->getName() << " from vol file " << name, 1);
+
+ volHeader header;
+ FILE *fp = fopen(name.c_str(), "rb");
+ if (fp == NULL) {
+ errMsg("readGridVol: Cannot open '" << name << "'");
+ return;
+ }
+
+ // note, only very basic file format checks here!
+ assertMsg(fread(&header, 1, sizeof(volHeader), fp) == sizeof(volHeader),
+ "can't read file, no header present");
+ if (header.dimX != grid->getSizeX() || header.dimY != grid->getSizeY() ||
+ header.dimZ != grid->getSizeZ())
+ errMsg("grid dim doesn't match, " << Vec3(header.dimX, header.dimY, header.dimZ) << " vs "
+ << grid->getSize());
+#if FLOATINGPOINT_PRECISION != 1
+ errMsg("readGridVol: Double precision not yet supported");
+#else
+ const unsigned int s = sizeof(float) * header.dimX * header.dimY * header.dimZ;
+ assertMsg(fread(&((*grid)[0]), 1, s, fp) == s, "can't read file, no / not enough data");
+#endif
+
+ fclose(fp);
+};
+
+// 4d grids IO
+
+template<class T> void writeGrid4dUni(const string &name, Grid4d<T> *grid)
+{
+ debMsg("writing grid4d " << grid->getName() << " to uni file " << name, 1);
+
+#if NO_ZLIB != 1
+ char ID[5] = "M4T3";
+ UniHeader head;
+ head.dimX = grid->getSizeX();
+ head.dimY = grid->getSizeY();
+ head.dimZ = grid->getSizeZ();
+ head.dimT = grid->getSizeT();
+ head.gridType = grid->getType();
+ head.bytesPerElement = sizeof(T);
+ snprintf(head.info, STR_LEN_GRID, "%s", buildInfoString().c_str());
+ MuTime stamp;
+ stamp.get();
+ head.timestamp = stamp.time;
+
+ if (grid->getType() & Grid4dBase::TypeInt)
+ head.elementType = 0;
+ else if (grid->getType() & Grid4dBase::TypeReal)
+ head.elementType = 1;
+ else if (grid->getType() & Grid4dBase::TypeVec3)
+ head.elementType = 2;
+ else if (grid->getType() & Grid4dBase::TypeVec4)
+ head.elementType = 2;
+ else
+ errMsg("writeGrid4dUni: unknown element type");
+
+ gzFile gzf = gzopen(name.c_str(), "wb1"); // do some compression
+ if (!gzf)
+ errMsg("writeGrid4dUni: can't open file " << name);
+
+ gzwrite(gzf, ID, 4);
+# if FLOATINGPOINT_PRECISION != 1
+ Grid4d<T> temp(grid->getParent());
+ gridConvertWrite<Grid4d<T>>(gzf, *grid, &(temp[0]), head);
+# else
+ gzwrite(gzf, &head, sizeof(UniHeader));
+
+ // can be too large - write in chunks
+ for (int t = 0; t < head.dimT; ++t) {
+ void *ptr = &((*grid)[head.dimX * head.dimY * head.dimZ * t]);
+ gzwrite(gzf, ptr, sizeof(T) * head.dimX * head.dimY * head.dimZ * 1);
+ }
+# endif
+ gzclose(gzf);
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+};
+
+//! note, reading 4d uni grids is slightly more complicated than 3d ones
+//! as it optionally supports sliced reading
+template<class T>
+void readGrid4dUni(
+ const string &name, Grid4d<T> *grid, int readTslice, Grid4d<T> *slice, void **fileHandle)
+{
+ if (grid)
+ debMsg("reading grid " << grid->getName() << " from uni file " << name, 1);
+ if (slice)
+ debMsg("reading slice " << slice->getName() << ",t=" << readTslice << " from uni file "
+ << name,
+ 1);
+
+#if NO_ZLIB != 1
+ gzFile gzf = NULL;
+ char ID[5] = {0, 0, 0, 0, 0};
+
+ // optionally - reuse file handle, if valid one is passed in fileHandle pointer...
+ if ((!fileHandle) || (fileHandle && (*fileHandle == NULL))) {
+ gzf = gzopen(name.c_str(), "rb");
+ if (!gzf)
+ errMsg("readGrid4dUni: can't open file " << name);
+
+ gzread(gzf, ID, 4);
+ if (fileHandle) {
+ *fileHandle = gzf;
+ }
+ }
+ else {
+ // optimized read - reduced sanity checks
+ gzf = (gzFile)(*fileHandle);
+ void *ptr = &((*slice)[0]);
+ gzread(gzf, ptr, sizeof(T) * slice->getStrideT() * 1); // quick and dirty...
+ return;
+ }
+
+ if ((!strcmp(ID, "M4T2")) || (!strcmp(ID, "M4T3"))) {
+ int headerSize = -1;
+
+ // current file format
+ UniHeader head;
+ if (!strcmp(ID, "M4T3")) {
+ headerSize = sizeof(UniHeader);
+ assertMsg(gzread(gzf, &head, sizeof(UniHeader)) == sizeof(UniHeader),
+ "can't read file, no 4d header present");
+ if (FLOATINGPOINT_PRECISION == 1)
+ assertMsg(head.bytesPerElement == sizeof(T),
+ "4d grid element size doesn't match " << head.bytesPerElement << " vs "
+ << sizeof(T));
+ }
+ // old header
+ if (!strcmp(ID, "M4T2")) {
+ UniLegacyHeader3 lhead;
+ headerSize = sizeof(UniLegacyHeader3) + sizeof(int);
+ assertMsg(gzread(gzf, &lhead, sizeof(UniLegacyHeader3)) == sizeof(UniLegacyHeader3),
+ "can't read file, no 4dl header present");
+ if (FLOATINGPOINT_PRECISION == 1)
+ assertMsg(lhead.bytesPerElement == sizeof(T),
+ "4d grid element size doesn't match " << lhead.bytesPerElement << " vs "
+ << sizeof(T));
+
+ int fourthDim = 0;
+ gzread(gzf, &fourthDim, sizeof(fourthDim));
+
+ head.dimX = lhead.dimX;
+ head.dimY = lhead.dimY;
+ head.dimZ = lhead.dimZ;
+ head.dimT = fourthDim;
+ head.gridType = lhead.gridType;
+ }
+
+ if (readTslice < 0) {
+ assertMsg(head.dimX == grid->getSizeX() && head.dimY == grid->getSizeY() &&
+ head.dimZ == grid->getSizeZ(),
+ "grid dim doesn't match, " << Vec3(head.dimX, head.dimY, head.dimZ) << " vs "
+ << grid->getSize());
+ assertMsg(unifyGridType(head.gridType) == unifyGridType(grid->getType()),
+ "grid type doesn't match " << head.gridType << " vs " << grid->getType());
+
+ // read full 4d grid
+ assertMsg(head.dimT == grid->getSizeT(),
+ "grid dim4 doesn't match, " << head.dimT << " vs " << grid->getSize());
+
+ // can be too large - read in chunks
+# if FLOATINGPOINT_PRECISION != 1
+ Grid4d<T> temp(grid->getParent());
+ void *ptr = &(temp[0]);
+ for (int t = 0; t < head.dimT; ++t) {
+ gridReadConvert4d<T>(gzf, *grid, ptr, head.bytesPerElement, t);
+ }
+# else
+ for (int t = 0; t < head.dimT; ++t) {
+ void *ptr = &((*grid)[head.dimX * head.dimY * head.dimZ * t]);
+ gzread(gzf, ptr, sizeof(T) * head.dimX * head.dimY * head.dimZ * 1);
+ }
+# endif
+ }
+ else {
+ // read chosen slice only
+ assertMsg(head.dimX == slice->getSizeX() && head.dimY == slice->getSizeY() &&
+ head.dimZ == slice->getSizeZ(),
+ "grid dim doesn't match, " << Vec3(head.dimX, head.dimY, head.dimZ) << " vs "
+ << slice->getSize());
+ assertMsg(unifyGridType(head.gridType) == unifyGridType(slice->getType()),
+ "grid type doesn't match " << head.gridType << " vs " << slice->getType());
+
+# if FLOATINGPOINT_PRECISION != 1
+ errMsg("readGrid4dUni: NYI (2)"); // slice read not yet supported for double
+# else
+ assertMsg(slice, "No 3d slice grid data given");
+ assertMsg(readTslice < head.dimT,
+ "grid dim4 slice too large " << readTslice << " vs " << head.dimT);
+ void *ptr = &((*slice)[0]);
+ gzseek(gzf,
+ sizeof(T) * head.dimX * head.dimY * head.dimZ * readTslice + headerSize + 4,
+ SEEK_SET);
+ gzread(gzf, ptr, sizeof(T) * head.dimX * head.dimY * head.dimZ * 1);
+# endif
+ }
+ }
+ else {
+ debMsg("Unknown header!", 1);
+ }
+
+ if (!fileHandle) {
+ gzclose(gzf);
+ }
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+};
+void readGrid4dUniCleanup(void **fileHandle)
+{
+ gzFile gzf = NULL;
+ if (fileHandle) {
+ gzf = (gzFile)(*fileHandle);
+ gzclose(gzf);
+ *fileHandle = NULL;
+ }
+}
+
+template<class T> void writeGrid4dRaw(const string &name, Grid4d<T> *grid)
+{
+ debMsg("writing grid4d " << grid->getName() << " to raw file " << name, 1);
+
+#if NO_ZLIB != 1
+ gzFile gzf = gzopen(name.c_str(), "wb1"); // do some compression
+ if (!gzf)
+ errMsg("writeGrid4dRaw: can't open file " << name);
+ gzwrite(gzf,
+ &((*grid)[0]),
+ sizeof(T) * grid->getSizeX() * grid->getSizeY() * grid->getSizeZ() * grid->getSizeT());
+ gzclose(gzf);
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+}
+
+template<class T> void readGrid4dRaw(const string &name, Grid4d<T> *grid)
+{
+ debMsg("reading grid4d " << grid->getName() << " from raw file " << name, 1);
+
+#if NO_ZLIB != 1
+ gzFile gzf = gzopen(name.c_str(), "rb");
+ if (!gzf)
+ errMsg("readGrid4dRaw: can't open file " << name);
+
+ IndexInt bytes = sizeof(T) * grid->getSizeX() * grid->getSizeY() * grid->getSizeZ() *
+ grid->getSizeT();
+ IndexInt readBytes = gzread(gzf, &((*grid)[0]), bytes);
+ assertMsg(bytes == readBytes,
+ "can't read raw file, stream length does not match, " << bytes << " vs " << readBytes);
+ gzclose(gzf);
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+}
+
+//*****************************************************************************
+// optional openvdb export
+
+#if OPENVDB == 1
+
+template<class T> void writeGridVDB(const string &name, Grid<T> *grid)
+{
+ debMsg("Writing grid " << grid->getName() << " to vdb file " << name << " not yet supported!",
+ 1);
+}
+
+template<class T> void readGridVDB(const string &name, Grid<T> *grid)
+{
+ debMsg("Reading grid " << grid->getName() << " from vdb file " << name << " not yet supported!",
+ 1);
+}
+
+template<> void writeGridVDB(const string &name, Grid<Real> *grid)
+{
+ debMsg("Writing real grid " << grid->getName() << " to vdb file " << name, 1);
+
+ // Create an empty floating-point grid with background value 0.
+ openvdb::initialize();
+ openvdb::FloatGrid::Ptr gridVDB = openvdb::FloatGrid::create();
+ gridVDB->setTransform(
+ openvdb::math::Transform::createLinearTransform(1. / grid->getSizeX())); // voxel size
+
+ // Get an accessor for coordinate-based access to voxels.
+ openvdb::FloatGrid::Accessor accessor = gridVDB->getAccessor();
+
+ // Identify the grid as a level set.
+ gridVDB->setGridClass(openvdb::GRID_FOG_VOLUME);
+
+ // Name the grid "density".
+ gridVDB->setName(grid->getName());
+
+ openvdb::io::File file(name);
+
+ FOR_IJK(*grid)
+ {
+ openvdb::Coord xyz(i, j, k);
+ accessor.setValue(xyz, (*grid)(i, j, k));
+ }
+
+ // Add the grid pointer to a container.
+ openvdb::GridPtrVec gridsVDB;
+ gridsVDB.push_back(gridVDB);
+
+ // Write out the contents of the container.
+ file.write(gridsVDB);
+ file.close();
+};
+
+template<> void readGridVDB(const string &name, Grid<Real> *grid)
+{
+ debMsg("Reading real grid " << grid->getName() << " from vdb file " << name, 1);
+
+ openvdb::initialize();
+ openvdb::io::File file(name);
+ file.open();
+
+ openvdb::GridBase::Ptr baseGrid;
+ for (openvdb::io::File::NameIterator nameIter = file.beginName(); nameIter != file.endName();
+ ++nameIter) {
+# ifndef BLENDER
+ // Read in only the grid we are interested in.
+ if (nameIter.gridName() == grid->getName()) {
+ baseGrid = file.readGrid(nameIter.gridName());
+ }
+ else {
+ debMsg("skipping grid " << nameIter.gridName(), 1);
+ }
+# else
+ // For Blender, skip name check and pick first grid from loop
+ baseGrid = file.readGrid(nameIter.gridName());
+ break;
+# endif
+ }
+ file.close();
+ openvdb::FloatGrid::Ptr gridVDB = openvdb::gridPtrCast<openvdb::FloatGrid>(baseGrid);
+
+ openvdb::FloatGrid::Accessor accessor = gridVDB->getAccessor();
+
+ FOR_IJK(*grid)
+ {
+ openvdb::Coord xyz(i, j, k);
+ float v = accessor.getValue(xyz);
+ (*grid)(i, j, k) = v;
+ }
+};
+
+template<> void writeGridVDB(const string &name, Grid<Vec3> *grid)
+{
+ debMsg("Writing vec3 grid " << grid->getName() << " to vdb file " << name, 1);
+
+ openvdb::initialize();
+ openvdb::Vec3SGrid::Ptr gridVDB = openvdb::Vec3SGrid::create();
+ // note , warning - velocity content currently not scaled...
+ gridVDB->setTransform(
+ openvdb::math::Transform::createLinearTransform(1. / grid->getSizeX())); // voxel size
+ openvdb::Vec3SGrid::Accessor accessor = gridVDB->getAccessor();
+
+ // MAC or regular vec grid?
+ if (grid->getType() & GridBase::TypeMAC)
+ gridVDB->setGridClass(openvdb::GRID_STAGGERED);
+ else
+ gridVDB->setGridClass(openvdb::GRID_UNKNOWN);
+
+ gridVDB->setName(grid->getName());
+
+ openvdb::io::File file(name);
+ FOR_IJK(*grid)
+ {
+ openvdb::Coord xyz(i, j, k);
+ Vec3 v = (*grid)(i, j, k);
+ openvdb::Vec3f vo((float)v[0], (float)v[1], (float)v[2]);
+ accessor.setValue(xyz, vo);
+ }
+
+ openvdb::GridPtrVec gridsVDB;
+ gridsVDB.push_back(gridVDB);
+
+ file.write(gridsVDB);
+ file.close();
+};
+
+template<> void readGridVDB(const string &name, Grid<Vec3> *grid)
+{
+ debMsg("Reading vec3 grid " << grid->getName() << " from vdb file " << name, 1);
+
+ openvdb::initialize();
+ openvdb::io::File file(name);
+ file.open();
+
+ openvdb::GridBase::Ptr baseGrid;
+ for (openvdb::io::File::NameIterator nameIter = file.beginName(); nameIter != file.endName();
+ ++nameIter) {
+# ifndef BLENDER
+ // Read in only the grid we are interested in.
+ if (nameIter.gridName() == grid->getName()) {
+ baseGrid = file.readGrid(nameIter.gridName());
+ }
+ else {
+ debMsg("skipping grid " << nameIter.gridName(), 1);
+ }
+# else
+ // For Blender, skip name check and pick first grid from loop
+ baseGrid = file.readGrid(nameIter.gridName());
+ break;
+# endif
+ }
+ file.close();
+ openvdb::Vec3SGrid::Ptr gridVDB = openvdb::gridPtrCast<openvdb::Vec3SGrid>(baseGrid);
+
+ openvdb::Vec3SGrid::Accessor accessor = gridVDB->getAccessor();
+
+ FOR_IJK(*grid)
+ {
+ openvdb::Coord xyz(i, j, k);
+ openvdb::Vec3f v = accessor.getValue(xyz);
+ (*grid)(i, j, k).x = (float)v[0];
+ (*grid)(i, j, k).y = (float)v[1];
+ (*grid)(i, j, k).z = (float)v[2];
+ }
+};
+
+#endif // OPENVDB==1
+
+//*****************************************************************************
+// npz file support (warning - read works, but write generates uncompressed npz; i.e. not
+// recommended for large volumes)
+
+template<class T> void writeGridNumpy(const string &name, Grid<T> *grid)
+{
+#if NO_ZLIB == 1
+ debMsg("file format not supported without zlib", 1);
+ return;
+#endif
+#if FLOATINGPOINT_PRECISION != 1
+ errMsg("writeGridNumpy: Double precision not yet supported");
+#endif
+
+ // find suffix to differentiate between npy <-> npz , TODO: check for actual "npy" string
+ std::string::size_type idx;
+ bool bUseNpz = false;
+ idx = name.rfind('.');
+ if (idx != std::string::npos) {
+ bUseNpz = name.substr(idx + 1) == "npz";
+ debMsg("Writing grid " << grid->getName() << " to npz file " << name, 1);
+ }
+ else {
+ debMsg("Writing grid " << grid->getName() << " to npy file " << name, 1);
+ }
+
+ // storage code
+ size_t uDim = 1;
+ if (grid->getType() & GridBase::TypeInt || grid->getType() & GridBase::TypeReal ||
+ grid->getType() & GridBase::TypeLevelset)
+ uDim = 1;
+ else if (grid->getType() & GridBase::TypeVec3 || grid->getType() & GridBase::TypeMAC)
+ uDim = 3;
+ else
+ errMsg("writeGridNumpy: unknown element type");
+
+ const std::vector<size_t> shape = {static_cast<size_t>(grid->getSizeZ()),
+ static_cast<size_t>(grid->getSizeY()),
+ static_cast<size_t>(grid->getSizeX()),
+ uDim};
+
+ if (bUseNpz) {
+ // note, the following generates a zip file without compression
+ if (grid->getType() & GridBase::TypeVec3 || grid->getType() & GridBase::TypeMAC) {
+ // cast to float* for export!
+ float *ptr = (float *)&((*grid)[0]);
+ cnpy::npz_save(name, "arr_0", ptr, shape, "w");
+ }
+ else {
+ T *ptr = &((*grid)[0]);
+ cnpy::npz_save(name, "arr_0", ptr, shape, "w");
+ }
+ }
+ else {
+ cnpy::npy_save(name, &grid[0], shape, "w");
+ }
+};
+
+template<class T> void readGridNumpy(const string &name, Grid<T> *grid)
+{
+#if NO_ZLIB == 1
+ debMsg("file format not supported without zlib", 1);
+ return;
+#endif
+#if FLOATINGPOINT_PRECISION != 1
+ errMsg("readGridNumpy: Double precision not yet supported");
+#endif
+
+ // find suffix to differentiate between npy <-> npz
+ std::string::size_type idx;
+ bool bUseNpz = false;
+ idx = name.rfind('.');
+ if (idx != std::string::npos) {
+ bUseNpz = name.substr(idx + 1) == "npz";
+ debMsg("Reading grid " << grid->getName() << " as npz file " << name, 1);
+ }
+ else {
+ debMsg("Reading grid " << grid->getName() << " as npy file " << name, 1);
+ }
+
+ cnpy::NpyArray gridArr;
+ if (bUseNpz) {
+ cnpy::npz_t fNpz = cnpy::npz_load(name);
+ gridArr = fNpz["arr_0"];
+ }
+ else {
+ gridArr = cnpy::npy_load(name);
+ }
+
+ // Check the file meta information
+ assertMsg(gridArr.shape[2] == grid->getSizeX() && gridArr.shape[1] == grid->getSizeY() &&
+ gridArr.shape[0] == grid->getSizeZ(),
+ "grid dim doesn't match, "
+ << Vec3(gridArr.shape[2], gridArr.shape[1], gridArr.shape[0]) << " vs "
+ << grid->getSize());
+ size_t uDim = 1;
+ if (grid->getType() & GridBase::TypeInt || grid->getType() & GridBase::TypeReal ||
+ grid->getType() & GridBase::TypeLevelset)
+ uDim = 1;
+ else if (grid->getType() & GridBase::TypeVec3 || grid->getType() & GridBase::TypeMAC)
+ uDim = 3;
+ else
+ errMsg("readGridNumpy: unknown element type");
+ assertMsg(gridArr.shape[3] == uDim,
+ "grid data dim doesn't match, " << gridArr.shape[3] << " vs " << uDim);
+
+ if (grid->getType() & GridBase::TypeVec3 || grid->getType() & GridBase::TypeMAC) {
+ // treated as float* for export , thus consider 3 elements
+ assertMsg(3 * gridArr.word_size == sizeof(T),
+ "vec3 grid data size doesn't match, " << 3 * gridArr.word_size << " vs "
+ << sizeof(T));
+ }
+ else {
+ assertMsg(gridArr.word_size == sizeof(T),
+ "grid data size doesn't match, " << gridArr.word_size << " vs " << sizeof(T));
+ }
+
+ // copy back, TODO: beautify...
+ memcpy(&((*grid)[0]),
+ gridArr.data<T>(),
+ sizeof(T) * grid->getSizeX() * grid->getSizeY() * grid->getSizeZ());
+};
+
+// adopted from getUniFileSize
+void getNpzFileSize(
+ const string &name, int &x, int &y, int &z, int *t = NULL, std::string *info = NULL)
+{
+ x = y = z = 0;
+#if NO_ZLIB != 1
+ debMsg("file format not supported without zlib", 1);
+ return;
+#endif
+#if FLOATINGPOINT_PRECISION != 1
+ errMsg("getNpzFileSize: Double precision not yet supported");
+#endif
+ // find suffix to differentiate between npy <-> npz
+ cnpy::NpyArray gridArr;
+ cnpy::npz_t fNpz = cnpy::npz_load(name);
+ gridArr = fNpz["arr_0"];
+
+ z = gridArr.shape[0];
+ y = gridArr.shape[1];
+ x = gridArr.shape[2];
+ if (t)
+ (*t) = 0; // unused for now
+}
+Vec3 getNpzFileSize(const string &name)
+{
+ int x, y, z;
+ getNpzFileSize(name, x, y, z);
+ return Vec3(Real(x), Real(y), Real(z));
+}
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "getNpzFileSize", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const string &name = _args.get<string>("name", 0, &_lock);
+ _retval = toPy(getNpzFileSize(name));
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "getNpzFileSize", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("getNpzFileSize", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_getNpzFileSize("", "getNpzFileSize", _W_2);
+extern "C" {
+void PbRegister_getNpzFileSize()
+{
+ KEEP_UNUSED(_RP_getNpzFileSize);
+}
+}
+
+//*****************************************************************************
+// helper functions
+
+void quantizeReal(Real &v, const Real step)
+{
+ int q = int(v / step + step * 0.5);
+ double qd = q * (double)step;
+ v = (Real)qd;
+}
+struct knQuantize : public KernelBase {
+ knQuantize(Grid<Real> &grid, Real step) : KernelBase(&grid, 0), grid(grid), step(step)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<Real> &grid, Real step) const
+ {
+ quantizeReal(grid(idx), step);
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return grid;
+ }
+ typedef Grid<Real> type0;
+ inline Real &getArg1()
+ {
+ return step;
+ }
+ typedef Real type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knQuantize ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, grid, step);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<Real> &grid;
+ Real step;
+};
+void quantizeGrid(Grid<Real> &grid, Real step)
+{
+ knQuantize(grid, step);
+}
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "quantizeGrid", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &grid = *_args.getPtr<Grid<Real>>("grid", 0, &_lock);
+ Real step = _args.get<Real>("step", 1, &_lock);
+ _retval = getPyNone();
+ quantizeGrid(grid, step);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "quantizeGrid", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("quantizeGrid", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_quantizeGrid("", "quantizeGrid", _W_3);
+extern "C" {
+void PbRegister_quantizeGrid()
+{
+ KEEP_UNUSED(_RP_quantizeGrid);
+}
+}
+
+struct knQuantizeVec3 : public KernelBase {
+ knQuantizeVec3(Grid<Vec3> &grid, Real step) : KernelBase(&grid, 0), grid(grid), step(step)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<Vec3> &grid, Real step) const
+ {
+ for (int c = 0; c < 3; ++c)
+ quantizeReal(grid(idx)[c], step);
+ }
+ inline Grid<Vec3> &getArg0()
+ {
+ return grid;
+ }
+ typedef Grid<Vec3> type0;
+ inline Real &getArg1()
+ {
+ return step;
+ }
+ typedef Real type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knQuantizeVec3 ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, grid, step);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<Vec3> &grid;
+ Real step;
+};
+void quantizeGridVec3(Grid<Vec3> &grid, Real step)
+{
+ knQuantizeVec3(grid, step);
+}
+static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "quantizeGridVec3", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Vec3> &grid = *_args.getPtr<Grid<Vec3>>("grid", 0, &_lock);
+ Real step = _args.get<Real>("step", 1, &_lock);
+ _retval = getPyNone();
+ quantizeGridVec3(grid, step);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "quantizeGridVec3", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("quantizeGridVec3", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_quantizeGridVec3("", "quantizeGridVec3", _W_4);
+extern "C" {
+void PbRegister_quantizeGridVec3()
+{
+ KEEP_UNUSED(_RP_quantizeGridVec3);
+}
+}
+
+// explicit instantiation
+template void writeGridRaw<int>(const string &name, Grid<int> *grid);
+template void writeGridRaw<Real>(const string &name, Grid<Real> *grid);
+template void writeGridRaw<Vec3>(const string &name, Grid<Vec3> *grid);
+template void writeGridUni<int>(const string &name, Grid<int> *grid);
+template void writeGridUni<Real>(const string &name, Grid<Real> *grid);
+template void writeGridUni<Vec3>(const string &name, Grid<Vec3> *grid);
+template void writeGridVol<int>(const string &name, Grid<int> *grid);
+template void writeGridVol<Vec3>(const string &name, Grid<Vec3> *grid);
+template void writeGridTxt<int>(const string &name, Grid<int> *grid);
+template void writeGridTxt<Real>(const string &name, Grid<Real> *grid);
+template void writeGridTxt<Vec3>(const string &name, Grid<Vec3> *grid);
+
+template void readGridRaw<int>(const string &name, Grid<int> *grid);
+template void readGridRaw<Real>(const string &name, Grid<Real> *grid);
+template void readGridRaw<Vec3>(const string &name, Grid<Vec3> *grid);
+template void readGridUni<int>(const string &name, Grid<int> *grid);
+template void readGridUni<Real>(const string &name, Grid<Real> *grid);
+template void readGridUni<Vec3>(const string &name, Grid<Vec3> *grid);
+template void readGridVol<int>(const string &name, Grid<int> *grid);
+template void readGridVol<Vec3>(const string &name, Grid<Vec3> *grid);
+
+template void readGrid4dUni<int>(
+ const string &name, Grid4d<int> *grid, int readTslice, Grid4d<int> *slice, void **fileHandle);
+template void readGrid4dUni<Real>(const string &name,
+ Grid4d<Real> *grid,
+ int readTslice,
+ Grid4d<Real> *slice,
+ void **fileHandle);
+template void readGrid4dUni<Vec3>(const string &name,
+ Grid4d<Vec3> *grid,
+ int readTslice,
+ Grid4d<Vec3> *slice,
+ void **fileHandle);
+template void readGrid4dUni<Vec4>(const string &name,
+ Grid4d<Vec4> *grid,
+ int readTslice,
+ Grid4d<Vec4> *slice,
+ void **fileHandle);
+template void writeGrid4dUni<int>(const string &name, Grid4d<int> *grid);
+template void writeGrid4dUni<Real>(const string &name, Grid4d<Real> *grid);
+template void writeGrid4dUni<Vec3>(const string &name, Grid4d<Vec3> *grid);
+template void writeGrid4dUni<Vec4>(const string &name, Grid4d<Vec4> *grid);
+
+template void readGrid4dRaw<int>(const string &name, Grid4d<int> *grid);
+template void readGrid4dRaw<Real>(const string &name, Grid4d<Real> *grid);
+template void readGrid4dRaw<Vec3>(const string &name, Grid4d<Vec3> *grid);
+template void readGrid4dRaw<Vec4>(const string &name, Grid4d<Vec4> *grid);
+template void writeGrid4dRaw<int>(const string &name, Grid4d<int> *grid);
+template void writeGrid4dRaw<Real>(const string &name, Grid4d<Real> *grid);
+template void writeGrid4dRaw<Vec3>(const string &name, Grid4d<Vec3> *grid);
+template void writeGrid4dRaw<Vec4>(const string &name, Grid4d<Vec4> *grid);
+
+template void writeGridNumpy<int>(const string &name, Grid<int> *grid);
+template void writeGridNumpy<Real>(const string &name, Grid<Real> *grid);
+template void writeGridNumpy<Vec3>(const string &name, Grid<Vec3> *grid);
+template void readGridNumpy<int>(const string &name, Grid<int> *grid);
+template void readGridNumpy<Real>(const string &name, Grid<Real> *grid);
+template void readGridNumpy<Vec3>(const string &name, Grid<Vec3> *grid);
+
+#if OPENVDB == 1
+template void writeGridVDB<int>(const string &name, Grid<int> *grid);
+template void writeGridVDB<Vec3>(const string &name, Grid<Vec3> *grid);
+template void writeGridVDB<Real>(const string &name, Grid<Real> *grid);
+
+template void readGridVDB<int>(const string &name, Grid<int> *grid);
+template void readGridVDB<Vec3>(const string &name, Grid<Vec3> *grid);
+template void readGridVDB<Real>(const string &name, Grid<Real> *grid);
+#endif // OPENVDB==1
+
+} // namespace Manta
+
+namespace Manta {
+
+}
diff --git a/extern/mantaflow/preprocessed/fileio/iomeshes.cpp b/extern/mantaflow/preprocessed/fileio/iomeshes.cpp
new file mode 100644
index 00000000000..fc57e2a8c2b
--- /dev/null
+++ b/extern/mantaflow/preprocessed/fileio/iomeshes.cpp
@@ -0,0 +1,490 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2016 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Loading and writing grids and meshes to disk
+ *
+ ******************************************************************************/
+
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#if NO_ZLIB != 1
+extern "C" {
+# include <zlib.h>
+}
+#endif
+
+#include "mantaio.h"
+#include "grid.h"
+#include "mesh.h"
+#include "vortexsheet.h"
+#include <cstring>
+
+using namespace std;
+
+namespace Manta {
+
+static const int STR_LEN_PDATA = 256;
+
+//! mdata uni header, v3 (similar to grid header and mdata header)
+typedef struct {
+ int dim; // number of vertices
+ int dimX, dimY, dimZ; // underlying solver resolution (all data in local coordinates!)
+ int elementType, bytesPerElement; // type id and byte size
+ char info[STR_LEN_PDATA]; // mantaflow build information
+ unsigned long long timestamp; // creation time
+} UniMeshHeader;
+
+//*****************************************************************************
+// conversion functions for double precision
+// (note - uni files always store single prec. values)
+//*****************************************************************************
+
+#if NO_ZLIB != 1
+
+template<class T>
+void mdataConvertWrite(gzFile &gzf, MeshDataImpl<T> &mdata, void *ptr, UniMeshHeader &head)
+{
+ errMsg("mdataConvertWrite: unknown type, not yet supported");
+}
+
+template<>
+void mdataConvertWrite(gzFile &gzf, MeshDataImpl<int> &mdata, void *ptr, UniMeshHeader &head)
+{
+ gzwrite(gzf, &head, sizeof(UniMeshHeader));
+ gzwrite(gzf, &mdata[0], sizeof(int) * head.dim);
+}
+template<>
+void mdataConvertWrite(gzFile &gzf, MeshDataImpl<double> &mdata, void *ptr, UniMeshHeader &head)
+{
+ head.bytesPerElement = sizeof(float);
+ gzwrite(gzf, &head, sizeof(UniMeshHeader));
+ float *ptrf = (float *)ptr;
+ for (int i = 0; i < mdata.size(); ++i, ++ptrf) {
+ *ptrf = (float)mdata[i];
+ }
+ gzwrite(gzf, ptr, sizeof(float) * head.dim);
+}
+template<>
+void mdataConvertWrite(gzFile &gzf, MeshDataImpl<Vec3> &mdata, void *ptr, UniMeshHeader &head)
+{
+ head.bytesPerElement = sizeof(Vector3D<float>);
+ gzwrite(gzf, &head, sizeof(UniMeshHeader));
+ float *ptrf = (float *)ptr;
+ for (int i = 0; i < mdata.size(); ++i) {
+ for (int c = 0; c < 3; ++c) {
+ *ptrf = (float)mdata[i][c];
+ ptrf++;
+ }
+ }
+ gzwrite(gzf, ptr, sizeof(Vector3D<float>) * head.dim);
+}
+
+template<class T>
+void mdataReadConvert(gzFile &gzf, MeshDataImpl<T> &grid, void *ptr, int bytesPerElement)
+{
+ errMsg("mdataReadConvert: unknown mdata type, not yet supported");
+}
+
+template<>
+void mdataReadConvert<int>(gzFile &gzf, MeshDataImpl<int> &mdata, void *ptr, int bytesPerElement)
+{
+ gzread(gzf, ptr, sizeof(int) * mdata.size());
+ assertMsg(bytesPerElement == sizeof(int),
+ "mdata element size doesn't match " << bytesPerElement << " vs " << sizeof(int));
+ // int dont change in double precision mode - copy over
+ memcpy(&(mdata[0]), ptr, sizeof(int) * mdata.size());
+}
+
+template<>
+void mdataReadConvert<double>(gzFile &gzf,
+ MeshDataImpl<double> &mdata,
+ void *ptr,
+ int bytesPerElement)
+{
+ gzread(gzf, ptr, sizeof(float) * mdata.size());
+ assertMsg(bytesPerElement == sizeof(float),
+ "mdata element size doesn't match " << bytesPerElement << " vs " << sizeof(float));
+ float *ptrf = (float *)ptr;
+ for (int i = 0; i < mdata.size(); ++i, ++ptrf) {
+ mdata[i] = double(*ptrf);
+ }
+}
+
+template<>
+void mdataReadConvert<Vec3>(gzFile &gzf, MeshDataImpl<Vec3> &mdata, void *ptr, int bytesPerElement)
+{
+ gzread(gzf, ptr, sizeof(Vector3D<float>) * mdata.size());
+ assertMsg(bytesPerElement == sizeof(Vector3D<float>),
+ "mdata element size doesn't match " << bytesPerElement << " vs "
+ << sizeof(Vector3D<float>));
+ float *ptrf = (float *)ptr;
+ for (int i = 0; i < mdata.size(); ++i) {
+ Vec3 v;
+ for (int c = 0; c < 3; ++c) {
+ v[c] = double(*ptrf);
+ ptrf++;
+ }
+ mdata[i] = v;
+ }
+}
+
+#endif // NO_ZLIB!=1
+
+//*****************************************************************************
+// mesh data
+//*****************************************************************************
+
+void readBobjFile(const string &name, Mesh *mesh, bool append)
+{
+ debMsg("reading mesh file " << name, 1);
+ if (!append)
+ mesh->clear();
+ else
+ errMsg("readBobj: append not yet implemented!");
+
+#if NO_ZLIB != 1
+ const Real dx = mesh->getParent()->getDx();
+ const Vec3 gs = toVec3(mesh->getParent()->getGridSize());
+
+ gzFile gzf = gzopen(name.c_str(), "rb1"); // do some compression
+ if (!gzf)
+ errMsg("readBobj: unable to open file");
+
+ // read vertices
+ int num = 0;
+ gzread(gzf, &num, sizeof(int));
+ mesh->resizeNodes(num);
+ debMsg("read mesh , verts " << num, 1);
+ for (int i = 0; i < num; i++) {
+ Vector3D<float> pos;
+ gzread(gzf, &pos.value[0], sizeof(float) * 3);
+ mesh->nodes(i).pos = toVec3(pos);
+
+ // convert to grid space
+ mesh->nodes(i).pos /= dx;
+ mesh->nodes(i).pos += gs * 0.5;
+ }
+
+ // normals
+ num = 0;
+ gzread(gzf, &num, sizeof(int));
+ for (int i = 0; i < num; i++) {
+ Vector3D<float> pos;
+ gzread(gzf, &pos.value[0], sizeof(float) * 3);
+ mesh->nodes(i).normal = toVec3(pos);
+ }
+
+ // read tris
+ num = 0;
+ gzread(gzf, &num, sizeof(int));
+ mesh->resizeTris(num);
+ for (int t = 0; t < num; t++) {
+ for (int j = 0; j < 3; j++) {
+ int trip = 0;
+ gzread(gzf, &trip, sizeof(int));
+ mesh->tris(t).c[j] = trip;
+ }
+ }
+ // note - vortex sheet info ignored for now... (see writeBobj)
+ gzclose(gzf);
+ debMsg("read mesh , triangles " << mesh->numTris() << ", vertices " << mesh->numNodes() << " ",
+ 1);
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+}
+
+void writeBobjFile(const string &name, Mesh *mesh)
+{
+ debMsg("writing mesh file " << name, 1);
+#if NO_ZLIB != 1
+ const Real dx = mesh->getParent()->getDx();
+ const Vec3i gs = mesh->getParent()->getGridSize();
+
+ gzFile gzf = gzopen(name.c_str(), "wb1"); // do some compression
+ if (!gzf)
+ errMsg("writeBobj: unable to open file");
+
+ // write vertices
+ int numVerts = mesh->numNodes();
+ gzwrite(gzf, &numVerts, sizeof(int));
+ for (int i = 0; i < numVerts; i++) {
+ Vector3D<float> pos = toVec3f(mesh->nodes(i).pos);
+ // normalize to unit cube around 0
+ pos -= toVec3f(gs) * 0.5;
+ pos *= dx;
+ gzwrite(gzf, &pos.value[0], sizeof(float) * 3);
+ }
+
+ // normals
+ mesh->computeVertexNormals();
+ gzwrite(gzf, &numVerts, sizeof(int));
+ for (int i = 0; i < numVerts; i++) {
+ Vector3D<float> pos = toVec3f(mesh->nodes(i).normal);
+ gzwrite(gzf, &pos.value[0], sizeof(float) * 3);
+ }
+
+ // write tris
+ int numTris = mesh->numTris();
+ gzwrite(gzf, &numTris, sizeof(int));
+ for (int t = 0; t < numTris; t++) {
+ for (int j = 0; j < 3; j++) {
+ int trip = mesh->tris(t).c[j];
+ gzwrite(gzf, &trip, sizeof(int));
+ }
+ }
+
+ // per vertex smoke densities
+ if (mesh->getType() == Mesh::TypeVortexSheet) {
+ VortexSheetMesh *vmesh = (VortexSheetMesh *)mesh;
+ int densId[4] = {0, 'v', 'd', 'e'};
+ gzwrite(gzf, &densId[0], sizeof(int) * 4);
+
+ // compute densities
+ vector<float> triDensity(numTris);
+ for (int tri = 0; tri < numTris; tri++) {
+ Real area = vmesh->getFaceArea(tri);
+ if (area > 0)
+ triDensity[tri] = vmesh->sheet(tri).smokeAmount;
+ }
+
+ // project triangle data to vertex
+ vector<int> triPerVertex(numVerts);
+ vector<float> density(numVerts);
+ for (int tri = 0; tri < numTris; tri++) {
+ for (int c = 0; c < 3; c++) {
+ int vertex = mesh->tris(tri).c[c];
+ density[vertex] += triDensity[tri];
+ triPerVertex[vertex]++;
+ }
+ }
+
+ // averaged smoke densities
+ for (int point = 0; point < numVerts; point++) {
+ float dens = 0;
+ if (triPerVertex[point] > 0)
+ dens = density[point] / triPerVertex[point];
+ gzwrite(gzf, &dens, sizeof(float));
+ }
+ }
+
+ // vertex flags
+ if (mesh->getType() == Mesh::TypeVortexSheet) {
+ int Id[4] = {0, 'v', 'x', 'f'};
+ gzwrite(gzf, &Id[0], sizeof(int) * 4);
+
+ // averaged smoke densities
+ for (int point = 0; point < numVerts; point++) {
+ float alpha = (mesh->nodes(point).flags & Mesh::NfMarked) ? 1 : 0;
+ gzwrite(gzf, &alpha, sizeof(float));
+ }
+ }
+
+ gzclose(gzf);
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+}
+
+void readObjFile(const std::string &name, Mesh *mesh, bool append)
+{
+ ifstream ifs(name.c_str());
+
+ if (!ifs.good())
+ errMsg("can't open file '" + name + "'");
+
+ if (!append)
+ mesh->clear();
+ int nodebase = mesh->numNodes();
+ int cnt = nodebase;
+ while (ifs.good() && !ifs.eof()) {
+ string id;
+ ifs >> id;
+
+ if (id[0] == '#') {
+ // comment
+ getline(ifs, id);
+ continue;
+ }
+ if (id == "vt") {
+ // tex coord, ignore
+ }
+ else if (id == "vn") {
+ // normals
+ if (!mesh->numNodes())
+ errMsg("invalid amount of nodes");
+ Node n = mesh->nodes(cnt);
+ ifs >> n.normal.x >> n.normal.y >> n.normal.z;
+ cnt++;
+ }
+ else if (id == "v") {
+ // vertex
+ Node n;
+ ifs >> n.pos.x >> n.pos.y >> n.pos.z;
+ mesh->addNode(n);
+ }
+ else if (id == "g") {
+ // group
+ string group;
+ ifs >> group;
+ }
+ else if (id == "f") {
+ // face
+ string face;
+ Triangle t;
+ for (int i = 0; i < 3; i++) {
+ ifs >> face;
+ if (face.find('/') != string::npos)
+ face = face.substr(0, face.find('/')); // ignore other indices
+ int idx = atoi(face.c_str()) - 1;
+ if (idx < 0)
+ errMsg("invalid face encountered");
+ idx += nodebase;
+ t.c[i] = idx;
+ }
+ mesh->addTri(t);
+ }
+ else {
+ // whatever, ignore
+ }
+ // kill rest of line
+ getline(ifs, id);
+ }
+ ifs.close();
+}
+
+// write regular .obj file, in line with bobj.gz output (but only verts & tris for now)
+void writeObjFile(const string &name, Mesh *mesh)
+{
+ const Real dx = mesh->getParent()->getDx();
+ const Vec3i gs = mesh->getParent()->getGridSize();
+
+ ofstream ofs(name.c_str());
+ if (!ofs.good())
+ errMsg("writeObjFile: can't open file " << name);
+
+ ofs << "o MantaMesh\n";
+
+ // write vertices
+ int numVerts = mesh->numNodes();
+ for (int i = 0; i < numVerts; i++) {
+ Vector3D<float> pos = toVec3f(mesh->nodes(i).pos);
+ // normalize to unit cube around 0
+ pos -= toVec3f(gs) * 0.5;
+ pos *= dx;
+ ofs << "v " << pos.value[0] << " " << pos.value[1] << " " << pos.value[2] << " "
+ << "\n";
+ }
+
+ // write normals
+ for (int i = 0; i < numVerts; i++) {
+ Vector3D<float> n = toVec3f(mesh->nodes(i).normal);
+ // normalize to unit cube around 0
+ ofs << "vn " << n.value[0] << " " << n.value[1] << " " << n.value[2] << " "
+ << "\n";
+ }
+
+ // write tris
+ int numTris = mesh->numTris();
+ for (int t = 0; t < numTris; t++) {
+ ofs << "f " << (mesh->tris(t).c[0] + 1) << " " << (mesh->tris(t).c[1] + 1) << " "
+ << (mesh->tris(t).c[2] + 1) << " "
+ << "\n";
+ }
+
+ ofs.close();
+}
+
+template<class T> void readMdataUni(const std::string &name, MeshDataImpl<T> *mdata)
+{
+ debMsg("reading mesh data " << mdata->getName() << " from uni file " << name, 1);
+
+#if NO_ZLIB != 1
+ gzFile gzf = gzopen(name.c_str(), "rb");
+ if (!gzf)
+ errMsg("can't open file " << name);
+
+ char ID[5] = {0, 0, 0, 0, 0};
+ gzread(gzf, ID, 4);
+
+ if (!strcmp(ID, "MD01")) {
+ UniMeshHeader head;
+ assertMsg(gzread(gzf, &head, sizeof(UniMeshHeader)) == sizeof(UniMeshHeader),
+ "can't read file, no header present");
+ assertMsg(head.dim == mdata->size(), "mdata size doesn't match");
+# if FLOATINGPOINT_PRECISION != 1
+ MeshDataImpl<T> temp(mdata->getParent());
+ temp.resize(mdata->size());
+ mdataReadConvert<T>(gzf, *mdata, &(temp[0]), head.bytesPerElement);
+# else
+ assertMsg(((head.bytesPerElement == sizeof(T)) && (head.elementType == 1)),
+ "mdata type doesn't match");
+ IndexInt bytes = sizeof(T) * head.dim;
+ IndexInt readBytes = gzread(gzf, &(mdata->get(0)), sizeof(T) * head.dim);
+ assertMsg(bytes == readBytes,
+ "can't read uni file, stream length does not match, " << bytes << " vs "
+ << readBytes);
+# endif
+ }
+ gzclose(gzf);
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+}
+
+template<class T> void writeMdataUni(const std::string &name, MeshDataImpl<T> *mdata)
+{
+ debMsg("writing mesh data " << mdata->getName() << " to uni file " << name, 1);
+
+#if NO_ZLIB != 1
+ char ID[5] = "MD01";
+ UniMeshHeader head;
+ head.dim = mdata->size();
+ head.bytesPerElement = sizeof(T);
+ head.elementType = 1; // 1 for mesh data, todo - add sub types?
+ snprintf(head.info, STR_LEN_PDATA, "%s", buildInfoString().c_str());
+ MuTime stamp;
+ head.timestamp = stamp.time;
+
+ gzFile gzf = gzopen(name.c_str(), "wb1"); // do some compression
+ if (!gzf)
+ errMsg("can't open file " << name);
+ gzwrite(gzf, ID, 4);
+
+# if FLOATINGPOINT_PRECISION != 1
+ // always write float values, even if compiled with double precision (as for grids)
+ MeshDataImpl<T> temp(mdata->getParent());
+ temp.resize(mdata->size());
+ mdataConvertWrite(gzf, *mdata, &(temp[0]), head);
+# else
+ gzwrite(gzf, &head, sizeof(UniMeshHeader));
+ gzwrite(gzf, &(mdata->get(0)), sizeof(T) * head.dim);
+# endif
+ gzclose(gzf);
+
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+};
+
+// explicit instantiation
+template void writeMdataUni<int>(const std::string &name, MeshDataImpl<int> *mdata);
+template void writeMdataUni<Real>(const std::string &name, MeshDataImpl<Real> *mdata);
+template void writeMdataUni<Vec3>(const std::string &name, MeshDataImpl<Vec3> *mdata);
+template void readMdataUni<int>(const std::string &name, MeshDataImpl<int> *mdata);
+template void readMdataUni<Real>(const std::string &name, MeshDataImpl<Real> *mdata);
+template void readMdataUni<Vec3>(const std::string &name, MeshDataImpl<Vec3> *mdata);
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/fileio/ioparticles.cpp b/extern/mantaflow/preprocessed/fileio/ioparticles.cpp
new file mode 100644
index 00000000000..432cbc9f100
--- /dev/null
+++ b/extern/mantaflow/preprocessed/fileio/ioparticles.cpp
@@ -0,0 +1,342 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2016 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Loading and writing grids and meshes to disk
+ *
+ ******************************************************************************/
+
+#include <iostream>
+#include <fstream>
+#include <cstdlib>
+#include <cstring>
+#if NO_ZLIB != 1
+extern "C" {
+# include <zlib.h>
+}
+#endif
+
+#include "mantaio.h"
+#include "grid.h"
+#include "particle.h"
+#include "vector4d.h"
+#include "grid4d.h"
+
+using namespace std;
+
+namespace Manta {
+
+static const int STR_LEN_PDATA = 256;
+
+//! pdata uni header, v3 (similar to grid header)
+typedef struct {
+ int dim; // number of partilces
+ int dimX, dimY, dimZ; // underlying solver resolution (all data in local coordinates!)
+ int elementType, bytesPerElement; // type id and byte size
+ char info[STR_LEN_PDATA]; // mantaflow build information
+ unsigned long long timestamp; // creation time
+} UniPartHeader;
+
+//*****************************************************************************
+// conversion functions for double precision
+// (note - uni files always store single prec. values)
+//*****************************************************************************
+
+#if NO_ZLIB != 1
+
+template<class T>
+void pdataConvertWrite(gzFile &gzf, ParticleDataImpl<T> &pdata, void *ptr, UniPartHeader &head)
+{
+ errMsg("pdataConvertWrite: unknown type, not yet supported");
+}
+
+template<>
+void pdataConvertWrite(gzFile &gzf, ParticleDataImpl<int> &pdata, void *ptr, UniPartHeader &head)
+{
+ gzwrite(gzf, &head, sizeof(UniPartHeader));
+ gzwrite(gzf, &pdata[0], sizeof(int) * head.dim);
+}
+template<>
+void pdataConvertWrite(gzFile &gzf,
+ ParticleDataImpl<double> &pdata,
+ void *ptr,
+ UniPartHeader &head)
+{
+ head.bytesPerElement = sizeof(float);
+ gzwrite(gzf, &head, sizeof(UniPartHeader));
+ float *ptrf = (float *)ptr;
+ for (int i = 0; i < pdata.size(); ++i, ++ptrf) {
+ *ptrf = (float)pdata[i];
+ }
+ gzwrite(gzf, ptr, sizeof(float) * head.dim);
+}
+template<>
+void pdataConvertWrite(gzFile &gzf, ParticleDataImpl<Vec3> &pdata, void *ptr, UniPartHeader &head)
+{
+ head.bytesPerElement = sizeof(Vector3D<float>);
+ gzwrite(gzf, &head, sizeof(UniPartHeader));
+ float *ptrf = (float *)ptr;
+ for (int i = 0; i < pdata.size(); ++i) {
+ for (int c = 0; c < 3; ++c) {
+ *ptrf = (float)pdata[i][c];
+ ptrf++;
+ }
+ }
+ gzwrite(gzf, ptr, sizeof(Vector3D<float>) * head.dim);
+}
+
+template<class T>
+void pdataReadConvert(gzFile &gzf, ParticleDataImpl<T> &grid, void *ptr, int bytesPerElement)
+{
+ errMsg("pdataReadConvert: unknown pdata type, not yet supported");
+}
+
+template<>
+void pdataReadConvert<int>(gzFile &gzf,
+ ParticleDataImpl<int> &pdata,
+ void *ptr,
+ int bytesPerElement)
+{
+ gzread(gzf, ptr, sizeof(int) * pdata.size());
+ assertMsg(bytesPerElement == sizeof(int),
+ "pdata element size doesn't match " << bytesPerElement << " vs " << sizeof(int));
+ // int dont change in double precision mode - copy over
+ memcpy(&(pdata[0]), ptr, sizeof(int) * pdata.size());
+}
+
+template<>
+void pdataReadConvert<double>(gzFile &gzf,
+ ParticleDataImpl<double> &pdata,
+ void *ptr,
+ int bytesPerElement)
+{
+ gzread(gzf, ptr, sizeof(float) * pdata.size());
+ assertMsg(bytesPerElement == sizeof(float),
+ "pdata element size doesn't match " << bytesPerElement << " vs " << sizeof(float));
+ float *ptrf = (float *)ptr;
+ for (int i = 0; i < pdata.size(); ++i, ++ptrf) {
+ pdata[i] = double(*ptrf);
+ }
+}
+
+template<>
+void pdataReadConvert<Vec3>(gzFile &gzf,
+ ParticleDataImpl<Vec3> &pdata,
+ void *ptr,
+ int bytesPerElement)
+{
+ gzread(gzf, ptr, sizeof(Vector3D<float>) * pdata.size());
+ assertMsg(bytesPerElement == sizeof(Vector3D<float>),
+ "pdata element size doesn't match " << bytesPerElement << " vs "
+ << sizeof(Vector3D<float>));
+ float *ptrf = (float *)ptr;
+ for (int i = 0; i < pdata.size(); ++i) {
+ Vec3 v;
+ for (int c = 0; c < 3; ++c) {
+ v[c] = double(*ptrf);
+ ptrf++;
+ }
+ pdata[i] = v;
+ }
+}
+
+#endif // NO_ZLIB!=1
+
+//*****************************************************************************
+// particles and particle data
+//*****************************************************************************
+
+static const int PartSysSize = sizeof(Vector3D<float>) + sizeof(int);
+
+void writeParticlesUni(const std::string &name, const BasicParticleSystem *parts)
+{
+ debMsg("writing particles " << parts->getName() << " to uni file " << name, 1);
+
+#if NO_ZLIB != 1
+ char ID[5] = "PB02";
+ UniPartHeader head;
+ head.dim = parts->size();
+ Vec3i gridSize = parts->getParent()->getGridSize();
+ head.dimX = gridSize.x;
+ head.dimY = gridSize.y;
+ head.dimZ = gridSize.z;
+ head.bytesPerElement = PartSysSize;
+ head.elementType = 0; // 0 for base data
+ snprintf(head.info, STR_LEN_PDATA, "%s", buildInfoString().c_str());
+ MuTime stamp;
+ head.timestamp = stamp.time;
+
+ gzFile gzf = gzopen(name.c_str(), "wb1"); // do some compression
+ if (!gzf)
+ errMsg("can't open file " << name);
+
+ gzwrite(gzf, ID, 4);
+# if FLOATINGPOINT_PRECISION != 1
+ // warning - hard coded conversion of byte size here...
+ gzwrite(gzf, &head, sizeof(UniPartHeader));
+ for (int i = 0; i < parts->size(); ++i) {
+ Vector3D<float> pos = toVec3f((*parts)[i].pos);
+ int flag = (*parts)[i].flag;
+ gzwrite(gzf, &pos, sizeof(Vector3D<float>));
+ gzwrite(gzf, &flag, sizeof(int));
+ }
+# else
+ assertMsg(sizeof(BasicParticleData) == PartSysSize, "particle data size doesn't match");
+ gzwrite(gzf, &head, sizeof(UniPartHeader));
+ gzwrite(gzf, &((*parts)[0]), PartSysSize * head.dim);
+# endif
+ gzclose(gzf);
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+};
+
+void readParticlesUni(const std::string &name, BasicParticleSystem *parts)
+{
+ debMsg("reading particles " << parts->getName() << " from uni file " << name, 1);
+
+#if NO_ZLIB != 1
+ gzFile gzf = gzopen(name.c_str(), "rb");
+ if (!gzf)
+ errMsg("can't open file " << name);
+
+ char ID[5] = {0, 0, 0, 0, 0};
+ gzread(gzf, ID, 4);
+
+ if (!strcmp(ID, "PB01")) {
+ errMsg("particle uni file format v01 not supported anymore");
+ }
+ else if (!strcmp(ID, "PB02")) {
+ // current file format
+ UniPartHeader head;
+ assertMsg(gzread(gzf, &head, sizeof(UniPartHeader)) == sizeof(UniPartHeader),
+ "can't read file, no header present");
+ assertMsg(((head.bytesPerElement == PartSysSize) && (head.elementType == 0)),
+ "particle type doesn't match");
+
+ // re-allocate all data
+ parts->resizeAll(head.dim);
+
+ assertMsg(head.dim == parts->size(), "particle size doesn't match");
+# if FLOATINGPOINT_PRECISION != 1
+ for (int i = 0; i < parts->size(); ++i) {
+ Vector3D<float> pos;
+ int flag;
+ gzread(gzf, &pos, sizeof(Vector3D<float>));
+ gzread(gzf, &flag, sizeof(int));
+ (*parts)[i].pos = toVec3d(pos);
+ (*parts)[i].flag = flag;
+ }
+# else
+ assertMsg(sizeof(BasicParticleData) == PartSysSize, "particle data size doesn't match");
+ IndexInt bytes = PartSysSize * head.dim;
+ IndexInt readBytes = gzread(gzf, &(parts->getData()[0]), bytes);
+ assertMsg(bytes == readBytes,
+ "can't read uni file, stream length does not match, " << bytes << " vs "
+ << readBytes);
+# endif
+
+ parts->transformPositions(Vec3i(head.dimX, head.dimY, head.dimZ),
+ parts->getParent()->getGridSize());
+ }
+ gzclose(gzf);
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+};
+
+template<class T> void writePdataUni(const std::string &name, ParticleDataImpl<T> *pdata)
+{
+ debMsg("writing particle data " << pdata->getName() << " to uni file " << name, 1);
+
+#if NO_ZLIB != 1
+ char ID[5] = "PD01";
+ UniPartHeader head;
+ head.dim = pdata->size();
+ Vec3i gridSize = pdata->getParent()->getGridSize();
+ head.dimX = gridSize.x;
+ head.dimY = gridSize.y;
+ head.dimZ = gridSize.z;
+ head.bytesPerElement = sizeof(T);
+ head.elementType = 1; // 1 for particle data, todo - add sub types?
+ snprintf(head.info, STR_LEN_PDATA, "%s", buildInfoString().c_str());
+ MuTime stamp;
+ head.timestamp = stamp.time;
+
+ gzFile gzf = gzopen(name.c_str(), "wb1"); // do some compression
+ if (!gzf)
+ errMsg("can't open file " << name);
+ gzwrite(gzf, ID, 4);
+
+# if FLOATINGPOINT_PRECISION != 1
+ // always write float values, even if compiled with double precision (as for grids)
+ ParticleDataImpl<T> temp(pdata->getParent());
+ temp.resize(pdata->size());
+ pdataConvertWrite(gzf, *pdata, &(temp[0]), head);
+# else
+ gzwrite(gzf, &head, sizeof(UniPartHeader));
+ gzwrite(gzf, &(pdata->get(0)), sizeof(T) * head.dim);
+# endif
+ gzclose(gzf);
+
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+};
+
+template<class T> void readPdataUni(const std::string &name, ParticleDataImpl<T> *pdata)
+{
+ debMsg("reading particle data " << pdata->getName() << " from uni file " << name, 1);
+
+#if NO_ZLIB != 1
+ gzFile gzf = gzopen(name.c_str(), "rb");
+ if (!gzf)
+ errMsg("can't open file " << name);
+
+ char ID[5] = {0, 0, 0, 0, 0};
+ gzread(gzf, ID, 4);
+
+ if (!strcmp(ID, "PD01")) {
+ UniPartHeader head;
+ assertMsg(gzread(gzf, &head, sizeof(UniPartHeader)) == sizeof(UniPartHeader),
+ "can't read file, no header present");
+ assertMsg(head.dim == pdata->size(), "pdata size doesn't match");
+# if FLOATINGPOINT_PRECISION != 1
+ ParticleDataImpl<T> temp(pdata->getParent());
+ temp.resize(pdata->size());
+ pdataReadConvert<T>(gzf, *pdata, &(temp[0]), head.bytesPerElement);
+# else
+ assertMsg(((head.bytesPerElement == sizeof(T)) && (head.elementType == 1)),
+ "pdata type doesn't match");
+ IndexInt bytes = sizeof(T) * head.dim;
+ IndexInt readBytes = gzread(gzf, &(pdata->get(0)), sizeof(T) * head.dim);
+ assertMsg(bytes == readBytes,
+ "can't read uni file, stream length does not match, " << bytes << " vs "
+ << readBytes);
+# endif
+ }
+ gzclose(gzf);
+#else
+ debMsg("file format not supported without zlib", 1);
+#endif
+}
+
+// explicit instantiation
+template void writePdataUni<int>(const std::string &name, ParticleDataImpl<int> *pdata);
+template void writePdataUni<Real>(const std::string &name, ParticleDataImpl<Real> *pdata);
+template void writePdataUni<Vec3>(const std::string &name, ParticleDataImpl<Vec3> *pdata);
+template void readPdataUni<int>(const std::string &name, ParticleDataImpl<int> *pdata);
+template void readPdataUni<Real>(const std::string &name, ParticleDataImpl<Real> *pdata);
+template void readPdataUni<Vec3>(const std::string &name, ParticleDataImpl<Vec3> *pdata);
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/fileio/mantaio.h b/extern/mantaflow/preprocessed/fileio/mantaio.h
new file mode 100644
index 00000000000..8bb0a5af6a4
--- /dev/null
+++ b/extern/mantaflow/preprocessed/fileio/mantaio.h
@@ -0,0 +1,81 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Loading and writing grids and meshes to disk
+ *
+ ******************************************************************************/
+
+#ifndef _FILEIO_H
+#define _FILEIO_H
+
+#include <string>
+
+namespace Manta {
+
+// forward decl.
+class Mesh;
+class FlagGrid;
+template<class T> class Grid;
+template<class T> class Grid4d;
+class BasicParticleSystem;
+template<class T> class ParticleDataImpl;
+template<class T> class MeshDataImpl;
+
+void writeObjFile(const std::string &name, Mesh *mesh);
+void writeBobjFile(const std::string &name, Mesh *mesh);
+void readObjFile(const std::string &name, Mesh *mesh, bool append);
+void readBobjFile(const std::string &name, Mesh *mesh, bool append);
+
+template<class T> void writeGridRaw(const std::string &name, Grid<T> *grid);
+template<class T> void writeGridUni(const std::string &name, Grid<T> *grid);
+template<class T> void writeGridVol(const std::string &name, Grid<T> *grid);
+template<class T> void writeGridTxt(const std::string &name, Grid<T> *grid);
+
+#if OPENVDB == 1
+template<class T> void writeGridVDB(const std::string &name, Grid<T> *grid);
+template<class T> void readGridVDB(const std::string &name, Grid<T> *grid);
+#endif // OPENVDB==1
+template<class T> void writeGridNumpy(const std::string &name, Grid<T> *grid);
+template<class T> void readGridNumpy(const std::string &name, Grid<T> *grid);
+
+template<class T> void readGridUni(const std::string &name, Grid<T> *grid);
+template<class T> void readGridRaw(const std::string &name, Grid<T> *grid);
+template<class T> void readGridVol(const std::string &name, Grid<T> *grid);
+
+template<class T> void writeGrid4dUni(const std::string &name, Grid4d<T> *grid);
+template<class T>
+void readGrid4dUni(const std::string &name,
+ Grid4d<T> *grid,
+ int readTslice = -1,
+ Grid4d<T> *slice = NULL,
+ void **fileHandle = NULL);
+void readGrid4dUniCleanup(void **fileHandle);
+template<class T> void writeGrid4dRaw(const std::string &name, Grid4d<T> *grid);
+template<class T> void readGrid4dRaw(const std::string &name, Grid4d<T> *grid);
+
+void writeParticlesUni(const std::string &name, const BasicParticleSystem *parts);
+void readParticlesUni(const std::string &name, BasicParticleSystem *parts);
+
+template<class T> void writePdataUni(const std::string &name, ParticleDataImpl<T> *pdata);
+template<class T> void readPdataUni(const std::string &name, ParticleDataImpl<T> *pdata);
+
+template<class T> void writeMdataUni(const std::string &name, MeshDataImpl<T> *mdata);
+template<class T> void readMdataUni(const std::string &name, MeshDataImpl<T> *mdata);
+
+void getUniFileSize(
+ const std::string &name, int &x, int &y, int &z, int *t = NULL, std::string *info = NULL);
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/fileio/mantaio.h.reg.cpp b/extern/mantaflow/preprocessed/fileio/mantaio.h.reg.cpp
new file mode 100644
index 00000000000..6520786181e
--- /dev/null
+++ b/extern/mantaflow/preprocessed/fileio/mantaio.h.reg.cpp
@@ -0,0 +1,13 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "fileio/mantaio.h"
+namespace Manta {
+extern "C" {
+void PbRegister_file_18()
+{
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/fluidsolver.cpp b/extern/mantaflow/preprocessed/fluidsolver.cpp
new file mode 100644
index 00000000000..814d5444b15
--- /dev/null
+++ b/extern/mantaflow/preprocessed/fluidsolver.cpp
@@ -0,0 +1,397 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Main class for the fluid solver
+ *
+ ******************************************************************************/
+
+#include "fluidsolver.h"
+#include "grid.h"
+#include <sstream>
+#include <fstream>
+
+using namespace std;
+namespace Manta {
+
+//******************************************************************************
+// Gridstorage-related members
+
+template<class T> void FluidSolver::GridStorage<T>::free()
+{
+ if (used != 0)
+ errMsg("can't clean grid cache, some grids are still in use");
+ for (size_t i = 0; i < grids.size(); i++)
+ delete[] grids[i];
+ grids.clear();
+}
+template<class T> T *FluidSolver::GridStorage<T>::get(Vec3i size)
+{
+ if ((int)grids.size() <= used) {
+ debMsg("FluidSolver::GridStorage::get Allocating new " << size.x << "," << size.y << ","
+ << size.z << " ",
+ 3);
+ grids.push_back(new T[(long long)(size.x) * size.y * size.z]);
+ }
+ if (used > 200)
+ errMsg("too many temp grids used -- are they released properly ?");
+ return grids[used++];
+}
+template<class T> void FluidSolver::GridStorage<T>::release(T *ptr)
+{
+ // rewrite pointer, as it may have changed due to swap operations
+ used--;
+ if (used < 0)
+ errMsg("temp grid inconsistency");
+ grids[used] = ptr;
+}
+
+template<> int *FluidSolver::getGridPointer<int>()
+{
+ return mGridsInt.get(mGridSize);
+}
+template<> Real *FluidSolver::getGridPointer<Real>()
+{
+ return mGridsReal.get(mGridSize);
+}
+template<> Vec3 *FluidSolver::getGridPointer<Vec3>()
+{
+ return mGridsVec.get(mGridSize);
+}
+template<> Vec4 *FluidSolver::getGridPointer<Vec4>()
+{
+ return mGridsVec4.get(mGridSize);
+}
+template<> void FluidSolver::freeGridPointer<int>(int *ptr)
+{
+ mGridsInt.release(ptr);
+}
+template<> void FluidSolver::freeGridPointer<Real>(Real *ptr)
+{
+ mGridsReal.release(ptr);
+}
+template<> void FluidSolver::freeGridPointer<Vec3>(Vec3 *ptr)
+{
+ mGridsVec.release(ptr);
+}
+template<> void FluidSolver::freeGridPointer<Vec4>(Vec4 *ptr)
+{
+ mGridsVec4.release(ptr);
+}
+
+// 4d data (work around for now, convert to 1d length)
+
+template<> int *FluidSolver::getGrid4dPointer<int>()
+{
+ return mGrids4dInt.get(Vec3i(mGridSize[0] * mGridSize[1], mGridSize[2], mFourthDim));
+}
+template<> Real *FluidSolver::getGrid4dPointer<Real>()
+{
+ return mGrids4dReal.get(Vec3i(mGridSize[0] * mGridSize[1], mGridSize[2], mFourthDim));
+}
+template<> Vec3 *FluidSolver::getGrid4dPointer<Vec3>()
+{
+ return mGrids4dVec.get(Vec3i(mGridSize[0] * mGridSize[1], mGridSize[2], mFourthDim));
+}
+template<> Vec4 *FluidSolver::getGrid4dPointer<Vec4>()
+{
+ return mGrids4dVec4.get(Vec3i(mGridSize[0] * mGridSize[1], mGridSize[2], mFourthDim));
+}
+template<> void FluidSolver::freeGrid4dPointer<int>(int *ptr)
+{
+ mGrids4dInt.release(ptr);
+}
+template<> void FluidSolver::freeGrid4dPointer<Real>(Real *ptr)
+{
+ mGrids4dReal.release(ptr);
+}
+template<> void FluidSolver::freeGrid4dPointer<Vec3>(Vec3 *ptr)
+{
+ mGrids4dVec.release(ptr);
+}
+template<> void FluidSolver::freeGrid4dPointer<Vec4>(Vec4 *ptr)
+{
+ mGrids4dVec4.release(ptr);
+}
+
+//******************************************************************************
+// FluidSolver members
+
+FluidSolver::FluidSolver(Vec3i gridsize, int dim, int fourthDim)
+ : PbClass(this),
+ mDt(1.0),
+ mTimeTotal(0.),
+ mFrame(0),
+ mCflCond(1000),
+ mDtMin(1.),
+ mDtMax(1.),
+ mFrameLength(1.),
+ mGridSize(gridsize),
+ mDim(dim),
+ mTimePerFrame(0.),
+ mLockDt(false),
+ mFourthDim(fourthDim)
+{
+ if (dim == 4 && mFourthDim > 0)
+ errMsg("Don't create 4D solvers, use 3D with fourth-dim parameter >0 instead.");
+ assertMsg(dim == 2 || dim == 3, "Only 2D and 3D solvers allowed.");
+ assertMsg(dim != 2 || gridsize.z == 1, "Trying to create 2D solver with size.z != 1");
+}
+
+FluidSolver::~FluidSolver()
+{
+ mGridsInt.free();
+ mGridsReal.free();
+ mGridsVec.free();
+ mGridsVec4.free();
+
+ mGrids4dInt.free();
+ mGrids4dReal.free();
+ mGrids4dVec.free();
+ mGrids4dVec4.free();
+}
+
+PbClass *FluidSolver::create(PbType t, PbTypeVec T, const string &name)
+{
+#if NOPYTHON != 1
+ _args.add("nocheck", true);
+ if (t.str() == "")
+ errMsg(
+ "Need to specify object type. Use e.g. Solver.create(FlagGrid, ...) or "
+ "Solver.create(type=FlagGrid, ...)");
+
+ PbClass *ret = PbClass::createPyObject(t.str() + T.str(), name, _args, this);
+#else
+ PbClass *ret = NULL;
+#endif
+ return ret;
+}
+
+void FluidSolver::step()
+{
+ // update simulation time with adaptive time stepping
+ // (use eps value to prevent roundoff errors)
+ mTimePerFrame += mDt;
+ mTimeTotal += mDt;
+
+ if ((mTimePerFrame + VECTOR_EPSILON) > mFrameLength) {
+ mFrame++;
+
+ // re-calc total time, prevent drift...
+ mTimeTotal = (double)mFrame * mFrameLength;
+ mTimePerFrame = 0.;
+ mLockDt = false;
+ }
+
+ updateQtGui(true, mFrame, mTimeTotal, "FluidSolver::step");
+}
+
+void FluidSolver::printMemInfo()
+{
+ std::ostringstream msg;
+ msg << "Allocated grids: int " << mGridsInt.used << "/" << mGridsInt.grids.size() << ", ";
+ msg << " real " << mGridsReal.used << "/" << mGridsReal.grids.size() << ", ";
+ msg << " vec3 " << mGridsVec.used << "/" << mGridsVec.grids.size() << ". ";
+ msg << " vec4 " << mGridsVec4.used << "/" << mGridsVec4.grids.size() << ". ";
+ if (supports4D()) {
+ msg << "Allocated 4d grids: int " << mGrids4dInt.used << "/" << mGrids4dInt.grids.size()
+ << ", ";
+ msg << " real " << mGrids4dReal.used << "/" << mGrids4dReal.grids.size()
+ << ", ";
+ msg << " vec3 " << mGrids4dVec.used << "/" << mGrids4dVec.grids.size()
+ << ". ";
+ msg << " vec4 " << mGrids4dVec4.used << "/" << mGrids4dVec4.grids.size()
+ << ". ";
+ }
+ printf("%s\n", msg.str().c_str());
+}
+
+//! warning, uses 10^-4 epsilon values, thus only use around "regular" FPS time scales, e.g. 30
+//! frames per time unit pass max magnitude of current velocity as maxvel, not yet scaled by dt!
+void FluidSolver::adaptTimestep(Real maxVel)
+{
+ const Real mvt = maxVel * mDt;
+ if (!mLockDt) {
+ // calculate current timestep from maxvel, clamp range
+ mDt = std::max(std::min(mDt * (Real)(mCflCond / (mvt + 1e-05)), mDtMax), mDtMin);
+ if ((mTimePerFrame + mDt * 1.05) > mFrameLength) {
+ // within 5% of full step? add epsilon to prevent roundoff errors...
+ mDt = (mFrameLength - mTimePerFrame) + 1e-04;
+ }
+ else if ((mTimePerFrame + mDt + mDtMin) > mFrameLength ||
+ (mTimePerFrame + (mDt * 1.25)) > mFrameLength) {
+ // avoid tiny timesteps and strongly varying ones, do 2 medium size ones if necessary...
+ mDt = (mFrameLength - mTimePerFrame + 1e-04) * 0.5;
+ mLockDt = true;
+ }
+ }
+ debMsg("Frame " << mFrame << ", max vel per step: " << mvt << " , dt: " << mDt << ", frame time "
+ << mTimePerFrame << "/" << mFrameLength << "; lock:" << mLockDt,
+ 2);
+
+ // sanity check
+ assertMsg((mDt > (mDtMin / 2.)), "Invalid dt encountered! Shouldnt happen...");
+}
+
+//******************************************************************************
+// Generic helpers (no PYTHON funcs in general.cpp, thus they're here...)
+
+//! helper to unify printing from python scripts and printing internal messages (optionally pass
+//! debug level to control amount of output)
+void mantaMsg(const std::string &out, int level = 1)
+{
+ debMsg(out, level);
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "mantaMsg", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const std::string &out = _args.get<std::string>("out", 0, &_lock);
+ int level = _args.getOpt<int>("level", 1, 1, &_lock);
+ _retval = getPyNone();
+ mantaMsg(out, level);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "mantaMsg", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("mantaMsg", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_mantaMsg("", "mantaMsg", _W_0);
+extern "C" {
+void PbRegister_mantaMsg()
+{
+ KEEP_UNUSED(_RP_mantaMsg);
+}
+}
+
+std::string printBuildInfo()
+{
+ string infoString = buildInfoString();
+ debMsg("Build info: " << infoString.c_str() << " ", 1);
+ return infoString;
+}
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "printBuildInfo", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ _retval = toPy(printBuildInfo());
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "printBuildInfo", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("printBuildInfo", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_printBuildInfo("", "printBuildInfo", _W_1);
+extern "C" {
+void PbRegister_printBuildInfo()
+{
+ KEEP_UNUSED(_RP_printBuildInfo);
+}
+}
+
+//! set debug level for messages (0 off, 1 regular, higher = more, up to 10)
+void setDebugLevel(int level = 1)
+{
+ gDebugLevel = level;
+}
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setDebugLevel", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int level = _args.getOpt<int>("level", 0, 1, &_lock);
+ _retval = getPyNone();
+ setDebugLevel(level);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setDebugLevel", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setDebugLevel", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setDebugLevel("", "setDebugLevel", _W_2);
+extern "C" {
+void PbRegister_setDebugLevel()
+{
+ KEEP_UNUSED(_RP_setDebugLevel);
+}
+}
+
+//! helper function to check for numpy compilation
+void assertNumpy()
+{
+#if NUMPY == 1
+ // all good, nothing to do...
+#else
+ errMsg("This scene requires numpy support. Enable compilation in cmake with \"-DNUMPY=1\" ");
+#endif
+}
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "assertNumpy", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ _retval = getPyNone();
+ assertNumpy();
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "assertNumpy", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("assertNumpy", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_assertNumpy("", "assertNumpy", _W_3);
+extern "C" {
+void PbRegister_assertNumpy()
+{
+ KEEP_UNUSED(_RP_assertNumpy);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/fluidsolver.h b/extern/mantaflow/preprocessed/fluidsolver.h
new file mode 100644
index 00000000000..d01f87082b6
--- /dev/null
+++ b/extern/mantaflow/preprocessed/fluidsolver.h
@@ -0,0 +1,395 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Main class for the fluid solver
+ *
+ ******************************************************************************/
+
+#ifndef _FLUIDSOLVER_H
+#define _FLUIDSOLVER_H
+
+#include "manta.h"
+#include "vectorbase.h"
+#include "vector4d.h"
+#include <vector>
+#include <map>
+
+namespace Manta {
+
+//! Encodes grid size, timstep etc.
+
+class FluidSolver : public PbClass {
+ public:
+ FluidSolver(Vec3i gridSize, int dim = 3, int fourthDim = -1);
+ static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "FluidSolver::FluidSolver", !noTiming);
+ {
+ ArgLocker _lock;
+ Vec3i gridSize = _args.get<Vec3i>("gridSize", 0, &_lock);
+ int dim = _args.getOpt<int>("dim", 1, 3, &_lock);
+ int fourthDim = _args.getOpt<int>("fourthDim", 2, -1, &_lock);
+ obj = new FluidSolver(gridSize, dim, fourthDim);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "FluidSolver::FluidSolver", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("FluidSolver::FluidSolver", e.what());
+ return -1;
+ }
+ }
+
+ virtual ~FluidSolver();
+
+ // accessors
+ Vec3i getGridSize()
+ {
+ return mGridSize;
+ }
+ static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "FluidSolver::getGridSize", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getGridSize());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "FluidSolver::getGridSize", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("FluidSolver::getGridSize", e.what());
+ return 0;
+ }
+ }
+
+ inline Real getDt() const
+ {
+ return mDt;
+ }
+ inline Real getDx() const
+ {
+ return 1.0 / mGridSize.max();
+ }
+ inline Real getTime() const
+ {
+ return mTimeTotal;
+ }
+
+ //! Check dimensionality
+ inline bool is2D() const
+ {
+ return mDim == 2;
+ }
+ //! Check dimensionality (3d or above)
+ inline bool is3D() const
+ {
+ return mDim == 3;
+ }
+
+ void printMemInfo();
+ static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "FluidSolver::printMemInfo", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->printMemInfo();
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "FluidSolver::printMemInfo", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("FluidSolver::printMemInfo", e.what());
+ return 0;
+ }
+ }
+
+ //! Advance the solver one timestep, update GUI if present
+ void step();
+ static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "FluidSolver::step", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->step();
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "FluidSolver::step", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("FluidSolver::step", e.what());
+ return 0;
+ }
+ }
+
+ //! Update the timestep size based on given maximal velocity magnitude
+ void adaptTimestep(Real maxVel);
+ static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "FluidSolver::adaptTimestep", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Real maxVel = _args.get<Real>("maxVel", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->adaptTimestep(maxVel);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "FluidSolver::adaptTimestep", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("FluidSolver::adaptTimestep", e.what());
+ return 0;
+ }
+ }
+
+ //! create a object with the solver as its parent
+ PbClass *create(PbType type, PbTypeVec T = PbTypeVec(), const std::string &name = "");
+ static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "FluidSolver::create", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ PbType type = _args.get<PbType>("type", 0, &_lock);
+ PbTypeVec T = _args.getOpt<PbTypeVec>("T", 1, PbTypeVec(), &_lock);
+ const std::string &name = _args.getOpt<std::string>("name", 2, "", &_lock);
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->create(type, T, name));
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "FluidSolver::create", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("FluidSolver::create", e.what());
+ return 0;
+ }
+ }
+
+ // temp grid and plugin functions: you shouldn't call this manually
+ template<class T> T *getGridPointer();
+ template<class T> void freeGridPointer(T *ptr);
+
+ //! expose animation time to python
+ Real mDt;
+ static PyObject *_GET_mDt(PyObject *self, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ return toPy(pbo->mDt);
+ }
+ static int _SET_mDt(PyObject *self, PyObject *val, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ pbo->mDt = fromPy<Real>(val);
+ return 0;
+ }
+
+ Real mTimeTotal;
+ static PyObject *_GET_mTimeTotal(PyObject *self, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ return toPy(pbo->mTimeTotal);
+ }
+ static int _SET_mTimeTotal(PyObject *self, PyObject *val, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ pbo->mTimeTotal = fromPy<Real>(val);
+ return 0;
+ }
+
+ int mFrame;
+ static PyObject *_GET_mFrame(PyObject *self, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ return toPy(pbo->mFrame);
+ }
+ static int _SET_mFrame(PyObject *self, PyObject *val, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ pbo->mFrame = fromPy<int>(val);
+ return 0;
+ }
+
+ //! parameters for adaptive time stepping
+ Real mCflCond;
+ static PyObject *_GET_mCflCond(PyObject *self, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ return toPy(pbo->mCflCond);
+ }
+ static int _SET_mCflCond(PyObject *self, PyObject *val, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ pbo->mCflCond = fromPy<Real>(val);
+ return 0;
+ }
+
+ Real mDtMin;
+ static PyObject *_GET_mDtMin(PyObject *self, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ return toPy(pbo->mDtMin);
+ }
+ static int _SET_mDtMin(PyObject *self, PyObject *val, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ pbo->mDtMin = fromPy<Real>(val);
+ return 0;
+ }
+
+ Real mDtMax;
+ static PyObject *_GET_mDtMax(PyObject *self, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ return toPy(pbo->mDtMax);
+ }
+ static int _SET_mDtMax(PyObject *self, PyObject *val, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ pbo->mDtMax = fromPy<Real>(val);
+ return 0;
+ }
+
+ Real mFrameLength;
+ static PyObject *_GET_mFrameLength(PyObject *self, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ return toPy(pbo->mFrameLength);
+ }
+ static int _SET_mFrameLength(PyObject *self, PyObject *val, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ pbo->mFrameLength = fromPy<Real>(val);
+ return 0;
+ }
+
+ //! Per frame duration. Blender needs access in order to restore value in new solver object
+ Real mTimePerFrame;
+ static PyObject *_GET_mTimePerFrame(PyObject *self, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ return toPy(pbo->mTimePerFrame);
+ }
+ static int _SET_mTimePerFrame(PyObject *self, PyObject *val, void *cl)
+ {
+ FluidSolver *pbo = dynamic_cast<FluidSolver *>(Pb::objFromPy(self));
+ pbo->mTimePerFrame = fromPy<Real>(val);
+ return 0;
+ }
+
+ protected:
+ Vec3i mGridSize;
+ const int mDim;
+ bool mLockDt;
+
+ //! subclass for managing grid memory
+ //! stored as a stack to allow fast allocation
+ template<class T> struct GridStorage {
+ GridStorage() : used(0)
+ {
+ }
+ T *get(Vec3i size);
+ void free();
+ void release(T *ptr);
+
+ std::vector<T *> grids;
+ int used;
+ };
+
+ //! memory for regular (3d) grids
+ GridStorage<int> mGridsInt;
+ GridStorage<Real> mGridsReal;
+ GridStorage<Vec3> mGridsVec;
+
+ //! 4d data section, only required for simulations working with space-time data
+
+ public:
+ //! 4D enabled? note, there's intentionally no "is4D" function, there are only 3D solvers that
+ //! also support 4D of a certain size
+ inline bool supports4D() const
+ {
+ return mFourthDim > 0;
+ }
+ //! fourth dimension size
+ inline int getFourthDim() const
+ {
+ return mFourthDim;
+ }
+ //! 4d data allocation
+ template<class T> T *getGrid4dPointer();
+ template<class T> void freeGrid4dPointer(T *ptr);
+
+ protected:
+ //! 4d size. Note - 4d is not treated like going from 2d to 3d! 4D grids are a separate data
+ //! type. Normally all grids are forced to have the same size. In contrast, a solver can create
+ //! and work with 3D as well as 4D grids, when fourth-dim is >0.
+ int mFourthDim;
+
+ //! 4d grid storage
+ GridStorage<Vec4> mGridsVec4;
+ GridStorage<int> mGrids4dInt;
+ GridStorage<Real> mGrids4dReal;
+ GridStorage<Vec3> mGrids4dVec;
+ GridStorage<Vec4> mGrids4dVec4;
+ public:
+ PbArgs _args;
+}
+#define _C_FluidSolver
+;
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/fluidsolver.h.reg.cpp b/extern/mantaflow/preprocessed/fluidsolver.h.reg.cpp
new file mode 100644
index 00000000000..764c7c59021
--- /dev/null
+++ b/extern/mantaflow/preprocessed/fluidsolver.h.reg.cpp
@@ -0,0 +1,70 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "fluidsolver.h"
+namespace Manta {
+#ifdef _C_FluidSolver
+static const Pb::Register _R_6("FluidSolver", "Solver", "PbClass");
+template<> const char *Namify<FluidSolver>::S = "FluidSolver";
+static const Pb::Register _R_7("FluidSolver", "FluidSolver", FluidSolver::_W_0);
+static const Pb::Register _R_8("FluidSolver", "getGridSize", FluidSolver::_W_1);
+static const Pb::Register _R_9("FluidSolver", "printMemInfo", FluidSolver::_W_2);
+static const Pb::Register _R_10("FluidSolver", "step", FluidSolver::_W_3);
+static const Pb::Register _R_11("FluidSolver", "adaptTimestep", FluidSolver::_W_4);
+static const Pb::Register _R_12("FluidSolver", "create", FluidSolver::_W_5);
+static const Pb::Register _R_13("FluidSolver",
+ "timestep",
+ FluidSolver::_GET_mDt,
+ FluidSolver::_SET_mDt);
+static const Pb::Register _R_14("FluidSolver",
+ "timeTotal",
+ FluidSolver::_GET_mTimeTotal,
+ FluidSolver::_SET_mTimeTotal);
+static const Pb::Register _R_15("FluidSolver",
+ "frame",
+ FluidSolver::_GET_mFrame,
+ FluidSolver::_SET_mFrame);
+static const Pb::Register _R_16("FluidSolver",
+ "cfl",
+ FluidSolver::_GET_mCflCond,
+ FluidSolver::_SET_mCflCond);
+static const Pb::Register _R_17("FluidSolver",
+ "timestepMin",
+ FluidSolver::_GET_mDtMin,
+ FluidSolver::_SET_mDtMin);
+static const Pb::Register _R_18("FluidSolver",
+ "timestepMax",
+ FluidSolver::_GET_mDtMax,
+ FluidSolver::_SET_mDtMax);
+static const Pb::Register _R_19("FluidSolver",
+ "frameLength",
+ FluidSolver::_GET_mFrameLength,
+ FluidSolver::_SET_mFrameLength);
+static const Pb::Register _R_20("FluidSolver",
+ "timePerFrame",
+ FluidSolver::_GET_mTimePerFrame,
+ FluidSolver::_SET_mTimePerFrame);
+#endif
+extern "C" {
+void PbRegister_file_6()
+{
+ KEEP_UNUSED(_R_6);
+ KEEP_UNUSED(_R_7);
+ KEEP_UNUSED(_R_8);
+ KEEP_UNUSED(_R_9);
+ KEEP_UNUSED(_R_10);
+ KEEP_UNUSED(_R_11);
+ KEEP_UNUSED(_R_12);
+ KEEP_UNUSED(_R_13);
+ KEEP_UNUSED(_R_14);
+ KEEP_UNUSED(_R_15);
+ KEEP_UNUSED(_R_16);
+ KEEP_UNUSED(_R_17);
+ KEEP_UNUSED(_R_18);
+ KEEP_UNUSED(_R_19);
+ KEEP_UNUSED(_R_20);
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/general.cpp b/extern/mantaflow/preprocessed/general.cpp
new file mode 100644
index 00000000000..266e6c8719d
--- /dev/null
+++ b/extern/mantaflow/preprocessed/general.cpp
@@ -0,0 +1,167 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2016 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Globally used macros and functions (e.g. time measurements),
+ * and doxygen documentation collection.
+ *
+ ******************************************************************************/
+
+/*! \mainpage Welcome to mantaflow!
+ *
+ * Here you can find the auto-generated documentation of the mantaflow framework.
+ *
+ * One of the most useful parts is probably the list of python functions, classes and the C++
+ * kernels. Those can be found found in the ''Modules'' section. For python functions the
+ * parameters (e.g. Grids, Real or int values) are automatically transferred to and from python.
+ * Thus, this list is a good reference how to call the functions used in the example scenes.
+ *
+ */
+
+// Define plugin documentation group
+// all kernels, plugin functions and classes will automatically be added to this group
+//! @defgroup Plugins Functions callable from Python
+//! @defgroup PyClasses Classes exposed to Python
+//! @defgroup Kernels Computation Kernels
+
+#include "general.h"
+#if defined(WIN32) || defined(_WIN32)
+# define WIN32_LEAN_AND_MEAN
+# define NOMINMAX
+# include <windows.h>
+# undef WIN32_LEAN_AND_MEAN
+# undef NOMINMAX
+#else
+# include <sys/time.h>
+# include "gitinfo.h"
+#endif
+
+using namespace std;
+
+namespace Manta {
+
+int gDebugLevel = 1;
+
+void MuTime::get()
+{
+#if defined(WIN32) || defined(_WIN32)
+ LARGE_INTEGER liTimerFrequency;
+ QueryPerformanceFrequency(&liTimerFrequency);
+ LARGE_INTEGER liLastTime;
+ QueryPerformanceCounter(&liLastTime);
+ time = (INT)(((double)liLastTime.QuadPart / liTimerFrequency.QuadPart) * 1000);
+#else
+ struct timeval tv;
+ struct timezone tz;
+ tz.tz_minuteswest = 0;
+ tz.tz_dsttime = 0;
+ gettimeofday(&tv, &tz);
+ time = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
+#endif
+}
+
+MuTime MuTime::update()
+{
+ MuTime o = *this;
+ get();
+ return *this - o;
+}
+
+string MuTime::toString()
+{
+ stringstream ss;
+ ss << *this;
+ return ss.str();
+}
+
+ostream &operator<<(ostream &os, const MuTime &t)
+{
+ unsigned long ms = (unsigned long)((double)t.time / (60.0 * 1000.0));
+ unsigned long ss = (unsigned long)(((double)t.time / 1000.0) - ((double)ms * 60.0));
+ int ps = (int)(((double)t.time - (double)ss * 1000.0) / 1.0);
+
+ if (ms > 0) {
+ os << ms << "m" << ss << "s";
+ }
+ else {
+ if (ps > 0) {
+ os << ss << ".";
+ if (ps < 10) {
+ os << "0";
+ }
+ if (ps < 100) {
+ os << "0";
+ }
+ os << ps << "s";
+ }
+ else {
+ os << ss << "s";
+ }
+ }
+ return os;
+}
+
+//! print info about this mantaflow build, used eg by printBuildInfo in fluidsolver.cpp
+std::string buildInfoString()
+{
+ std::ostringstream infoStr;
+#ifndef MANTAVERSION
+# define MANTAVERSION "<unknown-version>"
+#endif
+ infoStr << "mantaflow " << MANTAVERSION;
+
+ // os
+#if defined(WIN32) || defined(_WIN32)
+ infoStr << " win";
+#endif
+#ifdef __APPLE__
+ infoStr << " mac";
+#endif
+#ifdef LINUX
+ infoStr << " linux";
+#endif
+
+ // 32/64 bit
+ if (sizeof(size_t) == 8)
+ infoStr << " 64bit";
+ else
+ infoStr << " 32bit";
+
+ // fp precision
+#if FLOATINGPOINT_PRECISION == 2
+ infoStr << " fp2";
+#else
+ infoStr << " fp1";
+#endif
+
+ // other compile switches
+#ifdef DEBUG
+ infoStr << " debug";
+#endif
+#ifdef OPENMP
+ infoStr << " omp";
+#endif
+
+ // repository info (git commit id)
+#ifndef MANTA_GIT_VERSION
+# define MANTA_GIT_VERSION "<unknown-commit>"
+#endif
+ infoStr << " " << MANTA_GIT_VERSION;
+
+ infoStr << " from " << __DATE__ << ", " << __TIME__;
+ return infoStr.str();
+}
+
+//! note - generic PYTHON helpers in fluidsolver.cpp , no python bindings here
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/general.h b/extern/mantaflow/preprocessed/general.h
new file mode 100644
index 00000000000..7a840517cef
--- /dev/null
+++ b/extern/mantaflow/preprocessed/general.h
@@ -0,0 +1,247 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Globally used macros and functions
+ *
+ ******************************************************************************/
+
+#ifndef _GENERAL_H
+#define _GENERAL_H
+
+#include <iostream>
+#include <sstream>
+#include <cmath>
+#include <algorithm>
+
+namespace Manta {
+
+// ui data exchange
+#ifdef GUI
+// defined in qtmain.cpp
+extern void updateQtGui(bool full, int frame, float time, const std::string &curPlugin);
+#else
+// dummy function if GUI is not enabled
+inline void updateQtGui(bool full, int frame, float time, const std::string &curPlugin)
+{
+}
+#endif
+
+// activate debug mode if _DEBUG is defined (eg for windows)
+#ifndef DEBUG
+# ifdef _DEBUG
+# define DEBUG 1
+# endif // _DEBUG
+#endif // DEBUG
+
// Standard exception
//! Exception type used throughout mantaflow (thrown by the throwError/errMsg
//! macros below). Stores a plain message string returned by what(). In DEBUG
//! builds the constructor also prints the message and deliberately crashes so
//! a debugger halts right at the throw site instead of unwinding.
class Error : public std::exception {
 public:
  Error(const std::string &s) : mS(s)
  {
#ifdef DEBUG
    // print error
    std::cerr << "Aborting: " << s << " \n";
    // then force immediate crash in debug mode (intentional null-pointer write)
    *(volatile int *)(0) = 1;
#endif
  }
  virtual ~Error() throw()
  {
  }
  //! Message accessor required by std::exception.
  virtual const char *what() const throw()
  {
    return mS.c_str();
  }

 private:
  std::string mS;  // stored error message
};
+
+// mark unused parameter variables
+#define unusedParameter(x) ((void)x)
+
+// Debug output functions and macros
+extern int gDebugLevel;
+
+#define MSGSTREAM \
+ std::ostringstream msg; \
+ msg.precision(7); \
+ msg.width(9);
+#define debMsg(mStr, level) \
+ if (_chklevel(level)) { \
+ MSGSTREAM; \
+ msg << mStr; \
+ std::cout << msg.str() << std::endl; \
+ }
//! True when messages at 'level' should be printed (used by debMsg above);
//! compares against the global verbosity setting gDebugLevel.
inline bool _chklevel(int level = 0)
{
  return gDebugLevel >= level;
}
+
+// error and assertation macros
+#ifdef DEBUG
+# define DEBUG_ONLY(a) a
+#else
+# define DEBUG_ONLY(a)
+#endif
+#define throwError(msg) \
+ { \
+ std::ostringstream __s; \
+ __s << msg << std::endl << "Error raised in " << __FILE__ << ":" << __LINE__; \
+ throw Manta::Error(__s.str()); \
+ }
+#define errMsg(msg) throwError(msg);
+#define assertMsg(cond, msg) \
+ if (!(cond)) \
+ throwError(msg)
+#define assertDeb(cond, msg) DEBUG_ONLY(assertMsg(cond, msg))
+
+// for compatibility with blender, blender only defines WITH_FLUID, make sure we have "BLENDER"
+#ifndef BLENDER
+# ifdef WITH_FLUID
+# define BLENDER 1
+# endif
+#endif
+
+// common type for indexing large grids
+typedef long long IndexInt;
+
+// template tricks
//! Type trait that strips one level of pointer or reference from T,
//! leaving plain types unchanged.
template<typename T> struct remove_pointers {
  typedef T type;
};

//! Specialization: T* -> T
template<typename T> struct remove_pointers<T *> {
  typedef T type;
};

//! Specialization: T& -> T
template<typename T> struct remove_pointers<T &> {
  typedef T type;
};
+
+// Commonly used enums and types
//! Timing class for performance measuring
//! Wall-clock timer storing a single millisecond count (as interpreted by
//! operator<< below); get() is platform-specific and defined elsewhere.
struct MuTime {
  //! Initialize with the current time.
  MuTime()
  {
    get();
  }
  //! Difference of two stamps (unsigned arithmetic; caller ensures *this >= a).
  MuTime operator-(const MuTime &a)
  {
    MuTime b;
    b.time = time - a.time;
    return b;
  };
  //! Sum of two stamps/intervals.
  MuTime operator+(const MuTime &a)
  {
    MuTime b;
    b.time = time + a.time;
    return b;
  };
  //! Divide an interval, eg to average over a number of runs.
  MuTime operator/(unsigned long a)
  {
    MuTime b;
    b.time = time / a;
    return b;
  };
  MuTime &operator+=(const MuTime &a)
  {
    time += a.time;
    return *this;
  }
  MuTime &operator-=(const MuTime &a)
  {
    time -= a.time;
    return *this;
  }
  MuTime &operator/=(unsigned long a)
  {
    time /= a;
    return *this;
  }
  //! Human-readable formatting via the stream operator below.
  std::string toString();

  //! Reset the stored value to zero.
  void clear()
  {
    time = 0;
  }
  //! Store the current time (platform-specific; defined elsewhere).
  void get();
  //! Re-read the clock and return the interval since the previous value.
  MuTime update();

  // millisecond count (formatting in operator<< divides by 1000/60000)
  unsigned long time;
};
+std::ostream &operator<<(std::ostream &os, const MuTime &t);
+
+//! generate a string with infos about the current mantaflow build
+std::string buildInfoString();
+
+// Some commonly used math helpers
//! Square of a value.
template<class T> inline T square(T a)
{
  const T sq = a * a;
  return sq;
}
//! Cube of a value.
template<class T> inline T cubed(T a)
{
  const T c = a * a * a;
  return c;
}
+
//! Clamp val into the closed interval [vmin, vmax]: returns vmin when below,
//! vmax when above, and val itself otherwise.
template<class T> inline T clamp(const T &val, const T &vmin, const T &vmax)
{
  return (val < vmin) ? vmin : ((val > vmax) ? vmax : val);
}
+
//! Positive modulo: like % / fmod but shifted so the result lies in [0, b)
//! for positive b, even when a is negative.
template<class T> inline T nmod(const T &a, const T &b);
template<> inline int nmod(const int &a, const int &b)
{
  const int r = a % b;
  return (r >= 0) ? r : r + b;
}
template<> inline float nmod(const float &a, const float &b)
{
  const float r = std::fmod(a, b);
  return (r >= 0) ? r : r + b;
}
template<> inline double nmod(const double &a, const double &b)
{
  const double r = std::fmod(a, b);
  return (r >= 0) ? r : r + b;
}
+
//! Division that avoids divide-by-zero: returns the numerator unchanged when
//! the divisor is zero.
template<class T> inline T safeDivide(const T &a, const T &b);
template<> inline int safeDivide<int>(const int &a, const int &b)
{
  if (b == 0)
    return a;
  return a / b;
}
template<> inline float safeDivide<float>(const float &a, const float &b)
{
  if (b == 0)
    return a;
  return a / b;
}
template<> inline double safeDivide<double>(const double &a, const double &b)
{
  if (b == 0)
    return a;
  return a / b;
}
+
//! NaN check that keeps working under aggressive compiler optimization: the
//! volatile copy forces an actual runtime comparison, exploiting NaN != NaN.
inline bool c_isnan(float c)
{
  volatile float probe = c;
  return !(probe == probe);
}
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/general.h.reg.cpp b/extern/mantaflow/preprocessed/general.h.reg.cpp
new file mode 100644
index 00000000000..cab2c4782b0
--- /dev/null
+++ b/extern/mantaflow/preprocessed/general.h.reg.cpp
@@ -0,0 +1,13 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "general.h"
+namespace Manta {
+extern "C" {
+void PbRegister_file_1()
+{
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/gitinfo.h b/extern/mantaflow/preprocessed/gitinfo.h
new file mode 100644
index 00000000000..154f928dc2f
--- /dev/null
+++ b/extern/mantaflow/preprocessed/gitinfo.h
@@ -0,0 +1,3 @@
+
+
+#define MANTA_GIT_VERSION "commit 761849c592daaea320f9026768b5a0750528009c"
diff --git a/extern/mantaflow/preprocessed/grid.cpp b/extern/mantaflow/preprocessed/grid.cpp
new file mode 100644
index 00000000000..c21d56d8879
--- /dev/null
+++ b/extern/mantaflow/preprocessed/grid.cpp
@@ -0,0 +1,2939 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Grid representation
+ *
+ ******************************************************************************/
+
+#include "grid.h"
+#include "levelset.h"
+#include "kernel.h"
+#include "mantaio.h"
+#include <limits>
+#include <sstream>
+#include <cstring>
+
+#include "commonkernels.h"
+
+using namespace std;
+namespace Manta {
+
+//******************************************************************************
+// GridBase members
+
//! Base constructor: registers with the parent solver (checkParent aborts on
//! a missing parent) and caches its dimensionality; the concrete grid type
//! marker mType is set later by the typed subclasses.
GridBase::GridBase(FluidSolver *parent) : PbClass(parent), mType(TypeNone)
{
  checkParent();
  m3D = getParent()->is3D();
}
+
+//******************************************************************************
+// Grid<T> members
+
// helpers to set type
//! Map a C++ element type to the matching GridType tag; the unspecialized
//! version yields TypeNone for unknown element types.
template<class T> inline GridBase::GridType typeList()
{
  return GridBase::TypeNone;
}
template<> inline GridBase::GridType typeList<Real>()
{
  return GridBase::TypeReal;
}
template<> inline GridBase::GridType typeList<int>()
{
  return GridBase::TypeInt;
}
template<> inline GridBase::GridType typeList<Vec3>()
{
  return GridBase::TypeVec3;
}
+
//! Main constructor: allocates and zero-initializes grid storage from the
//! parent solver's pool. 'show' controls GUI visibility (setHidden).
template<class T>
Grid<T>::Grid(FluidSolver *parent, bool show) : GridBase(parent), externalData(false)
{
  mType = typeList<T>();
  mSize = parent->getGridSize();
  mData = parent->getGridPointer<T>();

  // 2D solvers use a zero z-stride so (i,j,k) indexing stays valid
  mStrideZ = parent->is2D() ? 0 : (mSize.x * mSize.y);
  mDx = 1.0 / mSize.max();
  clear();
  setHidden(!show);
}
+
//! Wrapping constructor around a caller-owned buffer: the grid will not free
//! 'data' (externalData == true) and, unlike the main constructor, does not
//! clear it — the caller's contents are preserved.
template<class T>
Grid<T>::Grid(FluidSolver *parent, T *data, bool show)
    : GridBase(parent), mData(data), externalData(true)
{
  mType = typeList<T>();
  mSize = parent->getGridSize();

  mStrideZ = parent->is2D() ? 0 : (mSize.x * mSize.y);
  mDx = 1.0 / mSize.max();

  setHidden(!show);
}
+
//! Copy constructor: allocates fresh storage from the parent solver and
//! deep-copies the source data; the copy always owns its memory, even when
//! copying from a grid that wraps an external buffer.
template<class T> Grid<T>::Grid(const Grid<T> &a) : GridBase(a.getParent()), externalData(false)
{
  mSize = a.mSize;
  mType = a.mType;
  mStrideZ = a.mStrideZ;
  mDx = a.mDx;
  FluidSolver *gp = a.getParent();
  mData = gp->getGridPointer<T>();
  memcpy(mData, a.mData, sizeof(T) * a.mSize.x * a.mSize.y * a.mSize.z);
}
+
//! Return the data buffer to the solver's pool, unless it is caller-owned.
template<class T> Grid<T>::~Grid()
{
  if (!externalData) {
    mParent->freeGridPointer<T>(mData);
  }
}
+
//! Zero-fill the whole grid. Raw memset — assumes the element types (Real,
//! int, Vec3) are trivially copyable with all-zero-bytes meaning zero.
template<class T> void Grid<T>::clear()
{
  memset(mData, 0, sizeof(T) * mSize.x * mSize.y * mSize.z);
}
+
//! O(1) swap of the data pointers of two equally-sized grids. Refuses to
//! swap externally-owned buffers since pointer ownership would become
//! ambiguous between the grids.
template<class T> void Grid<T>::swap(Grid<T> &other)
{
  if (other.getSizeX() != getSizeX() || other.getSizeY() != getSizeY() ||
      other.getSizeZ() != getSizeZ())
    errMsg("Grid::swap(): Grid dimensions mismatch.");

  if (externalData || other.externalData)
    errMsg("Grid::swap(): Cannot swap if one grid stores externalData.");

  T *dswap = other.mData;
  other.mData = mData;
  mData = dswap;
}
+
//! Load grid content from file; the reader is chosen by file extension
//! (.raw, .uni, .vol, .npz, plus .vdb when compiled with OpenVDB support).
//! Raises via errMsg when the extension is missing or unsupported.
template<class T> void Grid<T>::load(string name)
{
  if (name.find_last_of('.') == string::npos)
    errMsg("file '" + name + "' does not have an extension");
  string ext = name.substr(name.find_last_of('.'));
  if (ext == ".raw")
    readGridRaw(name, this);
  else if (ext == ".uni")
    readGridUni(name, this);
  else if (ext == ".vol")
    readGridVol(name, this);
  else if (ext == ".npz")
    readGridNumpy(name, this);
#if OPENVDB == 1
  else if (ext == ".vdb")
    readGridVDB(name, this);
#endif  // OPENVDB==1
  else
    errMsg("file '" + name + "' filetype not supported");
}
+
//! Write grid content to file; the writer is chosen by file extension
//! (.raw, .uni, .vol, .npz, .txt, plus .vdb when compiled with OpenVDB).
//! Note .txt is write-only — load() has no matching reader.
template<class T> void Grid<T>::save(string name)
{
  if (name.find_last_of('.') == string::npos)
    errMsg("file '" + name + "' does not have an extension");
  string ext = name.substr(name.find_last_of('.'));
  if (ext == ".raw")
    writeGridRaw(name, this);
  else if (ext == ".uni")
    writeGridUni(name, this);
  else if (ext == ".vol")
    writeGridVol(name, this);
#if OPENVDB == 1
  else if (ext == ".vdb")
    writeGridVDB(name, this);
#endif  // OPENVDB==1
  else if (ext == ".npz")
    writeGridNumpy(name, this);
  else if (ext == ".txt")
    writeGridTxt(name, this);
  else
    errMsg("file '" + name + "' filetype not supported");
}
+
+//******************************************************************************
+// Grid<T> operators
+
+//! Kernel: Compute min value of Real grid
+
//! tbb parallel_reduce kernel: finds the smallest value in a Real grid.
//! Executes immediately on construction; the result is read back through the
//! implicit conversion to Real (or getRet()). The splitting constructor and
//! join() implement tbb's reduction protocol — each split copy starts from
//! the identity (+max) and partial minima are merged in join(). The sibling
//! Comp{Max}{Real,Int,Vec} kernels below follow this exact generated pattern.
struct CompMinReal : public KernelBase {
  CompMinReal(const Grid<Real> &val)
      : KernelBase(&val, 0), val(val), minVal(std::numeric_limits<Real>::max())
  {
    runMessage();
    run();
  }
  //! Per-cell reduction step.
  inline void op(IndexInt idx, const Grid<Real> &val, Real &minVal)
  {
    if (val[idx] < minVal)
      minVal = val[idx];
  }
  inline operator Real()
  {
    return minVal;
  }
  inline Real &getRet()
  {
    return minVal;
  }
  inline const Grid<Real> &getArg0()
  {
    return val;
  }
  typedef Grid<Real> type0;
  void runMessage()
  {
    debMsg("Executing kernel CompMinReal ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  //! Reduce over one tbb index sub-range.
  void operator()(const tbb::blocked_range<IndexInt> &__r)
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, val, minVal);
  }
  void run()
  {
    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  //! Splitting constructor: fresh accumulator starts at the min-identity.
  CompMinReal(CompMinReal &o, tbb::split)
      : KernelBase(o), val(o.val), minVal(std::numeric_limits<Real>::max())
  {
  }
  //! Merge the partial result of another split copy.
  void join(const CompMinReal &o)
  {
    minVal = min(minVal, o.minVal);
  }
  const Grid<Real> &val;
  Real minVal;
};
+
+//! Kernel: Compute max value of Real grid
+
+struct CompMaxReal : public KernelBase {
+ CompMaxReal(const Grid<Real> &val)
+ : KernelBase(&val, 0), val(val), maxVal(-std::numeric_limits<Real>::max())
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, const Grid<Real> &val, Real &maxVal)
+ {
+ if (val[idx] > maxVal)
+ maxVal = val[idx];
+ }
+ inline operator Real()
+ {
+ return maxVal;
+ }
+ inline Real &getRet()
+ {
+ return maxVal;
+ }
+ inline const Grid<Real> &getArg0()
+ {
+ return val;
+ }
+ typedef Grid<Real> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel CompMaxReal ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, maxVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ CompMaxReal(CompMaxReal &o, tbb::split)
+ : KernelBase(o), val(o.val), maxVal(-std::numeric_limits<Real>::max())
+ {
+ }
+ void join(const CompMaxReal &o)
+ {
+ maxVal = max(maxVal, o.maxVal);
+ }
+ const Grid<Real> &val;
+ Real maxVal;
+};
+
+//! Kernel: Compute min value of int grid
+
+struct CompMinInt : public KernelBase {
+ CompMinInt(const Grid<int> &val)
+ : KernelBase(&val, 0), val(val), minVal(std::numeric_limits<int>::max())
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, const Grid<int> &val, int &minVal)
+ {
+ if (val[idx] < minVal)
+ minVal = val[idx];
+ }
+ inline operator int()
+ {
+ return minVal;
+ }
+ inline int &getRet()
+ {
+ return minVal;
+ }
+ inline const Grid<int> &getArg0()
+ {
+ return val;
+ }
+ typedef Grid<int> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel CompMinInt ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, minVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ CompMinInt(CompMinInt &o, tbb::split)
+ : KernelBase(o), val(o.val), minVal(std::numeric_limits<int>::max())
+ {
+ }
+ void join(const CompMinInt &o)
+ {
+ minVal = min(minVal, o.minVal);
+ }
+ const Grid<int> &val;
+ int minVal;
+};
+
+//! Kernel: Compute max value of int grid
+
+struct CompMaxInt : public KernelBase {
+ CompMaxInt(const Grid<int> &val)
+ : KernelBase(&val, 0), val(val), maxVal(-std::numeric_limits<int>::max())
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, const Grid<int> &val, int &maxVal)
+ {
+ if (val[idx] > maxVal)
+ maxVal = val[idx];
+ }
+ inline operator int()
+ {
+ return maxVal;
+ }
+ inline int &getRet()
+ {
+ return maxVal;
+ }
+ inline const Grid<int> &getArg0()
+ {
+ return val;
+ }
+ typedef Grid<int> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel CompMaxInt ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, maxVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ CompMaxInt(CompMaxInt &o, tbb::split)
+ : KernelBase(o), val(o.val), maxVal(-std::numeric_limits<int>::max())
+ {
+ }
+ void join(const CompMaxInt &o)
+ {
+ maxVal = max(maxVal, o.maxVal);
+ }
+ const Grid<int> &val;
+ int maxVal;
+};
+
+//! Kernel: Compute min norm of vec grid
+
+struct CompMinVec : public KernelBase {
+ CompMinVec(const Grid<Vec3> &val)
+ : KernelBase(&val, 0), val(val), minVal(std::numeric_limits<Real>::max())
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, const Grid<Vec3> &val, Real &minVal)
+ {
+ const Real s = normSquare(val[idx]);
+ if (s < minVal)
+ minVal = s;
+ }
+ inline operator Real()
+ {
+ return minVal;
+ }
+ inline Real &getRet()
+ {
+ return minVal;
+ }
+ inline const Grid<Vec3> &getArg0()
+ {
+ return val;
+ }
+ typedef Grid<Vec3> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel CompMinVec ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, minVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ CompMinVec(CompMinVec &o, tbb::split)
+ : KernelBase(o), val(o.val), minVal(std::numeric_limits<Real>::max())
+ {
+ }
+ void join(const CompMinVec &o)
+ {
+ minVal = min(minVal, o.minVal);
+ }
+ const Grid<Vec3> &val;
+ Real minVal;
+};
+
+//! Kernel: Compute max norm of vec grid
+
+struct CompMaxVec : public KernelBase {
+ CompMaxVec(const Grid<Vec3> &val)
+ : KernelBase(&val, 0), val(val), maxVal(-std::numeric_limits<Real>::max())
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, const Grid<Vec3> &val, Real &maxVal)
+ {
+ const Real s = normSquare(val[idx]);
+ if (s > maxVal)
+ maxVal = s;
+ }
+ inline operator Real()
+ {
+ return maxVal;
+ }
+ inline Real &getRet()
+ {
+ return maxVal;
+ }
+ inline const Grid<Vec3> &getArg0()
+ {
+ return val;
+ }
+ typedef Grid<Vec3> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel CompMaxVec ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, maxVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ CompMaxVec(CompMaxVec &o, tbb::split)
+ : KernelBase(o), val(o.val), maxVal(-std::numeric_limits<Real>::max())
+ {
+ }
+ void join(const CompMaxVec &o)
+ {
+ maxVal = max(maxVal, o.maxVal);
+ }
+ const Grid<Vec3> &val;
+ Real maxVal;
+};
+
//! Overwrite this grid's contents with a deep copy of 'a'; the resolutions
//! must match exactly. When copyType is set, the grid-type marker is copied
//! as well (preprocessed Python API — use this instead of operator=).
template<class T> Grid<T> &Grid<T>::copyFrom(const Grid<T> &a, bool copyType)
{
  assertMsg(a.mSize.x == mSize.x && a.mSize.y == mSize.y && a.mSize.z == mSize.z,
            "different grid resolutions " << a.mSize << " vs " << this->mSize);
  memcpy(mData, a.mData, sizeof(T) * mSize.x * mSize.y * mSize.z);
  if (copyType)
    mType = a.mType;  // copy type marker
  return *this;
}
+/*template<class T> Grid<T>& Grid<T>::operator= (const Grid<T>& a) {
+ note: do not use , use copyFrom instead
+}*/
+
+template<class T> struct knGridSetConstReal : public KernelBase {
+ knGridSetConstReal(Grid<T> &me, T val) : KernelBase(&me, 0), me(me), val(val)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, T val) const
+ {
+ me[idx] = val;
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline T &getArg1()
+ {
+ return val;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knGridSetConstReal ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, val);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ T val;
+};
+template<class T> struct knGridAddConstReal : public KernelBase {
+ knGridAddConstReal(Grid<T> &me, T val) : KernelBase(&me, 0), me(me), val(val)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, T val) const
+ {
+ me[idx] += val;
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline T &getArg1()
+ {
+ return val;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knGridAddConstReal ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, val);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ T val;
+};
+template<class T> struct knGridMultConst : public KernelBase {
+ knGridMultConst(Grid<T> &me, T val) : KernelBase(&me, 0), me(me), val(val)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, T val) const
+ {
+ me[idx] *= val;
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline T &getArg1()
+ {
+ return val;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knGridMultConst ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, val);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ T val;
+};
+
+template<class T> struct knGridSafeDiv : public KernelBase {
+ knGridSafeDiv(Grid<T> &me, const Grid<T> &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, const Grid<T> &other) const
+ {
+ me[idx] = safeDivide(me[idx], other[idx]);
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline const Grid<T> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid<T> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knGridSafeDiv ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ const Grid<T> &other;
+};
+// KERNEL(idx) template<class T> void gridSafeDiv (Grid<T>& me, const Grid<T>& other) { me[idx] =
+// safeDivide(me[idx], other[idx]); }
+
+template<class T> struct knGridClamp : public KernelBase {
+ knGridClamp(Grid<T> &me, const T &min, const T &max)
+ : KernelBase(&me, 0), me(me), min(min), max(max)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, const T &min, const T &max) const
+ {
+ me[idx] = clamp(me[idx], min, max);
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline const T &getArg1()
+ {
+ return min;
+ }
+ typedef T type1;
+ inline const T &getArg2()
+ {
+ return max;
+ }
+ typedef T type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knGridClamp ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, min, max);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ const T &min;
+ const T &max;
+};
+
//! Threshold helper: zero out v when it lies below th (comparison is kept as
//! 'v < th' so NaN values pass through unchanged).
template<typename T> inline void stomp(T &v, const T &th)
{
  if (v < th) {
    v = 0;
  }
}
//! Vec3 specialization: thresholds each component independently instead of
//! comparing whole vectors.
template<> inline void stomp<Vec3>(Vec3 &v, const Vec3 &th)
{
  if (v[0] < th[0])
    v[0] = 0;
  if (v[1] < th[1])
    v[1] = 0;
  if (v[2] < th[2])
    v[2] = 0;
}
+template<class T> struct knGridStomp : public KernelBase {
+ knGridStomp(Grid<T> &me, const T &threshold) : KernelBase(&me, 0), me(me), threshold(threshold)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, const T &threshold) const
+ {
+ stomp(me[idx], threshold);
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline const T &getArg1()
+ {
+ return threshold;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knGridStomp ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, threshold);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ const T &threshold;
+};
+
+template<class T> struct knPermuteAxes : public KernelBase {
+ knPermuteAxes(Grid<T> &self, Grid<T> &target, int axis0, int axis1, int axis2)
+ : KernelBase(&self, 0), self(self), target(target), axis0(axis0), axis1(axis1), axis2(axis2)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(
+ int i, int j, int k, Grid<T> &self, Grid<T> &target, int axis0, int axis1, int axis2) const
+ {
+ int i0 = axis0 == 0 ? i : (axis0 == 1 ? j : k);
+ int i1 = axis1 == 0 ? i : (axis1 == 1 ? j : k);
+ int i2 = axis2 == 0 ? i : (axis2 == 1 ? j : k);
+ target(i0, i1, i2) = self(i, j, k);
+ }
+ inline Grid<T> &getArg0()
+ {
+ return self;
+ }
+ typedef Grid<T> type0;
+ inline Grid<T> &getArg1()
+ {
+ return target;
+ }
+ typedef Grid<T> type1;
+ inline int &getArg2()
+ {
+ return axis0;
+ }
+ typedef int type2;
+ inline int &getArg3()
+ {
+ return axis1;
+ }
+ typedef int type3;
+ inline int &getArg4()
+ {
+ return axis2;
+ }
+ typedef int type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPermuteAxes ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, self, target, axis0, axis1, axis2);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, self, target, axis0, axis1, axis2);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ Grid<T> &self;
+ Grid<T> &target;
+ int axis0;
+ int axis1;
+ int axis2;
+};
+
+template<class T> Grid<T> &Grid<T>::safeDivide(const Grid<T> &a)
+{
+ knGridSafeDiv<T>(*this, a);
+ return *this;
+}
+
+template<class T> int Grid<T>::getGridType()
+{
+ return static_cast<int>(mType);
+}
+
+template<class T> void Grid<T>::add(const Grid<T> &a)
+{
+ gridAdd<T, T>(*this, a);
+}
+template<class T> void Grid<T>::sub(const Grid<T> &a)
+{
+ gridSub<T, T>(*this, a);
+}
+template<class T> void Grid<T>::addScaled(const Grid<T> &a, const T &factor)
+{
+ gridScaledAdd<T, T>(*this, a, factor);
+}
+template<class T> void Grid<T>::setConst(T a)
+{
+ knGridSetConstReal<T>(*this, T(a));
+}
+template<class T> void Grid<T>::addConst(T a)
+{
+ knGridAddConstReal<T>(*this, T(a));
+}
+template<class T> void Grid<T>::multConst(T a)
+{
+ knGridMultConst<T>(*this, a);
+}
+
+template<class T> void Grid<T>::mult(const Grid<T> &a)
+{
+ gridMult<T, T>(*this, a);
+}
+
+template<class T> void Grid<T>::clamp(Real min, Real max)
+{
+ knGridClamp<T>(*this, T(min), T(max));
+}
+template<class T> void Grid<T>::stomp(const T &threshold)
+{
+ knGridStomp<T>(*this, threshold);
+}
//! Permute this grid's axes in place; (axis0, axis1, axis2) must be a
//! permutation of (0, 1, 2) — invalid combinations are silently ignored.
//! Requires a cubic grid (square in 2D) since the permuted result is written
//! back into storage of the same dimensions via a temporary and swap().
template<class T> void Grid<T>::permuteAxes(int axis0, int axis1, int axis2)
{
  if (axis0 == axis1 || axis0 == axis2 || axis1 == axis2 || axis0 > 2 || axis1 > 2 || axis2 > 2 ||
      axis0 < 0 || axis1 < 0 || axis2 < 0)
    return;
  Vec3i size = mParent->getGridSize();
  assertMsg(mParent->is2D() ? size.x == size.y : size.x == size.y && size.y == size.z,
            "Grid must be cubic!");
  Grid<T> tmp(mParent);
  knPermuteAxes<T>(*this, tmp, axis0, axis1, axis2);
  this->swap(tmp);
}
//! Permute this grid's axes into 'out' (non-cubic grids allowed, unlike the
//! in-place variant); invalid axis combinations are silently ignored. The
//! target's dimensions must equal this grid's dimensions under the
//! permutation, and both grids must share the same data type.
template<class T>
void Grid<T>::permuteAxesCopyToGrid(int axis0, int axis1, int axis2, Grid<T> &out)
{
  if (axis0 == axis1 || axis0 == axis2 || axis1 == axis2 || axis0 > 2 || axis1 > 2 || axis2 > 2 ||
      axis0 < 0 || axis1 < 0 || axis2 < 0)
    return;
  assertMsg(this->getGridType() == out.getGridType(), "Grids must have same data type!");
  Vec3i size = mParent->getGridSize();
  Vec3i sizeTarget = out.getParent()->getGridSize();
  assertMsg(sizeTarget[axis0] == size[0] && sizeTarget[axis1] == size[1] &&
                sizeTarget[axis2] == size[2],
            "Permuted grids must have the same dimensions!");
  knPermuteAxes<T>(*this, out, axis0, axis1, axis2);
}
+
+template<> Real Grid<Real>::getMax() const
+{
+ return CompMaxReal(*this);
+}
+template<> Real Grid<Real>::getMin() const
+{
+ return CompMinReal(*this);
+}
+template<> Real Grid<Real>::getMaxAbs() const
+{
+ Real amin = CompMinReal(*this);
+ Real amax = CompMaxReal(*this);
+ return max(fabs(amin), fabs(amax));
+}
+template<> Real Grid<Vec3>::getMax() const
+{
+ return sqrt(CompMaxVec(*this));
+}
+template<> Real Grid<Vec3>::getMin() const
+{
+ return sqrt(CompMinVec(*this));
+}
+template<> Real Grid<Vec3>::getMaxAbs() const
+{
+ return sqrt(CompMaxVec(*this));
+}
+template<> Real Grid<int>::getMax() const
+{
+ return (Real)CompMaxInt(*this);
+}
+template<> Real Grid<int>::getMin() const
+{
+ return (Real)CompMinInt(*this);
+}
+template<> Real Grid<int>::getMaxAbs() const
+{
+ int amin = CompMinInt(*this);
+ int amax = CompMaxInt(*this);
+ return max(fabs((Real)amin), fabs((Real)amax));
+}
+template<class T> std::string Grid<T>::getDataPointer()
+{
+ std::ostringstream out;
+ out << mData;
+ return out.str();
+}
+
+// L1 / L2 functions
+
+//! calculate L1 norm for whole grid with non-parallelized loop
+template<class GRID> Real loop_calcL1Grid(const GRID &grid, int bnd)
+{
+ double accu = 0.;
+ FOR_IJKT_BND(grid, bnd)
+ {
+ accu += norm(grid(i, j, k, t));
+ }
+ return (Real)accu;
+}
+
+//! calculate L2 norm for whole grid with non-parallelized loop
+// note - kernels "could" be used here, but they can't be templated at the moment (also, that would
+// mean the bnd parameter is fixed)
+template<class GRID> Real loop_calcL2Grid(const GRID &grid, int bnd)
+{
+ double accu = 0.;
+ FOR_IJKT_BND(grid, bnd)
+ {
+ accu += normSquare(grid(i, j, k, t)); // supported for real and vec3,4 types
+ }
+ return (Real)sqrt(accu);
+}
+
+//! compute L1 norm of whole grid content (note, not parallel at the moment)
+template<class T> Real Grid<T>::getL1(int bnd)
+{
+ return loop_calcL1Grid<Grid<T>>(*this, bnd);
+}
+//! compute L2 norm of whole grid content (note, not parallel at the moment)
+template<class T> Real Grid<T>::getL2(int bnd)
+{
+ return loop_calcL2Grid<Grid<T>>(*this, bnd);
+}
+
//! Reduction kernel: count cells whose flag value matches any bit of 'flag'
//! (not all bits), optionally skipping a boundary band of 'bnd' cells; if
//! 'mask' is given it is filled with 1 for counted cells and 0 elsewhere.
//! NOTE(review): follows manta's KernelBase/TBB boilerplate (split constructor
//! plus join for tbb::parallel_reduce) - presumably generated; prefer
//! regenerating over hand edits.
struct knCountCells : public KernelBase {
  knCountCells(const FlagGrid &flags, int flag, int bnd, Grid<Real> *mask)
      : KernelBase(&flags, 0), flags(flags), flag(flag), bnd(bnd), mask(mask), cnt(0)
  {
    runMessage();
    run();
  }
  // per-cell body: clear mask entry, skip boundary band, count matching cells
  inline void op(
      int i, int j, int k, const FlagGrid &flags, int flag, int bnd, Grid<Real> *mask, int &cnt)
  {
    if (mask)
      (*mask)(i, j, k) = 0.;
    if (bnd > 0 && (!flags.isInBounds(Vec3i(i, j, k), bnd)))
      return;
    if (flags(i, j, k) & flag) {
      cnt++;
      if (mask)
        (*mask)(i, j, k) = 1.;
    }
  }
  // implicit conversion yields the reduction result
  inline operator int()
  {
    return cnt;
  }
  inline int &getRet()
  {
    return cnt;
  }
  inline const FlagGrid &getArg0()
  {
    return flags;
  }
  typedef FlagGrid type0;
  inline int &getArg1()
  {
    return flag;
  }
  typedef int type1;
  inline int &getArg2()
  {
    return bnd;
  }
  typedef int type2;
  inline Grid<Real> *getArg3()
  {
    return mask;
  }
  typedef Grid<Real> type3;
  void runMessage()
  {
    debMsg("Executing kernel knCountCells ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // TBB worker: z-slices are the parallel dimension in 3D, rows in 2D
  void operator()(const tbb::blocked_range<IndexInt> &__r)
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, flags, flag, bnd, mask, cnt);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, flags, flag, bnd, mask, cnt);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_reduce(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  // split constructor + join implement the TBB reduction protocol
  knCountCells(knCountCells &o, tbb::split)
      : KernelBase(o), flags(o.flags), flag(o.flag), bnd(o.bnd), mask(o.mask), cnt(0)
  {
  }
  void join(const knCountCells &o)
  {
    cnt += o.cnt;
  }
  const FlagGrid &flags;
  int flag;
  int bnd;
  Grid<Real> *mask;
  int cnt;  // reduction accumulator
};
+
//! count number of cells of a certain type flag (can contain multiple bits, checks if any one of
//! them is set - not all!)
//! \param bnd boundary width to exclude, \param mask optional 0/1 output grid
int FlagGrid::countCells(int flag, int bnd, Grid<Real> *mask)
{
  return knCountCells(*this, flag, bnd, mask);
}
+
+// compute maximal diference of two cells in the grid
+// used for testing system
+
+Real gridMaxDiff(Grid<Real> &g1, Grid<Real> &g2)
+{
+ double maxVal = 0.;
+ FOR_IJK(g1)
+ {
+ maxVal = std::max(maxVal, (double)fabs(g1(i, j, k) - g2(i, j, k)));
+ }
+ return maxVal;
+}
//! Python binding wrapper for gridMaxDiff: unpacks args, times the plugin call,
//! converts the result to a PyObject, and reports exceptions via pbSetError.
//! NOTE(review): generated-binding pattern - presumably auto-generated; prefer
//! regenerating over hand edits.
static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "gridMaxDiff", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Real> &g1 = *_args.getPtr<Grid<Real>>("g1", 0, &_lock);
      Grid<Real> &g2 = *_args.getPtr<Grid<Real>>("g2", 1, &_lock);
      _retval = toPy(gridMaxDiff(g1, g2));
      _args.check();
    }
    pbFinalizePlugin(parent, "gridMaxDiff", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("gridMaxDiff", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "gridMaxDiff"
static const Pb::Register _RP_gridMaxDiff("", "gridMaxDiff", _W_0);
extern "C" {
void PbRegister_gridMaxDiff()
{
  KEEP_UNUSED(_RP_gridMaxDiff);
}
}
+
+Real gridMaxDiffInt(Grid<int> &g1, Grid<int> &g2)
+{
+ double maxVal = 0.;
+ FOR_IJK(g1)
+ {
+ maxVal = std::max(maxVal, (double)fabs((double)g1(i, j, k) - g2(i, j, k)));
+ }
+ return maxVal;
+}
//! Python binding wrapper for gridMaxDiffInt (generated-binding pattern).
static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "gridMaxDiffInt", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<int> &g1 = *_args.getPtr<Grid<int>>("g1", 0, &_lock);
      Grid<int> &g2 = *_args.getPtr<Grid<int>>("g2", 1, &_lock);
      _retval = toPy(gridMaxDiffInt(g1, g2));
      _args.check();
    }
    pbFinalizePlugin(parent, "gridMaxDiffInt", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("gridMaxDiffInt", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "gridMaxDiffInt"
static const Pb::Register _RP_gridMaxDiffInt("", "gridMaxDiffInt", _W_1);
extern "C" {
void PbRegister_gridMaxDiffInt()
{
  KEEP_UNUSED(_RP_gridMaxDiffInt);
}
}
+
+Real gridMaxDiffVec3(Grid<Vec3> &g1, Grid<Vec3> &g2)
+{
+ double maxVal = 0.;
+ FOR_IJK(g1)
+ {
+ // accumulate differences with double precision
+ // note - don't use norm here! should be as precise as possible...
+ double d = 0.;
+ for (int c = 0; c < 3; ++c) {
+ d += fabs((double)g1(i, j, k)[c] - (double)g2(i, j, k)[c]);
+ }
+ maxVal = std::max(maxVal, d);
+ // maxVal = std::max(maxVal, (double)fabs( norm(g1(i,j,k)-g2(i,j,k)) ));
+ }
+ return maxVal;
+}
//! Python binding wrapper for gridMaxDiffVec3 (generated-binding pattern).
static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "gridMaxDiffVec3", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Vec3> &g1 = *_args.getPtr<Grid<Vec3>>("g1", 0, &_lock);
      Grid<Vec3> &g2 = *_args.getPtr<Grid<Vec3>>("g2", 1, &_lock);
      _retval = toPy(gridMaxDiffVec3(g1, g2));
      _args.check();
    }
    pbFinalizePlugin(parent, "gridMaxDiffVec3", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("gridMaxDiffVec3", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "gridMaxDiffVec3"
static const Pb::Register _RP_gridMaxDiffVec3("", "gridMaxDiffVec3", _W_2);
extern "C" {
void PbRegister_gridMaxDiffVec3()
{
  KEEP_UNUSED(_RP_gridMaxDiffVec3);
}
}
+
+// simple helper functions to copy (convert) mac to vec3 , and levelset to real grids
+// (are assumed to be the same for running the test cases - in general they're not!)
+
+void copyMacToVec3(MACGrid &source, Grid<Vec3> &target)
+{
+ FOR_IJK(target)
+ {
+ target(i, j, k) = source(i, j, k);
+ }
+}
//! Python binding wrapper for copyMacToVec3 (generated-binding pattern; returns None).
static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "copyMacToVec3", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      MACGrid &source = *_args.getPtr<MACGrid>("source", 0, &_lock);
      Grid<Vec3> &target = *_args.getPtr<Grid<Vec3>>("target", 1, &_lock);
      _retval = getPyNone();
      copyMacToVec3(source, target);
      _args.check();
    }
    pbFinalizePlugin(parent, "copyMacToVec3", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("copyMacToVec3", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "copyMacToVec3"
static const Pb::Register _RP_copyMacToVec3("", "copyMacToVec3", _W_3);
extern "C" {
void PbRegister_copyMacToVec3()
{
  KEEP_UNUSED(_RP_copyMacToVec3);
}
}
+
//! Deprecated alias for copyMacToVec3, kept for backwards compatibility.
void convertMacToVec3(MACGrid &source, Grid<Vec3> &target)
{
  debMsg("Deprecated - do not use convertMacToVec3... use copyMacToVec3 instead", 1);
  copyMacToVec3(source, target);
}
//! Python binding wrapper for convertMacToVec3 (generated-binding pattern; returns None).
static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "convertMacToVec3", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      MACGrid &source = *_args.getPtr<MACGrid>("source", 0, &_lock);
      Grid<Vec3> &target = *_args.getPtr<Grid<Vec3>>("target", 1, &_lock);
      _retval = getPyNone();
      convertMacToVec3(source, target);
      _args.check();
    }
    pbFinalizePlugin(parent, "convertMacToVec3", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("convertMacToVec3", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "convertMacToVec3"
static const Pb::Register _RP_convertMacToVec3("", "convertMacToVec3", _W_4);
extern "C" {
void PbRegister_convertMacToVec3()
{
  KEEP_UNUSED(_RP_convertMacToVec3);
}
}
+
+//! vec3->mac grid conversion , but with full resampling
+void resampleVec3ToMac(Grid<Vec3> &source, MACGrid &target)
+{
+ FOR_IJK_BND(target, 1)
+ {
+ target(i, j, k)[0] = 0.5 * (source(i - 1, j, k)[0] + source(i, j, k))[0];
+ target(i, j, k)[1] = 0.5 * (source(i, j - 1, k)[1] + source(i, j, k))[1];
+ if (target.is3D()) {
+ target(i, j, k)[2] = 0.5 * (source(i, j, k - 1)[2] + source(i, j, k))[2];
+ }
+ }
+}
//! Python binding wrapper for resampleVec3ToMac (generated-binding pattern; returns None).
static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "resampleVec3ToMac", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Vec3> &source = *_args.getPtr<Grid<Vec3>>("source", 0, &_lock);
      MACGrid &target = *_args.getPtr<MACGrid>("target", 1, &_lock);
      _retval = getPyNone();
      resampleVec3ToMac(source, target);
      _args.check();
    }
    pbFinalizePlugin(parent, "resampleVec3ToMac", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("resampleVec3ToMac", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "resampleVec3ToMac"
static const Pb::Register _RP_resampleVec3ToMac("", "resampleVec3ToMac", _W_5);
extern "C" {
void PbRegister_resampleVec3ToMac()
{
  KEEP_UNUSED(_RP_resampleVec3ToMac);
}
}
+
+//! mac->vec3 grid conversion , with full resampling
+void resampleMacToVec3(MACGrid &source, Grid<Vec3> &target)
+{
+ FOR_IJK_BND(target, 1)
+ {
+ target(i, j, k) = source.getCentered(i, j, k);
+ }
+}
//! Python binding wrapper for resampleMacToVec3 (generated-binding pattern; returns None).
static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "resampleMacToVec3", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      MACGrid &source = *_args.getPtr<MACGrid>("source", 0, &_lock);
      Grid<Vec3> &target = *_args.getPtr<Grid<Vec3>>("target", 1, &_lock);
      _retval = getPyNone();
      resampleMacToVec3(source, target);
      _args.check();
    }
    pbFinalizePlugin(parent, "resampleMacToVec3", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("resampleMacToVec3", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "resampleMacToVec3"
static const Pb::Register _RP_resampleMacToVec3("", "resampleMacToVec3", _W_6);
extern "C" {
void PbRegister_resampleMacToVec3()
{
  KEEP_UNUSED(_RP_resampleMacToVec3);
}
}
+
+void copyLevelsetToReal(LevelsetGrid &source, Grid<Real> &target)
+{
+ FOR_IJK(target)
+ {
+ target(i, j, k) = source(i, j, k);
+ }
+}
//! Python binding wrapper for copyLevelsetToReal (generated-binding pattern; returns None).
static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "copyLevelsetToReal", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      LevelsetGrid &source = *_args.getPtr<LevelsetGrid>("source", 0, &_lock);
      Grid<Real> &target = *_args.getPtr<Grid<Real>>("target", 1, &_lock);
      _retval = getPyNone();
      copyLevelsetToReal(source, target);
      _args.check();
    }
    pbFinalizePlugin(parent, "copyLevelsetToReal", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("copyLevelsetToReal", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "copyLevelsetToReal"
static const Pb::Register _RP_copyLevelsetToReal("", "copyLevelsetToReal", _W_7);
extern "C" {
void PbRegister_copyLevelsetToReal()
{
  KEEP_UNUSED(_RP_copyLevelsetToReal);
}
}
+
+void copyVec3ToReal(Grid<Vec3> &source,
+ Grid<Real> &targetX,
+ Grid<Real> &targetY,
+ Grid<Real> &targetZ)
+{
+ FOR_IJK(source)
+ {
+ targetX(i, j, k) = source(i, j, k).x;
+ targetY(i, j, k) = source(i, j, k).y;
+ targetZ(i, j, k) = source(i, j, k).z;
+ }
+}
//! Python binding wrapper for copyVec3ToReal (generated-binding pattern; returns None).
static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "copyVec3ToReal", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Vec3> &source = *_args.getPtr<Grid<Vec3>>("source", 0, &_lock);
      Grid<Real> &targetX = *_args.getPtr<Grid<Real>>("targetX", 1, &_lock);
      Grid<Real> &targetY = *_args.getPtr<Grid<Real>>("targetY", 2, &_lock);
      Grid<Real> &targetZ = *_args.getPtr<Grid<Real>>("targetZ", 3, &_lock);
      _retval = getPyNone();
      copyVec3ToReal(source, targetX, targetY, targetZ);
      _args.check();
    }
    pbFinalizePlugin(parent, "copyVec3ToReal", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("copyVec3ToReal", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "copyVec3ToReal"
static const Pb::Register _RP_copyVec3ToReal("", "copyVec3ToReal", _W_8);
extern "C" {
void PbRegister_copyVec3ToReal()
{
  KEEP_UNUSED(_RP_copyVec3ToReal);
}
}
+
+void copyRealToVec3(Grid<Real> &sourceX,
+ Grid<Real> &sourceY,
+ Grid<Real> &sourceZ,
+ Grid<Vec3> &target)
+{
+ FOR_IJK(target)
+ {
+ target(i, j, k).x = sourceX(i, j, k);
+ target(i, j, k).y = sourceY(i, j, k);
+ target(i, j, k).z = sourceZ(i, j, k);
+ }
+}
//! Python binding wrapper for copyRealToVec3 (generated-binding pattern; returns None).
static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "copyRealToVec3", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Real> &sourceX = *_args.getPtr<Grid<Real>>("sourceX", 0, &_lock);
      Grid<Real> &sourceY = *_args.getPtr<Grid<Real>>("sourceY", 1, &_lock);
      Grid<Real> &sourceZ = *_args.getPtr<Grid<Real>>("sourceZ", 2, &_lock);
      Grid<Vec3> &target = *_args.getPtr<Grid<Vec3>>("target", 3, &_lock);
      _retval = getPyNone();
      copyRealToVec3(sourceX, sourceY, sourceZ, target);
      _args.check();
    }
    pbFinalizePlugin(parent, "copyRealToVec3", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("copyRealToVec3", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "copyRealToVec3"
static const Pb::Register _RP_copyRealToVec3("", "copyRealToVec3", _W_9);
extern "C" {
void PbRegister_copyRealToVec3()
{
  KEEP_UNUSED(_RP_copyRealToVec3);
}
}
+
//! Deprecated alias for copyLevelsetToReal, kept for backwards compatibility.
void convertLevelsetToReal(LevelsetGrid &source, Grid<Real> &target)
{
  debMsg("Deprecated - do not use convertLevelsetToReal... use copyLevelsetToReal instead", 1);
  copyLevelsetToReal(source, target);
}
//! Python binding wrapper for convertLevelsetToReal (generated-binding pattern; returns None).
static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "convertLevelsetToReal", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      LevelsetGrid &source = *_args.getPtr<LevelsetGrid>("source", 0, &_lock);
      Grid<Real> &target = *_args.getPtr<Grid<Real>>("target", 1, &_lock);
      _retval = getPyNone();
      convertLevelsetToReal(source, target);
      _args.check();
    }
    pbFinalizePlugin(parent, "convertLevelsetToReal", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("convertLevelsetToReal", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "convertLevelsetToReal"
static const Pb::Register _RP_convertLevelsetToReal("", "convertLevelsetToReal", _W_10);
extern "C" {
void PbRegister_convertLevelsetToReal()
{
  KEEP_UNUSED(_RP_convertLevelsetToReal);
}
}
+
//! Dump grid contents via debMsg; if zSlice >= 0 only that z-slice is printed,
//! otherwise all cells. 'printIndex' prefixes each value with its i,j[,k]
//! index, and 'bnd' border cells are skipped.
template<class T> void Grid<T>::printGrid(int zSlice, bool printIndex, int bnd)
{
  std::ostringstream out;
  out << std::endl;
  FOR_IJK_BND(*this, bnd)
  {
    IndexInt idx = (*this).index(i, j, k);
    if ((zSlice >= 0 && k == zSlice) || (zSlice < 0)) {
      out << " ";
      if (printIndex && this->is3D())
        out << " " << i << "," << j << "," << k << ":";
      if (printIndex && !this->is3D())
        out << " " << i << "," << j << ":";
      out << (*this)[idx];
      // line break after the last printed cell of each row
      if (i == (*this).getSizeX() - 1 - bnd)
        out << std::endl;
    }
  }
  out << endl;
  debMsg("Printing " << this->getName() << out.str().c_str(), 1);
}
+
+//! helper to swap components of a grid (eg for data import)
+void swapComponents(Grid<Vec3> &vel, int c1 = 0, int c2 = 1, int c3 = 2)
+{
+ FOR_IJK(vel)
+ {
+ Vec3 v = vel(i, j, k);
+ vel(i, j, k)[0] = v[c1];
+ vel(i, j, k)[1] = v[c2];
+ vel(i, j, k)[2] = v[c3];
+ }
+}
//! Python binding wrapper for swapComponents (generated-binding pattern; returns None).
static PyObject *_W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "swapComponents", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Vec3> &vel = *_args.getPtr<Grid<Vec3>>("vel", 0, &_lock);
      int c1 = _args.getOpt<int>("c1", 1, 0, &_lock);
      int c2 = _args.getOpt<int>("c2", 2, 1, &_lock);
      int c3 = _args.getOpt<int>("c3", 3, 2, &_lock);
      _retval = getPyNone();
      swapComponents(vel, c1, c2, c3);
      _args.check();
    }
    pbFinalizePlugin(parent, "swapComponents", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("swapComponents", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "swapComponents"
static const Pb::Register _RP_swapComponents("", "swapComponents", _W_11);
extern "C" {
void PbRegister_swapComponents()
{
  KEEP_UNUSED(_RP_swapComponents);
}
}
+
// helper functions for UV grid data (stored grid coordinates as Vec3 values, and uv weight in
// entry zero)

// make uv weight accessible in python
//! Returns the uv weight, stored in the x component of the grid's first cell
//! (written there by updateUvWeight).
Real getUvWeight(Grid<Vec3> &uv)
{
  return uv[0][0];
}
//! Python binding wrapper for getUvWeight (generated-binding pattern).
static PyObject *_W_12(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "getUvWeight", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Vec3> &uv = *_args.getPtr<Grid<Vec3>>("uv", 0, &_lock);
      _retval = toPy(getUvWeight(uv));
      _args.check();
    }
    pbFinalizePlugin(parent, "getUvWeight", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("getUvWeight", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "getUvWeight"
static const Pb::Register _RP_getUvWeight("", "getUvWeight", _W_12);
extern "C" {
void PbRegister_getUvWeight()
{
  KEEP_UNUSED(_RP_getUvWeight);
}
}
+
// note - right now the UV grids have 0 values at the border after advection... could be fixed with
// an extrapolation step...

// compute normalized modulo interval
//! Map absolute time t onto the periodic unit interval with period resetTime.
static inline Real computeUvGridTime(Real t, Real resetTime)
{
  return fmod((t / resetTime), (Real)1.);
}
+// create ramp function in 0..1 range with half frequency
+static inline Real computeUvRamp(Real t)
+{
+ Real uvWeight = 2. * t;
+ if (uvWeight > 1.)
+ uvWeight = 2. - uvWeight;
+ return uvWeight;
+}
+
//! Kernel: reset a uv grid so each cell stores its own grid coordinate
//! (plus an optional constant offset). KernelBase/TBB boilerplate - presumably
//! generated; prefer regenerating over hand edits.
struct knResetUvGrid : public KernelBase {
  knResetUvGrid(Grid<Vec3> &target, const Vec3 *offset)
      : KernelBase(&target, 0), target(target), offset(offset)
  {
    runMessage();
    run();
  }
  // per-cell body: write the (optionally offset) cell coordinate
  inline void op(int i, int j, int k, Grid<Vec3> &target, const Vec3 *offset) const
  {
    Vec3 coord = Vec3((Real)i, (Real)j, (Real)k);
    if (offset)
      coord += (*offset);
    target(i, j, k) = coord;
  }
  inline Grid<Vec3> &getArg0()
  {
    return target;
  }
  typedef Grid<Vec3> type0;
  inline const Vec3 *getArg1()
  {
    return offset;
  }
  typedef Vec3 type1;
  void runMessage()
  {
    debMsg("Executing kernel knResetUvGrid ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // TBB worker: z-slices are the parallel dimension in 3D, rows in 2D
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, target, offset);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, target, offset);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  Grid<Vec3> &target;
  const Vec3 *offset;  // optional; may be NULL
};
+
//! Reset a uv grid to per-cell coordinates (see knResetUvGrid); the kernel runs
//! from its constructor, so constructing the named object does all the work.
void resetUvGrid(Grid<Vec3> &target, const Vec3 *offset = NULL)
{
  knResetUvGrid reset(target,
                      offset);  // note, llvm complains about anonymous declaration here... ?
}
//! Python binding wrapper for resetUvGrid (generated-binding pattern; returns None).
static PyObject *_W_13(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "resetUvGrid", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Vec3> &target = *_args.getPtr<Grid<Vec3>>("target", 0, &_lock);
      const Vec3 *offset = _args.getPtrOpt<Vec3>("offset", 1, NULL, &_lock);
      _retval = getPyNone();
      resetUvGrid(target, offset);
      _args.check();
    }
    pbFinalizePlugin(parent, "resetUvGrid", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("resetUvGrid", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "resetUvGrid"
static const Pb::Register _RP_resetUvGrid("", "resetUvGrid", _W_13);
extern "C" {
void PbRegister_resetUvGrid()
{
  KEEP_UNUSED(_RP_resetUvGrid);
}
}
+
//! Update uv grid 'index' (one of 'numUvs' time-staggered grids): compute its
//! normalized ramp weight, reset the grid to cell coordinates when its modulo
//! period wraps, and store the weight in uv[0].x (read back by getUvWeight).
void updateUvWeight(
    Real resetTime, int index, int numUvs, Grid<Vec3> &uv, const Vec3 *offset = NULL)
{
  const Real t = uv.getParent()->getTime();
  Real timeOff = resetTime / (Real)numUvs;  // phase offset staggering the uv grids

  // normalized times of the previous and the current step for this grid
  Real lastt = computeUvGridTime(t + (Real)index * timeOff - uv.getParent()->getDt(), resetTime);
  Real currt = computeUvGridTime(t + (Real)index * timeOff, resetTime);
  Real uvWeight = computeUvRamp(currt);

  // normalize the uvw weights , note: this is a bit wasteful...
  Real uvWTotal = 0.;
  for (int i = 0; i < numUvs; ++i) {
    uvWTotal += computeUvRamp(computeUvGridTime(t + (Real)i * timeOff, resetTime));
  }
  if (uvWTotal <= VECTOR_EPSILON) {
    uvWeight = uvWTotal = 1.;
  }
  else
    uvWeight /= uvWTotal;

  // check for reset (modulo time wrapped around since the last step)
  if (currt < lastt)
    knResetUvGrid reset(uv, offset);

  // write new weight value to grid
  uv[0] = Vec3(uvWeight, 0., 0.);

  // print info about uv weights?
  debMsg("Uv grid " << index << "/" << numUvs << " t=" << currt << " w=" << uvWeight
                    << ", reset:" << (int)(currt < lastt),
         2);
}
//! Python binding wrapper for updateUvWeight (generated-binding pattern; returns None).
static PyObject *_W_14(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "updateUvWeight", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Real resetTime = _args.get<Real>("resetTime", 0, &_lock);
      int index = _args.get<int>("index", 1, &_lock);
      int numUvs = _args.get<int>("numUvs", 2, &_lock);
      Grid<Vec3> &uv = *_args.getPtr<Grid<Vec3>>("uv", 3, &_lock);
      const Vec3 *offset = _args.getPtrOpt<Vec3>("offset", 4, NULL, &_lock);
      _retval = getPyNone();
      updateUvWeight(resetTime, index, numUvs, uv, offset);
      _args.check();
    }
    pbFinalizePlugin(parent, "updateUvWeight", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("updateUvWeight", e.what());
    return 0;
  }
}
// register the wrapper under the Python name "updateUvWeight"
static const Pb::Register _RP_updateUvWeight("", "updateUvWeight", _W_14);
extern "C" {
void PbRegister_updateUvWeight()
{
  KEEP_UNUSED(_RP_updateUvWeight);
}
}
+
//! Kernel: overwrite all cells within a boundary of width w+1 (Dirichlet-style)
//! with a constant value. KernelBase/TBB boilerplate - presumably generated.
template<class T> struct knSetBoundary : public KernelBase {
  knSetBoundary(Grid<T> &grid, T value, int w)
      : KernelBase(&grid, 0), grid(grid), value(value), w(w)
  {
    runMessage();
    run();
  }
  // per-cell body: detect whether (i,j,k) lies in the boundary band and set it
  inline void op(int i, int j, int k, Grid<T> &grid, T value, int w) const
  {
    bool bnd = (i <= w || i >= grid.getSizeX() - 1 - w || j <= w || j >= grid.getSizeY() - 1 - w ||
                (grid.is3D() && (k <= w || k >= grid.getSizeZ() - 1 - w)));
    if (bnd)
      grid(i, j, k) = value;
  }
  inline Grid<T> &getArg0()
  {
    return grid;
  }
  typedef Grid<T> type0;
  inline T &getArg1()
  {
    return value;
  }
  typedef T type1;
  inline int &getArg2()
  {
    return w;
  }
  typedef int type2;
  void runMessage()
  {
    debMsg("Executing kernel knSetBoundary ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // TBB worker: z-slices are the parallel dimension in 3D, rows in 2D
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, grid, value, w);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, grid, value, w);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  Grid<T> &grid;
  T value;
  int w;
};
+
//! Set all cells in a boundary band of width boundaryWidth+1 to 'value'.
template<class T> void Grid<T>::setBound(T value, int boundaryWidth)
{
  knSetBoundary<T>(*this, value, boundaryWidth);
}
+
//! Kernel: Neumann-style boundary - each boundary cell copies the value of the
//! nearest interior cell (clamped per axis). KernelBase/TBB boilerplate -
//! presumably generated.
template<class T> struct knSetBoundaryNeumann : public KernelBase {
  knSetBoundaryNeumann(Grid<T> &grid, int w) : KernelBase(&grid, 0), grid(grid), w(w)
  {
    runMessage();
    run();
  }
  // per-cell body: clamp the source index into the interior per axis; 'set'
  // records whether this cell is in the boundary band at all
  inline void op(int i, int j, int k, Grid<T> &grid, int w) const
  {
    bool set = false;
    int si = i, sj = j, sk = k;
    if (i <= w) {
      si = w + 1;
      set = true;
    }
    if (i >= grid.getSizeX() - 1 - w) {
      si = grid.getSizeX() - 1 - w - 1;
      set = true;
    }
    if (j <= w) {
      sj = w + 1;
      set = true;
    }
    if (j >= grid.getSizeY() - 1 - w) {
      sj = grid.getSizeY() - 1 - w - 1;
      set = true;
    }
    if (grid.is3D()) {
      if (k <= w) {
        sk = w + 1;
        set = true;
      }
      if (k >= grid.getSizeZ() - 1 - w) {
        sk = grid.getSizeZ() - 1 - w - 1;
        set = true;
      }
    }
    if (set)
      grid(i, j, k) = grid(si, sj, sk);
  }
  inline Grid<T> &getArg0()
  {
    return grid;
  }
  typedef Grid<T> type0;
  inline int &getArg1()
  {
    return w;
  }
  typedef int type1;
  void runMessage()
  {
    debMsg("Executing kernel knSetBoundaryNeumann ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // TBB worker: z-slices are the parallel dimension in 3D, rows in 2D
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, grid, w);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, grid, w);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  Grid<T> &grid;
  int w;
};
+
//! Copy nearest interior values into a boundary band of width boundaryWidth+1.
template<class T> void Grid<T>::setBoundNeumann(int boundaryWidth)
{
  knSetBoundaryNeumann<T>(*this, boundaryWidth);
}
+
//! kernel to set velocity components of mac grid to value for a boundary of w cells
//! (per-component bounds differ because each MAC component is face-sampled on
//! its own axis). KernelBase/TBB boilerplate - presumably generated.
struct knSetBoundaryMAC : public KernelBase {
  knSetBoundaryMAC(Grid<Vec3> &grid, Vec3 value, int w)
      : KernelBase(&grid, 0), grid(grid), value(value), w(w)
  {
    runMessage();
    run();
  }
  inline void op(int i, int j, int k, Grid<Vec3> &grid, Vec3 value, int w) const
  {
    if (i <= w || i >= grid.getSizeX() - w || j <= w - 1 || j >= grid.getSizeY() - 1 - w ||
        (grid.is3D() && (k <= w - 1 || k >= grid.getSizeZ() - 1 - w)))
      grid(i, j, k).x = value.x;
    if (i <= w - 1 || i >= grid.getSizeX() - 1 - w || j <= w || j >= grid.getSizeY() - w ||
        (grid.is3D() && (k <= w - 1 || k >= grid.getSizeZ() - 1 - w)))
      grid(i, j, k).y = value.y;
    if (i <= w - 1 || i >= grid.getSizeX() - 1 - w || j <= w - 1 || j >= grid.getSizeY() - 1 - w ||
        (grid.is3D() && (k <= w || k >= grid.getSizeZ() - w)))
      grid(i, j, k).z = value.z;
  }
  inline Grid<Vec3> &getArg0()
  {
    return grid;
  }
  typedef Grid<Vec3> type0;
  inline Vec3 &getArg1()
  {
    return value;
  }
  typedef Vec3 type1;
  inline int &getArg2()
  {
    return w;
  }
  typedef int type2;
  void runMessage()
  {
    debMsg("Executing kernel knSetBoundaryMAC ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // TBB worker: z-slices are the parallel dimension in 3D, rows in 2D
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, grid, value, w);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, grid, value, w);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  Grid<Vec3> &grid;
  Vec3 value;
  int w;
};
+
//! only set normal velocity components of mac grid to value for a boundary of w cells
//! (each component is set only near the walls it is normal to).
//! KernelBase/TBB boilerplate - presumably generated.
struct knSetBoundaryMACNorm : public KernelBase {
  knSetBoundaryMACNorm(Grid<Vec3> &grid, Vec3 value, int w)
      : KernelBase(&grid, 0), grid(grid), value(value), w(w)
  {
    runMessage();
    run();
  }
  inline void op(int i, int j, int k, Grid<Vec3> &grid, Vec3 value, int w) const
  {
    if (i <= w || i >= grid.getSizeX() - w)
      grid(i, j, k).x = value.x;
    if (j <= w || j >= grid.getSizeY() - w)
      grid(i, j, k).y = value.y;
    if ((grid.is3D() && (k <= w || k >= grid.getSizeZ() - w)))
      grid(i, j, k).z = value.z;
  }
  inline Grid<Vec3> &getArg0()
  {
    return grid;
  }
  typedef Grid<Vec3> type0;
  inline Vec3 &getArg1()
  {
    return value;
  }
  typedef Vec3 type1;
  inline int &getArg2()
  {
    return w;
  }
  typedef int type2;
  void runMessage()
  {
    debMsg("Executing kernel knSetBoundaryMACNorm ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // TBB worker: z-slices are the parallel dimension in 3D, rows in 2D
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, grid, value, w);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, grid, value, w);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  Grid<Vec3> &grid;
  Vec3 value;
  int w;
};
+
+//! set velocity components of mac grid to value for a boundary of w cells (optionally only normal
+//! values)
+void MACGrid::setBoundMAC(Vec3 value, int boundaryWidth, bool normalOnly)
+{
+ if (!normalOnly)
+ knSetBoundaryMAC(*this, value, boundaryWidth);
+ else
+ knSetBoundaryMACNorm(*this, value, boundaryWidth);
+}
+
//! helper kernels for getGridAvg

//! Reduction kernel: sum of all cell values (restricted to fluid cells when a
//! flag grid is given). KernelBase/TBB boilerplate - presumably generated.
struct knGridTotalSum : public KernelBase {
  knGridTotalSum(const Grid<Real> &a, FlagGrid *flags)
      : KernelBase(&a, 0), a(a), flags(flags), result(0.0)
  {
    runMessage();
    run();
  }
  // per-cell body: accumulate value, optionally filtered by fluid flag
  inline void op(IndexInt idx, const Grid<Real> &a, FlagGrid *flags, double &result)
  {
    if (flags) {
      if (flags->isFluid(idx))
        result += a[idx];
    }
    else {
      result += a[idx];
    }
  }
  // implicit conversion yields the reduction result
  inline operator double()
  {
    return result;
  }
  inline double &getRet()
  {
    return result;
  }
  inline const Grid<Real> &getArg0()
  {
    return a;
  }
  typedef Grid<Real> type0;
  inline FlagGrid *getArg1()
  {
    return flags;
  }
  typedef FlagGrid type1;
  void runMessage()
  {
    debMsg("Executing kernel knGridTotalSum ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r)
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, a, flags, result);
  }
  void run()
  {
    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  // split constructor + join implement the TBB reduction protocol
  knGridTotalSum(knGridTotalSum &o, tbb::split)
      : KernelBase(o), a(o.a), flags(o.flags), result(0.0)
  {
  }
  void join(const knGridTotalSum &o)
  {
    result += o.result;
  }
  const Grid<Real> &a;
  FlagGrid *flags;  // optional; may be NULL
  double result;    // reduction accumulator
};
+
//! Reduction kernel: count fluid cells in a flag grid.
//! NOTE(review): the accumulator is (misleadingly) named 'numEmpty' but counts
//! cells for which isFluid() is true; name kept since this looks like generated
//! code. KernelBase/TBB boilerplate.
struct knCountFluidCells : public KernelBase {
  knCountFluidCells(FlagGrid &flags) : KernelBase(&flags, 0), flags(flags), numEmpty(0)
  {
    runMessage();
    run();
  }
  inline void op(IndexInt idx, FlagGrid &flags, int &numEmpty)
  {
    if (flags.isFluid(idx))
      numEmpty++;
  }
  // implicit conversion yields the reduction result
  inline operator int()
  {
    return numEmpty;
  }
  inline int &getRet()
  {
    return numEmpty;
  }
  inline FlagGrid &getArg0()
  {
    return flags;
  }
  typedef FlagGrid type0;
  void runMessage()
  {
    debMsg("Executing kernel knCountFluidCells ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r)
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, flags, numEmpty);
  }
  void run()
  {
    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  // split constructor + join implement the TBB reduction protocol
  knCountFluidCells(knCountFluidCells &o, tbb::split) : KernelBase(o), flags(o.flags), numEmpty(0)
  {
  }
  void join(const knCountFluidCells &o)
  {
    numEmpty += o.numEmpty;
  }
  FlagGrid &flags;
  int numEmpty;  // reduction accumulator (counts fluid cells, despite the name)
};
+
//! averaged value for all cells (if flags are given, only for fluid cells)

Real getGridAvg(Grid<Real> &source, FlagGrid *flags = NULL)
{
  // Total over all cells (restricted to fluid cells when a flag grid is given).
  double sum = knGridTotalSum(source, flags);

  // Divisor: fluid cell count with flags, otherwise the full grid volume.
  double cells;
  if (flags) {
    cells = knCountFluidCells(*flags);
  }
  else {
    cells = source.getSizeX() * source.getSizeY() * source.getSizeZ();
  }

  if (cells > 0.)
    sum *= 1. / cells;
  else
    sum = -1.;  // sentinel value when there is nothing to average over
  return sum;
}
// Generated Python wrapper: unpacks "source"/"flags" and returns the average
// converted via toPy (0 on error, with the error recorded through pbSetError).
static PyObject *_W_15(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "getGridAvg", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Real> &source = *_args.getPtr<Grid<Real>>("source", 0, &_lock);
      FlagGrid *flags = _args.getPtrOpt<FlagGrid>("flags", 1, NULL, &_lock);
      _retval = toPy(getGridAvg(source, flags));
      _args.check();
    }
    pbFinalizePlugin(parent, "getGridAvg", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("getGridAvg", e.what());
    return 0;
  }
}
// Register the wrapper with the Python-side plugin registry.
static const Pb::Register _RP_getGridAvg("", "getGridAvg", _W_15);
extern "C" {
void PbRegister_getGridAvg()
{
  KEEP_UNUSED(_RP_getGridAvg);
}
}
+
//! transfer data between real and vec3 grids

//! Kernel: extract one component of a vec3 grid into a real grid.
struct knGetComponent : public KernelBase {
  knGetComponent(const Grid<Vec3> &source, Grid<Real> &target, int component)
      : KernelBase(&source, 0), source(source), target(target), component(component)
  {
    runMessage();
    run();
  }
  // Per-cell body: copy the selected vector component into the scalar grid.
  inline void op(IndexInt idx, const Grid<Vec3> &source, Grid<Real> &target, int component) const
  {
    target[idx] = source[idx][component];
  }
  inline const Grid<Vec3> &getArg0()
  {
    return source;
  }
  typedef Grid<Vec3> type0;
  inline Grid<Real> &getArg1()
  {
    return target;
  }
  typedef Grid<Real> type1;
  inline int &getArg2()
  {
    return component;
  }
  typedef int type2;
  void runMessage()
  {
    debMsg("Executing kernel knGetComponent ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, source, target, component);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  const Grid<Vec3> &source;
  Grid<Real> &target;
  int component;  // vector component index; not range-checked here
};
//! Copy one component of a vec3 grid into a real grid.
void getComponent(const Grid<Vec3> &source, Grid<Real> &target, int component)
{
  knGetComponent(source, target, component);
}
// Generated Python wrapper for getComponent(); returns None.
static PyObject *_W_16(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "getComponent", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      const Grid<Vec3> &source = *_args.getPtr<Grid<Vec3>>("source", 0, &_lock);
      Grid<Real> &target = *_args.getPtr<Grid<Real>>("target", 1, &_lock);
      int component = _args.get<int>("component", 2, &_lock);
      _retval = getPyNone();
      getComponent(source, target, component);
      _args.check();
    }
    pbFinalizePlugin(parent, "getComponent", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("getComponent", e.what());
    return 0;
  }
}
// Register the wrapper with the Python-side plugin registry.
static const Pb::Register _RP_getComponent("", "getComponent", _W_16);
extern "C" {
void PbRegister_getComponent()
{
  KEEP_UNUSED(_RP_getComponent);
}
}
+
//! Kernel: write a real grid into one component of a vec3 grid.
struct knSetComponent : public KernelBase {
  knSetComponent(const Grid<Real> &source, Grid<Vec3> &target, int component)
      : KernelBase(&source, 0), source(source), target(target), component(component)
  {
    runMessage();
    run();
  }
  // Per-cell body: store the scalar into the selected vector component.
  inline void op(IndexInt idx, const Grid<Real> &source, Grid<Vec3> &target, int component) const
  {
    target[idx][component] = source[idx];
  }
  inline const Grid<Real> &getArg0()
  {
    return source;
  }
  typedef Grid<Real> type0;
  inline Grid<Vec3> &getArg1()
  {
    return target;
  }
  typedef Grid<Vec3> type1;
  inline int &getArg2()
  {
    return component;
  }
  typedef int type2;
  void runMessage()
  {
    debMsg("Executing kernel knSetComponent ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, source, target, component);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  const Grid<Real> &source;
  Grid<Vec3> &target;
  int component;  // vector component index; not range-checked here
};
//! Copy a real grid into one component of a vec3 grid.
void setComponent(const Grid<Real> &source, Grid<Vec3> &target, int component)
{
  knSetComponent(source, target, component);
}
// Generated Python wrapper for setComponent(); returns None.
static PyObject *_W_17(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "setComponent", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      const Grid<Real> &source = *_args.getPtr<Grid<Real>>("source", 0, &_lock);
      Grid<Vec3> &target = *_args.getPtr<Grid<Vec3>>("target", 1, &_lock);
      int component = _args.get<int>("component", 2, &_lock);
      _retval = getPyNone();
      setComponent(source, target, component);
      _args.check();
    }
    pbFinalizePlugin(parent, "setComponent", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("setComponent", e.what());
    return 0;
  }
}
// Register the wrapper with the Python-side plugin registry.
static const Pb::Register _RP_setComponent("", "setComponent", _W_17);
extern "C" {
void PbRegister_setComponent()
{
  KEEP_UNUSED(_RP_setComponent);
}
}
+
+//******************************************************************************
+// Specialization classes
+
//! The Init{Min,Max}{X,Y,Z}Wall helpers below write a signed distance to one
//! domain wall into phiWalls, keeping the minimum with the value already
//! stored there, so several walls combine into a single level set.

// Min-x wall: distance of cell i from the wall plane at i = w + 0.5.
void FlagGrid::InitMinXWall(const int &boundaryWidth, Grid<Real> &phiWalls)
{
  const int w = boundaryWidth;
  FOR_IJK(phiWalls)
  {
    phiWalls(i, j, k) = std::min(i - w - .5, (double)phiWalls(i, j, k));
  }
}

// Max-x wall: mirrored distance measured from the far side of the grid.
void FlagGrid::InitMaxXWall(const int &boundaryWidth, Grid<Real> &phiWalls)
{
  const int w = boundaryWidth;
  FOR_IJK(phiWalls)
  {
    phiWalls(i, j, k) = std::min(mSize.x - i - 1.5 - w, (double)phiWalls(i, j, k));
  }
}

// Min-y wall (same construction along the y axis).
void FlagGrid::InitMinYWall(const int &boundaryWidth, Grid<Real> &phiWalls)
{
  const int w = boundaryWidth;
  FOR_IJK(phiWalls)
  {
    phiWalls(i, j, k) = std::min(j - w - .5, (double)phiWalls(i, j, k));
  }
}

// Max-y wall.
void FlagGrid::InitMaxYWall(const int &boundaryWidth, Grid<Real> &phiWalls)
{
  const int w = boundaryWidth;
  FOR_IJK(phiWalls)
  {
    phiWalls(i, j, k) = std::min(mSize.y - j - 1.5 - w, (double)phiWalls(i, j, k));
  }
}

// Min-z wall.
void FlagGrid::InitMinZWall(const int &boundaryWidth, Grid<Real> &phiWalls)
{
  const int w = boundaryWidth;
  FOR_IJK(phiWalls)
  {
    phiWalls(i, j, k) = std::min(k - w - .5, (double)phiWalls(i, j, k));
  }
}

// Max-z wall.
void FlagGrid::InitMaxZWall(const int &boundaryWidth, Grid<Real> &phiWalls)
{
  const int w = boundaryWidth;
  FOR_IJK(phiWalls)
  {
    phiWalls(i, j, k) = std::min(mSize.z - k - 1.5 - w, (double)phiWalls(i, j, k));
  }
}
+
//! Initialize the domain boundary flags from per-side type strings.
//! Side characters: lowercase x/y/z select the minimum side of an axis,
//! uppercase X/Y/Z the maximum side. Per side, the first match wins, checked
//! in the order open > inflow > outflow > wall; z sides are only considered
//! for 3D grids. Wall sides additionally write their distance into phiWalls
//! when that grid is supplied.
void FlagGrid::initDomain(const int &boundaryWidth,
                          const string &wallIn,
                          const string &openIn,
                          const string &inflowIn,
                          const string &outflowIn,
                          Grid<Real> *phiWalls)
{

  int types[6] = {0};     // resolved type per side: 0/1 = x min/max, 2/3 = y, 4/5 = z
  bool set[6] = {false};  // marks sides that have already been resolved
  // make sure we have at least 6 entries
  // NOTE(review): only a single space is appended, so inputs shorter than five
  // characters are still indexed past their size by the i<6 loop below --
  // presumably callers always pass 6-character side strings; TODO confirm.
  string wall = wallIn;
  wall.append(" ");
  string open = openIn;
  open.append(" ");
  string inflow = inflowIn;
  inflow.append(" ");
  string outflow = outflowIn;
  outflow.append(" ");

  // Start with a very large distance so the wall init helpers can min() into it.
  if (phiWalls)
    phiWalls->setConst(1000000000);

  for (int i = 0; i < 6; ++i) {
    // min x-direction
    if (!set[0]) {
      if (open[i] == 'x') {
        types[0] = TypeOpen;
        set[0] = true;
      }
      else if (inflow[i] == 'x') {
        types[0] = TypeInflow;
        set[0] = true;
      }
      else if (outflow[i] == 'x') {
        types[0] = TypeOutflow;
        set[0] = true;
      }
      else if (wall[i] == 'x') {
        types[0] = TypeObstacle;
        if (phiWalls)
          InitMinXWall(boundaryWidth, *phiWalls);
        set[0] = true;
      }
    }
    // max x-direction
    if (!set[1]) {
      if (open[i] == 'X') {
        types[1] = TypeOpen;
        set[1] = true;
      }
      else if (inflow[i] == 'X') {
        types[1] = TypeInflow;
        set[1] = true;
      }
      else if (outflow[i] == 'X') {
        types[1] = TypeOutflow;
        set[1] = true;
      }
      else if (wall[i] == 'X') {
        types[1] = TypeObstacle;
        if (phiWalls)
          InitMaxXWall(boundaryWidth, *phiWalls);
        set[1] = true;
      }
    }
    // min y-direction
    if (!set[2]) {
      if (open[i] == 'y') {
        types[2] = TypeOpen;
        set[2] = true;
      }
      else if (inflow[i] == 'y') {
        types[2] = TypeInflow;
        set[2] = true;
      }
      else if (outflow[i] == 'y') {
        types[2] = TypeOutflow;
        set[2] = true;
      }
      else if (wall[i] == 'y') {
        types[2] = TypeObstacle;
        if (phiWalls)
          InitMinYWall(boundaryWidth, *phiWalls);
        set[2] = true;
      }
    }
    // max y-direction
    if (!set[3]) {
      if (open[i] == 'Y') {
        types[3] = TypeOpen;
        set[3] = true;
      }
      else if (inflow[i] == 'Y') {
        types[3] = TypeInflow;
        set[3] = true;
      }
      else if (outflow[i] == 'Y') {
        types[3] = TypeOutflow;
        set[3] = true;
      }
      else if (wall[i] == 'Y') {
        types[3] = TypeObstacle;
        if (phiWalls)
          InitMaxYWall(boundaryWidth, *phiWalls);
        set[3] = true;
      }
    }
    if (this->is3D()) {
      // min z-direction
      if (!set[4]) {
        if (open[i] == 'z') {
          types[4] = TypeOpen;
          set[4] = true;
        }
        else if (inflow[i] == 'z') {
          types[4] = TypeInflow;
          set[4] = true;
        }
        else if (outflow[i] == 'z') {
          types[4] = TypeOutflow;
          set[4] = true;
        }
        else if (wall[i] == 'z') {
          types[4] = TypeObstacle;
          if (phiWalls)
            InitMinZWall(boundaryWidth, *phiWalls);
          set[4] = true;
        }
      }
      // max z-direction
      if (!set[5]) {
        if (open[i] == 'Z') {
          types[5] = TypeOpen;
          set[5] = true;
        }
        else if (inflow[i] == 'Z') {
          types[5] = TypeInflow;
          set[5] = true;
        }
        else if (outflow[i] == 'Z') {
          types[5] = TypeOutflow;
          set[5] = true;
        }
        else if (wall[i] == 'Z') {
          types[5] = TypeObstacle;
          if (phiWalls)
            InitMaxZWall(boundaryWidth, *phiWalls);
          set[5] = true;
        }
      }
    }
  }

  setConst(TypeEmpty);                   // reset the whole grid to empty first
  initBoundaries(boundaryWidth, types);  // then stamp the resolved side types
}
+
//! Write the resolved per-side boundary types into the grid's outer band of
//! width boundaryWidth. Assignments happen in axis order, so where bands
//! overlap (edges/corners) the later axis wins: y overrides x, z overrides both.
void FlagGrid::initBoundaries(const int &boundaryWidth, const int *types)
{
  const int w = boundaryWidth;
  FOR_IJK(*this)
  {
    bool bnd = (i <= w);
    if (bnd)
      mData[index(i, j, k)] = types[0];
    bnd = (i >= mSize.x - 1 - w);
    if (bnd)
      mData[index(i, j, k)] = types[1];
    bnd = (j <= w);
    if (bnd)
      mData[index(i, j, k)] = types[2];
    bnd = (j >= mSize.y - 1 - w);
    if (bnd)
      mData[index(i, j, k)] = types[3];
    if (is3D()) {
      bnd = (k <= w);
      if (bnd)
        mData[index(i, j, k)] = types[4];
      bnd = (k >= mSize.z - 1 - w);
      if (bnd)
        mData[index(i, j, k)] = types[5];
    }
  }
}
+
//! Sync the empty/fluid flags with a level set: cells with phi <= 0 become
//! fluid, all others empty. Obstacle and outflow cells are left untouched, as
//! are cells whose phi is at/below the level set's invalid-time marker.
void FlagGrid::updateFromLevelset(LevelsetGrid &levelset)
{
  FOR_IDX(*this)
  {
    if (!isObstacle(idx) && !isOutflow(idx)) {
      const Real phi = levelset[idx];
      if (phi <= levelset.invalidTimeValue())
        continue;  // skip cells flagged invalid in the level set

      mData[idx] &= ~(TypeEmpty | TypeFluid);            // clear empty/fluid flags
      mData[idx] |= (phi <= 0) ? TypeFluid : TypeEmpty;  // set respective flag
    }
  }
}
+
//! Set all cells that are neither obstacle, inflow, outflow nor open to the
//! given type, replacing any existing empty/fluid bits but keeping other bits.
void FlagGrid::fillGrid(int type)
{
  FOR_IDX(*this)
  {
    if ((mData[idx] & TypeObstacle) == 0 && (mData[idx] & TypeInflow) == 0 &&
        (mData[idx] & TypeOutflow) == 0 && (mData[idx] & TypeOpen) == 0)
      mData[idx] = (mData[idx] & ~(TypeEmpty | TypeFluid)) | type;
  }
}
+
// flag grid helper

//! Returns true if the cell at idx is fluid and none of its direct neighbors
//! (4 in 2D, 6 in 3D) is fluid.
//! NOTE(review): neighbors are reached via raw stride offsets with no bounds
//! checks -- assumes idx is not in the outermost cell layer (presumably
//! guaranteed by the boundary flags set up in initDomain); TODO confirm.
bool isIsolatedFluidCell(const IndexInt idx, const FlagGrid &flags)
{
  if (!flags.isFluid(idx))
    return false;
  if (flags.isFluid(idx - flags.getStrideX()))
    return false;
  if (flags.isFluid(idx + flags.getStrideX()))
    return false;
  if (flags.isFluid(idx - flags.getStrideY()))
    return false;
  if (flags.isFluid(idx + flags.getStrideY()))
    return false;
  if (!flags.is3D())
    return true;  // 2D: no z neighbors to check
  if (flags.isFluid(idx - flags.getStrideZ()))
    return false;
  if (flags.isFluid(idx + flags.getStrideZ()))
    return false;
  return true;
}
+
//! Kernel: overwrite the flags of isolated fluid cells with the given marker.
struct knMarkIsolatedFluidCell : public KernelBase {
  knMarkIsolatedFluidCell(FlagGrid &flags, const int mark)
      : KernelBase(&flags, 0), flags(flags), mark(mark)
  {
    runMessage();
    run();
  }
  // Per-cell body: replace the flag value of fluid cells with no fluid neighbor.
  inline void op(IndexInt idx, FlagGrid &flags, const int mark) const
  {
    if (isIsolatedFluidCell(idx, flags))
      flags[idx] = mark;
  }
  inline FlagGrid &getArg0()
  {
    return flags;
  }
  typedef FlagGrid type0;
  inline const int &getArg1()
  {
    return mark;
  }
  typedef int type1;
  void runMessage()
  {
    debMsg("Executing kernel knMarkIsolatedFluidCell ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, flags, mark);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  FlagGrid &flags;
  const int mark;  // flag value written into isolated fluid cells
};
+
//! Mark all isolated fluid cells (no fluid neighbor) with the given flag value.
void markIsolatedFluidCell(FlagGrid &flags, const int mark)
{
  knMarkIsolatedFluidCell(flags, mark);
}
// Generated Python wrapper for markIsolatedFluidCell(); returns None.
static PyObject *_W_18(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "markIsolatedFluidCell", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
      const int mark = _args.get<int>("mark", 1, &_lock);
      _retval = getPyNone();
      markIsolatedFluidCell(flags, mark);
      _args.check();
    }
    pbFinalizePlugin(parent, "markIsolatedFluidCell", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("markIsolatedFluidCell", e.what());
    return 0;
  }
}
// Register the wrapper with the Python-side plugin registry.
static const Pb::Register _RP_markIsolatedFluidCell("", "markIsolatedFluidCell", _W_18);
extern "C" {
void PbRegister_markIsolatedFluidCell()
{
  KEEP_UNUSED(_RP_markIsolatedFluidCell);
}
}
+
//! Copy MAC grid values from source into target for every cell whose flags
//! contain any of the given flag bits, skipping a boundary band of width bnd.
//! Both grids must have identical resolutions (asserted).
void copyMACData(
    const MACGrid &source, MACGrid &target, const FlagGrid &flags, const int flag, const int bnd)
{
  assertMsg(source.getSize().x == target.getSize().x && source.getSize().y == target.getSize().y &&
                source.getSize().z == target.getSize().z,
            "different grid resolutions " << source.getSize() << " vs " << target.getSize());

  // Optional divergence diagnostics, kept from upstream for debugging:
  // Grid<Real> divGrid(target.getParent());
  // DivergenceOpMAC(divGrid, target);
  // Real fDivOrig = GridSumSqr(divGrid);

  FOR_IJK_BND(target, bnd)
  {
    if (flags.get(i, j, k) & flag) {
      target(i, j, k) = source(i, j, k);
    }
  }

  // DivergenceOpMAC(divGrid, target);
  // Real fDivTransfer = GridSumSqr(divGrid);
  // std::cout << "Divergence: " << fDivOrig << " -> " << fDivTransfer << std::endl;
}
// Generated Python wrapper for copyMACData(); returns None.
static PyObject *_W_19(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "copyMACData", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      const MACGrid &source = *_args.getPtr<MACGrid>("source", 0, &_lock);
      MACGrid &target = *_args.getPtr<MACGrid>("target", 1, &_lock);
      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 2, &_lock);
      const int flag = _args.get<int>("flag", 3, &_lock);
      const int bnd = _args.get<int>("bnd", 4, &_lock);
      _retval = getPyNone();
      copyMACData(source, target, flags, flag, bnd);
      _args.check();
    }
    pbFinalizePlugin(parent, "copyMACData", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("copyMACData", e.what());
    return 0;
  }
}
// Register the wrapper with the Python-side plugin registry.
static const Pb::Register _RP_copyMACData("", "copyMACData", _W_19);
extern "C" {
void PbRegister_copyMACData()
{
  KEEP_UNUSED(_RP_copyMACData);
}
}
+
+// explicit instantiation
+template class Grid<int>;
+template class Grid<Real>;
+template class Grid<Vec3>;
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/grid.h b/extern/mantaflow/preprocessed/grid.h
new file mode 100644
index 00000000000..bd4e0f99f85
--- /dev/null
+++ b/extern/mantaflow/preprocessed/grid.h
@@ -0,0 +1,2260 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Grid representation
+ *
+ ******************************************************************************/
+
+#ifndef _GRID_H
+#define _GRID_H
+
+#include "manta.h"
+#include "vectorbase.h"
+#include "interpol.h"
+#include "interpolHigh.h"
+#include "kernel.h"
+
+namespace Manta {
+class LevelsetGrid;
+
//! Base class for all grids
//! Holds size/stride/type bookkeeping and the generated Python bindings;
//! the cell data itself lives in the derived Grid<T> classes.
class GridBase : public PbClass {
 public:
  //! Bit flags describing the concrete grid kind (combinable).
  enum GridType {
    TypeNone = 0,
    TypeReal = 1,
    TypeInt = 2,
    TypeVec3 = 4,
    TypeMAC = 8,
    TypeLevelset = 16,
    TypeFlags = 32
  };

  GridBase(FluidSolver *parent);
  // Generated Python constructor wrapper: rebuilds the C++ object from the
  // "parent" argument and binds it to the Python-side object.
  static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    PbClass *obj = Pb::objFromPy(_self);
    if (obj)
      delete obj;
    try {
      PbArgs _args(_linargs, _kwds);
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(0, "GridBase::GridBase", !noTiming);
      {
        ArgLocker _lock;
        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
        obj = new GridBase(parent);
        obj->registerObject(_self, &_args);
        _args.check();
      }
      pbFinalizePlugin(obj->getParent(), "GridBase::GridBase", !noTiming);
      return 0;
    }
    catch (std::exception &e) {
      pbSetError("GridBase::GridBase", e.what());
      return -1;
    }
  }

  //! Get the grids X dimension
  inline int getSizeX() const
  {
    return mSize.x;
  }
  // Generated Python wrapper for getSizeX().
  static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      GridBase *pbo = dynamic_cast<GridBase *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "GridBase::getSizeX", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getSizeX());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "GridBase::getSizeX", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("GridBase::getSizeX", e.what());
      return 0;
    }
  }

  //! Get the grids Y dimension
  inline int getSizeY() const
  {
    return mSize.y;
  }
  // Generated Python wrapper for getSizeY().
  static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      GridBase *pbo = dynamic_cast<GridBase *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "GridBase::getSizeY", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getSizeY());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "GridBase::getSizeY", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("GridBase::getSizeY", e.what());
      return 0;
    }
  }

  //! Get the grids Z dimension
  inline int getSizeZ() const
  {
    return mSize.z;
  }
  // Generated Python wrapper for getSizeZ().
  static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      GridBase *pbo = dynamic_cast<GridBase *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "GridBase::getSizeZ", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getSizeZ());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "GridBase::getSizeZ", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("GridBase::getSizeZ", e.what());
      return 0;
    }
  }

  //! Get the grids dimensions
  inline Vec3i getSize() const
  {
    return mSize;
  }
  // Generated Python wrapper for getSize().
  static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      GridBase *pbo = dynamic_cast<GridBase *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "GridBase::getSize", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getSize());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "GridBase::getSize", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("GridBase::getSize", e.what());
      return 0;
    }
  }

  //! Get Stride in X dimension
  inline IndexInt getStrideX() const
  {
    return 1;
  }
  //! Get Stride in Y dimension
  inline IndexInt getStrideY() const
  {
    return mSize.x;
  }
  //! Get Stride in Z dimension
  inline IndexInt getStrideZ() const
  {
    return mStrideZ;
  }

  //! Get the cell size
  inline Real getDx() const
  {
    return mDx;
  }

  //! Check if indices are within bounds, otherwise error (should only be called when debugging)
  inline void checkIndex(int i, int j, int k) const;
  //! Check if indices are within bounds, otherwise error (should only be called when debugging)
  inline void checkIndex(IndexInt idx) const;
  //! Check if index is within given boundaries
  inline bool isInBounds(const Vec3i &p, int bnd) const;
  //! Check if index is within given boundaries
  inline bool isInBounds(const Vec3i &p) const;
  //! Check if index is within given boundaries (continuous position variant)
  inline bool isInBounds(const Vec3 &p, int bnd = 0) const
  {
    return isInBounds(toVec3i(p), bnd);
  }
  //! Check if linear index is in the range of the array
  inline bool isInBounds(IndexInt idx) const;

  //! Get the type of grid
  inline GridType getType() const
  {
    return mType;
  }
  //! Check dimensionality
  inline bool is3D() const
  {
    return m3D;
  }
  // Generated Python wrapper for is3D().
  static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      GridBase *pbo = dynamic_cast<GridBase *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "GridBase::is3D", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->is3D());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "GridBase::is3D", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("GridBase::is3D", e.what());
      return 0;
    }
  }

  //! Get index into the data
  inline IndexInt index(int i, int j, int k) const
  {
    DEBUG_ONLY(checkIndex(i, j, k));
    return (IndexInt)i + (IndexInt)mSize.x * j + (IndexInt)mStrideZ * k;
  }
  //! Get index into the data
  inline IndexInt index(const Vec3i &pos) const
  {
    DEBUG_ONLY(checkIndex(pos.x, pos.y, pos.z));
    return (IndexInt)pos.x + (IndexInt)mSize.x * pos.y + (IndexInt)mStrideZ * pos.z;
  }

  //! grid4d compatibility functions (3D grids never have a 4th dimension)
  inline bool is4D() const
  {
    return false;
  }
  // Generated Python wrapper for is4D().
  static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      GridBase *pbo = dynamic_cast<GridBase *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "GridBase::is4D", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->is4D());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "GridBase::is4D", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("GridBase::is4D", e.what());
      return 0;
    }
  }

  //! grid4d compatibility: size of the (non-existent) 4th dimension is 1
  inline int getSizeT() const
  {
    return 1;
  }
  // Generated Python wrapper for getSizeT().
  static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      GridBase *pbo = dynamic_cast<GridBase *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "GridBase::getSizeT", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getSizeT());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "GridBase::getSizeT", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("GridBase::getSizeT", e.what());
      return 0;
    }
  }

  //! grid4d compatibility: stride of the (non-existent) 4th dimension is 0
  inline int getStrideT() const
  {
    return 0;
  }
  // Generated Python wrapper for getStrideT().
  static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      GridBase *pbo = dynamic_cast<GridBase *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "GridBase::getStrideT", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getStrideT());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "GridBase::getStrideT", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("GridBase::getStrideT", e.what());
      return 0;
    }
  }

  //! grid4d compatibility: the 4th coordinate is ignored
  inline int index(int i, int j, int k, int unused) const
  {
    return index(i, j, k);
  }
  //! grid4d compatibility: only t == 0 can be in bounds for a 3D grid
  inline bool isInBounds(int i, int j, int k, int t, int bnd) const
  {
    if (t != 0)
      return false;
    return isInBounds(Vec3i(i, j, k), bnd);
  }

 protected:
  GridType mType;  // concrete grid kind (combination of GridType bits)
  Vec3i mSize;     // grid resolution in cells
  Real mDx;        // cell size
  bool m3D;        // true when the grid is 3D (see is3D())
  // precomputed Z stride: to ensure 2D compatibility, always use this instead of sx*sy !
  IndexInt mStrideZ;
 public:
  PbArgs _args;
}
#define _C_GridBase
;
+
+//! Grid class
+
+template<class T> class Grid : public GridBase {
+ public:
+ //! init new grid, values are set to zero
+ Grid(FluidSolver *parent, bool show = true);
  // Generated Python constructor wrapper: rebuilds the Grid from the
  // "parent"/"show" arguments and binds it to the Python-side object.
  static int _W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    PbClass *obj = Pb::objFromPy(_self);
    if (obj)
      delete obj;
    try {
      PbArgs _args(_linargs, _kwds);
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(0, "Grid::Grid", !noTiming);
      {
        ArgLocker _lock;
        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
        bool show = _args.getOpt<bool>("show", 1, true, &_lock);
        obj = new Grid(parent, show);
        obj->registerObject(_self, &_args);
        _args.check();
      }
      pbFinalizePlugin(obj->getParent(), "Grid::Grid", !noTiming);
      return 0;
    }
    catch (std::exception &e) {
      pbSetError("Grid::Grid", e.what());
      return -1;
    }
  }
+
  //! init new grid with an existing array
  Grid(FluidSolver *parent, T *data, bool show = true);
  //! create new & copy content from another grid
  Grid(const Grid<T> &a);
  //! return memory to solver
  virtual ~Grid();

  typedef T BASETYPE;
  typedef GridBase BASETYPE_GRID;

  //! write grid content to the given file
  void save(std::string name);
  // Generated Python wrapper for save(); returns None.
  static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Grid::save", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        std::string name = _args.get<std::string>("name", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->save(name);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Grid::save", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Grid::save", e.what());
      return 0;
    }
  }

  //! read grid content from the given file
  void load(std::string name);
  // Generated Python wrapper for load(); returns None.
  static PyObject *_W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Grid::load", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        std::string name = _args.get<std::string>("name", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->load(name);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Grid::load", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Grid::load", e.what());
      return 0;
    }
  }

  //! set all cells to zero
  void clear();
  // Generated Python wrapper for clear(); returns None.
  static PyObject *_W_12(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Grid::clear", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->clear();
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Grid::clear", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Grid::clear", e.what());
      return 0;
    }
  }
+
  //! all kinds of access functions, use grid(), grid[] or grid.get()
  //! access data (read by i,j,k)
  inline T get(int i, int j, int k) const
  {
    return mData[index(i, j, k)];
  }
  //! access data (writable reference by i,j,k)
  inline T &get(int i, int j, int k)
  {
    return mData[index(i, j, k)];
  }
  //! access data by linear index (index checked in debug builds only)
  inline T get(IndexInt idx) const
  {
    DEBUG_ONLY(checkIndex(idx));
    return mData[idx];
  }
  //! access data by integer position
  inline T get(const Vec3i &pos) const
  {
    return mData[index(pos)];
  }
  //! access data
  inline T &operator()(int i, int j, int k)
  {
    return mData[index(i, j, k)];
  }
  //! access data
  inline T operator()(int i, int j, int k) const
  {
    return mData[index(i, j, k)];
  }
  //! access data
  inline T &operator()(IndexInt idx)
  {
    DEBUG_ONLY(checkIndex(idx));
    return mData[idx];
  }
  //! access data
  inline T operator()(IndexInt idx) const
  {
    DEBUG_ONLY(checkIndex(idx));
    return mData[idx];
  }
  //! access data
  inline T &operator()(const Vec3i &pos)
  {
    return mData[index(pos)];
  }
  //! access data
  inline T operator()(const Vec3i &pos) const
  {
    return mData[index(pos)];
  }
  //! access data
  inline T &operator[](IndexInt idx)
  {
    DEBUG_ONLY(checkIndex(idx));
    return mData[idx];
  }
  //! access data
  inline const T operator[](IndexInt idx) const
  {
    DEBUG_ONLY(checkIndex(idx));
    return mData[idx];
  }

  // interpolated access
  //! linear interpolation at a continuous position
  inline T getInterpolated(const Vec3 &pos) const
  {
    return interpol<T>(mData, mSize, mStrideZ, pos);
  }
  //! interpolated write of val at pos; presumably accumulates interpolation
  //! weights in sumBuffer (see setInterpol). Note: declared const but writes
  //! through the mData pointer.
  inline void setInterpolated(const Vec3 &pos, const T &val, Grid<Real> &sumBuffer) const
  {
    setInterpol<T>(mData, mSize, mStrideZ, pos, val, &sumBuffer[0]);
  }
  // higher order interpolation (1=linear, 2=cubic); any other order asserts
  inline T getInterpolatedHi(const Vec3 &pos, int order) const
  {
    switch (order) {
      case 1:
        return interpol<T>(mData, mSize, mStrideZ, pos);
      case 2:
        return interpolCubic<T>(mData, mSize, mStrideZ, pos);
      default:
        assertMsg(false, "Unknown interpolation order " << order);
    }
    return T(0.);  // should never be reached, just to prevent compiler warnings
  }
+
  // assignment / copy

  //! warning - do not use "=" for grids in python, this copies the reference! not the grid
  //! content...
  // Grid<T>& operator=(const Grid<T>& a);
  //! copy content from other grid (use this one instead of operator= !)
  Grid<T> &copyFrom(const Grid<T> &a, bool copyType = true);
  // Generated Python wrapper for copyFrom(); returns the grid itself.
  static PyObject *_W_13(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Grid::copyFrom", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const Grid<T> &a = *_args.getPtr<Grid<T>>("a", 0, &_lock);
        bool copyType = _args.getOpt<bool>("copyType", 1, true, &_lock);
        pbo->_args.copy(_args);
        _retval = toPy(pbo->copyFrom(a, copyType));
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Grid::copyFrom", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Grid::copyFrom", e.what());
      return 0;
    }
  }
  // old: { *this = a; }
+
+ // helper functions to work with grids in scene files
+
+ //! get grid type
+ int getGridType();
+ // generated Python wrapper: forwards to getGridType() and converts the int result
+ static PyObject *_W_14(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::getGridType", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getGridType());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::getGridType", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::getGridType", e.what());
+ return 0;
+ }
+ }
+
+ //! add/subtract other grid
+ void add(const Grid<T> &a);
+ // generated Python wrapper: forwards to add(), returns None
+ static PyObject *_W_15(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::add", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid<T> &a = *_args.getPtr<Grid<T>>("a", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->add(a);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::add", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::add", e.what());
+ return 0;
+ }
+ }
+
+ //! subtract other grid cell-wise
+ void sub(const Grid<T> &a);
+ // generated Python wrapper: forwards to sub(), returns None
+ static PyObject *_W_16(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::sub", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid<T> &a = *_args.getPtr<Grid<T>>("a", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->sub(a);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::sub", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::sub", e.what());
+ return 0;
+ }
+ }
+
+ //! set all cells to constant value
+ void setConst(T s);
+ // generated Python wrapper: forwards to setConst(), returns None
+ static PyObject *_W_17(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::setConst", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T s = _args.get<T>("s", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->setConst(s);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::setConst", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::setConst", e.what());
+ return 0;
+ }
+ }
+
+ //! add constant to all grid cells
+ void addConst(T s);
+ // generated Python wrapper: forwards to addConst(), returns None
+ static PyObject *_W_18(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::addConst", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T s = _args.get<T>("s", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->addConst(s);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::addConst", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::addConst", e.what());
+ return 0;
+ }
+ }
+
+ //! add scaled other grid to current one (note, only "Real" factor, "T" type not supported here!)
+ void addScaled(const Grid<T> &a, const T &factor);
+ // generated Python wrapper: forwards to addScaled(), returns None
+ static PyObject *_W_19(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::addScaled", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid<T> &a = *_args.getPtr<Grid<T>>("a", 0, &_lock);
+ const T &factor = *_args.getPtr<T>("factor", 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->addScaled(a, factor);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::addScaled", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::addScaled", e.what());
+ return 0;
+ }
+ }
+
+ //! multiply contents of grid
+ void mult(const Grid<T> &a);
+ // generated Python wrapper: forwards to mult(), returns None
+ static PyObject *_W_20(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::mult", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid<T> &a = *_args.getPtr<Grid<T>>("a", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->mult(a);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::mult", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::mult", e.what());
+ return 0;
+ }
+ }
+
+ //! multiply each cell by a constant scalar value
+ void multConst(T s);
+ // generated Python wrapper: forwards to multConst(), returns None
+ static PyObject *_W_21(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::multConst", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T s = _args.get<T>("s", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->multConst(s);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::multConst", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::multConst", e.what());
+ return 0;
+ }
+ }
+
+ //! clamp content to range (for vec3, clamps each component separately)
+ void clamp(Real min, Real max);
+ // generated Python wrapper: forwards to clamp(), returns None
+ static PyObject *_W_22(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::clamp", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Real min = _args.get<Real>("min", 0, &_lock);
+ Real max = _args.get<Real>("max", 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->clamp(min, max);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::clamp", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::clamp", e.what());
+ return 0;
+ }
+ }
+
+ //! reduce small values to zero
+ void stomp(const T &threshold);
+ // generated Python wrapper: forwards to stomp(), returns None
+ static PyObject *_W_23(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::stomp", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const T &threshold = *_args.getPtr<T>("threshold", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->stomp(threshold);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::stomp", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::stomp", e.what());
+ return 0;
+ }
+ }
+
+ //! permute grid axes, e.g. switch y with z (0,2,1)
+ void permuteAxes(int axis0, int axis1, int axis2);
+ // generated Python wrapper: forwards to permuteAxes(), returns None
+ static PyObject *_W_24(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::permuteAxes", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int axis0 = _args.get<int>("axis0", 0, &_lock);
+ int axis1 = _args.get<int>("axis1", 1, &_lock);
+ int axis2 = _args.get<int>("axis2", 2, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->permuteAxes(axis0, axis1, axis2);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::permuteAxes", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::permuteAxes", e.what());
+ return 0;
+ }
+ }
+
+ //! permute grid axes, e.g. switch y with z (0,2,1)
+ void permuteAxesCopyToGrid(int axis0, int axis1, int axis2, Grid<T> &out);
+ // generated Python wrapper: forwards to permuteAxesCopyToGrid(), returns None
+ static PyObject *_W_25(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::permuteAxesCopyToGrid", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int axis0 = _args.get<int>("axis0", 0, &_lock);
+ int axis1 = _args.get<int>("axis1", 1, &_lock);
+ int axis2 = _args.get<int>("axis2", 2, &_lock);
+ Grid<T> &out = *_args.getPtr<Grid<T>>("out", 3, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->permuteAxesCopyToGrid(axis0, axis1, axis2, out);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::permuteAxesCopyToGrid", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::permuteAxesCopyToGrid", e.what());
+ return 0;
+ }
+ }
+
+ // common compound operators
+ //! get absolute max value in grid
+ Real getMaxAbs() const;
+ // generated Python wrapper: forwards to getMaxAbs(), converts the Real result
+ static PyObject *_W_26(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::getMaxAbs", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getMaxAbs());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::getMaxAbs", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::getMaxAbs", e.what());
+ return 0;
+ }
+ }
+
+ //! get max value in grid
+ Real getMax() const;
+ // generated Python wrapper: forwards to getMax(), converts the Real result
+ static PyObject *_W_27(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::getMax", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getMax());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::getMax", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::getMax", e.what());
+ return 0;
+ }
+ }
+
+ //! get min value in grid
+ Real getMin() const;
+ // generated Python wrapper: forwards to getMin(), converts the Real result
+ static PyObject *_W_28(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::getMin", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getMin());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::getMin", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::getMin", e.what());
+ return 0;
+ }
+ }
+
+ //! calculate L1 norm of grid content
+ Real getL1(int bnd = 0);
+ // generated Python wrapper: forwards to getL1() (bnd cells skipped at the border)
+ static PyObject *_W_29(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::getL1", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int bnd = _args.getOpt<int>("bnd", 0, 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getL1(bnd));
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::getL1", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::getL1", e.what());
+ return 0;
+ }
+ }
+
+ //! calculate L2 norm of grid content
+ Real getL2(int bnd = 0);
+ // generated Python wrapper: forwards to getL2() (bnd cells skipped at the border)
+ static PyObject *_W_30(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::getL2", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int bnd = _args.getOpt<int>("bnd", 0, 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getL2(bnd));
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::getL2", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::getL2", e.what());
+ return 0;
+ }
+ }
+
+ //! set all boundary cells to constant value (Dirichlet)
+ void setBound(T value, int boundaryWidth = 1);
+ // generated Python wrapper: forwards to setBound(), returns None
+ static PyObject *_W_31(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::setBound", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T value = _args.get<T>("value", 0, &_lock);
+ int boundaryWidth = _args.getOpt<int>("boundaryWidth", 1, 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->setBound(value, boundaryWidth);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::setBound", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::setBound", e.what());
+ return 0;
+ }
+ }
+
+ //! set all boundary cells to last inner value (Neumann)
+ void setBoundNeumann(int boundaryWidth = 1);
+ // generated Python wrapper: forwards to setBoundNeumann(), returns None
+ static PyObject *_W_32(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::setBoundNeumann", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int boundaryWidth = _args.getOpt<int>("boundaryWidth", 0, 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->setBoundNeumann(boundaryWidth);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::setBoundNeumann", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::setBoundNeumann", e.what());
+ return 0;
+ }
+ }
+
+ //! get data pointer of grid
+ std::string getDataPointer();
+ // generated Python wrapper: forwards to getDataPointer(), returns the string result
+ static PyObject *_W_33(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::getDataPointer", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getDataPointer());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::getDataPointer", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::getDataPointer", e.what());
+ return 0;
+ }
+ }
+
+ //! debugging helper, print grid from python. skip boundary of width bnd
+ void printGrid(int zSlice = -1, bool printIndex = false, int bnd = 1);
+ // generated Python wrapper: forwards to printGrid(), returns None
+ static PyObject *_W_34(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid *pbo = dynamic_cast<Grid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid::printGrid", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int zSlice = _args.getOpt<int>("zSlice", 0, -1, &_lock);
+ bool printIndex = _args.getOpt<bool>("printIndex", 1, false, &_lock);
+ int bnd = _args.getOpt<int>("bnd", 2, 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->printGrid(zSlice, printIndex, bnd);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid::printGrid", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid::printGrid", e.what());
+ return 0;
+ }
+ }
+
+ // c++ only operators (declarations; implementations live elsewhere in this header/unit)
+ template<class S> Grid<T> &operator+=(const Grid<S> &a);
+ template<class S> Grid<T> &operator+=(const S &a);
+ template<class S> Grid<T> &operator-=(const Grid<S> &a);
+ template<class S> Grid<T> &operator-=(const S &a);
+ template<class S> Grid<T> &operator*=(const Grid<S> &a);
+ template<class S> Grid<T> &operator*=(const S &a);
+ template<class S> Grid<T> &operator/=(const Grid<S> &a);
+ template<class S> Grid<T> &operator/=(const S &a);
+ Grid<T> &safeDivide(const Grid<T> &a);
+
+ //! Swap data with another grid (no actual data is moved)
+ void swap(Grid<T> &other);
+
+ //! grid4d compatibility functions
+ // the extra int parameter is ignored; it only makes 3d grids match the 4d call signature
+ inline T &operator()(int i, int j, int k, int unused)
+ {
+ return mData[index(i, j, k)];
+ }
+ inline T operator()(int i, int j, int k, int unused) const
+ {
+ return mData[index(i, j, k)];
+ }
+
+ protected:
+ T *mData;
+ bool externalData; // True if mData is managed outside of the Fluidsolver
+ public:
+ PbArgs _args;
+}
+#define _C_Grid
+;
+
+// Python doesn't know about templates: explicit aliases needed
+
+//! Special function for staggered grids
+class MACGrid : public Grid<Vec3> {
+ public:
+ //! construct a staggered (MAC) vector grid on the given solver
+ MACGrid(FluidSolver *parent, bool show = true) : Grid<Vec3>(parent, show)
+ {
+ mType = (GridType)(TypeMAC | TypeVec3);
+ }
+ // generated Python constructor wrapper: replaces any existing C++ object behind _self
+ static int _W_35(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "MACGrid::MACGrid", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ bool show = _args.getOpt<bool>("show", 1, true, &_lock);
+ obj = new MACGrid(parent, show);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "MACGrid::MACGrid", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("MACGrid::MACGrid", e.what());
+ return -1;
+ }
+ }
+
+ //! construct a MAC grid over externally managed data
+ MACGrid(FluidSolver *parent, Vec3 *data, bool show = true) : Grid<Vec3>(parent, data, show)
+ {
+ mType = (GridType)(TypeMAC | TypeVec3);
+ }
+
+ // specialized functions for interpolating MAC information
+ inline Vec3 getCentered(int i, int j, int k) const;
+ inline Vec3 getCentered(const Vec3i &pos) const
+ {
+ return getCentered(pos.x, pos.y, pos.z);
+ }
+ inline Vec3 getAtMACX(int i, int j, int k) const;
+ inline Vec3 getAtMACY(int i, int j, int k) const;
+ inline Vec3 getAtMACZ(int i, int j, int k) const;
+ // interpolation
+ //! MAC-aware tri-linear interpolation (each component sampled at its face location)
+ inline Vec3 getInterpolated(const Vec3 &pos) const
+ {
+ return interpolMAC(mData, mSize, mStrideZ, pos);
+ }
+ //! MAC-aware scatter of val at pos; tmp receives the accumulated weights
+ inline void setInterpolated(const Vec3 &pos, const Vec3 &val, Vec3 *tmp) const
+ {
+ return setInterpolMAC(mData, mSize, mStrideZ, pos, val, tmp);
+ }
+ //! higher order MAC interpolation (1=linear, 2=cubic)
+ inline Vec3 getInterpolatedHi(const Vec3 &pos, int order) const
+ {
+ switch (order) {
+ case 1:
+ return interpolMAC(mData, mSize, mStrideZ, pos);
+ case 2:
+ return interpolCubicMAC(mData, mSize, mStrideZ, pos);
+ default:
+ assertMsg(false, "Unknown interpolation order " << order);
+ }
+ return Vec3(0.); // should never be reached, just to prevent compiler warnings
+ }
+ // specials for mac grid:
+ //! interpolate a single velocity component (comp selects x/y/z at compile time)
+ template<int comp> inline Real getInterpolatedComponent(Vec3 pos) const
+ {
+ return interpolComponent<comp>(mData, mSize, mStrideZ, pos);
+ }
+ template<int comp> inline Real getInterpolatedComponentHi(const Vec3 &pos, int order) const
+ {
+ switch (order) {
+ case 1:
+ return interpolComponent<comp>(mData, mSize, mStrideZ, pos);
+ case 2:
+ return interpolCubicMAC(mData, mSize, mStrideZ, pos)[comp]; // warning - not yet optimized
+ default:
+ assertMsg(false, "Unknown interpolation order " << order);
+ }
+ return 0.; // should never be reached, just to prevent compiler warnings
+ }
+
+ //! set all boundary cells of a MAC grid to certain value (Dirchlet). Respects staggered grid
+ //! locations optionally, only set normal components
+ void setBoundMAC(Vec3 value, int boundaryWidth, bool normalOnly = false);
+ // generated Python wrapper: forwards to setBoundMAC(), returns None
+ static PyObject *_W_36(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MACGrid *pbo = dynamic_cast<MACGrid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MACGrid::setBoundMAC", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Vec3 value = _args.get<Vec3>("value", 0, &_lock);
+ int boundaryWidth = _args.get<int>("boundaryWidth", 1, &_lock);
+ bool normalOnly = _args.getOpt<bool>("normalOnly", 2, false, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->setBoundMAC(value, boundaryWidth, normalOnly);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MACGrid::setBoundMAC", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MACGrid::setBoundMAC", e.what());
+ return 0;
+ }
+ }
+
+ protected:
+ public:
+ PbArgs _args;
+}
+#define _C_MACGrid
+;
+
+//! Special functions for FlagGrid
+class FlagGrid : public Grid<int> {
+ public:
+ //! construct a cell-flag grid on the given solver
+ // NOTE(review): 'dim' is accepted for the Python API but not used in this body --
+ // the grid size appears to come from 'parent'; confirm against FluidSolver
+ FlagGrid(FluidSolver *parent, int dim = 3, bool show = true) : Grid<int>(parent, show)
+ {
+ mType = (GridType)(TypeFlags | TypeInt);
+ }
+ // generated Python constructor wrapper: replaces any existing C++ object behind _self
+ static int _W_37(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "FlagGrid::FlagGrid", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ int dim = _args.getOpt<int>("dim", 1, 3, &_lock);
+ bool show = _args.getOpt<bool>("show", 2, true, &_lock);
+ obj = new FlagGrid(parent, dim, show);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "FlagGrid::FlagGrid", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("FlagGrid::FlagGrid", e.what());
+ return -1;
+ }
+ }
+
+ //! construct a flag grid over externally managed data
+ FlagGrid(FluidSolver *parent, int *data, int dim = 3, bool show = true)
+ : Grid<int>(parent, data, show)
+ {
+ mType = (GridType)(TypeFlags | TypeInt);
+ }
+
+ //! types of cells, in/outflow can be combined, e.g., TypeFluid|TypeInflow
+ enum CellType {
+ TypeNone = 0,
+ TypeFluid = 1,
+ TypeObstacle = 2,
+ TypeEmpty = 4,
+ TypeInflow = 8,
+ TypeOutflow = 16,
+ TypeOpen = 32,
+ TypeStick = 64,
+ TypeReserved = 256
+ };
+
+ //! access for particles
+ // position components are truncated toward zero; no bounds check here
+ inline int getAt(const Vec3 &pos) const
+ {
+ return mData[index((int)pos.x, (int)pos.y, (int)pos.z)];
+ }
+
+ //! check for different flag types (bit-mask tests against CellType)
+ inline bool isObstacle(IndexInt idx) const
+ {
+ return get(idx) & TypeObstacle;
+ }
+ inline bool isObstacle(int i, int j, int k) const
+ {
+ return get(i, j, k) & TypeObstacle;
+ }
+ inline bool isObstacle(const Vec3i &pos) const
+ {
+ return get(pos) & TypeObstacle;
+ }
+ inline bool isObstacle(const Vec3 &pos) const
+ {
+ return getAt(pos) & TypeObstacle;
+ }
+ inline bool isFluid(IndexInt idx) const
+ {
+ return get(idx) & TypeFluid;
+ }
+ inline bool isFluid(int i, int j, int k) const
+ {
+ return get(i, j, k) & TypeFluid;
+ }
+ inline bool isFluid(const Vec3i &pos) const
+ {
+ return get(pos) & TypeFluid;
+ }
+ inline bool isFluid(const Vec3 &pos) const
+ {
+ return getAt(pos) & TypeFluid;
+ }
+ inline bool isInflow(IndexInt idx) const
+ {
+ return get(idx) & TypeInflow;
+ }
+ inline bool isInflow(int i, int j, int k) const
+ {
+ return get(i, j, k) & TypeInflow;
+ }
+ inline bool isInflow(const Vec3i &pos) const
+ {
+ return get(pos) & TypeInflow;
+ }
+ inline bool isInflow(const Vec3 &pos) const
+ {
+ return getAt(pos) & TypeInflow;
+ }
+ inline bool isEmpty(IndexInt idx) const
+ {
+ return get(idx) & TypeEmpty;
+ }
+ inline bool isEmpty(int i, int j, int k) const
+ {
+ return get(i, j, k) & TypeEmpty;
+ }
+ inline bool isEmpty(const Vec3i &pos) const
+ {
+ return get(pos) & TypeEmpty;
+ }
+ inline bool isEmpty(const Vec3 &pos) const
+ {
+ return getAt(pos) & TypeEmpty;
+ }
+ inline bool isOutflow(IndexInt idx) const
+ {
+ return get(idx) & TypeOutflow;
+ }
+ inline bool isOutflow(int i, int j, int k) const
+ {
+ return get(i, j, k) & TypeOutflow;
+ }
+ inline bool isOutflow(const Vec3i &pos) const
+ {
+ return get(pos) & TypeOutflow;
+ }
+ inline bool isOutflow(const Vec3 &pos) const
+ {
+ return getAt(pos) & TypeOutflow;
+ }
+ inline bool isOpen(IndexInt idx) const
+ {
+ return get(idx) & TypeOpen;
+ }
+ inline bool isOpen(int i, int j, int k) const
+ {
+ return get(i, j, k) & TypeOpen;
+ }
+ inline bool isOpen(const Vec3i &pos) const
+ {
+ return get(pos) & TypeOpen;
+ }
+ inline bool isOpen(const Vec3 &pos) const
+ {
+ return getAt(pos) & TypeOpen;
+ }
+ inline bool isStick(IndexInt idx) const
+ {
+ return get(idx) & TypeStick;
+ }
+ inline bool isStick(int i, int j, int k) const
+ {
+ return get(i, j, k) & TypeStick;
+ }
+ inline bool isStick(const Vec3i &pos) const
+ {
+ return get(pos) & TypeStick;
+ }
+ inline bool isStick(const Vec3 &pos) const
+ {
+ return getAt(pos) & TypeStick;
+ }
+
+ // domain-face wall initialization helpers (declarations; implemented elsewhere)
+ void InitMinXWall(const int &boundaryWidth, Grid<Real> &phiWalls);
+ void InitMaxXWall(const int &boundaryWidth, Grid<Real> &phiWalls);
+ void InitMinYWall(const int &boundaryWidth, Grid<Real> &phiWalls);
+ void InitMaxYWall(const int &boundaryWidth, Grid<Real> &phiWalls);
+ void InitMinZWall(const int &boundaryWidth, Grid<Real> &phiWalls);
+ void InitMaxZWall(const int &boundaryWidth, Grid<Real> &phiWalls);
+ // Python callables
+
+ //! set up the domain boundary; per-face behavior selected by the letter strings
+ //! (e.g. "xX" = both x faces), optional phiWalls receives the wall levelset
+ void initDomain(const int &boundaryWidth = 0,
+ const std::string &wall = "xXyYzZ",
+ const std::string &open = " ",
+ const std::string &inflow = " ",
+ const std::string &outflow = " ",
+ Grid<Real> *phiWalls = 0x00);
+ // generated Python wrapper: forwards to initDomain(), returns None
+ static PyObject *_W_38(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FlagGrid *pbo = dynamic_cast<FlagGrid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "FlagGrid::initDomain", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const int &boundaryWidth = _args.getOpt<int>("boundaryWidth", 0, 0, &_lock);
+ const std::string &wall = _args.getOpt<std::string>("wall", 1, "xXyYzZ", &_lock);
+ const std::string &open = _args.getOpt<std::string>("open", 2, " ", &_lock);
+ const std::string &inflow = _args.getOpt<std::string>("inflow", 3, " ", &_lock);
+ const std::string &outflow = _args.getOpt<std::string>("outflow", 4, " ", &_lock);
+ Grid<Real> *phiWalls = _args.getPtrOpt<Grid<Real>>("phiWalls", 5, 0x00, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->initDomain(boundaryWidth, wall, open, inflow, outflow, phiWalls);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "FlagGrid::initDomain", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("FlagGrid::initDomain", e.what());
+ return 0;
+ }
+ }
+
+ void initBoundaries(const int &boundaryWidth, const int *types);
+
+ //! set fluid flags inside levelset (liquids)
+ void updateFromLevelset(LevelsetGrid &levelset);
+ // generated Python wrapper: forwards to updateFromLevelset(), returns None
+ static PyObject *_W_39(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FlagGrid *pbo = dynamic_cast<FlagGrid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "FlagGrid::updateFromLevelset", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ LevelsetGrid &levelset = *_args.getPtr<LevelsetGrid>("levelset", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->updateFromLevelset(levelset);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "FlagGrid::updateFromLevelset", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("FlagGrid::updateFromLevelset", e.what());
+ return 0;
+ }
+ }
+
+ //! set all cells (except obs/in/outflow) to type (fluid by default)
+ void fillGrid(int type = TypeFluid);
+ // generated Python wrapper: forwards to fillGrid(), returns None
+ static PyObject *_W_40(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FlagGrid *pbo = dynamic_cast<FlagGrid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "FlagGrid::fillGrid", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int type = _args.getOpt<int>("type", 0, TypeFluid, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->fillGrid(type);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "FlagGrid::fillGrid", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("FlagGrid::fillGrid", e.what());
+ return 0;
+ }
+ }
+
+ //! count no. of cells matching flags via "AND"
+ //! warning for large grids! only regular int returned (due to python interface)
+ //! optionally creates mask in RealGrid (1 where flag matches, 0 otherwise)
+ int countCells(int flag, int bnd = 0, Grid<Real> *mask = NULL);
+ // generated Python wrapper: forwards to countCells(), converts the int result
+ static PyObject *_W_41(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FlagGrid *pbo = dynamic_cast<FlagGrid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "FlagGrid::countCells", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int flag = _args.get<int>("flag", 0, &_lock);
+ int bnd = _args.getOpt<int>("bnd", 1, 0, &_lock);
+ Grid<Real> *mask = _args.getPtrOpt<Grid<Real>>("mask", 2, NULL, &_lock);
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->countCells(flag, bnd, mask));
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "FlagGrid::countCells", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("FlagGrid::countCells", e.what());
+ return 0;
+ }
+ }
+
+ public:
+ PbArgs _args;
+}
+#define _C_FlagGrid
+;
+
+//! helper to compute grid conversion factor between local coordinates of two grids
+inline Vec3 calcGridSizeFactor(Vec3i s1, Vec3i s2)
+{
+ // component-wise ratio s1/s2; promote to Real first to avoid integer division
+ return Vec3(Real(s1[0]) / s2[0], Real(s1[1]) / s2[1], Real(s1[2]) / s2[2]);
+}
+
+// prototypes for grid plugins
+void copyMacToVec3(MACGrid &source, Grid<Vec3> &target);
+void convertMacToVec3(MACGrid &source, Grid<Vec3> &target);
+void resampleVec3ToMac(Grid<Vec3> &source, MACGrid &target);
+void resampleMacToVec3(MACGrid &source, Grid<Vec3> &target);
+
+void getComponent(const Grid<Vec3> &source, Grid<Real> &target, int component);
+void setComponent(const Grid<Real> &source, Grid<Vec3> &target, int component);
+
+//******************************************************************************
+// Implementation of inline functions
+
+//! abort via errMsg with a descriptive message when (i,j,k) lies outside the grid
+inline void GridBase::checkIndex(int i, int j, int k) const
+{
+ if (i < 0 || j < 0 || k < 0 || i >= mSize.x || j >= mSize.y || k >= mSize.z) {
+ std::ostringstream s;
+ s << "Grid " << mName << " dim " << mSize << " : index " << i << "," << j << "," << k
+ << " out of bound ";
+ errMsg(s.str());
+ }
+}
+
+//! abort via errMsg when the linear index lies outside the grid's cell count
+inline void GridBase::checkIndex(IndexInt idx) const
+{
+ if (idx < 0 || idx >= mSize.x * mSize.y * mSize.z) {
+ std::ostringstream s;
+ s << "Grid " << mName << " dim " << mSize << " : index " << idx << " out of bound ";
+ errMsg(s.str());
+ }
+}
+
+//! true if p lies inside the grid in all three dimensions
+// NOTE(review): no 'inline' on this header definition -- presumably the in-class
+// declaration carries it; verify, otherwise multiple TUs would violate the ODR
+bool GridBase::isInBounds(const Vec3i &p) const
+{
+ return (p.x >= 0 && p.y >= 0 && p.z >= 0 && p.x < mSize.x && p.y < mSize.y && p.z < mSize.z);
+}
+
+//! true if p lies inside the grid with a margin of bnd cells; in 2D the z
+//! component must be exactly 0
+bool GridBase::isInBounds(const Vec3i &p, int bnd) const
+{
+ bool ret = (p.x >= bnd && p.y >= bnd && p.x < mSize.x - bnd && p.y < mSize.y - bnd);
+ if (this->is3D()) {
+ ret &= (p.z >= bnd && p.z < mSize.z - bnd);
+ }
+ else {
+ ret &= (p.z == 0);
+ }
+ return ret;
+}
+//! Check if linear index is in the range of the array
+bool GridBase::isInBounds(IndexInt idx) const
+{
+ if (idx < 0 || idx >= mSize.x * mSize.y * mSize.z) {
+ return false;
+ }
+ return true;
+}
+
+inline Vec3 MACGrid::getCentered(int i, int j, int k) const
+{
+ DEBUG_ONLY(checkIndex(i + 1, j + 1, k));
+ const IndexInt idx = index(i, j, k);
+ Vec3 v = Vec3(
+ 0.5 * (mData[idx].x + mData[idx + 1].x), 0.5 * (mData[idx].y + mData[idx + mSize.x].y), 0.);
+ if (this->is3D()) {
+ DEBUG_ONLY(checkIndex(idx + mStrideZ));
+ v[2] = 0.5 * (mData[idx].z + mData[idx + mStrideZ].z);
+ }
+ return v;
+}
+
+inline Vec3 MACGrid::getAtMACX(int i, int j, int k) const
+{
+ DEBUG_ONLY(checkIndex(i - 1, j + 1, k));
+ const IndexInt idx = index(i, j, k);
+ Vec3 v = Vec3((mData[idx].x),
+ 0.25 * (mData[idx].y + mData[idx - 1].y + mData[idx + mSize.x].y +
+ mData[idx + mSize.x - 1].y),
+ 0.);
+ if (this->is3D()) {
+ DEBUG_ONLY(checkIndex(idx + mStrideZ - 1));
+ v[2] = 0.25 * (mData[idx].z + mData[idx - 1].z + mData[idx + mStrideZ].z +
+ mData[idx + mStrideZ - 1].z);
+ }
+ return v;
+}
+
+inline Vec3 MACGrid::getAtMACY(int i, int j, int k) const
+{
+ DEBUG_ONLY(checkIndex(i + 1, j - 1, k));
+ const IndexInt idx = index(i, j, k);
+ Vec3 v = Vec3(0.25 * (mData[idx].x + mData[idx - mSize.x].x + mData[idx + 1].x +
+ mData[idx + 1 - mSize.x].x),
+ (mData[idx].y),
+ 0.);
+ if (this->is3D()) {
+ DEBUG_ONLY(checkIndex(idx + mStrideZ - mSize.x));
+ v[2] = 0.25 * (mData[idx].z + mData[idx - mSize.x].z + mData[idx + mStrideZ].z +
+ mData[idx + mStrideZ - mSize.x].z);
+ }
+ return v;
+}
+
+inline Vec3 MACGrid::getAtMACZ(int i, int j, int k) const
+{
+ const IndexInt idx = index(i, j, k);
+ DEBUG_ONLY(checkIndex(idx - mStrideZ));
+ DEBUG_ONLY(checkIndex(idx + mSize.x - mStrideZ));
+ Vec3 v = Vec3(0.25 * (mData[idx].x + mData[idx - mStrideZ].x + mData[idx + 1].x +
+ mData[idx + 1 - mStrideZ].x),
+ 0.25 * (mData[idx].y + mData[idx - mStrideZ].y + mData[idx + mSize.x].y +
+ mData[idx + mSize.x - mStrideZ].y),
+ (mData[idx].z));
+ return v;
+}
+
+template<class T, class S> struct gridAdd : public KernelBase {
+ gridAdd(Grid<T> &me, const Grid<S> &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, const Grid<S> &other) const
+ {
+ me[idx] += other[idx];
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline const Grid<S> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel gridAdd ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ const Grid<S> &other;
+};
+template<class T, class S> struct gridSub : public KernelBase {
+ gridSub(Grid<T> &me, const Grid<S> &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, const Grid<S> &other) const
+ {
+ me[idx] -= other[idx];
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline const Grid<S> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel gridSub ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ const Grid<S> &other;
+};
+template<class T, class S> struct gridMult : public KernelBase {
+ gridMult(Grid<T> &me, const Grid<S> &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, const Grid<S> &other) const
+ {
+ me[idx] *= other[idx];
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline const Grid<S> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel gridMult ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ const Grid<S> &other;
+};
+template<class T, class S> struct gridDiv : public KernelBase {
+ gridDiv(Grid<T> &me, const Grid<S> &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, const Grid<S> &other) const
+ {
+ me[idx] /= other[idx];
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline const Grid<S> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel gridDiv ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ const Grid<S> &other;
+};
+template<class T, class S> struct gridAddScalar : public KernelBase {
+ gridAddScalar(Grid<T> &me, const S &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, const S &other) const
+ {
+ me[idx] += other;
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline const S &getArg1()
+ {
+ return other;
+ }
+ typedef S type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel gridAddScalar ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ const S &other;
+};
+template<class T, class S> struct gridMultScalar : public KernelBase {
+ gridMultScalar(Grid<T> &me, const S &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, const S &other) const
+ {
+ me[idx] *= other;
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline const S &getArg1()
+ {
+ return other;
+ }
+ typedef S type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel gridMultScalar ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ const S &other;
+};
+template<class T, class S> struct gridScaledAdd : public KernelBase {
+ gridScaledAdd(Grid<T> &me, const Grid<T> &other, const S &factor)
+ : KernelBase(&me, 0), me(me), other(other), factor(factor)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &me, const Grid<T> &other, const S &factor) const
+ {
+ me[idx] += factor * other[idx];
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline const Grid<T> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid<T> type1;
+ inline const S &getArg2()
+ {
+ return factor;
+ }
+ typedef S type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel gridScaledAdd ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other, factor);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ const Grid<T> &other;
+ const S &factor;
+};
+
+template<class T> struct gridSetConst : public KernelBase {
+ gridSetConst(Grid<T> &grid, T value) : KernelBase(&grid, 0), grid(grid), value(value)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<T> &grid, T value) const
+ {
+ grid[idx] = value;
+ }
+ inline Grid<T> &getArg0()
+ {
+ return grid;
+ }
+ typedef Grid<T> type0;
+ inline T &getArg1()
+ {
+ return value;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel gridSetConst ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, grid, value);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &grid;
+ T value;
+};
+
+template<class T> template<class S> Grid<T> &Grid<T>::operator+=(const Grid<S> &a)
+{
+ gridAdd<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid<T> &Grid<T>::operator+=(const S &a)
+{
+ gridAddScalar<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid<T> &Grid<T>::operator-=(const Grid<S> &a)
+{
+ gridSub<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid<T> &Grid<T>::operator-=(const S &a)
+{
+ gridAddScalar<T, S>(*this, -a);
+ return *this;
+}
+template<class T> template<class S> Grid<T> &Grid<T>::operator*=(const Grid<S> &a)
+{
+ gridMult<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid<T> &Grid<T>::operator*=(const S &a)
+{
+ gridMultScalar<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid<T> &Grid<T>::operator/=(const Grid<S> &a)
+{
+ gridDiv<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid<T> &Grid<T>::operator/=(const S &a)
+{
+ S rez((S)1.0 / a);
+ gridMultScalar<T, S>(*this, rez);
+ return *this;
+}
+
+//******************************************************************************
+// Other helper functions
+
+// compute gradient of a scalar grid
+inline Vec3 getGradient(const Grid<Real> &data, int i, int j, int k)
+{
+ Vec3 v;
+
+ if (i > data.getSizeX() - 2)
+ i = data.getSizeX() - 2;
+ if (j > data.getSizeY() - 2)
+ j = data.getSizeY() - 2;
+ if (i < 1)
+ i = 1;
+ if (j < 1)
+ j = 1;
+ v = Vec3(data(i + 1, j, k) - data(i - 1, j, k), data(i, j + 1, k) - data(i, j - 1, k), 0.);
+
+ if (data.is3D()) {
+ if (k > data.getSizeZ() - 2)
+ k = data.getSizeZ() - 2;
+ if (k < 1)
+ k = 1;
+ v[2] = data(i, j, k + 1) - data(i, j, k - 1);
+ }
+
+ return v;
+}
+
+// interpolate grid from one size to another size
+
+template<class S> struct knInterpolateGridTempl : public KernelBase {
+ knInterpolateGridTempl(Grid<S> &target,
+ const Grid<S> &source,
+ const Vec3 &sourceFactor,
+ Vec3 offset,
+ int orderSpace = 1)
+ : KernelBase(&target, 0),
+ target(target),
+ source(source),
+ sourceFactor(sourceFactor),
+ offset(offset),
+ orderSpace(orderSpace)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ Grid<S> &target,
+ const Grid<S> &source,
+ const Vec3 &sourceFactor,
+ Vec3 offset,
+ int orderSpace = 1) const
+ {
+ Vec3 pos = Vec3(i, j, k) * sourceFactor + offset;
+ if (!source.is3D())
+ pos[2] = 0; // allow 2d -> 3d
+ target(i, j, k) = source.getInterpolatedHi(pos, orderSpace);
+ }
+ inline Grid<S> &getArg0()
+ {
+ return target;
+ }
+ typedef Grid<S> type0;
+ inline const Grid<S> &getArg1()
+ {
+ return source;
+ }
+ typedef Grid<S> type1;
+ inline const Vec3 &getArg2()
+ {
+ return sourceFactor;
+ }
+ typedef Vec3 type2;
+ inline Vec3 &getArg3()
+ {
+ return offset;
+ }
+ typedef Vec3 type3;
+ inline int &getArg4()
+ {
+ return orderSpace;
+ }
+ typedef int type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel knInterpolateGridTempl ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, target, source, sourceFactor, offset, orderSpace);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, target, source, sourceFactor, offset, orderSpace);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ Grid<S> &target;
+ const Grid<S> &source;
+ const Vec3 &sourceFactor;
+ Vec3 offset;
+ int orderSpace;
+};
+// template glue code - choose interpolation based on template arguments
+template<class GRID> void interpolGridTempl(GRID &target, GRID &source)
+{
+ errMsg("interpolGridTempl - Only valid for specific instantiations");
+}
+
+} // namespace Manta
+#endif
diff --git a/extern/mantaflow/preprocessed/grid.h.reg.cpp b/extern/mantaflow/preprocessed/grid.h.reg.cpp
new file mode 100644
index 00000000000..d7f87604edf
--- /dev/null
+++ b/extern/mantaflow/preprocessed/grid.h.reg.cpp
@@ -0,0 +1,246 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "grid.h"
+namespace Manta {
+#ifdef _C_FlagGrid
+static const Pb::Register _R_26("FlagGrid", "FlagGrid", "Grid<int>");
+template<> const char *Namify<FlagGrid>::S = "FlagGrid";
+static const Pb::Register _R_27("FlagGrid", "FlagGrid", FlagGrid::_W_37);
+static const Pb::Register _R_28("FlagGrid", "initDomain", FlagGrid::_W_38);
+static const Pb::Register _R_29("FlagGrid", "updateFromLevelset", FlagGrid::_W_39);
+static const Pb::Register _R_30("FlagGrid", "fillGrid", FlagGrid::_W_40);
+static const Pb::Register _R_31("FlagGrid", "countCells", FlagGrid::_W_41);
+#endif
+#ifdef _C_Grid
+static const Pb::Register _R_32("Grid<int>", "Grid<int>", "GridBase");
+template<> const char *Namify<Grid<int>>::S = "Grid<int>";
+static const Pb::Register _R_33("Grid<int>", "Grid", Grid<int>::_W_9);
+static const Pb::Register _R_34("Grid<int>", "save", Grid<int>::_W_10);
+static const Pb::Register _R_35("Grid<int>", "load", Grid<int>::_W_11);
+static const Pb::Register _R_36("Grid<int>", "clear", Grid<int>::_W_12);
+static const Pb::Register _R_37("Grid<int>", "copyFrom", Grid<int>::_W_13);
+static const Pb::Register _R_38("Grid<int>", "getGridType", Grid<int>::_W_14);
+static const Pb::Register _R_39("Grid<int>", "add", Grid<int>::_W_15);
+static const Pb::Register _R_40("Grid<int>", "sub", Grid<int>::_W_16);
+static const Pb::Register _R_41("Grid<int>", "setConst", Grid<int>::_W_17);
+static const Pb::Register _R_42("Grid<int>", "addConst", Grid<int>::_W_18);
+static const Pb::Register _R_43("Grid<int>", "addScaled", Grid<int>::_W_19);
+static const Pb::Register _R_44("Grid<int>", "mult", Grid<int>::_W_20);
+static const Pb::Register _R_45("Grid<int>", "multConst", Grid<int>::_W_21);
+static const Pb::Register _R_46("Grid<int>", "clamp", Grid<int>::_W_22);
+static const Pb::Register _R_47("Grid<int>", "stomp", Grid<int>::_W_23);
+static const Pb::Register _R_48("Grid<int>", "permuteAxes", Grid<int>::_W_24);
+static const Pb::Register _R_49("Grid<int>", "permuteAxesCopyToGrid", Grid<int>::_W_25);
+static const Pb::Register _R_50("Grid<int>", "getMaxAbs", Grid<int>::_W_26);
+static const Pb::Register _R_51("Grid<int>", "getMax", Grid<int>::_W_27);
+static const Pb::Register _R_52("Grid<int>", "getMin", Grid<int>::_W_28);
+static const Pb::Register _R_53("Grid<int>", "getL1", Grid<int>::_W_29);
+static const Pb::Register _R_54("Grid<int>", "getL2", Grid<int>::_W_30);
+static const Pb::Register _R_55("Grid<int>", "setBound", Grid<int>::_W_31);
+static const Pb::Register _R_56("Grid<int>", "setBoundNeumann", Grid<int>::_W_32);
+static const Pb::Register _R_57("Grid<int>", "getDataPointer", Grid<int>::_W_33);
+static const Pb::Register _R_58("Grid<int>", "printGrid", Grid<int>::_W_34);
+static const Pb::Register _R_59("Grid<Real>", "Grid<Real>", "GridBase");
+template<> const char *Namify<Grid<Real>>::S = "Grid<Real>";
+static const Pb::Register _R_60("Grid<Real>", "Grid", Grid<Real>::_W_9);
+static const Pb::Register _R_61("Grid<Real>", "save", Grid<Real>::_W_10);
+static const Pb::Register _R_62("Grid<Real>", "load", Grid<Real>::_W_11);
+static const Pb::Register _R_63("Grid<Real>", "clear", Grid<Real>::_W_12);
+static const Pb::Register _R_64("Grid<Real>", "copyFrom", Grid<Real>::_W_13);
+static const Pb::Register _R_65("Grid<Real>", "getGridType", Grid<Real>::_W_14);
+static const Pb::Register _R_66("Grid<Real>", "add", Grid<Real>::_W_15);
+static const Pb::Register _R_67("Grid<Real>", "sub", Grid<Real>::_W_16);
+static const Pb::Register _R_68("Grid<Real>", "setConst", Grid<Real>::_W_17);
+static const Pb::Register _R_69("Grid<Real>", "addConst", Grid<Real>::_W_18);
+static const Pb::Register _R_70("Grid<Real>", "addScaled", Grid<Real>::_W_19);
+static const Pb::Register _R_71("Grid<Real>", "mult", Grid<Real>::_W_20);
+static const Pb::Register _R_72("Grid<Real>", "multConst", Grid<Real>::_W_21);
+static const Pb::Register _R_73("Grid<Real>", "clamp", Grid<Real>::_W_22);
+static const Pb::Register _R_74("Grid<Real>", "stomp", Grid<Real>::_W_23);
+static const Pb::Register _R_75("Grid<Real>", "permuteAxes", Grid<Real>::_W_24);
+static const Pb::Register _R_76("Grid<Real>", "permuteAxesCopyToGrid", Grid<Real>::_W_25);
+static const Pb::Register _R_77("Grid<Real>", "getMaxAbs", Grid<Real>::_W_26);
+static const Pb::Register _R_78("Grid<Real>", "getMax", Grid<Real>::_W_27);
+static const Pb::Register _R_79("Grid<Real>", "getMin", Grid<Real>::_W_28);
+static const Pb::Register _R_80("Grid<Real>", "getL1", Grid<Real>::_W_29);
+static const Pb::Register _R_81("Grid<Real>", "getL2", Grid<Real>::_W_30);
+static const Pb::Register _R_82("Grid<Real>", "setBound", Grid<Real>::_W_31);
+static const Pb::Register _R_83("Grid<Real>", "setBoundNeumann", Grid<Real>::_W_32);
+static const Pb::Register _R_84("Grid<Real>", "getDataPointer", Grid<Real>::_W_33);
+static const Pb::Register _R_85("Grid<Real>", "printGrid", Grid<Real>::_W_34);
+static const Pb::Register _R_86("Grid<Vec3>", "Grid<Vec3>", "GridBase");
+template<> const char *Namify<Grid<Vec3>>::S = "Grid<Vec3>";
+static const Pb::Register _R_87("Grid<Vec3>", "Grid", Grid<Vec3>::_W_9);
+static const Pb::Register _R_88("Grid<Vec3>", "save", Grid<Vec3>::_W_10);
+static const Pb::Register _R_89("Grid<Vec3>", "load", Grid<Vec3>::_W_11);
+static const Pb::Register _R_90("Grid<Vec3>", "clear", Grid<Vec3>::_W_12);
+static const Pb::Register _R_91("Grid<Vec3>", "copyFrom", Grid<Vec3>::_W_13);
+static const Pb::Register _R_92("Grid<Vec3>", "getGridType", Grid<Vec3>::_W_14);
+static const Pb::Register _R_93("Grid<Vec3>", "add", Grid<Vec3>::_W_15);
+static const Pb::Register _R_94("Grid<Vec3>", "sub", Grid<Vec3>::_W_16);
+static const Pb::Register _R_95("Grid<Vec3>", "setConst", Grid<Vec3>::_W_17);
+static const Pb::Register _R_96("Grid<Vec3>", "addConst", Grid<Vec3>::_W_18);
+static const Pb::Register _R_97("Grid<Vec3>", "addScaled", Grid<Vec3>::_W_19);
+static const Pb::Register _R_98("Grid<Vec3>", "mult", Grid<Vec3>::_W_20);
+static const Pb::Register _R_99("Grid<Vec3>", "multConst", Grid<Vec3>::_W_21);
+static const Pb::Register _R_100("Grid<Vec3>", "clamp", Grid<Vec3>::_W_22);
+static const Pb::Register _R_101("Grid<Vec3>", "stomp", Grid<Vec3>::_W_23);
+static const Pb::Register _R_102("Grid<Vec3>", "permuteAxes", Grid<Vec3>::_W_24);
+static const Pb::Register _R_103("Grid<Vec3>", "permuteAxesCopyToGrid", Grid<Vec3>::_W_25);
+static const Pb::Register _R_104("Grid<Vec3>", "getMaxAbs", Grid<Vec3>::_W_26);
+static const Pb::Register _R_105("Grid<Vec3>", "getMax", Grid<Vec3>::_W_27);
+static const Pb::Register _R_106("Grid<Vec3>", "getMin", Grid<Vec3>::_W_28);
+static const Pb::Register _R_107("Grid<Vec3>", "getL1", Grid<Vec3>::_W_29);
+static const Pb::Register _R_108("Grid<Vec3>", "getL2", Grid<Vec3>::_W_30);
+static const Pb::Register _R_109("Grid<Vec3>", "setBound", Grid<Vec3>::_W_31);
+static const Pb::Register _R_110("Grid<Vec3>", "setBoundNeumann", Grid<Vec3>::_W_32);
+static const Pb::Register _R_111("Grid<Vec3>", "getDataPointer", Grid<Vec3>::_W_33);
+static const Pb::Register _R_112("Grid<Vec3>", "printGrid", Grid<Vec3>::_W_34);
+#endif
+#ifdef _C_GridBase
+static const Pb::Register _R_113("GridBase", "GridBase", "PbClass");
+template<> const char *Namify<GridBase>::S = "GridBase";
+static const Pb::Register _R_114("GridBase", "GridBase", GridBase::_W_0);
+static const Pb::Register _R_115("GridBase", "getSizeX", GridBase::_W_1);
+static const Pb::Register _R_116("GridBase", "getSizeY", GridBase::_W_2);
+static const Pb::Register _R_117("GridBase", "getSizeZ", GridBase::_W_3);
+static const Pb::Register _R_118("GridBase", "getSize", GridBase::_W_4);
+static const Pb::Register _R_119("GridBase", "is3D", GridBase::_W_5);
+static const Pb::Register _R_120("GridBase", "is4D", GridBase::_W_6);
+static const Pb::Register _R_121("GridBase", "getSizeT", GridBase::_W_7);
+static const Pb::Register _R_122("GridBase", "getStrideT", GridBase::_W_8);
+#endif
+#ifdef _C_MACGrid
+static const Pb::Register _R_123("MACGrid", "MACGrid", "Grid<Vec3>");
+template<> const char *Namify<MACGrid>::S = "MACGrid";
+static const Pb::Register _R_124("MACGrid", "MACGrid", MACGrid::_W_35);
+static const Pb::Register _R_125("MACGrid", "setBoundMAC", MACGrid::_W_36);
+#endif
+static const Pb::Register _R_7("GridType_TypeNone", 0);
+static const Pb::Register _R_8("GridType_TypeReal", 1);
+static const Pb::Register _R_9("GridType_TypeInt", 2);
+static const Pb::Register _R_10("GridType_TypeVec3", 4);
+static const Pb::Register _R_11("GridType_TypeMAC", 8);
+static const Pb::Register _R_12("GridType_TypeLevelset", 16);
+static const Pb::Register _R_13("GridType_TypeFlags", 32);
+static const Pb::Register _R_14("Grid<int>", "IntGrid", "");
+static const Pb::Register _R_15("Grid<Real>", "RealGrid", "");
+static const Pb::Register _R_16("Grid<Vec3>", "VecGrid", "");
+static const Pb::Register _R_17("CellType_TypeNone", 0);
+static const Pb::Register _R_18("CellType_TypeFluid", 1);
+static const Pb::Register _R_19("CellType_TypeObstacle", 2);
+static const Pb::Register _R_20("CellType_TypeEmpty", 4);
+static const Pb::Register _R_21("CellType_TypeInflow", 8);
+static const Pb::Register _R_22("CellType_TypeOutflow", 16);
+static const Pb::Register _R_23("CellType_TypeOpen", 32);
+static const Pb::Register _R_24("CellType_TypeStick", 64);
+static const Pb::Register _R_25("CellType_TypeReserved", 256);
+extern "C" {
+void PbRegister_file_7()
+{
+ KEEP_UNUSED(_R_26);
+ KEEP_UNUSED(_R_27);
+ KEEP_UNUSED(_R_28);
+ KEEP_UNUSED(_R_29);
+ KEEP_UNUSED(_R_30);
+ KEEP_UNUSED(_R_31);
+ KEEP_UNUSED(_R_32);
+ KEEP_UNUSED(_R_33);
+ KEEP_UNUSED(_R_34);
+ KEEP_UNUSED(_R_35);
+ KEEP_UNUSED(_R_36);
+ KEEP_UNUSED(_R_37);
+ KEEP_UNUSED(_R_38);
+ KEEP_UNUSED(_R_39);
+ KEEP_UNUSED(_R_40);
+ KEEP_UNUSED(_R_41);
+ KEEP_UNUSED(_R_42);
+ KEEP_UNUSED(_R_43);
+ KEEP_UNUSED(_R_44);
+ KEEP_UNUSED(_R_45);
+ KEEP_UNUSED(_R_46);
+ KEEP_UNUSED(_R_47);
+ KEEP_UNUSED(_R_48);
+ KEEP_UNUSED(_R_49);
+ KEEP_UNUSED(_R_50);
+ KEEP_UNUSED(_R_51);
+ KEEP_UNUSED(_R_52);
+ KEEP_UNUSED(_R_53);
+ KEEP_UNUSED(_R_54);
+ KEEP_UNUSED(_R_55);
+ KEEP_UNUSED(_R_56);
+ KEEP_UNUSED(_R_57);
+ KEEP_UNUSED(_R_58);
+ KEEP_UNUSED(_R_59);
+ KEEP_UNUSED(_R_60);
+ KEEP_UNUSED(_R_61);
+ KEEP_UNUSED(_R_62);
+ KEEP_UNUSED(_R_63);
+ KEEP_UNUSED(_R_64);
+ KEEP_UNUSED(_R_65);
+ KEEP_UNUSED(_R_66);
+ KEEP_UNUSED(_R_67);
+ KEEP_UNUSED(_R_68);
+ KEEP_UNUSED(_R_69);
+ KEEP_UNUSED(_R_70);
+ KEEP_UNUSED(_R_71);
+ KEEP_UNUSED(_R_72);
+ KEEP_UNUSED(_R_73);
+ KEEP_UNUSED(_R_74);
+ KEEP_UNUSED(_R_75);
+ KEEP_UNUSED(_R_76);
+ KEEP_UNUSED(_R_77);
+ KEEP_UNUSED(_R_78);
+ KEEP_UNUSED(_R_79);
+ KEEP_UNUSED(_R_80);
+ KEEP_UNUSED(_R_81);
+ KEEP_UNUSED(_R_82);
+ KEEP_UNUSED(_R_83);
+ KEEP_UNUSED(_R_84);
+ KEEP_UNUSED(_R_85);
+ KEEP_UNUSED(_R_86);
+ KEEP_UNUSED(_R_87);
+ KEEP_UNUSED(_R_88);
+ KEEP_UNUSED(_R_89);
+ KEEP_UNUSED(_R_90);
+ KEEP_UNUSED(_R_91);
+ KEEP_UNUSED(_R_92);
+ KEEP_UNUSED(_R_93);
+ KEEP_UNUSED(_R_94);
+ KEEP_UNUSED(_R_95);
+ KEEP_UNUSED(_R_96);
+ KEEP_UNUSED(_R_97);
+ KEEP_UNUSED(_R_98);
+ KEEP_UNUSED(_R_99);
+ KEEP_UNUSED(_R_100);
+ KEEP_UNUSED(_R_101);
+ KEEP_UNUSED(_R_102);
+ KEEP_UNUSED(_R_103);
+ KEEP_UNUSED(_R_104);
+ KEEP_UNUSED(_R_105);
+ KEEP_UNUSED(_R_106);
+ KEEP_UNUSED(_R_107);
+ KEEP_UNUSED(_R_108);
+ KEEP_UNUSED(_R_109);
+ KEEP_UNUSED(_R_110);
+ KEEP_UNUSED(_R_111);
+ KEEP_UNUSED(_R_112);
+ KEEP_UNUSED(_R_113);
+ KEEP_UNUSED(_R_114);
+ KEEP_UNUSED(_R_115);
+ KEEP_UNUSED(_R_116);
+ KEEP_UNUSED(_R_117);
+ KEEP_UNUSED(_R_118);
+ KEEP_UNUSED(_R_119);
+ KEEP_UNUSED(_R_120);
+ KEEP_UNUSED(_R_121);
+ KEEP_UNUSED(_R_122);
+ KEEP_UNUSED(_R_123);
+ KEEP_UNUSED(_R_124);
+ KEEP_UNUSED(_R_125);
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/grid4d.cpp b/extern/mantaflow/preprocessed/grid4d.cpp
new file mode 100644
index 00000000000..41d69b2d33a
--- /dev/null
+++ b/extern/mantaflow/preprocessed/grid4d.cpp
@@ -0,0 +1,1798 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Grid representation
+ *
+ ******************************************************************************/
+
+#include <limits>
+#include <sstream>
+#include <cstring>
+
+#include "grid4d.h"
+#include "levelset.h"
+#include "kernel.h"
+#include "mantaio.h"
+
+using namespace std;
+namespace Manta {
+
+//******************************************************************************
+// GridBase members
+
+Grid4dBase::Grid4dBase(FluidSolver *parent) : PbClass(parent), mType(TypeNone)
+{
+ checkParent();
+}
+
+//******************************************************************************
+// Grid4d<T> members
+
+// helpers to set type
+template<class T> inline Grid4dBase::Grid4dType typeList()
+{
+ return Grid4dBase::TypeNone;
+}
+template<> inline Grid4dBase::Grid4dType typeList<Real>()
+{
+ return Grid4dBase::TypeReal;
+}
+template<> inline Grid4dBase::Grid4dType typeList<int>()
+{
+ return Grid4dBase::TypeInt;
+}
+template<> inline Grid4dBase::Grid4dType typeList<Vec3>()
+{
+ return Grid4dBase::TypeVec3;
+}
+template<> inline Grid4dBase::Grid4dType typeList<Vec4>()
+{
+ return Grid4dBase::TypeVec4;
+}
+
+template<class T> Grid4d<T>::Grid4d(FluidSolver *parent, bool show) : Grid4dBase(parent)
+{
+ assertMsg(parent->is3D() && parent->supports4D(),
+ "To use 4d grids create a 3d solver with fourthDim>0");
+
+ mType = typeList<T>();
+ Vec3i s = parent->getGridSize();
+ mSize = Vec4i(s.x, s.y, s.z, parent->getFourthDim());
+ mData = parent->getGrid4dPointer<T>();
+ assertMsg(mData, "Couldnt allocate data pointer!");
+
+ mStrideZ = (mSize.x * mSize.y);
+ mStrideT = (mStrideZ * mSize.z);
+
+ Real sizemax = (Real)mSize[0];
+ for (int c = 1; c < 3; ++c)
+ if (mSize[c] > sizemax)
+ sizemax = mSize[c];
+ // note - the 4d component is ignored for dx! keep same scaling as for 3d...
+ mDx = 1.0 / sizemax;
+
+ clear();
+ setHidden(!show);
+}
+
+template<class T> Grid4d<T>::Grid4d(const Grid4d<T> &a) : Grid4dBase(a.getParent())
+{
+ mSize = a.mSize;
+ mType = a.mType;
+ mStrideZ = a.mStrideZ;
+ mStrideT = a.mStrideT;
+ mDx = a.mDx;
+ FluidSolver *gp = a.getParent();
+ mData = gp->getGrid4dPointer<T>();
+ assertMsg(mData, "Couldnt allocate data pointer!");
+
+ memcpy(mData, a.mData, sizeof(T) * a.mSize.x * a.mSize.y * a.mSize.z * a.mSize.t);
+}
+
+template<class T> Grid4d<T>::~Grid4d()
+{
+ mParent->freeGrid4dPointer<T>(mData);
+}
+
+template<class T> void Grid4d<T>::clear()
+{
+ memset(mData, 0, sizeof(T) * mSize.x * mSize.y * mSize.z * mSize.t);
+}
+
+template<class T> void Grid4d<T>::swap(Grid4d<T> &other)
+{
+ if (other.getSizeX() != getSizeX() || other.getSizeY() != getSizeY() ||
+ other.getSizeZ() != getSizeZ() || other.getSizeT() != getSizeT())
+ errMsg("Grid4d::swap(): Grid4d dimensions mismatch.");
+
+ T *dswap = other.mData;
+ other.mData = mData;
+ mData = dswap;
+}
+
+template<class T> void Grid4d<T>::load(string name)
+{
+ if (name.find_last_of('.') == string::npos)
+ errMsg("file '" + name + "' does not have an extension");
+ string ext = name.substr(name.find_last_of('.'));
+ if (ext == ".uni")
+ readGrid4dUni(name, this);
+ else if (ext == ".raw")
+ readGrid4dRaw(name, this);
+ else
+ errMsg("file '" + name + "' filetype not supported");
+}
+
+template<class T> void Grid4d<T>::save(string name)
+{
+ if (name.find_last_of('.') == string::npos)
+ errMsg("file '" + name + "' does not have an extension");
+ string ext = name.substr(name.find_last_of('.'));
+ if (ext == ".uni")
+ writeGrid4dUni(name, this);
+ else if (ext == ".raw")
+ writeGrid4dRaw(name, this);
+ else
+ errMsg("file '" + name + "' filetype not supported");
+}
+
+//******************************************************************************
+// Grid4d<T> operators
+
+//! Kernel: Compute min value of Real Grid4d
+
+struct kn4dMinReal : public KernelBase {
+ kn4dMinReal(Grid4d<Real> &val)
+ : KernelBase(&val, 0), val(val), minVal(std::numeric_limits<Real>::max())
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid4d<Real> &val, Real &minVal)
+ {
+ if (val[idx] < minVal)
+ minVal = val[idx];
+ }
+ inline operator Real()
+ {
+ return minVal;
+ }
+ inline Real &getRet()
+ {
+ return minVal;
+ }
+ inline Grid4d<Real> &getArg0()
+ {
+ return val;
+ }
+ typedef Grid4d<Real> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn4dMinReal ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, minVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ kn4dMinReal(kn4dMinReal &o, tbb::split)
+ : KernelBase(o), val(o.val), minVal(std::numeric_limits<Real>::max())
+ {
+ }
+ void join(const kn4dMinReal &o)
+ {
+ minVal = min(minVal, o.minVal);
+ }
+ Grid4d<Real> &val;
+ Real minVal;
+};
+
+//! Kernel: Compute max value of Real Grid4d
+// Parallel max-reduction over every cell of a Real-valued 4d grid
+// (tbb parallel_reduce with split constructor + join, result via
+// implicit conversion to Real).
+
+struct kn4dMaxReal : public KernelBase {
+ // Seeded with -max() rather than ::min(): for floating-point types,
+ // std::numeric_limits<Real>::min() is the smallest *positive* value,
+ // so the negated max is the correct identity for a max-reduction.
+ kn4dMaxReal(Grid4d<Real> &val)
+ : KernelBase(&val, 0), val(val), maxVal(-std::numeric_limits<Real>::max())
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell reduction step.
+ inline void op(IndexInt idx, Grid4d<Real> &val, Real &maxVal)
+ {
+ if (val[idx] > maxVal)
+ maxVal = val[idx];
+ }
+ inline operator Real()
+ {
+ return maxVal;
+ }
+ inline Real &getRet()
+ {
+ return maxVal;
+ }
+ inline Grid4d<Real> &getArg0()
+ {
+ return val;
+ }
+ typedef Grid4d<Real> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn4dMaxReal ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, maxVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ // tbb splitting constructor: fresh identity value per sub-range.
+ kn4dMaxReal(kn4dMaxReal &o, tbb::split)
+ : KernelBase(o), val(o.val), maxVal(-std::numeric_limits<Real>::max())
+ {
+ }
+ // Merge a sub-range's partial maximum.
+ void join(const kn4dMaxReal &o)
+ {
+ maxVal = max(maxVal, o.maxVal);
+ }
+ Grid4d<Real> &val;
+ Real maxVal;
+};
+
+//! Kernel: Compute min value of int Grid4d
+// Parallel min-reduction over every cell of an int-valued 4d grid
+// (tbb parallel_reduce, split + join, result via conversion to int).
+
+struct kn4dMinInt : public KernelBase {
+ kn4dMinInt(Grid4d<int> &val)
+ : KernelBase(&val, 0), val(val), minVal(std::numeric_limits<int>::max())
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell reduction step.
+ inline void op(IndexInt idx, Grid4d<int> &val, int &minVal)
+ {
+ if (val[idx] < minVal)
+ minVal = val[idx];
+ }
+ inline operator int()
+ {
+ return minVal;
+ }
+ inline int &getRet()
+ {
+ return minVal;
+ }
+ inline Grid4d<int> &getArg0()
+ {
+ return val;
+ }
+ typedef Grid4d<int> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn4dMinInt ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, minVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ // tbb splitting constructor: fresh identity (INT_MAX) per sub-range.
+ kn4dMinInt(kn4dMinInt &o, tbb::split)
+ : KernelBase(o), val(o.val), minVal(std::numeric_limits<int>::max())
+ {
+ }
+ void join(const kn4dMinInt &o)
+ {
+ minVal = min(minVal, o.minVal);
+ }
+ Grid4d<int> &val;
+ int minVal;
+};
+
+//! Kernel: Compute max value of int Grid4d
+// Parallel max-reduction over every cell of an int-valued 4d grid.
+// For integers, std::numeric_limits<int>::min() is the true lowest value,
+// so (unlike the Real kernel) it is used directly as the identity element.
+
+struct kn4dMaxInt : public KernelBase {
+ kn4dMaxInt(Grid4d<int> &val)
+ : KernelBase(&val, 0), val(val), maxVal(std::numeric_limits<int>::min())
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell reduction step.
+ inline void op(IndexInt idx, Grid4d<int> &val, int &maxVal)
+ {
+ if (val[idx] > maxVal)
+ maxVal = val[idx];
+ }
+ inline operator int()
+ {
+ return maxVal;
+ }
+ inline int &getRet()
+ {
+ return maxVal;
+ }
+ inline Grid4d<int> &getArg0()
+ {
+ return val;
+ }
+ typedef Grid4d<int> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn4dMaxInt ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, maxVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ // tbb splitting constructor: fresh identity per sub-range.
+ kn4dMaxInt(kn4dMaxInt &o, tbb::split)
+ : KernelBase(o), val(o.val), maxVal(std::numeric_limits<int>::min())
+ {
+ }
+ void join(const kn4dMaxInt &o)
+ {
+ maxVal = max(maxVal, o.maxVal);
+ }
+ Grid4d<int> &val;
+ int maxVal;
+};
+
+//! Kernel: Compute min norm of vec Grid4d
+// Parallel reduction of the minimal *squared* norm over all cells of a
+// vector-valued 4d grid. Note: the reduced value is normSquare(), not the
+// norm itself — callers (e.g. Grid4d<Vec3/Vec4>::getMin) take the sqrt of
+// the returned value.
+
+template<class VEC> struct kn4dMinVec : public KernelBase {
+ kn4dMinVec(Grid4d<VEC> &val)
+ : KernelBase(&val, 0), val(val), minVal(std::numeric_limits<Real>::max())
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell reduction step on the squared vector norm.
+ inline void op(IndexInt idx, Grid4d<VEC> &val, Real &minVal)
+ {
+ const Real s = normSquare(val[idx]);
+ if (s < minVal)
+ minVal = s;
+ }
+ inline operator Real()
+ {
+ return minVal;
+ }
+ inline Real &getRet()
+ {
+ return minVal;
+ }
+ inline Grid4d<VEC> &getArg0()
+ {
+ return val;
+ }
+ typedef Grid4d<VEC> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn4dMinVec ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, minVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ // tbb splitting constructor: fresh identity per sub-range.
+ kn4dMinVec(kn4dMinVec &o, tbb::split)
+ : KernelBase(o), val(o.val), minVal(std::numeric_limits<Real>::max())
+ {
+ }
+ void join(const kn4dMinVec &o)
+ {
+ minVal = min(minVal, o.minVal);
+ }
+ Grid4d<VEC> &val;
+ Real minVal;
+};
+
+//! Kernel: Compute max norm of vec Grid4d
+// Parallel reduction of the maximal *squared* norm over all cells of a
+// vector-valued 4d grid; callers take the sqrt of the result.
+
+template<class VEC> struct kn4dMaxVec : public KernelBase {
+ // Seeded with -max(); harmless here since normSquare() is >= 0.
+ kn4dMaxVec(Grid4d<VEC> &val)
+ : KernelBase(&val, 0), val(val), maxVal(-std::numeric_limits<Real>::max())
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell reduction step on the squared vector norm.
+ inline void op(IndexInt idx, Grid4d<VEC> &val, Real &maxVal)
+ {
+ const Real s = normSquare(val[idx]);
+ if (s > maxVal)
+ maxVal = s;
+ }
+ inline operator Real()
+ {
+ return maxVal;
+ }
+ inline Real &getRet()
+ {
+ return maxVal;
+ }
+ inline Grid4d<VEC> &getArg0()
+ {
+ return val;
+ }
+ typedef Grid4d<VEC> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn4dMaxVec ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, maxVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ // tbb splitting constructor: fresh identity per sub-range.
+ kn4dMaxVec(kn4dMaxVec &o, tbb::split)
+ : KernelBase(o), val(o.val), maxVal(-std::numeric_limits<Real>::max())
+ {
+ }
+ void join(const kn4dMaxVec &o)
+ {
+ maxVal = max(maxVal, o.maxVal);
+ }
+ Grid4d<VEC> &val;
+ Real maxVal;
+};
+
+//! In-place element-wise division, delegating to the Grid4dSafeDiv kernel
+//! (presumably guards against division by zero — confirm in its definition).
+template<class T> Grid4d<T> &Grid4d<T>::safeDivide(const Grid4d<T> &a)
+{
+ Grid4dSafeDiv<T>(*this, a);
+ return *this;
+}
+//! Copy all cell data from grid 'a'; both grids must have identical x/y/z/t
+//! resolution (asserted). Uses a raw memcpy, so cell type T is assumed
+//! trivially copyable (Real/int/Vec types used here are). If copyType is
+//! set, the grid-type marker flag is copied as well.
+template<class T> Grid4d<T> &Grid4d<T>::copyFrom(const Grid4d<T> &a, bool copyType)
+{
+ assertMsg(a.mSize.x == mSize.x && a.mSize.y == mSize.y && a.mSize.z == mSize.z &&
+ a.mSize.t == mSize.t,
+ "different Grid4d resolutions " << a.mSize << " vs " << this->mSize);
+ memcpy(mData, a.mData, sizeof(T) * mSize.x * mSize.y * mSize.z * mSize.t);
+ if (copyType)
+ mType = a.mType; // copy type marker
+ return *this;
+}
+/*template<class T> Grid4d<T>& Grid4d<T>::operator= (const Grid4d<T>& a) {
+ note: do not use , use copyFrom instead
+}*/
+
+//! Kernel: set every cell of the grid to the constant 'val'
+//! (parallel_for over the flat cell index; backs Grid4d<T>::setConst).
+template<class T> struct kn4dSetConstReal : public KernelBase {
+ kn4dSetConstReal(Grid4d<T> &me, T val) : KernelBase(&me, 0), me(me), val(val)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell operation: overwrite with the constant.
+ inline void op(IndexInt idx, Grid4d<T> &me, T val) const
+ {
+ me[idx] = val;
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline T &getArg1()
+ {
+ return val;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn4dSetConstReal ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, val);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ T val;
+};
+//! Kernel: add the constant 'val' to every cell of the grid
+//! (parallel_for over the flat cell index; backs Grid4d<T>::addConst).
+template<class T> struct kn4dAddConstReal : public KernelBase {
+ kn4dAddConstReal(Grid4d<T> &me, T val) : KernelBase(&me, 0), me(me), val(val)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell operation: accumulate the constant.
+ inline void op(IndexInt idx, Grid4d<T> &me, T val) const
+ {
+ me[idx] += val;
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline T &getArg1()
+ {
+ return val;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn4dAddConstReal ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, val);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ T val;
+};
+//! Kernel: multiply every cell of the grid by the constant 'val'
+//! (parallel_for over the flat cell index; backs Grid4d<T>::multConst).
+template<class T> struct kn4dMultConst : public KernelBase {
+ kn4dMultConst(Grid4d<T> &me, T val) : KernelBase(&me, 0), me(me), val(val)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell operation: scale by the constant.
+ inline void op(IndexInt idx, Grid4d<T> &me, T val) const
+ {
+ me[idx] *= val;
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline T &getArg1()
+ {
+ return val;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn4dMultConst ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, val);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ T val;
+};
+//! Kernel: clamp every cell of the grid to the closed range [min, max]
+//! (parallel_for over the flat cell index; backs Grid4d<T>::clamp).
+//! Note: members shadow any global min/max; the clamp() call inside op()
+//! is resolved via the surrounding namespace.
+template<class T> struct kn4dClamp : public KernelBase {
+ kn4dClamp(Grid4d<T> &me, T min, T max) : KernelBase(&me, 0), me(me), min(min), max(max)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell operation: clamp in place.
+ inline void op(IndexInt idx, Grid4d<T> &me, T min, T max) const
+ {
+ me[idx] = clamp(me[idx], min, max);
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline T &getArg1()
+ {
+ return min;
+ }
+ typedef T type1;
+ inline T &getArg2()
+ {
+ return max;
+ }
+ typedef T type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel kn4dClamp ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, min, max);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ T min;
+ T max;
+};
+
+// Thin member-function wrappers that dispatch the actual work to the
+// corresponding (generated) parallel kernels. Each kernel runs on
+// construction, so the expression statements below execute immediately.
+
+//! element-wise += a
+template<class T> void Grid4d<T>::add(const Grid4d<T> &a)
+{
+ Grid4dAdd<T, T>(*this, a);
+}
+//! element-wise -= a
+template<class T> void Grid4d<T>::sub(const Grid4d<T> &a)
+{
+ Grid4dSub<T, T>(*this, a);
+}
+//! element-wise += a * factor
+template<class T> void Grid4d<T>::addScaled(const Grid4d<T> &a, const T &factor)
+{
+ Grid4dScaledAdd<T, T>(*this, a, factor);
+}
+//! fill the grid with a constant
+template<class T> void Grid4d<T>::setConst(T a)
+{
+ kn4dSetConstReal<T>(*this, T(a));
+}
+//! add a constant to every cell
+template<class T> void Grid4d<T>::addConst(T a)
+{
+ kn4dAddConstReal<T>(*this, T(a));
+}
+//! multiply every cell by a constant
+template<class T> void Grid4d<T>::multConst(T a)
+{
+ kn4dMultConst<T>(*this, a);
+}
+
+//! element-wise *= a
+template<class T> void Grid4d<T>::mult(const Grid4d<T> &a)
+{
+ Grid4dMult<T, T>(*this, a);
+}
+
+//! clamp every cell to [min, max]; the Real bounds are converted to the
+//! cell type T (for vector grids this yields component-wise bounds via
+//! the T(Real) constructor).
+template<class T> void Grid4d<T>::clamp(Real min, Real max)
+{
+ kn4dClamp<T>(*this, T(min), T(max));
+}
+
+// Explicit specializations of getMin/getMax/getMaxAbs per cell type.
+// The kernels run in their constructors; their implicit conversion
+// operators yield the reduced value.
+// - Real grids: direct min/max reduction.
+// - Vector grids: kernels reduce *squared* norms, hence the sqrt here.
+//   getMaxAbs equals getMax for vectors since norms are non-negative.
+// - int grids: results are widened to Real for a uniform interface.
+
+template<> Real Grid4d<Real>::getMax()
+{
+ return kn4dMaxReal(*this);
+}
+template<> Real Grid4d<Real>::getMin()
+{
+ return kn4dMinReal(*this);
+}
+template<> Real Grid4d<Real>::getMaxAbs()
+{
+ // need both extremes: the largest magnitude may be the most negative value
+ Real amin = kn4dMinReal(*this);
+ Real amax = kn4dMaxReal(*this);
+ return max(fabs(amin), fabs(amax));
+}
+template<> Real Grid4d<Vec4>::getMax()
+{
+ return sqrt(kn4dMaxVec<Vec4>(*this));
+}
+template<> Real Grid4d<Vec4>::getMin()
+{
+ return sqrt(kn4dMinVec<Vec4>(*this));
+}
+template<> Real Grid4d<Vec4>::getMaxAbs()
+{
+ return sqrt(kn4dMaxVec<Vec4>(*this));
+}
+template<> Real Grid4d<int>::getMax()
+{
+ return (Real)kn4dMaxInt(*this);
+}
+template<> Real Grid4d<int>::getMin()
+{
+ return (Real)kn4dMinInt(*this);
+}
+template<> Real Grid4d<int>::getMaxAbs()
+{
+ int amin = kn4dMinInt(*this);
+ int amax = kn4dMaxInt(*this);
+ return max(fabs((Real)amin), fabs((Real)amax));
+}
+template<> Real Grid4d<Vec3>::getMax()
+{
+ return sqrt(kn4dMaxVec<Vec3>(*this));
+}
+template<> Real Grid4d<Vec3>::getMin()
+{
+ return sqrt(kn4dMinVec<Vec3>(*this));
+}
+template<> Real Grid4d<Vec3>::getMaxAbs()
+{
+ return sqrt(kn4dMaxVec<Vec3>(*this));
+}
+
+//! Debug helper: dump grid contents through debMsg.
+//! zSlice / tSlice select a single z / t slice; a negative value means
+//! "print all slices" on that axis. printIndex prepends i,j,k,t to each
+//! cell value; bnd trims that many border cells on every side
+//! (via FOR_IJKT_BND). Serial — intended for small test grids only.
+template<class T> void Grid4d<T>::printGrid(int zSlice, int tSlice, bool printIndex, int bnd)
+{
+ std::ostringstream out;
+ out << std::endl;
+ FOR_IJKT_BND(*this, bnd)
+ {
+ IndexInt idx = (*this).index(i, j, k, t);
+ if (((zSlice >= 0 && k == zSlice) || (zSlice < 0)) &&
+ ((tSlice >= 0 && t == tSlice) || (tSlice < 0))) {
+ out << " ";
+ if (printIndex)
+ out << " " << i << "," << j << "," << k << "," << t << ":";
+ out << (*this)[idx];
+ // blank-line separators at the end of each row / plane / volume
+ if (i == (*this).getSizeX() - 1 - bnd) {
+ out << std::endl;
+ if (j == (*this).getSizeY() - 1 - bnd) {
+ out << std::endl;
+ if (k == (*this).getSizeZ() - 1 - bnd) {
+ out << std::endl;
+ }
+ }
+ }
+ }
+ }
+ out << endl;
+ debMsg("Printing '" << this->getName() << "' " << out.str().c_str() << " ", 1);
+}
+
+// helper to set/get components of vec4 Grids
+//! Kernel: extract component 'c' (0..3) of each Vec4 cell of 'src' into the
+//! Real grid 'dst' (parallel_for over the flat index). Grid sizes are
+//! assumed to match — no bounds check here; TODO confirm callers guarantee it.
+struct knGetComp4d : public KernelBase {
+ knGetComp4d(const Grid4d<Vec4> &src, Grid4d<Real> &dst, int c)
+ : KernelBase(&src, 0), src(src), dst(dst), c(c)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell operation: copy one vector component.
+ inline void op(IndexInt idx, const Grid4d<Vec4> &src, Grid4d<Real> &dst, int c) const
+ {
+ dst[idx] = src[idx][c];
+ }
+ inline const Grid4d<Vec4> &getArg0()
+ {
+ return src;
+ }
+ typedef Grid4d<Vec4> type0;
+ inline Grid4d<Real> &getArg1()
+ {
+ return dst;
+ }
+ typedef Grid4d<Real> type1;
+ inline int &getArg2()
+ {
+ return c;
+ }
+ typedef int type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knGetComp4d ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, src, dst, c);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const Grid4d<Vec4> &src;
+ Grid4d<Real> &dst;
+ int c;
+};
+;
+//! Kernel: write the Real grid 'src' into component 'c' (0..3) of each Vec4
+//! cell of 'dst' (parallel_for over the flat index); inverse of knGetComp4d.
+struct knSetComp4d : public KernelBase {
+ knSetComp4d(const Grid4d<Real> &src, Grid4d<Vec4> &dst, int c)
+ : KernelBase(&src, 0), src(src), dst(dst), c(c)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell operation: overwrite one vector component.
+ inline void op(IndexInt idx, const Grid4d<Real> &src, Grid4d<Vec4> &dst, int c) const
+ {
+ dst[idx][c] = src[idx];
+ }
+ inline const Grid4d<Real> &getArg0()
+ {
+ return src;
+ }
+ typedef Grid4d<Real> type0;
+ inline Grid4d<Vec4> &getArg1()
+ {
+ return dst;
+ }
+ typedef Grid4d<Vec4> type1;
+ inline int &getArg2()
+ {
+ return c;
+ }
+ typedef int type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSetComp4d ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, src, dst, c);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const Grid4d<Real> &src;
+ Grid4d<Vec4> &dst;
+ int c;
+};
+;
+//! Python-exposed plugin: copy component 'c' of a Vec4 grid into a Real grid.
+void getComp4d(const Grid4d<Vec4> &src, Grid4d<Real> &dst, int c)
+{
+ knGetComp4d(src, dst, c);
+}
+// Generated Python wrapper: unpacks keyword/positional args, runs the plugin
+// between pbPreparePlugin/pbFinalizePlugin (timing hooks), returns Py None.
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "getComp4d", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid4d<Vec4> &src = *_args.getPtr<Grid4d<Vec4>>("src", 0, &_lock);
+ Grid4d<Real> &dst = *_args.getPtr<Grid4d<Real>>("dst", 1, &_lock);
+ int c = _args.get<int>("c", 2, &_lock);
+ _retval = getPyNone();
+ getComp4d(src, dst, c);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "getComp4d", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ // forward C++ exceptions to Python as errors
+ pbSetError("getComp4d", e.what());
+ return 0;
+ }
+}
+// Register the wrapper with the mantaflow Python function registry.
+static const Pb::Register _RP_getComp4d("", "getComp4d", _W_0);
+extern "C" {
+void PbRegister_getComp4d()
+{
+ KEEP_UNUSED(_RP_getComp4d);
+}
+}
+
+;
+//! Python-exposed plugin: write a Real grid into component 'c' of a Vec4 grid.
+void setComp4d(const Grid4d<Real> &src, Grid4d<Vec4> &dst, int c)
+{
+ knSetComp4d(src, dst, c);
+}
+// Generated Python wrapper (same registry pattern as _W_0).
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setComp4d", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid4d<Real> &src = *_args.getPtr<Grid4d<Real>>("src", 0, &_lock);
+ Grid4d<Vec4> &dst = *_args.getPtr<Grid4d<Vec4>>("dst", 1, &_lock);
+ int c = _args.get<int>("c", 2, &_lock);
+ _retval = getPyNone();
+ setComp4d(src, dst, c);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setComp4d", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setComp4d", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setComp4d("", "setComp4d", _W_1);
+extern "C" {
+void PbRegister_setComp4d()
+{
+ KEEP_UNUSED(_RP_setComp4d);
+}
+}
+
+;
+
+//! Kernel: overwrite all boundary cells of width 'w' with 'value'.
+//! Note the condition uses i <= w (not i < w), so w+1 cell layers are set
+//! on each side of every axis (cells 0..w and size-1-w..size-1).
+template<class T> struct knSetBnd4d : public KernelBase {
+ knSetBnd4d(Grid4d<T> &grid, T value, int w)
+ : KernelBase(&grid, 0), grid(grid), value(value), w(w)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell operation: test whether (i,j,k,t) lies inside the boundary
+ // band of any of the four axes, and overwrite if so.
+ inline void op(int i, int j, int k, int t, Grid4d<T> &grid, T value, int w) const
+ {
+ bool bnd = (i <= w || i >= grid.getSizeX() - 1 - w || j <= w || j >= grid.getSizeY() - 1 - w ||
+ k <= w || k >= grid.getSizeZ() - 1 - w || t <= w || t >= grid.getSizeT() - 1 - w);
+ if (bnd)
+ grid(i, j, k, t) = value;
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return grid;
+ }
+ typedef Grid4d<T> type0;
+ inline T &getArg1()
+ {
+ return value;
+ }
+ typedef T type1;
+ inline int &getArg2()
+ {
+ return w;
+ }
+ typedef int type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSetBnd4d ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ
+ << " "
+ " t "
+ << minT << " - " << maxT,
+ 4);
+ };
+ // Parallelize over the outermost axis that actually has extent:
+ // t if 4d, else z if 3d, else y — the remaining axes are looped serially.
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ if (maxT > 1) {
+ for (int t = __r.begin(); t != (int)__r.end(); t++)
+ for (int k = 0; k < maxZ; k++)
+ for (int j = 0; j < maxY; j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, grid, value, w);
+ }
+ else if (maxZ > 1) {
+ const int t = 0;
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < maxY; j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, grid, value, w);
+ }
+ else {
+ const int t = 0;
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, grid, value, w);
+ }
+ }
+ void run()
+ {
+ if (maxT > 1) {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minT, maxT), *this);
+ }
+ else if (maxZ > 1) {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ }
+ else {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ }
+ Grid4d<T> &grid;
+ T value;
+ int w;
+};
+
+//! Set all boundary cells (width boundaryWidth; see knSetBnd4d for the
+//! exact layer count) to a constant value.
+template<class T> void Grid4d<T>::setBound(T value, int boundaryWidth)
+{
+ knSetBnd4d<T>(*this, value, boundaryWidth);
+}
+
+//! Kernel: Neumann (zero-gradient) boundary — copy the value of the nearest
+//! interior cell into every boundary cell of width 'w', per axis.
+template<class T> struct knSetBnd4dNeumann : public KernelBase {
+ knSetBnd4dNeumann(Grid4d<T> &grid, int w) : KernelBase(&grid, 0), grid(grid), w(w)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell operation: for each axis whose index lies in the boundary band,
+ // clamp the source index to the first interior cell on that side; if any
+ // axis was clamped, copy the interior value into this cell.
+ inline void op(int i, int j, int k, int t, Grid4d<T> &grid, int w) const
+ {
+ bool set = false;
+ int si = i, sj = j, sk = k, st = t;
+ if (i <= w) {
+ si = w + 1;
+ set = true;
+ }
+ if (i >= grid.getSizeX() - 1 - w) {
+ si = grid.getSizeX() - 1 - w - 1;
+ set = true;
+ }
+ if (j <= w) {
+ sj = w + 1;
+ set = true;
+ }
+ if (j >= grid.getSizeY() - 1 - w) {
+ sj = grid.getSizeY() - 1 - w - 1;
+ set = true;
+ }
+ if (k <= w) {
+ sk = w + 1;
+ set = true;
+ }
+ if (k >= grid.getSizeZ() - 1 - w) {
+ sk = grid.getSizeZ() - 1 - w - 1;
+ set = true;
+ }
+ if (t <= w) {
+ st = w + 1;
+ set = true;
+ }
+ if (t >= grid.getSizeT() - 1 - w) {
+ st = grid.getSizeT() - 1 - w - 1;
+ set = true;
+ }
+ if (set)
+ grid(i, j, k, t) = grid(si, sj, sk, st);
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return grid;
+ }
+ typedef Grid4d<T> type0;
+ inline int &getArg1()
+ {
+ return w;
+ }
+ typedef int type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSetBnd4dNeumann ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ
+ << " "
+ " t "
+ << minT << " - " << maxT,
+ 4);
+ };
+ // Parallelize over the outermost nontrivial axis (t, else z, else y).
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ if (maxT > 1) {
+ for (int t = __r.begin(); t != (int)__r.end(); t++)
+ for (int k = 0; k < maxZ; k++)
+ for (int j = 0; j < maxY; j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, grid, w);
+ }
+ else if (maxZ > 1) {
+ const int t = 0;
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < maxY; j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, grid, w);
+ }
+ else {
+ const int t = 0;
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, grid, w);
+ }
+ }
+ void run()
+ {
+ if (maxT > 1) {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minT, maxT), *this);
+ }
+ else if (maxZ > 1) {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ }
+ else {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ }
+ Grid4d<T> &grid;
+ int w;
+};
+
+//! Apply zero-gradient (Neumann) boundary conditions of the given width by
+//! copying the nearest interior value into each boundary cell.
+template<class T> void Grid4d<T>::setBoundNeumann(int boundaryWidth)
+{
+ knSetBnd4dNeumann<T>(*this, boundaryWidth);
+}
+
+//******************************************************************************
+// testing helpers
+
+//! compute maximal difference of two cells in the grid, needed for testing system
+//! (serial loop; assumes g1 and g2 have the same resolution — the FOR_IJKT_BND
+//! range is taken from g1 only, so confirm matching sizes at the call site)
+
+Real grid4dMaxDiff(Grid4d<Real> &g1, Grid4d<Real> &g2)
+{
+ double maxVal = 0.;
+ FOR_IJKT_BND(g1, 0)
+ {
+ maxVal = std::max(maxVal, (double)fabs(g1(i, j, k, t) - g2(i, j, k, t)));
+ }
+ return maxVal;
+}
+// Generated Python wrapper; returns the difference as a Python float.
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "grid4dMaxDiff", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid4d<Real> &g1 = *_args.getPtr<Grid4d<Real>>("g1", 0, &_lock);
+ Grid4d<Real> &g2 = *_args.getPtr<Grid4d<Real>>("g2", 1, &_lock);
+ _retval = toPy(grid4dMaxDiff(g1, g2));
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "grid4dMaxDiff", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("grid4dMaxDiff", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_grid4dMaxDiff("", "grid4dMaxDiff", _W_2);
+extern "C" {
+void PbRegister_grid4dMaxDiff()
+{
+ KEEP_UNUSED(_RP_grid4dMaxDiff);
+}
+}
+//! maximal per-cell absolute difference of two int grids (testing helper);
+//! values are widened to double before subtracting to avoid int overflow
+Real grid4dMaxDiffInt(Grid4d<int> &g1, Grid4d<int> &g2)
+{
+ double maxVal = 0.;
+ FOR_IJKT_BND(g1, 0)
+ {
+ maxVal = std::max(maxVal, (double)fabs((double)g1(i, j, k, t) - g2(i, j, k, t)));
+ }
+ return maxVal;
+}
+// Generated Python wrapper.
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "grid4dMaxDiffInt", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid4d<int> &g1 = *_args.getPtr<Grid4d<int>>("g1", 0, &_lock);
+ Grid4d<int> &g2 = *_args.getPtr<Grid4d<int>>("g2", 1, &_lock);
+ _retval = toPy(grid4dMaxDiffInt(g1, g2));
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "grid4dMaxDiffInt", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("grid4dMaxDiffInt", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_grid4dMaxDiffInt("", "grid4dMaxDiffInt", _W_3);
+extern "C" {
+void PbRegister_grid4dMaxDiffInt()
+{
+ KEEP_UNUSED(_RP_grid4dMaxDiffInt);
+}
+}
+
+//! maximal per-cell difference of two Vec3 grids (testing helper); the
+//! per-cell metric is the L1 distance (sum of component-wise |differences|)
+Real grid4dMaxDiffVec3(Grid4d<Vec3> &g1, Grid4d<Vec3> &g2)
+{
+ double maxVal = 0.;
+ FOR_IJKT_BND(g1, 0)
+ {
+ double d = 0.;
+ for (int c = 0; c < 3; ++c) {
+ d += fabs((double)g1(i, j, k, t)[c] - (double)g2(i, j, k, t)[c]);
+ }
+ maxVal = std::max(maxVal, d);
+ }
+ return maxVal;
+}
+// Generated Python wrapper.
+static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "grid4dMaxDiffVec3", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid4d<Vec3> &g1 = *_args.getPtr<Grid4d<Vec3>>("g1", 0, &_lock);
+ Grid4d<Vec3> &g2 = *_args.getPtr<Grid4d<Vec3>>("g2", 1, &_lock);
+ _retval = toPy(grid4dMaxDiffVec3(g1, g2));
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "grid4dMaxDiffVec3", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("grid4dMaxDiffVec3", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_grid4dMaxDiffVec3("", "grid4dMaxDiffVec3", _W_4);
+extern "C" {
+void PbRegister_grid4dMaxDiffVec3()
+{
+ KEEP_UNUSED(_RP_grid4dMaxDiffVec3);
+}
+}
+
+//! maximal per-cell difference of two Vec4 grids (testing helper); the
+//! per-cell metric is the L1 distance over all four components
+Real grid4dMaxDiffVec4(Grid4d<Vec4> &g1, Grid4d<Vec4> &g2)
+{
+ double maxVal = 0.;
+ FOR_IJKT_BND(g1, 0)
+ {
+ double d = 0.;
+ for (int c = 0; c < 4; ++c) {
+ d += fabs((double)g1(i, j, k, t)[c] - (double)g2(i, j, k, t)[c]);
+ }
+ maxVal = std::max(maxVal, d);
+ }
+ return maxVal;
+}
+// Generated Python wrapper.
+static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "grid4dMaxDiffVec4", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid4d<Vec4> &g1 = *_args.getPtr<Grid4d<Vec4>>("g1", 0, &_lock);
+ Grid4d<Vec4> &g2 = *_args.getPtr<Grid4d<Vec4>>("g2", 1, &_lock);
+ _retval = toPy(grid4dMaxDiffVec4(g1, g2));
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "grid4dMaxDiffVec4", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("grid4dMaxDiffVec4", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_grid4dMaxDiffVec4("", "grid4dMaxDiffVec4", _W_5);
+extern "C" {
+void PbRegister_grid4dMaxDiffVec4()
+{
+ KEEP_UNUSED(_RP_grid4dMaxDiffVec4);
+}
+}
+
+// set a region to some value
+//! Kernel: set every cell inside the axis-aligned box [start, end] (bounds
+//! inclusive, compared per component against the cell's (i,j,k,t) position)
+//! to 'value'; cells outside the box are untouched.
+
+template<class S> struct knSetRegion4d : public KernelBase {
+ knSetRegion4d(Grid4d<S> &dst, Vec4 start, Vec4 end, S value)
+ : KernelBase(&dst, 0), dst(dst), start(start), end(end), value(value)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell operation: early-return as soon as one axis is outside the box.
+ inline void op(int i, int j, int k, int t, Grid4d<S> &dst, Vec4 start, Vec4 end, S value) const
+ {
+ Vec4 p(i, j, k, t);
+ for (int c = 0; c < 4; ++c)
+ if (p[c] < start[c] || p[c] > end[c])
+ return;
+ dst(i, j, k, t) = value;
+ }
+ inline Grid4d<S> &getArg0()
+ {
+ return dst;
+ }
+ typedef Grid4d<S> type0;
+ inline Vec4 &getArg1()
+ {
+ return start;
+ }
+ typedef Vec4 type1;
+ inline Vec4 &getArg2()
+ {
+ return end;
+ }
+ typedef Vec4 type2;
+ inline S &getArg3()
+ {
+ return value;
+ }
+ typedef S type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSetRegion4d ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ
+ << " "
+ " t "
+ << minT << " - " << maxT,
+ 4);
+ };
+ // Parallelize over the outermost nontrivial axis (t, else z, else y).
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ if (maxT > 1) {
+ for (int t = __r.begin(); t != (int)__r.end(); t++)
+ for (int k = 0; k < maxZ; k++)
+ for (int j = 0; j < maxY; j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, dst, start, end, value);
+ }
+ else if (maxZ > 1) {
+ const int t = 0;
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < maxY; j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, dst, start, end, value);
+ }
+ else {
+ const int t = 0;
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, dst, start, end, value);
+ }
+ }
+ void run()
+ {
+ if (maxT > 1) {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minT, maxT), *this);
+ }
+ else if (maxZ > 1) {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ }
+ else {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ }
+ Grid4d<S> &dst;
+ Vec4 start;
+ Vec4 end;
+ S value;
+};
+//! simple init functions in 4d
+//! Python-exposed plugin: fill the box [start, end] of a Real grid with 'value'.
+void setRegion4d(Grid4d<Real> &dst, Vec4 start, Vec4 end, Real value)
+{
+ knSetRegion4d<Real>(dst, start, end, value);
+}
+// Generated Python wrapper.
+static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setRegion4d", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid4d<Real> &dst = *_args.getPtr<Grid4d<Real>>("dst", 0, &_lock);
+ Vec4 start = _args.get<Vec4>("start", 1, &_lock);
+ Vec4 end = _args.get<Vec4>("end", 2, &_lock);
+ Real value = _args.get<Real>("value", 3, &_lock);
+ _retval = getPyNone();
+ setRegion4d(dst, start, end, value);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setRegion4d", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setRegion4d", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setRegion4d("", "setRegion4d", _W_6);
+extern "C" {
+void PbRegister_setRegion4d()
+{
+ KEEP_UNUSED(_RP_setRegion4d);
+}
+}
+
+//! simple init functions in 4d, vec4
+//! Python-exposed plugin: fill the box [start, end] of a Vec4 grid with 'value'.
+void setRegion4dVec4(Grid4d<Vec4> &dst, Vec4 start, Vec4 end, Vec4 value)
+{
+ knSetRegion4d<Vec4>(dst, start, end, value);
+}
+// Generated Python wrapper.
+static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setRegion4dVec4", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid4d<Vec4> &dst = *_args.getPtr<Grid4d<Vec4>>("dst", 0, &_lock);
+ Vec4 start = _args.get<Vec4>("start", 1, &_lock);
+ Vec4 end = _args.get<Vec4>("end", 2, &_lock);
+ Vec4 value = _args.get<Vec4>("value", 3, &_lock);
+ _retval = getPyNone();
+ setRegion4dVec4(dst, start, end, value);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setRegion4dVec4", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setRegion4dVec4", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setRegion4dVec4("", "setRegion4dVec4", _W_7);
+extern "C" {
+void PbRegister_setRegion4dVec4()
+{
+ KEEP_UNUSED(_RP_setRegion4dVec4);
+}
+}
+
+//! slow helper to visualize tests, get a 3d slice of a 4d grid
+//! Copies the t = srct slice of 'src' into 'dst'. Serial triple loop;
+//! silently does nothing if srct is out of range, and skips cells that
+//! do not exist in 'dst' (the two grids may differ in x/y/z resolution).
+void getSliceFrom4d(Grid4d<Real> &src, int srct, Grid<Real> &dst)
+{
+ const int bnd = 0;
+ if (!src.isInBounds(Vec4i(bnd, bnd, bnd, srct)))
+ return;
+
+ for (int k = bnd; k < src.getSizeZ() - bnd; k++)
+ for (int j = bnd; j < src.getSizeY() - bnd; j++)
+ for (int i = bnd; i < src.getSizeX() - bnd; i++) {
+ if (!dst.isInBounds(Vec3i(i, j, k)))
+ continue;
+ dst(i, j, k) = src(i, j, k, srct);
+ }
+}
+// Generated Python wrapper.
+static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "getSliceFrom4d", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid4d<Real> &src = *_args.getPtr<Grid4d<Real>>("src", 0, &_lock);
+ int srct = _args.get<int>("srct", 1, &_lock);
+ Grid<Real> &dst = *_args.getPtr<Grid<Real>>("dst", 2, &_lock);
+ _retval = getPyNone();
+ getSliceFrom4d(src, srct, dst);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "getSliceFrom4d", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("getSliceFrom4d", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_getSliceFrom4d("", "getSliceFrom4d", _W_8);
+extern "C" {
+void PbRegister_getSliceFrom4d()
+{
+ KEEP_UNUSED(_RP_getSliceFrom4d);
+}
+}
+
+//! slow helper to visualize tests, get a 3d slice of a 4d vec4 grid
+//! Copies the t = srct slice of 'src' into 'dst': components 0..2 of each
+//! Vec4 go into the Vec3 grid; component 3 goes into the optional Real grid
+//! 'dstt' if one is given. Serial; out-of-range srct is a silent no-op.
+void getSliceFrom4dVec(Grid4d<Vec4> &src, int srct, Grid<Vec3> &dst, Grid<Real> *dstt = NULL)
+{
+ const int bnd = 0;
+ if (!src.isInBounds(Vec4i(bnd, bnd, bnd, srct)))
+ return;
+
+ for (int k = bnd; k < src.getSizeZ() - bnd; k++)
+ for (int j = bnd; j < src.getSizeY() - bnd; j++)
+ for (int i = bnd; i < src.getSizeX() - bnd; i++) {
+ if (!dst.isInBounds(Vec3i(i, j, k)))
+ continue;
+ for (int c = 0; c < 3; ++c)
+ dst(i, j, k)[c] = src(i, j, k, srct)[c];
+ if (dstt)
+ (*dstt)(i, j, k) = src(i, j, k, srct)[3];
+ }
+}
+// Generated Python wrapper; 'dstt' is an optional argument (default NULL).
+static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "getSliceFrom4dVec", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid4d<Vec4> &src = *_args.getPtr<Grid4d<Vec4>>("src", 0, &_lock);
+ int srct = _args.get<int>("srct", 1, &_lock);
+ Grid<Vec3> &dst = *_args.getPtr<Grid<Vec3>>("dst", 2, &_lock);
+ Grid<Real> *dstt = _args.getPtrOpt<Grid<Real>>("dstt", 3, NULL, &_lock);
+ _retval = getPyNone();
+ getSliceFrom4dVec(src, srct, dst, dstt);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "getSliceFrom4dVec", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("getSliceFrom4dVec", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_getSliceFrom4dVec("", "getSliceFrom4dVec", _W_9);
+extern "C" {
+void PbRegister_getSliceFrom4dVec()
+{
+ KEEP_UNUSED(_RP_getSliceFrom4dVec);
+}
+}
+
+//******************************************************************************
+// interpolation
+
+//! same as in grid.h , but takes an additional optional "desired" size
+static inline void gridFactor4d(
+ Vec4 s1, Vec4 s2, Vec4 optSize, Vec4 scale, Vec4 &srcFac, Vec4 &retOff)  // s1=source size, s2=target size; srcFac is output, retOff is in/out (offset in, transformed offset out)
+{
+ for (int c = 0; c < 4; c++) {
+ if (optSize[c] > 0.) {  // a positive "desired" size component overrides the target size for that axis
+ s2[c] = optSize[c];
+ }
+ }
+ srcFac = calcGridSizeFactor4d(s1, s2) / scale;  // per-axis source/target ratio, adjusted by user scale
+ retOff = -retOff * srcFac + srcFac * 0.5;  // map the offset into source coordinates and shift to cell centers
+}
+
+//! interpolate 4d grid from one size to another size
+// real valued offsets & scale
+
+template<class S> struct knInterpol4d : public KernelBase {  // generated TBB kernel: for every target cell, sample source at the scaled/offset position
+ knInterpol4d(Grid4d<S> &target, Grid4d<S> &source, const Vec4 &srcFac, const Vec4 &offset)
+ : KernelBase(&target, 0), target(target), source(source), srcFac(srcFac), offset(offset)
+ {
+ runMessage();
+ run();  // kernel executes immediately on construction
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ int t,
+ Grid4d<S> &target,
+ Grid4d<S> &source,
+ const Vec4 &srcFac,
+ const Vec4 &offset) const
+ {
+ Vec4 pos = Vec4(i, j, k, t) * srcFac + offset;  // map target cell index into source coordinates
+ target(i, j, k, t) = source.getInterpolated(pos);  // linear interpolation in the 4d source grid
+ }
+ inline Grid4d<S> &getArg0()
+ {
+ return target;
+ }
+ typedef Grid4d<S> type0;
+ inline Grid4d<S> &getArg1()
+ {
+ return source;
+ }
+ typedef Grid4d<S> type1;
+ inline const Vec4 &getArg2()
+ {
+ return srcFac;
+ }
+ typedef Vec4 type2;
+ inline const Vec4 &getArg3()
+ {
+ return offset;
+ }
+ typedef Vec4 type3;
+ void runMessage()  // debug output of the kernel's iteration range (verbosity levels 3/4)
+ {
+ debMsg("Executing kernel knInterpol4d ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ
+ << " "
+ " t "
+ << minT << " - " << maxT,
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const  // TBB body; the range __r covers the outermost parallelized axis chosen in run()
+ {
+ if (maxT > 1) {  // true 4d grid: parallelize over t
+ for (int t = __r.begin(); t != (int)__r.end(); t++)
+ for (int k = 0; k < maxZ; k++)
+ for (int j = 0; j < maxY; j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, target, source, srcFac, offset);
+ }
+ else if (maxZ > 1) {  // degenerate t axis: parallelize over z
+ const int t = 0;
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < maxY; j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, target, source, srcFac, offset);
+ }
+ else {  // 2d case: parallelize over y
+ const int t = 0;
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, target, source, srcFac, offset);
+ }
+ }
+ void run()  // dispatch: pick the outermost axis with extent > 1 for the parallel range
+ {
+ if (maxT > 1) {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minT, maxT), *this);
+ }
+ else if (maxZ > 1) {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ }
+ else {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ }
+ Grid4d<S> &target;
+ Grid4d<S> &source;
+ const Vec4 &srcFac;  // NOTE: stored by reference; safe because the kernel runs inside the constructor
+ const Vec4 &offset;
+};
+//! linearly interpolate data of a 4d grid
+
+void interpolateGrid4d(Grid4d<Real> &target,
+ Grid4d<Real> &source,
+ Vec4 offset = Vec4(0.),
+ Vec4 scale = Vec4(1.),
+ Vec4 size = Vec4(-1.))  // size components <= 0 mean "use the target's own size" (see gridFactor4d)
+{
+ Vec4 srcFac(1.), off2 = offset;
+ gridFactor4d(toVec4(source.getSize()), toVec4(target.getSize()), size, scale, srcFac, off2);  // compute per-axis sampling factor and transformed offset
+ knInterpol4d<Real>(target, source, srcFac, off2);  // kernel runs on construction, filling target
+}
+static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // auto-generated Python binding for interpolateGrid4d
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "interpolateGrid4d", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid4d<Real> &target = *_args.getPtr<Grid4d<Real>>("target", 0, &_lock);
+ Grid4d<Real> &source = *_args.getPtr<Grid4d<Real>>("source", 1, &_lock);
+ Vec4 offset = _args.getOpt<Vec4>("offset", 2, Vec4(0.), &_lock);  // defaults mirror the C++ declaration
+ Vec4 scale = _args.getOpt<Vec4>("scale", 3, Vec4(1.), &_lock);
+ Vec4 size = _args.getOpt<Vec4>("size", 4, Vec4(-1.), &_lock);
+ _retval = getPyNone();
+ interpolateGrid4d(target, source, offset, scale, size);
+ _args.check();  // verify that all passed arguments were consumed
+ }
+ pbFinalizePlugin(parent, "interpolateGrid4d", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("interpolateGrid4d", e.what());  // translate C++ exception into a Python error
+ return 0;
+ }
+}
+static const Pb::Register _RP_interpolateGrid4d("", "interpolateGrid4d", _W_10);  // static-init registration with the Python registry
+extern "C" {
+void PbRegister_interpolateGrid4d()
+{
+ KEEP_UNUSED(_RP_interpolateGrid4d);  // force the linker to keep the registration object
+}
+}
+
+//! linearly interpolate vec4 data of a 4d grid
+
+void interpolateGrid4dVec(Grid4d<Vec4> &target,
+ Grid4d<Vec4> &source,
+ Vec4 offset = Vec4(0.),
+ Vec4 scale = Vec4(1.),
+ Vec4 size = Vec4(-1.))  // size components <= 0 mean "use the target's own size" (see gridFactor4d)
+{
+ Vec4 srcFac(1.), off2 = offset;
+ gridFactor4d(toVec4(source.getSize()), toVec4(target.getSize()), size, scale, srcFac, off2);  // compute per-axis sampling factor and transformed offset
+ knInterpol4d<Vec4>(target, source, srcFac, off2);  // same kernel as the scalar variant, instantiated for Vec4
+}
+static PyObject *_W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // auto-generated Python binding for interpolateGrid4dVec
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "interpolateGrid4dVec", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid4d<Vec4> &target = *_args.getPtr<Grid4d<Vec4>>("target", 0, &_lock);
+ Grid4d<Vec4> &source = *_args.getPtr<Grid4d<Vec4>>("source", 1, &_lock);
+ Vec4 offset = _args.getOpt<Vec4>("offset", 2, Vec4(0.), &_lock);  // defaults mirror the C++ declaration
+ Vec4 scale = _args.getOpt<Vec4>("scale", 3, Vec4(1.), &_lock);
+ Vec4 size = _args.getOpt<Vec4>("size", 4, Vec4(-1.), &_lock);
+ _retval = getPyNone();
+ interpolateGrid4dVec(target, source, offset, scale, size);
+ _args.check();  // verify that all passed arguments were consumed
+ }
+ pbFinalizePlugin(parent, "interpolateGrid4dVec", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("interpolateGrid4dVec", e.what());  // translate C++ exception into a Python error
+ return 0;
+ }
+}
+static const Pb::Register _RP_interpolateGrid4dVec("", "interpolateGrid4dVec", _W_11);  // static-init registration with the Python registry
+extern "C" {
+void PbRegister_interpolateGrid4dVec()
+{
+ KEEP_UNUSED(_RP_interpolateGrid4dVec);  // force the linker to keep the registration object
+}
+}
+
+// explicit instantiation
+template class Grid4d<int>;  // instantiate the template for every value type exposed through the Python bindings
+template class Grid4d<Real>;
+template class Grid4d<Vec3>;
+template class Grid4d<Vec4>;
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/grid4d.h b/extern/mantaflow/preprocessed/grid4d.h
new file mode 100644
index 00000000000..c3a98788da3
--- /dev/null
+++ b/extern/mantaflow/preprocessed/grid4d.h
@@ -0,0 +1,1558 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Grid representation
+ *
+ ******************************************************************************/
+
+#ifndef _GRID4D_H
+#define _GRID4D_H
+
+#include "manta.h"
+#include "vectorbase.h"
+#include "vector4d.h"
+#include "kernel.h"
+
+namespace Manta {
+
+//! Base class for all grids
+class Grid4dBase : public PbClass {  // non-templated size/stride/type state shared by all 4d grids; _W_* members are generated Python bindings
+ public:
+ enum Grid4dType { TypeNone = 0, TypeReal = 1, TypeInt = 2, TypeVec3 = 4, TypeVec4 = 8 };  // bitmask identifying the stored value type
+
+ Grid4dBase(FluidSolver *parent);
+ static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python constructor binding
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)  // re-initialization from Python: drop the previous C++ object first
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "Grid4dBase::Grid4dBase", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ obj = new Grid4dBase(parent);
+ obj->registerObject(_self, &_args);  // tie the new C++ object to the Python-side instance
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "Grid4dBase::Grid4dBase", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4dBase::Grid4dBase", e.what());  // translate C++ exception into a Python error
+ return -1;
+ }
+ }
+
+ //! Get the grids X dimension
+ inline int getSizeX() const
+ {
+ return mSize.x;
+ }
+ static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for getSizeX
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4dBase *pbo = dynamic_cast<Grid4dBase *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4dBase::getSizeX", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getSizeX());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4dBase::getSizeX", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4dBase::getSizeX", e.what());
+ return 0;
+ }
+ }
+
+ //! Get the grids Y dimension
+ inline int getSizeY() const
+ {
+ return mSize.y;
+ }
+ static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for getSizeY
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4dBase *pbo = dynamic_cast<Grid4dBase *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4dBase::getSizeY", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getSizeY());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4dBase::getSizeY", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4dBase::getSizeY", e.what());
+ return 0;
+ }
+ }
+
+ //! Get the grids Z dimension
+ inline int getSizeZ() const
+ {
+ return mSize.z;
+ }
+ static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for getSizeZ
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4dBase *pbo = dynamic_cast<Grid4dBase *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4dBase::getSizeZ", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getSizeZ());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4dBase::getSizeZ", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4dBase::getSizeZ", e.what());
+ return 0;
+ }
+ }
+
+ //! Get the grids T dimension
+ inline int getSizeT() const
+ {
+ return mSize.t;
+ }
+ static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for getSizeT
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4dBase *pbo = dynamic_cast<Grid4dBase *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4dBase::getSizeT", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getSizeT());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4dBase::getSizeT", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4dBase::getSizeT", e.what());
+ return 0;
+ }
+ }
+
+ //! Get the grids dimensions
+ inline Vec4i getSize() const
+ {
+ return mSize;
+ }
+ static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for getSize
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4dBase *pbo = dynamic_cast<Grid4dBase *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4dBase::getSize", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getSize());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4dBase::getSize", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4dBase::getSize", e.what());
+ return 0;
+ }
+ }
+
+ //! Get Stride in X dimension
+ inline IndexInt getStrideX() const
+ {
+ return 1;  // data is laid out x-contiguous
+ }
+ //! Get Stride in Y dimension
+ inline IndexInt getStrideY() const
+ {
+ return mSize.x;
+ }
+ //! Get Stride in Z dimension
+ inline IndexInt getStrideZ() const
+ {
+ return mStrideZ;  // precomputed, see member comment below
+ }
+ //! Get Stride in T dimension
+ inline IndexInt getStrideT() const
+ {
+ return mStrideT;
+ }
+
+ inline Real getDx()  // cell size in world units
+ {
+ return mDx;
+ }
+
+ //! Check if indices are within bounds, otherwise error (should only be called when debugging)
+ inline void checkIndex(int i, int j, int k, int t) const;
+ //! Check if indices are within bounds, otherwise error (should only be called when debugging)
+ inline void checkIndex(IndexInt idx) const;
+ //! Check if index is within given boundaries
+ inline bool isInBounds(const Vec4i &p, int bnd) const;
+ //! Check if index is within given boundaries
+ inline bool isInBounds(const Vec4i &p) const;
+ //! Check if index is within given boundaries
+ inline bool isInBounds(const Vec4 &p, int bnd = 0) const  // real-valued position: truncates to integer indices first
+ {
+ return isInBounds(toVec4i(p), bnd);
+ }
+ //! Check if linear index is in the range of the array
+ inline bool isInBounds(IndexInt idx) const;
+
+ //! Get the type of grid
+ inline Grid4dType getType() const
+ {
+ return mType;
+ }
+ //! Check dimensionality
+ inline bool is3D() const  // always true: a 4d grid is treated as (at least) 3d by generic code
+ {
+ return true;
+ }
+ static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for is3D
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4dBase *pbo = dynamic_cast<Grid4dBase *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4dBase::is3D", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->is3D());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4dBase::is3D", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4dBase::is3D", e.what());
+ return 0;
+ }
+ }
+
+ inline bool is4D() const  // always true for this class hierarchy
+ {
+ return true;
+ }
+ static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for is4D
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4dBase *pbo = dynamic_cast<Grid4dBase *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4dBase::is4D", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->is4D());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4dBase::is4D", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4dBase::is4D", e.what());
+ return 0;
+ }
+ }
+
+ //! 3d compatibility
+ inline bool isInBounds(int i, int j, int k, int t, int bnd) const
+ {
+ return isInBounds(Vec4i(i, j, k, t), bnd);
+ }
+
+ //! Get index into the data
+ inline IndexInt index(int i, int j, int k, int t) const  // linearize (i,j,k,t) using precomputed strides; bounds-checked only in debug builds
+ {
+ DEBUG_ONLY(checkIndex(i, j, k, t));
+ return (IndexInt)i + (IndexInt)mSize.x * j + (IndexInt)mStrideZ * k + (IndexInt)mStrideT * t;
+ }
+ //! Get index into the data
+ inline IndexInt index(const Vec4i &pos) const
+ {
+ DEBUG_ONLY(checkIndex(pos.x, pos.y, pos.z, pos.t));
+ return (IndexInt)pos.x + (IndexInt)mSize.x * pos.y + (IndexInt)mStrideZ * pos.z +
+ (IndexInt)mStrideT * pos.t;
+ }
+
+ protected:
+ Grid4dType mType;  // runtime tag for the stored value type
+ Vec4i mSize;  // grid dimensions (x, y, z, t)
+ Real mDx;  // cell size
+ // precomputed Z,T shift: to ensure 2D compatibility, always use this instead of sx*sy !
+ IndexInt mStrideZ;
+ IndexInt mStrideT;
+ public:
+ PbArgs _args;  // scratch argument storage used by the generated Python bindings
+}
+#define _C_Grid4dBase
+;
+
+//! Grid class
+
+template<class T> class Grid4d : public Grid4dBase {  // typed 4d grid storing T per cell; _W_* members are generated Python bindings
+ public:
+ //! init new grid, values are set to zero
+ Grid4d(FluidSolver *parent, bool show = true);
+ static int _W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python constructor binding
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)  // re-initialization from Python: drop the previous C++ object first
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "Grid4d::Grid4d", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ bool show = _args.getOpt<bool>("show", 1, true, &_lock);
+ obj = new Grid4d(parent, show);
+ obj->registerObject(_self, &_args);  // tie the new C++ object to the Python-side instance
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "Grid4d::Grid4d", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::Grid4d", e.what());
+ return -1;
+ }
+ }
+
+ //! create new & copy content from another grid
+ Grid4d(const Grid4d<T> &a);
+ //! return memory to solver
+ virtual ~Grid4d();
+
+ typedef T BASETYPE;  // value type, used by generic kernel code
+ typedef Grid4dBase BASETYPE_GRID;
+
+ void save(std::string name);
+ static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for save
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::save", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ std::string name = _args.get<std::string>("name", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->save(name);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::save", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::save", e.what());
+ return 0;
+ }
+ }
+
+ void load(std::string name);
+ static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for load
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::load", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ std::string name = _args.get<std::string>("name", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->load(name);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::load", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::load", e.what());
+ return 0;
+ }
+ }
+
+ //! set all cells to zero
+ void clear();
+ static PyObject *_W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for clear
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::clear", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->clear();
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::clear", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::clear", e.what());
+ return 0;
+ }
+ }
+
+ //! all kinds of access functions, use grid(), grid[] or grid.get()
+ //! access data
+ inline T get(int i, int j, int k, int t) const
+ {
+ return mData[index(i, j, k, t)];
+ }
+ //! access data
+ inline T &get(int i, int j, int k, int t)
+ {
+ return mData[index(i, j, k, t)];
+ }
+ //! access data
+ inline T get(IndexInt idx) const  // linear-index access; bounds-checked only in debug builds
+ {
+ DEBUG_ONLY(checkIndex(idx));
+ return mData[idx];
+ }
+ //! access data
+ inline T get(const Vec4i &pos) const
+ {
+ return mData[index(pos)];
+ }
+ //! access data
+ inline T &operator()(int i, int j, int k, int t)
+ {
+ return mData[index(i, j, k, t)];
+ }
+ //! access data
+ inline T operator()(int i, int j, int k, int t) const
+ {
+ return mData[index(i, j, k, t)];
+ }
+ //! access data
+ inline T &operator()(IndexInt idx)
+ {
+ DEBUG_ONLY(checkIndex(idx));
+ return mData[idx];
+ }
+ //! access data
+ inline T operator()(IndexInt idx) const
+ {
+ DEBUG_ONLY(checkIndex(idx));
+ return mData[idx];
+ }
+ //! access data
+ inline T &operator()(const Vec4i &pos)
+ {
+ return mData[index(pos)];
+ }
+ //! access data
+ inline T operator()(const Vec4i &pos) const
+ {
+ return mData[index(pos)];
+ }
+ //! access data
+ inline T &operator[](IndexInt idx)
+ {
+ DEBUG_ONLY(checkIndex(idx));
+ return mData[idx];
+ }
+ //! access data
+ inline const T operator[](IndexInt idx) const
+ {
+ DEBUG_ONLY(checkIndex(idx));
+ return mData[idx];
+ }
+
+ // interpolated access
+ inline T getInterpolated(const Vec4 &pos) const  // 4d linear interpolation at a real-valued position (see interpol4d in util)
+ {
+ return interpol4d<T>(mData, mSize, mStrideZ, mStrideT, pos);
+ }
+
+ // assignment / copy
+
+ //! warning - do not use "=" for grids in python, this copies the reference! not the grid
+ //! content...
+ // Grid4d<T>& operator=(const Grid4d<T>& a);
+ //! copy content from other grid (use this one instead of operator= !)
+ Grid4d<T> &copyFrom(const Grid4d<T> &a, bool copyType = true);
+ static PyObject *_W_12(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for copyFrom
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::copyFrom", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid4d<T> &a = *_args.getPtr<Grid4d<T>>("a", 0, &_lock);
+ bool copyType = _args.getOpt<bool>("copyType", 1, true, &_lock);
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->copyFrom(a, copyType));
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::copyFrom", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::copyFrom", e.what());
+ return 0;
+ }
+ }
+ // old: { *this = a; }
+
+ // helper functions to work with grids in scene files
+
+ //! add/subtract other grid
+ void add(const Grid4d<T> &a);
+ static PyObject *_W_13(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for add
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::add", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid4d<T> &a = *_args.getPtr<Grid4d<T>>("a", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->add(a);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::add", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::add", e.what());
+ return 0;
+ }
+ }
+
+ void sub(const Grid4d<T> &a);
+ static PyObject *_W_14(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for sub
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::sub", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid4d<T> &a = *_args.getPtr<Grid4d<T>>("a", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->sub(a);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::sub", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::sub", e.what());
+ return 0;
+ }
+ }
+
+ //! set all cells to constant value
+ void setConst(T s);
+ static PyObject *_W_15(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for setConst
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::setConst", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T s = _args.get<T>("s", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->setConst(s);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::setConst", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::setConst", e.what());
+ return 0;
+ }
+ }
+
+ //! add constant to all grid cells
+ void addConst(T s);
+ static PyObject *_W_16(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for addConst
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::addConst", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T s = _args.get<T>("s", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->addConst(s);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::addConst", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::addConst", e.what());
+ return 0;
+ }
+ }
+
+ //! add scaled other grid to current one (note, only "Real" factor, "T" type not supported here!)
+ void addScaled(const Grid4d<T> &a, const T &factor);
+ static PyObject *_W_17(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for addScaled
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::addScaled", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid4d<T> &a = *_args.getPtr<Grid4d<T>>("a", 0, &_lock);
+ const T &factor = *_args.getPtr<T>("factor", 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->addScaled(a, factor);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::addScaled", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::addScaled", e.what());
+ return 0;
+ }
+ }
+
+ //! multiply contents of grid
+ void mult(const Grid4d<T> &a);
+ static PyObject *_W_18(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for mult
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::mult", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid4d<T> &a = *_args.getPtr<Grid4d<T>>("a", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->mult(a);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::mult", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::mult", e.what());
+ return 0;
+ }
+ }
+
+ //! multiply each cell by a constant scalar value
+ void multConst(T s);
+ static PyObject *_W_19(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for multConst
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::multConst", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T s = _args.get<T>("s", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->multConst(s);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::multConst", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::multConst", e.what());
+ return 0;
+ }
+ }
+
+ //! clamp content to range (for vec3, clamps each component separately)
+ void clamp(Real min, Real max);
+ static PyObject *_W_20(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for clamp
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::clamp", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Real min = _args.get<Real>("min", 0, &_lock);
+ Real max = _args.get<Real>("max", 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->clamp(min, max);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::clamp", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::clamp", e.what());
+ return 0;
+ }
+ }
+
+ // common compound operators
+ //! get absolute max value in grid
+ Real getMaxAbs();
+ static PyObject *_W_21(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for getMaxAbs
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::getMaxAbs", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getMaxAbs());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::getMaxAbs", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::getMaxAbs", e.what());
+ return 0;
+ }
+ }
+
+ //! get max value in grid
+ Real getMax();
+ static PyObject *_W_22(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for getMax
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::getMax", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getMax());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::getMax", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::getMax", e.what());
+ return 0;
+ }
+ }
+
+ //! get min value in grid
+ Real getMin();
+ static PyObject *_W_23(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for getMin
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::getMin", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getMin());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::getMin", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::getMin", e.what());
+ return 0;
+ }
+ }
+
+ //! set all boundary cells to constant value (Dirichlet)
+ void setBound(T value, int boundaryWidth = 1);
+ static PyObject *_W_24(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for setBound
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::setBound", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T value = _args.get<T>("value", 0, &_lock);
+ int boundaryWidth = _args.getOpt<int>("boundaryWidth", 1, 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->setBound(value, boundaryWidth);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::setBound", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::setBound", e.what());
+ return 0;
+ }
+ }
+
+ //! set all boundary cells to last inner value (Neumann)
+ void setBoundNeumann(int boundaryWidth = 1);
+ static PyObject *_W_25(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for setBoundNeumann
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::setBoundNeumann", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int boundaryWidth = _args.getOpt<int>("boundaryWidth", 0, 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->setBoundNeumann(boundaryWidth);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::setBoundNeumann", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::setBoundNeumann", e.what());
+ return 0;
+ }
+ }
+
+ //! debugging helper, print grid from Python
+ void printGrid(int zSlice = -1, int tSlice = -1, bool printIndex = false, int bnd = 0);
+ static PyObject *_W_26(PyObject *_self, PyObject *_linargs, PyObject *_kwds)  // Python binding for printGrid
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Grid4d *pbo = dynamic_cast<Grid4d *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Grid4d::printGrid", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int zSlice = _args.getOpt<int>("zSlice", 0, -1, &_lock);
+ int tSlice = _args.getOpt<int>("tSlice", 1, -1, &_lock);
+ bool printIndex = _args.getOpt<bool>("printIndex", 2, false, &_lock);
+ int bnd = _args.getOpt<int>("bnd", 3, 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->printGrid(zSlice, tSlice, printIndex, bnd);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Grid4d::printGrid", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Grid4d::printGrid", e.what());
+ return 0;
+ }
+ }
+
+ // c++ only operators
+ template<class S> Grid4d<T> &operator+=(const Grid4d<S> &a);
+ template<class S> Grid4d<T> &operator+=(const S &a);
+ template<class S> Grid4d<T> &operator-=(const Grid4d<S> &a);
+ template<class S> Grid4d<T> &operator-=(const S &a);
+ template<class S> Grid4d<T> &operator*=(const Grid4d<S> &a);
+ template<class S> Grid4d<T> &operator*=(const S &a);
+ template<class S> Grid4d<T> &operator/=(const Grid4d<S> &a);
+ template<class S> Grid4d<T> &operator/=(const S &a);
+ Grid4d<T> &safeDivide(const Grid4d<T> &a);
+
+ //! Swap data with another grid (no actual data is moved)
+ void swap(Grid4d<T> &other);
+
+ protected:
+ T *mData;  // raw cell storage, indexed via Grid4dBase::index()
+ public:
+ PbArgs _args;  // scratch argument storage used by the generated Python bindings
+}
+#define _C_Grid4d
+;
+
+// Python doesn't know about templates: explicit aliases needed
+
+//! helper to compute grid conversion factor between local coordinates of two grids
+inline Vec4 calcGridSizeFactor4d(Vec4i s1, Vec4i s2)
+{
+ return Vec4(Real(s1[0]) / s2[0], Real(s1[1]) / s2[1], Real(s1[2]) / s2[2], Real(s1[3]) / s2[3]);
+}
+inline Vec4 calcGridSizeFactor4d(Vec4 s1, Vec4 s2)
+{
+ return Vec4(s1[0] / s2[0], s1[1] / s2[1], s1[2] / s2[2], s1[3] / s2[3]);
+}
+
+// prototypes for grid plugins
+void getComponent4d(const Grid4d<Vec4> &src, Grid4d<Real> &dst, int c);
+void setComponent4d(const Grid4d<Real> &src, Grid4d<Vec4> &dst, int c);
+
+//******************************************************************************
+// Implementation of inline functions
+
+inline void Grid4dBase::checkIndex(int i, int j, int k, int t) const
+{
+ if (i < 0 || j < 0 || i >= mSize.x || j >= mSize.y || k < 0 || k >= mSize.z || t < 0 ||
+ t >= mSize.t) {
+ std::ostringstream s;
+ s << "Grid4d " << mName << " dim " << mSize << " : index " << i << "," << j << "," << k << ","
+ << t << " out of bound ";
+ errMsg(s.str());
+ }
+}
+
+inline void Grid4dBase::checkIndex(IndexInt idx) const
+{
+ if (idx < 0 || idx >= mSize.x * mSize.y * mSize.z * mSize.t) {
+ std::ostringstream s;
+ s << "Grid4d " << mName << " dim " << mSize << " : index " << idx << " out of bound ";
+ errMsg(s.str());
+ }
+}
+
+bool Grid4dBase::isInBounds(const Vec4i &p) const
+{
+ return (p.x >= 0 && p.y >= 0 && p.z >= 0 && p.t >= 0 && p.x < mSize.x && p.y < mSize.y &&
+ p.z < mSize.z && p.t < mSize.t);
+}
+
+bool Grid4dBase::isInBounds(const Vec4i &p, int bnd) const
+{
+ bool ret = (p.x >= bnd && p.y >= bnd && p.x < mSize.x - bnd && p.y < mSize.y - bnd);
+ ret &= (p.z >= bnd && p.z < mSize.z - bnd);
+ ret &= (p.t >= bnd && p.t < mSize.t - bnd);
+ return ret;
+}
+//! Check if linear index is in the range of the array
+bool Grid4dBase::isInBounds(IndexInt idx) const
+{
+ if (idx < 0 || idx >= mSize.x * mSize.y * mSize.z * mSize.t) {
+ return false;
+ }
+ return true;
+}
+
+// note - ugly, mostly copied from normal GRID!
+
+template<class T, class S> struct Grid4dAdd : public KernelBase {
+ Grid4dAdd(Grid4d<T> &me, const Grid4d<S> &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid4d<T> &me, const Grid4d<S> &other) const
+ {
+ me[idx] += other[idx];
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline const Grid4d<S> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid4d<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel Grid4dAdd ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ const Grid4d<S> &other;
+};
+template<class T, class S> struct Grid4dSub : public KernelBase {
+ Grid4dSub(Grid4d<T> &me, const Grid4d<S> &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid4d<T> &me, const Grid4d<S> &other) const
+ {
+ me[idx] -= other[idx];
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline const Grid4d<S> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid4d<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel Grid4dSub ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ const Grid4d<S> &other;
+};
+template<class T, class S> struct Grid4dMult : public KernelBase {
+ Grid4dMult(Grid4d<T> &me, const Grid4d<S> &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid4d<T> &me, const Grid4d<S> &other) const
+ {
+ me[idx] *= other[idx];
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline const Grid4d<S> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid4d<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel Grid4dMult ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ const Grid4d<S> &other;
+};
+template<class T, class S> struct Grid4dDiv : public KernelBase {
+ Grid4dDiv(Grid4d<T> &me, const Grid4d<S> &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid4d<T> &me, const Grid4d<S> &other) const
+ {
+ me[idx] /= other[idx];
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline const Grid4d<S> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid4d<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel Grid4dDiv ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ const Grid4d<S> &other;
+};
+template<class T, class S> struct Grid4dAddScalar : public KernelBase {
+ Grid4dAddScalar(Grid4d<T> &me, const S &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid4d<T> &me, const S &other) const
+ {
+ me[idx] += other;
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline const S &getArg1()
+ {
+ return other;
+ }
+ typedef S type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel Grid4dAddScalar ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ const S &other;
+};
+template<class T, class S> struct Grid4dMultScalar : public KernelBase {
+ Grid4dMultScalar(Grid4d<T> &me, const S &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid4d<T> &me, const S &other) const
+ {
+ me[idx] *= other;
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline const S &getArg1()
+ {
+ return other;
+ }
+ typedef S type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel Grid4dMultScalar ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ const S &other;
+};
+template<class T, class S> struct Grid4dScaledAdd : public KernelBase {
+ Grid4dScaledAdd(Grid4d<T> &me, const Grid4d<T> &other, const S &factor)
+ : KernelBase(&me, 0), me(me), other(other), factor(factor)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid4d<T> &me, const Grid4d<T> &other, const S &factor) const
+ {
+ me[idx] += factor * other[idx];
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline const Grid4d<T> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid4d<T> type1;
+ inline const S &getArg2()
+ {
+ return factor;
+ }
+ typedef S type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel Grid4dScaledAdd ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other, factor);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ const Grid4d<T> &other;
+ const S &factor;
+};
+
+template<class T> struct Grid4dSafeDiv : public KernelBase {
+ Grid4dSafeDiv(Grid4d<T> &me, const Grid4d<T> &other) : KernelBase(&me, 0), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid4d<T> &me, const Grid4d<T> &other) const
+ {
+ me[idx] = safeDivide(me[idx], other[idx]);
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline const Grid4d<T> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid4d<T> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel Grid4dSafeDiv ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ const Grid4d<T> &other;
+};
+template<class T> struct Grid4dSetConst : public KernelBase {
+ Grid4dSetConst(Grid4d<T> &me, T value) : KernelBase(&me, 0), me(me), value(value)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid4d<T> &me, T value) const
+ {
+ me[idx] = value;
+ }
+ inline Grid4d<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid4d<T> type0;
+ inline T &getArg1()
+ {
+ return value;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel Grid4dSetConst ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, value);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid4d<T> &me;
+ T value;
+};
+
+template<class T> template<class S> Grid4d<T> &Grid4d<T>::operator+=(const Grid4d<S> &a)
+{
+ Grid4dAdd<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid4d<T> &Grid4d<T>::operator+=(const S &a)
+{
+ Grid4dAddScalar<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid4d<T> &Grid4d<T>::operator-=(const Grid4d<S> &a)
+{
+ Grid4dSub<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid4d<T> &Grid4d<T>::operator-=(const S &a)
+{
+ Grid4dAddScalar<T, S>(*this, -a);
+ return *this;
+}
+template<class T> template<class S> Grid4d<T> &Grid4d<T>::operator*=(const Grid4d<S> &a)
+{
+ Grid4dMult<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid4d<T> &Grid4d<T>::operator*=(const S &a)
+{
+ Grid4dMultScalar<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid4d<T> &Grid4d<T>::operator/=(const Grid4d<S> &a)
+{
+ Grid4dDiv<T, S>(*this, a);
+ return *this;
+}
+template<class T> template<class S> Grid4d<T> &Grid4d<T>::operator/=(const S &a)
+{
+ S rez((S)1.0 / a);
+ Grid4dMultScalar<T, S>(*this, rez);
+ return *this;
+}
+
+//******************************************************************************
+// Other helper functions
+
+inline Vec4 getGradient4d(const Grid4d<Real> &data, int i, int j, int k, int t)
+{
+ Vec4 v;
+ if (i > data.getSizeX() - 2)
+ i = data.getSizeX() - 2;
+ if (j > data.getSizeY() - 2)
+ j = data.getSizeY() - 2;
+ if (k > data.getSizeZ() - 2)
+ k = data.getSizeZ() - 2;
+ if (t > data.getSizeT() - 2)
+ t = data.getSizeT() - 2;
+ if (i < 1)
+ i = 1;
+ if (j < 1)
+ j = 1;
+ if (k < 1)
+ k = 1;
+ if (t < 1)
+ t = 1;
+ v = Vec4(data(i + 1, j, k, t) - data(i - 1, j, k, t),
+ data(i, j + 1, k, t) - data(i, j - 1, k, t),
+ data(i, j, k + 1, t) - data(i, j, k - 1, t),
+ data(i, j, k, t + 1) - data(i, j, k, t - 1));
+ return v;
+}
+
+template<class S> struct KnInterpolateGrid4dTempl : public KernelBase {
+ KnInterpolateGrid4dTempl(Grid4d<S> &target,
+ Grid4d<S> &source,
+ const Vec4 &sourceFactor,
+ Vec4 offset)
+ : KernelBase(&target, 0),
+ target(target),
+ source(source),
+ sourceFactor(sourceFactor),
+ offset(offset)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ int t,
+ Grid4d<S> &target,
+ Grid4d<S> &source,
+ const Vec4 &sourceFactor,
+ Vec4 offset) const
+ {
+ Vec4 pos = Vec4(i, j, k, t) * sourceFactor + offset;
+ if (!source.is3D())
+ pos[2] = 0.; // allow 2d -> 3d
+ if (!source.is4D())
+ pos[3] = 0.; // allow 3d -> 4d
+ target(i, j, k, t) = source.getInterpolated(pos);
+ }
+ inline Grid4d<S> &getArg0()
+ {
+ return target;
+ }
+ typedef Grid4d<S> type0;
+ inline Grid4d<S> &getArg1()
+ {
+ return source;
+ }
+ typedef Grid4d<S> type1;
+ inline const Vec4 &getArg2()
+ {
+ return sourceFactor;
+ }
+ typedef Vec4 type2;
+ inline Vec4 &getArg3()
+ {
+ return offset;
+ }
+ typedef Vec4 type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnInterpolateGrid4dTempl ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ
+ << " "
+ " t "
+ << minT << " - " << maxT,
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ if (maxT > 1) {
+ for (int t = __r.begin(); t != (int)__r.end(); t++)
+ for (int k = 0; k < maxZ; k++)
+ for (int j = 0; j < maxY; j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, target, source, sourceFactor, offset);
+ }
+ else if (maxZ > 1) {
+ const int t = 0;
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < maxY; j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, target, source, sourceFactor, offset);
+ }
+ else {
+ const int t = 0;
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < maxX; i++)
+ op(i, j, k, t, target, source, sourceFactor, offset);
+ }
+ }
+ void run()
+ {
+ if (maxT > 1) {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minT, maxT), *this);
+ }
+ else if (maxZ > 1) {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ }
+ else {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ }
+ Grid4d<S> &target;
+ Grid4d<S> &source;
+ const Vec4 &sourceFactor;
+ Vec4 offset;
+};
+
+} // namespace Manta
+#endif
diff --git a/extern/mantaflow/preprocessed/grid4d.h.reg.cpp b/extern/mantaflow/preprocessed/grid4d.h.reg.cpp
new file mode 100644
index 00000000000..7e490221ee6
--- /dev/null
+++ b/extern/mantaflow/preprocessed/grid4d.h.reg.cpp
@@ -0,0 +1,204 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "grid4d.h"
+namespace Manta {
+#ifdef _C_Grid4d
+static const Pb::Register _R_12("Grid4d<int>", "Grid4d<int>", "Grid4dBase");
+template<> const char *Namify<Grid4d<int>>::S = "Grid4d<int>";
+static const Pb::Register _R_13("Grid4d<int>", "Grid4d", Grid4d<int>::_W_8);
+static const Pb::Register _R_14("Grid4d<int>", "save", Grid4d<int>::_W_9);
+static const Pb::Register _R_15("Grid4d<int>", "load", Grid4d<int>::_W_10);
+static const Pb::Register _R_16("Grid4d<int>", "clear", Grid4d<int>::_W_11);
+static const Pb::Register _R_17("Grid4d<int>", "copyFrom", Grid4d<int>::_W_12);
+static const Pb::Register _R_18("Grid4d<int>", "add", Grid4d<int>::_W_13);
+static const Pb::Register _R_19("Grid4d<int>", "sub", Grid4d<int>::_W_14);
+static const Pb::Register _R_20("Grid4d<int>", "setConst", Grid4d<int>::_W_15);
+static const Pb::Register _R_21("Grid4d<int>", "addConst", Grid4d<int>::_W_16);
+static const Pb::Register _R_22("Grid4d<int>", "addScaled", Grid4d<int>::_W_17);
+static const Pb::Register _R_23("Grid4d<int>", "mult", Grid4d<int>::_W_18);
+static const Pb::Register _R_24("Grid4d<int>", "multConst", Grid4d<int>::_W_19);
+static const Pb::Register _R_25("Grid4d<int>", "clamp", Grid4d<int>::_W_20);
+static const Pb::Register _R_26("Grid4d<int>", "getMaxAbs", Grid4d<int>::_W_21);
+static const Pb::Register _R_27("Grid4d<int>", "getMax", Grid4d<int>::_W_22);
+static const Pb::Register _R_28("Grid4d<int>", "getMin", Grid4d<int>::_W_23);
+static const Pb::Register _R_29("Grid4d<int>", "setBound", Grid4d<int>::_W_24);
+static const Pb::Register _R_30("Grid4d<int>", "setBoundNeumann", Grid4d<int>::_W_25);
+static const Pb::Register _R_31("Grid4d<int>", "printGrid", Grid4d<int>::_W_26);
+static const Pb::Register _R_32("Grid4d<Real>", "Grid4d<Real>", "Grid4dBase");
+template<> const char *Namify<Grid4d<Real>>::S = "Grid4d<Real>";
+static const Pb::Register _R_33("Grid4d<Real>", "Grid4d", Grid4d<Real>::_W_8);
+static const Pb::Register _R_34("Grid4d<Real>", "save", Grid4d<Real>::_W_9);
+static const Pb::Register _R_35("Grid4d<Real>", "load", Grid4d<Real>::_W_10);
+static const Pb::Register _R_36("Grid4d<Real>", "clear", Grid4d<Real>::_W_11);
+static const Pb::Register _R_37("Grid4d<Real>", "copyFrom", Grid4d<Real>::_W_12);
+static const Pb::Register _R_38("Grid4d<Real>", "add", Grid4d<Real>::_W_13);
+static const Pb::Register _R_39("Grid4d<Real>", "sub", Grid4d<Real>::_W_14);
+static const Pb::Register _R_40("Grid4d<Real>", "setConst", Grid4d<Real>::_W_15);
+static const Pb::Register _R_41("Grid4d<Real>", "addConst", Grid4d<Real>::_W_16);
+static const Pb::Register _R_42("Grid4d<Real>", "addScaled", Grid4d<Real>::_W_17);
+static const Pb::Register _R_43("Grid4d<Real>", "mult", Grid4d<Real>::_W_18);
+static const Pb::Register _R_44("Grid4d<Real>", "multConst", Grid4d<Real>::_W_19);
+static const Pb::Register _R_45("Grid4d<Real>", "clamp", Grid4d<Real>::_W_20);
+static const Pb::Register _R_46("Grid4d<Real>", "getMaxAbs", Grid4d<Real>::_W_21);
+static const Pb::Register _R_47("Grid4d<Real>", "getMax", Grid4d<Real>::_W_22);
+static const Pb::Register _R_48("Grid4d<Real>", "getMin", Grid4d<Real>::_W_23);
+static const Pb::Register _R_49("Grid4d<Real>", "setBound", Grid4d<Real>::_W_24);
+static const Pb::Register _R_50("Grid4d<Real>", "setBoundNeumann", Grid4d<Real>::_W_25);
+static const Pb::Register _R_51("Grid4d<Real>", "printGrid", Grid4d<Real>::_W_26);
+static const Pb::Register _R_52("Grid4d<Vec3>", "Grid4d<Vec3>", "Grid4dBase");
+template<> const char *Namify<Grid4d<Vec3>>::S = "Grid4d<Vec3>";
+static const Pb::Register _R_53("Grid4d<Vec3>", "Grid4d", Grid4d<Vec3>::_W_8);
+static const Pb::Register _R_54("Grid4d<Vec3>", "save", Grid4d<Vec3>::_W_9);
+static const Pb::Register _R_55("Grid4d<Vec3>", "load", Grid4d<Vec3>::_W_10);
+static const Pb::Register _R_56("Grid4d<Vec3>", "clear", Grid4d<Vec3>::_W_11);
+static const Pb::Register _R_57("Grid4d<Vec3>", "copyFrom", Grid4d<Vec3>::_W_12);
+static const Pb::Register _R_58("Grid4d<Vec3>", "add", Grid4d<Vec3>::_W_13);
+static const Pb::Register _R_59("Grid4d<Vec3>", "sub", Grid4d<Vec3>::_W_14);
+static const Pb::Register _R_60("Grid4d<Vec3>", "setConst", Grid4d<Vec3>::_W_15);
+static const Pb::Register _R_61("Grid4d<Vec3>", "addConst", Grid4d<Vec3>::_W_16);
+static const Pb::Register _R_62("Grid4d<Vec3>", "addScaled", Grid4d<Vec3>::_W_17);
+static const Pb::Register _R_63("Grid4d<Vec3>", "mult", Grid4d<Vec3>::_W_18);
+static const Pb::Register _R_64("Grid4d<Vec3>", "multConst", Grid4d<Vec3>::_W_19);
+static const Pb::Register _R_65("Grid4d<Vec3>", "clamp", Grid4d<Vec3>::_W_20);
+static const Pb::Register _R_66("Grid4d<Vec3>", "getMaxAbs", Grid4d<Vec3>::_W_21);
+static const Pb::Register _R_67("Grid4d<Vec3>", "getMax", Grid4d<Vec3>::_W_22);
+static const Pb::Register _R_68("Grid4d<Vec3>", "getMin", Grid4d<Vec3>::_W_23);
+static const Pb::Register _R_69("Grid4d<Vec3>", "setBound", Grid4d<Vec3>::_W_24);
+static const Pb::Register _R_70("Grid4d<Vec3>", "setBoundNeumann", Grid4d<Vec3>::_W_25);
+static const Pb::Register _R_71("Grid4d<Vec3>", "printGrid", Grid4d<Vec3>::_W_26);
+static const Pb::Register _R_72("Grid4d<Vec4>", "Grid4d<Vec4>", "Grid4dBase");
+template<> const char *Namify<Grid4d<Vec4>>::S = "Grid4d<Vec4>";
+static const Pb::Register _R_73("Grid4d<Vec4>", "Grid4d", Grid4d<Vec4>::_W_8);
+static const Pb::Register _R_74("Grid4d<Vec4>", "save", Grid4d<Vec4>::_W_9);
+static const Pb::Register _R_75("Grid4d<Vec4>", "load", Grid4d<Vec4>::_W_10);
+static const Pb::Register _R_76("Grid4d<Vec4>", "clear", Grid4d<Vec4>::_W_11);
+static const Pb::Register _R_77("Grid4d<Vec4>", "copyFrom", Grid4d<Vec4>::_W_12);
+static const Pb::Register _R_78("Grid4d<Vec4>", "add", Grid4d<Vec4>::_W_13);
+static const Pb::Register _R_79("Grid4d<Vec4>", "sub", Grid4d<Vec4>::_W_14);
+static const Pb::Register _R_80("Grid4d<Vec4>", "setConst", Grid4d<Vec4>::_W_15);
+static const Pb::Register _R_81("Grid4d<Vec4>", "addConst", Grid4d<Vec4>::_W_16);
+static const Pb::Register _R_82("Grid4d<Vec4>", "addScaled", Grid4d<Vec4>::_W_17);
+static const Pb::Register _R_83("Grid4d<Vec4>", "mult", Grid4d<Vec4>::_W_18);
+static const Pb::Register _R_84("Grid4d<Vec4>", "multConst", Grid4d<Vec4>::_W_19);
+static const Pb::Register _R_85("Grid4d<Vec4>", "clamp", Grid4d<Vec4>::_W_20);
+static const Pb::Register _R_86("Grid4d<Vec4>", "getMaxAbs", Grid4d<Vec4>::_W_21);
+static const Pb::Register _R_87("Grid4d<Vec4>", "getMax", Grid4d<Vec4>::_W_22);
+static const Pb::Register _R_88("Grid4d<Vec4>", "getMin", Grid4d<Vec4>::_W_23);
+static const Pb::Register _R_89("Grid4d<Vec4>", "setBound", Grid4d<Vec4>::_W_24);
+static const Pb::Register _R_90("Grid4d<Vec4>", "setBoundNeumann", Grid4d<Vec4>::_W_25);
+static const Pb::Register _R_91("Grid4d<Vec4>", "printGrid", Grid4d<Vec4>::_W_26);
+#endif
+#ifdef _C_Grid4dBase
+static const Pb::Register _R_92("Grid4dBase", "Grid4dBase", "PbClass");
+template<> const char *Namify<Grid4dBase>::S = "Grid4dBase";
+static const Pb::Register _R_93("Grid4dBase", "Grid4dBase", Grid4dBase::_W_0);
+static const Pb::Register _R_94("Grid4dBase", "getSizeX", Grid4dBase::_W_1);
+static const Pb::Register _R_95("Grid4dBase", "getSizeY", Grid4dBase::_W_2);
+static const Pb::Register _R_96("Grid4dBase", "getSizeZ", Grid4dBase::_W_3);
+static const Pb::Register _R_97("Grid4dBase", "getSizeT", Grid4dBase::_W_4);
+static const Pb::Register _R_98("Grid4dBase", "getSize", Grid4dBase::_W_5);
+static const Pb::Register _R_99("Grid4dBase", "is3D", Grid4dBase::_W_6);
+static const Pb::Register _R_100("Grid4dBase", "is4D", Grid4dBase::_W_7);
+#endif
+static const Pb::Register _R_8("Grid4d<int>", "Grid4Int", "");
+static const Pb::Register _R_9("Grid4d<Real>", "Grid4Real", "");
+static const Pb::Register _R_10("Grid4d<Vec3>", "Grid4Vec3", "");
+static const Pb::Register _R_11("Grid4d<Vec4>", "Grid4Vec4", "");
+extern "C" {
+void PbRegister_file_8()
+{
+ KEEP_UNUSED(_R_12);
+ KEEP_UNUSED(_R_13);
+ KEEP_UNUSED(_R_14);
+ KEEP_UNUSED(_R_15);
+ KEEP_UNUSED(_R_16);
+ KEEP_UNUSED(_R_17);
+ KEEP_UNUSED(_R_18);
+ KEEP_UNUSED(_R_19);
+ KEEP_UNUSED(_R_20);
+ KEEP_UNUSED(_R_21);
+ KEEP_UNUSED(_R_22);
+ KEEP_UNUSED(_R_23);
+ KEEP_UNUSED(_R_24);
+ KEEP_UNUSED(_R_25);
+ KEEP_UNUSED(_R_26);
+ KEEP_UNUSED(_R_27);
+ KEEP_UNUSED(_R_28);
+ KEEP_UNUSED(_R_29);
+ KEEP_UNUSED(_R_30);
+ KEEP_UNUSED(_R_31);
+ KEEP_UNUSED(_R_32);
+ KEEP_UNUSED(_R_33);
+ KEEP_UNUSED(_R_34);
+ KEEP_UNUSED(_R_35);
+ KEEP_UNUSED(_R_36);
+ KEEP_UNUSED(_R_37);
+ KEEP_UNUSED(_R_38);
+ KEEP_UNUSED(_R_39);
+ KEEP_UNUSED(_R_40);
+ KEEP_UNUSED(_R_41);
+ KEEP_UNUSED(_R_42);
+ KEEP_UNUSED(_R_43);
+ KEEP_UNUSED(_R_44);
+ KEEP_UNUSED(_R_45);
+ KEEP_UNUSED(_R_46);
+ KEEP_UNUSED(_R_47);
+ KEEP_UNUSED(_R_48);
+ KEEP_UNUSED(_R_49);
+ KEEP_UNUSED(_R_50);
+ KEEP_UNUSED(_R_51);
+ KEEP_UNUSED(_R_52);
+ KEEP_UNUSED(_R_53);
+ KEEP_UNUSED(_R_54);
+ KEEP_UNUSED(_R_55);
+ KEEP_UNUSED(_R_56);
+ KEEP_UNUSED(_R_57);
+ KEEP_UNUSED(_R_58);
+ KEEP_UNUSED(_R_59);
+ KEEP_UNUSED(_R_60);
+ KEEP_UNUSED(_R_61);
+ KEEP_UNUSED(_R_62);
+ KEEP_UNUSED(_R_63);
+ KEEP_UNUSED(_R_64);
+ KEEP_UNUSED(_R_65);
+ KEEP_UNUSED(_R_66);
+ KEEP_UNUSED(_R_67);
+ KEEP_UNUSED(_R_68);
+ KEEP_UNUSED(_R_69);
+ KEEP_UNUSED(_R_70);
+ KEEP_UNUSED(_R_71);
+ KEEP_UNUSED(_R_72);
+ KEEP_UNUSED(_R_73);
+ KEEP_UNUSED(_R_74);
+ KEEP_UNUSED(_R_75);
+ KEEP_UNUSED(_R_76);
+ KEEP_UNUSED(_R_77);
+ KEEP_UNUSED(_R_78);
+ KEEP_UNUSED(_R_79);
+ KEEP_UNUSED(_R_80);
+ KEEP_UNUSED(_R_81);
+ KEEP_UNUSED(_R_82);
+ KEEP_UNUSED(_R_83);
+ KEEP_UNUSED(_R_84);
+ KEEP_UNUSED(_R_85);
+ KEEP_UNUSED(_R_86);
+ KEEP_UNUSED(_R_87);
+ KEEP_UNUSED(_R_88);
+ KEEP_UNUSED(_R_89);
+ KEEP_UNUSED(_R_90);
+ KEEP_UNUSED(_R_91);
+ KEEP_UNUSED(_R_92);
+ KEEP_UNUSED(_R_93);
+ KEEP_UNUSED(_R_94);
+ KEEP_UNUSED(_R_95);
+ KEEP_UNUSED(_R_96);
+ KEEP_UNUSED(_R_97);
+ KEEP_UNUSED(_R_98);
+ KEEP_UNUSED(_R_99);
+ KEEP_UNUSED(_R_100);
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/kernel.cpp b/extern/mantaflow/preprocessed/kernel.cpp
new file mode 100644
index 00000000000..72a5efff795
--- /dev/null
+++ b/extern/mantaflow/preprocessed/kernel.cpp
@@ -0,0 +1,61 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2014 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Function and macros for defining compution kernels over grids
+ *
+ ******************************************************************************/
+
+#include "kernel.h"
+#include "grid.h"
+#include "grid4d.h"
+#include "particle.h"
+
+namespace Manta {
+
+KernelBase::KernelBase(const GridBase *base, int bnd)
+ : maxX(base->getSizeX() - bnd),
+ maxY(base->getSizeY() - bnd),
+ maxZ(base->is3D() ? (base->getSizeZ() - bnd) : 1),
+ minZ(base->is3D() ? bnd : 0),
+ maxT(1),
+ minT(0),
+ X(base->getStrideX()),
+ Y(base->getStrideY()),
+ Z(base->getStrideZ()),
+ dimT(0),
+ size(base->getSizeX() * base->getSizeY() * (IndexInt)base->getSizeZ())
+{
+}
+
+KernelBase::KernelBase(IndexInt num)
+ : maxX(0), maxY(0), maxZ(0), minZ(0), maxT(0), X(0), Y(0), Z(0), dimT(0), size(num)
+{
+}
+
+KernelBase::KernelBase(const Grid4dBase *base, int bnd)
+ : maxX(base->getSizeX() - bnd),
+ maxY(base->getSizeY() - bnd),
+ maxZ(base->getSizeZ() - bnd),
+ minZ(bnd),
+ maxT(base->getSizeT() - bnd),
+ minT(bnd),
+ X(base->getStrideX()),
+ Y(base->getStrideY()),
+ Z(base->getStrideZ()),
+ dimT(base->getStrideT()),
+ size(base->getSizeX() * base->getSizeY() * base->getSizeZ() * (IndexInt)base->getSizeT())
+{
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/kernel.h b/extern/mantaflow/preprocessed/kernel.h
new file mode 100644
index 00000000000..90e30cd21e1
--- /dev/null
+++ b/extern/mantaflow/preprocessed/kernel.h
@@ -0,0 +1,99 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2014 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Function and macros for defining compution kernels over grids
+ *
+ ******************************************************************************/
+
+#ifndef _KERNEL_H
+#define _KERNEL_H
+
+#if TBB == 1
+# include <tbb/blocked_range3d.h>
+# include <tbb/blocked_range.h>
+# include <tbb/parallel_for.h>
+# include <tbb/parallel_reduce.h>
+#endif
+
+#if OPENMP == 1
+# include <omp.h>
+#endif
+
+#include "general.h"
+
+namespace Manta {
+
+// fwd decl
+class GridBase;
+class Grid4dBase;
+class ParticleBase;
+
+// simple iteration
+#define FOR_IJK_BND(grid, bnd) \
+ for (int k = ((grid).is3D() ? bnd : 0), \
+ __kmax = ((grid).is3D() ? ((grid).getSizeZ() - bnd) : 1); \
+ k < __kmax; \
+ k++) \
+ for (int j = bnd; j < (grid).getSizeY() - bnd; j++) \
+ for (int i = bnd; i < (grid).getSizeX() - bnd; i++)
+
+#define FOR_IJK_REVERSE(grid) \
+ for (int k = (grid).getSizeZ() - 1; k >= 0; k--) \
+ for (int j = (grid).getSizeY() - 1; j >= 0; j--) \
+ for (int i = (grid).getSizeX() - 1; i >= 0; i--)
+
+#define FOR_IDX(grid) \
+ for (IndexInt idx = 0, total = (grid).getSizeX() * (grid).getSizeY() * (grid).getSizeZ(); \
+ idx < total; \
+ idx++)
+
+#define FOR_IJK(grid) FOR_IJK_BND(grid, 0)
+
+#define FOR_PARTS(parts) for (IndexInt idx = 0, total = (parts).size(); idx < total; idx++)
+
+// simple loop over 4d grids
+#define FOR_IJKT_BND(grid, bnd) \
+ for (int t = ((grid).is4D() ? bnd : 0); t < ((grid).is4D() ? ((grid).getSizeT() - bnd) : 1); \
+ ++t) \
+ for (int k = ((grid).is3D() ? bnd : 0); k < ((grid).is3D() ? ((grid).getSizeZ() - bnd) : 1); \
+ ++k) \
+ for (int j = bnd; j < (grid).getSizeY() - bnd; ++j) \
+ for (int i = bnd; i < (grid).getSizeX() - bnd; ++i)
+
+//! Basic data structure for kernel data, initialized based on kernel type (e.g. single, idx, etc).
+struct KernelBase {
+ int maxX, maxY, maxZ, minZ, maxT, minT;
+ int X, Y, Z, dimT;
+ IndexInt size;
+
+ KernelBase(IndexInt num);
+ KernelBase(const GridBase *base, int bnd);
+ KernelBase(const Grid4dBase *base, int bnd);
+
+ // specify in your derived classes:
+
+ // kernel operators
+ // ijk mode: void operator() (int i, int j, int k)
+ // idx mode: void operator() (IndexInt idx)
+
+ // reduce mode:
+ // void join(classname& other)
+ // void setup()
+};
+
+} // namespace Manta
+
+// all kernels will automatically be added to the "Kernels" group in doxygen
+
+#endif
diff --git a/extern/mantaflow/preprocessed/kernel.h.reg.cpp b/extern/mantaflow/preprocessed/kernel.h.reg.cpp
new file mode 100644
index 00000000000..002396024ea
--- /dev/null
+++ b/extern/mantaflow/preprocessed/kernel.h.reg.cpp
@@ -0,0 +1,13 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "kernel.h"
+namespace Manta {
+extern "C" {
+void PbRegister_file_15()
+{
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/levelset.cpp b/extern/mantaflow/preprocessed/levelset.cpp
new file mode 100644
index 00000000000..dcc10718d71
--- /dev/null
+++ b/extern/mantaflow/preprocessed/levelset.cpp
@@ -0,0 +1,876 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Levelset
+ *
+ ******************************************************************************/
+
+#include "levelset.h"
+#include "fastmarch.h"
+#include "kernel.h"
+#include "mcubes.h"
+#include "mesh.h"
+#include <stack>
+
+using namespace std;
+namespace Manta {
+
+//************************************************************************
+// Helper functions and kernels for marching
+
+static const int FlagInited = FastMarch<FmHeapEntryOut, +1>::FlagInited;
+
+// neighbor lookup vectors
+static const Vec3i neighbors[6] = {Vec3i(-1, 0, 0),
+ Vec3i(1, 0, 0),
+ Vec3i(0, -1, 0),
+ Vec3i(0, 1, 0),
+ Vec3i(0, 0, -1),
+ Vec3i(0, 0, 1)};
+
+struct InitFmIn : public KernelBase {
+ InitFmIn(const FlagGrid &flags,
+ Grid<int> &fmFlags,
+ Grid<Real> &phi,
+ bool ignoreWalls,
+ int obstacleType)
+ : KernelBase(&flags, 1),
+ flags(flags),
+ fmFlags(fmFlags),
+ phi(phi),
+ ignoreWalls(ignoreWalls),
+ obstacleType(obstacleType)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ Grid<int> &fmFlags,
+ Grid<Real> &phi,
+ bool ignoreWalls,
+ int obstacleType) const
+ {
+ const IndexInt idx = flags.index(i, j, k);
+ const Real v = phi[idx];
+ if (ignoreWalls) {
+ if (v >= 0. && ((flags[idx] & obstacleType) == 0))
+ fmFlags[idx] = FlagInited;
+ else
+ fmFlags[idx] = 0;
+ }
+ else {
+ if (v >= 0)
+ fmFlags[idx] = FlagInited;
+ else
+ fmFlags[idx] = 0;
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<int> &getArg1()
+ {
+ return fmFlags;
+ }
+ typedef Grid<int> type1;
+ inline Grid<Real> &getArg2()
+ {
+ return phi;
+ }
+ typedef Grid<Real> type2;
+ inline bool &getArg3()
+ {
+ return ignoreWalls;
+ }
+ typedef bool type3;
+ inline int &getArg4()
+ {
+ return obstacleType;
+ }
+ typedef int type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel InitFmIn ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, fmFlags, phi, ignoreWalls, obstacleType);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, fmFlags, phi, ignoreWalls, obstacleType);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<int> &fmFlags;
+ Grid<Real> &phi;
+ bool ignoreWalls;
+ int obstacleType;
+};
+
+struct InitFmOut : public KernelBase {
+ InitFmOut(const FlagGrid &flags,
+ Grid<int> &fmFlags,
+ Grid<Real> &phi,
+ bool ignoreWalls,
+ int obstacleType)
+ : KernelBase(&flags, 1),
+ flags(flags),
+ fmFlags(fmFlags),
+ phi(phi),
+ ignoreWalls(ignoreWalls),
+ obstacleType(obstacleType)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ Grid<int> &fmFlags,
+ Grid<Real> &phi,
+ bool ignoreWalls,
+ int obstacleType) const
+ {
+ const IndexInt idx = flags.index(i, j, k);
+ const Real v = phi[idx];
+ if (ignoreWalls) {
+ fmFlags[idx] = (v < 0) ? FlagInited : 0;
+ if ((flags[idx] & obstacleType) != 0) {
+ fmFlags[idx] = 0;
+ phi[idx] = 0;
+ }
+ }
+ else {
+ fmFlags[idx] = (v < 0) ? FlagInited : 0;
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<int> &getArg1()
+ {
+ return fmFlags;
+ }
+ typedef Grid<int> type1;
+ inline Grid<Real> &getArg2()
+ {
+ return phi;
+ }
+ typedef Grid<Real> type2;
+ inline bool &getArg3()
+ {
+ return ignoreWalls;
+ }
+ typedef bool type3;
+ inline int &getArg4()
+ {
+ return obstacleType;
+ }
+ typedef int type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel InitFmOut ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, fmFlags, phi, ignoreWalls, obstacleType);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, fmFlags, phi, ignoreWalls, obstacleType);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<int> &fmFlags;
+ Grid<Real> &phi;
+ bool ignoreWalls;
+ int obstacleType;
+};
+
+struct SetUninitialized : public KernelBase {
+ SetUninitialized(const Grid<int> &flags,
+ Grid<int> &fmFlags,
+ Grid<Real> &phi,
+ const Real val,
+ int ignoreWalls,
+ int obstacleType)
+ : KernelBase(&flags, 1),
+ flags(flags),
+ fmFlags(fmFlags),
+ phi(phi),
+ val(val),
+ ignoreWalls(ignoreWalls),
+ obstacleType(obstacleType)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const Grid<int> &flags,
+ Grid<int> &fmFlags,
+ Grid<Real> &phi,
+ const Real val,
+ int ignoreWalls,
+ int obstacleType) const
+ {
+ if (ignoreWalls) {
+ if ((fmFlags(i, j, k) != FlagInited) && ((flags(i, j, k) & obstacleType) == 0)) {
+ phi(i, j, k) = val;
+ }
+ }
+ else {
+ if ((fmFlags(i, j, k) != FlagInited))
+ phi(i, j, k) = val;
+ }
+ }
+ inline const Grid<int> &getArg0()
+ {
+ return flags;
+ }
+ typedef Grid<int> type0;
+ inline Grid<int> &getArg1()
+ {
+ return fmFlags;
+ }
+ typedef Grid<int> type1;
+ inline Grid<Real> &getArg2()
+ {
+ return phi;
+ }
+ typedef Grid<Real> type2;
+ inline const Real &getArg3()
+ {
+ return val;
+ }
+ typedef Real type3;
+ inline int &getArg4()
+ {
+ return ignoreWalls;
+ }
+ typedef int type4;
+ inline int &getArg5()
+ {
+ return obstacleType;
+ }
+ typedef int type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel SetUninitialized ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, fmFlags, phi, val, ignoreWalls, obstacleType);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, fmFlags, phi, val, ignoreWalls, obstacleType);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const Grid<int> &flags;
+ Grid<int> &fmFlags;
+ Grid<Real> &phi;
+ const Real val;
+ int ignoreWalls;
+ int obstacleType;
+};
+
+template<bool inward>
+inline bool isAtInterface(const Grid<int> &fmFlags, Grid<Real> &phi, const Vec3i &p)
+{
+ // check for interface
+ int max = phi.is3D() ? 6 : 4;
+ for (int nb = 0; nb < max; nb++) {
+ const Vec3i pn(p + neighbors[nb]);
+ if (!fmFlags.isInBounds(pn))
+ continue;
+
+ if (fmFlags(pn) != FlagInited)
+ continue;
+ if ((inward && phi(pn) >= 0.) || (!inward && phi(pn) < 0.))
+ return true;
+ }
+ return false;
+}
+
+//************************************************************************
+// Levelset class def
+
+LevelsetGrid::LevelsetGrid(FluidSolver *parent, bool show) : Grid<Real>(parent, show)
+{
+ mType = (GridType)(TypeLevelset | TypeReal);
+}
+
+LevelsetGrid::LevelsetGrid(FluidSolver *parent, Real *data, bool show)
+ : Grid<Real>(parent, data, show)
+{
+ mType = (GridType)(TypeLevelset | TypeReal);
+}
+
+Real LevelsetGrid::invalidTimeValue()
+{
+ return FastMarch<FmHeapEntryOut, 1>::InvalidTime();
+}
+
+//! Kernel: perform levelset union
+struct KnJoin : public KernelBase {
+ KnJoin(Grid<Real> &a, const Grid<Real> &b) : KernelBase(&a, 0), a(a), b(b)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, Grid<Real> &a, const Grid<Real> &b) const
+ {
+ a[idx] = min(a[idx], b[idx]);
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return a;
+ }
+ typedef Grid<Real> type0;
+ inline const Grid<Real> &getArg1()
+ {
+ return b;
+ }
+ typedef Grid<Real> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnJoin ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, a, b);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<Real> &a;
+ const Grid<Real> &b;
+};
+void LevelsetGrid::join(const LevelsetGrid &o)
+{
+ KnJoin(*this, o);
+}
+
+//! subtract b, note does not preserve SDF!
+struct KnSubtract : public KernelBase {
+ KnSubtract(Grid<Real> &a, const Grid<Real> &b, const FlagGrid *flags, int subtractType)
+ : KernelBase(&a, 0), a(a), b(b), flags(flags), subtractType(subtractType)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ Grid<Real> &a,
+ const Grid<Real> &b,
+ const FlagGrid *flags,
+ int subtractType) const
+ {
+ if (flags && ((*flags)(idx)&subtractType) == 0)
+ return;
+ if (b[idx] < 0.)
+ a[idx] = b[idx] * -1.;
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return a;
+ }
+ typedef Grid<Real> type0;
+ inline const Grid<Real> &getArg1()
+ {
+ return b;
+ }
+ typedef Grid<Real> type1;
+ inline const FlagGrid *getArg2()
+ {
+ return flags;
+ }
+ typedef FlagGrid type2;
+ inline int &getArg3()
+ {
+ return subtractType;
+ }
+ typedef int type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnSubtract ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, a, b, flags, subtractType);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<Real> &a;
+ const Grid<Real> &b;
+ const FlagGrid *flags;
+ int subtractType;
+};
+void LevelsetGrid::subtract(const LevelsetGrid &o, const FlagGrid *flags, const int subtractType)
+{
+ KnSubtract(*this, o, flags, subtractType);
+}
+
+//! re-init levelset and extrapolate velocities (in & out)
+// note - uses flags to identify border (could also be done based on ls values)
+static void doReinitMarch(Grid<Real> &phi,
+ const FlagGrid &flags,
+ Real maxTime,
+ MACGrid *velTransport,
+ bool ignoreWalls,
+ bool correctOuterLayer,
+ int obstacleType)
+{
+ const int dim = (phi.is3D() ? 3 : 2);
+ Grid<int> fmFlags(phi.getParent());
+
+ FastMarch<FmHeapEntryIn, -1> marchIn(flags, fmFlags, phi, maxTime, NULL);
+
+ // march inside
+ InitFmIn(flags, fmFlags, phi, ignoreWalls, obstacleType);
+
+ FOR_IJK_BND(flags, 1)
+ {
+ if (fmFlags(i, j, k) == FlagInited)
+ continue;
+ if (ignoreWalls && ((flags(i, j, k) & obstacleType) != 0))
+ continue;
+ const Vec3i p(i, j, k);
+
+ if (isAtInterface<true>(fmFlags, phi, p)) {
+ // set value
+ fmFlags(p) = FlagInited;
+
+ // add neighbors that are not at the interface
+ for (int nb = 0; nb < 2 * dim; nb++) {
+ const Vec3i pn(p + neighbors[nb]); // index always valid due to bnd=1
+ if (ignoreWalls && ((flags.get(pn) & obstacleType) != 0))
+ continue;
+
+ // check neighbors of neighbor
+ if (phi(pn) < 0. && !isAtInterface<true>(fmFlags, phi, pn)) {
+ marchIn.addToList(pn, p);
+ }
+ }
+ }
+ }
+ marchIn.performMarching();
+ // done with inwards marching
+
+ // now march out...
+
+ // set un initialized regions
+ SetUninitialized(flags, fmFlags, phi, -maxTime - 1., ignoreWalls, obstacleType);
+
+ InitFmOut(flags, fmFlags, phi, ignoreWalls, obstacleType);
+
+ FastMarch<FmHeapEntryOut, +1> marchOut(flags, fmFlags, phi, maxTime, velTransport);
+
+ // by default, correctOuterLayer is on
+ if (correctOuterLayer) {
+ // normal version, inwards march is done, now add all outside values (0..2] to list
+ // note, this might move the interface a bit! but keeps a nice signed distance field...
+ FOR_IJK_BND(flags, 1)
+ {
+ if (ignoreWalls && ((flags(i, j, k) & obstacleType) != 0))
+ continue;
+ const Vec3i p(i, j, k);
+
+ // check nbs
+ for (int nb = 0; nb < 2 * dim; nb++) {
+ const Vec3i pn(p + neighbors[nb]); // index always valid due to bnd=1
+
+ if (fmFlags(pn) != FlagInited)
+ continue;
+ if (ignoreWalls && ((flags.get(pn) & obstacleType)) != 0)
+ continue;
+
+ const Real nbPhi = phi(pn);
+
+ // only add nodes near interface, not e.g. outer boundary vs. invalid region
+ if (nbPhi < 0 && nbPhi >= -2)
+ marchOut.addToList(p, pn);
+ }
+ }
+ }
+ else {
+ // alternative version, keep interface, do not distort outer cells
+ // add all ouside values, but not those at the IF layer
+ FOR_IJK_BND(flags, 1)
+ {
+ if (ignoreWalls && ((flags(i, j, k) & obstacleType) != 0))
+ continue;
+
+ // only look at ouside values
+ const Vec3i p(i, j, k);
+ if (phi(p) < 0)
+ continue;
+
+ if (isAtInterface<false>(fmFlags, phi, p)) {
+ // now add all non, interface neighbors
+ fmFlags(p) = FlagInited;
+
+ // add neighbors that are not at the interface
+ for (int nb = 0; nb < 2 * dim; nb++) {
+ const Vec3i pn(p + neighbors[nb]); // index always valid due to bnd=1
+ if (ignoreWalls && ((flags.get(pn) & obstacleType) != 0))
+ continue;
+
+ // check neighbors of neighbor
+ if (phi(pn) > 0. && !isAtInterface<false>(fmFlags, phi, pn)) {
+ marchOut.addToList(pn, p);
+ }
+ }
+ }
+ }
+ }
+ marchOut.performMarching();
+
+ // set un initialized regions
+ SetUninitialized(flags, fmFlags, phi, +maxTime + 1., ignoreWalls, obstacleType);
+}
+
+//! call for levelset grids & external real grids
+
+void LevelsetGrid::reinitMarching(const FlagGrid &flags,
+ Real maxTime,
+ MACGrid *velTransport,
+ bool ignoreWalls,
+ bool correctOuterLayer,
+ int obstacleType)
+{
+ doReinitMarch(*this, flags, maxTime, velTransport, ignoreWalls, correctOuterLayer, obstacleType);
+}
+
+void LevelsetGrid::initFromFlags(const FlagGrid &flags, bool ignoreWalls)
+{
+ FOR_IDX(*this)
+ {
+ if (flags.isFluid(idx) || (ignoreWalls && flags.isObstacle(idx)))
+ mData[idx] = -0.5;
+ else
+ mData[idx] = 0.5;
+ }
+}
+
+void LevelsetGrid::fillHoles(int maxDepth, int boundaryWidth)
+{
+ Real curVal, i1, i2, j1, j2, k1, k2;
+ Vec3i c, cTmp;
+ std::stack<Vec3i> undoPos;
+ std::stack<Real> undoVal;
+ std::stack<Vec3i> todoPos;
+
+ FOR_IJK_BND(*this, boundaryWidth)
+ {
+
+ curVal = mData[index(i, j, k)];
+ i1 = mData[index(i - 1, j, k)];
+ i2 = mData[index(i + 1, j, k)];
+ j1 = mData[index(i, j - 1, k)];
+ j2 = mData[index(i, j + 1, k)];
+ k1 = mData[index(i, j, k - 1)];
+ k2 = mData[index(i, j, k + 1)];
+
+ /* Skip cells inside and cells outside with no inside neighbours early */
+ if (curVal < 0.)
+ continue;
+ if (curVal > 0. && i1 > 0. && i2 > 0. && j1 > 0. && j2 > 0. && k1 > 0. && k2 > 0.)
+ continue;
+
+ /* Cell at c is positive (outside) and has at least one negative (inside) neighbour cell */
+ c = Vec3i(i, j, k);
+
+ /* Current cell is outside and has inside neighbour(s) */
+ undoPos.push(c);
+ undoVal.push(curVal);
+ todoPos.push(c);
+
+ /* Enforce negative cell - if search depth gets exceeded this will be reverted to the original
+ * value */
+ mData[index(c.x, c.y, c.z)] = -0.5;
+
+ while (!todoPos.empty()) {
+ todoPos.pop();
+
+ /* Add neighbouring positive (inside) cells to stacks and set negavtive cell value */
+ if (c.x > 0 && mData[index(c.x - 1, c.y, c.z)] > 0.) {
+ cTmp = Vec3i(c.x - 1, c.y, c.z);
+ undoPos.push(cTmp);
+ undoVal.push(mData[index(cTmp)]);
+ todoPos.push(cTmp);
+ mData[index(cTmp)] = -0.5;
+ }
+ if (c.y > 0 && mData[index(c.x, c.y - 1, c.z)] > 0.) {
+ cTmp = Vec3i(c.x, c.y - 1, c.z);
+ undoPos.push(cTmp);
+ undoVal.push(mData[index(cTmp)]);
+ todoPos.push(cTmp);
+ mData[index(cTmp)] = -0.5;
+ }
+ if (c.z > 0 && mData[index(c.x, c.y, c.z - 1)] > 0.) {
+ cTmp = Vec3i(c.x, c.y, c.z - 1);
+ undoPos.push(cTmp);
+ undoVal.push(mData[index(cTmp)]);
+ todoPos.push(cTmp);
+ mData[index(cTmp)] = -0.5;
+ }
+ if (c.x < (*this).getSizeX() - 1 && mData[index(c.x + 1, c.y, c.z)] > 0.) {
+ cTmp = Vec3i(c.x + 1, c.y, c.z);
+ undoPos.push(cTmp);
+ undoVal.push(mData[index(cTmp)]);
+ todoPos.push(cTmp);
+ mData[index(cTmp)] = -0.5;
+ }
+ if (c.y < (*this).getSizeY() - 1 && mData[index(c.x, c.y + 1, c.z)] > 0.) {
+ cTmp = Vec3i(c.x, c.y + 1, c.z);
+ undoPos.push(cTmp);
+ undoVal.push(mData[index(cTmp)]);
+ todoPos.push(cTmp);
+ mData[index(cTmp)] = -0.5;
+ }
+ if (c.z < (*this).getSizeZ() - 1 && mData[index(c.x, c.y, c.z + 1)] > 0.) {
+ cTmp = Vec3i(c.x, c.y, c.z + 1);
+ undoPos.push(cTmp);
+ undoVal.push(mData[index(cTmp)]);
+ todoPos.push(cTmp);
+ mData[index(cTmp)] = -0.5;
+ }
+
+ /* Restore original value in cells if undo needed ie once cell undo count exceeds given limit
+ */
+ if (undoPos.size() > maxDepth) {
+ /* Clear todo stack */
+ while (!todoPos.empty()) {
+ todoPos.pop();
+ }
+ /* Clear undo stack and revert value */
+ while (!undoPos.empty()) {
+ c = undoPos.top();
+ curVal = undoVal.top();
+ undoPos.pop();
+ undoVal.pop();
+ mData[index(c.x, c.y, c.z)] = curVal;
+ }
+ break;
+ }
+
+ /* Ensure that undo stack is cleared at the end if no more items in todo stack left */
+ if (todoPos.empty()) {
+ while (!undoPos.empty()) {
+ undoPos.pop();
+ }
+ while (!undoVal.empty()) {
+ undoVal.pop();
+ }
+ }
+ /* Pop value for next while iteration */
+ else {
+ c = todoPos.top();
+ }
+ }
+ }
+}
+
+//! run marching cubes to create a mesh for the 0-levelset
+void LevelsetGrid::createMesh(Mesh &mesh)
+{
+ assertMsg(is3D(), "Only 3D grids supported so far");
+
+ mesh.clear();
+
+ const Real invalidTime = invalidTimeValue();
+ const Real isoValue = 1e-4;
+
+ // create some temp grids
+ Grid<int> edgeVX(mParent);
+ Grid<int> edgeVY(mParent);
+ Grid<int> edgeVZ(mParent);
+
+ for (int i = 0; i < mSize.x - 1; i++)
+ for (int j = 0; j < mSize.y - 1; j++)
+ for (int k = 0; k < mSize.z - 1; k++) {
+ Real value[8] = {get(i, j, k),
+ get(i + 1, j, k),
+ get(i + 1, j + 1, k),
+ get(i, j + 1, k),
+ get(i, j, k + 1),
+ get(i + 1, j, k + 1),
+ get(i + 1, j + 1, k + 1),
+ get(i, j + 1, k + 1)};
+
+ // build lookup index, check for invalid times
+ bool skip = false;
+ int cubeIdx = 0;
+ for (int l = 0; l < 8; l++) {
+ value[l] *= -1;
+ if (-value[l] <= invalidTime)
+ skip = true;
+ if (value[l] < isoValue)
+ cubeIdx |= 1 << l;
+ }
+ if (skip || (mcEdgeTable[cubeIdx] == 0))
+ continue;
+
+ // where to look up if this point already exists
+ int triIndices[12];
+ int *eVert[12] = {&edgeVX(i, j, k),
+ &edgeVY(i + 1, j, k),
+ &edgeVX(i, j + 1, k),
+ &edgeVY(i, j, k),
+ &edgeVX(i, j, k + 1),
+ &edgeVY(i + 1, j, k + 1),
+ &edgeVX(i, j + 1, k + 1),
+ &edgeVY(i, j, k + 1),
+ &edgeVZ(i, j, k),
+ &edgeVZ(i + 1, j, k),
+ &edgeVZ(i + 1, j + 1, k),
+ &edgeVZ(i, j + 1, k)};
+
+ const Vec3 pos[9] = {Vec3(i, j, k),
+ Vec3(i + 1, j, k),
+ Vec3(i + 1, j + 1, k),
+ Vec3(i, j + 1, k),
+ Vec3(i, j, k + 1),
+ Vec3(i + 1, j, k + 1),
+ Vec3(i + 1, j + 1, k + 1),
+ Vec3(i, j + 1, k + 1)};
+
+ for (int e = 0; e < 12; e++) {
+ if (mcEdgeTable[cubeIdx] & (1 << e)) {
+ // vertex already calculated ?
+ if (*eVert[e] == 0) {
+ // interpolate edge
+ const int e1 = mcEdges[e * 2];
+ const int e2 = mcEdges[e * 2 + 1];
+ const Vec3 p1 = pos[e1]; // scalar field pos 1
+ const Vec3 p2 = pos[e2]; // scalar field pos 2
+ const float valp1 = value[e1]; // scalar field val 1
+ const float valp2 = value[e2]; // scalar field val 2
+ const float mu = (isoValue - valp1) / (valp2 - valp1);
+
+ // init isolevel vertex
+ Node vertex;
+ vertex.pos = p1 + (p2 - p1) * mu + Vec3(Real(0.5));
+ vertex.normal = getNormalized(
+ getGradient(
+ *this, i + cubieOffsetX[e1], j + cubieOffsetY[e1], k + cubieOffsetZ[e1]) *
+ (1.0 - mu) +
+ getGradient(
+ *this, i + cubieOffsetX[e2], j + cubieOffsetY[e2], k + cubieOffsetZ[e2]) *
+ (mu));
+
+ triIndices[e] = mesh.addNode(vertex) + 1;
+
+ // store vertex
+ *eVert[e] = triIndices[e];
+ }
+ else {
+ // retrieve from vert array
+ triIndices[e] = *eVert[e];
+ }
+ }
+ }
+
+ // Create the triangles...
+ for (int e = 0; mcTriTable[cubeIdx][e] != -1; e += 3) {
+ mesh.addTri(Triangle(triIndices[mcTriTable[cubeIdx][e + 0]] - 1,
+ triIndices[mcTriTable[cubeIdx][e + 1]] - 1,
+ triIndices[mcTriTable[cubeIdx][e + 2]] - 1));
+ }
+ }
+
+ // mesh.rebuildCorners();
+ // mesh.rebuildLookup();
+
+ // Update mdata fields
+ mesh.updateDataFields();
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/levelset.h b/extern/mantaflow/preprocessed/levelset.h
new file mode 100644
index 00000000000..ab36ac24903
--- /dev/null
+++ b/extern/mantaflow/preprocessed/levelset.h
@@ -0,0 +1,245 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Levelset
+ *
+ ******************************************************************************/
+
+#ifndef _LEVELSET_H_
+#define _LEVELSET_H_
+
+#include "grid.h"
+
+namespace Manta {
+class Mesh;
+
+//! Special function for levelsets
+class LevelsetGrid : public Grid<Real> {
+ public:
+ LevelsetGrid(FluidSolver *parent, bool show = true);
+ static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "LevelsetGrid::LevelsetGrid", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ bool show = _args.getOpt<bool>("show", 1, true, &_lock);
+ obj = new LevelsetGrid(parent, show);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "LevelsetGrid::LevelsetGrid", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("LevelsetGrid::LevelsetGrid", e.what());
+ return -1;
+ }
+ }
+
+ LevelsetGrid(FluidSolver *parent, Real *data, bool show = true);
+
+ //! reconstruct the levelset using fast marching
+
+ void reinitMarching(const FlagGrid &flags,
+ Real maxTime = 4.0,
+ MACGrid *velTransport = NULL,
+ bool ignoreWalls = false,
+ bool correctOuterLayer = true,
+ int obstacleType = FlagGrid::TypeObstacle);
+ static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ LevelsetGrid *pbo = dynamic_cast<LevelsetGrid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "LevelsetGrid::reinitMarching", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Real maxTime = _args.getOpt<Real>("maxTime", 1, 4.0, &_lock);
+ MACGrid *velTransport = _args.getPtrOpt<MACGrid>("velTransport", 2, NULL, &_lock);
+ bool ignoreWalls = _args.getOpt<bool>("ignoreWalls", 3, false, &_lock);
+ bool correctOuterLayer = _args.getOpt<bool>("correctOuterLayer", 4, true, &_lock);
+ int obstacleType = _args.getOpt<int>("obstacleType", 5, FlagGrid::TypeObstacle, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->reinitMarching(
+ flags, maxTime, velTransport, ignoreWalls, correctOuterLayer, obstacleType);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "LevelsetGrid::reinitMarching", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("LevelsetGrid::reinitMarching", e.what());
+ return 0;
+ }
+ }
+
+ //! create a triangle mesh from the levelset isosurface
+ void createMesh(Mesh &mesh);
+ static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ LevelsetGrid *pbo = dynamic_cast<LevelsetGrid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "LevelsetGrid::createMesh", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Mesh &mesh = *_args.getPtr<Mesh>("mesh", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->createMesh(mesh);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "LevelsetGrid::createMesh", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("LevelsetGrid::createMesh", e.what());
+ return 0;
+ }
+ }
+
+ //! union with another levelset
+ void join(const LevelsetGrid &o);
+ static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ LevelsetGrid *pbo = dynamic_cast<LevelsetGrid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "LevelsetGrid::join", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const LevelsetGrid &o = *_args.getPtr<LevelsetGrid>("o", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->join(o);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "LevelsetGrid::join", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("LevelsetGrid::join", e.what());
+ return 0;
+ }
+ }
+
+ void subtract(const LevelsetGrid &o, const FlagGrid *flags = NULL, const int subtractType = 0);
+ static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ LevelsetGrid *pbo = dynamic_cast<LevelsetGrid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "LevelsetGrid::subtract", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const LevelsetGrid &o = *_args.getPtr<LevelsetGrid>("o", 0, &_lock);
+ const FlagGrid *flags = _args.getPtrOpt<FlagGrid>("flags", 1, NULL, &_lock);
+ const int subtractType = _args.getOpt<int>("subtractType", 2, 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->subtract(o, flags, subtractType);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "LevelsetGrid::subtract", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("LevelsetGrid::subtract", e.what());
+ return 0;
+ }
+ }
+
+ //! initialize levelset from flags (+/- 0.5 heaviside)
+ void initFromFlags(const FlagGrid &flags, bool ignoreWalls = false);
+ static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ LevelsetGrid *pbo = dynamic_cast<LevelsetGrid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "LevelsetGrid::initFromFlags", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ bool ignoreWalls = _args.getOpt<bool>("ignoreWalls", 1, false, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->initFromFlags(flags, ignoreWalls);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "LevelsetGrid::initFromFlags", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("LevelsetGrid::initFromFlags", e.what());
+ return 0;
+ }
+ }
+
+ //! fill holes (pos cells enclosed by neg ones) up to given size with -0.5 (ie not preserving
+ //! sdf)
+ void fillHoles(int maxDepth = 10, int boundaryWidth = 1);
+ static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ LevelsetGrid *pbo = dynamic_cast<LevelsetGrid *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "LevelsetGrid::fillHoles", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int maxDepth = _args.getOpt<int>("maxDepth", 0, 10, &_lock);
+ int boundaryWidth = _args.getOpt<int>("boundaryWidth", 1, 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->fillHoles(maxDepth, boundaryWidth);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "LevelsetGrid::fillHoles", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("LevelsetGrid::fillHoles", e.what());
+ return 0;
+ }
+ }
+
+ static Real invalidTimeValue();
+ public:
+ PbArgs _args;
+}
+#define _C_LevelsetGrid
+;
+
+} // namespace Manta
+#endif
diff --git a/extern/mantaflow/preprocessed/levelset.h.reg.cpp b/extern/mantaflow/preprocessed/levelset.h.reg.cpp
new file mode 100644
index 00000000000..dc6669b5da3
--- /dev/null
+++ b/extern/mantaflow/preprocessed/levelset.h.reg.cpp
@@ -0,0 +1,32 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "levelset.h"
+namespace Manta {
+#ifdef _C_LevelsetGrid
+static const Pb::Register _R_11("LevelsetGrid", "LevelsetGrid", "Grid<Real>");
+template<> const char *Namify<LevelsetGrid>::S = "LevelsetGrid";
+static const Pb::Register _R_12("LevelsetGrid", "LevelsetGrid", LevelsetGrid::_W_0);
+static const Pb::Register _R_13("LevelsetGrid", "reinitMarching", LevelsetGrid::_W_1);
+static const Pb::Register _R_14("LevelsetGrid", "createMesh", LevelsetGrid::_W_2);
+static const Pb::Register _R_15("LevelsetGrid", "join", LevelsetGrid::_W_3);
+static const Pb::Register _R_16("LevelsetGrid", "subtract", LevelsetGrid::_W_4);
+static const Pb::Register _R_17("LevelsetGrid", "initFromFlags", LevelsetGrid::_W_5);
+static const Pb::Register _R_18("LevelsetGrid", "fillHoles", LevelsetGrid::_W_6);
+#endif
+extern "C" {
+void PbRegister_file_11()
+{
+ KEEP_UNUSED(_R_11);
+ KEEP_UNUSED(_R_12);
+ KEEP_UNUSED(_R_13);
+ KEEP_UNUSED(_R_14);
+ KEEP_UNUSED(_R_15);
+ KEEP_UNUSED(_R_16);
+ KEEP_UNUSED(_R_17);
+ KEEP_UNUSED(_R_18);
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/mesh.cpp b/extern/mantaflow/preprocessed/mesh.cpp
new file mode 100644
index 00000000000..d93c2ac04c0
--- /dev/null
+++ b/extern/mantaflow/preprocessed/mesh.cpp
@@ -0,0 +1,2733 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Meshes
+ *
+ * note: this is only a temporary solution, details are bound to change
+ * long term goal is integration with Split&Merge code by Wojtan et al.
+ *
+ ******************************************************************************/
+
+#include "mesh.h"
+#include "integrator.h"
+#include "mantaio.h"
+#include "kernel.h"
+#include "shapes.h"
+#include "noisefield.h"
+//#include "grid.h"
+#include <stack>
+#include <cstring>
+
+using namespace std;
+namespace Manta {
+
+Mesh::Mesh(FluidSolver *parent) : PbClass(parent)
+{
+}
+
+Mesh::~Mesh()
+{
+ for (IndexInt i = 0; i < (IndexInt)mMeshData.size(); ++i)
+ mMeshData[i]->setMesh(NULL);
+
+ if (mFreeMdata) {
+ for (IndexInt i = 0; i < (IndexInt)mMeshData.size(); ++i)
+ delete mMeshData[i];
+ }
+}
+
+Mesh *Mesh::clone()
+{
+ Mesh *nm = new Mesh(mParent);
+ *nm = *this;
+ nm->setName(getName());
+ return nm;
+}
+
+void Mesh::deregister(MeshDataBase *mdata)
+{
+ bool done = false;
+ // remove pointer from mesh data list
+ for (IndexInt i = 0; i < (IndexInt)mMeshData.size(); ++i) {
+ if (mMeshData[i] == mdata) {
+ if (i < (IndexInt)mMeshData.size() - 1)
+ mMeshData[i] = mMeshData[mMeshData.size() - 1];
+ mMeshData.pop_back();
+ done = true;
+ }
+ }
+ if (!done)
+ errMsg("Invalid pointer given, not registered!");
+}
+
+// create and attach a new mdata field to this mesh
+PbClass *Mesh::create(PbType t, PbTypeVec T, const string &name)
+{
+#if NOPYTHON != 1
+ _args.add("nocheck", true);
+ if (t.str() == "")
+ errMsg("Specify mesh data type to create");
+ // debMsg( "Mdata creating '"<< t.str <<" with size "<< this->getSizeSlow(), 5 );
+
+ PbClass *pyObj = PbClass::createPyObject(t.str() + T.str(), name, _args, this->getParent());
+
+ MeshDataBase *mdata = dynamic_cast<MeshDataBase *>(pyObj);
+ if (!mdata) {
+ errMsg(
+ "Unable to get mesh data pointer from newly created object. Only create MeshData type "
+ "with a Mesh.creat() call, eg, MdataReal, MdataVec3 etc.");
+ delete pyObj;
+ return NULL;
+ }
+ else {
+ this->registerMdata(mdata);
+ }
+
+ // directly init size of new mdata field:
+ mdata->resize(this->getSizeSlow());
+#else
+ PbClass *pyObj = NULL;
+#endif
+ return pyObj;
+}
+
+void Mesh::registerMdata(MeshDataBase *mdata)
+{
+ mdata->setMesh(this);
+ mMeshData.push_back(mdata);
+
+ if (mdata->getType() == MeshDataBase::TypeReal) {
+ MeshDataImpl<Real> *pd = dynamic_cast<MeshDataImpl<Real> *>(mdata);
+ if (!pd)
+ errMsg("Invalid mdata object posing as real!");
+ this->registerMdataReal(pd);
+ }
+ else if (mdata->getType() == MeshDataBase::TypeInt) {
+ MeshDataImpl<int> *pd = dynamic_cast<MeshDataImpl<int> *>(mdata);
+ if (!pd)
+ errMsg("Invalid mdata object posing as int!");
+ this->registerMdataInt(pd);
+ }
+ else if (mdata->getType() == MeshDataBase::TypeVec3) {
+ MeshDataImpl<Vec3> *pd = dynamic_cast<MeshDataImpl<Vec3> *>(mdata);
+ if (!pd)
+ errMsg("Invalid mdata object posing as vec3!");
+ this->registerMdataVec3(pd);
+ }
+}
+void Mesh::registerMdataReal(MeshDataImpl<Real> *pd)
+{
+ mMdataReal.push_back(pd);
+}
+void Mesh::registerMdataVec3(MeshDataImpl<Vec3> *pd)
+{
+ mMdataVec3.push_back(pd);
+}
+void Mesh::registerMdataInt(MeshDataImpl<int> *pd)
+{
+ mMdataInt.push_back(pd);
+}
+
+void Mesh::addAllMdata()
+{
+ for (IndexInt i = 0; i < (IndexInt)mMeshData.size(); ++i) {
+ mMeshData[i]->addEntry();
+ }
+}
+
+Real Mesh::computeCenterOfMass(Vec3 &cm) const
+{
+
+ // use double precision for summation, otherwise too much error accumulation
+ double vol = 0;
+ Vector3D<double> cmd(0.0);
+ for (size_t tri = 0; tri < mTris.size(); tri++) {
+ Vector3D<double> p1(toVec3d(getNode(tri, 0)));
+ Vector3D<double> p2(toVec3d(getNode(tri, 1)));
+ Vector3D<double> p3(toVec3d(getNode(tri, 2)));
+
+ double cvol = dot(cross(p1, p2), p3) / 6.0;
+ cmd += (p1 + p2 + p3) * (cvol / 4.0);
+ vol += cvol;
+ }
+ if (vol != 0.0)
+ cmd /= vol;
+
+ cm = toVec3(cmd);
+ return (Real)vol;
+}
+
+void Mesh::clear()
+{
+ mNodes.clear();
+ mTris.clear();
+ mCorners.clear();
+ m1RingLookup.clear();
+ for (size_t i = 0; i < mNodeChannels.size(); i++)
+ mNodeChannels[i]->resize(0);
+ for (size_t i = 0; i < mTriChannels.size(); i++)
+ mTriChannels[i]->resize(0);
+
+ // clear mdata fields as well
+ for (size_t i = 0; i < mMdataReal.size(); i++)
+ mMdataReal[i]->resize(0);
+ for (size_t i = 0; i < mMdataVec3.size(); i++)
+ mMdataVec3[i]->resize(0);
+ for (size_t i = 0; i < mMdataInt.size(); i++)
+ mMdataInt[i]->resize(0);
+}
+
+Mesh &Mesh::operator=(const Mesh &o)
+{
+ // wipe current data
+ clear();
+ if (mNodeChannels.size() != o.mNodeChannels.size() ||
+ mTriChannels.size() != o.mTriChannels.size())
+ errMsg("can't copy mesh, channels not identical");
+ mNodeChannels.clear();
+ mTriChannels.clear();
+
+ // copy corner, nodes, tris
+ mCorners = o.mCorners;
+ mNodes = o.mNodes;
+ mTris = o.mTris;
+ m1RingLookup = o.m1RingLookup;
+
+ // copy channels
+ for (size_t i = 0; i < mNodeChannels.size(); i++)
+ mNodeChannels[i] = o.mNodeChannels[i];
+ for (size_t i = 0; i < o.mTriChannels.size(); i++)
+ mTriChannels[i] = o.mTriChannels[i];
+
+ return *this;
+}
+
+void Mesh::load(string name, bool append)
+{
+ if (name.find_last_of('.') == string::npos)
+ errMsg("file '" + name + "' does not have an extension");
+ string ext = name.substr(name.find_last_of('.'));
+ if (ext == ".gz") // assume bobj gz
+ readBobjFile(name, this, append);
+ else if (ext == ".obj")
+ readObjFile(name, this, append);
+ else
+ errMsg("file '" + name + "' filetype not supported");
+
+ // dont always rebuild...
+ // rebuildCorners();
+ // rebuildLookup();
+}
+
+void Mesh::save(string name)
+{
+ if (name.find_last_of('.') == string::npos)
+ errMsg("file '" + name + "' does not have an extension");
+ string ext = name.substr(name.find_last_of('.'));
+ if (ext == ".obj")
+ writeObjFile(name, this);
+ else if (ext == ".gz")
+ writeBobjFile(name, this);
+ else
+ errMsg("file '" + name + "' filetype not supported");
+}
+
+void Mesh::fromShape(Shape &shape, bool append)
+{
+ if (!append)
+ clear();
+ shape.generateMesh(this);
+}
+
+void Mesh::resizeTris(int numTris)
+{
+ mTris.resize(numTris);
+ rebuildChannels();
+}
+void Mesh::resizeNodes(int numNodes)
+{
+ mNodes.resize(numNodes);
+ rebuildChannels();
+}
+
+//! do a quick check whether a rebuild is necessary, and if yes do rebuild
+void Mesh::rebuildQuickCheck()
+{
+ if (mCorners.size() != 3 * mTris.size())
+ rebuildCorners();
+ if (m1RingLookup.size() != mNodes.size())
+ rebuildLookup();
+}
+
+void Mesh::rebuildCorners(int from, int to)
+{
+ mCorners.resize(3 * mTris.size());
+ if (to < 0)
+ to = mTris.size();
+
+ // fill in basic info
+ for (int tri = from; tri < to; tri++) {
+ for (int c = 0; c < 3; c++) {
+ const int idx = tri * 3 + c;
+ mCorners[idx].tri = tri;
+ mCorners[idx].node = mTris[tri].c[c];
+ mCorners[idx].next = 3 * tri + ((c + 1) % 3);
+ mCorners[idx].prev = 3 * tri + ((c + 2) % 3);
+ mCorners[idx].opposite = -1;
+ }
+ }
+
+ // set opposite info
+ int maxc = to * 3;
+ for (int c = from * 3; c < maxc; c++) {
+ int next = mCorners[mCorners[c].next].node;
+ int prev = mCorners[mCorners[c].prev].node;
+
+ // find corner with same next/prev nodes
+ for (int c2 = c + 1; c2 < maxc; c2++) {
+ int next2 = mCorners[mCorners[c2].next].node;
+ if (next2 != next && next2 != prev)
+ continue;
+ int prev2 = mCorners[mCorners[c2].prev].node;
+ if (prev2 != next && prev2 != prev)
+ continue;
+
+ // found
+ mCorners[c].opposite = c2;
+ mCorners[c2].opposite = c;
+ break;
+ }
+ if (mCorners[c].opposite < 0) {
+ // didn't find opposite
+ errMsg("can't rebuild corners, index without an opposite");
+ }
+ }
+
+ rebuildChannels();
+}
+
+void Mesh::rebuildLookup(int from, int to)
+{
+ if (from == 0 && to < 0)
+ m1RingLookup.clear();
+ m1RingLookup.resize(mNodes.size());
+ if (to < 0)
+ to = mTris.size();
+ from *= 3;
+ to *= 3;
+ for (int i = from; i < to; i++) {
+ const int node = mCorners[i].node;
+ m1RingLookup[node].nodes.insert(mCorners[mCorners[i].next].node);
+ m1RingLookup[node].nodes.insert(mCorners[mCorners[i].prev].node);
+ m1RingLookup[node].tris.insert(mCorners[i].tri);
+ }
+}
+
+void Mesh::rebuildChannels()
+{
+ for (size_t i = 0; i < mTriChannels.size(); i++)
+ mTriChannels[i]->resize(mTris.size());
+ for (size_t i = 0; i < mNodeChannels.size(); i++)
+ mNodeChannels[i]->resize(mNodes.size());
+}
+
+struct _KnAdvectMeshInGrid : public KernelBase {
+ _KnAdvectMeshInGrid(const KernelBase &base,
+ vector<Node> &nodes,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ const Real dt,
+ vector<Vec3> &u)
+ : KernelBase(base), nodes(nodes), flags(flags), vel(vel), dt(dt), u(u)
+ {
+ }
+ inline void op(IndexInt idx,
+ vector<Node> &nodes,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ const Real dt,
+ vector<Vec3> &u) const
+ {
+ if (nodes[idx].flags & Mesh::NfFixed)
+ u[idx] = 0.0;
+ else if (!flags.isInBounds(nodes[idx].pos, 1))
+ u[idx] = 0.0;
+ else
+ u[idx] = vel.getInterpolated(nodes[idx].pos) * dt;
+ }
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, nodes, flags, vel, dt, u);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ vector<Node> &nodes;
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ const Real dt;
+ vector<Vec3> &u;
+};
+struct KnAdvectMeshInGrid : public KernelBase {
+ KnAdvectMeshInGrid(vector<Node> &nodes, const FlagGrid &flags, const MACGrid &vel, const Real dt)
+ : KernelBase(nodes.size()),
+ _inner(KernelBase(nodes.size()), nodes, flags, vel, dt, u),
+ nodes(nodes),
+ flags(flags),
+ vel(vel),
+ dt(dt),
+ u((size))
+ {
+ runMessage();
+ run();
+ }
+ void run()
+ {
+ _inner.run();
+ }
+ inline operator vector<Vec3>()
+ {
+ return u;
+ }
+ inline vector<Vec3> &getRet()
+ {
+ return u;
+ }
+ inline vector<Node> &getArg0()
+ {
+ return nodes;
+ }
+ typedef vector<Node> type0;
+ inline const FlagGrid &getArg1()
+ {
+ return flags;
+ }
+ typedef FlagGrid type1;
+ inline const MACGrid &getArg2()
+ {
+ return vel;
+ }
+ typedef MACGrid type2;
+ inline const Real &getArg3()
+ {
+ return dt;
+ }
+ typedef Real type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnAdvectMeshInGrid ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ _KnAdvectMeshInGrid _inner;
+ vector<Node> &nodes;
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ const Real dt;
+ vector<Vec3> u;
+};
+
+// advection plugin
+void Mesh::advectInGrid(FlagGrid &flags, MACGrid &vel, int integrationMode)
+{
+ KnAdvectMeshInGrid kernel(mNodes, flags, vel, getParent()->getDt());
+ integratePointSet(kernel, integrationMode);
+}
+
+void Mesh::scale(Vec3 s)
+{
+ for (size_t i = 0; i < mNodes.size(); i++)
+ mNodes[i].pos *= s;
+}
+
+void Mesh::offset(Vec3 o)
+{
+ for (size_t i = 0; i < mNodes.size(); i++)
+ mNodes[i].pos += o;
+}
+
+void Mesh::rotate(Vec3 thetas)
+{
+ // rotation thetas are in radians (e.g. pi is equal to 180 degrees)
+ auto rotate = [&](Real theta, unsigned int first_axis, unsigned int second_axis) {
+ if (theta == 0.0f)
+ return;
+
+ Real sin_t = sin(theta);
+ Real cos_t = cos(theta);
+
+ Real sin_sign = first_axis == 0u && second_axis == 2u ? -1.0f : 1.0f;
+ sin_t *= sin_sign;
+
+ size_t length = mNodes.size();
+ for (size_t n = 0; n < length; ++n) {
+ Vec3 &node = mNodes[n].pos;
+ Real first_axis_val = node[first_axis];
+ Real second_axis_val = node[second_axis];
+ node[first_axis] = first_axis_val * cos_t - second_axis_val * sin_t;
+ node[second_axis] = second_axis_val * cos_t + first_axis_val * sin_t;
+ }
+ };
+
+ // rotate x
+ rotate(thetas[0], 1u, 2u);
+ // rotate y
+ rotate(thetas[1], 0u, 2u);
+ // rotate z
+ rotate(thetas[2], 0u, 1u);
+}
+
+void Mesh::computeVelocity(Mesh &oldMesh, MACGrid &vel)
+{
+ // Early return if sizes do not match
+ if (oldMesh.mNodes.size() != mNodes.size())
+ return;
+
+ // temp grid
+ Grid<Vec3> veloMeanCounter(getParent());
+ veloMeanCounter.setConst(0.0f);
+
+ bool bIs2D = getParent()->is2D();
+
+ // calculate velocities from previous to current frame (per vertex)
+ for (size_t i = 0; i < mNodes.size(); ++i) {
+ // skip vertices that are not needed for 2D
+ if (bIs2D && (mNodes[i].pos.z < -0.5f || mNodes[i].pos.z > 0.5f))
+ continue;
+
+ Vec3 velo = mNodes[i].pos - oldMesh.mNodes[i].pos;
+ vel.setInterpolated(mNodes[i].pos, velo, &(veloMeanCounter[0]));
+ }
+
+ // discretize the vertex velocities by averaging them on the grid
+ vel.safeDivide(veloMeanCounter);
+}
+
+void Mesh::removeTri(int tri)
+{
+ // delete triangles by overwriting them with elements from the end of the array.
+ if (tri != (int)mTris.size() - 1) {
+ // if this is the last element, and it is marked for deletion,
+ // don't waste cycles transfering data to itself,
+ // and DEFINITELY don't transfer .opposite data to other, untainted triangles.
+
+ // old corners hold indices on the end of the corners array
+ // new corners holds indices in the new spot in the middle of the array
+ Corner *oldcorners[3];
+ Corner *newcorners[3];
+ int oldtri = mTris.size() - 1;
+ for (int c = 0; c < 3; c++) {
+ oldcorners[c] = &corners(oldtri, c);
+ newcorners[c] = &corners(tri, c);
+ }
+
+ // move the position of the triangle
+ mTris[tri] = mTris[oldtri];
+
+ // 1) update c.node, c.opposite (c.next and c.prev should be fine as they are)
+ for (int c = 0; c < 3; c++) {
+ newcorners[c]->node = mTris[tri].c[c];
+ newcorners[c]->opposite = oldcorners[c]->opposite;
+ }
+
+ // 2) c.opposite.opposite = c
+ for (int c = 0; c < 3; c++) {
+ if (newcorners[c]->opposite >= 0)
+ mCorners[newcorners[c]->opposite].opposite = 3 * tri + c;
+ }
+
+ // update tri lookup
+ for (int c = 0; c < 3; c++) {
+ int node = mTris[tri].c[c];
+ m1RingLookup[node].tris.erase(oldtri);
+ m1RingLookup[node].tris.insert(tri);
+ }
+ }
+
+ // transfer tri props
+ for (size_t p = 0; p < mTriChannels.size(); p++)
+ mTriChannels[p]->remove(tri);
+
+ // pop the triangle and corners out of the vector
+ mTris.pop_back();
+ mCorners.resize(mTris.size() * 3);
+}
+
+void Mesh::removeNodes(const vector<int> &deletedNodes)
+{
+ // After we delete the nodes that are marked for removal,
+ // the size of mNodes will be the current size - the size of the deleted array.
+ // We are going to move the elements at the end of the array
+ // (everything with an index >= newsize)
+ // to the deleted spots.
+ // We have to map all references to the last few nodes to their new locations.
+ int newsize = (int)(mNodes.size() - deletedNodes.size());
+
+ vector<int> new_index(deletedNodes.size());
+ int di, ni;
+ for (ni = 0; ni < (int)new_index.size(); ni++)
+ new_index[ni] = 0;
+ for (di = 0; di < (int)deletedNodes.size(); di++) {
+ if (deletedNodes[di] >= newsize)
+ new_index[deletedNodes[di] - newsize] = -1; // tag this node as invalid
+ }
+ for (di = 0, ni = 0; ni < (int)new_index.size(); ni++, di++) {
+ // we need to find a valid node to move
+ // we marked invalid nodes in the earlier loop with a (-1),
+ // so pick anything but those
+ while (ni < (int)new_index.size() && new_index[ni] == -1)
+ ni++;
+
+ if (ni >= (int)new_index.size())
+ break;
+
+ // next we need to find a valid spot to move the node to.
+ // we iterate through deleted[] until we find a valid spot
+ while (di < (int)new_index.size() && deletedNodes[di] >= newsize)
+ di++;
+
+ // now we assign the valid node to the valid spot
+ new_index[ni] = deletedNodes[di];
+ }
+
+ // Now we have a map of valid indices.
+ // we move node[newsize+i] to location new_index[i].
+ // We ignore the nodes with a -1 index, because they should not be moved.
+ for (int i = 0; i < (int)new_index.size(); i++) {
+ if (new_index[i] != -1)
+ mNodes[new_index[i]] = mNodes[newsize + i];
+ }
+ mNodes.resize(newsize);
+
+ // handle vertex properties
+ for (size_t i = 0; i < mNodeChannels.size(); i++)
+ mNodeChannels[i]->renumber(new_index, newsize);
+
+ // finally, we reconnect everything that used to point to this vertex.
+ for (size_t tri = 0, n = 0; tri < mTris.size(); tri++) {
+ for (int c = 0; c < 3; c++, n++) {
+ if (mCorners[n].node >= newsize) {
+ int newindex = new_index[mCorners[n].node - newsize];
+ mCorners[n].node = newindex;
+ mTris[mCorners[n].tri].c[c] = newindex;
+ }
+ }
+ }
+
+ // renumber 1-ring
+ for (int i = 0; i < (int)new_index.size(); i++) {
+ if (new_index[i] != -1) {
+ m1RingLookup[new_index[i]].nodes.swap(m1RingLookup[newsize + i].nodes);
+ m1RingLookup[new_index[i]].tris.swap(m1RingLookup[newsize + i].tris);
+ }
+ }
+ m1RingLookup.resize(newsize);
+ vector<int> reStack(new_index.size());
+ for (int i = 0; i < newsize; i++) {
+ set<int> &cs = m1RingLookup[i].nodes;
+ int reNum = 0;
+ // find all nodes > newsize
+ set<int>::reverse_iterator itend = cs.rend();
+ for (set<int>::reverse_iterator it = cs.rbegin(); it != itend; ++it) {
+ if (*it < newsize)
+ break;
+ reStack[reNum++] = *it;
+ }
+ // kill them and insert shifted values
+ if (reNum > 0) {
+ cs.erase(cs.find(reStack[reNum - 1]), cs.end());
+ for (int j = 0; j < reNum; j++) {
+ cs.insert(new_index[reStack[j] - newsize]);
+#ifdef DEBUG
+ if (new_index[reStack[j] - newsize] == -1)
+ errMsg("invalid node present in 1-ring set");
+#endif
+ }
+ }
+ }
+}
+
+void Mesh::mergeNode(int node, int delnode)
+{
+ set<int> &ring = m1RingLookup[delnode].nodes;
+ for (set<int>::iterator it = ring.begin(); it != ring.end(); ++it) {
+ m1RingLookup[*it].nodes.erase(delnode);
+ if (*it != node) {
+ m1RingLookup[*it].nodes.insert(node);
+ m1RingLookup[node].nodes.insert(*it);
+ }
+ }
+ set<int> &ringt = m1RingLookup[delnode].tris;
+ for (set<int>::iterator it = ringt.begin(); it != ringt.end(); ++it) {
+ const int t = *it;
+ for (int c = 0; c < 3; c++) {
+ if (mCorners[3 * t + c].node == delnode) {
+ mCorners[3 * t + c].node = node;
+ mTris[t].c[c] = node;
+ }
+ }
+ m1RingLookup[node].tris.insert(t);
+ }
+ for (size_t i = 0; i < mNodeChannels.size(); i++) {
+ // weight is fixed to 1/2 for now
+ mNodeChannels[i]->mergeWith(node, delnode, 0.5);
+ }
+}
+
+void Mesh::removeTriFromLookup(int tri)
+{
+ for (int c = 0; c < 3; c++) {
+ int node = mTris[tri].c[c];
+ m1RingLookup[node].tris.erase(tri);
+ }
+}
+
+void Mesh::addCorner(Corner a)
+{
+ mCorners.push_back(a);
+}
+
+int Mesh::addTri(Triangle a)
+{
+ mTris.push_back(a);
+ for (int c = 0; c < 3; c++) {
+ int node = a.c[c];
+ int nextnode = a.c[(c + 1) % 3];
+ if ((int)m1RingLookup.size() <= node)
+ m1RingLookup.resize(node + 1);
+ if ((int)m1RingLookup.size() <= nextnode)
+ m1RingLookup.resize(nextnode + 1);
+ m1RingLookup[node].nodes.insert(nextnode);
+ m1RingLookup[nextnode].nodes.insert(node);
+ m1RingLookup[node].tris.insert(mTris.size() - 1);
+ }
+ return mTris.size() - 1;
+}
+
+int Mesh::addNode(Node a)
+{
+ mNodes.push_back(a);
+ if (m1RingLookup.size() < mNodes.size())
+ m1RingLookup.resize(mNodes.size());
+
+ // if mdata exists, add zero init for every node
+ addAllMdata();
+
+ return mNodes.size() - 1;
+}
+
+void Mesh::computeVertexNormals()
+{
+ for (size_t i = 0; i < mNodes.size(); i++) {
+ mNodes[i].normal = 0.0;
+ }
+ for (size_t t = 0; t < mTris.size(); t++) {
+ Vec3 p0 = getNode(t, 0), p1 = getNode(t, 1), p2 = getNode(t, 2);
+ Vec3 n0 = p0 - p1, n1 = p1 - p2, n2 = p2 - p0;
+ Real l0 = normSquare(n0), l1 = normSquare(n1), l2 = normSquare(n2);
+
+ Vec3 nm = cross(n0, n1);
+
+ mNodes[mTris[t].c[0]].normal += nm * (1.0 / (l0 * l2));
+ mNodes[mTris[t].c[1]].normal += nm * (1.0 / (l0 * l1));
+ mNodes[mTris[t].c[2]].normal += nm * (1.0 / (l1 * l2));
+ }
+ for (size_t i = 0; i < mNodes.size(); i++) {
+ normalize(mNodes[i].normal);
+ }
+}
+
+void Mesh::fastNodeLookupRebuild(int corner)
+{
+ int node = mCorners[corner].node;
+ m1RingLookup[node].nodes.clear();
+ m1RingLookup[node].tris.clear();
+ int start = mCorners[corner].prev;
+ int current = start;
+ do {
+ m1RingLookup[node].nodes.insert(mCorners[current].node);
+ m1RingLookup[node].tris.insert(mCorners[current].tri);
+ current = mCorners[mCorners[current].opposite].next;
+ if (current < 0)
+ errMsg("Can't use fastNodeLookupRebuild on incomplete surfaces");
+ } while (current != start);
+}
+
+void Mesh::sanityCheck(bool strict, vector<int> *deletedNodes, map<int, bool> *taintedTris)
+{
+ const int nodes = numNodes(), tris = numTris(), corners = 3 * tris;
+ for (size_t i = 0; i < mNodeChannels.size(); i++) {
+ if (mNodeChannels[i]->size() != nodes)
+ errMsg("Node channel size mismatch");
+ }
+ for (size_t i = 0; i < mTriChannels.size(); i++) {
+ if (mTriChannels[i]->size() != tris)
+ errMsg("Tri channel size mismatch");
+ }
+ if ((int)m1RingLookup.size() != nodes)
+ errMsg("1Ring size wrong");
+ for (size_t t = 0; t < mTris.size(); t++) {
+ if (taintedTris && taintedTris->find(t) != taintedTris->end())
+ continue;
+ for (int c = 0; c < 3; c++) {
+ int corner = t * 3 + c;
+ int node = mTris[t].c[c];
+ int next = mTris[t].c[(c + 1) % 3];
+ int prev = mTris[t].c[(c + 2) % 3];
+ int rnext = mCorners[corner].next;
+ int rprev = mCorners[corner].prev;
+ int ro = mCorners[corner].opposite;
+ if (node < 0 || node >= nodes || next < 0 || next >= nodes || prev < 0 || prev >= nodes)
+ errMsg("invalid node entry");
+ if (mCorners[corner].node != node || mCorners[corner].tri != (int)t)
+ errMsg("invalid basic corner entry");
+ if (rnext < 0 || rnext >= corners || rprev < 0 || rprev >= corners || ro >= corners)
+ errMsg("invalid corner links");
+ if (mCorners[rnext].node != next || mCorners[rprev].node != prev)
+ errMsg("invalid corner next/prev");
+ if (strict && ro < 0)
+ errMsg("opposite missing");
+ if (mCorners[ro].opposite != corner)
+ errMsg("invalid opposite ref");
+ set<int> &rnodes = m1RingLookup[node].nodes;
+ set<int> &rtris = m1RingLookup[node].tris;
+ if (rnodes.find(next) == rnodes.end() || rnodes.find(prev) == rnodes.end()) {
+ debMsg("Tri " << t << " " << node << " " << next << " " << prev, 1);
+ for (set<int>::iterator it = rnodes.begin(); it != rnodes.end(); ++it)
+ debMsg(*it, 1);
+ errMsg("node missing in 1ring");
+ }
+ if (rtris.find(t) == rtris.end()) {
+ debMsg("Tri " << t << " " << node, 1);
+ errMsg("tri missing in 1ring");
+ }
+ }
+ }
+ for (int n = 0; n < nodes; n++) {
+ bool docheck = true;
+ if (deletedNodes)
+ for (size_t e = 0; e < deletedNodes->size(); e++)
+ if ((*deletedNodes)[e] == n)
+ docheck = false;
+ ;
+
+ if (docheck) {
+ set<int> &sn = m1RingLookup[n].nodes;
+ set<int> &st = m1RingLookup[n].tris;
+ set<int> sn2;
+
+ for (set<int>::iterator it = st.begin(); it != st.end(); ++it) {
+ bool found = false;
+ for (int c = 0; c < 3; c++) {
+ if (mTris[*it].c[c] == n)
+ found = true;
+ else
+ sn2.insert(mTris[*it].c[c]);
+ }
+ if (!found) {
+ cout << *it << " " << n << endl;
+ for (int c = 0; c < 3; c++)
+ cout << mTris[*it].c[c] << endl;
+ errMsg("invalid triangle in 1ring");
+ }
+ if (taintedTris && taintedTris->find(*it) != taintedTris->end()) {
+ cout << *it << endl;
+ errMsg("tainted tri still is use");
+ }
+ }
+ if (sn.size() != sn2.size())
+ errMsg("invalid nodes in 1ring");
+ for (set<int>::iterator it = sn.begin(), it2 = sn2.begin(); it != sn.end(); ++it, ++it2) {
+ if (*it != *it2) {
+ cout << "Node " << n << ": " << *it << " vs " << *it2 << endl;
+ errMsg("node ring mismatch");
+ }
+ }
+ }
+ }
+}
+
+//*****************************************************************************
+// rasterization
+
+void meshSDF(Mesh &mesh, LevelsetGrid &levelset, Real sigma, Real cutoff = 0.);
+
+//! helper vec3 array container
+struct CVec3Ptr {
+ Real *x, *y, *z;
+ inline Vec3 get(int i) const
+ {
+ return Vec3(x[i], y[i], z[i]);
+ };
+ inline void set(int i, const Vec3 &v)
+ {
+ x[i] = v.x;
+ y[i] = v.y;
+ z[i] = v.z;
+ };
+};
+//! helper vec3 array, for CUDA compatibility, remove at some point
+struct CVec3Array {
+ CVec3Array(int sz)
+ {
+ x.resize(sz);
+ y.resize(sz);
+ z.resize(sz);
+ }
+ CVec3Array(const std::vector<Vec3> &v)
+ {
+ x.resize(v.size());
+ y.resize(v.size());
+ z.resize(v.size());
+ for (size_t i = 0; i < v.size(); i++) {
+ x[i] = v[i].x;
+ y[i] = v[i].y;
+ z[i] = v[i].z;
+ }
+ }
+ CVec3Ptr data()
+ {
+ CVec3Ptr a = {x.data(), y.data(), z.data()};
+ return a;
+ }
+ inline const Vec3 operator[](int idx) const
+ {
+ return Vec3((Real)x[idx], (Real)y[idx], (Real)z[idx]);
+ }
+ inline void set(int idx, const Vec3 &v)
+ {
+ x[idx] = v.x;
+ y[idx] = v.y;
+ z[idx] = v.z;
+ }
+ inline int size()
+ {
+ return x.size();
+ }
+ std::vector<Real> x, y, z;
+};
+
+// void SDFKernel(const int* partStart, const int* partLen, CVec3Ptr pos, CVec3Ptr normal, Real*
+// sdf, Vec3i gridRes, int intRadius, Real safeRadius2, Real cutoff2, Real isigma2);
+//! helper for rasterization
+static void SDFKernel(Grid<int> &partStart,
+ Grid<int> &partLen,
+ CVec3Ptr pos,
+ CVec3Ptr normal,
+ LevelsetGrid &sdf,
+ Vec3i gridRes,
+ int intRadius,
+ Real safeRadius2,
+ Real cutoff2,
+ Real isigma2)
+{
+ for (int cnt_x(0); cnt_x < gridRes[0]; ++cnt_x) {
+ for (int cnt_y(0); cnt_y < gridRes[1]; ++cnt_y) {
+ for (int cnt_z(0); cnt_z < gridRes[2]; ++cnt_z) {
+ // cell index, center
+ Vec3i cell = Vec3i(cnt_x, cnt_y, cnt_z);
+ if (cell.x >= gridRes.x || cell.y >= gridRes.y || cell.z >= gridRes.z)
+ return;
+ Vec3 cpos = Vec3(cell.x + 0.5f, cell.y + 0.5f, cell.z + 0.5f);
+ Real sum = 0.0f;
+ Real dist = 0.0f;
+
+ // query cells within block radius
+ Vec3i minBlock = Vec3i(
+ max(cell.x - intRadius, 0), max(cell.y - intRadius, 0), max(cell.z - intRadius, 0));
+ Vec3i maxBlock = Vec3i(min(cell.x + intRadius, gridRes.x - 1),
+ min(cell.y + intRadius, gridRes.y - 1),
+ min(cell.z + intRadius, gridRes.z - 1));
+ for (int i = minBlock.x; i <= maxBlock.x; i++)
+ for (int j = minBlock.y; j <= maxBlock.y; j++)
+ for (int k = minBlock.z; k <= maxBlock.z; k++) {
+ // test if block is within radius
+ Vec3 d = Vec3(cell.x - i, cell.y - j, cell.z - k);
+ Real normSqr = d[0] * d[0] + d[1] * d[1] + d[2] * d[2];
+ if (normSqr > safeRadius2)
+ continue;
+
+ // find source cell, and divide it into thread blocks
+ int block = i + gridRes.x * (j + gridRes.y * k);
+ int slen = partLen[block];
+ if (slen == 0)
+ continue;
+ int start = partStart[block];
+
+ // process sources
+ for (int s = 0; s < slen; s++) {
+
+ // actual sdf kernel
+ Vec3 r = cpos - pos.get(start + s);
+ Real normSqr = r[0] * r[0] + r[1] * r[1] + r[2] * r[2];
+ Real r2 = normSqr;
+ if (r2 < cutoff2) {
+ Real w = expf(-r2 * isigma2);
+ sum += w;
+ dist += dot(normal.get(start + s), r) * w;
+ }
+ }
+ }
+ // writeback
+ if (sum > 0.0f) {
+ // sdf[cell.x + gridRes.x * (cell.y + gridRes.y * cell.z)] = dist / sum;
+ sdf(cell.x, cell.y, cell.z) = dist / sum;
+ }
+ }
+ }
+ }
+}
+
+static inline IndexInt _cIndex(const Vec3 &pos, const Vec3i &s)
+{
+ Vec3i p = toVec3i(pos);
+ if (p.x < 0 || p.y < 0 || p.z < 0 || p.x >= s.x || p.y >= s.y || p.z >= s.z)
+ return -1;
+ return p.x + s.x * (p.y + s.y * p.z);
+}
+
+//! Kernel: Apply a shape to a grid, setting value inside
+
+template<class T> struct ApplyMeshToGrid : public KernelBase {
+ ApplyMeshToGrid(Grid<T> *grid, Grid<Real> &sdf, T value, FlagGrid *respectFlags)
+ : KernelBase(grid, 0), grid(grid), sdf(sdf), value(value), respectFlags(respectFlags)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(
+ int i, int j, int k, Grid<T> *grid, Grid<Real> &sdf, T value, FlagGrid *respectFlags) const
+ {
+ if (respectFlags && respectFlags->isObstacle(i, j, k))
+ return;
+ if (sdf(i, j, k) < 0) {
+ (*grid)(i, j, k) = value;
+ }
+ }
+ inline Grid<T> *getArg0()
+ {
+ return grid;
+ }
+ typedef Grid<T> type0;
+ inline Grid<Real> &getArg1()
+ {
+ return sdf;
+ }
+ typedef Grid<Real> type1;
+ inline T &getArg2()
+ {
+ return value;
+ }
+ typedef T type2;
+ inline FlagGrid *getArg3()
+ {
+ return respectFlags;
+ }
+ typedef FlagGrid type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel ApplyMeshToGrid ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, grid, sdf, value, respectFlags);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, grid, sdf, value, respectFlags);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ Grid<T> *grid;
+ Grid<Real> &sdf;
+ T value;
+ FlagGrid *respectFlags;
+};
+
+void Mesh::applyMeshToGrid(GridBase *grid, FlagGrid *respectFlags, Real cutoff, Real meshSigma)
+{
+ FluidSolver dummy(grid->getSize());
+ LevelsetGrid mesh_sdf(&dummy, false);
+ meshSDF(*this, mesh_sdf, meshSigma, cutoff); // meshSigma=2 fixed here
+
+#if NOPYTHON != 1
+ if (grid->getType() & GridBase::TypeInt)
+ ApplyMeshToGrid<int>((Grid<int> *)grid, mesh_sdf, _args.get<int>("value"), respectFlags);
+ else if (grid->getType() & GridBase::TypeReal)
+ ApplyMeshToGrid<Real>((Grid<Real> *)grid, mesh_sdf, _args.get<Real>("value"), respectFlags);
+ else if (grid->getType() & GridBase::TypeVec3)
+ ApplyMeshToGrid<Vec3>((Grid<Vec3> *)grid, mesh_sdf, _args.get<Vec3>("value"), respectFlags);
+ else
+ errMsg("Shape::applyToGrid(): unknown grid type");
+#else
+ errMsg("Not yet supported...");
+#endif
+}
+
+void Mesh::computeLevelset(LevelsetGrid &levelset, Real sigma, Real cutoff)
+{
+ meshSDF(*this, levelset, sigma, cutoff);
+}
+
+LevelsetGrid Mesh::getLevelset(Real sigma, Real cutoff)
+{
+ LevelsetGrid phi(getParent());
+ meshSDF(*this, phi, sigma, cutoff);
+ return phi;
+}
+
//! Rasterize a triangle mesh into a signed-distance levelset grid.
//! The SDF is built from Gaussian-weighted surface samples (face centers plus
//! extra samples on large triangles), then the exterior is flood-filled to +cutoff.
//! \param sigma  smoothing width of the per-sample Gaussian weight
//! \param cutoff influence band of the SDF; a negative value means "use 2*sigma"
void meshSDF(Mesh &mesh, LevelsetGrid &levelset, Real sigma, Real cutoff)
{
  if (cutoff < 0)
    cutoff = 2 * sigma;
  Real maxEdgeLength = 0.75;      // edges longer than this (in grid cells) trigger super-sampling
  Real numSamplesPerCell = 0.75;  // sample density used along long edges

  Vec3i gridRes = levelset.getSize();
  // scale factor from mesh/solver coordinates into levelset grid coordinates
  Vec3 mult = toVec3(gridRes) / toVec3(mesh.getParent()->getGridSize());

  // prepare center values
  std::vector<Vec3> center;
  std::vector<Vec3> normals;
  short bigEdges(0);
  std::vector<Vec3> samplePoints;
  for (int i = 0; i < mesh.numTris(); i++) {
    center.push_back(Vec3(mesh.getFaceCenter(i) * mult));
    normals.push_back(mesh.getFaceNormal(i));
    // count big, stretched edges (bitmask: bit e set when edge e exceeds maxEdgeLength)
    bigEdges = 0;
    for (short edge(0); edge < 3; ++edge) {
      if (norm(mesh.getEdge(i, edge)) > maxEdgeLength) {
        bigEdges += 1 << edge;
      }
    }
    if (bigEdges > 0) {
      // super-sample this triangle: iterate two barycentric directions, anchored
      // at a corner opposite a short edge (if any) so the samples spread evenly
      samplePoints.clear();
      short iterA, pointA, iterB, pointB;
      int numSamples0 = norm(mesh.getEdge(i, 1)) * numSamplesPerCell;
      int numSamples1 = norm(mesh.getEdge(i, 2)) * numSamplesPerCell;
      int numSamples2 = norm(mesh.getEdge(i, 0)) * numSamplesPerCell;
      if (!(bigEdges & (1 << 0))) {
        // loop through 0,1
        iterA = numSamples1;
        pointA = 0;
        iterB = numSamples2;
        pointB = 1;
      }
      else if (!(bigEdges & (1 << 1))) {
        // loop through 1,2
        iterA = numSamples2;
        pointA = 1;
        iterB = numSamples0;
        pointB = 2;
      }
      else {
        // loop through 2,0
        iterA = numSamples0;
        pointA = 2;
        iterB = numSamples1;
        pointB = 0;
      }

      Real u(0.), v(0.), w(0.);  // barycentric uvw coords
      Vec3 samplePoint, normal;
      for (int sample0(0); sample0 < iterA; ++sample0) {
        u = Real(1. * sample0 / iterA);
        for (int sample1(0); sample1 < iterB; ++sample1) {
          v = Real(1. * sample1 / iterB);
          w = 1 - u - v;
          if (w < 0.)  // outside the triangle
            continue;
          samplePoint = mesh.getNode(i, pointA) * mult * u + mesh.getNode(i, pointB) * mult * v +
                        mesh.getNode(i, (3 - pointA - pointB)) * mult * w;
          samplePoints.push_back(samplePoint);
          // all samples of triangle i share its face normal, so center/normals
          // arrays stay index-aligned even though normals are pushed eagerly
          normal = mesh.getFaceNormal(i);
          normals.push_back(normal);
        }
      }
      center.insert(center.end(), samplePoints.begin(), samplePoints.end());
    }
  }

  // prepare grid: everything starts "inside"; the flood fill below fixes the outside
  levelset.setConst(-cutoff);

  // 1. count sources per cell (samples outside the grid yield idx < 0 and are dropped)
  Grid<int> srcPerCell(levelset.getParent());
  for (size_t i = 0; i < center.size(); i++) {
    IndexInt idx = _cIndex(center[i], gridRes);
    if (idx >= 0)
      srcPerCell[idx]++;
  }

  // 2. create start index lookup (prefix sum over per-cell counts)
  Grid<int> srcCellStart(levelset.getParent());
  int cnt = 0;
  FOR_IJK(srcCellStart)
  {
    IndexInt idx = srcCellStart.index(i, j, k);
    srcCellStart[idx] = cnt;
    cnt += srcPerCell[idx];
  }

  // 3. reorder nodes so each cell's samples are contiguous (cell-bucketed layout)
  CVec3Array reorderPos(center.size());
  CVec3Array reorderNormal(center.size());
  {
    Grid<int> curSrcCell(levelset.getParent());
    for (int i = 0; i < (int)center.size(); i++) {
      IndexInt idx = _cIndex(center[i], gridRes);
      if (idx < 0)
        continue;
      IndexInt idx2 = srcCellStart[idx] + curSrcCell[idx];
      reorderPos.set(idx2, center[i]);
      reorderNormal.set(idx2, normals[i]);
      curSrcCell[idx]++;
    }
  }

  // construct parameters
  Real safeRadius = cutoff + sqrt(3.0) * 0.5;  // cutoff plus half a cell diagonal
  Real safeRadius2 = safeRadius * safeRadius;
  Real cutoff2 = cutoff * cutoff;
  Real isigma2 = 1.0 / (sigma * sigma);
  int intRadius = (int)(cutoff + 0.5);

  SDFKernel(srcCellStart,
            srcPerCell,
            reorderPos.data(),
            reorderNormal.data(),
            levelset,
            gridRes,
            intRadius,
            safeRadius2,
            cutoff2,
            isigma2);

  // floodfill outside: seed from cells the kernel left near +cutoff, then grow
  // through all still-negative neighbors, stamping them to +cutoff
  std::stack<Vec3i> outside;
  FOR_IJK(levelset)
  {
    if (levelset(i, j, k) >= cutoff - 1.0f)
      outside.push(Vec3i(i, j, k));
  }
  while (!outside.empty()) {
    Vec3i c = outside.top();
    outside.pop();
    levelset(c) = cutoff;
    if (c.x > 0 && levelset(c.x - 1, c.y, c.z) < 0)
      outside.push(Vec3i(c.x - 1, c.y, c.z));
    if (c.y > 0 && levelset(c.x, c.y - 1, c.z) < 0)
      outside.push(Vec3i(c.x, c.y - 1, c.z));
    if (c.z > 0 && levelset(c.x, c.y, c.z - 1) < 0)
      outside.push(Vec3i(c.x, c.y, c.z - 1));
    if (c.x < levelset.getSizeX() - 1 && levelset(c.x + 1, c.y, c.z) < 0)
      outside.push(Vec3i(c.x + 1, c.y, c.z));
    if (c.y < levelset.getSizeY() - 1 && levelset(c.x, c.y + 1, c.z) < 0)
      outside.push(Vec3i(c.x, c.y + 1, c.z));
    if (c.z < levelset.getSizeZ() - 1 && levelset(c.x, c.y, c.z + 1) < 0)
      outside.push(Vec3i(c.x, c.y, c.z + 1));
  };
}
+
+// Blender data pointer accessors
+std::string Mesh::getNodesDataPointer()
+{
+ std::ostringstream out;
+ out << &mNodes;
+ return out.str();
+}
+std::string Mesh::getTrisDataPointer()
+{
+ std::ostringstream out;
+ out << &mTris;
+ return out.str();
+}
+
+// mesh data
+
//! Construct an unattached mesh-data channel; mMesh is set when it registers with a mesh.
MeshDataBase::MeshDataBase(FluidSolver *parent) : PbClass(parent), mMesh(NULL)
{
}
+
//! Detach from the owning mesh so it does not keep a dangling pointer to this channel.
MeshDataBase::~MeshDataBase()
{
  // notify parent of deletion
  if (mMesh)
    mMesh->deregister(this);
}
+
+// actual data implementation
+
//! Construct an empty typed channel with no grid source attached.
template<class T>
MeshDataImpl<T>::MeshDataImpl(FluidSolver *parent)
    : MeshDataBase(parent), mpGridSource(NULL), mGridSourceMAC(false)
{
}
+
//! Copy-construct from another channel: duplicates values and name,
//! but intentionally does NOT copy the grid-source binding.
template<class T>
MeshDataImpl<T>::MeshDataImpl(FluidSolver *parent, MeshDataImpl<T> *other)
    : MeshDataBase(parent), mpGridSource(NULL), mGridSourceMAC(false)
{
  this->mData = other->mData;
  setName(other->getName());
}
+
//! No owned resources beyond mData (std::vector cleans up itself).
template<class T> MeshDataImpl<T>::~MeshDataImpl()
{
}
+
+template<class T> IndexInt MeshDataImpl<T>::getSizeSlow() const
+{
+ return mData.size();
+}
+template<class T> void MeshDataImpl<T>::addEntry()
+{
+ // add zero'ed entry
+ T tmp = T(0.);
+ // for debugging, force init:
+ // tmp = T(0.02 * mData.size()); // increasing
+ // tmp = T(1.); // constant 1
+ return mData.push_back(tmp);
+}
+template<class T> void MeshDataImpl<T>::resize(IndexInt s)
+{
+ mData.resize(s);
+}
//! Virtual-dispatch wrapper: copy a single value between indices via the typed copyValue.
template<class T> void MeshDataImpl<T>::copyValueSlow(IndexInt from, IndexInt to)
{
  this->copyValue(from, to);
}
+template<class T> MeshDataBase *MeshDataImpl<T>::clone()
+{
+ MeshDataImpl<T> *npd = new MeshDataImpl<T>(getParent(), this);
+ return npd;
+}
+
//! Attach a grid used to initialize newly created entries (see initNewValue).
//! When isMAC is set, the grid must actually be a MACGrid — verified via dynamic_cast.
template<class T> void MeshDataImpl<T>::setSource(Grid<T> *grid, bool isMAC)
{
  mpGridSource = grid;
  mGridSourceMAC = isMAC;
  if (isMAC)
    assertMsg(dynamic_cast<MACGrid *>(grid) != NULL, "Given grid is not a valid MAC grid");
}
+
//! Initialize entry idx: zero by default, or interpolated from the source grid at pos.
template<class T> void MeshDataImpl<T>::initNewValue(IndexInt idx, Vec3 pos)
{
  if (!mpGridSource)
    mData[idx] = 0;
  else {
    mData[idx] = mpGridSource->getInterpolated(pos);
  }
}
+
+// special handling needed for velocities
+template<> void MeshDataImpl<Vec3>::initNewValue(IndexInt idx, Vec3 pos)
+{
+ if (!mpGridSource)
+ mData[idx] = 0;
+ else {
+ if (!mGridSourceMAC)
+ mData[idx] = mpGridSource->getInterpolated(pos);
+ else
+ mData[idx] = ((MACGrid *)mpGridSource)->getInterpolated(pos);
+ }
+}
+
+//! update additional mesh data
+void Mesh::updateDataFields()
+{
+ for (size_t i = 0; i < mNodes.size(); ++i) {
+ Vec3 pos = mNodes[i].pos;
+ for (IndexInt md = 0; md < (IndexInt)mMdataReal.size(); ++md)
+ mMdataReal[md]->initNewValue(i, mNodes[i].pos);
+ for (IndexInt md = 0; md < (IndexInt)mMdataVec3.size(); ++md)
+ mMdataVec3[md]->initNewValue(i, mNodes[i].pos);
+ for (IndexInt md = 0; md < (IndexInt)mMdataInt.size(); ++md)
+ mMdataInt[md]->initNewValue(i, mNodes[i].pos);
+ }
+}
+
//! Load channel values from file; format chosen by extension (.uni/.raw), errors otherwise.
template<typename T> void MeshDataImpl<T>::load(string name)
{
  if (name.find_last_of('.') == string::npos)
    errMsg("file '" + name + "' does not have an extension");
  string ext = name.substr(name.find_last_of('.'));
  if (ext == ".uni")
    readMdataUni<T>(name, this);
  else if (ext == ".raw")  // raw = uni for now
    readMdataUni<T>(name, this);
  else
    errMsg("mesh data '" + name + "' filetype not supported for loading");
}
+
//! Save channel values to file; format chosen by extension (.uni/.raw), errors otherwise.
template<typename T> void MeshDataImpl<T>::save(string name)
{
  if (name.find_last_of('.') == string::npos)
    errMsg("file '" + name + "' does not have an extension");
  string ext = name.substr(name.find_last_of('.'));
  if (ext == ".uni")
    writeMdataUni<T>(name, this);
  else if (ext == ".raw")  // raw = uni for now
    writeMdataUni<T>(name, this);
  else
    errMsg("mesh data '" + name + "' filetype not supported for saving");
}
+
+// specializations
+
//! Runtime type tags for the three supported channel element types.
template<> MeshDataBase::MdataType MeshDataImpl<Real>::getType() const
{
  return MeshDataBase::TypeReal;
}
template<> MeshDataBase::MdataType MeshDataImpl<int>::getType() const
{
  return MeshDataBase::TypeInt;
}
template<> MeshDataBase::MdataType MeshDataImpl<Vec3>::getType() const
{
  return MeshDataBase::TypeVec3;
}
+
//! TBB kernel: set every channel entry to a constant value.
//! Generated kernel boilerplate — the work runs in the constructor via run().
//! NOTE(review): appears functionally identical to knMdataSetConst further below.
template<class T> struct knSetMdataConst : public KernelBase {
  knSetMdataConst(MeshDataImpl<T> &mdata, T value)
      : KernelBase(mdata.size()), mdata(mdata), value(value)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &mdata, T value) const
  {
    mdata[idx] = value;
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return mdata;
  }
  typedef MeshDataImpl<T> type0;
  inline T &getArg1()
  {
    return value;
  }
  typedef T type1;
  void runMessage()
  {
    debMsg("Executing kernel knSetMdataConst ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, mdata, value);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &mdata;
  T value;
};
+
+template<class T, class S> struct knMdataSet : public KernelBase {
+ knMdataSet(MeshDataImpl<T> &me, const MeshDataImpl<S> &other)
+ : KernelBase(me.size()), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, MeshDataImpl<T> &me, const MeshDataImpl<S> &other) const
+ {
+ me[idx] += other[idx];
+ }
+ inline MeshDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef MeshDataImpl<T> type0;
+ inline const MeshDataImpl<S> &getArg1()
+ {
+ return other;
+ }
+ typedef MeshDataImpl<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knMdataSet ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ MeshDataImpl<T> &me;
+ const MeshDataImpl<S> &other;
+};
//! TBB kernel: element-wise me[idx] += other[idx]. Runs in the constructor.
template<class T, class S> struct knMdataAdd : public KernelBase {
  knMdataAdd(MeshDataImpl<T> &me, const MeshDataImpl<S> &other)
      : KernelBase(me.size()), me(me), other(other)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &me, const MeshDataImpl<S> &other) const
  {
    me[idx] += other[idx];
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const MeshDataImpl<S> &getArg1()
  {
    return other;
  }
  typedef MeshDataImpl<S> type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataAdd ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, other);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const MeshDataImpl<S> &other;
};
//! TBB kernel: element-wise me[idx] -= other[idx]. Runs in the constructor.
template<class T, class S> struct knMdataSub : public KernelBase {
  knMdataSub(MeshDataImpl<T> &me, const MeshDataImpl<S> &other)
      : KernelBase(me.size()), me(me), other(other)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &me, const MeshDataImpl<S> &other) const
  {
    me[idx] -= other[idx];
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const MeshDataImpl<S> &getArg1()
  {
    return other;
  }
  typedef MeshDataImpl<S> type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataSub ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, other);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const MeshDataImpl<S> &other;
};
//! TBB kernel: element-wise me[idx] *= other[idx]. Runs in the constructor.
template<class T, class S> struct knMdataMult : public KernelBase {
  knMdataMult(MeshDataImpl<T> &me, const MeshDataImpl<S> &other)
      : KernelBase(me.size()), me(me), other(other)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &me, const MeshDataImpl<S> &other) const
  {
    me[idx] *= other[idx];
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const MeshDataImpl<S> &getArg1()
  {
    return other;
  }
  typedef MeshDataImpl<S> type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataMult ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, other);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const MeshDataImpl<S> &other;
};
//! TBB kernel: element-wise me[idx] /= other[idx]. Runs in the constructor.
//! NOTE(review): no zero check here — callers wanting protection use knMdataSafeDiv.
template<class T, class S> struct knMdataDiv : public KernelBase {
  knMdataDiv(MeshDataImpl<T> &me, const MeshDataImpl<S> &other)
      : KernelBase(me.size()), me(me), other(other)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &me, const MeshDataImpl<S> &other) const
  {
    me[idx] /= other[idx];
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const MeshDataImpl<S> &getArg1()
  {
    return other;
  }
  typedef MeshDataImpl<S> type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataDiv ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, other);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const MeshDataImpl<S> &other;
};
+
//! TBB kernel: set every entry to the scalar `other`. Runs in the constructor.
template<class T, class S> struct knMdataSetScalar : public KernelBase {
  knMdataSetScalar(MeshDataImpl<T> &me, const S &other)
      : KernelBase(me.size()), me(me), other(other)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &me, const S &other) const
  {
    me[idx] = other;
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const S &getArg1()
  {
    return other;
  }
  typedef S type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataSetScalar ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, other);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const S &other;
};
//! TBB kernel: add the scalar `other` to every entry. Runs in the constructor.
template<class T, class S> struct knMdataAddScalar : public KernelBase {
  knMdataAddScalar(MeshDataImpl<T> &me, const S &other)
      : KernelBase(me.size()), me(me), other(other)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &me, const S &other) const
  {
    me[idx] += other;
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const S &getArg1()
  {
    return other;
  }
  typedef S type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataAddScalar ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, other);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const S &other;
};
//! TBB kernel: multiply every entry by the scalar `other`. Runs in the constructor.
template<class T, class S> struct knMdataMultScalar : public KernelBase {
  knMdataMultScalar(MeshDataImpl<T> &me, const S &other)
      : KernelBase(me.size()), me(me), other(other)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &me, const S &other) const
  {
    me[idx] *= other;
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const S &getArg1()
  {
    return other;
  }
  typedef S type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataMultScalar ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, other);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const S &other;
};
//! TBB kernel: axpy-style update, me[idx] += factor * other[idx]. Runs in the constructor.
template<class T, class S> struct knMdataScaledAdd : public KernelBase {
  knMdataScaledAdd(MeshDataImpl<T> &me, const MeshDataImpl<T> &other, const S &factor)
      : KernelBase(me.size()), me(me), other(other), factor(factor)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx,
                 MeshDataImpl<T> &me,
                 const MeshDataImpl<T> &other,
                 const S &factor) const
  {
    me[idx] += factor * other[idx];
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const MeshDataImpl<T> &getArg1()
  {
    return other;
  }
  typedef MeshDataImpl<T> type1;
  inline const S &getArg2()
  {
    return factor;
  }
  typedef S type2;
  void runMessage()
  {
    debMsg("Executing kernel knMdataScaledAdd ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, other, factor);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const MeshDataImpl<T> &other;
  const S &factor;
};
+
//! TBB kernel: element-wise division guarded by safeDivide (helper handles zero divisors).
template<class T> struct knMdataSafeDiv : public KernelBase {
  knMdataSafeDiv(MeshDataImpl<T> &me, const MeshDataImpl<T> &other)
      : KernelBase(me.size()), me(me), other(other)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &me, const MeshDataImpl<T> &other) const
  {
    me[idx] = safeDivide(me[idx], other[idx]);
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const MeshDataImpl<T> &getArg1()
  {
    return other;
  }
  typedef MeshDataImpl<T> type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataSafeDiv ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, other);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const MeshDataImpl<T> &other;
};
//! TBB kernel: set every entry to a constant value. Runs in the constructor.
//! NOTE(review): duplicate of knSetMdataConst above; kept for generated-code parity.
template<class T> struct knMdataSetConst : public KernelBase {
  knMdataSetConst(MeshDataImpl<T> &mdata, T value)
      : KernelBase(mdata.size()), mdata(mdata), value(value)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &mdata, T value) const
  {
    mdata[idx] = value;
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return mdata;
  }
  typedef MeshDataImpl<T> type0;
  inline T &getArg1()
  {
    return value;
  }
  typedef T type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataSetConst ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, mdata, value);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &mdata;
  T value;
};
+
//! TBB kernel: clamp every entry into [min, max] via Manta's clamp helper.
template<class T> struct knMdataClamp : public KernelBase {
  knMdataClamp(MeshDataImpl<T> &me, T min, T max)
      : KernelBase(me.size()), me(me), min(min), max(max)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &me, T min, T max) const
  {
    me[idx] = clamp(me[idx], min, max);
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline T &getArg1()
  {
    return min;
  }
  typedef T type1;
  inline T &getArg2()
  {
    return max;
  }
  typedef T type2;
  void runMessage()
  {
    debMsg("Executing kernel knMdataClamp ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, min, max);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  T min;
  T max;
};
//! TBB kernel: clamp every entry from below to vmin.
template<class T> struct knMdataClampMin : public KernelBase {
  knMdataClampMin(MeshDataImpl<T> &me, const T vmin) : KernelBase(me.size()), me(me), vmin(vmin)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &me, const T vmin) const
  {
    me[idx] = std::max(vmin, me[idx]);
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const T &getArg1()
  {
    return vmin;
  }
  typedef T type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataClampMin ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, vmin);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const T vmin;
};
//! TBB kernel: clamp every entry from above to vmax.
template<class T> struct knMdataClampMax : public KernelBase {
  knMdataClampMax(MeshDataImpl<T> &me, const T vmax) : KernelBase(me.size()), me(me), vmax(vmax)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<T> &me, const T vmax) const
  {
    me[idx] = std::min(vmax, me[idx]);
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const T &getArg1()
  {
    return vmax;
  }
  typedef T type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataClampMax ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, vmax);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const T vmax;
};
//! TBB kernel: per-component lower clamp for Vec3 channels (each of x/y/z clamped to vmin).
struct knMdataClampMinVec3 : public KernelBase {
  knMdataClampMinVec3(MeshDataImpl<Vec3> &me, const Real vmin)
      : KernelBase(me.size()), me(me), vmin(vmin)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<Vec3> &me, const Real vmin) const
  {
    me[idx].x = std::max(vmin, me[idx].x);
    me[idx].y = std::max(vmin, me[idx].y);
    me[idx].z = std::max(vmin, me[idx].z);
  }
  inline MeshDataImpl<Vec3> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<Vec3> type0;
  inline const Real &getArg1()
  {
    return vmin;
  }
  typedef Real type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataClampMinVec3 ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, vmin);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<Vec3> &me;
  const Real vmin;
};
//! TBB kernel: per-component upper clamp for Vec3 channels (each of x/y/z clamped to vmax).
struct knMdataClampMaxVec3 : public KernelBase {
  knMdataClampMaxVec3(MeshDataImpl<Vec3> &me, const Real vmax)
      : KernelBase(me.size()), me(me), vmax(vmax)
  {
    runMessage();
    run();
  }
  // per-element operation
  inline void op(IndexInt idx, MeshDataImpl<Vec3> &me, const Real vmax) const
  {
    me[idx].x = std::min(vmax, me[idx].x);
    me[idx].y = std::min(vmax, me[idx].y);
    me[idx].z = std::min(vmax, me[idx].z);
  }
  inline MeshDataImpl<Vec3> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<Vec3> type0;
  inline const Real &getArg1()
  {
    return vmax;
  }
  typedef Real type1;
  void runMessage()
  {
    debMsg("Executing kernel knMdataClampMaxVec3 ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, vmax);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<Vec3> &me;
  const Real vmax;
};
+
+// python operators
+
+template<typename T> MeshDataImpl<T> &MeshDataImpl<T>::copyFrom(const MeshDataImpl<T> &a)
+{
+ assertMsg(a.mData.size() == mData.size(),
+ "different mdata size " << a.mData.size() << " vs " << this->mData.size());
+ memcpy(&mData[0], &a.mData[0], sizeof(T) * mData.size());
+ return *this;
+}
+
+template<typename T> void MeshDataImpl<T>::setConst(T s)
+{
+ knMdataSetScalar<T, T> op(*this, s);
+}
+
+template<typename T> void MeshDataImpl<T>::setConstRange(T s, const int begin, const int end)
+{
+ for (int i = begin; i < end; ++i)
+ (*this)[i] = s;
+}
+
+// special set by flag
// special set by flag
//! TBB kernel: set entries to `other` only where the int channel t has any bit of itype set.
template<class T, class S> struct knMdataSetScalarIntFlag : public KernelBase {
  knMdataSetScalarIntFlag(MeshDataImpl<T> &me,
                          const S &other,
                          const MeshDataImpl<int> &t,
                          const int itype)
      : KernelBase(me.size()), me(me), other(other), t(t), itype(itype)
  {
    runMessage();
    run();
  }
  // per-element operation: bitmask test selects which entries are written
  inline void op(IndexInt idx,
                 MeshDataImpl<T> &me,
                 const S &other,
                 const MeshDataImpl<int> &t,
                 const int itype) const
  {
    if (t[idx] & itype)
      me[idx] = other;
  }
  inline MeshDataImpl<T> &getArg0()
  {
    return me;
  }
  typedef MeshDataImpl<T> type0;
  inline const S &getArg1()
  {
    return other;
  }
  typedef S type1;
  inline const MeshDataImpl<int> &getArg2()
  {
    return t;
  }
  typedef MeshDataImpl<int> type2;
  inline const int &getArg3()
  {
    return itype;
  }
  typedef int type3;
  void runMessage()
  {
    debMsg("Executing kernel knMdataSetScalarIntFlag ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, me, other, t, itype);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  MeshDataImpl<T> &me;
  const S &other;
  const MeshDataImpl<int> &t;
  const int itype;
};
+template<typename T>
+void MeshDataImpl<T>::setConstIntFlag(T s, const MeshDataImpl<int> &t, const int itype)
+{
+ knMdataSetScalarIntFlag<T, T> op(*this, s, t, itype);
+}
+
+template<typename T> void MeshDataImpl<T>::add(const MeshDataImpl<T> &a)
+{
+ knMdataAdd<T, T> op(*this, a);
+}
+template<typename T> void MeshDataImpl<T>::sub(const MeshDataImpl<T> &a)
+{
+ knMdataSub<T, T> op(*this, a);
+}
+
+template<typename T> void MeshDataImpl<T>::addConst(T s)
+{
+ knMdataAddScalar<T, T> op(*this, s);
+}
+
+template<typename T> void MeshDataImpl<T>::addScaled(const MeshDataImpl<T> &a, const T &factor)
+{
+ knMdataScaledAdd<T, T> op(*this, a, factor);
+}
+
+template<typename T> void MeshDataImpl<T>::mult(const MeshDataImpl<T> &a)
+{
+ knMdataMult<T, T> op(*this, a);
+}
+
+template<typename T> void MeshDataImpl<T>::safeDiv(const MeshDataImpl<T> &a)
+{
+ knMdataSafeDiv<T> op(*this, a);
+}
+
+template<typename T> void MeshDataImpl<T>::multConst(T s)
+{
+ knMdataMultScalar<T, T> op(*this, s);
+}
+
+template<typename T> void MeshDataImpl<T>::clamp(Real vmin, Real vmax)
+{
+ knMdataClamp<T> op(*this, vmin, vmax);
+}
+
+template<typename T> void MeshDataImpl<T>::clampMin(Real vmin)
+{
+ knMdataClampMin<T> op(*this, vmin);
+}
+template<typename T> void MeshDataImpl<T>::clampMax(Real vmax)
+{
+ knMdataClampMax<T> op(*this, vmax);
+}
+
+template<> void MeshDataImpl<Vec3>::clampMin(Real vmin)
+{
+ knMdataClampMinVec3 op(*this, vmin);
+}
+template<> void MeshDataImpl<Vec3>::clampMax(Real vmax)
+{
+ knMdataClampMaxVec3 op(*this, vmax);
+}
+
//! TBB reduction kernel: sum channel values, optionally restricted to entries
//! whose int-flag channel t matches the itype bitmask (t may be null = sum all).
template<typename T> struct KnPtsSum : public KernelBase {
  KnPtsSum(const MeshDataImpl<T> &val, const MeshDataImpl<int> *t, const int itype)
      : KernelBase(val.size()), val(val), t(t), itype(itype), result(T(0.))
  {
    runMessage();
    run();
  }
  // per-element accumulation
  inline void op(IndexInt idx,
                 const MeshDataImpl<T> &val,
                 const MeshDataImpl<int> *t,
                 const int itype,
                 T &result)
  {
    if (t && !((*t)[idx] & itype))
      return;
    result += val[idx];
  }
  // implicit conversion yields the reduced total
  inline operator T()
  {
    return result;
  }
  inline T &getRet()
  {
    return result;
  }
  inline const MeshDataImpl<T> &getArg0()
  {
    return val;
  }
  typedef MeshDataImpl<T> type0;
  inline const MeshDataImpl<int> *getArg1()
  {
    return t;
  }
  typedef MeshDataImpl<int> type1;
  inline const int &getArg2()
  {
    return itype;
  }
  typedef int type2;
  void runMessage()
  {
    debMsg("Executing kernel KnPtsSum ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r)
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, val, t, itype, result);
  }
  void run()
  {
    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  // split constructor + join implement the TBB parallel_reduce protocol
  KnPtsSum(KnPtsSum &o, tbb::split)
      : KernelBase(o), val(o.val), t(o.t), itype(o.itype), result(T(0.))
  {
  }
  void join(const KnPtsSum &o)
  {
    result += o.result;
  }
  const MeshDataImpl<T> &val;
  const MeshDataImpl<int> *t;
  const int itype;
  T result;
};
//! TBB reduction kernel: sum of squared norms of all channel values.
template<typename T> struct KnPtsSumSquare : public KernelBase {
  KnPtsSumSquare(const MeshDataImpl<T> &val) : KernelBase(val.size()), val(val), result(0.)
  {
    runMessage();
    run();
  }
  // per-element accumulation
  inline void op(IndexInt idx, const MeshDataImpl<T> &val, Real &result)
  {
    result += normSquare(val[idx]);
  }
  inline operator Real()
  {
    return result;
  }
  inline Real &getRet()
  {
    return result;
  }
  inline const MeshDataImpl<T> &getArg0()
  {
    return val;
  }
  typedef MeshDataImpl<T> type0;
  void runMessage()
  {
    debMsg("Executing kernel KnPtsSumSquare ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r)
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, val, result);
  }
  void run()
  {
    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  // split constructor + join implement the TBB parallel_reduce protocol
  KnPtsSumSquare(KnPtsSumSquare &o, tbb::split) : KernelBase(o), val(o.val), result(0.)
  {
  }
  void join(const KnPtsSumSquare &o)
  {
    result += o.result;
  }
  const MeshDataImpl<T> &val;
  Real result;
};
//! TBB reduction kernel: sum of norms (magnitudes) of all channel values.
template<typename T> struct KnPtsSumMagnitude : public KernelBase {
  KnPtsSumMagnitude(const MeshDataImpl<T> &val) : KernelBase(val.size()), val(val), result(0.)
  {
    runMessage();
    run();
  }
  // per-element accumulation
  inline void op(IndexInt idx, const MeshDataImpl<T> &val, Real &result)
  {
    result += norm(val[idx]);
  }
  inline operator Real()
  {
    return result;
  }
  inline Real &getRet()
  {
    return result;
  }
  inline const MeshDataImpl<T> &getArg0()
  {
    return val;
  }
  typedef MeshDataImpl<T> type0;
  void runMessage()
  {
    debMsg("Executing kernel KnPtsSumMagnitude ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r)
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, val, result);
  }
  void run()
  {
    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  // split constructor + join implement the TBB parallel_reduce protocol
  KnPtsSumMagnitude(KnPtsSumMagnitude &o, tbb::split) : KernelBase(o), val(o.val), result(0.)
  {
  }
  void join(const KnPtsSumMagnitude &o)
  {
    result += o.result;
  }
  const MeshDataImpl<T> &val;
  Real result;
};
+
+template<typename T> T MeshDataImpl<T>::sum(const MeshDataImpl<int> *t, const int itype) const
+{
+ return KnPtsSum<T>(*this, t, itype);
+}
+template<typename T> Real MeshDataImpl<T>::sumSquare() const
+{
+ return KnPtsSumSquare<T>(*this);
+}
+template<typename T> Real MeshDataImpl<T>::sumMagnitude() const
+{
+ return KnPtsSumMagnitude<T>(*this);
+}
+
//! TBB reduction kernel: minimum value in the channel (identity = +max Real).
template<typename T>

struct CompMdata_Min : public KernelBase {
  CompMdata_Min(const MeshDataImpl<T> &val)
      : KernelBase(val.size()), val(val), minVal(std::numeric_limits<Real>::max())
  {
    runMessage();
    run();
  }
  // per-element comparison
  inline void op(IndexInt idx, const MeshDataImpl<T> &val, Real &minVal)
  {
    if (val[idx] < minVal)
      minVal = val[idx];
  }
  inline operator Real()
  {
    return minVal;
  }
  inline Real &getRet()
  {
    return minVal;
  }
  inline const MeshDataImpl<T> &getArg0()
  {
    return val;
  }
  typedef MeshDataImpl<T> type0;
  void runMessage()
  {
    debMsg("Executing kernel CompMdata_Min ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r)
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, val, minVal);
  }
  void run()
  {
    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  // split constructor + join implement the TBB parallel_reduce protocol
  CompMdata_Min(CompMdata_Min &o, tbb::split)
      : KernelBase(o), val(o.val), minVal(std::numeric_limits<Real>::max())
  {
  }
  void join(const CompMdata_Min &o)
  {
    minVal = min(minVal, o.minVal);
  }
  const MeshDataImpl<T> &val;
  Real minVal;
};
+
//! TBB reduction kernel: maximum value in the channel (identity = -max Real).
template<typename T>

struct CompMdata_Max : public KernelBase {
  CompMdata_Max(const MeshDataImpl<T> &val)
      : KernelBase(val.size()), val(val), maxVal(-std::numeric_limits<Real>::max())
  {
    runMessage();
    run();
  }
  // per-element comparison
  inline void op(IndexInt idx, const MeshDataImpl<T> &val, Real &maxVal)
  {
    if (val[idx] > maxVal)
      maxVal = val[idx];
  }
  inline operator Real()
  {
    return maxVal;
  }
  inline Real &getRet()
  {
    return maxVal;
  }
  inline const MeshDataImpl<T> &getArg0()
  {
    return val;
  }
  typedef MeshDataImpl<T> type0;
  void runMessage()
  {
    debMsg("Executing kernel CompMdata_Max ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r)
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, val, maxVal);
  }
  void run()
  {
    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  // split constructor + join implement the TBB parallel_reduce protocol
  CompMdata_Max(CompMdata_Max &o, tbb::split)
      : KernelBase(o), val(o.val), maxVal(-std::numeric_limits<Real>::max())
  {
  }
  void join(const CompMdata_Max &o)
  {
    maxVal = max(maxVal, o.maxVal);
  }
  const MeshDataImpl<T> &val;
  Real maxVal;
};
+
+template<typename T> Real MeshDataImpl<T>::getMin()
+{
+ return CompMdata_Min<T>(*this); // kernel runs in its ctor; operator Real() yields the reduced minimum
+}
+
+template<typename T> Real MeshDataImpl<T>::getMaxAbs()
+{
+ Real amin = CompMdata_Min<T>(*this); // smallest (possibly negative) entry
+ Real amax = CompMdata_Max<T>(*this); // largest entry
+ return max(fabs(amin), fabs(amax)); // the largest magnitude is at one of the two extremes
+}
+
+template<typename T> Real MeshDataImpl<T>::getMax()
+{
+ return CompMdata_Max<T>(*this); // kernel runs in its ctor; operator Real() yields the reduced maximum
+}
+
+template<typename T>
+void MeshDataImpl<T>::printMdata(IndexInt start, IndexInt stop, bool printIndex) // debug-print entries [start, stop) of this channel
+{
+ std::ostringstream sstr;
+ IndexInt s = (start > 0 ? start : 0); // non-positive start means "from the beginning"
+ IndexInt e = (stop > 0 ? stop : (IndexInt)mData.size()); // non-positive stop means "to the end"
+ s = Manta::clamp(s, (IndexInt)0, (IndexInt)mData.size()); // keep the range inside the data array
+ e = Manta::clamp(e, (IndexInt)0, (IndexInt)mData.size());
+
+ for (IndexInt i = s; i < e; ++i) {
+ if (printIndex)
+ sstr << i << ": ";
+ sstr << mData[i] << " "
+ << "\n";
+ }
+ debMsg(sstr.str(), 1); // emit the whole listing as a single debug message
+}
+template<class T> std::string MeshDataImpl<T>::getDataPointer()
+{
+ std::ostringstream out;
+ out << &mData; // NOTE(review): this is the address of the std::vector object, not mData.data() — presumably the caller casts it back to std::vector<T>*; confirm against the consuming side
+ return out.str();
+}
+
+// specials for vec3
+// work on length values, ie, always positive (in contrast to scalar versions above)
+
+struct CompMdata_MinVec3 : public KernelBase { // reduction kernel: minimum squared length in a Vec3 channel
+ CompMdata_MinVec3(const MeshDataImpl<Vec3> &val)
+ : KernelBase(val.size()), val(val), minVal(std::numeric_limits<Real>::max()) // FIX: seed was -max(); normSquare() >= 0 so no element could ever undercut it, the reduction stuck at -max() and getMin() took sqrt() of a negative number. +max() is the identity element for min.
+ {
+ runMessage();
+ run(); // the whole reduction executes inside the constructor
+ }
+ inline void op(IndexInt idx, const MeshDataImpl<Vec3> &val, Real &minVal) // per-element step on squared norms
+ {
+ const Real s = normSquare(val[idx]);
+ if (s < minVal)
+ minVal = s;
+ }
+ inline operator Real() // result is read back via implicit conversion
+ {
+ return minVal;
+ }
+ inline Real &getRet()
+ {
+ return minVal;
+ }
+ inline const MeshDataImpl<Vec3> &getArg0()
+ {
+ return val;
+ }
+ typedef MeshDataImpl<Vec3> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel CompMdata_MinVec3 ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) // reduce one subrange
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, minVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ CompMdata_MinVec3(CompMdata_MinVec3 &o, tbb::split) // splitting constructor required by tbb::parallel_reduce
+ : KernelBase(o), val(o.val), minVal(std::numeric_limits<Real>::max()) // identity seed; same fix as the main constructor
+ {
+ }
+ void join(const CompMdata_MinVec3 &o) // merge partial results from split ranges
+ {
+ minVal = min(minVal, o.minVal);
+ }
+ const MeshDataImpl<Vec3> &val;
+ Real minVal;
+};
+
+struct CompMdata_MaxVec3 : public KernelBase { // reduction kernel: maximum squared length in a Vec3 channel
+ CompMdata_MaxVec3(const MeshDataImpl<Vec3> &val)
+ : KernelBase(val.size()), val(val), maxVal(0.) // FIX: seed was -numeric_limits::min(), a tiny *negative* value (float min() is the smallest positive) — for an empty channel the seed survived and getMax()/getMaxAbs() returned sqrt(negative) = NaN. 0 is the correct identity for non-negative squared norms; non-empty results are unchanged.
+ {
+ runMessage();
+ run(); // the whole reduction executes inside the constructor
+ }
+ inline void op(IndexInt idx, const MeshDataImpl<Vec3> &val, Real &maxVal) // per-element step on squared norms
+ {
+ const Real s = normSquare(val[idx]);
+ if (s > maxVal)
+ maxVal = s;
+ }
+ inline operator Real() // result is read back via implicit conversion
+ {
+ return maxVal;
+ }
+ inline Real &getRet()
+ {
+ return maxVal;
+ }
+ inline const MeshDataImpl<Vec3> &getArg0()
+ {
+ return val;
+ }
+ typedef MeshDataImpl<Vec3> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel CompMdata_MaxVec3 ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) // reduce one subrange
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, maxVal);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ CompMdata_MaxVec3(CompMdata_MaxVec3 &o, tbb::split) // splitting constructor required by tbb::parallel_reduce
+ : KernelBase(o), val(o.val), maxVal(0.) // identity seed (normSquare() >= 0); same fix as the main constructor
+ {
+ }
+ void join(const CompMdata_MaxVec3 &o) // merge partial results from split ranges
+ {
+ maxVal = max(maxVal, o.maxVal);
+ }
+ const MeshDataImpl<Vec3> &val;
+ Real maxVal;
+};
+
+template<> Real MeshDataImpl<Vec3>::getMin()
+{
+ return sqrt(CompMdata_MinVec3(*this)); // kernel reduces squared norms; convert back to a length once at the end
+}
+
+template<> Real MeshDataImpl<Vec3>::getMaxAbs()
+{
+ return sqrt(CompMdata_MaxVec3(*this)); // lengths are non-negative, so the maximum already bounds the absolute value (no min reduction needed)
+}
+
+template<> Real MeshDataImpl<Vec3>::getMax()
+{
+ return sqrt(CompMdata_MaxVec3(*this)); // kernel reduces squared norms; convert back to a length once at the end
+}
+
+// explicit instantiation
+template class MeshDataImpl<int>;
+template class MeshDataImpl<Real>;
+template class MeshDataImpl<Vec3>;
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/mesh.h b/extern/mantaflow/preprocessed/mesh.h
new file mode 100644
index 00000000000..f49619515ce
--- /dev/null
+++ b/extern/mantaflow/preprocessed/mesh.h
@@ -0,0 +1,1690 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Meshes
+ *
+ * note: this is only a temporary solution, details are bound to change
+ * long term goal is integration with Split&Merge code by Wojtan et al.
+ *
+ ******************************************************************************/
+
+#ifndef _MESH_H
+#define _MESH_H
+
+#include <vector>
+#include "manta.h"
+#include "vectorbase.h"
+#include <set>
+#include "levelset.h"
+
+namespace Manta {
+
+// fwd decl
+class GridBase;
+// class LevelsetGrid;
+class FlagGrid;
+class MACGrid;
+class Shape;
+class MeshDataBase;
+template<class T> class MeshDataImpl;
+
+//! Node position and flags
+struct Node {
+ Node() : flags(0), pos(Vec3::Zero), normal(Vec3::Zero)
+ {
+ }
+ Node(const Vec3 &p) : flags(0), pos(p) // note: normal is left default-initialized here
+ {
+ }
+ int flags; // bitmask of Mesh::NodeFlags (NfFixed, NfMarked, ...)
+ Vec3 pos, normal; // position and per-vertex normal (see Mesh::computeVertexNormals)
+};
+
+//! Carries indices of its nodes
+struct Triangle {
+ Triangle() : flags(0)
+ {
+ c[0] = c[1] = c[2] = 0;
+ }
+ Triangle(int n0, int n1, int n2) : flags(0)
+ {
+ c[0] = n0;
+ c[1] = n1;
+ c[2] = n2;
+ }
+
+ int c[3]; // indices of the three corner nodes in the mesh node array
+ int flags; // bitmask of Mesh::FaceFlags
+};
+
+//! For fast access to nodes and neighboring triangles
+struct Corner {
+ Corner() : tri(-1), node(-1), opposite(-1), next(-1), prev(-1){};
+ Corner(int t, int n) : tri(t), node(n), opposite(-1), next(-1), prev(-1)
+ {
+ }
+
+ int tri; // index of the owning triangle
+ int node; // index of the node at this corner
+ int opposite; // index of the opposite corner (-1 = unset; wired up by Mesh::rebuildCorners, presumably)
+ int next; // index of the next corner in the mesh corner array (-1 = unset)
+ int prev; // index of the previous corner in the mesh corner array (-1 = unset)
+};
+
+//! Base class for mesh data channels (texture coords, vorticity, ...)
+struct NodeChannel {
+ virtual ~NodeChannel(){};
+ virtual void resize(int num) = 0;
+ virtual int size() = 0;
+ virtual NodeChannel *clone() = 0;
+
+ virtual void addInterpol(int a, int b, Real alpha) = 0; // append entry interpolated between entries a and b with weight alpha
+ virtual void mergeWith(int node, int delnode, Real alpha) = 0; // fold delnode's entry into node's (see Mesh::mergeNode)
+ virtual void renumber(const std::vector<int> &newIndex, int newsize) = 0; // remap entries after node deletions
+};
+
+//! Node channel using only a vector
+template<class T> struct SimpleNodeChannel : public NodeChannel { // vector-backed channel; still abstract (clone/addInterpol/mergeWith not implemented here)
+ SimpleNodeChannel(){};
+ SimpleNodeChannel(const SimpleNodeChannel<T> &a) : data(a.data)
+ {
+ }
+ void resize(int num)
+ {
+ data.resize(num);
+ }
+ virtual int size()
+ {
+ return data.size();
+ }
+ virtual void renumber(const std::vector<int> &newIndex, int newsize); // defined out of line
+
+ // virtual void addSplit(int from, Real alpha) { data.push_back(data[from]); }
+
+ std::vector<T> data;
+};
+
+//! Base class for mesh data channels (texture coords, vorticity, ...)
+struct TriChannel {
+ virtual ~TriChannel(){};
+ virtual void resize(int num) = 0;
+ virtual TriChannel *clone() = 0;
+ virtual int size() = 0;
+
+ virtual void addNew() = 0; // append a default entry for a newly created triangle
+ virtual void addSplit(int from, Real alpha) = 0; // append an entry for a triangle split off from 'from'
+ virtual void remove(int tri) = 0; // drop the entry of a removed triangle
+};
+
+//! Tri channel using only a vector
+template<class T> struct SimpleTriChannel : public TriChannel { // vector-backed channel; clone() left to concrete subclasses
+ SimpleTriChannel(){};
+ SimpleTriChannel(const SimpleTriChannel<T> &a) : data(a.data)
+ {
+ }
+ void resize(int num)
+ {
+ data.resize(num);
+ }
+ void remove(int tri)
+ {
+ if (tri != (int)data.size() - 1) // swap-with-last removal: O(1), does not preserve order
+ data[tri] = *data.rbegin();
+ data.pop_back();
+ }
+ virtual int size()
+ {
+ return data.size();
+ }
+
+ virtual void addSplit(int from, Real alpha) // alpha unused: a split simply duplicates the source entry
+ {
+ data.push_back(data[from]);
+ }
+ virtual void addNew()
+ {
+ data.push_back(T()); // append a value-initialized entry
+ }
+
+ std::vector<T> data;
+};
+
+struct OneRing {
+ OneRing()
+ {
+ }
+ std::set<int> nodes; // node indices in this node's 1-ring neighborhood
+ std::set<int> tris; // triangle indices in this node's 1-ring neighborhood
+};
+
+//! Triangle mesh class
+/*! note: this is only a temporary solution, details are bound to change
+ long term goal is integration with Split&Merge code by Wojtan et al.*/
+class Mesh : public PbClass {
+ public:
+ Mesh(FluidSolver *parent);
+ static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "Mesh::Mesh", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ obj = new Mesh(parent);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "Mesh::Mesh", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::Mesh", e.what());
+ return -1;
+ }
+ }
+
+ virtual ~Mesh();
+ virtual Mesh *clone();
+
+ enum NodeFlags { NfNone = 0, NfFixed = 1, NfMarked = 2, NfKillme = 4, NfCollide = 8 };
+ enum FaceFlags { FfNone = 0, FfDoubled = 1, FfMarked = 2 };
+ enum MeshType { TypeNormal = 0, TypeVortexSheet };
+
+ virtual MeshType getType()
+ {
+ return TypeNormal;
+ }
+
+ Real computeCenterOfMass(Vec3 &cm) const;
+ void computeVertexNormals();
+
+ // plugins
+ void clear();
+ static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::clear", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->clear();
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::clear", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::clear", e.what());
+ return 0;
+ }
+ }
+
+ void load(std::string name, bool append = false);
+ static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::load", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ std::string name = _args.get<std::string>("name", 0, &_lock);
+ bool append = _args.getOpt<bool>("append", 1, false, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->load(name, append);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::load", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::load", e.what());
+ return 0;
+ }
+ }
+
+ void fromShape(Shape &shape, bool append = false);
+ static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::fromShape", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Shape &shape = *_args.getPtr<Shape>("shape", 0, &_lock);
+ bool append = _args.getOpt<bool>("append", 1, false, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->fromShape(shape, append);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::fromShape", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::fromShape", e.what());
+ return 0;
+ }
+ }
+
+ void save(std::string name);
+ static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::save", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ std::string name = _args.get<std::string>("name", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->save(name);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::save", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::save", e.what());
+ return 0;
+ }
+ }
+
+ void advectInGrid(FlagGrid &flags, MACGrid &vel, int integrationMode);
+ static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::advectInGrid", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ int integrationMode = _args.get<int>("integrationMode", 2, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->advectInGrid(flags, vel, integrationMode);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::advectInGrid", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::advectInGrid", e.what());
+ return 0;
+ }
+ }
+
+ void scale(Vec3 s);
+ static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::scale", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Vec3 s = _args.get<Vec3>("s", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->scale(s);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::scale", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::scale", e.what());
+ return 0;
+ }
+ }
+
+ void offset(Vec3 o);
+ static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::offset", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Vec3 o = _args.get<Vec3>("o", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->offset(o);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::offset", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::offset", e.what());
+ return 0;
+ }
+ }
+
+ void rotate(Vec3 thetas);
+ static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::rotate", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Vec3 thetas = _args.get<Vec3>("thetas", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->rotate(thetas);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::rotate", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::rotate", e.what());
+ return 0;
+ }
+ }
+
+ void computeVelocity(Mesh &oldMesh, MACGrid &vel);
+ static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::computeVelocity", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Mesh &oldMesh = *_args.getPtr<Mesh>("oldMesh", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->computeVelocity(oldMesh, vel);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::computeVelocity", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::computeVelocity", e.what());
+ return 0;
+ }
+ }
+
+ void computeLevelset(LevelsetGrid &levelset, Real sigma, Real cutoff = -1.);
+ static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::computeLevelset", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ LevelsetGrid &levelset = *_args.getPtr<LevelsetGrid>("levelset", 0, &_lock);
+ Real sigma = _args.get<Real>("sigma", 1, &_lock);
+ Real cutoff = _args.getOpt<Real>("cutoff", 2, -1., &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->computeLevelset(levelset, sigma, cutoff);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::computeLevelset", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::computeLevelset", e.what());
+ return 0;
+ }
+ }
+
+ LevelsetGrid getLevelset(Real sigma, Real cutoff = -1.);
+ static PyObject *_W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::getLevelset", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Real sigma = _args.get<Real>("sigma", 0, &_lock);
+ Real cutoff = _args.getOpt<Real>("cutoff", 1, -1., &_lock);
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getLevelset(sigma, cutoff));
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::getLevelset", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::getLevelset", e.what());
+ return 0;
+ }
+ }
+
+ //! map mesh to grid with sdf
+ void applyMeshToGrid(GridBase *grid,
+ FlagGrid *respectFlags = 0,
+ Real cutoff = -1.,
+ Real meshSigma = 2.);
+ static PyObject *_W_12(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::applyMeshToGrid", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ GridBase *grid = _args.getPtr<GridBase>("grid", 0, &_lock);
+ FlagGrid *respectFlags = _args.getPtrOpt<FlagGrid>("respectFlags", 1, 0, &_lock);
+ Real cutoff = _args.getOpt<Real>("cutoff", 2, -1., &_lock);
+ Real meshSigma = _args.getOpt<Real>("meshSigma", 3, 2., &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->applyMeshToGrid(grid, respectFlags, cutoff, meshSigma);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::applyMeshToGrid", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::applyMeshToGrid", e.what());
+ return 0;
+ }
+ }
+
+ //! get data pointer of nodes
+ std::string getNodesDataPointer();
+ static PyObject *_W_13(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::getNodesDataPointer", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getNodesDataPointer());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::getNodesDataPointer", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::getNodesDataPointer", e.what());
+ return 0;
+ }
+ }
+
+ //! get data pointer of tris
+ std::string getTrisDataPointer();
+ static PyObject *_W_14(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::getTrisDataPointer", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getTrisDataPointer());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::getTrisDataPointer", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::getTrisDataPointer", e.what());
+ return 0;
+ }
+ }
+
+ // ops
+ Mesh &operator=(const Mesh &o);
+
+ // accessors
+ inline int numTris() const
+ {
+ return mTris.size();
+ }
+ inline int numNodes() const
+ {
+ return mNodes.size();
+ }
+ inline int numTriChannels() const
+ {
+ return mTriChannels.size();
+ }
+ inline int numNodeChannels() const
+ {
+ return mNodeChannels.size();
+ }
+
+ //! return size of container
+ //! note , python binding disabled for now! cannot yet deal with long-long types
+ inline IndexInt size() const
+ {
+ return mNodes.size();
+ }
+ //! slow virtual function of base class, also returns size
+ virtual IndexInt getSizeSlow() const
+ {
+ return size();
+ }
+
+ inline Triangle &tris(int i)
+ {
+ return mTris[i];
+ }
+ inline Node &nodes(int i)
+ {
+ return mNodes[i];
+ }
+ inline Corner &corners(int tri, int c)
+ {
+ return mCorners[tri * 3 + c];
+ }
+ inline Corner &corners(int c)
+ {
+ return mCorners[c];
+ }
+ inline NodeChannel *nodeChannel(int i)
+ {
+ return mNodeChannels[i];
+ }
+ inline TriChannel *triChannel(int i)
+ {
+ return mTriChannels[i];
+ }
+
+ // allocate memory (eg upon load)
+ void resizeTris(int numTris);
+ void resizeNodes(int numNodes);
+
+ inline bool isNodeFixed(int n)
+ {
+ return mNodes[n].flags & NfFixed;
+ }
+ inline bool isTriangleFixed(int t)
+ {
+ return (mNodes[mTris[t].c[0]].flags & NfFixed) || (mNodes[mTris[t].c[1]].flags & NfFixed) ||
+ (mNodes[mTris[t].c[2]].flags & NfFixed);
+ }
+
+ inline const Vec3 getNode(int tri, int c) const
+ {
+ return mNodes[mTris[tri].c[c]].pos;
+ }
+ inline Vec3 &getNode(int tri, int c)
+ {
+ return mNodes[mTris[tri].c[c]].pos;
+ }
+ inline const Vec3 getEdge(int tri, int e) const
+ {
+ return getNode(tri, (e + 1) % 3) - getNode(tri, e);
+ }
+ inline OneRing &get1Ring(int node)
+ {
+ return m1RingLookup[node];
+ }
+ inline Real getFaceArea(int t) const
+ {
+ Vec3 c0 = mNodes[mTris[t].c[0]].pos;
+ return 0.5 * norm(cross(mNodes[mTris[t].c[1]].pos - c0, mNodes[mTris[t].c[2]].pos - c0));
+ }
+ inline Vec3 getFaceNormal(int t)
+ {
+ Vec3 c0 = mNodes[mTris[t].c[0]].pos;
+ return getNormalized(cross(mNodes[mTris[t].c[1]].pos - c0, mNodes[mTris[t].c[2]].pos - c0));
+ }
+ inline Vec3 getFaceCenter(int t) const
+ {
+ return (mNodes[mTris[t].c[0]].pos + mNodes[mTris[t].c[1]].pos + mNodes[mTris[t].c[2]].pos) /
+ 3.0;
+ }
+ inline std::vector<Node> &getNodeData()
+ {
+ return mNodes;
+ }
+
+ void mergeNode(int node, int delnode);
+ int addNode(Node a);
+ int addTri(Triangle a);
+ void addCorner(Corner a);
+ void removeTri(int tri);
+ void removeTriFromLookup(int tri);
+ void removeNodes(const std::vector<int> &deletedNodes);
+ void rebuildCorners(int from = 0, int to = -1);
+ void rebuildLookup(int from = 0, int to = -1);
+ void rebuildQuickCheck();
+ void fastNodeLookupRebuild(int corner);
+ void sanityCheck(bool strict = true,
+ std::vector<int> *deletedNodes = 0,
+ std::map<int, bool> *taintedTris = 0);
+
+ void addTriChannel(TriChannel *c)
+ {
+ mTriChannels.push_back(c);
+ rebuildChannels();
+ }
+ void addNodeChannel(NodeChannel *c)
+ {
+ mNodeChannels.push_back(c);
+ rebuildChannels();
+ }
+
+ //! mesh data functions
+
+ //! create a mesh data object
+ PbClass *create(PbType type, PbTypeVec T = PbTypeVec(), const std::string &name = "");
+ static PyObject *_W_15(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Mesh *pbo = dynamic_cast<Mesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Mesh::create", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ PbType type = _args.get<PbType>("type", 0, &_lock);
+ PbTypeVec T = _args.getOpt<PbTypeVec>("T", 1, PbTypeVec(), &_lock);
+ const std::string &name = _args.getOpt<std::string>("name", 2, "", &_lock);
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->create(type, T, name));
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Mesh::create", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Mesh::create", e.what());
+ return 0;
+ }
+ }
+
+ //! add a mesh data field, set its parent mesh pointer
+ void registerMdata(MeshDataBase *mdata);
+ void registerMdataReal(MeshDataImpl<Real> *mdata);
+ void registerMdataVec3(MeshDataImpl<Vec3> *mdata);
+ void registerMdataInt(MeshDataImpl<int> *mdata);
+ //! remove a mesh data entry
+ void deregister(MeshDataBase *mdata);
+ //! add one zero entry to all data fields
+ void addAllMdata();
+ // note - deletion of mdata is handled in compress function
+
+ //! how many are there?
+ IndexInt getNumMdata() const
+ {
+ return mMeshData.size();
+ }
+ //! access one of the fields
+ MeshDataBase *getMdata(int i)
+ {
+ return mMeshData[i];
+ }
+
+ //! update data fields
+ void updateDataFields();
+
+ protected:
+ void rebuildChannels();
+
+ std::vector<Node> mNodes;
+ std::vector<Triangle> mTris;
+ std::vector<Corner> mCorners;
+ std::vector<NodeChannel *> mNodeChannels;
+ std::vector<TriChannel *> mTriChannels;
+ std::vector<OneRing> m1RingLookup;
+
+ //! store mesh data , each pointer has its own storage vector of a certain type (int, real, vec3)
+ std::vector<MeshDataBase *> mMeshData;
+ //! lists of different types, for fast operations w/o virtual function calls
+ std::vector<MeshDataImpl<Real> *> mMdataReal;
+ std::vector<MeshDataImpl<Vec3> *> mMdataVec3;
+ std::vector<MeshDataImpl<int> *>
+ mMdataInt; //! indicate that mdata of this mesh is copied, and needs to be freed
+ bool mFreeMdata;
+ public:
+ PbArgs _args;
+}
+#define _C_Mesh
+;
+
+//******************************************************************************
+
+//! abstract interface for mesh data
+class MeshDataBase : public PbClass { // runtime-polymorphic base for per-node mesh data channels, exposed to Python
+ public:
+ MeshDataBase(FluidSolver *parent);
+ static int _W_16(PyObject *_self, PyObject *_linargs, PyObject *_kwds) // generated Python constructor wrapper
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj; // replace a previously wrapped object, if any
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "MeshDataBase::MeshDataBase", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ obj = new MeshDataBase(parent);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "MeshDataBase::MeshDataBase", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataBase::MeshDataBase", e.what());
+ return -1; // Python convention: -1 signals constructor failure
+ }
+ }
+
+ virtual ~MeshDataBase();
+
+ //! data type IDs, in line with those for grids
+ enum MdataType { TypeNone = 0, TypeReal = 1, TypeInt = 2, TypeVec3 = 4 };
+
+ //! interface functions, using assert instead of pure virtual for python compatibility
+ virtual IndexInt getSizeSlow() const
+ {
+ assertMsg(false, "Dont use, override...");
+ return 0;
+ }
+ virtual void addEntry()
+ {
+ assertMsg(false, "Dont use, override...");
+ return;
+ }
+ virtual MeshDataBase *clone()
+ {
+ assertMsg(false, "Dont use, override...");
+ return NULL;
+ }
+ virtual MdataType getType() const
+ {
+ assertMsg(false, "Dont use, override...");
+ return TypeNone;
+ }
+ virtual void resize(IndexInt size)
+ {
+ assertMsg(false, "Dont use, override...");
+ return;
+ }
+ virtual void copyValueSlow(IndexInt from, IndexInt to)
+ {
+ assertMsg(false, "Dont use, override...");
+ return;
+ }
+
+ //! set base pointer
+ void setMesh(Mesh *set)
+ {
+ mMesh = set;
+ }
+
+ //! debugging
+ inline void checkNodeIndex(IndexInt idx) const;
+
+ protected:
+ Mesh *mMesh; // mesh this data channel is attached to (set via setMesh)
+ public:
+ PbArgs _args;
+}
+#define _C_MeshDataBase
+;
+
+//! abstract interface for mesh data
+
+template<class T> class MeshDataImpl : public MeshDataBase {
+ public:
+ MeshDataImpl(FluidSolver *parent);
+ static int _W_17(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "MeshDataImpl::MeshDataImpl", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ obj = new MeshDataImpl(parent);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "MeshDataImpl::MeshDataImpl", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::MeshDataImpl", e.what());
+ return -1;
+ }
+ }
+
+ MeshDataImpl(FluidSolver *parent, MeshDataImpl<T> *other);
+ virtual ~MeshDataImpl();
+
+ //! access data
+ inline T &get(IndexInt idx)
+ {
+ DEBUG_ONLY(checkNodeIndex(idx));
+ return mData[idx];
+ }
+ inline const T &get(IndexInt idx) const
+ {
+ DEBUG_ONLY(checkNodeIndex(idx));
+ return mData[idx];
+ }
+ inline T &operator[](IndexInt idx)
+ {
+ DEBUG_ONLY(checkNodeIndex(idx));
+ return mData[idx];
+ }
+ inline const T &operator[](IndexInt idx) const
+ {
+ DEBUG_ONLY(checkNodeIndex(idx));
+ return mData[idx];
+ }
+
+ //! set all values to 0, note - different from meshSystem::clear! doesnt modify size of array
+ //! (has to stay in sync with parent system)
+ void clear();
+ static PyObject *_W_18(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::clear", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->clear();
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::clear", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::clear", e.what());
+ return 0;
+ }
+ }
+
+ //! set grid from which to get data...
+ void setSource(Grid<T> *grid, bool isMAC = false);
+ static PyObject *_W_19(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::setSource", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<T> *grid = _args.getPtr<Grid<T>>("grid", 0, &_lock);
+ bool isMAC = _args.getOpt<bool>("isMAC", 1, false, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->setSource(grid, isMAC);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::setSource", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::setSource", e.what());
+ return 0;
+ }
+ }
+
+ //! mesh data base interface
+ virtual IndexInt getSizeSlow() const;
+ virtual void addEntry();
+ virtual MeshDataBase *clone();
+ virtual MdataType getType() const;
+ virtual void resize(IndexInt s);
+ virtual void copyValueSlow(IndexInt from, IndexInt to);
+
+ IndexInt size() const
+ {
+ return mData.size();
+ }
+
+ //! fast inlined functions for per mesh operations
+ inline void copyValue(IndexInt from, IndexInt to)
+ {
+ get(to) = get(from);
+ }
+ void initNewValue(IndexInt idx, Vec3 pos);
+
+ //! python interface (similar to grid data)
+ void setConst(T s);
+ static PyObject *_W_20(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::setConst", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T s = _args.get<T>("s", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->setConst(s);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::setConst", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::setConst", e.what());
+ return 0;
+ }
+ }
+
+ void setConstRange(T s, const int begin, const int end);
+ static PyObject *_W_21(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::setConstRange", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T s = _args.get<T>("s", 0, &_lock);
+ const int begin = _args.get<int>("begin", 1, &_lock);
+ const int end = _args.get<int>("end", 2, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->setConstRange(s, begin, end);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::setConstRange", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::setConstRange", e.what());
+ return 0;
+ }
+ }
+
+ MeshDataImpl<T> &copyFrom(const MeshDataImpl<T> &a);
+ static PyObject *_W_22(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::copyFrom", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const MeshDataImpl<T> &a = *_args.getPtr<MeshDataImpl<T>>("a", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->copyFrom(a));
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::copyFrom", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::copyFrom", e.what());
+ return 0;
+ }
+ }
+
+ void add(const MeshDataImpl<T> &a);
+ static PyObject *_W_23(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::add", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const MeshDataImpl<T> &a = *_args.getPtr<MeshDataImpl<T>>("a", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->add(a);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::add", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::add", e.what());
+ return 0;
+ }
+ }
+
+ void sub(const MeshDataImpl<T> &a);
+ static PyObject *_W_24(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::sub", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const MeshDataImpl<T> &a = *_args.getPtr<MeshDataImpl<T>>("a", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->sub(a);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::sub", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::sub", e.what());
+ return 0;
+ }
+ }
+
+ void addConst(T s);
+ static PyObject *_W_25(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::addConst", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T s = _args.get<T>("s", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->addConst(s);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::addConst", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::addConst", e.what());
+ return 0;
+ }
+ }
+
+ void addScaled(const MeshDataImpl<T> &a, const T &factor);
+ static PyObject *_W_26(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::addScaled", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const MeshDataImpl<T> &a = *_args.getPtr<MeshDataImpl<T>>("a", 0, &_lock);
+ const T &factor = *_args.getPtr<T>("factor", 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->addScaled(a, factor);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::addScaled", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::addScaled", e.what());
+ return 0;
+ }
+ }
+
+ void mult(const MeshDataImpl<T> &a);
+ static PyObject *_W_27(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::mult", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const MeshDataImpl<T> &a = *_args.getPtr<MeshDataImpl<T>>("a", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->mult(a);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::mult", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::mult", e.what());
+ return 0;
+ }
+ }
+
+ void multConst(T s);
+ static PyObject *_W_28(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::multConst", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T s = _args.get<T>("s", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->multConst(s);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::multConst", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::multConst", e.what());
+ return 0;
+ }
+ }
+
+ void safeDiv(const MeshDataImpl<T> &a);
+ static PyObject *_W_29(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::safeDiv", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const MeshDataImpl<T> &a = *_args.getPtr<MeshDataImpl<T>>("a", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->safeDiv(a);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::safeDiv", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::safeDiv", e.what());
+ return 0;
+ }
+ }
+
+ void clamp(Real min, Real max);
+ static PyObject *_W_30(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::clamp", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Real min = _args.get<Real>("min", 0, &_lock);
+ Real max = _args.get<Real>("max", 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->clamp(min, max);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::clamp", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::clamp", e.what());
+ return 0;
+ }
+ }
+
+ void clampMin(Real vmin);
+ static PyObject *_W_31(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::clampMin", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Real vmin = _args.get<Real>("vmin", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->clampMin(vmin);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::clampMin", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::clampMin", e.what());
+ return 0;
+ }
+ }
+
+ void clampMax(Real vmax);
+ static PyObject *_W_32(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::clampMax", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Real vmax = _args.get<Real>("vmax", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->clampMax(vmax);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::clampMax", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::clampMax", e.what());
+ return 0;
+ }
+ }
+
+ Real getMaxAbs();
+ static PyObject *_W_33(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::getMaxAbs", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getMaxAbs());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::getMaxAbs", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::getMaxAbs", e.what());
+ return 0;
+ }
+ }
+
+ Real getMax();
+ static PyObject *_W_34(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::getMax", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getMax());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::getMax", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::getMax", e.what());
+ return 0;
+ }
+ }
+
+ Real getMin();
+ static PyObject *_W_35(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::getMin", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getMin());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::getMin", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::getMin", e.what());
+ return 0;
+ }
+ }
+
+ T sum(const MeshDataImpl<int> *t = NULL, const int itype = 0) const;
+ static PyObject *_W_36(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::sum", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const MeshDataImpl<int> *t = _args.getPtrOpt<MeshDataImpl<int>>("t", 0, NULL, &_lock);
+ const int itype = _args.getOpt<int>("itype", 1, 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->sum(t, itype));
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::sum", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::sum", e.what());
+ return 0;
+ }
+ }
+
+ Real sumSquare() const;
+ static PyObject *_W_37(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::sumSquare", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->sumSquare());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::sumSquare", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::sumSquare", e.what());
+ return 0;
+ }
+ }
+
+ Real sumMagnitude() const;
+ static PyObject *_W_38(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::sumMagnitude", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->sumMagnitude());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::sumMagnitude", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::sumMagnitude", e.what());
+ return 0;
+ }
+ }
+
+ //! special, set if int flag in t has "flag"
+ void setConstIntFlag(T s, const MeshDataImpl<int> &t, const int flag);
+ static PyObject *_W_39(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::setConstIntFlag", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ T s = _args.get<T>("s", 0, &_lock);
+ const MeshDataImpl<int> &t = *_args.getPtr<MeshDataImpl<int>>("t", 1, &_lock);
+ const int flag = _args.get<int>("flag", 2, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->setConstIntFlag(s, t, flag);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::setConstIntFlag", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::setConstIntFlag", e.what());
+ return 0;
+ }
+ }
+
+ void printMdata(IndexInt start = -1, IndexInt stop = -1, bool printIndex = false);
+ static PyObject *_W_40(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::printMdata", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ IndexInt start = _args.getOpt<IndexInt>("start", 0, -1, &_lock);
+ IndexInt stop = _args.getOpt<IndexInt>("stop", 1, -1, &_lock);
+ bool printIndex = _args.getOpt<bool>("printIndex", 2, false, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->printMdata(start, stop, printIndex);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::printMdata", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::printMdata", e.what());
+ return 0;
+ }
+ }
+
+ //! file io
+ void save(const std::string name);
+ static PyObject *_W_41(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::save", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const std::string name = _args.get<std::string>("name", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->save(name);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::save", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::save", e.what());
+ return 0;
+ }
+ }
+
+ void load(const std::string name);
+ static PyObject *_W_42(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::load", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const std::string name = _args.get<std::string>("name", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->load(name);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::load", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::load", e.what());
+ return 0;
+ }
+ }
+
+ //! get data pointer of mesh data
+ std::string getDataPointer();
+ static PyObject *_W_43(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MeshDataImpl *pbo = dynamic_cast<MeshDataImpl *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MeshDataImpl::getDataPointer", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = toPy(pbo->getDataPointer());
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MeshDataImpl::getDataPointer", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MeshDataImpl::getDataPointer", e.what());
+ return 0;
+ }
+ }
+
+ protected:
+ //! data storage
+ std::vector<T> mData;
+
+ //! optionally , we might have an associated grid from which to grab new data
+ Grid<T> *mpGridSource; //! unfortunately , we need to distinguish mac vs regular vec3
+ bool mGridSourceMAC;
+ public:
+ PbArgs _args;
+}
+#define _C_MeshDataImpl
+;
+
+// ***************************************************************************************************************
+// Implementation
+
+template<class T>
+void SimpleNodeChannel<T>::renumber(const std::vector<int> &newIndex, int newsize)
+{
+ for (size_t i = 0; i < newIndex.size(); i++) {
+ if (newIndex[i] != -1)
+ data[newIndex[i]] = data[newsize + i];
+ }
+ data.resize(newsize);
+}
+
+inline void MeshDataBase::checkNodeIndex(IndexInt idx) const
+{
+ IndexInt mySize = this->getSizeSlow();
+ if (idx < 0 || idx > mySize) {
+ errMsg("MeshData "
+ << " size " << mySize << " : index " << idx << " out of bound ");
+ }
+ if (mMesh && mMesh->getSizeSlow() != mySize) {
+ errMsg("MeshData "
+ << " size " << mySize << " does not match parent! (" << mMesh->getSizeSlow() << ") ");
+ }
+}
+
+template<class T> void MeshDataImpl<T>::clear()
+{
+ for (IndexInt i = 0; i < (IndexInt)mData.size(); ++i)
+ mData[i] = 0.;
+}
+
+} // namespace Manta
+#endif
diff --git a/extern/mantaflow/preprocessed/mesh.h.reg.cpp b/extern/mantaflow/preprocessed/mesh.h.reg.cpp
new file mode 100644
index 00000000000..b2ba3e22032
--- /dev/null
+++ b/extern/mantaflow/preprocessed/mesh.h.reg.cpp
@@ -0,0 +1,239 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "mesh.h"
+namespace Manta {
+#ifdef _C_Mesh
+static const Pb::Register _R_12("Mesh", "Mesh", "PbClass");
+template<> const char *Namify<Mesh>::S = "Mesh";
+static const Pb::Register _R_13("Mesh", "Mesh", Mesh::_W_0);
+static const Pb::Register _R_14("Mesh", "clear", Mesh::_W_1);
+static const Pb::Register _R_15("Mesh", "load", Mesh::_W_2);
+static const Pb::Register _R_16("Mesh", "fromShape", Mesh::_W_3);
+static const Pb::Register _R_17("Mesh", "save", Mesh::_W_4);
+static const Pb::Register _R_18("Mesh", "advectInGrid", Mesh::_W_5);
+static const Pb::Register _R_19("Mesh", "scale", Mesh::_W_6);
+static const Pb::Register _R_20("Mesh", "offset", Mesh::_W_7);
+static const Pb::Register _R_21("Mesh", "rotate", Mesh::_W_8);
+static const Pb::Register _R_22("Mesh", "computeVelocity", Mesh::_W_9);
+static const Pb::Register _R_23("Mesh", "computeLevelset", Mesh::_W_10);
+static const Pb::Register _R_24("Mesh", "getLevelset", Mesh::_W_11);
+static const Pb::Register _R_25("Mesh", "applyMeshToGrid", Mesh::_W_12);
+static const Pb::Register _R_26("Mesh", "getNodesDataPointer", Mesh::_W_13);
+static const Pb::Register _R_27("Mesh", "getTrisDataPointer", Mesh::_W_14);
+static const Pb::Register _R_28("Mesh", "create", Mesh::_W_15);
+#endif
+#ifdef _C_MeshDataBase
+static const Pb::Register _R_29("MeshDataBase", "MeshDataBase", "PbClass");
+template<> const char *Namify<MeshDataBase>::S = "MeshDataBase";
+static const Pb::Register _R_30("MeshDataBase", "MeshDataBase", MeshDataBase::_W_16);
+#endif
+#ifdef _C_MeshDataImpl
+static const Pb::Register _R_31("MeshDataImpl<int>", "MeshDataImpl<int>", "MeshDataBase");
+template<> const char *Namify<MeshDataImpl<int>>::S = "MeshDataImpl<int>";
+static const Pb::Register _R_32("MeshDataImpl<int>", "MeshDataImpl", MeshDataImpl<int>::_W_17);
+static const Pb::Register _R_33("MeshDataImpl<int>", "clear", MeshDataImpl<int>::_W_18);
+static const Pb::Register _R_34("MeshDataImpl<int>", "setSource", MeshDataImpl<int>::_W_19);
+static const Pb::Register _R_35("MeshDataImpl<int>", "setConst", MeshDataImpl<int>::_W_20);
+static const Pb::Register _R_36("MeshDataImpl<int>", "setConstRange", MeshDataImpl<int>::_W_21);
+static const Pb::Register _R_37("MeshDataImpl<int>", "copyFrom", MeshDataImpl<int>::_W_22);
+static const Pb::Register _R_38("MeshDataImpl<int>", "add", MeshDataImpl<int>::_W_23);
+static const Pb::Register _R_39("MeshDataImpl<int>", "sub", MeshDataImpl<int>::_W_24);
+static const Pb::Register _R_40("MeshDataImpl<int>", "addConst", MeshDataImpl<int>::_W_25);
+static const Pb::Register _R_41("MeshDataImpl<int>", "addScaled", MeshDataImpl<int>::_W_26);
+static const Pb::Register _R_42("MeshDataImpl<int>", "mult", MeshDataImpl<int>::_W_27);
+static const Pb::Register _R_43("MeshDataImpl<int>", "multConst", MeshDataImpl<int>::_W_28);
+static const Pb::Register _R_44("MeshDataImpl<int>", "safeDiv", MeshDataImpl<int>::_W_29);
+static const Pb::Register _R_45("MeshDataImpl<int>", "clamp", MeshDataImpl<int>::_W_30);
+static const Pb::Register _R_46("MeshDataImpl<int>", "clampMin", MeshDataImpl<int>::_W_31);
+static const Pb::Register _R_47("MeshDataImpl<int>", "clampMax", MeshDataImpl<int>::_W_32);
+static const Pb::Register _R_48("MeshDataImpl<int>", "getMaxAbs", MeshDataImpl<int>::_W_33);
+static const Pb::Register _R_49("MeshDataImpl<int>", "getMax", MeshDataImpl<int>::_W_34);
+static const Pb::Register _R_50("MeshDataImpl<int>", "getMin", MeshDataImpl<int>::_W_35);
+static const Pb::Register _R_51("MeshDataImpl<int>", "sum", MeshDataImpl<int>::_W_36);
+static const Pb::Register _R_52("MeshDataImpl<int>", "sumSquare", MeshDataImpl<int>::_W_37);
+static const Pb::Register _R_53("MeshDataImpl<int>", "sumMagnitude", MeshDataImpl<int>::_W_38);
+static const Pb::Register _R_54("MeshDataImpl<int>", "setConstIntFlag", MeshDataImpl<int>::_W_39);
+static const Pb::Register _R_55("MeshDataImpl<int>", "printMdata", MeshDataImpl<int>::_W_40);
+static const Pb::Register _R_56("MeshDataImpl<int>", "save", MeshDataImpl<int>::_W_41);
+static const Pb::Register _R_57("MeshDataImpl<int>", "load", MeshDataImpl<int>::_W_42);
+static const Pb::Register _R_58("MeshDataImpl<int>", "getDataPointer", MeshDataImpl<int>::_W_43);
+static const Pb::Register _R_59("MeshDataImpl<Real>", "MeshDataImpl<Real>", "MeshDataBase");
+template<> const char *Namify<MeshDataImpl<Real>>::S = "MeshDataImpl<Real>";
+static const Pb::Register _R_60("MeshDataImpl<Real>", "MeshDataImpl", MeshDataImpl<Real>::_W_17);
+static const Pb::Register _R_61("MeshDataImpl<Real>", "clear", MeshDataImpl<Real>::_W_18);
+static const Pb::Register _R_62("MeshDataImpl<Real>", "setSource", MeshDataImpl<Real>::_W_19);
+static const Pb::Register _R_63("MeshDataImpl<Real>", "setConst", MeshDataImpl<Real>::_W_20);
+static const Pb::Register _R_64("MeshDataImpl<Real>", "setConstRange", MeshDataImpl<Real>::_W_21);
+static const Pb::Register _R_65("MeshDataImpl<Real>", "copyFrom", MeshDataImpl<Real>::_W_22);
+static const Pb::Register _R_66("MeshDataImpl<Real>", "add", MeshDataImpl<Real>::_W_23);
+static const Pb::Register _R_67("MeshDataImpl<Real>", "sub", MeshDataImpl<Real>::_W_24);
+static const Pb::Register _R_68("MeshDataImpl<Real>", "addConst", MeshDataImpl<Real>::_W_25);
+static const Pb::Register _R_69("MeshDataImpl<Real>", "addScaled", MeshDataImpl<Real>::_W_26);
+static const Pb::Register _R_70("MeshDataImpl<Real>", "mult", MeshDataImpl<Real>::_W_27);
+static const Pb::Register _R_71("MeshDataImpl<Real>", "multConst", MeshDataImpl<Real>::_W_28);
+static const Pb::Register _R_72("MeshDataImpl<Real>", "safeDiv", MeshDataImpl<Real>::_W_29);
+static const Pb::Register _R_73("MeshDataImpl<Real>", "clamp", MeshDataImpl<Real>::_W_30);
+static const Pb::Register _R_74("MeshDataImpl<Real>", "clampMin", MeshDataImpl<Real>::_W_31);
+static const Pb::Register _R_75("MeshDataImpl<Real>", "clampMax", MeshDataImpl<Real>::_W_32);
+static const Pb::Register _R_76("MeshDataImpl<Real>", "getMaxAbs", MeshDataImpl<Real>::_W_33);
+static const Pb::Register _R_77("MeshDataImpl<Real>", "getMax", MeshDataImpl<Real>::_W_34);
+static const Pb::Register _R_78("MeshDataImpl<Real>", "getMin", MeshDataImpl<Real>::_W_35);
+static const Pb::Register _R_79("MeshDataImpl<Real>", "sum", MeshDataImpl<Real>::_W_36);
+static const Pb::Register _R_80("MeshDataImpl<Real>", "sumSquare", MeshDataImpl<Real>::_W_37);
+static const Pb::Register _R_81("MeshDataImpl<Real>", "sumMagnitude", MeshDataImpl<Real>::_W_38);
+static const Pb::Register _R_82("MeshDataImpl<Real>",
+ "setConstIntFlag",
+ MeshDataImpl<Real>::_W_39);
+static const Pb::Register _R_83("MeshDataImpl<Real>", "printMdata", MeshDataImpl<Real>::_W_40);
+static const Pb::Register _R_84("MeshDataImpl<Real>", "save", MeshDataImpl<Real>::_W_41);
+static const Pb::Register _R_85("MeshDataImpl<Real>", "load", MeshDataImpl<Real>::_W_42);
+static const Pb::Register _R_86("MeshDataImpl<Real>", "getDataPointer", MeshDataImpl<Real>::_W_43);
+static const Pb::Register _R_87("MeshDataImpl<Vec3>", "MeshDataImpl<Vec3>", "MeshDataBase");
+template<> const char *Namify<MeshDataImpl<Vec3>>::S = "MeshDataImpl<Vec3>";
+static const Pb::Register _R_88("MeshDataImpl<Vec3>", "MeshDataImpl", MeshDataImpl<Vec3>::_W_17);
+static const Pb::Register _R_89("MeshDataImpl<Vec3>", "clear", MeshDataImpl<Vec3>::_W_18);
+static const Pb::Register _R_90("MeshDataImpl<Vec3>", "setSource", MeshDataImpl<Vec3>::_W_19);
+static const Pb::Register _R_91("MeshDataImpl<Vec3>", "setConst", MeshDataImpl<Vec3>::_W_20);
+static const Pb::Register _R_92("MeshDataImpl<Vec3>", "setConstRange", MeshDataImpl<Vec3>::_W_21);
+static const Pb::Register _R_93("MeshDataImpl<Vec3>", "copyFrom", MeshDataImpl<Vec3>::_W_22);
+static const Pb::Register _R_94("MeshDataImpl<Vec3>", "add", MeshDataImpl<Vec3>::_W_23);
+static const Pb::Register _R_95("MeshDataImpl<Vec3>", "sub", MeshDataImpl<Vec3>::_W_24);
+static const Pb::Register _R_96("MeshDataImpl<Vec3>", "addConst", MeshDataImpl<Vec3>::_W_25);
+static const Pb::Register _R_97("MeshDataImpl<Vec3>", "addScaled", MeshDataImpl<Vec3>::_W_26);
+static const Pb::Register _R_98("MeshDataImpl<Vec3>", "mult", MeshDataImpl<Vec3>::_W_27);
+static const Pb::Register _R_99("MeshDataImpl<Vec3>", "multConst", MeshDataImpl<Vec3>::_W_28);
+static const Pb::Register _R_100("MeshDataImpl<Vec3>", "safeDiv", MeshDataImpl<Vec3>::_W_29);
+static const Pb::Register _R_101("MeshDataImpl<Vec3>", "clamp", MeshDataImpl<Vec3>::_W_30);
+static const Pb::Register _R_102("MeshDataImpl<Vec3>", "clampMin", MeshDataImpl<Vec3>::_W_31);
+static const Pb::Register _R_103("MeshDataImpl<Vec3>", "clampMax", MeshDataImpl<Vec3>::_W_32);
+static const Pb::Register _R_104("MeshDataImpl<Vec3>", "getMaxAbs", MeshDataImpl<Vec3>::_W_33);
+static const Pb::Register _R_105("MeshDataImpl<Vec3>", "getMax", MeshDataImpl<Vec3>::_W_34);
+static const Pb::Register _R_106("MeshDataImpl<Vec3>", "getMin", MeshDataImpl<Vec3>::_W_35);
+static const Pb::Register _R_107("MeshDataImpl<Vec3>", "sum", MeshDataImpl<Vec3>::_W_36);
+static const Pb::Register _R_108("MeshDataImpl<Vec3>", "sumSquare", MeshDataImpl<Vec3>::_W_37);
+static const Pb::Register _R_109("MeshDataImpl<Vec3>", "sumMagnitude", MeshDataImpl<Vec3>::_W_38);
+static const Pb::Register _R_110("MeshDataImpl<Vec3>",
+ "setConstIntFlag",
+ MeshDataImpl<Vec3>::_W_39);
+static const Pb::Register _R_111("MeshDataImpl<Vec3>", "printMdata", MeshDataImpl<Vec3>::_W_40);
+static const Pb::Register _R_112("MeshDataImpl<Vec3>", "save", MeshDataImpl<Vec3>::_W_41);
+static const Pb::Register _R_113("MeshDataImpl<Vec3>", "load", MeshDataImpl<Vec3>::_W_42);
+static const Pb::Register _R_114("MeshDataImpl<Vec3>",
+ "getDataPointer",
+ MeshDataImpl<Vec3>::_W_43);
+#endif
+static const Pb::Register _R_9("MeshDataImpl<int>", "MdataInt", "");
+static const Pb::Register _R_10("MeshDataImpl<Real>", "MdataReal", "");
+static const Pb::Register _R_11("MeshDataImpl<Vec3>", "MdataVec3", "");
+extern "C" {
+void PbRegister_file_9()
+{
+ KEEP_UNUSED(_R_12);
+ KEEP_UNUSED(_R_13);
+ KEEP_UNUSED(_R_14);
+ KEEP_UNUSED(_R_15);
+ KEEP_UNUSED(_R_16);
+ KEEP_UNUSED(_R_17);
+ KEEP_UNUSED(_R_18);
+ KEEP_UNUSED(_R_19);
+ KEEP_UNUSED(_R_20);
+ KEEP_UNUSED(_R_21);
+ KEEP_UNUSED(_R_22);
+ KEEP_UNUSED(_R_23);
+ KEEP_UNUSED(_R_24);
+ KEEP_UNUSED(_R_25);
+ KEEP_UNUSED(_R_26);
+ KEEP_UNUSED(_R_27);
+ KEEP_UNUSED(_R_28);
+ KEEP_UNUSED(_R_29);
+ KEEP_UNUSED(_R_30);
+ KEEP_UNUSED(_R_31);
+ KEEP_UNUSED(_R_32);
+ KEEP_UNUSED(_R_33);
+ KEEP_UNUSED(_R_34);
+ KEEP_UNUSED(_R_35);
+ KEEP_UNUSED(_R_36);
+ KEEP_UNUSED(_R_37);
+ KEEP_UNUSED(_R_38);
+ KEEP_UNUSED(_R_39);
+ KEEP_UNUSED(_R_40);
+ KEEP_UNUSED(_R_41);
+ KEEP_UNUSED(_R_42);
+ KEEP_UNUSED(_R_43);
+ KEEP_UNUSED(_R_44);
+ KEEP_UNUSED(_R_45);
+ KEEP_UNUSED(_R_46);
+ KEEP_UNUSED(_R_47);
+ KEEP_UNUSED(_R_48);
+ KEEP_UNUSED(_R_49);
+ KEEP_UNUSED(_R_50);
+ KEEP_UNUSED(_R_51);
+ KEEP_UNUSED(_R_52);
+ KEEP_UNUSED(_R_53);
+ KEEP_UNUSED(_R_54);
+ KEEP_UNUSED(_R_55);
+ KEEP_UNUSED(_R_56);
+ KEEP_UNUSED(_R_57);
+ KEEP_UNUSED(_R_58);
+ KEEP_UNUSED(_R_59);
+ KEEP_UNUSED(_R_60);
+ KEEP_UNUSED(_R_61);
+ KEEP_UNUSED(_R_62);
+ KEEP_UNUSED(_R_63);
+ KEEP_UNUSED(_R_64);
+ KEEP_UNUSED(_R_65);
+ KEEP_UNUSED(_R_66);
+ KEEP_UNUSED(_R_67);
+ KEEP_UNUSED(_R_68);
+ KEEP_UNUSED(_R_69);
+ KEEP_UNUSED(_R_70);
+ KEEP_UNUSED(_R_71);
+ KEEP_UNUSED(_R_72);
+ KEEP_UNUSED(_R_73);
+ KEEP_UNUSED(_R_74);
+ KEEP_UNUSED(_R_75);
+ KEEP_UNUSED(_R_76);
+ KEEP_UNUSED(_R_77);
+ KEEP_UNUSED(_R_78);
+ KEEP_UNUSED(_R_79);
+ KEEP_UNUSED(_R_80);
+ KEEP_UNUSED(_R_81);
+ KEEP_UNUSED(_R_82);
+ KEEP_UNUSED(_R_83);
+ KEEP_UNUSED(_R_84);
+ KEEP_UNUSED(_R_85);
+ KEEP_UNUSED(_R_86);
+ KEEP_UNUSED(_R_87);
+ KEEP_UNUSED(_R_88);
+ KEEP_UNUSED(_R_89);
+ KEEP_UNUSED(_R_90);
+ KEEP_UNUSED(_R_91);
+ KEEP_UNUSED(_R_92);
+ KEEP_UNUSED(_R_93);
+ KEEP_UNUSED(_R_94);
+ KEEP_UNUSED(_R_95);
+ KEEP_UNUSED(_R_96);
+ KEEP_UNUSED(_R_97);
+ KEEP_UNUSED(_R_98);
+ KEEP_UNUSED(_R_99);
+ KEEP_UNUSED(_R_100);
+ KEEP_UNUSED(_R_101);
+ KEEP_UNUSED(_R_102);
+ KEEP_UNUSED(_R_103);
+ KEEP_UNUSED(_R_104);
+ KEEP_UNUSED(_R_105);
+ KEEP_UNUSED(_R_106);
+ KEEP_UNUSED(_R_107);
+ KEEP_UNUSED(_R_108);
+ KEEP_UNUSED(_R_109);
+ KEEP_UNUSED(_R_110);
+ KEEP_UNUSED(_R_111);
+ KEEP_UNUSED(_R_112);
+ KEEP_UNUSED(_R_113);
+ KEEP_UNUSED(_R_114);
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/movingobs.cpp b/extern/mantaflow/preprocessed/movingobs.cpp
new file mode 100644
index 00000000000..a6ff01fd1e0
--- /dev/null
+++ b/extern/mantaflow/preprocessed/movingobs.cpp
@@ -0,0 +1,112 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Moving obstacles
+ *
+ ******************************************************************************/
+
+#include "movingobs.h"
+#include "commonkernels.h"
+#include "randomstream.h"
+
+using namespace std;
+namespace Manta {
+
+//******************************************************************************
+// MovingObs class members
+
+int MovingObstacle::sIDcnt = 10;
+
+MovingObstacle::MovingObstacle(FluidSolver *parent, int emptyType)
+ : PbClass(parent), mEmptyType(emptyType)
+{
+ mID = 1 << sIDcnt;
+ sIDcnt++;
+ if (sIDcnt > 15)
+ errMsg(
+ "currently only 5 separate moving obstacles supported (are you generating them in a "
+ "loop?)");
+}
+
+void MovingObstacle::add(Shape *shape)
+{
+ mShapes.push_back(shape);
+}
+
+void MovingObstacle::projectOutside(FlagGrid &flags, BasicParticleSystem &parts)
+{
+ LevelsetGrid levelset(mParent, false);
+ Grid<Vec3> gradient(mParent);
+
+ // rebuild obstacle levelset
+ FOR_IDX(levelset)
+ {
+ levelset[idx] = flags.isObstacle(idx) ? -0.5 : 0.5;
+ }
+ levelset.reinitMarching(flags, 6.0, 0, true, false, FlagGrid::TypeReserved);
+
+ // build levelset gradient
+ GradientOp(gradient, levelset);
+
+ parts.projectOutside(gradient);
+}
+
+void MovingObstacle::moveLinear(
+ Real t, Real t0, Real t1, Vec3 p0, Vec3 p1, FlagGrid &flags, MACGrid &vel, bool smooth)
+{
+ Real alpha = (t - t0) / (t1 - t0);
+ if (alpha >= 0 && alpha <= 1) {
+ Vec3 v = (p1 - p0) / ((t1 - t0) * getParent()->getDt());
+
+ // ease in and out
+ if (smooth) {
+ v *= 6.0f * (alpha - square(alpha));
+ alpha = square(alpha) * (3.0f - 2.0f * alpha);
+ }
+
+ Vec3 pos = alpha * p1 + (1.0f - alpha) * p0;
+ for (size_t i = 0; i < mShapes.size(); i++)
+ mShapes[i]->setCenter(pos);
+
+ // reset flags
+ FOR_IDX(flags)
+ {
+ if ((flags[idx] & mID) != 0)
+ flags[idx] = mEmptyType;
+ }
+ // apply new flags
+ for (size_t i = 0; i < mShapes.size(); i++) {
+#if NOPYTHON != 1
+ mShapes[i]->_args.clear();
+ mShapes[i]->_args.add("value", FlagGrid::TypeObstacle | mID);
+ mShapes[i]->applyToGrid(&flags, 0);
+#else
+ errMsg("Not yet supported...");
+#endif
+ }
+ // apply velocities
+ FOR_IJK_BND(flags, 1)
+ {
+ bool cur = (flags(i, j, k) & mID) != 0;
+ if (cur || (flags(i - 1, j, k) & mID) != 0)
+ vel(i, j, k).x = v.x;
+ if (cur || (flags(i, j - 1, k) & mID) != 0)
+ vel(i, j, k).y = v.y;
+ if (cur || (flags(i, j, k - 1) & mID) != 0)
+ vel(i, j, k).z = v.z;
+ }
+ }
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/movingobs.h b/extern/mantaflow/preprocessed/movingobs.h
new file mode 100644
index 00000000000..71cc441f1d0
--- /dev/null
+++ b/extern/mantaflow/preprocessed/movingobs.h
@@ -0,0 +1,164 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * moving obstacles
+ *
+ ******************************************************************************/
+
+#ifndef _MOVINGOBS_H
+#define _MOVINGOBS_H
+
+#include "shapes.h"
+#include "particle.h"
+
+namespace Manta {
+
+//! Moving obstacle composed of basic shapes
+class MovingObstacle : public PbClass {
+ public:
+ MovingObstacle(FluidSolver *parent, int emptyType = FlagGrid::TypeEmpty);
+ static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "MovingObstacle::MovingObstacle", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ int emptyType = _args.getOpt<int>("emptyType", 1, FlagGrid::TypeEmpty, &_lock);
+ obj = new MovingObstacle(parent, emptyType);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "MovingObstacle::MovingObstacle", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("MovingObstacle::MovingObstacle", e.what());
+ return -1;
+ }
+ }
+
+ void add(Shape *shape);
+ static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MovingObstacle *pbo = dynamic_cast<MovingObstacle *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MovingObstacle::add", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Shape *shape = _args.getPtr<Shape>("shape", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->add(shape);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MovingObstacle::add", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MovingObstacle::add", e.what());
+ return 0;
+ }
+ }
+
+ //! If t in [t0,t1], apply linear motion path from p0 to p1
+ void moveLinear(Real t,
+ Real t0,
+ Real t1,
+ Vec3 p0,
+ Vec3 p1,
+ FlagGrid &flags,
+ MACGrid &vel,
+ bool smooth = true);
+ static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MovingObstacle *pbo = dynamic_cast<MovingObstacle *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MovingObstacle::moveLinear", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Real t = _args.get<Real>("t", 0, &_lock);
+ Real t0 = _args.get<Real>("t0", 1, &_lock);
+ Real t1 = _args.get<Real>("t1", 2, &_lock);
+ Vec3 p0 = _args.get<Vec3>("p0", 3, &_lock);
+ Vec3 p1 = _args.get<Vec3>("p1", 4, &_lock);
+ FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 5, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 6, &_lock);
+ bool smooth = _args.getOpt<bool>("smooth", 7, true, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->moveLinear(t, t0, t1, p0, p1, flags, vel, smooth);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MovingObstacle::moveLinear", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MovingObstacle::moveLinear", e.what());
+ return 0;
+ }
+ }
+
+ //! Compute levelset, and project FLIP particles outside obstacles
+ void projectOutside(FlagGrid &flags, BasicParticleSystem &flip);
+ static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ MovingObstacle *pbo = dynamic_cast<MovingObstacle *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "MovingObstacle::projectOutside", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ BasicParticleSystem &flip = *_args.getPtr<BasicParticleSystem>("flip", 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->projectOutside(flags, flip);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "MovingObstacle::projectOutside", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("MovingObstacle::projectOutside", e.what());
+ return 0;
+ }
+ }
+
+ protected:
+ std::vector<Shape *> mShapes;
+ int mEmptyType;
+ int mID;
+ static int sIDcnt;
+ public:
+ PbArgs _args;
+}
+#define _C_MovingObstacle
+;
+
+} // namespace Manta
+#endif
diff --git a/extern/mantaflow/preprocessed/movingobs.h.reg.cpp b/extern/mantaflow/preprocessed/movingobs.h.reg.cpp
new file mode 100644
index 00000000000..b5a01a83c3b
--- /dev/null
+++ b/extern/mantaflow/preprocessed/movingobs.h.reg.cpp
@@ -0,0 +1,26 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "movingobs.h"
+namespace Manta {
+#ifdef _C_MovingObstacle
+static const Pb::Register _R_17("MovingObstacle", "MovingObstacle", "PbClass");
+template<> const char *Namify<MovingObstacle>::S = "MovingObstacle";
+static const Pb::Register _R_18("MovingObstacle", "MovingObstacle", MovingObstacle::_W_0);
+static const Pb::Register _R_19("MovingObstacle", "add", MovingObstacle::_W_1);
+static const Pb::Register _R_20("MovingObstacle", "moveLinear", MovingObstacle::_W_2);
+static const Pb::Register _R_21("MovingObstacle", "projectOutside", MovingObstacle::_W_3);
+#endif
+extern "C" {
+void PbRegister_file_17()
+{
+ KEEP_UNUSED(_R_17);
+ KEEP_UNUSED(_R_18);
+ KEEP_UNUSED(_R_19);
+ KEEP_UNUSED(_R_20);
+ KEEP_UNUSED(_R_21);
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/multigrid.cpp b/extern/mantaflow/preprocessed/multigrid.cpp
new file mode 100644
index 00000000000..9e35c6f9368
--- /dev/null
+++ b/extern/mantaflow/preprocessed/multigrid.cpp
@@ -0,0 +1,1857 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Multigrid solver
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Copyright 2016, by Florian Ferstl (florian.ferstl.ff@gmail.com)
+ *
+ * This is an implementation of the solver developed by Dick et al. [1]
+ * without topology awareness (= vertex duplication on coarser levels). This
+ * simplification allows us to use regular grids for all levels of the multigrid
+ * hierarchy and works well for moderately complex domains.
+ *
+ * [1] Solving the Fluid Pressure Poisson Equation Using Multigrid-Evaluation
+ * and Improvements, C. Dick, M. Rogowsky, R. Westermann, IEEE TVCG 2015
+ *
+ ******************************************************************************/
+
+#include "multigrid.h"
+
+#define FOR_LVL(IDX, LVL) for (int IDX = 0; IDX < mb[LVL].size(); IDX++)
+
+#define FOR_VEC_MINMAX(VEC, MIN, MAX) \
+ Vec3i VEC; \
+ const Vec3i VEC##__min = (MIN), VEC##__max = (MAX); \
+ for (VEC.z = VEC##__min.z; VEC.z <= VEC##__max.z; VEC.z++) \
+ for (VEC.y = VEC##__min.y; VEC.y <= VEC##__max.y; VEC.y++) \
+ for (VEC.x = VEC##__min.x; VEC.x <= VEC##__max.x; VEC.x++)
+
+#define FOR_VECLIN_MINMAX(VEC, LIN, MIN, MAX) \
+ Vec3i VEC; \
+ int LIN = 0; \
+ const Vec3i VEC##__min = (MIN), VEC##__max = (MAX); \
+ for (VEC.z = VEC##__min.z; VEC.z <= VEC##__max.z; VEC.z++) \
+ for (VEC.y = VEC##__min.y; VEC.y <= VEC##__max.y; VEC.y++) \
+ for (VEC.x = VEC##__min.x; VEC.x <= VEC##__max.x; VEC.x++, LIN++)
+
+#define MG_TIMINGS(X)
+//#define MG_TIMINGS(X) X
+
+using namespace std;
+namespace Manta {
+
+// Helper class for calling mantaflow kernels with a specific number of threads
+class ThreadSize {
+ IndexInt s;
+
+ public:
+ ThreadSize(IndexInt _s)
+ {
+ s = _s;
+ }
+ IndexInt size()
+ {
+ return s;
+ }
+};
+
+// ----------------------------------------------------------------------------
+// Efficient min heap for <ID, key> pairs with 0<=ID<N and 0<=key<K
+// (elements are stored in K buckets, where each bucket is a doubly linked list).
+// - if K<<N, all ops are O(1) on avg (worst case O(K)).
+// - memory usage O(K+N): (K+N) * 3 * sizeof(int).
+class NKMinHeap {
+ private:
+ struct Entry {
+ int key, prev, next;
+ Entry() : key(-1), prev(-1), next(-1)
+ {
+ }
+ };
+
+ int mN, mK, mSize, mMinKey;
+
+ // Double linked lists of IDs, one for each bucket/key.
+ // The first K entries are the buckets' head pointers,
+ // and the last N entries correspond to the IDs.
+ std::vector<Entry> mEntries;
+
+ public:
+ NKMinHeap(int N, int K) : mN(N), mK(K), mSize(0), mMinKey(-1), mEntries(N + K)
+ {
+ }
+
+ int size()
+ {
+ return mSize;
+ }
+ int getKey(int ID)
+ {
+ return mEntries[mK + ID].key;
+ }
+
+ // Insert, decrease or increase key (or delete by setting key to -1)
+ void setKey(int ID, int key);
+
+ // peek min key (returns ID/key pair)
+ std::pair<int, int> peekMin();
+
+ // pop min key (returns ID/key pair)
+ std::pair<int, int> popMin();
+
+ void print(); // for debugging
+};
+
+void NKMinHeap::setKey(int ID, int key)
+{
+ assertMsg(0 <= ID && ID < mN, "NKMinHeap::setKey: ID out of range");
+ assertMsg(-1 <= key && key < mK, "NKMinHeap::setKey: key out of range");
+
+ const int kid = mK + ID;
+
+ if (mEntries[kid].key == key)
+ return; // nothing changes
+
+ // remove from old key-list if ID existed previously
+ if (mEntries[kid].key != -1) {
+ int pred = mEntries[kid].prev;
+ int succ = mEntries[kid].next; // can be -1
+
+ mEntries[pred].next = succ;
+ if (succ != -1)
+ mEntries[succ].prev = pred;
+
+ // if removed key was minimum key, mMinKey may need to be updated
+ int removedKey = mEntries[kid].key;
+ if (removedKey == mMinKey) {
+ if (mSize == 1) {
+ mMinKey = -1;
+ }
+ else {
+ for (; mMinKey < mK; mMinKey++) {
+ if (mEntries[mMinKey].next != -1)
+ break;
+ }
+ }
+ }
+
+ mSize--;
+ }
+
+ // set new key of ID
+ mEntries[kid].key = key;
+
+ if (key == -1) {
+ // finished if key was set to -1
+ mEntries[kid].next = mEntries[kid].prev = -1;
+ return;
+ }
+
+ // add key
+ mSize++;
+ if (mMinKey == -1)
+ mMinKey = key;
+ else
+ mMinKey = std::min(mMinKey, key);
+
+ // insert into new key-list (headed by mEntries[key])
+ int tmp = mEntries[key].next;
+
+ mEntries[key].next = kid;
+ mEntries[kid].prev = key;
+
+ mEntries[kid].next = tmp;
+ if (tmp != -1)
+ mEntries[tmp].prev = kid;
+}
+
+std::pair<int, int> NKMinHeap::peekMin()
+{
+ if (mSize == 0)
+ return std::pair<int, int>(-1, -1); // error
+
+ const int ID = mEntries[mMinKey].next - mK;
+ return std::pair<int, int>(ID, mMinKey);
+}
+
+std::pair<int, int> NKMinHeap::popMin()
+{
+ if (mSize == 0)
+ return std::pair<int, int>(-1, -1); // error
+
+ const int kid = mEntries[mMinKey].next;
+ const int ID = kid - mK;
+ const int key = mMinKey;
+
+ // remove from key-list
+ int pred = mEntries[kid].prev;
+ int succ = mEntries[kid].next; // can be -1
+
+ mEntries[pred].next = succ;
+ if (succ != -1)
+ mEntries[succ].prev = pred;
+
+ // remove entry
+ mEntries[kid] = Entry();
+ mSize--;
+
+ // update mMinKey
+ if (mSize == 0) {
+ mMinKey = -1;
+ }
+ else {
+ for (; mMinKey < mK; mMinKey++) {
+ if (mEntries[mMinKey].next != -1)
+ break;
+ }
+ }
+
+ // return result
+ return std::pair<int, int>(ID, key);
+}
+
+void NKMinHeap::print()
+{
+ std::cout << "Size: " << mSize << ", MinKey: " << mMinKey << std::endl;
+ for (int key = 0; key < mK; key++) {
+ if (mEntries[key].next != -1) {
+ std::cout << "Key " << key << ": ";
+ int kid = mEntries[key].next;
+ while (kid != -1) {
+ std::cout << kid - mK << " ";
+ kid = mEntries[kid].next;
+ }
+ std::cout << std::endl;
+ }
+ }
+ std::cout << std::endl;
+}
+
+// ----------------------------------------------------------------------------
+// GridMg methods
+//
+// Illustration of 27-point stencil indices
+// y | z = -1 z = 0 z = 1
+// ^ | 6 7 8, 15 16 17, 24 25 26
+// | | 3 4 5, 12 13 14, 21 22 23
+// o-> x | 0 1 2, 9 10 11, 18 19 20
+//
+// Symmetric storage with only 14 entries per vertex
+// y | z = -1 z = 0 z = 1
+// ^ | - - -, 2 3 4, 11 12 13
+// | | - - -, - 0 1, 8 9 10
+// o-> x | - - -, - - -, 5 6 7
+
+GridMg::GridMg(const Vec3i &gridSize)
+ : mNumPreSmooth(1),
+ mNumPostSmooth(1),
+ mCoarsestLevelAccuracy(Real(1E-8)),
+ mTrivialEquationScale(Real(1E-6)),
+ mIsASet(false),
+ mIsRhsSet(false)
+{
+ MG_TIMINGS(MuTime time;)
+
+ // 2D or 3D mode
+ mIs3D = (gridSize.z > 1);
+ mDim = mIs3D ? 3 : 2;
+ mStencilSize = mIs3D ? 14 : 5; // A has a full 27-point stencil on levels > 0
+ mStencilSize0 = mIs3D ? 4 : 3; // A has a 7-point stencil on level 0
+ mStencilMin = Vec3i(-1, -1, mIs3D ? -1 : 0);
+ mStencilMax = Vec3i(1, 1, mIs3D ? 1 : 0);
+
+ // Create level 0 (=original grid)
+ mSize.push_back(gridSize);
+ mPitch.push_back(Vec3i(1, mSize.back().x, mSize.back().x * mSize.back().y));
+ int n = mSize.back().x * mSize.back().y * mSize.back().z;
+
+ mA.push_back(std::vector<Real>(n * mStencilSize0));
+ mx.push_back(std::vector<Real>(n));
+ mb.push_back(std::vector<Real>(n));
+ mr.push_back(std::vector<Real>(n));
+ mType.push_back(std::vector<VertexType>(n));
+ mCGtmp1.push_back(std::vector<double>());
+ mCGtmp2.push_back(std::vector<double>());
+ mCGtmp3.push_back(std::vector<double>());
+ mCGtmp4.push_back(std::vector<double>());
+
+ debMsg("GridMg::GridMg level 0: " << mSize[0].x << " x " << mSize[0].y << " x " << mSize[0].z
+ << " x ",
+ 2);
+
+ // Create coarse levels >0
+ for (int l = 1; l <= 100; l++) {
+ if (mSize[l - 1].x <= 5 && mSize[l - 1].y <= 5 && mSize[l - 1].z <= 5)
+ break;
+ if (n <= 1000)
+ break;
+
+ mSize.push_back((mSize[l - 1] + 2) / 2);
+ mPitch.push_back(Vec3i(1, mSize.back().x, mSize.back().x * mSize.back().y));
+ n = mSize.back().x * mSize.back().y * mSize.back().z;
+
+ mA.push_back(std::vector<Real>(n * mStencilSize));
+ mx.push_back(std::vector<Real>(n));
+ mb.push_back(std::vector<Real>(n));
+ mr.push_back(std::vector<Real>(n));
+ mType.push_back(std::vector<VertexType>(n));
+ mCGtmp1.push_back(std::vector<double>());
+ mCGtmp2.push_back(std::vector<double>());
+ mCGtmp3.push_back(std::vector<double>());
+ mCGtmp4.push_back(std::vector<double>());
+
+ debMsg("GridMg::GridMg level " << l << ": " << mSize[l].x << " x " << mSize[l].y << " x "
+ << mSize[l].z << " x ",
+ 2);
+ }
+
+ // Additional memory for CG on coarsest level
+ mCGtmp1.back() = std::vector<double>(n);
+ mCGtmp2.back() = std::vector<double>(n);
+ mCGtmp3.back() = std::vector<double>(n);
+ mCGtmp4.back() = std::vector<double>(n);
+
+ MG_TIMINGS(debMsg("GridMg: Allocation done in " << time.update(), 1);)
+
+ // Precalculate coarsening paths:
+ // (V) <--restriction-- (U) <--A_{l-1}-- (W) <--interpolation-- (N)
+ Vec3i p7stencil[7] = {Vec3i(0, 0, 0),
+ Vec3i(-1, 0, 0),
+ Vec3i(1, 0, 0),
+ Vec3i(0, -1, 0),
+ Vec3i(0, 1, 0),
+ Vec3i(0, 0, -1),
+ Vec3i(0, 0, 1)};
+ Vec3i V(1, 1, 1); // reference coarse grid vertex at (1,1,1)
+ FOR_VEC_MINMAX(U, V * 2 + mStencilMin, V * 2 + mStencilMax)
+ {
+ for (int i = 0; i < 1 + 2 * mDim; i++) {
+ Vec3i W = U + p7stencil[i];
+ FOR_VEC_MINMAX(N, W / 2, (W + 1) / 2)
+ {
+ int s = dot(N, Vec3i(1, 3, 9));
+
+ if (s >= 13) {
+ CoarseningPath path;
+ path.N = N - 1; // offset of N on coarse grid
+ path.U = U - V * 2; // offset of U on fine grid
+ path.W = W - V * 2; // offset of W on fine grid
+ path.sc = s - 13; // stencil index corresponding to V<-N on coarse grid
+ path.sf = (i + 1) / 2; // stencil index corresponding to U<-W on coarse grid
+ path.inUStencil = (i % 2 == 0); // fine grid stencil entry stored at U or W?
+ path.rw = Real(1) /
+ Real(1 << ((U.x % 2) + (U.y % 2) + (U.z % 2))); // restriction weight V<-U
+ path.iw = Real(1) /
+ Real(1 << ((W.x % 2) + (W.y % 2) + (W.z % 2))); // interpolation weight W<-N
+ mCoarseningPaths0.push_back(path);
+ }
+ }
+ }
+ }
+
+ auto pathLess = [](const GridMg::CoarseningPath &p1, const GridMg::CoarseningPath &p2) {
+ if (p1.sc == p2.sc)
+ return dot(p1.U + 1, Vec3i(1, 3, 9)) < dot(p2.U + 1, Vec3i(1, 3, 9));
+ return p1.sc < p2.sc;
+ };
+ std::sort(mCoarseningPaths0.begin(), mCoarseningPaths0.end(), pathLess);
+}
+
+void GridMg::analyzeStencil(int v,
+ bool is3D,
+ bool &isStencilSumNonZero,
+ bool &isEquationTrivial) const
+{
+ Vec3i V = vecIdx(v, 0);
+
+ // collect stencil entries
+ Real A[7];
+ A[0] = mA[0][v * mStencilSize0 + 0];
+ A[1] = mA[0][v * mStencilSize0 + 1];
+ A[2] = mA[0][v * mStencilSize0 + 2];
+ A[3] = is3D ? mA[0][v * mStencilSize0 + 3] : Real(0);
+ A[4] = V.x != 0 ? mA[0][(v - mPitch[0].x) * mStencilSize0 + 1] : Real(0);
+ A[5] = V.y != 0 ? mA[0][(v - mPitch[0].y) * mStencilSize0 + 2] : Real(0);
+ A[6] = V.z != 0 && is3D ? mA[0][(v - mPitch[0].z) * mStencilSize0 + 3] : Real(0);
+
+ // compute sum of stencil entries
+ Real stencilMax = Real(0), stencilSum = Real(0);
+ for (int i = 0; i < 7; i++) {
+ stencilSum += A[i];
+ stencilMax = max(stencilMax, std::abs(A[i]));
+ }
+
+ // check if sum is numerically zero
+ isStencilSumNonZero = std::abs(stencilSum / stencilMax) > Real(1E-6);
+
+ // check for trivial equation (exact comparisons)
+ isEquationTrivial = A[0] == Real(1) && A[1] == Real(0) && A[2] == Real(0) && A[3] == Real(0) &&
+ A[4] == Real(0) && A[5] == Real(0) && A[6] == Real(0);
+}
+
+struct knCopyA : public KernelBase {
+ knCopyA(std::vector<Real> &sizeRef,
+ std::vector<Real> &A0,
+ int stencilSize0,
+ bool is3D,
+ const Grid<Real> *pA0,
+ const Grid<Real> *pAi,
+ const Grid<Real> *pAj,
+ const Grid<Real> *pAk)
+ : KernelBase(sizeRef.size()),
+ sizeRef(sizeRef),
+ A0(A0),
+ stencilSize0(stencilSize0),
+ is3D(is3D),
+ pA0(pA0),
+ pAi(pAi),
+ pAj(pAj),
+ pAk(pAk)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ std::vector<Real> &sizeRef,
+ std::vector<Real> &A0,
+ int stencilSize0,
+ bool is3D,
+ const Grid<Real> *pA0,
+ const Grid<Real> *pAi,
+ const Grid<Real> *pAj,
+ const Grid<Real> *pAk) const
+ {
+ A0[idx * stencilSize0 + 0] = (*pA0)[idx];
+ A0[idx * stencilSize0 + 1] = (*pAi)[idx];
+ A0[idx * stencilSize0 + 2] = (*pAj)[idx];
+ if (is3D)
+ A0[idx * stencilSize0 + 3] = (*pAk)[idx];
+ }
+ inline std::vector<Real> &getArg0()
+ {
+ return sizeRef;
+ }
+ typedef std::vector<Real> type0;
+ inline std::vector<Real> &getArg1()
+ {
+ return A0;
+ }
+ typedef std::vector<Real> type1;
+ inline int &getArg2()
+ {
+ return stencilSize0;
+ }
+ typedef int type2;
+ inline bool &getArg3()
+ {
+ return is3D;
+ }
+ typedef bool type3;
+ inline const Grid<Real> *getArg4()
+ {
+ return pA0;
+ }
+ typedef Grid<Real> type4;
+ inline const Grid<Real> *getArg5()
+ {
+ return pAi;
+ }
+ typedef Grid<Real> type5;
+ inline const Grid<Real> *getArg6()
+ {
+ return pAj;
+ }
+ typedef Grid<Real> type6;
+ inline const Grid<Real> *getArg7()
+ {
+ return pAk;
+ }
+ typedef Grid<Real> type7;
+ void runMessage()
+ {
+ debMsg("Executing kernel knCopyA ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, sizeRef, A0, stencilSize0, is3D, pA0, pAi, pAj, pAk);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<Real> &sizeRef;
+ std::vector<Real> &A0;
+ int stencilSize0;
+ bool is3D;
+ const Grid<Real> *pA0;
+ const Grid<Real> *pAi;
+ const Grid<Real> *pAj;
+ const Grid<Real> *pAk;
+};
+
+struct knActivateVertices : public KernelBase {
+ knActivateVertices(std::vector<GridMg::VertexType> &type_0,
+ std::vector<Real> &A0,
+ bool &nonZeroStencilSumFound,
+ bool &trivialEquationsFound,
+ const GridMg &mg)
+ : KernelBase(type_0.size()),
+ type_0(type_0),
+ A0(A0),
+ nonZeroStencilSumFound(nonZeroStencilSumFound),
+ trivialEquationsFound(trivialEquationsFound),
+ mg(mg)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ std::vector<GridMg::VertexType> &type_0,
+ std::vector<Real> &A0,
+ bool &nonZeroStencilSumFound,
+ bool &trivialEquationsFound,
+ const GridMg &mg) const
+ {
+ // active vertices on level 0 are vertices with non-zero diagonal entry in A
+ type_0[idx] = GridMg::vtInactive;
+
+ if (mg.mA[0][idx * mg.mStencilSize0 + 0] != Real(0)) {
+ type_0[idx] = GridMg::vtActive;
+
+ bool isStencilSumNonZero = false, isEquationTrivial = false;
+ mg.analyzeStencil(int(idx), mg.mIs3D, isStencilSumNonZero, isEquationTrivial);
+
+ // Note: nonZeroStencilSumFound and trivialEquationsFound are only
+ // changed from false to true, and hence there are no race conditions.
+ if (isStencilSumNonZero)
+ nonZeroStencilSumFound = true;
+
+ // scale down trivial equations
+ if (isEquationTrivial) {
+ type_0[idx] = GridMg::vtActiveTrivial;
+ A0[idx * mg.mStencilSize0 + 0] *= mg.mTrivialEquationScale;
+ trivialEquationsFound = true;
+ };
+ }
+ }
+ inline std::vector<GridMg::VertexType> &getArg0()
+ {
+ return type_0;
+ }
+ typedef std::vector<GridMg::VertexType> type0;
+ inline std::vector<Real> &getArg1()
+ {
+ return A0;
+ }
+ typedef std::vector<Real> type1;
+ inline bool &getArg2()
+ {
+ return nonZeroStencilSumFound;
+ }
+ typedef bool type2;
+ inline bool &getArg3()
+ {
+ return trivialEquationsFound;
+ }
+ typedef bool type3;
+ inline const GridMg &getArg4()
+ {
+ return mg;
+ }
+ typedef GridMg type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel knActivateVertices ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, type_0, A0, nonZeroStencilSumFound, trivialEquationsFound, mg);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<GridMg::VertexType> &type_0;
+ std::vector<Real> &A0;
+ bool &nonZeroStencilSumFound;
+ bool &trivialEquationsFound;
+ const GridMg &mg;
+};
+
+void GridMg::setA(const Grid<Real> *pA0,
+ const Grid<Real> *pAi,
+ const Grid<Real> *pAj,
+ const Grid<Real> *pAk)
+{
+ MG_TIMINGS(MuTime time;)
+
+ // Copy level 0
+ knCopyA(mx[0], mA[0], mStencilSize0, mIs3D, pA0, pAi, pAj, pAk);
+
+ // Determine active vertices and scale trivial equations
+ bool nonZeroStencilSumFound = false;
+ bool trivialEquationsFound = false;
+
+ knActivateVertices(mType[0], mA[0], nonZeroStencilSumFound, trivialEquationsFound, *this);
+
+ if (trivialEquationsFound)
+ debMsg("GridMg::setA: Found at least one trivial equation", 2);
+
+ // Sanity check: if all rows of A sum up to 0 --> A doesn't have full rank (opposite direction
+ // isn't necessarily true)
+ if (!nonZeroStencilSumFound)
+ debMsg(
+ "GridMg::setA: Found constant mode: A*1=0! A does not have full rank and multigrid may "
+ "not converge. (forgot to fix a pressure value?)",
+ 1);
+
+ // Create coarse grids and operators on levels >0
+ for (int l = 1; l < mA.size(); l++) {
+ MG_TIMINGS(time.get();)
+ genCoarseGrid(l);
+ MG_TIMINGS(debMsg("GridMg: Generated level " << l << " in " << time.update(), 1);)
+ genCoraseGridOperator(l);
+ MG_TIMINGS(debMsg("GridMg: Generated operator " << l << " in " << time.update(), 1);)
+ }
+
+ mIsASet = true;
+ mIsRhsSet = false; // invalidate rhs
+}
+
+struct knSetRhs : public KernelBase {
+ knSetRhs(std::vector<Real> &b, const Grid<Real> &rhs, const GridMg &mg)
+ : KernelBase(b.size()), b(b), rhs(rhs), mg(mg)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, std::vector<Real> &b, const Grid<Real> &rhs, const GridMg &mg) const
+ {
+ b[idx] = rhs[idx];
+
+ // scale down trivial equations
+ if (mg.mType[0][idx] == GridMg::vtActiveTrivial) {
+ b[idx] *= mg.mTrivialEquationScale;
+ };
+ }
+ inline std::vector<Real> &getArg0()
+ {
+ return b;
+ }
+ typedef std::vector<Real> type0;
+ inline const Grid<Real> &getArg1()
+ {
+ return rhs;
+ }
+ typedef Grid<Real> type1;
+ inline const GridMg &getArg2()
+ {
+ return mg;
+ }
+ typedef GridMg type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSetRhs ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, b, rhs, mg);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<Real> &b;
+ const Grid<Real> &rhs;
+ const GridMg &mg;
+};
+
+void GridMg::setRhs(const Grid<Real> &rhs)
+{
+ assertMsg(mIsASet, "GridMg::setRhs Error: A has not been set.");
+
+ knSetRhs(mb[0], rhs, *this);
+
+ mIsRhsSet = true;
+}
+
+template<class T> struct knSet : public KernelBase {
+ knSet(std::vector<T> &data, T value) : KernelBase(data.size()), data(data), value(value)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, std::vector<T> &data, T value) const
+ {
+ data[idx] = value;
+ }
+ inline std::vector<T> &getArg0()
+ {
+ return data;
+ }
+ typedef std::vector<T> type0;
+ inline T &getArg1()
+ {
+ return value;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSet ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, data, value);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<T> &data;
+ T value;
+};
+
+template<class T> struct knCopyToVector : public KernelBase {
+ knCopyToVector(std::vector<T> &dst, const Grid<T> &src)
+ : KernelBase(dst.size()), dst(dst), src(src)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, std::vector<T> &dst, const Grid<T> &src) const
+ {
+ dst[idx] = src[idx];
+ }
+ inline std::vector<T> &getArg0()
+ {
+ return dst;
+ }
+ typedef std::vector<T> type0;
+ inline const Grid<T> &getArg1()
+ {
+ return src;
+ }
+ typedef Grid<T> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knCopyToVector ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, dst, src);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<T> &dst;
+ const Grid<T> &src;
+};
+
+template<class T> struct knCopyToGrid : public KernelBase {
+ knCopyToGrid(const std::vector<T> &src, Grid<T> &dst)
+ : KernelBase(src.size()), src(src), dst(dst)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, const std::vector<T> &src, Grid<T> &dst) const
+ {
+ dst[idx] = src[idx];
+ }
+ inline const std::vector<T> &getArg0()
+ {
+ return src;
+ }
+ typedef std::vector<T> type0;
+ inline Grid<T> &getArg1()
+ {
+ return dst;
+ }
+ typedef Grid<T> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knCopyToGrid ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, src, dst);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const std::vector<T> &src;
+ Grid<T> &dst;
+};
+
+template<class T> struct knAddAssign : public KernelBase {
+ knAddAssign(std::vector<T> &dst, const std::vector<T> &src)
+ : KernelBase(dst.size()), dst(dst), src(src)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, std::vector<T> &dst, const std::vector<T> &src) const
+ {
+ dst[idx] += src[idx];
+ }
+ inline std::vector<T> &getArg0()
+ {
+ return dst;
+ }
+ typedef std::vector<T> type0;
+ inline const std::vector<T> &getArg1()
+ {
+ return src;
+ }
+ typedef std::vector<T> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knAddAssign ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, dst, src);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<T> &dst;
+ const std::vector<T> &src;
+};
+
+Real GridMg::doVCycle(Grid<Real> &dst, const Grid<Real> *src)
+{
+ MG_TIMINGS(MuTime timeSmooth; MuTime timeCG; MuTime timeI; MuTime timeR; MuTime timeTotal;
+ MuTime time;)
+ MG_TIMINGS(timeSmooth.clear(); timeCG.clear(); timeI.clear(); timeR.clear();)
+
+ assertMsg(mIsASet && mIsRhsSet, "GridMg::doVCycle Error: A and/or rhs have not been set.");
+
+ const int maxLevel = int(mA.size()) - 1;
+
+ if (src) {
+ knCopyToVector<Real>(mx[0], *src);
+ }
+ else {
+ knSet<Real>(mx[0], Real(0));
+ }
+
+ for (int l = 0; l < maxLevel; l++) {
+ MG_TIMINGS(time.update();)
+ for (int i = 0; i < mNumPreSmooth; i++) {
+ smoothGS(l, false);
+ }
+
+ MG_TIMINGS(timeSmooth += time.update();)
+
+ calcResidual(l);
+ restrict(l + 1, mr[l], mb[l + 1]);
+
+ knSet<Real>(mx[l + 1], Real(0));
+
+ MG_TIMINGS(timeR += time.update();)
+ }
+
+ MG_TIMINGS(time.update();)
+ solveCG(maxLevel);
+ MG_TIMINGS(timeCG += time.update();)
+
+ for (int l = maxLevel - 1; l >= 0; l--) {
+ MG_TIMINGS(time.update();)
+ interpolate(l, mx[l + 1], mr[l]);
+
+ knAddAssign<Real>(mx[l], mr[l]);
+
+ MG_TIMINGS(timeI += time.update();)
+
+ for (int i = 0; i < mNumPostSmooth; i++) {
+ smoothGS(l, true);
+ }
+ MG_TIMINGS(timeSmooth += time.update();)
+ }
+
+ calcResidual(0);
+ Real res = calcResidualNorm(0);
+
+ knCopyToGrid<Real>(mx[0], dst);
+
+ MG_TIMINGS(debMsg("GridMg: Finished VCycle in "
+ << timeTotal.update() << " (smoothing: " << timeSmooth
+ << ", CG: " << timeCG << ", R: " << timeR << ", I: " << timeI << ")",
+ 1);)
+
+ return res;
+}
+
+struct knActivateCoarseVertices : public KernelBase {
+ knActivateCoarseVertices(std::vector<GridMg::VertexType> &type, int unused)
+ : KernelBase(type.size()), type(type), unused(unused)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, std::vector<GridMg::VertexType> &type, int unused) const
+ {
+ // set all remaining 'free' vertices to 'removed',
+ if (type[idx] == GridMg::vtFree)
+ type[idx] = GridMg::vtRemoved;
+
+ // then convert 'zero' vertices to 'active' and 'removed' vertices to 'inactive'
+ if (type[idx] == GridMg::vtZero)
+ type[idx] = GridMg::vtActive;
+ if (type[idx] == GridMg::vtRemoved)
+ type[idx] = GridMg::vtInactive;
+ }
+ inline std::vector<GridMg::VertexType> &getArg0()
+ {
+ return type;
+ }
+ typedef std::vector<GridMg::VertexType> type0;
+ inline int &getArg1()
+ {
+ return unused;
+ }
+ typedef int type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knActivateCoarseVertices ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, type, unused);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<GridMg::VertexType> &type;
+ int unused;
+};
+
+// Determine active cells on coarse level l from active cells on fine level l-1
+// while ensuring a full-rank interpolation operator (see Section 3.3 in [1]).
+void GridMg::genCoarseGrid(int l)
+{
+ // AF_Free: unused/untouched vertices
+ // AF_Zero: vertices selected for coarser level
+ // AF_Removed: vertices removed from coarser level
+ enum activeFlags : char { AF_Removed = 0, AF_Zero = 1, AF_Free = 2 };
+
+ // initialize all coarse vertices with 'free'
+ knSet<VertexType>(mType[l], vtFree);
+
+ // initialize min heap of (ID: fine grid vertex, key: #free interpolation vertices) pairs
+ NKMinHeap heap(int(mb[l - 1].size()),
+ mIs3D ? 9 : 5); // max 8 (or 4 in 2D) free interpolation vertices
+
+ FOR_LVL(v, l - 1)
+ {
+ if (mType[l - 1][v] != vtInactive) {
+ Vec3i V = vecIdx(v, l - 1);
+ int fiv = 1 << ((V.x % 2) + (V.y % 2) + (V.z % 2));
+ heap.setKey(v, fiv);
+ }
+ }
+
+ // process fine vertices in heap consecutively, always choosing the vertex with
+ // the currently smallest number of free interpolation vertices
+ while (heap.size() > 0) {
+ int v = heap.popMin().first;
+ Vec3i V = vecIdx(v, l - 1);
+
+ // loop over associated interpolation vertices of V on coarse level l:
+ // the first encountered 'free' vertex is set to 'zero',
+ // all remaining 'free' vertices are set to 'removed'.
+ bool vdone = false;
+
+ FOR_VEC_MINMAX(I, V / 2, (V + 1) / 2)
+ {
+ int i = linIdx(I, l);
+
+ if (mType[l][i] == vtFree) {
+ if (vdone) {
+ mType[l][i] = vtRemoved;
+ }
+ else {
+ mType[l][i] = vtZero;
+ vdone = true;
+ }
+
+ // update #free interpolation vertices in heap:
+ // loop over all associated restriction vertices of I on fine level l-1
+ FOR_VEC_MINMAX(R, vmax(0, I * 2 - 1), vmin(mSize[l - 1] - 1, I * 2 + 1))
+ {
+ int r = linIdx(R, l - 1);
+ int key = heap.getKey(r);
+
+ if (key > 1) {
+ heap.setKey(r, key - 1);
+ } // decrease key of r
+ else if (key > -1) {
+ heap.setKey(r, -1);
+ } // removes r from heap
+ }
+ }
+ }
+ }
+
+ knActivateCoarseVertices(mType[l], 0);
+}
+
+struct knGenCoarseGridOperator : public KernelBase {
+ knGenCoarseGridOperator(std::vector<Real> &sizeRef,
+ std::vector<Real> &A,
+ int l,
+ const GridMg &mg)
+ : KernelBase(sizeRef.size()), sizeRef(sizeRef), A(A), l(l), mg(mg)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ std::vector<Real> &sizeRef,
+ std::vector<Real> &A,
+ int l,
+ const GridMg &mg) const
+ {
+ if (mg.mType[l][idx] == GridMg::vtInactive)
+ return;
+
+ for (int i = 0; i < mg.mStencilSize; i++) {
+ A[idx * mg.mStencilSize + i] = Real(0);
+ } // clear stencil
+
+ Vec3i V = mg.vecIdx(int(idx), l);
+
+ // Calculate the stencil of A_l at V by considering all vertex paths of the form:
+ // (V) <--restriction-- (U) <--A_{l-1}-- (W) <--interpolation-- (N)
+ // V and N are vertices on the coarse grid level l,
+ // U and W are vertices on the fine grid level l-1.
+
+ if (l == 1) {
+ // loop over precomputed paths
+ for (auto it = mg.mCoarseningPaths0.begin(); it != mg.mCoarseningPaths0.end(); it++) {
+ Vec3i N = V + it->N;
+ int n = mg.linIdx(N, l);
+ if (!mg.inGrid(N, l) || mg.mType[l][n] == GridMg::vtInactive)
+ continue;
+
+ Vec3i U = V * 2 + it->U;
+ int u = mg.linIdx(U, l - 1);
+ if (!mg.inGrid(U, l - 1) || mg.mType[l - 1][u] == GridMg::vtInactive)
+ continue;
+
+ Vec3i W = V * 2 + it->W;
+ int w = mg.linIdx(W, l - 1);
+ if (!mg.inGrid(W, l - 1) || mg.mType[l - 1][w] == GridMg::vtInactive)
+ continue;
+
+ if (it->inUStencil) {
+ A[idx * mg.mStencilSize + it->sc] += it->rw *
+ mg.mA[l - 1][u * mg.mStencilSize0 + it->sf] *
+ it->iw;
+ }
+ else {
+ A[idx * mg.mStencilSize + it->sc] += it->rw *
+ mg.mA[l - 1][w * mg.mStencilSize0 + it->sf] *
+ it->iw;
+ }
+ }
+ }
+ else {
+ // l > 1:
+ // loop over restriction vertices U on level l-1 associated with V
+ FOR_VEC_MINMAX(U, vmax(0, V * 2 - 1), vmin(mg.mSize[l - 1] - 1, V * 2 + 1))
+ {
+ int u = mg.linIdx(U, l - 1);
+ if (mg.mType[l - 1][u] == GridMg::vtInactive)
+ continue;
+
+ // restriction weight
+ Real rw = Real(1) / Real(1 << ((U.x % 2) + (U.y % 2) + (U.z % 2)));
+
+ // loop over all stencil neighbors N of V on level l that can be reached via restriction to
+ // U
+ FOR_VEC_MINMAX(N, (U - 1) / 2, vmin(mg.mSize[l] - 1, (U + 2) / 2))
+ {
+ int n = mg.linIdx(N, l);
+ if (mg.mType[l][n] == GridMg::vtInactive)
+ continue;
+
+ // stencil entry at V associated to N (coarse grid level l)
+ Vec3i SC = N - V + mg.mStencilMax;
+ int sc = SC.x + 3 * SC.y + 9 * SC.z;
+ if (sc < mg.mStencilSize - 1)
+ continue;
+
+ // loop over all vertices W which are in the stencil of A_{l-1} at U
+ // and which interpolate from N
+ FOR_VEC_MINMAX(W,
+ vmax(0, vmax(U - 1, N * 2 - 1)),
+ vmin(mg.mSize[l - 1] - 1, vmin(U + 1, N * 2 + 1)))
+ {
+ int w = mg.linIdx(W, l - 1);
+ if (mg.mType[l - 1][w] == GridMg::vtInactive)
+ continue;
+
+ // stencil entry at U associated to W (fine grid level l-1)
+ Vec3i SF = W - U + mg.mStencilMax;
+ int sf = SF.x + 3 * SF.y + 9 * SF.z;
+
+ Real iw = Real(1) /
+ Real(1 << ((W.x % 2) + (W.y % 2) + (W.z % 2))); // interpolation weight
+
+ if (sf < mg.mStencilSize) {
+ A[idx * mg.mStencilSize + sc - mg.mStencilSize + 1] +=
+ rw * mg.mA[l - 1][w * mg.mStencilSize + mg.mStencilSize - 1 - sf] * iw;
+ }
+ else {
+ A[idx * mg.mStencilSize + sc - mg.mStencilSize + 1] +=
+ rw * mg.mA[l - 1][u * mg.mStencilSize + sf - mg.mStencilSize + 1] * iw;
+ }
+ }
+ }
+ }
+ }
+ }
+ inline std::vector<Real> &getArg0()
+ {
+ return sizeRef;
+ }
+ typedef std::vector<Real> type0;
+ inline std::vector<Real> &getArg1()
+ {
+ return A;
+ }
+ typedef std::vector<Real> type1;
+ inline int &getArg2()
+ {
+ return l;
+ }
+ typedef int type2;
+ inline const GridMg &getArg3()
+ {
+ return mg;
+ }
+ typedef GridMg type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel knGenCoarseGridOperator ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, sizeRef, A, l, mg);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<Real> &sizeRef;
+ std::vector<Real> &A;
+ int l;
+ const GridMg &mg;
+};
+
+// Calculate A_l on coarse level l from A_{l-1} on fine level l-1 using
+// Galerkin-based coarsening, i.e., compute A_l = R * A_{l-1} * I.
+void GridMg::genCoraseGridOperator(int l)
+{
+ // for each coarse grid vertex V
+ knGenCoarseGridOperator(mx[l], mA[l], l, *this);
+}
+
+struct knSmoothColor : public KernelBase {
+ knSmoothColor(ThreadSize &numBlocks,
+ std::vector<Real> &x,
+ const Vec3i &blockSize,
+ const std::vector<Vec3i> &colorOffs,
+ int l,
+ const GridMg &mg)
+ : KernelBase(numBlocks.size()),
+ numBlocks(numBlocks),
+ x(x),
+ blockSize(blockSize),
+ colorOffs(colorOffs),
+ l(l),
+ mg(mg)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ ThreadSize &numBlocks,
+ std::vector<Real> &x,
+ const Vec3i &blockSize,
+ const std::vector<Vec3i> &colorOffs,
+ int l,
+ const GridMg &mg) const
+ {
+ Vec3i blockOff(int(idx) % blockSize.x,
+ (int(idx) % (blockSize.x * blockSize.y)) / blockSize.x,
+ int(idx) / (blockSize.x * blockSize.y));
+
+ for (int off = 0; off < colorOffs.size(); off++) {
+
+ Vec3i V = blockOff * 2 + colorOffs[off];
+ if (!mg.inGrid(V, l))
+ continue;
+
+ const int v = mg.linIdx(V, l);
+ if (mg.mType[l][v] == GridMg::vtInactive)
+ continue;
+
+ Real sum = mg.mb[l][v];
+
+ if (l == 0) {
+ int n;
+ for (int d = 0; d < mg.mDim; d++) {
+ if (V[d] > 0) {
+ n = v - mg.mPitch[0][d];
+ sum -= mg.mA[0][n * mg.mStencilSize0 + d + 1] * mg.mx[0][n];
+ }
+ if (V[d] < mg.mSize[0][d] - 1) {
+ n = v + mg.mPitch[0][d];
+ sum -= mg.mA[0][v * mg.mStencilSize0 + d + 1] * mg.mx[0][n];
+ }
+ }
+
+ x[v] = sum / mg.mA[0][v * mg.mStencilSize0 + 0];
+ }
+ else {
+ FOR_VECLIN_MINMAX(S, s, mg.mStencilMin, mg.mStencilMax)
+ {
+ if (s == mg.mStencilSize - 1)
+ continue;
+
+ Vec3i N = V + S;
+ int n = mg.linIdx(N, l);
+
+ if (mg.inGrid(N, l) && mg.mType[l][n] != GridMg::vtInactive) {
+ if (s < mg.mStencilSize) {
+ sum -= mg.mA[l][n * mg.mStencilSize + mg.mStencilSize - 1 - s] * mg.mx[l][n];
+ }
+ else {
+ sum -= mg.mA[l][v * mg.mStencilSize + s - mg.mStencilSize + 1] * mg.mx[l][n];
+ }
+ }
+ }
+
+ x[v] = sum / mg.mA[l][v * mg.mStencilSize + 0];
+ }
+ }
+ }
+ inline ThreadSize &getArg0()
+ {
+ return numBlocks;
+ }
+ typedef ThreadSize type0;
+ inline std::vector<Real> &getArg1()
+ {
+ return x;
+ }
+ typedef std::vector<Real> type1;
+ inline const Vec3i &getArg2()
+ {
+ return blockSize;
+ }
+ typedef Vec3i type2;
+ inline const std::vector<Vec3i> &getArg3()
+ {
+ return colorOffs;
+ }
+ typedef std::vector<Vec3i> type3;
+ inline int &getArg4()
+ {
+ return l;
+ }
+ typedef int type4;
+ inline const GridMg &getArg5()
+ {
+ return mg;
+ }
+ typedef GridMg type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSmoothColor ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, numBlocks, x, blockSize, colorOffs, l, mg);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ThreadSize &numBlocks;
+ std::vector<Real> &x;
+ const Vec3i &blockSize;
+ const std::vector<Vec3i> &colorOffs;
+ int l;
+ const GridMg &mg;
+};
+
+void GridMg::smoothGS(int l, bool reversedOrder)
+{
+ // Multicolor Gauss-Seidel with two colors for the 5/7-point stencil on level 0
+ // and with four/eight colors for the 9/27-point stencil on levels > 0
+ std::vector<std::vector<Vec3i>> colorOffs;
+ const Vec3i a[8] = {Vec3i(0, 0, 0),
+ Vec3i(1, 0, 0),
+ Vec3i(0, 1, 0),
+ Vec3i(1, 1, 0),
+ Vec3i(0, 0, 1),
+ Vec3i(1, 0, 1),
+ Vec3i(0, 1, 1),
+ Vec3i(1, 1, 1)};
+ if (mIs3D) {
+ if (l == 0)
+ colorOffs = {{a[0], a[3], a[5], a[6]}, {a[1], a[2], a[4], a[7]}};
+ else
+ colorOffs = {{a[0]}, {a[1]}, {a[2]}, {a[3]}, {a[4]}, {a[5]}, {a[6]}, {a[7]}};
+ }
+ else {
+ if (l == 0)
+ colorOffs = {{a[0], a[3]}, {a[1], a[2]}};
+ else
+ colorOffs = {{a[0]}, {a[1]}, {a[2]}, {a[3]}};
+ }
+
+ // Divide grid into 2x2 blocks for parallelization
+ Vec3i blockSize = (mSize[l] + 1) / 2;
+ ThreadSize numBlocks(blockSize.x * blockSize.y * blockSize.z);
+
+ for (int c = 0; c < colorOffs.size(); c++) {
+ int color = reversedOrder ? int(colorOffs.size()) - 1 - c : c;
+
+ knSmoothColor(numBlocks, mx[l], blockSize, colorOffs[color], l, *this);
+ }
+}
+
+struct knCalcResidual : public KernelBase {
+ knCalcResidual(std::vector<Real> &r, int l, const GridMg &mg)
+ : KernelBase(r.size()), r(r), l(l), mg(mg)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, std::vector<Real> &r, int l, const GridMg &mg) const
+ {
+ if (mg.mType[l][idx] == GridMg::vtInactive)
+ return;
+
+ Vec3i V = mg.vecIdx(int(idx), l);
+
+ Real sum = mg.mb[l][idx];
+
+ if (l == 0) {
+ int n;
+ for (int d = 0; d < mg.mDim; d++) {
+ if (V[d] > 0) {
+ n = int(idx) - mg.mPitch[0][d];
+ sum -= mg.mA[0][n * mg.mStencilSize0 + d + 1] * mg.mx[0][n];
+ }
+ if (V[d] < mg.mSize[0][d] - 1) {
+ n = int(idx) + mg.mPitch[0][d];
+ sum -= mg.mA[0][idx * mg.mStencilSize0 + d + 1] * mg.mx[0][n];
+ }
+ }
+ sum -= mg.mA[0][idx * mg.mStencilSize0 + 0] * mg.mx[0][idx];
+ }
+ else {
+ FOR_VECLIN_MINMAX(S, s, mg.mStencilMin, mg.mStencilMax)
+ {
+ Vec3i N = V + S;
+ int n = mg.linIdx(N, l);
+
+ if (mg.inGrid(N, l) && mg.mType[l][n] != GridMg::vtInactive) {
+ if (s < mg.mStencilSize) {
+ sum -= mg.mA[l][n * mg.mStencilSize + mg.mStencilSize - 1 - s] * mg.mx[l][n];
+ }
+ else {
+ sum -= mg.mA[l][idx * mg.mStencilSize + s - mg.mStencilSize + 1] * mg.mx[l][n];
+ }
+ }
+ }
+ }
+
+ r[idx] = sum;
+ }
+ inline std::vector<Real> &getArg0()
+ {
+ return r;
+ }
+ typedef std::vector<Real> type0;
+ inline int &getArg1()
+ {
+ return l;
+ }
+ typedef int type1;
+ inline const GridMg &getArg2()
+ {
+ return mg;
+ }
+ typedef GridMg type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knCalcResidual ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, r, l, mg);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<Real> &r;
+ int l;
+ const GridMg &mg;
+};
+
+void GridMg::calcResidual(int l)
+{
+ knCalcResidual(mr[l], l, *this);
+}
+
+struct knResidualNormSumSqr : public KernelBase {
+ knResidualNormSumSqr(const vector<Real> &r, int l, const GridMg &mg)
+ : KernelBase(r.size()), r(r), l(l), mg(mg), result(Real(0))
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, const vector<Real> &r, int l, const GridMg &mg, Real &result)
+ {
+ if (mg.mType[l][idx] == GridMg::vtInactive)
+ return;
+
+ result += r[idx] * r[idx];
+ }
+ inline operator Real()
+ {
+ return result;
+ }
+ inline Real &getRet()
+ {
+ return result;
+ }
+ inline const vector<Real> &getArg0()
+ {
+ return r;
+ }
+ typedef vector<Real> type0;
+ inline int &getArg1()
+ {
+ return l;
+ }
+ typedef int type1;
+ inline const GridMg &getArg2()
+ {
+ return mg;
+ }
+ typedef GridMg type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knResidualNormSumSqr ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, r, l, mg, result);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ knResidualNormSumSqr(knResidualNormSumSqr &o, tbb::split)
+ : KernelBase(o), r(o.r), l(o.l), mg(o.mg), result(Real(0))
+ {
+ }
+ void join(const knResidualNormSumSqr &o)
+ {
+ result += o.result;
+ }
+ const vector<Real> &r;
+ int l;
+ const GridMg &mg;
+ Real result;
+};
+;
+
+Real GridMg::calcResidualNorm(int l)
+{
+ Real res = knResidualNormSumSqr(mr[l], l, *this);
+
+ return std::sqrt(res);
+}
+
+// Standard conjugate gradients with Jacobi preconditioner
+// Notes: Always run at double precision. Not parallelized since
+// coarsest level is assumed to be small.
+void GridMg::solveCG(int l)
+{
+ auto applyAStencil = [this](int v, int l, const std::vector<double> &vec) -> double {
+ Vec3i V = vecIdx(v, l);
+
+ double sum = 0;
+
+ if (l == 0) {
+ int n;
+ for (int d = 0; d < mDim; d++) {
+ if (V[d] > 0) {
+ n = v - mPitch[0][d];
+ sum += mA[0][n * mStencilSize0 + d + 1] * vec[n];
+ }
+ if (V[d] < mSize[0][d] - 1) {
+ n = v + mPitch[0][d];
+ sum += mA[0][v * mStencilSize0 + d + 1] * vec[n];
+ }
+ }
+ sum += mA[0][v * mStencilSize0 + 0] * vec[v];
+ }
+ else {
+ FOR_VECLIN_MINMAX(S, s, mStencilMin, mStencilMax)
+ {
+ Vec3i N = V + S;
+ int n = linIdx(N, l);
+
+ if (inGrid(N, l) && mType[l][n] != vtInactive) {
+ if (s < mStencilSize) {
+ sum += mA[l][n * mStencilSize + mStencilSize - 1 - s] * vec[n];
+ }
+ else {
+ sum += mA[l][v * mStencilSize + s - mStencilSize + 1] * vec[n];
+ }
+ }
+ }
+ }
+
+ return sum;
+ };
+
+ std::vector<double> &z = mCGtmp1[l];
+ std::vector<double> &p = mCGtmp2[l];
+ std::vector<double> &x = mCGtmp3[l];
+ std::vector<double> &r = mCGtmp4[l];
+
+ // Initialization:
+ double alphaTop = 0;
+ double initialResidual = 0;
+
+ FOR_LVL(v, l)
+ {
+ x[v] = mx[l][v];
+ }
+
+ FOR_LVL(v, l)
+ {
+ if (mType[l][v] == vtInactive)
+ continue;
+
+ r[v] = mb[l][v] - applyAStencil(v, l, x);
+ if (l == 0) {
+ z[v] = r[v] / mA[0][v * mStencilSize0 + 0];
+ }
+ else {
+ z[v] = r[v] / mA[l][v * mStencilSize + 0];
+ }
+
+ initialResidual += r[v] * r[v];
+ p[v] = z[v];
+ alphaTop += r[v] * z[v];
+ }
+
+ initialResidual = std::sqrt(initialResidual);
+
+ int iter = 0;
+ const int maxIter = 10000;
+ double residual = -1;
+
+ // CG iterations
+ for (; iter < maxIter && initialResidual > 1E-12; iter++) {
+ double alphaBot = 0;
+
+ FOR_LVL(v, l)
+ {
+ if (mType[l][v] == vtInactive)
+ continue;
+
+ z[v] = applyAStencil(v, l, p);
+ alphaBot += p[v] * z[v];
+ }
+
+ double alpha = alphaTop / alphaBot;
+
+ double alphaTopNew = 0;
+ residual = 0;
+
+ FOR_LVL(v, l)
+ {
+ if (mType[l][v] == vtInactive)
+ continue;
+
+ x[v] += alpha * p[v];
+ r[v] -= alpha * z[v];
+ residual += r[v] * r[v];
+ if (l == 0)
+ z[v] = r[v] / mA[0][v * mStencilSize0 + 0];
+ else
+ z[v] = r[v] / mA[l][v * mStencilSize + 0];
+ alphaTopNew += r[v] * z[v];
+ }
+
+ residual = std::sqrt(residual);
+
+ if (residual / initialResidual < mCoarsestLevelAccuracy)
+ break;
+
+ double beta = alphaTopNew / alphaTop;
+ alphaTop = alphaTopNew;
+
+ FOR_LVL(v, l)
+ {
+ p[v] = z[v] + beta * p[v];
+ }
+ debMsg("GridMg::solveCG i=" << iter << " rel-residual=" << (residual / initialResidual), 5);
+ }
+
+ FOR_LVL(v, l)
+ {
+ mx[l][v] = Real(x[v]);
+ }
+
+ if (iter == maxIter) {
+ debMsg("GridMg::solveCG Warning: Reached maximum number of CG iterations", 1);
+ }
+ else {
+ debMsg("GridMg::solveCG Info: Reached residual " << residual << " in " << iter
+ << " iterations",
+ 2);
+ }
+}
+
+struct knRestrict : public KernelBase {
+ knRestrict(std::vector<Real> &dst, const std::vector<Real> &src, int l_dst, const GridMg &mg)
+ : KernelBase(dst.size()), dst(dst), src(src), l_dst(l_dst), mg(mg)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ std::vector<Real> &dst,
+ const std::vector<Real> &src,
+ int l_dst,
+ const GridMg &mg) const
+ {
+ if (mg.mType[l_dst][idx] == GridMg::vtInactive)
+ return;
+
+ const int l_src = l_dst - 1;
+
+ // Coarse grid vertex
+ Vec3i V = mg.vecIdx(int(idx), l_dst);
+
+ Real sum = Real(0);
+
+ FOR_VEC_MINMAX(R, vmax(0, V * 2 - 1), vmin(mg.mSize[l_src] - 1, V * 2 + 1))
+ {
+ int r = mg.linIdx(R, l_src);
+ if (mg.mType[l_src][r] == GridMg::vtInactive)
+ continue;
+
+ // restriction weight
+ Real rw = Real(1) / Real(1 << ((R.x % 2) + (R.y % 2) + (R.z % 2)));
+
+ sum += rw * src[r];
+ }
+
+ dst[idx] = sum;
+ }
+ inline std::vector<Real> &getArg0()
+ {
+ return dst;
+ }
+ typedef std::vector<Real> type0;
+ inline const std::vector<Real> &getArg1()
+ {
+ return src;
+ }
+ typedef std::vector<Real> type1;
+ inline int &getArg2()
+ {
+ return l_dst;
+ }
+ typedef int type2;
+ inline const GridMg &getArg3()
+ {
+ return mg;
+ }
+ typedef GridMg type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel knRestrict ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, dst, src, l_dst, mg);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<Real> &dst;
+ const std::vector<Real> &src;
+ int l_dst;
+ const GridMg &mg;
+};
+
+void GridMg::restrict(int l_dst, const std::vector<Real> &src, std::vector<Real> &dst) const
+{
+ knRestrict(dst, src, l_dst, *this);
+}
+
+struct knInterpolate : public KernelBase {
+ knInterpolate(std::vector<Real> &dst, const std::vector<Real> &src, int l_dst, const GridMg &mg)
+ : KernelBase(dst.size()), dst(dst), src(src), l_dst(l_dst), mg(mg)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ std::vector<Real> &dst,
+ const std::vector<Real> &src,
+ int l_dst,
+ const GridMg &mg) const
+ {
+ if (mg.mType[l_dst][idx] == GridMg::vtInactive)
+ return;
+
+ const int l_src = l_dst + 1;
+
+ Vec3i V = mg.vecIdx(int(idx), l_dst);
+
+ Real sum = Real(0);
+
+ FOR_VEC_MINMAX(I, V / 2, (V + 1) / 2)
+ {
+ int i = mg.linIdx(I, l_src);
+ if (mg.mType[l_src][i] != GridMg::vtInactive)
+ sum += src[i];
+ }
+
+ // interpolation weight
+ Real iw = Real(1) / Real(1 << ((V.x % 2) + (V.y % 2) + (V.z % 2)));
+
+ dst[idx] = iw * sum;
+ }
+ inline std::vector<Real> &getArg0()
+ {
+ return dst;
+ }
+ typedef std::vector<Real> type0;
+ inline const std::vector<Real> &getArg1()
+ {
+ return src;
+ }
+ typedef std::vector<Real> type1;
+ inline int &getArg2()
+ {
+ return l_dst;
+ }
+ typedef int type2;
+ inline const GridMg &getArg3()
+ {
+ return mg;
+ }
+ typedef GridMg type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel knInterpolate ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, dst, src, l_dst, mg);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<Real> &dst;
+ const std::vector<Real> &src;
+ int l_dst;
+ const GridMg &mg;
+};
+
+void GridMg::interpolate(int l_dst, const std::vector<Real> &src, std::vector<Real> &dst) const
+{
+ knInterpolate(dst, src, l_dst, *this);
+}
+
+}; // namespace Manta
diff --git a/extern/mantaflow/preprocessed/multigrid.h b/extern/mantaflow/preprocessed/multigrid.h
new file mode 100644
index 00000000000..12cc4d9abce
--- /dev/null
+++ b/extern/mantaflow/preprocessed/multigrid.h
@@ -0,0 +1,186 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Multigrid solver by Florian Ferstl (florian.ferstl.ff@gmail.com)
+ *
+ * This is an implementation of the solver developed by Dick et al. [1]
+ * without topology awareness (= vertex duplication on coarser levels). This
+ * simplification allows us to use regular grids for all levels of the multigrid
+ * hierarchy and works well for moderately complex domains.
+ *
+ * [1] Solving the Fluid Pressure Poisson Equation Using Multigrid-Evaluation
+ * and Improvements, C. Dick, M. Rogowsky, R. Westermann, IEEE TVCG 2015
+ *
+ ******************************************************************************/
+
+#ifndef _MULTIGRID_H
+#define _MULTIGRID_H
+
+#include "vectorbase.h"
+#include "grid.h"
+
+namespace Manta {
+
+//! Multigrid solver
+class GridMg {
+ public:
+ //! constructor: preallocates most of required memory for multigrid hierarchy
+ GridMg(const Vec3i &gridSize);
+ ~GridMg(){};
+
+ //! update system matrix A from symmetric 7-point stencil
+ void setA(const Grid<Real> *pA0,
+ const Grid<Real> *pAi,
+ const Grid<Real> *pAj,
+ const Grid<Real> *pAk);
+
+ //! set right-hand side after setting A
+ void setRhs(const Grid<Real> &rhs);
+
+ bool isASet() const
+ {
+ return mIsASet;
+ }
+ bool isRhsSet() const
+ {
+ return mIsRhsSet;
+ }
+
+ //! perform VCycle iteration
+ // - if src is null, then a zero vector is used instead
+ // - returns norm of residual after VCylcle
+ Real doVCycle(Grid<Real> &dst, const Grid<Real> *src = nullptr);
+
+ // access
+ void setCoarsestLevelAccuracy(Real accuracy)
+ {
+ mCoarsestLevelAccuracy = accuracy;
+ }
+ Real getCoarsestLevelAccuracy() const
+ {
+ return mCoarsestLevelAccuracy;
+ }
+ void setSmoothing(int numPreSmooth, int numPostSmooth)
+ {
+ mNumPreSmooth = numPreSmooth;
+ mNumPostSmooth = numPostSmooth;
+ }
+ int getNumPreSmooth() const
+ {
+ return mNumPreSmooth;
+ }
+ int getNumPostSmooth() const
+ {
+ return mNumPostSmooth;
+ }
+
+ //! Set factor for automated downscaling of trivial equations:
+ // 1*x_i = b_i ---> trivialEquationScale*x_i = trivialEquationScale*b_i
+ // Factor should be significantly smaller than the scale of the entries in A.
+ // Info: Trivial equations of the form x_i = b_i can have a negative
+ // effect on the coarse grid operators of the multigrid hierarchy (due
+ // to scaling mismatches), which can lead to slow multigrid convergence.
+ // To avoid this, the solver checks for such equations when updating A
+ // (and rhs) and scales these equations by a fixed factor < 1.
+ void setTrivialEquationScale(Real scale)
+ {
+ mTrivialEquationScale = scale;
+ }
+
+ private:
+ Vec3i vecIdx(int v, int l) const
+ {
+ return Vec3i(v % mSize[l].x,
+ (v % (mSize[l].x * mSize[l].y)) / mSize[l].x,
+ v / (mSize[l].x * mSize[l].y));
+ }
+ int linIdx(Vec3i V, int l) const
+ {
+ return V.x + V.y * mPitch[l].y + V.z * mPitch[l].z;
+ }
+ bool inGrid(Vec3i V, int l) const
+ {
+ return V.x >= 0 && V.y >= 0 && V.z >= 0 && V.x < mSize[l].x && V.y < mSize[l].y &&
+ V.z < mSize[l].z;
+ }
+
+ void analyzeStencil(int v, bool is3D, bool &isStencilSumNonZero, bool &isEquationTrivial) const;
+
+ void genCoarseGrid(int l);
+ void genCoraseGridOperator(int l);
+
+ void smoothGS(int l, bool reversedOrder);
+ void calcResidual(int l);
+ Real calcResidualNorm(int l);
+ void solveCG(int l);
+
+ void restrict(int l_dst, const std::vector<Real> &src, std::vector<Real> &dst) const;
+ void interpolate(int l_dst, const std::vector<Real> &src, std::vector<Real> &dst) const;
+
+ private:
+ enum VertexType : char {
+ vtInactive = 0,
+ vtActive = 1,
+ vtActiveTrivial = 2, // only on finest level 0
+ vtRemoved = 3, //-+
+ vtZero = 4, // +-- only during coarse grid generation
+ vtFree = 5 //-+
+ };
+
+ struct CoarseningPath {
+ Vec3i U, W, N;
+ int sc, sf;
+ Real rw, iw;
+ bool inUStencil;
+ };
+
+ int mNumPreSmooth;
+ int mNumPostSmooth;
+ Real mCoarsestLevelAccuracy;
+ Real mTrivialEquationScale;
+
+ std::vector<std::vector<Real>> mA;
+ std::vector<std::vector<Real>> mx;
+ std::vector<std::vector<Real>> mb;
+ std::vector<std::vector<Real>> mr;
+ std::vector<std::vector<VertexType>> mType;
+ std::vector<std::vector<double>> mCGtmp1, mCGtmp2, mCGtmp3, mCGtmp4;
+ std::vector<Vec3i> mSize, mPitch;
+ std::vector<CoarseningPath> mCoarseningPaths0;
+
+ bool mIs3D;
+ int mDim;
+ int mStencilSize;
+ int mStencilSize0;
+ Vec3i mStencilMin;
+ Vec3i mStencilMax;
+
+ bool mIsASet;
+ bool mIsRhsSet;
+
+ // provide kernels with access
+ friend struct knActivateVertices;
+ friend struct knActivateCoarseVertices;
+ friend struct knSetRhs;
+ friend struct knGenCoarseGridOperator;
+ friend struct knSmoothColor;
+ friend struct knCalcResidual;
+ friend struct knResidualNormSumSqr;
+ friend struct knRestrict;
+ friend struct knInterpolate;
+}; // GridMg
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/multigrid.h.reg.cpp b/extern/mantaflow/preprocessed/multigrid.h.reg.cpp
new file mode 100644
index 00000000000..8f91e6ecf2e
--- /dev/null
+++ b/extern/mantaflow/preprocessed/multigrid.h.reg.cpp
@@ -0,0 +1,13 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "multigrid.h"
+namespace Manta {
+extern "C" {
+void PbRegister_file_4()
+{
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/noisefield.cpp b/extern/mantaflow/preprocessed/noisefield.cpp
new file mode 100644
index 00000000000..98a92309b05
--- /dev/null
+++ b/extern/mantaflow/preprocessed/noisefield.cpp
@@ -0,0 +1,325 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Noise field
+ *
+ ******************************************************************************/
+
+#include "noisefield.h"
+#include "randomstream.h"
+#include "grid.h"
+
+using namespace std;
+
+//*****************************************************************************
+// Wavelet noise
+
+#if FLOATINGPOINT_PRECISION == 1
+# define TILENAME "waveletNoiseTile.bin"
+#else
+# define TILENAME "waveletNoiseTileD.bin"
+#endif
+
+namespace Manta {
+
+int WaveletNoiseField::randomSeed = 13322223;
+Real *WaveletNoiseField::mNoiseTile = NULL;
+std::atomic<int> WaveletNoiseField::mNoiseReferenceCount(0);
+
+static Real _aCoeffs[32] = {
+ 0.000334, -0.001528, 0.000410, 0.003545, -0.000938, -0.008233, 0.002172, 0.019120,
+ -0.005040, -0.044412, 0.011655, 0.103311, -0.025936, -0.243780, 0.033979, 0.655340,
+ 0.655340, 0.033979, -0.243780, -0.025936, 0.103311, 0.011655, -0.044412, -0.005040,
+ 0.019120, 0.002172, -0.008233, -0.000938, 0.003546, 0.000410, -0.001528, 0.000334};
+
+void WaveletNoiseField::downsample(Real *from, Real *to, int n, int stride)
+{
+ const Real *a = &_aCoeffs[16];
+ for (int i = 0; i < n / 2; i++) {
+ to[i * stride] = 0;
+ for (int k = 2 * i - 16; k < 2 * i + 16; k++) {
+ to[i * stride] += a[k - 2 * i] * from[modFast128(k) * stride];
+ }
+ }
+}
+
+static Real _pCoeffs[4] = {0.25, 0.75, 0.75, 0.25};
+
+void WaveletNoiseField::upsample(Real *from, Real *to, int n, int stride)
+{
+ const Real *pp = &_pCoeffs[1];
+
+ for (int i = 0; i < n; i++) {
+ to[i * stride] = 0;
+ for (int k = i / 2 - 1; k < i / 2 + 3; k++) {
+ to[i * stride] += 0.5 * pp[k - i / 2] * from[modSlow(k, n / 2) * stride];
+ }
+ }
+}
+
+WaveletNoiseField::WaveletNoiseField(FluidSolver *parent, int fixedSeed, int loadFromFile)
+ : PbClass(parent),
+ mPosOffset(0.),
+ mPosScale(1.),
+ mValOffset(0.),
+ mValScale(1.),
+ mClamp(false),
+ mClampNeg(0),
+ mClampPos(1),
+ mTimeAnim(0),
+ mGsInvX(0),
+ mGsInvY(0),
+ mGsInvZ(0)
+{
+ Real scale = 1.0 / parent->getGridSize().max();
+ mGsInvX = scale;
+ mGsInvY = scale;
+ mGsInvZ = parent->is3D() ? scale : 1;
+
+ // use global random seed with offset if none is given
+ if (fixedSeed == -1) {
+ fixedSeed = randomSeed + 123;
+ }
+ RandomStream randStreamPos(fixedSeed);
+ mSeedOffset = Vec3(randStreamPos.getVec3Norm());
+
+ generateTile(loadFromFile);
+};
+
+string WaveletNoiseField::toString()
+{
+ std::ostringstream out;
+ out << "NoiseField: name '" << mName << "' "
+ << " pos off=" << mPosOffset << " scale=" << mPosScale << " val off=" << mValOffset
+ << " scale=" << mValScale << " clamp =" << mClamp << " val=" << mClampNeg << " to "
+ << mClampPos << " timeAni =" << mTimeAnim
+ << " gridInv =" << Vec3(mGsInvX, mGsInvY, mGsInvZ);
+ return out.str();
+}
+
+void WaveletNoiseField::generateTile(int loadFromFile)
+{
+ // generate tile
+ const int n = NOISE_TILE_SIZE;
+ const int n3 = n * n * n, n3d = n3 * 3;
+
+ if (mNoiseTile) {
+ mNoiseReferenceCount++;
+ return;
+ }
+ Real *noise3 = new Real[n3d];
+ if (loadFromFile) {
+ FILE *fp = fopen(TILENAME, "rb");
+ if (fp) {
+ assertMsg(fread(noise3, sizeof(Real), n3d, fp) == n3d,
+ "Failed to read wavelet noise tile, file invalid/corrupt? (" << TILENAME << ") ");
+ fclose(fp);
+ debMsg("Noise tile loaded from file " TILENAME, 1);
+ mNoiseTile = noise3;
+ mNoiseReferenceCount++;
+ return;
+ }
+ }
+
+ debMsg("Generating 3x " << n << "^3 noise tile ", 1);
+ Real *temp13 = new Real[n3d];
+ Real *temp23 = new Real[n3d];
+
+ // initialize
+ for (int i = 0; i < n3d; i++) {
+ temp13[i] = temp23[i] = noise3[i] = 0.;
+ }
+
+ // Step 1. Fill the tile with normally distributed random numbers (getRandNorm(0, 1)).
+ RandomStream randStreamTile(randomSeed);
+ for (int i = 0; i < n3d; i++) {
+ // noise3[i] = (randStream.getReal() + randStream2.getReal()) -1.; // produces repeated
+ // values??
+ noise3[i] = randStreamTile.getRandNorm(0, 1);
+ }
+
+ // Steps 2 and 3. Downsample and upsample the tile
+ for (int tile = 0; tile < 3; tile++) {
+ for (int iy = 0; iy < n; iy++)
+ for (int iz = 0; iz < n; iz++) {
+ const int i = iy * n + iz * n * n + tile * n3;
+ downsample(&noise3[i], &temp13[i], n, 1);
+ upsample(&temp13[i], &temp23[i], n, 1);
+ }
+ for (int ix = 0; ix < n; ix++)
+ for (int iz = 0; iz < n; iz++) {
+ const int i = ix + iz * n * n + tile * n3;
+ downsample(&temp23[i], &temp13[i], n, n);
+ upsample(&temp13[i], &temp23[i], n, n);
+ }
+ for (int ix = 0; ix < n; ix++)
+ for (int iy = 0; iy < n; iy++) {
+ const int i = ix + iy * n + tile * n3;
+ downsample(&temp23[i], &temp13[i], n, n * n);
+ upsample(&temp13[i], &temp23[i], n, n * n);
+ }
+ }
+
+ // Step 4. Subtract out the coarse-scale contribution
+ for (int i = 0; i < n3d; i++) {
+ noise3[i] -= temp23[i];
+ }
+
+ // Avoid even/odd variance difference by adding odd-offset version of noise to itself.
+ int offset = n / 2;
+ if (offset % 2 == 0)
+ offset++;
+
+ if (n != 128)
+ errMsg("WaveletNoise::Fast 128 mod used, change for non-128 resolution");
+
+ int icnt = 0;
+ for (int tile = 0; tile < 3; tile++)
+ for (int ix = 0; ix < n; ix++)
+ for (int iy = 0; iy < n; iy++)
+ for (int iz = 0; iz < n; iz++) {
+ temp13[icnt] = noise3[modFast128(ix + offset) + modFast128(iy + offset) * n +
+ modFast128(iz + offset) * n * n + tile * n3];
+ icnt++;
+ }
+
+ for (int i = 0; i < n3d; i++) {
+ noise3[i] += temp13[i];
+ }
+
+ mNoiseTile = noise3;
+ mNoiseReferenceCount++;
+ delete[] temp13;
+ delete[] temp23;
+
+ if (loadFromFile) {
+ FILE *fp = fopen(TILENAME, "wb");
+ if (fp) {
+ fwrite(noise3, sizeof(Real), n3d, fp);
+ fclose(fp);
+ debMsg("Noise field saved to file ", 1);
+ }
+ }
+}
+
+void WaveletNoiseField::downsampleNeumann(const Real *from, Real *to, int n, int stride)
+{
+ // if these values are not local, incorrect results are generated
+ static const Real *const aCoCenter = &_aCoeffs[16];
+ for (int i = 0; i < n / 2; i++) {
+ to[i * stride] = 0;
+ for (int k = 2 * i - 16; k < 2 * i + 16; k++) {
+ // handle boundary
+ Real fromval;
+ if (k < 0) {
+ fromval = from[0];
+ }
+ else if (k > n - 1) {
+ fromval = from[(n - 1) * stride];
+ }
+ else {
+ fromval = from[k * stride];
+ }
+ to[i * stride] += aCoCenter[k - 2 * i] * fromval;
+ }
+ }
+}
+
+void WaveletNoiseField::upsampleNeumann(const Real *from, Real *to, int n, int stride)
+{
+ static const Real *const pp = &_pCoeffs[1];
+ for (int i = 0; i < n; i++) {
+ to[i * stride] = 0;
+ for (int k = i / 2 - 1; k < i / 2 + 3; k++) {
+ Real fromval;
+ if (k > n / 2 - 1) {
+ fromval = from[(n / 2 - 1) * stride];
+ }
+ else if (k < 0) {
+ fromval = from[0];
+ }
+ else {
+ fromval = from[k * stride];
+ }
+ to[i * stride] += 0.5 * pp[k - i / 2] * fromval;
+ }
+ }
+}
+
+void WaveletNoiseField::computeCoefficients(Grid<Real> &input,
+ Grid<Real> &tempIn1,
+ Grid<Real> &tempIn2)
+{
+ // generate tile
+ const int sx = input.getSizeX();
+ const int sy = input.getSizeY();
+ const int sz = input.getSizeZ();
+ const int n3 = sx * sy * sz;
+ // just for compatibility with wavelet turb code
+ Real *temp13 = &tempIn1(0, 0, 0);
+ Real *temp23 = &tempIn2(0, 0, 0);
+ Real *noise3 = &input(0, 0, 0);
+
+ // clear grids
+ for (int i = 0; i < n3; i++) {
+ temp13[i] = temp23[i] = 0.f;
+ }
+
+ // Steps 2 and 3. Downsample and upsample the tile
+ for (int iz = 0; iz < sz; iz++)
+ for (int iy = 0; iy < sy; iy++) {
+ const int i = iz * sx * sy + iy * sx;
+ downsampleNeumann(&noise3[i], &temp13[i], sx, 1);
+ upsampleNeumann(&temp13[i], &temp23[i], sx, 1);
+ }
+
+ for (int iz = 0; iz < sz; iz++)
+ for (int ix = 0; ix < sx; ix++) {
+ const int i = iz * sx * sy + ix;
+ downsampleNeumann(&temp23[i], &temp13[i], sy, sx);
+ upsampleNeumann(&temp13[i], &temp23[i], sy, sx);
+ }
+
+ if (input.is3D()) {
+ for (int iy = 0; iy < sy; iy++)
+ for (int ix = 0; ix < sx; ix++) {
+ const int i = iy * sx + ix;
+ downsampleNeumann(&temp23[i], &temp13[i], sz, sy * sx);
+ upsampleNeumann(&temp13[i], &temp23[i], sz, sy * sx);
+ }
+ }
+
+ // Step 4. Subtract out the coarse-scale contribution
+ for (int i = 0; i < n3; i++) {
+ Real residual = noise3[i] - temp23[i];
+ temp13[i] = sqrtf(fabs(residual));
+ }
+
+ // copy back, and compute actual weight for wavelet turbulence...
+ Real smoothingFactor = 1. / 6.;
+ if (!input.is3D())
+ smoothingFactor = 1. / 4.;
+ FOR_IJK_BND(input, 1)
+ {
+ // apply some brute force smoothing
+ Real res = temp13[k * sx * sy + j * sx + i - 1] + temp13[k * sx * sy + j * sx + i + 1];
+ res += temp13[k * sx * sy + j * sx + i - sx] + temp13[k * sx * sy + j * sx + i + sx];
+ if (input.is3D())
+ res += temp13[k * sx * sy + j * sx + i - sx * sy] +
+ temp13[k * sx * sy + j * sx + i + sx * sy];
+ input(i, j, k) = res * smoothingFactor;
+ }
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/noisefield.h b/extern/mantaflow/preprocessed/noisefield.h
new file mode 100644
index 00000000000..ddf47573dd9
--- /dev/null
+++ b/extern/mantaflow/preprocessed/noisefield.h
@@ -0,0 +1,635 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Wavelet noise field
+ *
+ ******************************************************************************/
+
+#ifndef _NOISEFIELD_H_
+#define _NOISEFIELD_H_
+
+#include "vectorbase.h"
+#include "manta.h"
+#include "grid.h"
+#include <atomic>
+
+namespace Manta {
+
+#define NOISE_TILE_SIZE 128
+
+// wrapper for a parametrized field of wavelet noise
+
+class WaveletNoiseField : public PbClass {
+ public:
+ WaveletNoiseField(FluidSolver *parent, int fixedSeed = -1, int loadFromFile = false);
+ static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "WaveletNoiseField::WaveletNoiseField", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ int fixedSeed = _args.getOpt<int>("fixedSeed", 1, -1, &_lock);
+ int loadFromFile = _args.getOpt<int>("loadFromFile", 2, false, &_lock);
+ obj = new WaveletNoiseField(parent, fixedSeed, loadFromFile);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "WaveletNoiseField::WaveletNoiseField", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("WaveletNoiseField::WaveletNoiseField", e.what());
+ return -1;
+ }
+ }
+
+ ~WaveletNoiseField()
+ {
+ if (mNoiseTile && !mNoiseReferenceCount) {
+ delete mNoiseTile;
+ mNoiseTile = NULL;
+ }
+ };
+
+ //! evaluate noise
+ inline Real evaluate(Vec3 pos, int tile = 0) const;
+ //! evaluate noise as a vector
+ inline Vec3 evaluateVec(Vec3 pos, int tile = 0) const;
+ //! evaluate curl noise
+ inline Vec3 evaluateCurl(Vec3 pos) const;
+
+ //! direct data access
+ Real *data()
+ {
+ return mNoiseTile;
+ }
+
+ //! compute wavelet decomposition of an input grid (stores residual coefficients)
+ static void computeCoefficients(Grid<Real> &input, Grid<Real> &tempIn1, Grid<Real> &tempIn2);
+
+ // helper
+ std::string toString();
+
+ // texcoord position and scale
+ Vec3 mPosOffset;
+ static PyObject *_GET_mPosOffset(PyObject *self, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ return toPy(pbo->mPosOffset);
+ }
+ static int _SET_mPosOffset(PyObject *self, PyObject *val, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ pbo->mPosOffset = fromPy<Vec3>(val);
+ return 0;
+ }
+
+ Vec3 mPosScale;
+ static PyObject *_GET_mPosScale(PyObject *self, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ return toPy(pbo->mPosScale);
+ }
+ static int _SET_mPosScale(PyObject *self, PyObject *val, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ pbo->mPosScale = fromPy<Vec3>(val);
+ return 0;
+ }
+
+ // value offset & scale
+ Real mValOffset;
+ static PyObject *_GET_mValOffset(PyObject *self, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ return toPy(pbo->mValOffset);
+ }
+ static int _SET_mValOffset(PyObject *self, PyObject *val, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ pbo->mValOffset = fromPy<Real>(val);
+ return 0;
+ }
+
+ Real mValScale;
+ static PyObject *_GET_mValScale(PyObject *self, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ return toPy(pbo->mValScale);
+ }
+ static int _SET_mValScale(PyObject *self, PyObject *val, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ pbo->mValScale = fromPy<Real>(val);
+ return 0;
+ }
+
+ // clamp? (default 0-1)
+ bool mClamp;
+ static PyObject *_GET_mClamp(PyObject *self, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ return toPy(pbo->mClamp);
+ }
+ static int _SET_mClamp(PyObject *self, PyObject *val, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ pbo->mClamp = fromPy<bool>(val);
+ return 0;
+ }
+
+ Real mClampNeg;
+ static PyObject *_GET_mClampNeg(PyObject *self, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ return toPy(pbo->mClampNeg);
+ }
+ static int _SET_mClampNeg(PyObject *self, PyObject *val, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ pbo->mClampNeg = fromPy<Real>(val);
+ return 0;
+ }
+
+ Real mClampPos;
+ static PyObject *_GET_mClampPos(PyObject *self, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ return toPy(pbo->mClampPos);
+ }
+ static int _SET_mClampPos(PyObject *self, PyObject *val, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ pbo->mClampPos = fromPy<Real>(val);
+ return 0;
+ }
+
+ // animated over time
+ Real mTimeAnim;
+ static PyObject *_GET_mTimeAnim(PyObject *self, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ return toPy(pbo->mTimeAnim);
+ }
+ static int _SET_mTimeAnim(PyObject *self, PyObject *val, void *cl)
+ {
+ WaveletNoiseField *pbo = dynamic_cast<WaveletNoiseField *>(Pb::objFromPy(self));
+ pbo->mTimeAnim = fromPy<Real>(val);
+ return 0;
+ }
+
+ protected:
+ // noise evaluation functions
+ static inline Real WNoiseDx(const Vec3 &p, Real *data);
+ static inline Vec3 WNoiseVec(const Vec3 &p, Real *data);
+ static inline Real WNoise(const Vec3 &p, Real *data);
+
+ // helpers for tile generation, for periodic 128 grids only
+ static void downsample(Real *from, Real *to, int n, int stride);
+ static void upsample(Real *from, Real *to, int n, int stride);
+
+ // for grids with arbitrary sizes, and neumann boundary conditions
+ static void downsampleNeumann(const Real *from, Real *to, int n, int stride);
+ static void upsampleNeumann(const Real *from, Real *to, int n, int stride);
+
+ static inline int modSlow(int x, int n)
+ {
+ int m = x % n;
+ return (m < 0) ? m + n : m;
+ }
+// warning - the per-axis noise tile size has to be 128 (required by modFast128)!
+#define modFast128(x) ((x)&127)
+
+ inline Real getTime() const
+ {
+ return mParent->getTime() * mParent->getDx() * mTimeAnim;
+ }
+
+ // pre-compute tile data for wavelet noise
+ void generateTile(int loadFromFile);
+
+ // animation over time
+ // grid size normalization (inverse size)
+ Real mGsInvX, mGsInvY, mGsInvZ;
+ // random offset into tile to simulate different random seeds
+ Vec3 mSeedOffset;
+
+ static Real *mNoiseTile;
+ // global random seed storage
+ static int randomSeed;
+ // global reference count for noise tile
+ static std::atomic<int> mNoiseReferenceCount;
+ public:
+ PbArgs _args;
+}
+#define _C_WaveletNoiseField
+;
+
+// **************************************************************************
+// Implementation
+
+#define ADD_WEIGHTED(x, y, z) \
+ weight = 1.0f; \
+ xC = modFast128(midX + (x)); \
+ weight *= w[0][(x) + 1]; \
+ yC = modFast128(midY + (y)); \
+ weight *= w[1][(y) + 1]; \
+ zC = modFast128(midZ + (z)); \
+ weight *= w[2][(z) + 1]; \
+ result += weight * data[(zC * NOISE_TILE_SIZE + yC) * NOISE_TILE_SIZE + xC];
+
+//////////////////////////////////////////////////////////////////////////////////////////
+// derivatives of 3D noise - unrolled for performance
+//////////////////////////////////////////////////////////////////////////////////////////
+inline Real WaveletNoiseField::WNoiseDx(const Vec3 &p, Real *data)
+{
+ Real w[3][3], t, result = 0;
+
+ // Evaluate quadratic B-spline basis functions
+ int midX = (int)ceil(p[0] - 0.5f);
+ t = midX - (p[0] - 0.5f);
+ w[0][0] = -t;
+ w[0][2] = (1.f - t);
+ w[0][1] = 2.0f * t - 1.0f;
+
+ int midY = (int)ceil(p[1] - 0.5f);
+ t = midY - (p[1] - 0.5f);
+ w[1][0] = t * t * 0.5f;
+ w[1][2] = (1.f - t) * (1.f - t) * 0.5f;
+ w[1][1] = 1.f - w[1][0] - w[1][2];
+
+ int midZ = (int)ceil(p[2] - 0.5f);
+ t = midZ - (p[2] - 0.5f);
+ w[2][0] = t * t * 0.5f;
+ w[2][2] = (1.f - t) * (1.f - t) * 0.5f;
+ w[2][1] = 1.f - w[2][0] - w[2][2];
+
+ // Evaluate noise by weighting noise coefficients by basis function values
+ int xC, yC, zC;
+ Real weight = 1;
+
+ ADD_WEIGHTED(-1, -1, -1);
+ ADD_WEIGHTED(0, -1, -1);
+ ADD_WEIGHTED(1, -1, -1);
+ ADD_WEIGHTED(-1, 0, -1);
+ ADD_WEIGHTED(0, 0, -1);
+ ADD_WEIGHTED(1, 0, -1);
+ ADD_WEIGHTED(-1, 1, -1);
+ ADD_WEIGHTED(0, 1, -1);
+ ADD_WEIGHTED(1, 1, -1);
+
+ ADD_WEIGHTED(-1, -1, 0);
+ ADD_WEIGHTED(0, -1, 0);
+ ADD_WEIGHTED(1, -1, 0);
+ ADD_WEIGHTED(-1, 0, 0);
+ ADD_WEIGHTED(0, 0, 0);
+ ADD_WEIGHTED(1, 0, 0);
+ ADD_WEIGHTED(-1, 1, 0);
+ ADD_WEIGHTED(0, 1, 0);
+ ADD_WEIGHTED(1, 1, 0);
+
+ ADD_WEIGHTED(-1, -1, 1);
+ ADD_WEIGHTED(0, -1, 1);
+ ADD_WEIGHTED(1, -1, 1);
+ ADD_WEIGHTED(-1, 0, 1);
+ ADD_WEIGHTED(0, 0, 1);
+ ADD_WEIGHTED(1, 0, 1);
+ ADD_WEIGHTED(-1, 1, 1);
+ ADD_WEIGHTED(0, 1, 1);
+ ADD_WEIGHTED(1, 1, 1);
+
+ return result;
+}
+
+inline Real WaveletNoiseField::WNoise(const Vec3 &p, Real *data)
+{
+ Real w[3][3], t, result = 0;
+
+ // Evaluate quadratic B-spline basis functions
+ int midX = (int)ceilf(p[0] - 0.5f);
+ t = midX - (p[0] - 0.5f);
+ w[0][0] = t * t * 0.5f;
+ w[0][2] = (1.f - t) * (1.f - t) * 0.5f;
+ w[0][1] = 1.f - w[0][0] - w[0][2];
+
+ int midY = (int)ceilf(p[1] - 0.5f);
+ t = midY - (p[1] - 0.5f);
+ w[1][0] = t * t * 0.5f;
+ w[1][2] = (1.f - t) * (1.f - t) * 0.5f;
+ w[1][1] = 1.f - w[1][0] - w[1][2];
+
+ int midZ = (int)ceilf(p[2] - 0.5f);
+ t = midZ - (p[2] - 0.5f);
+ w[2][0] = t * t * 0.5f;
+ w[2][2] = (1.f - t) * (1.f - t) * 0.5f;
+ w[2][1] = 1.f - w[2][0] - w[2][2];
+
+ // Evaluate noise by weighting noise coefficients by basis function values
+ int xC, yC, zC;
+ Real weight = 1;
+
+ ADD_WEIGHTED(-1, -1, -1);
+ ADD_WEIGHTED(0, -1, -1);
+ ADD_WEIGHTED(1, -1, -1);
+ ADD_WEIGHTED(-1, 0, -1);
+ ADD_WEIGHTED(0, 0, -1);
+ ADD_WEIGHTED(1, 0, -1);
+ ADD_WEIGHTED(-1, 1, -1);
+ ADD_WEIGHTED(0, 1, -1);
+ ADD_WEIGHTED(1, 1, -1);
+
+ ADD_WEIGHTED(-1, -1, 0);
+ ADD_WEIGHTED(0, -1, 0);
+ ADD_WEIGHTED(1, -1, 0);
+ ADD_WEIGHTED(-1, 0, 0);
+ ADD_WEIGHTED(0, 0, 0);
+ ADD_WEIGHTED(1, 0, 0);
+ ADD_WEIGHTED(-1, 1, 0);
+ ADD_WEIGHTED(0, 1, 0);
+ ADD_WEIGHTED(1, 1, 0);
+
+ ADD_WEIGHTED(-1, -1, 1);
+ ADD_WEIGHTED(0, -1, 1);
+ ADD_WEIGHTED(1, -1, 1);
+ ADD_WEIGHTED(-1, 0, 1);
+ ADD_WEIGHTED(0, 0, 1);
+ ADD_WEIGHTED(1, 0, 1);
+ ADD_WEIGHTED(-1, 1, 1);
+ ADD_WEIGHTED(0, 1, 1);
+ ADD_WEIGHTED(1, 1, 1);
+
+ return result;
+}
+
+#define ADD_WEIGHTEDX(x, y, z) \
+ weight = dw[0][(x) + 1] * w[1][(y) + 1] * w[2][(z) + 1]; \
+ result += weight * neighbors[x + 1][y + 1][z + 1];
+
+#define ADD_WEIGHTEDY(x, y, z) \
+ weight = w[0][(x) + 1] * dw[1][(y) + 1] * w[2][(z) + 1]; \
+ result += weight * neighbors[x + 1][y + 1][z + 1];
+
+#define ADD_WEIGHTEDZ(x, y, z) \
+ weight = w[0][(x) + 1] * w[1][(y) + 1] * dw[2][(z) + 1]; \
+ result += weight * neighbors[x + 1][y + 1][z + 1];
+
+//////////////////////////////////////////////////////////////////////////////////////////
+// compute all derivatives at once
+//////////////////////////////////////////////////////////////////////////////////////////
+inline Vec3 WaveletNoiseField::WNoiseVec(const Vec3 &p, Real *data)
+{
+ Vec3 final(0.);
+ Real w[3][3];
+ Real dw[3][3];
+ Real result = 0;
+ int xC, yC, zC;
+ Real weight;
+
+ int midX = (int)ceil(p[0] - 0.5f);
+ int midY = (int)ceil(p[1] - 0.5f);
+ int midZ = (int)ceil(p[2] - 0.5f);
+
+ Real t0 = midX - (p[0] - 0.5f);
+ Real t1 = midY - (p[1] - 0.5f);
+ Real t2 = midZ - (p[2] - 0.5f);
+
+ // precache all the neighbors for fast access
+ Real neighbors[3][3][3];
+ for (int z = -1; z <= 1; z++)
+ for (int y = -1; y <= 1; y++)
+ for (int x = -1; x <= 1; x++) {
+ xC = modFast128(midX + (x));
+ yC = modFast128(midY + (y));
+ zC = modFast128(midZ + (z));
+ neighbors[x + 1][y + 1][z + 1] =
+ data[zC * NOISE_TILE_SIZE * NOISE_TILE_SIZE + yC * NOISE_TILE_SIZE + xC];
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+ // evaluate splines
+ ///////////////////////////////////////////////////////////////////////////////////////
+ dw[0][0] = -t0;
+ dw[0][2] = (1.f - t0);
+ dw[0][1] = 2.0f * t0 - 1.0f;
+
+ dw[1][0] = -t1;
+ dw[1][2] = (1.0f - t1);
+ dw[1][1] = 2.0f * t1 - 1.0f;
+
+ dw[2][0] = -t2;
+ dw[2][2] = (1.0f - t2);
+ dw[2][1] = 2.0f * t2 - 1.0f;
+
+ w[0][0] = t0 * t0 * 0.5f;
+ w[0][2] = (1.f - t0) * (1.f - t0) * 0.5f;
+ w[0][1] = 1.f - w[0][0] - w[0][2];
+
+ w[1][0] = t1 * t1 * 0.5f;
+ w[1][2] = (1.f - t1) * (1.f - t1) * 0.5f;
+ w[1][1] = 1.f - w[1][0] - w[1][2];
+
+ w[2][0] = t2 * t2 * 0.5f;
+ w[2][2] = (1.f - t2) * (1.f - t2) * 0.5f;
+ w[2][1] = 1.f - w[2][0] - w[2][2];
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+ // x derivative
+ ///////////////////////////////////////////////////////////////////////////////////////
+ result = 0.0f;
+ ADD_WEIGHTEDX(-1, -1, -1);
+ ADD_WEIGHTEDX(0, -1, -1);
+ ADD_WEIGHTEDX(1, -1, -1);
+ ADD_WEIGHTEDX(-1, 0, -1);
+ ADD_WEIGHTEDX(0, 0, -1);
+ ADD_WEIGHTEDX(1, 0, -1);
+ ADD_WEIGHTEDX(-1, 1, -1);
+ ADD_WEIGHTEDX(0, 1, -1);
+ ADD_WEIGHTEDX(1, 1, -1);
+
+ ADD_WEIGHTEDX(-1, -1, 0);
+ ADD_WEIGHTEDX(0, -1, 0);
+ ADD_WEIGHTEDX(1, -1, 0);
+ ADD_WEIGHTEDX(-1, 0, 0);
+ ADD_WEIGHTEDX(0, 0, 0);
+ ADD_WEIGHTEDX(1, 0, 0);
+ ADD_WEIGHTEDX(-1, 1, 0);
+ ADD_WEIGHTEDX(0, 1, 0);
+ ADD_WEIGHTEDX(1, 1, 0);
+
+ ADD_WEIGHTEDX(-1, -1, 1);
+ ADD_WEIGHTEDX(0, -1, 1);
+ ADD_WEIGHTEDX(1, -1, 1);
+ ADD_WEIGHTEDX(-1, 0, 1);
+ ADD_WEIGHTEDX(0, 0, 1);
+ ADD_WEIGHTEDX(1, 0, 1);
+ ADD_WEIGHTEDX(-1, 1, 1);
+ ADD_WEIGHTEDX(0, 1, 1);
+ ADD_WEIGHTEDX(1, 1, 1);
+ final[0] = result;
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+ // y derivative
+ ///////////////////////////////////////////////////////////////////////////////////////
+ result = 0.0f;
+ ADD_WEIGHTEDY(-1, -1, -1);
+ ADD_WEIGHTEDY(0, -1, -1);
+ ADD_WEIGHTEDY(1, -1, -1);
+ ADD_WEIGHTEDY(-1, 0, -1);
+ ADD_WEIGHTEDY(0, 0, -1);
+ ADD_WEIGHTEDY(1, 0, -1);
+ ADD_WEIGHTEDY(-1, 1, -1);
+ ADD_WEIGHTEDY(0, 1, -1);
+ ADD_WEIGHTEDY(1, 1, -1);
+
+ ADD_WEIGHTEDY(-1, -1, 0);
+ ADD_WEIGHTEDY(0, -1, 0);
+ ADD_WEIGHTEDY(1, -1, 0);
+ ADD_WEIGHTEDY(-1, 0, 0);
+ ADD_WEIGHTEDY(0, 0, 0);
+ ADD_WEIGHTEDY(1, 0, 0);
+ ADD_WEIGHTEDY(-1, 1, 0);
+ ADD_WEIGHTEDY(0, 1, 0);
+ ADD_WEIGHTEDY(1, 1, 0);
+
+ ADD_WEIGHTEDY(-1, -1, 1);
+ ADD_WEIGHTEDY(0, -1, 1);
+ ADD_WEIGHTEDY(1, -1, 1);
+ ADD_WEIGHTEDY(-1, 0, 1);
+ ADD_WEIGHTEDY(0, 0, 1);
+ ADD_WEIGHTEDY(1, 0, 1);
+ ADD_WEIGHTEDY(-1, 1, 1);
+ ADD_WEIGHTEDY(0, 1, 1);
+ ADD_WEIGHTEDY(1, 1, 1);
+ final[1] = result;
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+ // z derivative
+ ///////////////////////////////////////////////////////////////////////////////////////
+ result = 0.0f;
+ ADD_WEIGHTEDZ(-1, -1, -1);
+ ADD_WEIGHTEDZ(0, -1, -1);
+ ADD_WEIGHTEDZ(1, -1, -1);
+ ADD_WEIGHTEDZ(-1, 0, -1);
+ ADD_WEIGHTEDZ(0, 0, -1);
+ ADD_WEIGHTEDZ(1, 0, -1);
+ ADD_WEIGHTEDZ(-1, 1, -1);
+ ADD_WEIGHTEDZ(0, 1, -1);
+ ADD_WEIGHTEDZ(1, 1, -1);
+
+ ADD_WEIGHTEDZ(-1, -1, 0);
+ ADD_WEIGHTEDZ(0, -1, 0);
+ ADD_WEIGHTEDZ(1, -1, 0);
+ ADD_WEIGHTEDZ(-1, 0, 0);
+ ADD_WEIGHTEDZ(0, 0, 0);
+ ADD_WEIGHTEDZ(1, 0, 0);
+ ADD_WEIGHTEDZ(-1, 1, 0);
+ ADD_WEIGHTEDZ(0, 1, 0);
+ ADD_WEIGHTEDZ(1, 1, 0);
+
+ ADD_WEIGHTEDZ(-1, -1, 1);
+ ADD_WEIGHTEDZ(0, -1, 1);
+ ADD_WEIGHTEDZ(1, -1, 1);
+ ADD_WEIGHTEDZ(-1, 0, 1);
+ ADD_WEIGHTEDZ(0, 0, 1);
+ ADD_WEIGHTEDZ(1, 0, 1);
+ ADD_WEIGHTEDZ(-1, 1, 1);
+ ADD_WEIGHTEDZ(0, 1, 1);
+ ADD_WEIGHTEDZ(1, 1, 1);
+ final[2] = result;
+
+ // debMsg("FINAL","at "<<p<<" = "<<final); // DEBUG
+ return final;
+}
+#undef ADD_WEIGHTEDX
+#undef ADD_WEIGHTEDY
+#undef ADD_WEIGHTEDZ
+
+inline Real WaveletNoiseField::evaluate(Vec3 pos, int tile) const
+{
+ pos[0] *= mGsInvX;
+ pos[1] *= mGsInvY;
+ pos[2] *= mGsInvZ;
+ pos += mSeedOffset;
+
+ // time anim
+ pos += Vec3(getTime());
+
+ pos[0] *= mPosScale[0];
+ pos[1] *= mPosScale[1];
+ pos[2] *= mPosScale[2];
+ pos += mPosOffset;
+
+ const int n3 = square(NOISE_TILE_SIZE) * NOISE_TILE_SIZE;
+ Real v = WNoise(pos, &mNoiseTile[tile * n3]);
+
+ v += mValOffset;
+ v *= mValScale;
+ if (mClamp) {
+ if (v < mClampNeg)
+ v = mClampNeg;
+ if (v > mClampPos)
+ v = mClampPos;
+ }
+ return v;
+}
+
+inline Vec3 WaveletNoiseField::evaluateVec(Vec3 pos, int tile) const
+{
+ pos[0] *= mGsInvX;
+ pos[1] *= mGsInvY;
+ pos[2] *= mGsInvZ;
+ pos += mSeedOffset;
+
+ // time anim
+ pos += Vec3(getTime());
+
+ pos[0] *= mPosScale[0];
+ pos[1] *= mPosScale[1];
+ pos[2] *= mPosScale[2];
+ pos += mPosOffset;
+
+ const int n3 = square(NOISE_TILE_SIZE) * NOISE_TILE_SIZE;
+ Vec3 v = WNoiseVec(pos, &mNoiseTile[tile * n3]);
+
+ v += Vec3(mValOffset);
+ v *= mValScale;
+
+ if (mClamp) {
+ for (int i = 0; i < 3; i++) {
+ if (v[i] < mClampNeg)
+ v[i] = mClampNeg;
+ if (v[i] > mClampPos)
+ v[i] = mClampPos;
+ }
+ }
+ return v;
+}
+
+inline Vec3 WaveletNoiseField::evaluateCurl(Vec3 pos) const
+{
+ // gradients of w0-w2
+ Vec3 d0 = evaluateVec(pos, 0), d1 = evaluateVec(pos, 1), d2 = evaluateVec(pos, 2);
+
+ return Vec3(d0.y - d1.z, d2.z - d0.x, d1.x - d2.y);
+}
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/noisefield.h.reg.cpp b/extern/mantaflow/preprocessed/noisefield.h.reg.cpp
new file mode 100644
index 00000000000..f4967abcb43
--- /dev/null
+++ b/extern/mantaflow/preprocessed/noisefield.h.reg.cpp
@@ -0,0 +1,60 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "noisefield.h"
+namespace Manta {
+#ifdef _C_WaveletNoiseField
+static const Pb::Register _R_13("WaveletNoiseField", "NoiseField", "PbClass");
+template<> const char *Namify<WaveletNoiseField>::S = "WaveletNoiseField";
+static const Pb::Register _R_14("WaveletNoiseField", "WaveletNoiseField", WaveletNoiseField::_W_0);
+static const Pb::Register _R_15("WaveletNoiseField",
+ "posOffset",
+ WaveletNoiseField::_GET_mPosOffset,
+ WaveletNoiseField::_SET_mPosOffset);
+static const Pb::Register _R_16("WaveletNoiseField",
+ "posScale",
+ WaveletNoiseField::_GET_mPosScale,
+ WaveletNoiseField::_SET_mPosScale);
+static const Pb::Register _R_17("WaveletNoiseField",
+ "valOffset",
+ WaveletNoiseField::_GET_mValOffset,
+ WaveletNoiseField::_SET_mValOffset);
+static const Pb::Register _R_18("WaveletNoiseField",
+ "valScale",
+ WaveletNoiseField::_GET_mValScale,
+ WaveletNoiseField::_SET_mValScale);
+static const Pb::Register _R_19("WaveletNoiseField",
+ "clamp",
+ WaveletNoiseField::_GET_mClamp,
+ WaveletNoiseField::_SET_mClamp);
+static const Pb::Register _R_20("WaveletNoiseField",
+ "clampNeg",
+ WaveletNoiseField::_GET_mClampNeg,
+ WaveletNoiseField::_SET_mClampNeg);
+static const Pb::Register _R_21("WaveletNoiseField",
+ "clampPos",
+ WaveletNoiseField::_GET_mClampPos,
+ WaveletNoiseField::_SET_mClampPos);
+static const Pb::Register _R_22("WaveletNoiseField",
+ "timeAnim",
+ WaveletNoiseField::_GET_mTimeAnim,
+ WaveletNoiseField::_SET_mTimeAnim);
+#endif
+extern "C" {
+void PbRegister_file_13()
+{
+ KEEP_UNUSED(_R_13);
+ KEEP_UNUSED(_R_14);
+ KEEP_UNUSED(_R_15);
+ KEEP_UNUSED(_R_16);
+ KEEP_UNUSED(_R_17);
+ KEEP_UNUSED(_R_18);
+ KEEP_UNUSED(_R_19);
+ KEEP_UNUSED(_R_20);
+ KEEP_UNUSED(_R_21);
+ KEEP_UNUSED(_R_22);
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/particle.cpp b/extern/mantaflow/preprocessed/particle.cpp
new file mode 100644
index 00000000000..478f1417109
--- /dev/null
+++ b/extern/mantaflow/preprocessed/particle.cpp
@@ -0,0 +1,1620 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2013 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Particle data functionality
+ *
+ ******************************************************************************/
+
+#include <fstream>
+#include <cstring>
+#if NO_ZLIB != 1
+# include <zlib.h>
+#endif
+#include "particle.h"
+#include "levelset.h"
+#include "mantaio.h"
+
+using namespace std;
+namespace Manta {
+
+ParticleBase::ParticleBase(FluidSolver *parent)
+ : PbClass(parent), mAllowCompress(true), mFreePdata(false)
+{
+}
+
+ParticleBase::~ParticleBase()
+{
+  // make sure data fields know their parent system is deleted
+ for (IndexInt i = 0; i < (IndexInt)mPartData.size(); ++i)
+ mPartData[i]->setParticleSys(NULL);
+
+ if (mFreePdata) {
+ for (IndexInt i = 0; i < (IndexInt)mPartData.size(); ++i)
+ delete mPartData[i];
+ }
+}
+
+std::string ParticleBase::infoString() const
+{
+ return "ParticleSystem " + mName + " <no info>";
+}
+
+void ParticleBase::cloneParticleData(ParticleBase *nm)
+{
+ // clone additional data , and make sure the copied particle system deletes it
+ nm->mFreePdata = true;
+ for (IndexInt i = 0; i < (IndexInt)mPartData.size(); ++i) {
+ ParticleDataBase *pdata = mPartData[i]->clone();
+ nm->registerPdata(pdata);
+ }
+}
+
+void ParticleBase::deregister(ParticleDataBase *pdata)
+{
+ bool done = false;
+ // remove pointer from particle data list
+ for (IndexInt i = 0; i < (IndexInt)mPartData.size(); ++i) {
+ if (mPartData[i] == pdata) {
+ if (i < (IndexInt)mPartData.size() - 1)
+ mPartData[i] = mPartData[mPartData.size() - 1];
+ mPartData.pop_back();
+ done = true;
+ }
+ }
+ if (!done)
+ errMsg("Invalid pointer given, not registered!");
+}
+
+// create and attach a new pdata field to this particle system
+PbClass *ParticleBase::create(PbType t, PbTypeVec T, const string &name)
+{
+#if NOPYTHON != 1
+ _args.add("nocheck", true);
+ if (t.str() == "")
+ errMsg("Specify particle data type to create");
+ // debMsg( "Pdata creating '"<< t.str <<" with size "<< this->getSizeSlow(), 5 );
+
+ PbClass *pyObj = PbClass::createPyObject(t.str() + T.str(), name, _args, this->getParent());
+
+ ParticleDataBase *pdata = dynamic_cast<ParticleDataBase *>(pyObj);
+ if (!pdata) {
+ errMsg(
+ "Unable to get particle data pointer from newly created object. Only create ParticleData "
+ "type with a ParticleSys.creat() call, eg, PdataReal, PdataVec3 etc.");
+ delete pyObj;
+ return NULL;
+ }
+ else {
+ this->registerPdata(pdata);
+ }
+
+ // directly init size of new pdata field:
+ pdata->resize(this->getSizeSlow());
+#else
+ PbClass *pyObj = NULL;
+#endif
+ return pyObj;
+}
+
+void ParticleBase::registerPdata(ParticleDataBase *pdata)
+{
+ pdata->setParticleSys(this);
+ mPartData.push_back(pdata);
+
+ if (pdata->getType() == ParticleDataBase::TypeReal) {
+ ParticleDataImpl<Real> *pd = dynamic_cast<ParticleDataImpl<Real> *>(pdata);
+ if (!pd)
+ errMsg("Invalid pdata object posing as real!");
+ this->registerPdataReal(pd);
+ }
+ else if (pdata->getType() == ParticleDataBase::TypeInt) {
+ ParticleDataImpl<int> *pd = dynamic_cast<ParticleDataImpl<int> *>(pdata);
+ if (!pd)
+ errMsg("Invalid pdata object posing as int!");
+ this->registerPdataInt(pd);
+ }
+ else if (pdata->getType() == ParticleDataBase::TypeVec3) {
+ ParticleDataImpl<Vec3> *pd = dynamic_cast<ParticleDataImpl<Vec3> *>(pdata);
+ if (!pd)
+ errMsg("Invalid pdata object posing as vec3!");
+ this->registerPdataVec3(pd);
+ }
+}
+void ParticleBase::registerPdataReal(ParticleDataImpl<Real> *pd)
+{
+ mPdataReal.push_back(pd);
+}
+void ParticleBase::registerPdataVec3(ParticleDataImpl<Vec3> *pd)
+{
+ mPdataVec3.push_back(pd);
+}
+void ParticleBase::registerPdataInt(ParticleDataImpl<int> *pd)
+{
+ mPdataInt.push_back(pd);
+}
+
+void ParticleBase::addAllPdata()
+{
+ for (IndexInt i = 0; i < (IndexInt)mPartData.size(); ++i) {
+ mPartData[i]->addEntry();
+ }
+}
+
+BasicParticleSystem::BasicParticleSystem(FluidSolver *parent)
+ : ParticleSystem<BasicParticleData>(parent)
+{
+ this->mAllowCompress = false;
+}
+
+// file io
+
+void BasicParticleSystem::writeParticlesText(const string name) const
+{
+ ofstream ofs(name.c_str());
+ if (!ofs.good())
+ errMsg("can't open file!");
+ ofs << this->size() << ", pdata: " << mPartData.size() << " (" << mPdataInt.size() << ","
+ << mPdataReal.size() << "," << mPdataVec3.size() << ") \n";
+ for (IndexInt i = 0; i < this->size(); ++i) {
+ ofs << i << ": " << this->getPos(i) << " , " << this->getStatus(i) << ". ";
+ for (IndexInt pd = 0; pd < (IndexInt)mPdataInt.size(); ++pd)
+ ofs << mPdataInt[pd]->get(i) << " ";
+ for (IndexInt pd = 0; pd < (IndexInt)mPdataReal.size(); ++pd)
+ ofs << mPdataReal[pd]->get(i) << " ";
+ for (IndexInt pd = 0; pd < (IndexInt)mPdataVec3.size(); ++pd)
+ ofs << mPdataVec3[pd]->get(i) << " ";
+ ofs << "\n";
+ }
+ ofs.close();
+}
+
+void BasicParticleSystem::writeParticlesRawPositionsGz(const string name) const
+{
+#if NO_ZLIB != 1
+ gzFile gzf = gzopen(name.c_str(), "wb1");
+ if (!gzf)
+ errMsg("can't open file " << name);
+ for (IndexInt i = 0; i < this->size(); ++i) {
+ Vector3D<float> p = toVec3f(this->getPos(i));
+ gzwrite(gzf, &p, sizeof(float) * 3);
+ }
+ gzclose(gzf);
+#else
+ cout << "file format not supported without zlib" << endl;
+#endif
+}
+
+void BasicParticleSystem::writeParticlesRawVelocityGz(const string name) const
+{
+#if NO_ZLIB != 1
+ gzFile gzf = gzopen(name.c_str(), "wb1");
+ if (!gzf)
+ errMsg("can't open file " << name);
+ if (mPdataVec3.size() < 1)
+ errMsg("no vec3 particle data channel found!");
+ // note , assuming particle data vec3 0 is velocity! make optional...
+ for (IndexInt i = 0; i < this->size(); ++i) {
+ Vector3D<float> p = toVec3f(mPdataVec3[0]->get(i));
+ gzwrite(gzf, &p, sizeof(float) * 3);
+ }
+ gzclose(gzf);
+#else
+ cout << "file format not supported without zlib" << endl;
+#endif
+}
+
+void BasicParticleSystem::load(const string name)
+{
+ if (name.find_last_of('.') == string::npos)
+ errMsg("file '" + name + "' does not have an extension");
+ string ext = name.substr(name.find_last_of('.'));
+ if (ext == ".uni")
+ readParticlesUni(name, this);
+ else if (ext == ".raw") // raw = uni for now
+ readParticlesUni(name, this);
+ else
+ errMsg("particle '" + name + "' filetype not supported for loading");
+}
+
+void BasicParticleSystem::save(const string name) const
+{
+ if (name.find_last_of('.') == string::npos)
+ errMsg("file '" + name + "' does not have an extension");
+ string ext = name.substr(name.find_last_of('.'));
+ if (ext == ".txt")
+ this->writeParticlesText(name);
+ else if (ext == ".uni")
+ writeParticlesUni(name, this);
+ else if (ext == ".raw") // raw = uni for now
+ writeParticlesUni(name, this);
+ // raw data formats, very basic for simple data transfer to other programs
+ else if (ext == ".posgz")
+ this->writeParticlesRawPositionsGz(name);
+ else if (ext == ".velgz")
+ this->writeParticlesRawVelocityGz(name);
+ else
+ errMsg("particle '" + name + "' filetype not supported for saving");
+}
+
+void BasicParticleSystem::printParts(IndexInt start, IndexInt stop, bool printIndex)
+{
+ std::ostringstream sstr;
+ IndexInt s = (start > 0 ? start : 0);
+ IndexInt e = (stop > 0 ? stop : (IndexInt)mData.size());
+ s = Manta::clamp(s, (IndexInt)0, (IndexInt)mData.size());
+ e = Manta::clamp(e, (IndexInt)0, (IndexInt)mData.size());
+
+ for (IndexInt i = s; i < e; ++i) {
+ if (printIndex)
+ sstr << i << ": ";
+ sstr << mData[i].pos << " " << mData[i].flag << "\n";
+ }
+ debMsg(sstr.str(), 1);
+}
+
+std::string BasicParticleSystem::getDataPointer()
+{
+ std::ostringstream out;
+ out << &mData;
+ return out.str();
+}
+
+void BasicParticleSystem::readParticles(BasicParticleSystem *from)
+{
+ // re-allocate all data
+ this->resizeAll(from->size());
+ assertMsg(from->size() == this->size(), "particle size doesn't match");
+
+ for (int i = 0; i < this->size(); ++i) {
+ (*this)[i].pos = (*from)[i].pos;
+ (*this)[i].flag = (*from)[i].flag;
+ }
+ this->transformPositions(from->getParent()->getGridSize(), this->getParent()->getGridSize());
+}
+
+// particle data
+
+ParticleDataBase::ParticleDataBase(FluidSolver *parent) : PbClass(parent), mpParticleSys(NULL)
+{
+}
+
+ParticleDataBase::~ParticleDataBase()
+{
+ // notify parent of deletion
+ if (mpParticleSys)
+ mpParticleSys->deregister(this);
+}
+
+// actual data implementation
+
+template<class T>
+ParticleDataImpl<T>::ParticleDataImpl(FluidSolver *parent)
+ : ParticleDataBase(parent), mpGridSource(NULL), mGridSourceMAC(false)
+{
+}
+
+template<class T>
+ParticleDataImpl<T>::ParticleDataImpl(FluidSolver *parent, ParticleDataImpl<T> *other)
+ : ParticleDataBase(parent), mpGridSource(NULL), mGridSourceMAC(false)
+{
+ this->mData = other->mData;
+ setName(other->getName());
+}
+
+template<class T> ParticleDataImpl<T>::~ParticleDataImpl()
+{
+}
+
+template<class T> IndexInt ParticleDataImpl<T>::getSizeSlow() const
+{
+ return mData.size();
+}
+template<class T> void ParticleDataImpl<T>::addEntry()
+{
+ // add zero'ed entry
+ T tmp = T(0.);
+ // for debugging, force init:
+ // tmp = T(0.02 * mData.size()); // increasing
+ // tmp = T(1.); // constant 1
+ return mData.push_back(tmp);
+}
+template<class T> void ParticleDataImpl<T>::resize(IndexInt s)
+{
+ mData.resize(s);
+}
+template<class T> void ParticleDataImpl<T>::copyValueSlow(IndexInt from, IndexInt to)
+{
+ this->copyValue(from, to);
+}
+template<class T> ParticleDataBase *ParticleDataImpl<T>::clone()
+{
+ ParticleDataImpl<T> *npd = new ParticleDataImpl<T>(getParent(), this);
+ return npd;
+}
+
+template<class T> void ParticleDataImpl<T>::setSource(Grid<T> *grid, bool isMAC)
+{
+ mpGridSource = grid;
+ mGridSourceMAC = isMAC;
+ if (isMAC)
+ assertMsg(dynamic_cast<MACGrid *>(grid) != NULL, "Given grid is not a valid MAC grid");
+}
+
+template<class T> void ParticleDataImpl<T>::initNewValue(IndexInt idx, Vec3 pos)
+{
+ if (!mpGridSource)
+ mData[idx] = 0;
+ else {
+ mData[idx] = mpGridSource->getInterpolated(pos);
+ }
+}
+// special handling needed for velocities
+template<> void ParticleDataImpl<Vec3>::initNewValue(IndexInt idx, Vec3 pos)
+{
+ if (!mpGridSource)
+ mData[idx] = 0;
+ else {
+ if (!mGridSourceMAC)
+ mData[idx] = mpGridSource->getInterpolated(pos);
+ else
+ mData[idx] = ((MACGrid *)mpGridSource)->getInterpolated(pos);
+ }
+}
+
+template<typename T> void ParticleDataImpl<T>::load(string name)
+{
+ if (name.find_last_of('.') == string::npos)
+ errMsg("file '" + name + "' does not have an extension");
+ string ext = name.substr(name.find_last_of('.'));
+ if (ext == ".uni")
+ readPdataUni<T>(name, this);
+ else if (ext == ".raw") // raw = uni for now
+ readPdataUni<T>(name, this);
+ else
+ errMsg("particle data '" + name + "' filetype not supported for loading");
+}
+
+template<typename T> void ParticleDataImpl<T>::save(string name)
+{
+ if (name.find_last_of('.') == string::npos)
+ errMsg("file '" + name + "' does not have an extension");
+ string ext = name.substr(name.find_last_of('.'));
+ if (ext == ".uni")
+ writePdataUni<T>(name, this);
+ else if (ext == ".raw") // raw = uni for now
+ writePdataUni<T>(name, this);
+ else
+ errMsg("particle data '" + name + "' filetype not supported for saving");
+}
+
+// specializations
+
+template<> ParticleDataBase::PdataType ParticleDataImpl<Real>::getType() const
+{
+ return ParticleDataBase::TypeReal;
+}
+template<> ParticleDataBase::PdataType ParticleDataImpl<int>::getType() const
+{
+ return ParticleDataBase::TypeInt;
+}
+template<> ParticleDataBase::PdataType ParticleDataImpl<Vec3>::getType() const
+{
+ return ParticleDataBase::TypeVec3;
+}
+
+// note, we need a flag value for functions such as advection
+// ideally, this value should never be modified
+int ParticleIndexData::flag = 0;
+Vec3 ParticleIndexData::pos = Vec3(0., 0., 0.);
+
+template<class T, class S> struct knPdataAdd : public KernelBase {
+ knPdataAdd(ParticleDataImpl<T> &me, const ParticleDataImpl<S> &other)
+ : KernelBase(me.size()), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<T> &me, const ParticleDataImpl<S> &other) const
+ {
+ me[idx] += other[idx];
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const ParticleDataImpl<S> &getArg1()
+ {
+ return other;
+ }
+ typedef ParticleDataImpl<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataAdd ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const ParticleDataImpl<S> &other;
+};
+template<class T, class S> struct knPdataSub : public KernelBase {
+ knPdataSub(ParticleDataImpl<T> &me, const ParticleDataImpl<S> &other)
+ : KernelBase(me.size()), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<T> &me, const ParticleDataImpl<S> &other) const
+ {
+ me[idx] -= other[idx];
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const ParticleDataImpl<S> &getArg1()
+ {
+ return other;
+ }
+ typedef ParticleDataImpl<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataSub ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const ParticleDataImpl<S> &other;
+};
+template<class T, class S> struct knPdataMult : public KernelBase {
+ knPdataMult(ParticleDataImpl<T> &me, const ParticleDataImpl<S> &other)
+ : KernelBase(me.size()), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<T> &me, const ParticleDataImpl<S> &other) const
+ {
+ me[idx] *= other[idx];
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const ParticleDataImpl<S> &getArg1()
+ {
+ return other;
+ }
+ typedef ParticleDataImpl<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataMult ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const ParticleDataImpl<S> &other;
+};
+template<class T, class S> struct knPdataDiv : public KernelBase {
+ knPdataDiv(ParticleDataImpl<T> &me, const ParticleDataImpl<S> &other)
+ : KernelBase(me.size()), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<T> &me, const ParticleDataImpl<S> &other) const
+ {
+ me[idx] /= other[idx];
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const ParticleDataImpl<S> &getArg1()
+ {
+ return other;
+ }
+ typedef ParticleDataImpl<S> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataDiv ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const ParticleDataImpl<S> &other;
+};
+template<class T> struct knPdataSafeDiv : public KernelBase {
+ knPdataSafeDiv(ParticleDataImpl<T> &me, const ParticleDataImpl<T> &other)
+ : KernelBase(me.size()), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<T> &me, const ParticleDataImpl<T> &other) const
+ {
+ me[idx] = safeDivide(me[idx], other[idx]);
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const ParticleDataImpl<T> &getArg1()
+ {
+ return other;
+ }
+ typedef ParticleDataImpl<T> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataSafeDiv ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const ParticleDataImpl<T> &other;
+};
+
+template<class T, class S> struct knPdataSetScalar : public KernelBase {
+ knPdataSetScalar(ParticleDataImpl<T> &me, const S &other)
+ : KernelBase(me.size()), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<T> &me, const S &other) const
+ {
+ me[idx] = other;
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const S &getArg1()
+ {
+ return other;
+ }
+ typedef S type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataSetScalar ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const S &other;
+};
+template<class T, class S> struct knPdataAddScalar : public KernelBase {
+ knPdataAddScalar(ParticleDataImpl<T> &me, const S &other)
+ : KernelBase(me.size()), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<T> &me, const S &other) const
+ {
+ me[idx] += other;
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const S &getArg1()
+ {
+ return other;
+ }
+ typedef S type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataAddScalar ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const S &other;
+};
+template<class T, class S> struct knPdataMultScalar : public KernelBase {
+ knPdataMultScalar(ParticleDataImpl<T> &me, const S &other)
+ : KernelBase(me.size()), me(me), other(other)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<T> &me, const S &other) const
+ {
+ me[idx] *= other;
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const S &getArg1()
+ {
+ return other;
+ }
+ typedef S type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataMultScalar ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const S &other;
+};
+template<class T, class S> struct knPdataScaledAdd : public KernelBase {
+ knPdataScaledAdd(ParticleDataImpl<T> &me, const ParticleDataImpl<T> &other, const S &factor)
+ : KernelBase(me.size()), me(me), other(other), factor(factor)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ ParticleDataImpl<T> &me,
+ const ParticleDataImpl<T> &other,
+ const S &factor) const
+ {
+ me[idx] += factor * other[idx];
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const ParticleDataImpl<T> &getArg1()
+ {
+ return other;
+ }
+ typedef ParticleDataImpl<T> type1;
+ inline const S &getArg2()
+ {
+ return factor;
+ }
+ typedef S type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataScaledAdd ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other, factor);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const ParticleDataImpl<T> &other;
+ const S &factor;
+};
+
+template<class T> struct knPdataClamp : public KernelBase {
+ knPdataClamp(ParticleDataImpl<T> &me, const T vmin, const T vmax)
+ : KernelBase(me.size()), me(me), vmin(vmin), vmax(vmax)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<T> &me, const T vmin, const T vmax) const
+ {
+ me[idx] = clamp(me[idx], vmin, vmax);
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const T &getArg1()
+ {
+ return vmin;
+ }
+ typedef T type1;
+ inline const T &getArg2()
+ {
+ return vmax;
+ }
+ typedef T type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataClamp ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, vmin, vmax);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const T vmin;
+ const T vmax;
+};
+template<class T> struct knPdataClampMin : public KernelBase {
+ knPdataClampMin(ParticleDataImpl<T> &me, const T vmin)
+ : KernelBase(me.size()), me(me), vmin(vmin)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<T> &me, const T vmin) const
+ {
+ me[idx] = std::max(vmin, me[idx]);
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const T &getArg1()
+ {
+ return vmin;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataClampMin ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, vmin);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const T vmin;
+};
+template<class T> struct knPdataClampMax : public KernelBase {
+ knPdataClampMax(ParticleDataImpl<T> &me, const T vmax)
+ : KernelBase(me.size()), me(me), vmax(vmax)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<T> &me, const T vmax) const
+ {
+ me[idx] = std::min(vmax, me[idx]);
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const T &getArg1()
+ {
+ return vmax;
+ }
+ typedef T type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataClampMax ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, vmax);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const T vmax;
+};
+
+struct knPdataClampMinVec3 : public KernelBase {
+ knPdataClampMinVec3(ParticleDataImpl<Vec3> &me, const Real vmin)
+ : KernelBase(me.size()), me(me), vmin(vmin)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<Vec3> &me, const Real vmin) const
+ {
+ me[idx].x = std::max(vmin, me[idx].x);
+ me[idx].y = std::max(vmin, me[idx].y);
+ me[idx].z = std::max(vmin, me[idx].z);
+ }
+ inline ParticleDataImpl<Vec3> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<Vec3> type0;
+ inline const Real &getArg1()
+ {
+ return vmin;
+ }
+ typedef Real type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataClampMinVec3 ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, vmin);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<Vec3> &me;
+ const Real vmin;
+};
+
+struct knPdataClampMaxVec3 : public KernelBase {
+ knPdataClampMaxVec3(ParticleDataImpl<Vec3> &me, const Real vmax)
+ : KernelBase(me.size()), me(me), vmax(vmax)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleDataImpl<Vec3> &me, const Real vmax) const
+ {
+ me[idx].x = std::min(vmax, me[idx].x);
+ me[idx].y = std::min(vmax, me[idx].y);
+ me[idx].z = std::min(vmax, me[idx].z);
+ }
+ inline ParticleDataImpl<Vec3> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<Vec3> type0;
+ inline const Real &getArg1()
+ {
+ return vmax;
+ }
+ typedef Real type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataClampMaxVec3 ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, vmax);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<Vec3> &me;
+ const Real vmax;
+};
+
+// python operators
+
+template<typename T>
+ParticleDataImpl<T> &ParticleDataImpl<T>::copyFrom(const ParticleDataImpl<T> &a)
+{
+ assertMsg(a.mData.size() == mData.size(),
+ "different pdata size " << a.mData.size() << " vs " << this->mData.size());
+ mData = a.mData;
+ return *this;
+}
+
+template<typename T> void ParticleDataImpl<T>::setConst(const T &s)
+{
+ knPdataSetScalar<T, T> op(*this, s);
+}
+
+template<typename T>
+void ParticleDataImpl<T>::setConstRange(const T &s, const int begin, const int end)
+{
+ for (int i = begin; i < end; ++i)
+ (*this)[i] = s;
+}
+
+// special set by flag
+
+template<class T, class S> struct knPdataSetScalarIntFlag : public KernelBase {
+ knPdataSetScalarIntFlag(ParticleDataImpl<T> &me,
+ const S &other,
+ const ParticleDataImpl<int> &t,
+ const int itype)
+ : KernelBase(me.size()), me(me), other(other), t(t), itype(itype)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ ParticleDataImpl<T> &me,
+ const S &other,
+ const ParticleDataImpl<int> &t,
+ const int itype) const
+ {
+ if (t[idx] & itype)
+ me[idx] = other;
+ }
+ inline ParticleDataImpl<T> &getArg0()
+ {
+ return me;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const S &getArg1()
+ {
+ return other;
+ }
+ typedef S type1;
+ inline const ParticleDataImpl<int> &getArg2()
+ {
+ return t;
+ }
+ typedef ParticleDataImpl<int> type2;
+ inline const int &getArg3()
+ {
+ return itype;
+ }
+ typedef int type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPdataSetScalarIntFlag ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other, t, itype);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<T> &me;
+ const S &other;
+ const ParticleDataImpl<int> &t;
+ const int itype;
+};
+template<typename T>
+void ParticleDataImpl<T>::setConstIntFlag(const T &s,
+ const ParticleDataImpl<int> &t,
+ const int itype)
+{
+ knPdataSetScalarIntFlag<T, T> op(*this, s, t, itype);
+}
+
+template<typename T> void ParticleDataImpl<T>::add(const ParticleDataImpl<T> &a)
+{
+ knPdataAdd<T, T> op(*this, a);
+}
+template<typename T> void ParticleDataImpl<T>::sub(const ParticleDataImpl<T> &a)
+{
+ knPdataSub<T, T> op(*this, a);
+}
+
+template<typename T> void ParticleDataImpl<T>::addConst(const T &s)
+{
+ knPdataAddScalar<T, T> op(*this, s);
+}
+
+template<typename T>
+void ParticleDataImpl<T>::addScaled(const ParticleDataImpl<T> &a, const T &factor)
+{
+ knPdataScaledAdd<T, T> op(*this, a, factor);
+}
+
+template<typename T> void ParticleDataImpl<T>::mult(const ParticleDataImpl<T> &a)
+{
+ knPdataMult<T, T> op(*this, a);
+}
+
+template<typename T> void ParticleDataImpl<T>::safeDiv(const ParticleDataImpl<T> &a)
+{
+ knPdataSafeDiv<T> op(*this, a);
+}
+
+template<typename T> void ParticleDataImpl<T>::multConst(const T &s)
+{
+ knPdataMultScalar<T, T> op(*this, s);
+}
+
+template<typename T> void ParticleDataImpl<T>::clamp(const Real vmin, const Real vmax)
+{
+ knPdataClamp<T> op(*this, vmin, vmax);
+}
+
+template<typename T> void ParticleDataImpl<T>::clampMin(const Real vmin)
+{
+ knPdataClampMin<T> op(*this, vmin);
+}
+template<typename T> void ParticleDataImpl<T>::clampMax(const Real vmax)
+{
+ knPdataClampMax<T> op(*this, vmax);
+}
+
+template<> void ParticleDataImpl<Vec3>::clampMin(const Real vmin)
+{
+ knPdataClampMinVec3 op(*this, vmin);
+}
+template<> void ParticleDataImpl<Vec3>::clampMax(const Real vmax)
+{
+ knPdataClampMaxVec3 op(*this, vmax);
+}
+
+template<typename T> struct KnPtsSum : public KernelBase {
+ KnPtsSum(const ParticleDataImpl<T> &val, const ParticleDataImpl<int> *t, const int itype)
+ : KernelBase(val.size()), val(val), t(t), itype(itype), result(T(0.))
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const ParticleDataImpl<T> &val,
+ const ParticleDataImpl<int> *t,
+ const int itype,
+ T &result)
+ {
+ if (t && !((*t)[idx] & itype))
+ return;
+ result += val[idx];
+ }
+ inline operator T()
+ {
+ return result;
+ }
+ inline T &getRet()
+ {
+ return result;
+ }
+ inline const ParticleDataImpl<T> &getArg0()
+ {
+ return val;
+ }
+ typedef ParticleDataImpl<T> type0;
+ inline const ParticleDataImpl<int> *getArg1()
+ {
+ return t;
+ }
+ typedef ParticleDataImpl<int> type1;
+ inline const int &getArg2()
+ {
+ return itype;
+ }
+ typedef int type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnPtsSum ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, val, t, itype, result);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ KnPtsSum(KnPtsSum &o, tbb::split)
+ : KernelBase(o), val(o.val), t(o.t), itype(o.itype), result(T(0.))
+ {
+ }
+ void join(const KnPtsSum &o)
+ {
+ result += o.result;
+ }
+ const ParticleDataImpl<T> &val;
+ const ParticleDataImpl<int> *t;
+ const int itype;
+ T result;
+};
+//! TBB reduction kernel (generated): accumulates normSquare(val[idx]) over all
+//! particle-data entries into a single Real. Follows the standard
+//! parallel_reduce protocol: splitting ctor + operator() + join.
+template<typename T> struct KnPtsSumSquare : public KernelBase {
+  KnPtsSumSquare(const ParticleDataImpl<T> &val) : KernelBase(val.size()), val(val), result(0.)
+  {
+    runMessage();
+    run();
+  }
+  // Per-element body: add the squared magnitude of this entry.
+  inline void op(IndexInt idx, const ParticleDataImpl<T> &val, Real &result)
+  {
+    result += normSquare(val[idx]);
+  }
+  inline operator Real()
+  {
+    return result;
+  }
+  inline Real &getRet()
+  {
+    return result;
+  }
+  inline const ParticleDataImpl<T> &getArg0()
+  {
+    return val;
+  }
+  typedef ParticleDataImpl<T> type0;
+  void runMessage()
+  {
+    debMsg("Executing kernel KnPtsSumSquare ", 3);
+    debMsg("Kernel range"
+               << " size " << size << " ",
+           4);
+  };
+  void operator()(const tbb::blocked_range<IndexInt> &__r)
+  {
+    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+      op(idx, val, result);
+  }
+  void run()
+  {
+    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+  }
+  // Splitting constructor: each parallel sub-range starts its partial sum at 0.
+  KnPtsSumSquare(KnPtsSumSquare &o, tbb::split) : KernelBase(o), val(o.val), result(0.)
+  {
+  }
+  // Merge the partial sum of a completed sub-range.
+  void join(const KnPtsSumSquare &o)
+  {
+    result += o.result;
+  }
+  const ParticleDataImpl<T> &val;
+  Real result;
+};
+//! TBB reduction kernel (generated): accumulates norm(val[idx]) (the
+//! magnitude, not the squared magnitude) over all entries into a single Real.
+template<typename T> struct KnPtsSumMagnitude : public KernelBase {
+  KnPtsSumMagnitude(const ParticleDataImpl<T> &val) : KernelBase(val.size()), val(val), result(0.)
+  {
+    runMessage();
+    run();
+  }
+  // Per-element body: add the magnitude of this entry.
+  inline void op(IndexInt idx, const ParticleDataImpl<T> &val, Real &result)
+  {
+    result += norm(val[idx]);
+  }
+  inline operator Real()
+  {
+    return result;
+  }
+  inline Real &getRet()
+  {
+    return result;
+  }
+  inline const ParticleDataImpl<T> &getArg0()
+  {
+    return val;
+  }
+  typedef ParticleDataImpl<T> type0;
+  void runMessage()
+  {
+    debMsg("Executing kernel KnPtsSumMagnitude ", 3);
+    debMsg("Kernel range"
+               << " size " << size << " ",
+           4);
+  };
+  void operator()(const tbb::blocked_range<IndexInt> &__r)
+  {
+    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+      op(idx, val, result);
+  }
+  void run()
+  {
+    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+  }
+  // Splitting constructor: fresh zero partial result per sub-range.
+  KnPtsSumMagnitude(KnPtsSumMagnitude &o, tbb::split) : KernelBase(o), val(o.val), result(0.)
+  {
+  }
+  void join(const KnPtsSumMagnitude &o)
+  {
+    result += o.result;
+  }
+  const ParticleDataImpl<T> &val;
+  Real result;
+};
+
+//! Sum of all entries; when a type grid t is given, only particles whose
+//! type matches itype are included (see KnPtsSum::op above).
+template<typename T>
+T ParticleDataImpl<T>::sum(const ParticleDataImpl<int> *t, const int itype) const
+{
+  return KnPtsSum<T>(*this, t, itype);
+}
+//! Sum of squared magnitudes of all entries (scalar Real result).
+template<typename T> Real ParticleDataImpl<T>::sumSquare() const
+{
+  return KnPtsSumSquare<T>(*this);
+}
+//! Sum of magnitudes of all entries (scalar Real result).
+template<typename T> Real ParticleDataImpl<T>::sumMagnitude() const
+{
+  return KnPtsSumMagnitude<T>(*this);
+}
+
+//! TBB reduction kernel (generated): minimum over all entries.
+//! NOTE(review): op() compares val[idx] against a Real, so this kernel is only
+//! meaningful for scalar T; the Vec3 case uses the specialized kernels below
+//! (member-function instantiation is lazy, so Vec3 never reaches this op).
+template<typename T>
+
+struct CompPdata_Min : public KernelBase {
+  CompPdata_Min(const ParticleDataImpl<T> &val)
+      : KernelBase(val.size()), val(val), minVal(std::numeric_limits<Real>::max())
+  {
+    runMessage();
+    run();
+  }
+  inline void op(IndexInt idx, const ParticleDataImpl<T> &val, Real &minVal)
+  {
+    if (val[idx] < minVal)
+      minVal = val[idx];
+  }
+  inline operator Real()
+  {
+    return minVal;
+  }
+  inline Real &getRet()
+  {
+    return minVal;
+  }
+  inline const ParticleDataImpl<T> &getArg0()
+  {
+    return val;
+  }
+  typedef ParticleDataImpl<T> type0;
+  void runMessage()
+  {
+    debMsg("Executing kernel CompPdata_Min ", 3);
+    debMsg("Kernel range"
+               << " size " << size << " ",
+           4);
+  };
+  void operator()(const tbb::blocked_range<IndexInt> &__r)
+  {
+    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+      op(idx, val, minVal);
+  }
+  void run()
+  {
+    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+  }
+  // Splitting constructor: partial minimum starts at +max so any entry wins.
+  CompPdata_Min(CompPdata_Min &o, tbb::split)
+      : KernelBase(o), val(o.val), minVal(std::numeric_limits<Real>::max())
+  {
+  }
+  void join(const CompPdata_Min &o)
+  {
+    minVal = min(minVal, o.minVal);
+  }
+  const ParticleDataImpl<T> &val;
+  Real minVal;
+};
+
+//! TBB reduction kernel (generated): maximum over all entries.
+//! Scalar-T counterpart of CompPdata_Min; Vec3 uses the specialized kernels.
+template<typename T>
+
+struct CompPdata_Max : public KernelBase {
+  CompPdata_Max(const ParticleDataImpl<T> &val)
+      : KernelBase(val.size()), val(val), maxVal(-std::numeric_limits<Real>::max())
+  {
+    runMessage();
+    run();
+  }
+  inline void op(IndexInt idx, const ParticleDataImpl<T> &val, Real &maxVal)
+  {
+    if (val[idx] > maxVal)
+      maxVal = val[idx];
+  }
+  inline operator Real()
+  {
+    return maxVal;
+  }
+  inline Real &getRet()
+  {
+    return maxVal;
+  }
+  inline const ParticleDataImpl<T> &getArg0()
+  {
+    return val;
+  }
+  typedef ParticleDataImpl<T> type0;
+  void runMessage()
+  {
+    debMsg("Executing kernel CompPdata_Max ", 3);
+    debMsg("Kernel range"
+               << " size " << size << " ",
+           4);
+  };
+  void operator()(const tbb::blocked_range<IndexInt> &__r)
+  {
+    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+      op(idx, val, maxVal);
+  }
+  void run()
+  {
+    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+  }
+  // Splitting constructor: partial maximum starts at -max (lowest finite Real).
+  CompPdata_Max(CompPdata_Max &o, tbb::split)
+      : KernelBase(o), val(o.val), maxVal(-std::numeric_limits<Real>::max())
+  {
+  }
+  void join(const CompPdata_Max &o)
+  {
+    maxVal = max(maxVal, o.maxVal);
+  }
+  const ParticleDataImpl<T> &val;
+  Real maxVal;
+};
+
+//! Minimum entry value (scalar types; Vec3 is specialized below).
+template<typename T> Real ParticleDataImpl<T>::getMin() const
+{
+  return CompPdata_Min<T>(*this);
+}
+
+//! Largest absolute entry value: runs both min and max reductions and
+//! returns the bigger magnitude of the two extremes.
+template<typename T> Real ParticleDataImpl<T>::getMaxAbs() const
+{
+  Real amin = CompPdata_Min<T>(*this);
+  Real amax = CompPdata_Max<T>(*this);
+  return max(fabs(amin), fabs(amax));
+}
+
+//! Maximum entry value (scalar types; Vec3 is specialized below).
+template<typename T> Real ParticleDataImpl<T>::getMax() const
+{
+  return CompPdata_Max<T>(*this);
+}
+
+//! Debug print of entries [start, stop) via debMsg; non-positive start/stop
+//! fall back to 0 / full size, and both ends are clamped to the valid range.
+template<typename T>
+void ParticleDataImpl<T>::printPdata(IndexInt start, IndexInt stop, bool printIndex)
+{
+  std::ostringstream sstr;
+  IndexInt s = (start > 0 ? start : 0);
+  IndexInt e = (stop > 0 ? stop : (IndexInt)mData.size());
+  s = Manta::clamp(s, (IndexInt)0, (IndexInt)mData.size());
+  e = Manta::clamp(e, (IndexInt)0, (IndexInt)mData.size());
+
+  for (IndexInt i = s; i < e; ++i) {
+    if (printIndex)
+      sstr << i << ": ";
+    sstr << mData[i] << " "
+         << "\n";
+  }
+  debMsg(sstr.str(), 1);
+}
+//! Return the storage address formatted as a string (exposed to Python).
+//! NOTE(review): this streams &mData — the address of the std::vector object
+//! itself, not mData.data() (the element buffer); presumably callers use it
+//! only as an identity token — confirm before relying on it as a buffer ptr.
+template<class T> std::string ParticleDataImpl<T>::getDataPointer()
+{
+  std::ostringstream out;
+  out << &mData;
+  return out.str();
+}
+
+// specials for vec3
+// work on length values, ie, always positive (in contrast to scalar versions above)
+
+//! Vec3 minimum kernel: reduces the minimum *squared* length (always
+//! positive); callers apply sqrt afterwards (see getMin<Vec3> below).
+struct CompPdata_MinVec3 : public KernelBase {
+  CompPdata_MinVec3(const ParticleDataImpl<Vec3> &val)
+      : KernelBase(val.size()), val(val), minVal(std::numeric_limits<Real>::max())
+  {
+    runMessage();
+    run();
+  }
+  inline void op(IndexInt idx, const ParticleDataImpl<Vec3> &val, Real &minVal)
+  {
+    const Real s = normSquare(val[idx]);
+    if (s < minVal)
+      minVal = s;
+  }
+  inline operator Real()
+  {
+    return minVal;
+  }
+  inline Real &getRet()
+  {
+    return minVal;
+  }
+  inline const ParticleDataImpl<Vec3> &getArg0()
+  {
+    return val;
+  }
+  typedef ParticleDataImpl<Vec3> type0;
+  void runMessage()
+  {
+    debMsg("Executing kernel CompPdata_MinVec3 ", 3);
+    debMsg("Kernel range"
+               << " size " << size << " ",
+           4);
+  };
+  void operator()(const tbb::blocked_range<IndexInt> &__r)
+  {
+    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+      op(idx, val, minVal);
+  }
+  void run()
+  {
+    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+  }
+  CompPdata_MinVec3(CompPdata_MinVec3 &o, tbb::split)
+      : KernelBase(o), val(o.val), minVal(std::numeric_limits<Real>::max())
+  {
+  }
+  void join(const CompPdata_MinVec3 &o)
+  {
+    minVal = min(minVal, o.minVal);
+  }
+  const ParticleDataImpl<Vec3> &val;
+  Real minVal;
+};
+
+//! Vec3 maximum kernel: reduces the maximum *squared* length; callers apply
+//! sqrt afterwards (see getMax<Vec3> below).
+struct CompPdata_MaxVec3 : public KernelBase {
+  CompPdata_MaxVec3(const ParticleDataImpl<Vec3> &val)
+      : KernelBase(val.size()), val(val), maxVal(-std::numeric_limits<Real>::max())
+  {
+    runMessage();
+    run();
+  }
+  inline void op(IndexInt idx, const ParticleDataImpl<Vec3> &val, Real &maxVal)
+  {
+    const Real s = normSquare(val[idx]);
+    if (s > maxVal)
+      maxVal = s;
+  }
+  inline operator Real()
+  {
+    return maxVal;
+  }
+  inline Real &getRet()
+  {
+    return maxVal;
+  }
+  inline const ParticleDataImpl<Vec3> &getArg0()
+  {
+    return val;
+  }
+  typedef ParticleDataImpl<Vec3> type0;
+  void runMessage()
+  {
+    debMsg("Executing kernel CompPdata_MaxVec3 ", 3);
+    debMsg("Kernel range"
+               << " size " << size << " ",
+           4);
+  };
+  void operator()(const tbb::blocked_range<IndexInt> &__r)
+  {
+    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+      op(idx, val, maxVal);
+  }
+  void run()
+  {
+    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+  }
+  CompPdata_MaxVec3(CompPdata_MaxVec3 &o, tbb::split)
+      : KernelBase(o), val(o.val), maxVal(-std::numeric_limits<Real>::max())
+  {
+  }
+  void join(const CompPdata_MaxVec3 &o)
+  {
+    maxVal = max(maxVal, o.maxVal);
+  }
+  const ParticleDataImpl<Vec3> &val;
+  Real maxVal;
+};
+
+//! Vec3 specialization: the kernel reduces squared lengths, so take sqrt to
+//! return the smallest vector length.
+template<> Real ParticleDataImpl<Vec3>::getMin() const
+{
+  return sqrt(CompPdata_MinVec3(*this));
+}
+
+//! Vec3 specialization: lengths are non-negative, so the maximum length is
+//! already the maximum absolute value — no min reduction required.
+template<> Real ParticleDataImpl<Vec3>::getMaxAbs() const
+{
+  return sqrt(CompPdata_MaxVec3(*this));  // no minimum necessary here
+}
+
+//! Vec3 specialization: sqrt of the maximum squared length.
+template<> Real ParticleDataImpl<Vec3>::getMax() const
+{
+  return sqrt(CompPdata_MaxVec3(*this));
+}
+
+// explicit instantiation — the only ParticleDataImpl element types used by
+// the solver (int, Real, Vec3); keeps template definitions in this TU.
+template class ParticleDataImpl<int>;
+template class ParticleDataImpl<Real>;
+template class ParticleDataImpl<Vec3>;
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/particle.h b/extern/mantaflow/preprocessed/particle.h
new file mode 100644
index 00000000000..2d41397a961
--- /dev/null
+++ b/extern/mantaflow/preprocessed/particle.h
@@ -0,0 +1,2582 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Base class for particle systems
+ *
+ ******************************************************************************/
+
+#ifndef _PARTICLE_H
+#define _PARTICLE_H
+
+#include <vector>
+#include "grid.h"
+#include "vectorbase.h"
+#include "integrator.h"
+#include "randomstream.h"
+namespace Manta {
+
+// fwd decl
+template<class T> class Grid;
+class ParticleDataBase;
+template<class T> class ParticleDataImpl;
+
+//! Baseclass for particle systems. Does not implement any data
+class ParticleBase : public PbClass {
+ public:
+  enum SystemType { BASE = 0, PARTICLE, VORTEX, FILAMENT, FLIP, TURBULENCE, INDEX };
+
+  enum ParticleStatus {
+    PNONE = 0,
+    PNEW = (1 << 0),  // particles newly created in this step
+    PSPRAY = (1 << 1),  // secondary particle types
+    PBUBBLE = (1 << 2),
+    PFOAM = (1 << 3),
+    PTRACER = (1 << 4),
+    PDELETE = (1 << 10),  // mark as deleted, will be deleted in next compress() step
+    PINVALID = (1 << 30),  // unused
+  };
+
+  ParticleBase(FluidSolver *parent);
+  // Generated Python-binding constructor wrapper: builds the C++ object from
+  // Python kwargs and registers it; returns 0 on success, -1 on error
+  // (CPython init convention).
+  static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    PbClass *obj = Pb::objFromPy(_self);
+    if (obj)
+      delete obj;
+    try {
+      PbArgs _args(_linargs, _kwds);
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(0, "ParticleBase::ParticleBase", !noTiming);
+      {
+        ArgLocker _lock;
+        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+        obj = new ParticleBase(parent);
+        obj->registerObject(_self, &_args);
+        _args.check();
+      }
+      pbFinalizePlugin(obj->getParent(), "ParticleBase::ParticleBase", !noTiming);
+      return 0;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleBase::ParticleBase", e.what());
+      return -1;
+    }
+  }
+
+  virtual ~ParticleBase();
+
+  //! copy all the particle data thats registered with the other particle system to this one
+  virtual void cloneParticleData(ParticleBase *nm);
+
+  virtual SystemType getType() const
+  {
+    return BASE;
+  }
+  virtual std::string infoString() const;
+  virtual ParticleBase *clone()
+  {
+    assertMsg(false, "Dont use, override...");
+    return NULL;
+  }
+
+  //! slow virtual function to query size, do not use in kernels! use size() instead
+  virtual IndexInt getSizeSlow() const
+  {
+    assertMsg(false, "Dont use, override...");
+    return 0;
+  }
+
+  //! add a position as potential candidate for new particle (todo, make usable from parallel
+  //! threads)
+  inline void addBuffered(const Vec3 &pos, int flag = 0);
+
+  //! particle data functions
+
+  //! create a particle data object
+  PbClass *create(PbType type, PbTypeVec T = PbTypeVec(), const std::string &name = "");
+  // Generated Python-binding wrapper for create().
+  static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    try {
+      PbArgs _args(_linargs, _kwds);
+      ParticleBase *pbo = dynamic_cast<ParticleBase *>(Pb::objFromPy(_self));
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(pbo->getParent(), "ParticleBase::create", !noTiming);
+      PyObject *_retval = 0;
+      {
+        ArgLocker _lock;
+        PbType type = _args.get<PbType>("type", 0, &_lock);
+        PbTypeVec T = _args.getOpt<PbTypeVec>("T", 1, PbTypeVec(), &_lock);
+        const std::string &name = _args.getOpt<std::string>("name", 2, "", &_lock);
+        pbo->_args.copy(_args);
+        _retval = toPy(pbo->create(type, T, name));
+        pbo->_args.check();
+      }
+      pbFinalizePlugin(pbo->getParent(), "ParticleBase::create", !noTiming);
+      return _retval;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleBase::create", e.what());
+      return 0;
+    }
+  }
+
+  //! add a particle data field, set its parent particle-system pointer
+  void registerPdata(ParticleDataBase *pdata);
+  void registerPdataReal(ParticleDataImpl<Real> *pdata);
+  void registerPdataVec3(ParticleDataImpl<Vec3> *pdata);
+  void registerPdataInt(ParticleDataImpl<int> *pdata);
+  //! remove a particle data entry
+  void deregister(ParticleDataBase *pdata);
+  //! add one zero entry to all data fields
+  void addAllPdata();
+  // note - deletion of pdata is handled in compress function
+
+  //! how many are there?
+  IndexInt getNumPdata() const
+  {
+    return mPartData.size();
+  }
+  //! access one of the fields
+  ParticleDataBase *getPdata(int i)
+  {
+    return mPartData[i];
+  }
+
+ protected:
+  //! new particle candidates
+  std::vector<Vec3> mNewBufferPos;
+  std::vector<int> mNewBufferFlag;
+
+  //! allow automatic compression / resize? disallowed for, eg, flip particle systems
+  bool mAllowCompress;
+
+  //! store particle data , each pointer has its own storage vector of a certain type (int, real,
+  //! vec3)
+  std::vector<ParticleDataBase *> mPartData;
+  //! lists of different types, for fast operations w/o virtual function calls (all calls necessary
+  //! per particle)
+  std::vector<ParticleDataImpl<Real> *> mPdataReal;
+  std::vector<ParticleDataImpl<Vec3> *> mPdataVec3;
+  std::vector<ParticleDataImpl<int> *>
+      mPdataInt;  //! indicate that pdata of this particle system is copied, and needs to be freed
+  // NOTE(review): the trailing comment above appears to belong to mFreePdata
+  // (the preprocessor glued it onto mPdataInt) — mPdataInt is simply the
+  // int-typed pdata list, like mPdataReal/mPdataVec3.
+  bool mFreePdata;
+ public:
+  PbArgs _args;
+}
+#define _C_ParticleBase
+;
+
+//! Main class for particle systems
+/*! Basetype S must at least contain flag, pos fields */
+//! Generic particle container over a basetype S (must provide pos, flag and
+//! a static getType()). The _W_* members are machine-generated Python
+//! bindings — do not modify their structure by hand.
+template<class S> class ParticleSystem : public ParticleBase {
+ public:
+  ParticleSystem(FluidSolver *parent) : ParticleBase(parent), mDeletes(0), mDeleteChunk(0)
+  {
+  }
+  // Generated Python-binding constructor wrapper (0 = success, -1 = error).
+  static int _W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    PbClass *obj = Pb::objFromPy(_self);
+    if (obj)
+      delete obj;
+    try {
+      PbArgs _args(_linargs, _kwds);
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(0, "ParticleSystem::ParticleSystem", !noTiming);
+      {
+        ArgLocker _lock;
+        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+        obj = new ParticleSystem(parent);
+        obj->registerObject(_self, &_args);
+        _args.check();
+      }
+      pbFinalizePlugin(obj->getParent(), "ParticleSystem::ParticleSystem", !noTiming);
+      return 0;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleSystem::ParticleSystem", e.what());
+      return -1;
+    }
+  }
+
+  virtual ~ParticleSystem(){};
+
+  virtual SystemType getType() const
+  {
+    return S::getType();
+  };
+
+  //! accessors
+  inline S &operator[](IndexInt idx)
+  {
+    DEBUG_ONLY(checkPartIndex(idx));
+    return mData[idx];
+  }
+  inline const S &operator[](IndexInt idx) const
+  {
+    DEBUG_ONLY(checkPartIndex(idx));
+    return mData[idx];
+  }
+  //! return size of container
+  //! note , python binding disabled for now! cannot yet deal with long-long types
+  inline IndexInt size() const
+  {
+    return mData.size();
+  }
+  //! slow virtual function of base class, also returns size
+  virtual IndexInt getSizeSlow() const
+  {
+    return size();
+  }
+  //! note , special call for python, note - doesnt support more than 2b parts!
+  int pySize() const
+  {
+    return (int)mData.size();
+  }
+  // Generated Python-binding wrapper for pySize().
+  static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    try {
+      PbArgs _args(_linargs, _kwds);
+      ParticleSystem *pbo = dynamic_cast<ParticleSystem *>(Pb::objFromPy(_self));
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(pbo->getParent(), "ParticleSystem::pySize", !noTiming);
+      PyObject *_retval = 0;
+      {
+        ArgLocker _lock;
+        pbo->_args.copy(_args);
+        _retval = toPy(pbo->pySize());
+        pbo->_args.check();
+      }
+      pbFinalizePlugin(pbo->getParent(), "ParticleSystem::pySize", !noTiming);
+      return _retval;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleSystem::pySize", e.what());
+      return 0;
+    }
+  }
+
+  //! query status
+  inline int getStatus(IndexInt idx) const
+  {
+    DEBUG_ONLY(checkPartIndex(idx));
+    return mData[idx].flag;
+  }
+  // Active = not marked for deletion (PDELETE bit clear).
+  inline bool isActive(IndexInt idx) const
+  {
+    DEBUG_ONLY(checkPartIndex(idx));
+    return (mData[idx].flag & PDELETE) == 0;
+  }
+  inline bool isSpray(IndexInt idx) const
+  {
+    DEBUG_ONLY(checkPartIndex(idx));
+    return (mData[idx].flag & PSPRAY);
+  }
+  inline bool isBubble(IndexInt idx) const
+  {
+    DEBUG_ONLY(checkPartIndex(idx));
+    return (mData[idx].flag & PBUBBLE);
+  }
+  inline bool isFoam(IndexInt idx) const
+  {
+    DEBUG_ONLY(checkPartIndex(idx));
+    return (mData[idx].flag & PFOAM);
+  }
+  inline bool isTracer(IndexInt idx) const
+  {
+    DEBUG_ONLY(checkPartIndex(idx));
+    return (mData[idx].flag & PTRACER);
+  }
+
+  //! update status (overwrites the whole flag word, not just one bit)
+  inline void setStatus(IndexInt idx, const int status)
+  {
+    DEBUG_ONLY(checkPartIndex(idx));
+    mData[idx].flag = status;
+  }
+
+  //! safe accessor for python
+  void setPos(const IndexInt idx, const Vec3 &pos)
+  {
+    DEBUG_ONLY(checkPartIndex(idx));
+    mData[idx].pos = pos;
+  }
+  // Generated Python-binding wrapper for setPos().
+  static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    try {
+      PbArgs _args(_linargs, _kwds);
+      ParticleSystem *pbo = dynamic_cast<ParticleSystem *>(Pb::objFromPy(_self));
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(pbo->getParent(), "ParticleSystem::setPos", !noTiming);
+      PyObject *_retval = 0;
+      {
+        ArgLocker _lock;
+        const IndexInt idx = _args.get<IndexInt>("idx", 0, &_lock);
+        const Vec3 &pos = _args.get<Vec3>("pos", 1, &_lock);
+        pbo->_args.copy(_args);
+        _retval = getPyNone();
+        pbo->setPos(idx, pos);
+        pbo->_args.check();
+      }
+      pbFinalizePlugin(pbo->getParent(), "ParticleSystem::setPos", !noTiming);
+      return _retval;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleSystem::setPos", e.what());
+      return 0;
+    }
+  }
+
+  const Vec3 &getPos(const IndexInt idx) const
+  {
+    DEBUG_ONLY(checkPartIndex(idx));
+    return mData[idx].pos;
+  }
+  // Generated Python-binding wrapper for getPos().
+  static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    try {
+      PbArgs _args(_linargs, _kwds);
+      ParticleSystem *pbo = dynamic_cast<ParticleSystem *>(Pb::objFromPy(_self));
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(pbo->getParent(), "ParticleSystem::getPos", !noTiming);
+      PyObject *_retval = 0;
+      {
+        ArgLocker _lock;
+        const IndexInt idx = _args.get<IndexInt>("idx", 0, &_lock);
+        pbo->_args.copy(_args);
+        _retval = toPy(pbo->getPos(idx));
+        pbo->_args.check();
+      }
+      pbFinalizePlugin(pbo->getParent(), "ParticleSystem::getPos", !noTiming);
+      return _retval;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleSystem::getPos", e.what());
+      return 0;
+    }
+  }
+
+  //! copy all positions into pdata vec3 field
+  void getPosPdata(ParticleDataImpl<Vec3> &target) const;
+  // Generated Python-binding wrapper for getPosPdata().
+  static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    try {
+      PbArgs _args(_linargs, _kwds);
+      ParticleSystem *pbo = dynamic_cast<ParticleSystem *>(Pb::objFromPy(_self));
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(pbo->getParent(), "ParticleSystem::getPosPdata", !noTiming);
+      PyObject *_retval = 0;
+      {
+        ArgLocker _lock;
+        ParticleDataImpl<Vec3> &target = *_args.getPtr<ParticleDataImpl<Vec3>>(
+            "target", 0, &_lock);
+        pbo->_args.copy(_args);
+        _retval = getPyNone();
+        pbo->getPosPdata(target);
+        pbo->_args.check();
+      }
+      pbFinalizePlugin(pbo->getParent(), "ParticleSystem::getPosPdata", !noTiming);
+      return _retval;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleSystem::getPosPdata", e.what());
+      return 0;
+    }
+  }
+
+  void setPosPdata(const ParticleDataImpl<Vec3> &source);
+  // Generated Python-binding wrapper for setPosPdata().
+  static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    try {
+      PbArgs _args(_linargs, _kwds);
+      ParticleSystem *pbo = dynamic_cast<ParticleSystem *>(Pb::objFromPy(_self));
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(pbo->getParent(), "ParticleSystem::setPosPdata", !noTiming);
+      PyObject *_retval = 0;
+      {
+        ArgLocker _lock;
+        const ParticleDataImpl<Vec3> &source = *_args.getPtr<ParticleDataImpl<Vec3>>(
+            "source", 0, &_lock);
+        pbo->_args.copy(_args);
+        _retval = getPyNone();
+        pbo->setPosPdata(source);
+        pbo->_args.check();
+      }
+      pbFinalizePlugin(pbo->getParent(), "ParticleSystem::setPosPdata", !noTiming);
+      return _retval;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleSystem::setPosPdata", e.what());
+      return 0;
+    }
+  }
+
+  //! transform coordinate system from one grid size to another (usually upon load)
+  void transformPositions(Vec3i dimOld, Vec3i dimNew);
+
+  //! explicitly trigger compression from outside
+  //! (compresses only when enough deletions have accumulated)
+  void doCompress()
+  {
+    if (mDeletes > mDeleteChunk)
+      compress();
+  }
+  //! insert buffered positions as new particles, update additional particle data
+  void insertBufferedParticles();
+  //! resize data vector, and all pdata fields
+  void resizeAll(IndexInt newsize);
+
+  //! adding and deleting
+  inline void kill(IndexInt idx);
+  IndexInt add(const S &data);
+  //! remove all particles, init 0 length arrays (also pdata)
+  void clear();
+  // Generated Python-binding wrapper for clear().
+  static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    try {
+      PbArgs _args(_linargs, _kwds);
+      ParticleSystem *pbo = dynamic_cast<ParticleSystem *>(Pb::objFromPy(_self));
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(pbo->getParent(), "ParticleSystem::clear", !noTiming);
+      PyObject *_retval = 0;
+      {
+        ArgLocker _lock;
+        pbo->_args.copy(_args);
+        _retval = getPyNone();
+        pbo->clear();
+        pbo->_args.check();
+      }
+      pbFinalizePlugin(pbo->getParent(), "ParticleSystem::clear", !noTiming);
+      return _retval;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleSystem::clear", e.what());
+      return 0;
+    }
+  }
+
+  //! Advect particle in grid velocity field
+  void advectInGrid(const FlagGrid &flags,
+                    const MACGrid &vel,
+                    const int integrationMode,
+                    const bool deleteInObstacle = true,
+                    const bool stopInObstacle = true,
+                    const ParticleDataImpl<int> *ptype = NULL,
+                    const int exclude = 0);
+  // Generated Python-binding wrapper for advectInGrid().
+  static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    try {
+      PbArgs _args(_linargs, _kwds);
+      ParticleSystem *pbo = dynamic_cast<ParticleSystem *>(Pb::objFromPy(_self));
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(pbo->getParent(), "ParticleSystem::advectInGrid", !noTiming);
+      PyObject *_retval = 0;
+      {
+        ArgLocker _lock;
+        const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+        const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+        const int integrationMode = _args.get<int>("integrationMode", 2, &_lock);
+        const bool deleteInObstacle = _args.getOpt<bool>("deleteInObstacle", 3, true, &_lock);
+        const bool stopInObstacle = _args.getOpt<bool>("stopInObstacle", 4, true, &_lock);
+        const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+            "ptype", 5, NULL, &_lock);
+        const int exclude = _args.getOpt<int>("exclude", 6, 0, &_lock);
+        pbo->_args.copy(_args);
+        _retval = getPyNone();
+        pbo->advectInGrid(
+            flags, vel, integrationMode, deleteInObstacle, stopInObstacle, ptype, exclude);
+        pbo->_args.check();
+      }
+      pbFinalizePlugin(pbo->getParent(), "ParticleSystem::advectInGrid", !noTiming);
+      return _retval;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleSystem::advectInGrid", e.what());
+      return 0;
+    }
+  }
+
+  //! Project particles outside obstacles
+  void projectOutside(Grid<Vec3> &gradient);
+  // Generated Python-binding wrapper for projectOutside().
+  static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    try {
+      PbArgs _args(_linargs, _kwds);
+      ParticleSystem *pbo = dynamic_cast<ParticleSystem *>(Pb::objFromPy(_self));
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(pbo->getParent(), "ParticleSystem::projectOutside", !noTiming);
+      PyObject *_retval = 0;
+      {
+        ArgLocker _lock;
+        Grid<Vec3> &gradient = *_args.getPtr<Grid<Vec3>>("gradient", 0, &_lock);
+        pbo->_args.copy(_args);
+        _retval = getPyNone();
+        pbo->projectOutside(gradient);
+        pbo->_args.check();
+      }
+      pbFinalizePlugin(pbo->getParent(), "ParticleSystem::projectOutside", !noTiming);
+      return _retval;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleSystem::projectOutside", e.what());
+      return 0;
+    }
+  }
+
+  void projectOutOfBnd(const FlagGrid &flags,
+                       const Real bnd,
+                       const std::string &plane = "xXyYzZ",
+                       const ParticleDataImpl<int> *ptype = NULL,
+                       const int exclude = 0);
+  // Generated Python-binding wrapper for projectOutOfBnd().
+  static PyObject *_W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+  {
+    try {
+      PbArgs _args(_linargs, _kwds);
+      ParticleSystem *pbo = dynamic_cast<ParticleSystem *>(Pb::objFromPy(_self));
+      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+      pbPreparePlugin(pbo->getParent(), "ParticleSystem::projectOutOfBnd", !noTiming);
+      PyObject *_retval = 0;
+      {
+        ArgLocker _lock;
+        const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+        const Real bnd = _args.get<Real>("bnd", 1, &_lock);
+        const std::string &plane = _args.getOpt<std::string>("plane", 2, "xXyYzZ", &_lock);
+        const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+            "ptype", 3, NULL, &_lock);
+        const int exclude = _args.getOpt<int>("exclude", 4, 0, &_lock);
+        pbo->_args.copy(_args);
+        _retval = getPyNone();
+        pbo->projectOutOfBnd(flags, bnd, plane, ptype, exclude);
+        pbo->_args.check();
+      }
+      pbFinalizePlugin(pbo->getParent(), "ParticleSystem::projectOutOfBnd", !noTiming);
+      return _retval;
+    }
+    catch (std::exception &e) {
+      pbSetError("ParticleSystem::projectOutOfBnd", e.what());
+      return 0;
+    }
+  }
+
+  virtual ParticleBase *clone();
+  virtual std::string infoString() const;
+
+  //! debugging
+  inline void checkPartIndex(IndexInt idx) const;
+
+ protected:
+  //! deletion count , and interval for re-compressing
+  IndexInt mDeletes, mDeleteChunk;
+  //! the particle data
+  std::vector<S> mData;
+  //! reduce storage , called by doCompress
+  virtual void compress();
+ public:
+  PbArgs _args;
+}
+#define _C_ParticleSystem
+;
+
+//******************************************************************************
+
+//! Simplest data class for particle systems
+//! contains a position and an int flag; note that these are deprectated, and will at
+//! some point be replaced by the more flexible pdata fields. For now manually copy with
+//! getPosPdata / setPosPdata.
+//! Plain particle record: a position plus an int flag word (see
+//! ParticleBase::ParticleStatus for the flag bits).
+struct BasicParticleData {
+ public:
+  BasicParticleData() : pos(0.), flag(0)
+  {
+  }
+  BasicParticleData(const Vec3 &p) : pos(p), flag(0)
+  {
+  }
+  // System-type tag used by ParticleSystem<S>::getType().
+  static ParticleBase::SystemType getType()
+  {
+    return ParticleBase::PARTICLE;
+  }
+
+  //! data (note, this size is currently hard coded for uni i/o)
+  Vec3 pos;
+  int flag;
+};
+
+class BasicParticleSystem : public ParticleSystem<BasicParticleData> {
+ public:
+ BasicParticleSystem(FluidSolver *parent);
+ static int _W_12(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "BasicParticleSystem::BasicParticleSystem", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ obj = new BasicParticleSystem(parent);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "BasicParticleSystem::BasicParticleSystem", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("BasicParticleSystem::BasicParticleSystem", e.what());
+ return -1;
+ }
+ }
+
+ //! file io
+ void save(const std::string name) const;
+ static PyObject *_W_13(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ BasicParticleSystem *pbo = dynamic_cast<BasicParticleSystem *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "BasicParticleSystem::save", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const std::string name = _args.get<std::string>("name", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->save(name);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "BasicParticleSystem::save", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("BasicParticleSystem::save", e.what());
+ return 0;
+ }
+ }
+
+ void load(const std::string name);
+ static PyObject *_W_14(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ BasicParticleSystem *pbo = dynamic_cast<BasicParticleSystem *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "BasicParticleSystem::load", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const std::string name = _args.get<std::string>("name", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->load(name);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "BasicParticleSystem::load", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("BasicParticleSystem::load", e.what());
+ return 0;
+ }
+ }
+
+ //! save to text file
+ void writeParticlesText(const std::string name) const;
+ //! other output formats
+ void writeParticlesRawPositionsGz(const std::string name) const;
+ void writeParticlesRawVelocityGz(const std::string name) const;
+
+ //! read from other particle system (with resize)
+ void readParticles(BasicParticleSystem *from);
+ static PyObject *_W_15(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ BasicParticleSystem *pbo = dynamic_cast<BasicParticleSystem *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "BasicParticleSystem::readParticles", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ BasicParticleSystem *from = _args.getPtr<BasicParticleSystem>("from", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->readParticles(from);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "BasicParticleSystem::readParticles", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("BasicParticleSystem::readParticles", e.what());
+ return 0;
+ }
+ }
+
+ //! add particles in python
+ void addParticle(Vec3 pos)
+ {
+ add(BasicParticleData(pos));
+ }
+ static PyObject *_W_16(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ BasicParticleSystem *pbo = dynamic_cast<BasicParticleSystem *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "BasicParticleSystem::addParticle", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Vec3 pos = _args.get<Vec3>("pos", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->addParticle(pos);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "BasicParticleSystem::addParticle", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("BasicParticleSystem::addParticle", e.what());
+ return 0;
+ }
+ }
+
+ //! dangerous, get low level access - avoid usage, only used in vortex filament advection for now
+ std::vector<BasicParticleData> &getData()
+ {
+ return mData;
+ }
+
  //! Debug print of particles in [start, stop); -1 means full range (defined elsewhere).
  void printParts(IndexInt start = -1, IndexInt stop = -1, bool printIndex = false);
  //! Auto-generated Python glue for printParts(). All arguments optional.
  static PyObject *_W_17(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      BasicParticleSystem *pbo = dynamic_cast<BasicParticleSystem *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "BasicParticleSystem::printParts", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        IndexInt start = _args.getOpt<IndexInt>("start", 0, -1, &_lock);
        IndexInt stop = _args.getOpt<IndexInt>("stop", 1, -1, &_lock);
        bool printIndex = _args.getOpt<bool>("printIndex", 2, false, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->printParts(start, stop, printIndex);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "BasicParticleSystem::printParts", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("BasicParticleSystem::printParts", e.what());
      return 0;
    }
  }
+
  //! Get the data pointer of the particle data, encoded as a string (defined elsewhere).
  std::string getDataPointer();
  //! Auto-generated Python glue for getDataPointer(); returns the string to Python.
  static PyObject *_W_18(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      BasicParticleSystem *pbo = dynamic_cast<BasicParticleSystem *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "BasicParticleSystem::getDataPointer", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getDataPointer());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "BasicParticleSystem::getDataPointer", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("BasicParticleSystem::getDataPointer", e.what());
      return 0;
    }
  }
+
+ public:
+ PbArgs _args;
+}
+#define _C_BasicParticleSystem
+;
+
+//******************************************************************************
+
+//! Index into other particle system
+// used for grid based neighborhood searches on generic particle systems (stores
+// only active particles, and reduces copied data)
+// note - pos & flag are disabled here, do not use!
//! Lightweight per-particle record for index-based particle systems: stores only
//! the index back into the source system (used for grid-based neighborhood
//! searches; pos/flag below exist solely to satisfy the template interface).
struct ParticleIndexData {
 public:
  ParticleIndexData() : sourceIndex(0)
  {
  }
  static ParticleBase::SystemType getType()
  {
    return ParticleBase::INDEX;
  }

  IndexInt sourceIndex;  // index of this particle in the original particle system
  //! note - the following two are needed for template instantiation, but not used
  //! for the particle index system (use values from the original system!)
  static Vec3 pos;  // do not use...
  static int flag;  // not needed usually
  // Vec3 pos; // enable for debugging
};
+
//! Particle system over ParticleIndexData: no per-particle payload beyond the
//! source index, so only construction and resize are exposed.
class ParticleIndexSystem : public ParticleSystem<ParticleIndexData> {
 public:
  ParticleIndexSystem(FluidSolver *parent) : ParticleSystem<ParticleIndexData>(parent)
  {
  }
  //! Auto-generated Python constructor glue; replaces any object already bound
  //! to 'self', then registers the new instance with the wrapper registry.
  static int _W_19(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    PbClass *obj = Pb::objFromPy(_self);
    if (obj)
      delete obj;
    try {
      PbArgs _args(_linargs, _kwds);
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(0, "ParticleIndexSystem::ParticleIndexSystem", !noTiming);
      {
        ArgLocker _lock;
        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
        obj = new ParticleIndexSystem(parent);
        obj->registerObject(_self, &_args);
        _args.check();
      }
      pbFinalizePlugin(obj->getParent(), "ParticleIndexSystem::ParticleIndexSystem", !noTiming);
      return 0;
    }
    catch (std::exception &e) {
      pbSetError("ParticleIndexSystem::ParticleIndexSystem", e.what());
      return -1;
    }
  };
  //! we only need a resize function...
  void resize(IndexInt size)
  {
    mData.resize(size);
  }

 public:
  PbArgs _args;  // last set of Python call arguments (wrapper bookkeeping)
}
// Registry marker emitted by the code generator -- the stray ';' below closes the class.
#define _C_ParticleIndexSystem
;
+
+//******************************************************************************
+
+//! Particle set with connectivity
+
//! Particle system with explicit connectivity: in addition to the per-particle
//! DATA it stores a list of CON segments (e.g. filament links between particles).
template<class DATA, class CON> class ConnectedParticleSystem : public ParticleSystem<DATA> {
 public:
  ConnectedParticleSystem(FluidSolver *parent) : ParticleSystem<DATA>(parent)
  {
  }
  //! Auto-generated Python constructor glue (see _W_19 pattern).
  static int _W_20(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    PbClass *obj = Pb::objFromPy(_self);
    if (obj)
      delete obj;
    try {
      PbArgs _args(_linargs, _kwds);
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(0, "ConnectedParticleSystem::ConnectedParticleSystem", !noTiming);
      {
        ArgLocker _lock;
        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
        obj = new ConnectedParticleSystem(parent);
        obj->registerObject(_self, &_args);
        _args.check();
      }
      pbFinalizePlugin(
          obj->getParent(), "ConnectedParticleSystem::ConnectedParticleSystem", !noTiming);
      return 0;
    }
    catch (std::exception &e) {
      pbSetError("ConnectedParticleSystem::ConnectedParticleSystem", e.what());
      return -1;
    }
  }

  //! accessors
  //! A segment is active unless its PDELETE flag is set.
  inline bool isSegActive(int i)
  {
    return (mSegments[i].flag & ParticleBase::PDELETE) == 0;
  }
  inline int segSize() const
  {
    return mSegments.size();
  }
  inline CON &seg(int i)
  {
    return mSegments[i];
  }
  inline const CON &seg(int i) const
  {
    return mSegments[i];
  }

  virtual ParticleBase *clone();

 protected:
  std::vector<CON> mSegments;  // connectivity storage, parallel to particle data
  virtual void compress();     // also has to compact mSegments, hence the override

 public:
  PbArgs _args;  // wrapper bookkeeping
}
// Registry marker emitted by the code generator -- the stray ';' below closes the class.
#define _C_ConnectedParticleSystem
;
+
+//******************************************************************************
+
+//! abstract interface for particle data
//! Abstract interface for per-particle data channels. Uses assert-based stubs
//! instead of pure virtuals so the class stays instantiable from Python.
class ParticleDataBase : public PbClass {
 public:
  ParticleDataBase(FluidSolver *parent);
  //! Auto-generated Python constructor glue (see _W_19 pattern).
  static int _W_21(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    PbClass *obj = Pb::objFromPy(_self);
    if (obj)
      delete obj;
    try {
      PbArgs _args(_linargs, _kwds);
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(0, "ParticleDataBase::ParticleDataBase", !noTiming);
      {
        ArgLocker _lock;
        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
        obj = new ParticleDataBase(parent);
        obj->registerObject(_self, &_args);
        _args.check();
      }
      pbFinalizePlugin(obj->getParent(), "ParticleDataBase::ParticleDataBase", !noTiming);
      return 0;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataBase::ParticleDataBase", e.what());
      return -1;
    }
  }

  virtual ~ParticleDataBase();

  //! data type IDs, in line with those for grids (bit-flag values, hence 1/2/4)
  enum PdataType { TypeNone = 0, TypeReal = 1, TypeInt = 2, TypeVec3 = 4 };

  //! interface functions, using assert instead of pure virtual for python compatibility
  virtual IndexInt getSizeSlow() const
  {
    assertMsg(false, "Dont use, override...");
    return 0;
  }
  virtual void addEntry()
  {
    assertMsg(false, "Dont use, override...");
    return;
  }
  virtual ParticleDataBase *clone()
  {
    assertMsg(false, "Dont use, override...");
    return NULL;
  }
  virtual PdataType getType() const
  {
    assertMsg(false, "Dont use, override...");
    return TypeNone;
  }
  virtual void resize(IndexInt size)
  {
    assertMsg(false, "Dont use, override...");
    return;
  }
  virtual void copyValueSlow(IndexInt from, IndexInt to)
  {
    assertMsg(false, "Dont use, override...");
    return;
  }

  //! set base pointer (non-owning back-reference to the particle system)
  void setParticleSys(ParticleBase *set)
  {
    mpParticleSys = set;
  }

  //! debugging: bounds check for particle indices (debug builds only)
  inline void checkPartIndex(IndexInt idx) const;

 protected:
  ParticleBase *mpParticleSys;  // owning particle system, set via setParticleSys()

 public:
  PbArgs _args;  // wrapper bookkeeping
}
// Registry marker emitted by the code generator -- the stray ';' below closes the class.
#define _C_ParticleDataBase
;
+
+//! abstract interface for particle data
+
//! Concrete per-particle data channel holding one value of type T per particle.
//! Mirrors the grid interface (copyFrom/setConst/add/mult/clamp/...); each
//! Python-visible method is followed by its auto-generated _W_* glue wrapper.
template<class T> class ParticleDataImpl : public ParticleDataBase {
 public:
  ParticleDataImpl(FluidSolver *parent);
  //! Auto-generated Python constructor glue (see _W_19 pattern).
  static int _W_22(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    PbClass *obj = Pb::objFromPy(_self);
    if (obj)
      delete obj;
    try {
      PbArgs _args(_linargs, _kwds);
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(0, "ParticleDataImpl::ParticleDataImpl", !noTiming);
      {
        ArgLocker _lock;
        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
        obj = new ParticleDataImpl(parent);
        obj->registerObject(_self, &_args);
        _args.check();
      }
      pbFinalizePlugin(obj->getParent(), "ParticleDataImpl::ParticleDataImpl", !noTiming);
      return 0;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::ParticleDataImpl", e.what());
      return -1;
    }
  }

  ParticleDataImpl(FluidSolver *parent, ParticleDataImpl<T> *other);
  virtual ~ParticleDataImpl();

  //! access data (index checked in debug builds only)
  inline T &get(const IndexInt idx)
  {
    DEBUG_ONLY(checkPartIndex(idx));
    return mData[idx];
  }
  inline const T &get(const IndexInt idx) const
  {
    DEBUG_ONLY(checkPartIndex(idx));
    return mData[idx];
  }
  inline T &operator[](const IndexInt idx)
  {
    DEBUG_ONLY(checkPartIndex(idx));
    return mData[idx];
  }
  inline const T &operator[](const IndexInt idx) const
  {
    DEBUG_ONLY(checkPartIndex(idx));
    return mData[idx];
  }

  //! set all values to 0, note - different from particleSystem::clear! doesnt modify size of array
  //! (has to stay in sync with parent system)
  void clear();
  //! Auto-generated Python glue for clear().
  static PyObject *_W_23(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::clear", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->clear();
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::clear", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::clear", e.what());
      return 0;
    }
  }

  //! set grid from which to get data...
  void setSource(Grid<T> *grid, bool isMAC = false);
  //! Auto-generated Python glue for setSource().
  static PyObject *_W_24(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::setSource", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        Grid<T> *grid = _args.getPtr<Grid<T>>("grid", 0, &_lock);
        bool isMAC = _args.getOpt<bool>("isMAC", 1, false, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->setSource(grid, isMAC);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::setSource", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::setSource", e.what());
      return 0;
    }
  }

  //! particle data base interface (overrides of the assert-stubs in the base class)
  virtual IndexInt getSizeSlow() const;
  virtual void addEntry();
  virtual ParticleDataBase *clone();
  virtual PdataType getType() const;
  virtual void resize(IndexInt s);
  virtual void copyValueSlow(IndexInt from, IndexInt to);

  IndexInt size() const
  {
    return mData.size();
  }

  //! fast inlined functions for per particle operations
  inline void copyValue(IndexInt from, IndexInt to)
  {
    get(to) = get(from);
  }
  //! initialize a freshly added entry (may sample from the source grid)
  void initNewValue(IndexInt idx, Vec3 pos);

  //! python interface (similar to grid data)
  ParticleDataImpl<T> &copyFrom(const ParticleDataImpl<T> &a);
  //! Auto-generated Python glue for copyFrom(); returns self to Python.
  static PyObject *_W_25(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::copyFrom", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const ParticleDataImpl<T> &a = *_args.getPtr<ParticleDataImpl<T>>("a", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = toPy(pbo->copyFrom(a));
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::copyFrom", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::copyFrom", e.what());
      return 0;
    }
  }

  void setConst(const T &s);
  //! Auto-generated Python glue for setConst().
  static PyObject *_W_26(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::setConst", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const T &s = *_args.getPtr<T>("s", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->setConst(s);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::setConst", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::setConst", e.what());
      return 0;
    }
  }

  void setConstRange(const T &s, const int begin, const int end);
  //! Auto-generated Python glue for setConstRange().
  static PyObject *_W_27(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::setConstRange", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const T &s = *_args.getPtr<T>("s", 0, &_lock);
        const int begin = _args.get<int>("begin", 1, &_lock);
        const int end = _args.get<int>("end", 2, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->setConstRange(s, begin, end);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::setConstRange", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::setConstRange", e.what());
      return 0;
    }
  }

  void add(const ParticleDataImpl<T> &a);
  //! Auto-generated Python glue for add().
  static PyObject *_W_28(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::add", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const ParticleDataImpl<T> &a = *_args.getPtr<ParticleDataImpl<T>>("a", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->add(a);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::add", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::add", e.what());
      return 0;
    }
  }

  void sub(const ParticleDataImpl<T> &a);
  //! Auto-generated Python glue for sub().
  static PyObject *_W_29(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::sub", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const ParticleDataImpl<T> &a = *_args.getPtr<ParticleDataImpl<T>>("a", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->sub(a);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::sub", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::sub", e.what());
      return 0;
    }
  }

  void addConst(const T &s);
  //! Auto-generated Python glue for addConst().
  static PyObject *_W_30(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::addConst", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const T &s = *_args.getPtr<T>("s", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->addConst(s);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::addConst", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::addConst", e.what());
      return 0;
    }
  }

  void addScaled(const ParticleDataImpl<T> &a, const T &factor);
  //! Auto-generated Python glue for addScaled().
  static PyObject *_W_31(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::addScaled", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const ParticleDataImpl<T> &a = *_args.getPtr<ParticleDataImpl<T>>("a", 0, &_lock);
        const T &factor = *_args.getPtr<T>("factor", 1, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->addScaled(a, factor);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::addScaled", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::addScaled", e.what());
      return 0;
    }
  }

  void mult(const ParticleDataImpl<T> &a);
  //! Auto-generated Python glue for mult().
  static PyObject *_W_32(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::mult", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const ParticleDataImpl<T> &a = *_args.getPtr<ParticleDataImpl<T>>("a", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->mult(a);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::mult", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::mult", e.what());
      return 0;
    }
  }

  void multConst(const T &s);
  //! Auto-generated Python glue for multConst().
  static PyObject *_W_33(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::multConst", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const T &s = *_args.getPtr<T>("s", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->multConst(s);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::multConst", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::multConst", e.what());
      return 0;
    }
  }

  void safeDiv(const ParticleDataImpl<T> &a);
  //! Auto-generated Python glue for safeDiv().
  static PyObject *_W_34(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::safeDiv", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const ParticleDataImpl<T> &a = *_args.getPtr<ParticleDataImpl<T>>("a", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->safeDiv(a);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::safeDiv", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::safeDiv", e.what());
      return 0;
    }
  }

  void clamp(const Real vmin, const Real vmax);
  //! Auto-generated Python glue for clamp().
  static PyObject *_W_35(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::clamp", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const Real vmin = _args.get<Real>("vmin", 0, &_lock);
        const Real vmax = _args.get<Real>("vmax", 1, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->clamp(vmin, vmax);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::clamp", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::clamp", e.what());
      return 0;
    }
  }

  void clampMin(const Real vmin);
  //! Auto-generated Python glue for clampMin().
  static PyObject *_W_36(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::clampMin", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const Real vmin = _args.get<Real>("vmin", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->clampMin(vmin);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::clampMin", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::clampMin", e.what());
      return 0;
    }
  }

  void clampMax(const Real vmax);
  //! Auto-generated Python glue for clampMax().
  static PyObject *_W_37(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::clampMax", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const Real vmax = _args.get<Real>("vmax", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->clampMax(vmax);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::clampMax", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::clampMax", e.what());
      return 0;
    }
  }

  Real getMaxAbs() const;
  //! Auto-generated Python glue for getMaxAbs().
  static PyObject *_W_38(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::getMaxAbs", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getMaxAbs());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::getMaxAbs", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::getMaxAbs", e.what());
      return 0;
    }
  }

  Real getMax() const;
  //! Auto-generated Python glue for getMax().
  static PyObject *_W_39(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::getMax", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getMax());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::getMax", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::getMax", e.what());
      return 0;
    }
  }

  Real getMin() const;
  //! Auto-generated Python glue for getMin().
  static PyObject *_W_40(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::getMin", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getMin());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::getMin", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::getMin", e.what());
      return 0;
    }
  }

  //! sum of values; 't'/'itype' optionally restrict the sum to flagged particles
  T sum(const ParticleDataImpl<int> *t = NULL, const int itype = 0) const;
  //! Auto-generated Python glue for sum().
  static PyObject *_W_41(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::sum", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const ParticleDataImpl<int> *t = _args.getPtrOpt<ParticleDataImpl<int>>(
            "t", 0, NULL, &_lock);
        const int itype = _args.getOpt<int>("itype", 1, 0, &_lock);
        pbo->_args.copy(_args);
        _retval = toPy(pbo->sum(t, itype));
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::sum", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::sum", e.what());
      return 0;
    }
  }

  Real sumSquare() const;
  //! Auto-generated Python glue for sumSquare().
  static PyObject *_W_42(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::sumSquare", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->sumSquare());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::sumSquare", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::sumSquare", e.what());
      return 0;
    }
  }

  Real sumMagnitude() const;
  //! Auto-generated Python glue for sumMagnitude().
  static PyObject *_W_43(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::sumMagnitude", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->sumMagnitude());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::sumMagnitude", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::sumMagnitude", e.what());
      return 0;
    }
  }

  //! special, set if int flag in t has "flag"
  void setConstIntFlag(const T &s, const ParticleDataImpl<int> &t, const int flag);
  //! Auto-generated Python glue for setConstIntFlag().
  static PyObject *_W_44(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::setConstIntFlag", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const T &s = *_args.getPtr<T>("s", 0, &_lock);
        const ParticleDataImpl<int> &t = *_args.getPtr<ParticleDataImpl<int>>("t", 1, &_lock);
        const int flag = _args.get<int>("flag", 2, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->setConstIntFlag(s, t, flag);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::setConstIntFlag", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::setConstIntFlag", e.what());
      return 0;
    }
  }

  //! debug print of values in [start, stop); -1 means full range
  void printPdata(IndexInt start = -1, IndexInt stop = -1, bool printIndex = false);
  //! Auto-generated Python glue for printPdata().
  static PyObject *_W_45(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::printPdata", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        IndexInt start = _args.getOpt<IndexInt>("start", 0, -1, &_lock);
        IndexInt stop = _args.getOpt<IndexInt>("stop", 1, -1, &_lock);
        bool printIndex = _args.getOpt<bool>("printIndex", 2, false, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->printPdata(start, stop, printIndex);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::printPdata", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::printPdata", e.what());
      return 0;
    }
  }

  //! file io
  void save(const std::string name);
  //! Auto-generated Python glue for save().
  static PyObject *_W_46(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::save", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const std::string name = _args.get<std::string>("name", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->save(name);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::save", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::save", e.what());
      return 0;
    }
  }

  void load(const std::string name);
  //! Auto-generated Python glue for load().
  static PyObject *_W_47(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::load", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const std::string name = _args.get<std::string>("name", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->load(name);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::load", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::load", e.what());
      return 0;
    }
  }

  //! get data pointer of particle data
  std::string getDataPointer();
  //! Auto-generated Python glue for getDataPointer().
  static PyObject *_W_48(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      ParticleDataImpl *pbo = dynamic_cast<ParticleDataImpl *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "ParticleDataImpl::getDataPointer", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getDataPointer());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "ParticleDataImpl::getDataPointer", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("ParticleDataImpl::getDataPointer", e.what());
      return 0;
    }
  }

 protected:
  //! data storage
  std::vector<T> mData;

  //! optionally , we might have an associated grid from which to grab new data
  Grid<T> *mpGridSource;  //! unfortunately , we need to distinguish mac vs regular vec3
  bool mGridSourceMAC;

 public:
  PbArgs _args;  // wrapper bookkeeping
}
// Registry marker emitted by the code generator -- the stray ';' below closes the class.
#define _C_ParticleDataImpl
;
+
+//******************************************************************************
+// Implementation
+//******************************************************************************
+
const int DELETE_PART = 20;  // chunk size for compression (see ParticleSystem::kill)

//! Queue a particle for insertion; actually added later when the buffers are
//! flushed, so it is safe to call while iterating over the system.
void ParticleBase::addBuffered(const Vec3 &pos, int flag)
{
  mNewBufferPos.push_back(pos);
  mNewBufferFlag.push_back(flag);
}
+
//! Remove all particles (and, via resizeAll, keep attached pdata channels in sync).
template<class S> void ParticleSystem<S>::clear()
{
  mDeleteChunk = mDeletes = 0;  // reset deferred-compression bookkeeping
  this->resizeAll(0);           // instead of mData.clear
}
+
//! Append one particle and grow all attached pdata channels; returns its index.
template<class S> IndexInt ParticleSystem<S>::add(const S &data)
{
  mData.push_back(data);
  // Rescale the compression threshold to the new size (see DELETE_PART).
  mDeleteChunk = mData.size() / DELETE_PART;
  this->addAllPdata();
  return mData.size() - 1;
}
+
//! Mark particle idx as deleted; storage is reclaimed lazily -- compress() runs
//! only once enough deletions have accumulated (and compression is allowed).
template<class S> inline void ParticleSystem<S>::kill(IndexInt idx)
{
  assertMsg(idx >= 0 && idx < size(), "Index out of bounds");
  mData[idx].flag |= PDELETE;
  if ((++mDeletes > mDeleteChunk) && (mAllowCompress))
    compress();
}
+
+template<class S> void ParticleSystem<S>::getPosPdata(ParticleDataImpl<Vec3> &target) const
+{
+ for (IndexInt i = 0; i < (IndexInt)this->size(); ++i) {
+ target[i] = this->getPos(i);
+ }
+}
+template<class S> void ParticleSystem<S>::setPosPdata(const ParticleDataImpl<Vec3> &source)
+{
+ for (IndexInt i = 0; i < (IndexInt)this->size(); ++i) {
+ this->setPos(i, source[i]);
+ }
+}
+
+template<class S> void ParticleSystem<S>::transformPositions(Vec3i dimOld, Vec3i dimNew)
+{
+ const Vec3 factor = calcGridSizeFactor(dimNew, dimOld);
+ for (IndexInt i = 0; i < (IndexInt)this->size(); ++i) {
+ this->setPos(i, this->getPos(i) * factor);
+ }
+}
+
+// check for deletion/invalid position, otherwise return velocity
+
+template<class S> struct _GridAdvectKernel : public KernelBase {
+ _GridAdvectKernel(const KernelBase &base,
+ std::vector<S> &p,
+ const MACGrid &vel,
+ const FlagGrid &flags,
+ const Real dt,
+ const bool deleteInObstacle,
+ const bool stopInObstacle,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude,
+ std::vector<Vec3> &u)
+ : KernelBase(base),
+ p(p),
+ vel(vel),
+ flags(flags),
+ dt(dt),
+ deleteInObstacle(deleteInObstacle),
+ stopInObstacle(stopInObstacle),
+ ptype(ptype),
+ exclude(exclude),
+ u(u)
+ {
+ }
+ inline void op(IndexInt idx,
+ std::vector<S> &p,
+ const MACGrid &vel,
+ const FlagGrid &flags,
+ const Real dt,
+ const bool deleteInObstacle,
+ const bool stopInObstacle,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude,
+ std::vector<Vec3> &u) const
+ {
+ if ((p[idx].flag & ParticleBase::PDELETE) || (ptype && ((*ptype)[idx] & exclude))) {
+ u[idx] = 0.;
+ return;
+ }
+ // special handling
+ if (deleteInObstacle || stopInObstacle) {
+ if (!flags.isInBounds(p[idx].pos, 1) || flags.isObstacle(p[idx].pos)) {
+ if (stopInObstacle)
+ u[idx] = 0.;
+ // for simple tracer particles, its convenient to delete particles right away
+ // for other sim types, eg flip, we can try to fix positions later on
+ if (deleteInObstacle)
+ p[idx].flag |= ParticleBase::PDELETE;
+ return;
+ }
+ }
+ u[idx] = vel.getInterpolated(p[idx].pos) * dt;
+ }
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, p, vel, flags, dt, deleteInObstacle, stopInObstacle, ptype, exclude, u);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<S> &p;
+ const MACGrid &vel;
+ const FlagGrid &flags;
+ const Real dt;
+ const bool deleteInObstacle;
+ const bool stopInObstacle;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+ std::vector<Vec3> &u;
+};
+template<class S> struct GridAdvectKernel : public KernelBase {
+ GridAdvectKernel(std::vector<S> &p,
+ const MACGrid &vel,
+ const FlagGrid &flags,
+ const Real dt,
+ const bool deleteInObstacle,
+ const bool stopInObstacle,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(p.size()),
+ _inner(KernelBase(p.size()),
+ p,
+ vel,
+ flags,
+ dt,
+ deleteInObstacle,
+ stopInObstacle,
+ ptype,
+ exclude,
+ u),
+ p(p),
+ vel(vel),
+ flags(flags),
+ dt(dt),
+ deleteInObstacle(deleteInObstacle),
+ stopInObstacle(stopInObstacle),
+ ptype(ptype),
+ exclude(exclude),
+ u((size))
+ {
+ runMessage();
+ run();
+ }
+ void run()
+ {
+ _inner.run();
+ }
+ inline operator std::vector<Vec3>()
+ {
+ return u;
+ }
+ inline std::vector<Vec3> &getRet()
+ {
+ return u;
+ }
+ inline std::vector<S> &getArg0()
+ {
+ return p;
+ }
+ typedef std::vector<S> type0;
+ inline const MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline const FlagGrid &getArg2()
+ {
+ return flags;
+ }
+ typedef FlagGrid type2;
+ inline const Real &getArg3()
+ {
+ return dt;
+ }
+ typedef Real type3;
+ inline const bool &getArg4()
+ {
+ return deleteInObstacle;
+ }
+ typedef bool type4;
+ inline const bool &getArg5()
+ {
+ return stopInObstacle;
+ }
+ typedef bool type5;
+ inline const ParticleDataImpl<int> *getArg6()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type6;
+ inline const int &getArg7()
+ {
+ return exclude;
+ }
+ typedef int type7;
+ void runMessage()
+ {
+ debMsg("Executing kernel GridAdvectKernel ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ _GridAdvectKernel<S> _inner;
+ std::vector<S> &p;
+ const MACGrid &vel;
+ const FlagGrid &flags;
+ const Real dt;
+ const bool deleteInObstacle;
+ const bool stopInObstacle;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+ std::vector<Vec3> u;
+};
+;
+
+// final check after advection to make sure particles haven't escaped
+// (similar to particle advection kernel)
+
+template<class S> struct KnDeleteInObstacle : public KernelBase {
+ KnDeleteInObstacle(std::vector<S> &p, const FlagGrid &flags)
+ : KernelBase(p.size()), p(p), flags(flags)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, std::vector<S> &p, const FlagGrid &flags) const
+ {
+ if (p[idx].flag & ParticleBase::PDELETE)
+ return;
+ if (!flags.isInBounds(p[idx].pos, 1) || flags.isObstacle(p[idx].pos)) {
+ p[idx].flag |= ParticleBase::PDELETE;
+ }
+ }
+ inline std::vector<S> &getArg0()
+ {
+ return p;
+ }
+ typedef std::vector<S> type0;
+ inline const FlagGrid &getArg1()
+ {
+ return flags;
+ }
+ typedef FlagGrid type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnDeleteInObstacle ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, p, flags);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<S> &p;
+ const FlagGrid &flags;
+};
+
+// try to get closer to actual obstacle boundary
+static inline Vec3 bisectBacktracePos(const FlagGrid &flags, const Vec3 &oldp, const Vec3 &newp)
+{
+ Real s = 0.;
+ for (int i = 1; i < 5; ++i) {
+ Real ds = 1. / (Real)(1 << i);
+ if (!flags.isObstacle(oldp * (1. - (s + ds)) + newp * (s + ds))) {
+ s += ds;
+ }
+ }
+ return (oldp * (1. - (s)) + newp * (s));
+}
+
+// at least make sure all particles are inside domain
+
+template<class S> struct KnClampPositions : public KernelBase {
+ KnClampPositions(std::vector<S> &p,
+ const FlagGrid &flags,
+ ParticleDataImpl<Vec3> *posOld = NULL,
+ bool stopInObstacle = true,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0)
+ : KernelBase(p.size()),
+ p(p),
+ flags(flags),
+ posOld(posOld),
+ stopInObstacle(stopInObstacle),
+ ptype(ptype),
+ exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ std::vector<S> &p,
+ const FlagGrid &flags,
+ ParticleDataImpl<Vec3> *posOld = NULL,
+ bool stopInObstacle = true,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0) const
+ {
+ if (p[idx].flag & ParticleBase::PDELETE)
+ return;
+ if (ptype && ((*ptype)[idx] & exclude)) {
+ if (posOld)
+ p[idx].pos = (*posOld)[idx];
+ return;
+ }
+ if (!flags.isInBounds(p[idx].pos, 0)) {
+ p[idx].pos = clamp(p[idx].pos, Vec3(0.), toVec3(flags.getSize()) - Vec3(1.));
+ }
+ if (stopInObstacle && (flags.isObstacle(p[idx].pos))) {
+ p[idx].pos = bisectBacktracePos(flags, (*posOld)[idx], p[idx].pos);
+ }
+ }
+ inline std::vector<S> &getArg0()
+ {
+ return p;
+ }
+ typedef std::vector<S> type0;
+ inline const FlagGrid &getArg1()
+ {
+ return flags;
+ }
+ typedef FlagGrid type1;
+ inline ParticleDataImpl<Vec3> *getArg2()
+ {
+ return posOld;
+ }
+ typedef ParticleDataImpl<Vec3> type2;
+ inline bool &getArg3()
+ {
+ return stopInObstacle;
+ }
+ typedef bool type3;
+ inline const ParticleDataImpl<int> *getArg4()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type4;
+ inline const int &getArg5()
+ {
+ return exclude;
+ }
+ typedef int type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnClampPositions ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, p, flags, posOld, stopInObstacle, ptype, exclude);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ std::vector<S> &p;
+ const FlagGrid &flags;
+ ParticleDataImpl<Vec3> *posOld;
+ bool stopInObstacle;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+
+// advection plugin
+template<class S>
+void ParticleSystem<S>::advectInGrid(const FlagGrid &flags,
+ const MACGrid &vel,
+ const int integrationMode,
+ const bool deleteInObstacle,
+ const bool stopInObstacle,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+{
+ // position clamp requires old positions, backup
+ ParticleDataImpl<Vec3> *posOld = NULL;
+ if (!deleteInObstacle) {
+ posOld = new ParticleDataImpl<Vec3>(this->getParent());
+ posOld->resize(mData.size());
+ for (IndexInt i = 0; i < (IndexInt)mData.size(); ++i)
+ (*posOld)[i] = mData[i].pos;
+ }
+
+ // update positions
+ GridAdvectKernel<S> kernel(
+ mData, vel, flags, getParent()->getDt(), deleteInObstacle, stopInObstacle, ptype, exclude);
+ integratePointSet(kernel, integrationMode);
+
+ if (!deleteInObstacle) {
+ KnClampPositions<S>(mData, flags, posOld, stopInObstacle, ptype, exclude);
+ delete posOld;
+ }
+ else {
+ KnDeleteInObstacle<S>(mData, flags);
+ }
+}
+
+template<class S> struct KnProjectParticles : public KernelBase {
+ KnProjectParticles(ParticleSystem<S> &part, Grid<Vec3> &gradient)
+ : KernelBase(part.size()), part(part), gradient(gradient)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, ParticleSystem<S> &part, Grid<Vec3> &gradient)
+ {
+ static RandomStream rand(3123984);
+ const double jlen = 0.1;
+
+ if (part.isActive(idx)) {
+ // project along levelset gradient
+ Vec3 p = part[idx].pos;
+ if (gradient.isInBounds(p)) {
+ Vec3 n = gradient.getInterpolated(p);
+ Real dist = normalize(n);
+ Vec3 dx = n * (-dist + jlen * (1 + rand.getReal()));
+ p += dx;
+ }
+ // clamp to outer boundaries (+jitter)
+ const double jlen = 0.1;
+ Vec3 jitter = jlen * rand.getVec3();
+ part[idx].pos = clamp(p, Vec3(1, 1, 1) + jitter, toVec3(gradient.getSize() - 1) - jitter);
+ }
+ }
+ inline ParticleSystem<S> &getArg0()
+ {
+ return part;
+ }
+ typedef ParticleSystem<S> type0;
+ inline Grid<Vec3> &getArg1()
+ {
+ return gradient;
+ }
+ typedef Grid<Vec3> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnProjectParticles ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void run()
+ {
+ const IndexInt _sz = size;
+ for (IndexInt i = 0; i < _sz; i++)
+ op(i, part, gradient);
+ }
+ ParticleSystem<S> &part;
+ Grid<Vec3> &gradient;
+};
+
+template<class S> void ParticleSystem<S>::projectOutside(Grid<Vec3> &gradient)
+{
+ KnProjectParticles<S>(*this, gradient);
+}
+
+template<class S> struct KnProjectOutOfBnd : public KernelBase {
+ KnProjectOutOfBnd(ParticleSystem<S> &part,
+ const FlagGrid &flags,
+ const Real bnd,
+ const bool *axis,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(part.size()),
+ part(part),
+ flags(flags),
+ bnd(bnd),
+ axis(axis),
+ ptype(ptype),
+ exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ ParticleSystem<S> &part,
+ const FlagGrid &flags,
+ const Real bnd,
+ const bool *axis,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude) const
+ {
+ if (!part.isActive(idx) || (ptype && ((*ptype)[idx] & exclude)))
+ return;
+ if (axis[0])
+ part[idx].pos.x = std::max(part[idx].pos.x, bnd);
+ if (axis[1])
+ part[idx].pos.x = std::min(part[idx].pos.x, static_cast<Real>(flags.getSizeX()) - bnd);
+ if (axis[2])
+ part[idx].pos.y = std::max(part[idx].pos.y, bnd);
+ if (axis[3])
+ part[idx].pos.y = std::min(part[idx].pos.y, static_cast<Real>(flags.getSizeY()) - bnd);
+ if (flags.is3D()) {
+ if (axis[4])
+ part[idx].pos.z = std::max(part[idx].pos.z, bnd);
+ if (axis[5])
+ part[idx].pos.z = std::min(part[idx].pos.z, static_cast<Real>(flags.getSizeZ()) - bnd);
+ }
+ }
+ inline ParticleSystem<S> &getArg0()
+ {
+ return part;
+ }
+ typedef ParticleSystem<S> type0;
+ inline const FlagGrid &getArg1()
+ {
+ return flags;
+ }
+ typedef FlagGrid type1;
+ inline const Real &getArg2()
+ {
+ return bnd;
+ }
+ typedef Real type2;
+ inline const bool *getArg3()
+ {
+ return axis;
+ }
+ typedef bool type3;
+ inline const ParticleDataImpl<int> *getArg4()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type4;
+ inline const int &getArg5()
+ {
+ return exclude;
+ }
+ typedef int type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnProjectOutOfBnd ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, part, flags, bnd, axis, ptype, exclude);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleSystem<S> &part;
+ const FlagGrid &flags;
+ const Real bnd;
+ const bool *axis;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+
+template<class S>
+void ParticleSystem<S>::projectOutOfBnd(const FlagGrid &flags,
+ const Real bnd,
+ const std::string &plane,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+{
+ bool axis[6] = {false};
+ for (std::string::const_iterator it = plane.begin(); it != plane.end(); ++it) {
+ if (*it == 'x')
+ axis[0] = true;
+ if (*it == 'X')
+ axis[1] = true;
+ if (*it == 'y')
+ axis[2] = true;
+ if (*it == 'Y')
+ axis[3] = true;
+ if (*it == 'z')
+ axis[4] = true;
+ if (*it == 'Z')
+ axis[5] = true;
+ }
+ KnProjectOutOfBnd<S>(*this, flags, bnd, axis, ptype, exclude);
+}
+
+template<class S> void ParticleSystem<S>::resizeAll(IndexInt size)
+{
+ // resize all buffers to target size in 1 go
+ mData.resize(size);
+ for (IndexInt i = 0; i < (IndexInt)mPartData.size(); ++i)
+ mPartData[i]->resize(size);
+}
+
+template<class S> void ParticleSystem<S>::compress()
+{
+ IndexInt nextRead = mData.size();
+ for (IndexInt i = 0; i < (IndexInt)mData.size(); i++) {
+ while ((mData[i].flag & PDELETE) != 0) {
+ nextRead--;
+ mData[i] = mData[nextRead];
+ // ugly, but prevent virtual function calls here:
+ for (IndexInt pd = 0; pd < (IndexInt)mPdataReal.size(); ++pd)
+ mPdataReal[pd]->copyValue(nextRead, i);
+ for (IndexInt pd = 0; pd < (IndexInt)mPdataVec3.size(); ++pd)
+ mPdataVec3[pd]->copyValue(nextRead, i);
+ for (IndexInt pd = 0; pd < (IndexInt)mPdataInt.size(); ++pd)
+ mPdataInt[pd]->copyValue(nextRead, i);
+ mData[nextRead].flag = PINVALID;
+ }
+ }
+ if (nextRead < (IndexInt)mData.size())
+ debMsg("Deleted " << ((IndexInt)mData.size() - nextRead) << " particles", 1); // debug info
+
+ resizeAll(nextRead);
+ mDeletes = 0;
+ mDeleteChunk = mData.size() / DELETE_PART;
+}
+
+//! insert buffered positions as new particles, update additional particle data
+template<class S> void ParticleSystem<S>::insertBufferedParticles()
+{
+ if (mNewBufferPos.size() == 0)
+ return;
+ IndexInt newCnt = mData.size();
+ resizeAll(newCnt + mNewBufferPos.size());
+
+ // clear new flag everywhere
+ for (IndexInt i = 0; i < (IndexInt)mData.size(); ++i)
+ mData[i].flag &= ~PNEW;
+
+ for (IndexInt i = 0; i < (IndexInt)mNewBufferPos.size(); ++i) {
+ int flag = (mNewBufferFlag.size() > 0) ? mNewBufferFlag[i] : 0;
+ // note, other fields are not initialized here...
+ mData[newCnt].pos = mNewBufferPos[i];
+ mData[newCnt].flag = PNEW | flag;
+ // now init pdata fields from associated grids...
+ for (IndexInt pd = 0; pd < (IndexInt)mPdataReal.size(); ++pd)
+ mPdataReal[pd]->initNewValue(newCnt, mNewBufferPos[i]);
+ for (IndexInt pd = 0; pd < (IndexInt)mPdataVec3.size(); ++pd)
+ mPdataVec3[pd]->initNewValue(newCnt, mNewBufferPos[i]);
+ for (IndexInt pd = 0; pd < (IndexInt)mPdataInt.size(); ++pd)
+ mPdataInt[pd]->initNewValue(newCnt, mNewBufferPos[i]);
+ newCnt++;
+ }
+ if (mNewBufferPos.size() > 0)
+ debMsg("Added & initialized " << (IndexInt)mNewBufferPos.size() << " particles",
+ 2); // debug info
+ mNewBufferPos.clear();
+ mNewBufferFlag.clear();
+}
+
+template<class DATA, class CON> void ConnectedParticleSystem<DATA, CON>::compress()
+{
+ const IndexInt sz = ParticleSystem<DATA>::size();
+ IndexInt *renumber_back = new IndexInt[sz];
+ IndexInt *renumber = new IndexInt[sz];
+ for (IndexInt i = 0; i < sz; i++)
+ renumber[i] = renumber_back[i] = -1;
+
+ // reorder elements
+ std::vector<DATA> &data = ParticleSystem<DATA>::mData;
+ IndexInt nextRead = sz;
+ for (IndexInt i = 0; i < nextRead; i++) {
+ if ((data[i].flag & ParticleBase::PDELETE) != 0) {
+ nextRead--;
+ data[i] = data[nextRead];
+ data[nextRead].flag = 0;
+ renumber_back[i] = nextRead;
+ }
+ else
+ renumber_back[i] = i;
+ }
+
+ // acceleration structure
+ for (IndexInt i = 0; i < nextRead; i++)
+ renumber[renumber_back[i]] = i;
+
+ // rename indices in filaments
+ for (IndexInt i = 0; i < (IndexInt)mSegments.size(); i++)
+ mSegments[i].renumber(renumber);
+
+ ParticleSystem<DATA>::mData.resize(nextRead);
+ ParticleSystem<DATA>::mDeletes = 0;
+ ParticleSystem<DATA>::mDeleteChunk = ParticleSystem<DATA>::size() / DELETE_PART;
+
+ delete[] renumber;
+ delete[] renumber_back;
+}
+
+template<class S> ParticleBase *ParticleSystem<S>::clone()
+{
+ ParticleSystem<S> *nm = new ParticleSystem<S>(getParent());
+ if (this->mAllowCompress)
+ compress();
+
+ nm->mData = mData;
+ nm->setName(getName());
+ this->cloneParticleData(nm);
+ return nm;
+}
+
+template<class DATA, class CON> ParticleBase *ConnectedParticleSystem<DATA, CON>::clone()
+{
+ ConnectedParticleSystem<DATA, CON> *nm = new ConnectedParticleSystem<DATA, CON>(
+ this->getParent());
+ if (this->mAllowCompress)
+ compress();
+
+ nm->mData = this->mData;
+ nm->mSegments = mSegments;
+ nm->setName(this->getName());
+ this->cloneParticleData(nm);
+ return nm;
+}
+
+template<class S> std::string ParticleSystem<S>::infoString() const
+{
+ std::stringstream s;
+ s << "ParticleSys '" << getName() << "'\n-> ";
+ if (this->getNumPdata() > 0)
+ s << "pdata: " << this->getNumPdata();
+ s << "parts: " << size();
+ // for(IndexInt i=0; i<(IndexInt)mPartData.size(); ++i) { sstr << i<<":" << mPartData[i]->size()
+ // <<" "; }
+ return s.str();
+}
+
+template<class S> inline void ParticleSystem<S>::checkPartIndex(IndexInt idx) const
+{
+ IndexInt mySize = this->size();
+ if (idx < 0 || idx > mySize) {
+ errMsg("ParticleBase "
+ << " size " << mySize << " : index " << idx << " out of bound ");
+ }
+}
+
+inline void ParticleDataBase::checkPartIndex(IndexInt idx) const
+{
+ IndexInt mySize = this->getSizeSlow();
+ if (idx < 0 || idx > mySize) {
+ errMsg("ParticleData "
+ << " size " << mySize << " : index " << idx << " out of bound ");
+ }
+ if (mpParticleSys && mpParticleSys->getSizeSlow() != mySize) {
+ errMsg("ParticleData "
+ << " size " << mySize << " does not match parent! (" << mpParticleSys->getSizeSlow()
+ << ") ");
+ }
+}
+
+// set contents to zero, as for a grid
+template<class T> void ParticleDataImpl<T>::clear()
+{
+ for (IndexInt i = 0; i < (IndexInt)mData.size(); ++i)
+ mData[i] = 0.;
+}
+
+//! count by type flag
+int countParticles(const ParticleDataImpl<int> &t, const int flag);
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/particle.h.reg.cpp b/extern/mantaflow/preprocessed/particle.h.reg.cpp
new file mode 100644
index 00000000000..6e0466d0203
--- /dev/null
+++ b/extern/mantaflow/preprocessed/particle.h.reg.cpp
@@ -0,0 +1,437 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "particle.h"
+namespace Manta {
+#ifdef _C_BasicParticleSystem
+static const Pb::Register _R_13("BasicParticleSystem",
+ "BasicParticleSystem",
+ "ParticleSystem<BasicParticleData>");
+template<> const char *Namify<BasicParticleSystem>::S = "BasicParticleSystem";
+static const Pb::Register _R_14("BasicParticleSystem",
+ "BasicParticleSystem",
+ BasicParticleSystem::_W_12);
+static const Pb::Register _R_15("BasicParticleSystem", "save", BasicParticleSystem::_W_13);
+static const Pb::Register _R_16("BasicParticleSystem", "load", BasicParticleSystem::_W_14);
+static const Pb::Register _R_17("BasicParticleSystem",
+ "readParticles",
+ BasicParticleSystem::_W_15);
+static const Pb::Register _R_18("BasicParticleSystem", "addParticle", BasicParticleSystem::_W_16);
+static const Pb::Register _R_19("BasicParticleSystem", "printParts", BasicParticleSystem::_W_17);
+static const Pb::Register _R_20("BasicParticleSystem",
+ "getDataPointer",
+ BasicParticleSystem::_W_18);
+#endif
+#ifdef _C_ParticleBase
+static const Pb::Register _R_21("ParticleBase", "ParticleBase", "PbClass");
+template<> const char *Namify<ParticleBase>::S = "ParticleBase";
+static const Pb::Register _R_22("ParticleBase", "ParticleBase", ParticleBase::_W_0);
+static const Pb::Register _R_23("ParticleBase", "create", ParticleBase::_W_1);
+#endif
+#ifdef _C_ParticleDataBase
+static const Pb::Register _R_24("ParticleDataBase", "ParticleDataBase", "PbClass");
+template<> const char *Namify<ParticleDataBase>::S = "ParticleDataBase";
+static const Pb::Register _R_25("ParticleDataBase", "ParticleDataBase", ParticleDataBase::_W_21);
+#endif
+#ifdef _C_ParticleDataImpl
+static const Pb::Register _R_26("ParticleDataImpl<int>",
+ "ParticleDataImpl<int>",
+ "ParticleDataBase");
+template<> const char *Namify<ParticleDataImpl<int>>::S = "ParticleDataImpl<int>";
+static const Pb::Register _R_27("ParticleDataImpl<int>",
+ "ParticleDataImpl",
+ ParticleDataImpl<int>::_W_22);
+static const Pb::Register _R_28("ParticleDataImpl<int>", "clear", ParticleDataImpl<int>::_W_23);
+static const Pb::Register _R_29("ParticleDataImpl<int>",
+ "setSource",
+ ParticleDataImpl<int>::_W_24);
+static const Pb::Register _R_30("ParticleDataImpl<int>", "copyFrom", ParticleDataImpl<int>::_W_25);
+static const Pb::Register _R_31("ParticleDataImpl<int>", "setConst", ParticleDataImpl<int>::_W_26);
+static const Pb::Register _R_32("ParticleDataImpl<int>",
+ "setConstRange",
+ ParticleDataImpl<int>::_W_27);
+static const Pb::Register _R_33("ParticleDataImpl<int>", "add", ParticleDataImpl<int>::_W_28);
+static const Pb::Register _R_34("ParticleDataImpl<int>", "sub", ParticleDataImpl<int>::_W_29);
+static const Pb::Register _R_35("ParticleDataImpl<int>", "addConst", ParticleDataImpl<int>::_W_30);
+static const Pb::Register _R_36("ParticleDataImpl<int>",
+ "addScaled",
+ ParticleDataImpl<int>::_W_31);
+static const Pb::Register _R_37("ParticleDataImpl<int>", "mult", ParticleDataImpl<int>::_W_32);
+static const Pb::Register _R_38("ParticleDataImpl<int>",
+ "multConst",
+ ParticleDataImpl<int>::_W_33);
+static const Pb::Register _R_39("ParticleDataImpl<int>", "safeDiv", ParticleDataImpl<int>::_W_34);
+static const Pb::Register _R_40("ParticleDataImpl<int>", "clamp", ParticleDataImpl<int>::_W_35);
+static const Pb::Register _R_41("ParticleDataImpl<int>", "clampMin", ParticleDataImpl<int>::_W_36);
+static const Pb::Register _R_42("ParticleDataImpl<int>", "clampMax", ParticleDataImpl<int>::_W_37);
+static const Pb::Register _R_43("ParticleDataImpl<int>",
+ "getMaxAbs",
+ ParticleDataImpl<int>::_W_38);
+static const Pb::Register _R_44("ParticleDataImpl<int>", "getMax", ParticleDataImpl<int>::_W_39);
+static const Pb::Register _R_45("ParticleDataImpl<int>", "getMin", ParticleDataImpl<int>::_W_40);
+static const Pb::Register _R_46("ParticleDataImpl<int>", "sum", ParticleDataImpl<int>::_W_41);
+static const Pb::Register _R_47("ParticleDataImpl<int>",
+ "sumSquare",
+ ParticleDataImpl<int>::_W_42);
+static const Pb::Register _R_48("ParticleDataImpl<int>",
+ "sumMagnitude",
+ ParticleDataImpl<int>::_W_43);
+static const Pb::Register _R_49("ParticleDataImpl<int>",
+ "setConstIntFlag",
+ ParticleDataImpl<int>::_W_44);
+static const Pb::Register _R_50("ParticleDataImpl<int>",
+ "printPdata",
+ ParticleDataImpl<int>::_W_45);
+static const Pb::Register _R_51("ParticleDataImpl<int>", "save", ParticleDataImpl<int>::_W_46);
+static const Pb::Register _R_52("ParticleDataImpl<int>", "load", ParticleDataImpl<int>::_W_47);
+static const Pb::Register _R_53("ParticleDataImpl<int>",
+ "getDataPointer",
+ ParticleDataImpl<int>::_W_48);
+static const Pb::Register _R_54("ParticleDataImpl<Real>",
+ "ParticleDataImpl<Real>",
+ "ParticleDataBase");
+template<> const char *Namify<ParticleDataImpl<Real>>::S = "ParticleDataImpl<Real>";
+static const Pb::Register _R_55("ParticleDataImpl<Real>",
+ "ParticleDataImpl",
+ ParticleDataImpl<Real>::_W_22);
+static const Pb::Register _R_56("ParticleDataImpl<Real>", "clear", ParticleDataImpl<Real>::_W_23);
+static const Pb::Register _R_57("ParticleDataImpl<Real>",
+ "setSource",
+ ParticleDataImpl<Real>::_W_24);
+static const Pb::Register _R_58("ParticleDataImpl<Real>",
+ "copyFrom",
+ ParticleDataImpl<Real>::_W_25);
+static const Pb::Register _R_59("ParticleDataImpl<Real>",
+ "setConst",
+ ParticleDataImpl<Real>::_W_26);
+static const Pb::Register _R_60("ParticleDataImpl<Real>",
+ "setConstRange",
+ ParticleDataImpl<Real>::_W_27);
+static const Pb::Register _R_61("ParticleDataImpl<Real>", "add", ParticleDataImpl<Real>::_W_28);
+static const Pb::Register _R_62("ParticleDataImpl<Real>", "sub", ParticleDataImpl<Real>::_W_29);
+static const Pb::Register _R_63("ParticleDataImpl<Real>",
+ "addConst",
+ ParticleDataImpl<Real>::_W_30);
+static const Pb::Register _R_64("ParticleDataImpl<Real>",
+ "addScaled",
+ ParticleDataImpl<Real>::_W_31);
+static const Pb::Register _R_65("ParticleDataImpl<Real>", "mult", ParticleDataImpl<Real>::_W_32);
+static const Pb::Register _R_66("ParticleDataImpl<Real>",
+ "multConst",
+ ParticleDataImpl<Real>::_W_33);
+static const Pb::Register _R_67("ParticleDataImpl<Real>",
+ "safeDiv",
+ ParticleDataImpl<Real>::_W_34);
+static const Pb::Register _R_68("ParticleDataImpl<Real>", "clamp", ParticleDataImpl<Real>::_W_35);
+static const Pb::Register _R_69("ParticleDataImpl<Real>",
+ "clampMin",
+ ParticleDataImpl<Real>::_W_36);
+static const Pb::Register _R_70("ParticleDataImpl<Real>",
+ "clampMax",
+ ParticleDataImpl<Real>::_W_37);
+static const Pb::Register _R_71("ParticleDataImpl<Real>",
+ "getMaxAbs",
+ ParticleDataImpl<Real>::_W_38);
+static const Pb::Register _R_72("ParticleDataImpl<Real>", "getMax", ParticleDataImpl<Real>::_W_39);
+static const Pb::Register _R_73("ParticleDataImpl<Real>", "getMin", ParticleDataImpl<Real>::_W_40);
+static const Pb::Register _R_74("ParticleDataImpl<Real>", "sum", ParticleDataImpl<Real>::_W_41);
+static const Pb::Register _R_75("ParticleDataImpl<Real>",
+ "sumSquare",
+ ParticleDataImpl<Real>::_W_42);
+static const Pb::Register _R_76("ParticleDataImpl<Real>",
+ "sumMagnitude",
+ ParticleDataImpl<Real>::_W_43);
+static const Pb::Register _R_77("ParticleDataImpl<Real>",
+ "setConstIntFlag",
+ ParticleDataImpl<Real>::_W_44);
+static const Pb::Register _R_78("ParticleDataImpl<Real>",
+ "printPdata",
+ ParticleDataImpl<Real>::_W_45);
+static const Pb::Register _R_79("ParticleDataImpl<Real>", "save", ParticleDataImpl<Real>::_W_46);
+static const Pb::Register _R_80("ParticleDataImpl<Real>", "load", ParticleDataImpl<Real>::_W_47);
+static const Pb::Register _R_81("ParticleDataImpl<Real>",
+ "getDataPointer",
+ ParticleDataImpl<Real>::_W_48);
+static const Pb::Register _R_82("ParticleDataImpl<Vec3>",
+ "ParticleDataImpl<Vec3>",
+ "ParticleDataBase");
+template<> const char *Namify<ParticleDataImpl<Vec3>>::S = "ParticleDataImpl<Vec3>";
+static const Pb::Register _R_83("ParticleDataImpl<Vec3>",
+ "ParticleDataImpl",
+ ParticleDataImpl<Vec3>::_W_22);
+static const Pb::Register _R_84("ParticleDataImpl<Vec3>", "clear", ParticleDataImpl<Vec3>::_W_23);
+static const Pb::Register _R_85("ParticleDataImpl<Vec3>",
+ "setSource",
+ ParticleDataImpl<Vec3>::_W_24);
+static const Pb::Register _R_86("ParticleDataImpl<Vec3>",
+ "copyFrom",
+ ParticleDataImpl<Vec3>::_W_25);
+static const Pb::Register _R_87("ParticleDataImpl<Vec3>",
+ "setConst",
+ ParticleDataImpl<Vec3>::_W_26);
+static const Pb::Register _R_88("ParticleDataImpl<Vec3>",
+ "setConstRange",
+ ParticleDataImpl<Vec3>::_W_27);
+static const Pb::Register _R_89("ParticleDataImpl<Vec3>", "add", ParticleDataImpl<Vec3>::_W_28);
+static const Pb::Register _R_90("ParticleDataImpl<Vec3>", "sub", ParticleDataImpl<Vec3>::_W_29);
+static const Pb::Register _R_91("ParticleDataImpl<Vec3>",
+ "addConst",
+ ParticleDataImpl<Vec3>::_W_30);
+static const Pb::Register _R_92("ParticleDataImpl<Vec3>",
+ "addScaled",
+ ParticleDataImpl<Vec3>::_W_31);
+static const Pb::Register _R_93("ParticleDataImpl<Vec3>", "mult", ParticleDataImpl<Vec3>::_W_32);
+static const Pb::Register _R_94("ParticleDataImpl<Vec3>",
+ "multConst",
+ ParticleDataImpl<Vec3>::_W_33);
+static const Pb::Register _R_95("ParticleDataImpl<Vec3>",
+ "safeDiv",
+ ParticleDataImpl<Vec3>::_W_34);
+static const Pb::Register _R_96("ParticleDataImpl<Vec3>", "clamp", ParticleDataImpl<Vec3>::_W_35);
+static const Pb::Register _R_97("ParticleDataImpl<Vec3>",
+ "clampMin",
+ ParticleDataImpl<Vec3>::_W_36);
+static const Pb::Register _R_98("ParticleDataImpl<Vec3>",
+ "clampMax",
+ ParticleDataImpl<Vec3>::_W_37);
+static const Pb::Register _R_99("ParticleDataImpl<Vec3>",
+ "getMaxAbs",
+ ParticleDataImpl<Vec3>::_W_38);
+static const Pb::Register _R_100("ParticleDataImpl<Vec3>",
+ "getMax",
+ ParticleDataImpl<Vec3>::_W_39);
+static const Pb::Register _R_101("ParticleDataImpl<Vec3>",
+ "getMin",
+ ParticleDataImpl<Vec3>::_W_40);
+static const Pb::Register _R_102("ParticleDataImpl<Vec3>", "sum", ParticleDataImpl<Vec3>::_W_41);
+static const Pb::Register _R_103("ParticleDataImpl<Vec3>",
+ "sumSquare",
+ ParticleDataImpl<Vec3>::_W_42);
+static const Pb::Register _R_104("ParticleDataImpl<Vec3>",
+ "sumMagnitude",
+ ParticleDataImpl<Vec3>::_W_43);
+static const Pb::Register _R_105("ParticleDataImpl<Vec3>",
+ "setConstIntFlag",
+ ParticleDataImpl<Vec3>::_W_44);
+static const Pb::Register _R_106("ParticleDataImpl<Vec3>",
+ "printPdata",
+ ParticleDataImpl<Vec3>::_W_45);
+static const Pb::Register _R_107("ParticleDataImpl<Vec3>", "save", ParticleDataImpl<Vec3>::_W_46);
+static const Pb::Register _R_108("ParticleDataImpl<Vec3>", "load", ParticleDataImpl<Vec3>::_W_47);
+static const Pb::Register _R_109("ParticleDataImpl<Vec3>",
+ "getDataPointer",
+ ParticleDataImpl<Vec3>::_W_48);
+#endif
+#ifdef _C_ParticleIndexSystem
+static const Pb::Register _R_110("ParticleIndexSystem",
+ "ParticleIndexSystem",
+ "ParticleSystem<ParticleIndexData>");
+template<> const char *Namify<ParticleIndexSystem>::S = "ParticleIndexSystem";
+static const Pb::Register _R_111("ParticleIndexSystem",
+ "ParticleIndexSystem",
+ ParticleIndexSystem::_W_19);
+#endif
+#ifdef _C_ParticleSystem
+static const Pb::Register _R_112("ParticleSystem<BasicParticleData>",
+ "ParticleSystem<BasicParticleData>",
+ "ParticleBase");
+template<>
+const char *Namify<ParticleSystem<BasicParticleData>>::S = "ParticleSystem<BasicParticleData>";
+static const Pb::Register _R_113("ParticleSystem<BasicParticleData>",
+ "ParticleSystem",
+ ParticleSystem<BasicParticleData>::_W_2);
+static const Pb::Register _R_114("ParticleSystem<BasicParticleData>",
+ "pySize",
+ ParticleSystem<BasicParticleData>::_W_3);
+static const Pb::Register _R_115("ParticleSystem<BasicParticleData>",
+ "setPos",
+ ParticleSystem<BasicParticleData>::_W_4);
+static const Pb::Register _R_116("ParticleSystem<BasicParticleData>",
+ "getPos",
+ ParticleSystem<BasicParticleData>::_W_5);
+static const Pb::Register _R_117("ParticleSystem<BasicParticleData>",
+ "getPosPdata",
+ ParticleSystem<BasicParticleData>::_W_6);
+static const Pb::Register _R_118("ParticleSystem<BasicParticleData>",
+ "setPosPdata",
+ ParticleSystem<BasicParticleData>::_W_7);
+static const Pb::Register _R_119("ParticleSystem<BasicParticleData>",
+ "clear",
+ ParticleSystem<BasicParticleData>::_W_8);
+static const Pb::Register _R_120("ParticleSystem<BasicParticleData>",
+ "advectInGrid",
+ ParticleSystem<BasicParticleData>::_W_9);
+static const Pb::Register _R_121("ParticleSystem<BasicParticleData>",
+ "projectOutside",
+ ParticleSystem<BasicParticleData>::_W_10);
+static const Pb::Register _R_122("ParticleSystem<BasicParticleData>",
+ "projectOutOfBnd",
+ ParticleSystem<BasicParticleData>::_W_11);
+static const Pb::Register _R_123("ParticleSystem<ParticleIndexData>",
+ "ParticleSystem<ParticleIndexData>",
+ "ParticleBase");
+template<>
+const char *Namify<ParticleSystem<ParticleIndexData>>::S = "ParticleSystem<ParticleIndexData>";
+static const Pb::Register _R_124("ParticleSystem<ParticleIndexData>",
+ "ParticleSystem",
+ ParticleSystem<ParticleIndexData>::_W_2);
+static const Pb::Register _R_125("ParticleSystem<ParticleIndexData>",
+ "pySize",
+ ParticleSystem<ParticleIndexData>::_W_3);
+static const Pb::Register _R_126("ParticleSystem<ParticleIndexData>",
+ "setPos",
+ ParticleSystem<ParticleIndexData>::_W_4);
+static const Pb::Register _R_127("ParticleSystem<ParticleIndexData>",
+ "getPos",
+ ParticleSystem<ParticleIndexData>::_W_5);
+static const Pb::Register _R_128("ParticleSystem<ParticleIndexData>",
+ "getPosPdata",
+ ParticleSystem<ParticleIndexData>::_W_6);
+static const Pb::Register _R_129("ParticleSystem<ParticleIndexData>",
+ "setPosPdata",
+ ParticleSystem<ParticleIndexData>::_W_7);
+static const Pb::Register _R_130("ParticleSystem<ParticleIndexData>",
+ "clear",
+ ParticleSystem<ParticleIndexData>::_W_8);
+static const Pb::Register _R_131("ParticleSystem<ParticleIndexData>",
+ "advectInGrid",
+ ParticleSystem<ParticleIndexData>::_W_9);
+static const Pb::Register _R_132("ParticleSystem<ParticleIndexData>",
+ "projectOutside",
+ ParticleSystem<ParticleIndexData>::_W_10);
+static const Pb::Register _R_133("ParticleSystem<ParticleIndexData>",
+ "projectOutOfBnd",
+ ParticleSystem<ParticleIndexData>::_W_11);
+#endif
+static const Pb::Register _R_10("ParticleDataImpl<int>", "PdataInt", "");
+static const Pb::Register _R_11("ParticleDataImpl<Real>", "PdataReal", "");
+static const Pb::Register _R_12("ParticleDataImpl<Vec3>", "PdataVec3", "");
+extern "C" {
+void PbRegister_file_10()
+{
+ KEEP_UNUSED(_R_13);
+ KEEP_UNUSED(_R_14);
+ KEEP_UNUSED(_R_15);
+ KEEP_UNUSED(_R_16);
+ KEEP_UNUSED(_R_17);
+ KEEP_UNUSED(_R_18);
+ KEEP_UNUSED(_R_19);
+ KEEP_UNUSED(_R_20);
+ KEEP_UNUSED(_R_21);
+ KEEP_UNUSED(_R_22);
+ KEEP_UNUSED(_R_23);
+ KEEP_UNUSED(_R_24);
+ KEEP_UNUSED(_R_25);
+ KEEP_UNUSED(_R_26);
+ KEEP_UNUSED(_R_27);
+ KEEP_UNUSED(_R_28);
+ KEEP_UNUSED(_R_29);
+ KEEP_UNUSED(_R_30);
+ KEEP_UNUSED(_R_31);
+ KEEP_UNUSED(_R_32);
+ KEEP_UNUSED(_R_33);
+ KEEP_UNUSED(_R_34);
+ KEEP_UNUSED(_R_35);
+ KEEP_UNUSED(_R_36);
+ KEEP_UNUSED(_R_37);
+ KEEP_UNUSED(_R_38);
+ KEEP_UNUSED(_R_39);
+ KEEP_UNUSED(_R_40);
+ KEEP_UNUSED(_R_41);
+ KEEP_UNUSED(_R_42);
+ KEEP_UNUSED(_R_43);
+ KEEP_UNUSED(_R_44);
+ KEEP_UNUSED(_R_45);
+ KEEP_UNUSED(_R_46);
+ KEEP_UNUSED(_R_47);
+ KEEP_UNUSED(_R_48);
+ KEEP_UNUSED(_R_49);
+ KEEP_UNUSED(_R_50);
+ KEEP_UNUSED(_R_51);
+ KEEP_UNUSED(_R_52);
+ KEEP_UNUSED(_R_53);
+ KEEP_UNUSED(_R_54);
+ KEEP_UNUSED(_R_55);
+ KEEP_UNUSED(_R_56);
+ KEEP_UNUSED(_R_57);
+ KEEP_UNUSED(_R_58);
+ KEEP_UNUSED(_R_59);
+ KEEP_UNUSED(_R_60);
+ KEEP_UNUSED(_R_61);
+ KEEP_UNUSED(_R_62);
+ KEEP_UNUSED(_R_63);
+ KEEP_UNUSED(_R_64);
+ KEEP_UNUSED(_R_65);
+ KEEP_UNUSED(_R_66);
+ KEEP_UNUSED(_R_67);
+ KEEP_UNUSED(_R_68);
+ KEEP_UNUSED(_R_69);
+ KEEP_UNUSED(_R_70);
+ KEEP_UNUSED(_R_71);
+ KEEP_UNUSED(_R_72);
+ KEEP_UNUSED(_R_73);
+ KEEP_UNUSED(_R_74);
+ KEEP_UNUSED(_R_75);
+ KEEP_UNUSED(_R_76);
+ KEEP_UNUSED(_R_77);
+ KEEP_UNUSED(_R_78);
+ KEEP_UNUSED(_R_79);
+ KEEP_UNUSED(_R_80);
+ KEEP_UNUSED(_R_81);
+ KEEP_UNUSED(_R_82);
+ KEEP_UNUSED(_R_83);
+ KEEP_UNUSED(_R_84);
+ KEEP_UNUSED(_R_85);
+ KEEP_UNUSED(_R_86);
+ KEEP_UNUSED(_R_87);
+ KEEP_UNUSED(_R_88);
+ KEEP_UNUSED(_R_89);
+ KEEP_UNUSED(_R_90);
+ KEEP_UNUSED(_R_91);
+ KEEP_UNUSED(_R_92);
+ KEEP_UNUSED(_R_93);
+ KEEP_UNUSED(_R_94);
+ KEEP_UNUSED(_R_95);
+ KEEP_UNUSED(_R_96);
+ KEEP_UNUSED(_R_97);
+ KEEP_UNUSED(_R_98);
+ KEEP_UNUSED(_R_99);
+ KEEP_UNUSED(_R_100);
+ KEEP_UNUSED(_R_101);
+ KEEP_UNUSED(_R_102);
+ KEEP_UNUSED(_R_103);
+ KEEP_UNUSED(_R_104);
+ KEEP_UNUSED(_R_105);
+ KEEP_UNUSED(_R_106);
+ KEEP_UNUSED(_R_107);
+ KEEP_UNUSED(_R_108);
+ KEEP_UNUSED(_R_109);
+ KEEP_UNUSED(_R_110);
+ KEEP_UNUSED(_R_111);
+ KEEP_UNUSED(_R_112);
+ KEEP_UNUSED(_R_113);
+ KEEP_UNUSED(_R_114);
+ KEEP_UNUSED(_R_115);
+ KEEP_UNUSED(_R_116);
+ KEEP_UNUSED(_R_117);
+ KEEP_UNUSED(_R_118);
+ KEEP_UNUSED(_R_119);
+ KEEP_UNUSED(_R_120);
+ KEEP_UNUSED(_R_121);
+ KEEP_UNUSED(_R_122);
+ KEEP_UNUSED(_R_123);
+ KEEP_UNUSED(_R_124);
+ KEEP_UNUSED(_R_125);
+ KEEP_UNUSED(_R_126);
+ KEEP_UNUSED(_R_127);
+ KEEP_UNUSED(_R_128);
+ KEEP_UNUSED(_R_129);
+ KEEP_UNUSED(_R_130);
+ KEEP_UNUSED(_R_131);
+ KEEP_UNUSED(_R_132);
+ KEEP_UNUSED(_R_133);
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/plugin/advection.cpp b/extern/mantaflow/preprocessed/plugin/advection.cpp
new file mode 100644
index 00000000000..13f53140348
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/advection.cpp
@@ -0,0 +1,1521 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011-2015 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Plugins for advection
+ *
+ ******************************************************************************/
+
+#include "vectorbase.h"
+#include "grid.h"
+#include "kernel.h"
+#include <limits>
+
+using namespace std;
+
+namespace Manta {
+
+//! Semi-Lagrange interpolation kernel
+
+template<class T> struct SemiLagrange : public KernelBase {
+ SemiLagrange(const FlagGrid &flags,
+ const MACGrid &vel,
+ Grid<T> &dst,
+ const Grid<T> &src,
+ Real dt,
+ bool isLevelset,
+ int orderSpace,
+ int orderTrace)
+ : KernelBase(&flags, 1),
+ flags(flags),
+ vel(vel),
+ dst(dst),
+ src(src),
+ dt(dt),
+ isLevelset(isLevelset),
+ orderSpace(orderSpace),
+ orderTrace(orderTrace)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ Grid<T> &dst,
+ const Grid<T> &src,
+ Real dt,
+ bool isLevelset,
+ int orderSpace,
+ int orderTrace) const
+ {
+ if (orderTrace == 1) {
+ // traceback position
+ Vec3 pos = Vec3(i + 0.5f, j + 0.5f, k + 0.5f) - vel.getCentered(i, j, k) * dt;
+ dst(i, j, k) = src.getInterpolatedHi(pos, orderSpace);
+ }
+ else if (orderTrace == 2) {
+ // backtracing using explicit midpoint
+ Vec3 p0 = Vec3(i + 0.5f, j + 0.5f, k + 0.5f);
+ Vec3 p1 = p0 - vel.getCentered(i, j, k) * dt * 0.5;
+ Vec3 p2 = p0 - vel.getInterpolated(p1) * dt;
+ dst(i, j, k) = src.getInterpolatedHi(p2, orderSpace);
+ }
+ else {
+ assertMsg(false, "Unknown backtracing order " << orderTrace);
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline Grid<T> &getArg2()
+ {
+ return dst;
+ }
+ typedef Grid<T> type2;
+ inline const Grid<T> &getArg3()
+ {
+ return src;
+ }
+ typedef Grid<T> type3;
+ inline Real &getArg4()
+ {
+ return dt;
+ }
+ typedef Real type4;
+ inline bool &getArg5()
+ {
+ return isLevelset;
+ }
+ typedef bool type5;
+ inline int &getArg6()
+ {
+ return orderSpace;
+ }
+ typedef int type6;
+ inline int &getArg7()
+ {
+ return orderTrace;
+ }
+ typedef int type7;
+ void runMessage()
+ {
+ debMsg("Executing kernel SemiLagrange ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, dst, src, dt, isLevelset, orderSpace, orderTrace);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, dst, src, dt, isLevelset, orderSpace, orderTrace);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ Grid<T> &dst;
+ const Grid<T> &src;
+ Real dt;
+ bool isLevelset;
+ int orderSpace;
+ int orderTrace;
+};
+
+//! Semi-Lagrange interpolation kernel for MAC grids
+
+struct SemiLagrangeMAC : public KernelBase {
+ SemiLagrangeMAC(const FlagGrid &flags,
+ const MACGrid &vel,
+ MACGrid &dst,
+ const MACGrid &src,
+ Real dt,
+ int orderSpace,
+ int orderTrace)
+ : KernelBase(&flags, 1),
+ flags(flags),
+ vel(vel),
+ dst(dst),
+ src(src),
+ dt(dt),
+ orderSpace(orderSpace),
+ orderTrace(orderTrace)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ MACGrid &dst,
+ const MACGrid &src,
+ Real dt,
+ int orderSpace,
+ int orderTrace) const
+ {
+ if (orderTrace == 1) {
+ // get currect velocity at MAC position
+ // no need to shift xpos etc. as lookup field is also shifted
+ Vec3 xpos = Vec3(i + 0.5f, j + 0.5f, k + 0.5f) - vel.getAtMACX(i, j, k) * dt;
+ Real vx = src.getInterpolatedComponentHi<0>(xpos, orderSpace);
+ Vec3 ypos = Vec3(i + 0.5f, j + 0.5f, k + 0.5f) - vel.getAtMACY(i, j, k) * dt;
+ Real vy = src.getInterpolatedComponentHi<1>(ypos, orderSpace);
+ Vec3 zpos = Vec3(i + 0.5f, j + 0.5f, k + 0.5f) - vel.getAtMACZ(i, j, k) * dt;
+ Real vz = src.getInterpolatedComponentHi<2>(zpos, orderSpace);
+
+ dst(i, j, k) = Vec3(vx, vy, vz);
+ }
+ else if (orderTrace == 2) {
+ Vec3 p0 = Vec3(i + 0.5, j + 0.5, k + 0.5);
+ Vec3 xp0 = Vec3(i, j + 0.5f, k + 0.5f);
+ Vec3 xp1 = xp0 - src.getAtMACX(i, j, k) * dt * 0.5;
+ Vec3 xp2 = p0 - src.getInterpolated(xp1) * dt;
+ Real vx = src.getInterpolatedComponentHi<0>(xp2, orderSpace);
+ Vec3 yp0 = Vec3(i + 0.5f, j, k + 0.5f);
+ Vec3 yp1 = yp0 - src.getAtMACY(i, j, k) * dt * 0.5;
+ Vec3 yp2 = p0 - src.getInterpolated(yp1) * dt;
+ Real vy = src.getInterpolatedComponentHi<1>(yp2, orderSpace);
+ Vec3 zp0 = Vec3(i + 0.5f, j + 0.5f, k);
+ Vec3 zp1 = zp0 - src.getAtMACZ(i, j, k) * dt * 0.5;
+ Vec3 zp2 = p0 - src.getInterpolated(zp1) * dt;
+ Real vz = src.getInterpolatedComponentHi<2>(zp2, orderSpace);
+
+ dst(i, j, k) = Vec3(vx, vy, vz);
+ }
+ else {
+ assertMsg(false, "Unknown backtracing order " << orderTrace);
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline MACGrid &getArg2()
+ {
+ return dst;
+ }
+ typedef MACGrid type2;
+ inline const MACGrid &getArg3()
+ {
+ return src;
+ }
+ typedef MACGrid type3;
+ inline Real &getArg4()
+ {
+ return dt;
+ }
+ typedef Real type4;
+ inline int &getArg5()
+ {
+ return orderSpace;
+ }
+ typedef int type5;
+ inline int &getArg6()
+ {
+ return orderTrace;
+ }
+ typedef int type6;
+ void runMessage()
+ {
+ debMsg("Executing kernel SemiLagrangeMAC ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, dst, src, dt, orderSpace, orderTrace);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, dst, src, dt, orderSpace, orderTrace);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ MACGrid &dst;
+ const MACGrid &src;
+ Real dt;
+ int orderSpace;
+ int orderTrace;
+};
+
+//! Kernel: Correct based on forward and backward SL steps (for both centered & mac grids)
+
+template<class T> struct MacCormackCorrect : public KernelBase {
+ MacCormackCorrect(const FlagGrid &flags,
+ Grid<T> &dst,
+ const Grid<T> &old,
+ const Grid<T> &fwd,
+ const Grid<T> &bwd,
+ Real strength,
+ bool isLevelSet,
+ bool isMAC = false)
+ : KernelBase(&flags, 0),
+ flags(flags),
+ dst(dst),
+ old(old),
+ fwd(fwd),
+ bwd(bwd),
+ strength(strength),
+ isLevelSet(isLevelSet),
+ isMAC(isMAC)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const FlagGrid &flags,
+ Grid<T> &dst,
+ const Grid<T> &old,
+ const Grid<T> &fwd,
+ const Grid<T> &bwd,
+ Real strength,
+ bool isLevelSet,
+ bool isMAC = false) const
+ {
+ dst[idx] = fwd[idx];
+
+ if (flags.isFluid(idx)) {
+ // only correct inside fluid region; note, strenth of correction can be modified here
+ dst[idx] += strength * 0.5 * (old[idx] - bwd[idx]);
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<T> &getArg1()
+ {
+ return dst;
+ }
+ typedef Grid<T> type1;
+ inline const Grid<T> &getArg2()
+ {
+ return old;
+ }
+ typedef Grid<T> type2;
+ inline const Grid<T> &getArg3()
+ {
+ return fwd;
+ }
+ typedef Grid<T> type3;
+ inline const Grid<T> &getArg4()
+ {
+ return bwd;
+ }
+ typedef Grid<T> type4;
+ inline Real &getArg5()
+ {
+ return strength;
+ }
+ typedef Real type5;
+ inline bool &getArg6()
+ {
+ return isLevelSet;
+ }
+ typedef bool type6;
+ inline bool &getArg7()
+ {
+ return isMAC;
+ }
+ typedef bool type7;
+ void runMessage()
+ {
+ debMsg("Executing kernel MacCormackCorrect ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, flags, dst, old, fwd, bwd, strength, isLevelSet, isMAC);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const FlagGrid &flags;
+ Grid<T> &dst;
+ const Grid<T> &old;
+ const Grid<T> &fwd;
+ const Grid<T> &bwd;
+ Real strength;
+ bool isLevelSet;
+ bool isMAC;
+};
+
+//! Kernel: Correct based on forward and backward SL steps (for both centered & mac grids)
+
+template<class T> struct MacCormackCorrectMAC : public KernelBase {
+ MacCormackCorrectMAC(const FlagGrid &flags,
+ Grid<T> &dst,
+ const Grid<T> &old,
+ const Grid<T> &fwd,
+ const Grid<T> &bwd,
+ Real strength,
+ bool isLevelSet,
+ bool isMAC = false)
+ : KernelBase(&flags, 0),
+ flags(flags),
+ dst(dst),
+ old(old),
+ fwd(fwd),
+ bwd(bwd),
+ strength(strength),
+ isLevelSet(isLevelSet),
+ isMAC(isMAC)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ Grid<T> &dst,
+ const Grid<T> &old,
+ const Grid<T> &fwd,
+ const Grid<T> &bwd,
+ Real strength,
+ bool isLevelSet,
+ bool isMAC = false) const
+ {
+ bool skip[3] = {false, false, false};
+
+ if (!flags.isFluid(i, j, k))
+ skip[0] = skip[1] = skip[2] = true;
+ if (isMAC) {
+ if ((i > 0) && (!flags.isFluid(i - 1, j, k)))
+ skip[0] = true;
+ if ((j > 0) && (!flags.isFluid(i, j - 1, k)))
+ skip[1] = true;
+ if ((k > 0) && (!flags.isFluid(i, j, k - 1)))
+ skip[2] = true;
+ }
+
+ for (int c = 0; c < 3; ++c) {
+ if (skip[c]) {
+ dst(i, j, k)[c] = fwd(i, j, k)[c];
+ }
+ else {
+ // perform actual correction with given strength
+ dst(i, j, k)[c] = fwd(i, j, k)[c] + strength * 0.5 * (old(i, j, k)[c] - bwd(i, j, k)[c]);
+ }
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<T> &getArg1()
+ {
+ return dst;
+ }
+ typedef Grid<T> type1;
+ inline const Grid<T> &getArg2()
+ {
+ return old;
+ }
+ typedef Grid<T> type2;
+ inline const Grid<T> &getArg3()
+ {
+ return fwd;
+ }
+ typedef Grid<T> type3;
+ inline const Grid<T> &getArg4()
+ {
+ return bwd;
+ }
+ typedef Grid<T> type4;
+ inline Real &getArg5()
+ {
+ return strength;
+ }
+ typedef Real type5;
+ inline bool &getArg6()
+ {
+ return isLevelSet;
+ }
+ typedef bool type6;
+ inline bool &getArg7()
+ {
+ return isMAC;
+ }
+ typedef bool type7;
+ void runMessage()
+ {
+ debMsg("Executing kernel MacCormackCorrectMAC ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, dst, old, fwd, bwd, strength, isLevelSet, isMAC);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, dst, old, fwd, bwd, strength, isLevelSet, isMAC);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<T> &dst;
+ const Grid<T> &old;
+ const Grid<T> &fwd;
+ const Grid<T> &bwd;
+ Real strength;
+ bool isLevelSet;
+ bool isMAC;
+};
+
+// Helper to collect min/max in a template
+template<class T> inline void getMinMax(T &minv, T &maxv, const T &val)
+{
+ if (val < minv)
+ minv = val;
+ if (val > maxv)
+ maxv = val;
+}
+template<> inline void getMinMax<Vec3>(Vec3 &minv, Vec3 &maxv, const Vec3 &val)
+{
+ getMinMax(minv.x, maxv.x, val.x);
+ getMinMax(minv.y, maxv.y, val.y);
+ getMinMax(minv.z, maxv.z, val.z);
+}
+
+//! detect out of bounds value
+template<class T> inline bool cmpMinMax(T &minv, T &maxv, const T &val)
+{
+ if (val < minv)
+ return true;
+ if (val > maxv)
+ return true;
+ return false;
+}
+template<> inline bool cmpMinMax<Vec3>(Vec3 &minv, Vec3 &maxv, const Vec3 &val)
+{
+ return (cmpMinMax(minv.x, maxv.x, val.x) | cmpMinMax(minv.y, maxv.y, val.y) |
+ cmpMinMax(minv.z, maxv.z, val.z));
+}
+
+#define checkFlag(x, y, z) (flags((x), (y), (z)) & (FlagGrid::TypeFluid | FlagGrid::TypeEmpty))
+
+//! Helper function for clamping non-mac grids (those have specialized per component version below)
+// Note - 2 clamp modes, a sharper one (default, clampMode 1, also uses backward step),
+// and a softer version (clampMode 2) that is recommended in Andy's paper
+template<class T>
+inline T doClampComponent(const Vec3i &gridSize,
+ const FlagGrid &flags,
+ T dst,
+ const Grid<T> &orig,
+ const T fwd,
+ const Vec3 &pos,
+ const Vec3 &vel,
+ const int clampMode)
+{
+ T minv(std::numeric_limits<Real>::max()), maxv(-std::numeric_limits<Real>::max());
+ bool haveFl = false;
+
+ // forward (and optionally) backward
+ Vec3i positions[2];
+ int numPos = 1;
+ positions[0] = toVec3i(pos - vel);
+ if (clampMode == 1) {
+ numPos = 2;
+ positions[1] = toVec3i(pos + vel);
+ }
+
+ for (int l = 0; l < numPos; ++l) {
+ Vec3i &currPos = positions[l];
+
+ // clamp lookup to grid
+ const int i0 = clamp(currPos.x, 0, gridSize.x - 1); // note! gridsize already has -1 from call
+ const int j0 = clamp(currPos.y, 0, gridSize.y - 1);
+ const int k0 = clamp(currPos.z, 0, (orig.is3D() ? (gridSize.z - 1) : 1));
+ const int i1 = i0 + 1, j1 = j0 + 1, k1 = (orig.is3D() ? (k0 + 1) : k0);
+
+ // find min/max around source pos
+ if (checkFlag(i0, j0, k0)) {
+ getMinMax(minv, maxv, orig(i0, j0, k0));
+ haveFl = true;
+ }
+ if (checkFlag(i1, j0, k0)) {
+ getMinMax(minv, maxv, orig(i1, j0, k0));
+ haveFl = true;
+ }
+ if (checkFlag(i0, j1, k0)) {
+ getMinMax(minv, maxv, orig(i0, j1, k0));
+ haveFl = true;
+ }
+ if (checkFlag(i1, j1, k0)) {
+ getMinMax(minv, maxv, orig(i1, j1, k0));
+ haveFl = true;
+ }
+
+ if (orig.is3D()) {
+ if (checkFlag(i0, j0, k1)) {
+ getMinMax(minv, maxv, orig(i0, j0, k1));
+ haveFl = true;
+ }
+ if (checkFlag(i1, j0, k1)) {
+ getMinMax(minv, maxv, orig(i1, j0, k1));
+ haveFl = true;
+ }
+ if (checkFlag(i0, j1, k1)) {
+ getMinMax(minv, maxv, orig(i0, j1, k1));
+ haveFl = true;
+ }
+ if (checkFlag(i1, j1, k1)) {
+ getMinMax(minv, maxv, orig(i1, j1, k1));
+ haveFl = true;
+ }
+ }
+ }
+
+ if (!haveFl)
+ return fwd;
+ if (clampMode == 1) {
+ dst = clamp(dst, minv, maxv); // hard clamp
+ }
+ else {
+ if (cmpMinMax(minv, maxv, dst))
+ dst = fwd; // recommended in paper, "softer"
+ }
+ return dst;
+}
+
+//! Helper function for clamping MAC grids, slight differences in flag checks
+// similar to scalar version, just uses single component c of vec3 values
+// for symmetry, reverts to first order near boundaries for clampMode 2
+template<int c>
+inline Real doClampComponentMAC(const FlagGrid &flags,
+ const Vec3i &gridSize,
+ Real dst,
+ const MACGrid &orig,
+ Real fwd,
+ const Vec3 &pos,
+ const Vec3 &vel,
+ const int clampMode)
+{
+ Real minv = std::numeric_limits<Real>::max(), maxv = -std::numeric_limits<Real>::max();
+ // bool haveFl = false;
+
+ // forward (and optionally) backward
+ Vec3i positions[2];
+ int numPos = 1;
+ positions[0] = toVec3i(pos - vel);
+ if (clampMode == 1) {
+ numPos = 2;
+ positions[1] = toVec3i(pos + vel);
+ }
+
+ Vec3i oPos = toVec3i(pos);
+ Vec3i nbPos = oPos;
+ nbPos[c] -= 1;
+ if (clampMode == 2 &&
+ (!(checkFlag(oPos.x, oPos.y, oPos.z) && checkFlag(nbPos.x, nbPos.y, nbPos.z))))
+ return fwd; // replaces haveFl check
+
+ for (int l = 0; l < numPos; ++l) {
+ Vec3i &currPos = positions[l];
+
+ const int i0 = clamp(currPos.x, 0, gridSize.x - 1); // note! gridsize already has -1 from call
+ const int j0 = clamp(
+ currPos.y, 0, gridSize.y - 1); // but we need a clamp to -2 for the +1 offset below
+ const int k0 = clamp(currPos.z, 0, (orig.is3D() ? (gridSize.z - 1) : 0));
+ const int i1 = i0 + 1, j1 = j0 + 1, k1 = (orig.is3D() ? (k0 + 1) : k0);
+
+ // find min/max around source pos
+ getMinMax(minv, maxv, orig(i0, j0, k0)[c]);
+ getMinMax(minv, maxv, orig(i1, j0, k0)[c]);
+ getMinMax(minv, maxv, orig(i0, j1, k0)[c]);
+ getMinMax(minv, maxv, orig(i1, j1, k0)[c]);
+
+ if (orig.is3D()) {
+ getMinMax(minv, maxv, orig(i0, j0, k1)[c]);
+ getMinMax(minv, maxv, orig(i1, j0, k1)[c]);
+ getMinMax(minv, maxv, orig(i0, j1, k1)[c]);
+ getMinMax(minv, maxv, orig(i1, j1, k1)[c]);
+ }
+ }
+
+ if (clampMode == 1) {
+ dst = clamp(dst, minv, maxv); // hard clamp
+ }
+ else {
+ if (cmpMinMax(minv, maxv, dst))
+ dst = fwd; // recommended in paper, "softer"
+ }
+ return dst;
+}
+
+#undef checkFlag
+
+//! Kernel: Clamp obtained value to min/max in source area, and reset values that point out of grid
+//! or into boundaries
+// (note - MAC grids are handled below)
+
+template<class T> struct MacCormackClamp : public KernelBase {
+ MacCormackClamp(const FlagGrid &flags,
+ const MACGrid &vel,
+ Grid<T> &dst,
+ const Grid<T> &orig,
+ const Grid<T> &fwd,
+ Real dt,
+ const int clampMode)
+ : KernelBase(&flags, 1),
+ flags(flags),
+ vel(vel),
+ dst(dst),
+ orig(orig),
+ fwd(fwd),
+ dt(dt),
+ clampMode(clampMode)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ Grid<T> &dst,
+ const Grid<T> &orig,
+ const Grid<T> &fwd,
+ Real dt,
+ const int clampMode) const
+ {
+ T dval = dst(i, j, k);
+ Vec3i gridUpper = flags.getSize() - 1;
+
+ dval = doClampComponent<T>(gridUpper,
+ flags,
+ dval,
+ orig,
+ fwd(i, j, k),
+ Vec3(i, j, k),
+ vel.getCentered(i, j, k) * dt,
+ clampMode);
+
+ if (1 && clampMode == 1) {
+ // lookup forward/backward , round to closest NB
+ Vec3i posFwd = toVec3i(Vec3(i, j, k) + Vec3(0.5, 0.5, 0.5) - vel.getCentered(i, j, k) * dt);
+ Vec3i posBwd = toVec3i(Vec3(i, j, k) + Vec3(0.5, 0.5, 0.5) + vel.getCentered(i, j, k) * dt);
+
+ // test if lookups point out of grid or into obstacle (note doClampComponent already checks
+ // sides, below is needed for valid flags access)
+ if (posFwd.x < 0 || posFwd.y < 0 || posFwd.z < 0 || posBwd.x < 0 || posBwd.y < 0 ||
+ posBwd.z < 0 || posFwd.x > gridUpper.x || posFwd.y > gridUpper.y ||
+ ((posFwd.z > gridUpper.z) && flags.is3D()) || posBwd.x > gridUpper.x ||
+ posBwd.y > gridUpper.y || ((posBwd.z > gridUpper.z) && flags.is3D()) ||
+ flags.isObstacle(posFwd) || flags.isObstacle(posBwd)) {
+ dval = fwd(i, j, k);
+ }
+ }
+ // clampMode 2 handles flags in doClampComponent call
+
+ dst(i, j, k) = dval;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline Grid<T> &getArg2()
+ {
+ return dst;
+ }
+ typedef Grid<T> type2;
+ inline const Grid<T> &getArg3()
+ {
+ return orig;
+ }
+ typedef Grid<T> type3;
+ inline const Grid<T> &getArg4()
+ {
+ return fwd;
+ }
+ typedef Grid<T> type4;
+ inline Real &getArg5()
+ {
+ return dt;
+ }
+ typedef Real type5;
+ inline const int &getArg6()
+ {
+ return clampMode;
+ }
+ typedef int type6;
+ void runMessage()
+ {
+ debMsg("Executing kernel MacCormackClamp ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, dst, orig, fwd, dt, clampMode);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, dst, orig, fwd, dt, clampMode);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ Grid<T> &dst;
+ const Grid<T> &orig;
+ const Grid<T> &fwd;
+ Real dt;
+ const int clampMode;
+};
+
+//! Kernel: same as MacCormackClamp above, but specialized version for MAC grids
+
+struct MacCormackClampMAC : public KernelBase {
+ MacCormackClampMAC(const FlagGrid &flags,
+ const MACGrid &vel,
+ MACGrid &dst,
+ const MACGrid &orig,
+ const MACGrid &fwd,
+ Real dt,
+ const int clampMode)
+ : KernelBase(&flags, 1),
+ flags(flags),
+ vel(vel),
+ dst(dst),
+ orig(orig),
+ fwd(fwd),
+ dt(dt),
+ clampMode(clampMode)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ MACGrid &dst,
+ const MACGrid &orig,
+ const MACGrid &fwd,
+ Real dt,
+ const int clampMode) const
+ {
+ Vec3 pos(i, j, k);
+ Vec3 dval = dst(i, j, k);
+ Vec3 dfwd = fwd(i, j, k);
+ Vec3i gridUpper = flags.getSize() - 1;
+
+ dval.x = doClampComponentMAC<0>(
+ flags, gridUpper, dval.x, orig, dfwd.x, pos, vel.getAtMACX(i, j, k) * dt, clampMode);
+ dval.y = doClampComponentMAC<1>(
+ flags, gridUpper, dval.y, orig, dfwd.y, pos, vel.getAtMACY(i, j, k) * dt, clampMode);
+ if (flags.is3D())
+ dval.z = doClampComponentMAC<2>(
+ flags, gridUpper, dval.z, orig, dfwd.z, pos, vel.getAtMACZ(i, j, k) * dt, clampMode);
+
+ // note - the MAC version currently does not check whether source points were inside an
+ // obstacle! (unlike centered version) this would need to be done for each face separately to
+ // stay symmetric...
+
+ dst(i, j, k) = dval;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline MACGrid &getArg2()
+ {
+ return dst;
+ }
+ typedef MACGrid type2;
+ inline const MACGrid &getArg3()
+ {
+ return orig;
+ }
+ typedef MACGrid type3;
+ inline const MACGrid &getArg4()
+ {
+ return fwd;
+ }
+ typedef MACGrid type4;
+ inline Real &getArg5()
+ {
+ return dt;
+ }
+ typedef Real type5;
+ inline const int &getArg6()
+ {
+ return clampMode;
+ }
+ typedef int type6;
+ void runMessage()
+ {
+ debMsg("Executing kernel MacCormackClampMAC ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, dst, orig, fwd, dt, clampMode);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, dst, orig, fwd, dt, clampMode);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ MACGrid &dst;
+ const MACGrid &orig;
+ const MACGrid &fwd;
+ Real dt;
+ const int clampMode;
+};
+
+//! template function for performing SL advection
+//! (Note boundary width only needed for specialization for MAC grids below)
+template<class GridType>
+void fnAdvectSemiLagrange(FluidSolver *parent,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ GridType &orig,
+ int order,
+ Real strength,
+ int orderSpace,
+ int clampMode,
+ int orderTrace)
+{
+ typedef typename GridType::BASETYPE T;
+
+ Real dt = parent->getDt();
+ bool levelset = orig.getType() & GridBase::TypeLevelset;
+
+ // forward step
+ GridType fwd(parent);
+ SemiLagrange<T>(flags, vel, fwd, orig, dt, levelset, orderSpace, orderTrace);
+
+ if (order == 1) {
+ orig.swap(fwd);
+ }
+ else if (order == 2) { // MacCormack
+ GridType bwd(parent);
+ GridType newGrid(parent);
+
+ // bwd <- backwards step
+ SemiLagrange<T>(flags, vel, bwd, fwd, -dt, levelset, orderSpace, orderTrace);
+
+ // newGrid <- compute correction
+ MacCormackCorrect<T>(flags, newGrid, orig, fwd, bwd, strength, levelset);
+
+ // clamp values
+ MacCormackClamp<T>(flags, vel, newGrid, orig, fwd, dt, clampMode);
+
+ orig.swap(newGrid);
+ }
+}
+
+// outflow functions
+
+//! calculate local propagation velocity for cell (i,j,k)
+Vec3 getBulkVel(const FlagGrid &flags, const MACGrid &vel, int i, int j, int k)
+{
+ Vec3 avg = Vec3(0.);
+ int count = 0;
+ int size = 1; // stencil size
+ int nmax = (flags.is3D() ? size : 0);
+ // average the neighboring fluid / outflow cell's velocity
+ for (int n = -nmax; n <= nmax; n++) {
+ for (int m = -size; m <= size; m++) {
+ for (int l = -size; l <= size; l++) {
+ if (flags.isInBounds(Vec3i(i + l, j + m, k + n)) &&
+ (flags.isFluid(i + l, j + m, k + n) || flags.isOutflow(i + l, j + m, k + n))) {
+ avg += vel(i + l, j + m, k + n);
+ count++;
+ }
+ }
+ }
+ }
+ return count > 0 ? avg / count : avg;
+}
+
+//! extrapolate normal velocity components into outflow cell
+//! Generated TBB grid kernel (KernelBase pattern): the constructor captures all
+//! arguments and immediately executes run(); op() is the per-cell body. For each
+//! outflow cell it applies a convective boundary condition, extrapolating the
+//! velocity from the nearest fluid neighbor (up to two cells away) along each
+//! axis into velDst. vel is only read; results are accumulated and averaged.
+struct extrapolateVelConvectiveBC : public KernelBase {
+ extrapolateVelConvectiveBC(const FlagGrid &flags,
+ const MACGrid &vel,
+ MACGrid &velDst,
+ const MACGrid &velPrev,
+ Real timeStep)
+ : KernelBase(&flags, 0),
+ flags(flags),
+ vel(vel),
+ velDst(velDst),
+ velPrev(velPrev),
+ timeStep(timeStep)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell kernel body; writes only to velDst(i,j,k), so parallel execution
+ // over disjoint cells is safe.
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ MACGrid &velDst,
+ const MACGrid &velPrev,
+ Real timeStep) const
+ {
+ if (flags.isOutflow(i, j, k)) {
+ Vec3 bulkVel = getBulkVel(flags, vel, i, j, k);
+ int dim = flags.is3D() ? 3 : 2;
+ const Vec3i cur = Vec3i(i, j, k);
+ Vec3i low, up, flLow, flUp;
+ int cnt = 0; // number of contributions accumulated into velDst(i,j,k)
+ // iterate over each velocity component x, y, z
+ for (int c = 0; c < dim; c++) {
+ low = up = flLow = flUp = cur;
+ Real factor = timeStep *
+ max((Real)1.0, bulkVel[c]); // prevent the extrapolated velocity from
+ // exploding when bulk velocity below 1
+ low[c] = flLow[c] = cur[c] - 1;
+ up[c] = flUp[c] = cur[c] + 1;
+ // iterate over bWidth to allow for extrapolation into more distant outflow cells;
+ // hard-coded extrapolation distance of two cells
+ for (int d = 0; d < 2; d++) {
+ bool extrapolateFromLower = flags.isInBounds(flLow) && flags.isFluid(flLow);
+ bool extrapolateFromUpper = flags.isInBounds(flUp) && flags.isFluid(flUp);
+ if (extrapolateFromLower || extrapolateFromUpper) {
+ if (extrapolateFromLower) {
+ velDst(i, j, k) += ((vel(i, j, k) - velPrev(i, j, k)) / factor) + vel(low);
+ cnt++;
+ }
+ if (extrapolateFromUpper) {
+ // check for cells equally far away from two fluid cells -> average value between
+ // both sides
+ velDst(i, j, k) += ((vel(i, j, k) - velPrev(i, j, k)) / factor) + vel(up);
+ cnt++;
+ }
+ break;
+ }
+ // no fluid neighbor at this distance: widen the search by one cell
+ flLow[c]--;
+ flUp[c]++;
+ }
+ }
+ if (cnt > 0)
+ velDst(i, j, k) /= cnt;
+ }
+ }
+ // Generated accessor boilerplate (used by the MantaFlow preprocessor/registry).
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline MACGrid &getArg2()
+ {
+ return velDst;
+ }
+ typedef MACGrid type2;
+ inline const MACGrid &getArg3()
+ {
+ return velPrev;
+ }
+ typedef MACGrid type3;
+ inline Real &getArg4()
+ {
+ return timeStep;
+ }
+ typedef Real type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel extrapolateVelConvectiveBC ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ // TBB body: iterates the assigned slab (z-slices in 3D, y-rows in 2D).
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, vel, velDst, velPrev, timeStep);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, vel, velDst, velPrev, timeStep);
+ }
+ }
+ // Parallelize over z in 3D, over y in 2D.
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ MACGrid &velDst;
+ const MACGrid &velPrev;
+ Real timeStep;
+};
+
+//! copy extrapolated velocity components
+//! Generated TBB grid kernel: copies velDst back into vel, but only for cells
+//! flagged as outflow (the cells extrapolateVelConvectiveBC wrote to).
+struct copyChangedVels : public KernelBase {
+ copyChangedVels(const FlagGrid &flags, const MACGrid &velDst, MACGrid &vel)
+ : KernelBase(&flags, 0), flags(flags), velDst(velDst), vel(vel)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell body: overwrite outflow-cell velocity with the extrapolated value.
+ inline void op(
+ int i, int j, int k, const FlagGrid &flags, const MACGrid &velDst, MACGrid &vel) const
+ {
+ if (flags.isOutflow(i, j, k))
+ vel(i, j, k) = velDst(i, j, k);
+ }
+ // Generated accessor boilerplate.
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const MACGrid &getArg1()
+ {
+ return velDst;
+ }
+ typedef MACGrid type1;
+ inline MACGrid &getArg2()
+ {
+ return vel;
+ }
+ typedef MACGrid type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel copyChangedVels ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, velDst, vel);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, velDst, vel);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ const MACGrid &velDst;
+ MACGrid &vel;
+};
+
+//! extrapolate normal velocity components into open boundary cells (marked as outflow cells)
+//! Two-pass scheme: first extrapolate into a scratch grid (velDst) while vel is
+//! still being read, then copy the changed outflow-cell values back into vel.
+//! The effective time step passed on is clamped to at least 1 (timeStep * 4).
+void applyOutflowBC(const FlagGrid &flags, MACGrid &vel, const MACGrid &velPrev, double timeStep)
+{
+ MACGrid velDst(vel.getParent()); // do not overwrite vel while it is read
+ extrapolateVelConvectiveBC(flags, vel, velDst, velPrev, max(1.0, timeStep * 4));
+ copyChangedVels(flags, velDst, vel);
+}
+
+// advection helpers
+
+//! prevent parts of the surface getting "stuck" in obstacle regions
+//! Generated TBB grid kernel: resets negative (inside-surface) level-set values
+//! inside obstacle cells to a small positive constant (0.1).
+struct knResetPhiInObs : public KernelBase {
+ knResetPhiInObs(const FlagGrid &flags, Grid<Real> &sdf)
+ : KernelBase(&flags, 0), flags(flags), sdf(sdf)
+ {
+ runMessage();
+ run();
+ }
+ // Per-cell body: only obstacle cells with sdf < 0 are modified.
+ inline void op(int i, int j, int k, const FlagGrid &flags, Grid<Real> &sdf) const
+ {
+ if (flags.isObstacle(i, j, k) && (sdf(i, j, k) < 0.)) {
+ sdf(i, j, k) = 0.1;
+ }
+ }
+ // Generated accessor boilerplate.
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<Real> &getArg1()
+ {
+ return sdf;
+ }
+ typedef Grid<Real> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knResetPhiInObs ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, sdf);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, sdf);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<Real> &sdf;
+};
+//! Plugin entry point: run the knResetPhiInObs kernel over the whole grid.
+void resetPhiInObs(const FlagGrid &flags, Grid<Real> &sdf)
+{
+ knResetPhiInObs(flags, sdf);
+}
+// Generated Python wrapper for resetPhiInObs: unpacks the Python args/kwargs,
+// locks the grid arguments, forwards to the C++ function, and returns None.
+// Exceptions are converted to a Python error via pbSetError (returning 0).
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "resetPhiInObs", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &sdf = *_args.getPtr<Grid<Real>>("sdf", 1, &_lock);
+ _retval = getPyNone();
+ resetPhiInObs(flags, sdf);
+ _args.check(); // reject unused/unknown arguments
+ }
+ pbFinalizePlugin(parent, "resetPhiInObs", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("resetPhiInObs", e.what());
+ return 0;
+ }
+}
+// Static registration of the wrapper in the Pb plugin registry; the extern "C"
+// stub only keeps the registration object from being optimized away.
+static const Pb::Register _RP_resetPhiInObs("", "resetPhiInObs", _W_0);
+extern "C" {
+void PbRegister_resetPhiInObs()
+{
+ KEEP_UNUSED(_RP_resetPhiInObs);
+}
+}
+
+// advection main calls
+
+//! template function for performing SL advection: specialized version for MAC grids
+//! order==1: plain semi-Lagrangian step; order==2: MacCormack scheme
+//! (forward + backward trace, error correction, then clamping). In both cases
+//! the result is written back into 'orig' via swap, and the convective
+//! open-boundary condition is applied before the swap.
+template<>
+void fnAdvectSemiLagrange<MACGrid>(FluidSolver *parent,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ MACGrid &orig,
+ int order,
+ Real strength,
+ int orderSpace,
+ int clampMode,
+ int orderTrace)
+{
+ Real dt = parent->getDt();
+
+ // forward step
+ MACGrid fwd(parent);
+ SemiLagrangeMAC(flags, vel, fwd, orig, dt, orderSpace, orderTrace);
+
+ if (orderSpace != 1) {
+ debMsg("Warning higher order for MAC grids not yet implemented...", 1);
+ }
+
+ if (order == 1) {
+ applyOutflowBC(flags, fwd, orig, dt);
+ orig.swap(fwd);
+ }
+ else if (order == 2) { // MacCormack
+ MACGrid bwd(parent);
+ MACGrid newGrid(parent);
+
+ // bwd <- backwards step
+ SemiLagrangeMAC(flags, vel, bwd, fwd, -dt, orderSpace, orderTrace);
+
+ // newGrid <- compute correction
+ MacCormackCorrectMAC<Vec3>(flags, newGrid, orig, fwd, bwd, strength, false, true);
+
+ // clamp values
+ MacCormackClampMAC(flags, vel, newGrid, orig, fwd, dt, clampMode);
+
+ applyOutflowBC(flags, newGrid, orig, dt);
+ orig.swap(newGrid);
+ }
+}
+
+//! Perform semi-lagrangian advection of target Real- or Vec3 grid
+//! Open boundary handling needs information about width of border
+//! Clamping modes: 1 regular clamp leading to more overshoot and sharper results, 2 revert to 1st
+//! order slightly smoother less overshoot (enable when 1 gives artifacts)
+//! Dispatches on the runtime grid type (Real / MAC / Vec3) to the matching
+//! fnAdvectSemiLagrange specialization. openBounds and boundaryWidth are
+//! deprecated no-ops kept only for script compatibility (a warning is printed).
+
+void advectSemiLagrange(const FlagGrid *flags,
+ const MACGrid *vel,
+ GridBase *grid,
+ int order = 1,
+ Real strength = 1.0,
+ int orderSpace = 1,
+ bool openBounds = false,
+ int boundaryWidth = -1,
+ int clampMode = 2,
+ int orderTrace = 1)
+{
+ assertMsg(order == 1 || order == 2,
+ "AdvectSemiLagrange: Only order 1 (regular SL) and 2 (MacCormack) supported");
+ if ((boundaryWidth != -1) || (openBounds)) {
+ debMsg(
+ "Warning: boundaryWidth and openBounds parameters in AdvectSemiLagrange plugin are "
+ "deprecated (and have no more effect), please remove.",
+ 0);
+ }
+
+ // determine type of grid
+ if (grid->getType() & GridBase::TypeReal) {
+ fnAdvectSemiLagrange<Grid<Real>>(flags->getParent(),
+ *flags,
+ *vel,
+ *((Grid<Real> *)grid),
+ order,
+ strength,
+ orderSpace,
+ clampMode,
+ orderTrace);
+ }
+ else if (grid->getType() & GridBase::TypeMAC) {
+ fnAdvectSemiLagrange<MACGrid>(flags->getParent(),
+ *flags,
+ *vel,
+ *((MACGrid *)grid),
+ order,
+ strength,
+ orderSpace,
+ clampMode,
+ orderTrace);
+ }
+ else if (grid->getType() & GridBase::TypeVec3) {
+ fnAdvectSemiLagrange<Grid<Vec3>>(flags->getParent(),
+ *flags,
+ *vel,
+ *((Grid<Vec3> *)grid),
+ order,
+ strength,
+ orderSpace,
+ clampMode,
+ orderTrace);
+ }
+ else
+ errMsg("AdvectSemiLagrange: Grid Type is not supported (only Real, Vec3, MAC, Levelset)");
+}
+// Generated Python wrapper for advectSemiLagrange: parses positional/keyword
+// arguments (with defaults mirroring the C++ signature), forwards the call,
+// returns None; exceptions become Python errors via pbSetError.
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "advectSemiLagrange", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid *flags = _args.getPtr<FlagGrid>("flags", 0, &_lock);
+ const MACGrid *vel = _args.getPtr<MACGrid>("vel", 1, &_lock);
+ GridBase *grid = _args.getPtr<GridBase>("grid", 2, &_lock);
+ int order = _args.getOpt<int>("order", 3, 1, &_lock);
+ Real strength = _args.getOpt<Real>("strength", 4, 1.0, &_lock);
+ int orderSpace = _args.getOpt<int>("orderSpace", 5, 1, &_lock);
+ bool openBounds = _args.getOpt<bool>("openBounds", 6, false, &_lock);
+ int boundaryWidth = _args.getOpt<int>("boundaryWidth", 7, -1, &_lock);
+ int clampMode = _args.getOpt<int>("clampMode", 8, 2, &_lock);
+ int orderTrace = _args.getOpt<int>("orderTrace", 9, 1, &_lock);
+ _retval = getPyNone();
+ advectSemiLagrange(flags,
+ vel,
+ grid,
+ order,
+ strength,
+ orderSpace,
+ openBounds,
+ boundaryWidth,
+ clampMode,
+ orderTrace);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "advectSemiLagrange", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("advectSemiLagrange", e.what());
+ return 0;
+ }
+}
+// Static registration of the wrapper in the Pb plugin registry; the extern "C"
+// stub only keeps the registration object from being optimized away.
+static const Pb::Register _RP_advectSemiLagrange("", "advectSemiLagrange", _W_1);
+extern "C" {
+void PbRegister_advectSemiLagrange()
+{
+ KEEP_UNUSED(_RP_advectSemiLagrange);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/apic.cpp b/extern/mantaflow/preprocessed/plugin/apic.cpp
new file mode 100644
index 00000000000..6ff893014c9
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/apic.cpp
@@ -0,0 +1,496 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+// ----------------------------------------------------------------------------
+//
+// MantaFlow fluid solver framework
+// Copyright 2016-2017 Kiwon Um, Nils Thuerey
+//
+// This program is free software, distributed under the terms of the
+// Apache License, Version 2.0
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Affine Particle-In-Cell
+//
+// ----------------------------------------------------------------------------
+
+#include "particle.h"
+#include "grid.h"
+
+namespace Manta {
+
+//! APIC particle-to-grid transfer kernel (generated, runs over all particles).
+//! For each active particle, splats its velocity vp plus the affine
+//! contribution (dot of the per-axis affine vectors cpx/cpy/cpz with the
+//! particle-to-node offset) onto the 8 surrounding u-, v- and w-face nodes
+//! using trilinear weights; the same weights are accumulated into the mass
+//! grid mg for later normalization. Particles whose ptype matches 'exclude'
+//! are skipped.
+struct knApicMapLinearVec3ToMACGrid : public KernelBase {
+ knApicMapLinearVec3ToMACGrid(const BasicParticleSystem &p,
+ MACGrid &mg,
+ MACGrid &vg,
+ const ParticleDataImpl<Vec3> &vp,
+ const ParticleDataImpl<Vec3> &cpx,
+ const ParticleDataImpl<Vec3> &cpy,
+ const ParticleDataImpl<Vec3> &cpz,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(p.size()),
+ p(p),
+ mg(mg),
+ vg(vg),
+ vp(vp),
+ cpx(cpx),
+ cpy(cpy),
+ cpz(cpz),
+ ptype(ptype),
+ exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ // Per-particle body. Note: op is non-const and run() below is serial —
+ // the scatter-add into mg/vg is not safe to parallelize naively.
+ inline void op(IndexInt idx,
+ const BasicParticleSystem &p,
+ MACGrid &mg,
+ MACGrid &vg,
+ const ParticleDataImpl<Vec3> &vp,
+ const ParticleDataImpl<Vec3> &cpx,
+ const ParticleDataImpl<Vec3> &cpy,
+ const ParticleDataImpl<Vec3> &cpz,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ {
+ if (!p.isActive(idx) || (ptype && ((*ptype)[idx] & exclude)))
+ return;
+ // Linear-index offsets for stepping one cell in x/y/z ([0] = stay).
+ const IndexInt dX[2] = {0, vg.getStrideX()};
+ const IndexInt dY[2] = {0, vg.getStrideY()};
+ const IndexInt dZ[2] = {0, vg.getStrideZ()};
+
+ const Vec3 &pos = p[idx].pos, &vel = vp[idx];
+ // fi/fj/fk: face-aligned base index; ci/cj/ck: cell-centered base index.
+ const IndexInt fi = static_cast<IndexInt>(pos.x), fj = static_cast<IndexInt>(pos.y),
+ fk = static_cast<IndexInt>(pos.z);
+ const IndexInt ci = static_cast<IndexInt>(pos.x - 0.5),
+ cj = static_cast<IndexInt>(pos.y - 0.5),
+ ck = static_cast<IndexInt>(pos.z - 0.5);
+ // Trilinear interpolation weights for both index bases, clamped to [0,1].
+ const Real wfi = clamp(pos.x - fi, Real(0), Real(1)),
+ wfj = clamp(pos.y - fj, Real(0), Real(1)),
+ wfk = clamp(pos.z - fk, Real(0), Real(1));
+ const Real wci = clamp(Real(pos.x - ci - 0.5), Real(0), Real(1)),
+ wcj = clamp(Real(pos.y - cj - 0.5), Real(0), Real(1)),
+ wck = clamp(Real(pos.z - ck - 0.5), Real(0), Real(1));
+ // TODO: check index for safety
+ { // u-face
+ const IndexInt gidx = fi * dX[1] + cj * dY[1] + ck * dZ[1];
+ const Vec3 gpos(fi, cj + 0.5, ck + 0.5);
+ const Real wi[2] = {Real(1) - wfi, wfi};
+ const Real wj[2] = {Real(1) - wcj, wcj};
+ const Real wk[2] = {Real(1) - wck, wck};
+ for (int i = 0; i < 2; ++i)
+ for (int j = 0; j < 2; ++j)
+ for (int k = 0; k < 2; ++k) {
+ const Real w = wi[i] * wj[j] * wk[k];
+ mg[gidx + dX[i] + dY[j] + dZ[k]].x += w;
+ vg[gidx + dX[i] + dY[j] + dZ[k]].x += w * vel.x;
+ vg[gidx + dX[i] + dY[j] + dZ[k]].x += w * dot(cpx[idx], gpos + Vec3(i, j, k) - pos);
+ }
+ }
+ { // v-face
+ const IndexInt gidx = ci * dX[1] + fj * dY[1] + ck * dZ[1];
+ const Vec3 gpos(ci + 0.5, fj, ck + 0.5);
+ const Real wi[2] = {Real(1) - wci, wci};
+ const Real wj[2] = {Real(1) - wfj, wfj};
+ const Real wk[2] = {Real(1) - wck, wck};
+ for (int i = 0; i < 2; ++i)
+ for (int j = 0; j < 2; ++j)
+ for (int k = 0; k < 2; ++k) {
+ const Real w = wi[i] * wj[j] * wk[k];
+ mg[gidx + dX[i] + dY[j] + dZ[k]].y += w;
+ vg[gidx + dX[i] + dY[j] + dZ[k]].y += w * vel.y;
+ vg[gidx + dX[i] + dY[j] + dZ[k]].y += w * dot(cpy[idx], gpos + Vec3(i, j, k) - pos);
+ }
+ }
+ if (!vg.is3D())
+ return; // 2D: no w-component to transfer
+ { // w-face
+ const IndexInt gidx = ci * dX[1] + cj * dY[1] + fk * dZ[1];
+ const Vec3 gpos(ci + 0.5, cj + 0.5, fk);
+ const Real wi[2] = {Real(1) - wci, wci};
+ const Real wj[2] = {Real(1) - wcj, wcj};
+ const Real wk[2] = {Real(1) - wfk, wfk};
+ for (int i = 0; i < 2; ++i)
+ for (int j = 0; j < 2; ++j)
+ for (int k = 0; k < 2; ++k) {
+ const Real w = wi[i] * wj[j] * wk[k];
+ mg[gidx + dX[i] + dY[j] + dZ[k]].z += w;
+ vg[gidx + dX[i] + dY[j] + dZ[k]].z += w * vel.z;
+ vg[gidx + dX[i] + dY[j] + dZ[k]].z += w * dot(cpz[idx], gpos + Vec3(i, j, k) - pos);
+ }
+ }
+ }
+ // Generated accessor boilerplate.
+ inline const BasicParticleSystem &getArg0()
+ {
+ return p;
+ }
+ typedef BasicParticleSystem type0;
+ inline MACGrid &getArg1()
+ {
+ return mg;
+ }
+ typedef MACGrid type1;
+ inline MACGrid &getArg2()
+ {
+ return vg;
+ }
+ typedef MACGrid type2;
+ inline const ParticleDataImpl<Vec3> &getArg3()
+ {
+ return vp;
+ }
+ typedef ParticleDataImpl<Vec3> type3;
+ inline const ParticleDataImpl<Vec3> &getArg4()
+ {
+ return cpx;
+ }
+ typedef ParticleDataImpl<Vec3> type4;
+ inline const ParticleDataImpl<Vec3> &getArg5()
+ {
+ return cpy;
+ }
+ typedef ParticleDataImpl<Vec3> type5;
+ inline const ParticleDataImpl<Vec3> &getArg6()
+ {
+ return cpz;
+ }
+ typedef ParticleDataImpl<Vec3> type6;
+ inline const ParticleDataImpl<int> *getArg7()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type7;
+ inline const int &getArg8()
+ {
+ return exclude;
+ }
+ typedef int type8;
+ void runMessage()
+ {
+ debMsg("Executing kernel knApicMapLinearVec3ToMACGrid ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ // Serial loop (no tbb): particles scatter-write to shared grid nodes.
+ void run()
+ {
+ const IndexInt _sz = size;
+ for (IndexInt i = 0; i < _sz; i++)
+ op(i, p, mg, vg, vp, cpx, cpy, cpz, ptype, exclude);
+ }
+ const BasicParticleSystem &p;
+ MACGrid &mg;
+ MACGrid &vg;
+ const ParticleDataImpl<Vec3> &vp;
+ const ParticleDataImpl<Vec3> &cpx;
+ const ParticleDataImpl<Vec3> &cpy;
+ const ParticleDataImpl<Vec3> &cpz;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+
+//! APIC particle-to-grid transfer entry point: accumulates weighted particle
+//! velocities (and affine terms) plus weights into vel and mass, then
+//! normalizes vel by mass. If no mass grid is supplied, a temporary one is
+//! allocated and freed here.
+void apicMapPartsToMAC(const FlagGrid &flags,
+ MACGrid &vel,
+ const BasicParticleSystem &parts,
+ const ParticleDataImpl<Vec3> &partVel,
+ const ParticleDataImpl<Vec3> &cpx,
+ const ParticleDataImpl<Vec3> &cpy,
+ const ParticleDataImpl<Vec3> &cpz,
+ MACGrid *mass = NULL,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0)
+{
+ // affine map
+ // let's assume that the particle mass is constant, 1.0
+ const bool freeMass = !mass; // remember whether we own the temporary grid
+ if (!mass)
+ mass = new MACGrid(flags.getParent());
+ else
+ mass->clear();
+
+ vel.clear();
+ knApicMapLinearVec3ToMACGrid(parts, *mass, vel, partVel, cpx, cpy, cpz, ptype, exclude);
+ mass->stomp(VECTOR_EPSILON); // avoid division by (near-)zero weights
+ vel.safeDivide(*mass);
+
+ if (freeMass)
+ delete mass;
+}
+// Generated Python wrapper for apicMapPartsToMAC: unpacks arguments (mass,
+// ptype, exclude are optional), forwards the call, returns None; exceptions
+// become Python errors via pbSetError.
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "apicMapPartsToMAC", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 2, &_lock);
+ const ParticleDataImpl<Vec3> &partVel = *_args.getPtr<ParticleDataImpl<Vec3>>(
+ "partVel", 3, &_lock);
+ const ParticleDataImpl<Vec3> &cpx = *_args.getPtr<ParticleDataImpl<Vec3>>("cpx", 4, &_lock);
+ const ParticleDataImpl<Vec3> &cpy = *_args.getPtr<ParticleDataImpl<Vec3>>("cpy", 5, &_lock);
+ const ParticleDataImpl<Vec3> &cpz = *_args.getPtr<ParticleDataImpl<Vec3>>("cpz", 6, &_lock);
+ MACGrid *mass = _args.getPtrOpt<MACGrid>("mass", 7, NULL, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+ "ptype", 8, NULL, &_lock);
+ const int exclude = _args.getOpt<int>("exclude", 9, 0, &_lock);
+ _retval = getPyNone();
+ apicMapPartsToMAC(flags, vel, parts, partVel, cpx, cpy, cpz, mass, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "apicMapPartsToMAC", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("apicMapPartsToMAC", e.what());
+ return 0;
+ }
+}
+// Static registration of the wrapper in the Pb plugin registry; the extern "C"
+// stub only keeps the registration object from being optimized away.
+static const Pb::Register _RP_apicMapPartsToMAC("", "apicMapPartsToMAC", _W_0);
+extern "C" {
+void PbRegister_apicMapPartsToMAC()
+{
+ KEEP_UNUSED(_RP_apicMapPartsToMAC);
+}
+}
+
+//! APIC grid-to-particle transfer kernel (generated, runs over all particles).
+//! Gathers the trilinearly interpolated MAC velocity into vp and reconstructs
+//! the per-axis affine velocity gradients cpx/cpy/cpz from the weight gradients
+//! (gw). Pure gather — each particle writes only its own data — so run() uses
+//! tbb::parallel_for.
+struct knApicMapLinearMACGridToVec3 : public KernelBase {
+ knApicMapLinearMACGridToVec3(ParticleDataImpl<Vec3> &vp,
+ ParticleDataImpl<Vec3> &cpx,
+ ParticleDataImpl<Vec3> &cpy,
+ ParticleDataImpl<Vec3> &cpz,
+ const BasicParticleSystem &p,
+ const MACGrid &vg,
+ const FlagGrid &flags,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(vp.size()),
+ vp(vp),
+ cpx(cpx),
+ cpy(cpy),
+ cpz(cpz),
+ p(p),
+ vg(vg),
+ flags(flags),
+ ptype(ptype),
+ exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ // Per-particle body: skips inactive/excluded particles, otherwise
+ // overwrites vp/cpx/cpy/cpz for this particle.
+ inline void op(IndexInt idx,
+ ParticleDataImpl<Vec3> &vp,
+ ParticleDataImpl<Vec3> &cpx,
+ ParticleDataImpl<Vec3> &cpy,
+ ParticleDataImpl<Vec3> &cpz,
+ const BasicParticleSystem &p,
+ const MACGrid &vg,
+ const FlagGrid &flags,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude) const
+ {
+ if (!p.isActive(idx) || (ptype && ((*ptype)[idx] & exclude)))
+ return;
+
+ vp[idx] = cpx[idx] = cpy[idx] = cpz[idx] = Vec3(Real(0));
+ // Linear-index offsets for one-cell steps; gw = weight-gradient signs.
+ const IndexInt dX[2] = {0, vg.getStrideX()}, dY[2] = {0, vg.getStrideY()},
+ dZ[2] = {0, vg.getStrideZ()};
+ const Real gw[2] = {-Real(1), Real(1)};
+
+ const Vec3 &pos = p[idx].pos;
+ // fi/fj/fk: face-aligned base index; ci/cj/ck: cell-centered base index.
+ const IndexInt fi = static_cast<IndexInt>(pos.x), fj = static_cast<IndexInt>(pos.y),
+ fk = static_cast<IndexInt>(pos.z);
+ const IndexInt ci = static_cast<IndexInt>(pos.x - 0.5),
+ cj = static_cast<IndexInt>(pos.y - 0.5),
+ ck = static_cast<IndexInt>(pos.z - 0.5);
+ // Trilinear interpolation weights for both index bases, clamped to [0,1].
+ const Real wfi = clamp(pos.x - fi, Real(0), Real(1)),
+ wfj = clamp(pos.y - fj, Real(0), Real(1)),
+ wfk = clamp(pos.z - fk, Real(0), Real(1));
+ const Real wci = clamp(Real(pos.x - ci - 0.5), Real(0), Real(1)),
+ wcj = clamp(Real(pos.y - cj - 0.5), Real(0), Real(1)),
+ wck = clamp(Real(pos.z - ck - 0.5), Real(0), Real(1));
+ // TODO: check index for safety
+ { // u
+ const IndexInt gidx = fi * dX[1] + cj * dY[1] + ck * dZ[1];
+ const Real wx[2] = {Real(1) - wfi, wfi};
+ const Real wy[2] = {Real(1) - wcj, wcj};
+ const Real wz[2] = {Real(1) - wck, wck};
+ for (int i = 0; i < 2; ++i)
+ for (int j = 0; j < 2; ++j)
+ for (int k = 0; k < 2; ++k) {
+ const IndexInt vidx = gidx + dX[i] + dY[j] + dZ[k];
+ Real vgx = vg[vidx].x;
+ vp[idx].x += wx[i] * wy[j] * wz[k] * vgx;
+ cpx[idx].x += gw[i] * wy[j] * wz[k] * vgx;
+ cpx[idx].y += wx[i] * gw[j] * wz[k] * vgx;
+ cpx[idx].z += wx[i] * wy[j] * gw[k] * vgx;
+ }
+ }
+ { // v
+ const IndexInt gidx = ci * dX[1] + fj * dY[1] + ck * dZ[1];
+ const Real wx[2] = {Real(1) - wci, wci};
+ const Real wy[2] = {Real(1) - wfj, wfj};
+ const Real wz[2] = {Real(1) - wck, wck};
+ for (int i = 0; i < 2; ++i)
+ for (int j = 0; j < 2; ++j)
+ for (int k = 0; k < 2; ++k) {
+ const IndexInt vidx = gidx + dX[i] + dY[j] + dZ[k];
+ Real vgy = vg[vidx].y;
+ vp[idx].y += wx[i] * wy[j] * wz[k] * vgy;
+ cpy[idx].x += gw[i] * wy[j] * wz[k] * vgy;
+ cpy[idx].y += wx[i] * gw[j] * wz[k] * vgy;
+ cpy[idx].z += wx[i] * wy[j] * gw[k] * vgy;
+ }
+ }
+ if (!vg.is3D())
+ return; // 2D: no w-component to gather
+ { // w
+ const IndexInt gidx = ci * dX[1] + cj * dY[1] + fk * dZ[1];
+ const Real wx[2] = {Real(1) - wci, wci};
+ const Real wy[2] = {Real(1) - wcj, wcj};
+ const Real wz[2] = {Real(1) - wfk, wfk};
+ for (int i = 0; i < 2; ++i)
+ for (int j = 0; j < 2; ++j)
+ for (int k = 0; k < 2; ++k) {
+ const IndexInt vidx = gidx + dX[i] + dY[j] + dZ[k];
+ Real vgz = vg[vidx].z;
+ vp[idx].z += wx[i] * wy[j] * wz[k] * vgz;
+ cpz[idx].x += gw[i] * wy[j] * wz[k] * vgz;
+ cpz[idx].y += wx[i] * gw[j] * wz[k] * vgz;
+ cpz[idx].z += wx[i] * wy[j] * gw[k] * vgz;
+ }
+ }
+ }
+ // Generated accessor boilerplate.
+ inline ParticleDataImpl<Vec3> &getArg0()
+ {
+ return vp;
+ }
+ typedef ParticleDataImpl<Vec3> type0;
+ inline ParticleDataImpl<Vec3> &getArg1()
+ {
+ return cpx;
+ }
+ typedef ParticleDataImpl<Vec3> type1;
+ inline ParticleDataImpl<Vec3> &getArg2()
+ {
+ return cpy;
+ }
+ typedef ParticleDataImpl<Vec3> type2;
+ inline ParticleDataImpl<Vec3> &getArg3()
+ {
+ return cpz;
+ }
+ typedef ParticleDataImpl<Vec3> type3;
+ inline const BasicParticleSystem &getArg4()
+ {
+ return p;
+ }
+ typedef BasicParticleSystem type4;
+ inline const MACGrid &getArg5()
+ {
+ return vg;
+ }
+ typedef MACGrid type5;
+ inline const FlagGrid &getArg6()
+ {
+ return flags;
+ }
+ typedef FlagGrid type6;
+ inline const ParticleDataImpl<int> *getArg7()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type7;
+ inline const int &getArg8()
+ {
+ return exclude;
+ }
+ typedef int type8;
+ void runMessage()
+ {
+ debMsg("Executing kernel knApicMapLinearMACGridToVec3 ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, vp, cpx, cpy, cpz, p, vg, flags, ptype, exclude);
+ }
+ // Parallel over particles — gather-only, no write conflicts.
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<Vec3> &vp;
+ ParticleDataImpl<Vec3> &cpx;
+ ParticleDataImpl<Vec3> &cpy;
+ ParticleDataImpl<Vec3> &cpz;
+ const BasicParticleSystem &p;
+ const MACGrid &vg;
+ const FlagGrid &flags;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+
+//! APIC grid-to-particle transfer entry point: thin forwarding wrapper around
+//! the knApicMapLinearMACGridToVec3 kernel (kernels in this generated file
+//! execute in their constructor).
+void apicMapMACGridToParts(ParticleDataImpl<Vec3> &partVel,
+ ParticleDataImpl<Vec3> &cpx,
+ ParticleDataImpl<Vec3> &cpy,
+ ParticleDataImpl<Vec3> &cpz,
+ const BasicParticleSystem &parts,
+ const MACGrid &vel,
+ const FlagGrid &flags,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0)
+{
+ knApicMapLinearMACGridToVec3(partVel, cpx, cpy, cpz, parts, vel, flags, ptype, exclude);
+}
+// Generated Python wrapper for apicMapMACGridToParts: unpacks arguments
+// (ptype, exclude optional), forwards the call, returns None; exceptions
+// become Python errors via pbSetError.
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "apicMapMACGridToParts", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ ParticleDataImpl<Vec3> &partVel = *_args.getPtr<ParticleDataImpl<Vec3>>(
+ "partVel", 0, &_lock);
+ ParticleDataImpl<Vec3> &cpx = *_args.getPtr<ParticleDataImpl<Vec3>>("cpx", 1, &_lock);
+ ParticleDataImpl<Vec3> &cpy = *_args.getPtr<ParticleDataImpl<Vec3>>("cpy", 2, &_lock);
+ ParticleDataImpl<Vec3> &cpz = *_args.getPtr<ParticleDataImpl<Vec3>>("cpz", 3, &_lock);
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 4, &_lock);
+ const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 5, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 6, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+ "ptype", 7, NULL, &_lock);
+ const int exclude = _args.getOpt<int>("exclude", 8, 0, &_lock);
+ _retval = getPyNone();
+ apicMapMACGridToParts(partVel, cpx, cpy, cpz, parts, vel, flags, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "apicMapMACGridToParts", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("apicMapMACGridToParts", e.what());
+ return 0;
+ }
+}
+// Static registration of the wrapper in the Pb plugin registry; the extern "C"
+// stub only keeps the registration object from being optimized away.
+static const Pb::Register _RP_apicMapMACGridToParts("", "apicMapMACGridToParts", _W_1);
+extern "C" {
+void PbRegister_apicMapMACGridToParts()
+{
+ KEEP_UNUSED(_RP_apicMapMACGridToParts);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/extforces.cpp b/extern/mantaflow/preprocessed/plugin/extforces.cpp
new file mode 100644
index 00000000000..3e1e5733257
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/extforces.cpp
@@ -0,0 +1,1559 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * GNU General Public License (GPL)
+ * http://www.gnu.org/licenses
+ *
+ * Set boundary conditions, gravity
+ *
+ ******************************************************************************/
+
+#include "vectorbase.h"
+#include "grid.h"
+#include "commonkernels.h"
+#include "particle.h"
+
+using namespace std;
+
+namespace Manta {
+
+//! add constant force between fl/fl and fl/em cells
+struct KnApplyForceField : public KernelBase {
+ KnApplyForceField(const FlagGrid &flags,
+ MACGrid &vel,
+ const Grid<Vec3> &force,
+ const Grid<Real> *include,
+ bool additive,
+ bool isMAC)
+ : KernelBase(&flags, 1),
+ flags(flags),
+ vel(vel),
+ force(force),
+ include(include),
+ additive(additive),
+ isMAC(isMAC)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ MACGrid &vel,
+ const Grid<Vec3> &force,
+ const Grid<Real> *include,
+ bool additive,
+ bool isMAC) const
+ {
+ bool curFluid = flags.isFluid(i, j, k);
+ bool curEmpty = flags.isEmpty(i, j, k);
+ if (!curFluid && !curEmpty)
+ return;
+ if (include && ((*include)(i, j, k) > 0.))
+ return;
+
+ Real forceX = (isMAC) ? force(i, j, k).x : 0.5 * (force(i - 1, j, k).x + force(i, j, k).x);
+ Real forceY = (isMAC) ? force(i, j, k).y : 0.5 * (force(i, j - 1, k).y + force(i, j, k).y);
+
+ Real forceZ = 0.;
+ if (vel.is3D())
+ forceZ = (isMAC) ? force(i, j, k).z : 0.5 * (force(i, j, k - 1).z + force(i, j, k).z);
+
+ if (flags.isFluid(i - 1, j, k) || (curFluid && flags.isEmpty(i - 1, j, k)))
+ vel(i, j, k).x = (additive) ? vel(i, j, k).x + forceX : forceX;
+ if (flags.isFluid(i, j - 1, k) || (curFluid && flags.isEmpty(i, j - 1, k)))
+ vel(i, j, k).y = (additive) ? vel(i, j, k).y + forceY : forceY;
+ if (vel.is3D() && (flags.isFluid(i, j, k - 1) || (curFluid && flags.isEmpty(i, j, k - 1))))
+ vel(i, j, k).z = (additive) ? vel(i, j, k).z + forceZ : forceZ;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline const Grid<Vec3> &getArg2()
+ {
+ return force;
+ }
+ typedef Grid<Vec3> type2;
+ inline const Grid<Real> *getArg3()
+ {
+ return include;
+ }
+ typedef Grid<Real> type3;
+ inline bool &getArg4()
+ {
+ return additive;
+ }
+ typedef bool type4;
+ inline bool &getArg5()
+ {
+ return isMAC;
+ }
+ typedef bool type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnApplyForceField ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, force, include, additive, isMAC);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, force, include, additive, isMAC);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ MACGrid &vel;
+ const Grid<Vec3> &force;
+ const Grid<Real> *include;
+ bool additive;
+ bool isMAC;
+};
+
+//! add constant force between fl/fl and fl/em cells
+struct KnApplyForce : public KernelBase {
+ KnApplyForce(
+ const FlagGrid &flags, MACGrid &vel, Vec3 force, const Grid<Real> *exclude, bool additive)
+ : KernelBase(&flags, 1),
+ flags(flags),
+ vel(vel),
+ force(force),
+ exclude(exclude),
+ additive(additive)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ MACGrid &vel,
+ Vec3 force,
+ const Grid<Real> *exclude,
+ bool additive) const
+ {
+ bool curFluid = flags.isFluid(i, j, k);
+ bool curEmpty = flags.isEmpty(i, j, k);
+ if (!curFluid && !curEmpty)
+ return;
+ if (exclude && ((*exclude)(i, j, k) < 0.))
+ return;
+
+ if (flags.isFluid(i - 1, j, k) || (curFluid && flags.isEmpty(i - 1, j, k)))
+ vel(i, j, k).x = (additive) ? vel(i, j, k).x + force.x : force.x;
+ if (flags.isFluid(i, j - 1, k) || (curFluid && flags.isEmpty(i, j - 1, k)))
+ vel(i, j, k).y = (additive) ? vel(i, j, k).y + force.y : force.y;
+ if (vel.is3D() && (flags.isFluid(i, j, k - 1) || (curFluid && flags.isEmpty(i, j, k - 1))))
+ vel(i, j, k).z = (additive) ? vel(i, j, k).z + force.z : force.z;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline Vec3 &getArg2()
+ {
+ return force;
+ }
+ typedef Vec3 type2;
+ inline const Grid<Real> *getArg3()
+ {
+ return exclude;
+ }
+ typedef Grid<Real> type3;
+ inline bool &getArg4()
+ {
+ return additive;
+ }
+ typedef bool type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnApplyForce ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, force, exclude, additive);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, force, exclude, additive);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ MACGrid &vel;
+ Vec3 force;
+ const Grid<Real> *exclude;
+ bool additive;
+};
+
+//! add gravity forces to all fluid cells, automatically adapts to different grid sizes
+void addGravity(const FlagGrid &flags,
+ MACGrid &vel,
+ Vec3 gravity,
+ const Grid<Real> *exclude = NULL)
+{
+ Vec3 f = gravity * flags.getParent()->getDt() / flags.getDx();
+ KnApplyForce(flags, vel, f, exclude, true);
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "addGravity", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ Vec3 gravity = _args.get<Vec3>("gravity", 2, &_lock);
+ const Grid<Real> *exclude = _args.getPtrOpt<Grid<Real>>("exclude", 3, NULL, &_lock);
+ _retval = getPyNone();
+ addGravity(flags, vel, gravity, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "addGravity", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("addGravity", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_addGravity("", "addGravity", _W_0);
+extern "C" {
+void PbRegister_addGravity()
+{
+ KEEP_UNUSED(_RP_addGravity);
+}
+}
+
+//! add gravity forces to all fluid cells , but dont account for changing cell size
+void addGravityNoScale(const FlagGrid &flags,
+ MACGrid &vel,
+ const Vec3 &gravity,
+ const Grid<Real> *exclude = NULL)
+{
+ const Vec3 f = gravity * flags.getParent()->getDt();
+ KnApplyForce(flags, vel, f, exclude, true);
+}
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "addGravityNoScale", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ const Vec3 &gravity = _args.get<Vec3>("gravity", 2, &_lock);
+ const Grid<Real> *exclude = _args.getPtrOpt<Grid<Real>>("exclude", 3, NULL, &_lock);
+ _retval = getPyNone();
+ addGravityNoScale(flags, vel, gravity, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "addGravityNoScale", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("addGravityNoScale", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_addGravityNoScale("", "addGravityNoScale", _W_1);
+extern "C" {
+void PbRegister_addGravityNoScale()
+{
+ KEEP_UNUSED(_RP_addGravityNoScale);
+}
+}
+
+//! kernel to add Buoyancy force
+struct KnAddBuoyancy : public KernelBase {
+ KnAddBuoyancy(const FlagGrid &flags, const Grid<Real> &factor, MACGrid &vel, Vec3 strength)
+ : KernelBase(&flags, 1), flags(flags), factor(factor), vel(vel), strength(strength)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ const Grid<Real> &factor,
+ MACGrid &vel,
+ Vec3 strength) const
+ {
+ if (!flags.isFluid(i, j, k))
+ return;
+ if (flags.isFluid(i - 1, j, k))
+ vel(i, j, k).x += (0.5 * strength.x) * (factor(i, j, k) + factor(i - 1, j, k));
+ if (flags.isFluid(i, j - 1, k))
+ vel(i, j, k).y += (0.5 * strength.y) * (factor(i, j, k) + factor(i, j - 1, k));
+ if (vel.is3D() && flags.isFluid(i, j, k - 1))
+ vel(i, j, k).z += (0.5 * strength.z) * (factor(i, j, k) + factor(i, j, k - 1));
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const Grid<Real> &getArg1()
+ {
+ return factor;
+ }
+ typedef Grid<Real> type1;
+ inline MACGrid &getArg2()
+ {
+ return vel;
+ }
+ typedef MACGrid type2;
+ inline Vec3 &getArg3()
+ {
+ return strength;
+ }
+ typedef Vec3 type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnAddBuoyancy ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, factor, vel, strength);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, factor, vel, strength);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ const Grid<Real> &factor;
+ MACGrid &vel;
+ Vec3 strength;
+};
+
+//! add Buoyancy force based on fctor (e.g. smoke density)
+void addBuoyancy(const FlagGrid &flags,
+ const Grid<Real> &density,
+ MACGrid &vel,
+ Vec3 gravity,
+ Real coefficient = 1.)
+{
+ Vec3 f = -gravity * flags.getParent()->getDt() / flags.getParent()->getDx() * coefficient;
+ KnAddBuoyancy(flags, density, vel, f);
+}
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "addBuoyancy", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ const Grid<Real> &density = *_args.getPtr<Grid<Real>>("density", 1, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 2, &_lock);
+ Vec3 gravity = _args.get<Vec3>("gravity", 3, &_lock);
+ Real coefficient = _args.getOpt<Real>("coefficient", 4, 1., &_lock);
+ _retval = getPyNone();
+ addBuoyancy(flags, density, vel, gravity, coefficient);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "addBuoyancy", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("addBuoyancy", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_addBuoyancy("", "addBuoyancy", _W_2);
+extern "C" {
+void PbRegister_addBuoyancy()
+{
+ KEEP_UNUSED(_RP_addBuoyancy);
+}
+}
+
+// inflow / outflow boundaries
+
+//! helper to parse openbounds string [xXyYzZ] , convert to vec3
+inline void convertDescToVec(const string &desc, Vector3D<bool> &lo, Vector3D<bool> &up)
+{
+ for (size_t i = 0; i < desc.size(); i++) {
+ if (desc[i] == 'x')
+ lo.x = true;
+ else if (desc[i] == 'y')
+ lo.y = true;
+ else if (desc[i] == 'z')
+ lo.z = true;
+ else if (desc[i] == 'X')
+ up.x = true;
+ else if (desc[i] == 'Y')
+ up.y = true;
+ else if (desc[i] == 'Z')
+ up.z = true;
+ else
+ errMsg("invalid character in boundary description string. Only [xyzXYZ] allowed.");
+ }
+}
+
+//! add empty and outflow flag to cells of open boundaries
+void setOpenBound(FlagGrid &flags,
+ int bWidth,
+ string openBound = "",
+ int type = FlagGrid::TypeOutflow | FlagGrid::TypeEmpty)
+{
+ if (openBound == "")
+ return;
+ Vector3D<bool> lo, up;
+ convertDescToVec(openBound, lo, up);
+
+ FOR_IJK(flags)
+ {
+ bool loX = lo.x && i <= bWidth; // a cell which belongs to the lower x open bound
+ bool loY = lo.y && j <= bWidth;
+ bool upX = up.x && i >= flags.getSizeX() - bWidth -
+ 1; // a cell which belongs to the upper x open bound
+ bool upY = up.y && j >= flags.getSizeY() - bWidth - 1;
+ bool innerI = i > bWidth &&
+ i < flags.getSizeX() - bWidth -
+ 1; // a cell which does not belong to the lower or upper x bound
+ bool innerJ = j > bWidth && j < flags.getSizeY() - bWidth - 1;
+
+ // when setting boundaries to open: don't set shared part of wall to empty if neighboring wall
+ // is not open
+ if ((!flags.is3D()) && (loX || upX || loY || upY)) {
+ if ((loX || upX || innerI) && (loY || upY || innerJ) && flags.isObstacle(i, j, k))
+ flags(i, j, k) = type;
+ }
+ else {
+ bool loZ = lo.z && k <= bWidth; // a cell which belongs to the lower z open bound
+ bool upZ = up.z && k >= flags.getSizeZ() - bWidth -
+ 1; // a cell which belongs to the upper z open bound
+ bool innerK = k > bWidth &&
+ k < flags.getSizeZ() - bWidth -
+ 1; // a cell which does not belong to the lower or upper z bound
+ if (loX || upX || loY || upY || loZ || upZ) {
+ if ((loX || upX || innerI) && (loY || upY || innerJ) && (loZ || upZ || innerK) &&
+ flags.isObstacle(i, j, k))
+ flags(i, j, k) = type;
+ }
+ }
+ }
+}
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setOpenBound", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ int bWidth = _args.get<int>("bWidth", 1, &_lock);
+ string openBound = _args.getOpt<string>("openBound", 2, "", &_lock);
+ int type = _args.getOpt<int>("type", 3, FlagGrid::TypeOutflow | FlagGrid::TypeEmpty, &_lock);
+ _retval = getPyNone();
+ setOpenBound(flags, bWidth, openBound, type);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setOpenBound", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setOpenBound", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setOpenBound("", "setOpenBound", _W_3);
+extern "C" {
+void PbRegister_setOpenBound()
+{
+ KEEP_UNUSED(_RP_setOpenBound);
+}
+}
+
+//! delete fluid and ensure empty flag in outflow cells, delete particles and density and set phi
+//! to 0.5
+void resetOutflow(FlagGrid &flags,
+ Grid<Real> *phi = 0,
+ BasicParticleSystem *parts = 0,
+ Grid<Real> *real = 0,
+ Grid<int> *index = 0,
+ ParticleIndexSystem *indexSys = 0)
+{
+ // check if phi and parts -> pindex and gpi already created -> access particles from cell index,
+ // avoid extra looping over particles
+ if (parts && (!index || !indexSys)) {
+ if (phi)
+ debMsg(
+ "resetOpenBound for phi and particles, but missing index and indexSys for enhanced "
+ "particle access!",
+ 1);
+ for (int idx = 0; idx < (int)parts->size(); idx++)
+ if (parts->isActive(idx) && flags.isInBounds(parts->getPos(idx)) &&
+ flags.isOutflow(parts->getPos(idx)))
+ parts->kill(idx);
+ }
+ FOR_IJK(flags)
+ {
+ if (flags.isOutflow(i, j, k)) {
+ flags(i, j, k) = (flags(i, j, k) | FlagGrid::TypeEmpty) &
+ ~FlagGrid::TypeFluid; // make sure there is not fluid flag set and to reset
+ // the empty flag
+ // the particles in a cell i,j,k are particles[index(i,j,k)] to particles[index(i+1,j,k)-1]
+ if (parts && index && indexSys) {
+ int isysIdxS = index->index(i, j, k);
+ int pStart = (*index)(isysIdxS), pEnd = 0;
+ if (flags.isInBounds(isysIdxS + 1))
+ pEnd = (*index)(isysIdxS + 1);
+ else
+ pEnd = indexSys->size();
+ // now loop over particles in cell
+ for (int p = pStart; p < pEnd; ++p) {
+ int psrc = (*indexSys)[p].sourceIndex;
+ if (parts->isActive(psrc) && flags.isInBounds(parts->getPos(psrc)))
+ parts->kill(psrc);
+ }
+ }
+ if (phi)
+ (*phi)(i, j, k) = 0.5;
+ if (real)
+ (*real)(i, j, k) = 0;
+ }
+ }
+ if (parts)
+ parts->doCompress();
+}
+static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "resetOutflow", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> *phi = _args.getPtrOpt<Grid<Real>>("phi", 1, 0, &_lock);
+ BasicParticleSystem *parts = _args.getPtrOpt<BasicParticleSystem>("parts", 2, 0, &_lock);
+ Grid<Real> *real = _args.getPtrOpt<Grid<Real>>("real", 3, 0, &_lock);
+ Grid<int> *index = _args.getPtrOpt<Grid<int>>("index", 4, 0, &_lock);
+ ParticleIndexSystem *indexSys = _args.getPtrOpt<ParticleIndexSystem>(
+ "indexSys", 5, 0, &_lock);
+ _retval = getPyNone();
+ resetOutflow(flags, phi, parts, real, index, indexSys);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "resetOutflow", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("resetOutflow", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_resetOutflow("", "resetOutflow", _W_4);
+extern "C" {
+void PbRegister_resetOutflow()
+{
+ KEEP_UNUSED(_RP_resetOutflow);
+}
+}
+
+//! enforce a constant inflow/outflow at the grid boundaries
+struct KnSetInflow : public KernelBase {
+ KnSetInflow(MACGrid &vel, int dim, int p0, const Vec3 &val)
+ : KernelBase(&vel, 0), vel(vel), dim(dim), p0(p0), val(val)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, MACGrid &vel, int dim, int p0, const Vec3 &val) const
+ {
+ Vec3i p(i, j, k);
+ if (p[dim] == p0 || p[dim] == p0 + 1)
+ vel(i, j, k) = val;
+ }
+ inline MACGrid &getArg0()
+ {
+ return vel;
+ }
+ typedef MACGrid type0;
+ inline int &getArg1()
+ {
+ return dim;
+ }
+ typedef int type1;
+ inline int &getArg2()
+ {
+ return p0;
+ }
+ typedef int type2;
+ inline const Vec3 &getArg3()
+ {
+ return val;
+ }
+ typedef Vec3 type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnSetInflow ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, vel, dim, p0, val);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, vel, dim, p0, val);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ MACGrid &vel;
+ int dim;
+ int p0;
+ const Vec3 &val;
+};
+
+//! enforce a constant inflow/outflow at the grid boundaries
+void setInflowBcs(MACGrid &vel, string dir, Vec3 value)
+{
+ for (size_t i = 0; i < dir.size(); i++) {
+ if (dir[i] >= 'x' && dir[i] <= 'z') {
+ int dim = dir[i] - 'x';
+ KnSetInflow(vel, dim, 0, value);
+ }
+ else if (dir[i] >= 'X' && dir[i] <= 'Z') {
+ int dim = dir[i] - 'X';
+ KnSetInflow(vel, dim, vel.getSize()[dim] - 1, value);
+ }
+ else
+ errMsg("invalid character in direction string. Only [xyzXYZ] allowed.");
+ }
+}
+static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setInflowBcs", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 0, &_lock);
+ string dir = _args.get<string>("dir", 1, &_lock);
+ Vec3 value = _args.get<Vec3>("value", 2, &_lock);
+ _retval = getPyNone();
+ setInflowBcs(vel, dir, value);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setInflowBcs", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setInflowBcs", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setInflowBcs("", "setInflowBcs", _W_5);
+extern "C" {
+void PbRegister_setInflowBcs()
+{
+ KEEP_UNUSED(_RP_setInflowBcs);
+}
+}
+
+// set obstacle boundary conditions
+
+//! set no-stick wall boundary condition between ob/fl and ob/ob cells
+struct KnSetWallBcs : public KernelBase {
+ KnSetWallBcs(const FlagGrid &flags, MACGrid &vel, const MACGrid *obvel)
+ : KernelBase(&flags, 0), flags(flags), vel(vel), obvel(obvel)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(
+ int i, int j, int k, const FlagGrid &flags, MACGrid &vel, const MACGrid *obvel) const
+ {
+
+ bool curFluid = flags.isFluid(i, j, k);
+ bool curObs = flags.isObstacle(i, j, k);
+ Vec3 bcsVel(0., 0., 0.);
+ if (!curFluid && !curObs)
+ return;
+
+ if (obvel) {
+ bcsVel.x = (*obvel)(i, j, k).x;
+ bcsVel.y = (*obvel)(i, j, k).y;
+ if ((*obvel).is3D())
+ bcsVel.z = (*obvel)(i, j, k).z;
+ }
+
+ // we use i>0 instead of bnd=1 to check outer wall
+ if (i > 0 && flags.isObstacle(i - 1, j, k))
+ vel(i, j, k).x = bcsVel.x;
+ if (i > 0 && curObs && flags.isFluid(i - 1, j, k))
+ vel(i, j, k).x = bcsVel.x;
+ if (j > 0 && flags.isObstacle(i, j - 1, k))
+ vel(i, j, k).y = bcsVel.y;
+ if (j > 0 && curObs && flags.isFluid(i, j - 1, k))
+ vel(i, j, k).y = bcsVel.y;
+
+ if (!vel.is3D()) {
+ vel(i, j, k).z = 0;
+ }
+ else {
+ if (k > 0 && flags.isObstacle(i, j, k - 1))
+ vel(i, j, k).z = bcsVel.z;
+ if (k > 0 && curObs && flags.isFluid(i, j, k - 1))
+ vel(i, j, k).z = bcsVel.z;
+ }
+
+ if (curFluid) {
+ if ((i > 0 && flags.isStick(i - 1, j, k)) ||
+ (i < flags.getSizeX() - 1 && flags.isStick(i + 1, j, k)))
+ vel(i, j, k).y = vel(i, j, k).z = 0;
+ if ((j > 0 && flags.isStick(i, j - 1, k)) ||
+ (j < flags.getSizeY() - 1 && flags.isStick(i, j + 1, k)))
+ vel(i, j, k).x = vel(i, j, k).z = 0;
+ if (vel.is3D() && ((k > 0 && flags.isStick(i, j, k - 1)) ||
+ (k < flags.getSizeZ() - 1 && flags.isStick(i, j, k + 1))))
+ vel(i, j, k).x = vel(i, j, k).y = 0;
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline const MACGrid *getArg2()
+ {
+ return obvel;
+ }
+ typedef MACGrid type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnSetWallBcs ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, vel, obvel);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, vel, obvel);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ MACGrid &vel;
+ const MACGrid *obvel;
+};
+
+//! set wall BCs for fill fraction mode, note - only needs obstacle SDF
+
+struct KnSetWallBcsFrac : public KernelBase {
+ KnSetWallBcsFrac(const FlagGrid &flags,
+ const MACGrid &vel,
+ MACGrid &velTarget,
+ const MACGrid *obvel,
+ const Grid<Real> *phiObs,
+ const int &boundaryWidth = 0)
+ : KernelBase(&flags, 0),
+ flags(flags),
+ vel(vel),
+ velTarget(velTarget),
+ obvel(obvel),
+ phiObs(phiObs),
+ boundaryWidth(boundaryWidth)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ MACGrid &velTarget,
+ const MACGrid *obvel,
+ const Grid<Real> *phiObs,
+ const int &boundaryWidth = 0) const
+ {
+ bool curFluid = flags.isFluid(i, j, k);
+ bool curObs = flags.isObstacle(i, j, k);
+ velTarget(i, j, k) = vel(i, j, k);
+ if (!curFluid && !curObs)
+ return;
+
+ // zero normal component in all obstacle regions
+ if (flags.isInBounds(Vec3i(i, j, k), 1)) {
+
+ if (curObs | flags.isObstacle(i - 1, j, k)) {
+ Vec3 dphi(0., 0., 0.);
+ const Real tmp1 = (phiObs->get(i, j, k) + phiObs->get(i - 1, j, k)) * .5;
+ Real tmp2 = (phiObs->get(i, j + 1, k) + phiObs->get(i - 1, j + 1, k)) * .5;
+ Real phi1 = (tmp1 + tmp2) * .5;
+ tmp2 = (phiObs->get(i, j - 1, k) + phiObs->get(i - 1, j - 1, k)) * .5;
+ Real phi2 = (tmp1 + tmp2) * .5;
+
+ dphi.x = phiObs->get(i, j, k) - phiObs->get(i - 1, j, k);
+ dphi.y = phi1 - phi2;
+
+ if (phiObs->is3D()) {
+ tmp2 = (phiObs->get(i, j, k + 1) + phiObs->get(i - 1, j, k + 1)) * .5;
+ phi1 = (tmp1 + tmp2) * .5;
+ tmp2 = (phiObs->get(i, j, k - 1) + phiObs->get(i - 1, j, k - 1)) * .5;
+ phi2 = (tmp1 + tmp2) * .5;
+ dphi.z = phi1 - phi2;
+ }
+
+ normalize(dphi);
+ Vec3 velMAC = vel.getAtMACX(i, j, k);
+ velTarget(i, j, k).x = velMAC.x - dot(dphi, velMAC) * dphi.x;
+ }
+
+ if (curObs | flags.isObstacle(i, j - 1, k)) {
+ Vec3 dphi(0., 0., 0.);
+ const Real tmp1 = (phiObs->get(i, j, k) + phiObs->get(i, j - 1, k)) * .5;
+ Real tmp2 = (phiObs->get(i + 1, j, k) + phiObs->get(i + 1, j - 1, k)) * .5;
+ Real phi1 = (tmp1 + tmp2) * .5;
+ tmp2 = (phiObs->get(i - 1, j, k) + phiObs->get(i - 1, j - 1, k)) * .5;
+ Real phi2 = (tmp1 + tmp2) * .5;
+
+ dphi.x = phi1 - phi2;
+ dphi.y = phiObs->get(i, j, k) - phiObs->get(i, j - 1, k);
+ if (phiObs->is3D()) {
+ tmp2 = (phiObs->get(i, j, k + 1) + phiObs->get(i, j - 1, k + 1)) * .5;
+ phi1 = (tmp1 + tmp2) * .5;
+ tmp2 = (phiObs->get(i, j, k - 1) + phiObs->get(i, j - 1, k - 1)) * .5;
+ phi2 = (tmp1 + tmp2) * .5;
+ dphi.z = phi1 - phi2;
+ }
+
+ normalize(dphi);
+ Vec3 velMAC = vel.getAtMACY(i, j, k);
+ velTarget(i, j, k).y = velMAC.y - dot(dphi, velMAC) * dphi.y;
+ }
+
+ if (phiObs->is3D() && (curObs | flags.isObstacle(i, j, k - 1))) {
+ Vec3 dphi(0., 0., 0.);
+ const Real tmp1 = (phiObs->get(i, j, k) + phiObs->get(i, j, k - 1)) * .5;
+
+ Real tmp2;
+ tmp2 = (phiObs->get(i + 1, j, k) + phiObs->get(i + 1, j, k - 1)) * .5;
+ Real phi1 = (tmp1 + tmp2) * .5;
+ tmp2 = (phiObs->get(i - 1, j, k) + phiObs->get(i - 1, j, k - 1)) * .5;
+ Real phi2 = (tmp1 + tmp2) * .5;
+ dphi.x = phi1 - phi2;
+
+ tmp2 = (phiObs->get(i, j + 1, k) + phiObs->get(i, j + 1, k - 1)) * .5;
+ phi1 = (tmp1 + tmp2) * .5;
+ tmp2 = (phiObs->get(i, j - 1, k) + phiObs->get(i, j - 1, k - 1)) * .5;
+ phi2 = (tmp1 + tmp2) * .5;
+ dphi.y = phi1 - phi2;
+
+ dphi.z = phiObs->get(i, j, k) - phiObs->get(i, j, k - 1);
+
+ normalize(dphi);
+ Vec3 velMAC = vel.getAtMACZ(i, j, k);
+ velTarget(i, j, k).z = velMAC.z - dot(dphi, velMAC) * dphi.z;
+ }
+ } // not at boundary
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline MACGrid &getArg2()
+ {
+ return velTarget;
+ }
+ typedef MACGrid type2;
+ inline const MACGrid *getArg3()
+ {
+ return obvel;
+ }
+ typedef MACGrid type3;
+ inline const Grid<Real> *getArg4()
+ {
+ return phiObs;
+ }
+ typedef Grid<Real> type4;
+ inline const int &getArg5()
+ {
+ return boundaryWidth;
+ }
+ typedef int type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnSetWallBcsFrac ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, vel, velTarget, obvel, phiObs, boundaryWidth);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, vel, velTarget, obvel, phiObs, boundaryWidth);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ MACGrid &velTarget;
+ const MACGrid *obvel;
+ const Grid<Real> *phiObs;
+ const int &boundaryWidth;
+};
+
+//! set zero normal velocity boundary condition on walls
+// (optionally with second order accuracy using the obstacle SDF , fractions grid currentlyl not
+// needed)
+void setWallBcs(const FlagGrid &flags,
+ MACGrid &vel,
+ const MACGrid *obvel = 0,
+ const MACGrid *fractions = 0,
+ const Grid<Real> *phiObs = 0,
+ int boundaryWidth = 0)
+{
+ if (!phiObs || !fractions) {
+ KnSetWallBcs(flags, vel, obvel);
+ }
+ else {
+ MACGrid tmpvel(vel.getParent());
+ KnSetWallBcsFrac(flags, vel, tmpvel, obvel, phiObs, boundaryWidth);
+ vel.swap(tmpvel);
+ }
+}
+static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setWallBcs", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ const MACGrid *obvel = _args.getPtrOpt<MACGrid>("obvel", 2, 0, &_lock);
+ const MACGrid *fractions = _args.getPtrOpt<MACGrid>("fractions", 3, 0, &_lock);
+ const Grid<Real> *phiObs = _args.getPtrOpt<Grid<Real>>("phiObs", 4, 0, &_lock);
+ int boundaryWidth = _args.getOpt<int>("boundaryWidth", 5, 0, &_lock);
+ _retval = getPyNone();
+ setWallBcs(flags, vel, obvel, fractions, phiObs, boundaryWidth);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setWallBcs", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setWallBcs", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setWallBcs("", "setWallBcs", _W_6);
+extern "C" {
+void PbRegister_setWallBcs()
+{
+ KEEP_UNUSED(_RP_setWallBcs);
+}
+}
+
+//! add Forces between fl/fl and fl/em cells (interpolate cell centered forces to MAC grid)
+struct KnAddForceIfLower : public KernelBase {
+ KnAddForceIfLower(const FlagGrid &flags, MACGrid &vel, const Grid<Vec3> &force)
+ : KernelBase(&flags, 1), flags(flags), vel(vel), force(force)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(
+ int i, int j, int k, const FlagGrid &flags, MACGrid &vel, const Grid<Vec3> &force) const
+ {
+ bool curFluid = flags.isFluid(i, j, k);
+ bool curEmpty = flags.isEmpty(i, j, k);
+ if (!curFluid && !curEmpty)
+ return;
+
+ if (flags.isFluid(i - 1, j, k) || (curFluid && flags.isEmpty(i - 1, j, k))) {
+ Real forceMACX = 0.5 * (force(i - 1, j, k).x + force(i, j, k).x);
+ Real min = std::min(vel(i, j, k).x, forceMACX);
+ Real max = std::max(vel(i, j, k).x, forceMACX);
+ Real sum = vel(i, j, k).x + forceMACX;
+ vel(i, j, k).x = (forceMACX > 0) ? std::min(sum, max) : std::max(sum, min);
+ }
+ if (flags.isFluid(i, j - 1, k) || (curFluid && flags.isEmpty(i, j - 1, k))) {
+ Real forceMACY = 0.5 * (force(i, j - 1, k).y + force(i, j, k).y);
+ Real min = std::min(vel(i, j, k).y, forceMACY);
+ Real max = std::max(vel(i, j, k).y, forceMACY);
+ Real sum = vel(i, j, k).y + forceMACY;
+ vel(i, j, k).y = (forceMACY > 0) ? std::min(sum, max) : std::max(sum, min);
+ }
+ if (vel.is3D() && (flags.isFluid(i, j, k - 1) || (curFluid && flags.isEmpty(i, j, k - 1)))) {
+ Real forceMACZ = 0.5 * (force(i, j, k - 1).z + force(i, j, k).z);
+ Real min = std::min(vel(i, j, k).z, forceMACZ);
+ Real max = std::max(vel(i, j, k).z, forceMACZ);
+ Real sum = vel(i, j, k).z + forceMACZ;
+ vel(i, j, k).z = (forceMACZ > 0) ? std::min(sum, max) : std::max(sum, min);
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline const Grid<Vec3> &getArg2()
+ {
+ return force;
+ }
+ typedef Grid<Vec3> type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnAddForceIfLower ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, force);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, vel, force);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ MACGrid &vel;
+ const Grid<Vec3> &force;
+};
+
+// Initial velocity for smoke
+void setInitialVelocity(const FlagGrid &flags, MACGrid &vel, const Grid<Vec3> &invel)
+{
+ KnAddForceIfLower(flags, vel, invel);
+}
+static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setInitialVelocity", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ const Grid<Vec3> &invel = *_args.getPtr<Grid<Vec3>>("invel", 2, &_lock);
+ _retval = getPyNone();
+ setInitialVelocity(flags, vel, invel);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setInitialVelocity", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setInitialVelocity", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setInitialVelocity("", "setInitialVelocity", _W_7);
+extern "C" {
+void PbRegister_setInitialVelocity()
+{
+ KEEP_UNUSED(_RP_setInitialVelocity);
+}
+}
+
+//! Kernel: gradient norm operator
+struct KnConfForce : public KernelBase {
+ KnConfForce(Grid<Vec3> &force,
+ const Grid<Real> &grid,
+ const Grid<Vec3> &curl,
+ Real str,
+ const Grid<Real> *strGrid)
+ : KernelBase(&force, 1), force(force), grid(grid), curl(curl), str(str), strGrid(strGrid)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ Grid<Vec3> &force,
+ const Grid<Real> &grid,
+ const Grid<Vec3> &curl,
+ Real str,
+ const Grid<Real> *strGrid) const
+ {
+ Vec3 grad = 0.5 * Vec3(grid(i + 1, j, k) - grid(i - 1, j, k),
+ grid(i, j + 1, k) - grid(i, j - 1, k),
+ 0.);
+ if (grid.is3D())
+ grad[2] = 0.5 * (grid(i, j, k + 1) - grid(i, j, k - 1));
+ normalize(grad);
+ if (strGrid)
+ str += (*strGrid)(i, j, k);
+ force(i, j, k) = str * cross(grad, curl(i, j, k));
+ }
+ inline Grid<Vec3> &getArg0()
+ {
+ return force;
+ }
+ typedef Grid<Vec3> type0;
+ inline const Grid<Real> &getArg1()
+ {
+ return grid;
+ }
+ typedef Grid<Real> type1;
+ inline const Grid<Vec3> &getArg2()
+ {
+ return curl;
+ }
+ typedef Grid<Vec3> type2;
+ inline Real &getArg3()
+ {
+ return str;
+ }
+ typedef Real type3;
+ inline const Grid<Real> *getArg4()
+ {
+ return strGrid;
+ }
+ typedef Grid<Real> type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnConfForce ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, force, grid, curl, str, strGrid);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, force, grid, curl, str, strGrid);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ Grid<Vec3> &force;
+ const Grid<Real> &grid;
+ const Grid<Vec3> &curl;
+ Real str;
+ const Grid<Real> *strGrid;
+};
+
+void vorticityConfinement(MACGrid &vel,
+ const FlagGrid &flags,
+ Real strengthGlobal = 0,
+ const Grid<Real> *strengthCell = NULL)
+{
+ Grid<Vec3> velCenter(flags.getParent()), curl(flags.getParent()), force(flags.getParent());
+ Grid<Real> norm(flags.getParent());
+
+ GetCentered(velCenter, vel);
+ CurlOp(velCenter, curl);
+ GridNorm(norm, curl);
+ KnConfForce(force, norm, curl, strengthGlobal, strengthCell);
+ KnApplyForceField(flags, vel, force, NULL, true, false);
+}
+static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "vorticityConfinement", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 0, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 1, &_lock);
+ Real strengthGlobal = _args.getOpt<Real>("strengthGlobal", 2, 0, &_lock);
+ const Grid<Real> *strengthCell = _args.getPtrOpt<Grid<Real>>(
+ "strengthCell", 3, NULL, &_lock);
+ _retval = getPyNone();
+ vorticityConfinement(vel, flags, strengthGlobal, strengthCell);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "vorticityConfinement", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("vorticityConfinement", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_vorticityConfinement("", "vorticityConfinement", _W_8);
+extern "C" {
+void PbRegister_vorticityConfinement()
+{
+ KEEP_UNUSED(_RP_vorticityConfinement);
+}
+}
+
+void addForceField(const FlagGrid &flags,
+ MACGrid &vel,
+ const Grid<Vec3> &force,
+ const Grid<Real> *region = NULL,
+ bool isMAC = false)
+{
+ KnApplyForceField(flags, vel, force, region, true, isMAC);
+}
+static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "addForceField", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ const Grid<Vec3> &force = *_args.getPtr<Grid<Vec3>>("force", 2, &_lock);
+ const Grid<Real> *region = _args.getPtrOpt<Grid<Real>>("region", 3, NULL, &_lock);
+ bool isMAC = _args.getOpt<bool>("isMAC", 4, false, &_lock);
+ _retval = getPyNone();
+ addForceField(flags, vel, force, region, isMAC);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "addForceField", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("addForceField", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_addForceField("", "addForceField", _W_9);
+extern "C" {
+void PbRegister_addForceField()
+{
+ KEEP_UNUSED(_RP_addForceField);
+}
+}
+
+void setForceField(const FlagGrid &flags,
+ MACGrid &vel,
+ const Grid<Vec3> &force,
+ const Grid<Real> *region = NULL,
+ bool isMAC = false)
+{
+ KnApplyForceField(flags, vel, force, region, false, isMAC);
+}
+static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setForceField", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ const Grid<Vec3> &force = *_args.getPtr<Grid<Vec3>>("force", 2, &_lock);
+ const Grid<Real> *region = _args.getPtrOpt<Grid<Real>>("region", 3, NULL, &_lock);
+ bool isMAC = _args.getOpt<bool>("isMAC", 4, false, &_lock);
+ _retval = getPyNone();
+ setForceField(flags, vel, force, region, isMAC);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setForceField", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setForceField", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setForceField("", "setForceField", _W_10);
+extern "C" {
+void PbRegister_setForceField()
+{
+ KEEP_UNUSED(_RP_setForceField);
+}
+}
+
+void dissolveSmoke(const FlagGrid &flags,
+ Grid<Real> &density,
+ Grid<Real> *heat = NULL,
+ Grid<Real> *red = NULL,
+ Grid<Real> *green = NULL,
+ Grid<Real> *blue = NULL,
+ int speed = 5,
+ bool logFalloff = true)
+{
+ float dydx = 1.0f / (float)speed; // max density/speed = dydx
+ float fac = 1.0f - dydx;
+
+ FOR_IJK_BND(density, 0)
+ {
+ bool curFluid = flags.isFluid(i, j, k);
+ if (!curFluid)
+ continue;
+
+ if (logFalloff) {
+ density(i, j, k) *= fac;
+ if (heat) {
+ (*heat)(i, j, k) *= fac;
+ }
+ if (red) {
+ (*red)(i, j, k) *= fac;
+ (*green)(i, j, k) *= fac;
+ (*blue)(i, j, k) *= fac;
+ }
+ }
+ else { // linear falloff
+ float d = density(i, j, k);
+ density(i, j, k) -= dydx;
+ if (density(i, j, k) < 0.0f)
+ density(i, j, k) = 0.0f;
+ if (heat) {
+ if (fabs((*heat)(i, j, k)) < dydx)
+ (*heat)(i, j, k) = 0.0f;
+ else if ((*heat)(i, j, k) > 0.0f)
+ (*heat)(i, j, k) -= dydx;
+ else if ((*heat)(i, j, k) < 0.0f)
+ (*heat)(i, j, k) += dydx;
+ }
+ if (red && notZero(d)) {
+ (*red)(i, j, k) *= (density(i, j, k) / d);
+ (*green)(i, j, k) *= (density(i, j, k) / d);
+ (*blue)(i, j, k) *= (density(i, j, k) / d);
+ }
+ }
+ }
+}
+static PyObject *_W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "dissolveSmoke", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &density = *_args.getPtr<Grid<Real>>("density", 1, &_lock);
+ Grid<Real> *heat = _args.getPtrOpt<Grid<Real>>("heat", 2, NULL, &_lock);
+ Grid<Real> *red = _args.getPtrOpt<Grid<Real>>("red", 3, NULL, &_lock);
+ Grid<Real> *green = _args.getPtrOpt<Grid<Real>>("green", 4, NULL, &_lock);
+ Grid<Real> *blue = _args.getPtrOpt<Grid<Real>>("blue", 5, NULL, &_lock);
+ int speed = _args.getOpt<int>("speed", 6, 5, &_lock);
+ bool logFalloff = _args.getOpt<bool>("logFalloff", 7, true, &_lock);
+ _retval = getPyNone();
+ dissolveSmoke(flags, density, heat, red, green, blue, speed, logFalloff);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "dissolveSmoke", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("dissolveSmoke", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_dissolveSmoke("", "dissolveSmoke", _W_11);
+extern "C" {
+void PbRegister_dissolveSmoke()
+{
+ KEEP_UNUSED(_RP_dissolveSmoke);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/fire.cpp b/extern/mantaflow/preprocessed/plugin/fire.cpp
new file mode 100644
index 00000000000..9047d4bf8a1
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/fire.cpp
@@ -0,0 +1,435 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2016 Sebastian Barschkis, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Fire modeling plugin
+ *
+ ******************************************************************************/
+
+#include "general.h"
+#include "grid.h"
+#include "vectorbase.h"
+
+using namespace std;
+
+namespace Manta {
+
+struct KnProcessBurn : public KernelBase {
+ KnProcessBurn(Grid<Real> &fuel,
+ Grid<Real> &density,
+ Grid<Real> &react,
+ Grid<Real> *red,
+ Grid<Real> *green,
+ Grid<Real> *blue,
+ Grid<Real> *heat,
+ Real burningRate,
+ Real flameSmoke,
+ Real ignitionTemp,
+ Real maxTemp,
+ Real dt,
+ Vec3 flameSmokeColor)
+ : KernelBase(&fuel, 1),
+ fuel(fuel),
+ density(density),
+ react(react),
+ red(red),
+ green(green),
+ blue(blue),
+ heat(heat),
+ burningRate(burningRate),
+ flameSmoke(flameSmoke),
+ ignitionTemp(ignitionTemp),
+ maxTemp(maxTemp),
+ dt(dt),
+ flameSmokeColor(flameSmokeColor)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ Grid<Real> &fuel,
+ Grid<Real> &density,
+ Grid<Real> &react,
+ Grid<Real> *red,
+ Grid<Real> *green,
+ Grid<Real> *blue,
+ Grid<Real> *heat,
+ Real burningRate,
+ Real flameSmoke,
+ Real ignitionTemp,
+ Real maxTemp,
+ Real dt,
+ Vec3 flameSmokeColor) const
+ {
+ // Save initial values
+ Real origFuel = fuel(i, j, k);
+ Real origSmoke = density(i, j, k);
+ Real smokeEmit = 0.0f;
+ Real flame = 0.0f;
+
+ // Process fuel
+ fuel(i, j, k) -= burningRate * dt;
+ if (fuel(i, j, k) < 0.0f)
+ fuel(i, j, k) = 0.0f;
+
+ // Process reaction coordinate
+ if (origFuel > VECTOR_EPSILON) {
+ react(i, j, k) *= fuel(i, j, k) / origFuel;
+ flame = pow(react(i, j, k), 0.5f);
+ }
+ else {
+ react(i, j, k) = 0.0f;
+ }
+
+ // Set fluid temperature based on fuel burn rate and "flameSmoke" factor
+ smokeEmit = (origFuel < 1.0f) ? (1.0 - origFuel) * 0.5f : 0.0f;
+ smokeEmit = (smokeEmit + 0.5f) * (origFuel - fuel(i, j, k)) * 0.1f * flameSmoke;
+ density(i, j, k) += smokeEmit;
+ clamp(density(i, j, k), (Real)0.0f, (Real)1.0f);
+
+ // Set fluid temperature from the flame temperature profile
+ if (heat && flame)
+ (*heat)(i, j, k) = (1.0f - flame) * ignitionTemp + flame * maxTemp;
+
+ // Mix new color
+ if (smokeEmit > VECTOR_EPSILON) {
+ float smokeFactor = density(i, j, k) / (origSmoke + smokeEmit);
+ if (red)
+ (*red)(i, j, k) = ((*red)(i, j, k) + flameSmokeColor.x * smokeEmit) * smokeFactor;
+ if (green)
+ (*green)(i, j, k) = ((*green)(i, j, k) + flameSmokeColor.y * smokeEmit) * smokeFactor;
+ if (blue)
+ (*blue)(i, j, k) = ((*blue)(i, j, k) + flameSmokeColor.z * smokeEmit) * smokeFactor;
+ }
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return fuel;
+ }
+ typedef Grid<Real> type0;
+ inline Grid<Real> &getArg1()
+ {
+ return density;
+ }
+ typedef Grid<Real> type1;
+ inline Grid<Real> &getArg2()
+ {
+ return react;
+ }
+ typedef Grid<Real> type2;
+ inline Grid<Real> *getArg3()
+ {
+ return red;
+ }
+ typedef Grid<Real> type3;
+ inline Grid<Real> *getArg4()
+ {
+ return green;
+ }
+ typedef Grid<Real> type4;
+ inline Grid<Real> *getArg5()
+ {
+ return blue;
+ }
+ typedef Grid<Real> type5;
+ inline Grid<Real> *getArg6()
+ {
+ return heat;
+ }
+ typedef Grid<Real> type6;
+ inline Real &getArg7()
+ {
+ return burningRate;
+ }
+ typedef Real type7;
+ inline Real &getArg8()
+ {
+ return flameSmoke;
+ }
+ typedef Real type8;
+ inline Real &getArg9()
+ {
+ return ignitionTemp;
+ }
+ typedef Real type9;
+ inline Real &getArg10()
+ {
+ return maxTemp;
+ }
+ typedef Real type10;
+ inline Real &getArg11()
+ {
+ return dt;
+ }
+ typedef Real type11;
+ inline Vec3 &getArg12()
+ {
+ return flameSmokeColor;
+ }
+ typedef Vec3 type12;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnProcessBurn ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i,
+ j,
+ k,
+ fuel,
+ density,
+ react,
+ red,
+ green,
+ blue,
+ heat,
+ burningRate,
+ flameSmoke,
+ ignitionTemp,
+ maxTemp,
+ dt,
+ flameSmokeColor);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i,
+ j,
+ k,
+ fuel,
+ density,
+ react,
+ red,
+ green,
+ blue,
+ heat,
+ burningRate,
+ flameSmoke,
+ ignitionTemp,
+ maxTemp,
+ dt,
+ flameSmokeColor);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ Grid<Real> &fuel;
+ Grid<Real> &density;
+ Grid<Real> &react;
+ Grid<Real> *red;
+ Grid<Real> *green;
+ Grid<Real> *blue;
+ Grid<Real> *heat;
+ Real burningRate;
+ Real flameSmoke;
+ Real ignitionTemp;
+ Real maxTemp;
+ Real dt;
+ Vec3 flameSmokeColor;
+};
+
+void processBurn(Grid<Real> &fuel,
+ Grid<Real> &density,
+ Grid<Real> &react,
+ Grid<Real> *red = NULL,
+ Grid<Real> *green = NULL,
+ Grid<Real> *blue = NULL,
+ Grid<Real> *heat = NULL,
+ Real burningRate = 0.75f,
+ Real flameSmoke = 1.0f,
+ Real ignitionTemp = 1.25f,
+ Real maxTemp = 1.75f,
+ Vec3 flameSmokeColor = Vec3(0.7f, 0.7f, 0.7f))
+{
+ Real dt = fuel.getParent()->getDt();
+ KnProcessBurn(fuel,
+ density,
+ react,
+ red,
+ green,
+ blue,
+ heat,
+ burningRate,
+ flameSmoke,
+ ignitionTemp,
+ maxTemp,
+ dt,
+ flameSmokeColor);
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "processBurn", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &fuel = *_args.getPtr<Grid<Real>>("fuel", 0, &_lock);
+ Grid<Real> &density = *_args.getPtr<Grid<Real>>("density", 1, &_lock);
+ Grid<Real> &react = *_args.getPtr<Grid<Real>>("react", 2, &_lock);
+ Grid<Real> *red = _args.getPtrOpt<Grid<Real>>("red", 3, NULL, &_lock);
+ Grid<Real> *green = _args.getPtrOpt<Grid<Real>>("green", 4, NULL, &_lock);
+ Grid<Real> *blue = _args.getPtrOpt<Grid<Real>>("blue", 5, NULL, &_lock);
+ Grid<Real> *heat = _args.getPtrOpt<Grid<Real>>("heat", 6, NULL, &_lock);
+ Real burningRate = _args.getOpt<Real>("burningRate", 7, 0.75f, &_lock);
+ Real flameSmoke = _args.getOpt<Real>("flameSmoke", 8, 1.0f, &_lock);
+ Real ignitionTemp = _args.getOpt<Real>("ignitionTemp", 9, 1.25f, &_lock);
+ Real maxTemp = _args.getOpt<Real>("maxTemp", 10, 1.75f, &_lock);
+ Vec3 flameSmokeColor = _args.getOpt<Vec3>(
+ "flameSmokeColor", 11, Vec3(0.7f, 0.7f, 0.7f), &_lock);
+ _retval = getPyNone();
+ processBurn(fuel,
+ density,
+ react,
+ red,
+ green,
+ blue,
+ heat,
+ burningRate,
+ flameSmoke,
+ ignitionTemp,
+ maxTemp,
+ flameSmokeColor);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "processBurn", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("processBurn", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_processBurn("", "processBurn", _W_0);
+extern "C" {
+void PbRegister_processBurn()
+{
+ KEEP_UNUSED(_RP_processBurn);
+}
+}
+
+struct KnUpdateFlame : public KernelBase {
+ KnUpdateFlame(const Grid<Real> &react, Grid<Real> &flame)
+ : KernelBase(&react, 1), react(react), flame(flame)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const Grid<Real> &react, Grid<Real> &flame) const
+ {
+ if (react(i, j, k) > 0.0f)
+ flame(i, j, k) = pow(react(i, j, k), 0.5f);
+ else
+ flame(i, j, k) = 0.0f;
+ }
+ inline const Grid<Real> &getArg0()
+ {
+ return react;
+ }
+ typedef Grid<Real> type0;
+ inline Grid<Real> &getArg1()
+ {
+ return flame;
+ }
+ typedef Grid<Real> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnUpdateFlame ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, react, flame);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, react, flame);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const Grid<Real> &react;
+ Grid<Real> &flame;
+};
+
+void updateFlame(const Grid<Real> &react, Grid<Real> &flame)
+{
+ KnUpdateFlame(react, flame);
+}
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "updateFlame", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid<Real> &react = *_args.getPtr<Grid<Real>>("react", 0, &_lock);
+ Grid<Real> &flame = *_args.getPtr<Grid<Real>>("flame", 1, &_lock);
+ _retval = getPyNone();
+ updateFlame(react, flame);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "updateFlame", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("updateFlame", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_updateFlame("", "updateFlame", _W_1);
+extern "C" {
+void PbRegister_updateFlame()
+{
+ KEEP_UNUSED(_RP_updateFlame);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/flip.cpp b/extern/mantaflow/preprocessed/plugin/flip.cpp
new file mode 100644
index 00000000000..f6d082900b5
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/flip.cpp
@@ -0,0 +1,2819 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * FLIP (fluid implicit particles)
+ * for use with particle data fields
+ *
+ ******************************************************************************/
+
+#include "particle.h"
+#include "grid.h"
+#include "commonkernels.h"
+#include "randomstream.h"
+#include "levelset.h"
+#include "shapes.h"
+#include "matrixbase.h"
+
+using namespace std;
+namespace Manta {
+
+// init
+
+//! note - this is a simplified version , sampleLevelsetWithParticles has more functionality
+
+void sampleFlagsWithParticles(const FlagGrid &flags,
+ BasicParticleSystem &parts,
+ const int discretization,
+ const Real randomness)
+{
+ const bool is3D = flags.is3D();
+ const Real jlen = randomness / discretization;
+ const Vec3 disp(1.0 / discretization, 1.0 / discretization, 1.0 / discretization);
+ RandomStream mRand(9832);
+
+ FOR_IJK_BND(flags, 0)
+ {
+ if (flags.isObstacle(i, j, k))
+ continue;
+ if (flags.isFluid(i, j, k)) {
+ const Vec3 pos(i, j, k);
+ for (int dk = 0; dk < (is3D ? discretization : 1); dk++)
+ for (int dj = 0; dj < discretization; dj++)
+ for (int di = 0; di < discretization; di++) {
+ Vec3 subpos = pos + disp * Vec3(0.5 + di, 0.5 + dj, 0.5 + dk);
+ subpos += jlen * (Vec3(1, 1, 1) - 2.0 * mRand.getVec3());
+ if (!is3D)
+ subpos[2] = 0.5;
+ parts.addBuffered(subpos);
+ }
+ }
+ }
+ parts.insertBufferedParticles();
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "sampleFlagsWithParticles", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 1, &_lock);
+ const int discretization = _args.get<int>("discretization", 2, &_lock);
+ const Real randomness = _args.get<Real>("randomness", 3, &_lock);
+ _retval = getPyNone();
+ sampleFlagsWithParticles(flags, parts, discretization, randomness);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "sampleFlagsWithParticles", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("sampleFlagsWithParticles", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_sampleFlagsWithParticles("", "sampleFlagsWithParticles", _W_0);
+extern "C" {
+void PbRegister_sampleFlagsWithParticles()
+{
+ KEEP_UNUSED(_RP_sampleFlagsWithParticles);
+}
+}
+
+//! sample a level set with particles, use reset to clear the particle buffer,
+//! and skipEmpty for a continuous inflow (in the latter case, only empty cells will
+//! be re-filled once they empty when calling sampleLevelsetWithParticles during
+//! the main loop).
+
+void sampleLevelsetWithParticles(const LevelsetGrid &phi,
+ const FlagGrid &flags,
+ BasicParticleSystem &parts,
+ const int discretization,
+ const Real randomness,
+ const bool reset = false,
+ const bool refillEmpty = false,
+ const int particleFlag = -1)
+{
+ const bool is3D = phi.is3D();
+ const Real jlen = randomness / discretization;
+ const Vec3 disp(1.0 / discretization, 1.0 / discretization, 1.0 / discretization);
+ RandomStream mRand(9832);
+
+ if (reset) {
+ parts.clear();
+ parts.doCompress();
+ }
+
+ FOR_IJK_BND(phi, 0)
+ {
+ if (flags.isObstacle(i, j, k))
+ continue;
+ if (refillEmpty && flags.isFluid(i, j, k))
+ continue;
+ if (phi(i, j, k) < 1.733) {
+ const Vec3 pos(i, j, k);
+ for (int dk = 0; dk < (is3D ? discretization : 1); dk++)
+ for (int dj = 0; dj < discretization; dj++)
+ for (int di = 0; di < discretization; di++) {
+ Vec3 subpos = pos + disp * Vec3(0.5 + di, 0.5 + dj, 0.5 + dk);
+ subpos += jlen * (Vec3(1, 1, 1) - 2.0 * mRand.getVec3());
+ if (!is3D)
+ subpos[2] = 0.5;
+ if (phi.getInterpolated(subpos) > 0.)
+ continue;
+ if (particleFlag < 0) {
+ parts.addBuffered(subpos);
+ }
+ else {
+ parts.addBuffered(subpos, particleFlag);
+ }
+ }
+ }
+ }
+
+ parts.insertBufferedParticles();
+}
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "sampleLevelsetWithParticles", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const LevelsetGrid &phi = *_args.getPtr<LevelsetGrid>("phi", 0, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 1, &_lock);
+ BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 2, &_lock);
+ const int discretization = _args.get<int>("discretization", 3, &_lock);
+ const Real randomness = _args.get<Real>("randomness", 4, &_lock);
+ const bool reset = _args.getOpt<bool>("reset", 5, false, &_lock);
+ const bool refillEmpty = _args.getOpt<bool>("refillEmpty", 6, false, &_lock);
+ const int particleFlag = _args.getOpt<int>("particleFlag", 7, -1, &_lock);
+ _retval = getPyNone();
+ sampleLevelsetWithParticles(
+ phi, flags, parts, discretization, randomness, reset, refillEmpty, particleFlag);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "sampleLevelsetWithParticles", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("sampleLevelsetWithParticles", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_sampleLevelsetWithParticles("", "sampleLevelsetWithParticles", _W_1);
+extern "C" {
+void PbRegister_sampleLevelsetWithParticles()
+{
+ KEEP_UNUSED(_RP_sampleLevelsetWithParticles);
+}
+}
+
+//! sample a shape with particles, use reset to clear the particle buffer,
+//! and skipEmpty for a continuous inflow (in the latter case, only empty cells will
+//! be re-filled once they empty when calling sampleShapeWithParticles during
+//! the main loop).
+
+void sampleShapeWithParticles(const Shape &shape,
+ const FlagGrid &flags,
+ BasicParticleSystem &parts,
+ const int discretization,
+ const Real randomness,
+ const bool reset = false,
+ const bool refillEmpty = false,
+ const LevelsetGrid *exclude = NULL)
+{
+ const bool is3D = flags.is3D();
+ const Real jlen = randomness / discretization;
+ const Vec3 disp(1.0 / discretization, 1.0 / discretization, 1.0 / discretization);
+ RandomStream mRand(9832);
+
+ if (reset) {
+ parts.clear();
+ parts.doCompress();
+ }
+
+ FOR_IJK_BND(flags, 0)
+ {
+ if (flags.isObstacle(i, j, k))
+ continue;
+ if (refillEmpty && flags.isFluid(i, j, k))
+ continue;
+ const Vec3 pos(i, j, k);
+ for (int dk = 0; dk < (is3D ? discretization : 1); dk++)
+ for (int dj = 0; dj < discretization; dj++)
+ for (int di = 0; di < discretization; di++) {
+ Vec3 subpos = pos + disp * Vec3(0.5 + di, 0.5 + dj, 0.5 + dk);
+ subpos += jlen * (Vec3(1, 1, 1) - 2.0 * mRand.getVec3());
+ if (!is3D)
+ subpos[2] = 0.5;
+ if (exclude && exclude->getInterpolated(subpos) <= 0.)
+ continue;
+ if (!shape.isInside(subpos))
+ continue;
+ parts.addBuffered(subpos);
+ }
+ }
+
+ parts.insertBufferedParticles();
+}
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "sampleShapeWithParticles", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Shape &shape = *_args.getPtr<Shape>("shape", 0, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 1, &_lock);
+ BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 2, &_lock);
+ const int discretization = _args.get<int>("discretization", 3, &_lock);
+ const Real randomness = _args.get<Real>("randomness", 4, &_lock);
+ const bool reset = _args.getOpt<bool>("reset", 5, false, &_lock);
+ const bool refillEmpty = _args.getOpt<bool>("refillEmpty", 6, false, &_lock);
+ const LevelsetGrid *exclude = _args.getPtrOpt<LevelsetGrid>("exclude", 7, NULL, &_lock);
+ _retval = getPyNone();
+ sampleShapeWithParticles(
+ shape, flags, parts, discretization, randomness, reset, refillEmpty, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "sampleShapeWithParticles", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("sampleShapeWithParticles", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_sampleShapeWithParticles("", "sampleShapeWithParticles", _W_2);
+extern "C" {
+void PbRegister_sampleShapeWithParticles()
+{
+ KEEP_UNUSED(_RP_sampleShapeWithParticles);
+}
+}
+
+//! mark fluid cells and helpers
+//! Kernel: turn every fluid cell back into an empty cell (set TypeEmpty,
+//! clear TypeFluid) so the fluid flags can be rebuilt from particle positions.
+struct knClearFluidFlags : public KernelBase {
+ // "dummy" is unused; it exists only to match the generated kernel signature
+ knClearFluidFlags(FlagGrid &flags, int dummy = 0)
+ : KernelBase(&flags, 0), flags(flags), dummy(dummy)
+ {
+ runMessage();
+ run();
+ }
+ // per-cell op: swap the fluid bit for the empty bit
+ inline void op(int i, int j, int k, FlagGrid &flags, int dummy = 0) const
+ {
+ if (flags.isFluid(i, j, k)) {
+ flags(i, j, k) = (flags(i, j, k) | FlagGrid::TypeEmpty) & ~FlagGrid::TypeFluid;
+ }
+ }
+ // boilerplate accessors used by the generated wrapper/registry code
+ inline FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline int &getArg1()
+ {
+ return dummy;
+ }
+ typedef int type1;
+ // debug print of kernel name and iteration range
+ void runMessage()
+ {
+ debMsg("Executing kernel knClearFluidFlags ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ // TBB body: 3D grids are parallelized over k slices, 2D grids (maxZ==1)
+ // over j rows with k fixed to 0
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, dummy);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, dummy);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ FlagGrid &flags;
+ int dummy;
+};
+
+//! Kernel for second-order obstacle boundaries: writes into "nflags" (a copy
+//! of "flags"). An empty cell inside the obstacle band (phiObs <= 0) is
+//! re-marked as fluid when it has a fluid neighbor on one side and the cell
+//! mirrored across it is also inside the obstacle (phiObs <= 0).
+struct knSetNbObstacle : public KernelBase {
+ knSetNbObstacle(FlagGrid &nflags, const FlagGrid &flags, const Grid<Real> &phiObs)
+ : KernelBase(&nflags, 1), nflags(nflags), flags(flags), phiObs(phiObs)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(
+ int i, int j, int k, FlagGrid &nflags, const FlagGrid &flags, const Grid<Real> &phiObs) const
+ {
+ // only act inside the obstacle level set
+ if (phiObs(i, j, k) > 0.)
+ return;
+ if (flags.isEmpty(i, j, k)) {
+ bool set = false;
+ // check each axis: fluid on one side, obstacle interior on the other
+ if ((flags.isFluid(i - 1, j, k)) && (phiObs(i + 1, j, k) <= 0.))
+ set = true;
+ if ((flags.isFluid(i + 1, j, k)) && (phiObs(i - 1, j, k) <= 0.))
+ set = true;
+ if ((flags.isFluid(i, j - 1, k)) && (phiObs(i, j + 1, k) <= 0.))
+ set = true;
+ if ((flags.isFluid(i, j + 1, k)) && (phiObs(i, j - 1, k) <= 0.))
+ set = true;
+ if (flags.is3D()) {
+ if ((flags.isFluid(i, j, k - 1)) && (phiObs(i, j, k + 1) <= 0.))
+ set = true;
+ if ((flags.isFluid(i, j, k + 1)) && (phiObs(i, j, k - 1) <= 0.))
+ set = true;
+ }
+ if (set)
+ nflags(i, j, k) = (flags(i, j, k) | FlagGrid::TypeFluid) & ~FlagGrid::TypeEmpty;
+ }
+ }
+ // boilerplate accessors used by the generated wrapper/registry code
+ inline FlagGrid &getArg0()
+ {
+ return nflags;
+ }
+ typedef FlagGrid type0;
+ inline const FlagGrid &getArg1()
+ {
+ return flags;
+ }
+ typedef FlagGrid type1;
+ inline const Grid<Real> &getArg2()
+ {
+ return phiObs;
+ }
+ typedef Grid<Real> type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSetNbObstacle ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ // TBB body; note the boundary of 1 cell (loops start at 1) since op()
+ // reads direct neighbors
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, nflags, flags, phiObs);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, nflags, flags, phiObs);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ FlagGrid &nflags;
+ const FlagGrid &flags;
+ const Grid<Real> &phiObs;
+};
+//! Rebuild the fluid cell flags from particle positions: first clears all
+//! fluid flags, then marks every empty in-bounds cell containing an active
+//! particle as fluid. Particles whose ptype matches "exclude" are skipped.
+//! If phiObs is given, a second pass (knSetNbObstacle) fixes up empty cells
+//! in the obstacle boundary region for second-order obstacle BCs.
+void markFluidCells(const BasicParticleSystem &parts,
+ FlagGrid &flags,
+ const Grid<Real> *phiObs = NULL,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0)
+{
+ // remove all fluid cells
+ knClearFluidFlags(flags, 0);
+
+ // mark all particles in flaggrid as fluid
+ for (IndexInt idx = 0; idx < parts.size(); idx++) {
+ if (!parts.isActive(idx) || (ptype && ((*ptype)[idx] & exclude)))
+ continue;
+ Vec3i p = toVec3i(parts.getPos(idx));
+ if (flags.isInBounds(p) && flags.isEmpty(p))
+ flags(p) = (flags(p) | FlagGrid::TypeFluid) & ~FlagGrid::TypeEmpty;
+ }
+
+ // special for second order obstacle BCs, check empty cells in boundary region
+ if (phiObs) {
+ // work on a copy so the neighbor checks see the unmodified flags
+ FlagGrid tmp(flags);
+ knSetNbObstacle(tmp, flags, *phiObs);
+ flags.swap(tmp);
+ }
+}
+// Generated Python wrapper for markFluidCells(); see _W_2 for the common
+// PbArgs/plugin-timing pattern. Returns Python None, or 0 (NULL) on error.
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "markFluidCells", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 1, &_lock);
+ const Grid<Real> *phiObs = _args.getPtrOpt<Grid<Real>>("phiObs", 2, NULL, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+ "ptype", 3, NULL, &_lock);
+ const int exclude = _args.getOpt<int>("exclude", 4, 0, &_lock);
+ _retval = getPyNone();
+ markFluidCells(parts, flags, phiObs, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "markFluidCells", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("markFluidCells", e.what());
+ return 0;
+ }
+}
+// static registration; KEEP_UNUSED keeps the object from being stripped
+static const Pb::Register _RP_markFluidCells("", "markFluidCells", _W_3);
+extern "C" {
+void PbRegister_markFluidCells()
+{
+ KEEP_UNUSED(_RP_markFluidCells);
+}
+}
+
+// for testing purposes only...
+// fills each cell with the distance of its integer index from the origin
+void testInitGridWithPos(Grid<Real> &grid)
+{
+ FOR_IJK(grid)
+ {
+ grid(i, j, k) = norm(Vec3(i, j, k));
+ }
+}
+// Generated Python wrapper for testInitGridWithPos(); see _W_2 for the common
+// PbArgs/plugin-timing pattern. Returns Python None, or 0 (NULL) on error.
+static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "testInitGridWithPos", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &grid = *_args.getPtr<Grid<Real>>("grid", 0, &_lock);
+ _retval = getPyNone();
+ testInitGridWithPos(grid);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "testInitGridWithPos", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("testInitGridWithPos", e.what());
+ return 0;
+ }
+}
+// static registration; KEEP_UNUSED keeps the object from being stripped
+static const Pb::Register _RP_testInitGridWithPos("", "testInitGridWithPos", _W_4);
+extern "C" {
+void PbRegister_testInitGridWithPos()
+{
+ KEEP_UNUSED(_RP_testInitGridWithPos);
+}
+}
+
+//! helper to calculate particle radius factor to cover the diagonal of a cell in 2d/3d
+inline Real calculateRadiusFactor(const Grid<Real> &grid, Real factor)
+{
+ // length of the cell diagonal (sqrt(3) in 3d, sqrt(2) in 2d),
+ // scaled by the requested factor plus a 1% safety margin
+ const Real diagonal = grid.is3D() ? sqrt(3.) : sqrt(2.);
+ return diagonal * (factor + .01);
+}
+
+//! re-sample particles based on an input levelset
+// optionally skip seeding new particles in "exclude" SDF
+//
+// Deletion pass: a particle is killed when it is out of the domain, outside
+// the liquid (phi > 0), deeper than the narrow band (if narrowBand > 0), or
+// when its cell already holds more than maxParticles and it is not near the
+// surface ("the particles are always right" at the surface).
+// Seeding pass: under-populated fluid cells away from the surface get new
+// particles at random sub-cell positions until minParticles is reached.
+
+void adjustNumber(BasicParticleSystem &parts,
+ const MACGrid &vel,
+ const FlagGrid &flags,
+ int minParticles,
+ int maxParticles,
+ const LevelsetGrid &phi,
+ Real radiusFactor = 1.,
+ Real narrowBand = -1.,
+ const Grid<Real> *exclude = NULL)
+{
+ // which levelset to use as threshold
+ const Real SURFACE_LS = -1.0 * calculateRadiusFactor(phi, radiusFactor);
+ // per-cell particle counter
+ Grid<int> tmp(vel.getParent());
+
+ // count particles in cells, and delete excess particles
+ for (IndexInt idx = 0; idx < (IndexInt)parts.size(); idx++) {
+ if (parts.isActive(idx)) {
+ Vec3i p = toVec3i(parts.getPos(idx));
+ if (!tmp.isInBounds(p)) {
+ parts.kill(idx); // out of domain, remove
+ continue;
+ }
+
+ Real phiv = phi.getInterpolated(parts.getPos(idx));
+ if (phiv > 0) {
+ parts.kill(idx);
+ continue;
+ }
+ if (narrowBand > 0. && phiv < -narrowBand) {
+ parts.kill(idx);
+ continue;
+ }
+
+ bool atSurface = false;
+ if (phiv > SURFACE_LS)
+ atSurface = true;
+ int num = tmp(p);
+
+ // dont delete particles in non fluid cells here, the particles are "always right"
+ if (num > maxParticles && (!atSurface)) {
+ parts.kill(idx);
+ }
+ else {
+ tmp(p) = num + 1;
+ }
+ }
+ }
+
+ // seed new particles
+ RandomStream mRand(9832);
+ FOR_IJK(tmp)
+ {
+ int cnt = tmp(i, j, k);
+
+ // skip cells near surface
+ if (phi(i, j, k) > SURFACE_LS)
+ continue;
+ if (narrowBand > 0. && phi(i, j, k) < -narrowBand) {
+ continue;
+ }
+ if (exclude && ((*exclude)(i, j, k) < 0.)) {
+ continue;
+ }
+
+ if (flags.isFluid(i, j, k) && cnt < minParticles) {
+ for (int m = cnt; m < minParticles; m++) {
+ Vec3 pos = Vec3(i, j, k) + mRand.getVec3();
+ // Vec3 pos (i + 0.5, j + 0.5, k + 0.5); // cell center
+ parts.addBuffered(pos);
+ }
+ }
+ }
+
+ // compact the killed slots, then append the newly seeded particles
+ parts.doCompress();
+ parts.insertBufferedParticles();
+}
+// Generated Python wrapper for adjustNumber(); see _W_2 for the common
+// PbArgs/plugin-timing pattern. Returns Python None, or 0 (NULL) on error.
+static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "adjustNumber", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 2, &_lock);
+ int minParticles = _args.get<int>("minParticles", 3, &_lock);
+ int maxParticles = _args.get<int>("maxParticles", 4, &_lock);
+ const LevelsetGrid &phi = *_args.getPtr<LevelsetGrid>("phi", 5, &_lock);
+ Real radiusFactor = _args.getOpt<Real>("radiusFactor", 6, 1., &_lock);
+ Real narrowBand = _args.getOpt<Real>("narrowBand", 7, -1., &_lock);
+ const Grid<Real> *exclude = _args.getPtrOpt<Grid<Real>>("exclude", 8, NULL, &_lock);
+ _retval = getPyNone();
+ adjustNumber(
+ parts, vel, flags, minParticles, maxParticles, phi, radiusFactor, narrowBand, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "adjustNumber", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("adjustNumber", e.what());
+ return 0;
+ }
+}
+// static registration; KEEP_UNUSED keeps the object from being stripped
+static const Pb::Register _RP_adjustNumber("", "adjustNumber", _W_5);
+extern "C" {
+void PbRegister_adjustNumber()
+{
+ KEEP_UNUSED(_RP_adjustNumber);
+}
+}
+
+// simple and slow helper conversion to show contents of int grids like a real grid in the ui
+// (use eg to quickly display contents of the particle-index grid)
+// copies each int cell into dest as a Real, optionally scaled by "factor"
+
+void debugIntToReal(const Grid<int> &source, Grid<Real> &dest, Real factor = 1.)
+{
+ FOR_IJK(source)
+ {
+ dest(i, j, k) = (Real)source(i, j, k) * factor;
+ }
+}
+// Generated Python wrapper for debugIntToReal(); see _W_2 for the common
+// PbArgs/plugin-timing pattern. Returns Python None, or 0 (NULL) on error.
+static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "debugIntToReal", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid<int> &source = *_args.getPtr<Grid<int>>("source", 0, &_lock);
+ Grid<Real> &dest = *_args.getPtr<Grid<Real>>("dest", 1, &_lock);
+ Real factor = _args.getOpt<Real>("factor", 2, 1., &_lock);
+ _retval = getPyNone();
+ debugIntToReal(source, dest, factor);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "debugIntToReal", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("debugIntToReal", e.what());
+ return 0;
+ }
+}
+// static registration; KEEP_UNUSED keeps the object from being stripped
+static const Pb::Register _RP_debugIntToReal("", "debugIntToReal", _W_6);
+extern "C" {
+void PbRegister_debugIntToReal()
+{
+ KEEP_UNUSED(_RP_debugIntToReal);
+}
+}
+
+// build a grid that contains indices for a particle system
+// the particles in a cell i,j,k are particles[index(i,j,k)] to particles[index(i+1,j,k)-1]
+// (ie, particles[index(i+1,j,k)] already belongs to cell i+1,j,k)
+//
+// Three passes: (1) count active in-bounds particles per cell, (2) exclusive
+// prefix sum over the counts to get per-cell start offsets, (3) scatter each
+// particle's source index into the compressed indexSys array. A scratch
+// counter grid can be passed in to avoid an allocation; otherwise one is
+// allocated and freed locally.
+
+void gridParticleIndex(const BasicParticleSystem &parts,
+ ParticleIndexSystem &indexSys,
+ const FlagGrid &flags,
+ Grid<int> &index,
+ Grid<int> *counter = NULL)
+{
+ bool delCounter = false;
+ if (!counter) {
+ counter = new Grid<int>(flags.getParent());
+ delCounter = true;
+ }
+ else {
+ counter->clear();
+ }
+
+ // count particles in cells, and delete excess particles
+ index.clear();
+ // "inactive" counts both deactivated and out-of-bounds particles, so that
+ // indexSys can be sized to exactly the number of indexed particles
+ int inactive = 0;
+ for (IndexInt idx = 0; idx < (IndexInt)parts.size(); idx++) {
+ if (parts.isActive(idx)) {
+ // check index for validity...
+ Vec3i p = toVec3i(parts.getPos(idx));
+ if (!index.isInBounds(p)) {
+ inactive++;
+ continue;
+ }
+
+ index(p)++;
+ }
+ else {
+ inactive++;
+ }
+ }
+
+ // note - this one might be smaller...
+ indexSys.resize(parts.size() - inactive);
+
+ // convert per cell number to continuous index
+ IndexInt idx = 0;
+ FOR_IJK(index)
+ {
+ int num = index(i, j, k);
+ index(i, j, k) = idx;
+ idx += num;
+ }
+
+ // add particles to indexed array, we still need a per cell particle counter
+ for (IndexInt idx = 0; idx < (IndexInt)parts.size(); idx++) {
+ if (!parts.isActive(idx))
+ continue;
+ Vec3i p = toVec3i(parts.getPos(idx));
+ if (!index.isInBounds(p)) {
+ continue;
+ }
+
+ // initialize position and index into original array
+ // indexSys[ index(p)+(*counter)(p) ].pos = parts[idx].pos;
+ indexSys[index(p) + (*counter)(p)].sourceIndex = idx;
+ (*counter)(p)++;
+ }
+
+ if (delCounter)
+ delete counter;
+}
+// Generated Python wrapper for gridParticleIndex(); see _W_2 for the common
+// PbArgs/plugin-timing pattern. Returns Python None, or 0 (NULL) on error.
+static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "gridParticleIndex", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ ParticleIndexSystem &indexSys = *_args.getPtr<ParticleIndexSystem>("indexSys", 1, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 2, &_lock);
+ Grid<int> &index = *_args.getPtr<Grid<int>>("index", 3, &_lock);
+ Grid<int> *counter = _args.getPtrOpt<Grid<int>>("counter", 4, NULL, &_lock);
+ _retval = getPyNone();
+ gridParticleIndex(parts, indexSys, flags, index, counter);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "gridParticleIndex", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("gridParticleIndex", e.what());
+ return 0;
+ }
+}
+// static registration; KEEP_UNUSED keeps the object from being stripped
+static const Pb::Register _RP_gridParticleIndex("", "gridParticleIndex", _W_7);
+extern "C" {
+void PbRegister_gridParticleIndex()
+{
+ KEEP_UNUSED(_RP_gridParticleIndex);
+}
+}
+
+//! Kernel: union-of-spheres particle level set. For each cell center, phi is
+//! the minimum of |x - p| - radius over all particles p found in the
+//! surrounding (2r+1)^3 cell neighborhood, starting from the "outside" value
+//! +radius. Nearby particles are found via the compressed per-cell particle
+//! index built by gridParticleIndex(). Particles whose ptype matches
+//! "exclude" are ignored.
+struct ComputeUnionLevelsetPindex : public KernelBase {
+ ComputeUnionLevelsetPindex(const Grid<int> &index,
+ const BasicParticleSystem &parts,
+ const ParticleIndexSystem &indexSys,
+ LevelsetGrid &phi,
+ const Real radius,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(&index, 0),
+ index(index),
+ parts(parts),
+ indexSys(indexSys),
+ phi(phi),
+ radius(radius),
+ ptype(ptype),
+ exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const Grid<int> &index,
+ const BasicParticleSystem &parts,
+ const ParticleIndexSystem &indexSys,
+ LevelsetGrid &phi,
+ const Real radius,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude) const
+ {
+ const Vec3 gridPos = Vec3(i, j, k) + Vec3(0.5); // shifted by half cell
+ Real phiv = radius * 1.0; // outside
+
+ // search window: enough cells to cover a sphere of the given radius
+ int r = int(radius) + 1;
+ int rZ = phi.is3D() ? r : 0;
+ for (int zj = k - rZ; zj <= k + rZ; zj++)
+ for (int yj = j - r; yj <= j + r; yj++)
+ for (int xj = i - r; xj <= i + r; xj++) {
+ if (!phi.isInBounds(Vec3i(xj, yj, zj)))
+ continue;
+
+ // note, for the particle indices in indexSys the access is periodic (ie, dont skip for
+ // eg inBounds(sx,10,10)
+ // particle range for this cell: [index(cell), index(next cell))
+ IndexInt isysIdxS = index.index(xj, yj, zj);
+ IndexInt pStart = index(isysIdxS), pEnd = 0;
+ if (phi.isInBounds(isysIdxS + 1))
+ pEnd = index(isysIdxS + 1);
+ else
+ pEnd = indexSys.size();
+
+ // now loop over particles in cell
+ for (IndexInt p = pStart; p < pEnd; ++p) {
+ const int psrc = indexSys[p].sourceIndex;
+ if (ptype && ((*ptype)[psrc] & exclude))
+ continue;
+ const Vec3 pos = parts[psrc].pos;
+ // union of spheres: keep the smallest signed distance
+ phiv = std::min(phiv, fabs(norm(gridPos - pos)) - radius);
+ }
+ }
+ phi(i, j, k) = phiv;
+ }
+ // boilerplate accessors used by the generated wrapper/registry code
+ inline const Grid<int> &getArg0()
+ {
+ return index;
+ }
+ typedef Grid<int> type0;
+ inline const BasicParticleSystem &getArg1()
+ {
+ return parts;
+ }
+ typedef BasicParticleSystem type1;
+ inline const ParticleIndexSystem &getArg2()
+ {
+ return indexSys;
+ }
+ typedef ParticleIndexSystem type2;
+ inline LevelsetGrid &getArg3()
+ {
+ return phi;
+ }
+ typedef LevelsetGrid type3;
+ inline const Real &getArg4()
+ {
+ return radius;
+ }
+ typedef Real type4;
+ inline const ParticleDataImpl<int> *getArg5()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type5;
+ inline const int &getArg6()
+ {
+ return exclude;
+ }
+ typedef int type6;
+ void runMessage()
+ {
+ debMsg("Executing kernel ComputeUnionLevelsetPindex ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ // TBB body: 3D grids parallelized over k slices, 2D over j rows
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, index, parts, indexSys, phi, radius, ptype, exclude);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, index, parts, indexSys, phi, radius, ptype, exclude);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const Grid<int> &index;
+ const BasicParticleSystem &parts;
+ const ParticleIndexSystem &indexSys;
+ LevelsetGrid &phi;
+ const Real radius;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+
+//! Build a union-of-spheres level set from the particles (see
+//! ComputeUnionLevelsetPindex) and clamp the grid boundary afterwards.
+//! Note: "flags" is unused here; it is kept for interface consistency with
+//! the other particle level set plugins.
+void unionParticleLevelset(const BasicParticleSystem &parts,
+ const ParticleIndexSystem &indexSys,
+ const FlagGrid &flags,
+ const Grid<int> &index,
+ LevelsetGrid &phi,
+ const Real radiusFactor = 1.,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0)
+{
+ // use half a cell diagonal as base radius
+ const Real radius = 0.5 * calculateRadiusFactor(phi, radiusFactor);
+ // no reset of phi necessary here
+ ComputeUnionLevelsetPindex(index, parts, indexSys, phi, radius, ptype, exclude);
+
+ phi.setBound(0.5, 0);
+}
+// Generated Python wrapper for unionParticleLevelset(); see _W_2 for the
+// common PbArgs/plugin-timing pattern. Returns Python None, or 0 (NULL) on error.
+static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "unionParticleLevelset", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ const ParticleIndexSystem &indexSys = *_args.getPtr<ParticleIndexSystem>(
+ "indexSys", 1, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 2, &_lock);
+ const Grid<int> &index = *_args.getPtr<Grid<int>>("index", 3, &_lock);
+ LevelsetGrid &phi = *_args.getPtr<LevelsetGrid>("phi", 4, &_lock);
+ const Real radiusFactor = _args.getOpt<Real>("radiusFactor", 5, 1., &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+ "ptype", 6, NULL, &_lock);
+ const int exclude = _args.getOpt<int>("exclude", 7, 0, &_lock);
+ _retval = getPyNone();
+ unionParticleLevelset(parts, indexSys, flags, index, phi, radiusFactor, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "unionParticleLevelset", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("unionParticleLevelset", e.what());
+ return 0;
+ }
+}
+// static registration; KEEP_UNUSED keeps the object from being stripped
+static const Pb::Register _RP_unionParticleLevelset("", "unionParticleLevelset", _W_8);
+extern "C" {
+void PbRegister_unionParticleLevelset()
+{
+ KEEP_UNUSED(_RP_unionParticleLevelset);
+}
+}
+
+//! kernel for computing averaged particle level set weights
+
+struct ComputeAveragedLevelsetWeight : public KernelBase {
+ ComputeAveragedLevelsetWeight(const BasicParticleSystem &parts,
+ const Grid<int> &index,
+ const ParticleIndexSystem &indexSys,
+ LevelsetGrid &phi,
+ const Real radius,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude,
+ Grid<Vec3> *save_pAcc = NULL,
+ Grid<Real> *save_rAcc = NULL)
+ : KernelBase(&index, 0),
+ parts(parts),
+ index(index),
+ indexSys(indexSys),
+ phi(phi),
+ radius(radius),
+ ptype(ptype),
+ exclude(exclude),
+ save_pAcc(save_pAcc),
+ save_rAcc(save_rAcc)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const BasicParticleSystem &parts,
+ const Grid<int> &index,
+ const ParticleIndexSystem &indexSys,
+ LevelsetGrid &phi,
+ const Real radius,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude,
+ Grid<Vec3> *save_pAcc = NULL,
+ Grid<Real> *save_rAcc = NULL) const
+ {
+ const Vec3 gridPos = Vec3(i, j, k) + Vec3(0.5); // shifted by half cell
+ Real phiv = radius * 1.0; // outside
+
+ // loop over neighborhood, similar to ComputeUnionLevelsetPindex
+ const Real sradiusInv = 1. / (4. * radius * radius);
+ int r = int(1. * radius) + 1;
+ int rZ = phi.is3D() ? r : 0;
+ // accumulators
+ Real wacc = 0.;
+ Vec3 pacc = Vec3(0.);
+ Real racc = 0.;
+
+ for (int zj = k - rZ; zj <= k + rZ; zj++)
+ for (int yj = j - r; yj <= j + r; yj++)
+ for (int xj = i - r; xj <= i + r; xj++) {
+ if (!phi.isInBounds(Vec3i(xj, yj, zj)))
+ continue;
+
+ IndexInt isysIdxS = index.index(xj, yj, zj);
+ IndexInt pStart = index(isysIdxS), pEnd = 0;
+ if (phi.isInBounds(isysIdxS + 1))
+ pEnd = index(isysIdxS + 1);
+ else
+ pEnd = indexSys.size();
+ for (IndexInt p = pStart; p < pEnd; ++p) {
+ IndexInt psrc = indexSys[p].sourceIndex;
+ if (ptype && ((*ptype)[psrc] & exclude))
+ continue;
+
+ Vec3 pos = parts[psrc].pos;
+ Real s = normSquare(gridPos - pos) * sradiusInv;
+ // Real w = std::max(0., cubed(1.-s) );
+ Real w = std::max(0., (1. - s)); // a bit smoother
+ wacc += w;
+ racc += radius * w;
+ pacc += pos * w;
+ }
+ }
+
+ if (wacc > VECTOR_EPSILON) {
+ racc /= wacc;
+ pacc /= wacc;
+ phiv = fabs(norm(gridPos - pacc)) - racc;
+
+ if (save_pAcc)
+ (*save_pAcc)(i, j, k) = pacc;
+ if (save_rAcc)
+ (*save_rAcc)(i, j, k) = racc;
+ }
+ phi(i, j, k) = phiv;
+ }
+ inline const BasicParticleSystem &getArg0()
+ {
+ return parts;
+ }
+ typedef BasicParticleSystem type0;
+ inline const Grid<int> &getArg1()
+ {
+ return index;
+ }
+ typedef Grid<int> type1;
+ inline const ParticleIndexSystem &getArg2()
+ {
+ return indexSys;
+ }
+ typedef ParticleIndexSystem type2;
+ inline LevelsetGrid &getArg3()
+ {
+ return phi;
+ }
+ typedef LevelsetGrid type3;
+ inline const Real &getArg4()
+ {
+ return radius;
+ }
+ typedef Real type4;
+ inline const ParticleDataImpl<int> *getArg5()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type5;
+ inline const int &getArg6()
+ {
+ return exclude;
+ }
+ typedef int type6;
+ inline Grid<Vec3> *getArg7()
+ {
+ return save_pAcc;
+ }
+ typedef Grid<Vec3> type7;
+ inline Grid<Real> *getArg8()
+ {
+ return save_rAcc;
+ }
+ typedef Grid<Real> type8;
+ void runMessage()
+ {
+ debMsg("Executing kernel ComputeAveragedLevelsetWeight ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, parts, index, indexSys, phi, radius, ptype, exclude, save_pAcc, save_rAcc);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, parts, index, indexSys, phi, radius, ptype, exclude, save_pAcc, save_rAcc);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const BasicParticleSystem &parts;
+ const Grid<int> &index;
+ const ParticleIndexSystem &indexSys;
+ LevelsetGrid &phi;
+ const Real radius;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+ Grid<Vec3> *save_pAcc;
+ Grid<Real> *save_rAcc;
+};
+
+//! helper used by the smoothing kernels; currently just forwards the cell
+//! value (the "center" argument is accepted for interface symmetry but unused)
+// note: the grid is taken by const reference — the previous by-value parameter
+// deep-copied the entire grid on every call
+template<class T> T smoothingValue(const Grid<T> &val, int i, int j, int k, T center)
+{
+ return val(i, j, k);
+}
+
+// smoothing, and
+
+// smoothing, and
+
+//! Kernel: box-style smoothing; tmp(i,j,k) = factor * sum of the cell and its
+//! 4 (2D) / 6 (3D) direct neighbors. Callers pass factor = 1/5 resp. 1/7 to
+//! obtain the average; results are written to "tmp" (swap afterwards).
+template<class T> struct knSmoothGrid : public KernelBase {
+ knSmoothGrid(const Grid<T> &me, Grid<T> &tmp, Real factor)
+ : KernelBase(&me, 1), me(me), tmp(tmp), factor(factor)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const Grid<T> &me, Grid<T> &tmp, Real factor) const
+ {
+ T val = me(i, j, k) + me(i + 1, j, k) + me(i - 1, j, k) + me(i, j + 1, k) + me(i, j - 1, k);
+ if (me.is3D()) {
+ val += me(i, j, k + 1) + me(i, j, k - 1);
+ }
+ tmp(i, j, k) = val * factor;
+ }
+ // boilerplate accessors used by the generated wrapper/registry code
+ inline const Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline Grid<T> &getArg1()
+ {
+ return tmp;
+ }
+ typedef Grid<T> type1;
+ inline Real &getArg2()
+ {
+ return factor;
+ }
+ typedef Real type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSmoothGrid ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ // TBB body; boundary of 1 cell since op() reads direct neighbors
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, me, tmp, factor);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, me, tmp, factor);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const Grid<T> &me;
+ Grid<T> &tmp;
+ Real factor;
+};
+
+//! Kernel: one-sided smoothing — the neighbor average is taken only where it
+//! DECREASES the value (val < tmp), otherwise the original cell value is
+//! kept. Used to smooth a level set without eroding its inside (negative)
+//! region. Note: reads tmp(i,j,k) as the comparison reference, so "tmp"
+//! should hold the previous smoothing result when called after knSmoothGrid.
+template<class T> struct knSmoothGridNeg : public KernelBase {
+ knSmoothGridNeg(const Grid<T> &me, Grid<T> &tmp, Real factor)
+ : KernelBase(&me, 1), me(me), tmp(tmp), factor(factor)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const Grid<T> &me, Grid<T> &tmp, Real factor) const
+ {
+ T val = me(i, j, k) + me(i + 1, j, k) + me(i - 1, j, k) + me(i, j + 1, k) + me(i, j - 1, k);
+ if (me.is3D()) {
+ val += me(i, j, k + 1) + me(i, j, k - 1);
+ }
+ val *= factor;
+ if (val < tmp(i, j, k))
+ tmp(i, j, k) = val;
+ else
+ tmp(i, j, k) = me(i, j, k);
+ }
+ // boilerplate accessors used by the generated wrapper/registry code
+ inline const Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline Grid<T> &getArg1()
+ {
+ return tmp;
+ }
+ typedef Grid<T> type1;
+ inline Real &getArg2()
+ {
+ return factor;
+ }
+ typedef Real type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSmoothGridNeg ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ // TBB body; boundary of 1 cell since op() reads direct neighbors
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, me, tmp, factor);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, me, tmp, factor);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const Grid<T> &me;
+ Grid<T> &tmp;
+ Real factor;
+};
+
+//! Zhu & Bridson particle level set creation
+
+void averagedParticleLevelset(const BasicParticleSystem &parts,
+ const ParticleIndexSystem &indexSys,
+ const FlagGrid &flags,
+ const Grid<int> &index,
+ LevelsetGrid &phi,
+ const Real radiusFactor = 1.,
+ const int smoothen = 1,
+ const int smoothenNeg = 1,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0)
+{
+ // use half a cell diagonal as base radius
+ const Real radius = 0.5 * calculateRadiusFactor(phi, radiusFactor);
+ ComputeAveragedLevelsetWeight(parts, index, indexSys, phi, radius, ptype, exclude);
+
+ // post-process level-set
+ for (int i = 0; i < std::max(smoothen, smoothenNeg); ++i) {
+ LevelsetGrid tmp(flags.getParent());
+ if (i < smoothen) {
+ knSmoothGrid<Real>(phi, tmp, 1. / (phi.is3D() ? 7. : 5.));
+ phi.swap(tmp);
+ }
+ if (i < smoothenNeg) {
+ knSmoothGridNeg<Real>(phi, tmp, 1. / (phi.is3D() ? 7. : 5.));
+ phi.swap(tmp);
+ }
+ }
+ phi.setBound(0.5, 0);
+}
+// Generated Python wrapper for averagedParticleLevelset(); see _W_2 for the
+// common PbArgs/plugin-timing pattern. Returns Python None, or 0 (NULL) on error.
+static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "averagedParticleLevelset", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ const ParticleIndexSystem &indexSys = *_args.getPtr<ParticleIndexSystem>(
+ "indexSys", 1, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 2, &_lock);
+ const Grid<int> &index = *_args.getPtr<Grid<int>>("index", 3, &_lock);
+ LevelsetGrid &phi = *_args.getPtr<LevelsetGrid>("phi", 4, &_lock);
+ const Real radiusFactor = _args.getOpt<Real>("radiusFactor", 5, 1., &_lock);
+ const int smoothen = _args.getOpt<int>("smoothen", 6, 1, &_lock);
+ const int smoothenNeg = _args.getOpt<int>("smoothenNeg", 7, 1, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+ "ptype", 8, NULL, &_lock);
+ const int exclude = _args.getOpt<int>("exclude", 9, 0, &_lock);
+ _retval = getPyNone();
+ averagedParticleLevelset(
+ parts, indexSys, flags, index, phi, radiusFactor, smoothen, smoothenNeg, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "averagedParticleLevelset", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("averagedParticleLevelset", e.what());
+ return 0;
+ }
+}
+// static registration; KEEP_UNUSED keeps the object from being stripped
+static const Pb::Register _RP_averagedParticleLevelset("", "averagedParticleLevelset", _W_9);
+extern "C" {
+void PbRegister_averagedParticleLevelset()
+{
+ KEEP_UNUSED(_RP_averagedParticleLevelset);
+}
+}
+
+//! kernel for improvedParticleLevelset
+
+struct correctLevelset : public KernelBase {
+ correctLevelset(LevelsetGrid &phi,
+ const Grid<Vec3> &pAcc,
+ const Grid<Real> &rAcc,
+ const Real radius,
+ const Real t_low,
+ const Real t_high)
+ : KernelBase(&phi, 1),
+ phi(phi),
+ pAcc(pAcc),
+ rAcc(rAcc),
+ radius(radius),
+ t_low(t_low),
+ t_high(t_high)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ LevelsetGrid &phi,
+ const Grid<Vec3> &pAcc,
+ const Grid<Real> &rAcc,
+ const Real radius,
+ const Real t_low,
+ const Real t_high) const
+ {
+ if (rAcc(i, j, k) <= VECTOR_EPSILON)
+ return; // outside nothing happens
+ Real x = pAcc(i, j, k).x;
+
+ // create jacobian of pAcc via central differences
+ Matrix3x3f jacobian = Matrix3x3f(0.5 * (pAcc(i + 1, j, k).x - pAcc(i - 1, j, k).x),
+ 0.5 * (pAcc(i, j + 1, k).x - pAcc(i, j - 1, k).x),
+ 0.5 * (pAcc(i, j, k + 1).x - pAcc(i, j, k - 1).x),
+ 0.5 * (pAcc(i + 1, j, k).y - pAcc(i - 1, j, k).y),
+ 0.5 * (pAcc(i, j + 1, k).y - pAcc(i, j - 1, k).y),
+ 0.5 * (pAcc(i, j, k + 1).y - pAcc(i, j, k - 1).y),
+ 0.5 * (pAcc(i + 1, j, k).z - pAcc(i - 1, j, k).z),
+ 0.5 * (pAcc(i, j + 1, k).z - pAcc(i, j - 1, k).z),
+ 0.5 * (pAcc(i, j, k + 1).z - pAcc(i, j, k - 1).z));
+
+ // compute largest eigenvalue of jacobian
+ Vec3 EV = jacobian.eigenvalues();
+ Real maxEV = std::max(std::max(EV.x, EV.y), EV.z);
+
+ // calculate correction factor
+ Real correction = 1;
+ if (maxEV >= t_low) {
+ Real t = (t_high - maxEV) / (t_high - t_low);
+ correction = t * t * t - 3 * t * t + 3 * t;
+ }
+ correction = (correction < 0) ?
+ 0 :
+ correction; // enforce correction factor to [0,1] (not explicitly in paper)
+
+ const Vec3 gridPos = Vec3(i, j, k) + Vec3(0.5); // shifted by half cell
+ const Real correctedPhi = fabs(norm(gridPos - pAcc(i, j, k))) - rAcc(i, j, k) * correction;
+ phi(i, j, k) = (correctedPhi > radius) ?
+ radius :
+ correctedPhi; // adjust too high outside values when too few particles are
+ // nearby to make smoothing possible (not in paper)
+ }
+ inline LevelsetGrid &getArg0()
+ {
+ return phi;
+ }
+ typedef LevelsetGrid type0;
+ inline const Grid<Vec3> &getArg1()
+ {
+ return pAcc;
+ }
+ typedef Grid<Vec3> type1;
+ inline const Grid<Real> &getArg2()
+ {
+ return rAcc;
+ }
+ typedef Grid<Real> type2;
+ inline const Real &getArg3()
+ {
+ return radius;
+ }
+ typedef Real type3;
+ inline const Real &getArg4()
+ {
+ return t_low;
+ }
+ typedef Real type4;
+ inline const Real &getArg5()
+ {
+ return t_high;
+ }
+ typedef Real type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel correctLevelset ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, phi, pAcc, rAcc, radius, t_low, t_high);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, phi, pAcc, rAcc, radius, t_low, t_high);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ LevelsetGrid &phi;
+ const Grid<Vec3> &pAcc;
+ const Grid<Real> &rAcc;
+ const Real radius;
+ const Real t_low;
+ const Real t_high;
+};
+
+//! Approach from "A unified particle model for fluid-solid interactions" by Solenthaler et al. in
+//! 2007
+// Builds a surface levelset from particles: averaged-weight union of spheres,
+// eigenvalue-based correction (correctLevelset), then optional smoothing passes.
+// radiusFactor scales the per-particle radius; smoothen/smoothenNeg control the
+// number of (outside/inside-biased) smoothing iterations; t_low/t_high are the
+// correction thresholds; ptype/exclude optionally skip flagged particles.
+
+void improvedParticleLevelset(const BasicParticleSystem &parts,
+ const ParticleIndexSystem &indexSys,
+ const FlagGrid &flags,
+ const Grid<int> &index,
+ LevelsetGrid &phi,
+ const Real radiusFactor = 1.,
+ const int smoothen = 1,
+ const int smoothenNeg = 1,
+ const Real t_low = 0.4,
+ const Real t_high = 3.5,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0)
+{
+ // create temporary grids to store values from levelset weight computation
+ Grid<Vec3> save_pAcc(flags.getParent());
+ Grid<Real> save_rAcc(flags.getParent());
+
+ const Real radius = 0.5 * calculateRadiusFactor(
+ phi, radiusFactor); // use half a cell diagonal as base radius
+ ComputeAveragedLevelsetWeight(
+ parts, index, indexSys, phi, radius, ptype, exclude, &save_pAcc, &save_rAcc);
+ correctLevelset(phi, save_pAcc, save_rAcc, radius, t_low, t_high);
+
+ // post-process level-set
+ // smoothing kernels write into tmp, results are swapped back into phi
+ for (int i = 0; i < std::max(smoothen, smoothenNeg); ++i) {
+ LevelsetGrid tmp(flags.getParent());
+ if (i < smoothen) {
+ knSmoothGrid<Real>(phi, tmp, 1. / (phi.is3D() ? 7. : 5.));
+ phi.swap(tmp);
+ }
+ if (i < smoothenNeg) {
+ knSmoothGridNeg<Real>(phi, tmp, 1. / (phi.is3D() ? 7. : 5.));
+ phi.swap(tmp);
+ }
+ }
+ phi.setBound(0.5, 0);
+}
+// Auto-generated Python wrapper for improvedParticleLevelset.
+static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "improvedParticleLevelset", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ const ParticleIndexSystem &indexSys = *_args.getPtr<ParticleIndexSystem>(
+ "indexSys", 1, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 2, &_lock);
+ const Grid<int> &index = *_args.getPtr<Grid<int>>("index", 3, &_lock);
+ LevelsetGrid &phi = *_args.getPtr<LevelsetGrid>("phi", 4, &_lock);
+ const Real radiusFactor = _args.getOpt<Real>("radiusFactor", 5, 1., &_lock);
+ const int smoothen = _args.getOpt<int>("smoothen", 6, 1, &_lock);
+ const int smoothenNeg = _args.getOpt<int>("smoothenNeg", 7, 1, &_lock);
+ const Real t_low = _args.getOpt<Real>("t_low", 8, 0.4, &_lock);
+ const Real t_high = _args.getOpt<Real>("t_high", 9, 3.5, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+ "ptype", 10, NULL, &_lock);
+ const int exclude = _args.getOpt<int>("exclude", 11, 0, &_lock);
+ _retval = getPyNone();
+ improvedParticleLevelset(parts,
+ indexSys,
+ flags,
+ index,
+ phi,
+ radiusFactor,
+ smoothen,
+ smoothenNeg,
+ t_low,
+ t_high,
+ ptype,
+ exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "improvedParticleLevelset", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("improvedParticleLevelset", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_improvedParticleLevelset("", "improvedParticleLevelset", _W_10);
+extern "C" {
+void PbRegister_improvedParticleLevelset()
+{
+ KEEP_UNUSED(_RP_improvedParticleLevelset);
+}
+}
+
+// Per-particle kernel: moves particles that are inside the obstacle levelset
+// (phiObs < thresh) back outside along the obstacle's gradient, plus an extra
+// 'shift' distance. Particles outside the grid or excluded via ptype are skipped.
+struct knPushOutofObs : public KernelBase {
+ knPushOutofObs(BasicParticleSystem &parts,
+ const FlagGrid &flags,
+ const Grid<Real> &phiObs,
+ const Real shift,
+ const Real thresh,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(parts.size()),
+ parts(parts),
+ flags(flags),
+ phiObs(phiObs),
+ shift(shift),
+ thresh(thresh),
+ ptype(ptype),
+ exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ BasicParticleSystem &parts,
+ const FlagGrid &flags,
+ const Grid<Real> &phiObs,
+ const Real shift,
+ const Real thresh,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude) const
+ {
+ if (!parts.isActive(idx) || (ptype && ((*ptype)[idx] & exclude)))
+ return;
+ Vec3i p = toVec3i(parts.getPos(idx));
+
+ if (!flags.isInBounds(p))
+ return;
+ Real v = phiObs.getInterpolated(parts.getPos(idx));
+ if (v < thresh) {
+ // push along (normalized) obstacle gradient; degenerate gradients are skipped
+ Vec3 grad = getGradient(phiObs, p.x, p.y, p.z);
+ if (normalize(grad) < VECTOR_EPSILON)
+ return;
+ parts.setPos(idx, parts.getPos(idx) + grad * (thresh - v + shift));
+ }
+ }
+ inline BasicParticleSystem &getArg0()
+ {
+ return parts;
+ }
+ typedef BasicParticleSystem type0;
+ inline const FlagGrid &getArg1()
+ {
+ return flags;
+ }
+ typedef FlagGrid type1;
+ inline const Grid<Real> &getArg2()
+ {
+ return phiObs;
+ }
+ typedef Grid<Real> type2;
+ inline const Real &getArg3()
+ {
+ return shift;
+ }
+ typedef Real type3;
+ inline const Real &getArg4()
+ {
+ return thresh;
+ }
+ typedef Real type4;
+ inline const ParticleDataImpl<int> *getArg5()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type5;
+ inline const int &getArg6()
+ {
+ return exclude;
+ }
+ typedef int type6;
+ void runMessage()
+ {
+ debMsg("Executing kernel knPushOutofObs ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, parts, flags, phiObs, shift, thresh, ptype, exclude);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ BasicParticleSystem &parts;
+ const FlagGrid &flags;
+ const Grid<Real> &phiObs;
+ const Real shift;
+ const Real thresh;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+//! push particles out of obstacle levelset
+// Thin plugin entry point; all work happens in knPushOutofObs (see above).
+
+void pushOutofObs(BasicParticleSystem &parts,
+ const FlagGrid &flags,
+ const Grid<Real> &phiObs,
+ const Real shift = 0,
+ const Real thresh = 0,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0)
+{
+ knPushOutofObs(parts, flags, phiObs, shift, thresh, ptype, exclude);
+}
+// Auto-generated Python wrapper for pushOutofObs.
+static PyObject *_W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "pushOutofObs", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 1, &_lock);
+ const Grid<Real> &phiObs = *_args.getPtr<Grid<Real>>("phiObs", 2, &_lock);
+ const Real shift = _args.getOpt<Real>("shift", 3, 0, &_lock);
+ const Real thresh = _args.getOpt<Real>("thresh", 4, 0, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+ "ptype", 5, NULL, &_lock);
+ const int exclude = _args.getOpt<int>("exclude", 6, 0, &_lock);
+ _retval = getPyNone();
+ pushOutofObs(parts, flags, phiObs, shift, thresh, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "pushOutofObs", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("pushOutofObs", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_pushOutofObs("", "pushOutofObs", _W_11);
+extern "C" {
+void PbRegister_pushOutofObs()
+{
+ KEEP_UNUSED(_RP_pushOutofObs);
+}
+}
+
+//******************************************************************************
+// grid interpolation functions
+
+// Element-wise me[idx] /= other[idx], but cells whose divisor is below
+// 'cutoff' are set to zero instead of dividing (avoids blow-up from
+// near-zero accumulation weights).
+template<class T> struct knSafeDivReal : public KernelBase {
+ knSafeDivReal(Grid<T> &me, const Grid<Real> &other, Real cutoff = VECTOR_EPSILON)
+ : KernelBase(&me, 0), me(me), other(other), cutoff(cutoff)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ Grid<T> &me,
+ const Grid<Real> &other,
+ Real cutoff = VECTOR_EPSILON) const
+ {
+ if (other[idx] < cutoff) {
+ me[idx] = 0.;
+ }
+ else {
+ // broadcast the scalar divisor to T (e.g. Vec3) before dividing
+ T div(other[idx]);
+ me[idx] = safeDivide(me[idx], div);
+ }
+ }
+ inline Grid<T> &getArg0()
+ {
+ return me;
+ }
+ typedef Grid<T> type0;
+ inline const Grid<Real> &getArg1()
+ {
+ return other;
+ }
+ typedef Grid<Real> type1;
+ inline Real &getArg2()
+ {
+ return cutoff;
+ }
+ typedef Real type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSafeDivReal ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, me, other, cutoff);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<T> &me;
+ const Grid<Real> &other;
+ Real cutoff;
+};
+
+// Set velocities on the grid from the particle system
+
+// Scatters per-particle velocities onto the MAC grid with linear interpolation;
+// 'tmp' accumulates the interpolation weights for a later normalization pass.
+// NOTE: run() is intentionally serial (no tbb) — concurrent setInterpolated
+// scatter-writes into vel/tmp would race.
+struct knMapLinearVec3ToMACGrid : public KernelBase {
+ knMapLinearVec3ToMACGrid(const BasicParticleSystem &p,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ Grid<Vec3> &tmp,
+ const ParticleDataImpl<Vec3> &pvel,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(p.size()),
+ p(p),
+ flags(flags),
+ vel(vel),
+ tmp(tmp),
+ pvel(pvel),
+ ptype(ptype),
+ exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystem &p,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ Grid<Vec3> &tmp,
+ const ParticleDataImpl<Vec3> &pvel,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ {
+ unusedParameter(flags);
+ if (!p.isActive(idx) || (ptype && ((*ptype)[idx] & exclude)))
+ return;
+ vel.setInterpolated(p[idx].pos, pvel[idx], &tmp[0]);
+ }
+ inline const BasicParticleSystem &getArg0()
+ {
+ return p;
+ }
+ typedef BasicParticleSystem type0;
+ inline const FlagGrid &getArg1()
+ {
+ return flags;
+ }
+ typedef FlagGrid type1;
+ inline const MACGrid &getArg2()
+ {
+ return vel;
+ }
+ typedef MACGrid type2;
+ inline Grid<Vec3> &getArg3()
+ {
+ return tmp;
+ }
+ typedef Grid<Vec3> type3;
+ inline const ParticleDataImpl<Vec3> &getArg4()
+ {
+ return pvel;
+ }
+ typedef ParticleDataImpl<Vec3> type4;
+ inline const ParticleDataImpl<int> *getArg5()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type5;
+ inline const int &getArg6()
+ {
+ return exclude;
+ }
+ typedef int type6;
+ void runMessage()
+ {
+ debMsg("Executing kernel knMapLinearVec3ToMACGrid ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void run()
+ {
+ const IndexInt _sz = size;
+ for (IndexInt i = 0; i < _sz; i++)
+ op(i, p, flags, vel, tmp, pvel, ptype, exclude);
+ }
+ const BasicParticleSystem &p;
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ Grid<Vec3> &tmp;
+ const ParticleDataImpl<Vec3> &pvel;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+
+// optionally , this function can use an existing vec3 grid to store the weights
+// this is useful in combination with the simple extrapolation function
+// Particle -> MAC grid transfer: scatter weighted velocities, normalize by the
+// accumulated weights, and keep a copy of the result in velOld (for FLIP).
+// NOTE(review): manual new/delete with the freeTmp flag is legacy style; a
+// scoped owner would be safer, but matches the upstream generated code.
+
+void mapPartsToMAC(const FlagGrid &flags,
+ MACGrid &vel,
+ MACGrid &velOld,
+ const BasicParticleSystem &parts,
+ const ParticleDataImpl<Vec3> &partVel,
+ Grid<Vec3> *weight = NULL,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0)
+{
+ // interpol -> grid. tmpgrid for particle contribution weights
+ bool freeTmp = false;
+ if (!weight) {
+ weight = new Grid<Vec3>(flags.getParent());
+ freeTmp = true;
+ }
+ else {
+ weight->clear(); // make sure we start with a zero grid!
+ }
+ vel.clear();
+ knMapLinearVec3ToMACGrid(parts, flags, vel, *weight, partVel, ptype, exclude);
+
+ // stomp small values in weight to zero to prevent roundoff errors
+ weight->stomp(Vec3(VECTOR_EPSILON));
+ vel.safeDivide(*weight);
+
+ // store original state
+ velOld.copyFrom(vel);
+ if (freeTmp)
+ delete weight;
+}
+// Auto-generated Python wrapper for mapPartsToMAC.
+static PyObject *_W_12(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "mapPartsToMAC", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ MACGrid &velOld = *_args.getPtr<MACGrid>("velOld", 2, &_lock);
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 3, &_lock);
+ const ParticleDataImpl<Vec3> &partVel = *_args.getPtr<ParticleDataImpl<Vec3>>(
+ "partVel", 4, &_lock);
+ Grid<Vec3> *weight = _args.getPtrOpt<Grid<Vec3>>("weight", 5, NULL, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+ "ptype", 6, NULL, &_lock);
+ const int exclude = _args.getOpt<int>("exclude", 7, 0, &_lock);
+ _retval = getPyNone();
+ mapPartsToMAC(flags, vel, velOld, parts, partVel, weight, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "mapPartsToMAC", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("mapPartsToMAC", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_mapPartsToMAC("", "mapPartsToMAC", _W_12);
+extern "C" {
+void PbRegister_mapPartsToMAC()
+{
+ KEEP_UNUSED(_RP_mapPartsToMAC);
+}
+}
+
+// Scatters per-particle data 'psource' onto a cell-centered grid 'target'
+// with linear interpolation; 'gtmp' accumulates the weights. Serial run()
+// on purpose — scatter writes would race under tbb.
+template<class T> struct knMapLinear : public KernelBase {
+ knMapLinear(const BasicParticleSystem &p,
+ const FlagGrid &flags,
+ const Grid<T> &target,
+ Grid<Real> &gtmp,
+ const ParticleDataImpl<T> &psource)
+ : KernelBase(p.size()), p(p), flags(flags), target(target), gtmp(gtmp), psource(psource)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystem &p,
+ const FlagGrid &flags,
+ const Grid<T> &target,
+ Grid<Real> &gtmp,
+ const ParticleDataImpl<T> &psource)
+ {
+ unusedParameter(flags);
+ if (!p.isActive(idx))
+ return;
+ target.setInterpolated(p[idx].pos, psource[idx], gtmp);
+ }
+ inline const BasicParticleSystem &getArg0()
+ {
+ return p;
+ }
+ typedef BasicParticleSystem type0;
+ inline const FlagGrid &getArg1()
+ {
+ return flags;
+ }
+ typedef FlagGrid type1;
+ inline const Grid<T> &getArg2()
+ {
+ return target;
+ }
+ typedef Grid<T> type2;
+ inline Grid<Real> &getArg3()
+ {
+ return gtmp;
+ }
+ typedef Grid<Real> type3;
+ inline const ParticleDataImpl<T> &getArg4()
+ {
+ return psource;
+ }
+ typedef ParticleDataImpl<T> type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel knMapLinear ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void run()
+ {
+ const IndexInt _sz = size;
+ for (IndexInt i = 0; i < _sz; i++)
+ op(i, p, flags, target, gtmp, psource);
+ }
+ const BasicParticleSystem &p;
+ const FlagGrid &flags;
+ const Grid<T> &target;
+ Grid<Real> &gtmp;
+ const ParticleDataImpl<T> &psource;
+};
+
+// Shared helper for particle->grid mapping: scatter (knMapLinear) into a
+// cleared target, then normalize by the accumulated weights (knSafeDivReal).
+template<class T>
+void mapLinearRealHelper(const FlagGrid &flags,
+ Grid<T> &target,
+ const BasicParticleSystem &parts,
+ const ParticleDataImpl<T> &source)
+{
+ Grid<Real> tmp(flags.getParent());
+ target.clear();
+ knMapLinear<T>(parts, flags, target, tmp, source);
+ knSafeDivReal<T>(target, tmp);
+}
+
+// Plugin: map per-particle Real data to a cell-centered Real grid.
+void mapPartsToGrid(const FlagGrid &flags,
+ Grid<Real> &target,
+ const BasicParticleSystem &parts,
+ const ParticleDataImpl<Real> &source)
+{
+ mapLinearRealHelper<Real>(flags, target, parts, source);
+}
+// Auto-generated Python wrapper for mapPartsToGrid.
+static PyObject *_W_13(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "mapPartsToGrid", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &target = *_args.getPtr<Grid<Real>>("target", 1, &_lock);
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 2, &_lock);
+ const ParticleDataImpl<Real> &source = *_args.getPtr<ParticleDataImpl<Real>>(
+ "source", 3, &_lock);
+ _retval = getPyNone();
+ mapPartsToGrid(flags, target, parts, source);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "mapPartsToGrid", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("mapPartsToGrid", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_mapPartsToGrid("", "mapPartsToGrid", _W_13);
+extern "C" {
+void PbRegister_mapPartsToGrid()
+{
+ KEEP_UNUSED(_RP_mapPartsToGrid);
+}
+}
+
+// Plugin: map per-particle Vec3 data to a cell-centered Vec3 grid.
+void mapPartsToGridVec3(const FlagGrid &flags,
+ Grid<Vec3> &target,
+ const BasicParticleSystem &parts,
+ const ParticleDataImpl<Vec3> &source)
+{
+ mapLinearRealHelper<Vec3>(flags, target, parts, source);
+}
+// Auto-generated Python wrapper for mapPartsToGridVec3.
+static PyObject *_W_14(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "mapPartsToGridVec3", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Vec3> &target = *_args.getPtr<Grid<Vec3>>("target", 1, &_lock);
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 2, &_lock);
+ const ParticleDataImpl<Vec3> &source = *_args.getPtr<ParticleDataImpl<Vec3>>(
+ "source", 3, &_lock);
+ _retval = getPyNone();
+ mapPartsToGridVec3(flags, target, parts, source);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "mapPartsToGridVec3", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("mapPartsToGridVec3", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_mapPartsToGridVec3("", "mapPartsToGridVec3", _W_14);
+extern "C" {
+void PbRegister_mapPartsToGridVec3()
+{
+ KEEP_UNUSED(_RP_mapPartsToGridVec3);
+}
+}
+
+// integers need "max" mode, not yet implemented
+// PYTHON() void mapPartsToGridInt ( FlagGrid& flags, Grid<int >& target , BasicParticleSystem&
+// parts , ParticleDataImpl<int >& source ) { mapLinearRealHelper<int >(flags,target,parts,source);
+//}
+
+// Gathers grid data into per-particle data: each active particle samples
+// 'gsrc' at its position. Gather is race-free, so run() uses tbb.
+template<class T> struct knMapFromGrid : public KernelBase {
+ knMapFromGrid(const BasicParticleSystem &p, const Grid<T> &gsrc, ParticleDataImpl<T> &target)
+ : KernelBase(p.size()), p(p), gsrc(gsrc), target(target)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystem &p,
+ const Grid<T> &gsrc,
+ ParticleDataImpl<T> &target) const
+ {
+ if (!p.isActive(idx))
+ return;
+ target[idx] = gsrc.getInterpolated(p[idx].pos);
+ }
+ inline const BasicParticleSystem &getArg0()
+ {
+ return p;
+ }
+ typedef BasicParticleSystem type0;
+ inline const Grid<T> &getArg1()
+ {
+ return gsrc;
+ }
+ typedef Grid<T> type1;
+ inline ParticleDataImpl<T> &getArg2()
+ {
+ return target;
+ }
+ typedef ParticleDataImpl<T> type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel knMapFromGrid ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, p, gsrc, target);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystem &p;
+ const Grid<T> &gsrc;
+ ParticleDataImpl<T> &target;
+};
+// Plugin: sample a Real grid at each particle position (grid -> particles).
+void mapGridToParts(const Grid<Real> &source,
+ const BasicParticleSystem &parts,
+ ParticleDataImpl<Real> &target)
+{
+ knMapFromGrid<Real>(parts, source, target);
+}
+// Auto-generated Python wrapper for mapGridToParts.
+static PyObject *_W_15(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "mapGridToParts", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid<Real> &source = *_args.getPtr<Grid<Real>>("source", 0, &_lock);
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 1, &_lock);
+ ParticleDataImpl<Real> &target = *_args.getPtr<ParticleDataImpl<Real>>("target", 2, &_lock);
+ _retval = getPyNone();
+ mapGridToParts(source, parts, target);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "mapGridToParts", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("mapGridToParts", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_mapGridToParts("", "mapGridToParts", _W_15);
+extern "C" {
+void PbRegister_mapGridToParts()
+{
+ KEEP_UNUSED(_RP_mapGridToParts);
+}
+}
+
+// Plugin: sample a Vec3 grid at each particle position (grid -> particles).
+void mapGridToPartsVec3(const Grid<Vec3> &source,
+ const BasicParticleSystem &parts,
+ ParticleDataImpl<Vec3> &target)
+{
+ knMapFromGrid<Vec3>(parts, source, target);
+}
+// Auto-generated Python wrapper for mapGridToPartsVec3.
+static PyObject *_W_16(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "mapGridToPartsVec3", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid<Vec3> &source = *_args.getPtr<Grid<Vec3>>("source", 0, &_lock);
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 1, &_lock);
+ ParticleDataImpl<Vec3> &target = *_args.getPtr<ParticleDataImpl<Vec3>>("target", 2, &_lock);
+ _retval = getPyNone();
+ mapGridToPartsVec3(source, parts, target);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "mapGridToPartsVec3", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("mapGridToPartsVec3", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_mapGridToPartsVec3("", "mapGridToPartsVec3", _W_16);
+extern "C" {
+void PbRegister_mapGridToPartsVec3()
+{
+ KEEP_UNUSED(_RP_mapGridToPartsVec3);
+}
+}
+
+// Get velocities from grid
+
+// Pure PIC gather: each particle's velocity is fully replaced by the MAC grid
+// velocity interpolated at its position (no FLIP delta blending).
+struct knMapLinearMACGridToVec3_PIC : public KernelBase {
+ knMapLinearMACGridToVec3_PIC(const BasicParticleSystem &p,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ ParticleDataImpl<Vec3> &pvel,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(p.size()),
+ p(p),
+ flags(flags),
+ vel(vel),
+ pvel(pvel),
+ ptype(ptype),
+ exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystem &p,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ ParticleDataImpl<Vec3> &pvel,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude) const
+ {
+ if (!p.isActive(idx) || (ptype && ((*ptype)[idx] & exclude)))
+ return;
+ // pure PIC
+ pvel[idx] = vel.getInterpolated(p[idx].pos);
+ }
+ inline const BasicParticleSystem &getArg0()
+ {
+ return p;
+ }
+ typedef BasicParticleSystem type0;
+ inline const FlagGrid &getArg1()
+ {
+ return flags;
+ }
+ typedef FlagGrid type1;
+ inline const MACGrid &getArg2()
+ {
+ return vel;
+ }
+ typedef MACGrid type2;
+ inline ParticleDataImpl<Vec3> &getArg3()
+ {
+ return pvel;
+ }
+ typedef ParticleDataImpl<Vec3> type3;
+ inline const ParticleDataImpl<int> *getArg4()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type4;
+ inline const int &getArg5()
+ {
+ return exclude;
+ }
+ typedef int type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel knMapLinearMACGridToVec3_PIC ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, p, flags, vel, pvel, ptype, exclude);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystem &p;
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ ParticleDataImpl<Vec3> &pvel;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+
+// Plugin: grid -> particle velocity transfer (pure PIC, no FLIP blending).
+void mapMACToParts(const FlagGrid &flags,
+ const MACGrid &vel,
+ const BasicParticleSystem &parts,
+ ParticleDataImpl<Vec3> &partVel,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0)
+{
+ knMapLinearMACGridToVec3_PIC(parts, flags, vel, partVel, ptype, exclude);
+}
+// Auto-generated Python wrapper for mapMACToParts.
+static PyObject *_W_17(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "mapMACToParts", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 2, &_lock);
+ ParticleDataImpl<Vec3> &partVel = *_args.getPtr<ParticleDataImpl<Vec3>>(
+ "partVel", 3, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+ "ptype", 4, NULL, &_lock);
+ const int exclude = _args.getOpt<int>("exclude", 5, 0, &_lock);
+ _retval = getPyNone();
+ mapMACToParts(flags, vel, parts, partVel, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "mapMACToParts", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("mapMACToParts", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_mapMACToParts("", "mapMACToParts", _W_17);
+extern "C" {
+void PbRegister_mapMACToParts()
+{
+ KEEP_UNUSED(_RP_mapMACToParts);
+}
+}
+
+// with flip delta interpolation
+
+// FLIP gather: blends the particle's previous velocity plus the grid velocity
+// delta (FLIP part) with the freshly interpolated grid velocity (PIC part),
+// weighted by flipRatio (1 = pure FLIP, 0 = pure PIC).
+struct knMapLinearMACGridToVec3_FLIP : public KernelBase {
+ knMapLinearMACGridToVec3_FLIP(const BasicParticleSystem &p,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ const MACGrid &oldVel,
+ ParticleDataImpl<Vec3> &pvel,
+ const Real flipRatio,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(p.size()),
+ p(p),
+ flags(flags),
+ vel(vel),
+ oldVel(oldVel),
+ pvel(pvel),
+ flipRatio(flipRatio),
+ ptype(ptype),
+ exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystem &p,
+ const FlagGrid &flags,
+ const MACGrid &vel,
+ const MACGrid &oldVel,
+ ParticleDataImpl<Vec3> &pvel,
+ const Real flipRatio,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude) const
+ {
+ if (!p.isActive(idx) || (ptype && ((*ptype)[idx] & exclude)))
+ return;
+ // delta = change of grid velocity during this step (after pressure solve etc.)
+ Vec3 v = vel.getInterpolated(p[idx].pos);
+ Vec3 delta = v - oldVel.getInterpolated(p[idx].pos);
+ pvel[idx] = flipRatio * (pvel[idx] + delta) + (1.0 - flipRatio) * v;
+ }
+ inline const BasicParticleSystem &getArg0()
+ {
+ return p;
+ }
+ typedef BasicParticleSystem type0;
+ inline const FlagGrid &getArg1()
+ {
+ return flags;
+ }
+ typedef FlagGrid type1;
+ inline const MACGrid &getArg2()
+ {
+ return vel;
+ }
+ typedef MACGrid type2;
+ inline const MACGrid &getArg3()
+ {
+ return oldVel;
+ }
+ typedef MACGrid type3;
+ inline ParticleDataImpl<Vec3> &getArg4()
+ {
+ return pvel;
+ }
+ typedef ParticleDataImpl<Vec3> type4;
+ inline const Real &getArg5()
+ {
+ return flipRatio;
+ }
+ typedef Real type5;
+ inline const ParticleDataImpl<int> *getArg6()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type6;
+ inline const int &getArg7()
+ {
+ return exclude;
+ }
+ typedef int type7;
+ void runMessage()
+ {
+ debMsg("Executing kernel knMapLinearMACGridToVec3_FLIP ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, p, flags, vel, oldVel, pvel, flipRatio, ptype, exclude);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystem &p;
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ const MACGrid &oldVel;
+ ParticleDataImpl<Vec3> &pvel;
+ const Real flipRatio;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+
+// Plugin: FLIP velocity update — blend old particle velocity + grid delta
+// with the interpolated grid velocity according to flipRatio.
+void flipVelocityUpdate(const FlagGrid &flags,
+ const MACGrid &vel,
+ const MACGrid &velOld,
+ const BasicParticleSystem &parts,
+ ParticleDataImpl<Vec3> &partVel,
+ const Real flipRatio,
+ const ParticleDataImpl<int> *ptype = NULL,
+ const int exclude = 0)
+{
+ knMapLinearMACGridToVec3_FLIP(parts, flags, vel, velOld, partVel, flipRatio, ptype, exclude);
+}
+// Auto-generated Python wrapper for flipVelocityUpdate.
+static PyObject *_W_18(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "flipVelocityUpdate", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ const MACGrid &velOld = *_args.getPtr<MACGrid>("velOld", 2, &_lock);
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 3, &_lock);
+ ParticleDataImpl<Vec3> &partVel = *_args.getPtr<ParticleDataImpl<Vec3>>(
+ "partVel", 4, &_lock);
+ const Real flipRatio = _args.get<Real>("flipRatio", 5, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtrOpt<ParticleDataImpl<int>>(
+ "ptype", 6, NULL, &_lock);
+ const int exclude = _args.getOpt<int>("exclude", 7, 0, &_lock);
+ _retval = getPyNone();
+ flipVelocityUpdate(flags, vel, velOld, parts, partVel, flipRatio, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "flipVelocityUpdate", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("flipVelocityUpdate", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_flipVelocityUpdate("", "flipVelocityUpdate", _W_18);
+extern "C" {
+void PbRegister_flipVelocityUpdate()
+{
+ KEEP_UNUSED(_RP_flipVelocityUpdate);
+}
+}
+
+//******************************************************************************
+// narrow band
+
+// Narrow-band velocity combination: per MAC component, cells deep inside the
+// liquid (phi < -narrowBand) are zeroed; cells with enough particle weight
+// (w > thresh) have their velocity moved into combineVel and are tagged with
+// -1 in vel. NOTE(review): the -1 presumably marks components for a later
+// combine/extrapolation pass — confirm against the calling scene code.
+struct knCombineVels : public KernelBase {
+ knCombineVels(MACGrid &vel,
+ const Grid<Vec3> &w,
+ MACGrid &combineVel,
+ const LevelsetGrid *phi,
+ Real narrowBand,
+ Real thresh)
+ : KernelBase(&vel, 0),
+ vel(vel),
+ w(w),
+ combineVel(combineVel),
+ phi(phi),
+ narrowBand(narrowBand),
+ thresh(thresh)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ MACGrid &vel,
+ const Grid<Vec3> &w,
+ MACGrid &combineVel,
+ const LevelsetGrid *phi,
+ Real narrowBand,
+ Real thresh) const
+ {
+ // NOTE(review): 'int idx' may narrow for very large grids; the generator
+ // uses IndexInt elsewhere — left unchanged to match upstream.
+ int idx = vel.index(i, j, k);
+
+ for (int c = 0; c < 3; ++c) {
+ // Correct narrow-band FLIP
+ if (phi) {
+ // sample phi at the face center of component c
+ Vec3 pos(i, j, k);
+ pos[(c + 1) % 3] += Real(0.5);
+ pos[(c + 2) % 3] += Real(0.5);
+ Real p = phi->getInterpolated(pos);
+ if (p < -narrowBand) {
+ vel[idx][c] = 0;
+ continue;
+ }
+ }
+
+ if (w[idx][c] > thresh) {
+ combineVel[idx][c] = vel[idx][c];
+ vel[idx][c] = -1;
+ }
+ else {
+ vel[idx][c] = 0;
+ }
+ }
+ }
+ inline MACGrid &getArg0()
+ {
+ return vel;
+ }
+ typedef MACGrid type0;
+ inline const Grid<Vec3> &getArg1()
+ {
+ return w;
+ }
+ typedef Grid<Vec3> type1;
+ inline MACGrid &getArg2()
+ {
+ return combineVel;
+ }
+ typedef MACGrid type2;
+ inline const LevelsetGrid *getArg3()
+ {
+ return phi;
+ }
+ typedef LevelsetGrid type3;
+ inline Real &getArg4()
+ {
+ return narrowBand;
+ }
+ typedef Real type4;
+ inline Real &getArg5()
+ {
+ return thresh;
+ }
+ typedef Real type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel knCombineVels ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ // TBB worker: border 0, so loops start at 0 (unlike border-1 kernels above).
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, vel, w, combineVel, phi, narrowBand, thresh);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, vel, w, combineVel, phi, narrowBand, thresh);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ MACGrid &vel;
+ const Grid<Vec3> &w;
+ MACGrid &combineVel;
+ const LevelsetGrid *phi;
+ Real narrowBand;
+ Real thresh;
+};
+
+//! narrow band velocity combination
+
+void combineGridVel(MACGrid &vel,
+ const Grid<Vec3> &weight,
+ MACGrid &combineVel,
+ const LevelsetGrid *phi = NULL,
+ Real narrowBand = 0.0,
+ Real thresh = 0.0)
+{
+ knCombineVels(vel, weight, combineVel, phi, narrowBand, thresh);
+}
+static PyObject *_W_19(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "combineGridVel", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 0, &_lock);
+ const Grid<Vec3> &weight = *_args.getPtr<Grid<Vec3>>("weight", 1, &_lock);
+ MACGrid &combineVel = *_args.getPtr<MACGrid>("combineVel", 2, &_lock);
+ const LevelsetGrid *phi = _args.getPtrOpt<LevelsetGrid>("phi", 3, NULL, &_lock);
+ Real narrowBand = _args.getOpt<Real>("narrowBand", 4, 0.0, &_lock);
+ Real thresh = _args.getOpt<Real>("thresh", 5, 0.0, &_lock);
+ _retval = getPyNone();
+ combineGridVel(vel, weight, combineVel, phi, narrowBand, thresh);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "combineGridVel", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("combineGridVel", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_combineGridVel("", "combineGridVel", _W_19);
+extern "C" {
+void PbRegister_combineGridVel()
+{
+ KEEP_UNUSED(_RP_combineGridVel);
+}
+}
+
+//! surface tension helper
+void getLaplacian(Grid<Real> &laplacian, const Grid<Real> &grid)
+{
+ LaplaceOp(laplacian, grid);
+}
+static PyObject *_W_20(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "getLaplacian", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &laplacian = *_args.getPtr<Grid<Real>>("laplacian", 0, &_lock);
+ const Grid<Real> &grid = *_args.getPtr<Grid<Real>>("grid", 1, &_lock);
+ _retval = getPyNone();
+ getLaplacian(laplacian, grid);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "getLaplacian", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("getLaplacian", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_getLaplacian("", "getLaplacian", _W_20);
+extern "C" {
+void PbRegister_getLaplacian()
+{
+ KEEP_UNUSED(_RP_getLaplacian);
+}
+}
+
+void getCurvature(Grid<Real> &curv, const Grid<Real> &grid, const Real h = 1.0)
+{
+ CurvatureOp(curv, grid, h);
+}
+static PyObject *_W_21(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "getCurvature", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &curv = *_args.getPtr<Grid<Real>>("curv", 0, &_lock);
+ const Grid<Real> &grid = *_args.getPtr<Grid<Real>>("grid", 1, &_lock);
+ const Real h = _args.getOpt<Real>("h", 2, 1.0, &_lock);
+ _retval = getPyNone();
+ getCurvature(curv, grid, h);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "getCurvature", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("getCurvature", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_getCurvature("", "getCurvature", _W_21);
+extern "C" {
+void PbRegister_getCurvature()
+{
+ KEEP_UNUSED(_RP_getCurvature);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/fluidguiding.cpp b/extern/mantaflow/preprocessed/plugin/fluidguiding.cpp
new file mode 100644
index 00000000000..13383581123
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/fluidguiding.cpp
@@ -0,0 +1,802 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Plugins for pressure correction: solve_pressure, and ghost fluid helpers
+ *
+ ******************************************************************************/
+#include "vectorbase.h"
+#include "grid.h"
+#include "kernel.h"
+#include "conjugategrad.h"
+#include "rcmatrix.h"
+
+using namespace std;
+namespace Manta {
+
+// only supports a single blur size for now, globals stored here
+bool gBlurPrecomputed = false;
+int gBlurKernelRadius = -1;
+Matrix gBlurKernel;
+
+// *****************************************************************************
+// Helper functions for fluid guiding
+
+//! creates a 1D (horizontal) Gaussian blur kernel of size n and standard deviation sigma
+Matrix get1DGaussianBlurKernel(const int n, const int sigma)
+{
+ Matrix x(n), y(n);
+ for (int j = 0; j < n; j++) {
+ x.add_to_element(0, j, -(n - 1) * 0.5);
+ y.add_to_element(0, j, j - (n - 1) * 0.5);
+ }
+ Matrix G(n);
+ Real sumG = 0;
+ for (int j = 0; j < n; j++) {
+ G.add_to_element(0,
+ j,
+ 1 / (2 * M_PI * sigma * sigma) *
+ exp(-(x(0, j) * x(0, j) + y(0, j) * y(0, j)) / (2 * sigma * sigma)));
+ sumG += G(0, j);
+ }
+ G = G * (1.0 / sumG);
+ return G;
+}
+
+//! convolves in with 1D kernel (centred at the kernel's midpoint) in the x-direction
+//! (out must be a grid of zeros)
+struct apply1DKernelDirX : public KernelBase {
+ apply1DKernelDirX(const MACGrid &in, MACGrid &out, const Matrix &kernel)
+ : KernelBase(&in, 0), in(in), out(out), kernel(kernel)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const MACGrid &in, MACGrid &out, const Matrix &kernel) const
+ {
+ int nx = in.getSizeX();
+ int kn = kernel.n;
+ int kCentre = kn / 2;
+ for (int m = 0, ind = kn - 1, ii = i - kCentre; m < kn; m++, ind--, ii++) {
+ if (ii < 0)
+ continue;
+ else if (ii >= nx)
+ break;
+ else
+ out(i, j, k) += in(ii, j, k) * kernel(0, ind);
+ }
+ }
+ inline const MACGrid &getArg0()
+ {
+ return in;
+ }
+ typedef MACGrid type0;
+ inline MACGrid &getArg1()
+ {
+ return out;
+ }
+ typedef MACGrid type1;
+ inline const Matrix &getArg2()
+ {
+ return kernel;
+ }
+ typedef Matrix type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel apply1DKernelDirX ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, in, out, kernel);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, in, out, kernel);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const MACGrid &in;
+ MACGrid &out;
+ const Matrix &kernel;
+};
+
+//! convolves in with 1D kernel (centred at the kernel's midpoint) in the y-direction
+//! (out must be a grid of zeros)
+struct apply1DKernelDirY : public KernelBase {
+ apply1DKernelDirY(const MACGrid &in, MACGrid &out, const Matrix &kernel)
+ : KernelBase(&in, 0), in(in), out(out), kernel(kernel)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const MACGrid &in, MACGrid &out, const Matrix &kernel) const
+ {
+ int ny = in.getSizeY();
+ int kn = kernel.n;
+ int kCentre = kn / 2;
+ for (int m = 0, ind = kn - 1, jj = j - kCentre; m < kn; m++, ind--, jj++) {
+ if (jj < 0)
+ continue;
+ else if (jj >= ny)
+ break;
+ else
+ out(i, j, k) += in(i, jj, k) * kernel(0, ind);
+ }
+ }
+ inline const MACGrid &getArg0()
+ {
+ return in;
+ }
+ typedef MACGrid type0;
+ inline MACGrid &getArg1()
+ {
+ return out;
+ }
+ typedef MACGrid type1;
+ inline const Matrix &getArg2()
+ {
+ return kernel;
+ }
+ typedef Matrix type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel apply1DKernelDirY ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, in, out, kernel);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, in, out, kernel);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const MACGrid &in;
+ MACGrid &out;
+ const Matrix &kernel;
+};
+
+//! convolves in with 1D kernel (centred at the kernel's midpoint) in the z-direction
+//! (out must be a grid of zeros)
+struct apply1DKernelDirZ : public KernelBase {
+ apply1DKernelDirZ(const MACGrid &in, MACGrid &out, const Matrix &kernel)
+ : KernelBase(&in, 0), in(in), out(out), kernel(kernel)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const MACGrid &in, MACGrid &out, const Matrix &kernel) const
+ {
+ int nz = in.getSizeZ();
+ int kn = kernel.n;
+ int kCentre = kn / 2;
+ for (int m = 0, ind = kn - 1, kk = k - kCentre; m < kn; m++, ind--, kk++) {
+ if (kk < 0)
+ continue;
+ else if (kk >= nz)
+ break;
+ else
+ out(i, j, k) += in(i, j, kk) * kernel(0, ind);
+ }
+ }
+ inline const MACGrid &getArg0()
+ {
+ return in;
+ }
+ typedef MACGrid type0;
+ inline MACGrid &getArg1()
+ {
+ return out;
+ }
+ typedef MACGrid type1;
+ inline const Matrix &getArg2()
+ {
+ return kernel;
+ }
+ typedef Matrix type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel apply1DKernelDirZ ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, in, out, kernel);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, in, out, kernel);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const MACGrid &in;
+ MACGrid &out;
+ const Matrix &kernel;
+};
+
+//! Apply separable Gaussian blur in 2D
+void applySeparableKernel2D(MACGrid &grid, const FlagGrid &flags, const Matrix &kernel)
+{
+ // int nx = grid.getSizeX(), ny = grid.getSizeY();
+ // int kn = kernel.n;
+ // int kCentre = kn / 2;
+ FluidSolver *parent = grid.getParent();
+ MACGrid orig = MACGrid(parent);
+ orig.copyFrom(grid);
+ MACGrid gridX = MACGrid(parent);
+ apply1DKernelDirX(grid, gridX, kernel);
+ MACGrid gridXY = MACGrid(parent);
+ apply1DKernelDirY(gridX, gridXY, kernel);
+ grid.copyFrom(gridXY);
+ FOR_IJK(grid)
+ {
+ if ((i > 0 && flags.isObstacle(i - 1, j, k)) || (j > 0 && flags.isObstacle(i, j - 1, k)) ||
+ flags.isObstacle(i, j, k)) {
+ grid(i, j, k).x = orig(i, j, k).x;
+ grid(i, j, k).y = orig(i, j, k).y;
+ grid(i, j, k).z = orig(i, j, k).z;
+ }
+ }
+}
+
+//! Apply separable Gaussian blur in 3D
+void applySeparableKernel3D(MACGrid &grid, const FlagGrid &flags, const Matrix &kernel)
+{
+ // int nx = grid.getSizeX(), ny = grid.getSizeY(), nz = grid.getSizeZ();
+ // int kn = kernel.n;
+ // int kCentre = kn / 2;
+ FluidSolver *parent = grid.getParent();
+ MACGrid orig = MACGrid(parent);
+ orig.copyFrom(grid);
+ MACGrid gridX = MACGrid(parent);
+ apply1DKernelDirX(grid, gridX, kernel);
+ MACGrid gridXY = MACGrid(parent);
+ apply1DKernelDirY(gridX, gridXY, kernel);
+ MACGrid gridXYZ = MACGrid(parent);
+ apply1DKernelDirZ(gridXY, gridXYZ, kernel);
+ grid.copyFrom(gridXYZ);
+ FOR_IJK(grid)
+ {
+ if ((i > 0 && flags.isObstacle(i - 1, j, k)) || (j > 0 && flags.isObstacle(i, j - 1, k)) ||
+ (k > 0 && flags.isObstacle(i, j, k - 1)) || flags.isObstacle(i, j, k)) {
+ grid(i, j, k).x = orig(i, j, k).x;
+ grid(i, j, k).y = orig(i, j, k).y;
+ grid(i, j, k).z = orig(i, j, k).z;
+ }
+ }
+}
+
+//! Apply separable Gaussian blur in 2D or 3D depending on input dimensions
+void applySeparableKernel(MACGrid &grid, const FlagGrid &flags, const Matrix &kernel)
+{
+ if (!grid.is3D())
+ applySeparableKernel2D(grid, flags, kernel);
+ else
+ applySeparableKernel3D(grid, flags, kernel);
+}
+
+//! Compute r-norm for the stopping criterion
+Real getRNorm(const MACGrid &x, const MACGrid &z)
+{
+ MACGrid r = MACGrid(x.getParent());
+ r.copyFrom(x);
+ r.sub(z);
+ return r.getMaxAbs();
+}
+
+//! Compute s-norm for the stopping criterion
+Real getSNorm(const Real rho, const MACGrid &z, const MACGrid &z_prev)
+{
+ MACGrid s = MACGrid(z_prev.getParent());
+ s.copyFrom(z_prev);
+ s.sub(z);
+ s.multConst(rho);
+ return s.getMaxAbs();
+}
+
+//! Compute primal eps for the stopping criterion
+Real getEpsPri(const Real eps_abs, const Real eps_rel, const MACGrid &x, const MACGrid &z)
+{
+ Real max_norm = max(x.getMaxAbs(), z.getMaxAbs());
+ Real eps_pri = sqrt(x.is3D() ? 3.0 : 2.0) * eps_abs + eps_rel * max_norm;
+ return eps_pri;
+}
+
+//! Compute dual eps for the stopping criterion
+Real getEpsDual(const Real eps_abs, const Real eps_rel, const MACGrid &y)
+{
+ Real eps_dual = sqrt(y.is3D() ? 3.0 : 2.0) * eps_abs + eps_rel * y.getMaxAbs();
+ return eps_dual;
+}
+
+//! Create a spiral velocity field in 2D as a test scene (optionally in 3D)
+void getSpiralVelocity(const FlagGrid &flags,
+ MACGrid &vel,
+ Real strength = 1.0,
+ bool with3D = false)
+{
+ int nx = flags.getSizeX(), ny = flags.getSizeY(), nz = 1;
+ if (with3D)
+ nz = flags.getSizeZ();
+ Real midX = 0.5 * (Real)(nx - 1);
+ Real midY = 0.5 * (Real)(ny - 1);
+ Real midZ = 0.5 * (Real)(nz - 1);
+ for (int i = 0; i < nx; i++) {
+ for (int j = 0; j < ny; j++) {
+ for (int k = 0; k < nz; k++) {
+ int idx = flags.index(i, j, k);
+ Real diffX = midX - i;
+ Real diffY = midY - j;
+ Real hypotenuse = sqrt(diffX * diffX + diffY * diffY);
+ if (hypotenuse > 0) {
+ vel[idx].x = diffY / hypotenuse;
+ vel[idx].y = -diffX / hypotenuse;
+ }
+ }
+ }
+ }
+ vel.multConst(strength);
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "getSpiralVelocity", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ Real strength = _args.getOpt<Real>("strength", 2, 1.0, &_lock);
+ bool with3D = _args.getOpt<bool>("with3D", 3, false, &_lock);
+ _retval = getPyNone();
+ getSpiralVelocity(flags, vel, strength, with3D);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "getSpiralVelocity", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("getSpiralVelocity", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_getSpiralVelocity("", "getSpiralVelocity", _W_0);
+extern "C" {
+void PbRegister_getSpiralVelocity()
+{
+ KEEP_UNUSED(_RP_getSpiralVelocity);
+}
+}
+
+//! Set the guiding weight W as a gradient in the y-direction
+void setGradientYWeight(
+ Grid<Real> &W, const int minY, const int maxY, const Real valAtMin, const Real valAtMax)
+{
+ FOR_IJK(W)
+ {
+ if (minY <= j && j <= maxY) {
+ Real val = valAtMin;
+ if (valAtMax != valAtMin) {
+ Real ratio = (Real)(j - minY) / (Real)(maxY - minY);
+ val = ratio * valAtMax + (1.0 - ratio) * valAtMin;
+ }
+ W(i, j, k) = val;
+ }
+ }
+}
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setGradientYWeight", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &W = *_args.getPtr<Grid<Real>>("W", 0, &_lock);
+ const int minY = _args.get<int>("minY", 1, &_lock);
+ const int maxY = _args.get<int>("maxY", 2, &_lock);
+ const Real valAtMin = _args.get<Real>("valAtMin", 3, &_lock);
+ const Real valAtMax = _args.get<Real>("valAtMax", 4, &_lock);
+ _retval = getPyNone();
+ setGradientYWeight(W, minY, maxY, valAtMin, valAtMax);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setGradientYWeight", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setGradientYWeight", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setGradientYWeight("", "setGradientYWeight", _W_1);
+extern "C" {
+void PbRegister_setGradientYWeight()
+{
+ KEEP_UNUSED(_RP_setGradientYWeight);
+}
+}
+
+// *****************************************************************************
+// More helper functions for fluid guiding
+
+//! Apply Gaussian blur (either 2D or 3D) in a separable way
+void applySeparableGaussianBlur(MACGrid &grid, const FlagGrid &flags, const Matrix &kernel1D)
+{
+ assertMsg(gBlurPrecomputed, "Error - blue kernel not precomputed");
+ applySeparableKernel(grid, flags, kernel1D);
+}
+
+//! Precomputation performed before the first PD iteration
+void ADMM_precompute_Separable(int blurRadius)
+{
+ if (gBlurPrecomputed) {
+ assertMsg(gBlurKernelRadius == blurRadius,
+ "More than a single blur radius not supported at the moment.");
+ return;
+ }
+ int kernelSize = 2 * blurRadius + 1;
+ gBlurKernel = get1DGaussianBlurKernel(kernelSize, kernelSize);
+ gBlurPrecomputed = true;
+ gBlurKernelRadius = blurRadius;
+}
+
+//! Apply approximate multiplication of inverse(M)
+void applyApproxInvM(MACGrid &v, const FlagGrid &flags, const MACGrid &invA)
+{
+ MACGrid v_new = MACGrid(v.getParent());
+ v_new.copyFrom(v);
+ v_new.mult(invA);
+ applySeparableGaussianBlur(v_new, flags, gBlurKernel);
+ applySeparableGaussianBlur(v_new, flags, gBlurKernel);
+ v_new.multConst(2.0);
+ v_new.mult(invA);
+ v.mult(invA);
+ v.sub(v_new);
+}
+
+//! Precompute Q, a reused quantity in the PD iterations
+//! Q = 2*G*G*(velT-velC)-sigma*velC
+void precomputeQ(MACGrid &Q,
+ const FlagGrid &flags,
+ const MACGrid &velT_region,
+ const MACGrid &velC,
+ const Matrix &gBlurKernel,
+ const Real sigma)
+{
+ Q.copyFrom(velT_region);
+ Q.sub(velC);
+ applySeparableGaussianBlur(Q, flags, gBlurKernel);
+ applySeparableGaussianBlur(Q, flags, gBlurKernel);
+ Q.multConst(2.0);
+ Q.addScaled(velC, -sigma);
+}
+
+//! Precompute inverse(A), a reused quantity in the PD iterations
+//! A = 2*S^2 + p*I, invA = elementwise 1/A
+void precomputeInvA(MACGrid &invA, const Grid<Real> &weight, const Real sigma)
+{
+ FOR_IJK(invA)
+ {
+ Real val = 2 * weight(i, j, k) * weight(i, j, k) + sigma;
+ if (val < 0.01)
+ val = 0.01;
+ Real invVal = 1.0 / val;
+ invA(i, j, k).x = invVal;
+ invA(i, j, k).y = invVal;
+ invA(i, j, k).z = invVal;
+ }
+}
+
+//! proximal operator of f , guiding
+void prox_f(MACGrid &v,
+ const FlagGrid &flags,
+ const MACGrid &Q,
+ const MACGrid &velC,
+ const Real sigma,
+ const MACGrid &invA)
+{
+ v.multConst(sigma);
+ v.add(Q);
+ applyApproxInvM(v, flags, invA);
+ v.add(velC);
+}
+
+// *****************************************************************************
+
+// re-uses main pressure solve from pressure.cpp
+void solvePressure(MACGrid &vel,
+ Grid<Real> &pressure,
+ const FlagGrid &flags,
+ Real cgAccuracy = 1e-3,
+ const Grid<Real> *phi = 0,
+ const Grid<Real> *perCellCorr = 0,
+ const MACGrid *fractions = 0,
+ const MACGrid *obvel = 0,
+ Real gfClamp = 1e-04,
+ Real cgMaxIterFac = 1.5,
+ bool precondition = true,
+ int preconditioner = 1,
+ bool enforceCompatibility = false,
+ bool useL2Norm = false,
+ bool zeroPressureFixing = false,
+ const Grid<Real> *curv = NULL,
+ const Real surfTens = 0.0,
+ Grid<Real> *retRhs = NULL);
+
+//! Main function for fluid guiding , includes "regular" pressure solve
+
+void PD_fluid_guiding(MACGrid &vel,
+ MACGrid &velT,
+ Grid<Real> &pressure,
+ FlagGrid &flags,
+ Grid<Real> &weight,
+ int blurRadius = 5,
+ Real theta = 1.0,
+ Real tau = 1.0,
+ Real sigma = 1.0,
+ Real epsRel = 1e-3,
+ Real epsAbs = 1e-3,
+ int maxIters = 200,
+ Grid<Real> *phi = 0,
+ Grid<Real> *perCellCorr = 0,
+ MACGrid *fractions = 0,
+ MACGrid *obvel = 0,
+ Real gfClamp = 1e-04,
+ Real cgMaxIterFac = 1.5,
+ Real cgAccuracy = 1e-3,
+ int preconditioner = 1,
+ bool zeroPressureFixing = false,
+ const Grid<Real> *curv = NULL,
+ const Real surfTens = 0.)
+{
+ FluidSolver *parent = vel.getParent();
+
+ // initialize dual/slack variables
+ MACGrid velC = MACGrid(parent);
+ velC.copyFrom(vel);
+ MACGrid x = MACGrid(parent);
+ MACGrid y = MACGrid(parent);
+ MACGrid z = MACGrid(parent);
+ MACGrid x0 = MACGrid(parent);
+ MACGrid z0 = MACGrid(parent);
+
+ // precomputation
+ ADMM_precompute_Separable(blurRadius);
+ MACGrid Q = MACGrid(parent);
+ precomputeQ(Q, flags, velT, velC, gBlurKernel, sigma);
+ MACGrid invA = MACGrid(parent);
+ precomputeInvA(invA, weight, sigma);
+
+ // loop
+ int iter = 0;
+ for (iter = 0; iter < maxIters; iter++) {
+ // x-update
+ x0.copyFrom(x);
+ x.multConst(1.0 / sigma);
+ x.add(y);
+ prox_f(x, flags, Q, velC, sigma, invA);
+ x.multConst(-sigma);
+ x.addScaled(y, sigma);
+ x.add(x0);
+
+ // z-update
+ z0.copyFrom(z);
+ z.addScaled(x, -tau);
+ Real cgAccuracyAdaptive = cgAccuracy;
+
+ solvePressure(z,
+ pressure,
+ flags,
+ cgAccuracyAdaptive,
+ phi,
+ perCellCorr,
+ fractions,
+ obvel,
+ gfClamp,
+ cgMaxIterFac,
+ true,
+ preconditioner,
+ false,
+ false,
+ zeroPressureFixing,
+ curv,
+ surfTens);
+
+ // y-update
+ y.copyFrom(z);
+ y.sub(z0);
+ y.multConst(theta);
+ y.add(z);
+
+ // stopping criterion
+ bool stop = (iter > 0 && getRNorm(z, z0) < getEpsDual(epsAbs, epsRel, z));
+
+ if (stop || (iter == maxIters - 1))
+ break;
+ }
+
+ // vel_new = z
+ vel.copyFrom(z);
+
+ debMsg("PD_fluid_guiding iterations:" << iter, 1);
+}
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "PD_fluid_guiding", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 0, &_lock);
+ MACGrid &velT = *_args.getPtr<MACGrid>("velT", 1, &_lock);
+ Grid<Real> &pressure = *_args.getPtr<Grid<Real>>("pressure", 2, &_lock);
+ FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 3, &_lock);
+ Grid<Real> &weight = *_args.getPtr<Grid<Real>>("weight", 4, &_lock);
+ int blurRadius = _args.getOpt<int>("blurRadius", 5, 5, &_lock);
+ Real theta = _args.getOpt<Real>("theta", 6, 1.0, &_lock);
+ Real tau = _args.getOpt<Real>("tau", 7, 1.0, &_lock);
+ Real sigma = _args.getOpt<Real>("sigma", 8, 1.0, &_lock);
+ Real epsRel = _args.getOpt<Real>("epsRel", 9, 1e-3, &_lock);
+ Real epsAbs = _args.getOpt<Real>("epsAbs", 10, 1e-3, &_lock);
+ int maxIters = _args.getOpt<int>("maxIters", 11, 200, &_lock);
+ Grid<Real> *phi = _args.getPtrOpt<Grid<Real>>("phi", 12, 0, &_lock);
+ Grid<Real> *perCellCorr = _args.getPtrOpt<Grid<Real>>("perCellCorr", 13, 0, &_lock);
+ MACGrid *fractions = _args.getPtrOpt<MACGrid>("fractions", 14, 0, &_lock);
+ MACGrid *obvel = _args.getPtrOpt<MACGrid>("obvel", 15, 0, &_lock);
+ Real gfClamp = _args.getOpt<Real>("gfClamp", 16, 1e-04, &_lock);
+ Real cgMaxIterFac = _args.getOpt<Real>("cgMaxIterFac", 17, 1.5, &_lock);
+ Real cgAccuracy = _args.getOpt<Real>("cgAccuracy", 18, 1e-3, &_lock);
+ int preconditioner = _args.getOpt<int>("preconditioner", 19, 1, &_lock);
+ bool zeroPressureFixing = _args.getOpt<bool>("zeroPressureFixing", 20, false, &_lock);
+ const Grid<Real> *curv = _args.getPtrOpt<Grid<Real>>("curv", 21, NULL, &_lock);
+ const Real surfTens = _args.getOpt<Real>("surfTens", 22, 0., &_lock);
+ _retval = getPyNone();
+ PD_fluid_guiding(vel,
+ velT,
+ pressure,
+ flags,
+ weight,
+ blurRadius,
+ theta,
+ tau,
+ sigma,
+ epsRel,
+ epsAbs,
+ maxIters,
+ phi,
+ perCellCorr,
+ fractions,
+ obvel,
+ gfClamp,
+ cgMaxIterFac,
+ cgAccuracy,
+ preconditioner,
+ zeroPressureFixing,
+ curv,
+ surfTens);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "PD_fluid_guiding", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("PD_fluid_guiding", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_PD_fluid_guiding("", "PD_fluid_guiding", _W_2);
+extern "C" {
+void PbRegister_PD_fluid_guiding()
+{
+ KEEP_UNUSED(_RP_PD_fluid_guiding);
+}
+}
+
+//! reset precomputation
+void releaseBlurPrecomp()
+{
+ gBlurPrecomputed = false;
+ gBlurKernelRadius = -1;
+ gBlurKernel = 0.f;
+}
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "releaseBlurPrecomp", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ _retval = getPyNone();
+ releaseBlurPrecomp();
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "releaseBlurPrecomp", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("releaseBlurPrecomp", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_releaseBlurPrecomp("", "releaseBlurPrecomp", _W_3);
+extern "C" {
+void PbRegister_releaseBlurPrecomp()
+{
+ KEEP_UNUSED(_RP_releaseBlurPrecomp);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/initplugins.cpp b/extern/mantaflow/preprocessed/plugin/initplugins.cpp
new file mode 100644
index 00000000000..3e28c947424
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/initplugins.cpp
@@ -0,0 +1,2317 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Tools to setup fields and inflows
+ *
+ ******************************************************************************/
+
+#include "vectorbase.h"
+#include "shapes.h"
+#include "commonkernels.h"
+#include "particle.h"
+#include "noisefield.h"
+#include "simpleimage.h"
+#include "mesh.h"
+
+using namespace std;
+
+namespace Manta {
+
+//! Apply noise to grid
+
+struct KnApplyNoiseInfl : public KernelBase {
+ KnApplyNoiseInfl(const FlagGrid &flags,
+ Grid<Real> &density,
+ const WaveletNoiseField &noise,
+ const Grid<Real> &sdf,
+ Real scale,
+ Real sigma)
+ : KernelBase(&flags, 0),
+ flags(flags),
+ density(density),
+ noise(noise),
+ sdf(sdf),
+ scale(scale),
+ sigma(sigma)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ Grid<Real> &density,
+ const WaveletNoiseField &noise,
+ const Grid<Real> &sdf,
+ Real scale,
+ Real sigma) const
+ {
+ if (!flags.isFluid(i, j, k) || sdf(i, j, k) > sigma)
+ return;
+ Real factor = clamp(1.0 - 0.5 / sigma * (sdf(i, j, k) + sigma), 0.0, 1.0);
+
+ Real target = noise.evaluate(Vec3(i, j, k)) * scale * factor;
+ if (density(i, j, k) < target)
+ density(i, j, k) = target;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<Real> &getArg1()
+ {
+ return density;
+ }
+ typedef Grid<Real> type1;
+ inline const WaveletNoiseField &getArg2()
+ {
+ return noise;
+ }
+ typedef WaveletNoiseField type2;
+ inline const Grid<Real> &getArg3()
+ {
+ return sdf;
+ }
+ typedef Grid<Real> type3;
+ inline Real &getArg4()
+ {
+ return scale;
+ }
+ typedef Real type4;
+ inline Real &getArg5()
+ {
+ return sigma;
+ }
+ typedef Real type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnApplyNoiseInfl ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, density, noise, sdf, scale, sigma);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, density, noise, sdf, scale, sigma);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<Real> &density;
+ const WaveletNoiseField &noise;
+ const Grid<Real> &sdf;
+ Real scale;
+ Real sigma;
+};
+
+//! Init noise-modulated density inside shape
+
+void densityInflow(const FlagGrid &flags,
+ Grid<Real> &density,
+ const WaveletNoiseField &noise,
+ Shape *shape,
+ Real scale = 1.0,
+ Real sigma = 0)
+{
+ Grid<Real> sdf = shape->computeLevelset();
+ KnApplyNoiseInfl(flags, density, noise, sdf, scale, sigma);
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "densityInflow", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &density = *_args.getPtr<Grid<Real>>("density", 1, &_lock);
+ const WaveletNoiseField &noise = *_args.getPtr<WaveletNoiseField>("noise", 2, &_lock);
+ Shape *shape = _args.getPtr<Shape>("shape", 3, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 4, 1.0, &_lock);
+ Real sigma = _args.getOpt<Real>("sigma", 5, 0, &_lock);
+ _retval = getPyNone();
+ densityInflow(flags, density, noise, shape, scale, sigma);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "densityInflow", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("densityInflow", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_densityInflow("", "densityInflow", _W_0);
+extern "C" {
+void PbRegister_densityInflow()
+{
+ KEEP_UNUSED(_RP_densityInflow);
+}
+}
+
+//! Apply noise to real grid based on an SDF
+struct KnAddNoise : public KernelBase {
+ KnAddNoise(const FlagGrid &flags,
+ Grid<Real> &density,
+ const WaveletNoiseField &noise,
+ const Grid<Real> *sdf,
+ Real scale)
+ : KernelBase(&flags, 0), flags(flags), density(density), noise(noise), sdf(sdf), scale(scale)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ Grid<Real> &density,
+ const WaveletNoiseField &noise,
+ const Grid<Real> *sdf,
+ Real scale) const
+ {
+ if (!flags.isFluid(i, j, k) || (sdf && (*sdf)(i, j, k) > 0.))
+ return;
+ density(i, j, k) += noise.evaluate(Vec3(i, j, k)) * scale;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<Real> &getArg1()
+ {
+ return density;
+ }
+ typedef Grid<Real> type1;
+ inline const WaveletNoiseField &getArg2()
+ {
+ return noise;
+ }
+ typedef WaveletNoiseField type2;
+ inline const Grid<Real> *getArg3()
+ {
+ return sdf;
+ }
+ typedef Grid<Real> type3;
+ inline Real &getArg4()
+ {
+ return scale;
+ }
+ typedef Real type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnAddNoise ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, density, noise, sdf, scale);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, density, noise, sdf, scale);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<Real> &density;
+ const WaveletNoiseField &noise;
+ const Grid<Real> *sdf;
+ Real scale;
+};
+void addNoise(const FlagGrid &flags,
+ Grid<Real> &density,
+ const WaveletNoiseField &noise,
+ const Grid<Real> *sdf = NULL,
+ Real scale = 1.0)
+{
+ KnAddNoise(flags, density, noise, sdf, scale);
+}
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "addNoise", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &density = *_args.getPtr<Grid<Real>>("density", 1, &_lock);
+ const WaveletNoiseField &noise = *_args.getPtr<WaveletNoiseField>("noise", 2, &_lock);
+ const Grid<Real> *sdf = _args.getPtrOpt<Grid<Real>>("sdf", 3, NULL, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 4, 1.0, &_lock);
+ _retval = getPyNone();
+ addNoise(flags, density, noise, sdf, scale);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "addNoise", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("addNoise", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_addNoise("", "addNoise", _W_1);
+extern "C" {
+void PbRegister_addNoise()
+{
+ KEEP_UNUSED(_RP_addNoise);
+}
+}
+
+//! sample noise field and set pdata with its values (for convenience, scale the noise values)
+
+template<class T> struct knSetPdataNoise : public KernelBase {
+ knSetPdataNoise(const BasicParticleSystem &parts,
+ ParticleDataImpl<T> &pdata,
+ const WaveletNoiseField &noise,
+ Real scale)
+ : KernelBase(parts.size()), parts(parts), pdata(pdata), noise(noise), scale(scale)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystem &parts,
+ ParticleDataImpl<T> &pdata,
+ const WaveletNoiseField &noise,
+ Real scale) const
+ {
+ pdata[idx] = noise.evaluate(parts.getPos(idx)) * scale;
+ }
+ inline const BasicParticleSystem &getArg0()
+ {
+ return parts;
+ }
+ typedef BasicParticleSystem type0;
+ inline ParticleDataImpl<T> &getArg1()
+ {
+ return pdata;
+ }
+ typedef ParticleDataImpl<T> type1;
+ inline const WaveletNoiseField &getArg2()
+ {
+ return noise;
+ }
+ typedef WaveletNoiseField type2;
+ inline Real &getArg3()
+ {
+ return scale;
+ }
+ typedef Real type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSetPdataNoise ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, parts, pdata, noise, scale);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystem &parts;
+ ParticleDataImpl<T> &pdata;
+ const WaveletNoiseField &noise;
+ Real scale;
+};
+
+template<class T> struct knSetPdataNoiseVec : public KernelBase {
+ knSetPdataNoiseVec(const BasicParticleSystem &parts,
+ ParticleDataImpl<T> &pdata,
+ const WaveletNoiseField &noise,
+ Real scale)
+ : KernelBase(parts.size()), parts(parts), pdata(pdata), noise(noise), scale(scale)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystem &parts,
+ ParticleDataImpl<T> &pdata,
+ const WaveletNoiseField &noise,
+ Real scale) const
+ {
+ pdata[idx] = noise.evaluateVec(parts.getPos(idx)) * scale;
+ }
+ inline const BasicParticleSystem &getArg0()
+ {
+ return parts;
+ }
+ typedef BasicParticleSystem type0;
+ inline ParticleDataImpl<T> &getArg1()
+ {
+ return pdata;
+ }
+ typedef ParticleDataImpl<T> type1;
+ inline const WaveletNoiseField &getArg2()
+ {
+ return noise;
+ }
+ typedef WaveletNoiseField type2;
+ inline Real &getArg3()
+ {
+ return scale;
+ }
+ typedef Real type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel knSetPdataNoiseVec ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, parts, pdata, noise, scale);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystem &parts;
+ ParticleDataImpl<T> &pdata;
+ const WaveletNoiseField &noise;
+ Real scale;
+};
+void setNoisePdata(const BasicParticleSystem &parts,
+ ParticleDataImpl<Real> &pd,
+ const WaveletNoiseField &noise,
+ Real scale = 1.)
+{
+ knSetPdataNoise<Real>(parts, pd, noise, scale);
+}
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setNoisePdata", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ ParticleDataImpl<Real> &pd = *_args.getPtr<ParticleDataImpl<Real>>("pd", 1, &_lock);
+ const WaveletNoiseField &noise = *_args.getPtr<WaveletNoiseField>("noise", 2, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 3, 1., &_lock);
+ _retval = getPyNone();
+ setNoisePdata(parts, pd, noise, scale);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setNoisePdata", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setNoisePdata", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setNoisePdata("", "setNoisePdata", _W_2);
+extern "C" {
+void PbRegister_setNoisePdata()
+{
+ KEEP_UNUSED(_RP_setNoisePdata);
+}
+}
+
+void setNoisePdataVec3(const BasicParticleSystem &parts,
+ ParticleDataImpl<Vec3> &pd,
+ const WaveletNoiseField &noise,
+ Real scale = 1.)
+{
+ knSetPdataNoiseVec<Vec3>(parts, pd, noise, scale);
+}
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setNoisePdataVec3", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ ParticleDataImpl<Vec3> &pd = *_args.getPtr<ParticleDataImpl<Vec3>>("pd", 1, &_lock);
+ const WaveletNoiseField &noise = *_args.getPtr<WaveletNoiseField>("noise", 2, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 3, 1., &_lock);
+ _retval = getPyNone();
+ setNoisePdataVec3(parts, pd, noise, scale);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setNoisePdataVec3", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setNoisePdataVec3", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setNoisePdataVec3("", "setNoisePdataVec3", _W_3);
+extern "C" {
+void PbRegister_setNoisePdataVec3()
+{
+ KEEP_UNUSED(_RP_setNoisePdataVec3);
+}
+}
+
+void setNoisePdataInt(const BasicParticleSystem &parts,
+ ParticleDataImpl<int> &pd,
+ const WaveletNoiseField &noise,
+ Real scale = 1.)
+{
+ knSetPdataNoise<int>(parts, pd, noise, scale);
+}
+static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setNoisePdataInt", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ ParticleDataImpl<int> &pd = *_args.getPtr<ParticleDataImpl<int>>("pd", 1, &_lock);
+ const WaveletNoiseField &noise = *_args.getPtr<WaveletNoiseField>("noise", 2, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 3, 1., &_lock);
+ _retval = getPyNone();
+ setNoisePdataInt(parts, pd, noise, scale);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setNoisePdataInt", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setNoisePdataInt", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setNoisePdataInt("", "setNoisePdataInt", _W_4);
+extern "C" {
+void PbRegister_setNoisePdataInt()
+{
+ KEEP_UNUSED(_RP_setNoisePdataInt);
+}
+}
+
+//! SDF gradient from obstacle flags, for turbulence.py
+// FIXME, slow, without kernel...
+Grid<Vec3> obstacleGradient(const FlagGrid &flags)
+{
+ LevelsetGrid levelset(flags.getParent(), false);
+ Grid<Vec3> gradient(flags.getParent());
+
+ // rebuild obstacle levelset
+ FOR_IDX(levelset)
+ {
+ levelset[idx] = flags.isObstacle(idx) ? -0.5 : 0.5;
+ }
+ levelset.reinitMarching(flags, 6.0, 0, true, false, FlagGrid::TypeReserved);
+
+ // build levelset gradient
+ GradientOp(gradient, levelset);
+
+ FOR_IDX(levelset)
+ {
+ Vec3 grad = gradient[idx];
+ Real s = normalize(grad);
+ if (s <= 0.1 || levelset[idx] >= 0)
+ grad = Vec3(0.);
+ gradient[idx] = grad * levelset[idx];
+ }
+
+ return gradient;
+}
+static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "obstacleGradient", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ _retval = toPy(obstacleGradient(flags));
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "obstacleGradient", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("obstacleGradient", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_obstacleGradient("", "obstacleGradient", _W_5);
+extern "C" {
+void PbRegister_obstacleGradient()
+{
+ KEEP_UNUSED(_RP_obstacleGradient);
+}
+}
+
+//! SDF from obstacle flags, for turbulence.py
+LevelsetGrid obstacleLevelset(const FlagGrid &flags)
+{
+ LevelsetGrid levelset(flags.getParent(), false);
+
+ // rebuild obstacle levelset
+ FOR_IDX(levelset)
+ {
+ levelset[idx] = flags.isObstacle(idx) ? -0.5 : 0.5;
+ }
+ levelset.reinitMarching(flags, 6.0, 0, true, false, FlagGrid::TypeReserved);
+
+ return levelset;
+}
+static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "obstacleLevelset", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ _retval = toPy(obstacleLevelset(flags));
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "obstacleLevelset", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("obstacleLevelset", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_obstacleLevelset("", "obstacleLevelset", _W_6);
+extern "C" {
+void PbRegister_obstacleLevelset()
+{
+ KEEP_UNUSED(_RP_obstacleLevelset);
+}
+}
+
+//*****************************************************************************
+// blender init functions
+
+struct KnApplyEmission : public KernelBase {
+ KnApplyEmission(const FlagGrid &flags,
+ Grid<Real> &target,
+ const Grid<Real> &source,
+ const Grid<Real> *emissionTexture,
+ bool isAbsolute,
+ int type)
+ : KernelBase(&flags, 0),
+ flags(flags),
+ target(target),
+ source(source),
+ emissionTexture(emissionTexture),
+ isAbsolute(isAbsolute),
+ type(type)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ Grid<Real> &target,
+ const Grid<Real> &source,
+ const Grid<Real> *emissionTexture,
+ bool isAbsolute,
+ int type) const
+ {
+ // if type is given, only apply emission when celltype matches type from flaggrid
+ // and if emission texture is given, only apply emission when some emission is present at cell
+ // (important for emit from particles)
+ bool isInflow = (type & FlagGrid::TypeInflow && flags.isInflow(i, j, k));
+ bool isOutflow = (type & FlagGrid::TypeOutflow && flags.isOutflow(i, j, k));
+ if ((type && !isInflow && !isOutflow) && (emissionTexture && !(*emissionTexture)(i, j, k)))
+ return;
+
+ if (isAbsolute)
+ target(i, j, k) = source(i, j, k);
+ else
+ target(i, j, k) += source(i, j, k);
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<Real> &getArg1()
+ {
+ return target;
+ }
+ typedef Grid<Real> type1;
+ inline const Grid<Real> &getArg2()
+ {
+ return source;
+ }
+ typedef Grid<Real> type2;
+ inline const Grid<Real> *getArg3()
+ {
+ return emissionTexture;
+ }
+ typedef Grid<Real> type3;
+ inline bool &getArg4()
+ {
+ return isAbsolute;
+ }
+ typedef bool type4;
+ inline int &getArg5()
+ {
+ return type;
+ }
+ typedef int type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnApplyEmission ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, target, source, emissionTexture, isAbsolute, type);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, target, source, emissionTexture, isAbsolute, type);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<Real> &target;
+ const Grid<Real> &source;
+ const Grid<Real> *emissionTexture;
+ bool isAbsolute;
+ int type;
+};
+
+//! Add emission values
+// isAbsolute: whether to add emission values to existing, or replace
+void applyEmission(FlagGrid &flags,
+ Grid<Real> &target,
+ Grid<Real> &source,
+ Grid<Real> *emissionTexture = NULL,
+ bool isAbsolute = true,
+ int type = 0)
+{
+ KnApplyEmission(flags, target, source, emissionTexture, isAbsolute, type);
+}
+static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "applyEmission", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &target = *_args.getPtr<Grid<Real>>("target", 1, &_lock);
+ Grid<Real> &source = *_args.getPtr<Grid<Real>>("source", 2, &_lock);
+ Grid<Real> *emissionTexture = _args.getPtrOpt<Grid<Real>>(
+ "emissionTexture", 3, NULL, &_lock);
+ bool isAbsolute = _args.getOpt<bool>("isAbsolute", 4, true, &_lock);
+ int type = _args.getOpt<int>("type", 5, 0, &_lock);
+ _retval = getPyNone();
+ applyEmission(flags, target, source, emissionTexture, isAbsolute, type);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "applyEmission", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("applyEmission", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_applyEmission("", "applyEmission", _W_7);
+extern "C" {
+void PbRegister_applyEmission()
+{
+ KEEP_UNUSED(_RP_applyEmission);
+}
+}
+
+// blender init functions for meshes
+
+struct KnApplyDensity : public KernelBase {
+ KnApplyDensity(
+ const FlagGrid &flags, Grid<Real> &density, const Grid<Real> &sdf, Real value, Real sigma)
+ : KernelBase(&flags, 0), flags(flags), density(density), sdf(sdf), value(value), sigma(sigma)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ Grid<Real> &density,
+ const Grid<Real> &sdf,
+ Real value,
+ Real sigma) const
+ {
+ if (!flags.isFluid(i, j, k) || sdf(i, j, k) > sigma)
+ return;
+ density(i, j, k) = value;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<Real> &getArg1()
+ {
+ return density;
+ }
+ typedef Grid<Real> type1;
+ inline const Grid<Real> &getArg2()
+ {
+ return sdf;
+ }
+ typedef Grid<Real> type2;
+ inline Real &getArg3()
+ {
+ return value;
+ }
+ typedef Real type3;
+ inline Real &getArg4()
+ {
+ return sigma;
+ }
+ typedef Real type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnApplyDensity ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, density, sdf, value, sigma);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, density, sdf, value, sigma);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<Real> &density;
+ const Grid<Real> &sdf;
+ Real value;
+ Real sigma;
+};
+//! Init noise-modulated density inside mesh
+
+void densityInflowMeshNoise(const FlagGrid &flags,
+ Grid<Real> &density,
+ const WaveletNoiseField &noise,
+ Mesh *mesh,
+ Real scale = 1.0,
+ Real sigma = 0)
+{
+ LevelsetGrid sdf(density.getParent(), false);
+ mesh->computeLevelset(sdf, 1.);
+ KnApplyNoiseInfl(flags, density, noise, sdf, scale, sigma);
+}
+static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "densityInflowMeshNoise", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &density = *_args.getPtr<Grid<Real>>("density", 1, &_lock);
+ const WaveletNoiseField &noise = *_args.getPtr<WaveletNoiseField>("noise", 2, &_lock);
+ Mesh *mesh = _args.getPtr<Mesh>("mesh", 3, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 4, 1.0, &_lock);
+ Real sigma = _args.getOpt<Real>("sigma", 5, 0, &_lock);
+ _retval = getPyNone();
+ densityInflowMeshNoise(flags, density, noise, mesh, scale, sigma);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "densityInflowMeshNoise", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("densityInflowMeshNoise", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_densityInflowMeshNoise("", "densityInflowMeshNoise", _W_8);
+extern "C" {
+void PbRegister_densityInflowMeshNoise()
+{
+ KEEP_UNUSED(_RP_densityInflowMeshNoise);
+}
+}
+
+//! Init constant density inside mesh
+
+void densityInflowMesh(const FlagGrid &flags,
+ Grid<Real> &density,
+ Mesh *mesh,
+ Real value = 1.,
+ Real cutoff = 7,
+ Real sigma = 0)
+{
+ LevelsetGrid sdf(density.getParent(), false);
+ mesh->computeLevelset(sdf, 2., cutoff);
+ KnApplyDensity(flags, density, sdf, value, sigma);
+}
+static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "densityInflowMesh", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &density = *_args.getPtr<Grid<Real>>("density", 1, &_lock);
+ Mesh *mesh = _args.getPtr<Mesh>("mesh", 2, &_lock);
+ Real value = _args.getOpt<Real>("value", 3, 1., &_lock);
+ Real cutoff = _args.getOpt<Real>("cutoff", 4, 7, &_lock);
+ Real sigma = _args.getOpt<Real>("sigma", 5, 0, &_lock);
+ _retval = getPyNone();
+ densityInflowMesh(flags, density, mesh, value, cutoff, sigma);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "densityInflowMesh", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("densityInflowMesh", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_densityInflowMesh("", "densityInflowMesh", _W_9);
+extern "C" {
+void PbRegister_densityInflowMesh()
+{
+ KEEP_UNUSED(_RP_densityInflowMesh);
+}
+}
+
+//*****************************************************************************
+
+//! check for symmetry , optionally enfore by copying
+
+void checkSymmetry(
+ Grid<Real> &a, Grid<Real> *err = NULL, bool symmetrize = false, int axis = 0, int bound = 0)
+{
+ const int c = axis;
+ const int s = a.getSize()[c];
+ FOR_IJK(a)
+ {
+ Vec3i idx(i, j, k), mdx(i, j, k);
+ mdx[c] = s - 1 - idx[c];
+ if (bound > 0 && ((!a.isInBounds(idx, bound)) || (!a.isInBounds(mdx, bound))))
+ continue;
+
+ if (err)
+ (*err)(idx) = fabs((double)(a(idx) - a(mdx)));
+ if (symmetrize && (idx[c] < s / 2)) {
+ a(idx) = a(mdx);
+ }
+ }
+}
+static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "checkSymmetry", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &a = *_args.getPtr<Grid<Real>>("a", 0, &_lock);
+ Grid<Real> *err = _args.getPtrOpt<Grid<Real>>("err", 1, NULL, &_lock);
+ bool symmetrize = _args.getOpt<bool>("symmetrize", 2, false, &_lock);
+ int axis = _args.getOpt<int>("axis", 3, 0, &_lock);
+ int bound = _args.getOpt<int>("bound", 4, 0, &_lock);
+ _retval = getPyNone();
+ checkSymmetry(a, err, symmetrize, axis, bound);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "checkSymmetry", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("checkSymmetry", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_checkSymmetry("", "checkSymmetry", _W_10);
+extern "C" {
+void PbRegister_checkSymmetry()
+{
+ KEEP_UNUSED(_RP_checkSymmetry);
+}
+}
+
+//! check for symmetry , mac grid version
+
+void checkSymmetryVec3(Grid<Vec3> &a,
+ Grid<Real> *err = NULL,
+ bool symmetrize = false,
+ int axis = 0,
+ int bound = 0,
+ int disable = 0)
+{
+ if (err)
+ err->setConst(0.);
+
+ // each dimension is measured separately for flexibility (could be combined)
+ const int c = axis;
+ const int o1 = (c + 1) % 3;
+ const int o2 = (c + 2) % 3;
+
+ // x
+ if (!(disable & 1)) {
+ const int s = a.getSize()[c] + 1;
+ FOR_IJK(a)
+ {
+ Vec3i idx(i, j, k), mdx(i, j, k);
+ mdx[c] = s - 1 - idx[c];
+ if (mdx[c] >= a.getSize()[c])
+ continue;
+ if (bound > 0 && ((!a.isInBounds(idx, bound)) || (!a.isInBounds(mdx, bound))))
+ continue;
+
+ // special case: center "line" of values , should be zero!
+ if (mdx[c] == idx[c]) {
+ if (err)
+ (*err)(idx) += fabs((double)(a(idx)[c]));
+ if (symmetrize)
+ a(idx)[c] = 0.;
+ continue;
+ }
+
+ // note - the a(mdx) component needs to be inverted here!
+ if (err)
+ (*err)(idx) += fabs((double)(a(idx)[c] - (a(mdx)[c] * -1.)));
+ if (symmetrize && (idx[c] < s / 2)) {
+ a(idx)[c] = a(mdx)[c] * -1.;
+ }
+ }
+ }
+
+ // y
+ if (!(disable & 2)) {
+ const int s = a.getSize()[c];
+ FOR_IJK(a)
+ {
+ Vec3i idx(i, j, k), mdx(i, j, k);
+ mdx[c] = s - 1 - idx[c];
+ if (bound > 0 && ((!a.isInBounds(idx, bound)) || (!a.isInBounds(mdx, bound))))
+ continue;
+
+ if (err)
+ (*err)(idx) += fabs((double)(a(idx)[o1] - a(mdx)[o1]));
+ if (symmetrize && (idx[c] < s / 2)) {
+ a(idx)[o1] = a(mdx)[o1];
+ }
+ }
+ }
+
+ // z
+ if (!(disable & 4)) {
+ const int s = a.getSize()[c];
+ FOR_IJK(a)
+ {
+ Vec3i idx(i, j, k), mdx(i, j, k);
+ mdx[c] = s - 1 - idx[c];
+ if (bound > 0 && ((!a.isInBounds(idx, bound)) || (!a.isInBounds(mdx, bound))))
+ continue;
+
+ if (err)
+ (*err)(idx) += fabs((double)(a(idx)[o2] - a(mdx)[o2]));
+ if (symmetrize && (idx[c] < s / 2)) {
+ a(idx)[o2] = a(mdx)[o2];
+ }
+ }
+ }
+}
+static PyObject *_W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "checkSymmetryVec3", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Vec3> &a = *_args.getPtr<Grid<Vec3>>("a", 0, &_lock);
+ Grid<Real> *err = _args.getPtrOpt<Grid<Real>>("err", 1, NULL, &_lock);
+ bool symmetrize = _args.getOpt<bool>("symmetrize", 2, false, &_lock);
+ int axis = _args.getOpt<int>("axis", 3, 0, &_lock);
+ int bound = _args.getOpt<int>("bound", 4, 0, &_lock);
+ int disable = _args.getOpt<int>("disable", 5, 0, &_lock);
+ _retval = getPyNone();
+ checkSymmetryVec3(a, err, symmetrize, axis, bound, disable);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "checkSymmetryVec3", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("checkSymmetryVec3", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_checkSymmetryVec3("", "checkSymmetryVec3", _W_11);
+extern "C" {
+void PbRegister_checkSymmetryVec3()
+{
+ KEEP_UNUSED(_RP_checkSymmetryVec3);
+}
+}
+
+// from simpleimage.cpp
+void projectImg(SimpleImage &img, const Grid<Real> &val, int shadeMode = 0, Real scale = 1.);
+
+//! output shaded (all 3 axes at once for 3D)
+//! shading modes: 0 smoke, 1 surfaces
+
void projectPpmFull(const Grid<Real> &val, string name, int shadeMode = 0, Real scale = 1.)
{
  SimpleImage img;
  // project the grid into an image (projectImg from simpleimage.cpp, declared above)
  projectImg(img, val, shadeMode, scale);
  // write the result to disk as a PPM file
  img.writePpm(name);
}
//! Auto-generated Python binding for projectPpmFull(); unpacks arguments,
//! runs the plugin between the timing hooks, and maps C++ exceptions onto a
//! Python error state (NULL return).
static PyObject *_W_12(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "projectPpmFull", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      const Grid<Real> &val = *_args.getPtr<Grid<Real>>("val", 0, &_lock);
      string name = _args.get<string>("name", 1, &_lock);
      int shadeMode = _args.getOpt<int>("shadeMode", 2, 0, &_lock);
      Real scale = _args.getOpt<Real>("scale", 3, 1., &_lock);
      _retval = getPyNone();
      projectPpmFull(val, name, shadeMode, scale);
      _args.check();
    }
    pbFinalizePlugin(parent, "projectPpmFull", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("projectPpmFull", e.what());
    return 0;
  }
}
// static registration of the binding with the script interface
static const Pb::Register _RP_projectPpmFull("", "projectPpmFull", _W_12);
extern "C" {
void PbRegister_projectPpmFull()
{
  KEEP_UNUSED(_RP_projectPpmFull);
}
}
+
+// helper functions for pdata operator tests
+
+//! init some test particles at the origin
+
void addTestParts(BasicParticleSystem &parts, int num)
{
  // queue `num` particles at the origin ...
  for (int i = 0; i < num; ++i)
    parts.addBuffered(Vec3(0, 0, 0));

  // ... then compact the system and flush the buffered particles into it
  parts.doCompress();
  parts.insertBufferedParticles();
}
//! Auto-generated Python binding for addTestParts(); unpacks arguments, runs
//! the plugin between the timing hooks, and reports C++ exceptions to Python.
static PyObject *_W_13(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "addTestParts", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
      int num = _args.get<int>("num", 1, &_lock);
      _retval = getPyNone();
      addTestParts(parts, num);
      _args.check();
    }
    pbFinalizePlugin(parent, "addTestParts", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("addTestParts", e.what());
    return 0;
  }
}
// static registration of the binding with the script interface
static const Pb::Register _RP_addTestParts("", "addTestParts", _W_13);
extern "C" {
void PbRegister_addTestParts()
{
  KEEP_UNUSED(_RP_addTestParts);
}
}
+
+//! calculate the difference between two pdata fields (note - slow!, not parallelized)
+
//! Maximum per-particle absolute difference between two pdata fields.
//! For Vec3 data the per-particle difference is the sum of the three
//! component-wise absolute differences (L1 norm). Both fields must have the
//! same type and size (asserted below). Serial loop — not parallelized.
Real pdataMaxDiff(const ParticleDataBase *a, const ParticleDataBase *b)
{
  double maxVal = 0.;
  // debMsg(" PD "<< a->getType()<<" as"<<a->getSizeSlow()<<" bs"<<b->getSizeSlow() , 1);
  assertMsg(a->getType() == b->getType(), "pdataMaxDiff problem - different pdata types!");
  assertMsg(a->getSizeSlow() == b->getSizeSlow(), "pdataMaxDiff problem - different pdata sizes!");

  // the type-flag checks above guarantee the dynamic_casts below succeed
  if (a->getType() & ParticleDataBase::TypeReal) {
    const ParticleDataImpl<Real> &av = *dynamic_cast<const ParticleDataImpl<Real> *>(a);
    const ParticleDataImpl<Real> &bv = *dynamic_cast<const ParticleDataImpl<Real> *>(b);
    FOR_PARTS(av)
    {
      maxVal = std::max(maxVal, (double)fabs(av[idx] - bv[idx]));
    }
  }
  else if (a->getType() & ParticleDataBase::TypeInt) {
    const ParticleDataImpl<int> &av = *dynamic_cast<const ParticleDataImpl<int> *>(a);
    const ParticleDataImpl<int> &bv = *dynamic_cast<const ParticleDataImpl<int> *>(b);
    FOR_PARTS(av)
    {
      maxVal = std::max(maxVal, (double)fabs((double)av[idx] - bv[idx]));
    }
  }
  else if (a->getType() & ParticleDataBase::TypeVec3) {
    const ParticleDataImpl<Vec3> &av = *dynamic_cast<const ParticleDataImpl<Vec3> *>(a);
    const ParticleDataImpl<Vec3> &bv = *dynamic_cast<const ParticleDataImpl<Vec3> *>(b);
    FOR_PARTS(av)
    {
      // L1 norm of the component-wise difference
      double d = 0.;
      for (int c = 0; c < 3; ++c) {
        d += fabs((double)av[idx][c] - (double)bv[idx][c]);
      }
      maxVal = std::max(maxVal, d);
    }
  }
  else {
    errMsg("pdataMaxDiff: Grid Type is not supported (only Real, Vec3, int)");
  }

  return maxVal;
}
//! Auto-generated Python binding for pdataMaxDiff(); converts the returned
//! Real to a Python object via toPy.
static PyObject *_W_14(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "pdataMaxDiff", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      const ParticleDataBase *a = _args.getPtr<ParticleDataBase>("a", 0, &_lock);
      const ParticleDataBase *b = _args.getPtr<ParticleDataBase>("b", 1, &_lock);
      _retval = toPy(pdataMaxDiff(a, b));
      _args.check();
    }
    pbFinalizePlugin(parent, "pdataMaxDiff", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("pdataMaxDiff", e.what());
    return 0;
  }
}
// static registration of the binding with the script interface
static const Pb::Register _RP_pdataMaxDiff("", "pdataMaxDiff", _W_14);
extern "C" {
void PbRegister_pdataMaxDiff()
{
  KEEP_UNUSED(_RP_pdataMaxDiff);
}
}
+
+//! calculate center of mass given density grid, for re-centering
+
Vec3 calcCenterOfMass(const Grid<Real> &density)
{
  // density-weighted average of cell-center positions; the +0.5 offsets
  // convert integer cell indices to cell centers in grid coordinates
  Vec3 p(0.0f);
  Real w = 0.0f;
  FOR_IJK(density)
  {
    p += density(i, j, k) * Vec3(i + 0.5f, j + 0.5f, k + 0.5f);
    w += density(i, j, k);
  }
  // guard against division by (near-)zero total mass;
  // p is returned un-normalized in that case
  if (w > 1e-6f)
    p /= w;
  return p;
}
//! Auto-generated Python binding for calcCenterOfMass(); converts the
//! returned Vec3 to a Python object via toPy.
static PyObject *_W_15(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "calcCenterOfMass", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      const Grid<Real> &density = *_args.getPtr<Grid<Real>>("density", 0, &_lock);
      _retval = toPy(calcCenterOfMass(density));
      _args.check();
    }
    pbFinalizePlugin(parent, "calcCenterOfMass", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("calcCenterOfMass", e.what());
    return 0;
  }
}
// static registration of the binding with the script interface
static const Pb::Register _RP_calcCenterOfMass("", "calcCenterOfMass", _W_15);
extern "C" {
void PbRegister_calcCenterOfMass()
{
  KEEP_UNUSED(_RP_calcCenterOfMass);
}
}
+
+//*****************************************************************************
+// helper functions for volume fractions (which are needed for second order obstacle boundaries)
+
//! Fraction of a cell face left open by the obstacle level set, estimated
//! from the two adjacent samples phi1/phi2 (phi < 0 is inside the obstacle,
//! cf. KnUpdateFractions below). Fractions below fracThreshold are clamped
//! to zero, the result is clamped to at most 1.
inline static Real calcFraction(Real phi1, Real phi2, Real fracThreshold)
{
  // both samples outside the obstacle -> face fully open
  if (phi1 > 0. && phi2 > 0.)
    return 1.;
  // both samples inside the obstacle -> face fully covered
  if (phi1 < 0. && phi2 < 0.)
    return 0.;

  // make sure phi1 < phi2
  if (phi2 < phi1) {
    Real t = phi1;
    phi1 = phi2;
    phi2 = t;
  }
  // nearly identical samples: zero crossing is ill-defined, return half coverage
  Real denom = phi1 - phi2;
  if (denom > -1e-04)
    return 0.5;

  // linear estimate of where the zero crossing sits between the samples
  Real frac = 1. - phi1 / denom;
  if (frac < fracThreshold)
    frac = 0.;  // stomp small values , dont mark as fluid
  return std::min(Real(1), frac);
}
+
+struct KnUpdateFractions : public KernelBase {
+ KnUpdateFractions(const FlagGrid &flags,
+ const Grid<Real> &phiObs,
+ MACGrid &fractions,
+ const int &boundaryWidth,
+ const Real fracThreshold)
+ : KernelBase(&flags, 1),
+ flags(flags),
+ phiObs(phiObs),
+ fractions(fractions),
+ boundaryWidth(boundaryWidth),
+ fracThreshold(fracThreshold)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ const Grid<Real> &phiObs,
+ MACGrid &fractions,
+ const int &boundaryWidth,
+ const Real fracThreshold) const
+ {
+
+ // walls at domain bounds and inner objects
+ fractions(i, j, k).x = calcFraction(phiObs(i, j, k), phiObs(i - 1, j, k), fracThreshold);
+ fractions(i, j, k).y = calcFraction(phiObs(i, j, k), phiObs(i, j - 1, k), fracThreshold);
+ if (phiObs.is3D()) {
+ fractions(i, j, k).z = calcFraction(phiObs(i, j, k), phiObs(i, j, k - 1), fracThreshold);
+ }
+
+ // remaining BCs at the domain boundaries
+ const int w = boundaryWidth;
+ // only set if not in obstacle
+ if (phiObs(i, j, k) < 0.)
+ return;
+
+ // x-direction boundaries
+ if (i <= w + 1) { // min x
+ if ((flags.isInflow(i - 1, j, k)) || (flags.isOutflow(i - 1, j, k)) ||
+ (flags.isOpen(i - 1, j, k))) {
+ fractions(i, j, k).x = fractions(i, j, k).y = 1.;
+ if (flags.is3D())
+ fractions(i, j, k).z = 1.;
+ }
+ }
+ if (i >= flags.getSizeX() - w - 2) { // max x
+ if ((flags.isInflow(i + 1, j, k)) || (flags.isOutflow(i + 1, j, k)) ||
+ (flags.isOpen(i + 1, j, k))) {
+ fractions(i + 1, j, k).x = fractions(i + 1, j, k).y = 1.;
+ if (flags.is3D())
+ fractions(i + 1, j, k).z = 1.;
+ }
+ }
+ // y-direction boundaries
+ if (j <= w + 1) { // min y
+ if ((flags.isInflow(i, j - 1, k)) || (flags.isOutflow(i, j - 1, k)) ||
+ (flags.isOpen(i, j - 1, k))) {
+ fractions(i, j, k).x = fractions(i, j, k).y = 1.;
+ if (flags.is3D())
+ fractions(i, j, k).z = 1.;
+ }
+ }
+ if (j >= flags.getSizeY() - w - 2) { // max y
+ if ((flags.isInflow(i, j + 1, k)) || (flags.isOutflow(i, j + 1, k)) ||
+ (flags.isOpen(i, j + 1, k))) {
+ fractions(i, j + 1, k).x = fractions(i, j + 1, k).y = 1.;
+ if (flags.is3D())
+ fractions(i, j + 1, k).z = 1.;
+ }
+ }
+ // z-direction boundaries
+ if (flags.is3D()) {
+ if (k <= w + 1) { // min z
+ if ((flags.isInflow(i, j, k - 1)) || (flags.isOutflow(i, j, k - 1)) ||
+ (flags.isOpen(i, j, k - 1))) {
+ fractions(i, j, k).x = fractions(i, j, k).y = 1.;
+ if (flags.is3D())
+ fractions(i, j, k).z = 1.;
+ }
+ }
+ if (j >= flags.getSizeZ() - w - 2) { // max z
+ if ((flags.isInflow(i, j, k + 1)) || (flags.isOutflow(i, j, k + 1)) ||
+ (flags.isOpen(i, j, k + 1))) {
+ fractions(i, j, k + 1).x = fractions(i, j, k + 1).y = 1.;
+ if (flags.is3D())
+ fractions(i, j, k + 1).z = 1.;
+ }
+ }
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const Grid<Real> &getArg1()
+ {
+ return phiObs;
+ }
+ typedef Grid<Real> type1;
+ inline MACGrid &getArg2()
+ {
+ return fractions;
+ }
+ typedef MACGrid type2;
+ inline const int &getArg3()
+ {
+ return boundaryWidth;
+ }
+ typedef int type3;
+ inline const Real &getArg4()
+ {
+ return fracThreshold;
+ }
+ typedef Real type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnUpdateFractions ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, phiObs, fractions, boundaryWidth, fracThreshold);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, phiObs, fractions, boundaryWidth, fracThreshold);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ const Grid<Real> &phiObs;
+ MACGrid &fractions;
+ const int &boundaryWidth;
+ const Real fracThreshold;
+};
+
//! update fill fraction values: reset all face fractions to zero, then
//! recompute them from the obstacle level set via KnUpdateFractions
//! (the kernel runs in its constructor)
void updateFractions(const FlagGrid &flags,
                     const Grid<Real> &phiObs,
                     MACGrid &fractions,
                     const int &boundaryWidth = 0,
                     const Real fracThreshold = 0.01)
{
  fractions.setConst(Vec3(0.));
  KnUpdateFractions(flags, phiObs, fractions, boundaryWidth, fracThreshold);
}
//! Auto-generated Python binding for updateFractions(); unpacks arguments,
//! runs the plugin between the timing hooks, and reports C++ exceptions to
//! Python via pbSetError.
static PyObject *_W_16(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "updateFractions", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
      const Grid<Real> &phiObs = *_args.getPtr<Grid<Real>>("phiObs", 1, &_lock);
      MACGrid &fractions = *_args.getPtr<MACGrid>("fractions", 2, &_lock);
      const int &boundaryWidth = _args.getOpt<int>("boundaryWidth", 3, 0, &_lock);
      const Real fracThreshold = _args.getOpt<Real>("fracThreshold", 4, 0.01, &_lock);
      _retval = getPyNone();
      updateFractions(flags, phiObs, fractions, boundaryWidth, fracThreshold);
      _args.check();
    }
    pbFinalizePlugin(parent, "updateFractions", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("updateFractions", e.what());
    return 0;
  }
}
// static registration of the binding with the script interface
static const Pb::Register _RP_updateFractions("", "updateFractions", _W_16);
extern "C" {
void PbRegister_updateFractions()
{
  KEEP_UNUSED(_RP_updateFractions);
}
}
+
//! Kernel: rebuild cell flags from level sets. A cell becomes an obstacle if
//! all of its face fractions are zero (when a fractions grid is given), or if
//! phiObs < 0 otherwise. Remaining cells are classified by the optional
//! inflow/outflow level sets, with priority obstacle > inflow > outflow,
//! falling back to empty. Runs over the grid interior (bnd = 1) via TBB.
struct KnUpdateFlagsObs : public KernelBase {
  KnUpdateFlagsObs(FlagGrid &flags,
                   const MACGrid *fractions,
                   const Grid<Real> &phiObs,
                   const Grid<Real> *phiOut,
                   const Grid<Real> *phiIn)
      : KernelBase(&flags, 1),
        flags(flags),
        fractions(fractions),
        phiObs(phiObs),
        phiOut(phiOut),
        phiIn(phiIn)
  {
    runMessage();
    run();
  }
  inline void op(int i,
                 int j,
                 int k,
                 FlagGrid &flags,
                 const MACGrid *fractions,
                 const Grid<Real> &phiObs,
                 const Grid<Real> *phiOut,
                 const Grid<Real> *phiIn) const
  {

    bool isObs = false;
    if (fractions) {
      // sum the fractions of all faces of this cell; a cell is solid only if
      // every face is fully covered (exact zero sum)
      Real f = 0.;
      f += fractions->get(i, j, k).x;
      f += fractions->get(i + 1, j, k).x;
      f += fractions->get(i, j, k).y;
      f += fractions->get(i, j + 1, k).y;
      if (flags.is3D()) {
        f += fractions->get(i, j, k).z;
        f += fractions->get(i, j, k + 1).z;
      }
      if (f == 0.)
        isObs = true;
    }
    else {
      // no fractions grid: fall back to the obstacle level set directly
      if (phiObs(i, j, k) < 0.)
        isObs = true;
    }

    bool isOutflow = false;
    bool isInflow = false;
    if (phiOut && (*phiOut)(i, j, k) < 0.)
      isOutflow = true;
    if (phiIn && (*phiIn)(i, j, k) < 0.)
      isInflow = true;

    // classification priority: obstacle > inflow > outflow > empty
    if (isObs)
      flags(i, j, k) = FlagGrid::TypeObstacle;
    else if (isInflow)
      flags(i, j, k) = (FlagGrid::TypeFluid | FlagGrid::TypeInflow);
    else if (isOutflow)
      flags(i, j, k) = (FlagGrid::TypeEmpty | FlagGrid::TypeOutflow);
    else
      flags(i, j, k) = FlagGrid::TypeEmpty;
  }
  inline FlagGrid &getArg0()
  {
    return flags;
  }
  typedef FlagGrid type0;
  inline const MACGrid *getArg1()
  {
    return fractions;
  }
  typedef MACGrid type1;
  inline const Grid<Real> &getArg2()
  {
    return phiObs;
  }
  typedef Grid<Real> type2;
  inline const Grid<Real> *getArg3()
  {
    return phiOut;
  }
  typedef Grid<Real> type3;
  inline const Grid<Real> *getArg4()
  {
    return phiIn;
  }
  typedef Grid<Real> type4;
  void runMessage()
  {
    debMsg("Executing kernel KnUpdateFlagsObs ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      // 3D: parallelize over z slices
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i, j, k, flags, fractions, phiObs, phiOut, phiIn);
    }
    else {
      // 2D: parallelize over rows, k fixed at 0
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, flags, fractions, phiObs, phiOut, phiIn);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  FlagGrid &flags;
  const MACGrid *fractions;
  const Grid<Real> &phiObs;
  const Grid<Real> *phiOut;
  const Grid<Real> *phiIn;
};
+
//! update obstacle and outflow flags from levelsets
//! optionally uses fill fractions for obstacle detection (see KnUpdateFlagsObs);
//! phiOut/phiIn additionally mark outflow/inflow cells when given
void setObstacleFlags(FlagGrid &flags,
                      const Grid<Real> &phiObs,
                      const MACGrid *fractions = NULL,
                      const Grid<Real> *phiOut = NULL,
                      const Grid<Real> *phiIn = NULL)
{
  KnUpdateFlagsObs(flags, fractions, phiObs, phiOut, phiIn);
}
//! Auto-generated Python binding for setObstacleFlags(); unpacks arguments,
//! runs the plugin between the timing hooks, and reports C++ exceptions to
//! Python via pbSetError.
static PyObject *_W_17(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "setObstacleFlags", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
      const Grid<Real> &phiObs = *_args.getPtr<Grid<Real>>("phiObs", 1, &_lock);
      const MACGrid *fractions = _args.getPtrOpt<MACGrid>("fractions", 2, NULL, &_lock);
      const Grid<Real> *phiOut = _args.getPtrOpt<Grid<Real>>("phiOut", 3, NULL, &_lock);
      const Grid<Real> *phiIn = _args.getPtrOpt<Grid<Real>>("phiIn", 4, NULL, &_lock);
      _retval = getPyNone();
      setObstacleFlags(flags, phiObs, fractions, phiOut, phiIn);
      _args.check();
    }
    pbFinalizePlugin(parent, "setObstacleFlags", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("setObstacleFlags", e.what());
    return 0;
  }
}
// static registration of the binding with the script interface
static const Pb::Register _RP_setObstacleFlags("", "setObstacleFlags", _W_17);
extern "C" {
void PbRegister_setObstacleFlags()
{
  KEEP_UNUSED(_RP_setObstacleFlags);
}
}
+
//! small helper for test case test_1040_secOrderBnd.py
//! Kernel: writes a rotational (vortex) velocity field around `center` into
//! the x/y components of a MAC grid; the tangential speed scales linearly
//! with the distance r/radius. The +/-0.5 offsets account for the staggered
//! face positions of the x resp. y component. Cells with phiObs < -1 (deep
//! inside the obstacle — presumably; confirm against the test case) are
//! skipped. z component is left untouched.
struct kninitVortexVelocity : public KernelBase {
  kninitVortexVelocity(const Grid<Real> &phiObs,
                       MACGrid &vel,
                       const Vec3 &center,
                       const Real &radius)
      : KernelBase(&phiObs, 0), phiObs(phiObs), vel(vel), center(center), radius(radius)
  {
    runMessage();
    run();
  }
  inline void op(int i,
                 int j,
                 int k,
                 const Grid<Real> &phiObs,
                 MACGrid &vel,
                 const Vec3 &center,
                 const Real &radius) const
  {

    if (phiObs(i, j, k) >= -1.) {

      // x face: shift the x distance by half a cell toward the center
      Real dx = i - center.x;
      if (dx >= 0)
        dx -= .5;
      else
        dx += .5;
      Real dy = j - center.y;
      Real r = std::sqrt(dx * dx + dy * dy);
      Real alpha = atan2(dy, dx);

      // tangential direction: (-sin, cos), magnitude r/radius
      vel(i, j, k).x = -std::sin(alpha) * (r / radius);

      // y face: same construction with the half-cell shift applied to dy
      dx = i - center.x;
      dy = j - center.y;
      if (dy >= 0)
        dy -= .5;
      else
        dy += .5;
      r = std::sqrt(dx * dx + dy * dy);
      alpha = atan2(dy, dx);

      vel(i, j, k).y = std::cos(alpha) * (r / radius);
    }
  }
  inline const Grid<Real> &getArg0()
  {
    return phiObs;
  }
  typedef Grid<Real> type0;
  inline MACGrid &getArg1()
  {
    return vel;
  }
  typedef MACGrid type1;
  inline const Vec3 &getArg2()
  {
    return center;
  }
  typedef Vec3 type2;
  inline const Real &getArg3()
  {
    return radius;
  }
  typedef Real type3;
  void runMessage()
  {
    debMsg("Executing kernel kninitVortexVelocity ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      // 3D: parallelize over z slices (bnd = 0, full range)
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, phiObs, vel, center, radius);
    }
    else {
      // 2D: parallelize over rows, k fixed at 0
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, phiObs, vel, center, radius);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  const Grid<Real> &phiObs;
  MACGrid &vel;
  const Vec3 &center;
  const Real &radius;
};
+
//! plugin entry point: fill `vel` with the vortex field around `center`
//! (see kninitVortexVelocity above; the kernel runs in its constructor)
void initVortexVelocity(const Grid<Real> &phiObs,
                        MACGrid &vel,
                        const Vec3 &center,
                        const Real &radius)
{
  kninitVortexVelocity(phiObs, vel, center, radius);
}
//! Auto-generated Python binding for initVortexVelocity(); unpacks arguments,
//! runs the plugin between the timing hooks, and reports C++ exceptions to
//! Python via pbSetError.
static PyObject *_W_18(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "initVortexVelocity", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      const Grid<Real> &phiObs = *_args.getPtr<Grid<Real>>("phiObs", 0, &_lock);
      MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
      const Vec3 &center = _args.get<Vec3>("center", 2, &_lock);
      const Real &radius = _args.get<Real>("radius", 3, &_lock);
      _retval = getPyNone();
      initVortexVelocity(phiObs, vel, center, radius);
      _args.check();
    }
    pbFinalizePlugin(parent, "initVortexVelocity", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("initVortexVelocity", e.what());
    return 0;
  }
}
// static registration of the binding with the script interface
static const Pb::Register _RP_initVortexVelocity("", "initVortexVelocity", _W_18);
extern "C" {
void PbRegister_initVortexVelocity()
{
  KEEP_UNUSED(_RP_initVortexVelocity);
}
}
+
+//*****************************************************************************
+// helper functions for blurring
+
+//! class for Gaussian Blur
+struct GaussianKernelCreator {
+ public:
+ float mSigma;
+ int mDim;
+ float *mMat1D;
+
+ GaussianKernelCreator() : mSigma(0.0f), mDim(0), mMat1D(NULL)
+ {
+ }
+ GaussianKernelCreator(float sigma, int dim = 0) : mSigma(0.0f), mDim(0), mMat1D(NULL)
+ {
+ setGaussianSigma(sigma, dim);
+ }
+
+ Real getWeiAtDis(float disx, float disy)
+ {
+ float m = 1.0 / (sqrt(2.0 * M_PI) * mSigma);
+ float v = m * exp(-(1.0 * disx * disx + 1.0 * disy * disy) / (2.0 * mSigma * mSigma));
+ return v;
+ }
+
+ Real getWeiAtDis(float disx, float disy, float disz)
+ {
+ float m = 1.0 / (sqrt(2.0 * M_PI) * mSigma);
+ float v = m * exp(-(1.0 * disx * disx + 1.0 * disy * disy + 1.0 * disz * disz) /
+ (2.0 * mSigma * mSigma));
+ return v;
+ }
+
+ void setGaussianSigma(float sigma, int dim = 0)
+ {
+ mSigma = sigma;
+ if (dim < 3)
+ mDim = (int)(2.0 * 3.0 * sigma + 1.0f);
+ else
+ mDim = dim;
+ if (mDim < 3)
+ mDim = 3;
+
+ if (mDim % 2 == 0)
+ ++mDim; // make dim odd
+
+ float s2 = mSigma * mSigma;
+ int c = mDim / 2;
+ float m = 1.0 / (sqrt(2.0 * M_PI) * mSigma);
+
+ // create 1D matrix
+ if (mMat1D)
+ delete[] mMat1D;
+ mMat1D = new float[mDim];
+ for (int i = 0; i < (mDim + 1) / 2; i++) {
+ float v = m * exp(-(1.0 * i * i) / (2.0 * s2));
+ mMat1D[c + i] = v;
+ mMat1D[c - i] = v;
+ }
+ }
+
+ ~GaussianKernelCreator()
+ {
+ if (mMat1D)
+ delete[] mMat1D;
+ }
+
+ float get1DKernelValue(int off)
+ {
+ assertMsg(off >= 0 && off < mDim, "off exceeded boundary in Gaussian Kernel 1D!");
+ return mMat1D[off];
+ }
+};
+
//! 1D convolution of the grid with the Gaussian kernel along axis cdir
//! (0 = x, 1 = y, 2 = z), sampled at position pos; out-of-bounds taps are
//! clamped to the nearest border cell.
template<class T>
T convolveGrid(Grid<T> &originGrid, GaussianKernelCreator &gkSigma, Vec3 pos, int cdir)
{
  // pos should be the centre pos, e.g., 1.5, 4.5, 0.5 for grid pos 1,4,0
  Vec3 step(1.0, 0.0, 0.0);
  if (cdir == 1)  // todo, z
    step = Vec3(0.0, 1.0, 0.0);
  else if (cdir == 2)
    step = Vec3(0.0, 0.0, 1.0);
  T pxResult(0);
  for (int i = 0; i < gkSigma.mDim; ++i) {
    // walk the kernel window centered on pos along the chosen axis
    Vec3i curpos = toVec3i(pos - step * (i - gkSigma.mDim / 2));
    if (originGrid.isInBounds(curpos))
      pxResult += gkSigma.get1DKernelValue(i) * originGrid.get(curpos);
    else {  // TODO , improve...
      // clamp-to-edge: reuse the nearest border cell for out-of-range taps
      Vec3i curfitpos = curpos;
      if (curfitpos.x < 0)
        curfitpos.x = 0;
      else if (curfitpos.x >= originGrid.getSizeX())
        curfitpos.x = originGrid.getSizeX() - 1;
      if (curfitpos.y < 0)
        curfitpos.y = 0;
      else if (curfitpos.y >= originGrid.getSizeY())
        curfitpos.y = originGrid.getSizeY() - 1;
      if (curfitpos.z < 0)
        curfitpos.z = 0;
      else if (curfitpos.z >= originGrid.getSizeZ())
        curfitpos.z = originGrid.getSizeZ() - 1;
      pxResult += gkSigma.get1DKernelValue(i) * originGrid.get(curfitpos);
    }
  }
  return pxResult;
}
+
//! Kernel: one separable blur pass — convolve originGrid along axis cdir and
//! write the result to targetGrid (grids must be distinct; see blurGrid below).
template<class T> struct knBlurGrid : public KernelBase {
  knBlurGrid(Grid<T> &originGrid, Grid<T> &targetGrid, GaussianKernelCreator &gkSigma, int cdir)
      : KernelBase(&originGrid, 0),
        originGrid(originGrid),
        targetGrid(targetGrid),
        gkSigma(gkSigma),
        cdir(cdir)
  {
    runMessage();
    run();
  }
  inline void op(int i,
                 int j,
                 int k,
                 Grid<T> &originGrid,
                 Grid<T> &targetGrid,
                 GaussianKernelCreator &gkSigma,
                 int cdir) const
  {
    // NOTE(review): convolveGrid's comment asks for cell-centre positions
    // (i+0.5, ...), but integer positions are passed here; toVec3i truncation
    // keeps the window centered on the cell index — confirm this is intended.
    targetGrid(i, j, k) = convolveGrid<T>(originGrid, gkSigma, Vec3(i, j, k), cdir);
  }
  inline Grid<T> &getArg0()
  {
    return originGrid;
  }
  typedef Grid<T> type0;
  inline Grid<T> &getArg1()
  {
    return targetGrid;
  }
  typedef Grid<T> type1;
  inline GaussianKernelCreator &getArg2()
  {
    return gkSigma;
  }
  typedef GaussianKernelCreator type2;
  inline int &getArg3()
  {
    return cdir;
  }
  typedef int type3;
  void runMessage()
  {
    debMsg("Executing kernel knBlurGrid ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      // 3D: parallelize over z slices (bnd = 0, full range)
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, originGrid, targetGrid, gkSigma, cdir);
    }
    else {
      // 2D: parallelize over rows, k fixed at 0
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, originGrid, targetGrid, gkSigma, cdir);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  Grid<T> &originGrid;
  Grid<T> &targetGrid;
  GaussianKernelCreator &gkSigma;
  int cdir;
};
+
//! Separable Gaussian blur of a grid: one pass per axis (x, y, and z for 3D
//! grids), ping-ponging through a temporary copy so each pass reads from a
//! grid it does not write. Returns the kernel width used.
template<class T> int blurGrid(Grid<T> &originGrid, Grid<T> &targetGrid, float sigma)
{
  GaussianKernelCreator tmGK(sigma);
  Grid<T> tmpGrid(originGrid);
  knBlurGrid<T>(originGrid, tmpGrid, tmGK, 0);  // blur x
  knBlurGrid<T>(tmpGrid, targetGrid, tmGK, 1);  // blur y
  if (targetGrid.is3D()) {
    tmpGrid.copyFrom(targetGrid);
    knBlurGrid<T>(tmpGrid, targetGrid, tmGK, 2);  // blur z
  }
  return tmGK.mDim;
}
+
//! Kernel: one separable Gaussian blur pass for a MAC grid — convolve
//! originGrid along axis cdir (0 = x, 1 = y, 2 = z) into target, blurring all
//! three velocity components together; out-of-bounds taps are clamped to the
//! nearest border cell.
struct KnBlurMACGridGauss : public KernelBase {
  KnBlurMACGridGauss(MACGrid &originGrid,
                     MACGrid &target,
                     GaussianKernelCreator &gkSigma,
                     int cdir)
      : KernelBase(&originGrid, 0),
        originGrid(originGrid),
        target(target),
        gkSigma(gkSigma),
        cdir(cdir)
  {
    runMessage();
    run();
  }
  inline void op(int i,
                 int j,
                 int k,
                 MACGrid &originGrid,
                 MACGrid &target,
                 GaussianKernelCreator &gkSigma,
                 int cdir) const
  {
    Vec3 pos(i, j, k);
    // unit step along the blur axis
    Vec3 step(1.0, 0.0, 0.0);
    if (cdir == 1)
      step = Vec3(0.0, 1.0, 0.0);
    else if (cdir == 2)
      step = Vec3(0.0, 0.0, 1.0);

    Vec3 pxResult(0.0f);
    for (int di = 0; di < gkSigma.mDim; ++di) {
      Vec3i curpos = toVec3i(pos - step * (di - gkSigma.mDim / 2));
      if (!originGrid.isInBounds(curpos)) {
        // clamp-to-edge for out-of-range taps
        if (curpos.x < 0)
          curpos.x = 0;
        else if (curpos.x >= originGrid.getSizeX())
          curpos.x = originGrid.getSizeX() - 1;
        if (curpos.y < 0)
          curpos.y = 0;
        else if (curpos.y >= originGrid.getSizeY())
          curpos.y = originGrid.getSizeY() - 1;
        if (curpos.z < 0)
          curpos.z = 0;
        else if (curpos.z >= originGrid.getSizeZ())
          curpos.z = originGrid.getSizeZ() - 1;
      }
      pxResult += gkSigma.get1DKernelValue(di) * originGrid.get(curpos);
    }
    target(i, j, k) = pxResult;
  }
  inline MACGrid &getArg0()
  {
    return originGrid;
  }
  typedef MACGrid type0;
  inline MACGrid &getArg1()
  {
    return target;
  }
  typedef MACGrid type1;
  inline GaussianKernelCreator &getArg2()
  {
    return gkSigma;
  }
  typedef GaussianKernelCreator type2;
  inline int &getArg3()
  {
    return cdir;
  }
  typedef int type3;
  void runMessage()
  {
    debMsg("Executing kernel KnBlurMACGridGauss ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      // 3D: parallelize over z slices (bnd = 0, full range)
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, originGrid, target, gkSigma, cdir);
    }
    else {
      // 2D: parallelize over rows, k fixed at 0
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, originGrid, target, gkSigma, cdir);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  MACGrid &originGrid;
  MACGrid &target;
  GaussianKernelCreator &gkSigma;
  int cdir;
};
+
//! Separable Gaussian blur of a MAC grid: one pass per axis, ping-ponging
//! through a temporary copy so each pass reads from a grid it does not write.
//! Returns the kernel width used.
int blurMacGrid(MACGrid &oG, MACGrid &tG, float si)
{
  GaussianKernelCreator tmGK(si);
  MACGrid tmpGrid(oG);
  KnBlurMACGridGauss(oG, tmpGrid, tmGK, 0);  // blur x
  KnBlurMACGridGauss(tmpGrid, tG, tmGK, 1);  // blur y
  if (tG.is3D()) {
    tmpGrid.copyFrom(tG);
    KnBlurMACGridGauss(tmpGrid, tG, tmGK, 2);  // blur z
  }
  return tmGK.mDim;
}
//! Auto-generated Python binding for blurMacGrid(); returns the kernel width
//! to Python via toPy.
static PyObject *_W_19(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "blurMacGrid", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      MACGrid &oG = *_args.getPtr<MACGrid>("oG", 0, &_lock);
      MACGrid &tG = *_args.getPtr<MACGrid>("tG", 1, &_lock);
      float si = _args.get<float>("si", 2, &_lock);
      _retval = toPy(blurMacGrid(oG, tG, si));
      _args.check();
    }
    pbFinalizePlugin(parent, "blurMacGrid", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("blurMacGrid", e.what());
    return 0;
  }
}
// static registration of the binding with the script interface
static const Pb::Register _RP_blurMacGrid("", "blurMacGrid", _W_19);
extern "C" {
void PbRegister_blurMacGrid()
{
  KEEP_UNUSED(_RP_blurMacGrid);
}
}
+
//! Gaussian blur of a scalar grid (Real instantiation of blurGrid);
//! returns the kernel width used.
int blurRealGrid(Grid<Real> &oG, Grid<Real> &tG, float si)
{
  return blurGrid<Real>(oG, tG, si);
}
//! Auto-generated Python binding for blurRealGrid(); returns the kernel width
//! to Python via toPy.
static PyObject *_W_20(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "blurRealGrid", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Real> &oG = *_args.getPtr<Grid<Real>>("oG", 0, &_lock);
      Grid<Real> &tG = *_args.getPtr<Grid<Real>>("tG", 1, &_lock);
      float si = _args.get<float>("si", 2, &_lock);
      _retval = toPy(blurRealGrid(oG, tG, si));
      _args.check();
    }
    pbFinalizePlugin(parent, "blurRealGrid", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("blurRealGrid", e.what());
    return 0;
  }
}
// static registration of the binding with the script interface
static const Pb::Register _RP_blurRealGrid("", "blurRealGrid", _W_20);
extern "C" {
void PbRegister_blurRealGrid()
{
  KEEP_UNUSED(_RP_blurRealGrid);
}
}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/kepsilon.cpp b/extern/mantaflow/preprocessed/plugin/kepsilon.cpp
new file mode 100644
index 00000000000..306db9e20cc
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/kepsilon.cpp
@@ -0,0 +1,578 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Turbulence modeling plugins
+ *
+ ******************************************************************************/
+
+#include "grid.h"
+#include "commonkernels.h"
+#include "vortexsheet.h"
+#include "conjugategrad.h"
+
+using namespace std;
+
+namespace Manta {
+
+// k-epsilon model constants
+const Real keCmu = 0.09;
+const Real keC1 = 1.44;
+const Real keC2 = 1.92;
+const Real keS1 = 1.0;
+const Real keS2 = 1.3;
+
+// k-epsilon limiters
+const Real keU0 = 1.0;
+const Real keImin = 2e-3;
+const Real keImax = 1.0;
+const Real keNuMin = 1e-3;
+const Real keNuMax = 5.0;
+
+//! clamp k and epsilon to limits
+
+struct KnTurbulenceClamp : public KernelBase {
+ KnTurbulenceClamp(
+ Grid<Real> &kgrid, Grid<Real> &egrid, Real minK, Real maxK, Real minNu, Real maxNu)
+ : KernelBase(&kgrid, 0),
+ kgrid(kgrid),
+ egrid(egrid),
+ minK(minK),
+ maxK(maxK),
+ minNu(minNu),
+ maxNu(maxNu)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ Grid<Real> &kgrid,
+ Grid<Real> &egrid,
+ Real minK,
+ Real maxK,
+ Real minNu,
+ Real maxNu) const
+ {
+ Real eps = egrid[idx];
+ Real ke = clamp(kgrid[idx], minK, maxK);
+ Real nu = keCmu * square(ke) / eps;
+ if (nu > maxNu)
+ eps = keCmu * square(ke) / maxNu;
+ if (nu < minNu)
+ eps = keCmu * square(ke) / minNu;
+
+ kgrid[idx] = ke;
+ egrid[idx] = eps;
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return kgrid;
+ }
+ typedef Grid<Real> type0;
+ inline Grid<Real> &getArg1()
+ {
+ return egrid;
+ }
+ typedef Grid<Real> type1;
+ inline Real &getArg2()
+ {
+ return minK;
+ }
+ typedef Real type2;
+ inline Real &getArg3()
+ {
+ return maxK;
+ }
+ typedef Real type3;
+ inline Real &getArg4()
+ {
+ return minNu;
+ }
+ typedef Real type4;
+ inline Real &getArg5()
+ {
+ return maxNu;
+ }
+ typedef Real type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnTurbulenceClamp ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, kgrid, egrid, minK, maxK, minNu, maxNu);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<Real> &kgrid;
+ Grid<Real> &egrid;
+ Real minK;
+ Real maxK;
+ Real minNu;
+ Real maxNu;
+};
+
+//! Compute k-epsilon production term P = 2*nu_T*sum_ij(Sij^2) and the turbulent viscosity
+//! nu_T=C_mu*k^2/eps
+
+struct KnComputeProduction : public KernelBase {
+ KnComputeProduction(const MACGrid &vel,
+ const Grid<Vec3> &velCenter,
+ const Grid<Real> &ke,
+ const Grid<Real> &eps,
+ Grid<Real> &prod,
+ Grid<Real> &nuT,
+ Grid<Real> *strain,
+ Real pscale = 1.0f)
+ : KernelBase(&vel, 1),
+ vel(vel),
+ velCenter(velCenter),
+ ke(ke),
+ eps(eps),
+ prod(prod),
+ nuT(nuT),
+ strain(strain),
+ pscale(pscale)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const MACGrid &vel,
+ const Grid<Vec3> &velCenter,
+ const Grid<Real> &ke,
+ const Grid<Real> &eps,
+ Grid<Real> &prod,
+ Grid<Real> &nuT,
+ Grid<Real> *strain,
+ Real pscale = 1.0f) const
+ {
+ Real curEps = eps(i, j, k);
+ if (curEps > 0) {
+ // turbulent viscosity: nu_T = C_mu * k^2/eps
+ Real curNu = keCmu * square(ke(i, j, k)) / curEps;
+
+ // compute Sij = 1/2 * (dU_i/dx_j + dU_j/dx_i)
+ Vec3 diag = Vec3(vel(i + 1, j, k).x, vel(i, j + 1, k).y, vel(i, j, k + 1).z) - vel(i, j, k);
+ Vec3 ux = 0.5 * (velCenter(i + 1, j, k) - velCenter(i - 1, j, k));
+ Vec3 uy = 0.5 * (velCenter(i, j + 1, k) - velCenter(i, j - 1, k));
+ Vec3 uz = 0.5 * (velCenter(i, j, k + 1) - velCenter(i, j, k - 1));
+ Real S12 = 0.5 * (ux.y + uy.x);
+ Real S13 = 0.5 * (ux.z + uz.x);
+ Real S23 = 0.5 * (uy.z + uz.y);
+ Real S2 = square(diag.x) + square(diag.y) + square(diag.z) + 2.0 * square(S12) +
+ 2.0 * square(S13) + 2.0 * square(S23);
+
+ // P = 2*nu_T*sum_ij(Sij^2)
+ prod(i, j, k) = 2.0 * curNu * S2 * pscale;
+ nuT(i, j, k) = curNu;
+ if (strain)
+ (*strain)(i, j, k) = sqrt(S2);
+ }
+ else {
+ prod(i, j, k) = 0;
+ nuT(i, j, k) = 0;
+ if (strain)
+ (*strain)(i, j, k) = 0;
+ }
+ }
+ inline const MACGrid &getArg0()
+ {
+ return vel;
+ }
+ typedef MACGrid type0;
+ inline const Grid<Vec3> &getArg1()
+ {
+ return velCenter;
+ }
+ typedef Grid<Vec3> type1;
+ inline const Grid<Real> &getArg2()
+ {
+ return ke;
+ }
+ typedef Grid<Real> type2;
+ inline const Grid<Real> &getArg3()
+ {
+ return eps;
+ }
+ typedef Grid<Real> type3;
+ inline Grid<Real> &getArg4()
+ {
+ return prod;
+ }
+ typedef Grid<Real> type4;
+ inline Grid<Real> &getArg5()
+ {
+ return nuT;
+ }
+ typedef Grid<Real> type5;
+ inline Grid<Real> *getArg6()
+ {
+ return strain;
+ }
+ typedef Grid<Real> type6;
+ inline Real &getArg7()
+ {
+ return pscale;
+ }
+ typedef Real type7;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnComputeProduction ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, vel, velCenter, ke, eps, prod, nuT, strain, pscale);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, vel, velCenter, ke, eps, prod, nuT, strain, pscale);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const MACGrid &vel;
+ const Grid<Vec3> &velCenter;
+ const Grid<Real> &ke;
+ const Grid<Real> &eps;
+ Grid<Real> &prod;
+ Grid<Real> &nuT;
+ Grid<Real> *strain;
+ Real pscale;
+};
+
+//! Compute k-epsilon production term P = 2*nu_T*sum_ij(Sij^2) and the turbulent viscosity
+//! nu_T=C_mu*k^2/eps
+
+void KEpsilonComputeProduction(const MACGrid &vel,
+ Grid<Real> &k,
+ Grid<Real> &eps,
+ Grid<Real> &prod,
+ Grid<Real> &nuT,
+ Grid<Real> *strain = 0,
+ Real pscale = 1.0f)
+{
+ // get centered velocity grid
+ Grid<Vec3> vcenter(k.getParent());
+ GetCentered(vcenter, vel);
+ FillInBoundary(vcenter, 1);
+
+ // compute limits
+ const Real minK = 1.5 * square(keU0) * square(keImin);
+ const Real maxK = 1.5 * square(keU0) * square(keImax);
+ KnTurbulenceClamp(k, eps, minK, maxK, keNuMin, keNuMax);
+
+ KnComputeProduction(vel, vcenter, k, eps, prod, nuT, strain, pscale);
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "KEpsilonComputeProduction", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 0, &_lock);
+ Grid<Real> &k = *_args.getPtr<Grid<Real>>("k", 1, &_lock);
+ Grid<Real> &eps = *_args.getPtr<Grid<Real>>("eps", 2, &_lock);
+ Grid<Real> &prod = *_args.getPtr<Grid<Real>>("prod", 3, &_lock);
+ Grid<Real> &nuT = *_args.getPtr<Grid<Real>>("nuT", 4, &_lock);
+ Grid<Real> *strain = _args.getPtrOpt<Grid<Real>>("strain", 5, 0, &_lock);
+ Real pscale = _args.getOpt<Real>("pscale", 6, 1.0f, &_lock);
+ _retval = getPyNone();
+ KEpsilonComputeProduction(vel, k, eps, prod, nuT, strain, pscale);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "KEpsilonComputeProduction", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("KEpsilonComputeProduction", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_KEpsilonComputeProduction("", "KEpsilonComputeProduction", _W_0);
+extern "C" {
+void PbRegister_KEpsilonComputeProduction()
+{
+ KEEP_UNUSED(_RP_KEpsilonComputeProduction);
+}
+}
+
+//! Integrate source terms of k-epsilon equation
+
+struct KnAddTurbulenceSource : public KernelBase {
+ KnAddTurbulenceSource(Grid<Real> &kgrid, Grid<Real> &egrid, const Grid<Real> &pgrid, Real dt)
+ : KernelBase(&kgrid, 0), kgrid(kgrid), egrid(egrid), pgrid(pgrid), dt(dt)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(
+ IndexInt idx, Grid<Real> &kgrid, Grid<Real> &egrid, const Grid<Real> &pgrid, Real dt) const
+ {
+ Real eps = egrid[idx], prod = pgrid[idx], ke = kgrid[idx];
+ if (ke <= 0)
+ ke = 1e-3; // pre-clamp to avoid nan
+
+ Real newK = ke + dt * (prod - eps);
+ Real newEps = eps + dt * (prod * keC1 - eps * keC2) * (eps / ke);
+ if (newEps <= 0)
+ newEps = 1e-4; // pre-clamp to avoid nan
+
+ kgrid[idx] = newK;
+ egrid[idx] = newEps;
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return kgrid;
+ }
+ typedef Grid<Real> type0;
+ inline Grid<Real> &getArg1()
+ {
+ return egrid;
+ }
+ typedef Grid<Real> type1;
+ inline const Grid<Real> &getArg2()
+ {
+ return pgrid;
+ }
+ typedef Grid<Real> type2;
+ inline Real &getArg3()
+ {
+ return dt;
+ }
+ typedef Real type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnAddTurbulenceSource ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, kgrid, egrid, pgrid, dt);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ Grid<Real> &kgrid;
+ Grid<Real> &egrid;
+ const Grid<Real> &pgrid;
+ Real dt;
+};
+
+//! Integrate source terms of k-epsilon equation
+void KEpsilonSources(Grid<Real> &k, Grid<Real> &eps, Grid<Real> &prod)
+{
+ Real dt = k.getParent()->getDt();
+
+ KnAddTurbulenceSource(k, eps, prod, dt);
+
+ // compute limits
+ const Real minK = 1.5 * square(keU0) * square(keImin);
+ const Real maxK = 1.5 * square(keU0) * square(keImax);
+ KnTurbulenceClamp(k, eps, minK, maxK, keNuMin, keNuMax);
+}
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "KEpsilonSources", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &k = *_args.getPtr<Grid<Real>>("k", 0, &_lock);
+ Grid<Real> &eps = *_args.getPtr<Grid<Real>>("eps", 1, &_lock);
+ Grid<Real> &prod = *_args.getPtr<Grid<Real>>("prod", 2, &_lock);
+ _retval = getPyNone();
+ KEpsilonSources(k, eps, prod);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "KEpsilonSources", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("KEpsilonSources", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_KEpsilonSources("", "KEpsilonSources", _W_1);
+extern "C" {
+void PbRegister_KEpsilonSources()
+{
+ KEEP_UNUSED(_RP_KEpsilonSources);
+}
+}
+
+//! Initialize the domain or boundary conditions
+void KEpsilonBcs(
+ const FlagGrid &flags, Grid<Real> &k, Grid<Real> &eps, Real intensity, Real nu, bool fillArea)
+{
+ // compute limits
+ const Real vk = 1.5 * square(keU0) * square(intensity);
+ const Real ve = keCmu * square(vk) / nu;
+
+ FOR_IDX(k)
+ {
+ if (fillArea || flags.isObstacle(idx)) {
+ k[idx] = vk;
+ eps[idx] = ve;
+ }
+ }
+}
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "KEpsilonBcs", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &k = *_args.getPtr<Grid<Real>>("k", 1, &_lock);
+ Grid<Real> &eps = *_args.getPtr<Grid<Real>>("eps", 2, &_lock);
+ Real intensity = _args.get<Real>("intensity", 3, &_lock);
+ Real nu = _args.get<Real>("nu", 4, &_lock);
+ bool fillArea = _args.get<bool>("fillArea", 5, &_lock);
+ _retval = getPyNone();
+ KEpsilonBcs(flags, k, eps, intensity, nu, fillArea);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "KEpsilonBcs", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("KEpsilonBcs", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_KEpsilonBcs("", "KEpsilonBcs", _W_2);
+extern "C" {
+void PbRegister_KEpsilonBcs()
+{
+ KEEP_UNUSED(_RP_KEpsilonBcs);
+}
+}
+
+//! Gradient diffusion smoothing. Not unconditionally stable -- should probably do substepping etc.
+void ApplyGradDiff(
+ const Grid<Real> &grid, Grid<Real> &res, const Grid<Real> &nu, Real dt, Real sigma)
+{
+ // should do this (but requires better boundary handling)
+ /*MACGrid grad(grid.getParent());
+ GradientOpMAC(grad, grid);
+ grad *= nu;
+ DivergenceOpMAC(res, grad);
+ res *= dt/sigma; */
+
+ LaplaceOp(res, grid);
+ res *= nu;
+ res *= dt / sigma;
+}
+
+//! Compute k-epsilon turbulent viscosity
+void KEpsilonGradientDiffusion(
+ Grid<Real> &k, Grid<Real> &eps, Grid<Real> &nuT, Real sigmaU = 4.0, MACGrid *vel = 0)
+{
+ Real dt = k.getParent()->getDt();
+ Grid<Real> res(k.getParent());
+
+ // gradient diffusion of k
+ ApplyGradDiff(k, res, nuT, dt, keS1);
+ k += res;
+
+ // gradient diffusion of epsilon
+ ApplyGradDiff(eps, res, nuT, dt, keS2);
+ eps += res;
+
+ // gradient diffusion of velocity
+ if (vel) {
+ Grid<Real> vc(k.getParent());
+ for (int c = 0; c < 3; c++) {
+ GetComponent(*vel, vc, c);
+ ApplyGradDiff(vc, res, nuT, dt, sigmaU);
+ vc += res;
+ SetComponent(*vel, vc, c);
+ }
+ }
+}
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "KEpsilonGradientDiffusion", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &k = *_args.getPtr<Grid<Real>>("k", 0, &_lock);
+ Grid<Real> &eps = *_args.getPtr<Grid<Real>>("eps", 1, &_lock);
+ Grid<Real> &nuT = *_args.getPtr<Grid<Real>>("nuT", 2, &_lock);
+ Real sigmaU = _args.getOpt<Real>("sigmaU", 3, 4.0, &_lock);
+ MACGrid *vel = _args.getPtrOpt<MACGrid>("vel", 4, 0, &_lock);
+ _retval = getPyNone();
+ KEpsilonGradientDiffusion(k, eps, nuT, sigmaU, vel);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "KEpsilonGradientDiffusion", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("KEpsilonGradientDiffusion", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_KEpsilonGradientDiffusion("", "KEpsilonGradientDiffusion", _W_3);
+extern "C" {
+void PbRegister_KEpsilonGradientDiffusion()
+{
+ KEEP_UNUSED(_RP_KEpsilonGradientDiffusion);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/meshplugins.cpp b/extern/mantaflow/preprocessed/plugin/meshplugins.cpp
new file mode 100644
index 00000000000..415bca153d0
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/meshplugins.cpp
@@ -0,0 +1,780 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Smoothing etc. for meshes
+ *
+ ******************************************************************************/
+
+/******************************************************************************/
+// Copyright note:
+//
+// These functions (C) Chris Wojtan
+// Long-term goal is to unify with his split&merge codebase
+//
+/******************************************************************************/
+
+#include <queue>
+#include <algorithm>
+#include "mesh.h"
+#include "kernel.h"
+#include "edgecollapse.h"
+#include <mesh.h>
+#include <stack>
+
+using namespace std;
+
+namespace Manta {
+
+//! Mesh smoothing
+/*! see Desbrun 99 "Implicit fairing of of irregular meshes using diffusion and curvature flow"*/
+void smoothMesh(Mesh &mesh, Real strength, int steps = 1, Real minLength = 1e-5)
+{
+ const Real dt = mesh.getParent()->getDt();
+ const Real str = min(dt * strength, (Real)1);
+ mesh.rebuildQuickCheck();
+
+ // calculate original mesh volume
+ Vec3 origCM;
+ Real origVolume = mesh.computeCenterOfMass(origCM);
+
+ // temp vertices
+ const int numCorners = mesh.numTris() * 3;
+ const int numNodes = mesh.numNodes();
+ vector<Vec3> temp(numNodes);
+ vector<bool> visited(numNodes);
+
+ for (int s = 0; s < steps; s++) {
+ // reset markers
+ for (size_t i = 0; i < visited.size(); i++)
+ visited[i] = false;
+
+ for (int c = 0; c < numCorners; c++) {
+ const int node = mesh.corners(c).node;
+ if (visited[node])
+ continue;
+
+ const Vec3 pos = mesh.nodes(node).pos;
+ Vec3 dx(0.0);
+ Real totalLen = 0;
+
+ // rotate around vertex
+ set<int> &ring = mesh.get1Ring(node).nodes;
+ for (set<int>::iterator it = ring.begin(); it != ring.end(); it++) {
+ Vec3 edge = mesh.nodes(*it).pos - pos;
+ Real len = norm(edge);
+
+ if (len > minLength) {
+ dx += edge * (1.0 / len);
+ totalLen += len;
+ }
+ else {
+ totalLen = 0.0;
+ break;
+ }
+ }
+ visited[node] = true;
+ temp[node] = pos;
+ if (totalLen != 0)
+ temp[node] += dx * (str / totalLen);
+ }
+
+ // copy back
+ for (int n = 0; n < numNodes; n++)
+ if (!mesh.isNodeFixed(n))
+ mesh.nodes(n).pos = temp[n];
+ }
+
+ // calculate new mesh volume
+ Vec3 newCM;
+ Real newVolume = mesh.computeCenterOfMass(newCM);
+
+ // preserve volume : scale relative to CM
+ Real beta;
+#if defined(WIN32) || defined(_WIN32)
+ beta = pow((Real)std::abs(origVolume / newVolume), (Real)(1. / 3.));
+#else
+ beta = cbrt(origVolume / newVolume);
+#endif
+
+ for (int n = 0; n < numNodes; n++)
+ if (!mesh.isNodeFixed(n))
+ mesh.nodes(n).pos = origCM + (mesh.nodes(n).pos - newCM) * beta;
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "smoothMesh", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Mesh &mesh = *_args.getPtr<Mesh>("mesh", 0, &_lock);
+ Real strength = _args.get<Real>("strength", 1, &_lock);
+ int steps = _args.getOpt<int>("steps", 2, 1, &_lock);
+ Real minLength = _args.getOpt<Real>("minLength", 3, 1e-5, &_lock);
+ _retval = getPyNone();
+ smoothMesh(mesh, strength, steps, minLength);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "smoothMesh", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("smoothMesh", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_smoothMesh("", "smoothMesh", _W_0);
+extern "C" {
+void PbRegister_smoothMesh()
+{
+ KEEP_UNUSED(_RP_smoothMesh);
+}
+}
+
+//! Subdivide and edgecollapse to guarantee mesh with edgelengths between
+//! min/maxLength and an angle below minAngle
+void subdivideMesh(
+ Mesh &mesh, Real minAngle, Real minLength, Real maxLength, bool cutTubes = false)
+{
+ // gather some statistics
+ int edgeSubdivs = 0, edgeCollsAngle = 0, edgeCollsLen = 0, edgeKill = 0;
+ mesh.rebuildQuickCheck();
+
+ vector<int> deletedNodes;
+ map<int, bool> taintedTris;
+ priority_queue<pair<Real, int>> pq;
+
+ //////////////////////////////////////////
+ // EDGE COLLAPSE //
+ // - particles marked for deletation //
+ //////////////////////////////////////////
+
+ for (int t = 0; t < mesh.numTris(); t++) {
+ if (taintedTris.find(t) != taintedTris.end())
+ continue;
+
+ // check if at least 2 nodes are marked for delete
+ bool k[3];
+ int numKill = 0;
+ for (int i = 0; i < 3; i++) {
+ k[i] = mesh.nodes(mesh.tris(t).c[i]).flags & Mesh::NfKillme;
+ if (k[i])
+ numKill++;
+ }
+ if (numKill < 2)
+ continue;
+
+ if (k[0] && k[1])
+ CollapseEdge(mesh,
+ t,
+ 2,
+ mesh.getEdge(t, 0),
+ mesh.getNode(t, 0),
+ deletedNodes,
+ taintedTris,
+ edgeKill,
+ cutTubes);
+ else if (k[1] && k[2])
+ CollapseEdge(mesh,
+ t,
+ 0,
+ mesh.getEdge(t, 1),
+ mesh.getNode(t, 1),
+ deletedNodes,
+ taintedTris,
+ edgeKill,
+ cutTubes);
+ else if (k[2] && k[0])
+ CollapseEdge(mesh,
+ t,
+ 1,
+ mesh.getEdge(t, 2),
+ mesh.getNode(t, 2),
+ deletedNodes,
+ taintedTris,
+ edgeKill,
+ cutTubes);
+ }
+
+ //////////////////////////////////////////
+ // EDGE COLLAPSING //
+ // - based on small triangle angle //
+ //////////////////////////////////////////
+
+ if (minAngle > 0) {
+ for (int t = 0; t < mesh.numTris(); t++) {
+ // we only want to run through the edge list ONCE.
+ // we achieve this in a method very similar to the above subdivision method.
+
+ // if this triangle has already been deleted, ignore it
+ if (taintedTris.find(t) != taintedTris.end())
+ continue;
+
+ // first we find the angles of this triangle
+ Vec3 e0 = mesh.getEdge(t, 0), e1 = mesh.getEdge(t, 1), e2 = mesh.getEdge(t, 2);
+ Vec3 ne0 = e0;
+ Vec3 ne1 = e1;
+ Vec3 ne2 = e2;
+ normalize(ne0);
+ normalize(ne1);
+ normalize(ne2);
+
+ // Real thisArea = sqrMag(cross(-e2,e0));
+ // small angle approximation says sin(x) = arcsin(x) = x,
+ // arccos(x) = pi/2 - arcsin(x),
+ // cos(x) = dot(A,B),
+ // so angle is approximately 1 - dot(A,B).
+ Real angle[3];
+ angle[0] = 1.0 - dot(ne0, -ne2);
+ angle[1] = 1.0 - dot(ne1, -ne0);
+ angle[2] = 1.0 - dot(ne2, -ne1);
+ Real worstAngle = angle[0];
+ int which = 0;
+ if (angle[1] < worstAngle) {
+ worstAngle = angle[1];
+ which = 1;
+ }
+ if (angle[2] < worstAngle) {
+ worstAngle = angle[2];
+ which = 2;
+ }
+
+ // then we see if the angle is too small
+ if (worstAngle < minAngle) {
+ Vec3 edgevect;
+ Vec3 endpoint;
+ switch (which) {
+ case 0:
+ endpoint = mesh.getNode(t, 1);
+ edgevect = e1;
+ break;
+ case 1:
+ endpoint = mesh.getNode(t, 2);
+ edgevect = e2;
+ break;
+ case 2:
+ endpoint = mesh.getNode(t, 0);
+ edgevect = e0;
+ break;
+ default:
+ break;
+ }
+
+ CollapseEdge(mesh,
+ t,
+ which,
+ edgevect,
+ endpoint,
+ deletedNodes,
+ taintedTris,
+ edgeCollsAngle,
+ cutTubes);
+ }
+ }
+ }
+
+ //////////////////////
+ // EDGE SUBDIVISION //
+ //////////////////////
+
+ Real maxLength2 = maxLength * maxLength;
+ for (int t = 0; t < mesh.numTris(); t++) {
+ // first we find the maximum length edge in this triangle
+ Vec3 e0 = mesh.getEdge(t, 0), e1 = mesh.getEdge(t, 1), e2 = mesh.getEdge(t, 2);
+ Real d0 = normSquare(e0);
+ Real d1 = normSquare(e1);
+ Real d2 = normSquare(e2);
+
+ Real longest = max(d0, max(d1, d2));
+ if (longest > maxLength2) {
+ pq.push(pair<Real, int>(longest, t));
+ }
+ }
+ if (maxLength > 0) {
+
+ while (!pq.empty() && pq.top().first > maxLength2) {
+ // we only want to run through the edge list ONCE
+ // and we want to subdivide the original edges before we subdivide any newer, shorter edges,
+ // so whenever we subdivide, we add the 2 new triangles on the end of the SurfaceTri vector
+ // and mark the original subdivided triangles for deletion.
+ // when we are done subdividing, we delete the obsolete triangles
+
+ int triA = pq.top().second;
+ pq.pop();
+
+ if (taintedTris.find(triA) != taintedTris.end())
+ continue;
+
+ // first we find the maximum length edge in this triangle
+ Vec3 e0 = mesh.getEdge(triA, 0), e1 = mesh.getEdge(triA, 1), e2 = mesh.getEdge(triA, 2);
+ Real d0 = normSquare(e0);
+ Real d1 = normSquare(e1);
+ Real d2 = normSquare(e2);
+
+ Vec3 edgevect;
+ Vec3 endpoint;
+ int which;
+ if (d0 > d1) {
+ if (d0 > d2) {
+ edgevect = e0;
+ endpoint = mesh.getNode(triA, 0);
+ ;
+ which = 2; // 2 opposite of edge 0-1
+ }
+ else {
+ edgevect = e2;
+ endpoint = mesh.getNode(triA, 2);
+ which = 1; // 1 opposite of edge 2-0
+ }
+ }
+ else {
+ if (d1 > d2) {
+ edgevect = e1;
+ endpoint = mesh.getNode(triA, 1);
+ which = 0; // 0 opposite of edge 1-2
+ }
+ else {
+ edgevect = e2;
+ endpoint = mesh.getNode(triA, 2);
+ which = 1; // 1 opposite of edge 2-0
+ }
+ }
+ // This edge is too long, so we split it in the middle
+
+ // *
+ // / \.
+ // /C0 \.
+ // / \.
+ // / \.
+ // / B \.
+ // / \.
+ // /C1 C2 \.
+ // *---------------*
+ // \C2 C1 /
+ // \ /
+ // \ A /
+ // \ /
+ // \ /
+ // \C0 /
+ // \ /
+ // *
+ //
+ // BECOMES
+ //
+ // *
+ // /|\.
+ // / | \.
+ // /C0|C0\.
+ // / | \.
+ // / B1 | B2 \.
+ // / | \.
+ // /C1 C2|C1 C2 \.
+ // *-------*-------*
+ // \C2 C1|C2 C1/
+ // \ | /
+ // \ A2 | A1 /
+ // \ | /
+ // \C0|C0/
+ // \ | /
+ // \|/
+ // *
+
+ int triB = -1;
+ bool haveB = false;
+ Corner ca_old[3], cb_old[3];
+ ca_old[0] = mesh.corners(triA, which);
+ ca_old[1] = mesh.corners(ca_old[0].next);
+ ca_old[2] = mesh.corners(ca_old[0].prev);
+ if (ca_old[0].opposite >= 0) {
+ cb_old[0] = mesh.corners(ca_old[0].opposite);
+ cb_old[1] = mesh.corners(cb_old[0].next);
+ cb_old[2] = mesh.corners(cb_old[0].prev);
+ triB = cb_old[0].tri;
+ haveB = true;
+ }
+ // else throw Error("nonmanifold");
+
+ // subdivide in the middle of the edge and create new triangles
+ Node newNode;
+ newNode.flags = 0;
+
+ newNode.pos = endpoint + 0.5 * edgevect; // fallback: linear average
+ // default: use butterfly
+ if (haveB)
+ newNode.pos = ModifiedButterflySubdivision(mesh, ca_old[0], cb_old[0], newNode.pos);
+
+ // find indices of two points of 'which'-edge
+ // merge flags
+ int P0 = ca_old[1].node;
+ int P1 = ca_old[2].node;
+ newNode.flags = mesh.nodes(P0).flags | mesh.nodes(P1).flags;
+
+ Real len0 = norm(mesh.nodes(P0).pos - newNode.pos);
+ Real len1 = norm(mesh.nodes(P1).pos - newNode.pos);
+
+ // remove P0/P1 1-ring connection
+ mesh.get1Ring(P0).nodes.erase(P1);
+ mesh.get1Ring(P1).nodes.erase(P0);
+ mesh.get1Ring(P0).tris.erase(triA);
+ mesh.get1Ring(P1).tris.erase(triA);
+ mesh.get1Ring(ca_old[0].node).tris.erase(triA);
+ if (haveB) {
+ mesh.get1Ring(P0).tris.erase(triB);
+ mesh.get1Ring(P1).tris.erase(triB);
+ mesh.get1Ring(cb_old[0].node).tris.erase(triB);
+ }
+
+ // init channel properties for new node
+ for (int i = 0; i < mesh.numNodeChannels(); i++) {
+ mesh.nodeChannel(i)->addInterpol(P0, P1, len0 / (len0 + len1));
+ }
+
+ // write to array
+ mesh.addTri(Triangle(ca_old[0].node, ca_old[1].node, mesh.numNodes()));
+ mesh.addTri(Triangle(ca_old[0].node, mesh.numNodes(), ca_old[2].node));
+ if (haveB) {
+ mesh.addTri(Triangle(cb_old[0].node, cb_old[1].node, mesh.numNodes()));
+ mesh.addTri(Triangle(cb_old[0].node, mesh.numNodes(), cb_old[2].node));
+ }
+ mesh.addNode(newNode);
+
+ const int nt = haveB ? 4 : 2;
+ int triA1 = mesh.numTris() - nt;
+ int triA2 = mesh.numTris() - nt + 1;
+ int triB1 = 0, triB2 = 0;
+ if (haveB) {
+ triB1 = mesh.numTris() - nt + 2;
+ triB2 = mesh.numTris() - nt + 3;
+ }
+ mesh.tris(triA1).flags = mesh.tris(triA).flags;
+ mesh.tris(triA2).flags = mesh.tris(triA).flags;
+ mesh.tris(triB1).flags = mesh.tris(triB).flags;
+ mesh.tris(triB2).flags = mesh.tris(triB).flags;
+
+ // connect new triangles to outside triangles,
+ // and connect outside triangles to these new ones
+ for (int c = 0; c < 3; c++)
+ mesh.addCorner(Corner(triA1, mesh.tris(triA1).c[c]));
+ for (int c = 0; c < 3; c++)
+ mesh.addCorner(Corner(triA2, mesh.tris(triA2).c[c]));
+ if (haveB) {
+ for (int c = 0; c < 3; c++)
+ mesh.addCorner(Corner(triB1, mesh.tris(triB1).c[c]));
+ for (int c = 0; c < 3; c++)
+ mesh.addCorner(Corner(triB2, mesh.tris(triB2).c[c]));
+ }
+
+ int baseIdx = 3 * (mesh.numTris() - nt);
+ Corner *cBase = &mesh.corners(baseIdx);
+
+ // set next/prev
+ for (int t = 0; t < nt; t++)
+ for (int c = 0; c < 3; c++) {
+ cBase[t * 3 + c].next = baseIdx + t * 3 + ((c + 1) % 3);
+ cBase[t * 3 + c].prev = baseIdx + t * 3 + ((c + 2) % 3);
+ }
+
+ // set opposites
+ // A1
+ cBase[0].opposite = haveB ? (baseIdx + 9) : -1;
+ cBase[1].opposite = baseIdx + 5;
+ cBase[2].opposite = -1;
+ if (ca_old[2].opposite >= 0) {
+ cBase[2].opposite = ca_old[2].opposite;
+ mesh.corners(cBase[2].opposite).opposite = baseIdx + 2;
+ }
+ // A2
+ cBase[3].opposite = haveB ? (baseIdx + 6) : -1;
+ cBase[4].opposite = -1;
+ if (ca_old[1].opposite >= 0) {
+ cBase[4].opposite = ca_old[1].opposite;
+ mesh.corners(cBase[4].opposite).opposite = baseIdx + 4;
+ }
+ cBase[5].opposite = baseIdx + 1;
+ if (haveB) {
+ // B1
+ cBase[6].opposite = baseIdx + 3;
+ cBase[7].opposite = baseIdx + 11;
+ cBase[8].opposite = -1;
+ if (cb_old[2].opposite >= 0) {
+ cBase[8].opposite = cb_old[2].opposite;
+ mesh.corners(cBase[8].opposite).opposite = baseIdx + 8;
+ }
+ // B2
+ cBase[9].opposite = baseIdx + 0;
+ cBase[10].opposite = -1;
+ if (cb_old[1].opposite >= 0) {
+ cBase[10].opposite = cb_old[1].opposite;
+ mesh.corners(cBase[10].opposite).opposite = baseIdx + 10;
+ }
+ cBase[11].opposite = baseIdx + 7;
+ }
+
+ ////////////////////
+ // mark the two original triangles for deletion
+ taintedTris[triA] = true;
+ mesh.removeTriFromLookup(triA);
+ if (haveB) {
+ taintedTris[triB] = true;
+ mesh.removeTriFromLookup(triB);
+ }
+
+ Real areaA1 = mesh.getFaceArea(triA1), areaA2 = mesh.getFaceArea(triA2);
+ Real areaB1 = 0, areaB2 = 0;
+ if (haveB) {
+ areaB1 = mesh.getFaceArea(triB1);
+ areaB2 = mesh.getFaceArea(triB2);
+ }
+
+ // add channel props for new triangles
+ for (int i = 0; i < mesh.numTriChannels(); i++) {
+ mesh.triChannel(i)->addSplit(triA, areaA1 / (areaA1 + areaA2));
+ mesh.triChannel(i)->addSplit(triA, areaA2 / (areaA1 + areaA2));
+ if (haveB) {
+ mesh.triChannel(i)->addSplit(triB, areaB1 / (areaB1 + areaB2));
+ mesh.triChannel(i)->addSplit(triB, areaB2 / (areaB1 + areaB2));
+ }
+ }
+
+ // add the four new triangles to the prority queue
+ for (int i = mesh.numTris() - nt; i < mesh.numTris(); i++) {
+ // find the maximum length edge in this triangle
+ Vec3 ne0 = mesh.getEdge(i, 0), ne1 = mesh.getEdge(i, 1), ne2 = mesh.getEdge(i, 2);
+ Real nd0 = normSquare(ne0);
+ Real nd1 = normSquare(ne1);
+ Real nd2 = normSquare(ne2);
+ Real longest = max(nd0, max(nd1, nd2));
+ // longest = (int)(longest * 1e2) / 1e2; // HACK: truncate
+ pq.push(pair<Real, int>(longest, i));
+ }
+ edgeSubdivs++;
+ }
+ }
+
+ //////////////////////////////////////////
+ // EDGE COLLAPSING //
+ // - based on short edge length //
+ //////////////////////////////////////////
+ if (minLength > 0) {
+ const Real minLength2 = minLength * minLength;
+ for (int t = 0; t < mesh.numTris(); t++) {
+ // we only want to run through the edge list ONCE.
+ // we achieve this in a method very similar to the above subdivision method.
+
+ // NOTE:
+ // priority queue does not work so great in the edge collapse case,
+ // because collapsing one triangle affects the edge lengths
+ // of many neighbor triangles,
+ // and we do not update their maximum edge length in the queue.
+
+ // if this triangle has already been deleted, ignore it
+ // if(taintedTris[t])
+ // continue;
+
+ if (taintedTris.find(t) != taintedTris.end())
+ continue;
+
+ // first we find the minimum length edge in this triangle
+ Vec3 e0 = mesh.getEdge(t, 0), e1 = mesh.getEdge(t, 1), e2 = mesh.getEdge(t, 2);
+ Real d0 = normSquare(e0);
+ Real d1 = normSquare(e1);
+ Real d2 = normSquare(e2);
+
+ Vec3 edgevect;
+ Vec3 endpoint;
+ Real dist2;
+ int which;
+ if (d0 < d1) {
+ if (d0 < d2) {
+ dist2 = d0;
+ edgevect = e0;
+ endpoint = mesh.getNode(t, 0);
+ which = 2; // 2 opposite of edge 0-1
+ }
+ else {
+ dist2 = d2;
+ edgevect = e2;
+ endpoint = mesh.getNode(t, 2);
+ which = 1; // 1 opposite of edge 2-0
+ }
+ }
+ else {
+ if (d1 < d2) {
+ dist2 = d1;
+ edgevect = e1;
+ endpoint = mesh.getNode(t, 1);
+ which = 0; // 0 opposite of edge 1-2
+ }
+ else {
+ dist2 = d2;
+ edgevect = e2;
+ endpoint = mesh.getNode(t, 2);
+ which = 1; // 1 opposite of edge 2-0
+ }
+ }
+ // then we see if the min length edge is too short
+ if (dist2 < minLength2) {
+ CollapseEdge(
+ mesh, t, which, edgevect, endpoint, deletedNodes, taintedTris, edgeCollsLen, cutTubes);
+ }
+ }
+ }
+ // cleanup nodes and triangles marked for deletion
+
+ // we run backwards through the deleted array,
+ // replacing triangles with ones from the back
+ // (this avoids the potential problem of overwriting a triangle
+ // with a to-be-deleted triangle)
+ std::map<int, bool>::reverse_iterator tti = taintedTris.rbegin();
+ for (; tti != taintedTris.rend(); tti++)
+ mesh.removeTri(tti->first);
+
+ mesh.removeNodes(deletedNodes);
+ cout << "Surface subdivision finished with " << mesh.numNodes() << " surface nodes and "
+ << mesh.numTris();
+ cout << " surface triangles, edgeSubdivs:" << edgeSubdivs << ", edgeCollapses: " << edgeCollsLen;
+ cout << " + " << edgeCollsAngle << " + " << edgeKill << endl;
+ // mesh.sanityCheck();
+}
+// Auto-generated Python wrapper for subdivideMesh(): unpacks positional/keyword
+// arguments from Python, runs the plugin between timing hooks, and returns
+// Python None. Errors are converted to a Python-side error via pbSetError.
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "subdivideMesh", !noTiming);
+    PyObject *_retval = 0;
+    {
+      // ArgLocker keeps the grabbed Python objects alive for the call duration
+      ArgLocker _lock;
+      Mesh &mesh = *_args.getPtr<Mesh>("mesh", 0, &_lock);
+      Real minAngle = _args.get<Real>("minAngle", 1, &_lock);
+      Real minLength = _args.get<Real>("minLength", 2, &_lock);
+      Real maxLength = _args.get<Real>("maxLength", 3, &_lock);
+      bool cutTubes = _args.getOpt<bool>("cutTubes", 4, false, &_lock);
+      _retval = getPyNone();
+      subdivideMesh(mesh, minAngle, minLength, maxLength, cutTubes);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "subdivideMesh", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("subdivideMesh", e.what());
+    return 0;
+  }
+}
+// Register the wrapper under the Python name "subdivideMesh".
+static const Pb::Register _RP_subdivideMesh("", "subdivideMesh", _W_1);
+extern "C" {
+void PbRegister_subdivideMesh()
+{
+  KEEP_UNUSED(_RP_subdivideMesh);
+}
+}
+
+//! Delete all connected triangle components of the mesh that contain fewer
+//! than 'elements' triangles, along with the nodes that belong only to them.
+//! Connectivity is traversed through corner 'opposite' links (flood fill).
+void killSmallComponents(Mesh &mesh, int elements = 10)
+{
+  const int num = mesh.numTris();
+  vector<int> comp(num);
+  vector<int> numEl;
+  vector<int> deletedNodes;
+  vector<bool> isNodeDel(mesh.numNodes());
+  map<int, bool> taintedTris;
+  // enumerate components: comp[i] == 0 means triangle i not yet visited;
+  // each new seed triangle starts component id 'cur' and is flood-filled
+  int cur = 0;
+  for (int i = 0; i < num; i++) {
+    if (comp[i] == 0) {
+      cur++;
+      comp[i] = cur;
+
+      // iterative DFS over triangle adjacency (avoids recursion depth limits)
+      stack<int> stack;
+      stack.push(i);
+      int cnt = 1;
+      while (!stack.empty()) {
+        int tri = stack.top();
+        stack.pop();
+        for (int c = 0; c < 3; c++) {
+          int op = mesh.corners(tri, c).opposite;
+          if (op < 0)
+            continue;  // boundary edge, no neighbor triangle
+          int ntri = mesh.corners(op).tri;
+          if (comp[ntri] == 0) {
+            comp[ntri] = cur;
+            stack.push(ntri);
+            cnt++;
+          }
+        }
+      }
+      numEl.push_back(cnt);  // triangle count of component 'cur'
+    }
+  }
+  // kill small components: mark their triangles and (once) their nodes
+  for (int j = 0; j < num; j++) {
+    if (numEl[comp[j] - 1] < elements) {
+      taintedTris[j] = true;
+      for (int c = 0; c < 3; c++) {
+        int n = mesh.tris(j).c[c];
+        if (!isNodeDel[n]) {
+          isNodeDel[n] = true;
+          deletedNodes.push_back(n);
+        }
+      }
+    }
+  }
+
+  // remove in reverse index order so swap-with-last removal cannot move a
+  // still-to-be-deleted triangle into an already-processed slot
+  std::map<int, bool>::reverse_iterator tti = taintedTris.rbegin();
+  for (; tti != taintedTris.rend(); tti++)
+    mesh.removeTri(tti->first);
+
+  mesh.removeNodes(deletedNodes);
+
+  if (!taintedTris.empty())
+    cout << "Killed small components : " << deletedNodes.size() << " nodes, " << taintedTris.size()
+         << " tris deleted." << endl;
+}
+// Auto-generated Python wrapper for killSmallComponents(); see _W_1 pattern:
+// argument unpacking, timing hooks, None return, exception -> pbSetError.
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "killSmallComponents", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      Mesh &mesh = *_args.getPtr<Mesh>("mesh", 0, &_lock);
+      int elements = _args.getOpt<int>("elements", 1, 10, &_lock);
+      _retval = getPyNone();
+      killSmallComponents(mesh, elements);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "killSmallComponents", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("killSmallComponents", e.what());
+    return 0;
+  }
+}
+// Register the wrapper under the Python name "killSmallComponents".
+static const Pb::Register _RP_killSmallComponents("", "killSmallComponents", _W_2);
+extern "C" {
+void PbRegister_killSmallComponents()
+{
+  KEEP_UNUSED(_RP_killSmallComponents);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/pressure.cpp b/extern/mantaflow/preprocessed/plugin/pressure.cpp
new file mode 100644
index 00000000000..7def2669e36
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/pressure.cpp
@@ -0,0 +1,1511 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Plugins for pressure correction: solve_pressure, and ghost fluid helpers
+ *
+ ******************************************************************************/
+#include "vectorbase.h"
+#include "kernel.h"
+#include "conjugategrad.h"
+#include "multigrid.h"
+
+using namespace std;
+namespace Manta {
+
+//! Preconditioner choice for the CG pressure solver.
+// - PcNone: plain (unpreconditioned) CG
+// - PcMIC: modified incomplete Cholesky preconditioner
+// - PcMGDynamic: multigrid preconditioner, rebuilt for each solve
+// - PcMGStatic: multigrid preconditioner built only once (faster than
+//   PcMGDynamic, but valid only if the Poisson equation does not change)
+enum Preconditioner { PcNone = 0, PcMIC = 1, PcMGDynamic = 2, PcMGStatic = 3 };
+
+inline static Real surfTensHelper(const IndexInt idx,
+ const int offset,
+ const Grid<Real> &phi,
+ const Grid<Real> &curv,
+ const Real surfTens,
+ const Real gfClamp);
+
+//! Kernel: Construct the right-hand side of the Poisson equation: the negative
+//! velocity divergence per fluid cell, optionally weighted by face fractions
+//! (2nd order obstacle boundaries), augmented by obstacle velocities, surface
+//! tension (ghost fluid) terms, and a per-cell divergence correction.
+//! Also reduces the RHS sum and fluid-cell count (used by the
+//! enforceCompatibility option of the pressure solve).
+
+struct MakeRhs : public KernelBase {
+  MakeRhs(const FlagGrid &flags,
+          Grid<Real> &rhs,
+          const MACGrid &vel,
+          const Grid<Real> *perCellCorr,
+          const MACGrid *fractions,
+          const MACGrid *obvel,
+          const Grid<Real> *phi,
+          const Grid<Real> *curv,
+          const Real surfTens,
+          const Real gfClamp)
+      : KernelBase(&flags, 1),
+        flags(flags),
+        rhs(rhs),
+        vel(vel),
+        perCellCorr(perCellCorr),
+        fractions(fractions),
+        obvel(obvel),
+        phi(phi),
+        curv(curv),
+        surfTens(surfTens),
+        gfClamp(gfClamp),
+        cnt(0),
+        sum(0)
+  {
+    runMessage();
+    run();
+  }
+  // Per-cell body; cnt/sum are thread-local reduction accumulators.
+  inline void op(int i,
+                 int j,
+                 int k,
+                 const FlagGrid &flags,
+                 Grid<Real> &rhs,
+                 const MACGrid &vel,
+                 const Grid<Real> *perCellCorr,
+                 const MACGrid *fractions,
+                 const MACGrid *obvel,
+                 const Grid<Real> *phi,
+                 const Grid<Real> *curv,
+                 const Real surfTens,
+                 const Real gfClamp,
+                 int &cnt,
+                 double &sum)
+  {
+    if (!flags.isFluid(i, j, k)) {
+      rhs(i, j, k) = 0;
+      return;
+    }
+
+    // compute negative divergence
+    // no flag checks: assumes vel at obstacle interfaces is set to zero
+    Real set(0);
+    if (!fractions) {
+      set = vel(i, j, k).x - vel(i + 1, j, k).x + vel(i, j, k).y - vel(i, j + 1, k).y;
+      if (vel.is3D())
+        set += vel(i, j, k).z - vel(i, j, k + 1).z;
+    }
+    else {
+      // 2nd order boundaries: weight each face flux by its fluid fraction
+      set = (*fractions)(i, j, k).x * vel(i, j, k).x -
+            (*fractions)(i + 1, j, k).x * vel(i + 1, j, k).x +
+            (*fractions)(i, j, k).y * vel(i, j, k).y -
+            (*fractions)(i, j + 1, k).y * vel(i, j + 1, k).y;
+      if (vel.is3D())
+        set += (*fractions)(i, j, k).z * vel(i, j, k).z -
+               (*fractions)(i, j, k + 1).z * vel(i, j, k + 1).z;
+
+      // compute divergence from obstacle by using obstacle velocity (optional);
+      // the solid part of each face (1 - fraction) contributes obvel flux
+      if (obvel) {
+        set += (1 - (*fractions)(i, j, k).x) * (*obvel)(i, j, k).x -
+               (1 - (*fractions)(i + 1, j, k).x) * (*obvel)(i + 1, j, k).x +
+               (1 - (*fractions)(i, j, k).y) * (*obvel)(i, j, k).y -
+               (1 - (*fractions)(i, j + 1, k).y) * (*obvel)(i, j + 1, k).y;
+        if (obvel->is3D())
+          set += (1 - (*fractions)(i, j, k).z) * (*obvel)(i, j, k).z -
+                 (1 - (*fractions)(i, j, k + 1).z) * (*obvel)(i, j, k + 1).z;
+      }
+    }
+
+    // compute surface tension effect (optional): add ghost-fluid surface
+    // tension terms on every face bordering an empty cell
+    if (phi && curv) {
+      const IndexInt idx = flags.index(i, j, k);
+      const int X = flags.getStrideX(), Y = flags.getStrideY(), Z = flags.getStrideZ();
+      if (flags.isEmpty(i - 1, j, k))
+        set += surfTensHelper(idx, -X, *phi, *curv, surfTens, gfClamp);
+      if (flags.isEmpty(i + 1, j, k))
+        set += surfTensHelper(idx, +X, *phi, *curv, surfTens, gfClamp);
+      if (flags.isEmpty(i, j - 1, k))
+        set += surfTensHelper(idx, -Y, *phi, *curv, surfTens, gfClamp);
+      if (flags.isEmpty(i, j + 1, k))
+        set += surfTensHelper(idx, +Y, *phi, *curv, surfTens, gfClamp);
+      if (vel.is3D()) {
+        if (flags.isEmpty(i, j, k - 1))
+          set += surfTensHelper(idx, -Z, *phi, *curv, surfTens, gfClamp);
+        if (flags.isEmpty(i, j, k + 1))
+          set += surfTensHelper(idx, +Z, *phi, *curv, surfTens, gfClamp);
+      }
+    }
+
+    // per cell divergence correction (optional)
+    if (perCellCorr)
+      set += perCellCorr->get(i, j, k);
+
+    // obtain sum, cell count
+    sum += set;
+    cnt++;
+
+    rhs(i, j, k) = set;
+  }
+  inline const FlagGrid &getArg0()
+  {
+    return flags;
+  }
+  typedef FlagGrid type0;
+  inline Grid<Real> &getArg1()
+  {
+    return rhs;
+  }
+  typedef Grid<Real> type1;
+  inline const MACGrid &getArg2()
+  {
+    return vel;
+  }
+  typedef MACGrid type2;
+  inline const Grid<Real> *getArg3()
+  {
+    return perCellCorr;
+  }
+  typedef Grid<Real> type3;
+  inline const MACGrid *getArg4()
+  {
+    return fractions;
+  }
+  typedef MACGrid type4;
+  inline const MACGrid *getArg5()
+  {
+    return obvel;
+  }
+  typedef MACGrid type5;
+  inline const Grid<Real> *getArg6()
+  {
+    return phi;
+  }
+  typedef Grid<Real> type6;
+  inline const Grid<Real> *getArg7()
+  {
+    return curv;
+  }
+  typedef Grid<Real> type7;
+  inline const Real &getArg8()
+  {
+    return surfTens;
+  }
+  typedef Real type8;
+  inline const Real &getArg9()
+  {
+    return gfClamp;
+  }
+  typedef Real type9;
+  void runMessage()
+  {
+    debMsg("Executing kernel MakeRhs ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB body: iterate the 1-cell-inset interior; the outer parallel dimension
+  // is z in 3D and y in 2D (k fixed to 0).
+  void operator()(const tbb::blocked_range<IndexInt> &__r)
+  {
+    const int _maxX = maxX;
+    const int _maxY = maxY;
+    if (maxZ > 1) {
+      for (int k = __r.begin(); k != (int)__r.end(); k++)
+        for (int j = 1; j < _maxY; j++)
+          for (int i = 1; i < _maxX; i++)
+            op(i,
+               j,
+               k,
+               flags,
+               rhs,
+               vel,
+               perCellCorr,
+               fractions,
+               obvel,
+               phi,
+               curv,
+               surfTens,
+               gfClamp,
+               cnt,
+               sum);
+    }
+    else {
+      const int k = 0;
+      for (int j = __r.begin(); j != (int)__r.end(); j++)
+        for (int i = 1; i < _maxX; i++)
+          op(i,
+             j,
+             k,
+             flags,
+             rhs,
+             vel,
+             perCellCorr,
+             fractions,
+             obvel,
+             phi,
+             curv,
+             surfTens,
+             gfClamp,
+             cnt,
+             sum);
+    }
+  }
+  void run()
+  {
+    if (maxZ > 1)
+      tbb::parallel_reduce(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+    else
+      tbb::parallel_reduce(tbb::blocked_range<IndexInt>(1, maxY), *this);
+  }
+  // TBB splitting constructor: per-thread copy restarts reductions at zero.
+  MakeRhs(MakeRhs &o, tbb::split)
+      : KernelBase(o),
+        flags(o.flags),
+        rhs(o.rhs),
+        vel(o.vel),
+        perCellCorr(o.perCellCorr),
+        fractions(o.fractions),
+        obvel(o.obvel),
+        phi(o.phi),
+        curv(o.curv),
+        surfTens(o.surfTens),
+        gfClamp(o.gfClamp),
+        cnt(0),
+        sum(0)
+  {
+  }
+  // TBB join: combine partial reductions from split bodies.
+  void join(const MakeRhs &o)
+  {
+    cnt += o.cnt;
+    sum += o.sum;
+  }
+  const FlagGrid &flags;
+  Grid<Real> &rhs;
+  const MACGrid &vel;
+  const Grid<Real> *perCellCorr;
+  const MACGrid *fractions;
+  const MACGrid *obvel;
+  const Grid<Real> *phi;
+  const Grid<Real> *curv;
+  const Real surfTens;
+  const Real gfClamp;
+  // reduction outputs: number of fluid cells and sum of rhs over them
+  int cnt;
+  double sum;
+};
+
+//! Kernel: make velocity divergence free by subtracting the pressure gradient.
+//! Fluid faces get the full gradient; fluid/empty faces treat the empty-side
+//! pressure as zero; velocities in outflow cells are left untouched.
+
+struct knCorrectVelocity : public KernelBase {
+  knCorrectVelocity(const FlagGrid &flags, MACGrid &vel, const Grid<Real> &pressure)
+      : KernelBase(&flags, 1), flags(flags), vel(vel), pressure(pressure)
+  {
+    runMessage();
+    run();
+  }
+  inline void op(
+      int i, int j, int k, const FlagGrid &flags, MACGrid &vel, const Grid<Real> &pressure) const
+  {
+    const IndexInt idx = flags.index(i, j, k);
+    if (flags.isFluid(idx)) {
+      // fluid-fluid faces: subtract full pressure difference
+      if (flags.isFluid(i - 1, j, k))
+        vel[idx].x -= (pressure[idx] - pressure(i - 1, j, k));
+      if (flags.isFluid(i, j - 1, k))
+        vel[idx].y -= (pressure[idx] - pressure(i, j - 1, k));
+      if (flags.is3D() && flags.isFluid(i, j, k - 1))
+        vel[idx].z -= (pressure[idx] - pressure(i, j, k - 1));
+
+      // fluid-empty faces: empty-side pressure is treated as zero
+      if (flags.isEmpty(i - 1, j, k))
+        vel[idx].x -= pressure[idx];
+      if (flags.isEmpty(i, j - 1, k))
+        vel[idx].y -= pressure[idx];
+      if (flags.is3D() && flags.isEmpty(i, j, k - 1))
+        vel[idx].z -= pressure[idx];
+    }
+    else if (flags.isEmpty(idx) &&
+             !flags.isOutflow(idx)) {  // don't change velocities in outflow cells
+      // empty cell bordering fluid: apply gradient from the fluid side,
+      // otherwise zero out the face velocity
+      if (flags.isFluid(i - 1, j, k))
+        vel[idx].x += pressure(i - 1, j, k);
+      else
+        vel[idx].x = 0.f;
+      if (flags.isFluid(i, j - 1, k))
+        vel[idx].y += pressure(i, j - 1, k);
+      else
+        vel[idx].y = 0.f;
+      if (flags.is3D()) {
+        if (flags.isFluid(i, j, k - 1))
+          vel[idx].z += pressure(i, j, k - 1);
+        else
+          vel[idx].z = 0.f;
+      }
+    }
+  }
+  inline const FlagGrid &getArg0()
+  {
+    return flags;
+  }
+  typedef FlagGrid type0;
+  inline MACGrid &getArg1()
+  {
+    return vel;
+  }
+  typedef MACGrid type1;
+  inline const Grid<Real> &getArg2()
+  {
+    return pressure;
+  }
+  typedef Grid<Real> type2;
+  void runMessage()
+  {
+    debMsg("Executing kernel knCorrectVelocity ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB body: interior loop, parallel over z (3D) or y (2D).
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    const int _maxX = maxX;
+    const int _maxY = maxY;
+    if (maxZ > 1) {
+      for (int k = __r.begin(); k != (int)__r.end(); k++)
+        for (int j = 1; j < _maxY; j++)
+          for (int i = 1; i < _maxX; i++)
+            op(i, j, k, flags, vel, pressure);
+    }
+    else {
+      const int k = 0;
+      for (int j = __r.begin(); j != (int)__r.end(); j++)
+        for (int i = 1; i < _maxX; i++)
+          op(i, j, k, flags, vel, pressure);
+    }
+  }
+  void run()
+  {
+    if (maxZ > 1)
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+    else
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+  }
+  const FlagGrid &flags;
+  MACGrid &vel;
+  const Grid<Real> &pressure;
+};
+
+// *****************************************************************************
+// Ghost fluid helpers
+
+// calculate fraction filled with liquid (note, assumes inside value is < outside!)
+// Returns inside/(inside-outside), clamped to [0,1]; falls back to 0.5 when
+// the denominator is not safely negative (degenerate / near-equal phi values).
+inline static Real thetaHelper(const Real inside, const Real outside)
+{
+  const Real denom = inside - outside;
+  if (denom > -1e-04)
+    return 0.5;  // should always be neg, and large enough...
+  return std::max(Real(0), std::min(Real(1), inside / denom));
+}
+
+// calculate ghost fluid factor, cell at idx should be a fluid cell
+// Computes (1 - 1/alpha) with alpha the liquid fraction toward the neighbor.
+// NOTE(review): when alpha < gfClamp the function returns gfClamp itself
+// (via "return alpha = gfClamp"), not (1 - 1/gfClamp) — this matches the
+// upstream mantaflow source; confirm intent before changing generated code.
+inline static Real ghostFluidHelper(const IndexInt idx,
+                                    const int offset,
+                                    const Grid<Real> &phi,
+                                    const Real gfClamp)
+{
+  Real alpha = thetaHelper(phi[idx], phi[idx + offset]);
+  if (alpha < gfClamp)
+    return alpha = gfClamp;
+  return (1. - (1. / alpha));
+}
+
+// Ghost-fluid surface tension contribution across the face between the fluid
+// cell at idx and its (empty) neighbor at idx+offset, scaled by surfTens and
+// using the curvature grid on both sides.
+inline static Real surfTensHelper(const IndexInt idx,
+                                  const int offset,
+                                  const Grid<Real> &phi,
+                                  const Grid<Real> &curv,
+                                  const Real surfTens,
+                                  const Real gfClamp)
+{
+  return surfTens * (curv[idx + offset] - ghostFluidHelper(idx, offset, phi, gfClamp) * curv[idx]);
+}
+
+//! Kernel: Adapt A0 for ghost fluid — for every fluid cell, subtract the
+//! ghost-fluid factor from the matrix diagonal for each face that borders an
+//! empty cell, so the free surface is represented in the Poisson system.
+
+struct ApplyGhostFluidDiagonal : public KernelBase {
+  ApplyGhostFluidDiagonal(Grid<Real> &A0,
+                          const FlagGrid &flags,
+                          const Grid<Real> &phi,
+                          const Real gfClamp)
+      : KernelBase(&A0, 1), A0(A0), flags(flags), phi(phi), gfClamp(gfClamp)
+  {
+    runMessage();
+    run();
+  }
+  inline void op(int i,
+                 int j,
+                 int k,
+                 Grid<Real> &A0,
+                 const FlagGrid &flags,
+                 const Grid<Real> &phi,
+                 const Real gfClamp) const
+  {
+    const int X = flags.getStrideX(), Y = flags.getStrideY(), Z = flags.getStrideZ();
+    const IndexInt idx = flags.index(i, j, k);
+    if (!flags.isFluid(idx))
+      return;
+
+    // one diagonal correction per face adjacent to an empty cell
+    if (flags.isEmpty(i - 1, j, k))
+      A0[idx] -= ghostFluidHelper(idx, -X, phi, gfClamp);
+    if (flags.isEmpty(i + 1, j, k))
+      A0[idx] -= ghostFluidHelper(idx, +X, phi, gfClamp);
+    if (flags.isEmpty(i, j - 1, k))
+      A0[idx] -= ghostFluidHelper(idx, -Y, phi, gfClamp);
+    if (flags.isEmpty(i, j + 1, k))
+      A0[idx] -= ghostFluidHelper(idx, +Y, phi, gfClamp);
+    if (flags.is3D()) {
+      if (flags.isEmpty(i, j, k - 1))
+        A0[idx] -= ghostFluidHelper(idx, -Z, phi, gfClamp);
+      if (flags.isEmpty(i, j, k + 1))
+        A0[idx] -= ghostFluidHelper(idx, +Z, phi, gfClamp);
+    }
+  }
+  inline Grid<Real> &getArg0()
+  {
+    return A0;
+  }
+  typedef Grid<Real> type0;
+  inline const FlagGrid &getArg1()
+  {
+    return flags;
+  }
+  typedef FlagGrid type1;
+  inline const Grid<Real> &getArg2()
+  {
+    return phi;
+  }
+  typedef Grid<Real> type2;
+  inline const Real &getArg3()
+  {
+    return gfClamp;
+  }
+  typedef Real type3;
+  void runMessage()
+  {
+    debMsg("Executing kernel ApplyGhostFluidDiagonal ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB body: interior loop, parallel over z (3D) or y (2D).
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    const int _maxX = maxX;
+    const int _maxY = maxY;
+    if (maxZ > 1) {
+      for (int k = __r.begin(); k != (int)__r.end(); k++)
+        for (int j = 1; j < _maxY; j++)
+          for (int i = 1; i < _maxX; i++)
+            op(i, j, k, A0, flags, phi, gfClamp);
+    }
+    else {
+      const int k = 0;
+      for (int j = __r.begin(); j != (int)__r.end(); j++)
+        for (int i = 1; i < _maxX; i++)
+          op(i, j, k, A0, flags, phi, gfClamp);
+    }
+  }
+  void run()
+  {
+    if (maxZ > 1)
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+    else
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+  }
+  Grid<Real> &A0;
+  const FlagGrid &flags;
+  const Grid<Real> &phi;
+  const Real gfClamp;
+};
+
+//! Kernel: Apply velocity update: ghost fluid contribution. Adds the
+//! ghost-fluid pressure-gradient terms on faces between fluid and empty
+//! cells, and (optionally) the matching surface tension terms when a
+//! curvature grid is supplied. Outflow cells are left untouched.
+
+struct knCorrectVelocityGhostFluid : public KernelBase {
+  knCorrectVelocityGhostFluid(MACGrid &vel,
+                              const FlagGrid &flags,
+                              const Grid<Real> &pressure,
+                              const Grid<Real> &phi,
+                              Real gfClamp,
+                              const Grid<Real> *curv,
+                              const Real surfTens)
+      : KernelBase(&vel, 1),
+        vel(vel),
+        flags(flags),
+        pressure(pressure),
+        phi(phi),
+        gfClamp(gfClamp),
+        curv(curv),
+        surfTens(surfTens)
+  {
+    runMessage();
+    run();
+  }
+  inline void op(int i,
+                 int j,
+                 int k,
+                 MACGrid &vel,
+                 const FlagGrid &flags,
+                 const Grid<Real> &pressure,
+                 const Grid<Real> &phi,
+                 Real gfClamp,
+                 const Grid<Real> *curv,
+                 const Real surfTens) const
+  {
+    const IndexInt X = flags.getStrideX(), Y = flags.getStrideY(), Z = flags.getStrideZ();
+    const IndexInt idx = flags.index(i, j, k);
+    if (flags.isFluid(idx)) {
+      // fluid cell: lower faces toward empty neighbors get the gf correction
+      if (flags.isEmpty(i - 1, j, k))
+        vel[idx][0] += pressure[idx] * ghostFluidHelper(idx, -X, phi, gfClamp);
+      if (flags.isEmpty(i, j - 1, k))
+        vel[idx][1] += pressure[idx] * ghostFluidHelper(idx, -Y, phi, gfClamp);
+      if (flags.is3D() && flags.isEmpty(i, j, k - 1))
+        vel[idx][2] += pressure[idx] * ghostFluidHelper(idx, -Z, phi, gfClamp);
+    }
+    else if (flags.isEmpty(idx) &&
+             !flags.isOutflow(idx)) {  // do not change velocities in outflow cells
+      // empty cell: use the fluid neighbor's pressure (gf factor evaluated
+      // from the fluid side, hence idx-stride with the opposite offset)
+      if (flags.isFluid(i - 1, j, k))
+        vel[idx][0] -= pressure(i - 1, j, k) * ghostFluidHelper(idx - X, +X, phi, gfClamp);
+      else
+        vel[idx].x = 0.f;
+      if (flags.isFluid(i, j - 1, k))
+        vel[idx][1] -= pressure(i, j - 1, k) * ghostFluidHelper(idx - Y, +Y, phi, gfClamp);
+      else
+        vel[idx].y = 0.f;
+      if (flags.is3D()) {
+        if (flags.isFluid(i, j, k - 1))
+          vel[idx][2] -= pressure(i, j, k - 1) * ghostFluidHelper(idx - Z, +Z, phi, gfClamp);
+        else
+          vel[idx].z = 0.f;
+      }
+    }
+
+    // optional surface tension terms, mirroring the structure above
+    if (curv) {
+      if (flags.isFluid(idx)) {
+        if (flags.isEmpty(i - 1, j, k))
+          vel[idx].x += surfTensHelper(idx, -X, phi, *curv, surfTens, gfClamp);
+        if (flags.isEmpty(i, j - 1, k))
+          vel[idx].y += surfTensHelper(idx, -Y, phi, *curv, surfTens, gfClamp);
+        if (flags.is3D() && flags.isEmpty(i, j, k - 1))
+          vel[idx].z += surfTensHelper(idx, -Z, phi, *curv, surfTens, gfClamp);
+      }
+      else if (flags.isEmpty(idx) &&
+               !flags.isOutflow(idx)) {  // do not change velocities in outflow cells
+        vel[idx].x -= (flags.isFluid(i - 1, j, k)) ?
+                          surfTensHelper(idx - X, +X, phi, *curv, surfTens, gfClamp) :
+                          0.f;
+        vel[idx].y -= (flags.isFluid(i, j - 1, k)) ?
+                          surfTensHelper(idx - Y, +Y, phi, *curv, surfTens, gfClamp) :
+                          0.f;
+        if (flags.is3D())
+          vel[idx].z -= (flags.isFluid(i, j, k - 1)) ?
+                            surfTensHelper(idx - Z, +Z, phi, *curv, surfTens, gfClamp) :
+                            0.f;
+      }
+    }
+  }
+  inline MACGrid &getArg0()
+  {
+    return vel;
+  }
+  typedef MACGrid type0;
+  inline const FlagGrid &getArg1()
+  {
+    return flags;
+  }
+  typedef FlagGrid type1;
+  inline const Grid<Real> &getArg2()
+  {
+    return pressure;
+  }
+  typedef Grid<Real> type2;
+  inline const Grid<Real> &getArg3()
+  {
+    return phi;
+  }
+  typedef Grid<Real> type3;
+  inline Real &getArg4()
+  {
+    return gfClamp;
+  }
+  typedef Real type4;
+  inline const Grid<Real> *getArg5()
+  {
+    return curv;
+  }
+  typedef Grid<Real> type5;
+  inline const Real &getArg6()
+  {
+    return surfTens;
+  }
+  typedef Real type6;
+  void runMessage()
+  {
+    debMsg("Executing kernel knCorrectVelocityGhostFluid ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB body: interior loop, parallel over z (3D) or y (2D).
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    const int _maxX = maxX;
+    const int _maxY = maxY;
+    if (maxZ > 1) {
+      for (int k = __r.begin(); k != (int)__r.end(); k++)
+        for (int j = 1; j < _maxY; j++)
+          for (int i = 1; i < _maxX; i++)
+            op(i, j, k, vel, flags, pressure, phi, gfClamp, curv, surfTens);
+    }
+    else {
+      const int k = 0;
+      for (int j = __r.begin(); j != (int)__r.end(); j++)
+        for (int i = 1; i < _maxX; i++)
+          op(i, j, k, vel, flags, pressure, phi, gfClamp, curv, surfTens);
+    }
+  }
+  void run()
+  {
+    if (maxZ > 1)
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+    else
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+  }
+  MACGrid &vel;
+  const FlagGrid &flags;
+  const Grid<Real> &pressure;
+  const Grid<Real> &phi;
+  Real gfClamp;
+  const Grid<Real> *curv;
+  const Real surfTens;
+};
+
+// improve behavior of clamping for large time steps:
+// true iff the ghost-fluid factor toward the neighbor at idx+offset would be
+// clamped (liquid fraction below gfClamp).
+// NOTE(review): declared to return Real but yields a boolean (0/1) — kept
+// as-is to match the upstream generated signature.
+inline static Real ghostFluidWasClamped(const IndexInt idx,
+                                        const int offset,
+                                        const Grid<Real> &phi,
+                                        const Real gfClamp)
+{
+  const Real alpha = thetaHelper(phi[idx], phi[idx + offset]);
+  if (alpha < gfClamp)
+    return true;
+  return false;
+}
+
+//! Kernel: For empty cells, replace face velocities whose ghost-fluid factor
+//! was clamped with the velocity copied from the adjacent fluid cell. Checks
+//! both the lower (idx-stride) and upper (idx+stride) neighbors per axis.
+struct knReplaceClampedGhostFluidVels : public KernelBase {
+  knReplaceClampedGhostFluidVels(MACGrid &vel,
+                                 const FlagGrid &flags,
+                                 const Grid<Real> &pressure,
+                                 const Grid<Real> &phi,
+                                 Real gfClamp)
+      : KernelBase(&vel, 1), vel(vel), flags(flags), pressure(pressure), phi(phi), gfClamp(gfClamp)
+  {
+    runMessage();
+    run();
+  }
+  inline void op(int i,
+                 int j,
+                 int k,
+                 MACGrid &vel,
+                 const FlagGrid &flags,
+                 const Grid<Real> &pressure,
+                 const Grid<Real> &phi,
+                 Real gfClamp) const
+  {
+    const IndexInt idx = flags.index(i, j, k);
+    const IndexInt X = flags.getStrideX(), Y = flags.getStrideY(), Z = flags.getStrideZ();
+    if (!flags.isEmpty(idx))
+      return;
+
+    // lower neighbors: copy the fluid cell's face velocity on clamped faces
+    if (flags.isFluid(i - 1, j, k) && ghostFluidWasClamped(idx - X, +X, phi, gfClamp))
+      vel[idx][0] = vel[idx - X][0];
+    if (flags.isFluid(i, j - 1, k) && ghostFluidWasClamped(idx - Y, +Y, phi, gfClamp))
+      vel[idx][1] = vel[idx - Y][1];
+    if (flags.is3D() && flags.isFluid(i, j, k - 1) &&
+        ghostFluidWasClamped(idx - Z, +Z, phi, gfClamp))
+      vel[idx][2] = vel[idx - Z][2];
+
+    // upper neighbors, symmetric to the above
+    if (flags.isFluid(i + 1, j, k) && ghostFluidWasClamped(idx + X, -X, phi, gfClamp))
+      vel[idx][0] = vel[idx + X][0];
+    if (flags.isFluid(i, j + 1, k) && ghostFluidWasClamped(idx + Y, -Y, phi, gfClamp))
+      vel[idx][1] = vel[idx + Y][1];
+    if (flags.is3D() && flags.isFluid(i, j, k + 1) &&
+        ghostFluidWasClamped(idx + Z, -Z, phi, gfClamp))
+      vel[idx][2] = vel[idx + Z][2];
+  }
+  inline MACGrid &getArg0()
+  {
+    return vel;
+  }
+  typedef MACGrid type0;
+  inline const FlagGrid &getArg1()
+  {
+    return flags;
+  }
+  typedef FlagGrid type1;
+  inline const Grid<Real> &getArg2()
+  {
+    return pressure;
+  }
+  typedef Grid<Real> type2;
+  inline const Grid<Real> &getArg3()
+  {
+    return phi;
+  }
+  typedef Grid<Real> type3;
+  inline Real &getArg4()
+  {
+    return gfClamp;
+  }
+  typedef Real type4;
+  void runMessage()
+  {
+    debMsg("Executing kernel knReplaceClampedGhostFluidVels ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB body: interior loop, parallel over z (3D) or y (2D).
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    const int _maxX = maxX;
+    const int _maxY = maxY;
+    if (maxZ > 1) {
+      for (int k = __r.begin(); k != (int)__r.end(); k++)
+        for (int j = 1; j < _maxY; j++)
+          for (int i = 1; i < _maxX; i++)
+            op(i, j, k, vel, flags, pressure, phi, gfClamp);
+    }
+    else {
+      const int k = 0;
+      for (int j = __r.begin(); j != (int)__r.end(); j++)
+        for (int i = 1; i < _maxX; i++)
+          op(i, j, k, vel, flags, pressure, phi, gfClamp);
+    }
+  }
+  void run()
+  {
+    if (maxZ > 1)
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+    else
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+  }
+  MACGrid &vel;
+  const FlagGrid &flags;
+  const Grid<Real> &pressure;
+  const Grid<Real> &phi;
+  Real gfClamp;
+};
+
+//! Kernel: Count the number of empty cells in the flag grid (parallel
+//! reduction). (Previous header comment "Compute min value of Real grid"
+//! did not match this kernel's behavior.)
+
+struct CountEmptyCells : public KernelBase {
+  CountEmptyCells(const FlagGrid &flags) : KernelBase(&flags, 0), flags(flags), numEmpty(0)
+  {
+    runMessage();
+    run();
+  }
+  inline void op(IndexInt idx, const FlagGrid &flags, int &numEmpty)
+  {
+    if (flags.isEmpty(idx))
+      numEmpty++;
+  }
+  // implicit conversion yields the reduction result
+  inline operator int()
+  {
+    return numEmpty;
+  }
+  inline int &getRet()
+  {
+    return numEmpty;
+  }
+  inline const FlagGrid &getArg0()
+  {
+    return flags;
+  }
+  typedef FlagGrid type0;
+  void runMessage()
+  {
+    debMsg("Executing kernel CountEmptyCells ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  void operator()(const tbb::blocked_range<IndexInt> &__r)
+  {
+    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+      op(idx, flags, numEmpty);
+  }
+  void run()
+  {
+    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+  }
+  // TBB splitting constructor / join for the parallel reduction
+  CountEmptyCells(CountEmptyCells &o, tbb::split) : KernelBase(o), flags(o.flags), numEmpty(0)
+  {
+  }
+  void join(const CountEmptyCells &o)
+  {
+    numEmpty += o.numEmpty;
+  }
+  const FlagGrid &flags;
+  int numEmpty;
+};
+
+// *****************************************************************************
+// Misc helpers
+
+//! Change 'A' and 'rhs' such that pressure at 'fixPidx' is fixed to 'value'.
+//! Used to remove the null space of the Poisson system (zero pressure fixing).
+//! Moves the fixed cell's contribution to its neighbors' rhs, then replaces
+//! the cell's row with the trivial equation p[fixPidx] = value.
+void fixPressure(int fixPidx,
+                 Real value,
+                 Grid<Real> &rhs,
+                 Grid<Real> &A0,
+                 Grid<Real> &Ai,
+                 Grid<Real> &Aj,
+                 Grid<Real> &Ak)
+{
+  // Bring to rhs at neighbors
+  rhs[fixPidx + Ai.getStrideX()] -= Ai[fixPidx] * value;
+  rhs[fixPidx + Aj.getStrideY()] -= Aj[fixPidx] * value;
+  rhs[fixPidx - Ai.getStrideX()] -= Ai[fixPidx - Ai.getStrideX()] * value;
+  rhs[fixPidx - Aj.getStrideY()] -= Aj[fixPidx - Aj.getStrideY()] * value;
+  if (rhs.is3D()) {
+    rhs[fixPidx + Ak.getStrideZ()] -= Ak[fixPidx] * value;
+    rhs[fixPidx - Ak.getStrideZ()] -= Ak[fixPidx - Ak.getStrideZ()] * value;
+  }
+
+  // Trivialize equation at 'fixPidx' to: pressure[fixPidx] = value
+  rhs[fixPidx] = value;
+  A0[fixPidx] = Real(1);
+  Ai[fixPidx] = Aj[fixPidx] = Ak[fixPidx] = Real(0);
+  // also zero the off-diagonals of the neighboring rows pointing at fixPidx
+  Ai[fixPidx - Ai.getStrideX()] = Real(0);
+  Aj[fixPidx - Aj.getStrideY()] = Real(0);
+  if (rhs.is3D()) {
+    Ak[fixPidx - Ak.getStrideZ()] = Real(0);
+  }
+}
+
+// for "static" MG mode, keep one MG data structure per fluid solver
+// leave cleanup to OS/user if nonzero at program termination (PcMGStatic mode)
+// alternatively, manually release in scene file with releaseMG
+static std::map<FluidSolver *, GridMg *> gMapMG;
+
+//! Free the cached multigrid data for one solver, or for all solvers when
+//! called with nullptr (the default). Safe to call repeatedly: released
+//! entries are reset to nullptr in the map.
+void releaseMG(FluidSolver *solver = nullptr)
+{
+  // release all?
+  if (!solver) {
+    for (std::map<FluidSolver *, GridMg *>::iterator it = gMapMG.begin(); it != gMapMG.end();
+         it++) {
+      if (it->first != nullptr)
+        releaseMG(it->first);
+    }
+    return;
+  }
+
+  // note: operator[] inserts a nullptr entry if 'solver' was never cached
+  GridMg *mg = gMapMG[solver];
+  if (mg) {
+    delete mg;
+    gMapMG[solver] = nullptr;
+  }
+}
+// Auto-generated Python wrapper for releaseMG(); same pattern as the other
+// plugin wrappers: unpack args, timing hooks, return None, pbSetError on throw.
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "releaseMG", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      FluidSolver *solver = _args.getPtrOpt<FluidSolver>("solver", 0, nullptr, &_lock);
+      _retval = getPyNone();
+      releaseMG(solver);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "releaseMG", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("releaseMG", e.what());
+    return 0;
+  }
+}
+// Register the wrapper under the Python name "releaseMG".
+static const Pb::Register _RP_releaseMG("", "releaseMG", _W_0);
+extern "C" {
+void PbRegister_releaseMG()
+{
+  KEEP_UNUSED(_RP_releaseMG);
+}
+}
+
+// *****************************************************************************
+// Main pressure solve
+
+// Note , all three pressure solve helper functions take
+// identical parameters, apart from the RHS grid (and different const values)
+
+//! Compute rhs for pressure solve: fills 'rhs' with the negative divergence
+//! via the MakeRhs kernel, optionally shifting it so it sums to zero
+//! (enforceCompatibility). Most parameters are unused here on purpose — the
+//! three pressure-solve helper functions deliberately share one signature
+//! (see the note above in this file).
+
+void computePressureRhs(Grid<Real> &rhs,
+                        const MACGrid &vel,
+                        const Grid<Real> &pressure,
+                        const FlagGrid &flags,
+                        Real cgAccuracy = 1e-3,
+                        const Grid<Real> *phi = 0,
+                        const Grid<Real> *perCellCorr = 0,
+                        const MACGrid *fractions = 0,
+                        const MACGrid *obvel = 0,
+                        Real gfClamp = 1e-04,
+                        Real cgMaxIterFac = 1.5,
+                        bool precondition = true,
+                        int preconditioner = PcMIC,
+                        bool enforceCompatibility = false,
+                        bool useL2Norm = false,
+                        bool zeroPressureFixing = false,
+                        const Grid<Real> *curv = NULL,
+                        const Real surfTens = 0.)
+{
+  // compute divergence and init right hand side
+  MakeRhs kernMakeRhs(
+      flags, rhs, vel, perCellCorr, fractions, obvel, phi, curv, surfTens, gfClamp);
+
+  // subtract the mean divergence so the system is solvable (compatibility)
+  if (enforceCompatibility)
+    rhs += (Real)(-kernMakeRhs.sum / (Real)kernMakeRhs.cnt);
+}
+// Auto-generated Python wrapper for computePressureRhs() (this _W_1 belongs to
+// pressure.cpp; the earlier _W_1 is in a different translation unit).
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "computePressureRhs", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      Grid<Real> &rhs = *_args.getPtr<Grid<Real>>("rhs", 0, &_lock);
+      const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+      const Grid<Real> &pressure = *_args.getPtr<Grid<Real>>("pressure", 2, &_lock);
+      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 3, &_lock);
+      Real cgAccuracy = _args.getOpt<Real>("cgAccuracy", 4, 1e-3, &_lock);
+      const Grid<Real> *phi = _args.getPtrOpt<Grid<Real>>("phi", 5, 0, &_lock);
+      const Grid<Real> *perCellCorr = _args.getPtrOpt<Grid<Real>>("perCellCorr", 6, 0, &_lock);
+      const MACGrid *fractions = _args.getPtrOpt<MACGrid>("fractions", 7, 0, &_lock);
+      const MACGrid *obvel = _args.getPtrOpt<MACGrid>("obvel", 8, 0, &_lock);
+      Real gfClamp = _args.getOpt<Real>("gfClamp", 9, 1e-04, &_lock);
+      Real cgMaxIterFac = _args.getOpt<Real>("cgMaxIterFac", 10, 1.5, &_lock);
+      bool precondition = _args.getOpt<bool>("precondition", 11, true, &_lock);
+      int preconditioner = _args.getOpt<int>("preconditioner", 12, PcMIC, &_lock);
+      bool enforceCompatibility = _args.getOpt<bool>("enforceCompatibility", 13, false, &_lock);
+      bool useL2Norm = _args.getOpt<bool>("useL2Norm", 14, false, &_lock);
+      bool zeroPressureFixing = _args.getOpt<bool>("zeroPressureFixing", 15, false, &_lock);
+      const Grid<Real> *curv = _args.getPtrOpt<Grid<Real>>("curv", 16, NULL, &_lock);
+      const Real surfTens = _args.getOpt<Real>("surfTens", 17, 0., &_lock);
+      _retval = getPyNone();
+      computePressureRhs(rhs,
+                         vel,
+                         pressure,
+                         flags,
+                         cgAccuracy,
+                         phi,
+                         perCellCorr,
+                         fractions,
+                         obvel,
+                         gfClamp,
+                         cgMaxIterFac,
+                         precondition,
+                         preconditioner,
+                         enforceCompatibility,
+                         useL2Norm,
+                         zeroPressureFixing,
+                         curv,
+                         surfTens);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "computePressureRhs", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("computePressureRhs", e.what());
+    return 0;
+  }
+}
+// Register the wrapper under the Python name "computePressureRhs".
+static const Pb::Register _RP_computePressureRhs("", "computePressureRhs", _W_1);
+extern "C" {
+void PbRegister_computePressureRhs()
+{
+  KEEP_UNUSED(_RP_computePressureRhs);
+}
+}
+
+//! Build and solve pressure system of equations
+//! perCellCorr: a divergence correction for each cell, optional
+//! fractions: for 2nd order obstacle boundaries, optional
+//! gfClamp: clamping threshold for ghost fluid method
+//! cgMaxIterFac: heuristic to determine maximal number of CG iterations, increase for more accurate
+//! solutions preconditioner: MIC, or MG (see Preconditioner enum) useL2Norm: use max norm by
+//! default, can be turned to L2 here zeroPressureFixing: remove null space by fixing a single
+//! pressure value, needed for MG curv: curvature for surface tension effects surfTens: surface
+//! tension coefficient retRhs: return RHS divergence, e.g., for debugging; optional
+
+void solvePressureSystem(Grid<Real> &rhs,
+ MACGrid &vel,
+ Grid<Real> &pressure,
+ const FlagGrid &flags,
+ Real cgAccuracy = 1e-3,
+ const Grid<Real> *phi = 0,
+ const Grid<Real> *perCellCorr = 0,
+ const MACGrid *fractions = 0,
+ Real gfClamp = 1e-04,
+ Real cgMaxIterFac = 1.5,
+ bool precondition = true,
+ int preconditioner = PcMIC,
+ const bool enforceCompatibility = false,
+ const bool useL2Norm = false,
+ const bool zeroPressureFixing = false,
+ const Grid<Real> *curv = NULL,
+ const Real surfTens = 0.)
+{
+ if (precondition == false)
+ preconditioner = PcNone; // for backwards compatibility
+
+ // reserve temp grids
+ FluidSolver *parent = flags.getParent();
+ Grid<Real> residual(parent);
+ Grid<Real> search(parent);
+ Grid<Real> A0(parent);
+ Grid<Real> Ai(parent);
+ Grid<Real> Aj(parent);
+ Grid<Real> Ak(parent);
+ Grid<Real> tmp(parent);
+
+ // setup matrix and boundaries
+ MakeLaplaceMatrix(flags, A0, Ai, Aj, Ak, fractions);
+
+ if (phi) {
+ ApplyGhostFluidDiagonal(A0, flags, *phi, gfClamp);
+ }
+
+ // check whether we need to fix some pressure value...
+ // (manually enable, or automatically for high accuracy, can cause asymmetries otherwise)
+ if (zeroPressureFixing || cgAccuracy < 1e-07) {
+ if (FLOATINGPOINT_PRECISION == 1)
+ debMsg(
+ "Warning - high CG accuracy with single-precision floating point accuracy might not "
+ "converge...",
+ 2);
+
+ int numEmpty = CountEmptyCells(flags);
+ IndexInt fixPidx = -1;
+ if (numEmpty == 0) {
+ // Determine appropriate fluid cell for pressure fixing
+ // 1) First check some preferred positions for approx. symmetric zeroPressureFixing
+ Vec3i topCenter(
+ flags.getSizeX() / 2, flags.getSizeY() - 1, flags.is3D() ? flags.getSizeZ() / 2 : 0);
+ Vec3i preferredPos[] = {topCenter, topCenter - Vec3i(0, 1, 0), topCenter - Vec3i(0, 2, 0)};
+
+ for (Vec3i pos : preferredPos) {
+ if (flags.isFluid(pos)) {
+ fixPidx = flags.index(pos);
+ break;
+ }
+ }
+
+ // 2) Then search whole domain
+ if (fixPidx == -1) {
+ FOR_IJK_BND(flags, 1)
+ {
+ if (flags.isFluid(i, j, k)) {
+ fixPidx = flags.index(i, j, k);
+ // break FOR_IJK_BND loop
+ i = flags.getSizeX() - 1;
+ j = flags.getSizeY() - 1;
+ k = __kmax;
+ }
+ }
+ }
+ // debMsg("No empty cells! Fixing pressure of cell "<<fixPidx<<" to zero",1);
+ }
+ if (fixPidx >= 0) {
+ fixPressure(fixPidx, Real(0), rhs, A0, Ai, Aj, Ak);
+ static bool msgOnce = false;
+ if (!msgOnce) {
+ debMsg("Pinning pressure of cell " << fixPidx << " to zero", 2);
+ msgOnce = true;
+ }
+ }
+ }
+
+ // CG setup
+ // note: the last factor increases the max iterations for 2d, which right now can't use a
+ // preconditioner
+ GridCgInterface *gcg;
+ if (vel.is3D())
+ gcg = new GridCg<ApplyMatrix>(pressure, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
+ else
+ gcg = new GridCg<ApplyMatrix2D>(
+ pressure, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
+
+ gcg->setAccuracy(cgAccuracy);
+ gcg->setUseL2Norm(useL2Norm);
+
+ int maxIter = 0;
+
+ Grid<Real> *pca0 = nullptr, *pca1 = nullptr, *pca2 = nullptr, *pca3 = nullptr;
+ GridMg *pmg = nullptr;
+
+ // optional preconditioning
+ if (preconditioner == PcNone || preconditioner == PcMIC) {
+ maxIter = (int)(cgMaxIterFac * flags.getSize().max()) * (flags.is3D() ? 1 : 4);
+
+ pca0 = new Grid<Real>(parent);
+ pca1 = new Grid<Real>(parent);
+ pca2 = new Grid<Real>(parent);
+ pca3 = new Grid<Real>(parent);
+
+ gcg->setICPreconditioner(preconditioner == PcMIC ? GridCgInterface::PC_mICP :
+ GridCgInterface::PC_None,
+ pca0,
+ pca1,
+ pca2,
+ pca3);
+ }
+ else if (preconditioner == PcMGDynamic || preconditioner == PcMGStatic) {
+ maxIter = 100;
+
+ pmg = gMapMG[parent];
+ if (!pmg) {
+ pmg = new GridMg(pressure.getSize());
+ gMapMG[parent] = pmg;
+ }
+
+ gcg->setMGPreconditioner(GridCgInterface::PC_MGP, pmg);
+ }
+
+ // CG solve
+ for (int iter = 0; iter < maxIter; iter++) {
+ if (!gcg->iterate())
+ iter = maxIter;
+ if (iter < maxIter)
+ debMsg("FluidSolver::solvePressure iteration " << iter
+ << ", residual: " << gcg->getResNorm(),
+ 9);
+ }
+ debMsg("FluidSolver::solvePressure done. Iterations:" << gcg->getIterations()
+ << ", residual:" << gcg->getResNorm(),
+ 2);
+
+ // Cleanup
+ if (gcg)
+ delete gcg;
+ if (pca0)
+ delete pca0;
+ if (pca1)
+ delete pca1;
+ if (pca2)
+ delete pca2;
+ if (pca3)
+ delete pca3;
+
+ // PcMGDynamic: always delete multigrid solver after use
+ // PcMGStatic: keep multigrid solver for next solve
+ if (pmg && preconditioner == PcMGDynamic)
+ releaseMG(parent);
+}
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "solvePressureSystem", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &rhs = *_args.getPtr<Grid<Real>>("rhs", 0, &_lock);
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ Grid<Real> &pressure = *_args.getPtr<Grid<Real>>("pressure", 2, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 3, &_lock);
+ Real cgAccuracy = _args.getOpt<Real>("cgAccuracy", 4, 1e-3, &_lock);
+ const Grid<Real> *phi = _args.getPtrOpt<Grid<Real>>("phi", 5, 0, &_lock);
+ const Grid<Real> *perCellCorr = _args.getPtrOpt<Grid<Real>>("perCellCorr", 6, 0, &_lock);
+ const MACGrid *fractions = _args.getPtrOpt<MACGrid>("fractions", 7, 0, &_lock);
+ Real gfClamp = _args.getOpt<Real>("gfClamp", 8, 1e-04, &_lock);
+ Real cgMaxIterFac = _args.getOpt<Real>("cgMaxIterFac", 9, 1.5, &_lock);
+ bool precondition = _args.getOpt<bool>("precondition", 10, true, &_lock);
+ int preconditioner = _args.getOpt<int>("preconditioner", 11, PcMIC, &_lock);
+ const bool enforceCompatibility = _args.getOpt<bool>(
+ "enforceCompatibility", 12, false, &_lock);
+ const bool useL2Norm = _args.getOpt<bool>("useL2Norm", 13, false, &_lock);
+ const bool zeroPressureFixing = _args.getOpt<bool>("zeroPressureFixing", 14, false, &_lock);
+ const Grid<Real> *curv = _args.getPtrOpt<Grid<Real>>("curv", 15, NULL, &_lock);
+ const Real surfTens = _args.getOpt<Real>("surfTens", 16, 0., &_lock);
+ _retval = getPyNone();
+ solvePressureSystem(rhs,
+ vel,
+ pressure,
+ flags,
+ cgAccuracy,
+ phi,
+ perCellCorr,
+ fractions,
+ gfClamp,
+ cgMaxIterFac,
+ precondition,
+ preconditioner,
+ enforceCompatibility,
+ useL2Norm,
+ zeroPressureFixing,
+ curv,
+ surfTens);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "solvePressureSystem", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("solvePressureSystem", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_solvePressureSystem("", "solvePressureSystem", _W_2);
+extern "C" {
+void PbRegister_solvePressureSystem()
+{
+ KEEP_UNUSED(_RP_solvePressureSystem);
+}
+}
+
+//! Apply pressure gradient to make velocity field divergence free
+
+void correctVelocity(MACGrid &vel,
+ Grid<Real> &pressure,
+ const FlagGrid &flags,
+ Real cgAccuracy = 1e-3,
+ const Grid<Real> *phi = 0,
+ const Grid<Real> *perCellCorr = 0,
+ const MACGrid *fractions = 0,
+ Real gfClamp = 1e-04,
+ Real cgMaxIterFac = 1.5,
+ bool precondition = true,
+ int preconditioner = PcMIC,
+ bool enforceCompatibility = false,
+ bool useL2Norm = false,
+ bool zeroPressureFixing = false,
+ const Grid<Real> *curv = NULL,
+ const Real surfTens = 0.)
+{
+ knCorrectVelocity(flags, vel, pressure);
+ if (phi) {
+ knCorrectVelocityGhostFluid(vel, flags, pressure, *phi, gfClamp, curv, surfTens);
+ // improve behavior of clamping for large time steps:
+ knReplaceClampedGhostFluidVels(vel, flags, pressure, *phi, gfClamp);
+ }
+}
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "correctVelocity", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 0, &_lock);
+ Grid<Real> &pressure = *_args.getPtr<Grid<Real>>("pressure", 1, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 2, &_lock);
+ Real cgAccuracy = _args.getOpt<Real>("cgAccuracy", 3, 1e-3, &_lock);
+ const Grid<Real> *phi = _args.getPtrOpt<Grid<Real>>("phi", 4, 0, &_lock);
+ const Grid<Real> *perCellCorr = _args.getPtrOpt<Grid<Real>>("perCellCorr", 5, 0, &_lock);
+ const MACGrid *fractions = _args.getPtrOpt<MACGrid>("fractions", 6, 0, &_lock);
+ Real gfClamp = _args.getOpt<Real>("gfClamp", 7, 1e-04, &_lock);
+ Real cgMaxIterFac = _args.getOpt<Real>("cgMaxIterFac", 8, 1.5, &_lock);
+ bool precondition = _args.getOpt<bool>("precondition", 9, true, &_lock);
+ int preconditioner = _args.getOpt<int>("preconditioner", 10, PcMIC, &_lock);
+ bool enforceCompatibility = _args.getOpt<bool>("enforceCompatibility", 11, false, &_lock);
+ bool useL2Norm = _args.getOpt<bool>("useL2Norm", 12, false, &_lock);
+ bool zeroPressureFixing = _args.getOpt<bool>("zeroPressureFixing", 13, false, &_lock);
+ const Grid<Real> *curv = _args.getPtrOpt<Grid<Real>>("curv", 14, NULL, &_lock);
+ const Real surfTens = _args.getOpt<Real>("surfTens", 15, 0., &_lock);
+ _retval = getPyNone();
+ correctVelocity(vel,
+ pressure,
+ flags,
+ cgAccuracy,
+ phi,
+ perCellCorr,
+ fractions,
+ gfClamp,
+ cgMaxIterFac,
+ precondition,
+ preconditioner,
+ enforceCompatibility,
+ useL2Norm,
+ zeroPressureFixing,
+ curv,
+ surfTens);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "correctVelocity", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("correctVelocity", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_correctVelocity("", "correctVelocity", _W_3);
+extern "C" {
+void PbRegister_correctVelocity()
+{
+ KEEP_UNUSED(_RP_correctVelocity);
+}
+}
+
+//! Perform pressure projection of the velocity grid, calls
+//! all three pressure helper functions in a row.
+
+void solvePressure(MACGrid &vel,
+ Grid<Real> &pressure,
+ const FlagGrid &flags,
+ Real cgAccuracy = 1e-3,
+ const Grid<Real> *phi = 0,
+ const Grid<Real> *perCellCorr = 0,
+ const MACGrid *fractions = 0,
+ const MACGrid *obvel = 0,
+ Real gfClamp = 1e-04,
+ Real cgMaxIterFac = 1.5,
+ bool precondition = true,
+ int preconditioner = PcMIC,
+ bool enforceCompatibility = false,
+ bool useL2Norm = false,
+ bool zeroPressureFixing = false,
+ const Grid<Real> *curv = NULL,
+ const Real surfTens = 0.,
+ Grid<Real> *retRhs = NULL)
+{
+ Grid<Real> rhs(vel.getParent());
+
+ computePressureRhs(rhs,
+ vel,
+ pressure,
+ flags,
+ cgAccuracy,
+ phi,
+ perCellCorr,
+ fractions,
+ obvel,
+ gfClamp,
+ cgMaxIterFac,
+ precondition,
+ preconditioner,
+ enforceCompatibility,
+ useL2Norm,
+ zeroPressureFixing,
+ curv,
+ surfTens);
+
+ solvePressureSystem(rhs,
+ vel,
+ pressure,
+ flags,
+ cgAccuracy,
+ phi,
+ perCellCorr,
+ fractions,
+ gfClamp,
+ cgMaxIterFac,
+ precondition,
+ preconditioner,
+ enforceCompatibility,
+ useL2Norm,
+ zeroPressureFixing,
+ curv,
+ surfTens);
+
+ correctVelocity(vel,
+ pressure,
+ flags,
+ cgAccuracy,
+ phi,
+ perCellCorr,
+ fractions,
+ gfClamp,
+ cgMaxIterFac,
+ precondition,
+ preconditioner,
+ enforceCompatibility,
+ useL2Norm,
+ zeroPressureFixing,
+ curv,
+ surfTens);
+
+  // optionally, return RHS
+ if (retRhs) {
+ retRhs->copyFrom(rhs);
+ }
+}
+static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "solvePressure", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ MACGrid &vel = *_args.getPtr<MACGrid>("vel", 0, &_lock);
+ Grid<Real> &pressure = *_args.getPtr<Grid<Real>>("pressure", 1, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 2, &_lock);
+ Real cgAccuracy = _args.getOpt<Real>("cgAccuracy", 3, 1e-3, &_lock);
+ const Grid<Real> *phi = _args.getPtrOpt<Grid<Real>>("phi", 4, 0, &_lock);
+ const Grid<Real> *perCellCorr = _args.getPtrOpt<Grid<Real>>("perCellCorr", 5, 0, &_lock);
+ const MACGrid *fractions = _args.getPtrOpt<MACGrid>("fractions", 6, 0, &_lock);
+ const MACGrid *obvel = _args.getPtrOpt<MACGrid>("obvel", 7, 0, &_lock);
+ Real gfClamp = _args.getOpt<Real>("gfClamp", 8, 1e-04, &_lock);
+ Real cgMaxIterFac = _args.getOpt<Real>("cgMaxIterFac", 9, 1.5, &_lock);
+ bool precondition = _args.getOpt<bool>("precondition", 10, true, &_lock);
+ int preconditioner = _args.getOpt<int>("preconditioner", 11, PcMIC, &_lock);
+ bool enforceCompatibility = _args.getOpt<bool>("enforceCompatibility", 12, false, &_lock);
+ bool useL2Norm = _args.getOpt<bool>("useL2Norm", 13, false, &_lock);
+ bool zeroPressureFixing = _args.getOpt<bool>("zeroPressureFixing", 14, false, &_lock);
+ const Grid<Real> *curv = _args.getPtrOpt<Grid<Real>>("curv", 15, NULL, &_lock);
+ const Real surfTens = _args.getOpt<Real>("surfTens", 16, 0., &_lock);
+ Grid<Real> *retRhs = _args.getPtrOpt<Grid<Real>>("retRhs", 17, NULL, &_lock);
+ _retval = getPyNone();
+ solvePressure(vel,
+ pressure,
+ flags,
+ cgAccuracy,
+ phi,
+ perCellCorr,
+ fractions,
+ obvel,
+ gfClamp,
+ cgMaxIterFac,
+ precondition,
+ preconditioner,
+ enforceCompatibility,
+ useL2Norm,
+ zeroPressureFixing,
+ curv,
+ surfTens,
+ retRhs);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "solvePressure", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("solvePressure", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_solvePressure("", "solvePressure", _W_4);
+extern "C" {
+void PbRegister_solvePressure()
+{
+ KEEP_UNUSED(_RP_solvePressure);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/ptsplugins.cpp b/extern/mantaflow/preprocessed/plugin/ptsplugins.cpp
new file mode 100644
index 00000000000..a6bbccc5966
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/ptsplugins.cpp
@@ -0,0 +1,502 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+// ----------------------------------------------------------------------------
+//
+// MantaFlow fluid solver framework
+// Copyright 2018 Kiwon Um, Nils Thuerey
+//
+// This program is free software, distributed under the terms of the
+// GNU General Public License (GPL)
+// http://www.gnu.org/licenses
+//
+// Particle system helper
+//
+// ----------------------------------------------------------------------------
+
+#include "particle.h"
+
+namespace Manta {
+
+struct KnAddForcePvel : public KernelBase {
+ KnAddForcePvel(ParticleDataImpl<Vec3> &v,
+ const Vec3 &da,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(v.size()), v(v), da(da), ptype(ptype), exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ ParticleDataImpl<Vec3> &v,
+ const Vec3 &da,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude) const
+ {
+ if (ptype && ((*ptype)[idx] & exclude))
+ return;
+ v[idx] += da;
+ }
+ inline ParticleDataImpl<Vec3> &getArg0()
+ {
+ return v;
+ }
+ typedef ParticleDataImpl<Vec3> type0;
+ inline const Vec3 &getArg1()
+ {
+ return da;
+ }
+ typedef Vec3 type1;
+ inline const ParticleDataImpl<int> *getArg2()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type2;
+ inline const int &getArg3()
+ {
+ return exclude;
+ }
+ typedef int type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnAddForcePvel ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, v, da, ptype, exclude);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<Vec3> &v;
+ const Vec3 &da;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+//! add force to vec3 particle data; a: acceleration
+
+void addForcePvel(ParticleDataImpl<Vec3> &vel,
+ const Vec3 &a,
+ const Real dt,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+{
+ KnAddForcePvel(vel, a * dt, ptype, exclude);
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "addForcePvel", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ ParticleDataImpl<Vec3> &vel = *_args.getPtr<ParticleDataImpl<Vec3>>("vel", 0, &_lock);
+ const Vec3 &a = _args.get<Vec3>("a", 1, &_lock);
+ const Real dt = _args.get<Real>("dt", 2, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtr<ParticleDataImpl<int>>("ptype", 3, &_lock);
+ const int exclude = _args.get<int>("exclude", 4, &_lock);
+ _retval = getPyNone();
+ addForcePvel(vel, a, dt, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "addForcePvel", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("addForcePvel", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_addForcePvel("", "addForcePvel", _W_0);
+extern "C" {
+void PbRegister_addForcePvel()
+{
+ KEEP_UNUSED(_RP_addForcePvel);
+}
+}
+
+struct KnUpdateVelocityFromDeltaPos : public KernelBase {
+ KnUpdateVelocityFromDeltaPos(const BasicParticleSystem &p,
+ ParticleDataImpl<Vec3> &v,
+ const ParticleDataImpl<Vec3> &x_prev,
+ const Real over_dt,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(p.size()),
+ p(p),
+ v(v),
+ x_prev(x_prev),
+ over_dt(over_dt),
+ ptype(ptype),
+ exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystem &p,
+ ParticleDataImpl<Vec3> &v,
+ const ParticleDataImpl<Vec3> &x_prev,
+ const Real over_dt,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude) const
+ {
+ if (ptype && ((*ptype)[idx] & exclude))
+ return;
+ v[idx] = (p[idx].pos - x_prev[idx]) * over_dt;
+ }
+ inline const BasicParticleSystem &getArg0()
+ {
+ return p;
+ }
+ typedef BasicParticleSystem type0;
+ inline ParticleDataImpl<Vec3> &getArg1()
+ {
+ return v;
+ }
+ typedef ParticleDataImpl<Vec3> type1;
+ inline const ParticleDataImpl<Vec3> &getArg2()
+ {
+ return x_prev;
+ }
+ typedef ParticleDataImpl<Vec3> type2;
+ inline const Real &getArg3()
+ {
+ return over_dt;
+ }
+ typedef Real type3;
+ inline const ParticleDataImpl<int> *getArg4()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type4;
+ inline const int &getArg5()
+ {
+ return exclude;
+ }
+ typedef int type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnUpdateVelocityFromDeltaPos ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, p, v, x_prev, over_dt, ptype, exclude);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystem &p;
+ ParticleDataImpl<Vec3> &v;
+ const ParticleDataImpl<Vec3> &x_prev;
+ const Real over_dt;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+//! retrieve velocity from position change
+
+void updateVelocityFromDeltaPos(const BasicParticleSystem &parts,
+ ParticleDataImpl<Vec3> &vel,
+ const ParticleDataImpl<Vec3> &x_prev,
+ const Real dt,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+{
+ KnUpdateVelocityFromDeltaPos(parts, vel, x_prev, 1.0 / dt, ptype, exclude);
+}
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "updateVelocityFromDeltaPos", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ ParticleDataImpl<Vec3> &vel = *_args.getPtr<ParticleDataImpl<Vec3>>("vel", 1, &_lock);
+ const ParticleDataImpl<Vec3> &x_prev = *_args.getPtr<ParticleDataImpl<Vec3>>(
+ "x_prev", 2, &_lock);
+ const Real dt = _args.get<Real>("dt", 3, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtr<ParticleDataImpl<int>>("ptype", 4, &_lock);
+ const int exclude = _args.get<int>("exclude", 5, &_lock);
+ _retval = getPyNone();
+ updateVelocityFromDeltaPos(parts, vel, x_prev, dt, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "updateVelocityFromDeltaPos", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("updateVelocityFromDeltaPos", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_updateVelocityFromDeltaPos("", "updateVelocityFromDeltaPos", _W_1);
+extern "C" {
+void PbRegister_updateVelocityFromDeltaPos()
+{
+ KEEP_UNUSED(_RP_updateVelocityFromDeltaPos);
+}
+}
+
+struct KnStepEuler : public KernelBase {
+ KnStepEuler(BasicParticleSystem &p,
+ const ParticleDataImpl<Vec3> &v,
+ const Real dt,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+ : KernelBase(p.size()), p(p), v(v), dt(dt), ptype(ptype), exclude(exclude)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ BasicParticleSystem &p,
+ const ParticleDataImpl<Vec3> &v,
+ const Real dt,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude) const
+ {
+ if (ptype && ((*ptype)[idx] & exclude))
+ return;
+ p[idx].pos += v[idx] * dt;
+ }
+ inline BasicParticleSystem &getArg0()
+ {
+ return p;
+ }
+ typedef BasicParticleSystem type0;
+ inline const ParticleDataImpl<Vec3> &getArg1()
+ {
+ return v;
+ }
+ typedef ParticleDataImpl<Vec3> type1;
+ inline const Real &getArg2()
+ {
+ return dt;
+ }
+ typedef Real type2;
+ inline const ParticleDataImpl<int> *getArg3()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type3;
+ inline const int &getArg4()
+ {
+ return exclude;
+ }
+ typedef int type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnStepEuler ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, p, v, dt, ptype, exclude);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ BasicParticleSystem &p;
+ const ParticleDataImpl<Vec3> &v;
+ const Real dt;
+ const ParticleDataImpl<int> *ptype;
+ const int exclude;
+};
+//! simple forward Euler integration for particle system
+
+void eulerStep(BasicParticleSystem &parts,
+ const ParticleDataImpl<Vec3> &vel,
+ const ParticleDataImpl<int> *ptype,
+ const int exclude)
+{
+ KnStepEuler(parts, vel, parts.getParent()->getDt(), ptype, exclude);
+}
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "eulerStep", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ const ParticleDataImpl<Vec3> &vel = *_args.getPtr<ParticleDataImpl<Vec3>>("vel", 1, &_lock);
+ const ParticleDataImpl<int> *ptype = _args.getPtr<ParticleDataImpl<int>>("ptype", 2, &_lock);
+ const int exclude = _args.get<int>("exclude", 3, &_lock);
+ _retval = getPyNone();
+ eulerStep(parts, vel, ptype, exclude);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "eulerStep", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("eulerStep", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_eulerStep("", "eulerStep", _W_2);
+extern "C" {
+void PbRegister_eulerStep()
+{
+ KEEP_UNUSED(_RP_eulerStep);
+}
+}
+
+struct KnSetPartType : public KernelBase {
+ KnSetPartType(ParticleDataImpl<int> &ptype,
+ const BasicParticleSystem &part,
+ const int mark,
+ const int stype,
+ const FlagGrid &flags,
+ const int cflag)
+ : KernelBase(ptype.size()),
+ ptype(ptype),
+ part(part),
+ mark(mark),
+ stype(stype),
+ flags(flags),
+ cflag(cflag)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ ParticleDataImpl<int> &ptype,
+ const BasicParticleSystem &part,
+ const int mark,
+ const int stype,
+ const FlagGrid &flags,
+ const int cflag) const
+ {
+ if (flags.isInBounds(part.getPos(idx), 0) && (flags.getAt(part.getPos(idx)) & cflag) &&
+ (ptype[idx] & stype))
+ ptype[idx] = mark;
+ }
+ inline ParticleDataImpl<int> &getArg0()
+ {
+ return ptype;
+ }
+ typedef ParticleDataImpl<int> type0;
+ inline const BasicParticleSystem &getArg1()
+ {
+ return part;
+ }
+ typedef BasicParticleSystem type1;
+ inline const int &getArg2()
+ {
+ return mark;
+ }
+ typedef int type2;
+ inline const int &getArg3()
+ {
+ return stype;
+ }
+ typedef int type3;
+ inline const FlagGrid &getArg4()
+ {
+ return flags;
+ }
+ typedef FlagGrid type4;
+ inline const int &getArg5()
+ {
+ return cflag;
+ }
+ typedef int type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnSetPartType ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, ptype, part, mark, stype, flags, cflag);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ ParticleDataImpl<int> &ptype;
+ const BasicParticleSystem &part;
+ const int mark;
+ const int stype;
+ const FlagGrid &flags;
+ const int cflag;
+};
+//! if particle is stype and in cflag cell, set ptype as mark
+
+void setPartType(const BasicParticleSystem &parts,
+ ParticleDataImpl<int> &ptype,
+ const int mark,
+ const int stype,
+ const FlagGrid &flags,
+ const int cflag)
+{
+ KnSetPartType(ptype, parts, mark, stype, flags, cflag);
+}
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "setPartType", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+ ParticleDataImpl<int> &ptype = *_args.getPtr<ParticleDataImpl<int>>("ptype", 1, &_lock);
+ const int mark = _args.get<int>("mark", 2, &_lock);
+ const int stype = _args.get<int>("stype", 3, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 4, &_lock);
+ const int cflag = _args.get<int>("cflag", 5, &_lock);
+ _retval = getPyNone();
+ setPartType(parts, ptype, mark, stype, flags, cflag);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "setPartType", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("setPartType", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_setPartType("", "setPartType", _W_3);
+extern "C" {
+void PbRegister_setPartType()
+{
+ KEEP_UNUSED(_RP_setPartType);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/secondaryparticles.cpp b/extern/mantaflow/preprocessed/plugin/secondaryparticles.cpp
new file mode 100644
index 00000000000..281e12ef04b
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/secondaryparticles.cpp
@@ -0,0 +1,3065 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2017 Georg Kohl, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * GNU General Public License (GPL)
+ * http://www.gnu.org/licenses
+ *
+ * Secondary particle plugin for FLIP simulations
+ *
+ ******************************************************************************/
+
+#include "particle.h"
+#include "commonkernels.h"
+
+namespace Manta {
+
+#pragma region Secondary Particles for FLIP
+//----------------------------------------------------------------------------------------------------------------------------------------------------
+// Secondary Particles for FLIP
+//----------------------------------------------------------------------------------------------------------------------------------------------------
+
+// helper function that clamps the value in potential to the interval [tauMin, tauMax] and
+// normalizes it to [0, 1] afterwards
+Real clampPotential(Real potential, Real tauMin, Real tauMax)
+{
+ return (std::min(potential, tauMax) - std::min(potential, tauMin)) / (tauMax - tauMin);
+}
+
+// computes all three potentials (trapped air, wave crest, kinetic energy) and the neighbor
+// ratio for every fluid cell and stores them in the respective grids. Less readable, but
+// significantly faster than using separate potential computations
+
+struct knFlipComputeSecondaryParticlePotentials : public KernelBase {
+ knFlipComputeSecondaryParticlePotentials(Grid<Real> &potTA,
+ Grid<Real> &potWC,
+ Grid<Real> &potKE,
+ Grid<Real> &neighborRatio,
+ const FlagGrid &flags,
+ const MACGrid &v,
+ const Grid<Vec3> &normal,
+ const int radius,
+ const Real tauMinTA,
+ const Real tauMaxTA,
+ const Real tauMinWC,
+ const Real tauMaxWC,
+ const Real tauMinKE,
+ const Real tauMaxKE,
+ const Real scaleFromManta,
+ const int itype = FlagGrid::TypeFluid,
+ const int jtype = FlagGrid::TypeObstacle |
+ FlagGrid::TypeOutflow |
+ FlagGrid::TypeInflow)
+ : KernelBase(&potTA, radius),
+ potTA(potTA),
+ potWC(potWC),
+ potKE(potKE),
+ neighborRatio(neighborRatio),
+ flags(flags),
+ v(v),
+ normal(normal),
+ radius(radius),
+ tauMinTA(tauMinTA),
+ tauMaxTA(tauMaxTA),
+ tauMinWC(tauMinWC),
+ tauMaxWC(tauMaxWC),
+ tauMinKE(tauMinKE),
+ tauMaxKE(tauMaxKE),
+ scaleFromManta(scaleFromManta),
+ itype(itype),
+ jtype(jtype)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ Grid<Real> &potTA,
+ Grid<Real> &potWC,
+ Grid<Real> &potKE,
+ Grid<Real> &neighborRatio,
+ const FlagGrid &flags,
+ const MACGrid &v,
+ const Grid<Vec3> &normal,
+ const int radius,
+ const Real tauMinTA,
+ const Real tauMaxTA,
+ const Real tauMinWC,
+ const Real tauMaxWC,
+ const Real tauMinKE,
+ const Real tauMaxKE,
+ const Real scaleFromManta,
+ const int itype = FlagGrid::TypeFluid,
+ const int jtype = FlagGrid::TypeObstacle | FlagGrid::TypeOutflow |
+ FlagGrid::TypeInflow) const
+ {
+
+ if (!(flags(i, j, k) & itype))
+ return;
+
+ // compute trapped air potential + wave crest potential + neighbor ratio at once
+ const Vec3 &xi = scaleFromManta * Vec3(i, j, k); // scale to unit cube
+ const Vec3 &vi = scaleFromManta * v.getCentered(i, j, k);
+ const Vec3 &ni = getNormalized(normal(i, j, k));
+ Real vdiff = 0; // for trapped air
+ Real kappa = 0; // for wave crests
+ int countFluid = 0; // for neighbor ratio
+ int countMaxFluid = 0; // for neighbor ratio
+
+ // iterate over neighboring cells within radius
+ for (IndexInt x = i - radius; x <= i + radius; x++) {
+ for (IndexInt y = j - radius; y <= j + radius; y++) {
+ for (IndexInt z = k - radius; z <= k + radius; z++) {
+ if ((x == i && y == j && z == k) || !flags.isInBounds(Vec3i(x, y, z)) ||
+ (flags(x, y, z) & jtype))
+ continue;
+
+ if (flags(x, y, z) & itype) {
+ countFluid++;
+ countMaxFluid++;
+ }
+ else {
+ countMaxFluid++;
+ }
+
+ const Vec3 &xj = scaleFromManta * Vec3(x, y, z); // scale to unit cube
+ const Vec3 &vj = scaleFromManta * v.getCentered(x, y, z);
+ const Vec3 &nj = getNormalized(normal(x, y, z));
+ const Vec3 xij = xi - xj;
+ const Vec3 vij = vi - vj;
+ Real h = !potTA.is3D() ?
+ 1.414 * radius :
+ 1.732 * radius; // estimate sqrt(2)*radius resp. sqrt(3)*radius for h, due
+ // to squared resp. cubic neighbor area
+ vdiff += norm(vij) * (1 - dot(getNormalized(vij), getNormalized(xij))) *
+ (1 - norm(xij) / h);
+
+ if (dot(getNormalized(xij), ni) < 0) { // identifies wave crests
+ kappa += (1 - dot(ni, nj)) * (1 - norm(xij) / h);
+ }
+ }
+ }
+ }
+
+ neighborRatio(i, j, k) = float(countFluid) / float(countMaxFluid);
+
+ potTA(i, j, k) = clampPotential(vdiff, tauMinTA, tauMaxTA);
+ if (dot(getNormalized(vi), ni) >= 0.6) { // avoid marking borders of the scene as wave crests
+ potWC(i, j, k) = clampPotential(kappa, tauMinWC, tauMaxWC);
+ }
+ else {
+ potWC(i, j, k) = Real(0);
+ }
+
+ // compute kinetic energy potential
+ Real ek =
+ Real(0.5) * 125 *
+ normSquare(
+ vi); // use arbitrary constant for mass, potential adjusts with thresholds anyways
+ potKE(i, j, k) = clampPotential(ek, tauMinKE, tauMaxKE);
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return potTA;
+ }
+ typedef Grid<Real> type0;
+ inline Grid<Real> &getArg1()
+ {
+ return potWC;
+ }
+ typedef Grid<Real> type1;
+ inline Grid<Real> &getArg2()
+ {
+ return potKE;
+ }
+ typedef Grid<Real> type2;
+ inline Grid<Real> &getArg3()
+ {
+ return neighborRatio;
+ }
+ typedef Grid<Real> type3;
+ inline const FlagGrid &getArg4()
+ {
+ return flags;
+ }
+ typedef FlagGrid type4;
+ inline const MACGrid &getArg5()
+ {
+ return v;
+ }
+ typedef MACGrid type5;
+ inline const Grid<Vec3> &getArg6()
+ {
+ return normal;
+ }
+ typedef Grid<Vec3> type6;
+ inline const int &getArg7()
+ {
+ return radius;
+ }
+ typedef int type7;
+ inline const Real &getArg8()
+ {
+ return tauMinTA;
+ }
+ typedef Real type8;
+ inline const Real &getArg9()
+ {
+ return tauMaxTA;
+ }
+ typedef Real type9;
+ inline const Real &getArg10()
+ {
+ return tauMinWC;
+ }
+ typedef Real type10;
+ inline const Real &getArg11()
+ {
+ return tauMaxWC;
+ }
+ typedef Real type11;
+ inline const Real &getArg12()
+ {
+ return tauMinKE;
+ }
+ typedef Real type12;
+ inline const Real &getArg13()
+ {
+ return tauMaxKE;
+ }
+ typedef Real type13;
+ inline const Real &getArg14()
+ {
+ return scaleFromManta;
+ }
+ typedef Real type14;
+ inline const int &getArg15()
+ {
+ return itype;
+ }
+ typedef int type15;
+ inline const int &getArg16()
+ {
+ return jtype;
+ }
+ typedef int type16;
+ void runMessage()
+ {
+ debMsg("Executing kernel knFlipComputeSecondaryParticlePotentials ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = radius; j < _maxY; j++)
+ for (int i = radius; i < _maxX; i++)
+ op(i,
+ j,
+ k,
+ potTA,
+ potWC,
+ potKE,
+ neighborRatio,
+ flags,
+ v,
+ normal,
+ radius,
+ tauMinTA,
+ tauMaxTA,
+ tauMinWC,
+ tauMaxWC,
+ tauMinKE,
+ tauMaxKE,
+ scaleFromManta,
+ itype,
+ jtype);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = radius; i < _maxX; i++)
+ op(i,
+ j,
+ k,
+ potTA,
+ potWC,
+ potKE,
+ neighborRatio,
+ flags,
+ v,
+ normal,
+ radius,
+ tauMinTA,
+ tauMaxTA,
+ tauMinWC,
+ tauMaxWC,
+ tauMinKE,
+ tauMaxKE,
+ scaleFromManta,
+ itype,
+ jtype);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(radius, maxY), *this);
+ }
+ Grid<Real> &potTA;
+ Grid<Real> &potWC;
+ Grid<Real> &potKE;
+ Grid<Real> &neighborRatio;
+ const FlagGrid &flags;
+ const MACGrid &v;
+ const Grid<Vec3> &normal;
+ const int radius;
+ const Real tauMinTA;
+ const Real tauMaxTA;
+ const Real tauMinWC;
+ const Real tauMaxWC;
+ const Real tauMinKE;
+ const Real tauMaxKE;
+ const Real scaleFromManta;
+ const int itype;
+ const int jtype;
+};
+
+void flipComputeSecondaryParticlePotentials(Grid<Real> &potTA,
+ Grid<Real> &potWC,
+ Grid<Real> &potKE,
+ Grid<Real> &neighborRatio,
+ const FlagGrid &flags,
+ const MACGrid &v,
+ Grid<Vec3> &normal,
+ const Grid<Real> &phi,
+ const int radius,
+ const Real tauMinTA,
+ const Real tauMaxTA,
+ const Real tauMinWC,
+ const Real tauMaxWC,
+ const Real tauMinKE,
+ const Real tauMaxKE,
+ const Real scaleFromManta,
+ const int itype = FlagGrid::TypeFluid,
+ const int jtype = FlagGrid::TypeObstacle |
+ FlagGrid::TypeOutflow |
+ FlagGrid::TypeInflow)
+{
+ potTA.clear();
+ potWC.clear();
+ potKE.clear();
+ neighborRatio.clear();
+ GradientOp(normal, phi);
+ knFlipComputeSecondaryParticlePotentials(potTA,
+ potWC,
+ potKE,
+ neighborRatio,
+ flags,
+ v,
+ normal,
+ radius,
+ tauMinTA,
+ tauMaxTA,
+ tauMinWC,
+ tauMaxWC,
+ tauMinKE,
+ tauMaxKE,
+ scaleFromManta,
+ itype,
+ jtype);
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "flipComputeSecondaryParticlePotentials", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &potTA = *_args.getPtr<Grid<Real>>("potTA", 0, &_lock);
+ Grid<Real> &potWC = *_args.getPtr<Grid<Real>>("potWC", 1, &_lock);
+ Grid<Real> &potKE = *_args.getPtr<Grid<Real>>("potKE", 2, &_lock);
+ Grid<Real> &neighborRatio = *_args.getPtr<Grid<Real>>("neighborRatio", 3, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 4, &_lock);
+ const MACGrid &v = *_args.getPtr<MACGrid>("v", 5, &_lock);
+ Grid<Vec3> &normal = *_args.getPtr<Grid<Vec3>>("normal", 6, &_lock);
+ const Grid<Real> &phi = *_args.getPtr<Grid<Real>>("phi", 7, &_lock);
+ const int radius = _args.get<int>("radius", 8, &_lock);
+ const Real tauMinTA = _args.get<Real>("tauMinTA", 9, &_lock);
+ const Real tauMaxTA = _args.get<Real>("tauMaxTA", 10, &_lock);
+ const Real tauMinWC = _args.get<Real>("tauMinWC", 11, &_lock);
+ const Real tauMaxWC = _args.get<Real>("tauMaxWC", 12, &_lock);
+ const Real tauMinKE = _args.get<Real>("tauMinKE", 13, &_lock);
+ const Real tauMaxKE = _args.get<Real>("tauMaxKE", 14, &_lock);
+ const Real scaleFromManta = _args.get<Real>("scaleFromManta", 15, &_lock);
+ const int itype = _args.getOpt<int>("itype", 16, FlagGrid::TypeFluid, &_lock);
+ const int jtype = _args.getOpt<int>("jtype",
+ 17,
+ FlagGrid::TypeObstacle | FlagGrid::TypeOutflow |
+ FlagGrid::TypeInflow,
+ &_lock);
+ _retval = getPyNone();
+ flipComputeSecondaryParticlePotentials(potTA,
+ potWC,
+ potKE,
+ neighborRatio,
+ flags,
+ v,
+ normal,
+ phi,
+ radius,
+ tauMinTA,
+ tauMaxTA,
+ tauMinWC,
+ tauMaxWC,
+ tauMinKE,
+ tauMaxKE,
+ scaleFromManta,
+ itype,
+ jtype);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "flipComputeSecondaryParticlePotentials", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("flipComputeSecondaryParticlePotentials", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_flipComputeSecondaryParticlePotentials(
+ "", "flipComputeSecondaryParticlePotentials", _W_0);
+extern "C" {
+void PbRegister_flipComputeSecondaryParticlePotentials()
+{
+ KEEP_UNUSED(_RP_flipComputeSecondaryParticlePotentials);
+}
+}
+
+// adds secondary particles to &pts_sec for every fluid cell in &flags according to the potential
+// grids &potTA, &potWC and &potKE secondary particles are uniformly sampled in every fluid cell in
+// a randomly offset cylinder in fluid movement direction In contrast to
+// flipSampleSecondaryParticles this uses more cylinders per cell and interpolates velocity and
+// potentials. To control number of cylinders in each dimension adjust radius(0.25=>2 cyl,
+// 0.1666=>3 cyl, 0.125=>3cyl etc.).
+
+struct knFlipSampleSecondaryParticlesMoreCylinders : public KernelBase {
+ knFlipSampleSecondaryParticlesMoreCylinders(const FlagGrid &flags,
+ const MACGrid &v,
+ BasicParticleSystem &pts_sec,
+ ParticleDataImpl<Vec3> &v_sec,
+ ParticleDataImpl<Real> &l_sec,
+ const Real lMin,
+ const Real lMax,
+ const Grid<Real> &potTA,
+ const Grid<Real> &potWC,
+ const Grid<Real> &potKE,
+ const Grid<Real> &neighborRatio,
+ const Real c_s,
+ const Real c_b,
+ const Real k_ta,
+ const Real k_wc,
+ const Real dt,
+ const int itype = FlagGrid::TypeFluid)
+ : KernelBase(&flags, 0),
+ flags(flags),
+ v(v),
+ pts_sec(pts_sec),
+ v_sec(v_sec),
+ l_sec(l_sec),
+ lMin(lMin),
+ lMax(lMax),
+ potTA(potTA),
+ potWC(potWC),
+ potKE(potKE),
+ neighborRatio(neighborRatio),
+ c_s(c_s),
+ c_b(c_b),
+ k_ta(k_ta),
+ k_wc(k_wc),
+ dt(dt),
+ itype(itype)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ const MACGrid &v,
+ BasicParticleSystem &pts_sec,
+ ParticleDataImpl<Vec3> &v_sec,
+ ParticleDataImpl<Real> &l_sec,
+ const Real lMin,
+ const Real lMax,
+ const Grid<Real> &potTA,
+ const Grid<Real> &potWC,
+ const Grid<Real> &potKE,
+ const Grid<Real> &neighborRatio,
+ const Real c_s,
+ const Real c_b,
+ const Real k_ta,
+ const Real k_wc,
+ const Real dt,
+ const int itype = FlagGrid::TypeFluid)
+ {
+
+ if (!(flags(i, j, k) & itype))
+ return;
+
+ RandomStream mRand(9832);
+ Real radius =
+ 0.25; // diameter=0.5 => sampling with two cylinders in each dimension since cell size=1
+ for (Real x = i - radius; x <= i + radius; x += 2 * radius) {
+ for (Real y = j - radius; y <= j + radius; y += 2 * radius) {
+ for (Real z = k - radius; z <= k + radius; z += 2 * radius) {
+
+ Vec3 xi = Vec3(x, y, z);
+ Real KE = potKE.getInterpolated(xi);
+ Real TA = potTA.getInterpolated(xi);
+ Real WC = potWC.getInterpolated(xi);
+
+ const int n = KE * (k_ta * TA + k_wc * WC) * dt; // number of secondary particles
+ if (n == 0)
+ continue;
+ Vec3 vi = v.getInterpolated(xi);
+ Vec3 dir = dt * vi; // direction of movement of current particle
+ Vec3 e1 = getNormalized(Vec3(dir.z, 0, -dir.x)); // perpendicular to dir
+ Vec3 e2 = getNormalized(
+ cross(e1, dir)); // perpendicular to dir and e1, so e1 and e2 create the reference plane
+
+ for (int di = 0; di < n; di++) {
+ const Real r = radius * sqrt(mRand.getReal()); // distance to cylinder axis
+ const Real theta = mRand.getReal() * Real(2) * M_PI; // azimuth
+ const Real h = mRand.getReal() * norm(dt * vi); // distance to reference plane
+ Vec3 xd = xi + r * cos(theta) * e1 + r * sin(theta) * e2 + h * getNormalized(vi);
+ if (!flags.is3D())
+ xd.z = 0;
+ pts_sec.add(xd);
+
+ v_sec[v_sec.size() - 1] = r * cos(theta) * e1 + r * sin(theta) * e2 +
+ vi; // init velocity of new particle
+ Real temp = (KE + TA + WC) / 3;
+ l_sec[l_sec.size() - 1] = ((lMax - lMin) * temp) + lMin +
+ mRand.getReal() * 0.1; // init lifetime of new particle
+
+ // init type of new particle
+ if (neighborRatio(i, j, k) < c_s) {
+ pts_sec[pts_sec.size() - 1].flag = ParticleBase::PSPRAY;
+ }
+ else if (neighborRatio(i, j, k) > c_b) {
+ pts_sec[pts_sec.size() - 1].flag = ParticleBase::PBUBBLE;
+ }
+ else {
+ pts_sec[pts_sec.size() - 1].flag = ParticleBase::PFOAM;
+ }
+ }
+ }
+ }
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const MACGrid &getArg1()
+ {
+ return v;
+ }
+ typedef MACGrid type1;
+ inline BasicParticleSystem &getArg2()
+ {
+ return pts_sec;
+ }
+ typedef BasicParticleSystem type2;
+ inline ParticleDataImpl<Vec3> &getArg3()
+ {
+ return v_sec;
+ }
+ typedef ParticleDataImpl<Vec3> type3;
+ inline ParticleDataImpl<Real> &getArg4()
+ {
+ return l_sec;
+ }
+ typedef ParticleDataImpl<Real> type4;
+ inline const Real &getArg5()
+ {
+ return lMin;
+ }
+ typedef Real type5;
+ inline const Real &getArg6()
+ {
+ return lMax;
+ }
+ typedef Real type6;
+ inline const Grid<Real> &getArg7()
+ {
+ return potTA;
+ }
+ typedef Grid<Real> type7;
+ inline const Grid<Real> &getArg8()
+ {
+ return potWC;
+ }
+ typedef Grid<Real> type8;
+ inline const Grid<Real> &getArg9()
+ {
+ return potKE;
+ }
+ typedef Grid<Real> type9;
+ inline const Grid<Real> &getArg10()
+ {
+ return neighborRatio;
+ }
+ typedef Grid<Real> type10;
+ inline const Real &getArg11()
+ {
+ return c_s;
+ }
+ typedef Real type11;
+ inline const Real &getArg12()
+ {
+ return c_b;
+ }
+ typedef Real type12;
+ inline const Real &getArg13()
+ {
+ return k_ta;
+ }
+ typedef Real type13;
+ inline const Real &getArg14()
+ {
+ return k_wc;
+ }
+ typedef Real type14;
+ inline const Real &getArg15()
+ {
+ return dt;
+ }
+ typedef Real type15;
+ inline const int &getArg16()
+ {
+ return itype;
+ }
+ typedef int type16;
+ void runMessage()
+ {
+ debMsg("Executing kernel knFlipSampleSecondaryParticlesMoreCylinders ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void run()
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ for (int k = minZ; k < maxZ; k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i,
+ j,
+ k,
+ flags,
+ v,
+ pts_sec,
+ v_sec,
+ l_sec,
+ lMin,
+ lMax,
+ potTA,
+ potWC,
+ potKE,
+ neighborRatio,
+ c_s,
+ c_b,
+ k_ta,
+ k_wc,
+ dt,
+ itype);
+ }
+ const FlagGrid &flags;
+ const MACGrid &v;
+ BasicParticleSystem &pts_sec;
+ ParticleDataImpl<Vec3> &v_sec;
+ ParticleDataImpl<Real> &l_sec;
+ const Real lMin;
+ const Real lMax;
+ const Grid<Real> &potTA;
+ const Grid<Real> &potWC;
+ const Grid<Real> &potKE;
+ const Grid<Real> &neighborRatio;
+ const Real c_s;
+ const Real c_b;
+ const Real k_ta;
+ const Real k_wc;
+ const Real dt;
+ const int itype;
+};
+
+// adds secondary particles to &pts_sec for every fluid cell in &flags according to the potential
+// grids &potTA, &potWC and &potKE secondary particles are uniformly sampled in every fluid cell in
+// a randomly offset cylinder in fluid movement direction
+
+struct knFlipSampleSecondaryParticles : public KernelBase {
+ knFlipSampleSecondaryParticles(const FlagGrid &flags,
+ const MACGrid &v,
+ BasicParticleSystem &pts_sec,
+ ParticleDataImpl<Vec3> &v_sec,
+ ParticleDataImpl<Real> &l_sec,
+ const Real lMin,
+ const Real lMax,
+ const Grid<Real> &potTA,
+ const Grid<Real> &potWC,
+ const Grid<Real> &potKE,
+ const Grid<Real> &neighborRatio,
+ const Real c_s,
+ const Real c_b,
+ const Real k_ta,
+ const Real k_wc,
+ const Real dt,
+ const int itype = FlagGrid::TypeFluid)
+ : KernelBase(&flags, 0),
+ flags(flags),
+ v(v),
+ pts_sec(pts_sec),
+ v_sec(v_sec),
+ l_sec(l_sec),
+ lMin(lMin),
+ lMax(lMax),
+ potTA(potTA),
+ potWC(potWC),
+ potKE(potKE),
+ neighborRatio(neighborRatio),
+ c_s(c_s),
+ c_b(c_b),
+ k_ta(k_ta),
+ k_wc(k_wc),
+ dt(dt),
+ itype(itype)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ const MACGrid &v,
+ BasicParticleSystem &pts_sec,
+ ParticleDataImpl<Vec3> &v_sec,
+ ParticleDataImpl<Real> &l_sec,
+ const Real lMin,
+ const Real lMax,
+ const Grid<Real> &potTA,
+ const Grid<Real> &potWC,
+ const Grid<Real> &potKE,
+ const Grid<Real> &neighborRatio,
+ const Real c_s,
+ const Real c_b,
+ const Real k_ta,
+ const Real k_wc,
+ const Real dt,
+ const int itype = FlagGrid::TypeFluid)
+ {
+
+ if (!(flags(i, j, k) & itype))
+ return;
+
+ Real KE = potKE(i, j, k);
+ Real TA = potTA(i, j, k);
+ Real WC = potWC(i, j, k);
+
+ const int n = KE * (k_ta * TA + k_wc * WC) * dt; // number of secondary particles
+ if (n == 0)
+ return;
+ RandomStream mRand(9832);
+
+ Vec3 xi = Vec3(i + mRand.getReal(),
+ j + mRand.getReal(),
+ k + mRand.getReal()); // randomized offset uniform in cell
+ Vec3 vi = v.getInterpolated(xi);
+ Vec3 dir = dt * vi; // direction of movement of current particle
+ Vec3 e1 = getNormalized(Vec3(dir.z, 0, -dir.x)); // perpendicular to dir
+ Vec3 e2 = getNormalized(
+ cross(e1, dir)); // perpendicular to dir and e1, so e1 and e2 create the reference plane
+
+ for (int di = 0; di < n; di++) {
+ const Real r = Real(0.5) * sqrt(mRand.getReal()); // distance to cylinder axis
+ const Real theta = mRand.getReal() * Real(2) * M_PI; // azimuth
+ const Real h = mRand.getReal() * norm(dt * vi); // distance to reference plane
+ Vec3 xd = xi + r * cos(theta) * e1 + r * sin(theta) * e2 + h * getNormalized(vi);
+ if (!flags.is3D())
+ xd.z = 0;
+ pts_sec.add(xd);
+
+ v_sec[v_sec.size() - 1] = r * cos(theta) * e1 + r * sin(theta) * e2 +
+ vi; // init velocity of new particle
+ Real temp = (KE + TA + WC) / 3;
+ l_sec[l_sec.size() - 1] = ((lMax - lMin) * temp) + lMin +
+ mRand.getReal() * 0.1; // init lifetime of new particle
+
+ // init type of new particle
+ if (neighborRatio(i, j, k) < c_s) {
+ pts_sec[pts_sec.size() - 1].flag = ParticleBase::PSPRAY;
+ }
+ else if (neighborRatio(i, j, k) > c_b) {
+ pts_sec[pts_sec.size() - 1].flag = ParticleBase::PBUBBLE;
+ }
+ else {
+ pts_sec[pts_sec.size() - 1].flag = ParticleBase::PFOAM;
+ }
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const MACGrid &getArg1()
+ {
+ return v;
+ }
+ typedef MACGrid type1;
+ inline BasicParticleSystem &getArg2()
+ {
+ return pts_sec;
+ }
+ typedef BasicParticleSystem type2;
+ inline ParticleDataImpl<Vec3> &getArg3()
+ {
+ return v_sec;
+ }
+ typedef ParticleDataImpl<Vec3> type3;
+ inline ParticleDataImpl<Real> &getArg4()
+ {
+ return l_sec;
+ }
+ typedef ParticleDataImpl<Real> type4;
+ inline const Real &getArg5()
+ {
+ return lMin;
+ }
+ typedef Real type5;
+ inline const Real &getArg6()
+ {
+ return lMax;
+ }
+ typedef Real type6;
+ inline const Grid<Real> &getArg7()
+ {
+ return potTA;
+ }
+ typedef Grid<Real> type7;
+ inline const Grid<Real> &getArg8()
+ {
+ return potWC;
+ }
+ typedef Grid<Real> type8;
+ inline const Grid<Real> &getArg9()
+ {
+ return potKE;
+ }
+ typedef Grid<Real> type9;
+ inline const Grid<Real> &getArg10()
+ {
+ return neighborRatio;
+ }
+ typedef Grid<Real> type10;
+ inline const Real &getArg11()
+ {
+ return c_s;
+ }
+ typedef Real type11;
+ inline const Real &getArg12()
+ {
+ return c_b;
+ }
+ typedef Real type12;
+ inline const Real &getArg13()
+ {
+ return k_ta;
+ }
+ typedef Real type13;
+ inline const Real &getArg14()
+ {
+ return k_wc;
+ }
+ typedef Real type14;
+ inline const Real &getArg15()
+ {
+ return dt;
+ }
+ typedef Real type15;
+ inline const int &getArg16()
+ {
+ return itype;
+ }
+ typedef int type16;
+ void runMessage()
+ {
+ debMsg("Executing kernel knFlipSampleSecondaryParticles ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void run()
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ for (int k = minZ; k < maxZ; k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i,
+ j,
+ k,
+ flags,
+ v,
+ pts_sec,
+ v_sec,
+ l_sec,
+ lMin,
+ lMax,
+ potTA,
+ potWC,
+ potKE,
+ neighborRatio,
+ c_s,
+ c_b,
+ k_ta,
+ k_wc,
+ dt,
+ itype);
+ }
+ const FlagGrid &flags;
+ const MACGrid &v;
+ BasicParticleSystem &pts_sec;
+ ParticleDataImpl<Vec3> &v_sec;
+ ParticleDataImpl<Real> &l_sec;
+ const Real lMin;
+ const Real lMax;
+ const Grid<Real> &potTA;
+ const Grid<Real> &potWC;
+ const Grid<Real> &potKE;
+ const Grid<Real> &neighborRatio;
+ const Real c_s;
+ const Real c_b;
+ const Real k_ta;
+ const Real k_wc;
+ const Real dt;
+ const int itype;
+};
+
+void flipSampleSecondaryParticles(const std::string mode,
+ const FlagGrid &flags,
+ const MACGrid &v,
+ BasicParticleSystem &pts_sec,
+ ParticleDataImpl<Vec3> &v_sec,
+ ParticleDataImpl<Real> &l_sec,
+ const Real lMin,
+ const Real lMax,
+ const Grid<Real> &potTA,
+ const Grid<Real> &potWC,
+ const Grid<Real> &potKE,
+ const Grid<Real> &neighborRatio,
+ const Real c_s,
+ const Real c_b,
+ const Real k_ta,
+ const Real k_wc,
+ const Real dt,
+ const int itype = FlagGrid::TypeFluid)
+{
+ if (mode == "single") {
+ knFlipSampleSecondaryParticles(flags,
+ v,
+ pts_sec,
+ v_sec,
+ l_sec,
+ lMin,
+ lMax,
+ potTA,
+ potWC,
+ potKE,
+ neighborRatio,
+ c_s,
+ c_b,
+ k_ta,
+ k_wc,
+ dt,
+ itype);
+ }
+ else if (mode == "multiple") {
+ knFlipSampleSecondaryParticlesMoreCylinders(flags,
+ v,
+ pts_sec,
+ v_sec,
+ l_sec,
+ lMin,
+ lMax,
+ potTA,
+ potWC,
+ potKE,
+ neighborRatio,
+ c_s,
+ c_b,
+ k_ta,
+ k_wc,
+ dt,
+ itype);
+ }
+ else {
+ throw std::invalid_argument("Unknown mode: use \"single\" or \"multiple\" instead!");
+ }
+}
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "flipSampleSecondaryParticles", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const std::string mode = _args.get<std::string>("mode", 0, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 1, &_lock);
+ const MACGrid &v = *_args.getPtr<MACGrid>("v", 2, &_lock);
+ BasicParticleSystem &pts_sec = *_args.getPtr<BasicParticleSystem>("pts_sec", 3, &_lock);
+ ParticleDataImpl<Vec3> &v_sec = *_args.getPtr<ParticleDataImpl<Vec3>>("v_sec", 4, &_lock);
+ ParticleDataImpl<Real> &l_sec = *_args.getPtr<ParticleDataImpl<Real>>("l_sec", 5, &_lock);
+ const Real lMin = _args.get<Real>("lMin", 6, &_lock);
+ const Real lMax = _args.get<Real>("lMax", 7, &_lock);
+ const Grid<Real> &potTA = *_args.getPtr<Grid<Real>>("potTA", 8, &_lock);
+ const Grid<Real> &potWC = *_args.getPtr<Grid<Real>>("potWC", 9, &_lock);
+ const Grid<Real> &potKE = *_args.getPtr<Grid<Real>>("potKE", 10, &_lock);
+ const Grid<Real> &neighborRatio = *_args.getPtr<Grid<Real>>("neighborRatio", 11, &_lock);
+ const Real c_s = _args.get<Real>("c_s", 12, &_lock);
+ const Real c_b = _args.get<Real>("c_b", 13, &_lock);
+ const Real k_ta = _args.get<Real>("k_ta", 14, &_lock);
+ const Real k_wc = _args.get<Real>("k_wc", 15, &_lock);
+ const Real dt = _args.get<Real>("dt", 16, &_lock);
+ const int itype = _args.getOpt<int>("itype", 17, FlagGrid::TypeFluid, &_lock);
+ _retval = getPyNone();
+ flipSampleSecondaryParticles(mode,
+ flags,
+ v,
+ pts_sec,
+ v_sec,
+ l_sec,
+ lMin,
+ lMax,
+ potTA,
+ potWC,
+ potKE,
+ neighborRatio,
+ c_s,
+ c_b,
+ k_ta,
+ k_wc,
+ dt,
+ itype);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "flipSampleSecondaryParticles", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("flipSampleSecondaryParticles", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_flipSampleSecondaryParticles("",
+ "flipSampleSecondaryParticles",
+ _W_1);
+extern "C" {
+void PbRegister_flipSampleSecondaryParticles()
+{
+ KEEP_UNUSED(_RP_flipSampleSecondaryParticles);
+}
+}
+
+// evaluates cubic spline with radius h and distance l in dim dimensions
+Real cubicSpline(const Real h, const Real l, const int dim)
+{
+ const Real h2 = square(h), h3 = h2 * h, h4 = h3 * h, h5 = h4 * h;
+ const Real c[] = {
+ Real(2e0 / (3e0 * h)), Real(10e0 / (7e0 * M_PI * h2)), Real(1e0 / (M_PI * h3))};
+ const Real q = l / h;
+ if (q < 1e0)
+ return c[dim - 1] * (1e0 - 1.5 * square(q) + 0.75 * cubed(q));
+ else if (q < 2e0)
+ return c[dim - 1] * (0.25 * cubed(2e0 - q));
+ return 0;
+}
+
+// updates position &pts_sec.pos and velocity &v_sec of secondary particles according to the
+// particle type determined by the neighbor ratio with linear interpolation
+
+struct knFlipUpdateSecondaryParticlesLinear : public KernelBase {
+ knFlipUpdateSecondaryParticlesLinear(BasicParticleSystem &pts_sec,
+ ParticleDataImpl<Vec3> &v_sec,
+ ParticleDataImpl<Real> &l_sec,
+ const ParticleDataImpl<Vec3> &f_sec,
+ const FlagGrid &flags,
+ const MACGrid &v,
+ const Grid<Real> &neighborRatio,
+ const Vec3 gravity,
+ const Real k_b,
+ const Real k_d,
+ const Real c_s,
+ const Real c_b,
+ const Real dt,
+ const int exclude,
+ const int antitunneling)
+ : KernelBase(pts_sec.size()),
+ pts_sec(pts_sec),
+ v_sec(v_sec),
+ l_sec(l_sec),
+ f_sec(f_sec),
+ flags(flags),
+ v(v),
+ neighborRatio(neighborRatio),
+ gravity(gravity),
+ k_b(k_b),
+ k_d(k_d),
+ c_s(c_s),
+ c_b(c_b),
+ dt(dt),
+ exclude(exclude),
+ antitunneling(antitunneling)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ BasicParticleSystem &pts_sec,
+ ParticleDataImpl<Vec3> &v_sec,
+ ParticleDataImpl<Real> &l_sec,
+ const ParticleDataImpl<Vec3> &f_sec,
+ const FlagGrid &flags,
+ const MACGrid &v,
+ const Grid<Real> &neighborRatio,
+ const Vec3 gravity,
+ const Real k_b,
+ const Real k_d,
+ const Real c_s,
+ const Real c_b,
+ const Real dt,
+ const int exclude,
+ const int antitunneling) const
+ {
+
+ if (!pts_sec.isActive(idx) || pts_sec[idx].flag & exclude)
+ return;
+ if (!flags.isInBounds(pts_sec[idx].pos)) {
+ pts_sec.kill(idx);
+ return;
+ }
+
+ Vec3i gridpos = toVec3i(pts_sec[idx].pos);
+ int i = gridpos.x;
+ int j = gridpos.y;
+ int k = gridpos.z;
+
+ // spray particle
+ if (neighborRatio(gridpos) < c_s) {
+ pts_sec[idx].flag |= ParticleBase::PSPRAY;
+ pts_sec[idx].flag &= ~(ParticleBase::PBUBBLE | ParticleBase::PFOAM);
+ v_sec[idx] += dt *
+ ((f_sec[idx] / 1) + gravity); // TODO: if forces are added (e.g. fluid
+ // guiding), add parameter for mass instead of 1
+
+ // anti tunneling for small obstacles
+ for (int ct = 1; ct < antitunneling; ct++) {
+ Vec3i tempPos = toVec3i(pts_sec[idx].pos +
+ ct * (1 / Real(antitunneling)) * dt * v_sec[idx]);
+ if (!flags.isInBounds(tempPos) || flags(tempPos) & FlagGrid::TypeObstacle) {
+ pts_sec.kill(idx);
+ return;
+ }
+ }
+ pts_sec[idx].pos += dt * v_sec[idx];
+ }
+
+ // air bubble particle
+ else if (neighborRatio(gridpos) > c_b) {
+ pts_sec[idx].flag |= ParticleBase::PBUBBLE;
+ pts_sec[idx].flag &= ~(ParticleBase::PSPRAY | ParticleBase::PFOAM);
+
+ const Vec3 vj = (v.getInterpolated(pts_sec[idx].pos) - v_sec[idx]) / dt;
+ v_sec[idx] += dt * (k_b * -gravity + k_d * vj);
+
+ // anti tunneling for small obstacles
+ for (int ct = 1; ct < antitunneling; ct++) {
+ Vec3i tempPos = toVec3i(pts_sec[idx].pos +
+ ct * (1 / Real(antitunneling)) * dt * v_sec[idx]);
+ if (!flags.isInBounds(tempPos) || flags(tempPos) & FlagGrid::TypeObstacle) {
+ pts_sec.kill(idx);
+ return;
+ }
+ }
+ pts_sec[idx].pos += dt * v_sec[idx];
+ }
+
+ // foam particle
+ else {
+ pts_sec[idx].flag |= ParticleBase::PFOAM;
+ pts_sec[idx].flag &= ~(ParticleBase::PBUBBLE | ParticleBase::PSPRAY);
+
+ const Vec3 vj = v.getInterpolated(pts_sec[idx].pos);
+ // anti tunneling for small obstacles
+ for (int ct = 1; ct < antitunneling; ct++) {
+ Vec3i tempPos = toVec3i(pts_sec[idx].pos + ct * (1 / Real(antitunneling)) * dt * vj);
+ if (!flags.isInBounds(tempPos) || flags(tempPos) & FlagGrid::TypeObstacle) {
+ pts_sec.kill(idx);
+ return;
+ }
+ }
+ pts_sec[idx].pos += dt * v.getInterpolated(pts_sec[idx].pos);
+ }
+
+ // lifetime
+ l_sec[idx] -= dt;
+ if (l_sec[idx] <= Real(0)) {
+ pts_sec.kill(idx);
+ }
+ }
+ inline BasicParticleSystem &getArg0()
+ {
+ return pts_sec;
+ }
+ typedef BasicParticleSystem type0;
+ inline ParticleDataImpl<Vec3> &getArg1()
+ {
+ return v_sec;
+ }
+ typedef ParticleDataImpl<Vec3> type1;
+ inline ParticleDataImpl<Real> &getArg2()
+ {
+ return l_sec;
+ }
+ typedef ParticleDataImpl<Real> type2;
+ inline const ParticleDataImpl<Vec3> &getArg3()
+ {
+ return f_sec;
+ }
+ typedef ParticleDataImpl<Vec3> type3;
+ inline const FlagGrid &getArg4()
+ {
+ return flags;
+ }
+ typedef FlagGrid type4;
+ inline const MACGrid &getArg5()
+ {
+ return v;
+ }
+ typedef MACGrid type5;
+ inline const Grid<Real> &getArg6()
+ {
+ return neighborRatio;
+ }
+ typedef Grid<Real> type6;
+ inline const Vec3 &getArg7()
+ {
+ return gravity;
+ }
+ typedef Vec3 type7;
+ inline const Real &getArg8()
+ {
+ return k_b;
+ }
+ typedef Real type8;
+ inline const Real &getArg9()
+ {
+ return k_d;
+ }
+ typedef Real type9;
+ inline const Real &getArg10()
+ {
+ return c_s;
+ }
+ typedef Real type10;
+ inline const Real &getArg11()
+ {
+ return c_b;
+ }
+ typedef Real type11;
+ inline const Real &getArg12()
+ {
+ return dt;
+ }
+ typedef Real type12;
+ inline const int &getArg13()
+ {
+ return exclude;
+ }
+ typedef int type13;
+ inline const int &getArg14()
+ {
+ return antitunneling;
+ }
+ typedef int type14;
+ void runMessage()
+ {
+ debMsg("Executing kernel knFlipUpdateSecondaryParticlesLinear ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx,
+ pts_sec,
+ v_sec,
+ l_sec,
+ f_sec,
+ flags,
+ v,
+ neighborRatio,
+ gravity,
+ k_b,
+ k_d,
+ c_s,
+ c_b,
+ dt,
+ exclude,
+ antitunneling);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ BasicParticleSystem &pts_sec;
+ ParticleDataImpl<Vec3> &v_sec;
+ ParticleDataImpl<Real> &l_sec;
+ const ParticleDataImpl<Vec3> &f_sec;
+ const FlagGrid &flags;
+ const MACGrid &v;
+ const Grid<Real> &neighborRatio;
+ const Vec3 gravity;
+ const Real k_b;
+ const Real k_d;
+ const Real c_s;
+ const Real c_b;
+ const Real dt;
+ const int exclude;
+ const int antitunneling;
+};
+// updates position &pts_sec.pos and velocity &v_sec of secondary particles according to the
+// particle type determined by the neighbor ratio with cubic spline interpolation
+
// TBB kernel (auto-generated by the Mantaflow preprocessor): advances one secondary
// particle per invocation. The particle is classified by the local neighbor ratio:
//   < c_s  -> spray  (ballistic: external force + gravity)
//   > c_b  -> bubble (buoyancy k_b against gravity + drag k_d towards fluid velocity)
//   else   -> foam   (passively advected with the fluid)
// Unlike the "linear" variant, bubble/foam velocities are averaged from neighboring
// cells inside `radius` using cubic-spline weights. Particles that leave the grid,
// tunnel into obstacles, or exceed their lifetime are killed.
struct knFlipUpdateSecondaryParticlesCubic : public KernelBase {
  knFlipUpdateSecondaryParticlesCubic(BasicParticleSystem &pts_sec,
                                      ParticleDataImpl<Vec3> &v_sec,
                                      ParticleDataImpl<Real> &l_sec,
                                      const ParticleDataImpl<Vec3> &f_sec,
                                      const FlagGrid &flags,
                                      const MACGrid &v,
                                      const Grid<Real> &neighborRatio,
                                      const int radius,
                                      const Vec3 gravity,
                                      const Real k_b,
                                      const Real k_d,
                                      const Real c_s,
                                      const Real c_b,
                                      const Real dt,
                                      const int exclude,
                                      const int antitunneling,
                                      const int itype)
      : KernelBase(pts_sec.size()),
        pts_sec(pts_sec),
        v_sec(v_sec),
        l_sec(l_sec),
        f_sec(f_sec),
        flags(flags),
        v(v),
        neighborRatio(neighborRatio),
        radius(radius),
        gravity(gravity),
        k_b(k_b),
        k_d(k_d),
        c_s(c_s),
        c_b(c_b),
        dt(dt),
        exclude(exclude),
        antitunneling(antitunneling),
        itype(itype)
  {
    // Kernel objects execute on construction (generated-code convention).
    runMessage();
    run();
  }
  // Per-particle update; `idx` is the particle index.
  inline void op(IndexInt idx,
                 BasicParticleSystem &pts_sec,
                 ParticleDataImpl<Vec3> &v_sec,
                 ParticleDataImpl<Real> &l_sec,
                 const ParticleDataImpl<Vec3> &f_sec,
                 const FlagGrid &flags,
                 const MACGrid &v,
                 const Grid<Real> &neighborRatio,
                 const int radius,
                 const Vec3 gravity,
                 const Real k_b,
                 const Real k_d,
                 const Real c_s,
                 const Real c_b,
                 const Real dt,
                 const int exclude,
                 const int antitunneling,
                 const int itype) const
  {

    // Skip inactive particles and any particle type listed in `exclude`.
    if (!pts_sec.isActive(idx) || pts_sec[idx].flag & exclude)
      return;
    // Remove particles that left the domain entirely.
    if (!flags.isInBounds(pts_sec[idx].pos)) {
      pts_sec.kill(idx);
      return;
    }

    Vec3i gridpos = toVec3i(pts_sec[idx].pos);
    int i = gridpos.x;
    int j = gridpos.y;
    int k = gridpos.z;

    // spray particle
    if (neighborRatio(gridpos) < c_s) {
      pts_sec[idx].flag |= ParticleBase::PSPRAY;
      pts_sec[idx].flag &= ~(ParticleBase::PBUBBLE | ParticleBase::PFOAM);
      v_sec[idx] += dt *
                    ((f_sec[idx] / 1) + gravity);  // TODO: if forces are added (e.g. fluid
                                                   // guiding), add parameter for mass instead of 1

      // anti tunneling for small obstacles: sub-sample the step `antitunneling` times
      // and kill the particle if any intermediate position falls into an obstacle.
      for (int ct = 1; ct < antitunneling; ct++) {
        Vec3i tempPos = toVec3i(pts_sec[idx].pos +
                                ct * (1 / Real(antitunneling)) * dt * v_sec[idx]);
        if (!flags.isInBounds(tempPos) || flags(tempPos) & FlagGrid::TypeObstacle) {
          pts_sec.kill(idx);
          return;
        }
      }
      pts_sec[idx].pos += dt * v_sec[idx];
    }

    // air bubble particle
    else if (neighborRatio(gridpos) > c_b) {
      pts_sec[idx].flag |= ParticleBase::PBUBBLE;
      pts_sec[idx].flag &= ~(ParticleBase::PSPRAY | ParticleBase::PFOAM);
      const Vec3 &xi = pts_sec[idx].pos;
      // Cubic-spline-weighted average of fluid velocity over neighbor cells of type
      // `itype` inside `radius` (the center cell itself is skipped).
      Vec3 sumNumerator = Vec3(0, 0, 0);
      Real sumDenominator = 0;
      for (IndexInt x = i - radius; x <= i + radius; x++) {
        for (IndexInt y = j - radius; y <= j + radius; y++) {
          for (IndexInt z = k - radius; z <= k + radius; z++) {
            Vec3i xj = Vec3i(x, y, z);
            if ((x == i && y == j && z == k) || !flags.isInBounds(xj))
              continue;
            if (!(flags(xj) & itype))
              continue;
            const Real len_xij = norm(xi - Vec3(x, y, z));

            int dim = flags.is3D() ? 3 : 2;
            // 1.732 ~ sqrt(3), 1.414 ~ sqrt(2): spline support spans the cubic
            // resp. squared neighborhood diagonal.
            Real dist = flags.is3D() ? 1.732 : 1.414;
            Real weight = cubicSpline(radius * dist, len_xij, dim);
            sumNumerator += v.getCentered(xj) *
                            weight;  // estimate next position by current velocity
            sumDenominator += weight;
          }
        }
      }
      // Drag towards the averaged fluid velocity (k_d) plus buoyancy opposing
      // gravity (k_b). NOTE(review): sumDenominator can be 0 if no neighbor of
      // `itype` is found — presumably c_b guarantees fluid neighbors; verify.
      const Vec3 temp = ((sumNumerator / sumDenominator) - v_sec[idx]) / dt;
      v_sec[idx] += dt * (k_b * -gravity + k_d * temp);

      // anti tunneling for small obstacles
      for (int ct = 1; ct < antitunneling; ct++) {
        Vec3i tempPos = toVec3i(pts_sec[idx].pos +
                                ct * (1 / Real(antitunneling)) * dt * v_sec[idx]);
        if (!flags.isInBounds(tempPos) || flags(tempPos) & FlagGrid::TypeObstacle) {
          pts_sec.kill(idx);
          return;
        }
      }
      pts_sec[idx].pos += dt * v_sec[idx];
    }

    // foam particle
    else {
      pts_sec[idx].flag |= ParticleBase::PFOAM;
      pts_sec[idx].flag &= ~(ParticleBase::PBUBBLE | ParticleBase::PSPRAY);
      const Vec3 &xi = pts_sec[idx].pos;
      // Same cubic-spline-weighted fluid velocity average as the bubble branch;
      // foam is advected with it directly (v_sec is not updated here).
      Vec3 sumNumerator = Vec3(0, 0, 0);
      Real sumDenominator = 0;
      for (IndexInt x = i - radius; x <= i + radius; x++) {
        for (IndexInt y = j - radius; y <= j + radius; y++) {
          for (IndexInt z = k - radius; z <= k + radius; z++) {
            Vec3i xj = Vec3i(x, y, z);
            if ((x == i && y == j && z == k) || !flags.isInBounds(xj))
              continue;
            if (!(flags(xj) & itype))
              continue;
            const Real len_xij = norm(xi - Vec3(x, y, z));

            int dim = flags.is3D() ? 3 : 2;
            Real dist = flags.is3D() ? 1.732 : 1.414;
            Real weight = cubicSpline(radius * dist, len_xij, dim);
            sumNumerator += v.getCentered(xj) *
                            weight;  // estimate next position by current velocity
            sumDenominator += weight;
          }
        }
      }

      // anti tunneling for small obstacles
      for (int ct = 1; ct < antitunneling; ct++) {
        Vec3i tempPos = toVec3i(pts_sec[idx].pos + ct * (1 / Real(antitunneling)) * dt *
                                                       (sumNumerator / sumDenominator));
        if (!flags.isInBounds(tempPos) || flags(tempPos) & FlagGrid::TypeObstacle) {
          pts_sec.kill(idx);
          return;
        }
      }
      pts_sec[idx].pos += dt * (sumNumerator / sumDenominator);
    }

    // lifetime: decrement and kill expired particles.
    l_sec[idx] -= dt;
    if (l_sec[idx] <= Real(0)) {
      pts_sec.kill(idx);
    }
  }
  // Boilerplate accessors for the Python plugin registry; order and typedefs must
  // match the constructor signature exactly (generated code — do not hand-edit).
  inline BasicParticleSystem &getArg0()
  {
    return pts_sec;
  }
  typedef BasicParticleSystem type0;
  inline ParticleDataImpl<Vec3> &getArg1()
  {
    return v_sec;
  }
  typedef ParticleDataImpl<Vec3> type1;
  inline ParticleDataImpl<Real> &getArg2()
  {
    return l_sec;
  }
  typedef ParticleDataImpl<Real> type2;
  inline const ParticleDataImpl<Vec3> &getArg3()
  {
    return f_sec;
  }
  typedef ParticleDataImpl<Vec3> type3;
  inline const FlagGrid &getArg4()
  {
    return flags;
  }
  typedef FlagGrid type4;
  inline const MACGrid &getArg5()
  {
    return v;
  }
  typedef MACGrid type5;
  inline const Grid<Real> &getArg6()
  {
    return neighborRatio;
  }
  typedef Grid<Real> type6;
  inline const int &getArg7()
  {
    return radius;
  }
  typedef int type7;
  inline const Vec3 &getArg8()
  {
    return gravity;
  }
  typedef Vec3 type8;
  inline const Real &getArg9()
  {
    return k_b;
  }
  typedef Real type9;
  inline const Real &getArg10()
  {
    return k_d;
  }
  typedef Real type10;
  inline const Real &getArg11()
  {
    return c_s;
  }
  typedef Real type11;
  inline const Real &getArg12()
  {
    return c_b;
  }
  typedef Real type12;
  inline const Real &getArg13()
  {
    return dt;
  }
  typedef Real type13;
  inline const int &getArg14()
  {
    return exclude;
  }
  typedef int type14;
  inline const int &getArg15()
  {
    return antitunneling;
  }
  typedef int type15;
  inline const int &getArg16()
  {
    return itype;
  }
  typedef int type16;
  void runMessage()
  {
    debMsg("Executing kernel knFlipUpdateSecondaryParticlesCubic ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  // TBB body: applies op() to every particle index in the sub-range.
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx,
         pts_sec,
         v_sec,
         l_sec,
         f_sec,
         flags,
         v,
         neighborRatio,
         radius,
         gravity,
         k_b,
         k_d,
         c_s,
         c_b,
         dt,
         exclude,
         antitunneling,
         itype);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  BasicParticleSystem &pts_sec;
  ParticleDataImpl<Vec3> &v_sec;
  ParticleDataImpl<Real> &l_sec;
  const ParticleDataImpl<Vec3> &f_sec;
  const FlagGrid &flags;
  const MACGrid &v;
  const Grid<Real> &neighborRatio;
  const int radius;
  const Vec3 gravity;
  const Real k_b;
  const Real k_d;
  const Real c_s;
  const Real c_b;
  const Real dt;
  const int exclude;
  const int antitunneling;
  const int itype;
};
+
// Entry point exposed to Python: advances all secondary particles one step.
// `mode` selects the interpolation scheme ("linear" or "cubic"); any other value
// raises std::invalid_argument. `radius` and `itype` are only used by the cubic
// variant. Killed particles are compacted afterwards via doCompress().
void flipUpdateSecondaryParticles(const std::string mode,
                                  BasicParticleSystem &pts_sec,
                                  ParticleDataImpl<Vec3> &v_sec,
                                  ParticleDataImpl<Real> &l_sec,
                                  const ParticleDataImpl<Vec3> &f_sec,
                                  FlagGrid &flags,
                                  const MACGrid &v,
                                  const Grid<Real> &neighborRatio,
                                  const int radius,
                                  const Vec3 gravity,
                                  const Real k_b,
                                  const Real k_d,
                                  const Real c_s,
                                  const Real c_b,
                                  const Real dt,
                                  const int exclude = ParticleBase::PTRACER,
                                  const int antitunneling = 0,
                                  const int itype = FlagGrid::TypeFluid)
{

  // Rescale gravity by the cell size — presumably converts world-space gravity
  // into grid units; TODO confirm against the solver's unit conventions.
  Vec3 g = gravity / flags.getDx();
  if (mode == "linear") {
    knFlipUpdateSecondaryParticlesLinear(pts_sec,
                                         v_sec,
                                         l_sec,
                                         f_sec,
                                         flags,
                                         v,
                                         neighborRatio,
                                         g,
                                         k_b,
                                         k_d,
                                         c_s,
                                         c_b,
                                         dt,
                                         exclude,
                                         antitunneling);
  }
  else if (mode == "cubic") {
    knFlipUpdateSecondaryParticlesCubic(pts_sec,
                                        v_sec,
                                        l_sec,
                                        f_sec,
                                        flags,
                                        v,
                                        neighborRatio,
                                        radius,
                                        g,
                                        k_b,
                                        k_d,
                                        c_s,
                                        c_b,
                                        dt,
                                        exclude,
                                        antitunneling,
                                        itype);
  }
  else {
    throw std::invalid_argument("Unknown mode: use \"linear\" or \"cubic\" instead!");
  }
  // Compact the particle system so killed entries are actually removed.
  pts_sec.doCompress();
}
// Auto-generated Python wrapper: unpacks keyword arguments, forwards them to
// flipUpdateSecondaryParticles(), and converts C++ exceptions into Python errors.
static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "flipUpdateSecondaryParticles", !noTiming);
    PyObject *_retval = 0;
    {
      // _lock keeps the fetched objects alive for the duration of the call.
      ArgLocker _lock;
      const std::string mode = _args.get<std::string>("mode", 0, &_lock);
      BasicParticleSystem &pts_sec = *_args.getPtr<BasicParticleSystem>("pts_sec", 1, &_lock);
      ParticleDataImpl<Vec3> &v_sec = *_args.getPtr<ParticleDataImpl<Vec3>>("v_sec", 2, &_lock);
      ParticleDataImpl<Real> &l_sec = *_args.getPtr<ParticleDataImpl<Real>>("l_sec", 3, &_lock);
      const ParticleDataImpl<Vec3> &f_sec = *_args.getPtr<ParticleDataImpl<Vec3>>(
          "f_sec", 4, &_lock);
      FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 5, &_lock);
      const MACGrid &v = *_args.getPtr<MACGrid>("v", 6, &_lock);
      const Grid<Real> &neighborRatio = *_args.getPtr<Grid<Real>>("neighborRatio", 7, &_lock);
      const int radius = _args.get<int>("radius", 8, &_lock);
      const Vec3 gravity = _args.get<Vec3>("gravity", 9, &_lock);
      const Real k_b = _args.get<Real>("k_b", 10, &_lock);
      const Real k_d = _args.get<Real>("k_d", 11, &_lock);
      const Real c_s = _args.get<Real>("c_s", 12, &_lock);
      const Real c_b = _args.get<Real>("c_b", 13, &_lock);
      const Real dt = _args.get<Real>("dt", 14, &_lock);
      const int exclude = _args.getOpt<int>("exclude", 15, ParticleBase::PTRACER, &_lock);
      const int antitunneling = _args.getOpt<int>("antitunneling", 16, 0, &_lock);
      const int itype = _args.getOpt<int>("itype", 17, FlagGrid::TypeFluid, &_lock);
      // The plugin returns None to Python; the work happens via side effects.
      _retval = getPyNone();
      flipUpdateSecondaryParticles(mode,
                                   pts_sec,
                                   v_sec,
                                   l_sec,
                                   f_sec,
                                   flags,
                                   v,
                                   neighborRatio,
                                   radius,
                                   gravity,
                                   k_b,
                                   k_d,
                                   c_s,
                                   c_b,
                                   dt,
                                   exclude,
                                   antitunneling,
                                   itype);
      _args.check();
    }
    pbFinalizePlugin(parent, "flipUpdateSecondaryParticles", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("flipUpdateSecondaryParticles", e.what());
    return 0;
  }
}
// Registers the wrapper with the Python binding layer under the plugin's name.
static const Pb::Register _RP_flipUpdateSecondaryParticles("",
                                                           "flipUpdateSecondaryParticles",
                                                           _W_2);
extern "C" {
void PbRegister_flipUpdateSecondaryParticles()
{
  KEEP_UNUSED(_RP_flipUpdateSecondaryParticles);
}
}
+
+// removes secondary particles in &pts_sec that are inside boundaries (cells that are marked as
+// obstacle/outflow in &flags)
+
// TBB kernel: kills particles that left the grid or sit in obstacle/outflow cells.
struct knFlipDeleteParticlesInObstacle : public KernelBase {
  knFlipDeleteParticlesInObstacle(BasicParticleSystem &pts, const FlagGrid &flags)
      : KernelBase(pts.size()), pts(pts), flags(flags)
  {
    runMessage();
    run();
  }
  inline void op(IndexInt idx, BasicParticleSystem &pts, const FlagGrid &flags) const
  {

    if (!pts.isActive(idx))
      return;

    const Vec3 &xi = pts[idx].pos;
    const Vec3i xidx = toVec3i(xi);
    // remove particles that completely left the bounds
    if (!flags.isInBounds(xidx)) {
      pts.kill(idx);
      return;
    }
    int gridIndex = flags.index(xidx);
    // remove particles that penetrate obstacles
    // NOTE(review): this compares the whole flag word with == rather than testing
    // the bit with &; a cell flagged Obstacle combined with any other bit would
    // not match — confirm whether that is intended before changing it.
    if (flags[gridIndex] == FlagGrid::TypeObstacle || flags[gridIndex] == FlagGrid::TypeOutflow) {
      pts.kill(idx);
    }
  }
  // Generated registry accessors — must mirror the constructor signature.
  inline BasicParticleSystem &getArg0()
  {
    return pts;
  }
  typedef BasicParticleSystem type0;
  inline const FlagGrid &getArg1()
  {
    return flags;
  }
  typedef FlagGrid type1;
  void runMessage()
  {
    debMsg("Executing kernel knFlipDeleteParticlesInObstacle ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, pts, flags);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  BasicParticleSystem &pts;
  const FlagGrid &flags;
};
+
// Entry point: delete particles inside obstacle/outflow cells, then compact the
// particle system so the killed entries are removed.
void flipDeleteParticlesInObstacle(BasicParticleSystem &pts, const FlagGrid &flags)
{

  knFlipDeleteParticlesInObstacle(pts, flags);
  pts.doCompress();
}
// Auto-generated Python wrapper for flipDeleteParticlesInObstacle().
static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "flipDeleteParticlesInObstacle", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      BasicParticleSystem &pts = *_args.getPtr<BasicParticleSystem>("pts", 0, &_lock);
      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 1, &_lock);
      _retval = getPyNone();
      flipDeleteParticlesInObstacle(pts, flags);
      _args.check();
    }
    pbFinalizePlugin(parent, "flipDeleteParticlesInObstacle", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("flipDeleteParticlesInObstacle", e.what());
    return 0;
  }
}
static const Pb::Register _RP_flipDeleteParticlesInObstacle("",
                                                            "flipDeleteParticlesInObstacle",
                                                            _W_3);
extern "C" {
void PbRegister_flipDeleteParticlesInObstacle()
{
  KEEP_UNUSED(_RP_flipDeleteParticlesInObstacle);
}
}
+
+// helper method to debug statistical data from grid
+
+void debugGridInfo(const FlagGrid &flags,
+ Grid<Real> &grid,
+ std::string name,
+ const int itype = FlagGrid::TypeFluid)
+{
+ FluidSolver *s = flags.getParent();
+ int countFluid = 0;
+ int countLargerZero = 0;
+ Real avg = 0;
+ Real max = 0;
+ Real sum = 0;
+ Real avgLargerZero = 0;
+ FOR_IJK_BND(grid, 1)
+ {
+ if (!(flags(i, j, k) & itype))
+ continue;
+ countFluid++;
+ if (grid(i, j, k) > 0)
+ countLargerZero++;
+ sum += grid(i, j, k);
+ if (grid(i, j, k) > max)
+ max = grid(i, j, k);
+ }
+ avg = sum / std::max(Real(countFluid), Real(1));
+ avgLargerZero = sum / std::max(Real(countLargerZero), Real(1));
+
+ debMsg("Step: " << s->mFrame << " - Grid " << name << "\n\tcountFluid \t\t" << countFluid
+ << "\n\tcountLargerZero \t" << countLargerZero << "\n\tsum \t\t\t" << sum
+ << "\n\tavg \t\t\t" << avg << "\n\tavgLargerZero \t\t" << avgLargerZero
+ << "\n\tmax \t\t\t" << max,
+ 1);
+}
// Auto-generated Python wrapper for debugGridInfo().
static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "debugGridInfo", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
      Grid<Real> &grid = *_args.getPtr<Grid<Real>>("grid", 1, &_lock);
      std::string name = _args.get<std::string>("name", 2, &_lock);
      const int itype = _args.getOpt<int>("itype", 3, FlagGrid::TypeFluid, &_lock);
      _retval = getPyNone();
      debugGridInfo(flags, grid, name, itype);
      _args.check();
    }
    pbFinalizePlugin(parent, "debugGridInfo", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("debugGridInfo", e.what());
    return 0;
  }
}
static const Pb::Register _RP_debugGridInfo("", "debugGridInfo", _W_4);
extern "C" {
void PbRegister_debugGridInfo()
{
  KEEP_UNUSED(_RP_debugGridInfo);
}
}
+
+// The following methods are helper functions to recreate the velocity and flag grid from the
+// underlying FLIP simulation. They cannot simply be loaded because of the upres to a higher
+// resolution, instead a levelset is used.
+
// TBB kernel: marks cells inside the level set (phi < 0) with flag `itype`,
// leaving cells that already carry any `exclude` bit untouched.
struct knSetFlagsFromLevelset : public KernelBase {
  knSetFlagsFromLevelset(FlagGrid &flags,
                         const Grid<Real> &phi,
                         const int exclude = FlagGrid::TypeObstacle,
                         const int itype = FlagGrid::TypeFluid)
      : KernelBase(&flags, 0), flags(flags), phi(phi), exclude(exclude), itype(itype)
  {
    runMessage();
    run();
  }
  inline void op(IndexInt idx,
                 FlagGrid &flags,
                 const Grid<Real> &phi,
                 const int exclude = FlagGrid::TypeObstacle,
                 const int itype = FlagGrid::TypeFluid) const
  {
    // phi < 0 is inside the surface; overwrite (not OR) the cell's flags.
    if (phi(idx) < 0 && !(flags(idx) & exclude))
      flags(idx) = itype;
  }
  // Generated registry accessors — must mirror the constructor signature.
  inline FlagGrid &getArg0()
  {
    return flags;
  }
  typedef FlagGrid type0;
  inline const Grid<Real> &getArg1()
  {
    return phi;
  }
  typedef Grid<Real> type1;
  inline const int &getArg2()
  {
    return exclude;
  }
  typedef int type2;
  inline const int &getArg3()
  {
    return itype;
  }
  typedef int type3;
  void runMessage()
  {
    debMsg("Executing kernel knSetFlagsFromLevelset ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, flags, phi, exclude, itype);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  FlagGrid &flags;
  const Grid<Real> &phi;
  const int exclude;
  const int itype;
};
+
// Entry point: rebuild fluid flags from a levelset (see knSetFlagsFromLevelset).
void setFlagsFromLevelset(FlagGrid &flags,
                          const Grid<Real> &phi,
                          const int exclude = FlagGrid::TypeObstacle,
                          const int itype = FlagGrid::TypeFluid)
{
  knSetFlagsFromLevelset(flags, phi, exclude, itype);
}
// Auto-generated Python wrapper for setFlagsFromLevelset().
static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "setFlagsFromLevelset", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
      const Grid<Real> &phi = *_args.getPtr<Grid<Real>>("phi", 1, &_lock);
      const int exclude = _args.getOpt<int>("exclude", 2, FlagGrid::TypeObstacle, &_lock);
      const int itype = _args.getOpt<int>("itype", 3, FlagGrid::TypeFluid, &_lock);
      _retval = getPyNone();
      setFlagsFromLevelset(flags, phi, exclude, itype);
      _args.check();
    }
    pbFinalizePlugin(parent, "setFlagsFromLevelset", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("setFlagsFromLevelset", e.what());
    return 0;
  }
}
static const Pb::Register _RP_setFlagsFromLevelset("", "setFlagsFromLevelset", _W_5);
extern "C" {
void PbRegister_setFlagsFromLevelset()
{
  KEEP_UNUSED(_RP_setFlagsFromLevelset);
}
}
+
// TBB kernel: writes the constant velocity `c` into every MAC cell that lies
// outside the level set (interpolated phi > 0).
struct knSetMACFromLevelset : public KernelBase {
  knSetMACFromLevelset(MACGrid &v, const Grid<Real> &phi, const Vec3 c)
      : KernelBase(&v, 0), v(v), phi(phi), c(c)
  {
    runMessage();
    run();
  }
  inline void op(int i, int j, int k, MACGrid &v, const Grid<Real> &phi, const Vec3 c) const
  {
    if (phi.getInterpolated(Vec3(i, j, k)) > 0)
      v(i, j, k) = c;
  }
  // Generated registry accessors — must mirror the constructor signature.
  inline MACGrid &getArg0()
  {
    return v;
  }
  typedef MACGrid type0;
  inline const Grid<Real> &getArg1()
  {
    return phi;
  }
  typedef Grid<Real> type1;
  inline const Vec3 &getArg2()
  {
    return c;
  }
  typedef Vec3 type2;
  void runMessage()
  {
    debMsg("Executing kernel knSetMACFromLevelset ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // Parallelize over z-slices in 3D, over rows in 2D (k fixed to 0).
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, v, phi, c);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, v, phi, c);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  MACGrid &v;
  const Grid<Real> &phi;
  const Vec3 c;
};
+
// Entry point: set velocity c outside the levelset (see knSetMACFromLevelset).
void setMACFromLevelset(MACGrid &v, const Grid<Real> &phi, const Vec3 c)
{
  knSetMACFromLevelset(v, phi, c);
}
// Auto-generated Python wrapper for setMACFromLevelset().
static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "setMACFromLevelset", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      MACGrid &v = *_args.getPtr<MACGrid>("v", 0, &_lock);
      const Grid<Real> &phi = *_args.getPtr<Grid<Real>>("phi", 1, &_lock);
      const Vec3 c = _args.get<Vec3>("c", 2, &_lock);
      _retval = getPyNone();
      setMACFromLevelset(v, phi, c);
      _args.check();
    }
    pbFinalizePlugin(parent, "setMACFromLevelset", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("setMACFromLevelset", e.what());
    return 0;
  }
}
static const Pb::Register _RP_setMACFromLevelset("", "setMACFromLevelset", _W_6);
extern "C" {
void PbRegister_setMACFromLevelset()
{
  KEEP_UNUSED(_RP_setMACFromLevelset);
}
}
+
+//----------------------------------------------------------------------------------------------------------------------------------------------------
+// END Secondary Particles for FLIP
+//----------------------------------------------------------------------------------------------------------------------------------------------------
+#pragma endregion
+
+#pragma region Legacy Methods(still useful for debugging)
+//-----------------------------------------------------------------------------------------------------------------------------------
+//-----------------
+// Legacy Methods (still useful for debugging)
+//----------------------------------------------------------------------------------------------------------------------------------------------------
+
+// LEGACY METHOD! Use flipComputeSecondaryParticlePotentials instead!
+// computes trapped air potential for all fluid cells in &flags and saves it in &pot
+
// LEGACY TBB kernel: per-cell trapped-air potential from velocity differences to
// neighboring cells within `radius`, clamped to [tauMin, tauMax] and normalized
// to [0, 1]. Runs over the interior (1-cell boundary skipped; KernelBase bnd=1).
struct knFlipComputePotentialTrappedAir : public KernelBase {
  knFlipComputePotentialTrappedAir(Grid<Real> &pot,
                                   const FlagGrid &flags,
                                   const MACGrid &v,
                                   const int radius,
                                   const Real tauMin,
                                   const Real tauMax,
                                   const Real scaleFromManta,
                                   const int itype = FlagGrid::TypeFluid,
                                   const int jtype = FlagGrid::TypeFluid)
      : KernelBase(&pot, 1),
        pot(pot),
        flags(flags),
        v(v),
        radius(radius),
        tauMin(tauMin),
        tauMax(tauMax),
        scaleFromManta(scaleFromManta),
        itype(itype),
        jtype(jtype)
  {
    runMessage();
    run();
  }
  inline void op(int i,
                 int j,
                 int k,
                 Grid<Real> &pot,
                 const FlagGrid &flags,
                 const MACGrid &v,
                 const int radius,
                 const Real tauMin,
                 const Real tauMax,
                 const Real scaleFromManta,
                 const int itype = FlagGrid::TypeFluid,
                 const int jtype = FlagGrid::TypeFluid) const
  {

    // Only evaluate cells of the center type (itype).
    if (!(flags(i, j, k) & itype))
      return;

    const Vec3 &xi = scaleFromManta * Vec3(i, j, k);  // scale to unit cube
    const Vec3 &vi = scaleFromManta * v.getCentered(i, j, k);
    Real vdiff = 0;
    // Accumulate weighted relative velocity against each neighbor of type jtype.
    for (IndexInt x = i - radius; x <= i + radius; x++) {
      for (IndexInt y = j - radius; y <= j + radius; y++) {
        for (IndexInt z = k - radius; z <= k + radius; z++) {
          if ((x == i && y == j && z == k) || !(flags(x, y, z) & jtype))
            continue;

          const Vec3 &xj = scaleFromManta * Vec3(x, y, z);  // scale to unit cube
          const Vec3 &vj = scaleFromManta * v.getCentered(x, y, z);
          const Vec3 xij = xi - xj;
          const Vec3 vij = vi - vj;
          Real h = !pot.is3D() ? 1.414 * radius :
                                 1.732 * radius;  // estimate sqrt(2)*radius resp. sqrt(3)*radius
                                                  // for h, due to squared resp. cubic neighbor area
          // Relative speed, scaled by directional opposition and by distance falloff.
          vdiff += norm(vij) * (1 - dot(getNormalized(vij), getNormalized(xij))) *
                   (1 - norm(xij) / h);
        }
      }
    }
    // Clamp to [tauMin, tauMax] and normalize into [0, 1].
    pot(i, j, k) = (std::min(vdiff, tauMax) - std::min(vdiff, tauMin)) / (tauMax - tauMin);
  }
  // Generated registry accessors — must mirror the constructor signature.
  inline Grid<Real> &getArg0()
  {
    return pot;
  }
  typedef Grid<Real> type0;
  inline const FlagGrid &getArg1()
  {
    return flags;
  }
  typedef FlagGrid type1;
  inline const MACGrid &getArg2()
  {
    return v;
  }
  typedef MACGrid type2;
  inline const int &getArg3()
  {
    return radius;
  }
  typedef int type3;
  inline const Real &getArg4()
  {
    return tauMin;
  }
  typedef Real type4;
  inline const Real &getArg5()
  {
    return tauMax;
  }
  typedef Real type5;
  inline const Real &getArg6()
  {
    return scaleFromManta;
  }
  typedef Real type6;
  inline const int &getArg7()
  {
    return itype;
  }
  typedef int type7;
  inline const int &getArg8()
  {
    return jtype;
  }
  typedef int type8;
  void runMessage()
  {
    debMsg("Executing kernel knFlipComputePotentialTrappedAir ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // Parallelize over z-slices in 3D, rows in 2D; inner loops start at 1 (bnd=1).
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i, j, k, pot, flags, v, radius, tauMin, tauMax, scaleFromManta, itype, jtype);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, pot, flags, v, radius, tauMin, tauMax, scaleFromManta, itype, jtype);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  Grid<Real> &pot;
  const FlagGrid &flags;
  const MACGrid &v;
  const int radius;
  const Real tauMin;
  const Real tauMax;
  const Real scaleFromManta;
  const int itype;
  const int jtype;
};
+
// LEGACY entry point: clears `pot`, then fills it with the normalized trapped-air
// potential (see knFlipComputePotentialTrappedAir).
void flipComputePotentialTrappedAir(Grid<Real> &pot,
                                    const FlagGrid &flags,
                                    const MACGrid &v,
                                    const int radius,
                                    const Real tauMin,
                                    const Real tauMax,
                                    const Real scaleFromManta,
                                    const int itype = FlagGrid::TypeFluid,
                                    const int jtype = FlagGrid::TypeFluid)
{
  pot.clear();
  knFlipComputePotentialTrappedAir(
      pot, flags, v, radius, tauMin, tauMax, scaleFromManta, itype, jtype);
}
// Auto-generated Python wrapper for flipComputePotentialTrappedAir().
static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "flipComputePotentialTrappedAir", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Real> &pot = *_args.getPtr<Grid<Real>>("pot", 0, &_lock);
      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 1, &_lock);
      const MACGrid &v = *_args.getPtr<MACGrid>("v", 2, &_lock);
      const int radius = _args.get<int>("radius", 3, &_lock);
      const Real tauMin = _args.get<Real>("tauMin", 4, &_lock);
      const Real tauMax = _args.get<Real>("tauMax", 5, &_lock);
      const Real scaleFromManta = _args.get<Real>("scaleFromManta", 6, &_lock);
      const int itype = _args.getOpt<int>("itype", 7, FlagGrid::TypeFluid, &_lock);
      const int jtype = _args.getOpt<int>("jtype", 8, FlagGrid::TypeFluid, &_lock);
      _retval = getPyNone();
      flipComputePotentialTrappedAir(
          pot, flags, v, radius, tauMin, tauMax, scaleFromManta, itype, jtype);
      _args.check();
    }
    pbFinalizePlugin(parent, "flipComputePotentialTrappedAir", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    pbSetError("flipComputePotentialTrappedAir", e.what());
    return 0;
  }
}
static const Pb::Register _RP_flipComputePotentialTrappedAir("",
                                                             "flipComputePotentialTrappedAir",
                                                             _W_7);
extern "C" {
void PbRegister_flipComputePotentialTrappedAir()
{
  KEEP_UNUSED(_RP_flipComputePotentialTrappedAir);
}
}
+
+// LEGACY METHOD! Use flipComputeSecondaryParticlePotentials instead!
+// computes kinetic energy potential for all fluid cells in &flags and saves it in &pot
+
// LEGACY TBB kernel: per-cell kinetic-energy potential 0.5*m*|v|^2 (with an
// arbitrary constant mass of 125), clamped to [tauMin, tauMax] and normalized
// to [0, 1]. Covers the full grid (KernelBase bnd=0).
struct knFlipComputePotentialKineticEnergy : public KernelBase {
  knFlipComputePotentialKineticEnergy(Grid<Real> &pot,
                                      const FlagGrid &flags,
                                      const MACGrid &v,
                                      const Real tauMin,
                                      const Real tauMax,
                                      const Real scaleFromManta,
                                      const int itype = FlagGrid::TypeFluid)
      : KernelBase(&pot, 0),
        pot(pot),
        flags(flags),
        v(v),
        tauMin(tauMin),
        tauMax(tauMax),
        scaleFromManta(scaleFromManta),
        itype(itype)
  {
    runMessage();
    run();
  }
  inline void op(int i,
                 int j,
                 int k,
                 Grid<Real> &pot,
                 const FlagGrid &flags,
                 const MACGrid &v,
                 const Real tauMin,
                 const Real tauMax,
                 const Real scaleFromManta,
                 const int itype = FlagGrid::TypeFluid) const
  {

    if (!(flags(i, j, k) & itype))
      return;

    const Vec3 &vi = scaleFromManta * v.getCentered(i, j, k);  // scale to unit cube
    Real ek =
        Real(0.5) * 125 *
        normSquare(
            vi);  // use arbitrary constant for mass, potential adjusts with thresholds anyways
    // Clamp to [tauMin, tauMax] and normalize into [0, 1].
    pot(i, j, k) = (std::min(ek, tauMax) - std::min(ek, tauMin)) / (tauMax - tauMin);
  }
  // Generated registry accessors — must mirror the constructor signature.
  inline Grid<Real> &getArg0()
  {
    return pot;
  }
  typedef Grid<Real> type0;
  inline const FlagGrid &getArg1()
  {
    return flags;
  }
  typedef FlagGrid type1;
  inline const MACGrid &getArg2()
  {
    return v;
  }
  typedef MACGrid type2;
  inline const Real &getArg3()
  {
    return tauMin;
  }
  typedef Real type3;
  inline const Real &getArg4()
  {
    return tauMax;
  }
  typedef Real type4;
  inline const Real &getArg5()
  {
    return scaleFromManta;
  }
  typedef Real type5;
  inline const int &getArg6()
  {
    return itype;
  }
  typedef int type6;
  void runMessage()
  {
    debMsg("Executing kernel knFlipComputePotentialKineticEnergy ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // Parallelize over z-slices in 3D, rows in 2D (k fixed to 0).
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, pot, flags, v, tauMin, tauMax, scaleFromManta, itype);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, pot, flags, v, tauMin, tauMax, scaleFromManta, itype);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  Grid<Real> &pot;
  const FlagGrid &flags;
  const MACGrid &v;
  const Real tauMin;
  const Real tauMax;
  const Real scaleFromManta;
  const int itype;
};
+
// LEGACY entry point: clears `pot`, then fills it with the normalized kinetic-
// energy potential (see knFlipComputePotentialKineticEnergy).
void flipComputePotentialKineticEnergy(Grid<Real> &pot,
                                       const FlagGrid &flags,
                                       const MACGrid &v,
                                       const Real tauMin,
                                       const Real tauMax,
                                       const Real scaleFromManta,
                                       const int itype = FlagGrid::TypeFluid)
{
  pot.clear();
  knFlipComputePotentialKineticEnergy(pot, flags, v, tauMin, tauMax, scaleFromManta, itype);
}
// Generated Python glue: unpacks the script arguments, calls
// flipComputePotentialKineticEnergy, and returns None to Python.
static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "flipComputePotentialKineticEnergy", !noTiming);
    PyObject *_retval = 0;
    {
      // _lock keeps the grids alive/locked for the duration of the call.
      ArgLocker _lock;
      Grid<Real> &pot = *_args.getPtr<Grid<Real>>("pot", 0, &_lock);
      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 1, &_lock);
      const MACGrid &v = *_args.getPtr<MACGrid>("v", 2, &_lock);
      const Real tauMin = _args.get<Real>("tauMin", 3, &_lock);
      const Real tauMax = _args.get<Real>("tauMax", 4, &_lock);
      const Real scaleFromManta = _args.get<Real>("scaleFromManta", 5, &_lock);
      const int itype = _args.getOpt<int>("itype", 6, FlagGrid::TypeFluid, &_lock);
      _retval = getPyNone();
      flipComputePotentialKineticEnergy(pot, flags, v, tauMin, tauMax, scaleFromManta, itype);
      _args.check();
    }
    pbFinalizePlugin(parent, "flipComputePotentialKineticEnergy", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    // Convert C++ exceptions into Python errors instead of crashing the interpreter.
    pbSetError("flipComputePotentialKineticEnergy", e.what());
    return 0;
  }
}
// Register the wrapper with the Manta Python registry under the same name.
static const Pb::Register _RP_flipComputePotentialKineticEnergy(
    "", "flipComputePotentialKineticEnergy", _W_8);
extern "C" {
void PbRegister_flipComputePotentialKineticEnergy()
{
  KEEP_UNUSED(_RP_flipComputePotentialKineticEnergy);
}
}
+
+// LEGACY METHOD! Use flipComputeSecondaryParticlePotentials instead!
+// computes wave crest potential for all fluid cells in &flags and saves it in &pot
+
// Generated TBB kernel: per-cell wave-crest potential.
// For each cell matching itype, accumulates a curvature-like measure `kappa`
// from neighbors (within `radius`, matching jtype) whose surface normals
// diverge from this cell's normal, then maps it into [0,1] via the
// [tauMin, tauMax] thresholds. Cells whose velocity does not point outward
// along the normal are zeroed to suppress false positives at domain borders.
struct knFlipComputePotentialWaveCrest : public KernelBase {
  knFlipComputePotentialWaveCrest(Grid<Real> &pot,
                                  const FlagGrid &flags,
                                  const MACGrid &v,
                                  const int radius,
                                  Grid<Vec3> &normal,
                                  const Real tauMin,
                                  const Real tauMax,
                                  const Real scaleFromManta,
                                  const int itype = FlagGrid::TypeFluid,
                                  const int jtype = FlagGrid::TypeFluid)
      : KernelBase(&pot, 1),
        pot(pot),
        flags(flags),
        v(v),
        radius(radius),
        normal(normal),
        tauMin(tauMin),
        tauMax(tauMax),
        scaleFromManta(scaleFromManta),
        itype(itype),
        jtype(jtype)
  {
    runMessage();
    run();
  }
  inline void op(int i,
                 int j,
                 int k,
                 Grid<Real> &pot,
                 const FlagGrid &flags,
                 const MACGrid &v,
                 const int radius,
                 Grid<Vec3> &normal,
                 const Real tauMin,
                 const Real tauMax,
                 const Real scaleFromManta,
                 const int itype = FlagGrid::TypeFluid,
                 const int jtype = FlagGrid::TypeFluid) const
  {

    if (!(flags(i, j, k) & itype))
      return;

    const Vec3 &xi = scaleFromManta * Vec3(i, j, k);  // scale to unit cube
    const Vec3 &vi = scaleFromManta * v.getCentered(i, j, k);
    const Vec3 &ni = normal(i, j, k);
    Real kappa = 0;
    // NOTE(review): neighbor accesses flags(x, y, z) are not bounds-checked;
    // this appears to rely on the KernelBase border of 1 and radius <= 1 —
    // confirm callers never pass a larger radius.
    for (IndexInt x = i - radius; x <= i + radius; x++) {
      for (IndexInt y = j - radius; y <= j + radius; y++) {
        for (IndexInt z = k - radius; z <= k + radius; z++) {
          if ((x == i && y == j && z == k) || !(flags(x, y, z) & jtype))
            continue;
          const Vec3 &xj = scaleFromManta * Vec3(x, y, z);  // scale to unit cube
          const Vec3 &nj = normal(x, y, z);
          const Vec3 xij = xi - xj;
          if (dot(getNormalized(xij), ni) < 0) {  // identifies wave crests
            Real h = !pot.is3D() ?
                         1.414 * radius :
                         1.732 * radius;  // estimate sqrt(2)*radius resp. sqrt(3)*radius for h,
                                          // due to squared resp. cubic neighbor area
            kappa += (1 - dot(ni, nj)) * (1 - norm(xij) / h);
          }
        }
      }
    }

    if (dot(getNormalized(vi), ni) >= 0.6) {  // avoid marking borders of the scene as wave crest
      pot(i, j, k) = (std::min(kappa, tauMax) - std::min(kappa, tauMin)) / (tauMax - tauMin);
    }
    else {
      pot(i, j, k) = Real(0);
    }
  }
  inline Grid<Real> &getArg0()
  {
    return pot;
  }
  typedef Grid<Real> type0;
  inline const FlagGrid &getArg1()
  {
    return flags;
  }
  typedef FlagGrid type1;
  inline const MACGrid &getArg2()
  {
    return v;
  }
  typedef MACGrid type2;
  inline const int &getArg3()
  {
    return radius;
  }
  typedef int type3;
  inline Grid<Vec3> &getArg4()
  {
    return normal;
  }
  typedef Grid<Vec3> type4;
  inline const Real &getArg5()
  {
    return tauMin;
  }
  typedef Real type5;
  inline const Real &getArg6()
  {
    return tauMax;
  }
  typedef Real type6;
  inline const Real &getArg7()
  {
    return scaleFromManta;
  }
  typedef Real type7;
  inline const int &getArg8()
  {
    return itype;
  }
  typedef int type8;
  inline const int &getArg9()
  {
    return jtype;
  }
  typedef int type9;
  void runMessage()
  {
    debMsg("Executing kernel knFlipComputePotentialWaveCrest ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // TBB worker: iterates the interior (border of 1) of the 3D range, or the
  // k=0 slice for 2D solvers.
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i,
               j,
               k,
               pot,
               flags,
               v,
               radius,
               normal,
               tauMin,
               tauMax,
               scaleFromManta,
               itype,
               jtype);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, pot, flags, v, radius, normal, tauMin, tauMax, scaleFromManta, itype, jtype);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  Grid<Real> &pot;
  const FlagGrid &flags;
  const MACGrid &v;
  const int radius;
  Grid<Vec3> &normal;
  const Real tauMin;
  const Real tauMax;
  const Real scaleFromManta;
  const int itype;
  const int jtype;
};
+
// LEGACY METHOD! Use flipComputeSecondaryParticlePotentials instead!
// Computes the wave-crest potential for all itype cells in &flags and stores
// the normalized result in &pot (cleared first). See the kernel above for the
// per-cell definition.
void flipComputePotentialWaveCrest(Grid<Real> &pot,
                                   const FlagGrid &flags,
                                   const MACGrid &v,
                                   const int radius,
                                   Grid<Vec3> &normal,
                                   const Real tauMin,
                                   const Real tauMax,
                                   const Real scaleFromManta,
                                   const int itype = FlagGrid::TypeFluid,
                                   const int jtype = FlagGrid::TypeFluid)
{

  pot.clear();
  knFlipComputePotentialWaveCrest(
      pot, flags, v, radius, normal, tauMin, tauMax, scaleFromManta, itype, jtype);
}
// Generated Python glue for flipComputePotentialWaveCrest; returns None.
static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "flipComputePotentialWaveCrest", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Real> &pot = *_args.getPtr<Grid<Real>>("pot", 0, &_lock);
      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 1, &_lock);
      const MACGrid &v = *_args.getPtr<MACGrid>("v", 2, &_lock);
      const int radius = _args.get<int>("radius", 3, &_lock);
      Grid<Vec3> &normal = *_args.getPtr<Grid<Vec3>>("normal", 4, &_lock);
      const Real tauMin = _args.get<Real>("tauMin", 5, &_lock);
      const Real tauMax = _args.get<Real>("tauMax", 6, &_lock);
      const Real scaleFromManta = _args.get<Real>("scaleFromManta", 7, &_lock);
      const int itype = _args.getOpt<int>("itype", 8, FlagGrid::TypeFluid, &_lock);
      const int jtype = _args.getOpt<int>("jtype", 9, FlagGrid::TypeFluid, &_lock);
      _retval = getPyNone();
      flipComputePotentialWaveCrest(
          pot, flags, v, radius, normal, tauMin, tauMax, scaleFromManta, itype, jtype);
      _args.check();
    }
    pbFinalizePlugin(parent, "flipComputePotentialWaveCrest", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    // Convert C++ exceptions into Python errors.
    pbSetError("flipComputePotentialWaveCrest", e.what());
    return 0;
  }
}
// Register the wrapper with the Manta Python registry.
static const Pb::Register _RP_flipComputePotentialWaveCrest("",
                                                            "flipComputePotentialWaveCrest",
                                                            _W_9);
extern "C" {
void PbRegister_flipComputePotentialWaveCrest()
{
  KEEP_UNUSED(_RP_flipComputePotentialWaveCrest);
}
}
+
+// LEGACY METHOD! Use flipComputeSecondaryParticlePotentials instead!
+// computes normal grid &normal as gradient of levelset &phi and normalizes it
+
// Generated TBB kernel: normalizes every vector in &normal in place.
// Note: `phi` is carried through the generated boilerplate but is not read in
// op() — the gradient itself is produced by GradientOp in the caller below;
// this kernel only performs the per-cell normalization step.
struct knFlipComputeSurfaceNormals : public KernelBase {
  knFlipComputeSurfaceNormals(Grid<Vec3> &normal, const Grid<Real> &phi)
      : KernelBase(&normal, 0), normal(normal), phi(phi)
  {
    runMessage();
    run();
  }
  inline void op(IndexInt idx, Grid<Vec3> &normal, const Grid<Real> &phi) const
  {
    normal[idx] = getNormalized(normal[idx]);
  }
  inline Grid<Vec3> &getArg0()
  {
    return normal;
  }
  typedef Grid<Vec3> type0;
  inline const Grid<Real> &getArg1()
  {
    return phi;
  }
  typedef Grid<Real> type1;
  void runMessage()
  {
    debMsg("Executing kernel knFlipComputeSurfaceNormals ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, normal, phi);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  Grid<Vec3> &normal;
  const Grid<Real> &phi;
};
+
// LEGACY METHOD! Use flipComputeSecondaryParticlePotentials instead!
// Computes &normal as the gradient of the levelset &phi and normalizes it
// to unit length per cell.
void flipComputeSurfaceNormals(Grid<Vec3> &normal, const Grid<Real> &phi)
{
  GradientOp(normal, phi);                    // normal <- grad(phi)
  knFlipComputeSurfaceNormals(normal, phi);   // normalize each cell's vector
}
// Generated Python glue for flipComputeSurfaceNormals; returns None.
static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "flipComputeSurfaceNormals", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      Grid<Vec3> &normal = *_args.getPtr<Grid<Vec3>>("normal", 0, &_lock);
      const Grid<Real> &phi = *_args.getPtr<Grid<Real>>("phi", 1, &_lock);
      _retval = getPyNone();
      flipComputeSurfaceNormals(normal, phi);
      _args.check();
    }
    pbFinalizePlugin(parent, "flipComputeSurfaceNormals", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    // Convert C++ exceptions into Python errors.
    pbSetError("flipComputeSurfaceNormals", e.what());
    return 0;
  }
}
// Register the wrapper with the Manta Python registry.
static const Pb::Register _RP_flipComputeSurfaceNormals("", "flipComputeSurfaceNormals", _W_10);
extern "C" {
void PbRegister_flipComputeSurfaceNormals()
{
  KEEP_UNUSED(_RP_flipComputeSurfaceNormals);
}
}
+
+// LEGACY METHOD! Use flipComputeSecondaryParticlePotentials instead!
+// computes the neighbor ratio for every fluid cell in &flags as the number of fluid neighbors over
+// the maximum possible number of fluid neighbors
+
// Generated TBB kernel: for every itype (fluid) cell, computes the ratio of
// itype neighbors over all non-jtype (non-obstacle) neighbors inside the given
// radius and writes it to &neighborRatio.
struct knFlipUpdateNeighborRatio : public KernelBase {
  knFlipUpdateNeighborRatio(const FlagGrid &flags,
                            Grid<Real> &neighborRatio,
                            const int radius,
                            const int itype = FlagGrid::TypeFluid,
                            const int jtype = FlagGrid::TypeObstacle)
      : KernelBase(&flags, 1),
        flags(flags),
        neighborRatio(neighborRatio),
        radius(radius),
        itype(itype),
        jtype(jtype)
  {
    runMessage();
    run();
  }
  inline void op(int i,
                 int j,
                 int k,
                 const FlagGrid &flags,
                 Grid<Real> &neighborRatio,
                 const int radius,
                 const int itype = FlagGrid::TypeFluid,
                 const int jtype = FlagGrid::TypeObstacle) const
  {

    if (!(flags(i, j, k) & itype))
      return;

    int countFluid = 0;
    int countMaxFluid = 0;  // neighbors that could be fluid (i.e. not jtype/obstacle)
    for (IndexInt x = i - radius; x <= i + radius; x++) {
      for (IndexInt y = j - radius; y <= j + radius; y++) {
        for (IndexInt z = k - radius; z <= k + radius; z++) {
          // skip the center cell itself and obstacle (jtype) neighbors
          if ((x == i && y == j && z == k) || (flags(x, y, z) & jtype))
            continue;
          if (flags(x, y, z) & itype) {
            countFluid++;
            countMaxFluid++;
          }
          else {
            countMaxFluid++;
          }
        }
      }
    }
    // NOTE(review): if every neighbor is jtype, countMaxFluid is 0 and this
    // divides by zero (IEEE inf/nan in the grid) — confirm whether callers can
    // hit a fluid cell fully enclosed by obstacles.
    neighborRatio(i, j, k) = float(countFluid) / float(countMaxFluid);
  }
  inline const FlagGrid &getArg0()
  {
    return flags;
  }
  typedef FlagGrid type0;
  inline Grid<Real> &getArg1()
  {
    return neighborRatio;
  }
  typedef Grid<Real> type1;
  inline const int &getArg2()
  {
    return radius;
  }
  typedef int type2;
  inline const int &getArg3()
  {
    return itype;
  }
  typedef int type3;
  inline const int &getArg4()
  {
    return jtype;
  }
  typedef int type4;
  void runMessage()
  {
    debMsg("Executing kernel knFlipUpdateNeighborRatio ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // TBB worker: interior iteration (border of 1); k=0 slice in 2D.
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 1; j < _maxY; j++)
          for (int i = 1; i < _maxX; i++)
            op(i, j, k, flags, neighborRatio, radius, itype, jtype);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 1; i < _maxX; i++)
          op(i, j, k, flags, neighborRatio, radius, itype, jtype);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
  }
  const FlagGrid &flags;
  Grid<Real> &neighborRatio;
  const int radius;
  const int itype;
  const int jtype;
};
+
// LEGACY METHOD! Use flipComputeSecondaryParticlePotentials instead!
// Computes, for every itype cell in &flags, the number of itype neighbors over
// the maximum possible number of itype neighbors (jtype cells excluded) and
// stores it in &neighborRatio (cleared first).
void flipUpdateNeighborRatio(const FlagGrid &flags,
                             Grid<Real> &neighborRatio,
                             const int radius,
                             const int itype = FlagGrid::TypeFluid,
                             const int jtype = FlagGrid::TypeObstacle)
{

  neighborRatio.clear();
  knFlipUpdateNeighborRatio(flags, neighborRatio, radius, itype, jtype);
}
// Generated Python glue for flipUpdateNeighborRatio; returns None.
static PyObject *_W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
{
  try {
    PbArgs _args(_linargs, _kwds);
    FluidSolver *parent = _args.obtainParent();
    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
    pbPreparePlugin(parent, "flipUpdateNeighborRatio", !noTiming);
    PyObject *_retval = 0;
    {
      ArgLocker _lock;
      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
      Grid<Real> &neighborRatio = *_args.getPtr<Grid<Real>>("neighborRatio", 1, &_lock);
      const int radius = _args.get<int>("radius", 2, &_lock);
      const int itype = _args.getOpt<int>("itype", 3, FlagGrid::TypeFluid, &_lock);
      const int jtype = _args.getOpt<int>("jtype", 4, FlagGrid::TypeObstacle, &_lock);
      _retval = getPyNone();
      flipUpdateNeighborRatio(flags, neighborRatio, radius, itype, jtype);
      _args.check();
    }
    pbFinalizePlugin(parent, "flipUpdateNeighborRatio", !noTiming);
    return _retval;
  }
  catch (std::exception &e) {
    // Convert C++ exceptions into Python errors.
    pbSetError("flipUpdateNeighborRatio", e.what());
    return 0;
  }
}
// Register the wrapper with the Manta Python registry.
static const Pb::Register _RP_flipUpdateNeighborRatio("", "flipUpdateNeighborRatio", _W_11);
extern "C" {
void PbRegister_flipUpdateNeighborRatio()
{
  KEEP_UNUSED(_RP_flipUpdateNeighborRatio);
}
}
+
+//----------------------------------------------------------------------------------------------------------------------------------------------------
+// Legacy Methods (still useful for debugging)
+//----------------------------------------------------------------------------------------------------------------------------------------------------
+#pragma endregion
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/surfaceturbulence.cpp b/extern/mantaflow/preprocessed/plugin/surfaceturbulence.cpp
new file mode 100644
index 00000000000..465314f51ed
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/surfaceturbulence.cpp
@@ -0,0 +1,2189 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2016 Olivier Mercier, oli.mercier@gmail.com
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Surface Turbulence for Particle-Based Liquid Simulations
+ * Mercier et al., SIGGRAPH Asia 2015
+ *
+ * Possible speedups :
+ * - only initialize surface points around coarse particles near the surface. Use the flags in the
+ *fluid grid and only use cells with non-fluid neighbors.
+ *
+ ******************************************************************************/
+
+// use chrono stl for detailed timing only if available
+#ifdef __GNUC__
+# if __GNUC__ < 5
+# define USE_CHRONO 0
+# endif
+#endif
+
+#if MANTA_WITHCPP11 == 1
+# ifndef USE_CHRONO
+# define USE_CHRONO 1
+# endif
+#endif
+
+#include <iomanip>
+#if USE_CHRONO == 1
+# include <chrono>
+#endif
+#include "particle.h"
+
+using namespace std;
+namespace Manta {
+
+// own namespace for globals
+namespace SurfaceTurbulence {
+
+//
+// **** surface turbulence parameters ****
+//
// Tunable parameters for the surface-turbulence method (Mercier et al. 2015),
// shared by all kernels in this translation unit via the global `params`.
struct SurfaceTurbulenceParameters {
  int res;                            // coarse grid resolution (cells per side)
  Real outerRadius;                   // outer radius of the surface band around coarse particles
  int surfaceDensity;                 // target density of fine surface points
  int nbSurfaceMaintenanceIterations; // regularization iterations per step
  Real dt;                            // wave simulation time step
  Real waveSpeed;
  Real waveDamping;
  Real waveSeedFrequency;
  Real waveMaxAmplitude;
  Real waveMaxFrequency;
  Real waveMaxSeedingAmplitude;  // as ratio of max amp;
  Real waveSeedingCurvatureThresholdRegionCenter;
  Real waveSeedingCurvatureThresholdRegionRadius;
  Real waveSeedStepSizeRatioOfMax;
  Real innerRadius;       // inner radius of the surface band
  Real meanFineDistance;  // target spacing between fine surface points
  Real constraintA;       // falloff constant of the level-set band constraint
  Real normalRadius;      // neighborhood radius for normal estimation
  Real tangentRadius;     // neighborhood radius for tangential smoothing
  Real bndXm, bndXp, bndYm, bndYp, bndZm, bndZp;  // domain boundary planes
};
SurfaceTurbulenceParameters params;  // global parameter set for this module
+
+//
+// **** acceleration grid for particle neighbor queries ****
+//
// Uniform acceleration grid for particle neighbor queries: a res^3 array of
// per-cell index lists into a particle system. Positions are mapped from the
// simulation domain [0, params.res) into grid cells.
// NOTE(review): init() allocates the nested arrays with new[] and there is no
// destructor or cleanup path; calling init() twice leaks the previous arrays.
// Presumably acceptable because instances are module-lifetime globals —
// confirm before reusing this type elsewhere.
struct ParticleAccelGrid {
  int res;
  vector<int> ***indices;

  void init(int inRes)
  {
    res = inRes;
    indices = new vector<int> **[res];
    for (int i = 0; i < res; i++) {
      indices[i] = new vector<int> *[res];
      for (int j = 0; j < res; j++) {
        indices[i][j] = new vector<int>[res];
      }
    }
  }

  // Rebuild the cell lists from a particle system's positions.
  void fillWith(const BasicParticleSystem &particles)
  {
    // clear
    for (int i = 0; i < res; i++) {
      for (int j = 0; j < res; j++) {
        for (int k = 0; k < res; k++) {
          indices[i][j][k].clear();
        }
      }
    }

    // fill: map each position into a cell, clamping to the grid bounds
    for (int id = 0; id < particles.size(); id++) {
      Vec3 pos = particles.getPos(id);
      int i = clamp<int>(floor(pos.x / params.res * res), 0, res - 1);
      int j = clamp<int>(floor(pos.y / params.res * res), 0, res - 1);
      int k = clamp<int>(floor(pos.z / params.res * res), 0, res - 1);
      indices[i][j][k].push_back(id);
    }
  }

  // Same as above, but for a raw Vec3 data channel (positions stored per index).
  void fillWith(const ParticleDataImpl<Vec3> &particles)
  {
    // clear
    for (int i = 0; i < res; i++) {
      for (int j = 0; j < res; j++) {
        for (int k = 0; k < res; k++) {
          indices[i][j][k].clear();
        }
      }
    }

    // fill
    for (int id = 0; id < particles.size(); id++) {
      Vec3 pos = particles[id];
      int i = clamp<int>(floor(pos.x / params.res * res), 0, res - 1);
      int j = clamp<int>(floor(pos.y / params.res * res), 0, res - 1);
      int k = clamp<int>(floor(pos.z / params.res * res), 0, res - 1);
      indices[i][j][k].push_back(id);
    }
  }
};
+
// Iterate all active particles of `points` within the accel-grid cells that
// overlap the sphere (center, radius). Exposes the neighbor index as `idn`.
#define LOOP_NEIGHBORS_BEGIN(points, center, radius) \
  int minI = clamp<int>( \
      floor((center.x - radius) / params.res * points.accel->res), 0, points.accel->res - 1); \
  int maxI = clamp<int>( \
      floor((center.x + radius) / params.res * points.accel->res), 0, points.accel->res - 1); \
  int minJ = clamp<int>( \
      floor((center.y - radius) / params.res * points.accel->res), 0, points.accel->res - 1); \
  int maxJ = clamp<int>( \
      floor((center.y + radius) / params.res * points.accel->res), 0, points.accel->res - 1); \
  int minK = clamp<int>( \
      floor((center.z - radius) / params.res * points.accel->res), 0, points.accel->res - 1); \
  int maxK = clamp<int>( \
      floor((center.z + radius) / params.res * points.accel->res), 0, points.accel->res - 1); \
  for (int i = minI; i <= maxI; i++) { \
    for (int j = minJ; j <= maxJ; j++) { \
      for (int k = minK; k <= maxK; k++) { \
        for (int idLOOPNEIGHBORS = 0; \
             idLOOPNEIGHBORS < (int)points.accel->indices[i][j][k].size(); \
             idLOOPNEIGHBORS++) { \
          int idn = points.accel->indices[i][j][k][idLOOPNEIGHBORS]; \
          if (points.isActive(idn)) {
#define LOOP_NEIGHBORS_END \
  } \
  } \
  } \
  } \
  }

// Visit `pos` plus its mirror images (`gPos`) across every domain boundary
// plane closer than `radius`; the final iteration visits `pos` itself.
// Fixed: the +Z boundary tests used `pos.Z` (capital Z, not a Vec3 member)
// instead of `pos.z`, matching the lowercase access used by every other branch.
#define LOOP_GHOSTS_POS_BEGIN(pos, radius) \
  int flagLOOPGHOSTS = -1; \
  Vec3 gPos; \
  while (flagLOOPGHOSTS < 6) { \
    if (flagLOOPGHOSTS < 0 && pos.x - params.bndXm <= radius) { \
      flagLOOPGHOSTS = 0; \
      gPos = Vec3(2.f * params.bndXm - pos.x, pos.y, pos.z); \
    } \
    else if (flagLOOPGHOSTS < 1 && params.bndXp - pos.x <= radius) { \
      flagLOOPGHOSTS = 1; \
      gPos = Vec3(2.f * params.bndXp - pos.x, pos.y, pos.z); \
    } \
    else if (flagLOOPGHOSTS < 2 && pos.y - params.bndYm <= radius) { \
      flagLOOPGHOSTS = 2; \
      gPos = Vec3(pos.x, 2.f * params.bndYm - pos.y, pos.z); \
    } \
    else if (flagLOOPGHOSTS < 3 && params.bndYp - pos.y <= radius) { \
      flagLOOPGHOSTS = 3; \
      gPos = Vec3(pos.x, 2.f * params.bndYp - pos.y, pos.z); \
    } \
    else if (flagLOOPGHOSTS < 4 && pos.z - params.bndZm <= radius) { \
      flagLOOPGHOSTS = 4; \
      gPos = Vec3(pos.x, pos.y, 2.f * params.bndZm - pos.z); \
    } \
    else if (flagLOOPGHOSTS < 5 && params.bndZp - pos.z <= radius) { \
      flagLOOPGHOSTS = 5; \
      gPos = Vec3(pos.x, pos.y, 2.f * params.bndZp - pos.z); \
    } \
    else { \
      flagLOOPGHOSTS = 6; \
      gPos = Vec3(pos.x, pos.y, pos.z); \
    }
// Same as LOOP_GHOSTS_POS_BEGIN, but also mirrors a normal (`gNormal`) across
// the reflecting boundary plane. Same `pos.Z` -> `pos.z` fix applied.
#define LOOP_GHOSTS_POS_NORMAL_BEGIN(pos, normal, radius) \
  int flagLOOPGHOSTS = -1; \
  Vec3 gPos, gNormal; \
  while (flagLOOPGHOSTS < 6) { \
    if (flagLOOPGHOSTS < 0 && pos.x - params.bndXm <= radius) { \
      flagLOOPGHOSTS = 0; \
      gPos = Vec3(2.f * params.bndXm - pos.x, pos.y, pos.z); \
      gNormal = Vec3(-normal.x, normal.y, normal.z); \
    } \
    else if (flagLOOPGHOSTS < 1 && params.bndXp - pos.x <= radius) { \
      flagLOOPGHOSTS = 1; \
      gPos = Vec3(2.f * params.bndXp - pos.x, pos.y, pos.z); \
      gNormal = Vec3(-normal.x, normal.y, normal.z); \
    } \
    else if (flagLOOPGHOSTS < 2 && pos.y - params.bndYm <= radius) { \
      flagLOOPGHOSTS = 2; \
      gPos = Vec3(pos.x, 2.f * params.bndYm - pos.y, pos.z); \
      gNormal = Vec3(normal.x, -normal.y, normal.z); \
    } \
    else if (flagLOOPGHOSTS < 3 && params.bndYp - pos.y <= radius) { \
      flagLOOPGHOSTS = 3; \
      gPos = Vec3(pos.x, 2.f * params.bndYp - pos.y, pos.z); \
      gNormal = Vec3(normal.x, -normal.y, normal.z); \
    } \
    else if (flagLOOPGHOSTS < 4 && pos.z - params.bndZm <= radius) { \
      flagLOOPGHOSTS = 4; \
      gPos = Vec3(pos.x, pos.y, 2.f * params.bndZm - pos.z); \
      gNormal = Vec3(normal.x, normal.y, -normal.z); \
    } \
    else if (flagLOOPGHOSTS < 5 && params.bndZp - pos.z <= radius) { \
      flagLOOPGHOSTS = 5; \
      gPos = Vec3(pos.x, pos.y, 2.f * params.bndZp - pos.z); \
      gNormal = Vec3(normal.x, normal.y, -normal.z); \
    } \
    else { \
      flagLOOPGHOSTS = 6; \
      gPos = pos; \
      gNormal = normal; \
    }
#define LOOP_GHOSTS_END }
+
+//
+// **** Wrappers around point sets to attach it an acceleration grid ****
+//
// Base wrapper that attaches an acceleration grid to a point set so the
// LOOP_NEIGHBORS_* macros can query it uniformly.
// NOTE(review): no virtual destructor — fine as long as instances are never
// deleted through a PointSetWrapper*, which appears to be the case for the
// module-lifetime globals below; confirm before heap-allocating these.
struct PointSetWrapper {
  ParticleAccelGrid *accel;  // non-owning; grid lifetime managed elsewhere

  PointSetWrapper(ParticleAccelGrid *inAccel)
  {
    accel = inAccel;
  }
  // Rebuild the acceleration grid from the wrapped point data.
  virtual void updateAccel() = 0;
};
+
// Wrapper around a BasicParticleSystem plus an acceleration grid. Most methods
// forward directly to the wrapped system; the neighbor queries below use the
// accel grid to avoid scanning all particles.
struct BasicParticleSystemWrapper : PointSetWrapper {
  BasicParticleSystem *points;  // non-owning; assigned by the module setup code

  BasicParticleSystemWrapper(ParticleAccelGrid *inAccel) : PointSetWrapper(inAccel)
  {
  }

  Vec3 getPos(int id) const
  {
    return points->getPos(id);
  }
  void setPos(int id, Vec3 pos)
  {
    points->setPos(id, pos);
  }
  void updateAccel()
  {
    accel->fillWith(*points);
  }
  void clear()
  {
    points->clear();
  }
  int size() const
  {
    return points->size();
  }
  bool isActive(int id) const
  {
    return points->isActive(id);
  }
  void addParticle(Vec3 pos)
  {
    points->addParticle(pos);
  }
  int getStatus(int id) const
  {
    return points->getStatus(id);
  }
  void addBuffered(Vec3 pos)
  {
    points->addBuffered(pos);
  }
  void doCompress()
  {
    points->doCompress();
  }
  void insertBufferedParticles()
  {
    points->insertBufferedParticles();
  }
  void kill(int id)
  {
    points->kill(id);
  }

  // True if any active particle lies within `radius` of `pos`.
  // Scans only the accel-grid cells overlapping the query sphere; the chained
  // breaks unwind all four loops as soon as a hit is found.
  bool hasNeighbor(Vec3 pos, Real radius) const
  {
    bool answer = false;
    int minI = clamp<int>(floor((pos.x - radius) / params.res * accel->res), 0, accel->res - 1);
    int maxI = clamp<int>(floor((pos.x + radius) / params.res * accel->res), 0, accel->res - 1);
    int minJ = clamp<int>(floor((pos.y - radius) / params.res * accel->res), 0, accel->res - 1);
    int maxJ = clamp<int>(floor((pos.y + radius) / params.res * accel->res), 0, accel->res - 1);
    int minK = clamp<int>(floor((pos.z - radius) / params.res * accel->res), 0, accel->res - 1);
    int maxK = clamp<int>(floor((pos.z + radius) / params.res * accel->res), 0, accel->res - 1);
    for (int i = minI; i <= maxI; i++) {
      for (int j = minJ; j <= maxJ; j++) {
        for (int k = minK; k <= maxK; k++) {
          for (int id = 0; id < (int)accel->indices[i][j][k].size(); id++) {
            if (points->isActive(accel->indices[i][j][k][id]) &&
                norm(points->getPos(accel->indices[i][j][k][id]) - pos) <= radius) {
              answer = true;
              break;
            }
          }
          if (answer)
            break;
        }
        if (answer)
          break;
      }
      if (answer)
        break;
    }
    return answer;
  }

  // Same as hasNeighbor, but excludes the particle `idx` itself from the query.
  bool hasNeighborOtherThanItself(int idx, Real radius) const
  {
    bool answer = false;
    Vec3 pos = points->getPos(idx);
    int minI = clamp<int>(floor((pos.x - radius) / params.res * accel->res), 0, accel->res - 1);
    int maxI = clamp<int>(floor((pos.x + radius) / params.res * accel->res), 0, accel->res - 1);
    int minJ = clamp<int>(floor((pos.y - radius) / params.res * accel->res), 0, accel->res - 1);
    int maxJ = clamp<int>(floor((pos.y + radius) / params.res * accel->res), 0, accel->res - 1);
    int minK = clamp<int>(floor((pos.z - radius) / params.res * accel->res), 0, accel->res - 1);
    int maxK = clamp<int>(floor((pos.z + radius) / params.res * accel->res), 0, accel->res - 1);
    for (int i = minI; i <= maxI; i++) {
      for (int j = minJ; j <= maxJ; j++) {
        for (int k = minK; k <= maxK; k++) {
          for (int id = 0; id < (int)accel->indices[i][j][k].size(); id++) {
            if (accel->indices[i][j][k][id] != idx &&
                points->isActive(accel->indices[i][j][k][id]) &&
                norm(points->getPos(accel->indices[i][j][k][id]) - pos) <= radius) {
              answer = true;
              break;
            }
          }
          if (answer)
            break;
        }
        if (answer)
          break;
      }
      if (answer)
        break;
    }
    return answer;
  }

  // Drops indices whose particles are no longer active (in-place filter).
  void removeInvalidIndices(vector<int> &indices)
  {
    vector<int> copy;
    copy.resize(indices.size());
    for (int i = 0; i < (int)indices.size(); i++) {
      copy[i] = indices[i];
    }
    indices.clear();
    for (int i = 0; i < (int)copy.size(); i++) {
      if (points->isActive(copy[i])) {
        indices.push_back(copy[i]);
      }
    }
  }
};
+
// Wrapper around a Vec3 particle data channel plus an acceleration grid.
// Used for coarseParticlesPrevPos; raw data channels carry no alive/dead flag,
// so isActive() is unconditionally true here.
struct ParticleDataImplVec3Wrapper : PointSetWrapper {
  ParticleDataImpl<Vec3> *points;  // non-owning; assigned by the module setup code

  ParticleDataImplVec3Wrapper(ParticleAccelGrid *inAccel) : PointSetWrapper(inAccel)
  {
  }

  Vec3 getVec3(int id) const
  {
    return (*points)[id];
  }
  void setVec3(int id, Vec3 vec)
  {
    (*points)[id] = vec;
  }
  void updateAccel()
  {
    accel->fillWith(*points);
  }
  // Data channels have no kill flag; every index is considered active.
  bool isActive(int i) const
  {
    return true;
  }
};
+
+//
+// **** globals ****
+//
// Module-lifetime globals shared by all surface-turbulence kernels below.
ParticleAccelGrid accelCoarse, accelSurface;
BasicParticleSystemWrapper coarseParticles(&accelCoarse), surfacePoints(&accelSurface);
ParticleDataImplVec3Wrapper coarseParticlesPrevPos(
    &accelCoarse);  // WARNING: reusing the coarse accel grid to save space, don't query
                    // coarseParticlesPrevPos and coarseParticles at the same time.
vector<Vec3> tempSurfaceVec3;   // to store misc info on surface points
vector<Real> tempSurfaceFloat;  // to store misc info on surface points
int frameCount = 0;             // step counter; updated elsewhere in this file
+
+//
+//**** weighting kernels *****
+//
// Linear falloff: 1 at distance 0, 0 at `radius`. Note: goes negative for
// distance > radius; callers below gate it with an explicit range check.
Real triangularWeight(Real distance, Real radius)
{
  return 1.0f - distance / radius;
}
// Gaussian-like falloff exp(-falloff * (d/r)^2), clipped to 0 beyond `radius`.
Real exponentialWeight(Real distance, Real radius, Real falloff)
{
  if (distance > radius)
    return 0;
  Real tmp = distance / radius;
  return expf(-falloff * tmp * tmp);
}

// Advection weight: triangular falloff over twice the outer band radius.
Real weightKernelAdvection(Real distance)
{
  if (distance > 2.f * params.outerRadius) {
    return 0;
  }
  else {
    return triangularWeight(distance, 2.f * params.outerRadius);
  }
}

// Coarse-density weight: exponential falloff over the outer band radius.
Real weightKernelCoarseDensity(Real distance)
{
  return exponentialWeight(distance, params.outerRadius, 2.0f);
}

// Normal-estimation weight: triangular falloff over params.normalRadius.
Real weightSurfaceNormal(Real distance)
{
  if (distance > params.normalRadius) {
    return 0;
  }
  else {
    return triangularWeight(distance, params.normalRadius);
  }
}

// Tangential-smoothing weight: triangular falloff over params.tangentRadius.
Real weightSurfaceTangent(Real distance)
{
  if (distance > params.tangentRadius) {
    return 0;
  }
  else {
    return triangularWeight(distance, params.tangentRadius);
  }
}
+
+//
+// **** utility ****
+//
+
// True if `pos` lies inside the (inclusive) domain box defined by params.bnd*.
bool isInDomain(Vec3 pos)
{
  return params.bndXm <= pos.x && pos.x <= params.bndXp && params.bndYm <= pos.y &&
         pos.y <= params.bndYp && params.bndZm <= pos.z && pos.z <= params.bndZp;
}

// Hermite smoothstep: 0 at edgeLeft, 1 at edgeRight, clamped outside.
Real smoothstep(Real edgeLeft, Real edgeRight, Real val)
{
  Real x = clamp((val - edgeLeft) / (edgeRight - edgeLeft), Real(0.), Real(1.));
  return x * x * (3 - 2 * x);
}
+
+//
+// **** surface initialization ****
+//
+
// Seeds fine surface points on spheres of radius params.outerRadius around
// coarse particles that sit near the free surface, discarding candidate points
// that fall inside a neighboring coarse particle's outer sphere.
void initFines(const BasicParticleSystemWrapper &coarseParticles,
               BasicParticleSystemWrapper &surfacePoints,
               const FlagGrid &flags)
{
  // NOTE(review): the cast binds to M_PI only (precedence), so this computes
  // 3 * (outer+inner) / meanFineDistance rather than truncating the full
  // product — presumably close enough to pi for the latitude count, but
  // confirm against the reference implementation before changing.
  unsigned int discretization = (unsigned int)M_PI * (params.outerRadius + params.innerRadius) /
                                params.meanFineDistance;
  Real dtheta = 2 * params.meanFineDistance / (params.outerRadius + params.innerRadius);
  Real outerRadius2 = params.outerRadius * params.outerRadius;

  surfacePoints.clear();
  for (int idx = 0; idx < (int)coarseParticles.size(); idx++) {

    // progress report every 500 particles
    if (idx % 500 == 0) {
      cout << "Initializing surface points : " << setprecision(4)
           << 100.f * idx / coarseParticles.size() << "%" << endl;
    }

    if (coarseParticles.isActive(idx)) {

      // check flags if we are near surface: any non-fluid cell in the 3x3x3
      // neighborhood of the particle's cell counts.
      // NOTE(review): the break only exits the innermost k-loop, so the outer
      // loops keep scanning after a hit — harmless, just redundant work.
      bool nearSurface = false;
      Vec3 pos = coarseParticles.getPos(idx);
      for (int i = -1; i <= 1; i++) {
        for (int j = -1; j <= 1; j++) {
          for (int k = -1; k <= 1; k++) {
            if (!flags.isFluid(((int)pos.x) + i, ((int)pos.y) + j, ((int)pos.z) + k)) {
              nearSurface = true;
              break;
            }
          }
        }
      }

      if (nearSurface) {
        // sample the sphere in latitude bands (theta) with a longitude count
        // (phi) adjusted per band to keep roughly uniform point spacing
        for (unsigned int i = 0; i <= discretization / 2; ++i) {
          Real discretization2 = Real(floor(2 * M_PI * sin(i * dtheta) / dtheta) + 1);
          for (Real phi = 0; phi < 2 * M_PI; phi += Real(2 * M_PI / discretization2)) {
            Real theta = i * dtheta;
            Vec3 normal(sin(theta) * cos(phi), cos(theta), sin(theta) * sin(phi));
            Vec3 position = coarseParticles.getPos(idx) + params.outerRadius * normal;

            // reject the candidate if it lies inside another coarse particle's
            // outer sphere (it would not be on the union surface)
            bool valid = true;
            LOOP_NEIGHBORS_BEGIN(coarseParticles, position, 2.f * params.outerRadius)
            if (idx != idn && normSquare(position - coarseParticles.getPos(idn)) < outerRadius2) {
              valid = false;
              break;
            }
            LOOP_NEIGHBORS_END
            if (valid) {
              surfacePoints.addParticle(position);
            }
          }
        }
      }
    }
  }
}
+
+//
+// **** surface advection ****
+//
+
// Generated TBB kernel: advects each fine surface point by the weighted
// average displacement of nearby coarse particles (current position minus the
// stored previous position), skipping coarse particles that were just created
// or deleted this step. Queries the accel grid through coarseParticlesPrevPos
// (which shares accelCoarse — see the WARNING at the globals above).
struct advectSurfacePoints : public KernelBase {
  advectSurfacePoints(BasicParticleSystemWrapper &surfacePoints,
                      const BasicParticleSystemWrapper &coarseParticles,
                      const ParticleDataImplVec3Wrapper &coarseParticlesPrevPos)
      : KernelBase(surfacePoints.size()),
        surfacePoints(surfacePoints),
        coarseParticles(coarseParticles),
        coarseParticlesPrevPos(coarseParticlesPrevPos)
  {
    runMessage();
    run();
  }
  inline void op(IndexInt idx,
                 BasicParticleSystemWrapper &surfacePoints,
                 const BasicParticleSystemWrapper &coarseParticles,
                 const ParticleDataImplVec3Wrapper &coarseParticlesPrevPos) const
  {
    if (surfacePoints.isActive(idx)) {
      Vec3 avgDisplacement(0, 0, 0);
      Real totalWeight = 0;
      Vec3 p = surfacePoints.getPos(idx);
      LOOP_NEIGHBORS_BEGIN(
          coarseParticlesPrevPos, surfacePoints.getPos(idx), 2.0f * params.outerRadius)
      if ((coarseParticles.getStatus(idn) & ParticleBase::PNEW) == 0 &&
          (coarseParticles.getStatus(idn) & ParticleBase::PDELETE) == 0) {
        Vec3 disp = coarseParticles.getPos(idn) - coarseParticlesPrevPos.getVec3(idn);
        Real distance = norm(coarseParticlesPrevPos.getVec3(idn) - p);
        Real w = weightKernelAdvection(distance);
        avgDisplacement += w * disp;
        totalWeight += w;
      }
      LOOP_NEIGHBORS_END
      // no coarse neighbors in range -> leave the point where it is
      if (totalWeight != 0)
        avgDisplacement /= totalWeight;
      surfacePoints.setPos(idx, p + avgDisplacement);
    }
  }
  inline BasicParticleSystemWrapper &getArg0()
  {
    return surfacePoints;
  }
  typedef BasicParticleSystemWrapper type0;
  inline const BasicParticleSystemWrapper &getArg1()
  {
    return coarseParticles;
  }
  typedef BasicParticleSystemWrapper type1;
  inline const ParticleDataImplVec3Wrapper &getArg2()
  {
    return coarseParticlesPrevPos;
  }
  typedef ParticleDataImplVec3Wrapper type2;
  void runMessage()
  {
    debMsg("Executing kernel advectSurfacePoints ", 3);
    debMsg("Kernel range"
               << " size " << size << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, surfacePoints, coarseParticles, coarseParticlesPrevPos);
  }
  void run()
  {
    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  BasicParticleSystemWrapper &surfacePoints;
  const BasicParticleSystemWrapper &coarseParticles;
  const ParticleDataImplVec3Wrapper &coarseParticlesPrevPos;
};
+
+//
+// **** value and gradient of level-set band constraint ****
+//
+// Evaluates the normalized band level of `pos` relative to the coarse
+// particles: a sum of Gaussians exp(-constraintA * d^2) over coarse neighbors
+// within 1.5 * outerRadius is inverted back to a distance, then remapped so
+// that innerRadius -> 0 and outerRadius -> 1.
+Real computeConstraintLevel(const BasicParticleSystemWrapper &coarseParticles, Vec3 pos)
+{
+ Real lvl = 0.0f;
+ LOOP_NEIGHBORS_BEGIN(coarseParticles, pos, 1.5f * params.outerRadius)
+ lvl += expf(-params.constraintA * normSquare(coarseParticles.getPos(idn) - pos));
+ LOOP_NEIGHBORS_END
+ // Clamp so -logf(lvl) below stays non-negative.
+ if (lvl > 1.0f)
+ lvl = 1.0f;
+ // NOTE(review): with zero neighbors lvl == 0, so logf(0) yields -inf and
+ // the function returns +inf — presumably callers only evaluate positions
+ // near the coarse surface (see addDeleteSurfacePoints); confirm.
+ lvl = (sqrtf(-logf(lvl) / params.constraintA) - params.innerRadius) /
+ (params.outerRadius - params.innerRadius);
+ return lvl;
+}
+
+// Returns the (normalized) gradient of the same Gaussian-sum field used in
+// computeConstraintLevel, i.e. the outward direction of steepest increase of
+// the band level at `pos`.
+Vec3 computeConstraintGradient(const BasicParticleSystemWrapper &coarseParticles, Vec3 pos)
+{
+ Vec3 gradient(0, 0, 0);
+ // d/dpos of exp(-A * |p_n - pos|^2) summed over neighbors.
+ LOOP_NEIGHBORS_BEGIN(coarseParticles, pos, 1.5f * params.outerRadius)
+ gradient += 2.f * params.constraintA *
+ (Real)(expf(-params.constraintA * normSquare(coarseParticles.getPos(idn) - pos))) *
+ (pos - coarseParticles.getPos(idn));
+ LOOP_NEIGHBORS_END
+ return getNormalized(gradient);
+}
+
+//
+// **** compute surface normals ****
+//
+
+// Kernel: estimates a normal per surface point. A first guess comes from the
+// coarse constraint gradient; it is then refined by a moving-least-squares
+// plane fit (z = a*x + b*y + c) of neighboring surface points expressed in
+// the tangent frame of that guess. The result is written to surfaceNormals.
+struct computeSurfaceNormals : public KernelBase {
+ computeSurfaceNormals(const BasicParticleSystemWrapper &surfacePoints,
+ const BasicParticleSystemWrapper &coarseParticles,
+ ParticleDataImpl<Vec3> &surfaceNormals)
+ : KernelBase(surfacePoints.size()),
+ surfacePoints(surfacePoints),
+ coarseParticles(coarseParticles),
+ surfaceNormals(surfaceNormals)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystemWrapper &surfacePoints,
+ const BasicParticleSystemWrapper &coarseParticles,
+ ParticleDataImpl<Vec3> &surfaceNormals) const
+ {
+ Vec3 pos = surfacePoints.getPos(idx);
+
+ // approx normal with gradient
+ Vec3 gradient = computeConstraintGradient(coarseParticles, pos);
+
+ // get tangent frame
+ Vec3 n = getNormalized(gradient);
+ Vec3 vx(1, 0, 0);
+ Vec3 vy(0, 1, 0);
+ Real dotX = dot(n, vx);
+ Real dotY = dot(n, vy);
+ // Cross with whichever axis is more orthogonal to n, for stability.
+ Vec3 t1 = getNormalized(fabs(dotX) < fabs(dotY) ? cross(n, vx) : cross(n, vy));
+ Vec3 t2 = getNormalized(cross(n, t1)); // initial frame
+
+ // linear fit of neighboring surface points in approximated tangent frame
+ // Accumulate weighted moments for the 3x3 normal equations of the fit.
+ Real sw = 0, swx = 0, swy = 0, swxy = 0, swx2 = 0, swy2 = 0, swxz = 0, swyz = 0, swz = 0;
+ LOOP_NEIGHBORS_BEGIN(surfacePoints, pos, params.normalRadius)
+ LOOP_GHOSTS_POS_BEGIN(surfacePoints.getPos(idn), params.normalRadius)
+ Real x = dot(gPos - pos, t1);
+ Real y = dot(gPos - pos, t2);
+ Real z = dot(gPos - pos, n);
+ Real w = weightSurfaceNormal(norm(pos - gPos));
+ swx2 += w * x * x;
+ swy2 += w * y * y;
+ swxy += w * x * y;
+ swxz += w * x * z;
+ swyz += w * y * z;
+ swx += w * x;
+ swy += w * y;
+ swz += w * z;
+ sw += w;
+ LOOP_GHOSTS_END
+ LOOP_NEIGHBORS_END
+ // Determinant of the weighted normal-equation matrix; zero means the fit
+ // is degenerate (e.g. too few neighbors) and no normal can be derived.
+ Real det = -sw * swxy * swxy + 2.f * swx * swxy * swy - swx2 * swy * swy - swx * swx * swy2 +
+ sw * swx2 * swy2;
+ if (det == 0) {
+ surfaceNormals[idx] = Vec3(0, 0, 0);
+ }
+ else {
+ // Cramer-style solve for plane coefficients (a, b, c).
+ Vec3 abc = 1.f / det *
+ Vec3(swxz * (-swy * swy + sw * swy2) + swyz * (-sw * swxy + swx * swy) +
+ swz * (swxy * swy - swx * swy2),
+ swxz * (-sw * swxy + swx * swy) + swyz * (-swx * swx + sw * swx2) +
+ swz * (swx * swxy - swx2 * swy),
+ swxz * (swxy * swy - swx * swy2) + swyz * (swx * swxy - swx2 * swy) +
+ swz * (-swxy * swxy + swx2 * swy2));
+ // Normal of the fitted plane, oriented to agree with the coarse gradient.
+ Vec3 normal = -getNormalized(t1 * abc.x + t2 * abc.y - n);
+ if (dot(gradient, normal) < 0) {
+ normal = -normal;
+ }
+ surfaceNormals[idx] = normal;
+ }
+ }
+ // --- auto-generated kernel plumbing ---
+ inline const BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline const BasicParticleSystemWrapper &getArg1()
+ {
+ return coarseParticles;
+ }
+ typedef BasicParticleSystemWrapper type1;
+ inline ParticleDataImpl<Vec3> &getArg2()
+ {
+ return surfaceNormals;
+ }
+ typedef ParticleDataImpl<Vec3> type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel computeSurfaceNormals ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, coarseParticles, surfaceNormals);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystemWrapper &surfacePoints;
+ const BasicParticleSystemWrapper &coarseParticles;
+ ParticleDataImpl<Vec3> &surfaceNormals;
+};
+
+//
+// **** smooth surface normals ****
+//
+
+// Kernel: smoothing pass for normals. Each point's new normal is the
+// weight-averaged normal of its neighbors within normalRadius, written to the
+// global scratch buffer tempSurfaceVec3 (not in place, to avoid read/write
+// races across threads); assignNormals copies it back afterwards.
+struct computeAveragedNormals : public KernelBase {
+ computeAveragedNormals(const BasicParticleSystemWrapper &surfacePoints,
+ const ParticleDataImpl<Vec3> &surfaceNormals)
+ : KernelBase(surfacePoints.size()),
+ surfacePoints(surfacePoints),
+ surfaceNormals(surfaceNormals)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystemWrapper &surfacePoints,
+ const ParticleDataImpl<Vec3> &surfaceNormals) const
+ {
+ Vec3 pos = surfacePoints.getPos(idx);
+ Vec3 newNormal = Vec3(0, 0, 0);
+ LOOP_NEIGHBORS_BEGIN(surfacePoints, pos, params.normalRadius)
+ Real w = weightSurfaceNormal(norm(pos - surfacePoints.getPos(idn)));
+ newNormal += w * surfaceNormals[idn];
+ LOOP_NEIGHBORS_END
+ // Normalization makes dividing by the weight sum unnecessary.
+ tempSurfaceVec3[idx] = getNormalized(newNormal);
+ }
+ // --- auto-generated kernel plumbing ---
+ inline const BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline const ParticleDataImpl<Vec3> &getArg1()
+ {
+ return surfaceNormals;
+ }
+ typedef ParticleDataImpl<Vec3> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel computeAveragedNormals ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, surfaceNormals);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystemWrapper &surfacePoints;
+ const ParticleDataImpl<Vec3> &surfaceNormals;
+};
+
+// Kernel: second half of the normal-smoothing double buffer — copies the
+// smoothed normals from tempSurfaceVec3 (filled by computeAveragedNormals)
+// back into surfaceNormals.
+struct assignNormals : public KernelBase {
+ assignNormals(const BasicParticleSystemWrapper &surfacePoints,
+ ParticleDataImpl<Vec3> &surfaceNormals)
+ : KernelBase(surfacePoints.size()),
+ surfacePoints(surfacePoints),
+ surfaceNormals(surfaceNormals)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystemWrapper &surfacePoints,
+ ParticleDataImpl<Vec3> &surfaceNormals) const
+ {
+ surfaceNormals[idx] = tempSurfaceVec3[idx];
+ }
+ // --- auto-generated kernel plumbing ---
+ inline const BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline ParticleDataImpl<Vec3> &getArg1()
+ {
+ return surfaceNormals;
+ }
+ typedef ParticleDataImpl<Vec3> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel assignNormals ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, surfaceNormals);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystemWrapper &surfacePoints;
+ ParticleDataImpl<Vec3> &surfaceNormals;
+};
+
+// Smooths surfaceNormals in place via a two-pass double buffer: average into
+// the tempSurfaceVec3 scratch buffer, then copy back. The resize keeps the
+// scratch buffer in sync with the current particle count.
+void smoothSurfaceNormals(const BasicParticleSystemWrapper &surfacePoints,
+ ParticleDataImpl<Vec3> &surfaceNormals)
+{
+ tempSurfaceVec3.resize(surfacePoints.size());
+
+ computeAveragedNormals(surfacePoints, surfaceNormals);
+ assignNormals(surfacePoints, surfaceNormals);
+}
+
+//
+// **** addition/deletion of particles. Not parallel to prevent write/delete conflicts ****
+//
+
+// Serial (deliberately non-parallel, see banner above) maintenance pass that
+// keeps surface-point density in a target range:
+//  1) seed a new point in the direction of lowest tangent density when no
+//     neighbor exists within meanFineDistance,
+//  2) kill points that left the domain or sit too close to another point,
+//  3) kill points with no coarse particle within the advection radius,
+//  4) kill points too far outside the [innerRadius, outerRadius] band.
+void addDeleteSurfacePoints(BasicParticleSystemWrapper &surfacePoints)
+{
+ // Iterate over the size *before* additions so newly buffered points are
+ // not themselves re-processed this pass.
+ int fixedSize = surfacePoints.size();
+ for (int idx = 0; idx < fixedSize; idx++) {
+ // compute proxy tangent displacement
+ Vec3 pos = surfacePoints.getPos(idx);
+
+ Vec3 gradient = computeConstraintGradient(coarseParticles, pos);
+
+ Real wt = 0;
+ Vec3 tangentDisplacement(0, 0, 0);
+ LOOP_NEIGHBORS_BEGIN(surfacePoints, pos, params.tangentRadius)
+ if (idn != idx) {
+ Vec3 dir = pos - surfacePoints.getPos(idn);
+ Real length = norm(dir);
+ dir = getNormalized(dir);
+
+ // Decompose direction into normal and tangent directions.
+ Vec3 dn = dot(dir, gradient) * gradient;
+ Vec3 dt = dir - dn;
+
+ Real w = weightSurfaceTangent(length);
+ wt += w;
+ tangentDisplacement += w * dt;
+ }
+ LOOP_NEIGHBORS_END
+ // Only the direction matters; normalize away the accumulated magnitude.
+ if (norm(tangentDisplacement) != 0) {
+ tangentDisplacement = getNormalized(tangentDisplacement);
+ }
+
+ // check density criterion, add surface point if necessary
+ Vec3 creationPos = pos + params.meanFineDistance * tangentDisplacement;
+ // The -1e-6 epsilon avoids rejecting a candidate exactly at the mean
+ // spacing due to floating-point round-off.
+ if (isInDomain(creationPos) &&
+ !surfacePoints.hasNeighbor(creationPos, params.meanFineDistance - (1e-6))) {
+ // create point
+ surfacePoints.addBuffered(creationPos);
+ }
+ }
+
+ surfacePoints.doCompress();
+ surfacePoints.insertBufferedParticles();
+
+ // check density criterion, delete surface points if necessary
+ fixedSize = surfacePoints.size();
+ for (int idx = 0; idx < fixedSize; idx++) {
+ if (!isInDomain(surfacePoints.getPos(idx)) ||
+ surfacePoints.hasNeighborOtherThanItself(idx, 0.67 * params.meanFineDistance)) {
+ surfacePoints.kill(idx);
+ }
+ }
+
+ // delete surface points if no coarse neighbors in advection radius
+ fixedSize = surfacePoints.size();
+ for (int idx = 0; idx < fixedSize; idx++) {
+ Vec3 pos = surfacePoints.getPos(idx);
+ if (!coarseParticles.hasNeighbor(pos, 2.f * params.outerRadius)) {
+ surfacePoints.kill(idx);
+ }
+ }
+
+ // delete surface point if too far from constraint
+ fixedSize = surfacePoints.size();
+ for (int idx = 0; idx < fixedSize; idx++) {
+ Real level = computeConstraintLevel(coarseParticles, surfacePoints.getPos(idx));
+ // Allow 20% slack beyond the nominal [0, 1] band before deleting.
+ if (level < -0.2 || level > 1.2) {
+ surfacePoints.kill(idx);
+ }
+ }
+
+ surfacePoints.doCompress();
+ surfacePoints.insertBufferedParticles();
+}
+
+//
+// **** surface maintenance ****
+//
+
+// Kernel: computes a kernel-weighted neighbor density per surface point
+// (including ghost copies near boundaries) into the tempSurfaceFloat scratch
+// buffer; used by computeSurfaceDisplacements to normalize contributions.
+// The void* dummy argument only exists to satisfy the kernel-generator
+// signature.
+struct computeSurfaceDensities : public KernelBase {
+ computeSurfaceDensities(const BasicParticleSystemWrapper &surfacePoints, void *dummy)
+ : KernelBase(surfacePoints.size()), surfacePoints(surfacePoints), dummy(dummy)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, const BasicParticleSystemWrapper &surfacePoints, void *dummy) const
+ {
+ Vec3 pos = surfacePoints.getPos(idx);
+ Real density = 0;
+ LOOP_NEIGHBORS_BEGIN(surfacePoints, pos, params.normalRadius)
+ LOOP_GHOSTS_POS_BEGIN(surfacePoints.getPos(idn), params.normalRadius)
+ density += weightSurfaceNormal(norm(pos - gPos));
+ LOOP_GHOSTS_END
+ LOOP_NEIGHBORS_END
+ tempSurfaceFloat[idx] = density;
+ }
+ // --- auto-generated kernel plumbing ---
+ inline const BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline void *getArg1()
+ {
+ return dummy;
+ }
+ typedef void type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel computeSurfaceDensities ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, dummy);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystemWrapper &surfacePoints;
+ void *dummy;
+};
+
+// Kernel: regularization step. For each surface point, computes a small
+// displacement from its neighbors (and their boundary ghosts): a normal
+// component that pulls the point toward the surface implied by neighbor
+// normals, and a tangential component that evens out spacing. Results are
+// stored in tempSurfaceVec3 and applied later by applySurfaceDisplacements.
+struct computeSurfaceDisplacements : public KernelBase {
+ computeSurfaceDisplacements(const BasicParticleSystemWrapper &surfacePoints,
+ const ParticleDataImpl<Vec3> &surfaceNormals)
+ : KernelBase(surfacePoints.size()),
+ surfacePoints(surfacePoints),
+ surfaceNormals(surfaceNormals)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystemWrapper &surfacePoints,
+ const ParticleDataImpl<Vec3> &surfaceNormals) const
+ {
+ Vec3 pos = surfacePoints.getPos(idx);
+ Vec3 normal = surfaceNormals[idx];
+
+ Vec3 displacementNormal(0, 0, 0);
+ Vec3 displacementTangent(0, 0, 0);
+ Real wTotal = 0;
+ LOOP_NEIGHBORS_BEGIN(surfacePoints, pos, params.normalRadius)
+
+ LOOP_GHOSTS_POS_NORMAL_BEGIN(
+ surfacePoints.getPos(idn), surfaceNormals[idn], params.normalRadius)
+ Vec3 dir = pos - gPos;
+ Real length = norm(dir);
+ // Split offset into components along / orthogonal to this point's normal.
+ Vec3 dn = dot(dir, surfaceNormals[idx]) * surfaceNormals[idx];
+ Vec3 dt = dir - dn;
+ // Skip isolated neighbors (zero density would divide by zero below).
+ if (tempSurfaceFloat[idn] == 0) {
+ continue;
+ }
+ // Density-normalized weight so clustered neighbors don't dominate.
+ Real w = weightSurfaceNormal(length) / tempSurfaceFloat[idn];
+
+ // Project the neighbor normal into the plane spanned by `normal` and
+ // `dir`, then intersect to get the normal correction for this pair.
+ Vec3 crossVec = getNormalized(cross(normal, -dir));
+ Vec3 projectedNormal = getNormalized(gNormal - dot(crossVec, gNormal) * crossVec);
+ // Reject back-facing neighbors and near-degenerate denominators.
+ if (dot(projectedNormal, normal) < 0 || abs(dot(normal, normal + projectedNormal)) < 1e-6) {
+ continue;
+ }
+ dn = -dot(normal + projectedNormal, dir) / dot(normal, normal + projectedNormal) * normal;
+
+ displacementNormal += w * dn;
+ displacementTangent += w * getNormalized(dt);
+ wTotal += w;
+ LOOP_GHOSTS_END
+
+ LOOP_NEIGHBORS_END
+ if (wTotal != 0) {
+ displacementNormal /= wTotal;
+ displacementTangent /= wTotal;
+ }
+ // Relaxation factors: 3/4 of the normal correction, 1/4 of a mean
+ // spacing tangentially, per regularization step.
+ displacementNormal *= .75f;
+ displacementTangent *= .25f * params.meanFineDistance;
+ tempSurfaceVec3[idx] = displacementNormal + displacementTangent;
+ }
+ // --- auto-generated kernel plumbing ---
+ inline const BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline const ParticleDataImpl<Vec3> &getArg1()
+ {
+ return surfaceNormals;
+ }
+ typedef ParticleDataImpl<Vec3> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel computeSurfaceDisplacements ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, surfaceNormals);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystemWrapper &surfacePoints;
+ const ParticleDataImpl<Vec3> &surfaceNormals;
+};
+
+// Kernel: applies the displacements computed by computeSurfaceDisplacements
+// (stored in tempSurfaceVec3) to the surface-point positions. Separate pass
+// so positions are not modified while neighbors are still being read.
+// The void* dummy argument only satisfies the kernel-generator signature.
+struct applySurfaceDisplacements : public KernelBase {
+ applySurfaceDisplacements(BasicParticleSystemWrapper &surfacePoints, void *dummy)
+ : KernelBase(surfacePoints.size()), surfacePoints(surfacePoints), dummy(dummy)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, BasicParticleSystemWrapper &surfacePoints, void *dummy) const
+ {
+ surfacePoints.setPos(idx, surfacePoints.getPos(idx) + tempSurfaceVec3[idx]);
+ }
+ // --- auto-generated kernel plumbing ---
+ inline BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline void *getArg1()
+ {
+ return dummy;
+ }
+ typedef void type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel applySurfaceDisplacements ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, dummy);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ BasicParticleSystemWrapper &surfacePoints;
+ void *dummy;
+};
+
+// One regularization step: compute per-point densities, derive smoothing
+// displacements from them, then apply the displacements. The scratch buffers
+// are resized first so they match the current particle count.
+void regularizeSurfacePoints(BasicParticleSystemWrapper &surfacePoints,
+ const ParticleDataImpl<Vec3> &surfaceNormals)
+{
+ tempSurfaceVec3.resize(surfacePoints.size());
+ tempSurfaceFloat.resize(surfacePoints.size());
+
+ computeSurfaceDensities(surfacePoints, 0);
+ computeSurfaceDisplacements(surfacePoints, surfaceNormals);
+ applySurfaceDisplacements(surfacePoints, 0);
+}
+
+// Kernel: projects surface points back into the [innerRadius, outerRadius]
+// band around the coarse particles. Points with level > 1 (too far outside)
+// are pushed inward along the constraint gradient; points with level < 0
+// (too far inside) are pushed outward, proportionally to the overshoot.
+struct constrainSurface : public KernelBase {
+ constrainSurface(BasicParticleSystemWrapper &surfacePoints,
+ const BasicParticleSystemWrapper &coarseParticles)
+ : KernelBase(surfacePoints.size()),
+ surfacePoints(surfacePoints),
+ coarseParticles(coarseParticles)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ BasicParticleSystemWrapper &surfacePoints,
+ const BasicParticleSystemWrapper &coarseParticles) const
+ {
+ Vec3 pos = surfacePoints.getPos(idx);
+ Real level = computeConstraintLevel(coarseParticles, surfacePoints.getPos(idx));
+ if (level > 1) {
+ // (outerRadius - innerRadius) converts the normalized overshoot back
+ // to a world-space distance along the gradient.
+ surfacePoints.setPos(
+ idx,
+ pos - (params.outerRadius - params.innerRadius) * (level - 1) *
+ computeConstraintGradient(coarseParticles, surfacePoints.getPos(idx)));
+ }
+ else if (level < 0) {
+ surfacePoints.setPos(
+ idx,
+ pos - (params.outerRadius - params.innerRadius) * level *
+ computeConstraintGradient(coarseParticles, surfacePoints.getPos(idx)));
+ }
+ }
+ // --- auto-generated kernel plumbing ---
+ inline BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline const BasicParticleSystemWrapper &getArg1()
+ {
+ return coarseParticles;
+ }
+ typedef BasicParticleSystemWrapper type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel constrainSurface ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, coarseParticles);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ BasicParticleSystemWrapper &surfacePoints;
+ const BasicParticleSystemWrapper &coarseParticles;
+};
+
+// Kernel: initializes wave data on freshly inserted surface points (status
+// PNEW) by tangent-weighted interpolation from non-new neighbors, so new
+// points blend into the existing wave field instead of starting at zero.
+struct interpolateNewWaveData : public KernelBase {
+ interpolateNewWaveData(const BasicParticleSystemWrapper &surfacePoints,
+ ParticleDataImpl<Real> &surfaceWaveH,
+ ParticleDataImpl<Real> &surfaceWaveDtH,
+ ParticleDataImpl<Real> &surfaceWaveSeed,
+ ParticleDataImpl<Real> &surfaceWaveSeedAmplitude)
+ : KernelBase(surfacePoints.size()),
+ surfacePoints(surfacePoints),
+ surfaceWaveH(surfaceWaveH),
+ surfaceWaveDtH(surfaceWaveDtH),
+ surfaceWaveSeed(surfaceWaveSeed),
+ surfaceWaveSeedAmplitude(surfaceWaveSeedAmplitude)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystemWrapper &surfacePoints,
+ ParticleDataImpl<Real> &surfaceWaveH,
+ ParticleDataImpl<Real> &surfaceWaveDtH,
+ ParticleDataImpl<Real> &surfaceWaveSeed,
+ ParticleDataImpl<Real> &surfaceWaveSeedAmplitude) const
+ {
+ if (surfacePoints.getStatus(idx) & ParticleBase::PNEW) {
+ Vec3 pos = surfacePoints.getPos(idx);
+ // NOTE(review): only H and DtH are reset here; surfaceWaveSeed[idx]
+ // and surfaceWaveSeedAmplitude[idx] accumulate onto their existing
+ // values — presumably new particle data is zero-initialized; confirm.
+ surfaceWaveH[idx] = 0;
+ surfaceWaveDtH[idx] = 0;
+ Real wTotal = 0;
+ LOOP_NEIGHBORS_BEGIN(surfacePoints, pos, params.tangentRadius)
+ // Interpolate only from established (non-new) neighbors.
+ if (!(surfacePoints.getStatus(idn) & ParticleBase::PNEW)) {
+ Real w = weightSurfaceTangent(norm(pos - surfacePoints.getPos(idn)));
+ surfaceWaveH[idx] += w * surfaceWaveH[idn];
+ surfaceWaveDtH[idx] += w * surfaceWaveDtH[idn];
+ surfaceWaveSeed[idx] += w * surfaceWaveSeed[idn];
+ surfaceWaveSeedAmplitude[idx] += w * surfaceWaveSeedAmplitude[idn];
+ wTotal += w;
+ }
+ LOOP_NEIGHBORS_END
+ if (wTotal != 0) {
+ surfaceWaveH[idx] /= wTotal;
+ surfaceWaveDtH[idx] /= wTotal;
+ surfaceWaveSeed[idx] /= wTotal;
+ surfaceWaveSeedAmplitude[idx] /= wTotal;
+ }
+ }
+ }
+ // --- auto-generated kernel plumbing ---
+ inline const BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline ParticleDataImpl<Real> &getArg1()
+ {
+ return surfaceWaveH;
+ }
+ typedef ParticleDataImpl<Real> type1;
+ inline ParticleDataImpl<Real> &getArg2()
+ {
+ return surfaceWaveDtH;
+ }
+ typedef ParticleDataImpl<Real> type2;
+ inline ParticleDataImpl<Real> &getArg3()
+ {
+ return surfaceWaveSeed;
+ }
+ typedef ParticleDataImpl<Real> type3;
+ inline ParticleDataImpl<Real> &getArg4()
+ {
+ return surfaceWaveSeedAmplitude;
+ }
+ typedef ParticleDataImpl<Real> type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel interpolateNewWaveData ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx,
+ surfacePoints,
+ surfaceWaveH,
+ surfaceWaveDtH,
+ surfaceWaveSeed,
+ surfaceWaveSeedAmplitude);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystemWrapper &surfacePoints;
+ ParticleDataImpl<Real> &surfaceWaveH;
+ ParticleDataImpl<Real> &surfaceWaveDtH;
+ ParticleDataImpl<Real> &surfaceWaveSeed;
+ ParticleDataImpl<Real> &surfaceWaveSeedAmplitude;
+};
+
+// Runs `nbIterations` rounds of fine-surface maintenance. Each round:
+// add/delete points to restore target density, recompute and smooth normals,
+// regularize positions, clamp points back into the coarse constraint band,
+// and interpolate wave data onto any points created this round. The neighbor
+// acceleration structure is refreshed after every pass that moves or
+// adds/removes points.
+void surfaceMaintenance(const BasicParticleSystemWrapper &coarseParticles,
+ BasicParticleSystemWrapper &surfacePoints,
+ ParticleDataImpl<Vec3> &surfaceNormals,
+ ParticleDataImpl<Real> &surfaceWaveH,
+ ParticleDataImpl<Real> &surfaceWaveDtH,
+ ParticleDataImpl<Real> &surfaceWaveSeed,
+ ParticleDataImpl<Real> &surfaceWaveSeedAmplitude,
+ int nbIterations)
+{
+ for (int iter = 0; iter < nbIterations; ++iter) {
+ // Density maintenance invalidates neighbor queries -> rebuild accel.
+ addDeleteSurfacePoints(surfacePoints);
+ surfacePoints.updateAccel();
+ computeSurfaceNormals(surfacePoints, coarseParticles, surfaceNormals);
+ smoothSurfaceNormals(surfacePoints, surfaceNormals);
+
+ // Positions move in both of the following passes; refresh accel each time.
+ regularizeSurfacePoints(surfacePoints, surfaceNormals);
+ surfacePoints.updateAccel();
+ constrainSurface(surfacePoints, coarseParticles);
+ surfacePoints.updateAccel();
+
+ interpolateNewWaveData(
+ surfacePoints, surfaceWaveH, surfaceWaveDtH, surfaceWaveSeed, surfaceWaveSeedAmplitude);
+ }
+}
+
+//
+// **** surface wave seeding and evolution ****
+//
+
+// Kernel: adds the per-point seed amplitude onto the wave height before the
+// wave solve (evolveWave subtracts it again afterwards — presumably so the
+// seed acts as a forcing term rather than a persistent offset; confirm).
+struct addSeed : public KernelBase {
+ addSeed(const BasicParticleSystemWrapper &surfacePoints,
+ ParticleDataImpl<Real> &surfaceWaveH,
+ const ParticleDataImpl<Real> &surfaceWaveSeed)
+ : KernelBase(surfacePoints.size()),
+ surfacePoints(surfacePoints),
+ surfaceWaveH(surfaceWaveH),
+ surfaceWaveSeed(surfaceWaveSeed)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystemWrapper &surfacePoints,
+ ParticleDataImpl<Real> &surfaceWaveH,
+ const ParticleDataImpl<Real> &surfaceWaveSeed) const
+ {
+ surfaceWaveH[idx] += surfaceWaveSeed[idx];
+ }
+ // --- auto-generated kernel plumbing ---
+ inline const BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline ParticleDataImpl<Real> &getArg1()
+ {
+ return surfaceWaveH;
+ }
+ typedef ParticleDataImpl<Real> type1;
+ inline const ParticleDataImpl<Real> &getArg2()
+ {
+ return surfaceWaveSeed;
+ }
+ typedef ParticleDataImpl<Real> type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel addSeed ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, surfaceWaveH, surfaceWaveSeed);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystemWrapper &surfacePoints;
+ ParticleDataImpl<Real> &surfaceWaveH;
+ const ParticleDataImpl<Real> &surfaceWaveSeed;
+};
+
+// Kernel: estimates the gradient ("wave normal") of the scalar wave-height
+// field. Same weighted least-squares plane fit as computeSurfaceNormals, but
+// fitting z = surfaceWaveH over (x, y) in the point's tangent frame; the
+// result is stored in tempSurfaceVec3 for computeSurfaceWaveLaplacians.
+struct computeSurfaceWaveNormal : public KernelBase {
+ computeSurfaceWaveNormal(const BasicParticleSystemWrapper &surfacePoints,
+ const ParticleDataImpl<Vec3> &surfaceNormals,
+ const ParticleDataImpl<Real> &surfaceWaveH)
+ : KernelBase(surfacePoints.size()),
+ surfacePoints(surfacePoints),
+ surfaceNormals(surfaceNormals),
+ surfaceWaveH(surfaceWaveH)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystemWrapper &surfacePoints,
+ const ParticleDataImpl<Vec3> &surfaceNormals,
+ const ParticleDataImpl<Real> &surfaceWaveH) const
+ {
+ Vec3 pos = surfacePoints.getPos(idx);
+
+ // get tangent frame
+ Vec3 n = getNormalized(surfaceNormals[idx]);
+ Vec3 vx(1, 0, 0);
+ Vec3 vy(0, 1, 0);
+ Real dotX = dot(n, vx);
+ Real dotY = dot(n, vy);
+ // Cross with whichever axis is more orthogonal to n, for stability.
+ Vec3 t1 = getNormalized(fabs(dotX) < fabs(dotY) ? cross(n, vx) : cross(n, vy));
+ Vec3 t2 = getNormalized(cross(n, t1));
+
+ // linear fit
+ // Weighted moments for the normal equations of z = a*x + b*y + c.
+ Real sw = 0, swx = 0, swy = 0, swxy = 0, swx2 = 0, swy2 = 0, swxz = 0, swyz = 0, swz = 0;
+ LOOP_NEIGHBORS_BEGIN(surfacePoints, pos, params.tangentRadius)
+ LOOP_GHOSTS_POS_BEGIN(surfacePoints.getPos(idn), params.tangentRadius)
+ Real x = dot(gPos - pos, t1);
+ Real y = dot(gPos - pos, t2);
+ Real z = surfaceWaveH[idn];
+ Real w = weightSurfaceTangent(norm(pos - gPos));
+ swx2 += w * x * x;
+ swy2 += w * y * y;
+ swxy += w * x * y;
+ swxz += w * x * z;
+ swyz += w * y * z;
+ swx += w * x;
+ swy += w * y;
+ swz += w * z;
+ sw += w;
+ LOOP_GHOSTS_END
+ LOOP_NEIGHBORS_END
+ // Zero determinant -> degenerate fit (too few neighbors): no wave normal.
+ Real det = -sw * swxy * swxy + 2.f * swx * swxy * swy - swx2 * swy * swy - swx * swx * swy2 +
+ sw * swx2 * swy2;
+ if (det == 0) {
+ tempSurfaceVec3[idx] = Vec3(0, 0, 0);
+ }
+ else {
+ // Cramer-style solve for the plane coefficients (a, b, c).
+ Vec3 abc = 1.f / det *
+ Vec3(swxz * (-swy * swy + sw * swy2) + swyz * (-sw * swxy + swx * swy) +
+ swz * (swxy * swy - swx * swy2),
+ swxz * (-sw * swxy + swx * swy) + swyz * (-swx * swx + sw * swx2) +
+ swz * (swx * swxy - swx2 * swy),
+ swxz * (swxy * swy - swx * swy2) + swyz * (swx * swxy - swx2 * swy) +
+ swz * (-swxy * swxy + swx2 * swy2));
+ // Wave normal expressed in the fit's (x, y, height) coordinates.
+ Vec3 waveNormal = -getNormalized(vx * abc.x + vy * abc.y - Vec3(0, 0, 1));
+ tempSurfaceVec3[idx] = waveNormal;
+ }
+ }
+ // --- auto-generated kernel plumbing ---
+ inline const BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline const ParticleDataImpl<Vec3> &getArg1()
+ {
+ return surfaceNormals;
+ }
+ typedef ParticleDataImpl<Vec3> type1;
+ inline const ParticleDataImpl<Real> &getArg2()
+ {
+ return surfaceWaveH;
+ }
+ typedef ParticleDataImpl<Real> type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel computeSurfaceWaveNormal ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, surfaceNormals, surfaceWaveH);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystemWrapper &surfacePoints;
+ const ParticleDataImpl<Vec3> &surfaceNormals;
+ const ParticleDataImpl<Real> &surfaceWaveH;
+};
+
+// Kernel: estimates the Laplacian of the wave-height field per point, using
+// the wave normals from computeSurfaceWaveNormal (tempSurfaceVec3) to remove
+// the first-order (slope) component before the second-difference estimate.
+// Output goes to tempSurfaceFloat and drives the wave update in evolveWave.
+struct computeSurfaceWaveLaplacians : public KernelBase {
+ computeSurfaceWaveLaplacians(const BasicParticleSystemWrapper &surfacePoints,
+ const ParticleDataImpl<Vec3> &surfaceNormals,
+ const ParticleDataImpl<Real> &surfaceWaveH)
+ : KernelBase(surfacePoints.size()),
+ surfacePoints(surfacePoints),
+ surfaceNormals(surfaceNormals),
+ surfaceWaveH(surfaceWaveH)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystemWrapper &surfacePoints,
+ const ParticleDataImpl<Vec3> &surfaceNormals,
+ const ParticleDataImpl<Real> &surfaceWaveH) const
+ {
+ Real laplacian = 0;
+ Real wTotal = 0;
+ Vec3 pPos = surfacePoints.getPos(idx);
+ Vec3 pNormal = surfaceNormals[idx];
+
+ // Tangent frame around the surface normal (same scheme as elsewhere).
+ Vec3 vx(1, 0, 0);
+ Vec3 vy(0, 1, 0);
+ Real dotX = dot(pNormal, vx);
+ Real dotY = dot(pNormal, vy);
+ Vec3 t1 = getNormalized(fabs(dotX) < fabs(dotY) ? cross(pNormal, vx) : cross(pNormal, vy));
+ Vec3 t2 = getNormalized(cross(pNormal, t1));
+
+ Vec3 pWaveNormal = tempSurfaceVec3[idx];
+ Real ph = surfaceWaveH[idx];
+ // Degenerate wave normal (z == 0, incl. the zero vector from a failed
+ // fit) -> slope terms below would divide by zero; report zero Laplacian.
+ if (pWaveNormal.z == 0) {
+ tempSurfaceFloat[idx] = 0;
+ }
+ else {
+
+ LOOP_NEIGHBORS_BEGIN(surfacePoints, pPos, params.tangentRadius)
+ Real nh = surfaceWaveH[idn];
+ LOOP_GHOSTS_POS_BEGIN(surfacePoints.getPos(idn), params.tangentRadius)
+ Vec3 dir = gPos - pPos;
+ Real lengthDir = norm(dir);
+ // Skip (near-)coincident neighbors: 1/length^2 below would blow up.
+ if (lengthDir < 1e-5)
+ continue;
+ // Project the offset into the tangent plane, keeping its length.
+ Vec3 tangentDir = lengthDir * getNormalized(dir - dot(dir, pNormal) * pNormal);
+ Real dirX = dot(tangentDir, t1);
+ Real dirY = dot(tangentDir, t2);
+ // Height difference with the local slope (from the wave normal) removed.
+ Real dz = nh - ph - (-pWaveNormal.x / pWaveNormal.z) * dirX -
+ (-pWaveNormal.y / pWaveNormal.z) * dirY;
+ Real w = weightSurfaceTangent(norm(pPos - gPos));
+ wTotal += w;
+ // Clamp each contribution to suppress rare outlier spikes.
+ laplacian += clamp(w * 4 * dz / (lengthDir * lengthDir), Real(-100.), Real(100.));
+ LOOP_GHOSTS_END
+ LOOP_NEIGHBORS_END
+ if (wTotal != 0) {
+ tempSurfaceFloat[idx] = laplacian / wTotal;
+ }
+ else {
+ tempSurfaceFloat[idx] = 0;
+ }
+ }
+ }
+ // --- auto-generated kernel plumbing ---
+ inline const BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline const ParticleDataImpl<Vec3> &getArg1()
+ {
+ return surfaceNormals;
+ }
+ typedef ParticleDataImpl<Vec3> type1;
+ inline const ParticleDataImpl<Real> &getArg2()
+ {
+ return surfaceWaveH;
+ }
+ typedef ParticleDataImpl<Real> type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel computeSurfaceWaveLaplacians ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, surfaceNormals, surfaceWaveH);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystemWrapper &surfacePoints;
+ const ParticleDataImpl<Vec3> &surfaceNormals;
+ const ParticleDataImpl<Real> &surfaceWaveH;
+};
+
+// Kernel: one damped wave-equation time step per point. Integrates
+// dtH += c^2 * dt * laplacian (laplacian read from tempSurfaceFloat), then
+// H += dt * dtH, applying damping to both, removing the seed added by the
+// addSeed kernel, and finally clamping H and dtH to configured bounds.
+struct evolveWave : public KernelBase {
+ evolveWave(const BasicParticleSystemWrapper &surfacePoints,
+ ParticleDataImpl<Real> &surfaceWaveH,
+ ParticleDataImpl<Real> &surfaceWaveDtH,
+ const ParticleDataImpl<Real> &surfaceWaveSeed)
+ : KernelBase(surfacePoints.size()),
+ surfacePoints(surfacePoints),
+ surfaceWaveH(surfaceWaveH),
+ surfaceWaveDtH(surfaceWaveDtH),
+ surfaceWaveSeed(surfaceWaveSeed)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystemWrapper &surfacePoints,
+ ParticleDataImpl<Real> &surfaceWaveH,
+ ParticleDataImpl<Real> &surfaceWaveDtH,
+ const ParticleDataImpl<Real> &surfaceWaveSeed) const
+ {
+ // Semi-implicit damping: divide by (1 + dt * damping) after each update.
+ surfaceWaveDtH[idx] += params.waveSpeed * params.waveSpeed * params.dt * tempSurfaceFloat[idx];
+ surfaceWaveDtH[idx] /= (1 + params.dt * params.waveDamping);
+ surfaceWaveH[idx] += params.dt * surfaceWaveDtH[idx];
+ surfaceWaveH[idx] /= (1 + params.dt * params.waveDamping);
+ surfaceWaveH[idx] -= surfaceWaveSeed[idx];
+
+ // clamp H and DtH (to prevent rare extreme behaviors)
+ surfaceWaveDtH[idx] = clamp(surfaceWaveDtH[idx],
+ -params.waveMaxFrequency * params.waveMaxAmplitude,
+ params.waveMaxFrequency * params.waveMaxAmplitude);
+ surfaceWaveH[idx] = clamp(
+ surfaceWaveH[idx], -params.waveMaxAmplitude, params.waveMaxAmplitude);
+ }
+ // --- auto-generated kernel plumbing ---
+ inline const BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline ParticleDataImpl<Real> &getArg1()
+ {
+ return surfaceWaveH;
+ }
+ typedef ParticleDataImpl<Real> type1;
+ inline ParticleDataImpl<Real> &getArg2()
+ {
+ return surfaceWaveDtH;
+ }
+ typedef ParticleDataImpl<Real> type2;
+ inline const ParticleDataImpl<Real> &getArg3()
+ {
+ return surfaceWaveSeed;
+ }
+ typedef ParticleDataImpl<Real> type3;
+ void runMessage()
+ {
+ debMsg("Executing kernel evolveWave ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, surfaceWaveH, surfaceWaveDtH, surfaceWaveSeed);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystemWrapper &surfacePoints;
+ ParticleDataImpl<Real> &surfaceWaveH;
+ ParticleDataImpl<Real> &surfaceWaveDtH;
+ const ParticleDataImpl<Real> &surfaceWaveSeed;
+};
+
+// Kernel: estimates an (absolute) curvature value per surface point as the
+// weighted average of neighbor offsets projected on the point's normal,
+// skipping back-facing neighbors and near-coincident points. The result is
+// written to tempSurfaceFloat.
+struct computeSurfaceCurvature : public KernelBase {
+ computeSurfaceCurvature(const BasicParticleSystemWrapper &surfacePoints,
+ const ParticleDataImpl<Vec3> &surfaceNormals)
+ : KernelBase(surfacePoints.size()),
+ surfacePoints(surfacePoints),
+ surfaceNormals(surfaceNormals)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ const BasicParticleSystemWrapper &surfacePoints,
+ const ParticleDataImpl<Vec3> &surfaceNormals) const
+ {
+ Vec3 pPos = surfacePoints.getPos(idx);
+ Real wTotal = 0;
+ Real curv = 0;
+ Vec3 pNormal = surfaceNormals[idx];
+
+ LOOP_NEIGHBORS_BEGIN(surfacePoints, pPos, params.normalRadius)
+ LOOP_GHOSTS_POS_NORMAL_BEGIN(
+ surfacePoints.getPos(idn), surfaceNormals[idn], params.normalRadius)
+ Vec3 dir = pPos - gPos;
+ if (dot(pNormal, gNormal) < 0) {
+ continue;
+ } // backfacing
+ Real dist = norm(dir);
+ // Skip near-coincident neighbors; they carry no curvature information.
+ if (dist < params.normalRadius / 100.f) {
+ continue;
+ }
+
+ // Offset component along the normal: nonzero means the neighborhood bends.
+ Real distn = dot(dir, pNormal);
+
+ Real w = weightSurfaceNormal(dist);
+ curv += w * distn;
+ wTotal += w;
+ LOOP_GHOSTS_END
+ LOOP_NEIGHBORS_END
+ if (wTotal != 0) {
+ curv /= wTotal;
+ }
+ // Only the magnitude of the bending is used downstream.
+ tempSurfaceFloat[idx] = fabs(curv);
+ }
+ // --- auto-generated kernel plumbing ---
+ inline const BasicParticleSystemWrapper &getArg0()
+ {
+ return surfacePoints;
+ }
+ typedef BasicParticleSystemWrapper type0;
+ inline const ParticleDataImpl<Vec3> &getArg1()
+ {
+ return surfaceNormals;
+ }
+ typedef ParticleDataImpl<Vec3> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel computeSurfaceCurvature ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, surfacePoints, surfaceNormals);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ const BasicParticleSystemWrapper &surfacePoints;
+ const ParticleDataImpl<Vec3> &surfaceNormals;
+};
+
+// Kernel (generated boilerplate): smooths the curvature values stored in the
+// file-scope tempSurfaceFloat buffer by a weighted average over neighbors and
+// writes the smoothed result into surfaceWaveSource.
+struct smoothCurvature : public KernelBase {
+  smoothCurvature(const BasicParticleSystemWrapper &surfacePoints,
+                  ParticleDataImpl<Real> &surfaceWaveSource)
+      : KernelBase(surfacePoints.size()),
+        surfacePoints(surfacePoints),
+        surfaceWaveSource(surfaceWaveSource)
+  {
+    runMessage();
+    run();
+  }
+  // Per-particle body: distance-weighted average of neighbor curvature samples.
+  inline void op(IndexInt idx,
+                 const BasicParticleSystemWrapper &surfacePoints,
+                 ParticleDataImpl<Real> &surfaceWaveSource) const
+  {
+    Vec3 pPos = surfacePoints.getPos(idx);
+    Real curv = 0;
+    Real wTotal = 0;
+
+    LOOP_NEIGHBORS_BEGIN(surfacePoints, pPos, params.normalRadius)
+    Real w = weightSurfaceNormal(norm(pPos - surfacePoints.getPos(idn)));
+    curv += w * tempSurfaceFloat[idn];
+    wTotal += w;
+    LOOP_NEIGHBORS_END
+    // With no neighbors in range, the smoothed value defaults to 0.
+    if (wTotal != 0) {
+      curv /= wTotal;
+    }
+    surfaceWaveSource[idx] = curv;
+  }
+  inline const BasicParticleSystemWrapper &getArg0()
+  {
+    return surfacePoints;
+  }
+  typedef BasicParticleSystemWrapper type0;
+  inline ParticleDataImpl<Real> &getArg1()
+  {
+    return surfaceWaveSource;
+  }
+  typedef ParticleDataImpl<Real> type1;
+  void runMessage()
+  {
+    debMsg("Executing kernel smoothCurvature ", 3);
+    debMsg("Kernel range"
+               << " size " << size << " ",
+           4);
+  };
+  // TBB entry points: parallel loop over all surface particles.
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+      op(idx, surfacePoints, surfaceWaveSource);
+  }
+  void run()
+  {
+    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+  }
+  const BasicParticleSystemWrapper &surfacePoints;
+  ParticleDataImpl<Real> &surfaceWaveSource;
+};
+
+// Kernel (generated boilerplate): seeds new wave oscillations where the smoothed
+// curvature (surfaceWaveSource) lies in the seeding threshold region. The seed
+// amplitude grows/decays toward its clamped target and the instantaneous seed is
+// a cosine oscillation of that amplitude.
+struct seedWaves : public KernelBase {
+  seedWaves(const BasicParticleSystemWrapper &surfacePoints,
+            ParticleDataImpl<Real> &surfaceWaveSeed,
+            ParticleDataImpl<Real> &surfaceWaveSeedAmplitude,
+            ParticleDataImpl<Real> &surfaceWaveSource)
+      : KernelBase(surfacePoints.size()),
+        surfacePoints(surfacePoints),
+        surfaceWaveSeed(surfaceWaveSeed),
+        surfaceWaveSeedAmplitude(surfaceWaveSeedAmplitude),
+        surfaceWaveSource(surfaceWaveSource)
+  {
+    runMessage();
+    run();
+  }
+  inline void op(IndexInt idx,
+                 const BasicParticleSystemWrapper &surfacePoints,
+                 ParticleDataImpl<Real> &surfaceWaveSeed,
+                 ParticleDataImpl<Real> &surfaceWaveSeedAmplitude,
+                 ParticleDataImpl<Real> &surfaceWaveSource) const
+  {
+    // smoothstep over the threshold region maps curvature to [0,1]; the *2-1
+    // rescale yields a signed source in [-1,1] (negative => amplitude decays).
+    Real source = smoothstep(params.waveSeedingCurvatureThresholdRegionCenter -
+                                 params.waveSeedingCurvatureThresholdRegionRadius,
+                             params.waveSeedingCurvatureThresholdRegionCenter +
+                                 params.waveSeedingCurvatureThresholdRegionRadius,
+                             (Real)surfaceWaveSource[idx]) *
+                      2.f -
+                  1.f;
+    Real freq = params.waveSeedFrequency;
+    // Oscillator phase derived from the global frame counter (file-scope frameCount).
+    Real theta = params.dt * frameCount * params.waveSpeed * freq;
+    Real costheta = cosf(theta);
+    Real maxSeedAmplitude = params.waveMaxSeedingAmplitude * params.waveMaxAmplitude;
+
+    // Step the amplitude by a fixed ratio of the max, clamped to [0, max].
+    surfaceWaveSeedAmplitude[idx] = clamp<Real>(surfaceWaveSeedAmplitude[idx] +
+                                                    source * params.waveSeedStepSizeRatioOfMax *
+                                                        maxSeedAmplitude,
+                                                0.f,
+                                                maxSeedAmplitude);
+    surfaceWaveSeed[idx] = surfaceWaveSeedAmplitude[idx] * costheta;
+
+    // source values for display (not used after this point anyway)
+    surfaceWaveSource[idx] = (source >= 0) ? 1 : 0;
+  }
+  inline const BasicParticleSystemWrapper &getArg0()
+  {
+    return surfacePoints;
+  }
+  typedef BasicParticleSystemWrapper type0;
+  inline ParticleDataImpl<Real> &getArg1()
+  {
+    return surfaceWaveSeed;
+  }
+  typedef ParticleDataImpl<Real> type1;
+  inline ParticleDataImpl<Real> &getArg2()
+  {
+    return surfaceWaveSeedAmplitude;
+  }
+  typedef ParticleDataImpl<Real> type2;
+  inline ParticleDataImpl<Real> &getArg3()
+  {
+    return surfaceWaveSource;
+  }
+  typedef ParticleDataImpl<Real> type3;
+  void runMessage()
+  {
+    debMsg("Executing kernel seedWaves ", 3);
+    debMsg("Kernel range"
+               << " size " << size << " ",
+           4);
+  };
+  // TBB entry points: parallel loop over all surface particles.
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+      op(idx, surfacePoints, surfaceWaveSeed, surfaceWaveSeedAmplitude, surfaceWaveSource);
+  }
+  void run()
+  {
+    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+  }
+  const BasicParticleSystemWrapper &surfacePoints;
+  ParticleDataImpl<Real> &surfaceWaveSeed;
+  ParticleDataImpl<Real> &surfaceWaveSeedAmplitude;
+  ParticleDataImpl<Real> &surfaceWaveSource;
+};
+
+// One wave-simulation step on the surface particles: seed, integrate the wave
+// equation (normal/Laplacian + evolveWave), then derive next frame's seeding from
+// the smoothed surface curvature. Kernel order is significant.
+void surfaceWaves(const BasicParticleSystemWrapper &surfacePoints,
+                  const ParticleDataImpl<Vec3> &surfaceNormals,
+                  ParticleDataImpl<Real> &surfaceWaveH,
+                  ParticleDataImpl<Real> &surfaceWaveDtH,
+                  ParticleDataImpl<Real> &surfaceWaveSource,
+                  ParticleDataImpl<Real> &surfaceWaveSeed,
+                  ParticleDataImpl<Real> &surfaceWaveSeedAmplitude)
+{
+  addSeed(surfacePoints, surfaceWaveH, surfaceWaveSeed);
+  computeSurfaceWaveNormal(surfacePoints, surfaceNormals, surfaceWaveH);
+  computeSurfaceWaveLaplacians(surfacePoints, surfaceNormals, surfaceWaveH);
+  evolveWave(surfacePoints, surfaceWaveH, surfaceWaveDtH, surfaceWaveSeed);
+  computeSurfaceCurvature(surfacePoints, surfaceNormals);
+  smoothCurvature(surfacePoints, surfaceWaveSource);
+  seedWaves(surfacePoints, surfaceWaveSeed, surfaceWaveSeedAmplitude, surfaceWaveSource);
+}
+
+//
+// **** main function ****
+//
+
+// Main entry point of the surface-turbulence upres: maintains a fine surface
+// particle layer on top of the coarse FLIP particles and runs the wave
+// simulation on it. First call (frameCount == 0) initializes the fine surface;
+// subsequent calls advect, maintain, and evolve waves. Outputs displaced
+// surface points (position + normal * waveH) into surfacePointsDisplaced.
+// Parameters are copied into the file-scope `params` struct used by all kernels.
+void particleSurfaceTurbulence(const FlagGrid &flags,
+                               BasicParticleSystem &coarseParts,
+                               ParticleDataImpl<Vec3> &coarsePartsPrevPos,
+                               BasicParticleSystem &surfPoints,
+                               ParticleDataImpl<Vec3> &surfaceNormals,
+                               ParticleDataImpl<Real> &surfaceWaveH,
+                               ParticleDataImpl<Real> &surfaceWaveDtH,
+                               BasicParticleSystem &surfacePointsDisplaced,
+                               ParticleDataImpl<Real> &surfaceWaveSource,
+                               ParticleDataImpl<Real> &surfaceWaveSeed,
+                               ParticleDataImpl<Real> &surfaceWaveSeedAmplitude,
+                               int res,
+                               Real outerRadius = 1.0f,
+                               int surfaceDensity = 20,
+                               int nbSurfaceMaintenanceIterations = 4,
+                               Real dt = 0.005f,
+                               Real waveSpeed = 16.0f,
+                               Real waveDamping = 0.0f,
+                               Real waveSeedFrequency = 4,
+                               Real waveMaxAmplitude = 0.25f,
+                               Real waveMaxFrequency = 800,
+                               Real waveMaxSeedingAmplitude = 0.5,
+                               Real waveSeedingCurvatureThresholdRegionCenter = 0.025f,
+                               Real waveSeedingCurvatureThresholdRegionRadius = 0.01f,
+                               Real waveSeedStepSizeRatioOfMax = 0.05f)
+{
+#if USE_CHRONO == 1
+  // Optional wall-clock timing of the base sim vs. the upres step.
+  static std::chrono::high_resolution_clock::time_point begin, end;
+  end = std::chrono::high_resolution_clock::now();
+  cout << std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count() / 1000000000.f
+       << " : time sim" << endl;
+  begin = std::chrono::high_resolution_clock::now();
+#endif
+
+  // wrap data
+  coarseParticles.points = &coarseParts;
+  coarseParticlesPrevPos.points = &coarsePartsPrevPos;
+  surfacePoints.points = &surfPoints;
+
+  // copy parameters
+  params.res = res;
+  params.outerRadius = outerRadius;
+  params.surfaceDensity = surfaceDensity;
+  params.nbSurfaceMaintenanceIterations = nbSurfaceMaintenanceIterations;
+  params.dt = dt;
+  params.waveSpeed = waveSpeed;
+  params.waveDamping = waveDamping;
+  params.waveSeedFrequency = waveSeedFrequency;
+  params.waveMaxAmplitude = waveMaxAmplitude;
+  params.waveMaxFrequency = waveMaxFrequency;
+  params.waveMaxSeedingAmplitude = waveMaxSeedingAmplitude;
+  params.waveSeedingCurvatureThresholdRegionCenter = waveSeedingCurvatureThresholdRegionCenter;
+  params.waveSeedingCurvatureThresholdRegionRadius = waveSeedingCurvatureThresholdRegionRadius;
+  params.waveSeedStepSizeRatioOfMax = waveSeedStepSizeRatioOfMax;
+
+  // compute other parameters
+  params.innerRadius = params.outerRadius / 2.0;
+  // Mean spacing of fine particles on the mid-shell between inner and outer radius.
+  params.meanFineDistance = M_PI * (params.outerRadius + params.innerRadius) /
+                            params.surfaceDensity;
+  params.constraintA = logf(2.0f / (1.0f + weightKernelCoarseDensity(params.outerRadius +
+                                                                     params.innerRadius))) /
+                       (powf((params.outerRadius + params.innerRadius) / 2, 2) -
+                        params.innerRadius * params.innerRadius);
+  params.normalRadius = 0.5f * (params.outerRadius + params.innerRadius);
+  params.tangentRadius = 2.1f * params.meanFineDistance;
+  // Two-cell boundary band kept free of surface points on each side of the domain.
+  params.bndXm = params.bndYm = params.bndZm = 2;
+  params.bndXp = params.bndYp = params.bndZp = params.res - 2;
+
+  if (frameCount == 0) {
+
+    // initialize accel grids
+    accelCoarse.init(2.f * res / params.outerRadius);
+    accelSurface.init(1.f * res / (2.f * params.meanFineDistance));
+
+    // update coarse accel structure
+    coarseParticles.updateAccel();
+
+    // create surface points
+    initFines(coarseParticles, surfacePoints, flags);
+
+    // smooth surface (extra maintenance iterations on the very first frame)
+    surfaceMaintenance(coarseParticles,
+                       surfacePoints,
+                       surfaceNormals,
+                       surfaceWaveH,
+                       surfaceWaveDtH,
+                       surfaceWaveSeed,
+                       surfaceWaveSeedAmplitude,
+                       6 * params.nbSurfaceMaintenanceIterations);
+
+    // set wave values to zero
+    for (int idx = 0; idx < surfacePoints.size(); idx++) {
+      surfaceWaveH[idx] = 0;
+      surfaceWaveDtH[idx] = 0;
+      surfaceWaveSeed[idx] = 0;
+      surfaceWaveSeedAmplitude[idx] = 0;
+    }
+  }
+  else {
+
+    // update coarse accel structure with previous coarse particles positions
+    coarseParticlesPrevPos.updateAccel();
+
+    // advect surface points following coarse particles
+    advectSurfacePoints(surfacePoints, coarseParticles, coarseParticlesPrevPos);
+    surfacePoints.updateAccel();
+
+    // update acceleration structure for surface points
+    coarseParticles.updateAccel();
+
+    // surface maintenance
+    surfaceMaintenance(coarseParticles,
+                       surfacePoints,
+                       surfaceNormals,
+                       surfaceWaveH,
+                       surfaceWaveDtH,
+                       surfaceWaveSeed,
+                       surfaceWaveSeedAmplitude,
+                       params.nbSurfaceMaintenanceIterations);
+
+    // surface waves
+    surfaceWaves(surfacePoints,
+                 surfaceNormals,
+                 surfaceWaveH,
+                 surfaceWaveDtH,
+                 surfaceWaveSource,
+                 surfaceWaveSeed,
+                 surfaceWaveSeedAmplitude);
+  }
+  frameCount++;
+
+  // save positions as previous positions for next step
+  // (skip particles that were just created or are flagged for deletion)
+  for (int id = 0; id < coarseParticles.size(); id++) {
+    if ((coarseParticles.getStatus(id) & ParticleBase::PNEW) == 0 &&
+        (coarseParticles.getStatus(id) & ParticleBase::PDELETE) == 0) {
+      coarseParticlesPrevPos.setVec3(id, coarseParticles.getPos(id));
+    }
+  }
+
+  // create displaced points for display
+  surfacePointsDisplaced.clear();
+  for (int idx = 0; idx < surfacePoints.size(); idx++) {
+    if ((surfacePoints.getStatus(idx) & ParticleBase::PDELETE) == 0) {
+      surfacePointsDisplaced.addParticle(surfacePoints.getPos(idx) +
+                                         surfaceNormals[idx] * surfaceWaveH[idx]);
+    }
+  }
+
+#if USE_CHRONO == 1
+  end = std::chrono::high_resolution_clock::now();
+  cout << std::chrono::duration_cast<std::chrono::nanoseconds>(end - begin).count() / 1000000000.f
+       << " : time upres" << endl;
+  begin = std::chrono::high_resolution_clock::now();
+#endif
+}
+// Generated Python binding: unpacks the script-side arguments (by name or
+// position, with the same defaults as the C++ signature) and dispatches to
+// particleSurfaceTurbulence(). Returns Python None; errors are converted to a
+// Python exception via pbSetError.
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "particleSurfaceTurbulence", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+      BasicParticleSystem &coarseParts = *_args.getPtr<BasicParticleSystem>(
+          "coarseParts", 1, &_lock);
+      ParticleDataImpl<Vec3> &coarsePartsPrevPos = *_args.getPtr<ParticleDataImpl<Vec3>>(
+          "coarsePartsPrevPos", 2, &_lock);
+      BasicParticleSystem &surfPoints = *_args.getPtr<BasicParticleSystem>(
+          "surfPoints", 3, &_lock);
+      ParticleDataImpl<Vec3> &surfaceNormals = *_args.getPtr<ParticleDataImpl<Vec3>>(
+          "surfaceNormals", 4, &_lock);
+      ParticleDataImpl<Real> &surfaceWaveH = *_args.getPtr<ParticleDataImpl<Real>>(
+          "surfaceWaveH", 5, &_lock);
+      ParticleDataImpl<Real> &surfaceWaveDtH = *_args.getPtr<ParticleDataImpl<Real>>(
+          "surfaceWaveDtH", 6, &_lock);
+      BasicParticleSystem &surfacePointsDisplaced = *_args.getPtr<BasicParticleSystem>(
+          "surfacePointsDisplaced", 7, &_lock);
+      ParticleDataImpl<Real> &surfaceWaveSource = *_args.getPtr<ParticleDataImpl<Real>>(
+          "surfaceWaveSource", 8, &_lock);
+      ParticleDataImpl<Real> &surfaceWaveSeed = *_args.getPtr<ParticleDataImpl<Real>>(
+          "surfaceWaveSeed", 9, &_lock);
+      ParticleDataImpl<Real> &surfaceWaveSeedAmplitude = *_args.getPtr<ParticleDataImpl<Real>>(
+          "surfaceWaveSeedAmplitude", 10, &_lock);
+      int res = _args.get<int>("res", 11, &_lock);
+      Real outerRadius = _args.getOpt<Real>("outerRadius", 12, 1.0f, &_lock);
+      int surfaceDensity = _args.getOpt<int>("surfaceDensity", 13, 20, &_lock);
+      int nbSurfaceMaintenanceIterations = _args.getOpt<int>(
+          "nbSurfaceMaintenanceIterations", 14, 4, &_lock);
+      Real dt = _args.getOpt<Real>("dt", 15, 0.005f, &_lock);
+      Real waveSpeed = _args.getOpt<Real>("waveSpeed", 16, 16.0f, &_lock);
+      Real waveDamping = _args.getOpt<Real>("waveDamping", 17, 0.0f, &_lock);
+      Real waveSeedFrequency = _args.getOpt<Real>("waveSeedFrequency", 18, 4, &_lock);
+      Real waveMaxAmplitude = _args.getOpt<Real>("waveMaxAmplitude", 19, 0.25f, &_lock);
+      Real waveMaxFrequency = _args.getOpt<Real>("waveMaxFrequency", 20, 800, &_lock);
+      Real waveMaxSeedingAmplitude = _args.getOpt<Real>(
+          "waveMaxSeedingAmplitude", 21, 0.5, &_lock);
+      Real waveSeedingCurvatureThresholdRegionCenter = _args.getOpt<Real>(
+          "waveSeedingCurvatureThresholdRegionCenter", 22, 0.025f, &_lock);
+      Real waveSeedingCurvatureThresholdRegionRadius = _args.getOpt<Real>(
+          "waveSeedingCurvatureThresholdRegionRadius", 23, 0.01f, &_lock);
+      Real waveSeedStepSizeRatioOfMax = _args.getOpt<Real>(
+          "waveSeedStepSizeRatioOfMax", 24, 0.05f, &_lock);
+      _retval = getPyNone();
+      particleSurfaceTurbulence(flags,
+                                coarseParts,
+                                coarsePartsPrevPos,
+                                surfPoints,
+                                surfaceNormals,
+                                surfaceWaveH,
+                                surfaceWaveDtH,
+                                surfacePointsDisplaced,
+                                surfaceWaveSource,
+                                surfaceWaveSeed,
+                                surfaceWaveSeedAmplitude,
+                                res,
+                                outerRadius,
+                                surfaceDensity,
+                                nbSurfaceMaintenanceIterations,
+                                dt,
+                                waveSpeed,
+                                waveDamping,
+                                waveSeedFrequency,
+                                waveMaxAmplitude,
+                                waveMaxFrequency,
+                                waveMaxSeedingAmplitude,
+                                waveSeedingCurvatureThresholdRegionCenter,
+                                waveSeedingCurvatureThresholdRegionRadius,
+                                waveSeedStepSizeRatioOfMax);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "particleSurfaceTurbulence", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("particleSurfaceTurbulence", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Python-side plugin registry at static-init time.
+static const Pb::Register _RP_particleSurfaceTurbulence("", "particleSurfaceTurbulence", _W_0);
+extern "C" {
+void PbRegister_particleSurfaceTurbulence()
+{
+  KEEP_UNUSED(_RP_particleSurfaceTurbulence);
+}
+}
+
+// Debug helper: verifies every particle position lies inside the flag grid.
+// Terminates the whole process (exit(1)) on the first out-of-bounds particle —
+// intended for debugging runs only.
+void debugCheckParts(const BasicParticleSystem &parts, const FlagGrid &flags)
+{
+  for (int idx = 0; idx < parts.size(); idx++) {
+    Vec3i p = toVec3i(parts.getPos(idx));
+    if (!flags.isInBounds(p)) {
+      debMsg("bad position??? " << idx << " " << parts.getPos(idx), 1);
+      exit(1);
+    }
+  }
+}
+// Generated Python binding for debugCheckParts(); returns None or raises on error.
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "debugCheckParts", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      const BasicParticleSystem &parts = *_args.getPtr<BasicParticleSystem>("parts", 0, &_lock);
+      const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 1, &_lock);
+      _retval = getPyNone();
+      debugCheckParts(parts, flags);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "debugCheckParts", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("debugCheckParts", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Python-side plugin registry at static-init time.
+static const Pb::Register _RP_debugCheckParts("", "debugCheckParts", _W_1);
+extern "C" {
+void PbRegister_debugCheckParts()
+{
+  KEEP_UNUSED(_RP_debugCheckParts);
+}
+}
+
+} // namespace SurfaceTurbulence
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/vortexplugins.cpp b/extern/mantaflow/preprocessed/plugin/vortexplugins.cpp
new file mode 100644
index 00000000000..c2a21d82689
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/vortexplugins.cpp
@@ -0,0 +1,695 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Plugins for using vortex sheet meshes
+ *
+ ******************************************************************************/
+
+#include <iostream>
+#include "vortexsheet.h"
+#include "vortexpart.h"
+#include "shapes.h"
+#include "commonkernels.h"
+#include "conjugategrad.h"
+#include "randomstream.h"
+#include "levelset.h"
+
+using namespace std;
+
+namespace Manta {
+
+//! Mark area of mesh inside shape as fixed nodes.
+//! Remove all other fixed nodes if 'exclusive' is set
+
+// Set the NfFixed flag on all mesh nodes inside `shape`; when `exclusive`,
+// the flag is additionally cleared on every node outside the shape.
+void markAsFixed(Mesh &mesh, const Shape *shape, bool exclusive = true)
+{
+  for (int i = 0; i < mesh.numNodes(); i++) {
+    if (shape->isInside(mesh.nodes(i).pos))
+      mesh.nodes(i).flags |= Mesh::NfFixed;
+    else if (exclusive)
+      mesh.nodes(i).flags &= ~Mesh::NfFixed;
+  }
+}
+// Generated Python binding for markAsFixed(); returns None or raises on error.
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "markAsFixed", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      Mesh &mesh = *_args.getPtr<Mesh>("mesh", 0, &_lock);
+      const Shape *shape = _args.getPtr<Shape>("shape", 1, &_lock);
+      bool exclusive = _args.getOpt<bool>("exclusive", 2, true, &_lock);
+      _retval = getPyNone();
+      markAsFixed(mesh, shape, exclusive);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "markAsFixed", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("markAsFixed", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Python-side plugin registry at static-init time.
+static const Pb::Register _RP_markAsFixed("", "markAsFixed", _W_0);
+extern "C" {
+void PbRegister_markAsFixed()
+{
+  KEEP_UNUSED(_RP_markAsFixed);
+}
+}
+
+//! Adapt texture coordinates of mesh inside shape
+//! to obtain an effective inflow effect
+
+// Advect the reference texture offset by the mean fluid velocity inside `shape`
+// and stamp both texture layers of all nodes inside the shape with it, creating
+// an inflow effect for the sheet textures. Note: t0 is function-local static,
+// so the offset accumulates across calls (one instance shared by all meshes).
+void texcoordInflow(VortexSheetMesh &mesh, const Shape *shape, const MACGrid &vel)
+{
+  static Vec3 t0 = Vec3::Zero;
+
+  // get mean velocity over all grid cells covered by the shape
+  int cnt = 0;
+  Vec3 meanV(0.0);
+  FOR_IJK(vel)
+  {
+    if (shape->isInsideGrid(i, j, k)) {
+      cnt++;
+      meanV += vel.getCentered(i, j, k);
+    }
+  }
+  // Guard cnt == 0 (shape overlaps no cells): the unconditional division
+  // produced NaN that then accumulated into the static offset t0.
+  if (cnt > 0) meanV /= (Real)cnt;
+  t0 -= mesh.getParent()->getDt() * meanV;
+  mesh.setReferenceTexOffset(t0);
+
+  // apply mean velocity: reset texcoords of nodes inside the shape
+  for (int i = 0; i < mesh.numNodes(); i++) {
+    if (shape->isInside(mesh.nodes(i).pos)) {
+      Vec3 tc = mesh.nodes(i).pos + t0;
+      mesh.tex1(i) = tc;
+      mesh.tex2(i) = tc;
+    }
+  }
+}
+// Generated Python binding for texcoordInflow(); returns None or raises on error.
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "texcoordInflow", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      VortexSheetMesh &mesh = *_args.getPtr<VortexSheetMesh>("mesh", 0, &_lock);
+      const Shape *shape = _args.getPtr<Shape>("shape", 1, &_lock);
+      const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 2, &_lock);
+      _retval = getPyNone();
+      texcoordInflow(mesh, shape, vel);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "texcoordInflow", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("texcoordInflow", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Python-side plugin registry at static-init time.
+static const Pb::Register _RP_texcoordInflow("", "texcoordInflow", _W_1);
+extern "C" {
+void PbRegister_texcoordInflow()
+{
+  KEEP_UNUSED(_RP_texcoordInflow);
+}
+}
+
+;
+
+//! Init smoke density values of the mesh surface inside source shape
+
+// Set the smoke amount of every sheet triangle whose center lies inside `shape`.
+void meshSmokeInflow(VortexSheetMesh &mesh, const Shape *shape, Real amount)
+{
+  for (int t = 0; t < mesh.numTris(); t++) {
+    if (shape->isInside(mesh.getFaceCenter(t)))
+      mesh.sheet(t).smokeAmount = amount;
+  }
+}
+// Generated Python binding for meshSmokeInflow(); returns None or raises on error.
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "meshSmokeInflow", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      VortexSheetMesh &mesh = *_args.getPtr<VortexSheetMesh>("mesh", 0, &_lock);
+      const Shape *shape = _args.getPtr<Shape>("shape", 1, &_lock);
+      Real amount = _args.get<Real>("amount", 2, &_lock);
+      _retval = getPyNone();
+      meshSmokeInflow(mesh, shape, amount);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "meshSmokeInflow", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("meshSmokeInflow", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Python-side plugin registry at static-init time.
+static const Pb::Register _RP_meshSmokeInflow("", "meshSmokeInflow", _W_2);
+extern "C" {
+void PbRegister_meshSmokeInflow()
+{
+  KEEP_UNUSED(_RP_meshSmokeInflow);
+}
+}
+
+// Kernel (generated boilerplate): per-cell finite-difference acceleration,
+// a = (v1 - v0) * idt, where idt is the inverse time step.
+struct KnAcceleration : public KernelBase {
+  KnAcceleration(MACGrid &a, const MACGrid &v1, const MACGrid &v0, const Real idt)
+      : KernelBase(&a, 0), a(a), v1(v1), v0(v0), idt(idt)
+  {
+    runMessage();
+    run();
+  }
+  inline void op(
+      IndexInt idx, MACGrid &a, const MACGrid &v1, const MACGrid &v0, const Real idt) const
+  {
+    a[idx] = (v1[idx] - v0[idx]) * idt;
+  }
+  inline MACGrid &getArg0()
+  {
+    return a;
+  }
+  typedef MACGrid type0;
+  inline const MACGrid &getArg1()
+  {
+    return v1;
+  }
+  typedef MACGrid type1;
+  inline const MACGrid &getArg2()
+  {
+    return v0;
+  }
+  typedef MACGrid type2;
+  inline const Real &getArg3()
+  {
+    return idt;
+  }
+  typedef Real type3;
+  void runMessage()
+  {
+    debMsg("Executing kernel KnAcceleration ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB entry points: parallel loop over the flat grid index range.
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+      op(idx, a, v1, v0, idt);
+  }
+  void run()
+  {
+    tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+  }
+  MACGrid &a;
+  const MACGrid &v1;
+  const MACGrid &v0;
+  const Real idt;
+};
+
+//! Add vorticity to vortex sheets based on buoyancy
+
+// Per-triangle baroclinic vorticity source: omega += dt/dx * scale * n x (a - g),
+// where `a` is the flow acceleration (only when vel/velOld are given). Existing
+// vorticity is scaled by `mult`, fixed triangles are zeroed, and the magnitude is
+// capped at `maxAmount` (if > 0). Prints max/mean magnitude to stdout for stats.
+void vorticitySource(VortexSheetMesh &mesh,
+                     Vec3 gravity,
+                     const MACGrid *vel = NULL,
+                     const MACGrid *velOld = NULL,
+                     Real scale = 0.1,
+                     Real maxAmount = 0,
+                     Real mult = 1.0)
+{
+  Real dt = mesh.getParent()->getDt();
+  Real dx = mesh.getParent()->getDx();
+  MACGrid acceleration(mesh.getParent());
+  if (vel)
+    KnAcceleration(acceleration, *vel, *velOld, 1.0 / dt);
+  const Real A = -1.0;
+  Real maxV = 0, meanV = 0;
+
+  for (int t = 0; t < mesh.numTris(); t++) {
+    Vec3 fn = mesh.getFaceNormal(t);
+    Vec3 source;
+    if (vel) {
+      // Sample acceleration at the face center; subtract gravity for buoyancy.
+      Vec3 a = acceleration.getInterpolated(mesh.getFaceCenter(t));
+      source = A * cross(fn, a - gravity) * scale;
+    }
+    else {
+      source = A * cross(fn, -gravity) * scale;
+    }
+
+    // Fixed triangles receive no vorticity.
+    if (mesh.isTriangleFixed(t))
+      source = 0;
+
+    mesh.sheet(t).vorticity *= mult;
+    mesh.sheet(t).vorticity += dt * source / dx;
+    // upper limit
+    Real v = norm(mesh.sheet(t).vorticity);
+    if (maxAmount > 0 && v > maxAmount)
+      mesh.sheet(t).vorticity *= maxAmount / v;
+
+    // stats
+    if (v > maxV)
+      maxV = v;
+    meanV += v;
+  }
+
+  cout << "vorticity: max " << maxV << " / mean " << meanV / mesh.numTris() << endl;
+}
+// Generated Python binding for vorticitySource(); returns None or raises on error.
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "vorticitySource", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      VortexSheetMesh &mesh = *_args.getPtr<VortexSheetMesh>("mesh", 0, &_lock);
+      Vec3 gravity = _args.get<Vec3>("gravity", 1, &_lock);
+      const MACGrid *vel = _args.getPtrOpt<MACGrid>("vel", 2, NULL, &_lock);
+      const MACGrid *velOld = _args.getPtrOpt<MACGrid>("velOld", 3, NULL, &_lock);
+      Real scale = _args.getOpt<Real>("scale", 4, 0.1, &_lock);
+      Real maxAmount = _args.getOpt<Real>("maxAmount", 5, 0, &_lock);
+      Real mult = _args.getOpt<Real>("mult", 6, 1.0, &_lock);
+      _retval = getPyNone();
+      vorticitySource(mesh, gravity, vel, velOld, scale, maxAmount, mult);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "vorticitySource", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("vorticitySource", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Python-side plugin registry at static-init time.
+static const Pb::Register _RP_vorticitySource("", "vorticitySource", _W_3);
+extern "C" {
+void PbRegister_vorticitySource()
+{
+  KEEP_UNUSED(_RP_vorticitySource);
+}
+}
+
+// Gaussian-style smoothing of per-triangle vorticity over `iter` Jacobi passes.
+// Weights exp(-|pi - pt|^2 / (2 sigma^2)) are precomputed per corner-neighbor
+// (slot 3*i+c); the result, attenuated by `alpha`, is stored in vorticitySmoothed.
+void smoothVorticity(VortexSheetMesh &mesh, int iter = 1, Real sigma = 0.2, Real alpha = 0.8)
+{
+  const Real mult = -0.5 / sigma / sigma;
+
+  // pre-calculate positions and weights
+  vector<Vec3> vort(mesh.numTris()), pos(mesh.numTris());
+  vector<Real> weights(3 * mesh.numTris());
+  vector<int> index(3 * mesh.numTris());
+  for (int i = 0; i < mesh.numTris(); i++) {
+    pos[i] = mesh.getFaceCenter(i);
+    mesh.sheet(i).vorticitySmoothed = mesh.sheet(i).vorticity;
+  }
+  for (int i = 0; i < mesh.numTris(); i++) {
+    for (int c = 0; c < 3; c++) {
+      int oc = mesh.corners(i, c).opposite;
+      if (oc >= 0) {
+        // Neighbor triangle across corner c; weight by center distance.
+        int t = mesh.corners(oc).tri;
+        weights[3 * i + c] = exp(normSquare(pos[t] - pos[i]) * mult);
+        index[3 * i + c] = t;
+      }
+      else {
+        // Boundary edge: zero weight so the slot contributes nothing below.
+        weights[3 * i + c] = 0;
+        index[3 * i + c] = 0;
+      }
+    }
+  }
+
+  for (int it = 0; it < iter; ++it) {
+    // first, preload
+    for (int i = 0; i < mesh.numTris(); i++)
+      vort[i] = mesh.sheet(i).vorticitySmoothed;
+
+    for (int i = 0, idx = 0; i < mesh.numTris(); i++) {
+      // loop over adjacent tris
+      Real sum = 1.0f;
+      Vec3 v = vort[i];
+      for (int c = 0; c < 3; c++, idx++) {
+        // BUGFIX: weights is indexed by corner slot idx (= 3*i+c), not by the
+        // neighbor triangle id. The old weights[index[idx]] read an unrelated
+        // weight and, on boundary edges, defeated the zero-weight sentinel by
+        // blending vort[0] with an arbitrary coefficient.
+        Real w = weights[idx];
+        v += w * vort[index[idx]];
+        sum += w;
+      }
+      mesh.sheet(i).vorticitySmoothed = v / sum;
+    }
+  }
+  for (int i = 0; i < mesh.numTris(); i++)
+    mesh.sheet(i).vorticitySmoothed *= alpha;
+}
+// Generated Python binding for smoothVorticity(); returns None or raises on error.
+static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "smoothVorticity", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      VortexSheetMesh &mesh = *_args.getPtr<VortexSheetMesh>("mesh", 0, &_lock);
+      int iter = _args.getOpt<int>("iter", 1, 1, &_lock);
+      Real sigma = _args.getOpt<Real>("sigma", 2, 0.2, &_lock);
+      Real alpha = _args.getOpt<Real>("alpha", 3, 0.8, &_lock);
+      _retval = getPyNone();
+      smoothVorticity(mesh, iter, sigma, alpha);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "smoothVorticity", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("smoothVorticity", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Python-side plugin registry at static-init time.
+static const Pb::Register _RP_smoothVorticity("", "smoothVorticity", _W_4);
+extern "C" {
+void PbRegister_smoothVorticity()
+{
+  KEEP_UNUSED(_RP_smoothVorticity);
+}
+}
+
+//! Seed Vortex Particles inside shape with K41 characteristics
+// Seed vortex particles inside `shape` with a Kolmogorov-style size spectrum:
+// radii sigma are drawn by inverse-transform sampling between sigma0 and sigma1
+// with exponent -N+1, and vorticity magnitude scales as sigma^(-10/6 + N/2).
+// Each candidate cell seeds with probability `probability * dt` per call.
+void VPseedK41(VortexParticleSystem &system,
+               const Shape *shape,
+               Real strength = 0,
+               Real sigma0 = 0.2,
+               Real sigma1 = 1.0,
+               Real probability = 1.0,
+               Real N = 3.0)
+{
+  // temp grid only provides the FOR_IJK iteration domain
+  Grid<Real> temp(system.getParent());
+  const Real dt = system.getParent()->getDt();
+  // static stream: deterministic sequence shared across calls
+  static RandomStream rand(3489572);
+  Real s0 = pow((Real)sigma0, (Real)(-N + 1.0));
+  Real s1 = pow((Real)sigma1, (Real)(-N + 1.0));
+
+  FOR_IJK(temp)
+  {
+    if (shape->isInsideGrid(i, j, k)) {
+      if (rand.getReal() < probability * dt) {
+        Real p = rand.getReal();
+        // inverse-transform sample of the power-law size distribution
+        Real sigma = pow((1.0 - p) * s0 + p * s1, 1. / (-N + 1.0));
+        Vec3 randDir(rand.getReal(), rand.getReal(), rand.getReal());
+        // random position within the cell
+        Vec3 posUpd(i + rand.getReal(), j + rand.getReal(), k + rand.getReal());
+        normalize(randDir);
+        Vec3 vorticity = randDir * strength * pow((Real)sigma, (Real)(-10. / 6. + N / 2.0));
+        system.add(VortexParticleData(posUpd, vorticity, sigma));
+      }
+    }
+  }
+}
+// Generated Python binding for VPseedK41(); returns None or raises on error.
+static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+  try {
+    PbArgs _args(_linargs, _kwds);
+    FluidSolver *parent = _args.obtainParent();
+    bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+    pbPreparePlugin(parent, "VPseedK41", !noTiming);
+    PyObject *_retval = 0;
+    {
+      ArgLocker _lock;
+      VortexParticleSystem &system = *_args.getPtr<VortexParticleSystem>("system", 0, &_lock);
+      const Shape *shape = _args.getPtr<Shape>("shape", 1, &_lock);
+      Real strength = _args.getOpt<Real>("strength", 2, 0, &_lock);
+      Real sigma0 = _args.getOpt<Real>("sigma0", 3, 0.2, &_lock);
+      Real sigma1 = _args.getOpt<Real>("sigma1", 4, 1.0, &_lock);
+      Real probability = _args.getOpt<Real>("probability", 5, 1.0, &_lock);
+      Real N = _args.getOpt<Real>("N", 6, 3.0, &_lock);
+      _retval = getPyNone();
+      VPseedK41(system, shape, strength, sigma0, sigma1, probability, N);
+      _args.check();
+    }
+    pbFinalizePlugin(parent, "VPseedK41", !noTiming);
+    return _retval;
+  }
+  catch (std::exception &e) {
+    pbSetError("VPseedK41", e.what());
+    return 0;
+  }
+}
+// Register the wrapper with the Python-side plugin registry at static-init time.
+static const Pb::Register _RP_VPseedK41("", "VPseedK41", _W_5);
+extern "C" {
+void PbRegister_VPseedK41()
+{
+  KEEP_UNUSED(_RP_VPseedK41);
+}
+}
+
+//! Vortex-in-cell integration
+
+void VICintegration(VortexSheetMesh &mesh,
+ Real sigma,
+ Grid<Vec3> &vel,
+ const FlagGrid &flags,
+ Grid<Vec3> *vorticity = NULL,
+ Real cgMaxIterFac = 1.5,
+ Real cgAccuracy = 1e-3,
+ Real scale = 0.01,
+ int precondition = 0)
+{
+
+ MuTime t0;
+ const Real fac = 16.0; // experimental factor to balance out regularization
+
+ // if no vort grid is given, use a temporary one
+ Grid<Vec3> vortTemp(mesh.getParent());
+ Grid<Vec3> &vort = (vorticity) ? (*vorticity) : (vortTemp);
+ vort.clear();
+
+ // map vorticity to grid using Peskin kernel
+ int sgi = ceil(sigma);
+ Real pkfac = M_PI / sigma;
+ const int numTris = mesh.numTris();
+ for (int t = 0; t < numTris; t++) {
+ Vec3 pos = mesh.getFaceCenter(t);
+ Vec3 v = mesh.sheet(t).vorticity * mesh.getFaceArea(t) * fac;
+
+ // inner kernel
+ // first, summate
+ Real sum = 0;
+ for (int i = -sgi; i < sgi; i++) {
+ if (pos.x + i < 0 || (int)pos.x + i >= vort.getSizeX())
+ continue;
+ for (int j = -sgi; j < sgi; j++) {
+ if (pos.y + j < 0 || (int)pos.y + j >= vort.getSizeY())
+ continue;
+ for (int k = -sgi; k < sgi; k++) {
+ if (pos.z + k < 0 || (int)pos.z + k >= vort.getSizeZ())
+ continue;
+ Vec3i cell(pos.x + i, pos.y + j, pos.z + k);
+ if (!flags.isFluid(cell))
+ continue;
+ Vec3 d = pos -
+ Vec3(i + 0.5 + floor(pos.x), j + 0.5 + floor(pos.y), k + 0.5 + floor(pos.z));
+ Real dl = norm(d);
+ if (dl > sigma)
+ continue;
+ // precalc Peskin kernel
+ sum += 1.0 + cos(dl * pkfac);
+ }
+ }
+ }
+ // then, apply normalized kernel
+ Real wnorm = 1.0 / sum;
+ for (int i = -sgi; i < sgi; i++) {
+ if (pos.x + i < 0 || (int)pos.x + i >= vort.getSizeX())
+ continue;
+ for (int j = -sgi; j < sgi; j++) {
+ if (pos.y + j < 0 || (int)pos.y + j >= vort.getSizeY())
+ continue;
+ for (int k = -sgi; k < sgi; k++) {
+ if (pos.z + k < 0 || (int)pos.z + k >= vort.getSizeZ())
+ continue;
+ Vec3i cell(pos.x + i, pos.y + j, pos.z + k);
+ if (!flags.isFluid(cell))
+ continue;
+ Vec3 d = pos -
+ Vec3(i + 0.5 + floor(pos.x), j + 0.5 + floor(pos.y), k + 0.5 + floor(pos.z));
+ Real dl = norm(d);
+ if (dl > sigma)
+ continue;
+ Real w = (1.0 + cos(dl * pkfac)) * wnorm;
+ vort(cell) += v * w;
+ }
+ }
+ }
+ }
+
+ // Prepare grids for poisson solve
+ Grid<Vec3> vortexCurl(mesh.getParent());
+ Grid<Real> rhs(mesh.getParent());
+ Grid<Real> solution(mesh.getParent());
+ Grid<Real> residual(mesh.getParent());
+ Grid<Real> search(mesh.getParent());
+ Grid<Real> temp1(mesh.getParent());
+ Grid<Real> A0(mesh.getParent());
+ Grid<Real> Ai(mesh.getParent());
+ Grid<Real> Aj(mesh.getParent());
+ Grid<Real> Ak(mesh.getParent());
+ Grid<Real> pca0(mesh.getParent());
+ Grid<Real> pca1(mesh.getParent());
+ Grid<Real> pca2(mesh.getParent());
+ Grid<Real> pca3(mesh.getParent());
+
+ MakeLaplaceMatrix(flags, A0, Ai, Aj, Ak);
+ CurlOp(vort, vortexCurl);
+
+ // Solve vector poisson equation
+ for (int c = 0; c < 3; c++) {
+ // construct rhs
+ if (vel.getType() & GridBase::TypeMAC)
+ GetShiftedComponent(vortexCurl, rhs, c);
+ else
+ GetComponent(vortexCurl, rhs, c);
+
+ // prepare CG solver
+ const int maxIter = (int)(cgMaxIterFac * vel.getSize().max());
+ GridCgInterface *gcg = new GridCg<ApplyMatrix>(
+ solution, rhs, residual, search, flags, temp1, &A0, &Ai, &Aj, &Ak);
+ gcg->setAccuracy(cgAccuracy);
+ gcg->setUseL2Norm(true);
+ gcg->setICPreconditioner(
+ (GridCgInterface::PreconditionType)precondition, &pca0, &pca1, &pca2, &pca3);
+
+ // iterations
+ for (int iter = 0; iter < maxIter; iter++) {
+ if (!gcg->iterate())
+ iter = maxIter;
+ }
+ debMsg("VICintegration CG iterations:" << gcg->getIterations() << ", res:" << gcg->getSigma(),
+ 1);
+ delete gcg;
+
+ // copy back
+ solution *= scale;
+ SetComponent(vel, solution, c);
+ }
+}
+static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "VICintegration", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ VortexSheetMesh &mesh = *_args.getPtr<VortexSheetMesh>("mesh", 0, &_lock);
+ Real sigma = _args.get<Real>("sigma", 1, &_lock);
+ Grid<Vec3> &vel = *_args.getPtr<Grid<Vec3>>("vel", 2, &_lock);
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 3, &_lock);
+ Grid<Vec3> *vorticity = _args.getPtrOpt<Grid<Vec3>>("vorticity", 4, NULL, &_lock);
+ Real cgMaxIterFac = _args.getOpt<Real>("cgMaxIterFac", 5, 1.5, &_lock);
+ Real cgAccuracy = _args.getOpt<Real>("cgAccuracy", 6, 1e-3, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 7, 0.01, &_lock);
+ int precondition = _args.getOpt<int>("precondition", 8, 0, &_lock);
+ _retval = getPyNone();
+ VICintegration(
+ mesh, sigma, vel, flags, vorticity, cgMaxIterFac, cgAccuracy, scale, precondition);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "VICintegration", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("VICintegration", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_VICintegration("", "VICintegration", _W_6);
+extern "C" {
+void PbRegister_VICintegration()
+{
+ KEEP_UNUSED(_RP_VICintegration);
+}
+}
+
+//! Obtain density field from levelset with linear gradient of size sigma over the interface
+void densityFromLevelset(const LevelsetGrid &phi,
+ Grid<Real> &density,
+ Real value = 1.0,
+ Real sigma = 1.0)
+{
+ FOR_IJK(phi)
+ {
+ // remove boundary
+ if (i < 2 || j < 2 || k < 2 || i >= phi.getSizeX() - 2 || j >= phi.getSizeY() - 2 ||
+ k >= phi.getSizeZ() - 2)
+ density(i, j, k) = 0;
+ else if (phi(i, j, k) < -sigma)
+ density(i, j, k) = value;
+ else if (phi(i, j, k) > sigma)
+ density(i, j, k) = 0;
+ else
+ density(i, j, k) = clamp(
+ (Real)(0.5 * value / sigma * (1.0 - phi(i, j, k))), (Real)0.0, value);
+ }
+}
+static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "densityFromLevelset", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const LevelsetGrid &phi = *_args.getPtr<LevelsetGrid>("phi", 0, &_lock);
+ Grid<Real> &density = *_args.getPtr<Grid<Real>>("density", 1, &_lock);
+ Real value = _args.getOpt<Real>("value", 2, 1.0, &_lock);
+ Real sigma = _args.getOpt<Real>("sigma", 3, 1.0, &_lock);
+ _retval = getPyNone();
+ densityFromLevelset(phi, density, value, sigma);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "densityFromLevelset", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("densityFromLevelset", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_densityFromLevelset("", "densityFromLevelset", _W_7);
+extern "C" {
+void PbRegister_densityFromLevelset()
+{
+ KEEP_UNUSED(_RP_densityFromLevelset);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/waveletturbulence.cpp b/extern/mantaflow/preprocessed/plugin/waveletturbulence.cpp
new file mode 100644
index 00000000000..9d3bdaa3f21
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/waveletturbulence.cpp
@@ -0,0 +1,1292 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Functions for calculating wavelet turbulence,
+ * plus helpers to compute vorticity, and strain rate magnitude
+ *
+ ******************************************************************************/
+
+#include "vectorbase.h"
+#include "shapes.h"
+#include "commonkernels.h"
+#include "noisefield.h"
+
+using namespace std;
+
+namespace Manta {
+
+//*****************************************************************************
+
+// first some fairly generic interpolation functions for grids with multiple sizes
+
+//! same as in grid.h , but takes an additional optional "desired" size
+inline void calcGridSizeFactorMod(
+ Vec3i s1, Vec3i s2, Vec3i optSize, Vec3 scale, Vec3 &sourceFactor, Vec3 &retOff)
+{
+ for (int c = 0; c < 3; c++) {
+ if (optSize[c] > 0) {
+ s2[c] = optSize[c];
+ }
+ }
+ sourceFactor = calcGridSizeFactor(s1, s2) / scale;
+ retOff = -retOff * sourceFactor + sourceFactor * 0.5;
+}
+
+void interpolateGrid(Grid<Real> &target,
+ const Grid<Real> &source,
+ Vec3 scale = Vec3(1.),
+ Vec3 offset = Vec3(0.),
+ Vec3i size = Vec3i(-1, -1, -1),
+ int orderSpace = 1)
+{
+ Vec3 sourceFactor(1.), off2 = offset;
+ calcGridSizeFactorMod(source.getSize(), target.getSize(), size, scale, sourceFactor, off2);
+
+ // a brief note on a mantaflow specialty: the target grid has to be the first argument here!
+ // the parent fluidsolver object is taken from the first grid, and it determines the size of the
+ // loop for the kernel call. as we're writing into target, it's important to loop exactly over
+ // all cells of the target grid... (note, when calling the plugin in python, it doesnt matter
+ // anymore).
+
+ // sourceFactor offset necessary to shift eval points by half a small cell width
+ knInterpolateGridTempl<Real>(target, source, sourceFactor, off2, orderSpace);
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "interpolateGrid", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &target = *_args.getPtr<Grid<Real>>("target", 0, &_lock);
+ const Grid<Real> &source = *_args.getPtr<Grid<Real>>("source", 1, &_lock);
+ Vec3 scale = _args.getOpt<Vec3>("scale", 2, Vec3(1.), &_lock);
+ Vec3 offset = _args.getOpt<Vec3>("offset", 3, Vec3(0.), &_lock);
+ Vec3i size = _args.getOpt<Vec3i>("size", 4, Vec3i(-1, -1, -1), &_lock);
+ int orderSpace = _args.getOpt<int>("orderSpace", 5, 1, &_lock);
+ _retval = getPyNone();
+ interpolateGrid(target, source, scale, offset, size, orderSpace);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "interpolateGrid", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("interpolateGrid", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_interpolateGrid("", "interpolateGrid", _W_0);
+extern "C" {
+void PbRegister_interpolateGrid()
+{
+ KEEP_UNUSED(_RP_interpolateGrid);
+}
+}
+
+void interpolateGridVec3(Grid<Vec3> &target,
+ const Grid<Vec3> &source,
+ Vec3 scale = Vec3(1.),
+ Vec3 offset = Vec3(0.),
+ Vec3i size = Vec3i(-1, -1, -1),
+ int orderSpace = 1)
+{
+ Vec3 sourceFactor(1.), off2 = offset;
+ calcGridSizeFactorMod(source.getSize(), target.getSize(), size, scale, sourceFactor, off2);
+ knInterpolateGridTempl<Vec3>(target, source, sourceFactor, off2, orderSpace);
+}
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "interpolateGridVec3", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Vec3> &target = *_args.getPtr<Grid<Vec3>>("target", 0, &_lock);
+ const Grid<Vec3> &source = *_args.getPtr<Grid<Vec3>>("source", 1, &_lock);
+ Vec3 scale = _args.getOpt<Vec3>("scale", 2, Vec3(1.), &_lock);
+ Vec3 offset = _args.getOpt<Vec3>("offset", 3, Vec3(0.), &_lock);
+ Vec3i size = _args.getOpt<Vec3i>("size", 4, Vec3i(-1, -1, -1), &_lock);
+ int orderSpace = _args.getOpt<int>("orderSpace", 5, 1, &_lock);
+ _retval = getPyNone();
+ interpolateGridVec3(target, source, scale, offset, size, orderSpace);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "interpolateGridVec3", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("interpolateGridVec3", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_interpolateGridVec3("", "interpolateGridVec3", _W_1);
+extern "C" {
+void PbRegister_interpolateGridVec3()
+{
+ KEEP_UNUSED(_RP_interpolateGridVec3);
+}
+}
+
+//! interpolate a mac velocity grid from one size to another size
+
+struct KnInterpolateMACGrid : public KernelBase {
+ KnInterpolateMACGrid(MACGrid &target,
+ const MACGrid &source,
+ const Vec3 &sourceFactor,
+ const Vec3 &off,
+ int orderSpace)
+ : KernelBase(&target, 0),
+ target(target),
+ source(source),
+ sourceFactor(sourceFactor),
+ off(off),
+ orderSpace(orderSpace)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ MACGrid &target,
+ const MACGrid &source,
+ const Vec3 &sourceFactor,
+ const Vec3 &off,
+ int orderSpace) const
+ {
+ Vec3 pos = Vec3(i, j, k) * sourceFactor + off;
+
+ Real vx = source.getInterpolatedHi(pos - Vec3(0.5, 0, 0), orderSpace)[0];
+ Real vy = source.getInterpolatedHi(pos - Vec3(0, 0.5, 0), orderSpace)[1];
+ Real vz = 0.f;
+ if (source.is3D())
+ vz = source.getInterpolatedHi(pos - Vec3(0, 0, 0.5), orderSpace)[2];
+
+ target(i, j, k) = Vec3(vx, vy, vz);
+ }
+ inline MACGrid &getArg0()
+ {
+ return target;
+ }
+ typedef MACGrid type0;
+ inline const MACGrid &getArg1()
+ {
+ return source;
+ }
+ typedef MACGrid type1;
+ inline const Vec3 &getArg2()
+ {
+ return sourceFactor;
+ }
+ typedef Vec3 type2;
+ inline const Vec3 &getArg3()
+ {
+ return off;
+ }
+ typedef Vec3 type3;
+ inline int &getArg4()
+ {
+ return orderSpace;
+ }
+ typedef int type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnInterpolateMACGrid ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, target, source, sourceFactor, off, orderSpace);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, target, source, sourceFactor, off, orderSpace);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ MACGrid &target;
+ const MACGrid &source;
+ const Vec3 &sourceFactor;
+ const Vec3 &off;
+ int orderSpace;
+};
+
+void interpolateMACGrid(MACGrid &target,
+ const MACGrid &source,
+ Vec3 scale = Vec3(1.),
+ Vec3 offset = Vec3(0.),
+ Vec3i size = Vec3i(-1, -1, -1),
+ int orderSpace = 1)
+{
+ Vec3 sourceFactor(1.), off2 = offset;
+ calcGridSizeFactorMod(source.getSize(), target.getSize(), size, scale, sourceFactor, off2);
+ KnInterpolateMACGrid(target, source, sourceFactor, off2, orderSpace);
+}
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "interpolateMACGrid", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ MACGrid &target = *_args.getPtr<MACGrid>("target", 0, &_lock);
+ const MACGrid &source = *_args.getPtr<MACGrid>("source", 1, &_lock);
+ Vec3 scale = _args.getOpt<Vec3>("scale", 2, Vec3(1.), &_lock);
+ Vec3 offset = _args.getOpt<Vec3>("offset", 3, Vec3(0.), &_lock);
+ Vec3i size = _args.getOpt<Vec3i>("size", 4, Vec3i(-1, -1, -1), &_lock);
+ int orderSpace = _args.getOpt<int>("orderSpace", 5, 1, &_lock);
+ _retval = getPyNone();
+ interpolateMACGrid(target, source, scale, offset, size, orderSpace);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "interpolateMACGrid", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("interpolateMACGrid", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_interpolateMACGrid("", "interpolateMACGrid", _W_2);
+extern "C" {
+void PbRegister_interpolateMACGrid()
+{
+ KEEP_UNUSED(_RP_interpolateMACGrid);
+}
+}
+
+//*****************************************************************************
+
+//! Apply vector noise to grid, this is a simplified version - no position scaling or UVs
+
+struct knApplySimpleNoiseVec3 : public KernelBase {
+ knApplySimpleNoiseVec3(const FlagGrid &flags,
+ Grid<Vec3> &target,
+ const WaveletNoiseField &noise,
+ Real scale,
+ const Grid<Real> *weight)
+ : KernelBase(&flags, 0),
+ flags(flags),
+ target(target),
+ noise(noise),
+ scale(scale),
+ weight(weight)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ Grid<Vec3> &target,
+ const WaveletNoiseField &noise,
+ Real scale,
+ const Grid<Real> *weight) const
+ {
+ if (!flags.isFluid(i, j, k))
+ return;
+ Real factor = 1;
+ if (weight)
+ factor = (*weight)(i, j, k);
+ target(i, j, k) += noise.evaluateCurl(Vec3(i, j, k) + Vec3(0.5)) * scale * factor;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<Vec3> &getArg1()
+ {
+ return target;
+ }
+ typedef Grid<Vec3> type1;
+ inline const WaveletNoiseField &getArg2()
+ {
+ return noise;
+ }
+ typedef WaveletNoiseField type2;
+ inline Real &getArg3()
+ {
+ return scale;
+ }
+ typedef Real type3;
+ inline const Grid<Real> *getArg4()
+ {
+ return weight;
+ }
+ typedef Grid<Real> type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel knApplySimpleNoiseVec3 ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, target, noise, scale, weight);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, target, noise, scale, weight);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<Vec3> &target;
+ const WaveletNoiseField &noise;
+ Real scale;
+ const Grid<Real> *weight;
+};
+
+void applySimpleNoiseVec3(const FlagGrid &flags,
+ Grid<Vec3> &target,
+ const WaveletNoiseField &noise,
+ Real scale = 1.0,
+ const Grid<Real> *weight = NULL)
+{
+ // note - passing a MAC grid here is slightly inaccurate, we should evaluate each component
+ // separately
+ knApplySimpleNoiseVec3(flags, target, noise, scale, weight);
+}
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "applySimpleNoiseVec3", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Vec3> &target = *_args.getPtr<Grid<Vec3>>("target", 1, &_lock);
+ const WaveletNoiseField &noise = *_args.getPtr<WaveletNoiseField>("noise", 2, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 3, 1.0, &_lock);
+ const Grid<Real> *weight = _args.getPtrOpt<Grid<Real>>("weight", 4, NULL, &_lock);
+ _retval = getPyNone();
+ applySimpleNoiseVec3(flags, target, noise, scale, weight);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "applySimpleNoiseVec3", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("applySimpleNoiseVec3", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_applySimpleNoiseVec3("", "applySimpleNoiseVec3", _W_3);
+extern "C" {
+void PbRegister_applySimpleNoiseVec3()
+{
+ KEEP_UNUSED(_RP_applySimpleNoiseVec3);
+}
+}
+
+//! Simple noise for a real grid , follows applySimpleNoiseVec3
+
+struct knApplySimpleNoiseReal : public KernelBase {
+ knApplySimpleNoiseReal(const FlagGrid &flags,
+ Grid<Real> &target,
+ const WaveletNoiseField &noise,
+ Real scale,
+ const Grid<Real> *weight)
+ : KernelBase(&flags, 0),
+ flags(flags),
+ target(target),
+ noise(noise),
+ scale(scale),
+ weight(weight)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ Grid<Real> &target,
+ const WaveletNoiseField &noise,
+ Real scale,
+ const Grid<Real> *weight) const
+ {
+ if (!flags.isFluid(i, j, k))
+ return;
+ Real factor = 1;
+ if (weight)
+ factor = (*weight)(i, j, k);
+ target(i, j, k) += noise.evaluate(Vec3(i, j, k) + Vec3(0.5)) * scale * factor;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<Real> &getArg1()
+ {
+ return target;
+ }
+ typedef Grid<Real> type1;
+ inline const WaveletNoiseField &getArg2()
+ {
+ return noise;
+ }
+ typedef WaveletNoiseField type2;
+ inline Real &getArg3()
+ {
+ return scale;
+ }
+ typedef Real type3;
+ inline const Grid<Real> *getArg4()
+ {
+ return weight;
+ }
+ typedef Grid<Real> type4;
+ void runMessage()
+ {
+ debMsg("Executing kernel knApplySimpleNoiseReal ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, target, noise, scale, weight);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, target, noise, scale, weight);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<Real> &target;
+ const WaveletNoiseField &noise;
+ Real scale;
+ const Grid<Real> *weight;
+};
+
+void applySimpleNoiseReal(const FlagGrid &flags,
+ Grid<Real> &target,
+ const WaveletNoiseField &noise,
+ Real scale = 1.0,
+ const Grid<Real> *weight = NULL)
+{
+ knApplySimpleNoiseReal(flags, target, noise, scale, weight);
+}
+static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "applySimpleNoiseReal", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &target = *_args.getPtr<Grid<Real>>("target", 1, &_lock);
+ const WaveletNoiseField &noise = *_args.getPtr<WaveletNoiseField>("noise", 2, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 3, 1.0, &_lock);
+ const Grid<Real> *weight = _args.getPtrOpt<Grid<Real>>("weight", 4, NULL, &_lock);
+ _retval = getPyNone();
+ applySimpleNoiseReal(flags, target, noise, scale, weight);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "applySimpleNoiseReal", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("applySimpleNoiseReal", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_applySimpleNoiseReal("", "applySimpleNoiseReal", _W_4);
+extern "C" {
+void PbRegister_applySimpleNoiseReal()
+{
+ KEEP_UNUSED(_RP_applySimpleNoiseReal);
+}
+}
+
+//! Apply vector-based wavelet noise to target grid
+//! This is the version with more functionality - supports uv grids, and on-the-fly interpolation
+//! of input grids.
+
+struct knApplyNoiseVec3 : public KernelBase {
+ knApplyNoiseVec3(const FlagGrid &flags,
+ Grid<Vec3> &target,
+ const WaveletNoiseField &noise,
+ Real scale,
+ Real scaleSpatial,
+ const Grid<Real> *weight,
+ const Grid<Vec3> *uv,
+ bool uvInterpol,
+ const Vec3 &sourceFactor)
+ : KernelBase(&flags, 0),
+ flags(flags),
+ target(target),
+ noise(noise),
+ scale(scale),
+ scaleSpatial(scaleSpatial),
+ weight(weight),
+ uv(uv),
+ uvInterpol(uvInterpol),
+ sourceFactor(sourceFactor)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ Grid<Vec3> &target,
+ const WaveletNoiseField &noise,
+ Real scale,
+ Real scaleSpatial,
+ const Grid<Real> *weight,
+ const Grid<Vec3> *uv,
+ bool uvInterpol,
+ const Vec3 &sourceFactor) const
+ {
+ if (!flags.isFluid(i, j, k))
+ return;
+
+ // get weighting, interpolate if necessary
+ Real w = 1;
+ if (weight) {
+ if (!uvInterpol) {
+ w = (*weight)(i, j, k);
+ }
+ else {
+ w = weight->getInterpolated(Vec3(i, j, k) * sourceFactor);
+ }
+ }
+
+ // compute position where to evaluate the noise
+ Vec3 pos = Vec3(i, j, k) + Vec3(0.5);
+ if (uv) {
+ if (!uvInterpol) {
+ pos = (*uv)(i, j, k);
+ }
+ else {
+ pos = uv->getInterpolated(Vec3(i, j, k) * sourceFactor);
+ // uv coordinates are in local space - so we need to adjust the values of the positions
+ pos /= sourceFactor;
+ }
+ }
+ pos *= scaleSpatial;
+
+ Vec3 noiseVec3 = noise.evaluateCurl(pos) * scale * w;
+ // noiseVec3=pos; // debug , show interpolated positions
+ target(i, j, k) += noiseVec3;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<Vec3> &getArg1()
+ {
+ return target;
+ }
+ typedef Grid<Vec3> type1;
+ inline const WaveletNoiseField &getArg2()
+ {
+ return noise;
+ }
+ typedef WaveletNoiseField type2;
+ inline Real &getArg3()
+ {
+ return scale;
+ }
+ typedef Real type3;
+ inline Real &getArg4()
+ {
+ return scaleSpatial;
+ }
+ typedef Real type4;
+ inline const Grid<Real> *getArg5()
+ {
+ return weight;
+ }
+ typedef Grid<Real> type5;
+ inline const Grid<Vec3> *getArg6()
+ {
+ return uv;
+ }
+ typedef Grid<Vec3> type6;
+ inline bool &getArg7()
+ {
+ return uvInterpol;
+ }
+ typedef bool type7;
+ inline const Vec3 &getArg8()
+ {
+ return sourceFactor;
+ }
+ typedef Vec3 type8;
+ void runMessage()
+ {
+ debMsg("Executing kernel knApplyNoiseVec3 ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i,
+ j,
+ k,
+ flags,
+ target,
+ noise,
+ scale,
+ scaleSpatial,
+ weight,
+ uv,
+ uvInterpol,
+ sourceFactor);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i,
+ j,
+ k,
+ flags,
+ target,
+ noise,
+ scale,
+ scaleSpatial,
+ weight,
+ uv,
+ uvInterpol,
+ sourceFactor);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<Vec3> &target;
+ const WaveletNoiseField &noise;
+ Real scale;
+ Real scaleSpatial;
+ const Grid<Real> *weight;
+ const Grid<Vec3> *uv;
+ bool uvInterpol;
+ const Vec3 &sourceFactor;
+};
+
+void applyNoiseVec3(const FlagGrid &flags,
+ Grid<Vec3> &target,
+ const WaveletNoiseField &noise,
+ Real scale = 1.0,
+ Real scaleSpatial = 1.0,
+ const Grid<Real> *weight = NULL,
+ const Grid<Vec3> *uv = NULL)
+{
+ // check whether the uv grid has a different resolution
+ bool uvInterpol = false;
+ // and pre-compute conversion (only used if uvInterpol==true)
+ // used for both uv and weight grid...
+ Vec3 sourceFactor = Vec3(1.);
+ if (uv) {
+ uvInterpol = (target.getSize() != uv->getSize());
+ sourceFactor = calcGridSizeFactor(uv->getSize(), target.getSize());
+ }
+ else if (weight) {
+ uvInterpol = (target.getSize() != weight->getSize());
+ sourceFactor = calcGridSizeFactor(weight->getSize(), target.getSize());
+ }
+ if (uv && weight)
+ assertMsg(uv->getSize() == weight->getSize(), "UV and weight grid have to match!");
+
+ // note - passing a MAC grid here is slightly inaccurate, we should evaluate each component
+ // separately
+ knApplyNoiseVec3(
+ flags, target, noise, scale, scaleSpatial, weight, uv, uvInterpol, sourceFactor);
+}
+static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "applyNoiseVec3", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Vec3> &target = *_args.getPtr<Grid<Vec3>>("target", 1, &_lock);
+ const WaveletNoiseField &noise = *_args.getPtr<WaveletNoiseField>("noise", 2, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 3, 1.0, &_lock);
+ Real scaleSpatial = _args.getOpt<Real>("scaleSpatial", 4, 1.0, &_lock);
+ const Grid<Real> *weight = _args.getPtrOpt<Grid<Real>>("weight", 5, NULL, &_lock);
+ const Grid<Vec3> *uv = _args.getPtrOpt<Grid<Vec3>>("uv", 6, NULL, &_lock);
+ _retval = getPyNone();
+ applyNoiseVec3(flags, target, noise, scale, scaleSpatial, weight, uv);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "applyNoiseVec3", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("applyNoiseVec3", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_applyNoiseVec3("", "applyNoiseVec3", _W_5);
+extern "C" {
+void PbRegister_applyNoiseVec3()
+{
+ KEEP_UNUSED(_RP_applyNoiseVec3);
+}
+}
+
+//! Compute energy of a staggered velocity field (at cell center)
+
+struct KnApplyComputeEnergy : public KernelBase {
+ KnApplyComputeEnergy(const FlagGrid &flags, const MACGrid &vel, Grid<Real> &energy)
+ : KernelBase(&flags, 0), flags(flags), vel(vel), energy(energy)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(
+ int i, int j, int k, const FlagGrid &flags, const MACGrid &vel, Grid<Real> &energy) const
+ {
+ Real e = 0.f;
+ if (flags.isFluid(i, j, k)) {
+ Vec3 v = vel.getCentered(i, j, k);
+ e = 0.5 * (v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
+ }
+ energy(i, j, k) = e;
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline const MACGrid &getArg1()
+ {
+ return vel;
+ }
+ typedef MACGrid type1;
+ inline Grid<Real> &getArg2()
+ {
+ return energy;
+ }
+ typedef Grid<Real> type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnApplyComputeEnergy ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, vel, energy);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, flags, vel, energy);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ const FlagGrid &flags;
+ const MACGrid &vel;
+ Grid<Real> &energy;
+};
+
+void computeEnergy(const FlagGrid &flags, const MACGrid &vel, Grid<Real> &energy)
+{
+ KnApplyComputeEnergy(flags, vel, energy);
+}
+static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "computeEnergy", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 1, &_lock);
+ Grid<Real> &energy = *_args.getPtr<Grid<Real>>("energy", 2, &_lock);
+ _retval = getPyNone();
+ computeEnergy(flags, vel, energy);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "computeEnergy", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("computeEnergy", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_computeEnergy("", "computeEnergy", _W_6);
+extern "C" {
+void PbRegister_computeEnergy()
+{
+ KEEP_UNUSED(_RP_computeEnergy);
+}
+}
+
+void computeWaveletCoeffs(Grid<Real> &input)
+{
+ Grid<Real> temp1(input.getParent()), temp2(input.getParent());
+ WaveletNoiseField::computeCoefficients(input, temp1, temp2);
+}
+static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "computeWaveletCoeffs", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &input = *_args.getPtr<Grid<Real>>("input", 0, &_lock);
+ _retval = getPyNone();
+ computeWaveletCoeffs(input);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "computeWaveletCoeffs", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("computeWaveletCoeffs", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_computeWaveletCoeffs("", "computeWaveletCoeffs", _W_7);
+extern "C" {
+void PbRegister_computeWaveletCoeffs()
+{
+ KEEP_UNUSED(_RP_computeWaveletCoeffs);
+}
+}
+
+// note - almost the same as for vorticity confinement
+void computeVorticity(const MACGrid &vel, Grid<Vec3> &vorticity, Grid<Real> *norm = NULL)
+{
+ Grid<Vec3> velCenter(vel.getParent());
+ GetCentered(velCenter, vel);
+ CurlOp(velCenter, vorticity);
+ if (norm)
+ GridNorm(*norm, vorticity);
+}
+static PyObject *_W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "computeVorticity", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 0, &_lock);
+ Grid<Vec3> &vorticity = *_args.getPtr<Grid<Vec3>>("vorticity", 1, &_lock);
+ Grid<Real> *norm = _args.getPtrOpt<Grid<Real>>("norm", 2, NULL, &_lock);
+ _retval = getPyNone();
+ computeVorticity(vel, vorticity, norm);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "computeVorticity", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("computeVorticity", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_computeVorticity("", "computeVorticity", _W_8);
+extern "C" {
+void PbRegister_computeVorticity()
+{
+ KEEP_UNUSED(_RP_computeVorticity);
+}
+}
+
+// note - very similar to KnComputeProductionStrain, but for use as wavelet turb weighting
+
+struct KnComputeStrainRateMag : public KernelBase {
+ KnComputeStrainRateMag(const MACGrid &vel, const Grid<Vec3> &velCenter, Grid<Real> &prod)
+ : KernelBase(&vel, 1), vel(vel), velCenter(velCenter), prod(prod)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(
+ int i, int j, int k, const MACGrid &vel, const Grid<Vec3> &velCenter, Grid<Real> &prod) const
+ {
+ // compute Sij = 1/2 * (dU_i/dx_j + dU_j/dx_i)
+ Vec3 diag = Vec3(vel(i + 1, j, k).x, vel(i, j + 1, k).y, 0.) - vel(i, j, k);
+ if (vel.is3D())
+ diag[2] += vel(i, j, k + 1).z;
+ else
+ diag[2] = 0.;
+
+ Vec3 ux = 0.5 * (velCenter(i + 1, j, k) - velCenter(i - 1, j, k));
+ Vec3 uy = 0.5 * (velCenter(i, j + 1, k) - velCenter(i, j - 1, k));
+ Vec3 uz;
+ if (vel.is3D())
+ uz = 0.5 * (velCenter(i, j, k + 1) - velCenter(i, j, k - 1));
+
+ Real S12 = 0.5 * (ux.y + uy.x);
+ Real S13 = 0.5 * (ux.z + uz.x);
+ Real S23 = 0.5 * (uy.z + uz.y);
+ Real S2 = square(diag.x) + square(diag.y) + square(diag.z) + 2.0 * square(S12) +
+ 2.0 * square(S13) + 2.0 * square(S23);
+ prod(i, j, k) = S2;
+ }
+ inline const MACGrid &getArg0()
+ {
+ return vel;
+ }
+ typedef MACGrid type0;
+ inline const Grid<Vec3> &getArg1()
+ {
+ return velCenter;
+ }
+ typedef Grid<Vec3> type1;
+ inline Grid<Real> &getArg2()
+ {
+ return prod;
+ }
+ typedef Grid<Real> type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnComputeStrainRateMag ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, vel, velCenter, prod);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, vel, velCenter, prod);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const MACGrid &vel;
+ const Grid<Vec3> &velCenter;
+ Grid<Real> &prod;
+};
+void computeStrainRateMag(const MACGrid &vel, Grid<Real> &mag)
+{
+ Grid<Vec3> velCenter(vel.getParent());
+ GetCentered(velCenter, vel);
+ KnComputeStrainRateMag(vel, velCenter, mag);
+}
+static PyObject *_W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "computeStrainRateMag", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 0, &_lock);
+ Grid<Real> &mag = *_args.getPtr<Grid<Real>>("mag", 1, &_lock);
+ _retval = getPyNone();
+ computeStrainRateMag(vel, mag);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "computeStrainRateMag", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("computeStrainRateMag", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_computeStrainRateMag("", "computeStrainRateMag", _W_9);
+extern "C" {
+void PbRegister_computeStrainRateMag()
+{
+ KEEP_UNUSED(_RP_computeStrainRateMag);
+}
+}
+
+// extrapolate a real grid into a flagged region (based on initial flags)
+// by default extrapolates from fluid to obstacle cells
+template<class T>
+void extrapolSimpleFlagsHelper(const FlagGrid &flags,
+ Grid<T> &val,
+ int distance = 4,
+ int flagFrom = FlagGrid::TypeFluid,
+ int flagTo = FlagGrid::TypeObstacle)
+{
+ Grid<int> tmp(flags.getParent());
+ int dim = (flags.is3D() ? 3 : 2);
+ const Vec3i nb[6] = {Vec3i(1, 0, 0),
+ Vec3i(-1, 0, 0),
+ Vec3i(0, 1, 0),
+ Vec3i(0, -1, 0),
+ Vec3i(0, 0, 1),
+ Vec3i(0, 0, -1)};
+
+ // remove all fluid cells (set to 1)
+ tmp.clear();
+ bool foundTarget = false;
+ FOR_IJK_BND(flags, 0)
+ {
+ if (flags(i, j, k) & flagFrom)
+ tmp(Vec3i(i, j, k)) = 1;
+ if (!foundTarget && (flags(i, j, k) & flagTo))
+ foundTarget = true;
+ }
+ // optimization, skip extrapolation if we don't have any cells to extrapolate to
+ if (!foundTarget) {
+ debMsg("No target cells found, skipping extrapolation", 1);
+ return;
+ }
+
+ // extrapolate for given distance
+ for (int d = 1; d < 1 + distance; ++d) {
+
+ // TODO, parallelize
+ FOR_IJK_BND(flags, 1)
+ {
+ if (tmp(i, j, k) != 0)
+ continue;
+ if (!(flags(i, j, k) & flagTo))
+ continue;
+
+ // copy from initialized neighbors
+ Vec3i p(i, j, k);
+ int nbs = 0;
+ T avgVal = 0.;
+ for (int n = 0; n < 2 * dim; ++n) {
+ if (tmp(p + nb[n]) == d) {
+ avgVal += val(p + nb[n]);
+ nbs++;
+ }
+ }
+
+ if (nbs > 0) {
+ tmp(p) = d + 1;
+ val(p) = avgVal / nbs;
+ }
+ }
+
+ } // distance
+}
+
+void extrapolateSimpleFlags(const FlagGrid &flags,
+ GridBase *val,
+ int distance = 4,
+ int flagFrom = FlagGrid::TypeFluid,
+ int flagTo = FlagGrid::TypeObstacle)
+{
+ if (val->getType() & GridBase::TypeReal) {
+ extrapolSimpleFlagsHelper<Real>(flags, *((Grid<Real> *)val), distance, flagFrom, flagTo);
+ }
+ else if (val->getType() & GridBase::TypeInt) {
+ extrapolSimpleFlagsHelper<int>(flags, *((Grid<int> *)val), distance, flagFrom, flagTo);
+ }
+ else if (val->getType() & GridBase::TypeVec3) {
+ extrapolSimpleFlagsHelper<Vec3>(flags, *((Grid<Vec3> *)val), distance, flagFrom, flagTo);
+ }
+ else
+ errMsg("extrapolateSimpleFlags: Grid Type is not supported (only int, Real, Vec3)");
+}
+static PyObject *_W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "extrapolateSimpleFlags", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ GridBase *val = _args.getPtr<GridBase>("val", 1, &_lock);
+ int distance = _args.getOpt<int>("distance", 2, 4, &_lock);
+ int flagFrom = _args.getOpt<int>("flagFrom", 3, FlagGrid::TypeFluid, &_lock);
+ int flagTo = _args.getOpt<int>("flagTo", 4, FlagGrid::TypeObstacle, &_lock);
+ _retval = getPyNone();
+ extrapolateSimpleFlags(flags, val, distance, flagFrom, flagTo);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "extrapolateSimpleFlags", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("extrapolateSimpleFlags", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_extrapolateSimpleFlags("", "extrapolateSimpleFlags", _W_10);
+extern "C" {
+void PbRegister_extrapolateSimpleFlags()
+{
+ KEEP_UNUSED(_RP_extrapolateSimpleFlags);
+}
+}
+
+//! convert vel to a centered grid, then compute its curl
+void getCurl(const MACGrid &vel, Grid<Real> &vort, int comp)
+{
+ Grid<Vec3> velCenter(vel.getParent()), curl(vel.getParent());
+
+ GetCentered(velCenter, vel);
+ CurlOp(velCenter, curl);
+ GetComponent(curl, vort, comp);
+}
+static PyObject *_W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "getCurl", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const MACGrid &vel = *_args.getPtr<MACGrid>("vel", 0, &_lock);
+ Grid<Real> &vort = *_args.getPtr<Grid<Real>>("vort", 1, &_lock);
+ int comp = _args.get<int>("comp", 2, &_lock);
+ _retval = getPyNone();
+ getCurl(vel, vort, comp);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "getCurl", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("getCurl", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_getCurl("", "getCurl", _W_11);
+extern "C" {
+void PbRegister_getCurl()
+{
+ KEEP_UNUSED(_RP_getCurl);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/plugin/waves.cpp b/extern/mantaflow/preprocessed/plugin/waves.cpp
new file mode 100644
index 00000000000..7745dce4711
--- /dev/null
+++ b/extern/mantaflow/preprocessed/plugin/waves.cpp
@@ -0,0 +1,483 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Wave equation
+ *
+ ******************************************************************************/
+
+#include "levelset.h"
+#include "commonkernels.h"
+#include "particle.h"
+#include "conjugategrad.h"
+#include <cmath>
+
+using namespace std;
+
+namespace Manta {
+
+/******************************************************************************
+ *
+ * explicit integration
+ *
+ ******************************************************************************/
+
+struct knCalcSecDeriv2d : public KernelBase {
+ knCalcSecDeriv2d(const Grid<Real> &v, Grid<Real> &ret) : KernelBase(&v, 1), v(v), ret(ret)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, const Grid<Real> &v, Grid<Real> &ret) const
+ {
+ ret(i, j, k) = (-4. * v(i, j, k) + v(i - 1, j, k) + v(i + 1, j, k) + v(i, j - 1, k) +
+ v(i, j + 1, k));
+ }
+ inline const Grid<Real> &getArg0()
+ {
+ return v;
+ }
+ typedef Grid<Real> type0;
+ inline Grid<Real> &getArg1()
+ {
+ return ret;
+ }
+ typedef Grid<Real> type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel knCalcSecDeriv2d ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, v, ret);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, v, ret);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const Grid<Real> &v;
+ Grid<Real> &ret;
+};
+;
+
+//! calculate a second derivative for the wave equation
+void calcSecDeriv2d(const Grid<Real> &v, Grid<Real> &curv)
+{
+ knCalcSecDeriv2d(v, curv);
+}
+static PyObject *_W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "calcSecDeriv2d", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const Grid<Real> &v = *_args.getPtr<Grid<Real>>("v", 0, &_lock);
+ Grid<Real> &curv = *_args.getPtr<Grid<Real>>("curv", 1, &_lock);
+ _retval = getPyNone();
+ calcSecDeriv2d(v, curv);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "calcSecDeriv2d", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("calcSecDeriv2d", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_calcSecDeriv2d("", "calcSecDeriv2d", _W_0);
+extern "C" {
+void PbRegister_calcSecDeriv2d()
+{
+ KEEP_UNUSED(_RP_calcSecDeriv2d);
+}
+}
+
+// mass conservation
+
+struct knTotalSum : public KernelBase {
+ knTotalSum(Grid<Real> &h) : KernelBase(&h, 1), h(h), sum(0)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, Grid<Real> &h, double &sum)
+ {
+ sum += h(i, j, k);
+ }
+ inline operator double()
+ {
+ return sum;
+ }
+ inline double &getRet()
+ {
+ return sum;
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return h;
+ }
+ typedef Grid<Real> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel knTotalSum ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, h, sum);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, h, sum);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ knTotalSum(knTotalSum &o, tbb::split) : KernelBase(o), h(o.h), sum(0)
+ {
+ }
+ void join(const knTotalSum &o)
+ {
+ sum += o.sum;
+ }
+ Grid<Real> &h;
+ double sum;
+};
+
+//! calculate the sum of all values in a grid (for wave equation solves)
+Real totalSum(Grid<Real> &height)
+{
+ knTotalSum ts(height);
+ return ts.sum;
+}
+static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "totalSum", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &height = *_args.getPtr<Grid<Real>>("height", 0, &_lock);
+ _retval = toPy(totalSum(height));
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "totalSum", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("totalSum", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_totalSum("", "totalSum", _W_1);
+extern "C" {
+void PbRegister_totalSum()
+{
+ KEEP_UNUSED(_RP_totalSum);
+}
+}
+
+//! normalize all values in a grid (for wave equation solves)
+void normalizeSumTo(Grid<Real> &height, Real target)
+{
+ knTotalSum ts(height);
+ Real factor = target / ts.sum;
+ height.multConst(factor);
+}
+static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "normalizeSumTo", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Grid<Real> &height = *_args.getPtr<Grid<Real>>("height", 0, &_lock);
+ Real target = _args.get<Real>("target", 1, &_lock);
+ _retval = getPyNone();
+ normalizeSumTo(height, target);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "normalizeSumTo", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("normalizeSumTo", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_normalizeSumTo("", "normalizeSumTo", _W_2);
+extern "C" {
+void PbRegister_normalizeSumTo()
+{
+ KEEP_UNUSED(_RP_normalizeSumTo);
+}
+}
+
+/******************************************************************************
+ *
+ * implicit time integration
+ *
+ ******************************************************************************/
+
+//! Kernel: Construct the right-hand side of the poisson equation
+
+struct MakeRhsWE : public KernelBase {
+ MakeRhsWE(const FlagGrid &flags,
+ Grid<Real> &rhs,
+ const Grid<Real> &ut,
+ const Grid<Real> &utm1,
+ Real s,
+ bool crankNic = false)
+ : KernelBase(&flags, 1), flags(flags), rhs(rhs), ut(ut), utm1(utm1), s(s), crankNic(crankNic)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i,
+ int j,
+ int k,
+ const FlagGrid &flags,
+ Grid<Real> &rhs,
+ const Grid<Real> &ut,
+ const Grid<Real> &utm1,
+ Real s,
+ bool crankNic = false) const
+ {
+ rhs(i, j, k) = (2. * ut(i, j, k) - utm1(i, j, k));
+ if (crankNic) {
+ rhs(i, j, k) += s * (-4. * ut(i, j, k) + 1. * ut(i - 1, j, k) + 1. * ut(i + 1, j, k) +
+ 1. * ut(i, j - 1, k) + 1. * ut(i, j + 1, k));
+ }
+ }
+ inline const FlagGrid &getArg0()
+ {
+ return flags;
+ }
+ typedef FlagGrid type0;
+ inline Grid<Real> &getArg1()
+ {
+ return rhs;
+ }
+ typedef Grid<Real> type1;
+ inline const Grid<Real> &getArg2()
+ {
+ return ut;
+ }
+ typedef Grid<Real> type2;
+ inline const Grid<Real> &getArg3()
+ {
+ return utm1;
+ }
+ typedef Grid<Real> type3;
+ inline Real &getArg4()
+ {
+ return s;
+ }
+ typedef Real type4;
+ inline bool &getArg5()
+ {
+ return crankNic;
+ }
+ typedef bool type5;
+ void runMessage()
+ {
+ debMsg("Executing kernel MakeRhsWE ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 1; j < _maxY; j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, rhs, ut, utm1, s, crankNic);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 1; i < _maxX; i++)
+ op(i, j, k, flags, rhs, ut, utm1, s, crankNic);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(1, maxY), *this);
+ }
+ const FlagGrid &flags;
+ Grid<Real> &rhs;
+ const Grid<Real> &ut;
+ const Grid<Real> &utm1;
+ Real s;
+ bool crankNic;
+};
+
+//! do a CG solve for the wave equation (note, out grid only there for debugging... could be
+//! removed)
+
+void cgSolveWE(const FlagGrid &flags,
+ Grid<Real> &ut,
+ Grid<Real> &utm1,
+ Grid<Real> &out,
+ bool crankNic = false,
+ Real cSqr = 0.25,
+ Real cgMaxIterFac = 1.5,
+ Real cgAccuracy = 1e-5)
+{
+ // reserve temp grids
+ FluidSolver *parent = flags.getParent();
+ Grid<Real> rhs(parent);
+ Grid<Real> residual(parent);
+ Grid<Real> search(parent);
+ Grid<Real> A0(parent);
+ Grid<Real> Ai(parent);
+ Grid<Real> Aj(parent);
+ Grid<Real> Ak(parent);
+ Grid<Real> tmp(parent);
+ // solution...
+ out.clear();
+
+ // setup matrix and boundaries
+ MakeLaplaceMatrix(flags, A0, Ai, Aj, Ak);
+ Real dt = parent->getDt();
+ Real s = dt * dt * cSqr * 0.5;
+ FOR_IJK(flags)
+ {
+ Ai(i, j, k) *= s;
+ Aj(i, j, k) *= s;
+ Ak(i, j, k) *= s;
+ A0(i, j, k) *= s;
+ A0(i, j, k) += 1.;
+ }
+
+ // compute divergence and init right hand side
+ rhs.clear();
+ // h=dt
+ // rhs: = 2 ut - ut-1
+ // A: (h2 c2/ dx)=s , (1+4s)uij + s ui-1j + ...
+ // Cr.Nic.
+ // rhs: cr nic = 2 ut - ut-1 + h^2c^2/2 b
+ // A: (h2 c2/2 dx)=s , (1+4s)uij + s ui-1j + ...
+ MakeRhsWE kernMakeRhs(flags, rhs, ut, utm1, s, crankNic);
+
+ const int maxIter = (int)(cgMaxIterFac * flags.getSize().max()) * (flags.is3D() ? 1 : 4);
+ GridCgInterface *gcg;
+ if (flags.is3D())
+ gcg = new GridCg<ApplyMatrix>(out, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
+ else
+ gcg = new GridCg<ApplyMatrix2D>(out, rhs, residual, search, flags, tmp, &A0, &Ai, &Aj, &Ak);
+
+ gcg->setAccuracy(cgAccuracy);
+
+ // no preconditioning for now...
+ for (int iter = 0; iter < maxIter; iter++) {
+ if (!gcg->iterate())
+ iter = maxIter;
+ }
+ debMsg("cgSolveWaveEq iterations:" << gcg->getIterations() << ", res:" << gcg->getSigma(), 1);
+
+ utm1.swap(ut);
+ ut.copyFrom(out);
+
+ delete gcg;
+}
+static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+{
+ try {
+ PbArgs _args(_linargs, _kwds);
+ FluidSolver *parent = _args.obtainParent();
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(parent, "cgSolveWE", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ const FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &ut = *_args.getPtr<Grid<Real>>("ut", 1, &_lock);
+ Grid<Real> &utm1 = *_args.getPtr<Grid<Real>>("utm1", 2, &_lock);
+ Grid<Real> &out = *_args.getPtr<Grid<Real>>("out", 3, &_lock);
+ bool crankNic = _args.getOpt<bool>("crankNic", 4, false, &_lock);
+ Real cSqr = _args.getOpt<Real>("cSqr", 5, 0.25, &_lock);
+ Real cgMaxIterFac = _args.getOpt<Real>("cgMaxIterFac", 6, 1.5, &_lock);
+ Real cgAccuracy = _args.getOpt<Real>("cgAccuracy", 7, 1e-5, &_lock);
+ _retval = getPyNone();
+ cgSolveWE(flags, ut, utm1, out, crankNic, cSqr, cgMaxIterFac, cgAccuracy);
+ _args.check();
+ }
+ pbFinalizePlugin(parent, "cgSolveWE", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("cgSolveWE", e.what());
+ return 0;
+ }
+}
+static const Pb::Register _RP_cgSolveWE("", "cgSolveWE", _W_3);
+extern "C" {
+void PbRegister_cgSolveWE()
+{
+ KEEP_UNUSED(_RP_cgSolveWE);
+}
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/python/defines.py b/extern/mantaflow/preprocessed/python/defines.py
new file mode 100644
index 00000000000..1c7f01ab034
--- /dev/null
+++ b/extern/mantaflow/preprocessed/python/defines.py
@@ -0,0 +1,11 @@
+
+
+
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+
+
+
diff --git a/extern/mantaflow/preprocessed/python/defines.py.reg.cpp b/extern/mantaflow/preprocessed/python/defines.py.reg.cpp
new file mode 100644
index 00000000000..1866957534c
--- /dev/null
+++ b/extern/mantaflow/preprocessed/python/defines.py.reg.cpp
@@ -0,0 +1,24 @@
+#include "registry.h"
+static const Pb::Register _reg(
+ "python/defines.py",
+ "################################################################################\n#\n# "
+ "MantaFlow fluid solver framework\n# Copyright 2011 Tobias Pfaff, Nils Thuerey \n#\n# This "
+ "program is free software, distributed under the terms of the\n# Apache License, Version 2.0 "
+ "\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Defines some constants for use in "
+ "python "
+ "subprograms\n#\n#############################################################################"
+ "####\n\n# mantaflow conventions\nReal = float\n\n# some defines to make C code and scripts "
+ "more alike...\nfalse = False\ntrue = True\nVec3 = vec3\nVec4 = vec4\nVec3Grid = "
+ "VecGrid\n\n# grid flags\nFlagFluid = 1\nFlagObstacle = 2\nFlagEmpty = 4\nFlagInflow "
+ "= 8\nFlagOutflow = 16\nFlagStick = 64\nFlagReserved = 256\n# and same for "
+ "FlagGrid::CellType enum names:\nTypeFluid = 1\nTypeObstacle = 2\nTypeEmpty = "
+ "4\nTypeInflow = 8\nTypeOutflow = 16\nTypeStick = 64\nTypeReserved = 256\n\n# "
+ "integration mode\nIntEuler = 0\nIntRK2 = 1\nIntRK4 = 2\n\n# CG preconditioner\nPcNone "
+ " = 0\nPcMIC = 1\nPcMGDynamic = 2\nPcMGStatic = 3\n\n# particles\nPtypeSpray = "
+ "2\nPtypeBubble = 4\nPtypeFoam = 8\nPtypeTracer = 16\n\n\n\n\n");
+extern "C" {
+void PbRegister_file_0()
+{
+ KEEP_UNUSED(_reg);
+}
+} \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/registration.cpp b/extern/mantaflow/preprocessed/registration.cpp
new file mode 100644
index 00000000000..e86d19f7f7a
--- /dev/null
+++ b/extern/mantaflow/preprocessed/registration.cpp
@@ -0,0 +1,382 @@
+extern "C" {
+extern void PbRegister_mantaMsg();
+extern void PbRegister_printBuildInfo();
+extern void PbRegister_setDebugLevel();
+extern void PbRegister_assertNumpy();
+extern void PbRegister_cgSolveDiffusion();
+extern void PbRegister_gridMaxDiff();
+extern void PbRegister_gridMaxDiffInt();
+extern void PbRegister_gridMaxDiffVec3();
+extern void PbRegister_copyMacToVec3();
+extern void PbRegister_convertMacToVec3();
+extern void PbRegister_resampleVec3ToMac();
+extern void PbRegister_resampleMacToVec3();
+extern void PbRegister_copyLevelsetToReal();
+extern void PbRegister_copyVec3ToReal();
+extern void PbRegister_copyRealToVec3();
+extern void PbRegister_convertLevelsetToReal();
+extern void PbRegister_swapComponents();
+extern void PbRegister_getUvWeight();
+extern void PbRegister_resetUvGrid();
+extern void PbRegister_updateUvWeight();
+extern void PbRegister_getGridAvg();
+extern void PbRegister_getComponent();
+extern void PbRegister_setComponent();
+extern void PbRegister_markIsolatedFluidCell();
+extern void PbRegister_copyMACData();
+extern void PbRegister_getComp4d();
+extern void PbRegister_setComp4d();
+extern void PbRegister_grid4dMaxDiff();
+extern void PbRegister_grid4dMaxDiffInt();
+extern void PbRegister_grid4dMaxDiffVec3();
+extern void PbRegister_grid4dMaxDiffVec4();
+extern void PbRegister_setRegion4d();
+extern void PbRegister_setRegion4dVec4();
+extern void PbRegister_getSliceFrom4d();
+extern void PbRegister_getSliceFrom4dVec();
+extern void PbRegister_interpolateGrid4d();
+extern void PbRegister_interpolateGrid4dVec();
+extern void PbRegister_extrapolateMACSimple();
+extern void PbRegister_extrapolateMACFromWeight();
+extern void PbRegister_extrapolateLsSimple();
+extern void PbRegister_extrapolateVec3Simple();
+extern void PbRegister_getUniFileSize();
+extern void PbRegister_printUniFileInfoString();
+extern void PbRegister_getNpzFileSize();
+extern void PbRegister_quantizeGrid();
+extern void PbRegister_quantizeGridVec3();
+extern void PbRegister_resetPhiInObs();
+extern void PbRegister_advectSemiLagrange();
+extern void PbRegister_addGravity();
+extern void PbRegister_addGravityNoScale();
+extern void PbRegister_addBuoyancy();
+extern void PbRegister_setOpenBound();
+extern void PbRegister_resetOutflow();
+extern void PbRegister_setInflowBcs();
+extern void PbRegister_setWallBcs();
+extern void PbRegister_setInitialVelocity();
+extern void PbRegister_vorticityConfinement();
+extern void PbRegister_addForceField();
+extern void PbRegister_setForceField();
+extern void PbRegister_dissolveSmoke();
+extern void PbRegister_apicMapPartsToMAC();
+extern void PbRegister_apicMapMACGridToParts();
+extern void PbRegister_sampleFlagsWithParticles();
+extern void PbRegister_sampleLevelsetWithParticles();
+extern void PbRegister_sampleShapeWithParticles();
+extern void PbRegister_markFluidCells();
+extern void PbRegister_testInitGridWithPos();
+extern void PbRegister_adjustNumber();
+extern void PbRegister_debugIntToReal();
+extern void PbRegister_gridParticleIndex();
+extern void PbRegister_unionParticleLevelset();
+extern void PbRegister_averagedParticleLevelset();
+extern void PbRegister_improvedParticleLevelset();
+extern void PbRegister_pushOutofObs();
+extern void PbRegister_mapPartsToMAC();
+extern void PbRegister_mapPartsToGrid();
+extern void PbRegister_mapPartsToGridVec3();
+extern void PbRegister_mapGridToParts();
+extern void PbRegister_mapGridToPartsVec3();
+extern void PbRegister_mapMACToParts();
+extern void PbRegister_flipVelocityUpdate();
+extern void PbRegister_combineGridVel();
+extern void PbRegister_getLaplacian();
+extern void PbRegister_getCurvature();
+extern void PbRegister_processBurn();
+extern void PbRegister_updateFlame();
+extern void PbRegister_getSpiralVelocity();
+extern void PbRegister_setGradientYWeight();
+extern void PbRegister_PD_fluid_guiding();
+extern void PbRegister_releaseBlurPrecomp();
+extern void PbRegister_KEpsilonComputeProduction();
+extern void PbRegister_KEpsilonSources();
+extern void PbRegister_KEpsilonBcs();
+extern void PbRegister_KEpsilonGradientDiffusion();
+extern void PbRegister_densityInflow();
+extern void PbRegister_addNoise();
+extern void PbRegister_setNoisePdata();
+extern void PbRegister_setNoisePdataVec3();
+extern void PbRegister_setNoisePdataInt();
+extern void PbRegister_obstacleGradient();
+extern void PbRegister_obstacleLevelset();
+extern void PbRegister_applyEmission();
+extern void PbRegister_densityInflowMeshNoise();
+extern void PbRegister_densityInflowMesh();
+extern void PbRegister_checkSymmetry();
+extern void PbRegister_checkSymmetryVec3();
+extern void PbRegister_projectPpmFull();
+extern void PbRegister_addTestParts();
+extern void PbRegister_pdataMaxDiff();
+extern void PbRegister_calcCenterOfMass();
+extern void PbRegister_updateFractions();
+extern void PbRegister_setObstacleFlags();
+extern void PbRegister_initVortexVelocity();
+extern void PbRegister_blurMacGrid();
+extern void PbRegister_blurRealGrid();
+extern void PbRegister_smoothMesh();
+extern void PbRegister_subdivideMesh();
+extern void PbRegister_killSmallComponents();
+extern void PbRegister_releaseMG();
+extern void PbRegister_computePressureRhs();
+extern void PbRegister_solvePressureSystem();
+extern void PbRegister_correctVelocity();
+extern void PbRegister_solvePressure();
+extern void PbRegister_addForcePvel();
+extern void PbRegister_updateVelocityFromDeltaPos();
+extern void PbRegister_eulerStep();
+extern void PbRegister_setPartType();
+extern void PbRegister_flipComputeSecondaryParticlePotentials();
+extern void PbRegister_flipSampleSecondaryParticles();
+extern void PbRegister_flipUpdateSecondaryParticles();
+extern void PbRegister_flipDeleteParticlesInObstacle();
+extern void PbRegister_debugGridInfo();
+extern void PbRegister_setFlagsFromLevelset();
+extern void PbRegister_setMACFromLevelset();
+extern void PbRegister_flipComputePotentialTrappedAir();
+extern void PbRegister_flipComputePotentialKineticEnergy();
+extern void PbRegister_flipComputePotentialWaveCrest();
+extern void PbRegister_flipComputeSurfaceNormals();
+extern void PbRegister_flipUpdateNeighborRatio();
+extern void PbRegister_particleSurfaceTurbulence();
+extern void PbRegister_debugCheckParts();
+extern void PbRegister_markAsFixed();
+extern void PbRegister_texcoordInflow();
+extern void PbRegister_meshSmokeInflow();
+extern void PbRegister_vorticitySource();
+extern void PbRegister_smoothVorticity();
+extern void PbRegister_VPseedK41();
+extern void PbRegister_VICintegration();
+extern void PbRegister_densityFromLevelset();
+extern void PbRegister_interpolateGrid();
+extern void PbRegister_interpolateGridVec3();
+extern void PbRegister_interpolateMACGrid();
+extern void PbRegister_applySimpleNoiseVec3();
+extern void PbRegister_applySimpleNoiseReal();
+extern void PbRegister_applyNoiseVec3();
+extern void PbRegister_computeEnergy();
+extern void PbRegister_computeWaveletCoeffs();
+extern void PbRegister_computeVorticity();
+extern void PbRegister_computeStrainRateMag();
+extern void PbRegister_extrapolateSimpleFlags();
+extern void PbRegister_getCurl();
+extern void PbRegister_calcSecDeriv2d();
+extern void PbRegister_totalSum();
+extern void PbRegister_normalizeSumTo();
+extern void PbRegister_cgSolveWE();
+extern void PbRegister_file_0();
+extern void PbRegister_file_1();
+extern void PbRegister_file_2();
+extern void PbRegister_file_3();
+extern void PbRegister_file_4();
+extern void PbRegister_file_5();
+extern void PbRegister_file_6();
+extern void PbRegister_file_7();
+extern void PbRegister_file_8();
+extern void PbRegister_file_9();
+extern void PbRegister_file_10();
+extern void PbRegister_file_11();
+extern void PbRegister_file_12();
+extern void PbRegister_file_13();
+extern void PbRegister_file_14();
+extern void PbRegister_file_15();
+extern void PbRegister_file_16();
+extern void PbRegister_file_17();
+extern void PbRegister_file_18();
+extern void PbRegister_file_19();
+extern void PbRegister_file_20();
+extern void PbRegister_file_21();
+}
+
+namespace Pb {
+//! Force-links every generated registration stub. Each PbRegister_*()
+//! call touches a static Pb::Register object in its translation unit so
+//! the linker cannot dead-strip it; the call list mirrors the extern "C"
+//! declaration table above and is regenerated by the preprocessor.
+void MantaEnsureRegistration()
+{
+  PbRegister_mantaMsg();
+  PbRegister_printBuildInfo();
+  PbRegister_setDebugLevel();
+  PbRegister_assertNumpy();
+  PbRegister_cgSolveDiffusion();
+  PbRegister_gridMaxDiff();
+  PbRegister_gridMaxDiffInt();
+  PbRegister_gridMaxDiffVec3();
+  PbRegister_copyMacToVec3();
+  PbRegister_convertMacToVec3();
+  PbRegister_resampleVec3ToMac();
+  PbRegister_resampleMacToVec3();
+  PbRegister_copyLevelsetToReal();
+  PbRegister_copyVec3ToReal();
+  PbRegister_copyRealToVec3();
+  PbRegister_convertLevelsetToReal();
+  PbRegister_swapComponents();
+  PbRegister_getUvWeight();
+  PbRegister_resetUvGrid();
+  PbRegister_updateUvWeight();
+  PbRegister_getGridAvg();
+  PbRegister_getComponent();
+  PbRegister_setComponent();
+  PbRegister_markIsolatedFluidCell();
+  PbRegister_copyMACData();
+  PbRegister_getComp4d();
+  PbRegister_setComp4d();
+  PbRegister_grid4dMaxDiff();
+  PbRegister_grid4dMaxDiffInt();
+  PbRegister_grid4dMaxDiffVec3();
+  PbRegister_grid4dMaxDiffVec4();
+  PbRegister_setRegion4d();
+  PbRegister_setRegion4dVec4();
+  PbRegister_getSliceFrom4d();
+  PbRegister_getSliceFrom4dVec();
+  PbRegister_interpolateGrid4d();
+  PbRegister_interpolateGrid4dVec();
+  PbRegister_extrapolateMACSimple();
+  PbRegister_extrapolateMACFromWeight();
+  PbRegister_extrapolateLsSimple();
+  PbRegister_extrapolateVec3Simple();
+  PbRegister_getUniFileSize();
+  PbRegister_printUniFileInfoString();
+  PbRegister_getNpzFileSize();
+  PbRegister_quantizeGrid();
+  PbRegister_quantizeGridVec3();
+  PbRegister_resetPhiInObs();
+  PbRegister_advectSemiLagrange();
+  PbRegister_addGravity();
+  PbRegister_addGravityNoScale();
+  PbRegister_addBuoyancy();
+  PbRegister_setOpenBound();
+  PbRegister_resetOutflow();
+  PbRegister_setInflowBcs();
+  PbRegister_setWallBcs();
+  PbRegister_setInitialVelocity();
+  PbRegister_vorticityConfinement();
+  PbRegister_addForceField();
+  PbRegister_setForceField();
+  PbRegister_dissolveSmoke();
+  PbRegister_apicMapPartsToMAC();
+  PbRegister_apicMapMACGridToParts();
+  PbRegister_sampleFlagsWithParticles();
+  PbRegister_sampleLevelsetWithParticles();
+  PbRegister_sampleShapeWithParticles();
+  PbRegister_markFluidCells();
+  PbRegister_testInitGridWithPos();
+  PbRegister_adjustNumber();
+  PbRegister_debugIntToReal();
+  PbRegister_gridParticleIndex();
+  PbRegister_unionParticleLevelset();
+  PbRegister_averagedParticleLevelset();
+  PbRegister_improvedParticleLevelset();
+  PbRegister_pushOutofObs();
+  PbRegister_mapPartsToMAC();
+  PbRegister_mapPartsToGrid();
+  PbRegister_mapPartsToGridVec3();
+  PbRegister_mapGridToParts();
+  PbRegister_mapGridToPartsVec3();
+  PbRegister_mapMACToParts();
+  PbRegister_flipVelocityUpdate();
+  PbRegister_combineGridVel();
+  PbRegister_getLaplacian();
+  PbRegister_getCurvature();
+  PbRegister_processBurn();
+  PbRegister_updateFlame();
+  PbRegister_getSpiralVelocity();
+  PbRegister_setGradientYWeight();
+  PbRegister_PD_fluid_guiding();
+  PbRegister_releaseBlurPrecomp();
+  PbRegister_KEpsilonComputeProduction();
+  PbRegister_KEpsilonSources();
+  PbRegister_KEpsilonBcs();
+  PbRegister_KEpsilonGradientDiffusion();
+  PbRegister_densityInflow();
+  PbRegister_addNoise();
+  PbRegister_setNoisePdata();
+  PbRegister_setNoisePdataVec3();
+  PbRegister_setNoisePdataInt();
+  PbRegister_obstacleGradient();
+  PbRegister_obstacleLevelset();
+  PbRegister_applyEmission();
+  PbRegister_densityInflowMeshNoise();
+  PbRegister_densityInflowMesh();
+  PbRegister_checkSymmetry();
+  PbRegister_checkSymmetryVec3();
+  PbRegister_projectPpmFull();
+  PbRegister_addTestParts();
+  PbRegister_pdataMaxDiff();
+  PbRegister_calcCenterOfMass();
+  PbRegister_updateFractions();
+  PbRegister_setObstacleFlags();
+  PbRegister_initVortexVelocity();
+  PbRegister_blurMacGrid();
+  PbRegister_blurRealGrid();
+  PbRegister_smoothMesh();
+  PbRegister_subdivideMesh();
+  PbRegister_killSmallComponents();
+  PbRegister_releaseMG();
+  PbRegister_computePressureRhs();
+  PbRegister_solvePressureSystem();
+  PbRegister_correctVelocity();
+  PbRegister_solvePressure();
+  PbRegister_addForcePvel();
+  PbRegister_updateVelocityFromDeltaPos();
+  PbRegister_eulerStep();
+  PbRegister_setPartType();
+  PbRegister_flipComputeSecondaryParticlePotentials();
+  PbRegister_flipSampleSecondaryParticles();
+  PbRegister_flipUpdateSecondaryParticles();
+  PbRegister_flipDeleteParticlesInObstacle();
+  PbRegister_debugGridInfo();
+  PbRegister_setFlagsFromLevelset();
+  PbRegister_setMACFromLevelset();
+  PbRegister_flipComputePotentialTrappedAir();
+  PbRegister_flipComputePotentialKineticEnergy();
+  PbRegister_flipComputePotentialWaveCrest();
+  PbRegister_flipComputeSurfaceNormals();
+  PbRegister_flipUpdateNeighborRatio();
+  PbRegister_particleSurfaceTurbulence();
+  PbRegister_debugCheckParts();
+  PbRegister_markAsFixed();
+  PbRegister_texcoordInflow();
+  PbRegister_meshSmokeInflow();
+  PbRegister_vorticitySource();
+  PbRegister_smoothVorticity();
+  PbRegister_VPseedK41();
+  PbRegister_VICintegration();
+  PbRegister_densityFromLevelset();
+  PbRegister_interpolateGrid();
+  PbRegister_interpolateGridVec3();
+  PbRegister_interpolateMACGrid();
+  PbRegister_applySimpleNoiseVec3();
+  PbRegister_applySimpleNoiseReal();
+  PbRegister_applyNoiseVec3();
+  PbRegister_computeEnergy();
+  PbRegister_computeWaveletCoeffs();
+  PbRegister_computeVorticity();
+  PbRegister_computeStrainRateMag();
+  PbRegister_extrapolateSimpleFlags();
+  PbRegister_getCurl();
+  PbRegister_calcSecDeriv2d();
+  PbRegister_totalSum();
+  PbRegister_normalizeSumTo();
+  PbRegister_cgSolveWE();
+  PbRegister_file_0();
+  PbRegister_file_1();
+  PbRegister_file_2();
+  PbRegister_file_3();
+  PbRegister_file_4();
+  PbRegister_file_5();
+  PbRegister_file_6();
+  PbRegister_file_7();
+  PbRegister_file_8();
+  PbRegister_file_9();
+  PbRegister_file_10();
+  PbRegister_file_11();
+  PbRegister_file_12();
+  PbRegister_file_13();
+  PbRegister_file_14();
+  PbRegister_file_15();
+  PbRegister_file_16();
+  PbRegister_file_17();
+  PbRegister_file_18();
+  PbRegister_file_19();
+  PbRegister_file_20();
+  PbRegister_file_21();
+}
+}  // namespace Pb
diff --git a/extern/mantaflow/preprocessed/shapes.cpp b/extern/mantaflow/preprocessed/shapes.cpp
new file mode 100644
index 00000000000..4095758cbc0
--- /dev/null
+++ b/extern/mantaflow/preprocessed/shapes.cpp
@@ -0,0 +1,1010 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Shape classes
+ *
+ ******************************************************************************/
+
+#include "shapes.h"
+#include "commonkernels.h"
+#include "mesh.h"
+
+using namespace std;
+namespace Manta {
+
+//******************************************************************************
+// Shape class members
+
+//! Base constructor: attaches the shape to its solver and marks it untyped
+//! until a derived class sets mType.
+Shape::Shape(FluidSolver *parent) : PbClass(parent), mType(TypeNone)
+{
+}
+
+//! Allocate a fresh levelset grid on this shape's solver and fill it via
+//! the derived class's generateLevelset(). Returned by value.
+LevelsetGrid Shape::computeLevelset()
+{
+  // note - 3d check deactivated! TODO double check...
+  LevelsetGrid phi(getParent());
+  generateLevelset(phi);
+  return phi;
+}
+
+//! Base implementation: an untyped shape contains nothing; derived shapes
+//! (Box, Sphere, Cylinder, ...) override this with their own test.
+bool Shape::isInside(const Vec3 &pos) const
+{
+  return false;
+}
+
+//! Kernel: Apply a shape to a grid, setting value inside
+
+//! Generated TBB kernel (MantaFlow preprocessor): writes `value` into every
+//! grid cell whose center lies inside `shape`. Constructing the object
+//! immediately runs the kernel (runMessage() + run() in the constructor).
+template<class T> struct ApplyShapeToGrid : public KernelBase {
+  ApplyShapeToGrid(Grid<T> *grid, Shape *shape, T value, FlagGrid *respectFlags)
+      : KernelBase(grid, 0), grid(grid), shape(shape), value(value), respectFlags(respectFlags)
+  {
+    runMessage();
+    run();
+  }
+  // Per-cell body: skip obstacle cells when a flag grid is supplied.
+  inline void op(
+      int i, int j, int k, Grid<T> *grid, Shape *shape, T value, FlagGrid *respectFlags) const
+  {
+    if (respectFlags && respectFlags->isObstacle(i, j, k))
+      return;
+    if (shape->isInsideGrid(i, j, k))
+      (*grid)(i, j, k) = value;
+  }
+  // getArgN()/typeN accessors are required by the generated Python bindings.
+  inline Grid<T> *getArg0()
+  {
+    return grid;
+  }
+  typedef Grid<T> type0;
+  inline Shape *getArg1()
+  {
+    return shape;
+  }
+  typedef Shape type1;
+  inline T &getArg2()
+  {
+    return value;
+  }
+  typedef T type2;
+  inline FlagGrid *getArg3()
+  {
+    return respectFlags;
+  }
+  typedef FlagGrid type3;
+  void runMessage()
+  {
+    debMsg("Executing kernel ApplyShapeToGrid ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB worker: in 3D the range slices over k; in 2D (maxZ <= 1) it slices
+  // over j with k fixed to 0.
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    const int _maxX = maxX;
+    const int _maxY = maxY;
+    if (maxZ > 1) {
+      for (int k = __r.begin(); k != (int)__r.end(); k++)
+        for (int j = 0; j < _maxY; j++)
+          for (int i = 0; i < _maxX; i++)
+            op(i, j, k, grid, shape, value, respectFlags);
+    }
+    else {
+      const int k = 0;
+      for (int j = __r.begin(); j != (int)__r.end(); j++)
+        for (int i = 0; i < _maxX; i++)
+          op(i, j, k, grid, shape, value, respectFlags);
+    }
+  }
+  void run()
+  {
+    if (maxZ > 1)
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+    else
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+  }
+  Grid<T> *grid;
+  Shape *shape;
+  T value;
+  FlagGrid *respectFlags;
+};
+
+//! Kernel: Apply a shape to a grid, setting value inside (scaling by SDF value)
+
+//! Generated TBB kernel: like ApplyShapeToGrid, but blends `value` in over a
+//! band of the shape's signed-distance field `phi`. With p = phi - shift:
+//! p < -sigma writes `value`, -sigma <= p < sigma writes a linear ramp
+//! value * 0.5*(1 - p/sigma), and p >= sigma leaves the cell untouched.
+//! Runs immediately on construction.
+template<class T> struct ApplyShapeToGridSmooth : public KernelBase {
+  ApplyShapeToGridSmooth(
+      Grid<T> *grid, Grid<Real> &phi, Real sigma, Real shift, T value, FlagGrid *respectFlags)
+      : KernelBase(grid, 0),
+        grid(grid),
+        phi(phi),
+        sigma(sigma),
+        shift(shift),
+        value(value),
+        respectFlags(respectFlags)
+  {
+    runMessage();
+    run();
+  }
+  inline void op(int i,
+                 int j,
+                 int k,
+                 Grid<T> *grid,
+                 Grid<Real> &phi,
+                 Real sigma,
+                 Real shift,
+                 T value,
+                 FlagGrid *respectFlags) const
+  {
+    if (respectFlags && respectFlags->isObstacle(i, j, k))
+      return;
+    const Real p = phi(i, j, k) - shift;
+    if (p < -sigma)
+      (*grid)(i, j, k) = value;
+    else if (p < sigma)
+      (*grid)(i, j, k) = value * (0.5f * (1.0f - p / sigma));
+  }
+  // getArgN()/typeN accessors are required by the generated Python bindings.
+  inline Grid<T> *getArg0()
+  {
+    return grid;
+  }
+  typedef Grid<T> type0;
+  inline Grid<Real> &getArg1()
+  {
+    return phi;
+  }
+  typedef Grid<Real> type1;
+  inline Real &getArg2()
+  {
+    return sigma;
+  }
+  typedef Real type2;
+  inline Real &getArg3()
+  {
+    return shift;
+  }
+  typedef Real type3;
+  inline T &getArg4()
+  {
+    return value;
+  }
+  typedef T type4;
+  inline FlagGrid *getArg5()
+  {
+    return respectFlags;
+  }
+  typedef FlagGrid type5;
+  void runMessage()
+  {
+    debMsg("Executing kernel ApplyShapeToGridSmooth ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB worker: slices over k in 3D, over j in 2D (k fixed to 0).
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    const int _maxX = maxX;
+    const int _maxY = maxY;
+    if (maxZ > 1) {
+      for (int k = __r.begin(); k != (int)__r.end(); k++)
+        for (int j = 0; j < _maxY; j++)
+          for (int i = 0; i < _maxX; i++)
+            op(i, j, k, grid, phi, sigma, shift, value, respectFlags);
+    }
+    else {
+      const int k = 0;
+      for (int j = __r.begin(); j != (int)__r.end(); j++)
+        for (int i = 0; i < _maxX; i++)
+          op(i, j, k, grid, phi, sigma, shift, value, respectFlags);
+    }
+  }
+  void run()
+  {
+    if (maxZ > 1)
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+    else
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+  }
+  Grid<T> *grid;
+  Grid<Real> &phi;
+  Real sigma;
+  Real shift;
+  T value;
+  FlagGrid *respectFlags;
+};
+
+//! Kernel: Apply a shape to a MAC grid, setting value inside
+
+//! Generated TBB kernel: sets MAC-grid velocity components inside a shape.
+//! Each component is tested at its own face center (staggered layout): the
+//! x component at (i, j+0.5, k+0.5), y at (i+0.5, j, k+0.5) and z at
+//! (i+0.5, j+0.5, k). Runs immediately on construction.
+struct ApplyShapeToMACGrid : public KernelBase {
+  ApplyShapeToMACGrid(MACGrid *grid, Shape *shape, Vec3 value, FlagGrid *respectFlags)
+      : KernelBase(grid, 0), grid(grid), shape(shape), value(value), respectFlags(respectFlags)
+  {
+    runMessage();
+    run();
+  }
+  inline void op(
+      int i, int j, int k, MACGrid *grid, Shape *shape, Vec3 value, FlagGrid *respectFlags) const
+  {
+    if (respectFlags && respectFlags->isObstacle(i, j, k))
+      return;
+    if (shape->isInside(Vec3(i, j + 0.5, k + 0.5)))
+      (*grid)(i, j, k).x = value.x;
+    if (shape->isInside(Vec3(i + 0.5, j, k + 0.5)))
+      (*grid)(i, j, k).y = value.y;
+    if (shape->isInside(Vec3(i + 0.5, j + 0.5, k)))
+      (*grid)(i, j, k).z = value.z;
+  }
+  // getArgN()/typeN accessors are required by the generated Python bindings.
+  inline MACGrid *getArg0()
+  {
+    return grid;
+  }
+  typedef MACGrid type0;
+  inline Shape *getArg1()
+  {
+    return shape;
+  }
+  typedef Shape type1;
+  inline Vec3 &getArg2()
+  {
+    return value;
+  }
+  typedef Vec3 type2;
+  inline FlagGrid *getArg3()
+  {
+    return respectFlags;
+  }
+  typedef FlagGrid type3;
+  void runMessage()
+  {
+    debMsg("Executing kernel ApplyShapeToMACGrid ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB worker: slices over k in 3D, over j in 2D (k fixed to 0).
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    const int _maxX = maxX;
+    const int _maxY = maxY;
+    if (maxZ > 1) {
+      for (int k = __r.begin(); k != (int)__r.end(); k++)
+        for (int j = 0; j < _maxY; j++)
+          for (int i = 0; i < _maxX; i++)
+            op(i, j, k, grid, shape, value, respectFlags);
+    }
+    else {
+      const int k = 0;
+      for (int j = __r.begin(); j != (int)__r.end(); j++)
+        for (int i = 0; i < _maxX; i++)
+          op(i, j, k, grid, shape, value, respectFlags);
+    }
+  }
+  void run()
+  {
+    if (maxZ > 1)
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+    else
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+  }
+  MACGrid *grid;
+  Shape *shape;
+  Vec3 value;
+  FlagGrid *respectFlags;
+};
+
+//! Stamp this shape into `grid`, dispatching on the grid's runtime type.
+//! The fill value is fetched from the Python call arguments (_args, keyword
+//! "value") with the type matching the grid; hence the NOPYTHON guard.
+//! Cells marked as obstacles in `respectFlags` (if given) are left alone.
+//! Note: the MAC case is tested before TypeVec3 because a MACGrid carries
+//! both type bits.
+void Shape::applyToGrid(GridBase *grid, FlagGrid *respectFlags)
+{
+#if NOPYTHON != 1
+  if (grid->getType() & GridBase::TypeInt)
+    ApplyShapeToGrid<int>((Grid<int> *)grid, this, _args.get<int>("value"), respectFlags);
+  else if (grid->getType() & GridBase::TypeReal)
+    ApplyShapeToGrid<Real>((Grid<Real> *)grid, this, _args.get<Real>("value"), respectFlags);
+  else if (grid->getType() & GridBase::TypeMAC)
+    ApplyShapeToMACGrid((MACGrid *)grid, this, _args.get<Vec3>("value"), respectFlags);
+  else if (grid->getType() & GridBase::TypeVec3)
+    ApplyShapeToGrid<Vec3>((Grid<Vec3> *)grid, this, _args.get<Vec3>("value"), respectFlags);
+  else
+    errMsg("Shape::applyToGrid(): unknown grid type");
+#else
+  errMsg("Not yet supported...");
+#endif
+}
+
+//! Like applyToGrid(), but feathers the shape boundary: the shape's SDF is
+//! generated into a temporary grid and the value is ramped over a band of
+//! width 2*sigma around the (shift-offset) zero isoline. No MAC variant here.
+void Shape::applyToGridSmooth(GridBase *grid, Real sigma, Real shift, FlagGrid *respectFlags)
+{
+  Grid<Real> phi(grid->getParent());
+  generateLevelset(phi);
+
+#if NOPYTHON != 1
+  if (grid->getType() & GridBase::TypeInt)
+    ApplyShapeToGridSmooth<int>(
+        (Grid<int> *)grid, phi, sigma, shift, _args.get<int>("value"), respectFlags);
+  else if (grid->getType() & GridBase::TypeReal)
+    ApplyShapeToGridSmooth<Real>(
+        (Grid<Real> *)grid, phi, sigma, shift, _args.get<Real>("value"), respectFlags);
+  else if (grid->getType() & GridBase::TypeVec3)
+    ApplyShapeToGridSmooth<Vec3>(
+        (Grid<Vec3> *)grid, phi, sigma, shift, _args.get<Vec3>("value"), respectFlags);
+  else
+    errMsg("Shape::applyToGridSmooth(): unknown grid type");
+#else
+  errMsg("Not yet supported...");
+#endif
+}
+
+//! Push mesh nodes out of this shape: nodes closer than `margin` to the
+//! shape's SDF zero isoline are moved along the SDF gradient until they sit
+//! at the margin, and get flagged NfCollide|NfMarked.
+void Shape::collideMesh(Mesh &mesh)
+{
+  const Real margin = 0.2;
+
+  Grid<Real> phi(getParent());
+  Grid<Vec3> grad(getParent());
+  generateLevelset(phi);
+  GradientOp(grad, phi);
+
+  const int num = mesh.numNodes();
+  for (int i = 0; i < num; i++) {
+    // p aliases the node position, so each push below moves the point the
+    // next iteration samples - this is the fixed-point iteration.
+    const Vec3 &p = mesh.nodes(i).pos;
+    mesh.nodes(i).flags &= ~(Mesh::NfCollide | Mesh::NfMarked);
+    if (!phi.isInBounds(p, 1))
+      continue;
+
+    // At most 10 push-out steps per node.
+    for (int iter = 0; iter < 10; iter++) {
+      const Real dist = phi.getInterpolated(p);
+      if (dist < margin) {
+        Vec3 n = grad.getInterpolated(p);
+        normalize(n);
+        mesh.nodes(i).pos += (margin - dist) * n;
+        mesh.nodes(i).flags |= Mesh::NfCollide | Mesh::NfMarked;
+      }
+      else
+        break;
+    }
+  }
+}
+
+//******************************************************************************
+// Derived shape class members
+
+//! Axis-aligned box. Accepts either (center, size) - where `size` is the
+//! half-extent added/subtracted from the center - or explicit opposite
+//! corners (p0, p1). Exactly one of the two parameter pairs must be valid;
+//! otherwise an error is raised. Center/size takes precedence if both are
+//! given.
+Box::Box(FluidSolver *parent, Vec3 center, Vec3 p0, Vec3 p1, Vec3 size) : Shape(parent)
+{
+  mType = TypeBox;
+  if (center.isValid() && size.isValid()) {
+    mP0 = center - size;
+    mP1 = center + size;
+  }
+  else if (p0.isValid() && p1.isValid()) {
+    mP0 = p0;
+    mP1 = p1;
+  }
+  else
+    errMsg("Box: specify either p0,p1 or size,center");
+}
+
+//! A point is inside the box iff it lies within [mP0, mP1] on every axis
+//! (boundary inclusive).
+bool Box::isInside(const Vec3 &pos) const
+{
+  if (!(pos.x >= mP0.x && pos.x <= mP1.x))
+    return false;
+  if (!(pos.y >= mP0.y && pos.y <= mP1.y))
+    return false;
+  return pos.z >= mP0.z && pos.z <= mP1.z;
+}
+
+//! Append the box as 8 nodes and 12 triangles to `mesh`.
+//! Corner i uses the bitmask of i: bit0/1/2 select mP1 over mP0 for x/y/z.
+//! quadidx lists the 6 faces as quads (4 corner indices each); each quad is
+//! split into two triangles below.
+void Box::generateMesh(Mesh *mesh)
+{
+  const int quadidx[24] = {0, 4, 6, 2, 3, 7, 5, 1, 0, 1, 5, 4, 6, 7, 3, 2, 0, 2, 3, 1, 5, 7, 6, 4};
+  const int nodebase = mesh->numNodes();
+  int oldtri = mesh->numTris();
+  for (int i = 0; i < 8; i++) {
+    Node p;
+    p.flags = 0;
+    p.pos = mP0;
+    if (i & 1)
+      p.pos.x = mP1.x;
+    if (i & 2)
+      p.pos.y = mP1.y;
+    if (i & 4)
+      p.pos.z = mP1.z;
+    mesh->addNode(p);
+  }
+  for (int i = 0; i < 6; i++) {
+    mesh->addTri(Triangle(nodebase + quadidx[i * 4 + 0],
+                          nodebase + quadidx[i * 4 + 1],
+                          nodebase + quadidx[i * 4 + 3]));
+    mesh->addTri(Triangle(nodebase + quadidx[i * 4 + 1],
+                          nodebase + quadidx[i * 4 + 2],
+                          nodebase + quadidx[i * 4 + 3]));
+  }
+  // Only rebuild connectivity for the triangles added here.
+  mesh->rebuildCorners(oldtri, -1);
+  mesh->rebuildLookup(oldtri, -1);
+}
+
+//! Kernel: Analytic SDF for box shape
+struct BoxSDF : public KernelBase {
+ BoxSDF(Grid<Real> &phi, const Vec3 &p1, const Vec3 &p2)
+ : KernelBase(&phi, 0), phi(phi), p1(p1), p2(p2)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(int i, int j, int k, Grid<Real> &phi, const Vec3 &p1, const Vec3 &p2) const
+ {
+ const Vec3 p(i + 0.5, j + 0.5, k + 0.5);
+ if (p.x <= p2.x && p.x >= p1.x && p.y <= p2.y && p.y >= p1.y && p.z <= p2.z && p.z >= p1.z) {
+ // inside: minimal surface distance
+ Real mx = max(p.x - p2.x, p1.x - p.x);
+ Real my = max(p.y - p2.y, p1.y - p.y);
+ Real mz = max(p.z - p2.z, p1.z - p.z);
+ if (!phi.is3D())
+ mz = mx; // skip for 2d...
+ phi(i, j, k) = max(mx, max(my, mz));
+ }
+ else if (p.y <= p2.y && p.y >= p1.y && p.z <= p2.z && p.z >= p1.z) {
+ // outside plane X
+ phi(i, j, k) = max(p.x - p2.x, p1.x - p.x);
+ }
+ else if (p.x <= p2.x && p.x >= p1.x && p.z <= p2.z && p.z >= p1.z) {
+ // outside plane Y
+ phi(i, j, k) = max(p.y - p2.y, p1.y - p.y);
+ }
+ else if (p.x <= p2.x && p.x >= p1.x && p.y <= p2.y && p.y >= p1.y) {
+ // outside plane Z
+ phi(i, j, k) = max(p.z - p2.z, p1.z - p.z);
+ }
+ else if (p.x > p1.x && p.x < p2.x) {
+ // lines X
+ Real m1 = sqrt(square(p1.y - p.y) + square(p1.z - p.z));
+ Real m2 = sqrt(square(p2.y - p.y) + square(p1.z - p.z));
+ Real m3 = sqrt(square(p1.y - p.y) + square(p2.z - p.z));
+ Real m4 = sqrt(square(p2.y - p.y) + square(p2.z - p.z));
+ phi(i, j, k) = min(m1, min(m2, min(m3, m4)));
+ }
+ else if (p.y > p1.y && p.y < p2.y) {
+ // lines Y
+ Real m1 = sqrt(square(p1.x - p.x) + square(p1.z - p.z));
+ Real m2 = sqrt(square(p2.x - p.x) + square(p1.z - p.z));
+ Real m3 = sqrt(square(p1.x - p.x) + square(p2.z - p.z));
+ Real m4 = sqrt(square(p2.x - p.x) + square(p2.z - p.z));
+ phi(i, j, k) = min(m1, min(m2, min(m3, m4)));
+ }
+ else if (p.z > p1.x && p.z < p2.z) {
+ // lines Z
+ Real m1 = sqrt(square(p1.y - p.y) + square(p1.x - p.x));
+ Real m2 = sqrt(square(p2.y - p.y) + square(p1.x - p.x));
+ Real m3 = sqrt(square(p1.y - p.y) + square(p2.x - p.x));
+ Real m4 = sqrt(square(p2.y - p.y) + square(p2.x - p.x));
+ phi(i, j, k) = min(m1, min(m2, min(m3, m4)));
+ }
+ else {
+ // points
+ Real m = norm(p - Vec3(p1.x, p1.y, p1.z));
+ m = min(m, norm(p - Vec3(p1.x, p1.y, p2.z)));
+ m = min(m, norm(p - Vec3(p1.x, p2.y, p1.z)));
+ m = min(m, norm(p - Vec3(p1.x, p2.y, p2.z)));
+ m = min(m, norm(p - Vec3(p2.x, p1.y, p1.z)));
+ m = min(m, norm(p - Vec3(p2.x, p1.y, p2.z)));
+ m = min(m, norm(p - Vec3(p2.x, p2.y, p1.z)));
+ m = min(m, norm(p - Vec3(p2.x, p2.y, p2.z)));
+ phi(i, j, k) = m;
+ }
+ }
+ inline Grid<Real> &getArg0()
+ {
+ return phi;
+ }
+ typedef Grid<Real> type0;
+ inline const Vec3 &getArg1()
+ {
+ return p1;
+ }
+ typedef Vec3 type1;
+ inline const Vec3 &getArg2()
+ {
+ return p2;
+ }
+ typedef Vec3 type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel BoxSDF ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ const int _maxX = maxX;
+ const int _maxY = maxY;
+ if (maxZ > 1) {
+ for (int k = __r.begin(); k != (int)__r.end(); k++)
+ for (int j = 0; j < _maxY; j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, phi, p1, p2);
+ }
+ else {
+ const int k = 0;
+ for (int j = __r.begin(); j != (int)__r.end(); j++)
+ for (int i = 0; i < _maxX; i++)
+ op(i, j, k, phi, p1, p2);
+ }
+ }
+ void run()
+ {
+ if (maxZ > 1)
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+ else
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+ }
+ Grid<Real> &phi;
+ const Vec3 &p1;
+ const Vec3 &p2;
+};
+//! Fill `phi` with the box's analytic SDF (corners mP0/mP1).
+void Box::generateLevelset(Grid<Real> &phi)
+{
+  BoxSDF(phi, mP0, mP1);
+}
+
+//! Sphere (or axis-aligned ellipsoid when `scale` != (1,1,1)) centered at
+//! `center` with radius `radius`.
+Sphere::Sphere(FluidSolver *parent, Vec3 center, Real radius, Vec3 scale)
+    : Shape(parent), mCenter(center), mScale(scale), mRadius(radius)
+{
+  mType = TypeSphere;
+}
+
+//! Inside test in the scaled (ellipsoid) space: the offset from the center
+//! is divided component-wise by mScale before comparing against the radius.
+bool Sphere::isInside(const Vec3 &pos) const
+{
+  return normSquare((pos - mCenter) / mScale) <= mRadius * mRadius;
+}
+
+//! Local helper for Sphere::generateMesh: a triangle of three positions t[]
+//! plus, later, their node indices i[]. Note the constructor leaves i[]
+//! uninitialized on purpose - generateMesh fills it during the indexing pass.
+struct Tri {
+  Vec3 t[3];
+  int i[3];
+  Tri(Vec3 a, Vec3 b, Vec3 c)
+  {
+    t[0] = a;
+    t[1] = b;
+    t[2] = c;
+  }
+};
+//! Append a triangulated sphere to `mesh`: start from an octahedron,
+//! subdivide `iterations` times (each triangle -> 4, midpoints projected
+//! back onto the unit sphere), then deduplicate vertices and scale/offset
+//! into world space.
+void Sphere::generateMesh(Mesh *mesh)
+{
+  vector<Tri> tris;
+  const int iterations = 3;
+  int oldtri = mesh->numTris();
+
+  // start with octahedron
+  const Real d = sqrt(0.5);
+  Vec3 p[6] = {Vec3(0, 1, 0),
+               Vec3(0, -1, 0),
+               Vec3(-d, 0, -d),
+               Vec3(d, 0, -d),
+               Vec3(d, 0, d),
+               Vec3(-d, 0, d)};
+  tris.push_back(Tri(p[0], p[4], p[3]));
+  tris.push_back(Tri(p[0], p[5], p[4]));
+  tris.push_back(Tri(p[0], p[2], p[5]));
+  tris.push_back(Tri(p[0], p[3], p[2]));
+  tris.push_back(Tri(p[1], p[3], p[4]));
+  tris.push_back(Tri(p[1], p[4], p[5]));
+  tris.push_back(Tri(p[1], p[5], p[2]));
+  tris.push_back(Tri(p[1], p[2], p[3]));
+
+  // Bisect each edge and move to the surface of a unit sphere
+  for (int it = 0; it < iterations; it++) {
+    // Only subdivide triangles that existed before this pass.
+    int ntold = tris.size();
+    for (int i = 0; i < ntold; i++) {
+      Vec3 pa = 0.5 * (tris[i].t[0] + tris[i].t[1]);
+      Vec3 pb = 0.5 * (tris[i].t[1] + tris[i].t[2]);
+      Vec3 pc = 0.5 * (tris[i].t[2] + tris[i].t[0]);
+      normalize(pa);
+      normalize(pb);
+      normalize(pc);
+
+      // Three corner triangles are appended; the original becomes the
+      // center triangle (pa, pb, pc).
+      tris.push_back(Tri(tris[i].t[0], pa, pc));
+      tris.push_back(Tri(pa, tris[i].t[1], pb));
+      tris.push_back(Tri(pb, tris[i].t[2], pc));
+      tris[i].t[0] = pa;
+      tris[i].t[1] = pb;
+      tris[i].t[2] = pc;
+    }
+  }
+
+  // index + scale
+  vector<Vec3> nodes;
+  for (size_t i = 0; i < tris.size(); i++) {
+    for (int t = 0; t < 3; t++) {
+      Vec3 p = mCenter + tris[i].t[t] * mRadius * mScale;
+      // vector already there? (linear scan; O(n^2) overall, but the node
+      // count is small for the fixed 3 subdivision iterations)
+      int idx = nodes.size();
+      for (size_t j = 0; j < nodes.size(); j++) {
+        if (p == nodes[j]) {
+          idx = j;
+          break;
+        }
+      }
+      if (idx == (int)nodes.size())
+        nodes.push_back(p);
+      tris[i].i[t] = idx;
+    }
+  }
+
+  // add nodes and triangles to the mesh, offsetting indices by the
+  // pre-existing node count
+  const int ni = mesh->numNodes();
+  for (size_t i = 0; i < nodes.size(); i++) {
+    mesh->addNode(Node(nodes[i]));
+  }
+  for (size_t t = 0; t < tris.size(); t++)
+    mesh->addTri(Triangle(tris[t].i[0] + ni, tris[t].i[1] + ni, tris[t].i[2] + ni));
+
+  // Only rebuild connectivity for the triangles added here.
+  mesh->rebuildCorners(oldtri, -1);
+  mesh->rebuildLookup(oldtri, -1);
+}
+
+//! Generated TBB kernel: analytic SDF for a (scaled) sphere, evaluated at
+//! cell centers. Note: for non-uniform `scale` this is the exact SDF only
+//! in the scaled space, not a true Euclidean distance. Runs immediately on
+//! construction.
+struct SphereSDF : public KernelBase {
+  SphereSDF(Grid<Real> &phi, Vec3 center, Real radius, Vec3 scale)
+      : KernelBase(&phi, 0), phi(phi), center(center), radius(radius), scale(scale)
+  {
+    runMessage();
+    run();
+  }
+  inline void op(int i, int j, int k, Grid<Real> &phi, Vec3 center, Real radius, Vec3 scale) const
+  {
+    phi(i, j, k) = norm((Vec3(i + 0.5, j + 0.5, k + 0.5) - center) / scale) - radius;
+  }
+  // getArgN()/typeN accessors are required by the generated Python bindings.
+  inline Grid<Real> &getArg0()
+  {
+    return phi;
+  }
+  typedef Grid<Real> type0;
+  inline Vec3 &getArg1()
+  {
+    return center;
+  }
+  typedef Vec3 type1;
+  inline Real &getArg2()
+  {
+    return radius;
+  }
+  typedef Real type2;
+  inline Vec3 &getArg3()
+  {
+    return scale;
+  }
+  typedef Vec3 type3;
+  void runMessage()
+  {
+    debMsg("Executing kernel SphereSDF ", 3);
+    debMsg("Kernel range"
+               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+           4);
+  };
+  // TBB worker: slices over k in 3D, over j in 2D (k fixed to 0).
+  void operator()(const tbb::blocked_range<IndexInt> &__r) const
+  {
+    const int _maxX = maxX;
+    const int _maxY = maxY;
+    if (maxZ > 1) {
+      for (int k = __r.begin(); k != (int)__r.end(); k++)
+        for (int j = 0; j < _maxY; j++)
+          for (int i = 0; i < _maxX; i++)
+            op(i, j, k, phi, center, radius, scale);
+    }
+    else {
+      const int k = 0;
+      for (int j = __r.begin(); j != (int)__r.end(); j++)
+        for (int i = 0; i < _maxX; i++)
+          op(i, j, k, phi, center, radius, scale);
+    }
+  }
+  void run()
+  {
+    if (maxZ > 1)
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
+    else
+      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
+  }
+  Grid<Real> &phi;
+  Vec3 center;
+  Real radius;
+  Vec3 scale;
+};
//! Fill phi with the signed-distance field of this (scaled) sphere.
void Sphere::generateLevelset(Grid<Real> &phi)
{
  // The kernel runs inside its constructor (generated KernelBase pattern).
  SphereSDF(phi, mCenter, mRadius, mScale);
}
+
//! Cylinder centered at 'center' with cap radius 'radius' and half-axis 'z'.
//! NOTE(review): normalize() appears to normalize mZDir in place and return
//! the pre-normalization length, so mZ holds the half-height -- confirm
//! against vectorbase.h.
Cylinder::Cylinder(FluidSolver *parent, Vec3 center, Real radius, Vec3 z)
    : Shape(parent), mCenter(center), mRadius(radius)
{
  mType = TypeCylinder;
  mZDir = z;
  mZ = normalize(mZDir);
}
+
+bool Cylinder::isInside(const Vec3 &pos) const
+{
+ Real z = dot(pos - mCenter, mZDir);
+ if (fabs(z) > mZ)
+ return false;
+ Real r2 = normSquare(pos - mCenter) - square(z);
+ return r2 < square(mRadius);
+}
+
//! Triangulate the cylinder: a ring of N segments for the side plus fans to
//! the two cap-center nodes.
void Cylinder::generateMesh(Mesh *mesh)
{
  // generate coordinate system
  Vec3 x = getOrthogonalVector(mZDir) * mRadius;
  Vec3 y = cross(x, mZDir);
  Vec3 z = mZDir * mZ;  // half-axis vector (mZDir is unit length)
  int oldtri = mesh->numTris();

  // construct node ring: nodes are interleaved as (top, bottom) pairs,
  // so node 2*i is on the top rim and 2*i+1 on the bottom rim
  const int N = 20;
  const int base = mesh->numNodes();
  for (int i = 0; i < N; i++) {
    const Real phi = 2.0 * M_PI * (Real)i / (Real)N;
    Vec3 r = x * cos(phi) + y * sin(phi) + mCenter;
    mesh->addNode(Node(r + z));
    mesh->addNode(Node(r - z));
  }
  // top/bottom center: indices base+2N (top) and base+2N+1 (bottom)
  mesh->addNode(Node(mCenter + z));
  mesh->addNode(Node(mCenter - z));

  // connect with tris
  for (int i = 0; i < N; i++) {
    int cur = base + 2 * i;
    int next = base + 2 * ((i + 1) % N);  // wraps around at the last segment
    // outside
    mesh->addTri(Triangle(cur, next, cur + 1));
    mesh->addTri(Triangle(next, next + 1, cur + 1));
    // upper / lower
    mesh->addTri(Triangle(cur, base + 2 * N, next));
    mesh->addTri(Triangle(cur + 1, next + 1, base + 2 * N + 1));
  }

  // only rebuild connectivity for the triangles added above
  mesh->rebuildCorners(oldtri, -1);
  mesh->rebuildLookup(oldtri, -1);
}
+
//! Kernel: signed-distance field of a capped cylinder.
//! 'zaxis' is the unit axis direction, 'maxz' the half-height.
//! Generated KernelBase boilerplate (runs in constructor; getArgN/typeN are
//! consumed by the MantaFlow registry).
struct CylinderSDF : public KernelBase {
  CylinderSDF(Grid<Real> &phi, Vec3 center, Real radius, Vec3 zaxis, Real maxz)
      : KernelBase(&phi, 0), phi(phi), center(center), radius(radius), zaxis(zaxis), maxz(maxz)
  {
    runMessage();
    run();
  }
  // Distance is split into three regions: beside the side wall, above/below
  // a cap, and the outer edge ring (combined via Pythagoras).
  // NOTE(review): normSquare(p) - z*z can round slightly negative for points
  // on the axis; confirm sqrt() input stays non-negative in practice.
  inline void op(
      int i, int j, int k, Grid<Real> &phi, Vec3 center, Real radius, Vec3 zaxis, Real maxz) const
  {
    Vec3 p = Vec3(i + 0.5, j + 0.5, k + 0.5) - center;
    Real z = fabs(dot(p, zaxis));
    Real r = sqrt(normSquare(p) - z * z);
    if (z < maxz) {
      // cylinder z area
      if (r < radius)
        phi(i, j, k) = max(r - radius, z - maxz);  // inside: least-negative of the two
      else
        phi(i, j, k) = r - radius;
    }
    else if (r < radius) {
      // cylinder top area
      phi(i, j, k) = fabs(z - maxz);
    }
    else {
      // edge
      phi(i, j, k) = sqrt(square(z - maxz) + square(r - radius));
    }
  }
  inline Grid<Real> &getArg0()
  {
    return phi;
  }
  typedef Grid<Real> type0;
  inline Vec3 &getArg1()
  {
    return center;
  }
  typedef Vec3 type1;
  inline Real &getArg2()
  {
    return radius;
  }
  typedef Real type2;
  inline Vec3 &getArg3()
  {
    return zaxis;
  }
  typedef Vec3 type3;
  inline Real &getArg4()
  {
    return maxz;
  }
  typedef Real type4;
  void runMessage()
  {
    debMsg("Executing kernel CylinderSDF ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // TBB body: 3D grids split over k-slices, 2D grids over j-rows (k = 0).
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, phi, center, radius, zaxis, maxz);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, phi, center, radius, zaxis, maxz);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  Grid<Real> &phi;
  Vec3 center;
  Real radius;
  Vec3 zaxis;
  Real maxz;
};
//! Fill phi with the signed-distance field of this capped cylinder.
void Cylinder::generateLevelset(Grid<Real> &phi)
{
  // mZDir is the unit axis, mZ the half-height (see constructor).
  CylinderSDF(phi, mCenter, mRadius, mZDir, mZ);
}
+
//! Slope plane defined by two tilt angles (degrees), a y-axis offset 'origin'
//! and the grid size 'gs' used when meshing the plane.
Slope::Slope(FluidSolver *parent, Real anglexy, Real angleyz, Real origin, Vec3 gs)
    : Shape(parent), mAnglexy(anglexy), mAngleyz(angleyz), mOrigin(origin), mGs(gs)
{
  mType = TypeSlope;
}
+
+void Slope::generateMesh(Mesh *mesh)
+{
+
+ const int oldtri = mesh->numTris();
+
+ Vec3 v1(0., mOrigin, 0.);
+ mesh->addNode(Node(v1));
+
+ Real dy1 = mGs.z * std::tan(mAngleyz);
+ Vec3 v2(0., mOrigin - dy1, mGs.z);
+ mesh->addNode(Node(v2));
+
+ Real dy2 = mGs.x * std::tan(mAnglexy);
+ Vec3 v3(mGs.x, v2.y - dy2, mGs.z);
+ mesh->addNode(Node(v3));
+
+ Vec3 v4(mGs.x, mOrigin - dy2, 0.);
+ mesh->addNode(Node(v4));
+
+ mesh->addTri(Triangle(0, 1, 2));
+ mesh->addTri(Triangle(2, 3, 0));
+
+ mesh->rebuildCorners(oldtri, -1);
+ mesh->rebuildLookup(oldtri, -1);
+}
+
+bool Slope::isInside(const Vec3 &pos) const
+{
+
+ const Real alpha = -mAnglexy * M_PI / 180.;
+ const Real beta = -mAngleyz * M_PI / 180.;
+
+ Vec3 n(0, 1, 0);
+
+ n.x = std::sin(alpha) * std::cos(beta);
+ n.y = std::cos(alpha) * std::cos(beta);
+ n.z = std::sin(beta);
+
+ normalize(n);
+
+ const Real fac = std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
+
+ return ((n.x * (double)pos.x + n.y * (double)pos.y + n.z * (double)pos.z - mOrigin) / fac) <= 0.;
+}
+
//! Kernel: signed distance of each cell to the slope plane with normal 'n'
//! and offset 'origin'; 'fac' is the reciprocal normal length.
//! Generated KernelBase boilerplate (runs in constructor; getArgN/typeN are
//! consumed by the MantaFlow registry). Members are const references, so the
//! referenced arguments must outlive the kernel run (they do: the kernel
//! completes inside the constructor).
struct SlopeSDF : public KernelBase {
  SlopeSDF(const Vec3 &n, Grid<Real> &phiObs, const Real &fac, const Real &origin)
      : KernelBase(&phiObs, 0), n(n), phiObs(phiObs), fac(fac), origin(origin)
  {
    runMessage();
    run();
  }
  // Plane equation evaluated at integer cell coordinates (i, j, k); note this
  // samples at the cell corner, unlike the sphere/cylinder kernels which use
  // cell centers.
  inline void op(int i,
                 int j,
                 int k,
                 const Vec3 &n,
                 Grid<Real> &phiObs,
                 const Real &fac,
                 const Real &origin) const
  {

    phiObs(i, j, k) = (n.x * (double)i + n.y * (double)j + n.z * (double)k - origin) * fac;
  }
  inline const Vec3 &getArg0()
  {
    return n;
  }
  typedef Vec3 type0;
  inline Grid<Real> &getArg1()
  {
    return phiObs;
  }
  typedef Grid<Real> type1;
  inline const Real &getArg2()
  {
    return fac;
  }
  typedef Real type2;
  inline const Real &getArg3()
  {
    return origin;
  }
  typedef Real type3;
  void runMessage()
  {
    debMsg("Executing kernel SlopeSDF ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  // TBB body: 3D grids split over k-slices, 2D grids over j-rows (k = 0).
  void operator()(const tbb::blocked_range<IndexInt> &__r) const
  {
    const int _maxX = maxX;
    const int _maxY = maxY;
    if (maxZ > 1) {
      for (int k = __r.begin(); k != (int)__r.end(); k++)
        for (int j = 0; j < _maxY; j++)
          for (int i = 0; i < _maxX; i++)
            op(i, j, k, n, phiObs, fac, origin);
    }
    else {
      const int k = 0;
      for (int j = __r.begin(); j != (int)__r.end(); j++)
        for (int i = 0; i < _maxX; i++)
          op(i, j, k, n, phiObs, fac, origin);
    }
  }
  void run()
  {
    if (maxZ > 1)
      tbb::parallel_for(tbb::blocked_range<IndexInt>(minZ, maxZ), *this);
    else
      tbb::parallel_for(tbb::blocked_range<IndexInt>(0, maxY), *this);
  }
  const Vec3 &n;
  Grid<Real> &phiObs;
  const Real &fac;
  const Real &origin;
};
+
+void Slope::generateLevelset(Grid<Real> &phi)
+{
+
+ const Real alpha = -mAnglexy * M_PI / 180.;
+ const Real beta = -mAngleyz * M_PI / 180.;
+
+ Vec3 n(0, 1, 0);
+
+ n.x = std::sin(alpha) * std::cos(beta);
+ n.y = std::cos(alpha) * std::cos(beta);
+ n.z = std::sin(beta);
+
+ normalize(n);
+
+ const Real fac = 1. / std::sqrt(n.x * n.x + n.y * n.y + n.z * n.z);
+
+ SlopeSDF(n, phi, fac, mOrigin);
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/shapes.h b/extern/mantaflow/preprocessed/shapes.h
new file mode 100644
index 00000000000..09d3d23d938
--- /dev/null
+++ b/extern/mantaflow/preprocessed/shapes.h
@@ -0,0 +1,665 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * shapes classes
+ *
+ ******************************************************************************/
+
+#ifndef _SHAPES_H
+#define _SHAPES_H
+
+#include "manta.h"
+#include "vectorbase.h"
+#include "levelset.h"
+
+namespace Manta {
+
+// forward declaration
+class Mesh;
+
+//! Base class for all shapes
class Shape : public PbClass {
 public:
  // Discriminator for the concrete shape; set by each subclass constructor.
  enum GridType { TypeNone = 0, TypeBox = 1, TypeSphere = 2, TypeCylinder = 3, TypeSlope = 4 };

  Shape(FluidSolver *parent);
  // Generated Python binding: Shape(parent). All _W_* wrappers below follow
  // the same preprocessor-emitted pattern (unpack args, lock, call, finalize).
  static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    PbClass *obj = Pb::objFromPy(_self);
    if (obj)
      delete obj;
    try {
      PbArgs _args(_linargs, _kwds);
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(0, "Shape::Shape", !noTiming);
      {
        ArgLocker _lock;
        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
        obj = new Shape(parent);
        obj->registerObject(_self, &_args);
        _args.check();
      }
      pbFinalizePlugin(obj->getParent(), "Shape::Shape", !noTiming);
      return 0;
    }
    catch (std::exception &e) {
      pbSetError("Shape::Shape", e.what());
      return -1;
    }
  }

  //! Get the type of grid
  inline GridType getType() const
  {
    return mType;
  }

  //! Apply shape to flag grid, set inside cells to <value>
  void applyToGrid(GridBase *grid, FlagGrid *respectFlags = 0);
  // Generated Python binding for applyToGrid().
  static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Shape *pbo = dynamic_cast<Shape *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Shape::applyToGrid", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        GridBase *grid = _args.getPtr<GridBase>("grid", 0, &_lock);
        FlagGrid *respectFlags = _args.getPtrOpt<FlagGrid>("respectFlags", 1, 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->applyToGrid(grid, respectFlags);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Shape::applyToGrid", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Shape::applyToGrid", e.what());
      return 0;
    }
  }

  void applyToGridSmooth(GridBase *grid,
                         Real sigma = 1.0,
                         Real shift = 0,
                         FlagGrid *respectFlags = 0);
  // Generated Python binding for applyToGridSmooth().
  static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Shape *pbo = dynamic_cast<Shape *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Shape::applyToGridSmooth", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        GridBase *grid = _args.getPtr<GridBase>("grid", 0, &_lock);
        Real sigma = _args.getOpt<Real>("sigma", 1, 1.0, &_lock);
        Real shift = _args.getOpt<Real>("shift", 2, 0, &_lock);
        FlagGrid *respectFlags = _args.getPtrOpt<FlagGrid>("respectFlags", 3, 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->applyToGridSmooth(grid, sigma, shift, respectFlags);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Shape::applyToGridSmooth", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Shape::applyToGridSmooth", e.what());
      return 0;
    }
  }

  LevelsetGrid computeLevelset();
  // Generated Python binding for computeLevelset().
  static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Shape *pbo = dynamic_cast<Shape *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Shape::computeLevelset", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->computeLevelset());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Shape::computeLevelset", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Shape::computeLevelset", e.what());
      return 0;
    }
  }

  void collideMesh(Mesh &mesh);
  // Generated Python binding for collideMesh().
  static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Shape *pbo = dynamic_cast<Shape *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Shape::collideMesh", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        Mesh &mesh = *_args.getPtr<Mesh>("mesh", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->collideMesh(mesh);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Shape::collideMesh", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Shape::collideMesh", e.what());
      return 0;
    }
  }

  // Base implementation: shapes without a meaningful center return zero.
  virtual Vec3 getCenter() const
  {
    return Vec3::Zero;
  }
  // Generated Python binding for getCenter().
  static PyObject *_W_5(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Shape *pbo = dynamic_cast<Shape *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Shape::getCenter", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getCenter());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Shape::getCenter", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Shape::getCenter", e.what());
      return 0;
    }
  }

  // Base implementation is a no-op; subclasses with a center override it.
  virtual void setCenter(const Vec3 &center)
  {
  }
  // Generated Python binding for setCenter().
  static PyObject *_W_6(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Shape *pbo = dynamic_cast<Shape *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Shape::setCenter", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        const Vec3 &center = _args.get<Vec3>("center", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->setCenter(center);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Shape::setCenter", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Shape::setCenter", e.what());
      return 0;
    }
  }

  // Base implementation: shapes without an extent return zero.
  virtual Vec3 getExtent() const
  {
    return Vec3::Zero;
  }
  // Generated Python binding for getExtent().
  static PyObject *_W_7(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Shape *pbo = dynamic_cast<Shape *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Shape::getExtent", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        pbo->_args.copy(_args);
        _retval = toPy(pbo->getExtent());
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Shape::getExtent", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Shape::getExtent", e.what());
      return 0;
    }
  }

  //! Inside test of the shape
  virtual bool isInside(const Vec3 &pos) const;
  // Convenience wrapper: tests the center of cell (i, j, k).
  inline bool isInsideGrid(int i, int j, int k) const
  {
    return isInside(Vec3(i + 0.5, j + 0.5, k + 0.5));
  };

  // Default no-op implementations; concrete shapes override these.
  virtual void generateMesh(Mesh *mesh){};
  virtual void generateLevelset(Grid<Real> &phi){};

 protected:
  GridType mType;
 public:
  // Last Python call arguments, kept for the binding layer.
  PbArgs _args;
}
// Marker macro consumed by the generated registry (shapes.h.reg.cpp).
#define _C_Shape
;
+
+//! Dummy shape
class NullShape : public Shape {
 public:
  NullShape(FluidSolver *parent) : Shape(parent)
  {
  }
  // Generated Python binding: NullShape(parent).
  static int _W_8(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    PbClass *obj = Pb::objFromPy(_self);
    if (obj)
      delete obj;
    try {
      PbArgs _args(_linargs, _kwds);
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(0, "NullShape::NullShape", !noTiming);
      {
        ArgLocker _lock;
        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
        obj = new NullShape(parent);
        obj->registerObject(_self, &_args);
        _args.check();
      }
      pbFinalizePlugin(obj->getParent(), "NullShape::NullShape", !noTiming);
      return 0;
    }
    catch (std::exception &e) {
      pbSetError("NullShape::NullShape", e.what());
      return -1;
    }
  }

  // The null shape contains nothing and produces no geometry.
  virtual bool isInside(const Vec3 &pos) const
  {
    return false;
  }
  virtual void generateMesh(Mesh *mesh)
  {
  }

 protected:
  // Fills the levelset with a large positive distance, i.e. "empty
  // everywhere". Note the override narrows access to protected; calls
  // through a Shape pointer still dispatch here.
  virtual void generateLevelset(Grid<Real> &phi)
  {
    gridSetConst<Real>(phi, 1000.0f);
  }
 public:
  // Last Python call arguments, kept for the binding layer.
  PbArgs _args;
}
// Marker macro consumed by the generated registry (shapes.h.reg.cpp).
#define _C_NullShape
;
+
+//! Box shape
class Box : public Shape {
 public:
  // The box can be specified either via center+size or via the two
  // opposite corners p0/p1 (unused arguments stay Vec3::Invalid).
  Box(FluidSolver *parent,
      Vec3 center = Vec3::Invalid,
      Vec3 p0 = Vec3::Invalid,
      Vec3 p1 = Vec3::Invalid,
      Vec3 size = Vec3::Invalid);
  // Generated Python binding: Box(parent, center, p0, p1, size).
  static int _W_9(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    PbClass *obj = Pb::objFromPy(_self);
    if (obj)
      delete obj;
    try {
      PbArgs _args(_linargs, _kwds);
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(0, "Box::Box", !noTiming);
      {
        ArgLocker _lock;
        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
        Vec3 center = _args.getOpt<Vec3>("center", 1, Vec3::Invalid, &_lock);
        Vec3 p0 = _args.getOpt<Vec3>("p0", 2, Vec3::Invalid, &_lock);
        Vec3 p1 = _args.getOpt<Vec3>("p1", 3, Vec3::Invalid, &_lock);
        Vec3 size = _args.getOpt<Vec3>("size", 4, Vec3::Invalid, &_lock);
        obj = new Box(parent, center, p0, p1, size);
        obj->registerObject(_self, &_args);
        _args.check();
      }
      pbFinalizePlugin(obj->getParent(), "Box::Box", !noTiming);
      return 0;
    }
    catch (std::exception &e) {
      pbSetError("Box::Box", e.what());
      return -1;
    }
  }

  inline Vec3 getSize() const
  {
    return mP1 - mP0;
  }
  inline Vec3 getP0() const
  {
    return mP0;
  }
  inline Vec3 getP1() const
  {
    return mP1;
  }
  // Moves the box while preserving its size.
  virtual void setCenter(const Vec3 &center)
  {
    Vec3 dh = 0.5 * (mP1 - mP0);
    mP0 = center - dh;
    mP1 = center + dh;
  }
  virtual Vec3 getCenter() const
  {
    return 0.5 * (mP1 + mP0);
  }
  virtual Vec3 getExtent() const
  {
    return getSize();
  }
  virtual bool isInside(const Vec3 &pos) const;
  virtual void generateMesh(Mesh *mesh);
  virtual void generateLevelset(Grid<Real> &phi);

 protected:
  // Opposite box corners (mP0 componentwise minimum, mP1 maximum).
  Vec3 mP0, mP1;
 public:
  // Last Python call arguments, kept for the binding layer.
  PbArgs _args;
}
// Marker macro consumed by the generated registry (shapes.h.reg.cpp).
#define _C_Box
;
+
+//! Spherical shape
class Sphere : public Shape {
 public:
  // 'scale' allows non-uniform (ellipsoid) stretching of the unit sphere.
  Sphere(FluidSolver *parent, Vec3 center, Real radius, Vec3 scale = Vec3(1, 1, 1));
  // Generated Python binding: Sphere(parent, center, radius, scale).
  static int _W_10(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    PbClass *obj = Pb::objFromPy(_self);
    if (obj)
      delete obj;
    try {
      PbArgs _args(_linargs, _kwds);
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(0, "Sphere::Sphere", !noTiming);
      {
        ArgLocker _lock;
        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
        Vec3 center = _args.get<Vec3>("center", 1, &_lock);
        Real radius = _args.get<Real>("radius", 2, &_lock);
        Vec3 scale = _args.getOpt<Vec3>("scale", 3, Vec3(1, 1, 1), &_lock);
        obj = new Sphere(parent, center, radius, scale);
        obj->registerObject(_self, &_args);
        _args.check();
      }
      pbFinalizePlugin(obj->getParent(), "Sphere::Sphere", !noTiming);
      return 0;
    }
    catch (std::exception &e) {
      pbSetError("Sphere::Sphere", e.what());
      return -1;
    }
  }

  virtual void setCenter(const Vec3 &center)
  {
    mCenter = center;
  }
  virtual Vec3 getCenter() const
  {
    return mCenter;
  }
  inline Real getRadius() const
  {
    return mRadius;
  }
  // Bounding-box extent of the unscaled sphere (diameter in each axis).
  virtual Vec3 getExtent() const
  {
    return Vec3(2.0 * mRadius);
  }
  virtual bool isInside(const Vec3 &pos) const;
  virtual void generateMesh(Mesh *mesh);
  virtual void generateLevelset(Grid<Real> &phi);

 protected:
  Vec3 mCenter, mScale;
  Real mRadius;
 public:
  // Last Python call arguments, kept for the binding layer.
  PbArgs _args;
}
// Marker macro consumed by the generated registry (shapes.h.reg.cpp).
#define _C_Sphere
;
+
+//! Cylindrical shape
class Cylinder : public Shape {
 public:
  // 'z' is the half-axis vector; its direction becomes mZDir (unit) and its
  // length the half-height mZ (see the constructor in shapes.cpp).
  Cylinder(FluidSolver *parent, Vec3 center, Real radius, Vec3 z);
  // Generated Python binding: Cylinder(parent, center, radius, z).
  static int _W_11(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    PbClass *obj = Pb::objFromPy(_self);
    if (obj)
      delete obj;
    try {
      PbArgs _args(_linargs, _kwds);
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(0, "Cylinder::Cylinder", !noTiming);
      {
        ArgLocker _lock;
        FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
        Vec3 center = _args.get<Vec3>("center", 1, &_lock);
        Real radius = _args.get<Real>("radius", 2, &_lock);
        Vec3 z = _args.get<Vec3>("z", 3, &_lock);
        obj = new Cylinder(parent, center, radius, z);
        obj->registerObject(_self, &_args);
        _args.check();
      }
      pbFinalizePlugin(obj->getParent(), "Cylinder::Cylinder", !noTiming);
      return 0;
    }
    catch (std::exception &e) {
      pbSetError("Cylinder::Cylinder", e.what());
      return -1;
    }
  }

  void setRadius(Real r)
  {
    mRadius = r;
  }
  // Generated Python binding for setRadius().
  static PyObject *_W_12(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Cylinder *pbo = dynamic_cast<Cylinder *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Cylinder::setRadius", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        Real r = _args.get<Real>("r", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->setRadius(r);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Cylinder::setRadius", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Cylinder::setRadius", e.what());
      return 0;
    }
  }

  // Replaces the half-axis (direction and half-height), like the constructor.
  void setZ(Vec3 z)
  {
    mZDir = z;
    mZ = normalize(mZDir);
  }
  // Generated Python binding for setZ().
  static PyObject *_W_13(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
  {
    try {
      PbArgs _args(_linargs, _kwds);
      Cylinder *pbo = dynamic_cast<Cylinder *>(Pb::objFromPy(_self));
      bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
      pbPreparePlugin(pbo->getParent(), "Cylinder::setZ", !noTiming);
      PyObject *_retval = 0;
      {
        ArgLocker _lock;
        Vec3 z = _args.get<Vec3>("z", 0, &_lock);
        pbo->_args.copy(_args);
        _retval = getPyNone();
        pbo->setZ(z);
        pbo->_args.check();
      }
      pbFinalizePlugin(pbo->getParent(), "Cylinder::setZ", !noTiming);
      return _retval;
    }
    catch (std::exception &e) {
      pbSetError("Cylinder::setZ", e.what());
      return 0;
    }
  }

  virtual void setCenter(const Vec3 &center)
  {
    mCenter = center;
  }
  virtual Vec3 getCenter() const
  {
    return mCenter;
  }
  inline Real getRadius() const
  {
    return mRadius;
  }
  // Reconstructs the original half-axis vector (length mZ along mZDir).
  inline Vec3 getZ() const
  {
    return mZ * mZDir;
  }
  // Diagonal bounding extent (uniform in all axes).
  virtual Vec3 getExtent() const
  {
    return Vec3(2.0 * sqrt(square(mZ) + square(mRadius)));
  }
  virtual bool isInside(const Vec3 &pos) const;
  virtual void generateMesh(Mesh *mesh);
  virtual void generateLevelset(Grid<Real> &phi);

 protected:
  // mZDir: unit axis direction; mZ: half-height along that axis.
  Vec3 mCenter, mZDir;
  Real mRadius, mZ;
 public:
  // Last Python call arguments, kept for the binding layer.
  PbArgs _args;
}
// Marker macro consumed by the generated registry (shapes.h.reg.cpp).
#define _C_Cylinder
;
+
+//! Slope shape
+// generates a levelset based on a plane
+// plane is specified by two angles and an offset on the y axis in (offset vector would be ( 0,
+// offset, 0) ) the two angles are specified in degrees, between: y-axis and x-axis
+// y-axis and z-axis
+class Slope : public Shape {
+ public:
+ Slope(FluidSolver *parent, Real anglexy, Real angleyz, Real origin, Vec3 gs);
+ static int _W_14(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "Slope::Slope", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ Real anglexy = _args.get<Real>("anglexy", 1, &_lock);
+ Real angleyz = _args.get<Real>("angleyz", 2, &_lock);
+ Real origin = _args.get<Real>("origin", 3, &_lock);
+ Vec3 gs = _args.get<Vec3>("gs", 4, &_lock);
+ obj = new Slope(parent, anglexy, angleyz, origin, gs);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "Slope::Slope", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("Slope::Slope", e.what());
+ return -1;
+ }
+ }
+
+ virtual void setOrigin(const Real &origin)
+ {
+ mOrigin = origin;
+ }
+ virtual void setAnglexy(const Real &anglexy)
+ {
+ mAnglexy = anglexy;
+ }
+ virtual void setAngleyz(const Real &angleyz)
+ {
+ mAnglexy = angleyz;
+ }
+
+ inline Real getOrigin() const
+ {
+ return mOrigin;
+ }
+ inline Real getmAnglexy() const
+ {
+ return mAnglexy;
+ }
+ inline Real getmAngleyz() const
+ {
+ return mAngleyz;
+ }
+ virtual bool isInside(const Vec3 &pos) const;
+ virtual void generateMesh(Mesh *mesh);
+ virtual void generateLevelset(Grid<Real> &phi);
+
+ protected:
+ Real mAnglexy, mAngleyz;
+ Real mOrigin;
+ Vec3 mGs;
+ public:
+ PbArgs _args;
+}
+#define _C_Slope
+;
+
+} // namespace Manta
+#endif
diff --git a/extern/mantaflow/preprocessed/shapes.h.reg.cpp b/extern/mantaflow/preprocessed/shapes.h.reg.cpp
new file mode 100644
index 00000000000..72c9c61284c
--- /dev/null
+++ b/extern/mantaflow/preprocessed/shapes.h.reg.cpp
@@ -0,0 +1,73 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "shapes.h"
+namespace Manta {
+#ifdef _C_Box
+static const Pb::Register _R_12("Box", "Box", "Shape");
+template<> const char *Namify<Box>::S = "Box";
+static const Pb::Register _R_13("Box", "Box", Box::_W_9);
+#endif
+#ifdef _C_Cylinder
+static const Pb::Register _R_14("Cylinder", "Cylinder", "Shape");
+template<> const char *Namify<Cylinder>::S = "Cylinder";
+static const Pb::Register _R_15("Cylinder", "Cylinder", Cylinder::_W_11);
+static const Pb::Register _R_16("Cylinder", "setRadius", Cylinder::_W_12);
+static const Pb::Register _R_17("Cylinder", "setZ", Cylinder::_W_13);
+#endif
+#ifdef _C_NullShape
+static const Pb::Register _R_18("NullShape", "NullShape", "Shape");
+template<> const char *Namify<NullShape>::S = "NullShape";
+static const Pb::Register _R_19("NullShape", "NullShape", NullShape::_W_8);
+#endif
+#ifdef _C_Shape
+static const Pb::Register _R_20("Shape", "Shape", "PbClass");
+template<> const char *Namify<Shape>::S = "Shape";
+static const Pb::Register _R_21("Shape", "Shape", Shape::_W_0);
+static const Pb::Register _R_22("Shape", "applyToGrid", Shape::_W_1);
+static const Pb::Register _R_23("Shape", "applyToGridSmooth", Shape::_W_2);
+static const Pb::Register _R_24("Shape", "computeLevelset", Shape::_W_3);
+static const Pb::Register _R_25("Shape", "collideMesh", Shape::_W_4);
+static const Pb::Register _R_26("Shape", "getCenter", Shape::_W_5);
+static const Pb::Register _R_27("Shape", "setCenter", Shape::_W_6);
+static const Pb::Register _R_28("Shape", "getExtent", Shape::_W_7);
+#endif
+#ifdef _C_Slope
+static const Pb::Register _R_29("Slope", "Slope", "Shape");
+template<> const char *Namify<Slope>::S = "Slope";
+static const Pb::Register _R_30("Slope", "Slope", Slope::_W_14);
+#endif
+#ifdef _C_Sphere
+static const Pb::Register _R_31("Sphere", "Sphere", "Shape");
+template<> const char *Namify<Sphere>::S = "Sphere";
+static const Pb::Register _R_32("Sphere", "Sphere", Sphere::_W_10);
+#endif
+extern "C" {
// Reference every static Pb::Register object above so the linker cannot
// discard them; called once from the generated registry bootstrap.
void PbRegister_file_12()
{
  KEEP_UNUSED(_R_12);
  KEEP_UNUSED(_R_13);
  KEEP_UNUSED(_R_14);
  KEEP_UNUSED(_R_15);
  KEEP_UNUSED(_R_16);
  KEEP_UNUSED(_R_17);
  KEEP_UNUSED(_R_18);
  KEEP_UNUSED(_R_19);
  KEEP_UNUSED(_R_20);
  KEEP_UNUSED(_R_21);
  KEEP_UNUSED(_R_22);
  KEEP_UNUSED(_R_23);
  KEEP_UNUSED(_R_24);
  KEEP_UNUSED(_R_25);
  KEEP_UNUSED(_R_26);
  KEEP_UNUSED(_R_27);
  KEEP_UNUSED(_R_28);
  KEEP_UNUSED(_R_29);
  KEEP_UNUSED(_R_30);
  KEEP_UNUSED(_R_31);
  KEEP_UNUSED(_R_32);
}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/test.cpp b/extern/mantaflow/preprocessed/test.cpp
new file mode 100644
index 00000000000..b90c886efe7
--- /dev/null
+++ b/extern/mantaflow/preprocessed/test.cpp
@@ -0,0 +1,133 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Use this file to test new functionality
+ *
+ ******************************************************************************/
+
+#include "levelset.h"
+#include "commonkernels.h"
+#include "particle.h"
+#include <cmath>
+
+using namespace std;
+
+namespace Manta {
+
+// two simple example kernels
+
//! Example reduction kernel: sums all grid values via tbb::parallel_reduce.
//! Generated KernelBase boilerplate; the split constructor and join() follow
//! TBB's reduction-body protocol.
struct reductionTest : public KernelBase {
  reductionTest(const Grid<Real> &v) : KernelBase(&v, 0), v(v), sum(0)
  {
    runMessage();
    run();
  }
  // Per-element accumulation into this body's partial sum.
  inline void op(IndexInt idx, const Grid<Real> &v, double &sum)
  {
    sum += v[idx];
  }
  // Implicit conversion to the reduction result.
  inline operator double()
  {
    return sum;
  }
  inline double &getRet()
  {
    return sum;
  }
  inline const Grid<Real> &getArg0()
  {
    return v;
  }
  typedef Grid<Real> type0;
  void runMessage()
  {
    debMsg("Executing kernel reductionTest ", 3);
    debMsg("Kernel range"
               << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
           4);
  };
  void operator()(const tbb::blocked_range<IndexInt> &__r)
  {
    for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
      op(idx, v, sum);
  }
  void run()
  {
    tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
  }
  // Splitting constructor: new body starts with an empty partial sum.
  reductionTest(reductionTest &o, tbb::split) : KernelBase(o), v(o.v), sum(0)
  {
  }
  // Combine partial sums from split bodies.
  void join(const reductionTest &o)
  {
    sum += o.sum;
  }
  const Grid<Real> &v;
  double sum;
};
+
+struct minReduction : public KernelBase {
+ minReduction(const Grid<Real> &v) : KernelBase(&v, 0), v(v), sum(0)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx, const Grid<Real> &v, double &sum)
+ {
+ if (sum < v[idx])
+ sum = v[idx];
+ }
+ inline operator double()
+ {
+ return sum;
+ }
+ inline double &getRet()
+ {
+ return sum;
+ }
+ inline const Grid<Real> &getArg0()
+ {
+ return v;
+ }
+ typedef Grid<Real> type0;
+ void runMessage()
+ {
+ debMsg("Executing kernel minReduction ", 3);
+ debMsg("Kernel range"
+ << " x " << maxX << " y " << maxY << " z " << minZ << " - " << maxZ << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r)
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, v, sum);
+ }
+ void run()
+ {
+ tbb::parallel_reduce(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ minReduction(minReduction &o, tbb::split) : KernelBase(o), v(o.v), sum(0)
+ {
+ }
+ void join(const minReduction &o)
+ {
+ sum = min(sum, o.sum);
+ }
+ const Grid<Real> &v;
+ double sum;
+};
+
+// ... add more test code here if necessary ...
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/timing.cpp b/extern/mantaflow/preprocessed/timing.cpp
new file mode 100644
index 00000000000..ae572032e4a
--- /dev/null
+++ b/extern/mantaflow/preprocessed/timing.cpp
@@ -0,0 +1,128 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Plugin timing
+ *
+ ******************************************************************************/
+
+#include "timing.h"
+#include <fstream>
+
+using namespace std;
+namespace Manta {
+
+TimingData::TimingData() : updated(false), num(0)
+{
+}
+
+void TimingData::start(FluidSolver *parent, const string &name)
+{
+ mLastPlugin = name;
+ mPluginTimer.get();
+}
+
+void TimingData::stop(FluidSolver *parent, const string &name)
+{
+ if (mLastPlugin == name && name != "FluidSolver::step") {
+ updated = true;
+ const string parentName = parent ? parent->getName() : "";
+ MuTime diff = mPluginTimer.update();
+ vector<TimingSet> &cur = mData[name];
+ for (vector<TimingSet>::iterator it = cur.begin(); it != cur.end(); it++) {
+ if (it->solver == parentName) {
+ it->cur += diff;
+ it->updated = true;
+ return;
+ }
+ }
+ TimingSet s;
+ s.solver = parentName;
+ s.cur = diff;
+ s.updated = true;
+ cur.push_back(s);
+ }
+}
+
+void TimingData::step()
+{
+ if (updated)
+ num++;
+ std::map<std::string, std::vector<TimingSet>>::iterator it;
+ for (it = mData.begin(); it != mData.end(); it++) {
+ for (vector<TimingSet>::iterator it2 = it->second.begin(); it2 != it->second.end(); it2++) {
+ if (it2->updated) {
+ it2->total += it2->cur;
+ it2->num++;
+ }
+ it2->cur.clear();
+ it2->updated = false;
+ }
+ }
+ updated = false;
+}
+
+void TimingData::print()
+{
+ MuTime total;
+ total.clear();
+ std::map<std::string, std::vector<TimingSet>>::iterator it;
+ for (it = mData.begin(); it != mData.end(); it++)
+ for (vector<TimingSet>::iterator it2 = it->second.begin(); it2 != it->second.end(); it2++)
+ total += it2->cur;
+
+ printf("\n-- STEP %3d ----------------------------\n", num);
+ for (it = mData.begin(); it != mData.end(); it++) {
+ for (vector<TimingSet>::iterator it2 = it->second.begin(); it2 != it->second.end(); it2++) {
+ if (!it2->updated)
+ continue;
+ string name = it->first;
+ if (it->second.size() > 1 && !it2->solver.empty())
+ name += "[" + it2->solver + "]";
+ printf("[%4.1f%%] %s (%s)\n",
+ 100.0 * ((Real)it2->cur.time / (Real)total.time),
+ name.c_str(),
+ it2->cur.toString().c_str());
+ }
+ }
+ step();
+
+ printf("----------------------------------------\n");
+ printf("Total : %s\n\n", total.toString().c_str());
+}
+
+void TimingData::saveMean(const string &filename)
+{
+ ofstream ofs(filename.c_str());
+ step();
+ if (!ofs.good())
+ errMsg("can't open " + filename + " as timing log");
+ ofs << "Mean timings of " << num << " steps :" << endl << endl;
+ MuTime total;
+ total.clear();
+ std::map<std::string, std::vector<TimingSet>>::iterator it;
+ for (it = mData.begin(); it != mData.end(); it++)
+ for (vector<TimingSet>::iterator it2 = it->second.begin(); it2 != it->second.end(); it2++) {
+ total += it2->cur;
+ string name = it->first;
+ if (it->second.size() > 1)
+ name += "[" + it2->solver + "]";
+
+ ofs << name << " " << (it2->total / it2->num) << endl;
+ }
+
+ ofs << endl << "Total : " << total << " (mean " << total / num << ")" << endl;
+ ofs.close();
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/timing.h b/extern/mantaflow/preprocessed/timing.h
new file mode 100644
index 00000000000..a05e5cd3323
--- /dev/null
+++ b/extern/mantaflow/preprocessed/timing.h
@@ -0,0 +1,157 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Plugin timing
+ *
+ ******************************************************************************/
+
+#ifndef _TIMING_H
+#define _TIMING_H
+
+#include "manta.h"
+#include <map>
+namespace Manta {
+
+class TimingData {
+ private:
+ TimingData();
+
+ public:
+ static TimingData &instance()
+ {
+ static TimingData a;
+ return a;
+ }
+
+ void print();
+ void saveMean(const std::string &filename);
+ void start(FluidSolver *parent, const std::string &name);
+ void stop(FluidSolver *parent, const std::string &name);
+
+ protected:
+ void step();
+ struct TimingSet {
+ TimingSet() : num(0), updated(false)
+ {
+ cur.clear();
+ total.clear();
+ }
+ MuTime cur, total;
+ int num;
+ bool updated;
+ std::string solver;
+ };
+ bool updated;
+
+ int num;
+ MuTime mPluginTimer;
+ std::string mLastPlugin;
+ std::map<std::string, std::vector<TimingSet>> mData;
+};
+
+// Python interface
+class Timings : public PbClass {
+ public:
+ Timings() : PbClass(0)
+ {
+ }
+ static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "Timings::Timings", !noTiming);
+ {
+ ArgLocker _lock;
+ obj = new Timings();
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "Timings::Timings", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("Timings::Timings", e.what());
+ return -1;
+ }
+ }
+
+ void display()
+ {
+ TimingData::instance().print();
+ }
+ static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Timings *pbo = dynamic_cast<Timings *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Timings::display", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->display();
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Timings::display", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Timings::display", e.what());
+ return 0;
+ }
+ }
+ void saveMean(std::string file)
+ {
+ TimingData::instance().saveMean(file);
+ }
+ static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ Timings *pbo = dynamic_cast<Timings *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "Timings::saveMean", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ std::string file = _args.get<std::string>("file", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->saveMean(file);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "Timings::saveMean", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("Timings::saveMean", e.what());
+ return 0;
+ }
+ }
+
+ public:
+ PbArgs _args;
+}
+#define _C_Timings
+;
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/timing.h.reg.cpp b/extern/mantaflow/preprocessed/timing.h.reg.cpp
new file mode 100644
index 00000000000..c0f63ec7850
--- /dev/null
+++ b/extern/mantaflow/preprocessed/timing.h.reg.cpp
@@ -0,0 +1,24 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "timing.h"
+namespace Manta {
+#ifdef _C_Timings
+static const Pb::Register _R_16("Timings", "Timings", "PbClass");
+template<> const char *Namify<Timings>::S = "Timings";
+static const Pb::Register _R_17("Timings", "Timings", Timings::_W_0);
+static const Pb::Register _R_18("Timings", "display", Timings::_W_1);
+static const Pb::Register _R_19("Timings", "saveMean", Timings::_W_2);
+#endif
+extern "C" {
+void PbRegister_file_16()
+{
+ KEEP_UNUSED(_R_16);
+ KEEP_UNUSED(_R_17);
+ KEEP_UNUSED(_R_18);
+ KEEP_UNUSED(_R_19);
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/turbulencepart.cpp b/extern/mantaflow/preprocessed/turbulencepart.cpp
new file mode 100644
index 00000000000..168ae9cc2f2
--- /dev/null
+++ b/extern/mantaflow/preprocessed/turbulencepart.cpp
@@ -0,0 +1,288 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Turbulence particles
+ *
+ ******************************************************************************/
+
+#include "turbulencepart.h"
+#include "shapes.h"
+#include "randomstream.h"
+
+using namespace std;
+namespace Manta {
+
+TurbulenceParticleSystem::TurbulenceParticleSystem(FluidSolver *parent, WaveletNoiseField &noise)
+ : ParticleSystem<TurbulenceParticleData>(parent), noise(noise)
+{
+}
+
+ParticleBase *TurbulenceParticleSystem::clone()
+{
+ TurbulenceParticleSystem *nm = new TurbulenceParticleSystem(getParent(), noise);
+ compress();
+
+ nm->mData = mData;
+ nm->setName(getName());
+ return nm;
+}
+
+inline Vec3 hsv2rgb(Real h, Real s, Real v)
+{
+ Real r = 0, g = 0, b = 0;
+
+ int i = (int)(h * 6);
+ Real f = h * 6 - i;
+ Real p = v * (1 - s);
+ Real q = v * (1 - f * s);
+ Real t = v * (1 - (1 - f) * s);
+
+ switch (i % 6) {
+ case 0:
+ r = v, g = t, b = p;
+ break;
+ case 1:
+ r = q, g = v, b = p;
+ break;
+ case 2:
+ r = p, g = v, b = t;
+ break;
+ case 3:
+ r = p, g = q, b = v;
+ break;
+ case 4:
+ r = t, g = p, b = v;
+ break;
+ case 5:
+ r = v, g = p, b = q;
+ break;
+ default:
+ break;
+ }
+
+ return Vec3(r, g, b);
+}
+
+void TurbulenceParticleSystem::seed(Shape *shape, int num)
+{
+ static RandomStream rand(34894231);
+ Vec3 sz = shape->getExtent(), p0 = shape->getCenter() - sz * 0.5;
+ for (int i = 0; i < num; i++) {
+ Vec3 p;
+ do {
+ p = rand.getVec3() * sz + p0;
+ } while (!shape->isInside(p));
+ Real z = (p.z - p0.z) / sz.z;
+ add(TurbulenceParticleData(p, hsv2rgb(z, 0.75, 1.0)));
+ }
+}
+
+void TurbulenceParticleSystem::resetTexCoords(int num, const Vec3 &inflow)
+{
+ if (num == 0) {
+ for (int i = 0; i < size(); i++)
+ mData[i].tex0 = mData[i].pos - inflow;
+ }
+ else {
+ for (int i = 0; i < size(); i++)
+ mData[i].tex1 = mData[i].pos - inflow;
+ }
+}
+
+struct KnSynthesizeTurbulence : public KernelBase {
+ KnSynthesizeTurbulence(TurbulenceParticleSystem &p,
+ FlagGrid &flags,
+ WaveletNoiseField &noise,
+ Grid<Real> &kGrid,
+ Real alpha,
+ Real dt,
+ int octaves,
+ Real scale,
+ Real invL0,
+ Real kmin)
+ : KernelBase(p.size()),
+ p(p),
+ flags(flags),
+ noise(noise),
+ kGrid(kGrid),
+ alpha(alpha),
+ dt(dt),
+ octaves(octaves),
+ scale(scale),
+ invL0(invL0),
+ kmin(kmin)
+ {
+ runMessage();
+ run();
+ }
+ inline void op(IndexInt idx,
+ TurbulenceParticleSystem &p,
+ FlagGrid &flags,
+ WaveletNoiseField &noise,
+ Grid<Real> &kGrid,
+ Real alpha,
+ Real dt,
+ int octaves,
+ Real scale,
+ Real invL0,
+ Real kmin) const
+ {
+ const Real PERSISTENCE = 0.56123f;
+
+ const Vec3 pos(p[idx].pos);
+ if (flags.isInBounds(pos)) { // && !flags.isObstacle(pos)) {
+ Real k2 = kGrid.getInterpolated(pos) - kmin;
+ Real ks = k2 < 0 ? 0.0 : sqrt(k2);
+
+ // Wavelet noise lookup
+ Real amplitude = scale * ks;
+ Real multiplier = invL0;
+ Vec3 vel(0.);
+ for (int o = 0; o < octaves; o++) {
+ // Vec3 ns = noise.evaluateCurl(p[i].pos * multiplier) * amplitude;
+ Vec3 n0 = noise.evaluateCurl(p[idx].tex0 * multiplier) * amplitude;
+ Vec3 n1 = noise.evaluateCurl(p[idx].tex1 * multiplier) * amplitude;
+ vel += alpha * n0 + (1.0f - alpha) * n1;
+
+ // next scale
+ amplitude *= PERSISTENCE;
+ multiplier *= 2.0f;
+ }
+
+ // advection
+ Vec3 dx = vel * dt;
+ p[idx].pos += dx;
+ p[idx].tex0 += dx;
+ p[idx].tex1 += dx;
+ }
+ }
+ inline TurbulenceParticleSystem &getArg0()
+ {
+ return p;
+ }
+ typedef TurbulenceParticleSystem type0;
+ inline FlagGrid &getArg1()
+ {
+ return flags;
+ }
+ typedef FlagGrid type1;
+ inline WaveletNoiseField &getArg2()
+ {
+ return noise;
+ }
+ typedef WaveletNoiseField type2;
+ inline Grid<Real> &getArg3()
+ {
+ return kGrid;
+ }
+ typedef Grid<Real> type3;
+ inline Real &getArg4()
+ {
+ return alpha;
+ }
+ typedef Real type4;
+ inline Real &getArg5()
+ {
+ return dt;
+ }
+ typedef Real type5;
+ inline int &getArg6()
+ {
+ return octaves;
+ }
+ typedef int type6;
+ inline Real &getArg7()
+ {
+ return scale;
+ }
+ typedef Real type7;
+ inline Real &getArg8()
+ {
+ return invL0;
+ }
+ typedef Real type8;
+ inline Real &getArg9()
+ {
+ return kmin;
+ }
+ typedef Real type9;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnSynthesizeTurbulence ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, p, flags, noise, kGrid, alpha, dt, octaves, scale, invL0, kmin);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ TurbulenceParticleSystem &p;
+ FlagGrid &flags;
+ WaveletNoiseField &noise;
+ Grid<Real> &kGrid;
+ Real alpha;
+ Real dt;
+ int octaves;
+ Real scale;
+ Real invL0;
+ Real kmin;
+};
+
+void TurbulenceParticleSystem::synthesize(FlagGrid &flags,
+ Grid<Real> &k,
+ int octaves,
+ Real switchLength,
+ Real L0,
+ Real scale,
+ Vec3 inflowBias)
+{
+ static Real ctime = 0;
+ static Vec3 inflow(0.);
+ Real dt = getParent()->getDt();
+
+ // collect inflow bias
+ inflow += inflowBias * dt;
+
+ // alpha: hat function over time
+ Real oldAlpha = 2.0f * nmod(ctime / switchLength, Real(1.0));
+ ctime += dt;
+ Real alpha = 2.0f * nmod(ctime / switchLength, Real(1.0));
+
+ if (oldAlpha < 1.0f && alpha >= 1.0f)
+ resetTexCoords(0, inflow);
+ if (oldAlpha > alpha)
+ resetTexCoords(1, inflow);
+ if (alpha > 1.0f)
+ alpha = 2.0f - alpha;
+ alpha = 1.0;
+
+ KnSynthesizeTurbulence(
+ *this, flags, noise, k, alpha, dt, octaves, scale, 1.0f / L0, 1.5 * square(0.1));
+}
+
+void TurbulenceParticleSystem::deleteInObstacle(FlagGrid &flags)
+{
+ for (int i = 0; i < size(); i++)
+ if (flags.isObstacle(mData[i].pos))
+ mData[i].flag |= PDELETE;
+ compress();
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/turbulencepart.h b/extern/mantaflow/preprocessed/turbulencepart.h
new file mode 100644
index 00000000000..7e7fbae15fd
--- /dev/null
+++ b/extern/mantaflow/preprocessed/turbulencepart.h
@@ -0,0 +1,210 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Turbulence particles
+ *
+ ******************************************************************************/
+
+#ifndef _TURBULENCEPART_H_
+#define _TURBULENCEPART_H_
+
+#include "particle.h"
+#include "noisefield.h"
+
+namespace Manta {
+class Shape;
+
+struct TurbulenceParticleData {
+ TurbulenceParticleData() : pos(0.0), color(1.), tex0(0.0), tex1(0.0), flag(0)
+ {
+ }
+ TurbulenceParticleData(const Vec3 &p, const Vec3 &color = Vec3(1.))
+ : pos(p), color(color), tex0(p), tex1(p), flag(0)
+ {
+ }
+ Vec3 pos, color;
+ Vec3 tex0, tex1;
+ int flag;
+ static ParticleBase::SystemType getType()
+ {
+ return ParticleBase::TURBULENCE;
+ }
+};
+
+//! Turbulence particles
+class TurbulenceParticleSystem : public ParticleSystem<TurbulenceParticleData> {
+ public:
+ TurbulenceParticleSystem(FluidSolver *parent, WaveletNoiseField &noise);
+ static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "TurbulenceParticleSystem::TurbulenceParticleSystem", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ WaveletNoiseField &noise = *_args.getPtr<WaveletNoiseField>("noise", 1, &_lock);
+ obj = new TurbulenceParticleSystem(parent, noise);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(
+ obj->getParent(), "TurbulenceParticleSystem::TurbulenceParticleSystem", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("TurbulenceParticleSystem::TurbulenceParticleSystem", e.what());
+ return -1;
+ }
+ }
+
+ void resetTexCoords(int num, const Vec3 &inflow);
+ static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ TurbulenceParticleSystem *pbo = dynamic_cast<TurbulenceParticleSystem *>(
+ Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "TurbulenceParticleSystem::resetTexCoords", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ int num = _args.get<int>("num", 0, &_lock);
+ const Vec3 &inflow = _args.get<Vec3>("inflow", 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->resetTexCoords(num, inflow);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "TurbulenceParticleSystem::resetTexCoords", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("TurbulenceParticleSystem::resetTexCoords", e.what());
+ return 0;
+ }
+ }
+
+ void seed(Shape *source, int num);
+ static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ TurbulenceParticleSystem *pbo = dynamic_cast<TurbulenceParticleSystem *>(
+ Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "TurbulenceParticleSystem::seed", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Shape *source = _args.getPtr<Shape>("source", 0, &_lock);
+ int num = _args.get<int>("num", 1, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->seed(source, num);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "TurbulenceParticleSystem::seed", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("TurbulenceParticleSystem::seed", e.what());
+ return 0;
+ }
+ }
+
+ void synthesize(FlagGrid &flags,
+ Grid<Real> &k,
+ int octaves = 2,
+ Real switchLength = 10.0,
+ Real L0 = 0.1,
+ Real scale = 1.0,
+ Vec3 inflowBias = 0.0);
+ static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ TurbulenceParticleSystem *pbo = dynamic_cast<TurbulenceParticleSystem *>(
+ Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "TurbulenceParticleSystem::synthesize", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ Grid<Real> &k = *_args.getPtr<Grid<Real>>("k", 1, &_lock);
+ int octaves = _args.getOpt<int>("octaves", 2, 2, &_lock);
+ Real switchLength = _args.getOpt<Real>("switchLength", 3, 10.0, &_lock);
+ Real L0 = _args.getOpt<Real>("L0", 4, 0.1, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 5, 1.0, &_lock);
+ Vec3 inflowBias = _args.getOpt<Vec3>("inflowBias", 6, 0.0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->synthesize(flags, k, octaves, switchLength, L0, scale, inflowBias);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "TurbulenceParticleSystem::synthesize", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("TurbulenceParticleSystem::synthesize", e.what());
+ return 0;
+ }
+ }
+
+ void deleteInObstacle(FlagGrid &flags);
+ static PyObject *_W_4(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ TurbulenceParticleSystem *pbo = dynamic_cast<TurbulenceParticleSystem *>(
+ Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "TurbulenceParticleSystem::deleteInObstacle", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ FlagGrid &flags = *_args.getPtr<FlagGrid>("flags", 0, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->deleteInObstacle(flags);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "TurbulenceParticleSystem::deleteInObstacle", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("TurbulenceParticleSystem::deleteInObstacle", e.what());
+ return 0;
+ }
+ }
+
+ virtual ParticleBase *clone();
+
+ private:
+ WaveletNoiseField &noise;
+ public:
+ PbArgs _args;
+}
+#define _C_TurbulenceParticleSystem
+;
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/turbulencepart.h.reg.cpp b/extern/mantaflow/preprocessed/turbulencepart.h.reg.cpp
new file mode 100644
index 00000000000..1f035908830
--- /dev/null
+++ b/extern/mantaflow/preprocessed/turbulencepart.h.reg.cpp
@@ -0,0 +1,89 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "turbulencepart.h"
+namespace Manta {
+#ifdef _C_ParticleSystem
+static const Pb::Register _R_21("ParticleSystem<TurbulenceParticleData>",
+ "ParticleSystem<TurbulenceParticleData>",
+ "ParticleBase");
+template<>
+const char *Namify<ParticleSystem<TurbulenceParticleData>>::S =
+ "ParticleSystem<TurbulenceParticleData>";
+static const Pb::Register _R_22("ParticleSystem<TurbulenceParticleData>",
+ "ParticleSystem",
+ ParticleSystem<TurbulenceParticleData>::_W_2);
+static const Pb::Register _R_23("ParticleSystem<TurbulenceParticleData>",
+ "pySize",
+ ParticleSystem<TurbulenceParticleData>::_W_3);
+static const Pb::Register _R_24("ParticleSystem<TurbulenceParticleData>",
+ "setPos",
+ ParticleSystem<TurbulenceParticleData>::_W_4);
+static const Pb::Register _R_25("ParticleSystem<TurbulenceParticleData>",
+ "getPos",
+ ParticleSystem<TurbulenceParticleData>::_W_5);
+static const Pb::Register _R_26("ParticleSystem<TurbulenceParticleData>",
+ "getPosPdata",
+ ParticleSystem<TurbulenceParticleData>::_W_6);
+static const Pb::Register _R_27("ParticleSystem<TurbulenceParticleData>",
+ "setPosPdata",
+ ParticleSystem<TurbulenceParticleData>::_W_7);
+static const Pb::Register _R_28("ParticleSystem<TurbulenceParticleData>",
+ "clear",
+ ParticleSystem<TurbulenceParticleData>::_W_8);
+static const Pb::Register _R_29("ParticleSystem<TurbulenceParticleData>",
+ "advectInGrid",
+ ParticleSystem<TurbulenceParticleData>::_W_9);
+static const Pb::Register _R_30("ParticleSystem<TurbulenceParticleData>",
+ "projectOutside",
+ ParticleSystem<TurbulenceParticleData>::_W_10);
+static const Pb::Register _R_31("ParticleSystem<TurbulenceParticleData>",
+ "projectOutOfBnd",
+ ParticleSystem<TurbulenceParticleData>::_W_11);
+#endif
+#ifdef _C_TurbulenceParticleSystem
+static const Pb::Register _R_32("TurbulenceParticleSystem",
+ "TurbulenceParticleSystem",
+ "ParticleSystem<TurbulenceParticleData>");
+template<> const char *Namify<TurbulenceParticleSystem>::S = "TurbulenceParticleSystem";
+static const Pb::Register _R_33("TurbulenceParticleSystem",
+ "TurbulenceParticleSystem",
+ TurbulenceParticleSystem::_W_0);
+static const Pb::Register _R_34("TurbulenceParticleSystem",
+ "resetTexCoords",
+ TurbulenceParticleSystem::_W_1);
+static const Pb::Register _R_35("TurbulenceParticleSystem",
+ "seed",
+ TurbulenceParticleSystem::_W_2);
+static const Pb::Register _R_36("TurbulenceParticleSystem",
+ "synthesize",
+ TurbulenceParticleSystem::_W_3);
+static const Pb::Register _R_37("TurbulenceParticleSystem",
+ "deleteInObstacle",
+ TurbulenceParticleSystem::_W_4);
+#endif
+extern "C" {
+void PbRegister_file_21()
+{
+ KEEP_UNUSED(_R_21);
+ KEEP_UNUSED(_R_22);
+ KEEP_UNUSED(_R_23);
+ KEEP_UNUSED(_R_24);
+ KEEP_UNUSED(_R_25);
+ KEEP_UNUSED(_R_26);
+ KEEP_UNUSED(_R_27);
+ KEEP_UNUSED(_R_28);
+ KEEP_UNUSED(_R_29);
+ KEEP_UNUSED(_R_30);
+ KEEP_UNUSED(_R_31);
+ KEEP_UNUSED(_R_32);
+ KEEP_UNUSED(_R_33);
+ KEEP_UNUSED(_R_34);
+ KEEP_UNUSED(_R_35);
+ KEEP_UNUSED(_R_36);
+ KEEP_UNUSED(_R_37);
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/vortexpart.cpp b/extern/mantaflow/preprocessed/vortexpart.cpp
new file mode 100644
index 00000000000..0eba2743ee8
--- /dev/null
+++ b/extern/mantaflow/preprocessed/vortexpart.cpp
@@ -0,0 +1,251 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Vortex particles
+ * (warning, the vortex methods are currently experimental, and not fully supported!)
+ *
+ ******************************************************************************/
+
+#include "vortexpart.h"
+#include "integrator.h"
+#include "mesh.h"
+
+using namespace std;
+namespace Manta {
+
+// vortex particle effect: (cyl coord around wp)
+// u = -|wp|*rho*exp( (-rho^2-z^2)/(2sigma^2) ) e_phi
+inline Vec3 VortexKernel(const Vec3 &p, const vector<VortexParticleData> &vp, Real scale)
+{
+ Vec3 u(0.0);
+ for (size_t i = 0; i < vp.size(); i++) {
+ if (vp[i].flag & ParticleBase::PDELETE)
+ continue;
+
+ // cutoff radius
+ const Vec3 r = p - vp[i].pos;
+ const Real rlen2 = normSquare(r);
+ const Real sigma2 = square(vp[i].sigma);
+ if (rlen2 > 6.0 * sigma2 || rlen2 < 1e-8)
+ continue;
+
+ // split vortex strength
+ Vec3 vortNorm = vp[i].vorticity;
+ Real strength = normalize(vortNorm) * scale;
+
+ // transform in cylinder coordinate system
+ const Real rlen = sqrt(rlen2);
+ const Real z = dot(r, vortNorm);
+ const Vec3 ePhi = cross(r, vortNorm) / rlen;
+ const Real rho2 = rlen2 - z * z;
+
+ Real vortex = 0;
+ if (rho2 > 1e-10) {
+ // evaluate Kernel
+ vortex = strength * sqrt(rho2) * exp(rlen2 * -0.5 / sigma2);
+ }
+ u += vortex * ePhi;
+ }
+ return u;
+}
+
+struct _KnVpAdvectMesh : public KernelBase {
+ _KnVpAdvectMesh(const KernelBase &base,
+ vector<Node> &nodes,
+ const vector<VortexParticleData> &vp,
+ Real scale,
+ vector<Vec3> &u)
+ : KernelBase(base), nodes(nodes), vp(vp), scale(scale), u(u)
+ {
+ }
+ inline void op(IndexInt idx,
+ vector<Node> &nodes,
+ const vector<VortexParticleData> &vp,
+ Real scale,
+ vector<Vec3> &u) const
+ {
+ if (nodes[idx].flags & Mesh::NfFixed)
+ u[idx] = 0.0;
+ else
+ u[idx] = VortexKernel(nodes[idx].pos, vp, scale);
+ }
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, nodes, vp, scale, u);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ vector<Node> &nodes;
+ const vector<VortexParticleData> &vp;
+ Real scale;
+ vector<Vec3> &u;
+};
+struct KnVpAdvectMesh : public KernelBase {
+ KnVpAdvectMesh(vector<Node> &nodes, const vector<VortexParticleData> &vp, Real scale)
+ : KernelBase(nodes.size()),
+ _inner(KernelBase(nodes.size()), nodes, vp, scale, u),
+ nodes(nodes),
+ vp(vp),
+ scale(scale),
+ u((size))
+ {
+ runMessage();
+ run();
+ }
+ void run()
+ {
+ _inner.run();
+ }
+ inline operator vector<Vec3>()
+ {
+ return u;
+ }
+ inline vector<Vec3> &getRet()
+ {
+ return u;
+ }
+ inline vector<Node> &getArg0()
+ {
+ return nodes;
+ }
+ typedef vector<Node> type0;
+ inline const vector<VortexParticleData> &getArg1()
+ {
+ return vp;
+ }
+ typedef vector<VortexParticleData> type1;
+ inline Real &getArg2()
+ {
+ return scale;
+ }
+ typedef Real type2;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnVpAdvectMesh ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ _KnVpAdvectMesh _inner;
+ vector<Node> &nodes;
+ const vector<VortexParticleData> &vp;
+ Real scale;
+ vector<Vec3> u;
+};
+
+struct _KnVpAdvectSelf : public KernelBase {
+ _KnVpAdvectSelf(const KernelBase &base,
+ vector<VortexParticleData> &vp,
+ Real scale,
+ vector<Vec3> &u)
+ : KernelBase(base), vp(vp), scale(scale), u(u)
+ {
+ }
+ inline void op(IndexInt idx, vector<VortexParticleData> &vp, Real scale, vector<Vec3> &u) const
+ {
+ if (vp[idx].flag & ParticleBase::PDELETE)
+ u[idx] = 0.0;
+ else
+ u[idx] = VortexKernel(vp[idx].pos, vp, scale);
+ }
+ void operator()(const tbb::blocked_range<IndexInt> &__r) const
+ {
+ for (IndexInt idx = __r.begin(); idx != (IndexInt)__r.end(); idx++)
+ op(idx, vp, scale, u);
+ }
+ void run()
+ {
+ tbb::parallel_for(tbb::blocked_range<IndexInt>(0, size), *this);
+ }
+ vector<VortexParticleData> &vp;
+ Real scale;
+ vector<Vec3> &u;
+};
+struct KnVpAdvectSelf : public KernelBase {
+ KnVpAdvectSelf(vector<VortexParticleData> &vp, Real scale)
+ : KernelBase(vp.size()),
+ _inner(KernelBase(vp.size()), vp, scale, u),
+ vp(vp),
+ scale(scale),
+ u((size))
+ {
+ runMessage();
+ run();
+ }
+ void run()
+ {
+ _inner.run();
+ }
+ inline operator vector<Vec3>()
+ {
+ return u;
+ }
+ inline vector<Vec3> &getRet()
+ {
+ return u;
+ }
+ inline vector<VortexParticleData> &getArg0()
+ {
+ return vp;
+ }
+ typedef vector<VortexParticleData> type0;
+ inline Real &getArg1()
+ {
+ return scale;
+ }
+ typedef Real type1;
+ void runMessage()
+ {
+ debMsg("Executing kernel KnVpAdvectSelf ", 3);
+ debMsg("Kernel range"
+ << " size " << size << " ",
+ 4);
+ };
+ _KnVpAdvectSelf _inner;
+ vector<VortexParticleData> &vp;
+ Real scale;
+ vector<Vec3> u;
+};
+
+VortexParticleSystem::VortexParticleSystem(FluidSolver *parent)
+ : ParticleSystem<VortexParticleData>(parent)
+{
+}
+
+void VortexParticleSystem::advectSelf(Real scale, int integrationMode)
+{
+ KnVpAdvectSelf kernel(mData, scale * getParent()->getDt());
+ integratePointSet(kernel, integrationMode);
+}
+
+void VortexParticleSystem::applyToMesh(Mesh &mesh, Real scale, int integrationMode)
+{
+ KnVpAdvectMesh kernel(mesh.getNodeData(), mData, scale * getParent()->getDt());
+ integratePointSet(kernel, integrationMode);
+}
+
+ParticleBase *VortexParticleSystem::clone()
+{
+ VortexParticleSystem *nm = new VortexParticleSystem(getParent());
+ compress();
+
+ nm->mData = mData;
+ nm->setName(getName());
+ return nm;
+}
+
+} // namespace Manta
diff --git a/extern/mantaflow/preprocessed/vortexpart.h b/extern/mantaflow/preprocessed/vortexpart.h
new file mode 100644
index 00000000000..20335c20058
--- /dev/null
+++ b/extern/mantaflow/preprocessed/vortexpart.h
@@ -0,0 +1,138 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Vortex particles
+ * (warning, the vortex methods are currently experimental, and not fully supported!)
+ *
+ ******************************************************************************/
+
+#ifndef _VORTEXPART_H
+#define _VORTEXPART_H
+
+#include "particle.h"
+
+namespace Manta {
+class Mesh;
+
+struct VortexParticleData {
+ VortexParticleData() : pos(0.0), vorticity(0.0), sigma(0), flag(0)
+ {
+ }
+ VortexParticleData(const Vec3 &p, const Vec3 &v, Real sig)
+ : pos(p), vorticity(v), sigma(sig), flag(0)
+ {
+ }
+ Vec3 pos, vorticity;
+ Real sigma;
+ int flag;
+ static ParticleBase::SystemType getType()
+ {
+ return ParticleBase::VORTEX;
+ }
+};
+
+//! Vortex particles
+class VortexParticleSystem : public ParticleSystem<VortexParticleData> {
+ public:
+ VortexParticleSystem(FluidSolver *parent);
+ static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "VortexParticleSystem::VortexParticleSystem", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ obj = new VortexParticleSystem(parent);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "VortexParticleSystem::VortexParticleSystem", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("VortexParticleSystem::VortexParticleSystem", e.what());
+ return -1;
+ }
+ }
+
+ void advectSelf(Real scale = 1.0, int integrationMode = IntRK4);
+ static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ VortexParticleSystem *pbo = dynamic_cast<VortexParticleSystem *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "VortexParticleSystem::advectSelf", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Real scale = _args.getOpt<Real>("scale", 0, 1.0, &_lock);
+ int integrationMode = _args.getOpt<int>("integrationMode", 1, IntRK4, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->advectSelf(scale, integrationMode);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "VortexParticleSystem::advectSelf", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("VortexParticleSystem::advectSelf", e.what());
+ return 0;
+ }
+ }
+
+ void applyToMesh(Mesh &mesh, Real scale = 1.0, int integrationMode = IntRK4);
+ static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ VortexParticleSystem *pbo = dynamic_cast<VortexParticleSystem *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "VortexParticleSystem::applyToMesh", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ Mesh &mesh = *_args.getPtr<Mesh>("mesh", 0, &_lock);
+ Real scale = _args.getOpt<Real>("scale", 1, 1.0, &_lock);
+ int integrationMode = _args.getOpt<int>("integrationMode", 2, IntRK4, &_lock);
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->applyToMesh(mesh, scale, integrationMode);
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "VortexParticleSystem::applyToMesh", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("VortexParticleSystem::applyToMesh", e.what());
+ return 0;
+ }
+ }
+
+ virtual ParticleBase *clone();
+ public:
+ PbArgs _args;
+}
+#define _C_VortexParticleSystem
+;
+
+} // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/vortexpart.h.reg.cpp b/extern/mantaflow/preprocessed/vortexpart.h.reg.cpp
new file mode 100644
index 00000000000..0dad1802f3c
--- /dev/null
+++ b/extern/mantaflow/preprocessed/vortexpart.h.reg.cpp
@@ -0,0 +1,76 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "vortexpart.h"
+namespace Manta {
+#ifdef _C_ParticleSystem
+static const Pb::Register _R_20("ParticleSystem<VortexParticleData>",
+ "ParticleSystem<VortexParticleData>",
+ "ParticleBase");
+template<>
+const char *Namify<ParticleSystem<VortexParticleData>>::S = "ParticleSystem<VortexParticleData>";
+static const Pb::Register _R_21("ParticleSystem<VortexParticleData>",
+ "ParticleSystem",
+ ParticleSystem<VortexParticleData>::_W_2);
+static const Pb::Register _R_22("ParticleSystem<VortexParticleData>",
+ "pySize",
+ ParticleSystem<VortexParticleData>::_W_3);
+static const Pb::Register _R_23("ParticleSystem<VortexParticleData>",
+ "setPos",
+ ParticleSystem<VortexParticleData>::_W_4);
+static const Pb::Register _R_24("ParticleSystem<VortexParticleData>",
+ "getPos",
+ ParticleSystem<VortexParticleData>::_W_5);
+static const Pb::Register _R_25("ParticleSystem<VortexParticleData>",
+ "getPosPdata",
+ ParticleSystem<VortexParticleData>::_W_6);
+static const Pb::Register _R_26("ParticleSystem<VortexParticleData>",
+ "setPosPdata",
+ ParticleSystem<VortexParticleData>::_W_7);
+static const Pb::Register _R_27("ParticleSystem<VortexParticleData>",
+ "clear",
+ ParticleSystem<VortexParticleData>::_W_8);
+static const Pb::Register _R_28("ParticleSystem<VortexParticleData>",
+ "advectInGrid",
+ ParticleSystem<VortexParticleData>::_W_9);
+static const Pb::Register _R_29("ParticleSystem<VortexParticleData>",
+ "projectOutside",
+ ParticleSystem<VortexParticleData>::_W_10);
+static const Pb::Register _R_30("ParticleSystem<VortexParticleData>",
+ "projectOutOfBnd",
+ ParticleSystem<VortexParticleData>::_W_11);
+#endif
+#ifdef _C_VortexParticleSystem
+static const Pb::Register _R_31("VortexParticleSystem",
+ "VortexParticleSystem",
+ "ParticleSystem<VortexParticleData>");
+template<> const char *Namify<VortexParticleSystem>::S = "VortexParticleSystem";
+static const Pb::Register _R_32("VortexParticleSystem",
+ "VortexParticleSystem",
+ VortexParticleSystem::_W_0);
+static const Pb::Register _R_33("VortexParticleSystem", "advectSelf", VortexParticleSystem::_W_1);
+static const Pb::Register _R_34("VortexParticleSystem", "applyToMesh", VortexParticleSystem::_W_2);
+#endif
+extern "C" {
+void PbRegister_file_20()
+{
+ KEEP_UNUSED(_R_20);
+ KEEP_UNUSED(_R_21);
+ KEEP_UNUSED(_R_22);
+ KEEP_UNUSED(_R_23);
+ KEEP_UNUSED(_R_24);
+ KEEP_UNUSED(_R_25);
+ KEEP_UNUSED(_R_26);
+ KEEP_UNUSED(_R_27);
+ KEEP_UNUSED(_R_28);
+ KEEP_UNUSED(_R_29);
+ KEEP_UNUSED(_R_30);
+ KEEP_UNUSED(_R_31);
+ KEEP_UNUSED(_R_32);
+ KEEP_UNUSED(_R_33);
+ KEEP_UNUSED(_R_34);
+}
+}
+} // namespace Manta \ No newline at end of file
diff --git a/extern/mantaflow/preprocessed/vortexsheet.cpp b/extern/mantaflow/preprocessed/vortexsheet.cpp
new file mode 100644
index 00000000000..695b881006d
--- /dev/null
+++ b/extern/mantaflow/preprocessed/vortexsheet.cpp
@@ -0,0 +1,116 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Vortex sheets
+ * (warning, the vortex methods are currently experimental, and not fully supported!)
+ *
+ ******************************************************************************/
+
+#include "vortexsheet.h"
+#include "solvana.h"
+
+using namespace std;
+namespace Manta {
+
+// *****************************************************************************
+// VorticityChannel class members
+
+// *****************************************************************************
+// VortexSheet Mesh class members
+
+VortexSheetMesh::VortexSheetMesh(FluidSolver *parent) : Mesh(parent), mTexOffset(0.0f)
+{
+ addTriChannel(&mVorticity);
+ addNodeChannel(&mTex1);
+ addNodeChannel(&mTex2);
+ addNodeChannel(&mTurb);
+}
+
+Mesh *VortexSheetMesh::clone()
+{
+ VortexSheetMesh *nm = new VortexSheetMesh(mParent);
+ *nm = *this;
+ nm->setName(getName());
+ return nm;
+}
+
+void VortexSheetMesh::calcVorticity()
+{
+ for (size_t tri = 0; tri < mTris.size(); tri++) {
+ VortexSheetInfo &v = mVorticity.data[tri];
+ Vec3 e0 = getEdge(tri, 0), e1 = getEdge(tri, 1), e2 = getEdge(tri, 2);
+ Real area = getFaceArea(tri);
+
+ if (area < 1e-10) {
+ v.smokeAmount = 0;
+ v.vorticity = 0;
+ }
+ else {
+ v.smokeAmount = 0;
+ v.vorticity = (v.circulation[0] * e0 + v.circulation[1] * e1 + v.circulation[2] * e2) / area;
+ }
+ }
+}
+
+void VortexSheetMesh::calcCirculation()
+{
+ for (size_t tri = 0; tri < mTris.size(); tri++) {
+ VortexSheetInfo &v = mVorticity.data[tri];
+ Vec3 e0 = getEdge(tri, 0), e1 = getEdge(tri, 1), e2 = getEdge(tri, 2);
+ Real area = getFaceArea(tri);
+
+ if (area < 1e-10 || normSquare(v.vorticity) < 1e-10) {
+ v.circulation = 0;
+ continue;
+ }
+
+ float cx, cy, cz;
+ SolveOverconstraint34(e0.x,
+ e0.y,
+ e0.z,
+ e1.x,
+ e1.y,
+ e1.z,
+ e2.x,
+ e2.y,
+ e2.z,
+ v.vorticity.x,
+ v.vorticity.y,
+ v.vorticity.z,
+ cx,
+ cy,
+ cz);
+ v.circulation = Vec3(cx, cy, cz) * area;
+ }
+}
+
+void VortexSheetMesh::resetTex1()
+{
+ for (size_t i = 0; i < mNodes.size(); i++)
+ mTex1.data[i] = mNodes[i].pos + mTexOffset;
+}
+
+void VortexSheetMesh::resetTex2()
+{
+ for (size_t i = 0; i < mNodes.size(); i++)
+ mTex2.data[i] = mNodes[i].pos + mTexOffset;
+}
+
+void VortexSheetMesh::reinitTexCoords()
+{
+ resetTex1();
+ resetTex2();
+}
+
+}; // namespace Manta
diff --git a/extern/mantaflow/preprocessed/vortexsheet.h b/extern/mantaflow/preprocessed/vortexsheet.h
new file mode 100644
index 00000000000..1fd53784f48
--- /dev/null
+++ b/extern/mantaflow/preprocessed/vortexsheet.h
@@ -0,0 +1,251 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep generate).
+
+/******************************************************************************
+ *
+ * MantaFlow fluid solver framework
+ * Copyright 2011 Tobias Pfaff, Nils Thuerey
+ *
+ * This program is free software, distributed under the terms of the
+ * Apache License, Version 2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Vortex sheets
+ * (warning, the vortex methods are currently experimental, and not fully supported!)
+ *
+ ******************************************************************************/
+
+#ifndef _VORTEXSHEET_H
+#define _VORTEXSHEET_H
+
+#include "mesh.h"
+
+namespace Manta {
+
+//! Stores vortex sheet info
+struct VortexSheetInfo {
+ VortexSheetInfo()
+ : vorticity(0.0),
+ vorticitySmoothed(0.0),
+ circulation(0.0),
+ smokeAmount(1.0),
+ smokeParticles(0.0)
+ {
+ }
+
+ Vec3 vorticity;
+ Vec3 vorticitySmoothed;
+ Vec3 circulation;
+ Real smokeAmount, smokeParticles;
+};
+
+//! Manages vortex sheet info
+struct VorticityChannel : public SimpleTriChannel<VortexSheetInfo> {
+ virtual TriChannel *clone()
+ {
+ VorticityChannel *vc = new VorticityChannel();
+ *vc = *this;
+ return vc;
+ }
+};
+
+//! Manages 3D texture coordinates
+struct TexCoord3Channel : public SimpleNodeChannel<Vec3> {
+ virtual NodeChannel *clone()
+ {
+ TexCoord3Channel *tc = new TexCoord3Channel();
+ *tc = *this;
+ return tc;
+ }
+
+ void addInterpol(int a, int b, Real alpha)
+ {
+ data.push_back((1.0 - alpha) * data[a] + alpha * data[b]);
+ }
+ void mergeWith(int node, int delnode, Real alpha)
+ {
+ data[node] = 0.5 * (data[node] + data[delnode]);
+ }
+};
+
+struct TurbulenceInfo {
+ TurbulenceInfo() : k(0.0), epsilon(0.0)
+ {
+ }
+ TurbulenceInfo(const TurbulenceInfo &a, const TurbulenceInfo &b, Real alpha)
+ : k((1.0 - alpha) * a.k + alpha * b.k),
+ epsilon((1.0 - alpha) * a.epsilon + alpha * b.epsilon)
+ {
+ }
+ Real k, epsilon;
+};
+
+//! Manages k-epsilon information
+struct TurbulenceChannel : public SimpleNodeChannel<TurbulenceInfo> {
+ virtual NodeChannel *clone()
+ {
+ TurbulenceChannel *tc = new TurbulenceChannel();
+ *tc = *this;
+ return tc;
+ }
+
+ void addInterpol(int a, int b, Real alpha)
+ {
+ data.push_back(TurbulenceInfo(data[a], data[b], alpha));
+ }
+ void mergeWith(int node, int delnode, Real alpha)
+ {
+ data[node] = TurbulenceInfo(data[node], data[delnode], 0.5);
+ }
+};
+
+//! Typed Mesh with a vorticity and 2 texcoord3 channels
+class VortexSheetMesh : public Mesh {
+ public:
+ VortexSheetMesh(FluidSolver *parent);
+ static int _W_0(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ PbClass *obj = Pb::objFromPy(_self);
+ if (obj)
+ delete obj;
+ try {
+ PbArgs _args(_linargs, _kwds);
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(0, "VortexSheetMesh::VortexSheetMesh", !noTiming);
+ {
+ ArgLocker _lock;
+ FluidSolver *parent = _args.getPtr<FluidSolver>("parent", 0, &_lock);
+ obj = new VortexSheetMesh(parent);
+ obj->registerObject(_self, &_args);
+ _args.check();
+ }
+ pbFinalizePlugin(obj->getParent(), "VortexSheetMesh::VortexSheetMesh", !noTiming);
+ return 0;
+ }
+ catch (std::exception &e) {
+ pbSetError("VortexSheetMesh::VortexSheetMesh", e.what());
+ return -1;
+ }
+ }
+
+ virtual Mesh *clone();
+
+ virtual MeshType getType()
+ {
+ return TypeVortexSheet;
+ }
+
+ inline VortexSheetInfo &sheet(int i)
+ {
+ return mVorticity.data[i];
+ };
+ inline Vec3 &tex1(int i)
+ {
+ return mTex1.data[i];
+ }
+ inline Vec3 &tex2(int i)
+ {
+ return mTex2.data[i];
+ }
+ inline TurbulenceInfo &turb(int i)
+ {
+ return mTurb.data[i];
+ }
+ void setReferenceTexOffset(const Vec3 &ref)
+ {
+ mTexOffset = ref;
+ }
+ void resetTex1();
+ void resetTex2();
+
+ void calcCirculation();
+ static PyObject *_W_1(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ VortexSheetMesh *pbo = dynamic_cast<VortexSheetMesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "VortexSheetMesh::calcCirculation", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->calcCirculation();
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "VortexSheetMesh::calcCirculation", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("VortexSheetMesh::calcCirculation", e.what());
+ return 0;
+ }
+ }
+
+ void calcVorticity();
+ static PyObject *_W_2(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ VortexSheetMesh *pbo = dynamic_cast<VortexSheetMesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "VortexSheetMesh::calcVorticity", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->calcVorticity();
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "VortexSheetMesh::calcVorticity", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("VortexSheetMesh::calcVorticity", e.what());
+ return 0;
+ }
+ }
+
+ void reinitTexCoords();
+ static PyObject *_W_3(PyObject *_self, PyObject *_linargs, PyObject *_kwds)
+ {
+ try {
+ PbArgs _args(_linargs, _kwds);
+ VortexSheetMesh *pbo = dynamic_cast<VortexSheetMesh *>(Pb::objFromPy(_self));
+ bool noTiming = _args.getOpt<bool>("notiming", -1, 0);
+ pbPreparePlugin(pbo->getParent(), "VortexSheetMesh::reinitTexCoords", !noTiming);
+ PyObject *_retval = 0;
+ {
+ ArgLocker _lock;
+ pbo->_args.copy(_args);
+ _retval = getPyNone();
+ pbo->reinitTexCoords();
+ pbo->_args.check();
+ }
+ pbFinalizePlugin(pbo->getParent(), "VortexSheetMesh::reinitTexCoords", !noTiming);
+ return _retval;
+ }
+ catch (std::exception &e) {
+ pbSetError("VortexSheetMesh::reinitTexCoords", e.what());
+ return 0;
+ }
+ }
+
+ protected:
+ Vec3 mTexOffset;
+ VorticityChannel mVorticity;
+ TexCoord3Channel mTex1, mTex2;
+ TurbulenceChannel mTurb;
+ public:
+ PbArgs _args;
+}
+#define _C_VortexSheetMesh
+;
+
+}; // namespace Manta
+
+#endif
diff --git a/extern/mantaflow/preprocessed/vortexsheet.h.reg.cpp b/extern/mantaflow/preprocessed/vortexsheet.h.reg.cpp
new file mode 100644
index 00000000000..c86f530f771
--- /dev/null
+++ b/extern/mantaflow/preprocessed/vortexsheet.h.reg.cpp
@@ -0,0 +1,26 @@
+
+
+// DO NOT EDIT !
+// This file is generated using the MantaFlow preprocessor (prep link).
+
+#include "vortexsheet.h"
+namespace Manta {
+#ifdef _C_VortexSheetMesh
+static const Pb::Register _R_14("VortexSheetMesh", "VortexSheetMesh", "Mesh");
+template<> const char *Namify<VortexSheetMesh>::S = "VortexSheetMesh";
+static const Pb::Register _R_15("VortexSheetMesh", "VortexSheetMesh", VortexSheetMesh::_W_0);
+static const Pb::Register _R_16("VortexSheetMesh", "calcCirculation", VortexSheetMesh::_W_1);
+static const Pb::Register _R_17("VortexSheetMesh", "calcVorticity", VortexSheetMesh::_W_2);
+static const Pb::Register _R_18("VortexSheetMesh", "reinitTexCoords", VortexSheetMesh::_W_3);
+#endif
+extern "C" {
+void PbRegister_file_14()
+{
+ KEEP_UNUSED(_R_14);
+ KEEP_UNUSED(_R_15);
+ KEEP_UNUSED(_R_16);
+ KEEP_UNUSED(_R_17);
+ KEEP_UNUSED(_R_18);
+}
+}
+} // namespace Manta \ No newline at end of file